diff --git a/docs/CONFIG-VARS.md b/docs/CONFIG-VARS.md
index baa44f58..f5e8fa1a 100644
--- a/docs/CONFIG-VARS.md
+++ b/docs/CONFIG-VARS.md
@@ -11,6 +11,8 @@ Supported configuration variables are listed in the tables below. All variables
- [Using Static Credentials](#using-static-credentials)
- [Using AWS Profile](#using-aws-profile)
- [Admin Access](#admin-access)
+ - [Public Access CIDRs](#public-access-cidrs)
+ - [Private Access CIDRs](#private-access-cidrs)
- [Networking](#networking)
- [Use Existing](#use-existing)
- [IAM](#iam)
@@ -72,14 +74,35 @@ NOTE: When deploying infrastructure into a private network (e.g. a VPN), with no
NOTE: The script will either create a new Security Group, or use an existing Security Group, if specified in the `security_group_id` variable.
+### Public Access CIDRs
+
You can use `default_public_access_cidrs` to set a default range for all created resources. To set different ranges for other resources, define the appropriate variable. Use an empty list [] to disallow access explicitly.
| Name | Description | Type | Default | Notes |
| default_public_access_cidrs | IP address ranges that are allowed to access all created cloud resources | list of strings | | Set a default for all resources. |
-| cluster_endpoint_public_access_cidrs | IP address ranges that are allowed to access the AKS cluster API | list of strings | | For client admin access to the cluster api (by kubectl, for example). Only used with `cluster_api_mode=public` |
+| cluster_endpoint_public_access_cidrs | IP address ranges that are allowed to access the EKS cluster API | list of strings | | For client admin access to the cluster api (by kubectl, for example). Only used with `cluster_api_mode=public` |
| vm_public_access_cidrs | IP address ranges that are allowed to access the VMs | list of strings | | Opens port 22 for SSH access to the jump server and/or NFS VM by adding Ingress Rule on the Security Group. Only used with `create_jump_public_ip=true` or `create_nfs_public_ip=true`. |
-| postgres_access_cidrs | IP address ranges that are allowed to access the AWS PostgreSQL server | list of strings || Opens port 5432 by adding Ingress Rule on the Security Group. Only used when creating postgres instances.|
+| postgres_public_access_cidrs | IP address ranges that are allowed to access the AWS PostgreSQL server | list of strings || Opens port 5432 by adding Ingress Rule on the Security Group. Only used when creating postgres instances.|
+
+### Private Access CIDRs
+
+For resources accessible at private IP addresses only, it may be necessary, depending upon your networking configuration, to specify additional CIDRs for clients requiring access to those resources. There are three private access CIDR variables provided so that you may specify distinct IP ranges needing access for each of the three different contexts:
+
+1. Cluster API Server Endpoint is Private - use `cluster_endpoint_private_access_cidrs` to indicate the client IP ranges needing access
+2. Jump or NFS Server VMs have only private IPs - use `vm_private_access_cidrs` to indicate the IP ranges for the DAC client VM needing access. DAC's baseline module will require SSH access to the Jump VM and/or NFS Server VM.
+3. VPC has no public egress - use `vpc_endpoint_private_access_cidrs` to allow access to AWS private link services required to build the cluster, e.g. EC2.
+
+For example, with a cluster API server endpoint that is private, the IAC client VM must have API server endpoint access during cluster creation to perform a health check. If your IAC client VM is not in your private subnet, its IP or CIDR range should be present in `cluster_endpoint_private_access_cidrs`.
+
+You can also use `default_private_access_cidrs` to apply the same CIDR range to all three private contexts. To set different CIDR ranges for a specific private context, set the appropriate variable. Use an empty list [] to disallow access explicitly.
+
+| Name | Description | Type | Default | Notes |
+| :--- | :--- | :--- | :--- | :--- |
+| default_private_access_cidrs | IP address ranges that are allowed to access all created private cloud resources | list of strings | | Set a list of CIDR ranges that will be applied as a default value for `cluster_endpoint_private_access_cidrs`, `vpc_endpoint_private_access_cidrs` and `vm_private_access_cidrs`. **Note:** If you need to set distinct IP CIDR ranges for any of these contexts, use the specific variables below rather than this one. |
+| cluster_endpoint_private_access_cidrs | IP address ranges that are allowed to access the EKS cluster API Server endpoint | list of strings | | For clients needing access to the cluster api server endpoint (e.g. for VMs running terraform apply and for VMs where admins will use kubectl). Only used with `cluster_api_mode=private` |
+| vpc_endpoint_private_access_cidrs | IP address ranges that are allowed to access all AWS Services targeted by the VPC endpoints | list of strings | | Adds an ingress rule to the auxiliary security group (_prefix_-sg) protecting the VPC Endpoints, allowing HTTPS access at port 443. Only used with `vpc_private_endpoints_enabled=true`. |
+| vm_private_access_cidrs | IP address ranges that are allowed to access private IP based Jump or NFS Server VMs. | list of strings | | Opens port 22 for SSH access to the jump server and/or NFS VM by adding Ingress Rule on the Workers Security Group. Only used with `create_jump_public_ip=false` or `create_nfs_public_ip=false`. |
## Networking
| Name | Description | Type | Default | Notes |
@@ -109,7 +132,7 @@ The variables in the table below can be used to define the existing resources. R
| :--- | ---: | ---: | ---: | ---: |
| vpc_id | ID of existing VPC | string | null | Only required if deploying into existing VPC. |
| subnet_ids | List of existing subnets mapped to desired usage | map(string) | {} | Only required if deploying into existing subnets. |
- | nat_id | ID of existing AWS NAT gateway | string | null | Only required if deploying into existing VPC and subnets. |
+ | nat_id | ID of existing AWS NAT gateway | string | null | Optional if deploying into existing VPC and subnets for [BYON scenarios 2 & 3](./user/BYOnetwork.md#supported-scenarios-and-requirements-for-using-existing-network-resources) |
| security_group_id | ID of existing Security Group that controls external access to Jump/NFS VMs and Postgres | string | null | Only required if using existing Security Group. See [Security Group](./user/BYOnetwork.md#external-access-security-group) for requirements. |
| cluster_security_group_id | ID of existing Security Group that controls Pod access to the control plane | string | null | Only required if using existing Cluster Security Group. See [Cluster Security Group](./user/BYOnetwork.md#cluster-security-group) for requirements.|
| workers_security_group_id | ID of existing Security Group that allows access between node VMs, Jump VM, and data sources (nfs, efs, postges) | string | null | Only required if using existing Security Group for Node Group VMs. See [Workers Security Group](./user/BYOnetwork.md#workers-security-group) for requirements. |
@@ -124,6 +147,12 @@ subnet_ids = {
}
```
+### VPC Endpoints
+| Name | Description | Type | Default | Notes |
+ | :--- | ---: | ---: | ---: | ---: |
+ | vpc_private_endpoints_enabled | Enable the creation of VPC private endpoints | bool | true | Setting to false prevents IaC from creating and managing VPC private endpoints in the cluster |
+
+
## IAM
By default, two custom IAM policies and two custom IAM roles (with instance profiles) are created. If your site security protocol does not allow for automatic creation of IAM resources, you can provide pre-created roles using the following options:
diff --git a/docs/user/BYOnetwork.md b/docs/user/BYOnetwork.md
index 46eae4c5..98edd5fe 100644
--- a/docs/user/BYOnetwork.md
+++ b/docs/user/BYOnetwork.md
@@ -4,12 +4,14 @@ You have the option to use existing network resources with SAS Viya 4 Terraform
**NOTE:** We refer to the use of existing resources as "bring your own" or "BYO" resources.
-| Scenario|Required Variables|Additional Requirements|Resources to be Created|
-| :--- | :--- | :--- | :--- |
-| 1. To work with an existing VPC | `vpc_id` | - VPC does not contain any Subnets or other [Network components](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Networking.html)
- VPC block size must be IPv4 with '/16' netmask (supports 65,536 IP addresses)
- `DNS hostnames` and `DNS resolution` are enabled
- [`subnets`](../CONFIG-VARS.md#networking) CIDR blocks must match with VPC IPv4 CIDR block
| Subnets, NAT Gateway and Security Group|
-| 2. To configure all components of your VPC network - Subnets, Routes & associations, Internet and NAT Gateways | `vpc_id`,
`subnet_ids` and
`nat_id` | - all requirements from Scenario #1
- Subnets Availability Zones must be within the [location](../CONFIG-VARS.md#required-variables)
- AWS Tags with `` value replaced with the [prefix](../CONFIG-VARS.md#required-variables) input value for
- Public Subnets:- `{"kubernetes.io/role/elb"="1"}`
- `{"kubernetes.io/cluster/-eks"="shared"}`
-Private Subnets:- `{"kubernetes.io/role/internal-elb"="1"}`
- `{"kubernetes.io/cluster/-eks"="shared"}`
See [AWS docs](https://docs.aws.amazon.com/eks/latest/userguide/alb-ingress.html) for background on subnet tag requirements to match EKS Cluster name| Security Group |
-| 3. To configure all components of your VPC network and Security Groups | `vpc_id`,
`subnet_ids`,
`nat_id`,
`security_group_id`,
`cluster_security_group_id`, and
`workers_security_group_id` |- all requirements from Scenarios #2 and [these pre-defined Security Groups](#security-groups)
| None |
+|Scenario |Description|Required Variables|Optional Variables|Additional Requirements|Resources to be Created|
+| -: | :--- | :--- | :--- | :--- | :---|
+| 0|No existing network resources | None | | Not a BYO network scenario | IaC creates the required network resources |
+| 1|To work with an existing VPC | `vpc_id` | | - VPC does not contain any Subnets or other [Network components](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Networking.html)<br>- VPC block size must be IPv4 with '/16' netmask (supports 65,536 IP addresses)<br>- `DNS hostnames` and `DNS resolution` are enabled<br>- [`subnets`](../CONFIG-VARS.md#networking) CIDR blocks must match with VPC IPv4 CIDR block<br>| Subnets, NAT Gateway and Security Groups|
+| 2|To configure all components of your VPC network - Subnets, Routes & associations and optionally Internet and NAT Gateways | `vpc_id`,<br>`private` subnet list within the [subnet_ids](../CONFIG-VARS.md#use-existing) map| `nat_id`,<br>`public` and `database` subnet lists within the [subnet_ids](../CONFIG-VARS.md#use-existing) map | - all requirements from Scenario #1<br>- Subnets Availability Zones must be within the [location](../CONFIG-VARS.md#required-variables)<br>- AWS Tags with `<prefix>` value replaced with the [prefix](../CONFIG-VARS.md#required-variables) input value for<br>Public Subnets:<br>- `{"kubernetes.io/role/elb"="1"}`<br>- `{"kubernetes.io/cluster/<prefix>-eks"="shared"}`<br>Private Subnets:<br>- `{"kubernetes.io/role/internal-elb"="1"}`<br>- `{"kubernetes.io/cluster/<prefix>-eks"="shared"}`<br>See [AWS docs](https://docs.aws.amazon.com/eks/latest/userguide/network-load-balancing.html) for background on subnet tag requirements to match EKS Cluster name| Security Groups |
+| 3|To configure all components of your VPC network and Security Groups and optionally Internet and NAT Gateways| `vpc_id`,<br>`private` subnet list within the [subnet_ids](../CONFIG-VARS.md#use-existing) map,<br>`security_group_id`,<br>`cluster_security_group_id`, and<br>`workers_security_group_id` | `nat_id`,<br>`public` and `database` subnet lists within the [subnet_ids](../CONFIG-VARS.md#use-existing) map |- all requirements from Scenarios #2 and [these pre-defined Security Groups](#security-groups)<br>| None |
+**Note**: The `byo_network_scenario` IAC output value is informational only and is intended to convey the BYO network scenario that IAC has selected according to the [Use Existing](../CONFIG-VARS.md#use-existing) input variable values provided to IAC.
### Security Groups
@@ -50,9 +52,9 @@ For more information on these Security Groups, please see https://docs.aws.amazo
When creating your BYO Network resources you should consult with your Network Administrator and use any of these methods to create a working AWS VPC Network:
- [AWS QuickStarts for VPC](https://aws.amazon.com/quickstart/architecture/vpc/)
-- See the "simple-vpc" and "complete-vpc" examples in [terraform-aws-vpc module](https://github.com/terraform-aws-modules/terraform-aws-vpc/tree/master/examples)
+- See the "simple-vpc" and "complete-vpc" examples in [terraform-aws-vpc module](https://github.com/terraform-aws-modules/terraform-aws-vpc/tree/master/examples)
-AWS documentation for reference:
+AWS documentation for reference:
- [How Amazon VPC works](https://docs.aws.amazon.com/vpc/latest/userguide/how-it-works.html)
- [VPC and subnet sizing for IPv4](https://docs.aws.amazon.com/vpc/latest/userguide/VPC_Subnets.html#vpc-sizing-ipv4)
diff --git a/locals.tf b/locals.tf
index d749f22e..06106bad 100755
--- a/locals.tf
+++ b/locals.tf
@@ -7,7 +7,8 @@ locals {
aws_caller_identity_user_name = element(split("/", data.aws_caller_identity.terraform.arn), length(split("/", data.aws_caller_identity.terraform.arn)) - 1)
# General
- security_group_id = var.security_group_id == null ? aws_security_group.sg[0].id : data.aws_security_group.sg[0].id
+ sec_group = coalescelist(aws_security_group.sg_a, aws_security_group.sg_b)
+ security_group_id = var.security_group_id == null ? local.sec_group[0].id : data.aws_security_group.sg[0].id
cluster_security_group_id = var.cluster_security_group_id == null ? aws_security_group.cluster_security_group[0].id : var.cluster_security_group_id
workers_security_group_id = var.workers_security_group_id == null ? aws_security_group.workers_security_group[0].id : var.workers_security_group_id
cluster_name = "${var.prefix}-eks"
@@ -20,11 +21,19 @@ locals {
aws_shared_credentials = local.use_aws_shared_credentials_file ? [var.aws_shared_credentials_file] : var.aws_shared_credentials_files
# CIDRs
- default_public_access_cidrs = var.default_public_access_cidrs == null ? [] : var.default_public_access_cidrs
- vm_public_access_cidrs = var.vm_public_access_cidrs == null ? local.default_public_access_cidrs : var.vm_public_access_cidrs
- cluster_endpoint_public_access_cidrs = var.cluster_api_mode == "private" ? [] : (var.cluster_endpoint_public_access_cidrs == null ? local.default_public_access_cidrs : var.cluster_endpoint_public_access_cidrs)
- cluster_endpoint_private_access_cidrs = var.cluster_endpoint_private_access_cidrs == null ? distinct(concat(module.vpc.public_subnet_cidrs, module.vpc.private_subnet_cidrs)) : var.cluster_endpoint_private_access_cidrs # tflint-ignore: terraform_unused_declarations
- postgres_public_access_cidrs = var.postgres_public_access_cidrs == null ? local.default_public_access_cidrs : var.postgres_public_access_cidrs
+ default_public_access_cidrs = var.default_public_access_cidrs == null ? [] : var.default_public_access_cidrs
+ default_private_access_cidrs = var.default_private_access_cidrs == null ? [] : var.default_private_access_cidrs
+
+ vm_public_access_cidrs = var.vm_public_access_cidrs == null ? local.default_public_access_cidrs : var.vm_public_access_cidrs
+ vm_private_access_cidrs = var.vm_private_access_cidrs == null ? local.default_private_access_cidrs : var.vm_private_access_cidrs
+
+ cluster_endpoint_public_access_cidrs = var.cluster_api_mode == "private" ? [] : (var.cluster_endpoint_public_access_cidrs == null ? local.default_public_access_cidrs : var.cluster_endpoint_public_access_cidrs)
+
+ cluster_endpoint_private_access_cidrs = var.cluster_api_mode == "public" ? [] : var.cluster_endpoint_private_access_cidrs == null ? distinct(concat(module.vpc.public_subnet_cidrs, module.vpc.private_subnet_cidrs, local.default_private_access_cidrs)) : distinct(concat(module.vpc.public_subnet_cidrs, module.vpc.private_subnet_cidrs, local.default_private_access_cidrs, var.cluster_endpoint_private_access_cidrs)) # tflint-ignore: terraform_unused_declarations
+
+ vpc_endpoint_private_access_cidrs = var.vpc_endpoint_private_access_cidrs == null ? distinct(concat(module.vpc.public_subnet_cidrs, module.vpc.private_subnet_cidrs, local.default_private_access_cidrs)) : distinct(concat(module.vpc.public_subnet_cidrs, module.vpc.private_subnet_cidrs, local.default_private_access_cidrs, var.vpc_endpoint_private_access_cidrs))
+
+ postgres_public_access_cidrs = var.postgres_public_access_cidrs == null ? local.default_public_access_cidrs : var.postgres_public_access_cidrs
# Subnets
jump_vm_subnet = var.create_jump_public_ip ? module.vpc.public_subnets[0] : module.vpc.private_subnets[0]
@@ -89,7 +98,7 @@ locals {
tags = var.autoscaling_enabled ? merge(local.tags, { key = "k8s.io/cluster-autoscaler/${local.cluster_name}", value = "owned", propagate_at_launch = true }, { key = "k8s.io/cluster-autoscaler/enabled", value = "true", propagate_at_launch = true }) : local.tags
# Node Pool IAM Configuration
iam_role_use_name_prefix = false
- iam_role_name = "${var.prefix}-default-eks-node-group"
+ iam_role_name = "${var.prefix}-default-eks-node-group"
}
}
@@ -138,7 +147,7 @@ locals {
tags = var.autoscaling_enabled ? merge(local.tags, { key = "k8s.io/cluster-autoscaler/${local.cluster_name}", value = "owned", propagate_at_launch = true }, { key = "k8s.io/cluster-autoscaler/enabled", value = "true", propagate_at_launch = true }) : local.tags
# Node Pool IAM Configuration
iam_role_use_name_prefix = false
- iam_role_name = "${var.prefix}-${key}-eks-node-group"
+ iam_role_name = "${var.prefix}-${key}-eks-node-group"
}
}
diff --git a/main.tf b/main.tf
index 7b9d7ca3..c21e335a 100755
--- a/main.tf
+++ b/main.tf
@@ -70,15 +70,19 @@ provider "kubernetes" {
module "vpc" {
source = "./modules/aws_vpc"
- name = var.prefix
- vpc_id = var.vpc_id
- region = var.location
- security_group_id = local.security_group_id
- cidr = var.vpc_cidr
- azs = data.aws_availability_zones.available.names
- existing_subnet_ids = var.subnet_ids
- subnets = var.subnets
- existing_nat_id = var.nat_id
+ name = var.prefix
+ vpc_id = var.vpc_id
+ region = var.location
+ security_group_id = local.security_group_id
+ raw_sec_group_id = var.security_group_id
+ cluster_security_group_id = var.cluster_security_group_id
+ workers_security_group_id = var.workers_security_group_id
+ cidr = var.vpc_cidr
+ azs = data.aws_availability_zones.available.names
+ existing_subnet_ids = var.subnet_ids
+ subnets = var.subnets
+ existing_nat_id = var.nat_id
+ vpc_private_endpoints_enabled = var.vpc_private_endpoints_enabled
tags = local.tags
public_subnet_tags = merge(local.tags, { "kubernetes.io/role/elb" = "1" }, { "kubernetes.io/cluster/${local.cluster_name}" = "shared" })
@@ -208,6 +212,7 @@ module "kubeconfig" {
region = var.location
endpoint = module.eks.cluster_endpoint
ca_crt = local.kubeconfig_ca_cert
+ sg_id = local.cluster_security_group_id
depends_on = [module.eks.cluster_id] # The name/id of the EKS cluster. Will block on cluster creation until the cluster is really ready.
}
@@ -246,7 +251,7 @@ module "postgresql" {
# DB subnet group - use public subnet if public access is requested
publicly_accessible = length(local.postgres_public_access_cidrs) > 0 ? true : false
- subnet_ids = length(local.postgres_public_access_cidrs) > 0 ? module.vpc.public_subnets : module.vpc.database_subnets
+ subnet_ids = length(local.postgres_public_access_cidrs) > 0 ? length(module.vpc.public_subnets) > 0 ? module.vpc.public_subnets : module.vpc.database_subnets : module.vpc.database_subnets
# DB parameter group
family = "postgres${each.value.server_version}"
diff --git a/modules/aws_vpc/main.tf b/modules/aws_vpc/main.tf
index 7817ff4a..1df43775 100644
--- a/modules/aws_vpc/main.tf
+++ b/modules/aws_vpc/main.tf
@@ -14,6 +14,14 @@ locals {
# public_subnets = local.existing_public_subnets ? data.aws_subnet.public : aws_subnet.public # not used keeping for ref
private_subnets = local.existing_private_subnets ? data.aws_subnet.private : aws_subnet.private
+ # Use private subnets if we are not creating db subnets and there are no existing db subnets
+ database_subnets = local.existing_database_subnets ? data.aws_subnet.database : element(concat(aws_subnet.database[*].id, tolist([""])), 0) != "" ? aws_subnet.database : local.private_subnets
+
+ byon_tier = var.vpc_id == null ? 0 : local.existing_private_subnets ? (var.raw_sec_group_id == null && var.cluster_security_group_id == null && var.workers_security_group_id == null) ? 2 : 3 : 1
+ byon_scenario = local.byon_tier
+
+ create_nat_gateway = (local.byon_scenario == 0 || local.byon_scenario == 1) ? true : false
+ create_subnets = (local.byon_scenario == 0 || local.byon_scenario == 1) ? true : false
}
data "aws_vpc" "vpc" {
@@ -38,36 +46,37 @@ resource "aws_vpc" "vpc" {
}
resource "aws_vpc_endpoint" "private_endpoints" {
- count = length(var.vpc_private_endpoints)
- vpc_id = local.vpc_id
- service_name = "com.amazonaws.${var.region}.${var.vpc_private_endpoints[count.index]}"
- vpc_endpoint_type = "Interface"
- security_group_ids = [var.security_group_id]
+ for_each = var.vpc_private_endpoints_enabled ? var.vpc_private_endpoints : {}
+ vpc_id = local.vpc_id
+ service_name = "com.amazonaws.${var.region}.${each.key}"
+ vpc_endpoint_type = each.value
+ security_group_ids = each.value == "Interface" ? [var.security_group_id] : null
+ private_dns_enabled = each.value == "Interface" ? true : null
tags = merge(
{
- "Name" = format("%s", "${var.name}-private-endpoint-${var.vpc_private_endpoints[count.index]}")
+ "Name" = format("%s", "${var.name}-private-endpoint-${each.key}")
},
var.tags,
)
- subnet_ids = [
+ subnet_ids = each.value == "Interface" ? [
for subnet in local.private_subnets : subnet.id
- ]
+ ] : null
}
data "aws_subnet" "public" {
- count = local.existing_public_subnets ? length(var.subnets["public"]) : 0
+ count = local.existing_public_subnets ? length(var.existing_subnet_ids["public"]) : 0
id = element(var.existing_subnet_ids["public"], count.index)
}
data "aws_subnet" "private" {
- count = local.existing_private_subnets ? length(var.subnets["private"]) : 0
+ count = local.existing_private_subnets ? length(var.existing_subnet_ids["private"]) : 0
id = element(var.existing_subnet_ids["private"], count.index)
}
data "aws_subnet" "database" {
- count = local.existing_database_subnets ? length(var.subnets["database"]) : 0
+ count = local.existing_database_subnets ? length(var.existing_subnet_ids["database"]) : 0
id = element(var.existing_subnet_ids["database"], count.index)
}
@@ -75,7 +84,7 @@ data "aws_subnet" "database" {
# Public subnet
################
resource "aws_subnet" "public" {
- count = local.existing_public_subnets ? 0 : length(var.subnets["public"])
+ count = local.existing_public_subnets ? 0 : local.create_subnets ? length(var.subnets["public"]) : 0
vpc_id = local.vpc_id
cidr_block = element(var.subnets["public"], count.index)
availability_zone = length(regexall("^[a-z]{2}-", element(var.azs, count.index))) > 0 ? element(var.azs, count.index) : null
@@ -99,7 +108,7 @@ resource "aws_subnet" "public" {
# Internet Gateway
###################
resource "aws_internet_gateway" "this" {
- count = var.existing_nat_id == null ? 1 : 0
+ count = var.existing_nat_id == null ? local.create_nat_gateway ? 1 : 0 : 0
vpc_id = local.vpc_id
@@ -115,7 +124,7 @@ resource "aws_internet_gateway" "this" {
# PubliŃ routes
################
resource "aws_route_table" "public" {
- count = local.existing_public_subnets ? 0 : 1
+ count = local.existing_public_subnets ? 0 : local.create_subnets ? 1 : 0
vpc_id = local.vpc_id
tags = merge(
@@ -131,7 +140,7 @@ resource "aws_route_table" "public" {
}
resource "aws_route" "public_internet_gateway" {
- count = var.existing_nat_id == null ? 1 : 0
+ count = var.existing_nat_id == null ? local.create_nat_gateway ? 1 : 0 : 0
route_table_id = aws_route_table.public[0].id
destination_cidr_block = "0.0.0.0/0"
@@ -153,14 +162,14 @@ resource "aws_route_table_association" "private" {
}
resource "aws_route_table_association" "public" {
- count = local.existing_public_subnets ? 0 : length(var.subnets["public"])
+ count = local.existing_public_subnets ? 0 : local.create_subnets ? length(var.subnets["public"]) : 0
subnet_id = element(aws_subnet.public[*].id, count.index)
route_table_id = element(aws_route_table.public[*].id, 0)
}
resource "aws_route_table_association" "database" {
- count = local.existing_database_subnets ? 0 : length(var.subnets["database"])
+ count = local.existing_database_subnets ? 0 : local.create_subnets ? length(var.subnets["database"]) : 0
subnet_id = element(aws_subnet.database[*].id, count.index)
route_table_id = element(aws_route_table.private[*].id, 0)
@@ -214,7 +223,7 @@ resource "aws_route_table" "private" {
# Database subnet
##################
resource "aws_subnet" "database" {
- count = local.existing_database_subnets ? 0 : length(var.subnets["database"])
+ count = local.existing_database_subnets ? 0 : local.create_subnets ? length(var.subnets["database"]) : 0
vpc_id = local.vpc_id
cidr_block = element(var.subnets["database"], count.index)
availability_zone = length(regexall("^[a-z]{2}-", element(var.azs, count.index))) > 0 ? element(var.azs, count.index) : null
@@ -233,7 +242,7 @@ resource "aws_subnet" "database" {
}
resource "aws_db_subnet_group" "database" {
- count = local.existing_database_subnets == false && length(var.subnets["database"]) > 0 ? 1 : 0
+ count = local.existing_database_subnets == false ? local.create_subnets ? contains(keys(var.subnets), "database") ? length(var.subnets["database"]) > 0 ? 1 : 0 : 0 : 0 : 0
name = lower(var.name)
description = "Database subnet group for ${var.name}"
@@ -248,7 +257,7 @@ resource "aws_db_subnet_group" "database" {
}
resource "aws_eip" "nat" {
- count = var.existing_nat_id == null ? 1 : 0
+ count = var.existing_nat_id == null ? local.create_nat_gateway ? 1 : 0 : 0
domain = "vpc"
@@ -270,7 +279,7 @@ data "aws_nat_gateway" "nat_gateway" {
}
resource "aws_nat_gateway" "nat_gateway" {
- count = var.existing_nat_id == null ? 1 : 0
+ count = var.existing_nat_id == null ? local.create_nat_gateway ? 1 : 0 : 0
allocation_id = element(aws_eip.nat[*].id, 0)
subnet_id = local.existing_public_subnets ? element(data.aws_subnet.public[*].id, 0) : element(aws_subnet.public[*].id, 0)
@@ -290,7 +299,7 @@ resource "aws_nat_gateway" "nat_gateway" {
}
resource "aws_route" "private_nat_gateway" {
- count = var.existing_nat_id == null ? 1 : 0
+ count = var.existing_nat_id == null ? local.create_nat_gateway ? 1 : 0 : 0
route_table_id = element(aws_route_table.private[*].id, count.index)
destination_cidr_block = "0.0.0.0/0"
diff --git a/modules/aws_vpc/outputs.tf b/modules/aws_vpc/outputs.tf
index 651479b3..3e30b363 100644
--- a/modules/aws_vpc/outputs.tf
+++ b/modules/aws_vpc/outputs.tf
@@ -42,12 +42,12 @@ output "private_subnet_cidrs" {
output "database_subnets" {
description = "List of IDs of database subnets"
- value = local.existing_database_subnets ? data.aws_subnet.database[*].id : aws_subnet.database[*].id
+ value = local.existing_database_subnets ? data.aws_subnet.database[*].id : local.database_subnets[*].id
}
output "nat_public_ips" {
description = "List of public Elastic IPs created for AWS NAT Gateway"
- value = var.existing_nat_id == null ? aws_eip.nat[*].public_ip : data.aws_nat_gateway.nat_gateway[*].public_ip
+ value = var.existing_nat_id == null ? local.create_nat_gateway ? aws_eip.nat[*].public_ip : null : data.aws_nat_gateway.nat_gateway[*].public_ip
}
output "public_route_table_ids" {
@@ -64,3 +64,13 @@ output "vpc_cidr" {
description = "CIDR block of VPC"
value = var.vpc_id == null ? var.cidr : data.aws_vpc.vpc[0].cidr_block
}
+
+output "byon_scenario" {
+ description = "The BYO networking configuration (0,1,2, or 3) determined by the set of networking input values configured"
+ value = local.byon_scenario
+}
+
+output "create_nat_gateway" {
+ description = "The networking configuration will create a NAT gateway"
+ value = local.create_nat_gateway
+}
diff --git a/modules/aws_vpc/variables.tf b/modules/aws_vpc/variables.tf
index 3b2b272a..5c61ffaa 100644
--- a/modules/aws_vpc/variables.tf
+++ b/modules/aws_vpc/variables.tf
@@ -97,8 +97,23 @@ variable "map_public_ip_on_launch" {
variable "vpc_private_endpoints" {
description = "Endpoints needed for private cluster"
- type = list(string)
- default = ["ec2", "ecr.api", "ecr.dkr", "s3", "logs", "sts", "elasticloadbalancing", "autoscaling"]
+ type = map(string)
+ default = {
+ "ec2" = "Interface",
+ "ecr.api" = "Interface",
+ "ecr.dkr" = "Interface",
+ "s3" = "Gateway",
+ "logs" = "Interface",
+ "sts" = "Interface",
+ "elasticloadbalancing" = "Interface",
+ "autoscaling" = "Interface"
+ }
+}
+
+variable "vpc_private_endpoints_enabled" {
+ description = "Enable the creation of vpc private endpoint resources"
+ type = bool
+ default = true
}
variable "region" {
@@ -107,6 +122,21 @@ variable "region" {
}
variable "security_group_id" {
- description = "Security Group ID"
+ description = "Security Group ID local variable value"
+ type = string
+}
+
+variable "raw_sec_group_id" {
+ description = "Security Group ID input variable value"
+ type = string
+}
+
+variable "cluster_security_group_id" {
+ description = "Cluster Security Group ID input variable value"
+ type = string
+}
+
+variable "workers_security_group_id" {
+ description = "Workers Security Group ID input variable value"
type = string
}
diff --git a/modules/kubeconfig/main.tf b/modules/kubeconfig/main.tf
index 99b75e13..6a0dbb91 100644
--- a/modules/kubeconfig/main.tf
+++ b/modules/kubeconfig/main.tf
@@ -42,6 +42,10 @@ locals {
}
+data "aws_security_group" "selected" {
+ id = var.sg_id
+}
+
data "kubernetes_secret" "sa_secret" {
count = var.create_static_kubeconfig ? 1 : 0
metadata {
@@ -61,8 +65,12 @@ resource "kubernetes_secret" "sa_secret" {
"kubernetes.io/service-account.name" = local.service_account_name
}
}
- type = "kubernetes.io/service-account-token"
- depends_on = [kubernetes_service_account.kubernetes_sa]
+ type = "kubernetes.io/service-account-token"
+
+ depends_on = [
+ kubernetes_service_account.kubernetes_sa,
+ data.aws_security_group.selected,
+ ]
}
# Starting K8s v1.24+ hashicorp/terraform-provider-kubernetes issues warning message:
@@ -90,6 +98,10 @@ resource "kubernetes_cluster_role_binding" "kubernetes_crb" {
name = local.service_account_name
namespace = var.namespace
}
+
+ depends_on = [
+ data.aws_security_group.selected,
+ ]
}
# kube config file generation
diff --git a/modules/kubeconfig/variables.tf b/modules/kubeconfig/variables.tf
index 45575e7e..c5e0ca27 100644
--- a/modules/kubeconfig/variables.tf
+++ b/modules/kubeconfig/variables.tf
@@ -43,3 +43,8 @@ variable "ca_crt" {
description = "Kubernetes CA certificate"
type = string
}
+
+variable "sg_id" {
+ description = "Security group ID"
+ type = string
+}
diff --git a/outputs.tf b/outputs.tf
index 5a874847..cbe5bcc0 100755
--- a/outputs.tf
+++ b/outputs.tf
@@ -104,7 +104,7 @@ output "postgres_servers" {
}
output "nat_ip" {
- value = module.vpc.nat_public_ips[0]
+ value = module.vpc.create_nat_gateway ? module.vpc.nat_public_ips[0] : null
}
output "prefix" {
@@ -170,7 +170,8 @@ output "storage_type_backend" {
condition = (var.storage_type == "standard" && var.storage_type_backend == "nfs"
|| var.storage_type == "ha" && var.storage_type_backend == "nfs"
|| var.storage_type == "ha" && var.storage_type_backend == "efs"
- || var.storage_type == "ha" && var.storage_type_backend == "ontap")
+ || var.storage_type == "ha" && var.storage_type_backend == "ontap"
+ || var.storage_type == "none" && var.storage_type_backend == "none")
error_message = "nfs is the only valid storage_type_backend when storage_type == 'standard'"
}
}
@@ -179,3 +180,7 @@ output "aws_fsx_ontap_fsxadmin_password" {
value = (local.storage_type_backend == "ontap" ? var.aws_fsx_ontap_fsxadmin_password : null)
sensitive = true
}
+
+output "byo_network_scenario" {
+ value = module.vpc.byon_scenario
+}
diff --git a/security.tf b/security.tf
index dec8e74c..a4fe9131 100644
--- a/security.tf
+++ b/security.tf
@@ -7,11 +7,12 @@ data "aws_security_group" "sg" {
}
# Security Groups - https://www.terraform.io/docs/providers/aws/r/security_group.html
-resource "aws_security_group" "sg" {
- count = var.security_group_id == null ? 1 : 0
+resource "aws_security_group" "sg_a" {
+ count = var.security_group_id == null && var.vpc_private_endpoints_enabled == false ? 1 : 0
name = "${var.prefix}-sg"
vpc_id = module.vpc.vpc_id
+ description = "Auxiliary security group associated with RDS ENIs as well as Jump/NFS VM ENIs when they have public IPs"
egress {
description = "Allow all outbound traffic."
from_port = 0
@@ -22,6 +23,30 @@ resource "aws_security_group" "sg" {
tags = merge(local.tags, { "Name" : "${var.prefix}-sg" })
}
+# Security Groups - https://www.terraform.io/docs/providers/aws/r/security_group.html
+resource "aws_security_group" "sg_b" {
+ count = var.security_group_id == null && var.vpc_private_endpoints_enabled ? 1 : 0
+ name = "${var.prefix}-sg"
+ vpc_id = module.vpc.vpc_id
+
+ description = "Auxiliary security group associated with RDS ENIs and VPC Endpoint ENIs as well as Jump/NFS VM ENIs when they have public IPs"
+ egress {
+ description = "Allow all outbound traffic."
+ from_port = 0
+ to_port = 0
+ protocol = "-1"
+ cidr_blocks = ["0.0.0.0/0"]
+ }
+ ingress {
+ description = "Allow tcp port 443 ingress to all AWS Services targeted by the VPC endpoints"
+ from_port = 443
+ to_port = 443
+ protocol = "tcp"
+ cidr_blocks = local.vpc_endpoint_private_access_cidrs
+ }
+ tags = merge(local.tags, { "Name" : "${var.prefix}-sg" })
+}
+
resource "aws_security_group_rule" "vms" {
count = (length(local.vm_public_access_cidrs) > 0
&& var.security_group_id == null
@@ -93,6 +118,13 @@ resource "aws_security_group" "cluster_security_group" {
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
+ ingress {
+ description = "Allow additional HTTPS/443 ingress to private EKS cluster API server endpoint per var.cluster_endpoint_private_access_cidrs"
+ from_port = 443
+ to_port = 443
+ protocol = "tcp"
+ cidr_blocks = local.cluster_endpoint_private_access_cidrs
+ }
}
@@ -105,11 +137,10 @@ resource "aws_security_group_rule" "cluster_ingress" {
from_port = 443
to_port = 443
protocol = "tcp"
- source_security_group_id = aws_security_group.workers_security_group[0].id
+ source_security_group_id = local.workers_security_group_id
security_group_id = local.cluster_security_group_id
}
-
resource "aws_security_group" "workers_security_group" {
name = "${var.prefix}-eks_worker_sg"
vpc_id = module.vpc.vpc_id
@@ -177,3 +208,22 @@ resource "aws_security_group_rule" "worker_cluster_api_443" {
to_port = 443
security_group_id = aws_security_group.workers_security_group[0].id
}
+
+
+resource "aws_security_group_rule" "vm_private_access_22" {
+
+ count = (length(local.vm_private_access_cidrs) > 0
+ && var.workers_security_group_id == null
+ && ((var.create_jump_public_ip == false && var.create_jump_vm)
+ || (var.create_nfs_public_ip == false && var.storage_type == "standard")
+ )
+ ? 1 : 0
+ )
+ type = "ingress"
+ description = "Allow SSH ingress to a private IP based Jump VM and/or NFS VM per var.vm_private_access_cidrs. Required for DAC baseline client VM."
+ from_port = 22
+ to_port = 22
+ protocol = "tcp"
+ cidr_blocks = local.vm_private_access_cidrs
+ security_group_id = aws_security_group.workers_security_group[0].id
+}
diff --git a/variables.tf b/variables.tf
index 574d7465..c9712a97 100644
--- a/variables.tf
+++ b/variables.tf
@@ -61,9 +61,15 @@ variable "iac_tooling" {
default = "terraform"
}
-## Public Access
+## Public & Private Access
variable "default_public_access_cidrs" {
- description = "List of CIDRs to access created resources."
+ description = "List of CIDRs to access created resources - Public."
+ type = list(string)
+ default = null
+}
+
+variable "default_private_access_cidrs" {
+ description = "List of CIDRs to access created resources - Private."
type = list(string)
default = null
}
@@ -80,8 +86,20 @@ variable "cluster_endpoint_private_access_cidrs" {
default = null
}
+variable "vpc_endpoint_private_access_cidrs" {
+ description = "List of CIDRs to access VPC endpoints - Private."
+ type = list(string)
+ default = null
+}
+
variable "vm_public_access_cidrs" {
- description = "List of CIDRs to access jump VM or NFS VM."
+ description = "List of CIDRs to access jump VM or NFS VM - Public."
+ type = list(string)
+ default = null
+}
+
+variable "vm_private_access_cidrs" {
+ description = "List of CIDRs to access jump VM or NFS VM - Private."
type = list(string)
default = null
}
@@ -599,8 +617,23 @@ variable "cluster_api_mode" {
variable "vpc_private_endpoints" { # tflint-ignore: terraform_unused_declarations
description = "Endpoints needed for private cluster."
- type = list(string)
- default = ["ec2", "ecr.api", "ecr.dkr", "s3", "logs", "sts", "elasticloadbalancing", "autoscaling"]
+ type = map(string)
+ default = {
+ "ec2" = "Interface",
+ "ecr.api" = "Interface",
+ "ecr.dkr" = "Interface",
+ "s3" = "Gateway",
+ "logs" = "Interface",
+ "sts" = "Interface",
+ "elasticloadbalancing" = "Interface",
+ "autoscaling" = "Interface"
+ }
+}
+
+variable "vpc_private_endpoints_enabled" {
+ description = "Enable the creation of VPC private endpoint resources"
+ type = bool
+ default = true
}
variable "cluster_node_pool_mode" {