diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml index 5384ded3..9c82a10f 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.yml +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -1,7 +1,7 @@ name: Bug report description: Report a reproducible bug to help us improve title: "Bug: TITLE" -labels: ["bug"] +labels: ["bug", "triage"] body: - type: markdown attributes: diff --git a/.github/ISSUE_TEMPLATE/documentation_improvements.yml b/.github/ISSUE_TEMPLATE/documentation_improvements.yml index b297018a..9cb71745 100644 --- a/.github/ISSUE_TEMPLATE/documentation_improvements.yml +++ b/.github/ISSUE_TEMPLATE/documentation_improvements.yml @@ -1,7 +1,7 @@ name: Documentation improvements description: Suggest a documentation update title: "Docs: TITLE" -labels: ["documentation"] +labels: ["documentation", "triage"] body: - type: textarea id: search_area diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml index 738920e0..fef9414b 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.yml +++ b/.github/ISSUE_TEMPLATE/feature_request.yml @@ -1,7 +1,7 @@ name: Feature request description: Suggest an idea for Cloud Game Development Toolkit title: "Feature request: TITLE" -labels: ["feature-request"] +labels: ["feature-request", "triage"] body: - type: markdown attributes: diff --git a/.github/workflows/jenkins-deployment.yml b/.github/workflows/jenkins-deployment.yml new file mode 100644 index 00000000..ad40fafa --- /dev/null +++ b/.github/workflows/jenkins-deployment.yml @@ -0,0 +1,186 @@ +name: Jenkins Deployment Test + +env: + TF_VAR_fully_qualified_domain_name: ${{ secrets.CI_FULLY_QUALIFIED_DOMAIN_NAME }} + STATE_BUCKET_NAME: ${{ secrets.TF_REMOTE_STATE_BUCKET_NAME }} + +# Triggers on any changes to modules/jenkins +on: + pull_request: # change to pull_request before publish + paths: + - 'modules/jenkins/**' + # - '.github/workflows/**' + workflow_dispatch: + +permissions: + id-token: write + 
contents: read
+  issues: write
+
+jobs:
+  # Plan: Generates a tf plan of the deployment and posts it as a comment in the triggering PR
+  plan:
+    runs-on: ubuntu-latest
+    environment: aws-ci
+    permissions:
+      id-token: write
+      issues: write
+      pull-requests: write
+    defaults:
+      run:
+        working-directory: modules/jenkins/examples/complete
+    steps:
+      # Retrieve necessary AWS permissions
+      - name: configure aws credentials
+        uses: aws-actions/configure-aws-credentials@v1.7.0
+        with:
+          role-to-assume: ${{ secrets.AWS_CI_ROLE_ARN }}
+          role-session-name: GitHub_to_AWS_via_FederatedOIDC
+          aws-region: ${{ vars.AWS_REGION }}
+      # Checkout Repository
+      - name: Checkout Git Repository
+        uses: actions/checkout@v3.0.0
+        with:
+          ref: ${{ github.ref }}
+      # Install Terraform
+      - name: Install Terraform
+        uses: hashicorp/setup-terraform@v1
+        with:
+          terraform_version: 1.6.3
+      # Inject remote state block
+      # This is required to enable remote state
+      - name: Inject Remote State
+        run: |
+          cat > backend.tf << EOF
+          terraform {
+            backend "s3" {}
+          }
+          EOF
+      # Initialize S3 remote state
+      # The triggering commit hash is used as the key of the remote state
+      - name: Terraform init
+        id: init
+        run: |
+          terraform init -backend-config="bucket=${STATE_BUCKET_NAME}" -backend-config="key=${{ github.sha }}" -backend-config="region=${{ vars.AWS_REGION }}"
+
+      # Generate tf plan
+      - name: Terraform plan
+        id: plan
+        run: |
+          terraform plan -no-color
+
+      # Post the tf plan as a comment in the triggering PR
+      - name: Update Pull Request
+        uses: actions/github-script@v7
+        with:
+          script: |
+            const output = `#### Terraform Plan 📖\`${{ steps.plan.outcome }}\`
+            <details><summary>Show Plan</summary>
+
+            \`\`\`\n
+            ${{ steps.plan.outputs.stdout }}
+            \`\`\`
+
+            </details>
+
+            *Pushed by: @${{ github.actor }}, Action: \`${{ github.event_name }}\`*`;
+
+            github.rest.issues.createComment({
+              issue_number: context.issue.number,
+              owner: context.repo.owner,
+              repo: context.repo.repo,
+              body: output
+            })
+
+  # Deploy: After manual approval, deploys the solution to the designated AWS account
+  deploy:
+    needs: [ plan ]
+    environment: aws-ci
+    runs-on: ubuntu-latest
+    defaults:
+      run:
+        working-directory: modules/jenkins/examples/complete
+    steps:
+      # Checkout Repository
+      - name: Checkout Git Repository
+        uses: actions/checkout@v3.0.0
+        with:
+          ref: ${{ github.ref }}
+      # Retrieve necessary AWS permissions
+      - name: configure aws credentials
+        uses: aws-actions/configure-aws-credentials@v1.7.0
+        with:
+          role-to-assume: ${{ secrets.AWS_CI_ROLE_ARN }}
+          role-session-name: GitHub_to_AWS_via_FederatedOIDC
+          aws-region: ${{ vars.AWS_REGION }}
+      # Install Terraform
+      - name: Install Terraform
+        uses: hashicorp/setup-terraform@v1
+        with:
+          terraform_version: 1.6.3
+      # Inject remote state block
+      # This is required to enable remote state
+      - name: Inject Remote State
+        run: |
+          cat > backend.tf << EOF
+          terraform {
+            backend "s3" {}
+          }
+          EOF
+      # Initialize S3 remote state
+      # The triggering commit hash is used as the key of the remote state
+      - name: Terraform init
+        id: init
+        run: |
+          terraform init -backend-config="bucket=${STATE_BUCKET_NAME}" -backend-config="key=${{ github.sha }}" -backend-config="region=${{ vars.AWS_REGION }}"
+
+      # Deploys the solution
+      - name: Terraform apply
+        run: |
+          terraform apply -auto-approve
+
+  # Destroy: After manual approval, destroy the solution in the designated AWS account
+  destroy:
+    needs: [ deploy ]
+    runs-on: ubuntu-latest
+    environment: aws-ci
+    defaults:
+      run:
+        working-directory: modules/jenkins/examples/complete
+    steps:
+      # Checkout Repository
+      - name: Checkout Git Repository
+        uses: actions/checkout@v3.0.0
+        with:
+          ref: ${{ github.ref }}
+      # Retrieve necessary AWS permissions
+      - name: configure aws credentials
+        uses: aws-actions/configure-aws-credentials@v1.7.0
+        with:
+          role-to-assume: ${{ secrets.AWS_CI_ROLE_ARN }}
+          role-session-name: GitHub_to_AWS_via_FederatedOIDC
+          aws-region: ${{ vars.AWS_REGION }}
+      # Install Terraform
+      - name: Install Terraform
+        uses: hashicorp/setup-terraform@v1
+        with:
+          terraform_version: 1.6.3
+      # Inject remote state block
+      # This is required to enable remote state
+      - name: Inject Remote State
+        run: |
+          cat > backend.tf << EOF
+          terraform {
+            backend "s3" {}
+          }
+          EOF
+      # Initialize S3 remote state
+      # The triggering commit hash is used as the key of the remote state
+      - name: Terraform init
+        id: init
+        run: |
+          terraform init -backend-config="bucket=${STATE_BUCKET_NAME}" -backend-config="key=${{ github.sha }}" -backend-config="region=${{ vars.AWS_REGION }}"
+      # Destroys the solution
+      - name: Terraform Destroy
+        run: |
+          terraform destroy -auto-approve
diff --git a/.github/workflows/ossf_scorecard.yml b/.github/workflows/ossf_scorecard.yml
index 61ddf2d0..6961cc9e 100644
--- a/.github/workflows/ossf_scorecard.yml
+++ b/.github/workflows/ossf_scorecard.yml
@@ -27,7 +27,7 @@ jobs:
           results_format: sarif
           publish_results: true # Publish results to OpenSSF REST API for easy access by consumers
       - name: "Upload artifact"
-        uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4.4.0
+        uses: actions/upload-artifact@b4b15b8c7c6ac21ea08fcf65892d2ee8f75cf882 # v4.4.3
         with:
           name: SARIF file
           path: results.sarif
diff --git a/.github/workflows/trivy.yml b/.github/workflows/trivy.yml
index 6b3fcb13..012aa10a 100644
--- a/.github/workflows/trivy.yml
+++ b/.github/workflows/trivy.yml
@@ -19,7 +19,7 @@ jobs:
       - name: Checkout code
         uses: actions/checkout@v4
       - name: Run Trivy vulnerability scanner in repo mode
-        uses: aquasecurity/trivy-action@0.24.0
+        uses: aquasecurity/trivy-action@0.28.0
         with:
           scan-type: 'repo'
           ignore-unfixed: true
diff --git a/assets/packer/build-agents/linux/amazon-linux-2023-arm64.pkr.hcl
b/assets/packer/build-agents/linux/amazon-linux-2023-arm64.pkr.hcl index 578e46de..ba76dcdc 100644 --- a/assets/packer/build-agents/linux/amazon-linux-2023-arm64.pkr.hcl +++ b/assets/packer/build-agents/linux/amazon-linux-2023-arm64.pkr.hcl @@ -14,10 +14,12 @@ variable "region" { variable "vpc_id" { type = string + default = null } variable "subnet_id" { type = string + default = null } variable "associate_public_ip_address" { diff --git a/assets/packer/perforce/helix-core/p4_configure.sh b/assets/packer/perforce/helix-core/p4_configure.sh index 44cc567b..1604eaed 100644 --- a/assets/packer/perforce/helix-core/p4_configure.sh +++ b/assets/packer/perforce/helix-core/p4_configure.sh @@ -531,4 +531,4 @@ fi touch "$FLAG_FILE" # Ending the script -log_message "EC2 mount script finished." \ No newline at end of file +log_message "EC2 mount script finished." diff --git a/docs/Dockerfile b/docs/Dockerfile index 57eb0475..6074bb4b 100644 --- a/docs/Dockerfile +++ b/docs/Dockerfile @@ -1,4 +1,4 @@ -FROM python:3.12.7 +FROM python:3.13.0 WORKDIR /build COPY . . ARG VERSION ALIAS diff --git a/docs/getting-started.md b/docs/getting-started.md index c37b05bb..0557f4e3 100644 --- a/docs/getting-started.md +++ b/docs/getting-started.md @@ -41,6 +41,9 @@ packer build ./assets/packer/perforce/helix-core/perforce_arm64.pkr.hcl This will use your AWS credentials to provision an [EC2 instance](https://aws.amazon.com/ec2/instance-types/) in your [Default VPC](https://docs.aws.amazon.com/vpc/latest/userguide/default-vpc.html). The Region, VPC, and Subnet where this instance is provisioned and the AMI is created are configurable - please consult the [`example.pkrvars.hcl`](./assets/packer/perforce/helix-core/example.pkrvars.hcl) file and the [Packer documentation on assigning variables](https://developer.hashicorp.com/packer/guides/hcl/variables#assigning-variables) for more details. 
+???+ Note + The Perforce Helix Core template will default to the _us-west-2_ (Oregon) region, if a region is not provided. + ???+ Note The AWS Region where this AMI is created _must_ be the same Region where you intend to deploy the Simple Build Pipeline. @@ -48,6 +51,9 @@ This will use your AWS credentials to provision an [EC2 instance](https://aws.am This section covers the creation of Amazon Machine Images used to provision Jenkins build agents. Different studios have different needs at this stage, so we'll cover the creation of three different build agent AMIs. +???+ Note + The Build Agent templates will default to the _us-west-2_ (Oregon) region, if a region is not provided. + #### Amazon Linux 2023 ARM based Amazon Machine Image This Amazon Machine Image is provisioned using the [Amazon Linux 2023](https://aws.amazon.com/linux/amazon-linux-2023/) base operating system. It is highly configurable through variables, but there is only one variable that is required: A public SSH key. This public SSH key is used by the Jenkins orchestration service to establish an initial connection to the agent. @@ -55,8 +61,8 @@ This Amazon Machine Image is provisioned using the [Amazon Linux 2023](https://a This variable can be passed to Packer using the `-var-file` or `-var` command line flag. If you are using a variable file, please consult the [`example.pkrvars.hcl`](./assets/packer/build-agents/linux/example.pkrvars.hcl) for overridable fields. You can also pass the SSH key directly at the command line: ``` bash -packer build amazon-linux-2023-arm64.pkr.hcl \ - -var "public_key=" +packer build -var "public_key=" amazon-linux-2023-arm64.pkr.hcl + ``` ???+ Note @@ -79,11 +85,12 @@ Take note of the output of this CLI command. You will need the ARN later. This Amazon Machine Image is provisioned using the Ubuntu Jammy 22.04 base operating system. Just like the Amazon Linux 2023 AMI above, the only required variable is a public SSH key. 
All Linux Packer templates use the same variables file, so if you would like to share a public key across all build nodes we recommend using a variables file. To build this AMI with a variables file called `linux.pkrvars.hcl` you would use the following command: ``` bash -packer build ubuntu-jammy-22.04-amd64-server.pkr.hcl \ - -var-file="linux.pkrvars.hcl" +# This command fails if not run from the '/assets/packer/build-agents/linux' directory. +packer build -var "public_key=" ubuntu-jammy-22.04-amd64-server.pkr.hcl + ``` -???+ Note +???+ Warning The above command assumes you are running `packer` from the `/assets/packer/build-agents/linux` directory. Finally, you'll want to upload the private SSH key to AWS Secrets Manager so that the Jenkins orchestration service can use it to connect to this build agent. @@ -105,8 +112,7 @@ This Amazon Machine Image is provisioned using the Windows Server 2022 base oper Again, the only required variable for building this Amazon Machine Image is a public SSH key. ``` bash -packer build windows.pkr.hcl \ - -var "public_key=" +packer build -var "public_key=" windows.pkr.hcl ``` ???+ Note @@ -136,22 +142,25 @@ Once your hosted zone exists you can proceed to the next step. ### Step 5. Configure Simple Build Pipeline Variables -All configuration of the _Simple Build Pipeline_ occurs in the [`local.tf`](./samples/simple-build-pipeline/local.tf) file. Before you deploy this architecture you will need to provide the outputs from previous steps. - -We'll walk through the required configurations in [`local.tf`](./samples/simple-build-pipeline/local.tf). +Configurations for the _Simple Build Pipeline_ are split between 2 files: [`local.tf`](./samples/simple-build-pipeline/local.tf) and [`variables.tf`](./samples/simple-build-pipeline/variables.tf). Variables in [`local.tf`](./samples/simple-build-pipeline/local.tf) are typically static and can be modified within the file itself. 
Variables in [`variables.tf`](./samples/simple-build-pipeline/variables.tf), tend to be more dynamic and are passed in through the `terraform apply` command either directly through a `-var` flag or as file using the `-var-file` flag. -1. `fully_qualified_domain_name` must be set to the domain name you created a public hosted zone for in [Step 4](#step-4-create-route53-hosted-zone). Your applications will be deployed at subdomains. For example, if `fully_qualified_domain_name=example.com` then Jenkins will be available at `jenkins.example.com` and Perforce Helix Core will be available at `core.helix.example.com`. +We'll start by walking through the required configurations in [`local.tf`](./samples/simple-build-pipeline/local.tf). -2. `allowlist` grants public internet access to the various applications deployed in the _Simple Build Pipeline_. At a minimum you will need to include your own IP address to gain access to Jenkins and Perforce Helix Core for configuration following deployment. For example, if your IP address is `192.158.1.38` you would want to set `allowlist=["192.158.1.38/32"]` to grant yourself access. +1. `allowlist` grants public internet access to the various applications deployed in the _Simple Build Pipeline_. At a minimum you will need to include your own IP address to gain access to Jenkins and Perforce Helix Core for configuration following deployment. For example, if your IP address is `192.158.1.38` you would want to set `allowlist=["192.158.1.38/32"]` to grant yourself access. ???+ Note The `/32` suffix above is a subnet mask that specifies a single IP address. If you have different CIDR blocks that you would like to grant access to you can include those as well. -3. `jenkins_agent_secret_arns` is a list of [AWS Secrets Manager](https://aws.amazon.com/secrets-manager/) ARNs that the Jenkins orchestration service will be granted access to. 
This is primarily used for providing private SSH keys to Jenkins so that the orchestration service can connect to your build agents. When you created build agent AMIs earlier you also uploaded private SSH keys to AWS Secrets Manager. The ARNs of those secrets should be added to the `jenkins_agent_secret_arns` list so that Jenkins can connect to the provisioned build agents. +2. `jenkins_agent_secret_arns` is a list of [AWS Secrets Manager](https://aws.amazon.com/secrets-manager/) ARNs that the Jenkins orchestration service will be granted access to. This is primarily used for providing private SSH keys to Jenkins so that the orchestration service can connect to your build agents. When you created build agent AMIs earlier you also uploaded private SSH keys to AWS Secrets Manager. The ARNs of those secrets should be added to the `jenkins_agent_secret_arns` list so that Jenkins can connect to the provisioned build agents. + +3. The `build_farm_compute` map contains all of the information needed to provision your Jenkins build farms. Each entry in this map corresponds to an [EC2 Auto Scaling group](https://docs.aws.amazon.com/autoscaling/ec2/userguide/auto-scaling-groups.html), and requires two fields to be specified: `ami` and `instance_type`. The `local.tf` file contains an example configuration that has been commented out. Using the AMI IDs from [Step 3](#step-3-create-build-agent-amazon-machine-images), please specify the build farms you would like to provision. Selecting the right instance type for your build farm is highly dependent on your build process. Larger instances are more expensive, but provide improved performance. For example, large Unreal Engine compilation jobs will perform significantly better on [Compute Optimized](https://aws.amazon.com/ec2/instance-types/#Compute_Optimized) instances, while cook jobs tend to benefit from the increased RAM available from [Memory Optimized](https://aws.amazon.com/ec2/instance-types/#Memory_Optimized) instances. 
It can be a good practice to provision an EC2 instance using your custom AMI, and run your build process locally to determine the right instance size for your build farm. Once you have settled on an instance type, complete the `build_farm_compute` map to configure your build farms. + +4. Finally, the `build_farm_fsx_openzfs_storage` field configures file systems used by your build agents for mounting Helix Core workspaces and shared caches. Again, an example configuration is provided but commented out. Depending on the number of builds you expect to be performing and the size of your project, you may want to adjust the size of the suggested file systems. + +The variables in [`variables.tf`] are as follows: -4. The `build_farm_compute` map contains all of the information needed to provision your Jenkins build farms. Each entry in this map corresponds to an [EC2 Auto Scaling group](https://docs.aws.amazon.com/autoscaling/ec2/userguide/auto-scaling-groups.html), and requires two fields to be specified: `ami` and `instance_type`. The `local.tf` file contains an example configuration that has been commented out. Using the AMI IDs from [Step 3](#step-3-create-build-agent-amazon-machine-images), please specify the build farms you would like to provision. Selecting the right instance type for your build farm is highly dependent on your build process. Larger instances are more expensive, but provide improved performance. For example, large Unreal Engine compilation jobs will perform significantly better on [Compute Optimized](https://aws.amazon.com/ec2/instance-types/#Compute_Optimized) instances, while cook jobs tend to benefit from the increased RAM available from [Memory Optimized](https://aws.amazon.com/ec2/instance-types/#Memory_Optimized) instances. It can be a good practice to provision an EC2 instance using your custom AMI, and run your build process locally to determine the right instance size for your build farm. 
Once you have settled on an instance type, complete the `build_farm_compute` map to configure your build farms. +1. `root_domain_name` must be set to the domain name you created a public hosted zone for in [Step 4](#step-4-create-route53-hosted-zone). Your applications will be deployed at subdomains. For example, if `root_domain_name=example.com` then Jenkins will be available at `jenkins.example.com` and Perforce Helix Core will be available at `core.helix.example.com`. -5. Finally, the `build_farm_fsx_openzfs_storage` field configures file systems used by your build agents for mounting Helix Core workspaces and shared caches. Again, an example configuration is provided but commented out. Depending on the number of builds you expect to be performing and the size of your project, you may want to adjust the size of the suggested file systems. ### Step 6. Deploy Simple Build Pipeline @@ -164,7 +173,7 @@ terraform init This will install the modules and required Terraform providers. ``` bash -terraform apply +terraform apply -var "root_domain_name=" ``` This will create a Terraform plan, and wait for manual approval to deploy the proposed resources. Once approval is given the entire deployment process takes roughly 10 minutes. 
diff --git a/docs/requirements.txt b/docs/requirements.txt index 67412e67..54401de9 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -1,6 +1,6 @@ # Include required packages for mkdocs -mkdocs-material==9.5.39 +mkdocs-material==9.5.42 mkdocs-material-extensions==1.3.1 mike==2.1.3 mkdocs-git-revision-date-plugin==0.3.2 -mkdocs-open-in-new-tab==1.0.6 +mkdocs-open-in-new-tab==1.0.7 diff --git a/modules/jenkins/examples/complete/main.tf b/modules/jenkins/examples/complete/main.tf index d9f4fbcb..95a4e23a 100644 --- a/modules/jenkins/examples/complete/main.tf +++ b/modules/jenkins/examples/complete/main.tf @@ -41,6 +41,10 @@ module "jenkins" { certificate_arn = aws_acm_certificate.jenkins.arn jenkins_agent_secret_arns = var.jenkins_agent_secret_arns create_ec2_fleet_plugin_policy = true + enable_jenkins_alb_access_logs = false + #checkov:skip=CKV_AWS_150:Disabling to allow for automated destroy during test deploys + enable_jenkins_alb_deletion_protection = false + enable_default_efs_backup_plan = false # Build Farms build_farm_subnets = aws_subnet.private_subnets[*].id diff --git a/modules/jenkins/examples/complete/versions.tf b/modules/jenkins/examples/complete/versions.tf index a49b9edb..03a42aae 100644 --- a/modules/jenkins/examples/complete/versions.tf +++ b/modules/jenkins/examples/complete/versions.tf @@ -4,7 +4,7 @@ terraform { required_providers { aws = { source = "hashicorp/aws" - version = "5.66.0" + version = "5.70.0" } } } diff --git a/modules/jenkins/examples/complete/vpc.tf b/modules/jenkins/examples/complete/vpc.tf index 532e5edc..d5d1dbb9 100644 --- a/modules/jenkins/examples/complete/vpc.tf +++ b/modules/jenkins/examples/complete/vpc.tf @@ -109,9 +109,9 @@ resource "aws_route_table" "private_rt" { # route to the internet through NAT gateway resource "aws_route" "private_rt_nat_gateway" { - route_table_id = aws_route_table.private_rt.id - destination_cidr_block = "0.0.0.0/0" - nat_gateway_id = aws_nat_gateway.nat_gateway.id + 
route_table_id = aws_route_table.private_rt.id + destination_cidr_block = "0.0.0.0/0" + nat_gateway_id = aws_nat_gateway.nat_gateway.id } resource "aws_route_table_association" "private_rt_asso" { diff --git a/modules/jenkins/versions.tf b/modules/jenkins/versions.tf index cf1bfeaf..24c5b878 100644 --- a/modules/jenkins/versions.tf +++ b/modules/jenkins/versions.tf @@ -4,7 +4,7 @@ terraform { required_providers { aws = { source = "hashicorp/aws" - version = "5.70.0" + version = "5.72.1" } random = { source = "hashicorp/random" diff --git a/modules/perforce/helix-authentication-service/versions.tf b/modules/perforce/helix-authentication-service/versions.tf index ae8ccbe4..a3b8ec35 100644 --- a/modules/perforce/helix-authentication-service/versions.tf +++ b/modules/perforce/helix-authentication-service/versions.tf @@ -4,11 +4,11 @@ terraform { required_providers { aws = { source = "hashicorp/aws" - version = "5.70.0" + version = "5.72.1" } awscc = { source = "hashicorp/awscc" - version = "1.16.1" + version = "1.17.0" } random = { source = "hashicorp/random" diff --git a/modules/perforce/helix-core/versions.tf b/modules/perforce/helix-core/versions.tf index ae8ccbe4..a3b8ec35 100644 --- a/modules/perforce/helix-core/versions.tf +++ b/modules/perforce/helix-core/versions.tf @@ -4,11 +4,11 @@ terraform { required_providers { aws = { source = "hashicorp/aws" - version = "5.70.0" + version = "5.72.1" } awscc = { source = "hashicorp/awscc" - version = "1.16.1" + version = "1.17.0" } random = { source = "hashicorp/random" diff --git a/modules/perforce/helix-swarm/versions.tf b/modules/perforce/helix-swarm/versions.tf index cf1bfeaf..24c5b878 100644 --- a/modules/perforce/helix-swarm/versions.tf +++ b/modules/perforce/helix-swarm/versions.tf @@ -4,7 +4,7 @@ terraform { required_providers { aws = { source = "hashicorp/aws" - version = "5.70.0" + version = "5.72.1" } random = { source = "hashicorp/random" diff --git a/modules/unreal/horde/sg.tf b/modules/unreal/horde/sg.tf 
index 9792c81f..73c729f9 100644 --- a/modules/unreal/horde/sg.tf +++ b/modules/unreal/horde/sg.tf @@ -220,3 +220,14 @@ resource "aws_vpc_security_group_ingress_rule" "unreal_horde_service_inbound_age to_port = 443 ip_protocol = "tcp" } + +# Horde agents allow inbound access from other agents +resource "aws_vpc_security_group_ingress_rule" "unreal_horde_agents_inbound_agents" { + count = length(var.agents) > 0 ? 1 : 0 + security_group_id = aws_security_group.unreal_horde_agent_sg[0].id + description = "Allow inbound traffic to Horde Agents from other Horde Agents." + referenced_security_group_id = aws_security_group.unreal_horde_agent_sg[0].id + from_port = 7000 + to_port = 7010 + ip_protocol = "tcp" +} diff --git a/samples/simple-build-pipeline/versions.tf b/samples/simple-build-pipeline/versions.tf index 03a42aae..03abd058 100644 --- a/samples/simple-build-pipeline/versions.tf +++ b/samples/simple-build-pipeline/versions.tf @@ -4,7 +4,7 @@ terraform { required_providers { aws = { source = "hashicorp/aws" - version = "5.70.0" + version = "5.72.1" } } }