From 1af2df2646aedd8d1429d499d32d9a4f03aa5984 Mon Sep 17 00:00:00 2001 From: Joris van de Donk <90917667+jorisdon@users.noreply.github.com> Date: Mon, 3 Jun 2024 20:17:53 +0200 Subject: [PATCH] feat: Add packer build agent templates for Linux (Ubuntu Jammy 22.04, Amazon Linux 2023) (#46) --- assets/packer/build-agents/linux/README.md | 85 +++++++ .../aarch64/amazon-linux-2023-arm64.pkr.hcl | 213 ++++++++++++++++++ .../ubuntu-jammy-22.04-arm64-server.pkr.hcl | 211 +++++++++++++++++ .../build-agents/linux/create_swap.service | 12 + .../packer/build-agents/linux/create_swap.sh | 6 + .../build-agents/linux/example.pkrvars.hcl | 5 + .../build-agents/linux/fsx_automounter.py | 61 +++++ .../linux/fsx_automounter.service | 12 + .../linux/install_common.al2023.sh | 37 +++ .../linux/install_common.ubuntu.sh | 35 +++ .../packer/build-agents/linux/install_mold.sh | 5 + .../linux/install_octobuild.al2023.x86_64.sh | 12 + .../linux/install_octobuild.ubuntu.x86_64.sh | 8 + .../build-agents/linux/install_sccache.sh | 7 + .../linux/mount_ephemeral.service | 12 + .../build-agents/linux/mount_ephemeral.sh | 15 ++ .../packer/build-agents/linux/octobuild.conf | 2 + .../packer/build-agents/linux/sccache.service | 15 ++ .../x86_64/amazon-linux-2023-x86_64.pkr.hcl | 212 +++++++++++++++++ .../ubuntu-jammy-22.04-amd64-server.pkr.hcl | 211 +++++++++++++++++ 20 files changed, 1176 insertions(+) create mode 100644 assets/packer/build-agents/linux/README.md create mode 100644 assets/packer/build-agents/linux/aarch64/amazon-linux-2023-arm64.pkr.hcl create mode 100644 assets/packer/build-agents/linux/aarch64/ubuntu-jammy-22.04-arm64-server.pkr.hcl create mode 100644 assets/packer/build-agents/linux/create_swap.service create mode 100644 assets/packer/build-agents/linux/create_swap.sh create mode 100644 assets/packer/build-agents/linux/example.pkrvars.hcl create mode 100644 assets/packer/build-agents/linux/fsx_automounter.py create mode 100644 
assets/packer/build-agents/linux/fsx_automounter.service create mode 100644 assets/packer/build-agents/linux/install_common.al2023.sh create mode 100644 assets/packer/build-agents/linux/install_common.ubuntu.sh create mode 100644 assets/packer/build-agents/linux/install_mold.sh create mode 100644 assets/packer/build-agents/linux/install_octobuild.al2023.x86_64.sh create mode 100644 assets/packer/build-agents/linux/install_octobuild.ubuntu.x86_64.sh create mode 100644 assets/packer/build-agents/linux/install_sccache.sh create mode 100644 assets/packer/build-agents/linux/mount_ephemeral.service create mode 100644 assets/packer/build-agents/linux/mount_ephemeral.sh create mode 100644 assets/packer/build-agents/linux/octobuild.conf create mode 100644 assets/packer/build-agents/linux/sccache.service create mode 100644 assets/packer/build-agents/linux/x86_64/amazon-linux-2023-x86_64.pkr.hcl create mode 100644 assets/packer/build-agents/linux/x86_64/ubuntu-jammy-22.04-amd64-server.pkr.hcl diff --git a/assets/packer/build-agents/linux/README.md b/assets/packer/build-agents/linux/README.md new file mode 100644 index 00000000..fbafc995 --- /dev/null +++ b/assets/packer/build-agents/linux/README.md @@ -0,0 +1,85 @@ +# Packer templates for Linux build agants + +This folder contains [Packer](https://www.packer.io/) templates for Linux build agents. You can use these templates as-is, or modify them to suit your needs. + +The following templates are currently supported: +|Operating sytem | CPU architecture | file location | +|---|---|---| +|Ubuntu Jammy 22.04 | x86_64 (a.k.a. amd64) | `x86_64/ubuntu-jammy-22.04-amd64-server.pkr.hcl` | +|Ubuntu Jammy 22.04 | aarch64 (a.k.a. arm64) | `aarch64/ubuntu-jammy-22.04-arm64-server.pkr.hcl` | +|Amazon Linux 2023 | x86_64 (a.k.a. amd64) | `x86_64/amazon-linux-2023-x86_64.pkr.hcl` | +|Amazon Linux 2023 | aarch64 (a.k.a. arm64) | `aarch64/amazon-linux-2023-arm64.pkr.hcl` | + +## Usage + +1. 
Make a copy of `example.pkrvars.hcl` and adjust the input variables as needed +2. Ensure you have active AWS credentials +3. Invoke `packer build --var-file= `, then wait for the build to complete. + +## Software packages included + +The templates install various software packages: + +### common tools + +Some common tools are installed to enable installing other software, performing maintenance tasks, and compile some C++ software: + +* git +* curl +* jq +* unzip +* dos2unix +* [AWS CLI v2](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-welcome.html) +* [AWS Systems Manager Agent](https://docs.aws.amazon.com/systems-manager/latest/userguide/ssm-agent.html) +* [Amazon Corretto](https://aws.amazon.com/corretto/) +* mount.nfs, to be able to mount FSx volumes over NFS +* python3 +* python3 packages: 'pip', 'requests', 'boto3' and 'botocore' +* clang +* cmake3 +* scons +* Development libraries for compiling the [Amazon GameLift Server SDK for C++](https://aws.amazon.com/gamelift/) +* Development libraries for compiling the Godot 4 game engine (if available in the OS's package manager) + +### mold + +The '[mold](https://github.com/rui314/mold)' linker is installed to enable faster linking. + +### FSx automounter service + +The FSx automounter systemd service is a service written in Python that automatically mounts FSx for OpenZFS volumes on instance bootup. The service uses resource tags on FSx volumes to determine if and where to mount volumes on. + +You can use the following tags on FSx volumes: +* '_automount-fsx-volume-name_' tag: specifies the name of the local mount point. The mount point specified will be prefixed with 'fsx_' by the service. +* '_automount-fsx-volume-on_' tag: This tag contains a space-delimited list of EC2 instance names on which the volume will be automatically mounted by this service (if it is running on that instance). 
+ +For example, if the FSx automounter service is running on an EC2 instance with Name tag 'ubuntu-builder', and an FSx volume has tag `automount-fsx-volume-on`=`al2023-builder ubuntu-builder` and tag `automount-fsx-volume-name`=`workspace`, then the automounter will automatically mount that volume on `/mnt/fsx_workspace`. + +Note that the automounter service makes use of the [ListTagsForResource](https://docs.aws.amazon.com/fsx/latest/APIReference/API_ListTagsForResource.html) FSx API call, which is rate-limited. If you intend to scale up hundreds of EC2 instances that are running this service, then we recommend [automatically mounting FSx volumes using `/etc/fstab`](https://docs.aws.amazon.com/fsx/latest/OpenZFSGuide/attach-linux-client.html). + +### mount_ephemeral service + +The mount_ephemeral service is a systemd service written as a simple bash script that mounts NVMe attached instance storage volume automatically as temporary storage. It does this by formatting `/dev/nvme1n1` as xfs and then mounting it on `/tmp`. This service runs on instance bootup. + +### create_swap service + +The create_swap service is a systemd service written as a simple bash script that creates a 1GB swap file on `/swapfile`. This service runs on instance bootup. + +### sccache + +'[sccache](https://github.com/mozilla/sccache)' is installed to cache c/c++ compilation artefacts, which can speed up builds by avoiding unneeded work. + +sccache is installed as a _systemd service_, and configured to use `/mnt/fsx_cache/sccache` as its cache folder. The service expects this folder to be available or set up by another service. + +### octobuild + +'[Octobuild](https://github.com/octobuild/octobuild)' is installed to act as a compilation cache for Unreal Engine. + +Octobuild is configured (in [octobuild.conf](octobuild.conf)) to use `/mnt/fsx_cache/octobuild_cache` as its cache folder, and expects this folder to be available or set up by another service. 
+ +NOTE: Octobuild is not supported on aarch64, and therefore not installed there. + + +## Processor architectures and naming conventions + +Within this folder, the processor architecture naming conventions as reported by `uname -m` are used, hence why there are scripts here with names containing "x86_64" or "aarch64". The packer template `.hcl` files are named following the naming conventions of the operating system that they are based on. Unfortunately, because some operating systems don't use the same terminology in their naming conventions throughout, this means that you'll see this lack of consistency here has well. \ No newline at end of file diff --git a/assets/packer/build-agents/linux/aarch64/amazon-linux-2023-arm64.pkr.hcl b/assets/packer/build-agents/linux/aarch64/amazon-linux-2023-arm64.pkr.hcl new file mode 100644 index 00000000..c7721345 --- /dev/null +++ b/assets/packer/build-agents/linux/aarch64/amazon-linux-2023-arm64.pkr.hcl @@ -0,0 +1,213 @@ +packer { + required_plugins { + amazon = { + version = ">= 1.2.8" + source = "github.com/hashicorp/amazon" + } + } +} + +variable "region" { + type = string + default = "us-west-2" +} + +variable "profile" { + type = string + default = "DEFAULT" +} + +variable "vpc_id" { + type = string +} + +variable "subnet_id" { + type = string +} + +variable "ami_prefix" { + type = string + default = "jenkins-builder-amazon-linux-2023-arm64" +} + +variable "public_key" { + type = string +} + +locals { + timestamp = regex_replace(timestamp(), "[- TZ:]", "") +} + +source "amazon-ebs" "al2023" { + ami_name = "${var.ami_prefix}-${local.timestamp}" + instance_type = "t4g.small" + region = var.region + profile = var.profile + source_ami_filter { + filters = { + name = "al2023-ami-2023.*-arm64" + root-device-type = "ebs" + virtualization-type = "hvm" + } + most_recent = true + owners = ["amazon"] + } + ssh_username = "ec2-user" + metadata_options { + http_endpoint = "enabled" + http_tokens = "required" + 
http_put_response_hop_limit = 1 + instance_metadata_tags = "enabled" + } + imds_support = "v2.0" + + # network specific details + vpc_id = var.vpc_id + subnet_id = var.subnet_id + associate_public_ip_address = true +} + +build { + name = "jenkins-linux-packer" + sources = [ + "source.amazon-ebs.al2023" + ] + + provisioner "file" { + source = "install_common.al2023.sh" + destination = "/tmp/install_common.al2023.sh" + } + provisioner "shell" { + inline = [ <<-EOF +cloud-init status --wait +sudo chmod 755 /tmp/install_common.al2023.sh +/tmp/install_common.al2023.sh +EOF + ] + } + + # add the public key + provisioner "shell" { + inline = [ <<-EOF +echo "${var.public_key}" >> ~/.ssh/authorized_keys +chmod 700 ~/.ssh +chmod 600 ~/.ssh/authorized_keys +EOF + ] + } + + provisioner "file" { + source = "install_mold.sh" + destination = "/tmp/install_mold.sh" + } + provisioner "shell" { + inline = [ <<-EOF +sudo chmod 755 /tmp/install_mold.sh +/tmp/install_mold.sh +EOF + ] + } + + # octobuild currently does not build on Arm64, so skipping... 
+ #provisioner "file" { + # source = "octobuild.conf" + # destination = "/tmp/octobuild.conf" + #} + #provisioner "file" { + # source = "install_octobuild.al2023.arm64.sh" + # destination = "/tmp/install_octobuild.al2023.arm64.sh" + #} + #provisioner "shell" { + # inline = [ <<-EOF +#sudo chmod 755 /tmp/install_octobuild.al2023.arm64.sh +#/tmp/install_octobuild.al2023.arm64.sh +#sudo mkdir -p /etc/octobuild/ +#sudo cp /tmp/octobuild.conf /etc/octobuild/octobuild.conf +#EOF + # ] + #} + + provisioner "file" { + source = "fsx_automounter.py" + destination = "/tmp/fsx_automounter.py" + } + provisioner "file" { + source = "fsx_automounter.service" + destination = "/tmp/fsx_automounter.service" + } + provisioner "shell" { + inline = [ <<-EOF +sudo cp /tmp/fsx_automounter.py /opt/fsx_automounter.py +sudo dos2unix /opt/fsx_automounter.py +sudo chmod 755 /opt/fsx_automounter.py +sudo mkdir -p /etc/systemd/system/ +sudo cp /tmp/fsx_automounter.service /etc/systemd/system/fsx_automounter.service +sudo chmod 755 /etc/systemd/system/fsx_automounter.service +sudo systemctl enable fsx_automounter.service +EOF + ] + } + + # set up script to automatically format and mount ephemeral storage + provisioner "file" { + source = "mount_ephemeral.sh" + destination = "/tmp/mount_ephemeral.sh" + } + provisioner "file" { + source = "mount_ephemeral.service" + destination = "/tmp/mount_ephemeral.service" + } + provisioner "shell" { + inline = [ <<-EOF +sudo cp /tmp/mount_ephemeral.sh /opt/mount_ephemeral.sh +sudo dos2unix /opt/mount_ephemeral.sh +sudo chmod 755 /opt/mount_ephemeral.sh +sudo mkdir -p /etc/systemd/system/ +sudo cp /tmp/mount_ephemeral.service /etc/systemd/system/mount_ephemeral.service +sudo chmod 755 /etc/systemd/system/mount_ephemeral.service +sudo systemctl enable mount_ephemeral.service +EOF + ] + } + + provisioner "file" { + source = "create_swap.sh" + destination = "/tmp/create_swap.sh" + } + provisioner "file" { + source = "create_swap.service" + destination = 
"/tmp/create_swap.service" + } + provisioner "shell" { + inline = [ <<-EOF +sudo cp /tmp/create_swap.sh /opt/create_swap.sh +sudo dos2unix /opt/create_swap.sh +sudo chmod 755 /opt/create_swap.sh +sudo mkdir -p /etc/systemd/system/ +sudo cp /tmp/create_swap.service /etc/systemd/system/create_swap.service +sudo chmod 755 /etc/systemd/system/create_swap.service +sudo systemctl enable create_swap.service +EOF + ] + } + + provisioner "file" { + source = "sccache.service" + destination = "/tmp/sccache.service" + } + provisioner "file" { + source = "install_sccache.sh" + destination = "/tmp/install_sccache.sh" + } + provisioner "shell" { + inline = [ <<-EOF +sudo chmod 755 /tmp/install_sccache.sh +/tmp/install_sccache.sh +sudo mkdir -p /etc/systemd/system/ +sudo cp /tmp/sccache.service /etc/systemd/system/sccache.service +sudo chmod 755 /etc/systemd/system/sccache.service +sudo systemctl enable sccache.service +EOF + ] + } +} diff --git a/assets/packer/build-agents/linux/aarch64/ubuntu-jammy-22.04-arm64-server.pkr.hcl b/assets/packer/build-agents/linux/aarch64/ubuntu-jammy-22.04-arm64-server.pkr.hcl new file mode 100644 index 00000000..ef2ea65d --- /dev/null +++ b/assets/packer/build-agents/linux/aarch64/ubuntu-jammy-22.04-arm64-server.pkr.hcl @@ -0,0 +1,211 @@ +packer { + required_plugins { + amazon = { + version = ">= 1.2.8" + source = "github.com/hashicorp/amazon" + } + } +} + +variable "region" { + type = string + default = "us-west-2" +} + +variable "profile" { + type = string + default = "DEFAULT" +} + +variable "vpc_id" { + type = string +} + +variable "subnet_id" { + type = string +} + +variable "ami_prefix" { + type = string + default = "jenkins-builder-ubuntu-jammy-22.04-arm64" +} + +variable "public_key" { + type = string +} + +locals { + timestamp = regex_replace(timestamp(), "[- TZ:]", "") +} + +source "amazon-ebs" "ubuntu" { + ami_name = "${var.ami_prefix}-${local.timestamp}" + instance_type = "t4g.small" + region = var.region + profile = var.profile + 
source_ami_filter { + filters = { + name = "ubuntu/images/*ubuntu-jammy-22.04-arm64-server-*" + root-device-type = "ebs" + virtualization-type = "hvm" + } + most_recent = true + owners = ["amazon"] + } + ssh_username = "ubuntu" + metadata_options { + http_endpoint = "enabled" + http_tokens = "required" + http_put_response_hop_limit = 1 + instance_metadata_tags = "enabled" + } + imds_support = "v2.0" + + # network specific details + vpc_id = var.vpc_id + subnet_id = var.subnet_id + associate_public_ip_address = true +} + +build { + name = "jenkins-linux-packer" + sources = [ + "source.amazon-ebs.ubuntu" + ] + + provisioner "file" { + source = "install_common.ubuntu.sh" + destination = "/tmp/install_common.ubuntu.sh" + } + provisioner "shell" { + inline = [ <<-EOF +cloud-init status --wait +sudo chmod 755 /tmp/install_common.ubuntu.sh +/tmp/install_common.ubuntu.sh +EOF + ] + } + + # add the public key + provisioner "shell" { + inline = [ <<-EOF +echo "${var.public_key}" >> ~/.ssh/authorized_keys +chmod 700 ~/.ssh +chmod 600 ~/.ssh/authorized_keys +EOF + ] + } + + provisioner "file" { + source = "install_mold.sh" + destination = "/tmp/install_mold.sh" + } + provisioner "shell" { + inline = [ <<-EOF +sudo chmod 755 /tmp/install_mold.sh +/tmp/install_mold.sh +EOF + ] + } + + + # octobuild does not seem to have binaries for ARM yet... 
skipping this section for now + #provisioner "file" { + # source = "octobuild.conf" + # destination = "/tmp/octobuild.conf" + #} + #provisioner "shell" { + # inline = [ <<-EOF +#curl -1sLf 'https://dl.cloudsmith.io/public/octobuild/octobuild/setup.deb.sh' | sudo -E bash +#sudo apt-get -o DPkg::Lock::Timeout=180 update -y +#sudo NEEDRESTART_MODE=a DEBIAN_FRONTEND=noninteractive apt-get -o DPkg::Lock::Timeout=180 install -y octobuild +#sudo mkdir -p /etc/octobuild +#sudo cp /tmp/octobuild.conf /etc/octobuild/octobuild.conf +#EOF + # ] + #} + + provisioner "file" { + source = "fsx_automounter.py" + destination = "/tmp/fsx_automounter.py" + } + provisioner "file" { + source = "fsx_automounter.service" + destination = "/tmp/fsx_automounter.service" + } + provisioner "shell" { + inline = [ <<-EOF +sudo cp /tmp/fsx_automounter.py /opt/fsx_automounter.py +sudo dos2unix /opt/fsx_automounter.py +sudo chmod 755 /opt/fsx_automounter.py +sudo mkdir -p /etc/systemd/system/ +sudo cp /tmp/fsx_automounter.service /etc/systemd/system/fsx_automounter.service +sudo chmod 755 /etc/systemd/system/fsx_automounter.service +sudo systemctl enable fsx_automounter.service +EOF + ] + } + + # set up script to automatically format and mount ephemeral storage + provisioner "file" { + source = "mount_ephemeral.sh" + destination = "/tmp/mount_ephemeral.sh" + } + provisioner "file" { + source = "mount_ephemeral.service" + destination = "/tmp/mount_ephemeral.service" + } + provisioner "shell" { + inline = [ <<-EOF +sudo cp /tmp/mount_ephemeral.sh /opt/mount_ephemeral.sh +sudo dos2unix /opt/mount_ephemeral.sh +sudo chmod 755 /opt/mount_ephemeral.sh +sudo mkdir -p /etc/systemd/system/ +sudo cp /tmp/mount_ephemeral.service /etc/systemd/system/mount_ephemeral.service +sudo chmod 755 /etc/systemd/system/mount_ephemeral.service +sudo systemctl enable mount_ephemeral.service +EOF + ] + } + + provisioner "file" { + source = "create_swap.sh" + destination = "/tmp/create_swap.sh" + } + provisioner "file" { + 
source = "create_swap.service" + destination = "/tmp/create_swap.service" + } + provisioner "shell" { + inline = [ <<-EOF +sudo cp /tmp/create_swap.sh /opt/create_swap.sh +sudo dos2unix /opt/create_swap.sh +sudo chmod 755 /opt/create_swap.sh +sudo mkdir -p /etc/systemd/system/ +sudo cp /tmp/create_swap.service /etc/systemd/system/create_swap.service +sudo chmod 755 /etc/systemd/system/create_swap.service +sudo systemctl enable create_swap.service +EOF + ] + } + + provisioner "file" { + source = "sccache.service" + destination = "/tmp/sccache.service" + } + provisioner "file" { + source = "install_sccache.sh" + destination = "/tmp/install_sccache.sh" + } + provisioner "shell" { + inline = [ <<-EOF +sudo chmod 755 /tmp/install_sccache.sh +/tmp/install_sccache.sh +sudo mkdir -p /etc/systemd/system/ +sudo cp /tmp/sccache.service /etc/systemd/system/sccache.service +sudo chmod 755 /etc/systemd/system/sccache.service +sudo systemctl enable sccache.service +EOF + ] + } +} diff --git a/assets/packer/build-agents/linux/create_swap.service b/assets/packer/build-agents/linux/create_swap.service new file mode 100644 index 00000000..fdb33a73 --- /dev/null +++ b/assets/packer/build-agents/linux/create_swap.service @@ -0,0 +1,12 @@ +[Unit] +Description=Run create_swap.sh script at boot +Before=multi-user.target + +[Service] +Type=oneshot +ExecStart=/bin/bash /opt/create_swap.sh +User=root +Group=root + +[Install] +WantedBy=multi-user.target \ No newline at end of file diff --git a/assets/packer/build-agents/linux/create_swap.sh b/assets/packer/build-agents/linux/create_swap.sh new file mode 100644 index 00000000..369e9760 --- /dev/null +++ b/assets/packer/build-agents/linux/create_swap.sh @@ -0,0 +1,6 @@ +#!/usr/bin/env bash +fallocate -l 1G -x /swapfile +chmod 600 /swapfile +mkswap /swapfile +swapon /swapfile +echo "/swapfile none swap sw 0 0" | tee -a /etc/fstab \ No newline at end of file diff --git a/assets/packer/build-agents/linux/example.pkrvars.hcl 
b/assets/packer/build-agents/linux/example.pkrvars.hcl new file mode 100644 index 00000000..9ddeade1 --- /dev/null +++ b/assets/packer/build-agents/linux/example.pkrvars.hcl @@ -0,0 +1,5 @@ +region = "us-west-2" +vpc_id = "PLACEHOLDER" # VPC id to create the AMI in +subnet_id = "PLACEHOLDER" # Subnet to create the AMI in +profile = "DEFAULT" # AWS CLI profile to use +public_key = "ssh-rsa EXAMPLE" # the public key that will be added to ~/.ssh/authorized_keys for the default user. Set this to the public key of a keypair which your build orchestrator has access to. diff --git a/assets/packer/build-agents/linux/fsx_automounter.py b/assets/packer/build-agents/linux/fsx_automounter.py new file mode 100644 index 00000000..b6d443e7 --- /dev/null +++ b/assets/packer/build-agents/linux/fsx_automounter.py @@ -0,0 +1,61 @@ +#!/usr/bin/env python3 +from botocore.utils import IMDSFetcher +import boto3 +import subprocess +import os + +# get this EC2 instance's "name" tag via EC2 instance metadata. NOTE: this won't work if the EC2 instance doesn't have access to tags through metadata +def get_instance_name(): + return IMDSFetcher()._get_request("/latest/meta-data/tags/instance/Name", None, token=IMDSFetcher()._fetch_metadata_token()).text.strip() + +# get this EC2 instance's region via EC2 instance metadata +def get_instance_region(): + return IMDSFetcher()._get_request("/latest/meta-data/placement/region", None, token=IMDSFetcher()._fetch_metadata_token()).text.strip() + +REGION = get_instance_region() + +client = boto3.client('fsx', region_name=REGION) + +# Retrieve the volumes to mount to this EC2 instance, as well as some relevant data to be able to mount them +def get_volumes_with_automount_tags(client): + instance_name = get_instance_name() + volumes = client.describe_volumes()['Volumes'] + returninfo = [] + for volume in volumes: + tags = client.list_tags_for_resource(ResourceARN=volume['ResourceARN'])['Tags'] + mount_on_tag = [t for t in tags if t['Key'] == 
'automount-fsx-volume-on'] + mount_name_tag = [t for t in tags if t['Key'] == 'automount-fsx-volume-name'] + if mount_on_tag and mount_name_tag: + if instance_name in mount_on_tag[0]['Value'].split(' '): + if volume['VolumeType'] == 'OPENZFS': + returninfo.append({ + 'Volume': volume, + 'Name': mount_name_tag[0]['Value'], + 'DNS': '%s.fsx.%s.amazonaws.com' % (volume['FileSystemId'], REGION), + 'VolumeType': volume['VolumeType'], + 'VolumePath': volume['OpenZFSConfiguration']['VolumePath'] + }) + else: + print("Currently not supported: volumeType %s" % volume['VolumeType']) + return returninfo + +# Mount FSx volumes to this EC2 instance, based on the "automount-fsx-volume-on" tag on the volume. +def mount_fsx_volumes(client): + volumeInfos = get_volumes_with_automount_tags(client) + for volumeInfo in volumeInfos: + # mount -t nfs -o noatime,nfsvers=4.2,sync,nconnect=16,rsize=1048576,wsize=1048576 $FSX_WORKSPACE_DNS:/fsx/ /mnt/fsx_workspace + if volumeInfo['VolumeType'] == 'OPENZFS': + try: + os.makedirs("/mnt/fsx_%s" % volumeInfo['Name'], exist_ok=True) + runCmd = ["mount", "-t", "nfs", "-o", "noatime,nfsvers=4.2,sync,nconnect=16,rsize=1048576,wsize=1048576", "%s:%s/" % (volumeInfo['DNS'], volumeInfo['VolumePath']), "/mnt/fsx_%s" % volumeInfo['Name']] + procinfo = subprocess.run(runCmd) + # throw an exception if the mount command failed + if procinfo.returncode != 0: + raise Exception("Exit code (%s) of mount process is nonzero when mounting '%s'. 
Runcmd: %s" % (procinfo.returncode, volumeInfo['Name'], runCmd)) + except Exception as e: + print("Failed to mount volume '%s'" % volumeInfo['Name']) + print(e) + else: + print("Currently not supported: volumeType %s" % volumeInfo['VolumeType']) + +mount_fsx_volumes(client) diff --git a/assets/packer/build-agents/linux/fsx_automounter.service b/assets/packer/build-agents/linux/fsx_automounter.service new file mode 100644 index 00000000..32e7f200 --- /dev/null +++ b/assets/packer/build-agents/linux/fsx_automounter.service @@ -0,0 +1,12 @@ +[Unit] +Description=Run fsx_automounter script at boot +Before=multi-user.target + +[Service] +Type=oneshot +ExecStart=/opt/fsx_automounter.py +User=root +Group=root + +[Install] +WantedBy=multi-user.target \ No newline at end of file diff --git a/assets/packer/build-agents/linux/install_common.al2023.sh b/assets/packer/build-agents/linux/install_common.al2023.sh new file mode 100644 index 00000000..98a1eaf7 --- /dev/null +++ b/assets/packer/build-agents/linux/install_common.al2023.sh @@ -0,0 +1,37 @@ +#!/usr/bin/env bash +# Install common tools on Amazon Linux 2023, architecture-independent. +# These common tools are necessary for Jenkins Agents, and to build/install various other software. +# Core common tools: +# git +# curl +# jq +# unzip +# AWS CLI +# AWS Systems Manager Agent +# Amazon Corretto +# mount.nfs (already installed on Amazon Linux) +# python3 +# python3-pip +# python3-requests +# boto3 +# botocore +# dos2unix +# clang +# scons +# cmake3 + +cloud-init status --wait +echo "Updating packages..." +sudo yum update -y +echo "Installing packages..." 
+sudo yum -y groupinstall "Development Tools" +sudo yum install -y awscli java-11-amazon-corretto-headless java-11-amazon-corretto-devel libarchive libarchive-devel unzip cmake python3 python3-pip python3-requests clang lld git openssl libcurl-devel openssl-devel uuid-devel zlib-devel pulseaudio-libs-devel jq freetype-devel libsndfile-devel python3 jq libX11-devel libXcursor-devel libXinerama-devel mesa-libGL-devel mesa-libGLU-devel libudev-devel libXi-devel libXrandr-devel dos2unix +sudo pip install boto3 botocore scons +if [ "$(uname -p)" == "x86_64" ]; then + sudo yum install -y https://s3.amazonaws.com/ec2-downloads-windows/SSMAgent/latest/linux_amd64/amazon-ssm-agent.rpm +fi +if [ "$(uname -p)" == "aarch64" ]; then + sudo yum install -y https://s3.amazonaws.com/ec2-downloads-windows/SSMAgent/latest/linux_arm64/amazon-ssm-agent.rpm +fi +sudo systemctl enable amazon-ssm-agent +sudo systemctl start amazon-ssm-agent diff --git a/assets/packer/build-agents/linux/install_common.ubuntu.sh b/assets/packer/build-agents/linux/install_common.ubuntu.sh new file mode 100644 index 00000000..9785e0bb --- /dev/null +++ b/assets/packer/build-agents/linux/install_common.ubuntu.sh @@ -0,0 +1,35 @@ +#!/usr/bin/env bash +# Install common tools on Ubuntu, architecture-independent. +# These common tools are necessary for Jenkins Agents, and to build/install various other software. +# Core common tools: +# git +# curl +# jq +# unzip +# AWS CLI +# AWS Systems Manager Agent +# Amazon Corretto +# mount.nfs +# python3 +# python3-pip +# python3-requests +# python3-botocore +# boto3 +# dos2unix +# clang +# scons +# cmake3 + +cloud-init status --wait +wget -O - https://apt.corretto.aws/corretto.key | sudo gpg --dearmor -o /usr/share/keyrings/corretto-keyring.gpg && \ +echo "deb [signed-by=/usr/share/keyrings/corretto-keyring.gpg] https://apt.corretto.aws stable main" | sudo tee /etc/apt/sources.list.d/corretto.list +echo "Updating apt.." 
+sudo apt-get -o DPkg::Lock::Timeout=180 update -y +echo "Installing packages..." +sudo apt-get install -y nfs-common libarchive-tools unzip cmake build-essential python3 python3-pip python3-requests python3-botocore clang lld git openssl libcurl4-openssl-dev libssl-dev uuid-dev zlib1g-dev libpulse-dev scons jq libsdl2-mixer-dev libsdl2-image-dev libsdl2-dev libfreetype-dev libsndfile1-dev libopenal-dev python3 jq libx11-dev libxcursor-dev libxinerama-dev libgl1-mesa-dev libglu-dev libasound2-dev libudev-dev libxi-dev libxrandr-dev java-11-amazon-corretto-jdk dos2unix +sudo pip install boto3 +echo "Installing AWS cli..." +curl "https://awscli.amazonaws.com/awscli-exe-linux-$(uname -m).zip" -o "awscliv2.zip" +unzip awscliv2.zip +sudo ./aws/install +sudo snap install amazon-ssm-agent --classic \ No newline at end of file diff --git a/assets/packer/build-agents/linux/install_mold.sh b/assets/packer/build-agents/linux/install_mold.sh new file mode 100644 index 00000000..61f3830a --- /dev/null +++ b/assets/packer/build-agents/linux/install_mold.sh @@ -0,0 +1,5 @@ +#!/usr/bin/env bash +# Install the mold linker on Linux (any OS), architecture-independent +# Requires common tools to be installed first. +echo "Installing mold..." +curl -s -L https://github.com/rui314/mold/releases/download/v2.31.0/mold-2.31.0-$(uname -m)-linux.tar.gz | sudo tar -xvzf - --strip-components=1 -C /usr diff --git a/assets/packer/build-agents/linux/install_octobuild.al2023.x86_64.sh b/assets/packer/build-agents/linux/install_octobuild.al2023.x86_64.sh new file mode 100644 index 00000000..a6269c20 --- /dev/null +++ b/assets/packer/build-agents/linux/install_octobuild.al2023.x86_64.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash +# Compile and octobuild on Amazon Linux 2023, x86_64 only +# (not tested on aarch64 at the moment) +# Requires common tools to be installed first. +# Will install Rust and cargo as well +sudo yum update -y +echo "Installing Rust..." 
+curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y +. "$HOME/.cargo/env" +cd $(mktemp -d) +git clone https://github.com/octobuild/octobuild.git . +cargo install --path . \ No newline at end of file diff --git a/assets/packer/build-agents/linux/install_octobuild.ubuntu.x86_64.sh b/assets/packer/build-agents/linux/install_octobuild.ubuntu.x86_64.sh new file mode 100644 index 00000000..eb806ccc --- /dev/null +++ b/assets/packer/build-agents/linux/install_octobuild.ubuntu.x86_64.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env bash +# Install octobuild on Ubuntu, x86_64 only +# (octobuild does not seem to have packages available for aarch64 at the moment) +# Requires common tools to be installed first. +curl -1sLf 'https://dl.cloudsmith.io/public/octobuild/octobuild/setup.deb.sh' | sudo -E bash +sudo apt-get -o DPkg::Lock::Timeout=180 update -y +sudo NEEDRESTART_MODE=a DEBIAN_FRONTEND=noninteractive apt-get -o DPkg::Lock::Timeout=180 install -y octobuild +sudo mkdir -p /etc/octobuild diff --git a/assets/packer/build-agents/linux/install_sccache.sh b/assets/packer/build-agents/linux/install_sccache.sh new file mode 100644 index 00000000..82028139 --- /dev/null +++ b/assets/packer/build-agents/linux/install_sccache.sh @@ -0,0 +1,7 @@ +#!/usr/bin/env bash +# Install sccache on Linux (any OS), architecture-independent +# Requires common tools to be installed first. +# This script does not set up a service or anything to automatically start it! 
+cd $(mktemp -d) +curl -s -L "https://github.com/mozilla/sccache/releases/download/v0.5.3/sccache-v0.5.3-$(uname -m)-unknown-linux-musl.tar.gz" | tar xvzf - +sudo cp sccache*/sccache /usr/bin/ diff --git a/assets/packer/build-agents/linux/mount_ephemeral.service b/assets/packer/build-agents/linux/mount_ephemeral.service new file mode 100644 index 00000000..f0abf1bf --- /dev/null +++ b/assets/packer/build-agents/linux/mount_ephemeral.service @@ -0,0 +1,12 @@ +[Unit] +Description=Run mount_ephemeral.sh script at boot +Before=multi-user.target + +[Service] +Type=oneshot +ExecStart=/bin/bash /opt/mount_ephemeral.sh +User=root +Group=root + +[Install] +WantedBy=multi-user.target \ No newline at end of file diff --git a/assets/packer/build-agents/linux/mount_ephemeral.sh b/assets/packer/build-agents/linux/mount_ephemeral.sh new file mode 100644 index 00000000..5d2ffef0 --- /dev/null +++ b/assets/packer/build-agents/linux/mount_ephemeral.sh @@ -0,0 +1,15 @@ +#!/usr/bin/env bash + +if lsblk /dev/nvme1n1 ; then + echo "/dev/nvme1n1 exists" + if [ $(lsblk --json -fs /dev/nvme1n1 | jq -r ".blockdevices[0].fstype") != "xfs" ] ; then + echo "/dev/nvme1n1 is NOT xfs - formatting..." + mkfs -t xfs /dev/nvme1n1 + fi + + if [ $(lsblk --json /dev/nvme1n1 | jq -r "[.blockdevices[0].mountpoints[] | select(. != null)] | length") -eq "0" ] ; then + echo "/dev/nvme1n1 is not mounted - mounting..." 
+ mount /dev/nvme1n1 /tmp + chmod 777 /tmp + fi +fi diff --git a/assets/packer/build-agents/linux/octobuild.conf b/assets/packer/build-agents/linux/octobuild.conf new file mode 100644 index 00000000..9379abe2 --- /dev/null +++ b/assets/packer/build-agents/linux/octobuild.conf @@ -0,0 +1,2 @@ +cache: /mnt/fsx_cache/octobuild_cache +cache_limit_mb: 128000 \ No newline at end of file diff --git a/assets/packer/build-agents/linux/sccache.service b/assets/packer/build-agents/linux/sccache.service new file mode 100644 index 00000000..92e2f62c --- /dev/null +++ b/assets/packer/build-agents/linux/sccache.service @@ -0,0 +1,15 @@ +[Unit] +Description=sccache server +Wants=network-online.target +After=network-online.target + +[Service] +Environment=SCCACHE_IDLE_TIMEOUT=0 +Environment=SCCACHE_NO_DAEMON=1 +Environment=SCCACHE_START_SERVER=1 +Environment=SCCACHE_LOG=debug +Environment=SCCACHE_DIR=/mnt/fsx_cache/sccache +ExecStart=/usr/bin/sccache + +[Install] +WantedBy=multi-user.target \ No newline at end of file diff --git a/assets/packer/build-agents/linux/x86_64/amazon-linux-2023-x86_64.pkr.hcl b/assets/packer/build-agents/linux/x86_64/amazon-linux-2023-x86_64.pkr.hcl new file mode 100644 index 00000000..2abcdb6c --- /dev/null +++ b/assets/packer/build-agents/linux/x86_64/amazon-linux-2023-x86_64.pkr.hcl @@ -0,0 +1,212 @@ +packer { + required_plugins { + amazon = { + version = ">= 1.2.8" + source = "github.com/hashicorp/amazon" + } + } +} + +variable "region" { + type = string + default = "us-west-2" +} + +variable "profile" { + type = string + default = "DEFAULT" +} + +variable "vpc_id" { + type = string +} + +variable "subnet_id" { + type = string +} + +variable "ami_prefix" { + type = string + default = "jenkins-builder-amazon-linux-2023-x86_64" +} + +variable "public_key" { + type = string +} + +locals { + timestamp = regex_replace(timestamp(), "[- TZ:]", "") +} + +source "amazon-ebs" "al2023" { + ami_name = "${var.ami_prefix}-${local.timestamp}" + instance_type = 
"c5.2xlarge" # we need to build octobuild from source as there are no packages available for AL2023, and this requires a bit beefier of an instance + region = var.region + profile = var.profile + source_ami_filter { + filters = { + name = "al2023-ami-2023.*-x86_64" + root-device-type = "ebs" + virtualization-type = "hvm" + } + most_recent = true + owners = ["amazon"] + } + ssh_username = "ec2-user" + metadata_options { + http_endpoint = "enabled" + http_tokens = "required" + http_put_response_hop_limit = 1 + instance_metadata_tags = "enabled" + } + imds_support = "v2.0" + + # network specific details + vpc_id = var.vpc_id + subnet_id = var.subnet_id + associate_public_ip_address = true +} + +build { + name = "jenkins-linux-packer" + sources = [ + "source.amazon-ebs.al2023" + ] + + provisioner "file" { + source = "install_common.al2023.sh" + destination = "/tmp/install_common.al2023.sh" + } + provisioner "shell" { + inline = [ <<-EOF +cloud-init status --wait +sudo chmod 755 /tmp/install_common.al2023.sh +/tmp/install_common.al2023.sh +EOF + ] + } + + # add the public key + provisioner "shell" { + inline = [ <<-EOF +echo "${var.public_key}" >> ~/.ssh/authorized_keys +chmod 700 ~/.ssh +chmod 600 ~/.ssh/authorized_keys +EOF + ] + } + + provisioner "file" { + source = "install_mold.sh" + destination = "/tmp/install_mold.sh" + } + provisioner "shell" { + inline = [ <<-EOF +sudo chmod 755 /tmp/install_mold.sh +/tmp/install_mold.sh +EOF + ] + } + + provisioner "file" { + source = "octobuild.conf" + destination = "/tmp/octobuild.conf" + } + provisioner "file" { + source = "install_octobuild.al2023.x86_64.sh" + destination = "/tmp/install_octobuild.al2023.x86_64.sh" + } + provisioner "shell" { + inline = [ <<-EOF +sudo chmod 755 /tmp/install_octobuild.al2023.x86_64.sh +/tmp/install_octobuild.al2023.x86_64.sh +sudo mkdir -p /etc/octobuild/ +sudo cp /tmp/octobuild.conf /etc/octobuild/octobuild.conf +EOF + ] + } + + provisioner "file" { + source = "fsx_automounter.py" + 
destination = "/tmp/fsx_automounter.py" + } + provisioner "file" { + source = "fsx_automounter.service" + destination = "/tmp/fsx_automounter.service" + } + provisioner "shell" { + inline = [ <<-EOF +sudo cp /tmp/fsx_automounter.py /opt/fsx_automounter.py +sudo dos2unix /opt/fsx_automounter.py +sudo chmod 755 /opt/fsx_automounter.py +sudo mkdir -p /etc/systemd/system/ +sudo cp /tmp/fsx_automounter.service /etc/systemd/system/fsx_automounter.service +sudo chmod 755 /etc/systemd/system/fsx_automounter.service +sudo systemctl enable fsx_automounter.service +EOF + ] + } + + # set up script to automatically format and mount ephemeral storage + provisioner "file" { + source = "mount_ephemeral.sh" + destination = "/tmp/mount_ephemeral.sh" + } + provisioner "file" { + source = "mount_ephemeral.service" + destination = "/tmp/mount_ephemeral.service" + } + provisioner "shell" { + inline = [ <<-EOF +sudo cp /tmp/mount_ephemeral.sh /opt/mount_ephemeral.sh +sudo dos2unix /opt/mount_ephemeral.sh +sudo chmod 755 /opt/mount_ephemeral.sh +sudo mkdir -p /etc/systemd/system/ +sudo cp /tmp/mount_ephemeral.service /etc/systemd/system/mount_ephemeral.service +sudo chmod 755 /etc/systemd/system/mount_ephemeral.service +sudo systemctl enable mount_ephemeral.service +EOF + ] + } + + provisioner "file" { + source = "create_swap.sh" + destination = "/tmp/create_swap.sh" + } + provisioner "file" { + source = "create_swap.service" + destination = "/tmp/create_swap.service" + } + provisioner "shell" { + inline = [ <<-EOF +sudo cp /tmp/create_swap.sh /opt/create_swap.sh +sudo dos2unix /opt/create_swap.sh +sudo chmod 755 /opt/create_swap.sh +sudo mkdir -p /etc/systemd/system/ +sudo cp /tmp/create_swap.service /etc/systemd/system/create_swap.service +sudo chmod 755 /etc/systemd/system/create_swap.service +sudo systemctl enable create_swap.service +EOF + ] + } + + provisioner "file" { + source = "sccache.service" + destination = "/tmp/sccache.service" + } + provisioner "file" { + source = 
"install_sccache.sh"
+    destination = "/tmp/install_sccache.sh"
+  }
+  provisioner "shell" {
+    inline = [ <<-EOF
+sudo chmod 755 /tmp/install_sccache.sh
+/tmp/install_sccache.sh
+sudo mkdir -p /etc/systemd/system/
+sudo cp /tmp/sccache.service /etc/systemd/system/sccache.service
+sudo chmod 755 /etc/systemd/system/sccache.service
+sudo systemctl enable sccache.service
+EOF
+    ]
+  }
+}
diff --git a/assets/packer/build-agents/linux/x86_64/ubuntu-jammy-22.04-amd64-server.pkr.hcl b/assets/packer/build-agents/linux/x86_64/ubuntu-jammy-22.04-amd64-server.pkr.hcl
new file mode 100644
index 00000000..1920d2e6
--- /dev/null
+++ b/assets/packer/build-agents/linux/x86_64/ubuntu-jammy-22.04-amd64-server.pkr.hcl
@@ -0,0 +1,211 @@
+packer {
+  required_plugins {
+    amazon = {
+      version = ">= 1.2.8"
+      source = "github.com/hashicorp/amazon"
+    }
+  }
+}
+
+variable "region" {
+  type = string
+  default = "us-west-2"
+}
+
+variable "profile" {
+  type = string
+  default = "DEFAULT"
+}
+
+variable "vpc_id" {
+  type = string
+}
+
+variable "subnet_id" {
+  type = string
+}
+
+variable "ami_prefix" {
+  type = string
+  default = "jenkins-builder-ubuntu-jammy-22.04-amd64"
+}
+
+variable "public_key" {
+  type = string
+}
+
+locals {
+  timestamp = regex_replace(timestamp(), "[- TZ:]", "")
+}
+
+source "amazon-ebs" "ubuntu" {
+  ami_name = "${var.ami_prefix}-${local.timestamp}"
+  instance_type = "t3.small"
+  region = var.region
+  profile = var.profile
+  source_ami_filter {
+    filters = {
+      name = "ubuntu/images/*ubuntu-jammy-22.04-amd64-server-*"
+      root-device-type = "ebs"
+      virtualization-type = "hvm"
+    }
+    most_recent = true
+    owners = ["099720109477", "amazon"]
+  }
+  ssh_username = "ubuntu"
+  metadata_options {
+    http_endpoint = "enabled"
+    http_tokens = "required"
+    http_put_response_hop_limit = 1
+    instance_metadata_tags = "enabled"
+  }
+  imds_support = "v2.0"
+
+  # network specific details
+  vpc_id = var.vpc_id
+  subnet_id = var.subnet_id
+  associate_public_ip_address = true
+}
+
+build {
+  name =
"jenkins-linux-packer"
+  sources = [
+    "source.amazon-ebs.ubuntu"
+  ]
+
+  provisioner "file" {
+    source = "install_common.ubuntu.sh"
+    destination = "/tmp/install_common.ubuntu.sh"
+  }
+  provisioner "shell" {
+    inline = [ <<-EOF
+cloud-init status --wait
+sudo chmod 755 /tmp/install_common.ubuntu.sh
+/tmp/install_common.ubuntu.sh
+EOF
+    ]
+  }
+
+  # add the public key
+  provisioner "shell" {
+    inline = [ <<-EOF
+echo "${var.public_key}" >> ~/.ssh/authorized_keys
+chmod 700 ~/.ssh
+chmod 600 ~/.ssh/authorized_keys
+EOF
+    ]
+  }
+
+  provisioner "file" {
+    source = "install_mold.sh"
+    destination = "/tmp/install_mold.sh"
+  }
+  provisioner "shell" {
+    inline = [ <<-EOF
+sudo chmod 755 /tmp/install_mold.sh
+/tmp/install_mold.sh
+EOF
+    ]
+  }
+
+  provisioner "file" {
+    source = "octobuild.conf"
+    destination = "/tmp/octobuild.conf"
+  }
+  provisioner "file" {
+    source = "install_octobuild.ubuntu.x86_64.sh"
+    destination = "/tmp/install_octobuild.ubuntu.x86_64.sh"
+  }
+  provisioner "shell" {
+    inline = [ <<-EOF
+sudo chmod 755 /tmp/install_octobuild.ubuntu.x86_64.sh
+/tmp/install_octobuild.ubuntu.x86_64.sh
+sudo install -D -m 644 /tmp/octobuild.conf /etc/octobuild/octobuild.conf
+EOF
+    ]
+  }
+
+  provisioner "file" {
+    source = "fsx_automounter.py"
+    destination = "/tmp/fsx_automounter.py"
+  }
+  provisioner "file" {
+    source = "fsx_automounter.service"
+    destination = "/tmp/fsx_automounter.service"
+  }
+  provisioner "shell" {
+    inline = [ <<-EOF
+sudo cp /tmp/fsx_automounter.py /opt/fsx_automounter.py
+sudo dos2unix /opt/fsx_automounter.py
+sudo chmod 755 /opt/fsx_automounter.py
+sudo mkdir -p /etc/systemd/system/
+sudo cp /tmp/fsx_automounter.service /etc/systemd/system/fsx_automounter.service
+sudo chmod 755 /etc/systemd/system/fsx_automounter.service
+sudo systemctl enable fsx_automounter.service
+EOF
+    ]
+  }
+
+  # set up script to automatically format and mount ephemeral storage
+  provisioner "file" {
+    source = "mount_ephemeral.sh"
+    destination = "/tmp/mount_ephemeral.sh"
+  }
+
provisioner "file" { + source = "mount_ephemeral.service" + destination = "/tmp/mount_ephemeral.service" + } + provisioner "shell" { + inline = [ <<-EOF +sudo cp /tmp/mount_ephemeral.sh /opt/mount_ephemeral.sh +sudo dos2unix /opt/mount_ephemeral.sh +sudo chmod 755 /opt/mount_ephemeral.sh +sudo mkdir -p /etc/systemd/system/ +sudo cp /tmp/mount_ephemeral.service /etc/systemd/system/mount_ephemeral.service +sudo chmod 755 /etc/systemd/system/mount_ephemeral.service +sudo systemctl enable mount_ephemeral.service +EOF + ] + } + + provisioner "file" { + source = "create_swap.sh" + destination = "/tmp/create_swap.sh" + } + provisioner "file" { + source = "create_swap.service" + destination = "/tmp/create_swap.service" + } + provisioner "shell" { + inline = [ <<-EOF +sudo cp /tmp/create_swap.sh /opt/create_swap.sh +sudo dos2unix /opt/create_swap.sh +sudo chmod 755 /opt/create_swap.sh +sudo mkdir -p /etc/systemd/system/ +sudo cp /tmp/create_swap.service /etc/systemd/system/create_swap.service +sudo chmod 755 /etc/systemd/system/create_swap.service +sudo systemctl enable create_swap.service +EOF + ] + } + + provisioner "file" { + source = "sccache.service" + destination = "/tmp/sccache.service" + } + provisioner "file" { + source = "install_sccache.sh" + destination = "/tmp/install_sccache.sh" + } + provisioner "shell" { + inline = [ <<-EOF +sudo chmod 755 /tmp/install_sccache.sh +/tmp/install_sccache.sh +sudo mkdir -p /etc/systemd/system/ +sudo cp /tmp/sccache.service /etc/systemd/system/sccache.service +sudo chmod 755 /etc/systemd/system/sccache.service +sudo systemctl enable sccache.service +EOF + ] + } +}