diff --git a/.packit.yaml b/.packit.yaml
index e25124bdc..56894d60b 100644
--- a/.packit.yaml
+++ b/.packit.yaml
@@ -68,23 +68,3 @@ jobs:
     tmt_plan: /integration-build
     skip_build: true
     identifier: integration-test
-
-  # To avoid https://issues.redhat.com/browse/TFT-2691
-  # x86_64 tf runner is not enough now
-  - job: tests
-    trigger: pull_request
-    targets:
-      - centos-stream-9-x86_64
-      - centos-stream-10-aarch64
-      - fedora-41-x86_64
-    tmt_plan: /to-existing-root
-    identifier: e2e-test-to-existing-root
-
-  - job: tests
-    trigger: pull_request
-    targets:
-      - centos-stream-9-aarch64
-      - centos-stream-10-aarch64
-      - fedora-41-aarch64
-    tmt_plan: /to-disk
-    identifier: e2e-test-to-disk
diff --git a/plans/e2e.fmf b/plans/e2e.fmf
deleted file mode 100644
index 563dccf58..000000000
--- a/plans/e2e.fmf
+++ /dev/null
@@ -1,52 +0,0 @@
-discover:
-  how: fmf
-  test: e2e
-adjust:
-  - when: arch == x86_64 or arch == aarch64
-    provision:
-      hardware:
-        cpu:
-          processors: ">= 2"
-        memory: ">= 6 GB"
-        virtualization:
-          is-supported: true
-prepare:
-  - how: install
-    package:
-      - ansible-core
-      - firewalld
-      - podman
-      - skopeo
-      - jq
-      - openssl
-      - qemu-img
-      - qemu-kvm
-      - libvirt
-      - virt-install
-      - xorriso
-  - how: shell
-    script: ansible-galaxy collection install https://ansible-collection.s3.amazonaws.com/ansible-posix-1.5.4.tar.gz https://ansible-collection.s3.amazonaws.com/community-general-8.5.0.tar.gz
-execute:
-  how: tmt
-
-/to-existing-root:
-  summary: Run bootc install to-existing-root and bootc switch test locally (nested)
-  environment+:
-    TEST_CASE: to-existing-root
-  discover+:
-    test:
-      - /to-existing-root
-  adjust+:
-    - when: arch == ppc64le
-      enabled: false
-
-/to-disk:
-  summary: Run bootc install to-disk and bootc upgrade test locally (nested)
-  environment+:
-    TEST_CASE: to-disk
-  discover+:
-    test:
-      - /to-disk
-  adjust+:
-    - when: arch == ppc64le
-      enabled: false
diff --git a/tests/e2e/README.md b/tests/e2e/README.md
deleted file mode 100644
index 54d2815ec..000000000
--- a/tests/e2e/README.md
+++ /dev/null
@@ -1,11 +0,0 @@
-## End to end (e2e) Test
-
-### Scenarios
-
-The end to end (e2e) test suite covers `bootc install to-existing-root`, `bootc install to-disk`, `bootc upgrade`, and `bootc switch`.
-
-* The bootc install/upgrade/switch scenarios install, upgrade, and switch the bootc image, then run system checks such as verifying mount points and permissions, running podman as root and rootless, and checking persistent logging.
-
-### Run the end to end test
-
-Test runs are driven by [Packit](https://packit.dev/) and executed on [Testing Farm](https://docs.testing-farm.io/).
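For reference, the deleted `check-system.yaml` playbook verifies the booted deployment by querying `bootc status --json` and comparing the reported image reference with the image the test pushed. A minimal standalone sketch of that check, assuming `bootc` and `jq` are available on the guest and the expected image reference is passed as the first argument:

```bash
#!/bin/bash
# Sketch of the booted-image check performed by the removed playbook:
# compare the image bootc reports as booted with the expected reference.
set -euo pipefail

EXPECTED_IMAGE="${1:?usage: $0 <expected-image-reference>}"

# bootc reports the booted deployment as JSON; the image reference lives at
# .status.booted.image.image.image (the same path the playbook queried with jq).
booted_image=$(sudo bootc status --json | jq -r '.status.booted.image.image.image')

if [[ "$booted_image" == "$EXPECTED_IMAGE" ]]; then
    echo "PASS: booted image is $booted_image"
else
    echo "FAIL: booted $booted_image, expected $EXPECTED_IMAGE" >&2
    exit 1
fi
```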
diff --git a/tests/e2e/bootc-install.sh b/tests/e2e/bootc-install.sh
deleted file mode 100755
index 52b186d9b..000000000
--- a/tests/e2e/bootc-install.sh
+++ /dev/null
@@ -1,352 +0,0 @@
-#!/bin/bash
-set -exuo pipefail
-
-source ./shared_lib.sh
-dump_runner
-deploy_libvirt_network
-
-ARCH=$(uname -m)
-
-TEMPDIR=$(mktemp -d)
-trap 'rm -rf -- "$TEMPDIR"' EXIT
-
-# SSH configurations
-SSH_KEY=${TEMPDIR}/id_rsa
-ssh-keygen -f "${SSH_KEY}" -N "" -q -t rsa-sha2-256 -b 2048
-SSH_KEY_PUB="${SSH_KEY}.pub"
-
-INSTALL_CONTAINERFILE=${TEMPDIR}/Containerfile.install
-UPGRADE_CONTAINERFILE=${TEMPDIR}/Containerfile.upgrade
-QUAY_REPO_TAG="${QUAY_REPO_TAG:-$(tr -dc a-z0-9 < /dev/urandom | head -c 4 ; echo '')}"
-INVENTORY_FILE="${TEMPDIR}/inventory"
-# Local registry IP and port
-REGISTRY_IP="192.168.100.1"
-REGISTRY_PORT=5000
-
-# VM firmware
-if [[ "$ARCH" == "x86_64" ]]; then
-    FIRMWARE_LIST=( \
-        "bios" \
-        "uefi" \
-    )
-    RND_LINE=$((RANDOM % 2))
-    FIRMWARE="${FIRMWARE_LIST[$RND_LINE]}"
-else
-    FIRMWARE="uefi"
-fi
-
-# Get OS data.
-source /etc/os-release
-
-case "${ID}-${VERSION_ID}" in
-    "centos-9")
-        TEST_OS="centos-stream-9"
-        TIER1_IMAGE_URL="quay.io/centos-bootc/centos-bootc:stream9"
-        SSH_USER="cloud-user"
-        REDHAT_VERSION_ID="9"
-        BOOT_ARGS="uefi,firmware.feature0.name=secure-boot,firmware.feature0.enabled=no"
-        ;;
-    "centos-10")
-        TEST_OS="centos-stream-10"
-        TIER1_IMAGE_URL="quay.io/centos-bootc/centos-bootc:stream10"
-        SSH_USER="cloud-user"
-        REDHAT_VERSION_ID="10"
-        BOOT_ARGS="uefi,firmware.feature0.name=secure-boot,firmware.feature0.enabled=no"
-        # workaround CS10 libvirt selinux policy issue https://issues.redhat.com/browse/RHEL-46893
-        sudo setenforce 0
-        ;;
-    "fedora-"*)
-        TEST_OS="fedora-${VERSION_ID}"
-        TIER1_IMAGE_URL="quay.io/fedora/fedora-bootc:${VERSION_ID}"
-        REDHAT_VERSION_ID="${VERSION_ID}"
-        SSH_USER="fedora"
-        BOOT_ARGS="uefi"
-        ;;
-    *)
-        redprint "Variable TEST_OS has to be defined"
-        exit 1
-        ;;
-esac
-
-
-# FIXME: https://github.com/containers/podman/issues/22813
-if [[ "$REDHAT_VERSION_ID" == "10" ]]; then
-    sed -i 's/^compression_format = .*/compression_format = "gzip"/' /usr/share/containers/containers.conf
-fi
-
-# Setup local registry
-greenprint "Generate certificate"
-openssl req \
-    -newkey rsa:4096 \
-    -nodes \
-    -sha256 \
-    -keyout "${TEMPDIR}/domain.key" \
-    -addext "subjectAltName = IP:${REGISTRY_IP}" \
-    -x509 \
-    -days 365 \
-    -out "${TEMPDIR}/domain.crt" \
-    -subj "/C=US/ST=Denial/L=Stockholm/O=bootc/OU=bootc-test/CN=bootc-test/emailAddress=bootc-test@bootc-test.org"
-
-greenprint "Update CA Trust"
-sudo cp "${TEMPDIR}/domain.crt" "/etc/pki/ca-trust/source/anchors/${REGISTRY_IP}.crt"
-sudo update-ca-trust
-
-greenprint "Deploy local registry"
-sudo podman run \
-    -d \
-    --name registry \
-    --replace \
-    --network host \
-    -v "${TEMPDIR}":/certs:z \
-    -e REGISTRY_HTTP_ADDR="${REGISTRY_IP}:${REGISTRY_PORT}" \
-    -e REGISTRY_HTTP_TLS_CERTIFICATE=/certs/domain.crt \
-    -e REGISTRY_HTTP_TLS_KEY=/certs/domain.key \
-    quay.io/bootc-test/registry:2.8.3
-sudo podman ps -a
-
-# Test image URL
-TEST_IMAGE_NAME="bootc-workflow-test"
-TEST_IMAGE_URL="${REGISTRY_IP}:${REGISTRY_PORT}/${TEST_IMAGE_NAME}:${QUAY_REPO_TAG}"
-
-# Debug PACKIT_COPR_PROJECT and PACKIT_COPR_RPMS
-echo "$PACKIT_COPR_PROJECT and $PACKIT_COPR_RPMS"
-
-# Generate bootc copr repo file
-if [[ "$VERSION_ID" == 41 ]]; then
-    REPLACE_TEST_OS="${ID}-rawhide"
-else
-    REPLACE_TEST_OS="$TEST_OS"
-fi
-sed "s|REPLACE_COPR_PROJECT|${PACKIT_COPR_PROJECT}|; s|REPLACE_TEST_OS|${REPLACE_TEST_OS}|" files/bootc.repo.template | tee "${TEMPDIR}"/bootc.repo > /dev/null
-
-# Configure Containerfile
-greenprint "Create $TEST_OS installation Containerfile"
-tee "$INSTALL_CONTAINERFILE" > /dev/null << REALEOF
-FROM "$TIER1_IMAGE_URL"
-COPY bootc.repo /etc/yum.repos.d/
-COPY domain.crt /etc/pki/ca-trust/source/anchors/
-RUN dnf -y update bootc && \
-    update-ca-trust
-RUN cat <<EOF >> /usr/lib/bootc/install/00-mitigations.toml
-[install.filesystem.root]
-type = "xfs"
-[install]
-kargs = ["mitigations=on", "nosmt"]
-EOF
-RUN mkdir -p /usr/lib/bootc/kargs.d
-RUN cat <<EOF >> /usr/lib/bootc/kargs.d/01-console.toml
-kargs = ["console=ttyS0","panic=0"]
-EOF
-REALEOF
-
-case "$TEST_CASE" in
-    "to-existing-root")
-        SSH_USER="root"
-        SSH_KEY_PUB_CONTENT=$(cat "${SSH_KEY_PUB}")
-        mkdir -p "${TEMPDIR}/usr/share/containers/systemd"
-        cp files/caddy.container files/node_exporter.container "${TEMPDIR}/usr/share/containers/systemd"
-        tee -a "$INSTALL_CONTAINERFILE" > /dev/null << EOF
-COPY usr/ usr/
-RUN mkdir -p /usr/etc-system/ && \
-    echo 'AuthorizedKeysFile /usr/etc-system/%u.keys' >> /etc/ssh/sshd_config.d/30-auth-system.conf && \
-    echo "$SSH_KEY_PUB_CONTENT" > /usr/etc-system/root.keys && \
-    chmod 0600 /usr/etc-system/root.keys && \
-    dnf -y install qemu-guest-agent && \
-    dnf clean all && \
-    systemctl enable qemu-guest-agent && \
-    ln -s /usr/share/containers/systemd/caddy.container /usr/lib/bootc/bound-images.d/caddy.container && \
-    ln -s /usr/share/containers/systemd/node_exporter.container /usr/lib/bootc/bound-images.d/node_exporter.container
-EOF
-        # logical bound image
-        LBI="enabled"
-        ;;
-    "to-disk")
-        tee -a "$INSTALL_CONTAINERFILE" > /dev/null << EOF
-RUN dnf -y install python3 cloud-init && \
-    dnf -y clean all
-EOF
-        # LBI is disabled in to-disk test
-        LBI="disabled"
-        ;;
-esac
-
-greenprint "Check $TEST_OS installation Containerfile"
-cat "$INSTALL_CONTAINERFILE"
-
-# Build test bootc image and push to local registry
-greenprint "Build $TEST_OS installation container image"
-sudo podman build --tls-verify=false -t "${TEST_IMAGE_NAME}:${QUAY_REPO_TAG}" -f "$INSTALL_CONTAINERFILE" "$TEMPDIR"
-
-greenprint "Push $TEST_OS installation container image"
-sudo podman push --tls-verify=false --quiet "${TEST_IMAGE_NAME}:${QUAY_REPO_TAG}" "$TEST_IMAGE_URL"
-
-# Prepare Ansible inventory file and ansible.cfg
-greenprint "Prepare inventory file"
-tee -a "$INVENTORY_FILE" > /dev/null << EOF
-[cloud]
-localhost
-
-[guest]
-
-[cloud:vars]
-ansible_connection=local
-
-[guest:vars]
-ansible_user="$SSH_USER"
-ansible_private_key_file="$SSH_KEY"
-ansible_ssh_common_args="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null"
-
-[all:vars]
-ansible_python_interpreter=/usr/bin/python3
-EOF
-
-greenprint "Prepare ansible.cfg"
-export ANSIBLE_CONFIG="playbooks/ansible.cfg"
-
-# Run bootc install to-disk test
-case "$TEST_CASE" in
-    "to-existing-root")
-        DOWNLOAD_IMAGE="true"
-        AIR_GAPPED_DIR="$TEMPDIR"/virtiofs
-        mkdir "$AIR_GAPPED_DIR"
-        ;;
-    "to-disk")
-        DOWNLOAD_IMAGE="false"
-        AIR_GAPPED_DIR=""
-        greenprint "Configure rootfs randomly"
-        ROOTFS_LIST=( \
-            "ext4" \
-            "xfs" \
-        )
-        RND_LINE=$((RANDOM % 2))
-        ROOTFS="${ROOTFS_LIST[$RND_LINE]}"
-
-        if [[ "$TEST_OS" == "fedora"* ]]; then
-            ROOTFS="btrfs"
-        fi
-
-        greenprint "💾 Create disk.raw"
-        sudo truncate -s 10G disk.raw
-
-        greenprint "bootc install to disk.raw"
-        sudo podman run \
-            --rm \
-            --privileged \
-            --pid=host \
-            --security-opt label=type:unconfined_t \
-            -v .:/output \
-            "$TEST_IMAGE_URL" \
-            bootc install to-disk --filesystem "$ROOTFS" --generic-image --via-loopback /output/disk.raw
-
-        sudo qemu-img convert -f raw ./disk.raw -O qcow2 "/var/lib/libvirt/images/disk.qcow2"
-        rm -f disk.raw
-        ;;
-esac
-
-# Start disk.qcow2 for to-disk test
-# Start a new VM for to-existing-root test
-greenprint "Deploy VM"
-ansible-playbook -v \
-    -i "$INVENTORY_FILE" \
-    -e test_os="$TEST_OS" \
-    -e ssh_user="$SSH_USER" \
-    -e ssh_key_pub="$SSH_KEY_PUB" \
-    -e inventory_file="$INVENTORY_FILE" \
-    -e download_image="$DOWNLOAD_IMAGE" \
-    -e air_gapped_dir="$AIR_GAPPED_DIR" \
-    -e firmware="$FIRMWARE" \
-    -e boot_args="$BOOT_ARGS" \
-    playbooks/deploy-libvirt.yaml
-
-# Run bootc install to-existing-root test
-if [[ "$TEST_CASE" == "to-existing-root" ]]; then
-    greenprint "Install $TEST_OS bootc system"
-    ansible-playbook -v \
-        -i "$INVENTORY_FILE" \
-        -e test_os="$TEST_OS" \
-        -e test_image_url="$TEST_IMAGE_URL" \
-        -e test_case="$TEST_CASE" \
-        playbooks/install.yaml
-fi
-
-# Check bootc system
-greenprint "Run ostree checking test on VM"
-ansible-playbook -v \
-    -i "$INVENTORY_FILE" \
-    -e test_os="$TEST_OS" \
-    -e bootc_image="$TEST_IMAGE_URL" \
-    -e image_label_version_id="$REDHAT_VERSION_ID" \
-    -e kargs="mitigations=on,nosmt,console=ttyS0,panic=0" \
-    -e lbi="$LBI" \
-    playbooks/check-system.yaml
-
-# Prepare upgrade containerfile
-greenprint "Create upgrade Containerfile"
-tee "$UPGRADE_CONTAINERFILE" > /dev/null << REALEOF
-FROM "$TEST_IMAGE_URL"
-RUN dnf -y install wget && \
-    dnf -y clean all
-RUN rm /usr/lib/bootc/kargs.d/01-console.toml
-RUN cat <<EOF >> /usr/lib/bootc/kargs.d/01-console.toml
-kargs = ["systemd.unified_cgroup_hierarchy=1","console=ttyS","panic=0"]
-EOF
-REALEOF
-
-# Build upgrade container image and push to local registry
-greenprint "Build $TEST_OS upgrade container image"
-sudo podman build --tls-verify=false -t "${TEST_IMAGE_NAME}:${QUAY_REPO_TAG}" -f "$UPGRADE_CONTAINERFILE" .
-
-greenprint "Push $TEST_OS upgrade container image"
-sudo podman push --tls-verify=false --quiet "${TEST_IMAGE_NAME}:${QUAY_REPO_TAG}" "$TEST_IMAGE_URL"
-
-# Copy upgrade image to local folder for bootc switch test
-if [[ "$AIR_GAPPED_DIR" != "" ]]; then
-    skopeo copy docker://"$TEST_IMAGE_URL" dir://"$AIR_GAPPED_DIR"
-    BOOTC_IMAGE="/mnt"
-else
-    BOOTC_IMAGE="$TEST_IMAGE_URL"
-fi
-
-# bootc upgrade/switch test
-greenprint "Upgrade $TEST_OS system"
-ansible-playbook -v \
-    -i "$INVENTORY_FILE" \
-    -e air_gapped_dir="$AIR_GAPPED_DIR" \
-    playbooks/upgrade.yaml
-
-# Check bootc system after upgrade/switch
-greenprint "Run ostree checking test after upgrade on VM"
-ansible-playbook -v \
-    -i "$INVENTORY_FILE" \
-    -e test_os="$TEST_OS" \
-    -e bootc_image="$BOOTC_IMAGE" \
-    -e image_label_version_id="$REDHAT_VERSION_ID" \
-    -e upgrade="true" \
-    -e kargs="systemd.unified_cgroup_hierarchy=1,console=ttyS,panic=0" \
-    -e lbi="$LBI" \
-    playbooks/check-system.yaml
-
-# bootc rollback test
-greenprint "Rollback $TEST_OS system"
-ansible-playbook -v \
-    -i "$INVENTORY_FILE" \
-    -e air_gapped_dir="$AIR_GAPPED_DIR" \
-    playbooks/rollback.yaml
-
-# Test finished and system clean up
-greenprint "Clean up"
-unset ANSIBLE_CONFIG
-sudo virsh destroy "bootc-${TEST_OS}"
-if [[ "$FIRMWARE" == "uefi" ]]; then
-    sudo virsh undefine "bootc-${TEST_OS}" --nvram
-else
-    sudo virsh undefine "bootc-${TEST_OS}"
-fi
-if [[ "$TEST_CASE" == "to-disk" ]]; then
-    sudo virsh vol-delete --pool images disk.qcow2
-else
-    sudo virsh vol-delete --pool images "bootc-${TEST_OS}.qcow2"
-fi
-
-greenprint "🎉 All tests passed."
-exit 0 diff --git a/tests/e2e/e2e.fmf b/tests/e2e/e2e.fmf deleted file mode 100644 index 06eb9fbaa..000000000 --- a/tests/e2e/e2e.fmf +++ /dev/null @@ -1,9 +0,0 @@ -/to-existing-root: - summary: bootc install to-existing-root and bootc switch test - test: ./bootc-install.sh - duration: 90m - -/to-disk: - summary: bootc install to-disk and bootc upgrade test - test: ./bootc-install.sh - duration: 90m diff --git a/tests/e2e/files/bootc.repo.template b/tests/e2e/files/bootc.repo.template deleted file mode 100644 index c6072a609..000000000 --- a/tests/e2e/files/bootc.repo.template +++ /dev/null @@ -1,6 +0,0 @@ -[bootc] -name=bootc -baseurl=https://download.copr.fedorainfracloud.org/results/REPLACE_COPR_PROJECT/REPLACE_TEST_OS-$basearch/ -enabled=1 -gpgcheck=0 -repo_gpgcheck=0 diff --git a/tests/e2e/files/caddy.container b/tests/e2e/files/caddy.container deleted file mode 100644 index 507917ef1..000000000 --- a/tests/e2e/files/caddy.container +++ /dev/null @@ -1,11 +0,0 @@ -[Unit] -Description=Run a demo webserver - -[Container] -GlobalArgs=--storage-opt=additionalimagestore=/usr/lib/bootc/storage -Image=docker.io/library/caddy:2.8.4 -PublishPort=80:80 -ReadOnly=true - -[Install] -WantedBy=default.target diff --git a/tests/e2e/files/node_exporter.container b/tests/e2e/files/node_exporter.container deleted file mode 100644 index cf3a69c82..000000000 --- a/tests/e2e/files/node_exporter.container +++ /dev/null @@ -1,13 +0,0 @@ -[Unit] -Description=Prometheus node exporter - -[Container] -Network=host -PodmanArgs=--pid=host --storage-opt=additionalimagestore=/usr/lib/bootc/storage -Volume=/:/host,ro,rslave -Image=quay.io/prometheus/node-exporter:v1.8.2 -Exec=--path.rootfs=/host -ReadOnly=true - -[Install] -WantedBy=default.target diff --git a/tests/e2e/playbooks/ansible.cfg b/tests/e2e/playbooks/ansible.cfg deleted file mode 100644 index af6f37801..000000000 --- a/tests/e2e/playbooks/ansible.cfg +++ /dev/null @@ -1,8 +0,0 @@ -[defaults] -timeout = 30 -# human-readable stdout/stderr results display -stdout_callback = yaml - -[ssh_connection] -# scp_if_ssh=True -pipelining=True diff --git a/tests/e2e/playbooks/check-system.yaml b/tests/e2e/playbooks/check-system.yaml deleted file mode 100644 index 38cf0ae4d..000000000 --- a/tests/e2e/playbooks/check-system.yaml +++ /dev/null @@ -1,558 +0,0 @@ ---- -- hosts: guest - become: false - vars: - bootc_image: "" - upgrade: "" - kargs: "" - lbi: "" - total_counter: "0" - failed_counter: "0" - - tasks: - # current target host's IP address - - debug: var=ansible_all_ipv4_addresses - - debug: var=ansible_facts['distribution_version'] - - debug: var=ansible_facts['distribution'] - - debug: var=ansible_facts['architecture'] - - - name: check bios or uefi - stat: - path: /sys/firmware/efi - - - name: check secure boot status - command: mokutil --sb-state - ignore_errors: true - - - name: check tpm device - stat: - path: /dev/tpm0 - ignore_errors: true - - - name: check partition size - command: df -Th - ignore_errors: true - become: true - - - name: check disk partition table - command: fdisk -l - ignore_errors: true - become: true - - - name: check mount table - command: findmnt - ignore_errors: true - - - name: check rpm-ostree status - command: rpm-ostree status - register: result_rpm_ostree_status - ignore_errors: true - - # issue https://github.com/containers/bootc/issues/800 - # ostree-rs-ext version-skew test (bumped in rpm-ostree and in bootc) - - name: check rpm-ostree output - block: - - assert: - that: - - "'error' not in 
result_rpm_ostree_status.stdout" - fail_msg: "rpm-ostree status failed" - success_msg: "rpm-ostree status succeeded" - always: - - set_fact: - total_counter: "{{ total_counter | int + 1 }}" - rescue: - - name: failed count + 1 - set_fact: - failed_counter: "{{ failed_counter | int + 1 }}" - - - name: check bootc status - command: bootc status - ignore_errors: true - become: true - - - name: check ostree finalize staged log - command: journalctl -b -1 -u ostree-finalize-staged.service - ignore_errors: true - become: true - - # case: check installed container image - - name: get installed container image - shell: bootc status --json | jq -r '.status.booted.image.image.image' - register: result_bootc_status - become: true - - - set_fact: - installed_image: "{{ result_bootc_status.stdout }}" - - - name: check commit deployed and built - block: - - assert: - that: - - installed_image == bootc_image - fail_msg: "{{ bootc_image }} IS NOT installed" - success_msg: "{{ bootc_image }} installed" - always: - - set_fact: - total_counter: "{{ total_counter | int + 1 }}" - rescue: - - name: failed count + 1 - set_fact: - failed_counter: "{{ failed_counter | int + 1 }}" - - # case: check ostree-remount service status - - name: check ostree-remount service status - command: systemctl is-active ostree-remount.service - register: result_remount - - - name: ostree-remount should be started - block: - - assert: - that: - - result_remount.stdout == "active" - fail_msg: "ostree-remount is not started by default" - success_msg: "starting ostree-remount successful" - always: - - set_fact: - total_counter: "{{ total_counter | int + 1 }}" - rescue: - - name: failed count + 1 - set_fact: - failed_counter: "{{ failed_counter | int + 1 }}" - - - name: set mount point device name - command: findmnt -r -v -o SOURCE -n /sysroot - register: result_sysroot_source - - - set_fact: - device_name: "{{ result_sysroot_source.stdout }}" - - - name: get filesystem type - shell: df --output=fstype -v /sysroot | grep -v Type - register: result_fstype - - - set_fact: - fstype: "{{ result_fstype.stdout }}" - - - name: get ostree osname - shell: rpm-ostree status --json | jq -r '.deployments[0].osname' - register: result_osname - - - set_fact: - osname: "{{ result_osname.stdout }}" - - - name: get ostree checksum - shell: bootc status --json | jq -r '.status.booted.ostree.checksum' - register: result_ostree_checksum - become: true - - - set_fact: - ostree_checksum: "{{ result_ostree_checksum.stdout }}" - - # case: check /sysroot mount status - - name: check /sysroot mount status - shell: findmnt -r -o OPTIONS -n /sysroot | awk -F "," '{print $1}' - register: result_sysroot_mount_status - - - name: /sysroot should be mount with ro permission - block: - - assert: - that: - - result_sysroot_mount_status.stdout == "ro" - fail_msg: "/sysroot is not mounted with ro permission" - success_msg: "/sysroot is mounted with ro permission" - always: - - set_fact: - total_counter: "{{ total_counter | int + 1 }}" - rescue: - - name: failed count + 1 - set_fact: - failed_counter: "{{ failed_counter | int + 1 }}" - - # case: check /var mount point - - name: check /var mount point - command: findmnt -r -o SOURCE -n /var - register: result_var_mount_point - - - name: /var mount point checking - block: - - assert: - that: - - result_var_mount_point.stdout == var_mount_path - fail_msg: "/var does not mount on {{ var_mount_path }}" - success_msg: "/var mounts on {{ var_mount_path }}" - always: - - set_fact: - total_counter: "{{ total_counter | int + 1 }}" - 
rescue: - - name: failed count + 1 - set_fact: - failed_counter: "{{ failed_counter | int + 1 }}" - vars: - var_mount_path: "{{ device_name }}[/ostree/deploy/{{ osname }}/var]" - when: fstype != "btrfs" - - # btrfs defines subvolume /root in fedora - # but for bootc install to-disk will set btrfs subvolume / - - name: /var mount point checking - btrfs - block: - - assert: - that: - - result_var_mount_point.stdout == var_mount_path_1 or result_var_mount_point.stdout == var_mount_path_2 - fail_msg: "/var does not mount on {{ var_mount_path_1 }} or {{ var_mount_path_2 }}" - success_msg: "/var mounts on {{ var_mount_path_1 }} or {{ var_mount_path_2 }}" - always: - - set_fact: - total_counter: "{{ total_counter | int + 1 }}" - rescue: - - name: failed count + 1 - set_fact: - failed_counter: "{{ failed_counter | int + 1 }}" - vars: - var_mount_path_1: "{{ device_name }}[/root/ostree/deploy/{{ osname }}/var]" - var_mount_path_2: "{{ device_name }}[/ostree/deploy/{{ osname }}/var]" - when: fstype == "btrfs" - - # case: check /var mount status - - name: check /var mount status - shell: findmnt -r -o OPTIONS -n /var | awk -F "," '{print $1}' - register: result_var_mount_status - - - name: /var should be mount with rw permission - block: - - assert: - that: - - result_var_mount_status.stdout == "rw" - fail_msg: "/var is not mounted with rw permission" - success_msg: "/var is mounted with rw permission" - always: - - set_fact: - total_counter: "{{ total_counter | int + 1 }}" - rescue: - - name: failed count + 1 - set_fact: - failed_counter: "{{ failed_counter | int + 1 }}" - - # case: check / mount point - - name: check / mount point - command: findmnt -r -o SOURCE -n / - register: result_root_mount_point - - # overlay: as default - # none: workaround issue https://gitlab.com/redhat/centos-stream/containers/bootc/-/issues/301 - # composefs: issue has been fixed by https://github.com/containers/composefs/pull/303 - - name: / mount point checking - block: - - assert: - that: - - result_root_mount_point.stdout == "overlay" or result_root_mount_point.stdout == "none" or result_root_mount_point.stdout == "composefs" - fail_msg: "/ does not mount with overlay or none or composefs" - success_msg: "/ mounts with overlay or none or composefs" - always: - - set_fact: - total_counter: "{{ total_counter | int + 1 }}" - rescue: - - name: failed count + 1 - set_fact: - failed_counter: "{{ failed_counter | int + 1 }}" - - - name: check VERSION_ID in /etc/os-release - shell: awk -F '=' '/^VERSION_ID/ {print $2}' /etc/os-release | tr -d '"' - register: result_os_release_version_id - - - name: redhat.version-id == VERSION_ID - block: - - assert: - that: - - image_label_version_id == result_os_release_version_id.stdout - fail_msg: "version_id in label != version_id in /etc/os-release" - success_msg: "version_id in label == version_id in /etc/os-release" - always: - - set_fact: - total_counter: "{{ total_counter | int + 1 }}" - rescue: - - name: failed count + 1 - set_fact: - failed_counter: "{{ failed_counter | int + 1 }}" - - - name: check selinux status - shell: getenforce - register: result_selinux - - - name: selinux is Enforcing - block: - - assert: - that: - - result_selinux.stdout == "Enforcing" - fail_msg: "SELinux status is not Enforcing" - success_msg: "SELinux is Enforcing" - always: - - set_fact: - total_counter: "{{ total_counter | int + 1 }}" - rescue: - - name: failed count + 1 - set_fact: - failed_counter: "{{ failed_counter | int + 1 }}" - - - name: check bootc-fetch-apply-updates.timer left time - 
shell: systemctl list-timers bootc-fetch-apply-updates.timer --output json | jq -r '.[].left' - register: result_bootc_timer_left - - - name: check bootc-fetch-apply-updates.timer left time greater than 0 - block: - - assert: - that: - - result_bootc_timer_left.stdout | int > 0 - fail_msg: "bootc-fetch-apply-updates.timer won't be triggered" - success_msg: "bootc-fetch-apply-updates.timer is good" - always: - - set_fact: - total_counter: "{{ total_counter | int + 1 }}" - rescue: - - name: failed count + 1 - set_fact: - failed_counter: "{{ failed_counter | int + 1 }}" - - - name: check installed package - shell: rpm -qa | sort - register: result_packages - - - name: upgrade checking - block: - # case: check booted ostree chacksum != rollback ostree checksum - - name: get rollback ostree checksum - shell: bootc status --json | jq -r '.status.rollback.ostree.checksum' - register: result_rollback_ostree_checksum - become: true - - - name: check booted and rollback ostree chacksum - block: - - assert: - that: - - ostree_checksum != result_rollback_ostree_checksum.stdout - fail_msg: "upgrade failed" - success_msg: "upgrade passed" - always: - - set_fact: - total_counter: "{{ total_counter | int + 1 }}" - rescue: - - name: failed count + 1 - set_fact: - failed_counter: "{{ failed_counter | int + 1 }}" - - set_fact: - ostree_checksum: "{{ result_ostree_checksum.stdout }}" - - # case: check wget installed after upgrade - - name: check wget installed - block: - - assert: - that: - - "'wget' in result_packages.stdout" - fail_msg: "wget not installed, ostree upgrade might be failed" - success_msg: "wget installed in ostree upgrade" - always: - - set_fact: - total_counter: "{{ total_counter | int + 1 }}" - rescue: - - name: failed count + 1 - set_fact: - failed_counter: "{{ failed_counter | int + 1 }}" - when: upgrade == "true" - - # case: check dmesg error and failed log - - name: check dmesg output - command: dmesg - become: true - - - name: check dmesg error and fail log - shell: dmesg --notime | grep -i "error\|fail" | grep -v "skipped" | grep -v "failover" | grep -v "ignition" | grep -v "Driver 'pcspkr'" || true - register: result_dmesg_error - become: true - - - name: check journal error and fail log - shell: journalctl | grep -i "error\|fail" | grep -v "skipped" | grep -v "failover" | grep -v "ignition" | grep -v "Driver 'pcspkr'" || true - register: result_journalctl_error - become: true - - - name: check selinux deny log - shell: journalctl | grep -i denied - register: result_selinux_denied - become: true - ignore_errors: true - - # case: check karg - - name: grep for kargs - shell: cat /proc/cmdline | grep {{item}} - with_items: "{{ kargs.split(',') }}" - register: kargs_check - # grep will exit with 1 when no results found. - # This causes the task not to halt play. 
- ignore_errors: true - when: kargs != '' - - - name: check if kargs exist - block: - - assert: - that: - - kargs_check is succeeded - fail_msg: install kargs not found - success_msg: install kargs found - always: - - set_fact: - total_counter: "{{ total_counter | int + 1 }}" - rescue: - - name: failed count + 1 - set_fact: - failed_counter: "{{ failed_counter | int + 1 }}" - when: kargs != '' - - # case: check running container with podman in root - - name: run CentOS Stream 9 image with podman in root - command: podman run --rm quay.io/centos/centos:stream9 cat /etc/redhat-release - register: podman_result - become: true - retries: 30 - delay: 2 - until: podman_result is success - ignore_errors: true - - - name: run container test - block: - - assert: - that: - - podman_result is succeeded - - "'CentOS Stream release 9' in podman_result.stdout" - fail_msg: "failed run container with podman (root)" - success_msg: "running container with podman (root) succeeded" - always: - - set_fact: - total_counter: "{{ total_counter | int + 1 }}" - rescue: - - name: failed count + 1 - set_fact: - failed_counter: "{{ failed_counter | int + 1 }}" - - # case: check running container with podman in rootless - - name: run CentOS Stream 9 image with in rootless - command: podman run --rm quay.io/centos/centos:stream9 cat /etc/redhat-release - register: podman_result - retries: 30 - delay: 2 - until: podman_result is success - ignore_errors: true - - - name: run container test - block: - - assert: - that: - - podman_result is succeeded - - "'CentOS Stream release 9' in podman_result.stdout" - fail_msg: "failed run container with podman (non-root)" - success_msg: "running container with podman (non-root) succeeded" - always: - - set_fact: - total_counter: "{{ total_counter | int + 1 }}" - rescue: - - name: failed count + 1 - set_fact: - failed_counter: "{{ failed_counter | int + 1 }}" - - # case: check logically bound image caddy container status - - name: check LBI caddy container status - shell: podman ps --filter "name=systemd-caddy" --format json | jq -r '.[].State' - register: result_caddy_state - when: lbi == "enabled" - - - name: run caddy container status test - block: - - assert: - that: - - result_caddy_state.stdout == "running" - fail_msg: "failed to run caddy container" - success_msg: "running caddy container" - always: - - set_fact: - total_counter: "{{ total_counter | int + 1 }}" - rescue: - - name: failed count + 1 - set_fact: - failed_counter: "{{ failed_counter | int + 1 }}" - when: lbi == "enabled" - - # case: check logically bound image node_exporter container status - - name: check LBI node_exporter container status - shell: podman ps --filter "name=systemd-node_exporter" --format json | jq -r '.[].State' - register: result_node_exporter_state - when: lbi == "enabled" - - - name: run node_exporter container status test - block: - - assert: - that: - - result_node_exporter_state.stdout == "running" - fail_msg: "failed to run node_exporter container" - success_msg: "running node_exporter container" - always: - - set_fact: - total_counter: "{{ total_counter | int + 1 }}" - rescue: - - name: failed count + 1 - set_fact: - failed_counter: "{{ failed_counter | int + 1 }}" - when: lbi == "enabled" - - # case: check system reboot - - name: check system reboot - block: - - name: check system reboot - reboot: - post_reboot_delay: 60 - pre_reboot_delay: 60 - reboot_timeout: 180 - become: true - ignore_errors: true - - - name: wait for connection to become reachable/usable - wait_for_connection: - 
delay: 30 - always: - - set_fact: - total_counter: "{{ total_counter | int + 1 }}" - rescue: - - name: failed count + 1 - set_fact: - failed_counter: "{{ failed_counter | int + 1 }}" - - # case: check persistent log in system - - name: check journald persistent logging - block: - - name: list boots - shell: journalctl --list-boots -q - register: result_list_boots - become: true - - - assert: - that: - - result_list_boots.stdout_lines | length > 1 - fail_msg: "NO journald persistent logging configured" - success_msg: "journald persistent logging configured" - always: - - set_fact: - total_counter: "{{ total_counter | int + 1 }}" - rescue: - - name: failed count + 1 - set_fact: - failed_counter: "{{ failed_counter | int + 1 }}" - when: ansible_facts['distribution_version'] != "41" - # workaround for issue https://gitlab.com/fedora/bootc/base-images/-/issues/10 - - # case: check reboot times - - name: check reboot times - command: last reboot - ignore_errors: true - become: true - - - assert: - that: - - failed_counter == "0" - fail_msg: "Run {{ total_counter }} tests, but {{ failed_counter }} of them failed" - success_msg: "Totally {{ total_counter }} test passed" diff --git a/tests/e2e/playbooks/deploy-libvirt.yaml b/tests/e2e/playbooks/deploy-libvirt.yaml deleted file mode 100644 index 7541bfa64..000000000 --- a/tests/e2e/playbooks/deploy-libvirt.yaml +++ /dev/null @@ -1,191 +0,0 @@ ---- -- hosts: cloud - become: false - vars: - test_os: "" - ssh_key_pub: "" - ssh_user: "cloud-user" - inventory_file: "" - instance_name: "bootc-{{ test_os }}" - image_path: "/var/lib/libvirt/images" - download_image: "true" - air_gapped_dir: "" - firmware: "" - boot_args: "" - os_variant: - centos-stream-9: centos-stream9 - centos-stream-10: centos-stream9 - fedora-40: fedora-unknown - fedora-41: fedora-unknown - - tasks: - - set_fact: - arch: "{{ ansible_facts['architecture'] }}" - - - name: Get temp folder - command: dirname "{{ inventory_file }}" - register: result_temp_folder - - - set_fact: - temp_folder: "{{ result_temp_folder.stdout }}" - - - set_fact: - random_num: "{{ 9999 | random(start=1001) }}" - - - name: Get CentOS-Stream-GenericCloud image filename - block: - - name: Get CentOS-Stream-GenericCloud image filename - shell: curl -s https://composes.stream.centos.org/production/latest-CentOS-Stream/compose/BaseOS/{{ arch }}/images/ | grep -oP '(?<=href=")CentOS-Stream-GenericCloud-(9|10)-[^"]+.qcow2(?=")' - register: out - - - set_fact: - download_image_name: "{{ out.stdout }}" - guest_image_fname: "{{ instance_name }}.qcow2" - when: - - "'centos' in test_os" - - download_image == "true" - - - name: Get Fedora-Cloud-Base-Generic 40 image filename - block: - - name: Get CentOS-Stream-GenericCloud image filename - shell: curl -s https://dl.fedoraproject.org/pub/fedora/linux/releases/40/Cloud/{{ arch }}/images/ | grep -ioE '>Fedora-Cloud-Base-Generic.*.qcow2' | tr -d '><' - register: out - - - set_fact: - download_image_name: "{{ out.stdout }}" - guest_image_fname: "{{ instance_name }}.qcow2" - when: - - test_os == "fedora-40" - - download_image == "true" - - - name: Get Fedora-Cloud-Base-Generic 41 image filename - block: - - name: Get CentOS-Stream-GenericCloud image filename - shell: curl -s https://dl.fedoraproject.org/pub/fedora/linux/development/rawhide/Cloud/{{ arch }}/images/ | grep -ioE '>Fedora-Cloud-Base-Generic.*.qcow2' | tr -d '><' - register: out - - - set_fact: - download_image_name: "{{ out.stdout }}" - guest_image_fname: "{{ instance_name }}.qcow2" - when: - - test_os == "fedora-41" 
- - download_image == "true" - - - name: Download CentOS-Stream-GenericCloud image - get_url: - url: "https://composes.stream.centos.org/production/latest-CentOS-Stream/compose/BaseOS/{{ arch }}/images/{{ download_image_name }}" - dest: "{{ image_path }}/{{ guest_image_fname }}" - validate_certs: false - become: true - when: - - "'centos' in test_os" - - download_image == "true" - - - name: Download Fedora-Cloud-Base-Generic 40 - get_url: - url: "https://dl.fedoraproject.org/pub/fedora/linux/releases/40/Cloud/{{ arch }}/images/{{ download_image_name }}" - dest: "{{ image_path }}/{{ guest_image_fname }}" - validate_certs: false - become: true - when: - - test_os == "fedora-40" - - download_image == "true" - - - name: Download Fedora-Cloud-Base-Generic 41 - get_url: - url: "https://dl.fedoraproject.org/pub/fedora/linux/development/rawhide/Cloud/{{ arch }}/images/{{ download_image_name }}" - dest: "{{ image_path }}/{{ guest_image_fname }}" - validate_certs: false - become: true - when: - - test_os == "fedora-41" - - download_image == "true" - - - set_fact: - guest_image_fname: "disk.qcow2" - when: download_image == "false" - - - name: Generate user-data and meta-data - template: - src: "{{ item }}.j2" - dest: "{{ temp_folder }}/{{ item }}" - loop: - - user-data - - meta-data - - # virt-install with uefi, the system has a "system reset" in the first boot and shutdown - # Then the --cloud-init will be dropped when start after system reset - - name: Generate seed.iso for NoCloud cloud-init - command: | - xorriso -as mkisofs -input-charset utf8 \ - -o "{{ image_path }}/seed.iso" \ - -volid cidata \ - -joliet \ - -rock \ - "{{ temp_folder }}/user-data" "{{ temp_folder }}/meta-data" - become: true - - - name: Generate virt-install script - template: - src: virt-install.bash.j2 - dest: "{{ temp_folder }}/virt-install.bash" - mode: 0755 - - - name: Run virt-install - command: "{{ temp_folder }}/virt-install.bash" - become: true - - - name: Wait until VM is shut off - command: virsh domstate {{ instance_name }} - become: true - register: result_domestate - retries: 10 - until: result_domestate.stdout == "shut off" - - - name: Start vm - shell: | - virsh start {{ instance_name }} - become: true - - - name: Get VM xml - command: virsh dumpxml {{ instance_name }} - become: true - - - name: Get VM IP address - shell: - virsh domifaddr {{ instance_name }} | grep -oP '(?:\d+\.){3}\d+' - register: result - until: result.stdout != "" - retries: 30 - delay: 10 - become: true - - - set_fact: - instance_ip: "{{ result.stdout }}" - - - name: Remove seed.iso - command: rm -f "{{ image_path }}/seed.iso" - become: true - - - name: Waits until instance is reachable - wait_for: - host: "{{ instance_ip }}" - port: 22 - search_regex: OpenSSH - delay: 10 - retries: 30 - register: result_ssh_check - until: result_ssh_check is success - - - name: Add instance ip into host group guest - add_host: - name: "{{ instance_ip }}" - groups: guest - - - name: Write instance ip to inventory file - community.general.ini_file: - path: "{{ inventory_file }}" - section: guest - option: guest ansible_host - value: "{{ instance_ip }}" - no_extra_spaces: true diff --git a/tests/e2e/playbooks/install.yaml b/tests/e2e/playbooks/install.yaml deleted file mode 100644 index 8fe7809a8..000000000 --- a/tests/e2e/playbooks/install.yaml +++ /dev/null @@ -1,71 +0,0 @@ ---- -- hosts: guest - become: false - vars: - test_os: "" - test_image_url: "" - test_case: "" - - tasks: - - name: check bios or uefi - stat: - path: /sys/firmware/efi - - - name: 
check partition size - command: df -Th - ignore_errors: true - become: true - - - name: check disk partition table - command: fdisk -l - ignore_errors: true - become: true - - - name: check mount table - command: findmnt - ignore_errors: true - - # installing SELinux-enabled targets from SELinux-disabled hosts - # https://github.com/containers/bootc/issues/419 - # only run on to-existing-root case - - name: disable selinux for libvirt only - command: setenforce 0 - become: true - ignore_errors: true - when: - - test_case == "to-existing-root" - - # ansible dnf5 module needs python3-libdnf5 - - name: Install podman dnf and dnf5 - command: dnf -y install podman - become: true - - - name: Pull logical bound image - command: podman pull --tls-verify=false {{ item }} - become: true - loop: - - "docker.io/library/caddy:2.8.4" - - "quay.io/prometheus/node-exporter:v1.8.2" - - - name: Install image - command: - "podman run \ - --rm \ - --privileged \ - --tls-verify=false \ - --pid=host \ - --security-opt label=type:unconfined_t \ - {{ test_image_url }} \ - bootc install to-existing-root" - become: true - - - name: Reboot to deploy new system - reboot: - post_reboot_delay: 60 - reboot_timeout: 180 - become: true - ignore_errors: true - - - name: Wait for connection to become reachable/usable - wait_for_connection: - delay: 30 diff --git a/tests/e2e/playbooks/rollback.yaml b/tests/e2e/playbooks/rollback.yaml deleted file mode 100644 index 85abcc8ec..000000000 --- a/tests/e2e/playbooks/rollback.yaml +++ /dev/null @@ -1,55 +0,0 @@ ---- -- hosts: guest - become: false - vars: - total_counter: "0" - failed_counter: "0" - - tasks: - - name: bootc rollback - command: bootc rollback - become: true - - - name: Reboot to deploy new system - reboot: - post_reboot_delay: 60 - reboot_timeout: 180 - become: true - ignore_errors: true - - - name: Wait for connection to become reachable/usable - wait_for_connection: - delay: 30 - - - name: check bootc status - command: bootc status - ignore_errors: true - become: true - - - name: rollback checking - block: - - name: check installed package - shell: rpm -qa | sort - register: result_packages - - # case: check wget not installed after rollback - - name: check wget not installed - block: - - assert: - that: - - "'wget' not in result_packages.stdout" - fail_msg: "wget installed, ostree rollback might be failed" - success_msg: "wget not installed in ostree rollback" - always: - - set_fact: - total_counter: "{{ total_counter | int + 1 }}" - rescue: - - name: failed count + 1 - set_fact: - failed_counter: "{{ failed_counter | int + 1 }}" - - - assert: - that: - - failed_counter == "0" - fail_msg: "Run {{ total_counter }} tests, but {{ failed_counter }} of them failed" - success_msg: "Totally {{ total_counter }} test passed" diff --git a/tests/e2e/playbooks/templates/meta-data.j2 b/tests/e2e/playbooks/templates/meta-data.j2 deleted file mode 100644 index d8f308775..000000000 --- a/tests/e2e/playbooks/templates/meta-data.j2 +++ /dev/null @@ -1,2 +0,0 @@ -instance-id: libvirt-{{ random_num }} -local-hostname: libvirt-{{ test_os }} diff --git a/tests/e2e/playbooks/templates/user-data.j2 b/tests/e2e/playbooks/templates/user-data.j2 deleted file mode 100644 index ec086235a..000000000 --- a/tests/e2e/playbooks/templates/user-data.j2 +++ /dev/null @@ -1,19 +0,0 @@ -#cloud-config -users: - - default - - name: {{ ssh_user }} - groups: wheel - sudo: ALL=(ALL) NOPASSWD:ALL - lock_passwd: true - ssh_authorized_keys: - - {{ lookup('ansible.builtin.file', ssh_key_pub) }} - -# install 
with --cloud-init always shutdown vm on the first reboot -# https://github.com/virt-manager/virt-manager/issues/497 -# workaround is shutdown vm in cloud-init when cloud-init finished -# then start vm -power_state: - delay: now - mode: poweroff - message: Cloud Init Finalized - Shutting down machine - timeout: 30 diff --git a/tests/e2e/playbooks/templates/virt-install.bash.j2 b/tests/e2e/playbooks/templates/virt-install.bash.j2 deleted file mode 100644 index bd359b838..000000000 --- a/tests/e2e/playbooks/templates/virt-install.bash.j2 +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash - -virt-install \ - --name {{ instance_name }} \ - --ram 3072 \ - --vcpus 2 \ - --os-variant {{ os_variant[test_os] }} \ - --network network=integration \ - --disk size=10,path="{{ image_path }}/{{ guest_image_fname }}" \ -{% if firmware == 'uefi' %} - --boot {{ boot_args }} \ -{% endif %} - --cdrom "{{ image_path }}/seed.iso" \ - --install no_install=yes \ -{% if air_gapped_dir != '' %} - --filesystem={{ air_gapped_dir }},mount_tag,driver.type=virtiofs,accessmode=passthrough \ - --memorybacking=source.type=memfd,access.mode=shared \ -{% endif %} - --console file,source.path="/tmp/{{ test_os }}-{{ firmware }}-console.log" \ - --noautoconsole \ - --wait diff --git a/tests/e2e/playbooks/upgrade.yaml b/tests/e2e/playbooks/upgrade.yaml deleted file mode 100644 index 27ab7ba49..000000000 --- a/tests/e2e/playbooks/upgrade.yaml +++ /dev/null @@ -1,46 +0,0 @@ ---- -- hosts: guest - become: false - vars: - - tasks: - - name: Air-gapped upgrade - block: - - name: Mount virtiofs - mount: - path: /mnt - src: mount_tag - fstype: virtiofs - state: ephemeral - become: true - - - name: ls - command: ls /mnt - become: true - - - name: bootc switch - command: bootc switch --transport dir /mnt - become: true - when: - - air_gapped_dir | default('') != "" - - - name: bootc upgrade - command: bootc upgrade - become: true - when: - - air_gapped_dir | default('') == "" - - - name: Reboot to deploy new system - reboot: - post_reboot_delay: 60 - reboot_timeout: 180 - become: true - ignore_errors: true - - - name: Wait for connection to become reachable/usable - wait_for_connection: - delay: 30 - - - name: bootc booted status - command: bootc status --booted - become: true diff --git a/tests/e2e/shared_lib.sh b/tests/e2e/shared_lib.sh deleted file mode 100755 index c5ab26b81..000000000 --- a/tests/e2e/shared_lib.sh +++ /dev/null @@ -1,82 +0,0 @@ -#!/bin/bash - -# Dumps details about the instance running the CI job. -function dump_runner { - RUNNER_CPUS=$(nproc) - RUNNER_MEM=$(free -m | grep -oP '\d+' | head -n 1) - RUNNER_DISK=$(df --output=size -h / | sed '1d;s/[^0-9]//g') - RUNNER_HOSTNAME=$(uname -n) - RUNNER_USER=$(whoami) - RUNNER_ARCH=$(uname -m) - RUNNER_KERNEL=$(uname -r) - - echo -e "\033[0;36m" - cat << EOF ------------------------------------------------------------------------------- -CI MACHINE SPECS ------------------------------------------------------------------------------- - Hostname: ${RUNNER_HOSTNAME} - User: ${RUNNER_USER} - CPUs: ${RUNNER_CPUS} - RAM: ${RUNNER_MEM} MB - DISK: ${RUNNER_DISK} GB - ARCH: ${RUNNER_ARCH} - KERNEL: ${RUNNER_KERNEL} ------------------------------------------------------------------------------- -EOF -} - -# Colorful timestamped output. 
-function greenprint { - echo -e "\033[1;32m[$(date -Isecond)] ${1}\033[0m" -} - -function redprint { - echo -e "\033[1;31m[$(date -Isecond)] ${1}\033[0m" -} - -function deploy_libvirt_network { - greenprint "Start firewalld" - sudo systemctl enable --now firewalld - - greenprint "🚀 Starting libvirt daemon" - sudo systemctl start libvirtd - sudo virsh list --all > /dev/null - - # Set a customized dnsmasq configuration for libvirt so we always get the - # same address on boot up. - greenprint "💡 Setup libvirt network" - sudo tee /tmp/integration.xml > /dev/null << EOF - -integration -1c8fe98c-b53a-4ca4-bbdb-deb0f26b3579 - - - - - - - - - - - - - - - - - - - - - -EOF - if ! sudo virsh net-info integration > /dev/null 2>&1; then - sudo virsh net-define /tmp/integration.xml - fi - if [[ $(sudo virsh net-info integration | grep 'Active' | awk '{print $2}') == 'no' ]]; then - sudo virsh net-start integration - fi - sudo rm -f /tmp/integration.xml -}
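The XML body of the `integration` network heredoc in `deploy_libvirt_network` did not survive extraction above; only the network name and UUID remain. A plausible shape for such a NAT network definition, shown purely as an illustrative assumption (not the recovered original), pinning the host to the 192.168.100.1 address that the script uses as the local registry IP:

```bash
# Illustrative sketch only: the original heredoc body was lost in extraction.
# It reuses the names and addresses the script does reference (network
# "integration", its UUID, host/registry IP 192.168.100.1); the forward mode,
# bridge name, and DHCP range are assumptions.
sudo tee /tmp/integration.xml > /dev/null << EOF
<network>
  <name>integration</name>
  <uuid>1c8fe98c-b53a-4ca4-bbdb-deb0f26b3579</uuid>
  <forward mode='nat'/>
  <bridge name='integration' stp='on' delay='0'/>
  <ip address='192.168.100.1' netmask='255.255.255.0'>
    <dhcp>
      <range start='192.168.100.2' end='192.168.100.254'/>
    </dhcp>
  </ip>
</network>
EOF
```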