diff --git a/tests/framework/microvm.py b/tests/framework/microvm.py
index ed02488bc7b..1c5b6998a42 100644
--- a/tests/framework/microvm.py
+++ b/tests/framework/microvm.py
@@ -1003,14 +1003,13 @@ def thread_backtraces(self):
         )
         return "\n".join(backtraces)
 
-    def _dump_debug_information(self, exc: Exception):
+    def _dump_debug_information(self):
         """
         Dumps debug information about this microvm
 
         Used for example when running a command inside the guest via `SSHConnection.check_output` fails.
         """
         print(
-            f"Failure executing command via SSH in microVM: {exc}\n\n"
             f"Firecracker logs:\n{self.log_data}\n"
             f"Thread backtraces:\n{self.thread_backtraces}"
         )
diff --git a/tests/framework/utils.py b/tests/framework/utils.py
index a8715f00e94..7c928b9430b 100644
--- a/tests/framework/utils.py
+++ b/tests/framework/utils.py
@@ -350,8 +350,7 @@ def get_free_mem_ssh(ssh_connection):
     :param ssh_connection: connection to the guest
     :return: available mem column output of 'free'
     """
-    _, stdout, stderr = ssh_connection.run("cat /proc/meminfo | grep MemAvailable")
-    assert stderr == ""
+    _, stdout, _ = ssh_connection.run("cat /proc/meminfo | grep MemAvailable")
 
     # Split "MemAvailable: 123456 kB" and validate it
     meminfo_data = stdout.split()
@@ -625,8 +624,7 @@ def guest_run_fio_iteration(ssh_connection, iteration):
        --output /tmp/fio{} > /dev/null &""".format(
         iteration
     )
-    exit_code, _, stderr = ssh_connection.run(fio)
-    assert exit_code == 0, stderr
+    ssh_connection.run(fio)
 
 
 def check_filesystem(ssh_connection, disk_fmt, disk):
diff --git a/tests/framework/utils_iperf.py b/tests/framework/utils_iperf.py
index aa2b663c1c7..dd4fb44e075 100644
--- a/tests/framework/utils_iperf.py
+++ b/tests/framework/utils_iperf.py
@@ -119,7 +119,7 @@ def spawn_iperf3_client(self, client_idx, client_mode_flag):
             .build()
         )
 
-        return self._microvm.ssh.check_output(cmd).stdout
+        return self._microvm.ssh.run(cmd).stdout
 
     def host_command(self, port_offset):
         """Builds the command used for spawning an iperf3 server on the host"""
diff --git a/tests/framework/utils_vsock.py b/tests/framework/utils_vsock.py
index 3f6885e3afd..c88b0192152 100644
--- a/tests/framework/utils_vsock.py
+++ b/tests/framework/utils_vsock.py
@@ -100,7 +100,7 @@ def start_guest_echo_server(vm):
     Returns a UDS path to connect to the server.
     """
     cmd = f"nohup socat VSOCK-LISTEN:{ECHO_SERVER_PORT},backlog=128,reuseaddr,fork EXEC:'/bin/cat' > /dev/null 2>&1 &"
-    vm.ssh.check_output(cmd)
+    vm.ssh.run(cmd)
 
     # Give the server time to initialise
     time.sleep(1)
@@ -214,8 +214,7 @@ def _copy_vsock_data_to_guest(ssh_connection, blob_path, vm_blob_path, vsock_hel
 
     # Copy the data file and a vsock helper to the guest.
     cmd = "mkdir -p /tmp/vsock"
-    ecode, _, _ = ssh_connection.run(cmd)
-    assert ecode == 0, "Failed to set up tmpfs drive on the guest."
+    ssh_connection.run(cmd)
 
     ssh_connection.scp_put(vsock_helper, "/tmp/vsock_helper")
     ssh_connection.scp_put(blob_path, vm_blob_path)
diff --git a/tests/host_tools/network.py b/tests/host_tools/network.py
index 7877b914d28..21bc49e7314 100644
--- a/tests/host_tools/network.py
+++ b/tests/host_tools/network.py
@@ -79,7 +79,7 @@ def remote_path(self, path):
 
     def _scp(self, path1, path2, options):
         """Copy files to/from the VM using scp."""
-        self._exec(["scp", *options, path1, path2], check=True)
+        self._exec(["scp", *options, path1, path2])
 
     def scp_put(self, local_path, remote_path, recursive=False):
         """Copy files to the VM using scp."""
@@ -111,9 +111,10 @@ def _init_connection(self):
         """
         self.check_output("true", timeout=100, debug=True)
 
-    def run(self, cmd_string, timeout=None, *, check=False, debug=False):
+    def run(self, cmd_string, timeout=None, *, check=True, debug=False):
         """
         Execute the command passed as a string in the ssh context.
+        By default raises an exception on non-zero return code of remote command.
 
         If `debug` is set, pass `-vvv` to `ssh`. Note that this will clobber stderr.
         """
@@ -124,22 +125,29 @@ def run(self, cmd_string, timeout=None, *, check=False, debug=False):
 
         return self._exec(command, timeout, check=check)
 
-    def check_output(self, cmd_string, timeout=None, *, debug=False):
-        """Same as `run`, but raises an exception on non-zero return code of remote command"""
-        return self.run(cmd_string, timeout, check=True, debug=debug)
-
-    def _exec(self, cmd, timeout=None, check=False):
+    def _exec(self, cmd, timeout=None, check=True):
         """Private function that handles the ssh client invocation."""
         if self.netns is not None:
             cmd = ["ip", "netns", "exec", self.netns] + cmd
 
-        try:
-            return utils.run_cmd(cmd, check=check, timeout=timeout)
-        except Exception as exc:
+        rc, stdout, stderr = utils.run_cmd(cmd, timeout=timeout)
+
+        if not check:
+            return rc, stdout, stderr
+
+        if rc != 0:
+            print(
+                f"SSH command {cmd} exited with non zero error code: {rc}\n"
+                f"stdout: {stdout}\n"
+                f"stderr: {stderr}\n"
+            )
+
             if self._on_error:
-                self._on_error(exc)
+                self._on_error()
 
-            raise
+            assert False
+
+        return rc, stdout, stderr
 
     # pylint:disable=invalid-name
     def Popen(
diff --git a/tests/integration_tests/functional/test_balloon.py b/tests/integration_tests/functional/test_balloon.py
index ee750dcac7d..4ed3a4244b3 100644
--- a/tests/integration_tests/functional/test_balloon.py
+++ b/tests/integration_tests/functional/test_balloon.py
@@ -41,23 +41,11 @@ def get_rss_from_pmap():
 
 def lower_ssh_oom_chance(ssh_connection):
     """Lure OOM away from ssh process"""
-    logger = logging.getLogger("lower_ssh_oom_chance")
-
     cmd = "cat /run/sshd.pid"
-    exit_code, stdout, stderr = ssh_connection.run(cmd)
-    # add something to the logs for troubleshooting
-    if exit_code != 0:
-        logger.error("while running: %s", cmd)
-        logger.error("stdout: %s", stdout)
-        logger.error("stderr: %s", stderr)
-
+    _, stdout, _ = ssh_connection.run(cmd)
     for pid in stdout.split(" "):
         cmd = f"choom -n -1000 -p {pid}"
-        exit_code, stdout, stderr = ssh_connection.run(cmd)
-        if exit_code != 0:
-            logger.error("while running: %s", cmd)
-            logger.error("stdout: %s", stdout)
-            logger.error("stderr: %s", stderr)
+        ssh_connection.run(cmd)
 
 
 def make_guest_dirty_memory(ssh_connection, amount_mib=32):
@@ -68,12 +56,7 @@ def make_guest_dirty_memory(ssh_connection, amount_mib=32):
 
     cmd = f"/usr/local/bin/fillmem {amount_mib}"
     try:
-        exit_code, stdout, stderr = ssh_connection.run(cmd, timeout=1.0)
-        # add something to the logs for troubleshooting
-        if exit_code != 0:
-            logger.error("while running: %s", cmd)
-            logger.error("stdout: %s", stdout)
-            logger.error("stderr: %s", stderr)
+        ssh_connection.run(cmd, timeout=1.0)
     except TimeoutExpired:
         # It's ok if this expires. Sometimes the SSH connection
         # gets killed by the OOM killer *after* the fillmem program
@@ -558,4 +541,4 @@ def test_memory_scrub(microvm_factory, guest_kernel, rootfs):
 
     # Wait for the deflate to complete.
     _ = get_stable_rss_mem_by_pid(firecracker_pid)
-    microvm.ssh.check_output("/usr/local/bin/readmem {} {}".format(60, 1))
+    microvm.ssh.run("/usr/local/bin/readmem {} {}".format(60, 1))
diff --git a/tests/integration_tests/functional/test_cpu_features_aarch64.py b/tests/integration_tests/functional/test_cpu_features_aarch64.py
index 8357f54b568..d26e8854de7 100644
--- a/tests/integration_tests/functional/test_cpu_features_aarch64.py
+++ b/tests/integration_tests/functional/test_cpu_features_aarch64.py
@@ -51,7 +51,7 @@ def _check_cpu_features_arm(test_microvm, guest_kv, template_name=None):
         case CpuModel.ARM_NEOVERSE_V1, _, None:
             expected_cpu_features = DEFAULT_G3_FEATURES_5_10
 
-    _, stdout, _ = test_microvm.ssh.check_output(CPU_FEATURES_CMD)
+    _, stdout, _ = test_microvm.ssh.run(CPU_FEATURES_CMD)
     flags = set(stdout.strip().split(" "))
     assert flags == expected_cpu_features
 
@@ -77,7 +77,7 @@ def test_host_vs_guest_cpu_features_aarch64(uvm_nano):
     vm.add_net_iface()
     vm.start()
     host_feats = set(utils.check_output(CPU_FEATURES_CMD).stdout.strip().split(" "))
-    guest_feats = set(vm.ssh.check_output(CPU_FEATURES_CMD).stdout.strip().split(" "))
+    guest_feats = set(vm.ssh.run(CPU_FEATURES_CMD).stdout.strip().split(" "))
 
     cpu_model = cpuid_utils.get_cpu_model_name()
     match cpu_model:
diff --git a/tests/integration_tests/functional/test_cpu_features_x86_64.py b/tests/integration_tests/functional/test_cpu_features_x86_64.py
index 23818ddc6b1..db20fa2e333 100644
--- a/tests/integration_tests/functional/test_cpu_features_x86_64.py
+++ b/tests/integration_tests/functional/test_cpu_features_x86_64.py
@@ -215,7 +215,7 @@ def test_host_vs_guest_cpu_features_x86_64(uvm_nano):
     vm.add_net_iface()
     vm.start()
     host_feats = set(utils.check_output(CPU_FEATURES_CMD).stdout.strip().split(" "))
-    guest_feats = set(vm.ssh.check_output(CPU_FEATURES_CMD).stdout.strip().split(" "))
+    guest_feats = set(vm.ssh.run(CPU_FEATURES_CMD).stdout.strip().split(" "))
 
     cpu_model = cpuid_utils.get_cpu_codename()
     match cpu_model:
diff --git a/tests/integration_tests/functional/test_drive_vhost_user.py b/tests/integration_tests/functional/test_drive_vhost_user.py
index 79cc41b0f3a..5a416317bd3 100644
--- a/tests/integration_tests/functional/test_drive_vhost_user.py
+++ b/tests/integration_tests/functional/test_drive_vhost_user.py
@@ -15,8 +15,7 @@ def _check_block_size(ssh_connection, dev_path, size):
     """
     Checks the size of the block device.
     """
-    _, stdout, stderr = ssh_connection.run("blockdev --getsize64 {}".format(dev_path))
-    assert stderr == ""
+    _, stdout, _ = ssh_connection.run("blockdev --getsize64 {}".format(dev_path))
     assert stdout.strip() == str(size)
 
 
@@ -297,7 +296,7 @@ def test_config_change(microvm_factory, guest_kernel, rootfs):
     _check_block_size(vm.ssh, "/dev/vdb", orig_size * 1024 * 1024)
 
     # Check that we can create a filesystem and mount it
-    vm.ssh.check_output(mkfs_mount_cmd)
+    vm.ssh.run(mkfs_mount_cmd)
 
     for new_size in new_sizes:
         # Instruct the backend to resize the device.
@@ -312,4 +311,4 @@ def test_config_change(microvm_factory, guest_kernel, rootfs):
         _check_block_size(vm.ssh, "/dev/vdb", new_size * 1024 * 1024)
 
         # Check that we can create a filesystem and mount it
-        vm.ssh.check_output(mkfs_mount_cmd)
+        vm.ssh.run(mkfs_mount_cmd)
diff --git a/tests/integration_tests/functional/test_drive_virtio.py b/tests/integration_tests/functional/test_drive_virtio.py
index 9c61ead56a9..80e5ef64d4f 100644
--- a/tests/integration_tests/functional/test_drive_virtio.py
+++ b/tests/integration_tests/functional/test_drive_virtio.py
@@ -283,7 +283,7 @@ def test_patch_drive(uvm_plain_any, io_engine):
     # of the device, in bytes.
     blksize_cmd = "LSBLK_DEBUG=all lsblk -b /dev/vdb --output SIZE"
     size_bytes_str = "536870912"  # = 512 MiB
-    _, stdout, _ = test_microvm.ssh.check_output(blksize_cmd)
+    _, stdout, _ = test_microvm.ssh.run(blksize_cmd)
     lines = stdout.split("\n")
     # skip "SIZE"
    assert lines[1].strip() == size_bytes_str
@@ -354,14 +354,12 @@ def test_flush(uvm_plain_rw, io_engine):
 
 
 def _check_block_size(ssh_connection, dev_path, size):
-    _, stdout, stderr = ssh_connection.run("blockdev --getsize64 {}".format(dev_path))
-    assert stderr == ""
+    _, stdout, _ = ssh_connection.run("blockdev --getsize64 {}".format(dev_path))
     assert stdout.strip() == str(size)
 
 
 def _check_file_size(ssh_connection, dev_path, size):
-    _, stdout, stderr = ssh_connection.run("stat --format=%s {}".format(dev_path))
-    assert stderr == ""
+    _, stdout, _ = ssh_connection.run("stat --format=%s {}".format(dev_path))
     assert stdout.strip() == str(size)
 
 
@@ -379,7 +377,5 @@ def _check_drives(test_microvm, assert_dict, keys_array):
 
 
 def _check_mount(ssh_connection, dev_path):
-    _, _, stderr = ssh_connection.run(f"mount {dev_path} /tmp", timeout=30.0)
-    assert stderr == ""
-    _, _, stderr = ssh_connection.run("umount /tmp", timeout=30.0)
-    assert stderr == ""
+    ssh_connection.run(f"mount {dev_path} /tmp", timeout=30.0)
+    ssh_connection.run("umount /tmp", timeout=30.0)
diff --git a/tests/integration_tests/functional/test_kvm_ptp.py b/tests/integration_tests/functional/test_kvm_ptp.py
index 70b5bb877bc..cf82de7da3b 100644
--- a/tests/integration_tests/functional/test_kvm_ptp.py
+++ b/tests/integration_tests/functional/test_kvm_ptp.py
@@ -18,7 +18,7 @@ def test_kvm_ptp(uvm_plain_any):
     vm.add_net_iface()
     vm.start()
 
-    vm.ssh.check_output("[ -c /dev/ptp0 ]")
+    vm.ssh.run("[ -c /dev/ptp0 ]")
 
     # phc_ctl[14515.127]: clock time is 1697545854.728335694 or Tue Oct 17 12:30:54 2023
-    vm.ssh.check_output("phc_ctl /dev/ptp0 -- get")
+    vm.ssh.run("phc_ctl /dev/ptp0 -- get")
diff --git a/tests/integration_tests/functional/test_net.py b/tests/integration_tests/functional/test_net.py
index a804b8f90a8..675e5d6e1d7 100644
--- a/tests/integration_tests/functional/test_net.py
+++ b/tests/integration_tests/functional/test_net.py
@@ -37,7 +37,7 @@ def test_high_ingress_traffic(uvm_plain_any):
     test_microvm.start()
 
     # Start iperf3 server on the guest.
-    test_microvm.ssh.check_output("{} -sD\n".format(IPERF_BINARY_GUEST))
+    test_microvm.ssh.run("{} -sD\n".format(IPERF_BINARY_GUEST))
     time.sleep(1)
 
     # Start iperf3 client on the host. Send 1Gbps UDP traffic.
@@ -53,7 +53,7 @@ def test_high_ingress_traffic(uvm_plain_any):
     # Check if the high ingress traffic broke the net interface.
     # If the net interface still works we should be able to execute
     # ssh commands.
-    test_microvm.ssh.check_output("echo success\n")
+    test_microvm.ssh.run("echo success\n")
 
 
 def test_multi_queue_unsupported(uvm_plain):
@@ -97,8 +97,8 @@ def run_udp_offload_test(vm):
     message = "x"
 
     # Start a UDP server in the guest
-    # vm.ssh.check_output(f"nohup socat UDP-LISTEN:{port} - > {out_filename} &")
-    vm.ssh.check_output(
+    # vm.ssh.run(f"nohup socat UDP-LISTEN:{port} - > {out_filename} &")
+    vm.ssh.run(
         f"nohup socat UDP4-LISTEN:{port} OPEN:{out_filename},creat > /dev/null 2>&1 &"
     )
 
diff --git a/tests/integration_tests/functional/test_net_config_space.py b/tests/integration_tests/functional/test_net_config_space.py
index c4ddfea9189..ab0f99525c4 100644
--- a/tests/integration_tests/functional/test_net_config_space.py
+++ b/tests/integration_tests/functional/test_net_config_space.py
@@ -219,8 +219,7 @@ def _find_iomem_range(ssh_connection, dev_name):
     # its contents and grep for the VirtIO device name, which
     # with ACPI is "LNRO0005:XY".
     cmd = f"cat /proc/iomem | grep -m 1 {dev_name}"
-    rc, stdout, stderr = ssh_connection.run(cmd)
-    assert rc == 0, stderr
+    _, stdout, _ = ssh_connection.run(cmd)
 
     # Take range in the form 'start-end' from line. The line looks like this:
     # d00002000-d0002fff : LNRO0005:02
@@ -259,8 +258,7 @@ def _get_net_mem_addr_base_x86_cmdline(ssh_connection, if_name):
     """Check for net device memory start address via command line arguments"""
     sys_virtio_mmio_cmdline = "/sys/devices/virtio-mmio-cmdline/"
     cmd = "ls {} | grep virtio-mmio. | sed 's/virtio-mmio.//'"
-    exit_code, stdout, stderr = ssh_connection.run(cmd.format(sys_virtio_mmio_cmdline))
-    assert exit_code == 0, stderr
+    _, stdout, _ = ssh_connection.run(cmd.format(sys_virtio_mmio_cmdline))
     virtio_devs_idx = stdout.strip().split()
 
     cmd = "cat /proc/cmdline"
@@ -299,8 +297,7 @@ def _get_net_mem_addr_base(ssh_connection, if_name):
     if platform.machine() == "aarch64":
         sys_virtio_mmio_cmdline = "/sys/devices/platform"
         cmd = "ls {} | grep .virtio_mmio".format(sys_virtio_mmio_cmdline)
-        rc, stdout, _ = ssh_connection.run(cmd)
-        assert rc == 0
+        _, stdout, _ = ssh_connection.run(cmd)
         virtio_devs = stdout.split()
         devs_addr = list(map(lambda dev: dev.split(".")[0], virtio_devs))
diff --git a/tests/integration_tests/functional/test_pause_resume.py b/tests/integration_tests/functional/test_pause_resume.py
index 3d0ac124c11..6064abd1f75 100644
--- a/tests/integration_tests/functional/test_pause_resume.py
+++ b/tests/integration_tests/functional/test_pause_resume.py
@@ -53,7 +53,7 @@ def test_pause_resume(uvm_nano):
 
     # Verify guest is no longer active.
     with pytest.raises(ChildProcessError):
-        microvm.ssh.check_output("true")
+        microvm.ssh.run("true")
 
     # Verify emulation was indeed paused and no events from either
     # guest or host side were handled.
@@ -61,7 +61,7 @@ def test_pause_resume(uvm_nano):
 
     # Verify guest is no longer active.
     with pytest.raises(ChildProcessError):
-        microvm.ssh.check_output("true")
+        microvm.ssh.run("true")
 
     # Pausing the microVM when it is already `Paused` is allowed
     # (microVM remains in `Paused` state).
@@ -152,7 +152,7 @@ def test_kvmclock_ctrl(uvm_plain_any):
     # console. This detail is important as it writing in the console seems to increase the probability
     # that we will pause the execution inside the kernel and cause a lock up. Setting KVM_CLOCK_CTRL
    # bit that informs the guest we're pausing the vCPUs, should avoid that lock up.
-    microvm.ssh.check_output(
+    microvm.ssh.run(
         "timeout 60 sh -c 'while true; do ls -R /; done' > /dev/ttyS0 2>&1 < /dev/null &"
     )
 
@@ -161,7 +161,7 @@ def test_kvmclock_ctrl(uvm_plain_any):
     time.sleep(5)
 
     microvm.api.vm.patch(state="Resumed")
-    dmesg = microvm.ssh.check_output("dmesg").stdout
+    dmesg = microvm.ssh.run("dmesg").stdout
     assert "rcu_sched self-detected stall on CPU" not in dmesg
     assert "rcu_preempt detected stalls on CPUs/tasks" not in dmesg
     assert "BUG: soft lockup -" not in dmesg
diff --git a/tests/integration_tests/functional/test_rng.py b/tests/integration_tests/functional/test_rng.py
index b40aa66033d..ff09f07913a 100644
--- a/tests/integration_tests/functional/test_rng.py
+++ b/tests/integration_tests/functional/test_rng.py
@@ -150,7 +150,7 @@ def _get_throughput(ssh, random_bytes):
     # Issue a `dd` command to request 100 times `random_bytes` from the device.
     # 100 here is used to get enough confidence on the achieved throughput.
     cmd = "dd if=/dev/hwrng of=/dev/null bs={} count=100".format(random_bytes)
-    _, _, stderr = ssh.check_output(cmd)
+    _, _, stderr = ssh.run(cmd)
 
     # dd gives its output on stderr
     return _process_dd_output(stderr)
diff --git a/tests/integration_tests/functional/test_serial_io.py b/tests/integration_tests/functional/test_serial_io.py
index db1521d4a44..faad24fa097 100644
--- a/tests/integration_tests/functional/test_serial_io.py
+++ b/tests/integration_tests/functional/test_serial_io.py
@@ -188,15 +188,13 @@ def test_serial_block(uvm_plain_any):
     os.kill(test_microvm.screen_pid, signal.SIGSTOP)
 
     # Generate a random text file.
-    test_microvm.ssh.check_output(
-        "base64 /dev/urandom | head -c 100000 > /tmp/file.txt"
-    )
+    test_microvm.ssh.run("base64 /dev/urandom | head -c 100000 > /tmp/file.txt")
 
     # Dump output to terminal
-    test_microvm.ssh.check_output("cat /tmp/file.txt > /dev/ttyS0")
+    test_microvm.ssh.run("cat /tmp/file.txt > /dev/ttyS0")
 
     # Check that the vCPU isn't blocked.
-    test_microvm.ssh.check_output("cd /")
+    test_microvm.ssh.run("cd /")
 
     # Check the metrics to see if the serial missed bytes.
     fc_metrics = test_microvm.flush_metrics()
diff --git a/tests/integration_tests/functional/test_snapshot_basic.py b/tests/integration_tests/functional/test_snapshot_basic.py
index ac596440f67..90b69743ffa 100644
--- a/tests/integration_tests/functional/test_snapshot_basic.py
+++ b/tests/integration_tests/functional/test_snapshot_basic.py
@@ -36,7 +36,7 @@ def check_vmgenid_update_count(vm, resume_count):
     Kernel will emit the DMESG_VMGENID_RESUME every time we resume
     from a snapshot
     """
-    _, stdout, _ = vm.ssh.check_output("dmesg")
+    _, stdout, _ = vm.ssh.run("dmesg")
     assert resume_count == stdout.count(DMESG_VMGENID_RESUME)
 
 
@@ -44,8 +44,7 @@ def _get_guest_drive_size(ssh_connection, guest_dev_name="/dev/vdb"):
     # `lsblk` command outputs 2 lines to STDOUT:
     # "SIZE" and the size of the device, in bytes.
     blksize_cmd = "LSBLK_DEBUG=all lsblk -b {} --output SIZE".format(guest_dev_name)
-    rc, stdout, stderr = ssh_connection.run(blksize_cmd)
-    assert rc == 0, stderr
+    _, stdout, _ = ssh_connection.run(blksize_cmd)
     lines = stdout.split("\n")
     return lines[1].strip()
 
@@ -473,7 +472,7 @@ def test_diff_snapshot_overlay(guest_kernel, rootfs, microvm_factory):
     basevm.resume()
 
     # Run some command to dirty some pages
-    basevm.ssh.check_output("true")
+    basevm.ssh.run("true")
 
     # First copy the base snapshot somewhere else, so we can make sure
     # it will actually get updated
diff --git a/tests/integration_tests/functional/test_snapshot_phase1.py b/tests/integration_tests/functional/test_snapshot_phase1.py
index 7436c19d875..f4d3c56f772 100644
--- a/tests/integration_tests/functional/test_snapshot_phase1.py
+++ b/tests/integration_tests/functional/test_snapshot_phase1.py
@@ -93,7 +93,7 @@ def test_snapshot_phase1(
 
     # Validate MMDS.
     # Configure interface to route MMDS requests
-    vm.ssh.check_output(f"ip route add {IPV4_ADDRESS} dev {NET_IFACE_FOR_MMDS}")
+    vm.ssh.run(f"ip route add {IPV4_ADDRESS} dev {NET_IFACE_FOR_MMDS}")
 
     # Fetch metadata to ensure MMDS is accessible.
     token = generate_mmds_session_token(vm.ssh, IPV4_ADDRESS, token_ttl=60)
diff --git a/tests/integration_tests/functional/test_snapshot_restore_cross_kernel.py b/tests/integration_tests/functional/test_snapshot_restore_cross_kernel.py
index bfe5316d9e5..85713450708 100644
--- a/tests/integration_tests/functional/test_snapshot_restore_cross_kernel.py
+++ b/tests/integration_tests/functional/test_snapshot_restore_cross_kernel.py
@@ -59,7 +59,7 @@ def _test_mmds(vm, mmds_net_iface):
     cmd = "ip route add {} dev {}".format(
         mmds_net_iface.guest_ip, mmds_net_iface.dev_name
     )
-    vm.ssh.check_output(cmd)
+    vm.ssh.run(cmd)
 
     # The base microVM had MMDS version 2 configured, which was persisted
     # across the snapshot-restore.
diff --git a/tests/integration_tests/functional/test_vsock.py b/tests/integration_tests/functional/test_vsock.py
index 2d540b8f934..1d086ff9d98 100644
--- a/tests/integration_tests/functional/test_vsock.py
+++ b/tests/integration_tests/functional/test_vsock.py
@@ -238,7 +238,7 @@ def test_vsock_transport_reset_g2h(uvm_nano, microvm_factory):
     # After snap restore all vsock connections should be
     # dropped. This means guest socat should exit same way
     # as it did after snapshot was taken.
-    code, _, _ = new_vm.ssh.run("pidof socat")
+    code, _, _ = new_vm.ssh.run("pidof socat", check=False)
     assert code == 1
 
     host_socket_path = os.path.join(
@@ -266,15 +266,14 @@ def test_vsock_transport_reset_g2h(uvm_nano, microvm_factory):
     new_vm.ssh.run(guest_socat_commmand)
 
     # socat should be running in the guest now
-    code, _, _ = new_vm.ssh.run("pidof socat")
-    assert code == 0
+    new_vm.ssh.run("pidof socat")
 
     # Create snapshot.
     snapshot = new_vm.snapshot_full()
     new_vm.resume()
 
     # After `create_snapshot` + 'restore' calls, connection should be dropped
-    code, _, _ = new_vm.ssh.run("pidof socat")
+    code, _, _ = new_vm.ssh.run("pidof socat", check=False)
     assert code == 1
 
     # Kill host socat as it is not useful anymore
diff --git a/tests/integration_tests/performance/test_block_ab.py b/tests/integration_tests/performance/test_block_ab.py
index b10a41b7c85..aa6b1cba4fa 100644
--- a/tests/integration_tests/performance/test_block_ab.py
+++ b/tests/integration_tests/performance/test_block_ab.py
@@ -28,15 +28,13 @@ def prepare_microvm_for_test(microvm):
     """Prepares the microvm for running a fio-based performance test by tweaking
     various performance related parameters."""
-    _, _, stderr = microvm.ssh.check_output(
-        "echo 'none' > /sys/block/vdb/queue/scheduler"
-    )
+    _, _, stderr = microvm.ssh.run("echo 'none' > /sys/block/vdb/queue/scheduler")
     assert stderr == ""
 
     # First, flush all guest cached data to host, then drop guest FS caches.
-    _, _, stderr = microvm.ssh.check_output("sync")
+    _, _, stderr = microvm.ssh.run("sync")
     assert stderr == ""
-    _, _, stderr = microvm.ssh.check_output("echo 3 > /proc/sys/vm/drop_caches")
+    _, _, stderr = microvm.ssh.run("echo 3 > /proc/sys/vm/drop_caches")
     assert stderr == ""
 
     # Then, flush all host cached data to hardware, also drop host FS caches.
@@ -97,7 +95,7 @@ def run_fio(microvm, mode, block_size):
     assert stderr == ""
 
     microvm.ssh.scp_get("/tmp/*.log", logs_path)
-    microvm.ssh.check_output("rm /tmp/*.log")
+    microvm.ssh.run("rm /tmp/*.log")
 
     return logs_path, cpu_load_future.result()
diff --git a/tests/integration_tests/performance/test_huge_pages.py b/tests/integration_tests/performance/test_huge_pages.py
index 8437d78c7d3..e370ec34481 100644
--- a/tests/integration_tests/performance/test_huge_pages.py
+++ b/tests/integration_tests/performance/test_huge_pages.py
@@ -128,7 +128,7 @@ def test_hugetlbfs_diff_snapshot(microvm_factory, uvm_plain, uffd_handler_paths)
     uvm_plain.resume()
 
     # Run command to dirty some pages
-    uvm_plain.ssh.check_output("sync")
+    uvm_plain.ssh.run("sync")
 
     snapshot_diff = uvm_plain.snapshot_diff()
     snapshot_merged = snapshot_diff.rebase_snapshot(base_snapshot)
@@ -180,11 +180,11 @@ def test_ept_violation_count(
 
     # Wait for microvm to boot. Then spawn fast_page_fault_helper to setup an environment where we can trigger
     # a lot of fast_page_faults after restoring the snapshot.
-    vm.ssh.check_output(
+    vm.ssh.run(
        "nohup /usr/local/bin/fast_page_fault_helper >/dev/null 2>&1