Skip to content

Commit

Permalink
Merge branch 'main' into test-fix-double-publishing
Browse files Browse the repository at this point in the history
  • Loading branch information
pb8o authored Dec 16, 2024
2 parents 37892a8 + 0bee970 commit 23abf17
Show file tree
Hide file tree
Showing 11 changed files with 99 additions and 103 deletions.
4 changes: 2 additions & 2 deletions .gitlint
Original file line number Diff line number Diff line change
Expand Up @@ -9,8 +9,8 @@ line-length=72

[ignore-body-lines]
# Ignore HTTP reference links
# Ignore lines that start with 'Co-Authored-By' or with 'Signed-off-by'
regex=(^\[.+\]: http.+)|(^Co-Authored-By)|(^Signed-off-by)
# Ignore lines that start with 'Co-Authored-By', with 'Signed-off-by' or with 'Fixes'
regex=(^\[.+\]: http.+)|(^Co-Authored-By)|(^Signed-off-by)|(^Fixes:)

[ignore-by-author-name]
# Ignore certain rules for commits of which the author name matches a regex
Expand Down
27 changes: 1 addition & 26 deletions Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

1 change: 0 additions & 1 deletion src/jailer/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,6 @@ bench = false
[dependencies]
libc = "0.2.168"
log-instrument = { path = "../log-instrument", optional = true }
nix = { version = "0.29.0", default-features = false, features = ["dir"] }
regex = { version = "1.11.1", default-features = false, features = ["std"] }
thiserror = "2.0.6"
vmm-sys-util = "0.12.1"
Expand Down
7 changes: 0 additions & 7 deletions src/utils/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -9,17 +9,10 @@ license = "Apache-2.0"
bench = false

[dependencies]
derive_more = { version = "1.0.0", default-features = false, features = ["from"] }
displaydoc = "0.2.5"
libc = "0.2.168"
log-instrument = { path = "../log-instrument", optional = true }
serde = { version = "1.0.215", features = ["derive"] }
thiserror = "2.0.6"
vm-memory = { version = "0.16.1", features = ["backend-mmap", "backend-bitmap"] }
vmm-sys-util = "0.12.1"

[dev-dependencies]
serde_json = "1.0.133"

[features]
tracing = ["log-instrument"]
Expand Down
1 change: 0 additions & 1 deletion src/vmm/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,6 @@ gdbstub = { version = "0.7.3", optional = true }
gdbstub_arch = { version = "0.3.1", optional = true }
kvm-bindings = { version = "0.10.0", features = ["fam-wrappers", "serde"] }
kvm-ioctls = "0.19.1"
lazy_static = "1.5.0"
libc = "0.2.168"
linux-loader = "0.13.0"
log = { version = "0.4.22", features = ["std", "serde"] }
Expand Down
100 changes: 53 additions & 47 deletions tests/framework/utils_vsock.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,8 @@
from subprocess import Popen
from threading import Thread

from tenacity import Retrying, stop_after_attempt, wait_fixed

ECHO_SERVER_PORT = 5252
SERVER_ACCEPT_BACKLOG = 128
TEST_CONNECTION_COUNT = 50
Expand Down Expand Up @@ -142,53 +144,57 @@ def check_guest_connections(vm, server_port_path, blob_path, blob_hash):
["socat", f"UNIX-LISTEN:{server_port_path},fork,backlog=5", "exec:'/bin/cat'"]
)

# Link the listening Unix socket into the VM's jail, so that
# Firecracker can connect to it.
attempt = 0
# But 1st, give socat a bit of time to create the socket
while not Path(server_port_path).exists() and attempt < 3:
time.sleep(0.2)
attempt += 1
vm.create_jailed_resource(server_port_path)

# Increase maximum process count for the ssh service.
# Avoids: "bash: fork: retry: Resource temporarily unavailable"
# Needed to execute the bash script that tests for concurrent
# vsock guest initiated connections.
pids_max_file = "/sys/fs/cgroup/system.slice/ssh.service/pids.max"
ecode, _, _ = vm.ssh.run(f"echo 1024 > {pids_max_file}")
assert ecode == 0, "Unable to set max process count for guest ssh service."

# Build the guest worker sub-command.
# `vsock_helper` will read the blob file from STDIN and send the echo
# server response to STDOUT. This response is then hashed, and the
# hash is compared against `blob_hash` (computed on the host). This
# comparison sets the exit status of the worker command.
worker_cmd = "hash=$("
worker_cmd += "cat {}".format(blob_path)
worker_cmd += " | /tmp/vsock_helper echo 2 {}".format(ECHO_SERVER_PORT)
worker_cmd += " | md5sum | cut -f1 -d\\ "
worker_cmd += ")"
worker_cmd += ' && [[ "$hash" = "{}" ]]'.format(blob_hash)

# Run `TEST_CONNECTION_COUNT` concurrent workers, using the above
# worker sub-command.
# If any worker fails, this command will fail. If all worker sub-commands
# succeed, this will also succeed.
cmd = 'workers="";'
cmd += "for i in $(seq 1 {}); do".format(TEST_CONNECTION_COUNT)
cmd += " ({})& ".format(worker_cmd)
cmd += ' workers="$workers $!";'
cmd += "done;"
cmd += "for w in $workers; do wait $w || (wait; exit 1); done"

ecode, _, stderr = vm.ssh.run(cmd)
echo_server.terminate()
rc = echo_server.wait()
# socat exits with 128 + 15 (SIGTERM)
assert rc == 143

assert ecode == 0, stderr
try:
# Give socat a bit of time to create the socket
for attempt in Retrying(
wait=wait_fixed(0.2),
stop=stop_after_attempt(3),
reraise=True,
):
with attempt:
assert Path(server_port_path).exists()

# Link the listening Unix socket into the VM's jail, so that
# Firecracker can connect to it.
vm.create_jailed_resource(server_port_path)

# Increase maximum process count for the ssh service.
# Avoids: "bash: fork: retry: Resource temporarily unavailable"
# Needed to execute the bash script that tests for concurrent
# vsock guest initiated connections.
vm.ssh.check_output(
"echo 1024 > /sys/fs/cgroup/system.slice/ssh.service/pids.max"
)

# Build the guest worker sub-command.
# `vsock_helper` will read the blob file from STDIN and send the echo
# server response to STDOUT. This response is then hashed, and the
# hash is compared against `blob_hash` (computed on the host). This
# comparison sets the exit status of the worker command.
worker_cmd = "hash=$("
worker_cmd += "cat {}".format(blob_path)
worker_cmd += " | /tmp/vsock_helper echo 2 {}".format(ECHO_SERVER_PORT)
worker_cmd += " | md5sum | cut -f1 -d\\ "
worker_cmd += ")"
worker_cmd += ' && [[ "$hash" = "{}" ]]'.format(blob_hash)

# Run `TEST_CONNECTION_COUNT` concurrent workers, using the above
# worker sub-command.
# If any worker fails, this command will fail. If all worker sub-commands
# succeed, this will also succeed.
cmd = 'workers="";'
cmd += "for i in $(seq 1 {}); do".format(TEST_CONNECTION_COUNT)
cmd += " ({})& ".format(worker_cmd)
cmd += ' workers="$workers $!";'
cmd += "done;"
cmd += "for w in $workers; do wait $w || (wait; exit 1); done"

vm.ssh.check_output(cmd)
finally:
echo_server.terminate()
rc = echo_server.wait()
# socat exits with 128 + 15 (SIGTERM)
assert rc == 143


def make_host_port_path(uds_path, port):
Expand Down
11 changes: 10 additions & 1 deletion tests/host_tools/cargo_build.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,18 +14,27 @@
DEFAULT_TARGET_DIR = f"{DEFAULT_TARGET}/release/"


def nightly_toolchain() -> str:
    """Return the name of the installed nightly Rust toolchain.

    Queries `rustup toolchain list` and greps for the nightly entry;
    assumes exactly one nightly toolchain is installed — if none is,
    the shell pipeline exits nonzero and check_output raises.
    """
    return utils.check_output("rustup toolchain list | grep nightly").stdout.strip()


def cargo(
    subcommand,
    cargo_args: str = "",
    subcommand_args: str = "",
    *,
    env: dict = None,
    cwd: str = None,
    nightly: bool = False,
):
    """Execute the specified cargo subcommand.

    :param subcommand: cargo subcommand to run (e.g. "build", "udeps")
    :param cargo_args: arguments passed to cargo before the `--` separator
    :param subcommand_args: arguments passed to the subcommand after `--`
    :param env: extra environment variables, rendered as KEY="value" prefixes
        on the command line (not merged into the process environment)
    :param cwd: working directory in which to run the command
    :param nightly: if True, run with the installed nightly toolchain via
        cargo's `+<toolchain>` selector
    :return: result of utils.check_output for the assembled command
    """
    toolchain = f"+{nightly_toolchain()}" if nightly else ""
    env = env or {}
    env_string = " ".join(f'{key}="{str(value)}"' for key, value in env.items())
    # NOTE: a stale duplicate assignment of `cmd` (without the toolchain
    # selector) left over from the merge was removed here; it was dead code,
    # immediately overwritten by this assignment.
    cmd = (
        f"{env_string} cargo {toolchain} {subcommand} {cargo_args} -- {subcommand_args}"
    )
    return utils.check_output(cmd, cwd=cwd)


Expand Down
13 changes: 6 additions & 7 deletions tests/host_tools/test_syscalls.c
Original file line number Diff line number Diff line change
Expand Up @@ -28,14 +28,14 @@ void install_bpf_filter(char *bpf_file) {
exit(EXIT_FAILURE);
}
size_t size = sb.st_size;
size_t insn_len = size / sizeof(struct sock_filter);
struct sock_filter *filterbuf = (struct sock_filter*)malloc(size);
if (read(fd, filterbuf, size) == -1) {
perror("read");
exit(EXIT_FAILURE);
}

/* Install seccomp filter */
size_t insn_len = size / sizeof(struct sock_filter);
struct sock_fprog prog = {
.len = (unsigned short)(insn_len),
.filter = filterbuf,
Expand All @@ -60,18 +60,17 @@ int main(int argc, char **argv) {
char *bpf_file = argv[1];
long syscall_id = atoi(argv[2]);
long arg0, arg1, arg2, arg3;
arg0 = arg1 = arg2 = arg3 = 0;
if (argc > 3) arg0 = atoi(argv[3]);
if (argc > 4) arg1 = atoi(argv[4]);
if (argc > 5) arg2 = atoi(argv[5]);
if (argc > 6) arg3 = atoi(argv[6]);
arg0 = arg1 = arg2 = arg3 = 0L;
if (argc > 3) arg0 = atol(argv[3]);
if (argc > 4) arg1 = atol(argv[4]);
if (argc > 5) arg2 = atol(argv[5]);
if (argc > 6) arg3 = atol(argv[6]);

/* read seccomp filter from file */
if (strcmp(bpf_file, "/dev/null") != 0) {
install_bpf_filter(bpf_file);
}

long res = syscall(syscall_id, arg0, arg1, arg2, arg3);
printf("%ld\n", res);
return EXIT_SUCCESS;
}
12 changes: 12 additions & 0 deletions tests/integration_tests/build/test_dependencies.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,12 @@
# Copyright 2024 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Enforces controls over dependencies."""

from host_tools.cargo_build import cargo


def test_unused_dependencies():
    """
    Test that there are no unused dependencies.

    Runs `cargo udeps --all` with the nightly toolchain (cargo-udeps
    requires nightly). NOTE(review): assumes the underlying check_output
    raises on a nonzero exit status, which is what fails this test when
    unused dependencies are found — confirm against utils.check_output.
    """
    cargo("udeps", "--all", nightly=True)
10 changes: 7 additions & 3 deletions tests/integration_tests/functional/test_cpu_features_x86_64.py
Original file line number Diff line number Diff line change
Expand Up @@ -314,7 +314,7 @@ def test_cpu_rdmsr(
)
vm.start()
vm.ssh.scp_put(DATA_FILES / "msr_reader.sh", "/tmp/msr_reader.sh")
_, stdout, stderr = vm.ssh.run("/tmp/msr_reader.sh")
_, stdout, stderr = vm.ssh.run("/tmp/msr_reader.sh", timeout=None)
assert stderr == ""

# Load results read from the microvm
Expand Down Expand Up @@ -362,7 +362,9 @@ def dump_msr_state_to_file(dump_fname, ssh_conn, shared_names):
ssh_conn.scp_put(
shared_names["msr_reader_host_fname"], shared_names["msr_reader_guest_fname"]
)
_, stdout, stderr = ssh_conn.run(shared_names["msr_reader_guest_fname"])
_, stdout, stderr = ssh_conn.run(
shared_names["msr_reader_guest_fname"], timeout=None
)
assert stderr == ""

with open(dump_fname, "w", encoding="UTF-8") as file:
Expand Down Expand Up @@ -416,7 +418,9 @@ def test_cpu_wrmsr_snapshot(microvm_factory, guest_kernel, rootfs, msr_cpu_templ
wrmsr_input_guest_fname = "/tmp/wrmsr_input.txt"
vm.ssh.scp_put(wrmsr_input_host_fname, wrmsr_input_guest_fname)

_, _, stderr = vm.ssh.run(f"{msr_writer_guest_fname} {wrmsr_input_guest_fname}")
_, _, stderr = vm.ssh.run(
f"{msr_writer_guest_fname} {wrmsr_input_guest_fname}", timeout=None
)
assert stderr == ""

# Dump MSR state to a file that will be published to S3 for the 2nd part of the test
Expand Down
16 changes: 8 additions & 8 deletions tests/integration_tests/security/test_seccomp_validate.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,18 +13,18 @@
import seccomp

from framework import utils
from host_tools import cargo_build

ARCH = platform.machine()


@pytest.fixture(scope="session")
def bin_test_syscall(test_fc_session_root_path):
@pytest.fixture
def bin_test_syscall(tmp_path):
"""Build the test_syscall binary."""
test_syscall_bin = Path(test_fc_session_root_path) / "test_syscall"
cargo_build.gcc_compile("host_tools/test_syscalls.c", test_syscall_bin)
test_syscall_bin = tmp_path / "test_syscall"
compile_cmd = f"musl-gcc -static host_tools/test_syscalls.c -o {test_syscall_bin}"
utils.check_output(compile_cmd)
assert test_syscall_bin.exists()
yield test_syscall_bin
yield test_syscall_bin.resolve()


class BpfMapReader:
Expand Down Expand Up @@ -77,11 +77,11 @@ def split(self):
for _ in range(map_len):
# read key
key_str_len = self.read_format("<Q")
key_str = self.read_format(f"{key_str_len}s")
key_str = self.read_format(f"{key_str_len}s").decode("ascii")
# read value: vec of instructions
insn_len = self.read_format("<Q")
data = self.lookahead(insn_len * self.INSN_SIZEOF)
threads[key_str.decode("ascii")] = data
threads[key_str] = data
self.offset += len(data)

assert self.is_eof()
Expand Down

0 comments on commit 23abf17

Please sign in to comment.