From 13ac3fcead73415f5efdd4aa1fff71d43e6a4d1d Mon Sep 17 00:00:00 2001 From: Egor Lazarchuk Date: Mon, 25 Sep 2023 15:55:40 +0100 Subject: [PATCH 01/14] refactor(memory): moved vm_memory from utils Moved `utils/vm_memory` crate into `vmm/vstate/memory`. Removed `utils/vm_memory`. This module contains `vmm` specific code and it make sense to have it in `vmm`. Signed-off-by: Egor Lazarchuk --- Cargo.lock | 1 + src/utils/src/lib.rs | 1 - src/vmm/Cargo.toml | 1 + src/vmm/src/arch/aarch64/fdt.rs | 12 +++---- src/vmm/src/arch/aarch64/mod.rs | 9 +++--- src/vmm/src/arch/aarch64/regs.rs | 3 +- src/vmm/src/arch/aarch64/vcpu.rs | 4 +-- src/vmm/src/arch/mod.rs | 2 +- src/vmm/src/arch/x86_64/mod.rs | 21 +++++++----- src/vmm/src/arch/x86_64/mptable.rs | 21 ++++++------ src/vmm/src/arch/x86_64/regs.rs | 8 ++--- src/vmm/src/builder.rs | 17 +++++----- src/vmm/src/device_manager/legacy.rs | 5 ++- src/vmm/src/device_manager/mmio.rs | 14 ++++---- src/vmm/src/device_manager/persist.rs | 2 +- src/vmm/src/devices/virtio/balloon/device.rs | 7 ++-- .../devices/virtio/balloon/event_handler.rs | 2 +- src/vmm/src/devices/virtio/balloon/mod.rs | 3 +- src/vmm/src/devices/virtio/balloon/persist.rs | 2 +- src/vmm/src/devices/virtio/balloon/util.rs | 5 ++- src/vmm/src/devices/virtio/block/device.rs | 4 +-- .../src/devices/virtio/block/event_handler.rs | 2 +- .../src/devices/virtio/block/io/async_io.rs | 4 +-- src/vmm/src/devices/virtio/block/io/mod.rs | 12 ++++--- .../src/devices/virtio/block/io/sync_io.rs | 2 +- src/vmm/src/devices/virtio/block/mod.rs | 3 +- src/vmm/src/devices/virtio/block/persist.rs | 2 +- src/vmm/src/devices/virtio/block/request.rs | 8 ++--- .../src/devices/virtio/block/test_utils.rs | 2 +- src/vmm/src/devices/virtio/device.rs | 2 +- src/vmm/src/devices/virtio/iovec.rs | 8 ++--- src/vmm/src/devices/virtio/mmio.rs | 16 +++++----- src/vmm/src/devices/virtio/net/device.rs | 32 ++++++++++--------- src/vmm/src/devices/virtio/net/persist.rs | 2 +- 
src/vmm/src/devices/virtio/net/test_utils.rs | 6 ++-- src/vmm/src/devices/virtio/persist.rs | 4 +-- src/vmm/src/devices/virtio/queue.rs | 19 +++++------ src/vmm/src/devices/virtio/rng/device.rs | 2 +- src/vmm/src/devices/virtio/rng/persist.rs | 2 +- src/vmm/src/devices/virtio/test_utils.rs | 19 ++++++----- .../devices/virtio/vsock/csm/connection.rs | 8 ++--- src/vmm/src/devices/virtio/vsock/csm/txbuf.rs | 4 ++- src/vmm/src/devices/virtio/vsock/device.rs | 2 +- .../src/devices/virtio/vsock/event_handler.rs | 8 ++--- src/vmm/src/devices/virtio/vsock/mod.rs | 2 +- src/vmm/src/devices/virtio/vsock/packet.rs | 12 +++---- src/vmm/src/devices/virtio/vsock/persist.rs | 2 +- .../src/devices/virtio/vsock/test_utils.rs | 2 +- .../src/devices/virtio/vsock/unix/muxer.rs | 2 +- src/vmm/src/io_uring/mod.rs | 2 +- src/vmm/src/io_uring/operation/cqe.rs | 3 +- src/vmm/src/io_uring/operation/sqe.rs | 3 +- src/vmm/src/io_uring/queue/completion.rs | 3 +- src/vmm/src/io_uring/queue/mmap.rs | 2 +- src/vmm/src/io_uring/queue/submission.rs | 2 +- src/vmm/src/lib.rs | 2 +- src/vmm/src/memory_snapshot.rs | 25 ++++++++------- src/vmm/src/persist.rs | 5 ++- .../vm_memory.rs => vmm/src/vstate/memory.rs} | 17 ++++++---- src/vmm/src/vstate/mod.rs | 2 ++ src/vmm/src/vstate/vcpu/aarch64.rs | 4 +-- src/vmm/src/vstate/vcpu/mod.rs | 2 +- src/vmm/src/vstate/vcpu/x86_64.rs | 2 +- src/vmm/src/vstate/vm.rs | 14 ++++---- src/vmm/tests/integration_tests.rs | 2 +- src/vmm/tests/io_uring.rs | 4 +-- 66 files changed, 218 insertions(+), 209 deletions(-) rename src/{utils/src/vm_memory.rs => vmm/src/vstate/memory.rs} (98%) diff --git a/Cargo.lock b/Cargo.lock index 859151deee9..519d6bc0336 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1427,6 +1427,7 @@ dependencies = [ "versionize_derive", "vm-allocator", "vm-fdt", + "vm-memory", "vm-superio", ] diff --git a/src/utils/src/lib.rs b/src/utils/src/lib.rs index cf8cd4fdb54..2747cd32fef 100644 --- a/src/utils/src/lib.rs +++ b/src/utils/src/lib.rs @@ -19,7 +19,6 @@ pub 
mod signal; pub mod sm; pub mod time; pub mod validators; -pub mod vm_memory; use std::num::Wrapping; use std::result::Result; diff --git a/src/vmm/Cargo.toml b/src/vmm/Cargo.toml index d9b33db0f8a..ea9e0dc60aa 100644 --- a/src/vmm/Cargo.toml +++ b/src/vmm/Cargo.toml @@ -29,6 +29,7 @@ versionize = "0.1.10" versionize_derive = "0.1.6" vm-allocator = "0.1.0" vm-superio = "0.7.0" +vm-memory = { version = "0.12.0", features = ["backend-mmap", "backend-bitmap"] } log = { version = "0.4.17", features = ["std", "serde"] } aes-gcm = { version = "0.10.1", default-features = false, features = ["aes"] } base64 = "0.13.0" diff --git a/src/vmm/src/arch/aarch64/fdt.rs b/src/vmm/src/arch/aarch64/fdt.rs index 5ef771a9756..4bcf0a0cdfe 100644 --- a/src/vmm/src/arch/aarch64/fdt.rs +++ b/src/vmm/src/arch/aarch64/fdt.rs @@ -9,15 +9,15 @@ use std::collections::HashMap; use std::ffi::CString; use std::fmt::Debug; -use utils::vm_memory::{ - Address, Bytes, GuestAddress, GuestMemory, GuestMemoryError, GuestMemoryMmap, -}; use vm_fdt::{Error as VmFdtError, FdtWriter, FdtWriterNode}; use super::super::{DeviceType, InitrdConfig}; use super::cache_info::{read_cache_config, CacheEntry}; use super::get_fdt_addr; use super::gic::GICDevice; +use crate::vstate::memory::{ + Address, Bytes, GuestAddress, GuestMemory, GuestMemoryError, GuestMemoryMmap, +}; // This is a value for uniquely identifying the FDT node declaring the interrupt controller. 
const GIC_PHANDLE: u32 = 1; @@ -459,7 +459,7 @@ mod tests { #[test] fn test_create_fdt_with_devices() { let regions = arch_memory_regions(layout::FDT_MAX_SIZE + 0x1000); - let mem = utils::vm_memory::test_utils::create_anon_guest_memory(®ions, false) + let mem = crate::vstate::memory::test_utils::create_anon_guest_memory(®ions, false) .expect("Cannot initialize memory"); let dev_info: HashMap<(DeviceType, std::string::String), MMIODeviceInfo> = [ @@ -499,7 +499,7 @@ mod tests { #[test] fn test_create_fdt() { let regions = arch_memory_regions(layout::FDT_MAX_SIZE + 0x1000); - let mem = utils::vm_memory::test_utils::create_anon_guest_memory(®ions, false) + let mem = crate::vstate::memory::test_utils::create_anon_guest_memory(®ions, false) .expect("Cannot initialize memory"); let kvm = Kvm::new().unwrap(); let vm = kvm.create_vm().unwrap(); @@ -557,7 +557,7 @@ mod tests { #[test] fn test_create_fdt_with_initrd() { let regions = arch_memory_regions(layout::FDT_MAX_SIZE + 0x1000); - let mem = utils::vm_memory::test_utils::create_anon_guest_memory(®ions, false) + let mem = crate::vstate::memory::test_utils::create_anon_guest_memory(®ions, false) .expect("Cannot initialize memory"); let kvm = Kvm::new().unwrap(); let vm = kvm.create_vm().unwrap(); diff --git a/src/vmm/src/arch/aarch64/mod.rs b/src/vmm/src/arch/aarch64/mod.rs index 6713d98464d..7c28d554f8f 100644 --- a/src/vmm/src/arch/aarch64/mod.rs +++ b/src/vmm/src/arch/aarch64/mod.rs @@ -17,11 +17,10 @@ use std::collections::HashMap; use std::ffi::CString; use std::fmt::Debug; -use utils::vm_memory::{Address, GuestAddress, GuestMemory, GuestMemoryMmap}; - pub use self::fdt::DeviceInfoForFDT; use self::gic::GICDevice; use crate::arch::DeviceType; +use crate::vstate::memory::{Address, GuestAddress, GuestMemory, GuestMemoryMmap}; /// Errors thrown while configuring aarch64 system. 
#[derive(Debug, derive_more::From)] @@ -135,17 +134,17 @@ mod tests { #[test] fn test_get_fdt_addr() { let regions = arch_memory_regions(layout::FDT_MAX_SIZE - 0x1000); - let mem = utils::vm_memory::test_utils::create_anon_guest_memory(®ions, false) + let mem = crate::vstate::memory::test_utils::create_anon_guest_memory(®ions, false) .expect("Cannot initialize memory"); assert_eq!(get_fdt_addr(&mem), layout::DRAM_MEM_START); let regions = arch_memory_regions(layout::FDT_MAX_SIZE); - let mem = utils::vm_memory::test_utils::create_anon_guest_memory(®ions, false) + let mem = crate::vstate::memory::test_utils::create_anon_guest_memory(®ions, false) .expect("Cannot initialize memory"); assert_eq!(get_fdt_addr(&mem), layout::DRAM_MEM_START); let regions = arch_memory_regions(layout::FDT_MAX_SIZE + 0x1000); - let mem = utils::vm_memory::test_utils::create_anon_guest_memory(®ions, false) + let mem = crate::vstate::memory::test_utils::create_anon_guest_memory(®ions, false) .expect("Cannot initialize memory"); assert_eq!(get_fdt_addr(&mem), 0x1000 + layout::DRAM_MEM_START); } diff --git a/src/vmm/src/arch/aarch64/regs.rs b/src/vmm/src/arch/aarch64/regs.rs index 0f455097651..3ffe939d2a7 100644 --- a/src/vmm/src/arch/aarch64/regs.rs +++ b/src/vmm/src/arch/aarch64/regs.rs @@ -72,7 +72,8 @@ macro_rules! arm64_core_reg_id { }; } pub(crate) use arm64_core_reg_id; -use utils::vm_memory::ByteValued; + +use crate::vstate::memory::ByteValued; /// This macro computes the ID of a specific ARM64 system register similar to how /// the kernel C macro does. 
diff --git a/src/vmm/src/arch/aarch64/vcpu.rs b/src/vmm/src/arch/aarch64/vcpu.rs index 9182e6b5ed4..48e7c92f556 100644 --- a/src/vmm/src/arch/aarch64/vcpu.rs +++ b/src/vmm/src/arch/aarch64/vcpu.rs @@ -9,10 +9,10 @@ use std::path::PathBuf; use kvm_bindings::*; use kvm_ioctls::VcpuFd; -use utils::vm_memory::GuestMemoryMmap; use super::get_fdt_addr; use super::regs::*; +use crate::vstate::memory::GuestMemoryMmap; /// Errors thrown while setting aarch64 registers. #[derive(Debug, PartialEq, Eq, thiserror::Error, displaydoc::Display)] @@ -208,7 +208,7 @@ mod tests { let vm = kvm.create_vm().unwrap(); let vcpu = vm.create_vcpu(0).unwrap(); let regions = arch_memory_regions(layout::FDT_MAX_SIZE + 0x1000); - let mem = utils::vm_memory::test_utils::create_anon_guest_memory(®ions, false) + let mem = crate::vstate::memory::test_utils::create_anon_guest_memory(®ions, false) .expect("Cannot initialize memory"); let res = setup_boot_regs(&vcpu, 0, 0x0, &mem); diff --git a/src/vmm/src/arch/mod.rs b/src/vmm/src/arch/mod.rs index 83b373af445..15c88ead491 100644 --- a/src/vmm/src/arch/mod.rs +++ b/src/vmm/src/arch/mod.rs @@ -47,7 +47,7 @@ pub enum DeviceType { #[derive(Debug)] pub struct InitrdConfig { /// Load address of initrd in guest memory - pub address: utils::vm_memory::GuestAddress, + pub address: crate::vstate::memory::GuestAddress, /// Size of initrd in guest memory pub size: usize, } diff --git a/src/vmm/src/arch/x86_64/mod.rs b/src/vmm/src/arch/x86_64/mod.rs index 0ee2c61c4ce..47af1ecd8d3 100644 --- a/src/vmm/src/arch/x86_64/mod.rs +++ b/src/vmm/src/arch/x86_64/mod.rs @@ -22,9 +22,11 @@ use linux_loader::configurator::linux::LinuxBootConfigurator; use linux_loader::configurator::{BootConfigurator, BootParams}; use linux_loader::loader::bootparam::boot_params; use utils::u64_to_usize; -use utils::vm_memory::{Address, GuestAddress, GuestMemory, GuestMemoryMmap, GuestMemoryRegion}; use crate::arch::InitrdConfig; +use crate::vstate::memory::{ + Address, GuestAddress, 
GuestMemory, GuestMemoryMmap, GuestMemoryRegion, +}; // Value taken from https://elixir.bootlin.com/linux/v5.10.68/source/arch/x86/include/uapi/asm/e820.h#L31 // Usable normal RAM @@ -228,7 +230,7 @@ mod tests { #[test] fn test_system_configuration() { let no_vcpus = 4; - let gm = utils::vm_memory::test_utils::create_anon_guest_memory( + let gm = crate::vstate::memory::test_utils::create_anon_guest_memory( &[(GuestAddress(0), 0x10000)], false, ) @@ -243,22 +245,25 @@ mod tests { // Now assigning some memory that falls before the 32bit memory hole. let mem_size = 128 << 20; let arch_mem_regions = arch_memory_regions(mem_size); - let gm = utils::vm_memory::test_utils::create_anon_guest_memory(&arch_mem_regions, false) - .unwrap(); + let gm = + crate::vstate::memory::test_utils::create_anon_guest_memory(&arch_mem_regions, false) + .unwrap(); configure_system(&gm, GuestAddress(0), 0, &None, no_vcpus).unwrap(); // Now assigning some memory that is equal to the start of the 32bit memory hole. let mem_size = 3328 << 20; let arch_mem_regions = arch_memory_regions(mem_size); - let gm = utils::vm_memory::test_utils::create_anon_guest_memory(&arch_mem_regions, false) - .unwrap(); + let gm = + crate::vstate::memory::test_utils::create_anon_guest_memory(&arch_mem_regions, false) + .unwrap(); configure_system(&gm, GuestAddress(0), 0, &None, no_vcpus).unwrap(); // Now assigning some memory that falls after the 32bit memory hole. 
let mem_size = 3330 << 20; let arch_mem_regions = arch_memory_regions(mem_size); - let gm = utils::vm_memory::test_utils::create_anon_guest_memory(&arch_mem_regions, false) - .unwrap(); + let gm = + crate::vstate::memory::test_utils::create_anon_guest_memory(&arch_mem_regions, false) + .unwrap(); configure_system(&gm, GuestAddress(0), 0, &None, no_vcpus).unwrap(); } diff --git a/src/vmm/src/arch/x86_64/mptable.rs b/src/vmm/src/arch/x86_64/mptable.rs index 07699a464cf..cd8ab5b97fa 100644 --- a/src/vmm/src/arch/x86_64/mptable.rs +++ b/src/vmm/src/arch/x86_64/mptable.rs @@ -10,10 +10,12 @@ use std::fmt::Debug; use std::{io, mem}; use libc::c_char; -use utils::vm_memory::{Address, ByteValued, Bytes, GuestAddress, GuestMemory, GuestMemoryMmap}; use crate::arch::IRQ_MAX; use crate::arch_gen::x86::mpspec; +use crate::vstate::memory::{ + Address, ByteValued, Bytes, GuestAddress, GuestMemory, GuestMemoryMmap, +}; // These `mpspec` wrapper types are only data, reading them from data is a safe initialization. 
// SAFETY: POD @@ -286,9 +288,8 @@ pub fn setup_mptable(mem: &GuestMemoryMmap, num_cpus: u8) -> Result<(), MptableE #[cfg(test)] mod tests { - use utils::vm_memory::Bytes; - use super::*; + use crate::vstate::memory::Bytes; fn table_entry_size(type_: u8) -> usize { match u32::from(type_) { @@ -304,7 +305,7 @@ mod tests { #[test] fn bounds_check() { let num_cpus = 4; - let mem = utils::vm_memory::test_utils::create_guest_memory_unguarded( + let mem = crate::vstate::memory::test_utils::create_guest_memory_unguarded( &[(GuestAddress(MPTABLE_START), compute_mp_size(num_cpus))], false, ) @@ -316,7 +317,7 @@ mod tests { #[test] fn bounds_check_fails() { let num_cpus = 4; - let mem = utils::vm_memory::test_utils::create_guest_memory_unguarded( + let mem = crate::vstate::memory::test_utils::create_guest_memory_unguarded( &[(GuestAddress(MPTABLE_START), compute_mp_size(num_cpus) - 1)], false, ) @@ -328,7 +329,7 @@ mod tests { #[test] fn mpf_intel_checksum() { let num_cpus = 1; - let mem = utils::vm_memory::test_utils::create_guest_memory_unguarded( + let mem = crate::vstate::memory::test_utils::create_guest_memory_unguarded( &[(GuestAddress(MPTABLE_START), compute_mp_size(num_cpus))], false, ) @@ -344,7 +345,7 @@ mod tests { #[test] fn mpc_table_checksum() { let num_cpus = 4; - let mem = utils::vm_memory::test_utils::create_guest_memory_unguarded( + let mem = crate::vstate::memory::test_utils::create_guest_memory_unguarded( &[(GuestAddress(MPTABLE_START), compute_mp_size(num_cpus))], false, ) @@ -378,7 +379,7 @@ mod tests { #[test] fn cpu_entry_count() { - let mem = utils::vm_memory::test_utils::create_guest_memory_unguarded( + let mem = crate::vstate::memory::test_utils::create_guest_memory_unguarded( &[( GuestAddress(MPTABLE_START), compute_mp_size(MAX_SUPPORTED_CPUS), @@ -416,8 +417,8 @@ mod tests { #[test] fn cpu_entry_count_max() { let cpus = MAX_SUPPORTED_CPUS + 1; - let mem = utils::vm_memory::test_utils::create_guest_memory_unguarded( - 
&[(GuestAddress(MPTABLE_START), compute_mp_size(cpus))], + let mem = crate::vstate::memory::test_utils::create_guest_memory_unguarded( + &[(GuestAddress(MPTABLE_START), compute_mp_size(cpus as u8))], false, ) .unwrap(); diff --git a/src/vmm/src/arch/x86_64/regs.rs b/src/vmm/src/arch/x86_64/regs.rs index 6679026aedb..226ba204c67 100644 --- a/src/vmm/src/arch/x86_64/regs.rs +++ b/src/vmm/src/arch/x86_64/regs.rs @@ -9,9 +9,9 @@ use std::mem; use kvm_bindings::{kvm_fpu, kvm_regs, kvm_sregs}; use kvm_ioctls::VcpuFd; -use utils::vm_memory::{Address, Bytes, GuestAddress, GuestMemory, GuestMemoryMmap}; use super::gdt::{gdt_entry, kvm_segment_from_gdt}; +use crate::vstate::memory::{Address, Bytes, GuestAddress, GuestMemory, GuestMemoryMmap}; // Initial pagetables. const PML4_START: u64 = 0x9000; @@ -240,21 +240,21 @@ fn setup_page_tables(mem: &GuestMemoryMmap, sregs: &mut kvm_sregs) -> Result<(), mod tests { use kvm_ioctls::Kvm; use utils::u64_to_usize; - use utils::vm_memory::{Bytes, GuestAddress, GuestMemoryMmap}; use super::*; + use crate::vstate::memory::{Bytes, GuestAddress, GuestMemoryMmap}; fn create_guest_mem(mem_size: Option) -> GuestMemoryMmap { let page_size = 0x10000usize; let mem_size = u64_to_usize(mem_size.unwrap_or(page_size as u64)); if mem_size % page_size == 0 { - utils::vm_memory::test_utils::create_anon_guest_memory( + crate::vstate::memory::test_utils::create_anon_guest_memory( &[(GuestAddress(0), mem_size)], false, ) .unwrap() } else { - utils::vm_memory::test_utils::create_guest_memory_unguarded( + crate::vstate::memory::test_utils::create_guest_memory_unguarded( &[(GuestAddress(0), mem_size)], false, ) diff --git a/src/vmm/src/builder.rs b/src/vmm/src/builder.rs index 5def742bf57..8e25c7f61bf 100644 --- a/src/vmm/src/builder.rs +++ b/src/vmm/src/builder.rs @@ -23,7 +23,6 @@ use userfaultfd::Uffd; use utils::eventfd::EventFd; use utils::time::TimestampUs; use utils::u64_to_usize; -use utils::vm_memory::{GuestAddress, GuestMemory, GuestMemoryMmap, 
ReadVolatile}; #[cfg(target_arch = "aarch64")] use vm_superio::Rtc; use vm_superio::Serial; @@ -55,6 +54,7 @@ use crate::resources::VmResources; use crate::vmm_config::boot_source::BootConfig; use crate::vmm_config::instance_info::InstanceInfo; use crate::vmm_config::machine_config::{MachineConfigUpdate, VmConfig, VmConfigError}; +use crate::vstate::memory::{GuestAddress, GuestMemory, GuestMemoryMmap, ReadVolatile}; use crate::vstate::vcpu::{Vcpu, VcpuConfig}; use crate::vstate::vm::Vm; use crate::{device_manager, EventManager, RestoreVcpusError, Vmm, VmmError}; @@ -84,7 +84,7 @@ pub enum StartMicrovmError { CreateLegacyDevice(device_manager::legacy::LegacyDeviceError), /// Memory regions are overlapping or mmap fails. #[error("Invalid Memory Configuration: {}", format!("{:?}", .0).replace('\"', ""))] - GuestMemoryMmap(utils::vm_memory::Error), + GuestMemoryMmap(crate::vstate::memory::Error), /// Cannot load initrd due to an invalid memory configuration. #[error("Cannot load initrd due to an invalid memory configuration.")] InitrdLoad, @@ -547,7 +547,7 @@ pub fn create_guest_memory( let mem_size = mem_size_mib << 20; let arch_mem_regions = crate::arch::arch_memory_regions(mem_size); - utils::vm_memory::create_guest_memory( + crate::vstate::memory::create_guest_memory( &arch_mem_regions .iter() .map(|(addr, size)| (None, *addr, *size)) @@ -801,7 +801,7 @@ pub fn configure_system_for_boot( .as_cstring() .map(|cmdline_cstring| cmdline_cstring.as_bytes_with_nul().len())?; - linux_loader::loader::load_cmdline::( + linux_loader::loader::load_cmdline::( vmm.guest_memory(), GuestAddress(crate::arch::x86_64::layout::CMDLINE_START), &boot_cmdline, @@ -809,7 +809,7 @@ pub fn configure_system_for_boot( .map_err(LoadCommandline)?; crate::arch::x86_64::configure_system( &vmm.guest_memory, - utils::vm_memory::GuestAddress(crate::arch::x86_64::layout::CMDLINE_START), + crate::vstate::memory::GuestAddress(crate::arch::x86_64::layout::CMDLINE_START), cmdline_size, initrd, 
vcpu_config.vcpu_count, @@ -970,7 +970,6 @@ pub mod tests { use linux_loader::cmdline::Cmdline; use utils::tempfile::TempFile; - use utils::vm_memory::GuestMemory; use super::*; use crate::arch::DeviceType; @@ -986,6 +985,7 @@ pub mod tests { use crate::vmm_config::net::{NetBuilder, NetworkInterfaceConfig}; use crate::vmm_config::vsock::tests::default_config; use crate::vmm_config::vsock::{VsockBuilder, VsockDeviceConfig}; + use crate::vstate::memory::GuestMemory; #[derive(Debug)] pub(crate) struct CustomBlockConfig { @@ -1227,7 +1227,8 @@ pub mod tests { } fn create_guest_mem_at(at: GuestAddress, size: usize) -> GuestMemoryMmap { - utils::vm_memory::test_utils::create_guest_memory_unguarded(&[(at, size)], false).unwrap() + crate::vstate::memory::test_utils::create_guest_memory_unguarded(&[(at, size)], false) + .unwrap() } pub(crate) fn create_guest_mem_with_size(size: usize) -> GuestMemoryMmap { @@ -1241,7 +1242,7 @@ pub mod tests { #[test] // Test that loading the initrd is successful on different archs. 
fn test_load_initrd() { - use utils::vm_memory::GuestMemory; + use crate::vstate::memory::GuestMemory; let image = make_test_bin(); let mem_size: usize = image.len() * 2 + crate::arch::PAGE_SIZE; diff --git a/src/vmm/src/device_manager/legacy.rs b/src/vmm/src/device_manager/legacy.rs index 0430d92e4f8..75dce565c76 100644 --- a/src/vmm/src/device_manager/legacy.rs +++ b/src/vmm/src/device_manager/legacy.rs @@ -171,14 +171,13 @@ impl PortIODeviceManager { #[cfg(test)] mod tests { - use utils::vm_memory::GuestAddress; - use super::*; + use crate::vstate::memory::GuestAddress; use crate::Vm; #[test] fn test_register_legacy_devices() { - let guest_mem = utils::vm_memory::test_utils::create_anon_guest_memory( + let guest_mem = crate::vstate::memory::test_utils::create_anon_guest_memory( &[(GuestAddress(0x0), 0x1000)], false, ) diff --git a/src/vmm/src/device_manager/mmio.rs b/src/vmm/src/device_manager/mmio.rs index bdb87c6a495..3f9258d8346 100644 --- a/src/vmm/src/device_manager/mmio.rs +++ b/src/vmm/src/device_manager/mmio.rs @@ -11,8 +11,7 @@ use std::sync::{Arc, Mutex}; use kvm_ioctls::{IoEventAddress, VmFd}; use linux_loader::cmdline as kernel_cmdline; -#[cfg(target_arch = "x86_64")] -use utils::vm_memory::GuestAddress; +use log::info; use versionize::{VersionMap, Versionize, VersionizeResult}; use versionize_derive::Versionize; use vm_allocator::{AddressAllocator, AllocPolicy, IdAllocator}; @@ -29,7 +28,8 @@ use crate::devices::virtio::{ TYPE_RNG, TYPE_VSOCK, }; use crate::devices::BusDevice; -use crate::logger::info; +#[cfg(target_arch = "x86_64")] +use crate::vstate::memory::GuestAddress; /// Errors for MMIO device manager. 
#[derive(Debug, thiserror::Error, displaydoc::Display)] @@ -466,10 +466,10 @@ mod tests { use std::sync::Arc; use utils::eventfd::EventFd; - use utils::vm_memory::{GuestAddress, GuestMemoryMmap}; use super::*; use crate::devices::virtio::{ActivateError, Queue, VirtioDevice}; + use crate::vstate::memory::{GuestAddress, GuestMemoryMmap}; use crate::{builder, Vm}; const QUEUE_SIZES: &[u16] = &[64]; @@ -573,7 +573,7 @@ mod tests { fn test_register_virtio_device() { let start_addr1 = GuestAddress(0x0); let start_addr2 = GuestAddress(0x1000); - let guest_mem = utils::vm_memory::test_utils::create_anon_guest_memory( + let guest_mem = crate::vstate::memory::test_utils::create_anon_guest_memory( &[(start_addr1, 0x1000), (start_addr2, 0x1000)], false, ) @@ -603,7 +603,7 @@ mod tests { fn test_register_too_many_devices() { let start_addr1 = GuestAddress(0x0); let start_addr2 = GuestAddress(0x1000); - let guest_mem = utils::vm_memory::test_utils::create_anon_guest_memory( + let guest_mem = crate::vstate::memory::test_utils::create_anon_guest_memory( &[(start_addr1, 0x1000), (start_addr2, 0x1000)], false, ) @@ -663,7 +663,7 @@ mod tests { fn test_device_info() { let start_addr1 = GuestAddress(0x0); let start_addr2 = GuestAddress(0x1000); - let guest_mem = utils::vm_memory::test_utils::create_anon_guest_memory( + let guest_mem = crate::vstate::memory::test_utils::create_anon_guest_memory( &[(start_addr1, 0x1000), (start_addr2, 0x1000)], false, ) diff --git a/src/vmm/src/device_manager/persist.rs b/src/vmm/src/device_manager/persist.rs index dcb6083f830..2d9d3148084 100644 --- a/src/vmm/src/device_manager/persist.rs +++ b/src/vmm/src/device_manager/persist.rs @@ -10,7 +10,6 @@ use event_manager::{MutEventSubscriber, SubscriberOps}; use kvm_ioctls::VmFd; use log::{error, warn}; use snapshot::Persist; -use utils::vm_memory::GuestMemoryMmap; use versionize::{VersionMap, Versionize, VersionizeError, VersionizeResult}; use versionize_derive::Versionize; use vm_allocator::AllocPolicy; 
@@ -43,6 +42,7 @@ use crate::logger; use crate::mmds::data_store::MmdsVersion; use crate::resources::VmResources; use crate::vmm_config::mmds::MmdsConfigError; +use crate::vstate::memory::GuestMemoryMmap; use crate::EventManager; /// Errors for (de)serialization of the MMIO device manager. diff --git a/src/vmm/src/devices/virtio/balloon/device.rs b/src/vmm/src/devices/virtio/balloon/device.rs index e5f263967e7..5e753af9f18 100644 --- a/src/vmm/src/devices/virtio/balloon/device.rs +++ b/src/vmm/src/devices/virtio/balloon/device.rs @@ -12,7 +12,6 @@ use serde::Serialize; use timerfd::{ClockId, SetTimeFlags, TimerFd, TimerState}; use utils::eventfd::EventFd; use utils::u64_to_usize; -use utils::vm_memory::{Address, ByteValued, Bytes, GuestAddress, GuestMemoryMmap}; use super::super::{ActivateError, DeviceState, Queue, VirtioDevice, TYPE_BALLOON}; use super::util::{compact_page_frame_numbers, remove_range}; @@ -29,6 +28,7 @@ use crate::devices::virtio::balloon::BalloonError; use crate::devices::virtio::gen::virtio_blk::VIRTIO_F_VERSION_1; use crate::devices::virtio::{IrqTrigger, IrqType}; use crate::logger::{IncMetric, METRICS}; +use crate::vstate::memory::{Address, ByteValued, Bytes, GuestAddress, GuestMemoryMmap}; const SIZE_OF_U32: usize = std::mem::size_of::(); const SIZE_OF_STAT: usize = std::mem::size_of::(); @@ -647,8 +647,6 @@ impl VirtioDevice for Balloon { pub(crate) mod tests { use std::u32; - use utils::vm_memory::GuestAddress; - use super::super::BALLOON_CONFIG_SPACE_SIZE; use super::*; use crate::check_metric_after_block; @@ -658,6 +656,7 @@ pub(crate) mod tests { }; use crate::devices::virtio::test_utils::{default_mem, VirtQueue}; use crate::devices::virtio::{VIRTQ_DESC_F_NEXT, VIRTQ_DESC_F_WRITE}; + use crate::vstate::memory::GuestAddress; impl Balloon { pub(crate) fn set_queue(&mut self, idx: usize, q: Queue) { @@ -1135,7 +1134,7 @@ pub(crate) mod tests { assert!(balloon.update_size(1).is_err()); // Switch the state to active. 
balloon.device_state = DeviceState::Activated( - utils::vm_memory::test_utils::create_guest_memory_unguarded( + crate::vstate::memory::test_utils::create_guest_memory_unguarded( &[(GuestAddress(0x0), 0x1)], false, ) diff --git a/src/vmm/src/devices/virtio/balloon/event_handler.rs b/src/vmm/src/devices/virtio/balloon/event_handler.rs index 369e97f9430..863f76f0418 100644 --- a/src/vmm/src/devices/virtio/balloon/event_handler.rs +++ b/src/vmm/src/devices/virtio/balloon/event_handler.rs @@ -113,11 +113,11 @@ pub mod tests { use std::sync::{Arc, Mutex}; use event_manager::{EventManager, SubscriberOps}; - use utils::vm_memory::GuestAddress; use super::*; use crate::devices::virtio::balloon::test_utils::set_request; use crate::devices::virtio::test_utils::{default_mem, VirtQueue}; + use crate::vstate::memory::GuestAddress; #[test] fn test_event_handler() { diff --git a/src/vmm/src/devices/virtio/balloon/mod.rs b/src/vmm/src/devices/virtio/balloon/mod.rs index 82202f276da..2e622cdd3d1 100644 --- a/src/vmm/src/devices/virtio/balloon/mod.rs +++ b/src/vmm/src/devices/virtio/balloon/mod.rs @@ -9,10 +9,9 @@ pub mod persist; pub mod test_utils; mod util; -use utils::vm_memory::GuestMemoryError; - pub use self::device::{Balloon, BalloonConfig, BalloonStats}; use crate::devices::virtio::FIRECRACKER_MAX_QUEUE_SIZE; +use crate::vstate::memory::GuestMemoryError; /// Device ID used in MMIO device identification. /// Because Balloon is unique per-vm, this ID can be hardcoded. 
diff --git a/src/vmm/src/devices/virtio/balloon/persist.rs b/src/vmm/src/devices/virtio/balloon/persist.rs index 40da9b038ec..19c134a0269 100644 --- a/src/vmm/src/devices/virtio/balloon/persist.rs +++ b/src/vmm/src/devices/virtio/balloon/persist.rs @@ -9,7 +9,6 @@ use std::time::Duration; use snapshot::Persist; use timerfd::{SetTimeFlags, TimerState}; -use utils::vm_memory::GuestMemoryMmap; use versionize::{VersionMap, Versionize, VersionizeResult}; use versionize_derive::Versionize; @@ -17,6 +16,7 @@ use super::*; use crate::devices::virtio::balloon::device::{BalloonStats, ConfigSpace}; use crate::devices::virtio::persist::VirtioDeviceState; use crate::devices::virtio::{DeviceState, FIRECRACKER_MAX_QUEUE_SIZE, TYPE_BALLOON}; +use crate::vstate::memory::GuestMemoryMmap; /// Information about the balloon config's that are saved /// at snapshot. diff --git a/src/vmm/src/devices/virtio/balloon/util.rs b/src/vmm/src/devices/virtio/balloon/util.rs index d42f94ad2cc..40250d10e39 100644 --- a/src/vmm/src/devices/virtio/balloon/util.rs +++ b/src/vmm/src/devices/virtio/balloon/util.rs @@ -4,10 +4,10 @@ use std::io; use utils::u64_to_usize; -use utils::vm_memory::{GuestAddress, GuestMemory, GuestMemoryMmap, GuestMemoryRegion}; use super::{RemoveRegionError, MAX_PAGE_COMPACT_BUFFER}; use crate::logger::error; +use crate::vstate::memory::{GuestAddress, GuestMemory, GuestMemoryMmap, GuestMemoryRegion}; /// This takes a vector of page frame numbers, and compacts them /// into ranges of consecutive pages. The result is a vector @@ -121,9 +121,8 @@ pub(crate) fn remove_range( mod tests { use std::fmt::Debug; - use utils::vm_memory::Bytes; - use super::*; + use crate::vstate::memory::Bytes; /// This asserts that $lhs matches $rhs. macro_rules! 
assert_match { diff --git a/src/vmm/src/devices/virtio/block/device.rs b/src/vmm/src/devices/virtio/block/device.rs index c6bd0fbab9c..3203286327b 100644 --- a/src/vmm/src/devices/virtio/block/device.rs +++ b/src/vmm/src/devices/virtio/block/device.rs @@ -19,7 +19,6 @@ use serde::{Deserialize, Serialize}; use utils::eventfd::EventFd; use utils::kernel_version::{min_kernel_version_for_io_uring, KernelVersion}; use utils::u64_to_usize; -use utils::vm_memory::GuestMemoryMmap; use super::super::{ActivateError, DeviceState, Queue, VirtioDevice, TYPE_BLOCK}; use super::io::async_io; @@ -35,6 +34,7 @@ use crate::devices::virtio::gen::virtio_ring::VIRTIO_RING_F_EVENT_IDX; use crate::devices::virtio::{IrqTrigger, IrqType}; use crate::logger::{error, warn, IncMetric, METRICS}; use crate::rate_limiter::{BucketUpdate, RateLimiter}; +use crate::vstate::memory::GuestMemoryMmap; /// Configuration options for disk caching. #[derive(Clone, Copy, Debug, Default, PartialEq, Eq, Deserialize, Serialize)] @@ -652,7 +652,6 @@ mod tests { use utils::skip_if_io_uring_unsupported; use utils::tempfile::TempFile; - use utils::vm_memory::{Address, Bytes, GuestAddress}; use super::*; use crate::check_metric_after_block; @@ -664,6 +663,7 @@ mod tests { use crate::devices::virtio::test_utils::{default_mem, VirtQueue}; use crate::devices::virtio::{IO_URING_NUM_ENTRIES, VIRTQ_DESC_F_NEXT, VIRTQ_DESC_F_WRITE}; use crate::rate_limiter::TokenType; + use crate::vstate::memory::{Address, Bytes, GuestAddress}; #[test] fn test_disk_backing_file_helper() { diff --git a/src/vmm/src/devices/virtio/block/event_handler.rs b/src/vmm/src/devices/virtio/block/event_handler.rs index f4c33765550..53754f8a094 100644 --- a/src/vmm/src/devices/virtio/block/event_handler.rs +++ b/src/vmm/src/devices/virtio/block/event_handler.rs @@ -103,7 +103,6 @@ mod tests { use std::sync::{Arc, Mutex}; use event_manager::{EventManager, SubscriberOps}; - use utils::vm_memory::{Bytes, GuestAddress}; use super::*; use 
crate::devices::virtio::block::device::FileEngineType; @@ -113,6 +112,7 @@ mod tests { use crate::devices::virtio::gen::virtio_blk::{VIRTIO_BLK_S_OK, VIRTIO_BLK_T_OUT}; use crate::devices::virtio::test_utils::{default_mem, VirtQueue}; use crate::devices::virtio::VIRTQ_DESC_F_NEXT; + use crate::vstate::memory::{Bytes, GuestAddress}; #[test] fn test_event_handler() { diff --git a/src/vmm/src/devices/virtio/block/io/async_io.rs b/src/vmm/src/devices/virtio/block/io/async_io.rs index 5e4caaf07fd..c0d7771d0d4 100644 --- a/src/vmm/src/devices/virtio/block/io/async_io.rs +++ b/src/vmm/src/devices/virtio/block/io/async_io.rs @@ -7,7 +7,6 @@ use std::marker::PhantomData; use std::os::unix::io::AsRawFd; use utils::eventfd::EventFd; -use utils::vm_memory::{mark_dirty_mem, GuestAddress, GuestMemory, GuestMemoryMmap}; use crate::devices::virtio::block::io::UserDataError; use crate::devices::virtio::block::IO_URING_NUM_ENTRIES; @@ -15,6 +14,7 @@ use crate::io_uring::operation::{Cqe, OpCode, Operation}; use crate::io_uring::restriction::Restriction; use crate::io_uring::{IoUring, IoUringError}; use crate::logger::log_dev_preview_warning; +use crate::vstate::memory::{mark_dirty_mem, GuestAddress, GuestMemory, GuestMemoryMmap}; #[derive(Debug)] pub enum AsyncIoError { @@ -23,7 +23,7 @@ pub enum AsyncIoError { Submit(std::io::Error), SyncAll(std::io::Error), EventFd(std::io::Error), - GuestMemory(utils::vm_memory::GuestMemoryError), + GuestMemory(crate::vstate::memory::GuestMemoryError), } #[derive(Debug)] diff --git a/src/vmm/src/devices/virtio/block/io/mod.rs b/src/vmm/src/devices/virtio/block/io/mod.rs index 6e7e63ba578..a8c48e29795 100644 --- a/src/vmm/src/devices/virtio/block/io/mod.rs +++ b/src/vmm/src/devices/virtio/block/io/mod.rs @@ -7,11 +7,10 @@ pub mod sync_io; use std::fmt::Debug; use std::fs::File; -use utils::vm_memory::{GuestAddress, GuestMemoryMmap}; - pub use self::async_io::{AsyncFileEngine, AsyncIoError}; pub use self::sync_io::{SyncFileEngine, SyncIoError}; use 
crate::devices::virtio::block::device::FileEngineType; +use crate::vstate::memory::{GuestAddress, GuestMemoryMmap}; #[derive(Debug, PartialEq, Eq)] pub struct UserDataOk { @@ -189,12 +188,12 @@ pub mod tests { use utils::kernel_version::{min_kernel_version_for_io_uring, KernelVersion}; use utils::tempfile::TempFile; - use utils::vm_memory::{Bitmap, Bytes, GuestMemory}; use utils::{skip_if_io_uring_supported, skip_if_io_uring_unsupported, u64_to_usize}; use super::*; use crate::devices::virtio::block::device::FileEngineType; use crate::devices::virtio::block::request::PendingRequest; + use crate::vstate::memory::{Bitmap, Bytes, GuestMemory}; const FILE_LEN: u32 = 1024; // 2 pages of memory should be enough to test read/write ops and also dirty tracking. @@ -244,8 +243,11 @@ pub mod tests { } fn create_mem() -> GuestMemoryMmap { - utils::vm_memory::test_utils::create_anon_guest_memory(&[(GuestAddress(0), MEM_LEN)], true) - .unwrap() + crate::vstate::memory::test_utils::create_anon_guest_memory( + &[(GuestAddress(0), MEM_LEN)], + true, + ) + .unwrap() } fn check_dirty_mem(mem: &GuestMemoryMmap, addr: GuestAddress, len: u32) { diff --git a/src/vmm/src/devices/virtio/block/io/sync_io.rs b/src/vmm/src/devices/virtio/block/io/sync_io.rs index d8aa29b3f9b..393537de59a 100644 --- a/src/vmm/src/devices/virtio/block/io/sync_io.rs +++ b/src/vmm/src/devices/virtio/block/io/sync_io.rs @@ -4,7 +4,7 @@ use std::fs::File; use std::io::{Seek, SeekFrom, Write}; -use utils::vm_memory::{ +use crate::vstate::memory::{ GuestAddress, GuestMemory, GuestMemoryError, GuestMemoryMmap, ReadVolatile, WriteVolatile, }; diff --git a/src/vmm/src/devices/virtio/block/mod.rs b/src/vmm/src/devices/virtio/block/mod.rs index 93e5e6e429d..493b672057f 100644 --- a/src/vmm/src/devices/virtio/block/mod.rs +++ b/src/vmm/src/devices/virtio/block/mod.rs @@ -10,12 +10,11 @@ pub mod persist; pub mod request; pub mod test_utils; -use utils::vm_memory::GuestMemoryError; - pub use self::device::{Block, CacheType}; 
pub use self::event_handler::*; pub use self::request::*; use crate::devices::virtio::FIRECRACKER_MAX_QUEUE_SIZE; +use crate::vstate::memory::GuestMemoryError; /// Size of config space for block device. pub const BLOCK_CONFIG_SPACE_SIZE: usize = 8; diff --git a/src/vmm/src/devices/virtio/block/persist.rs b/src/vmm/src/devices/virtio/block/persist.rs index 663ef2da210..d10db720cb4 100644 --- a/src/vmm/src/devices/virtio/block/persist.rs +++ b/src/vmm/src/devices/virtio/block/persist.rs @@ -7,7 +7,6 @@ use std::sync::atomic::AtomicU32; use std::sync::Arc; use snapshot::Persist; -use utils::vm_memory::GuestMemoryMmap; use versionize::{VersionMap, Versionize, VersionizeError, VersionizeResult}; use versionize_derive::Versionize; @@ -19,6 +18,7 @@ use crate::devices::virtio::{DeviceState, FIRECRACKER_MAX_QUEUE_SIZE, TYPE_BLOCK use crate::logger::warn; use crate::rate_limiter::persist::RateLimiterState; use crate::rate_limiter::RateLimiter; +use crate::vstate::memory::GuestMemoryMmap; /// Holds info about block's cache type. Gets saved in snapshot. // NOTICE: Any changes to this structure require a snapshot version bump. 
diff --git a/src/vmm/src/devices/virtio/block/request.rs b/src/vmm/src/devices/virtio/block/request.rs index 20c01248dcf..87477396df8 100644 --- a/src/vmm/src/devices/virtio/block/request.rs +++ b/src/vmm/src/devices/virtio/block/request.rs @@ -7,8 +7,6 @@ use std::convert::From; -use utils::vm_memory::{ByteValued, Bytes, GuestAddress, GuestMemoryError, GuestMemoryMmap}; - use super::super::DescriptorChain; use super::{io as block_io, BlockError, SECTOR_SHIFT}; use crate::devices::virtio::block::device::DiskProperties; @@ -19,6 +17,7 @@ pub use crate::devices::virtio::gen::virtio_blk::{ use crate::devices::virtio::SECTOR_SIZE; use crate::logger::{error, IncMetric, METRICS}; use crate::rate_limiter::{RateLimiter, TokenType}; +use crate::vstate::memory::{ByteValued, Bytes, GuestAddress, GuestMemoryError, GuestMemoryMmap}; #[derive(Debug, derive_more::From)] pub enum IoErr { @@ -408,12 +407,11 @@ impl Request { mod tests { #![allow(clippy::undocumented_unsafe_blocks)] - use utils::vm_memory::test_utils::create_anon_guest_memory; - use utils::vm_memory::{Address, GuestAddress, GuestMemory}; - use super::*; use crate::devices::virtio::test_utils::{default_mem, single_region_mem, VirtQueue}; use crate::devices::virtio::{Queue, VIRTQ_DESC_F_NEXT, VIRTQ_DESC_F_WRITE}; + use crate::vstate::memory::test_utils::create_anon_guest_memory; + use crate::vstate::memory::{Address, GuestAddress, GuestMemory}; const NUM_DISK_SECTORS: u64 = 1024; diff --git a/src/vmm/src/devices/virtio/block/test_utils.rs b/src/vmm/src/devices/virtio/block/test_utils.rs index 0e2e041f9d0..dab5848b5e2 100644 --- a/src/vmm/src/devices/virtio/block/test_utils.rs +++ b/src/vmm/src/devices/virtio/block/test_utils.rs @@ -10,7 +10,6 @@ use std::time::Duration; use utils::kernel_version::{min_kernel_version_for_io_uring, KernelVersion}; use utils::tempfile::TempFile; -use utils::vm_memory::{Bytes, GuestAddress}; use crate::devices::virtio::block::device::FileEngineType; #[cfg(test)] @@ -21,6 +20,7 @@ use 
crate::devices::virtio::test_utils::{VirtQueue, VirtqDesc}; use crate::devices::virtio::IrqType; use crate::devices::virtio::{Block, CacheType, Queue, RequestHeader}; use crate::rate_limiter::RateLimiter; +use crate::vstate::memory::{Bytes, GuestAddress}; /// Create a default Block instance to be used in tests. pub fn default_block(file_engine_type: FileEngineType) -> Block { diff --git a/src/vmm/src/devices/virtio/device.rs b/src/vmm/src/devices/virtio/device.rs index b6a5596af20..ed805116dce 100644 --- a/src/vmm/src/devices/virtio/device.rs +++ b/src/vmm/src/devices/virtio/device.rs @@ -10,11 +10,11 @@ use std::sync::atomic::{AtomicU32, Ordering}; use std::sync::Arc; use utils::eventfd::EventFd; -use utils::vm_memory::GuestMemoryMmap; use super::{ActivateError, Queue}; use crate::devices::virtio::{AsAny, VIRTIO_MMIO_INT_CONFIG, VIRTIO_MMIO_INT_VRING}; use crate::logger::{error, warn}; +use crate::vstate::memory::GuestMemoryMmap; /// Enum that indicates if a VirtioDevice is inactive or has been activated /// and memory attached to it. diff --git a/src/vmm/src/devices/virtio/iovec.rs b/src/vmm/src/devices/virtio/iovec.rs index 47bb20048cf..169d0b205dc 100644 --- a/src/vmm/src/devices/virtio/iovec.rs +++ b/src/vmm/src/devices/virtio/iovec.rs @@ -2,9 +2,9 @@ // SPDX-License-Identifier: Apache-2.0 use libc::{c_void, iovec, size_t}; -use utils::vm_memory::{Bitmap, GuestMemory, GuestMemoryMmap}; use crate::devices::virtio::DescriptorChain; +use crate::vstate::memory::{Bitmap, GuestMemory, GuestMemoryMmap}; #[derive(Debug, thiserror::Error, displaydoc::Display)] pub enum IoVecError { @@ -13,7 +13,7 @@ pub enum IoVecError { /// Tried to create an 'IoVecMut` from a read-only descriptor chain ReadOnlyDescriptor, /// Guest memory error: {0} - GuestMemory(#[from] utils::vm_memory::GuestMemoryError), + GuestMemory(#[from] crate::vstate::memory::GuestMemoryError), } /// This is essentially a wrapper of a `Vec` which can be passed to `libc::writev`. 
@@ -270,12 +270,12 @@ impl IoVecBufferMut { #[cfg(test)] mod tests { use libc::{c_void, iovec}; - use utils::vm_memory::test_utils::create_anon_guest_memory; - use utils::vm_memory::{Bytes, GuestAddress, GuestMemoryMmap}; use super::{IoVecBuffer, IoVecBufferMut}; use crate::devices::virtio::queue::{Queue, VIRTQ_DESC_F_NEXT, VIRTQ_DESC_F_WRITE}; use crate::devices::virtio::test_utils::VirtQueue; + use crate::vstate::memory::test_utils::create_anon_guest_memory; + use crate::vstate::memory::{Bytes, GuestAddress, GuestMemoryMmap}; impl<'a> From<&'a [u8]> for IoVecBuffer { fn from(buf: &'a [u8]) -> Self { diff --git a/src/vmm/src/devices/virtio/mmio.rs b/src/vmm/src/devices/virtio/mmio.rs index c2c3af98755..f80cce8aec7 100644 --- a/src/vmm/src/devices/virtio/mmio.rs +++ b/src/vmm/src/devices/virtio/mmio.rs @@ -9,11 +9,11 @@ use std::fmt::Debug; use std::sync::atomic::{AtomicU32, Ordering}; use std::sync::{Arc, Mutex, MutexGuard}; +use log::warn; use utils::byte_order; -use utils::vm_memory::{GuestAddress, GuestMemoryMmap}; use super::{device_status, *}; -use crate::logger::warn; +use crate::vstate::memory::{GuestAddress, GuestMemoryMmap}; // TODO crosvm uses 0 here, but IIRC virtio specified some other vendor id that should be used const VENDOR_ID: u32 = 0; @@ -330,9 +330,9 @@ pub(crate) mod tests { use utils::byte_order::{read_le_u32, write_le_u32}; use utils::eventfd::EventFd; use utils::u64_to_usize; - use utils::vm_memory::GuestMemoryMmap; use super::*; + use crate::vstate::memory::GuestMemoryMmap; #[derive(Debug)] pub(crate) struct DummyDevice { @@ -433,7 +433,7 @@ pub(crate) mod tests { #[test] fn test_new() { - let m = utils::vm_memory::test_utils::create_anon_guest_memory( + let m = crate::vstate::memory::test_utils::create_anon_guest_memory( &[(GuestAddress(0), 0x1000)], false, ) @@ -469,7 +469,7 @@ pub(crate) mod tests { #[test] fn test_bus_device_read() { - let m = utils::vm_memory::test_utils::create_anon_guest_memory( + let m = 
crate::vstate::memory::test_utils::create_anon_guest_memory( &[(GuestAddress(0), 0x1000)], false, ) @@ -551,7 +551,7 @@ pub(crate) mod tests { #[test] #[allow(clippy::cognitive_complexity)] fn test_bus_device_write() { - let m = utils::vm_memory::test_utils::create_anon_guest_memory( + let m = crate::vstate::memory::test_utils::create_anon_guest_memory( &[(GuestAddress(0), 0x1000)], false, ) @@ -714,7 +714,7 @@ pub(crate) mod tests { #[test] fn test_bus_device_activate() { - let m = utils::vm_memory::test_utils::create_anon_guest_memory( + let m = crate::vstate::memory::test_utils::create_anon_guest_memory( &[(GuestAddress(0), 0x1000)], false, ) @@ -836,7 +836,7 @@ pub(crate) mod tests { #[test] fn test_bus_device_reset() { - let m = utils::vm_memory::test_utils::create_anon_guest_memory( + let m = crate::vstate::memory::test_utils::create_anon_guest_memory( &[(GuestAddress(0), 0x1000)], false, ) diff --git a/src/vmm/src/devices/virtio/net/device.rs b/src/vmm/src/devices/virtio/net/device.rs index 363943b9dd8..dddf8c9c632 100755 --- a/src/vmm/src/devices/virtio/net/device.rs +++ b/src/vmm/src/devices/virtio/net/device.rs @@ -5,7 +5,9 @@ // Use of this source code is governed by a BSD-style license that can be // found in the THIRD-PARTY file. 
-use std::io::{Read, Write}; +#[cfg(not(test))] +use std::io::Read; +use std::io::Write; use std::net::Ipv4Addr; use std::sync::atomic::AtomicU32; use std::sync::{Arc, Mutex}; @@ -16,12 +18,11 @@ use log::{error, warn}; use utils::eventfd::EventFd; use utils::net::mac::MacAddr; use utils::u64_to_usize; -use utils::vm_memory::{ByteValued, Bytes, GuestMemoryError, GuestMemoryMmap}; +use crate::devices::virtio::gen::virtio_blk::VIRTIO_F_VERSION_1; use crate::devices::virtio::gen::virtio_net::{ - virtio_net_hdr_v1, VIRTIO_F_VERSION_1, VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, - VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_UFO, VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, - VIRTIO_NET_F_MAC, + virtio_net_hdr_v1, VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, VIRTIO_NET_F_GUEST_TSO4, + VIRTIO_NET_F_GUEST_UFO, VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_MAC, }; use crate::devices::virtio::gen::virtio_ring::VIRTIO_RING_F_EVENT_IDX; use crate::dumbo::pdu::arp::ETH_IPV4_FRAME_LEN; @@ -30,6 +31,7 @@ use crate::logger::{IncMetric, METRICS}; use crate::mmds::data_store::Mmds; use crate::mmds::ns::MmdsNetworkStack; use crate::rate_limiter::{BucketUpdate, RateLimiter, TokenType}; +use crate::vstate::memory::{ByteValued, Bytes, GuestMemoryError, GuestMemoryMmap}; const FRAME_HEADER_MAX_LEN: usize = PAYLOAD_OFFSET + ETH_IPV4_FRAME_LEN; @@ -850,23 +852,20 @@ impl VirtioDevice for Net { #[cfg(test)] #[macro_use] pub mod tests { + use std::io::Read; use std::net::Ipv4Addr; use std::str::FromStr; use std::time::Duration; use std::{io, mem, thread}; - use utils::net::mac::MAC_ADDR_LEN; - use utils::vm_memory::{Address, GuestMemory}; + use utils::net::mac::{MacAddr, MAC_ADDR_LEN}; use super::*; use crate::check_metric_after_block; - use crate::devices::virtio::gen::virtio_net::{ - virtio_net_hdr_v1, VIRTIO_F_VERSION_1, VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, - VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_UFO, VIRTIO_NET_F_HOST_TSO4, - VIRTIO_NET_F_HOST_UFO, 
VIRTIO_NET_F_MAC, - }; + use crate::devices::virtio::gen::virtio_ring::VIRTIO_RING_F_EVENT_IDX; + use crate::devices::virtio::iovec::IoVecBuffer; use crate::devices::virtio::net::device::{ - frame_bytes_from_buf, frame_bytes_from_buf_mut, init_vnet_hdr, vnet_hdr_len, + frame_bytes_from_buf, frame_bytes_from_buf_mut, frame_hdr_len, init_vnet_hdr, vnet_hdr_len, }; use crate::devices::virtio::net::test_utils::test::TestHelper; use crate::devices::virtio::net::test_utils::{ @@ -875,12 +874,15 @@ pub mod tests { }; use crate::devices::virtio::net::NET_QUEUE_SIZES; use crate::devices::virtio::{ - Net, VirtioDevice, MAX_BUFFER_SIZE, RX_INDEX, TX_INDEX, TYPE_NET, VIRTQ_DESC_F_WRITE, + IrqType, Net, Tap, VirtioDevice, MAX_BUFFER_SIZE, RX_INDEX, TX_INDEX, TYPE_NET, + VIRTQ_DESC_F_WRITE, }; use crate::dumbo::pdu::arp::{EthIPv4ArpFrame, ETH_IPV4_FRAME_LEN}; use crate::dumbo::pdu::ethernet::ETHERTYPE_ARP; + use crate::dumbo::EthernetFrame; use crate::logger::{IncMetric, METRICS}; - use crate::rate_limiter::{RateLimiter, TokenBucket, TokenType}; + use crate::rate_limiter::{BucketUpdate, RateLimiter, TokenBucket, TokenType}; + use crate::vstate::memory::{Address, GuestMemory}; impl Net { pub(crate) fn read_tap(&mut self) -> io::Result { diff --git a/src/vmm/src/devices/virtio/net/persist.rs b/src/vmm/src/devices/virtio/net/persist.rs index bbaebf6f321..7ce65ec20c3 100644 --- a/src/vmm/src/devices/virtio/net/persist.rs +++ b/src/vmm/src/devices/virtio/net/persist.rs @@ -10,7 +10,6 @@ use std::sync::{Arc, Mutex}; use log::warn; use snapshot::Persist; use utils::net::mac::{MacAddr, MAC_ADDR_LEN}; -use utils::vm_memory::GuestMemoryMmap; use versionize::{VersionMap, Versionize, VersionizeResult}; use versionize_derive::Versionize; @@ -23,6 +22,7 @@ use crate::mmds::ns::MmdsNetworkStack; use crate::mmds::persist::MmdsNetworkStackState; use crate::rate_limiter::persist::RateLimiterState; use crate::rate_limiter::RateLimiter; +use crate::vstate::memory::GuestMemoryMmap; /// Information 
about the network config's that are saved /// at snapshot. diff --git a/src/vmm/src/devices/virtio/net/test_utils.rs b/src/vmm/src/devices/virtio/net/test_utils.rs index 5e43a24ee75..1d868309c81 100644 --- a/src/vmm/src/devices/virtio/net/test_utils.rs +++ b/src/vmm/src/devices/virtio/net/test_utils.rs @@ -14,7 +14,6 @@ use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::{Arc, Mutex}; use utils::net::mac::MacAddr; -use utils::vm_memory::{GuestAddress, GuestMemoryMmap}; #[cfg(test)] use crate::devices::virtio::net::device::vnet_hdr_len; @@ -25,6 +24,7 @@ use crate::devices::DeviceError; use crate::mmds::data_store::Mmds; use crate::mmds::ns::MmdsNetworkStack; use crate::rate_limiter::RateLimiter; +use crate::vstate::memory::{GuestAddress, GuestMemoryMmap}; static NEXT_INDEX: AtomicUsize = AtomicUsize::new(1); @@ -351,7 +351,6 @@ pub mod test { use std::{cmp, fmt, mem}; use event_manager::{EventManager, SubscriberId, SubscriberOps}; - use utils::vm_memory::{Address, Bytes, GuestAddress, GuestMemoryMmap}; use crate::check_metric_after_block; use crate::devices::virtio::net::device::vnet_hdr_len; @@ -365,6 +364,7 @@ pub mod test { VIRTQ_DESC_F_WRITE, }; use crate::logger::{IncMetric, METRICS}; + use crate::vstate::memory::{Address, Bytes, GuestAddress, GuestMemoryMmap}; pub struct TestHelper<'a> { pub event_manager: EventManager>>, @@ -394,7 +394,7 @@ pub mod test { pub fn get_default() -> TestHelper<'a> { let mut event_manager = EventManager::new().unwrap(); let mut net = default_net(); - let mem = utils::vm_memory::test_utils::create_guest_memory_unguarded( + let mem = crate::vstate::memory::test_utils::create_guest_memory_unguarded( &[(GuestAddress(0), MAX_BUFFER_SIZE)], false, ) diff --git a/src/vmm/src/devices/virtio/persist.rs b/src/vmm/src/devices/virtio/persist.rs index 55d253f1952..3fa263467b7 100644 --- a/src/vmm/src/devices/virtio/persist.rs +++ b/src/vmm/src/devices/virtio/persist.rs @@ -8,8 +8,6 @@ use std::sync::atomic::Ordering; use 
std::sync::{Arc, Mutex}; use snapshot::Persist; -use utils::vm_memory::address::Address; -use utils::vm_memory::{GuestAddress, GuestMemoryMmap}; use versionize::{VersionMap, Versionize, VersionizeResult}; use versionize_derive::Versionize; @@ -17,6 +15,8 @@ use super::device::*; use super::queue::*; use crate::devices::virtio::gen::virtio_ring::VIRTIO_RING_F_EVENT_IDX; use crate::devices::virtio::MmioTransport; +use crate::vstate::memory::address::Address; +use crate::vstate::memory::{GuestAddress, GuestMemoryMmap}; /// Errors thrown during restoring virtio state. #[derive(Debug)] diff --git a/src/vmm/src/devices/virtio/queue.rs b/src/vmm/src/devices/virtio/queue.rs index 44475ffb11a..9e2d0e69248 100644 --- a/src/vmm/src/devices/virtio/queue.rs +++ b/src/vmm/src/devices/virtio/queue.rs @@ -9,12 +9,11 @@ use std::cmp::min; use std::num::Wrapping; use std::sync::atomic::{fence, Ordering}; -use utils::vm_memory::{ +use crate::logger::error; +use crate::vstate::memory::{ Address, ByteValued, Bytes, GuestAddress, GuestMemory, GuestMemoryError, GuestMemoryMmap, }; -use crate::logger::error; - pub(super) const VIRTQ_DESC_F_NEXT: u16 = 0x1; pub(super) const VIRTQ_DESC_F_WRITE: u16 = 0x2; @@ -564,15 +563,14 @@ mod verification { use std::mem::ManuallyDrop; use std::num::Wrapping; - use utils::vm_memory::{ - Address, AtomicBitmap, Bytes, FileOffset, GuestAddress, GuestMemory, GuestMemoryMmap, - GuestRegionMmap, MmapRegion, - }; - use crate::devices::virtio::queue::Descriptor; use crate::devices::virtio::{ DescriptorChain, Queue, FIRECRACKER_MAX_QUEUE_SIZE, VIRTQ_DESC_F_NEXT, }; + use crate::vstate::memory::{ + Address, AtomicBitmap, Bytes, FileOffset, GuestAddress, GuestMemory, GuestMemoryMmap, + GuestRegionMmap, MmapRegion, + }; pub struct ProofContext(pub Queue, pub GuestMemoryMmap); @@ -985,12 +983,11 @@ mod verification { #[cfg(test)] mod tests { - use utils::vm_memory::test_utils::create_anon_guest_memory; - use utils::vm_memory::{GuestAddress, GuestMemoryMmap}; - pub 
use super::*; use crate::devices::virtio::test_utils::{default_mem, single_region_mem, VirtQueue}; use crate::devices::virtio::QueueError::{DescIndexOutOfBounds, UsedRing}; + use crate::vstate::memory::test_utils::create_anon_guest_memory; + use crate::vstate::memory::{GuestAddress, GuestMemoryMmap}; impl Queue { fn avail_event(&self, mem: &GuestMemoryMmap) -> u16 { diff --git a/src/vmm/src/devices/virtio/rng/device.rs b/src/vmm/src/devices/virtio/rng/device.rs index 91fcc41f53f..e78f679137d 100644 --- a/src/vmm/src/devices/virtio/rng/device.rs +++ b/src/vmm/src/devices/virtio/rng/device.rs @@ -7,7 +7,6 @@ use std::sync::Arc; use aws_lc_rs::rand; use utils::eventfd::EventFd; -use utils::vm_memory::{GuestMemoryError, GuestMemoryMmap}; use super::{RNG_NUM_QUEUES, RNG_QUEUE}; use crate::devices::virtio::device::{IrqTrigger, IrqType}; @@ -19,6 +18,7 @@ use crate::devices::virtio::{ use crate::devices::DeviceError; use crate::logger::{debug, error, IncMetric, METRICS}; use crate::rate_limiter::{RateLimiter, TokenType}; +use crate::vstate::memory::{GuestMemoryError, GuestMemoryMmap}; pub const ENTROPY_DEV_ID: &str = "rng"; diff --git a/src/vmm/src/devices/virtio/rng/persist.rs b/src/vmm/src/devices/virtio/rng/persist.rs index 8e8c9a32eed..b81170c9878 100644 --- a/src/vmm/src/devices/virtio/rng/persist.rs +++ b/src/vmm/src/devices/virtio/rng/persist.rs @@ -4,7 +4,6 @@ //! Defines the structures needed for saving/restoring entropy devices. 
use snapshot::Persist; -use utils::vm_memory::GuestMemoryMmap; use versionize::{VersionMap, Versionize, VersionizeResult}; use versionize_derive::Versionize; @@ -13,6 +12,7 @@ use crate::devices::virtio::rng::{Entropy, EntropyError, RNG_NUM_QUEUES}; use crate::devices::virtio::{VirtioDeviceState, FIRECRACKER_MAX_QUEUE_SIZE, TYPE_RNG}; use crate::rate_limiter::persist::RateLimiterState; use crate::rate_limiter::RateLimiter; +use crate::vstate::memory::GuestMemoryMmap; #[derive(Debug, Clone, Versionize)] pub struct EntropyState { diff --git a/src/vmm/src/devices/virtio/test_utils.rs b/src/vmm/src/devices/virtio/test_utils.rs index 495bff87fb7..498746c2247 100644 --- a/src/vmm/src/devices/virtio/test_utils.rs +++ b/src/vmm/src/devices/virtio/test_utils.rs @@ -9,9 +9,9 @@ use std::mem; use std::sync::atomic::{AtomicUsize, Ordering}; use utils::u64_to_usize; -use utils::vm_memory::{Address, Bytes, GuestAddress, GuestMemoryMmap}; use crate::devices::virtio::Queue; +use crate::vstate::memory::{Address, Bytes, GuestAddress, GuestMemoryMmap}; #[macro_export] macro_rules! check_metric_after_block { @@ -25,8 +25,11 @@ macro_rules! check_metric_after_block { /// Creates a [`GuestMemoryMmap`] with a single region of the given size starting at guest physical /// address 0 pub fn single_region_mem(region_size: usize) -> GuestMemoryMmap { - utils::vm_memory::test_utils::create_anon_guest_memory(&[(GuestAddress(0), region_size)], false) - .unwrap() + crate::vstate::memory::test_utils::create_anon_guest_memory( + &[(GuestAddress(0), region_size)], + false, + ) + .unwrap() } /// Creates a [`GuestMemoryMmap`] with a single region of size 65536 (= 0x10000 hex) starting at @@ -59,7 +62,7 @@ pub struct SomeplaceInMemory<'a, T> { // The ByteValued trait is required to use mem.read_obj_from_addr and write_obj_at_addr. 
impl<'a, T> SomeplaceInMemory<'a, T> where - T: Debug + utils::vm_memory::ByteValued, + T: Debug + crate::vstate::memory::ByteValued, { fn new(location: GuestAddress, mem: &'a GuestMemoryMmap) -> Self { SomeplaceInMemory { @@ -180,7 +183,7 @@ pub struct VirtqRing<'a, T> { impl<'a, T> VirtqRing<'a, T> where - T: Debug + utils::vm_memory::ByteValued, + T: Debug + crate::vstate::memory::ByteValued, { fn new(start: GuestAddress, mem: &'a GuestMemoryMmap, qsize: u16, alignment: usize) -> Self { assert_eq!(start.0 & (alignment as u64 - 1), 0); @@ -224,7 +227,7 @@ pub struct VirtqUsedElem { } // SAFETY: `VirtqUsedElem` is a POD and contains no padding. -unsafe impl utils::vm_memory::ByteValued for VirtqUsedElem {} +unsafe impl crate::vstate::memory::ByteValued for VirtqUsedElem {} pub type VirtqAvail<'a> = VirtqRing<'a, u16>; pub type VirtqUsed<'a> = VirtqRing<'a, VirtqUsedElem>; @@ -326,13 +329,13 @@ pub(crate) mod test { use std::sync::{Arc, Mutex, MutexGuard}; use event_manager::{EventManager, MutEventSubscriber, SubscriberId, SubscriberOps}; - use utils::vm_memory::{Address, GuestAddress, GuestMemoryMmap}; use crate::devices::virtio::test_utils::{VirtQueue, VirtqDesc}; use crate::devices::virtio::{Queue, VirtioDevice, MAX_BUFFER_SIZE, VIRTQ_DESC_F_NEXT}; + use crate::vstate::memory::{Address, GuestAddress, GuestMemoryMmap}; pub fn create_virtio_mem() -> GuestMemoryMmap { - utils::vm_memory::test_utils::create_guest_memory_unguarded( + crate::vstate::memory::test_utils::create_guest_memory_unguarded( &[(GuestAddress(0), MAX_BUFFER_SIZE)], false, ) diff --git a/src/vmm/src/devices/virtio/vsock/csm/connection.rs b/src/vmm/src/devices/virtio/vsock/csm/connection.rs index 7b6db63eac7..41bc4590944 100644 --- a/src/vmm/src/devices/virtio/vsock/csm/connection.rs +++ b/src/vmm/src/devices/virtio/vsock/csm/connection.rs @@ -84,7 +84,6 @@ use std::time::{Duration, Instant}; use log::{debug, error, info, warn}; use utils::epoll::EventSet; -use utils::vm_memory::{GuestMemoryError, 
GuestMemoryMmap, ReadVolatile, WriteVolatile}; use utils::wrap_usize_to_u32; use super::super::defs::uapi; @@ -93,6 +92,7 @@ use super::super::{VsockChannel, VsockEpollListener, VsockError}; use super::txbuf::TxBuf; use super::{defs, ConnState, PendingRx, PendingRxSet, VsockCsmError}; use crate::logger::{IncMetric, METRICS}; +use crate::vstate::memory::{GuestMemoryError, GuestMemoryMmap, ReadVolatile, WriteVolatile}; /// Trait that vsock connection backends need to implement. /// @@ -682,7 +682,6 @@ mod tests { use std::time::{Duration, Instant}; use utils::eventfd::EventFd; - use utils::vm_memory::{BitmapSlice, Bytes, VolatileSlice}; use super::super::super::defs::uapi; use super::super::defs as csm_defs; @@ -690,6 +689,7 @@ mod tests { use crate::devices::virtio::vsock::device::RXQ_INDEX; use crate::devices::virtio::vsock::test_utils; use crate::devices::virtio::vsock::test_utils::TestContext; + use crate::vstate::memory::{BitmapSlice, Bytes, VolatileSlice}; const LOCAL_CID: u64 = 2; const PEER_CID: u64 = 3; @@ -760,7 +760,7 @@ mod tests { fn read_volatile( &mut self, buf: &mut VolatileSlice, - ) -> Result { + ) -> Result { // Test code, the additional copy incurred by read_from is fine buf.read_from(0, self, buf.len()) } @@ -787,7 +787,7 @@ mod tests { fn write_volatile( &mut self, buf: &VolatileSlice, - ) -> Result { + ) -> Result { // Test code, the additional copy incurred by write_to is fine buf.write_to(0, self, buf.len()) } diff --git a/src/vmm/src/devices/virtio/vsock/csm/txbuf.rs b/src/vmm/src/devices/virtio/vsock/csm/txbuf.rs index 85b6b07e22d..79fd295dfc5 100644 --- a/src/vmm/src/devices/virtio/vsock/csm/txbuf.rs +++ b/src/vmm/src/devices/virtio/vsock/csm/txbuf.rs @@ -6,10 +6,12 @@ use std::fmt::Debug; use std::io::Write; use std::num::Wrapping; -use utils::vm_memory::{BitmapSlice, Bytes, VolatileMemoryError, VolatileSlice, WriteVolatile}; use utils::wrap_usize_to_u32; use super::{defs, VsockCsmError}; +use crate::vstate::memory::{ + BitmapSlice, 
Bytes, VolatileMemoryError, VolatileSlice, WriteVolatile, +}; /// A simple ring-buffer implementation, used by vsock connections to buffer TX (guest -> host) /// data. Memory for this buffer is allocated lazily, since buffering will only be needed when diff --git a/src/vmm/src/devices/virtio/vsock/device.rs b/src/vmm/src/devices/virtio/vsock/device.rs index 83331fdea40..17d5d5b5e1d 100644 --- a/src/vmm/src/devices/virtio/vsock/device.rs +++ b/src/vmm/src/devices/virtio/vsock/device.rs @@ -26,7 +26,6 @@ use std::sync::Arc; use log::{debug, error, warn}; use utils::byte_order; use utils::eventfd::EventFd; -use utils::vm_memory::{Bytes, GuestMemoryMmap}; use super::super::super::DeviceError; use super::defs::uapi; @@ -36,6 +35,7 @@ use crate::devices::virtio::{ ActivateError, DeviceState, IrqTrigger, IrqType, Queue as VirtQueue, VirtioDevice, VsockError, }; use crate::logger::{IncMetric, METRICS}; +use crate::vstate::memory::{Bytes, GuestMemoryMmap}; pub(crate) const RXQ_INDEX: usize = 0; pub(crate) const TXQ_INDEX: usize = 1; diff --git a/src/vmm/src/devices/virtio/vsock/event_handler.rs b/src/vmm/src/devices/virtio/vsock/event_handler.rs index 2a20948e6b7..b6cc65154f3 100755 --- a/src/vmm/src/devices/virtio/vsock/event_handler.rs +++ b/src/vmm/src/devices/virtio/vsock/event_handler.rs @@ -208,12 +208,12 @@ mod tests { use std::sync::{Arc, Mutex}; use event_manager::{EventManager, SubscriberOps}; - use utils::vm_memory::Bytes; use super::super::*; use super::*; use crate::devices::virtio::vsock::packet::VSOCK_PKT_HDR_SIZE; use crate::devices::virtio::vsock::test_utils::{EventHandlerContext, TestContext}; + use crate::vstate::memory::Bytes; #[test] fn test_txq_event() { @@ -413,7 +413,7 @@ mod tests { // desc_idx = 0 we are altering the header (first descriptor in the chain), and when // desc_idx = 1 we are altering the packet buffer. 
fn vsock_bof_helper(test_ctx: &mut TestContext, desc_idx: usize, addr: u64, len: u32) { - use utils::vm_memory::GuestAddress; + use crate::vstate::memory::GuestAddress; assert!(desc_idx <= 1); @@ -453,7 +453,7 @@ mod tests { #[test] fn test_vsock_bof() { - use utils::vm_memory::GuestAddress; + use crate::vstate::memory::GuestAddress; const GAP_SIZE: u32 = 768 << 20; const FIRST_AFTER_GAP: usize = 1 << 32; @@ -461,7 +461,7 @@ mod tests { const MIB: usize = 1 << 20; let mut test_ctx = TestContext::new(); - test_ctx.mem = utils::vm_memory::test_utils::create_anon_guest_memory( + test_ctx.mem = crate::vstate::memory::test_utils::create_anon_guest_memory( &[ (GuestAddress(0), 8 * MIB), (GuestAddress((GAP_START_ADDR - MIB) as u64), MIB), diff --git a/src/vmm/src/devices/virtio/vsock/mod.rs b/src/vmm/src/devices/virtio/vsock/mod.rs index 14ab898839c..f6fa195fb35 100644 --- a/src/vmm/src/devices/virtio/vsock/mod.rs +++ b/src/vmm/src/devices/virtio/vsock/mod.rs @@ -23,13 +23,13 @@ use std::os::unix::io::AsRawFd; use packet::VsockPacket; use utils::epoll::EventSet; -use utils::vm_memory::{GuestMemoryError, GuestMemoryMmap}; pub use self::defs::uapi::VIRTIO_ID_VSOCK as TYPE_VSOCK; pub use self::defs::VSOCK_DEV_ID; pub use self::device::Vsock; pub use self::unix::{VsockUnixBackend, VsockUnixBackendError}; use crate::devices::virtio::persist::PersistError as VirtioStateError; +use crate::vstate::memory::{GuestMemoryError, GuestMemoryMmap}; mod defs { use crate::devices::virtio::FIRECRACKER_MAX_QUEUE_SIZE; diff --git a/src/vmm/src/devices/virtio/vsock/packet.rs b/src/vmm/src/devices/virtio/vsock/packet.rs index 14ecd544876..0b60cf744b4 100644 --- a/src/vmm/src/devices/virtio/vsock/packet.rs +++ b/src/vmm/src/devices/virtio/vsock/packet.rs @@ -18,14 +18,13 @@ use std::fmt::Debug; use std::io::ErrorKind; -use utils::vm_memory::{ +use super::super::DescriptorChain; +use super::{defs, VsockError}; +use crate::vstate::memory::{ Address, AtomicBitmap, ByteValued, Bytes, GuestAddress, 
GuestMemory, GuestMemoryError, GuestMemoryMmap, ReadVolatile, VolatileMemoryError, VolatileSlice, WriteVolatile, BS, }; -use super::super::DescriptorChain; -use super::{defs, VsockError}; - // The vsock packet header is defined by the C struct: // // ```C @@ -435,14 +434,13 @@ impl VsockPacket { #[cfg(test)] mod tests { - use utils::vm_memory::{GuestAddress, GuestMemoryMmap}; - use super::*; use crate::devices::virtio::test_utils::VirtqDesc as GuestQDesc; use crate::devices::virtio::vsock::defs::MAX_PKT_BUF_SIZE; use crate::devices::virtio::vsock::device::{RXQ_INDEX, TXQ_INDEX}; use crate::devices::virtio::vsock::test_utils::TestContext; use crate::devices::virtio::VIRTQ_DESC_F_WRITE; + use crate::vstate::memory::{GuestAddress, GuestMemoryMmap}; macro_rules! create_context { ($test_ctx:ident, $handler_ctx:ident) => { @@ -758,7 +756,7 @@ mod tests { fn test_check_bounds_for_buffer_access_edge_cases() { let mut test_ctx = TestContext::new(); - test_ctx.mem = utils::vm_memory::test_utils::create_guest_memory_unguarded( + test_ctx.mem = crate::vstate::memory::test_utils::create_guest_memory_unguarded( &[ (GuestAddress(0), 500), (GuestAddress(500), 100), diff --git a/src/vmm/src/devices/virtio/vsock/persist.rs b/src/vmm/src/devices/virtio/vsock/persist.rs index d582cd0914f..92c0aba2b28 100644 --- a/src/vmm/src/devices/virtio/vsock/persist.rs +++ b/src/vmm/src/devices/virtio/vsock/persist.rs @@ -8,13 +8,13 @@ use std::sync::atomic::AtomicU32; use std::sync::Arc; use snapshot::Persist; -use utils::vm_memory::GuestMemoryMmap; use versionize::{VersionMap, Versionize, VersionizeError, VersionizeResult}; use versionize_derive::Versionize; use super::*; use crate::devices::virtio::persist::VirtioDeviceState; use crate::devices::virtio::{DeviceState, FIRECRACKER_MAX_QUEUE_SIZE, TYPE_VSOCK}; +use crate::vstate::memory::GuestMemoryMmap; /// The Vsock serializable state. // NOTICE: Any changes to this structure require a snapshot version bump. 
diff --git a/src/vmm/src/devices/virtio/vsock/test_utils.rs b/src/vmm/src/devices/virtio/vsock/test_utils.rs index 44b8117e791..da1f0ec1cce 100644 --- a/src/vmm/src/devices/virtio/vsock/test_utils.rs +++ b/src/vmm/src/devices/virtio/vsock/test_utils.rs @@ -8,7 +8,6 @@ use std::os::unix::io::{AsRawFd, RawFd}; use utils::epoll::EventSet; use utils::eventfd::EventFd; -use utils::vm_memory::{GuestAddress, GuestMemoryMmap}; use crate::devices::virtio::test_utils::{single_region_mem, VirtQueue as GuestQ}; use crate::devices::virtio::vsock::device::{RXQ_INDEX, TXQ_INDEX}; @@ -17,6 +16,7 @@ use crate::devices::virtio::{ VirtioDevice, Vsock, VsockBackend, VsockChannel, VsockEpollListener, VsockError, VIRTQ_DESC_F_NEXT, VIRTQ_DESC_F_WRITE, }; +use crate::vstate::memory::{GuestAddress, GuestMemoryMmap}; #[derive(Debug)] pub struct TestBackend { diff --git a/src/vmm/src/devices/virtio/vsock/unix/muxer.rs b/src/vmm/src/devices/virtio/vsock/unix/muxer.rs index 57899fd376d..b037a6977bc 100644 --- a/src/vmm/src/devices/virtio/vsock/unix/muxer.rs +++ b/src/vmm/src/devices/virtio/vsock/unix/muxer.rs @@ -37,7 +37,6 @@ use std::os::unix::net::{UnixListener, UnixStream}; use log::{debug, error, info, warn}; use utils::epoll::{ControlOperation, Epoll, EpollEvent, EventSet}; -use utils::vm_memory::GuestMemoryMmap; use super::super::csm::ConnState; use super::super::defs::uapi; @@ -47,6 +46,7 @@ use super::muxer_killq::MuxerKillQ; use super::muxer_rxq::MuxerRxQ; use super::{defs, MuxerConnection, VsockUnixBackendError}; use crate::logger::{IncMetric, METRICS}; +use crate::vstate::memory::GuestMemoryMmap; /// A unique identifier of a `MuxerConnection` object. Connections are stored in a hash map, /// keyed by a `ConnMapKey` object. 
diff --git a/src/vmm/src/io_uring/mod.rs b/src/vmm/src/io_uring/mod.rs index 3ba2e6ed7c7..889edad52a8 100644 --- a/src/vmm/src/io_uring/mod.rs +++ b/src/vmm/src/io_uring/mod.rs @@ -384,11 +384,11 @@ mod tests { use utils::skip_if_io_uring_unsupported; use utils::syscall::SyscallReturnCode; use utils::tempfile::TempFile; - use utils::vm_memory::{Bytes, MmapRegion, VolatileMemory}; /// ------------------------------------- /// BEGIN PROPERTY BASED TESTING use super::*; + use crate::vstate::memory::{Bytes, MmapRegion, VolatileMemory}; fn drain_cqueue(ring: &mut IoUring) { while let Some(entry) = unsafe { ring.pop::().unwrap() } { diff --git a/src/vmm/src/io_uring/operation/cqe.rs b/src/vmm/src/io_uring/operation/cqe.rs index 303076e89ec..21cb88d697a 100644 --- a/src/vmm/src/io_uring/operation/cqe.rs +++ b/src/vmm/src/io_uring/operation/cqe.rs @@ -3,9 +3,8 @@ use std::fmt::Debug; -use utils::vm_memory::ByteValued; - use crate::io_uring::bindings::io_uring_cqe; +use crate::vstate::memory::ByteValued; // SAFETY: Struct is POD and contains no references or niches. unsafe impl ByteValued for io_uring_cqe {} diff --git a/src/vmm/src/io_uring/operation/sqe.rs b/src/vmm/src/io_uring/operation/sqe.rs index 7e0542f54c4..082ce95dfca 100644 --- a/src/vmm/src/io_uring/operation/sqe.rs +++ b/src/vmm/src/io_uring/operation/sqe.rs @@ -3,9 +3,8 @@ use std::fmt::{self, Debug}; -use utils::vm_memory::ByteValued; - use crate::io_uring::bindings::io_uring_sqe; +use crate::vstate::memory::ByteValued; // SAFETY: Struct is POD and contains no references or niches. 
unsafe impl ByteValued for io_uring_sqe {} diff --git a/src/vmm/src/io_uring/queue/completion.rs b/src/vmm/src/io_uring/queue/completion.rs index 37fe1fb0a16..f3493b08cd3 100644 --- a/src/vmm/src/io_uring/queue/completion.rs +++ b/src/vmm/src/io_uring/queue/completion.rs @@ -6,11 +6,10 @@ use std::num::Wrapping; use std::os::unix::io::RawFd; use std::sync::atomic::Ordering; -use utils::vm_memory::{Bytes, MmapRegion, VolatileMemory, VolatileMemoryError}; - use super::mmap::{mmap, MmapError}; use crate::io_uring::bindings; use crate::io_uring::operation::Cqe; +use crate::vstate::memory::{Bytes, MmapRegion, VolatileMemory, VolatileMemoryError}; #[derive(Debug, derive_more::From)] /// CQueue Error. diff --git a/src/vmm/src/io_uring/queue/mmap.rs b/src/vmm/src/io_uring/queue/mmap.rs index 8b9b435d09b..202eb0a3c27 100644 --- a/src/vmm/src/io_uring/queue/mmap.rs +++ b/src/vmm/src/io_uring/queue/mmap.rs @@ -4,7 +4,7 @@ use std::io::Error as IOError; use std::os::unix::io::RawFd; -use utils::vm_memory::{MmapRegion, MmapRegionError}; +use crate::vstate::memory::{MmapRegion, MmapRegionError}; #[derive(Debug)] pub enum MmapError { diff --git a/src/vmm/src/io_uring/queue/submission.rs b/src/vmm/src/io_uring/queue/submission.rs index 50403458106..3a67704d7ef 100644 --- a/src/vmm/src/io_uring/queue/submission.rs +++ b/src/vmm/src/io_uring/queue/submission.rs @@ -9,11 +9,11 @@ use std::os::unix::io::RawFd; use std::sync::atomic::Ordering; use utils::syscall::SyscallReturnCode; -use utils::vm_memory::{Bytes, MmapRegion, VolatileMemory, VolatileMemoryError}; use super::mmap::{mmap, MmapError}; use crate::io_uring::bindings; use crate::io_uring::operation::Sqe; +use crate::vstate::memory::{Bytes, MmapRegion, VolatileMemory, VolatileMemoryError}; #[derive(Debug, derive_more::From)] /// SQueue Error. 
diff --git a/src/vmm/src/lib.rs b/src/vmm/src/lib.rs index 7fddfdd6de2..79db51de4aa 100644 --- a/src/vmm/src/lib.rs +++ b/src/vmm/src/lib.rs @@ -122,7 +122,6 @@ use utils::epoll::EventSet; use utils::eventfd::EventFd; use utils::terminal::Terminal; use utils::u64_to_usize; -use utils::vm_memory::{GuestMemory, GuestMemoryMmap, GuestMemoryRegion}; use vstate::vcpu::{self, KvmVcpuConfigureError, StartThreadedError, VcpuSendEventError}; use crate::arch::DeviceType; @@ -141,6 +140,7 @@ use crate::memory_snapshot::SnapshotMemory; use crate::persist::{MicrovmState, MicrovmStateError, VmInfo}; use crate::rate_limiter::BucketUpdate; use crate::vmm_config::instance_info::{InstanceInfo, VmState}; +use crate::vstate::memory::{GuestMemory, GuestMemoryMmap, GuestMemoryRegion}; use crate::vstate::vcpu::VcpuState; pub use crate::vstate::vcpu::{Vcpu, VcpuConfig, VcpuEvent, VcpuHandle, VcpuResponse}; pub use crate::vstate::vm::Vm; diff --git a/src/vmm/src/memory_snapshot.rs b/src/vmm/src/memory_snapshot.rs index ef48f7bcd16..3d1f857efe7 100644 --- a/src/vmm/src/memory_snapshot.rs +++ b/src/vmm/src/memory_snapshot.rs @@ -6,14 +6,14 @@ use std::fs::File; use std::io::SeekFrom; -use utils::vm_memory::{ - Bitmap, FileOffset, GuestAddress, GuestMemory, GuestMemoryError, GuestMemoryMmap, - GuestMemoryRegion, MemoryRegionAddress, WriteVolatile, -}; use utils::{errno, get_page_size, u64_to_usize}; use versionize::{VersionMap, Versionize, VersionizeResult}; use versionize_derive::Versionize; +use crate::vstate::memory::{ + Bitmap, FileOffset, GuestAddress, GuestMemory, GuestMemoryError, GuestMemoryMmap, + GuestMemoryRegion, MemoryRegionAddress, WriteVolatile, +}; use crate::DirtyBitmap; /// State of a guest memory region saved to file/buffer. 
@@ -68,9 +68,9 @@ pub enum SnapshotMemoryError { /// Cannot access file: {0:?} FileHandle(#[from] std::io::Error), /// Cannot create memory: {0:?} - CreateMemory(#[from] utils::vm_memory::Error), + CreateMemory(#[from] crate::vstate::memory::Error), /// Cannot create memory region: {0:?} - CreateRegion(#[from] utils::vm_memory::MmapRegionError), + CreateRegion(#[from] crate::vstate::memory::MmapRegionError), /// Cannot fetch system's page size: {0:?} PageSize(#[from] errno::Error), /// Cannot dump memory: {0:?} @@ -179,7 +179,7 @@ impl SnapshotMemory for GuestMemoryMmap { regions.push((f, GuestAddress(region.base_address), region.size)); } - utils::vm_memory::create_guest_memory(®ions, track_dirty_pages) + crate::vstate::memory::create_guest_memory(®ions, track_dirty_pages) .map_err(SnapshotMemoryError::CreateMemory) } } @@ -191,9 +191,9 @@ mod tests { use utils::get_page_size; use utils::tempfile::TempFile; - use utils::vm_memory::{Bytes, GuestAddress}; use super::*; + use crate::vstate::memory::{Bytes, GuestAddress}; #[test] fn test_describe_state() { @@ -204,7 +204,8 @@ mod tests { (None, GuestAddress(0), page_size), (None, GuestAddress(page_size as u64 * 2), page_size), ]; - let guest_memory = utils::vm_memory::create_guest_memory(&mem_regions[..], true).unwrap(); + let guest_memory = + crate::vstate::memory::create_guest_memory(&mem_regions[..], true).unwrap(); let expected_memory_state = GuestMemoryState { regions: vec![ @@ -229,7 +230,8 @@ mod tests { (None, GuestAddress(0), page_size * 3), (None, GuestAddress(page_size as u64 * 4), page_size * 3), ]; - let guest_memory = utils::vm_memory::create_guest_memory(&mem_regions[..], true).unwrap(); + let guest_memory = + crate::vstate::memory::create_guest_memory(&mem_regions[..], true).unwrap(); let expected_memory_state = GuestMemoryState { regions: vec![ @@ -259,7 +261,8 @@ mod tests { (None, GuestAddress(0), page_size * 2), (None, GuestAddress(page_size as u64 * 3), page_size * 2), ]; - let guest_memory = 
utils::vm_memory::create_guest_memory(&mem_regions[..], true).unwrap(); + let guest_memory = + crate::vstate::memory::create_guest_memory(&mem_regions[..], true).unwrap(); // Check that Firecracker bitmap is clean. let _res: Result<(), SnapshotMemoryError> = guest_memory.iter().try_for_each(|r| { assert!(!r.bitmap().dirty_at(0)); diff --git a/src/vmm/src/persist.rs b/src/vmm/src/persist.rs index 933d9861cdb..7a425ed8dbd 100644 --- a/src/vmm/src/persist.rs +++ b/src/vmm/src/persist.rs @@ -18,7 +18,6 @@ use snapshot::Snapshot; use userfaultfd::{FeatureFlags, Uffd, UffdBuilder}; use utils::sock_ctrl_msg::ScmSocket; use utils::u64_to_usize; -use utils::vm_memory::{GuestMemory, GuestMemoryMmap}; use versionize::{VersionMap, Versionize, VersionizeResult}; use versionize_derive::Versionize; @@ -47,6 +46,7 @@ use crate::vmm_config::machine_config::MAX_SUPPORTED_VCPUS; use crate::vmm_config::snapshot::{ CreateSnapshotParams, LoadSnapshotParams, MemBackendType, SnapshotType, }; +use crate::vstate::memory::{GuestMemory, GuestMemoryMmap}; use crate::vstate::vcpu::{VcpuSendEventError, VcpuState}; use crate::vstate::vm::VmState; use crate::{mem_size_mib, memory_snapshot, vstate, EventManager, Vmm, VmmError}; @@ -827,9 +827,8 @@ mod tests { #[test] fn test_create_snapshot_error_display() { - use utils::vm_memory::GuestMemoryError; - use crate::persist::CreateSnapshotError::*; + use crate::vstate::memory::GuestMemoryError; let err = DirtyBitmap(VmmError::DirtyBitmap(kvm_ioctls::Error::new(20))); let _ = format!("{}{:?}", err, err); diff --git a/src/utils/src/vm_memory.rs b/src/vmm/src/vstate/memory.rs similarity index 98% rename from src/utils/src/vm_memory.rs rename to src/vmm/src/vstate/memory.rs index e02c5084764..52151c258bc 100644 --- a/src/utils/src/vm_memory.rs +++ b/src/vmm/src/vstate/memory.rs @@ -9,6 +9,7 @@ use std::fmt::Debug; use std::io::{Error as IoError, ErrorKind}; use std::os::unix::io::AsRawFd; +use utils::u64_to_usize; pub use vm_memory::bitmap::{AtomicBitmap, 
Bitmap, BitmapSlice, BS}; use vm_memory::mmap::{check_file_offset, NewBitmap}; pub use vm_memory::mmap::{MmapRegionBuilder, MmapRegionError}; @@ -18,10 +19,11 @@ pub use vm_memory::{ VolatileMemory, VolatileMemoryError, VolatileSlice, }; -use crate::u64_to_usize; - +/// Type of GuestMemoryMmap. pub type GuestMemoryMmap = vm_memory::GuestMemoryMmap>; +/// Type of GuestRegionMmap. pub type GuestRegionMmap = vm_memory::GuestRegionMmap>; +/// Type of GuestMmapRegion. pub type GuestMmapRegion = vm_memory::MmapRegion>; const GUARD_PAGE_COUNT: usize = 1; @@ -46,7 +48,7 @@ fn build_guarded_region( flags: i32, track_dirty_pages: bool, ) -> Result { - let page_size = crate::get_page_size().expect("Cannot retrieve page size."); + let page_size = utils::get_page_size().expect("Cannot retrieve page size."); // Create the guarded range size (received size + X pages), // where X is defined as a constant GUARD_PAGE_COUNT. let guarded_size = size + GUARD_PAGE_COUNT * 2 * page_size; @@ -135,6 +137,7 @@ pub fn create_guest_memory( GuestMemoryMmap::from_regions(mmap_regions) } +/// Mark memory range as dirty pub fn mark_dirty_mem(mem: &GuestMemoryMmap, addr: GuestAddress, len: usize) { let _ = mem.try_access(len, addr, |_total, count, caddr, region| { if let Some(bitmap) = region.bitmap() { @@ -400,6 +403,7 @@ impl ReadVolatile for &[u8] { } } +/// Public module with utilities used for testing. 
pub mod test_utils { use super::*; @@ -453,9 +457,10 @@ mod tests { #![allow(clippy::undocumented_unsafe_blocks)] use std::io::{Read, Seek, Write}; + use utils::get_page_size; + use utils::tempfile::TempFile; + use super::*; - use crate::get_page_size; - use crate::tempfile::TempFile; #[derive(Debug)] enum AddrOp { @@ -657,7 +662,7 @@ mod tests { #[test] fn test_mark_dirty_mem() { - let page_size = crate::get_page_size().unwrap(); + let page_size = get_page_size().unwrap(); let region_size = page_size * 3; let regions = vec![ diff --git a/src/vmm/src/vstate/mod.rs b/src/vmm/src/vstate/mod.rs index 0d429e305a2..32d7bd7ea7f 100644 --- a/src/vmm/src/vstate/mod.rs +++ b/src/vmm/src/vstate/mod.rs @@ -1,6 +1,8 @@ // Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 +/// Module with GuestMemory implementation. +pub mod memory; /// Module with Vcpu implementation. pub mod vcpu; /// Module with Vm implementation. diff --git a/src/vmm/src/vstate/vcpu/aarch64.rs b/src/vmm/src/vstate/vcpu/aarch64.rs index 052371ee4ba..2364292b7e1 100644 --- a/src/vmm/src/vstate/vcpu/aarch64.rs +++ b/src/vmm/src/vstate/vcpu/aarch64.rs @@ -7,7 +7,6 @@ use kvm_bindings::*; use kvm_ioctls::*; -use utils::vm_memory::{Address, GuestAddress, GuestMemoryMmap}; use versionize::{VersionMap, Versionize, VersionizeError, VersionizeResult}; use versionize_derive::Versionize; @@ -23,6 +22,7 @@ use crate::cpu_config::aarch64::custom_cpu_template::VcpuFeatures; use crate::cpu_config::templates::CpuConfiguration; use crate::logger::{error, IncMetric, METRICS}; use crate::vcpu::{VcpuConfig, VcpuError}; +use crate::vstate::memory::{Address, GuestAddress, GuestMemoryMmap}; use crate::vstate::vcpu::VcpuEmulation; use crate::vstate::vm::Vm; @@ -309,13 +309,13 @@ mod tests { use std::os::unix::io::AsRawFd; use kvm_bindings::KVM_REG_SIZE_U64; - use utils::vm_memory::GuestMemoryMmap; use super::*; use crate::arch::aarch64::regs::Aarch64RegisterRef; use 
crate::cpu_config::aarch64::CpuConfiguration; use crate::cpu_config::templates::RegisterValueFilter; use crate::vcpu::VcpuConfig; + use crate::vstate::memory::GuestMemoryMmap; use crate::vstate::vm::tests::setup_vm; use crate::vstate::vm::Vm; diff --git a/src/vmm/src/vstate/vcpu/mod.rs b/src/vmm/src/vstate/vcpu/mod.rs index 3a9a8409637..f660f3c322b 100644 --- a/src/vmm/src/vstate/vcpu/mod.rs +++ b/src/vmm/src/vstate/vcpu/mod.rs @@ -695,13 +695,13 @@ pub mod tests { use linux_loader::loader::KernelLoader; use utils::errno; use utils::signal::validate_signal_num; - use utils::vm_memory::{GuestAddress, GuestMemoryMmap}; use super::*; use crate::builder::StartMicrovmError; use crate::devices::bus::DummyDevice; use crate::devices::BusDevice; use crate::seccomp_filters::get_empty_filters; + use crate::vstate::memory::{GuestAddress, GuestMemoryMmap}; use crate::vstate::vcpu::VcpuError as EmulationError; use crate::vstate::vm::tests::setup_vm; use crate::vstate::vm::Vm; diff --git a/src/vmm/src/vstate/vcpu/x86_64.rs b/src/vmm/src/vstate/vcpu/x86_64.rs index ffab2b0971a..ceff91d6220 100644 --- a/src/vmm/src/vstate/vcpu/x86_64.rs +++ b/src/vmm/src/vstate/vcpu/x86_64.rs @@ -13,7 +13,6 @@ use kvm_bindings::{ }; use kvm_ioctls::{VcpuExit, VcpuFd}; use log::{error, warn}; -use utils::vm_memory::{Address, GuestAddress, GuestMemoryMmap}; use versionize::{VersionMap, Versionize, VersionizeError, VersionizeResult}; use versionize_derive::Versionize; @@ -22,6 +21,7 @@ use crate::arch::x86_64::msr::{create_boot_msr_entries, MsrError}; use crate::arch::x86_64::regs::{SetupFpuError, SetupRegistersError, SetupSpecialRegistersError}; use crate::cpu_config::x86_64::{cpuid, CpuConfiguration}; use crate::logger::{IncMetric, METRICS}; +use crate::vstate::memory::{Address, GuestAddress, GuestMemoryMmap}; use crate::vstate::vcpu::{VcpuConfig, VcpuEmulation}; use crate::vstate::vm::Vm; diff --git a/src/vmm/src/vstate/vm.rs b/src/vmm/src/vstate/vm.rs index 5ed242f1e66..4e6166c0e08 100644 --- 
a/src/vmm/src/vstate/vm.rs +++ b/src/vmm/src/vstate/vm.rs @@ -16,9 +16,7 @@ use kvm_bindings::{ }; use kvm_bindings::{kvm_userspace_memory_region, KVM_API_VERSION, KVM_MEM_LOG_DIRTY_PAGES}; use kvm_ioctls::{Kvm, VmFd}; -#[cfg(target_arch = "x86_64")] use utils::u64_to_usize; -use utils::vm_memory::{Address, GuestMemory, GuestMemoryMmap, GuestMemoryRegion}; use versionize::{VersionMap, Versionize, VersionizeResult}; use versionize_derive::Versionize; @@ -27,6 +25,7 @@ use crate::arch::aarch64::gic::GICDevice; #[cfg(target_arch = "aarch64")] use crate::arch::aarch64::gic::GicState; use crate::cpu_config::templates::KvmCapability; +use crate::vstate::memory::{Address, GuestMemory, GuestMemoryMmap, GuestMemoryRegion}; /// Errors associated with the wrappers over KVM ioctls. #[derive(Debug, thiserror::Error, PartialEq, Eq)] @@ -501,13 +500,12 @@ impl fmt::Debug for VmState { #[cfg(test)] pub(crate) mod tests { - use utils::vm_memory::GuestAddress; - use super::*; + use crate::vstate::memory::GuestAddress; // Auxiliary function being used throughout the tests. pub(crate) fn setup_vm(mem_size: usize) -> (Vm, GuestMemoryMmap) { - let gm = utils::vm_memory::test_utils::create_anon_guest_memory( + let gm = crate::vstate::memory::test_utils::create_anon_guest_memory( &[(GuestAddress(0), mem_size)], false, ) @@ -548,7 +546,7 @@ pub(crate) mod tests { let vm = Vm::new(vec![]).expect("Cannot create new vm"); // Create valid memory region and test that the initialization is successful. 
- let gm = utils::vm_memory::test_utils::create_anon_guest_memory( + let gm = crate::vstate::memory::test_utils::create_anon_guest_memory( &[(GuestAddress(0), 0x1000)], false, ) @@ -615,7 +613,7 @@ pub(crate) mod tests { fn test_set_kvm_memory_regions() { let vm = Vm::new(vec![]).expect("Cannot create new vm"); - let gm = utils::vm_memory::test_utils::create_anon_guest_memory( + let gm = crate::vstate::memory::test_utils::create_anon_guest_memory( &[(GuestAddress(0), 0x1000)], false, ) @@ -625,7 +623,7 @@ pub(crate) mod tests { // Trying to set a memory region with a size that is not a multiple of PAGE_SIZE // will result in error. - let gm = utils::vm_memory::test_utils::create_guest_memory_unguarded( + let gm = crate::vstate::memory::test_utils::create_guest_memory_unguarded( &[(GuestAddress(0), 0x10)], false, ) diff --git a/src/vmm/tests/integration_tests.rs b/src/vmm/tests/integration_tests.rs index f55df28294d..80db21e699e 100644 --- a/src/vmm/tests/integration_tests.rs +++ b/src/vmm/tests/integration_tests.rs @@ -233,8 +233,8 @@ fn verify_create_snapshot(is_diff: bool) -> (TempFile, TempFile) { } fn verify_load_snapshot(snapshot_file: TempFile, memory_file: TempFile) { - use utils::vm_memory::GuestMemoryMmap; use vmm::memory_snapshot::SnapshotMemory; + use vmm::vstate::memory::GuestMemoryMmap; let mut event_manager = EventManager::new().unwrap(); let empty_seccomp_filters = get_empty_filters(); diff --git a/src/vmm/tests/io_uring.rs b/src/vmm/tests/io_uring.rs index 88b2667dfd1..420b630fa4d 100644 --- a/src/vmm/tests/io_uring.rs +++ b/src/vmm/tests/io_uring.rs @@ -13,12 +13,12 @@ use utils::eventfd::EventFd; use utils::kernel_version::{min_kernel_version_for_io_uring, KernelVersion}; use utils::skip_if_io_uring_unsupported; use utils::tempfile::TempFile; -use utils::vm_memory::{Bytes, MmapRegion, VolatileMemory}; +use vmm::vstate::memory::{Bytes, MmapRegion, VolatileMemory}; mod test_utils { - use utils::vm_memory::{MmapRegion, VolatileMemory}; use 
vmm::io_uring::operation::{OpCode, Operation}; use vmm::io_uring::{IoUring, IoUringError, SQueueError}; + use vmm::vstate::memory::{MmapRegion, VolatileMemory}; fn drain_cqueue(ring: &mut IoUring) { while let Some(entry) = unsafe { ring.pop::().unwrap() } { From ce2ff8126ef9f7c89d44d0880d9dc6e4f0e64c9e Mon Sep 17 00:00:00 2001 From: Egor Lazarchuk Date: Mon, 25 Sep 2023 16:35:59 +0100 Subject: [PATCH 02/14] refactor(volatile): moved volatile traits into separate module Moved all volatile related things into separate module. Signed-off-by: Egor Lazarchuk --- src/vmm/src/builder.rs | 3 +- .../src/devices/virtio/block/io/sync_io.rs | 5 +- .../devices/virtio/vsock/csm/connection.rs | 10 +- src/vmm/src/devices/virtio/vsock/csm/txbuf.rs | 5 +- src/vmm/src/devices/virtio/vsock/packet.rs | 3 +- src/vmm/src/io_uring/mod.rs | 3 +- src/vmm/src/io_uring/queue/completion.rs | 3 +- src/vmm/src/io_uring/queue/submission.rs | 3 +- src/vmm/src/lib.rs | 2 + src/vmm/src/memory_snapshot.rs | 3 +- src/vmm/src/volatile.rs | 434 ++++++++++++++++++ src/vmm/src/vstate/memory.rs | 415 +---------------- src/vmm/src/vstate/vm.rs | 1 + src/vmm/tests/io_uring.rs | 6 +- 14 files changed, 464 insertions(+), 432 deletions(-) create mode 100644 src/vmm/src/volatile.rs diff --git a/src/vmm/src/builder.rs b/src/vmm/src/builder.rs index 8e25c7f61bf..f8e1964aa78 100644 --- a/src/vmm/src/builder.rs +++ b/src/vmm/src/builder.rs @@ -54,7 +54,8 @@ use crate::resources::VmResources; use crate::vmm_config::boot_source::BootConfig; use crate::vmm_config::instance_info::InstanceInfo; use crate::vmm_config::machine_config::{MachineConfigUpdate, VmConfig, VmConfigError}; -use crate::vstate::memory::{GuestAddress, GuestMemory, GuestMemoryMmap, ReadVolatile}; +use crate::volatile::ReadVolatile; +use crate::vstate::memory::{GuestAddress, GuestMemory, GuestMemoryMmap}; use crate::vstate::vcpu::{Vcpu, VcpuConfig}; use crate::vstate::vm::Vm; use crate::{device_manager, EventManager, RestoreVcpusError, Vmm, VmmError}; 
diff --git a/src/vmm/src/devices/virtio/block/io/sync_io.rs b/src/vmm/src/devices/virtio/block/io/sync_io.rs index 393537de59a..8838f755ada 100644 --- a/src/vmm/src/devices/virtio/block/io/sync_io.rs +++ b/src/vmm/src/devices/virtio/block/io/sync_io.rs @@ -4,9 +4,8 @@ use std::fs::File; use std::io::{Seek, SeekFrom, Write}; -use crate::vstate::memory::{ - GuestAddress, GuestMemory, GuestMemoryError, GuestMemoryMmap, ReadVolatile, WriteVolatile, -}; +use crate::volatile::{ReadVolatile, WriteVolatile}; +use crate::vstate::memory::{GuestAddress, GuestMemory, GuestMemoryError, GuestMemoryMmap}; #[derive(Debug)] pub enum SyncIoError { diff --git a/src/vmm/src/devices/virtio/vsock/csm/connection.rs b/src/vmm/src/devices/virtio/vsock/csm/connection.rs index 41bc4590944..28681472e26 100644 --- a/src/vmm/src/devices/virtio/vsock/csm/connection.rs +++ b/src/vmm/src/devices/virtio/vsock/csm/connection.rs @@ -92,7 +92,8 @@ use super::super::{VsockChannel, VsockEpollListener, VsockError}; use super::txbuf::TxBuf; use super::{defs, ConnState, PendingRx, PendingRxSet, VsockCsmError}; use crate::logger::{IncMetric, METRICS}; -use crate::vstate::memory::{GuestMemoryError, GuestMemoryMmap, ReadVolatile, WriteVolatile}; +use crate::volatile::{ReadVolatile, WriteVolatile}; +use crate::vstate::memory::{GuestMemoryError, GuestMemoryMmap}; /// Trait that vsock connection backends need to implement. 
/// @@ -689,7 +690,8 @@ mod tests { use crate::devices::virtio::vsock::device::RXQ_INDEX; use crate::devices::virtio::vsock::test_utils; use crate::devices::virtio::vsock::test_utils::TestContext; - use crate::vstate::memory::{BitmapSlice, Bytes, VolatileSlice}; + use crate::volatile::{VolatileMemoryError, VolatileSlice}; + use crate::vstate::memory::{BitmapSlice, Bytes}; const LOCAL_CID: u64 = 2; const PEER_CID: u64 = 3; @@ -760,7 +762,7 @@ mod tests { fn read_volatile( &mut self, buf: &mut VolatileSlice, - ) -> Result { + ) -> Result { // Test code, the additional copy incurred by read_from is fine buf.read_from(0, self, buf.len()) } @@ -787,7 +789,7 @@ mod tests { fn write_volatile( &mut self, buf: &VolatileSlice, - ) -> Result { + ) -> Result { // Test code, the additional copy incurred by write_to is fine buf.write_to(0, self, buf.len()) } diff --git a/src/vmm/src/devices/virtio/vsock/csm/txbuf.rs b/src/vmm/src/devices/virtio/vsock/csm/txbuf.rs index 79fd295dfc5..9acfbf3e59c 100644 --- a/src/vmm/src/devices/virtio/vsock/csm/txbuf.rs +++ b/src/vmm/src/devices/virtio/vsock/csm/txbuf.rs @@ -9,9 +9,8 @@ use std::num::Wrapping; use utils::wrap_usize_to_u32; use super::{defs, VsockCsmError}; -use crate::vstate::memory::{ - BitmapSlice, Bytes, VolatileMemoryError, VolatileSlice, WriteVolatile, -}; +use crate::volatile::{VolatileMemoryError, VolatileSlice, WriteVolatile}; +use crate::vstate::memory::{BitmapSlice, Bytes}; /// A simple ring-buffer implementation, used by vsock connections to buffer TX (guest -> host) /// data. 
Memory for this buffer is allocated lazily, since buffering will only be needed when diff --git a/src/vmm/src/devices/virtio/vsock/packet.rs b/src/vmm/src/devices/virtio/vsock/packet.rs index 0b60cf744b4..4261a1a1b82 100644 --- a/src/vmm/src/devices/virtio/vsock/packet.rs +++ b/src/vmm/src/devices/virtio/vsock/packet.rs @@ -20,9 +20,10 @@ use std::io::ErrorKind; use super::super::DescriptorChain; use super::{defs, VsockError}; +use crate::volatile::{ReadVolatile, VolatileMemoryError, VolatileSlice, WriteVolatile}; use crate::vstate::memory::{ Address, AtomicBitmap, ByteValued, Bytes, GuestAddress, GuestMemory, GuestMemoryError, - GuestMemoryMmap, ReadVolatile, VolatileMemoryError, VolatileSlice, WriteVolatile, BS, + GuestMemoryMmap, BS, }; // The vsock packet header is defined by the C struct: diff --git a/src/vmm/src/io_uring/mod.rs b/src/vmm/src/io_uring/mod.rs index 889edad52a8..9dd7cbce091 100644 --- a/src/vmm/src/io_uring/mod.rs +++ b/src/vmm/src/io_uring/mod.rs @@ -388,7 +388,8 @@ mod tests { /// ------------------------------------- /// BEGIN PROPERTY BASED TESTING use super::*; - use crate::vstate::memory::{Bytes, MmapRegion, VolatileMemory}; + use crate::volatile::VolatileMemory; + use crate::vstate::memory::{Bytes, MmapRegion}; fn drain_cqueue(ring: &mut IoUring) { while let Some(entry) = unsafe { ring.pop::().unwrap() } { diff --git a/src/vmm/src/io_uring/queue/completion.rs b/src/vmm/src/io_uring/queue/completion.rs index f3493b08cd3..424ca11590e 100644 --- a/src/vmm/src/io_uring/queue/completion.rs +++ b/src/vmm/src/io_uring/queue/completion.rs @@ -9,7 +9,8 @@ use std::sync::atomic::Ordering; use super::mmap::{mmap, MmapError}; use crate::io_uring::bindings; use crate::io_uring::operation::Cqe; -use crate::vstate::memory::{Bytes, MmapRegion, VolatileMemory, VolatileMemoryError}; +use crate::volatile::{VolatileMemory, VolatileMemoryError}; +use crate::vstate::memory::{Bytes, MmapRegion}; #[derive(Debug, derive_more::From)] /// CQueue Error. 
diff --git a/src/vmm/src/io_uring/queue/submission.rs b/src/vmm/src/io_uring/queue/submission.rs index 3a67704d7ef..3d4b466149d 100644 --- a/src/vmm/src/io_uring/queue/submission.rs +++ b/src/vmm/src/io_uring/queue/submission.rs @@ -13,7 +13,8 @@ use utils::syscall::SyscallReturnCode; use super::mmap::{mmap, MmapError}; use crate::io_uring::bindings; use crate::io_uring::operation::Sqe; -use crate::vstate::memory::{Bytes, MmapRegion, VolatileMemory, VolatileMemoryError}; +use crate::volatile::{VolatileMemory, VolatileMemoryError}; +use crate::vstate::memory::{Bytes, MmapRegion}; #[derive(Debug, derive_more::From)] /// SQueue Error. diff --git a/src/vmm/src/lib.rs b/src/vmm/src/lib.rs index 79db51de4aa..f0ff85c5f57 100644 --- a/src/vmm/src/lib.rs +++ b/src/vmm/src/lib.rs @@ -104,6 +104,8 @@ pub mod utilities; pub mod version_map; /// Wrappers over structures used to configure the VMM. pub mod vmm_config; +/// Module with volotile traits and impls. +pub mod volatile; /// Module with virtual state structs. pub mod vstate; diff --git a/src/vmm/src/memory_snapshot.rs b/src/vmm/src/memory_snapshot.rs index 3d1f857efe7..456bbe4dac1 100644 --- a/src/vmm/src/memory_snapshot.rs +++ b/src/vmm/src/memory_snapshot.rs @@ -10,9 +10,10 @@ use utils::{errno, get_page_size, u64_to_usize}; use versionize::{VersionMap, Versionize, VersionizeResult}; use versionize_derive::Versionize; +use crate::volatile::WriteVolatile; use crate::vstate::memory::{ Bitmap, FileOffset, GuestAddress, GuestMemory, GuestMemoryError, GuestMemoryMmap, - GuestMemoryRegion, MemoryRegionAddress, WriteVolatile, + GuestMemoryRegion, MemoryRegionAddress, }; use crate::DirtyBitmap; diff --git a/src/vmm/src/volatile.rs b/src/vmm/src/volatile.rs new file mode 100644 index 00000000000..598467d3a2b --- /dev/null +++ b/src/vmm/src/volatile.rs @@ -0,0 +1,434 @@ +// Copyright 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+// SPDX-License-Identifier: Apache-2.0 +// +// Portions Copyright 2017 The Chromium OS Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the THIRD-PARTY file. + +use std::fmt::Debug; +use std::io::ErrorKind; +use std::os::unix::io::AsRawFd; + +use vm_memory::bitmap::BitmapSlice; +use vm_memory::Bytes; +pub use vm_memory::{VolatileMemory, VolatileMemoryError, VolatileSlice}; + +/// A version of the standard library's [`Read`] trait that operates on volatile memory instead of +/// slices +/// +/// This trait is needed as rust slices (`&[u8]` and `&mut [u8]`) cannot be used when operating on +/// guest memory [1]. +/// +/// [1]: https://github.com/rust-vmm/vm-memory/pull/217 +pub trait ReadVolatile { + /// Tries to read some bytes into the given [`VolatileSlice`] buffer, returning how many bytes + /// were read. + /// + /// The behavior of implementations should be identical to [`Read::read`] + fn read_volatile( + &mut self, + buf: &mut VolatileSlice, + ) -> Result; + + /// Tries to fill the given [`VolatileSlice`] buffer by reading from `self` returning an error + /// if insufficient bytes could be read. 
+ /// + /// The default implementation is identical to that of [`Read::read_exact`] + fn read_exact_volatile( + &mut self, + buf: &mut VolatileSlice, + ) -> Result<(), VolatileMemoryError> { + // Implementation based on https://github.com/rust-lang/rust/blob/7e7483d26e3cec7a44ef00cf7ae6c9c8c918bec6/library/std/src/io/mod.rs#L465 + + let mut partial_buf = buf.offset(0)?; + + while !partial_buf.is_empty() { + match self.read_volatile(&mut partial_buf) { + Err(VolatileMemoryError::IOError(err)) if err.kind() == ErrorKind::Interrupted => { + continue + } + Ok(0) => { + return Err(VolatileMemoryError::IOError(std::io::Error::new( + ErrorKind::UnexpectedEof, + "failed to fill whole buffer", + ))) + } + Ok(bytes_read) => partial_buf = partial_buf.offset(bytes_read)?, + Err(err) => return Err(err), + } + } + + Ok(()) + } +} + +/// A version of the standard library's [`Write`] trait that operates on volatile memory instead of +/// slices +/// +/// This trait is needed as rust slices (`&[u8]` and `&mut [u8]`) cannot be used when operating on +/// guest memory [1]. +/// +/// [1]: https://github.com/rust-vmm/vm-memory/pull/217 +pub trait WriteVolatile: Debug { + /// Tries to write some bytes from the given [`VolatileSlice`] buffer, returning how many bytes + /// were written. + /// + /// The behavior of implementations should be identical to [`Write::write`] + fn write_volatile( + &mut self, + buf: &VolatileSlice, + ) -> Result; + + /// Tries write the entire content of the given [`VolatileSlice`] buffer to `self` returning an + /// error if not all bytes could be written. 
+ /// + /// The default implementation is identical to that of [`Write::write_all`] + fn write_all_volatile( + &mut self, + buf: &VolatileSlice, + ) -> Result<(), VolatileMemoryError> { + // Based on https://github.com/rust-lang/rust/blob/7e7483d26e3cec7a44ef00cf7ae6c9c8c918bec6/library/std/src/io/mod.rs#L1570 + + let mut partial_buf = buf.offset(0)?; + + while !partial_buf.is_empty() { + match self.write_volatile(&partial_buf) { + Err(VolatileMemoryError::IOError(err)) if err.kind() == ErrorKind::Interrupted => { + continue + } + Ok(0) => { + return Err(VolatileMemoryError::IOError(std::io::Error::new( + ErrorKind::WriteZero, + "failed to write whole buffer", + ))) + } + Ok(bytes_written) => partial_buf = partial_buf.offset(bytes_written)?, + Err(err) => return Err(err), + } + } + + Ok(()) + } +} + +// We explicitly implement our traits for [`std::fs::File`] and [`std::os::unix::net::UnixStream`] +// instead of providing blanket implementation for [`AsRawFd`] due to trait coherence limitations: A +// blanket implementation would prevent us from providing implementations for `&mut [u8]` below, as +// "an upstream crate could implement AsRawFd for &mut [u8]`. + +impl ReadVolatile for std::fs::File { + fn read_volatile( + &mut self, + buf: &mut VolatileSlice, + ) -> Result { + read_volatile_raw_fd(self, buf) + } +} + +impl ReadVolatile for std::os::unix::net::UnixStream { + fn read_volatile( + &mut self, + buf: &mut VolatileSlice, + ) -> Result { + read_volatile_raw_fd(self, buf) + } +} + +/// Tries to do a single `read` syscall on the provided file descriptor, storing the data raed in +/// the given [`VolatileSlice`]. +/// +/// Returns the numbers of bytes read. +fn read_volatile_raw_fd( + raw_fd: &mut Fd, + buf: &mut VolatileSlice, +) -> Result { + let fd = raw_fd.as_raw_fd(); + let guard = buf.ptr_guard_mut(); + let dst = guard.as_ptr().cast::(); + + // SAFETY: We got a valid file descriptor from `AsRawFd`. 
The memory pointed to by `dst` is + // valid for writes of length `buf.len() by the invariants upheld by the constructor + // of `VolatileSlice`. + let bytes_read = unsafe { libc::read(fd, dst, buf.len()) }; + + if bytes_read < 0 { + // We don't know if a partial read might have happened, so mark everything as dirty + buf.bitmap().mark_dirty(0, buf.len()); + + Err(VolatileMemoryError::IOError(std::io::Error::last_os_error())) + } else { + let bytes_read = bytes_read.try_into().unwrap(); + buf.bitmap().mark_dirty(0, bytes_read); + Ok(bytes_read) + } +} + +impl WriteVolatile for std::fs::File { + fn write_volatile( + &mut self, + buf: &VolatileSlice, + ) -> Result { + write_volatile_raw_fd(self, buf) + } +} + +impl WriteVolatile for std::os::unix::net::UnixStream { + fn write_volatile( + &mut self, + buf: &VolatileSlice, + ) -> Result { + write_volatile_raw_fd(self, buf) + } +} + +/// Tries to do a single `write` syscall on the provided file descriptor, attempting to write the +/// data stored in the given [`VolatileSlice`]. +/// +/// Returns the numbers of bytes written. +fn write_volatile_raw_fd( + raw_fd: &mut Fd, + buf: &VolatileSlice, +) -> Result { + let fd = raw_fd.as_raw_fd(); + let guard = buf.ptr_guard(); + let src = guard.as_ptr().cast::(); + + // SAFETY: We got a valid file descriptor from `AsRawFd`. The memory pointed to by `src` is + // valid for reads of length `buf.len() by the invariants upheld by the constructor + // of `VolatileSlice`. + let bytes_written = unsafe { libc::write(fd, src, buf.len()) }; + + if bytes_written < 0 { + Err(VolatileMemoryError::IOError(std::io::Error::last_os_error())) + } else { + Ok(bytes_written.try_into().unwrap()) + } +} + +impl WriteVolatile for &mut [u8] { + fn write_volatile( + &mut self, + buf: &VolatileSlice, + ) -> Result { + // NOTE: The duality of read <-> write here is correct. This is because we translate a call + // "slice.write(buf)" (e.g. write into slice from buf) into "buf.read(slice)" (e.g. 
read + // from buffer into slice). Both express data transfer from the buffer to the slice + let read = buf.read(self, 0)?; + + // Advance the slice, just like the stdlib: https://doc.rust-lang.org/src/std/io/impls.rs.html#335 + *self = std::mem::take(self).split_at_mut(read).1; + + Ok(read) + } + + fn write_all_volatile( + &mut self, + buf: &VolatileSlice, + ) -> Result<(), VolatileMemoryError> { + // Based on https://github.com/rust-lang/rust/blob/f7b831ac8a897273f78b9f47165cf8e54066ce4b/library/std/src/io/impls.rs#L376-L382 + if self.write_volatile(buf)? == buf.len() { + Ok(()) + } else { + Err(VolatileMemoryError::IOError(std::io::Error::new( + ErrorKind::WriteZero, + "failed to write whole buffer", + ))) + } + } +} + +impl ReadVolatile for &[u8] { + fn read_volatile( + &mut self, + buf: &mut VolatileSlice, + ) -> Result { + // NOTE: the duality of read <-> write here is correct. This is because we translate a call + // "slice.read(buf)" (e.g. "read from slice into buf") into "buf.write(slice)" (e.g. 
write + // into buf from slice) + let written = buf.write(self, 0)?; + + // Advance the slice, just like the stdlib: https://doc.rust-lang.org/src/std/io/impls.rs.html#232-310 + *self = self.split_at(written).1; + + Ok(written) + } + + fn read_exact_volatile( + &mut self, + buf: &mut VolatileSlice, + ) -> Result<(), VolatileMemoryError> { + // Based on https://github.com/rust-lang/rust/blob/f7b831ac8a897273f78b9f47165cf8e54066ce4b/library/std/src/io/impls.rs#L282-L302 + if buf.len() > self.len() { + return Err(VolatileMemoryError::IOError(std::io::Error::new( + ErrorKind::UnexpectedEof, + "failed to fill whole buffer", + ))); + } + + self.read_volatile(buf).map(|_| ()) + } +} + +#[cfg(test)] +mod tests { + #![allow(clippy::undocumented_unsafe_blocks)] + use std::io::{Read, Seek, Write}; + + use utils::tempfile::TempFile; + + use super::*; + + #[test] + fn test_read_volatile() { + let test_cases = [ + (vec![1u8, 2], [1u8, 2, 0, 0, 0]), + (vec![1, 2, 3, 4], [1, 2, 3, 4, 0]), + // ensure we don't have a buffer overrun + (vec![5, 6, 7, 8, 9], [5, 6, 7, 8, 0]), + ]; + + for (input, output) in test_cases { + // ---- Test ReadVolatile for &[u8] ---- + // + // Test read_volatile for &[u8] works + let mut memory = vec![0u8; 5]; + + assert_eq!( + (&input[..]) + .read_volatile(&mut VolatileSlice::from(&mut memory[..4])) + .unwrap(), + input.len().min(4) + ); + assert_eq!(&memory, &output); + + // Test read_exact_volatile for &[u8] works + let mut memory = vec![0u8; 5]; + let result = + (&input[..]).read_exact_volatile(&mut VolatileSlice::from(&mut memory[..4])); + + // read_exact fails if there are not enough bytes in input to completely fill + // memory[..4] + if input.len() < 4 { + match result.unwrap_err() { + VolatileMemoryError::IOError(ioe) => { + assert_eq!(ioe.kind(), ErrorKind::UnexpectedEof) + } + err => panic!("{:?}", err), + } + assert_eq!(memory, vec![0u8; 5]); + } else { + result.unwrap(); + assert_eq!(&memory, &output); + } + + // ---- Test ReadVolatile for 
File ---- + + let mut temp_file = TempFile::new().unwrap().into_file(); + temp_file.write_all(input.as_ref()).unwrap(); + temp_file.rewind().unwrap(); + + // Test read_volatile for File works + let mut memory = vec![0u8; 5]; + + assert_eq!( + temp_file + .read_volatile(&mut VolatileSlice::from(&mut memory[..4])) + .unwrap(), + input.len().min(4) + ); + assert_eq!(&memory, &output); + + temp_file.rewind().unwrap(); + + // Test read_exact_volatile for File works + let mut memory = vec![0u8; 5]; + + let read_exact_result = + temp_file.read_exact_volatile(&mut VolatileSlice::from(&mut memory[..4])); + + if input.len() < 4 { + read_exact_result.unwrap_err(); + } else { + read_exact_result.unwrap(); + } + assert_eq!(&memory, &output); + } + } + + #[test] + fn test_write_volatile() { + let test_cases = [ + (vec![1u8, 2], [1u8, 2, 0, 0, 0]), + (vec![1, 2, 3, 4], [1, 2, 3, 4, 0]), + // ensure we don't have a buffer overrun + (vec![5, 6, 7, 8, 9], [5, 6, 7, 8, 0]), + ]; + + for (mut input, output) in test_cases { + // ---- Test WriteVolatile for &mut [u8] ---- + // + // Test write_volatile for &mut [u8] works + let mut memory = vec![0u8; 5]; + + assert_eq!( + (&mut memory[..4]) + .write_volatile(&VolatileSlice::from(input.as_mut_slice())) + .unwrap(), + input.len().min(4) + ); + assert_eq!(&memory, &output); + + // Test write_all_volatile for &mut [u8] works + let mut memory = vec![0u8; 5]; + + let result = + (&mut memory[..4]).write_all_volatile(&VolatileSlice::from(input.as_mut_slice())); + + if input.len() > 4 { + match result.unwrap_err() { + VolatileMemoryError::IOError(ioe) => { + assert_eq!(ioe.kind(), ErrorKind::WriteZero) + } + err => panic!("{:?}", err), + } + // This quirky behavior of writing to the slice even in the case of failure is also + // exhibited by the stdlib + assert_eq!(&memory, &output); + } else { + result.unwrap(); + assert_eq!(&memory, &output); + } + + // ---- Test ẂriteVolatile for File works + // Test write_volatile for File works + let mut 
temp_file = TempFile::new().unwrap().into_file(); + + temp_file + .write_volatile(&VolatileSlice::from(input.as_mut_slice())) + .unwrap(); + temp_file.rewind().unwrap(); + + let mut written = vec![0u8; input.len()]; + temp_file.read_exact(written.as_mut_slice()).unwrap(); + + assert_eq!(input, written); + // check no excess bytes were written to the file + assert_eq!(temp_file.read(&mut [0u8]).unwrap(), 0); + + // Test write_all_volatile for File works + let mut temp_file = TempFile::new().unwrap().into_file(); + + temp_file + .write_all_volatile(&VolatileSlice::from(input.as_mut_slice())) + .unwrap(); + temp_file.rewind().unwrap(); + + let mut written = vec![0u8; input.len()]; + temp_file.read_exact(written.as_mut_slice()).unwrap(); + + assert_eq!(input, written); + // check no excess bytes were written to the file + assert_eq!(temp_file.read(&mut [0u8]).unwrap(), 0); + } + } +} diff --git a/src/vmm/src/vstate/memory.rs b/src/vmm/src/vstate/memory.rs index 52151c258bc..2c90e51f779 100644 --- a/src/vmm/src/vstate/memory.rs +++ b/src/vmm/src/vstate/memory.rs @@ -5,8 +5,7 @@ // Use of this source code is governed by a BSD-style license that can be // found in the THIRD-PARTY file. -use std::fmt::Debug; -use std::io::{Error as IoError, ErrorKind}; +use std::io::Error as IoError; use std::os::unix::io::AsRawFd; use utils::u64_to_usize; @@ -16,7 +15,6 @@ pub use vm_memory::mmap::{MmapRegionBuilder, MmapRegionError}; pub use vm_memory::{ address, Address, ByteValued, Bytes, Error, FileOffset, GuestAddress, GuestMemory, GuestMemoryError, GuestMemoryRegion, GuestUsize, MemoryRegionAddress, MmapRegion, - VolatileMemory, VolatileMemoryError, VolatileSlice, }; /// Type of GuestMemoryMmap. 
@@ -147,262 +145,6 @@ pub fn mark_dirty_mem(mem: &GuestMemoryMmap, addr: GuestAddress, len: usize) { }); } -/// A version of the standard library's [`Read`] trait that operates on volatile memory instead of -/// slices -/// -/// This trait is needed as rust slices (`&[u8]` and `&mut [u8]`) cannot be used when operating on -/// guest memory [1]. -/// -/// [1]: https://github.com/rust-vmm/vm-memory/pull/217 -pub trait ReadVolatile { - /// Tries to read some bytes into the given [`VolatileSlice`] buffer, returning how many bytes - /// were read. - /// - /// The behavior of implementations should be identical to [`Read::read`] - fn read_volatile( - &mut self, - buf: &mut VolatileSlice, - ) -> Result; - - /// Tries to fill the given [`VolatileSlice`] buffer by reading from `self` returning an error - /// if insufficient bytes could be read. - /// - /// The default implementation is identical to that of [`Read::read_exact`] - fn read_exact_volatile( - &mut self, - buf: &mut VolatileSlice, - ) -> Result<(), VolatileMemoryError> { - // Implementation based on https://github.com/rust-lang/rust/blob/7e7483d26e3cec7a44ef00cf7ae6c9c8c918bec6/library/std/src/io/mod.rs#L465 - - let mut partial_buf = buf.offset(0)?; - - while !partial_buf.is_empty() { - match self.read_volatile(&mut partial_buf) { - Err(VolatileMemoryError::IOError(err)) if err.kind() == ErrorKind::Interrupted => { - continue - } - Ok(0) => { - return Err(VolatileMemoryError::IOError(std::io::Error::new( - ErrorKind::UnexpectedEof, - "failed to fill whole buffer", - ))) - } - Ok(bytes_read) => partial_buf = partial_buf.offset(bytes_read)?, - Err(err) => return Err(err), - } - } - - Ok(()) - } -} - -/// A version of the standard library's [`Write`] trait that operates on volatile memory instead of -/// slices -/// -/// This trait is needed as rust slices (`&[u8]` and `&mut [u8]`) cannot be used when operating on -/// guest memory [1]. 
-/// -/// [1]: https://github.com/rust-vmm/vm-memory/pull/217 -pub trait WriteVolatile: Debug { - /// Tries to write some bytes from the given [`VolatileSlice`] buffer, returning how many bytes - /// were written. - /// - /// The behavior of implementations should be identical to [`Write::write`] - fn write_volatile( - &mut self, - buf: &VolatileSlice, - ) -> Result; - - /// Tries write the entire content of the given [`VolatileSlice`] buffer to `self` returning an - /// error if not all bytes could be written. - /// - /// The default implementation is identical to that of [`Write::write_all`] - fn write_all_volatile( - &mut self, - buf: &VolatileSlice, - ) -> Result<(), VolatileMemoryError> { - // Based on https://github.com/rust-lang/rust/blob/7e7483d26e3cec7a44ef00cf7ae6c9c8c918bec6/library/std/src/io/mod.rs#L1570 - - let mut partial_buf = buf.offset(0)?; - - while !partial_buf.is_empty() { - match self.write_volatile(&partial_buf) { - Err(VolatileMemoryError::IOError(err)) if err.kind() == ErrorKind::Interrupted => { - continue - } - Ok(0) => { - return Err(VolatileMemoryError::IOError(std::io::Error::new( - ErrorKind::WriteZero, - "failed to write whole buffer", - ))) - } - Ok(bytes_written) => partial_buf = partial_buf.offset(bytes_written)?, - Err(err) => return Err(err), - } - } - - Ok(()) - } -} - -// We explicitly implement our traits for [`std::fs::File`] and [`std::os::unix::net::UnixStream`] -// instead of providing blanket implementation for [`AsRawFd`] due to trait coherence limitations: A -// blanket implementation would prevent us from providing implementations for `&mut [u8]` below, as -// "an upstream crate could implement AsRawFd for &mut [u8]`. 
- -impl ReadVolatile for std::fs::File { - fn read_volatile( - &mut self, - buf: &mut VolatileSlice, - ) -> Result { - read_volatile_raw_fd(self, buf) - } -} - -impl ReadVolatile for std::os::unix::net::UnixStream { - fn read_volatile( - &mut self, - buf: &mut VolatileSlice, - ) -> Result { - read_volatile_raw_fd(self, buf) - } -} - -/// Tries to do a single `read` syscall on the provided file descriptor, storing the data raed in -/// the given [`VolatileSlice`]. -/// -/// Returns the numbers of bytes read. -fn read_volatile_raw_fd( - raw_fd: &mut Fd, - buf: &mut VolatileSlice, -) -> Result { - let fd = raw_fd.as_raw_fd(); - let guard = buf.ptr_guard_mut(); - let dst = guard.as_ptr().cast::(); - - // SAFETY: We got a valid file descriptor from `AsRawFd`. The memory pointed to by `dst` is - // valid for writes of length `buf.len() by the invariants upheld by the constructor - // of `VolatileSlice`. - let bytes_read = unsafe { libc::read(fd, dst, buf.len()) }; - - if bytes_read < 0 { - // We don't know if a partial read might have happened, so mark everything as dirty - buf.bitmap().mark_dirty(0, buf.len()); - - Err(VolatileMemoryError::IOError(std::io::Error::last_os_error())) - } else { - let bytes_read = bytes_read.try_into().unwrap(); - buf.bitmap().mark_dirty(0, bytes_read); - Ok(bytes_read) - } -} - -impl WriteVolatile for std::fs::File { - fn write_volatile( - &mut self, - buf: &VolatileSlice, - ) -> Result { - write_volatile_raw_fd(self, buf) - } -} - -impl WriteVolatile for std::os::unix::net::UnixStream { - fn write_volatile( - &mut self, - buf: &VolatileSlice, - ) -> Result { - write_volatile_raw_fd(self, buf) - } -} - -/// Tries to do a single `write` syscall on the provided file descriptor, attempting to write the -/// data stored in the given [`VolatileSlice`]. -/// -/// Returns the numbers of bytes written. 
-fn write_volatile_raw_fd( - raw_fd: &mut Fd, - buf: &VolatileSlice, -) -> Result { - let fd = raw_fd.as_raw_fd(); - let guard = buf.ptr_guard(); - let src = guard.as_ptr().cast::(); - - // SAFETY: We got a valid file descriptor from `AsRawFd`. The memory pointed to by `src` is - // valid for reads of length `buf.len() by the invariants upheld by the constructor - // of `VolatileSlice`. - let bytes_written = unsafe { libc::write(fd, src, buf.len()) }; - - if bytes_written < 0 { - Err(VolatileMemoryError::IOError(std::io::Error::last_os_error())) - } else { - Ok(bytes_written.try_into().unwrap()) - } -} - -impl WriteVolatile for &mut [u8] { - fn write_volatile( - &mut self, - buf: &VolatileSlice, - ) -> Result { - // NOTE: The duality of read <-> write here is correct. This is because we translate a call - // "slice.write(buf)" (e.g. write into slice from buf) into "buf.read(slice)" (e.g. read - // from buffer into slice). Both express data transfer from the buffer to the slice - let read = buf.read(self, 0)?; - - // Advance the slice, just like the stdlib: https://doc.rust-lang.org/src/std/io/impls.rs.html#335 - *self = std::mem::take(self).split_at_mut(read).1; - - Ok(read) - } - - fn write_all_volatile( - &mut self, - buf: &VolatileSlice, - ) -> Result<(), VolatileMemoryError> { - // Based on https://github.com/rust-lang/rust/blob/f7b831ac8a897273f78b9f47165cf8e54066ce4b/library/std/src/io/impls.rs#L376-L382 - if self.write_volatile(buf)? == buf.len() { - Ok(()) - } else { - Err(VolatileMemoryError::IOError(std::io::Error::new( - ErrorKind::WriteZero, - "failed to write whole buffer", - ))) - } - } -} - -impl ReadVolatile for &[u8] { - fn read_volatile( - &mut self, - buf: &mut VolatileSlice, - ) -> Result { - // NOTE: the duality of read <-> write here is correct. This is because we translate a call - // "slice.read(buf)" (e.g. "read from slice into buf") into "buf.write(slice)" (e.g. 
write - // into buf from slice) - let written = buf.write(self, 0)?; - - // Advance the slice, just like the stdlib: https://doc.rust-lang.org/src/std/io/impls.rs.html#232-310 - *self = self.split_at(written).1; - - Ok(written) - } - - fn read_exact_volatile( - &mut self, - buf: &mut VolatileSlice, - ) -> Result<(), VolatileMemoryError> { - // Based on https://github.com/rust-lang/rust/blob/f7b831ac8a897273f78b9f47165cf8e54066ce4b/library/std/src/io/impls.rs#L282-L302 - if buf.len() > self.len() { - return Err(VolatileMemoryError::IOError(std::io::Error::new( - ErrorKind::UnexpectedEof, - "failed to fill whole buffer", - ))); - } - - self.read_volatile(buf).map(|_| ()) - } -} - /// Public module with utilities used for testing. pub mod test_utils { use super::*; @@ -455,7 +197,6 @@ pub mod test_utils { #[cfg(test)] mod tests { #![allow(clippy::undocumented_unsafe_blocks)] - use std::io::{Read, Seek, Write}; use utils::get_page_size; use utils::tempfile::TempFile; @@ -710,158 +451,4 @@ mod tests { .unwrap(); } } - - #[test] - fn test_read_volatile() { - let test_cases = [ - (vec![1u8, 2], [1u8, 2, 0, 0, 0]), - (vec![1, 2, 3, 4], [1, 2, 3, 4, 0]), - // ensure we don't have a buffer overrun - (vec![5, 6, 7, 8, 9], [5, 6, 7, 8, 0]), - ]; - - for (input, output) in test_cases { - // ---- Test ReadVolatile for &[u8] ---- - // - // Test read_volatile for &[u8] works - let mut memory = vec![0u8; 5]; - - assert_eq!( - (&input[..]) - .read_volatile(&mut VolatileSlice::from(&mut memory[..4])) - .unwrap(), - input.len().min(4) - ); - assert_eq!(&memory, &output); - - // Test read_exact_volatile for &[u8] works - let mut memory = vec![0u8; 5]; - let result = - (&input[..]).read_exact_volatile(&mut VolatileSlice::from(&mut memory[..4])); - - // read_exact fails if there are not enough bytes in input to completely fill - // memory[..4] - if input.len() < 4 { - match result.unwrap_err() { - VolatileMemoryError::IOError(ioe) => { - assert_eq!(ioe.kind(), ErrorKind::UnexpectedEof) - 
} - err => panic!("{:?}", err), - } - assert_eq!(memory, vec![0u8; 5]); - } else { - result.unwrap(); - assert_eq!(&memory, &output); - } - - // ---- Test ReadVolatile for File ---- - - let mut temp_file = TempFile::new().unwrap().into_file(); - temp_file.write_all(input.as_ref()).unwrap(); - temp_file.rewind().unwrap(); - - // Test read_volatile for File works - let mut memory = vec![0u8; 5]; - - assert_eq!( - temp_file - .read_volatile(&mut VolatileSlice::from(&mut memory[..4])) - .unwrap(), - input.len().min(4) - ); - assert_eq!(&memory, &output); - - temp_file.rewind().unwrap(); - - // Test read_exact_volatile for File works - let mut memory = vec![0u8; 5]; - - let read_exact_result = - temp_file.read_exact_volatile(&mut VolatileSlice::from(&mut memory[..4])); - - if input.len() < 4 { - read_exact_result.unwrap_err(); - } else { - read_exact_result.unwrap(); - } - assert_eq!(&memory, &output); - } - } - - #[test] - fn test_write_volatile() { - let test_cases = [ - (vec![1u8, 2], [1u8, 2, 0, 0, 0]), - (vec![1, 2, 3, 4], [1, 2, 3, 4, 0]), - // ensure we don't have a buffer overrun - (vec![5, 6, 7, 8, 9], [5, 6, 7, 8, 0]), - ]; - - for (mut input, output) in test_cases { - // ---- Test WriteVolatile for &mut [u8] ---- - // - // Test write_volatile for &mut [u8] works - let mut memory = vec![0u8; 5]; - - assert_eq!( - (&mut memory[..4]) - .write_volatile(&VolatileSlice::from(input.as_mut_slice())) - .unwrap(), - input.len().min(4) - ); - assert_eq!(&memory, &output); - - // Test write_all_volatile for &mut [u8] works - let mut memory = vec![0u8; 5]; - - let result = - (&mut memory[..4]).write_all_volatile(&VolatileSlice::from(input.as_mut_slice())); - - if input.len() > 4 { - match result.unwrap_err() { - VolatileMemoryError::IOError(ioe) => { - assert_eq!(ioe.kind(), ErrorKind::WriteZero) - } - err => panic!("{:?}", err), - } - // This quirky behavior of writing to the slice even in the case of failure is also - // exhibited by the stdlib - assert_eq!(&memory, 
&output); - } else { - result.unwrap(); - assert_eq!(&memory, &output); - } - - // ---- Test ẂriteVolatile for File works - // Test write_volatile for File works - let mut temp_file = TempFile::new().unwrap().into_file(); - - temp_file - .write_volatile(&VolatileSlice::from(input.as_mut_slice())) - .unwrap(); - temp_file.rewind().unwrap(); - - let mut written = vec![0u8; input.len()]; - temp_file.read_exact(written.as_mut_slice()).unwrap(); - - assert_eq!(input, written); - // check no excess bytes were written to the file - assert_eq!(temp_file.read(&mut [0u8]).unwrap(), 0); - - // Test write_all_volatile for File works - let mut temp_file = TempFile::new().unwrap().into_file(); - - temp_file - .write_all_volatile(&VolatileSlice::from(input.as_mut_slice())) - .unwrap(); - temp_file.rewind().unwrap(); - - let mut written = vec![0u8; input.len()]; - temp_file.read_exact(written.as_mut_slice()).unwrap(); - - assert_eq!(input, written); - // check no excess bytes were written to the file - assert_eq!(temp_file.read(&mut [0u8]).unwrap(), 0); - } - } } diff --git a/src/vmm/src/vstate/vm.rs b/src/vmm/src/vstate/vm.rs index 4e6166c0e08..9f38707e957 100644 --- a/src/vmm/src/vstate/vm.rs +++ b/src/vmm/src/vstate/vm.rs @@ -16,6 +16,7 @@ use kvm_bindings::{ }; use kvm_bindings::{kvm_userspace_memory_region, KVM_API_VERSION, KVM_MEM_LOG_DIRTY_PAGES}; use kvm_ioctls::{Kvm, VmFd}; +#[cfg(target_arch = "x86_64")] use utils::u64_to_usize; use versionize::{VersionMap, Versionize, VersionizeResult}; use versionize_derive::Versionize; diff --git a/src/vmm/tests/io_uring.rs b/src/vmm/tests/io_uring.rs index 420b630fa4d..81b7932cf29 100644 --- a/src/vmm/tests/io_uring.rs +++ b/src/vmm/tests/io_uring.rs @@ -13,12 +13,14 @@ use utils::eventfd::EventFd; use utils::kernel_version::{min_kernel_version_for_io_uring, KernelVersion}; use utils::skip_if_io_uring_unsupported; use utils::tempfile::TempFile; -use vmm::vstate::memory::{Bytes, MmapRegion, VolatileMemory}; +use 
vmm::volatile::VolatileMemory; +use vmm::vstate::memory::{Bytes, MmapRegion}; mod test_utils { use vmm::io_uring::operation::{OpCode, Operation}; use vmm::io_uring::{IoUring, IoUringError, SQueueError}; - use vmm::vstate::memory::{MmapRegion, VolatileMemory}; + use vmm::volatile::VolatileMemory; + use vmm::vstate::memory::MmapRegion; fn drain_cqueue(ring: &mut IoUring) { while let Some(entry) = unsafe { ring.pop::().unwrap() } { From 3e8c1fb14f18734720717ba930ae7abbddd0bb2b Mon Sep 17 00:00:00 2001 From: Egor Lazarchuk Date: Mon, 25 Sep 2023 16:51:49 +0100 Subject: [PATCH 03/14] refactor(memory): renamed error type Renamed `Error` into `VmMemoryError` We try to avoid usind `Error` word for errors. Renaming it make is clearer what type of error this is. Signed-off-by: Egor Lazarchuk --- src/vmm/src/builder.rs | 2 +- src/vmm/src/memory_snapshot.rs | 2 +- src/vmm/src/vstate/memory.rs | 14 +++++++------- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/src/vmm/src/builder.rs b/src/vmm/src/builder.rs index f8e1964aa78..39954667376 100644 --- a/src/vmm/src/builder.rs +++ b/src/vmm/src/builder.rs @@ -85,7 +85,7 @@ pub enum StartMicrovmError { CreateLegacyDevice(device_manager::legacy::LegacyDeviceError), /// Memory regions are overlapping or mmap fails. #[error("Invalid Memory Configuration: {}", format!("{:?}", .0).replace('\"', ""))] - GuestMemoryMmap(crate::vstate::memory::Error), + GuestMemoryMmap(crate::vstate::memory::VmMemoryError), /// Cannot load initrd due to an invalid memory configuration. 
#[error("Cannot load initrd due to an invalid memory configuration.")] InitrdLoad, diff --git a/src/vmm/src/memory_snapshot.rs b/src/vmm/src/memory_snapshot.rs index 456bbe4dac1..48d9ee0bdf6 100644 --- a/src/vmm/src/memory_snapshot.rs +++ b/src/vmm/src/memory_snapshot.rs @@ -69,7 +69,7 @@ pub enum SnapshotMemoryError { /// Cannot access file: {0:?} FileHandle(#[from] std::io::Error), /// Cannot create memory: {0:?} - CreateMemory(#[from] crate::vstate::memory::Error), + CreateMemory(#[from] crate::vstate::memory::VmMemoryError), /// Cannot create memory region: {0:?} CreateRegion(#[from] crate::vstate::memory::MmapRegionError), /// Cannot fetch system's page size: {0:?} diff --git a/src/vmm/src/vstate/memory.rs b/src/vmm/src/vstate/memory.rs index 2c90e51f779..e5907e7e21c 100644 --- a/src/vmm/src/vstate/memory.rs +++ b/src/vmm/src/vstate/memory.rs @@ -13,8 +13,8 @@ pub use vm_memory::bitmap::{AtomicBitmap, Bitmap, BitmapSlice, BS}; use vm_memory::mmap::{check_file_offset, NewBitmap}; pub use vm_memory::mmap::{MmapRegionBuilder, MmapRegionError}; pub use vm_memory::{ - address, Address, ByteValued, Bytes, Error, FileOffset, GuestAddress, GuestMemory, - GuestMemoryError, GuestMemoryRegion, GuestUsize, MemoryRegionAddress, MmapRegion, + address, Address, ByteValued, Bytes, Error as VmMemoryError, FileOffset, GuestAddress, + GuestMemory, GuestMemoryError, GuestMemoryRegion, GuestUsize, MemoryRegionAddress, MmapRegion, }; /// Type of GuestMemoryMmap. 
@@ -115,7 +115,7 @@ fn build_guarded_region( pub fn create_guest_memory( regions: &[(Option, GuestAddress, usize)], track_dirty_pages: bool, -) -> std::result::Result { +) -> Result { let prot = libc::PROT_READ | libc::PROT_WRITE; let mut mmap_regions = Vec::with_capacity(regions.len()); @@ -127,7 +127,7 @@ pub fn create_guest_memory( let mmap_region = build_guarded_region(region.0.clone(), region.2, prot, flags, track_dirty_pages) - .map_err(Error::MmapRegion)?; + .map_err(VmMemoryError::MmapRegion)?; mmap_regions.push(GuestRegionMmap::new(mmap_region, region.1)?); } @@ -157,7 +157,7 @@ pub mod test_utils { pub fn create_guest_memory_unguarded( regions: &[(GuestAddress, usize)], track_dirty_pages: bool, - ) -> std::result::Result { + ) -> Result { let prot = libc::PROT_READ | libc::PROT_WRITE; let flags = libc::MAP_NORESERVE | libc::MAP_PRIVATE | libc::MAP_ANONYMOUS; let mut mmap_regions = Vec::with_capacity(regions.len()); @@ -174,7 +174,7 @@ pub mod test_utils { .with_mmap_prot(prot) .with_mmap_flags(flags) .build() - .map_err(Error::MmapRegion)?, + .map_err(VmMemoryError::MmapRegion)?, region.0, )?); } @@ -186,7 +186,7 @@ pub mod test_utils { pub fn create_anon_guest_memory( regions: &[(GuestAddress, usize)], track_dirty_pages: bool, - ) -> std::result::Result { + ) -> Result { create_guest_memory( ®ions.iter().map(|r| (None, r.0, r.1)).collect::>(), track_dirty_pages, From 04a46d903515746bc409ceb1f01b5f6c0343ef7c Mon Sep 17 00:00:00 2001 From: Egor Lazarchuk Date: Mon, 25 Sep 2023 17:36:34 +0100 Subject: [PATCH 04/14] refactor(memory): minor `memory` module refactor Changed names of some variables and used tuple unpacking. 
Signed-off-by: Egor Lazarchuk --- src/vmm/src/vstate/memory.rs | 25 +++++++++++++++---------- 1 file changed, 15 insertions(+), 10 deletions(-) diff --git a/src/vmm/src/vstate/memory.rs b/src/vmm/src/vstate/memory.rs index e5907e7e21c..d9fe2f5c780 100644 --- a/src/vmm/src/vstate/memory.rs +++ b/src/vmm/src/vstate/memory.rs @@ -40,7 +40,7 @@ const GUARD_PAGE_COUNT: usize = 1; /// acts as a safety net for accessing out-of-bounds addresses that are not allocated for the /// guest's memory. fn build_guarded_region( - maybe_file_offset: Option, + file_offset: Option<&FileOffset>, size: usize, prot: i32, flags: i32, @@ -68,8 +68,8 @@ fn build_guarded_region( return Err(MmapRegionError::Mmap(IoError::last_os_error())); } - let (fd, offset) = match maybe_file_offset { - Some(ref file_offset) => { + let (fd, offset) = match file_offset { + Some(file_offset) => { check_file_offset(file_offset, size)?; (file_offset.file().as_raw_fd(), file_offset.start()) } @@ -119,17 +119,22 @@ pub fn create_guest_memory( let prot = libc::PROT_READ | libc::PROT_WRITE; let mut mmap_regions = Vec::with_capacity(regions.len()); - for region in regions { - let flags = match region.0 { + for (file_offset, guest_address, region_size) in regions { + let flags = match file_offset { None => libc::MAP_NORESERVE | libc::MAP_PRIVATE | libc::MAP_ANONYMOUS, Some(_) => libc::MAP_NORESERVE | libc::MAP_PRIVATE, }; - let mmap_region = - build_guarded_region(region.0.clone(), region.2, prot, flags, track_dirty_pages) - .map_err(VmMemoryError::MmapRegion)?; + let mmap_region = build_guarded_region( + file_offset.as_ref(), + *region_size, + prot, + flags, + track_dirty_pages, + ) + .map_err(VmMemoryError::MmapRegion)?; - mmap_regions.push(GuestRegionMmap::new(mmap_region, region.1)?); + mmap_regions.push(GuestRegionMmap::new(mmap_region, *guest_address)?); } GuestMemoryMmap::from_regions(mmap_regions) @@ -331,7 +336,7 @@ mod tests { assert_eq!(unsafe { libc::ftruncate(file.as_raw_fd(), 4096 * 10) }, 0); let 
region = build_guarded_region( - Some(FileOffset::new(file, offset)), + Some(&FileOffset::new(file, offset)), size, prot, flags, From 1e6ae719e734c7dd9b212c999912b19c0b58ed3c Mon Sep 17 00:00:00 2001 From: Egor Lazarchuk Date: Mon, 25 Sep 2023 18:18:12 +0100 Subject: [PATCH 05/14] refactor(memory): moved `memory_snapshot` into `memory` Moved all content of `memory_snapshot` module into `vstate/memory` module. Now all memory related code is in one place. Signed-off-by: Egor Lazarchuk --- src/vmm/src/lib.rs | 4 +- src/vmm/src/memory_snapshot.rs | 372 ----------------------------- src/vmm/src/persist.rs | 16 +- src/vmm/src/vstate/memory.rs | 356 ++++++++++++++++++++++++++- src/vmm/tests/integration_tests.rs | 3 +- 5 files changed, 364 insertions(+), 387 deletions(-) delete mode 100644 src/vmm/src/memory_snapshot.rs diff --git a/src/vmm/src/lib.rs b/src/vmm/src/lib.rs index f0ff85c5f57..e44fb748416 100644 --- a/src/vmm/src/lib.rs +++ b/src/vmm/src/lib.rs @@ -85,7 +85,6 @@ pub mod devices; pub mod dumbo; /// Logger pub mod logger; -pub mod memory_snapshot; /// microVM Metadata Service MMDS pub mod mmds; /// Save/restore utilities. 
@@ -138,11 +137,10 @@ use crate::devices::virtio::{ TYPE_NET, }; use crate::logger::{error, info, warn, MetricsError, METRICS}; -use crate::memory_snapshot::SnapshotMemory; use crate::persist::{MicrovmState, MicrovmStateError, VmInfo}; use crate::rate_limiter::BucketUpdate; use crate::vmm_config::instance_info::{InstanceInfo, VmState}; -use crate::vstate::memory::{GuestMemory, GuestMemoryMmap, GuestMemoryRegion}; +use crate::vstate::memory::{GuestMemory, GuestMemoryMmap, GuestMemoryRegion, SnapshotMemory}; use crate::vstate::vcpu::VcpuState; pub use crate::vstate::vcpu::{Vcpu, VcpuConfig, VcpuEvent, VcpuHandle, VcpuResponse}; pub use crate::vstate::vm::Vm; diff --git a/src/vmm/src/memory_snapshot.rs b/src/vmm/src/memory_snapshot.rs deleted file mode 100644 index 48d9ee0bdf6..00000000000 --- a/src/vmm/src/memory_snapshot.rs +++ /dev/null @@ -1,372 +0,0 @@ -// Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. -// SPDX-License-Identifier: Apache-2.0 - -//! Defines functionality for creating guest memory snapshots. - -use std::fs::File; -use std::io::SeekFrom; - -use utils::{errno, get_page_size, u64_to_usize}; -use versionize::{VersionMap, Versionize, VersionizeResult}; -use versionize_derive::Versionize; - -use crate::volatile::WriteVolatile; -use crate::vstate::memory::{ - Bitmap, FileOffset, GuestAddress, GuestMemory, GuestMemoryError, GuestMemoryMmap, - GuestMemoryRegion, MemoryRegionAddress, -}; -use crate::DirtyBitmap; - -/// State of a guest memory region saved to file/buffer. -#[derive(Debug, PartialEq, Eq, Versionize)] -// NOTICE: Any changes to this structure require a snapshot version bump. -pub struct GuestMemoryRegionState { - // This should have been named `base_guest_addr` since it's _guest_ addr, but for - // backward compatibility we have to keep this name. At least this comment should help. - /// Base GuestAddress. - pub base_address: u64, - /// Region size. 
- pub size: usize, - /// Offset in file/buffer where the region is saved. - pub offset: u64, -} - -/// Describes guest memory regions and their snapshot file mappings. -#[derive(Debug, Default, PartialEq, Eq, Versionize)] -// NOTICE: Any changes to this structure require a snapshot version bump. -pub struct GuestMemoryState { - /// List of regions. - pub regions: Vec, -} - -/// Defines the interface for snapshotting memory. -pub trait SnapshotMemory -where - Self: Sized, -{ - /// Describes GuestMemoryMmap through a GuestMemoryState struct. - fn describe(&self) -> GuestMemoryState; - /// Dumps all contents of GuestMemoryMmap to a writer. - fn dump(&self, writer: &mut T) -> Result<(), SnapshotMemoryError>; - /// Dumps all pages of GuestMemoryMmap present in `dirty_bitmap` to a writer. - fn dump_dirty( - &self, - writer: &mut T, - dirty_bitmap: &DirtyBitmap, - ) -> Result<(), SnapshotMemoryError>; - /// Creates a GuestMemoryMmap given a `file` containing the data - /// and a `state` containing mapping information. - fn restore( - file: Option<&File>, - state: &GuestMemoryState, - track_dirty_pages: bool, - ) -> Result; -} - -/// Errors associated with dumping guest memory to file. -#[derive(Debug, thiserror::Error, displaydoc::Display)] -pub enum SnapshotMemoryError { - /// Cannot access file: {0:?} - FileHandle(#[from] std::io::Error), - /// Cannot create memory: {0:?} - CreateMemory(#[from] crate::vstate::memory::VmMemoryError), - /// Cannot create memory region: {0:?} - CreateRegion(#[from] crate::vstate::memory::MmapRegionError), - /// Cannot fetch system's page size: {0:?} - PageSize(#[from] errno::Error), - /// Cannot dump memory: {0:?} - WriteMemory(#[from] GuestMemoryError), -} - -impl SnapshotMemory for GuestMemoryMmap { - /// Describes GuestMemoryMmap through a GuestMemoryState struct. 
- fn describe(&self) -> GuestMemoryState { - let mut guest_memory_state = GuestMemoryState::default(); - let mut offset = 0; - self.iter().for_each(|region| { - guest_memory_state.regions.push(GuestMemoryRegionState { - base_address: region.start_addr().0, - size: u64_to_usize(region.len()), - offset, - }); - - offset += region.len(); - }); - guest_memory_state - } - - /// Dumps all contents of GuestMemoryMmap to a writer. - fn dump(&self, writer: &mut T) -> Result<(), SnapshotMemoryError> { - self.iter() - .try_for_each(|region| Ok(writer.write_all_volatile(®ion.as_volatile_slice()?)?)) - .map_err(SnapshotMemoryError::WriteMemory) - } - - /// Dumps all pages of GuestMemoryMmap present in `dirty_bitmap` to a writer. - fn dump_dirty( - &self, - writer: &mut T, - dirty_bitmap: &DirtyBitmap, - ) -> Result<(), SnapshotMemoryError> { - let mut writer_offset = 0; - let page_size = get_page_size()?; - - self.iter() - .enumerate() - .try_for_each(|(slot, region)| { - let kvm_bitmap = dirty_bitmap.get(&slot).unwrap(); - let firecracker_bitmap = region.bitmap(); - let mut write_size = 0; - let mut dirty_batch_start: u64 = 0; - - for (i, v) in kvm_bitmap.iter().enumerate() { - for j in 0..64 { - let is_kvm_page_dirty = ((v >> j) & 1u64) != 0u64; - let page_offset = ((i * 64) + j) * page_size; - let is_firecracker_page_dirty = firecracker_bitmap.dirty_at(page_offset); - if is_kvm_page_dirty || is_firecracker_page_dirty { - // We are at the start of a new batch of dirty pages. - if write_size == 0 { - // Seek forward over the unmodified pages. - writer - .seek(SeekFrom::Start(writer_offset + page_offset as u64)) - .unwrap(); - dirty_batch_start = page_offset as u64; - } - write_size += page_size; - } else if write_size > 0 { - // We are at the end of a batch of dirty pages. 
- writer.write_all_volatile( - ®ion.get_slice( - MemoryRegionAddress(dirty_batch_start), - write_size, - )?, - )?; - - write_size = 0; - } - } - } - - if write_size > 0 { - writer.write_all_volatile( - ®ion.get_slice(MemoryRegionAddress(dirty_batch_start), write_size)?, - )?; - } - writer_offset += region.len(); - if let Some(bitmap) = firecracker_bitmap { - bitmap.reset(); - } - - Ok(()) - }) - .map_err(SnapshotMemoryError::WriteMemory) - } - - /// Creates a GuestMemoryMmap backed by a `file` if present, otherwise backed - /// by anonymous memory. Memory layout and ranges are described in `state` param. - fn restore( - file: Option<&File>, - state: &GuestMemoryState, - track_dirty_pages: bool, - ) -> Result { - let mut regions = vec![]; - for region in state.regions.iter() { - let f = match file { - Some(f) => Some(FileOffset::new(f.try_clone()?, region.offset)), - None => None, - }; - - regions.push((f, GuestAddress(region.base_address), region.size)); - } - - crate::vstate::memory::create_guest_memory(®ions, track_dirty_pages) - .map_err(SnapshotMemoryError::CreateMemory) - } -} - -#[cfg(test)] -mod tests { - use std::collections::HashMap; - use std::io::{Read, Seek}; - - use utils::get_page_size; - use utils::tempfile::TempFile; - - use super::*; - use crate::vstate::memory::{Bytes, GuestAddress}; - - #[test] - fn test_describe_state() { - let page_size: usize = get_page_size().unwrap(); - - // Two regions of one page each, with a one page gap between them. 
- let mem_regions = [ - (None, GuestAddress(0), page_size), - (None, GuestAddress(page_size as u64 * 2), page_size), - ]; - let guest_memory = - crate::vstate::memory::create_guest_memory(&mem_regions[..], true).unwrap(); - - let expected_memory_state = GuestMemoryState { - regions: vec![ - GuestMemoryRegionState { - base_address: 0, - size: page_size, - offset: 0, - }, - GuestMemoryRegionState { - base_address: page_size as u64 * 2, - size: page_size, - offset: page_size as u64, - }, - ], - }; - - let actual_memory_state = guest_memory.describe(); - assert_eq!(expected_memory_state, actual_memory_state); - - // Two regions of three pages each, with a one page gap between them. - let mem_regions = [ - (None, GuestAddress(0), page_size * 3), - (None, GuestAddress(page_size as u64 * 4), page_size * 3), - ]; - let guest_memory = - crate::vstate::memory::create_guest_memory(&mem_regions[..], true).unwrap(); - - let expected_memory_state = GuestMemoryState { - regions: vec![ - GuestMemoryRegionState { - base_address: 0, - size: page_size * 3, - offset: 0, - }, - GuestMemoryRegionState { - base_address: page_size as u64 * 4, - size: page_size * 3, - offset: page_size as u64 * 3, - }, - ], - }; - - let actual_memory_state = guest_memory.describe(); - assert_eq!(expected_memory_state, actual_memory_state); - } - - #[test] - fn test_restore_memory() { - let page_size: usize = get_page_size().unwrap(); - - // Two regions of two pages each, with a one page gap between them. - let mem_regions = [ - (None, GuestAddress(0), page_size * 2), - (None, GuestAddress(page_size as u64 * 3), page_size * 2), - ]; - let guest_memory = - crate::vstate::memory::create_guest_memory(&mem_regions[..], true).unwrap(); - // Check that Firecracker bitmap is clean. - let _res: Result<(), SnapshotMemoryError> = guest_memory.iter().try_for_each(|r| { - assert!(!r.bitmap().dirty_at(0)); - assert!(!r.bitmap().dirty_at(1)); - Ok(()) - }); - - // Fill the first region with 1s and the second with 2s. 
- let first_region = vec![1u8; page_size * 2]; - guest_memory - .write(&first_region[..], GuestAddress(0)) - .unwrap(); - - let second_region = vec![2u8; page_size * 2]; - guest_memory - .write(&second_region[..], GuestAddress(page_size as u64 * 3)) - .unwrap(); - - let memory_state = guest_memory.describe(); - - // Case 1: dump the full memory. - { - let mut memory_file = TempFile::new().unwrap().into_file(); - guest_memory.dump(&mut memory_file).unwrap(); - - let restored_guest_memory = - GuestMemoryMmap::restore(Some(&memory_file), &memory_state, false).unwrap(); - - // Check that the region contents are the same. - let mut actual_region = vec![0u8; page_size * 2]; - restored_guest_memory - .read(actual_region.as_mut_slice(), GuestAddress(0)) - .unwrap(); - assert_eq!(first_region, actual_region); - - restored_guest_memory - .read( - actual_region.as_mut_slice(), - GuestAddress(page_size as u64 * 3), - ) - .unwrap(); - assert_eq!(second_region, actual_region); - } - - // Case 2: dump only the dirty pages. - { - // KVM Bitmap - // First region pages: [dirty, clean] - // Second region pages: [clean, dirty] - let mut dirty_bitmap: DirtyBitmap = HashMap::new(); - dirty_bitmap.insert(0, vec![0b01; 1]); - dirty_bitmap.insert(1, vec![0b10; 1]); - - let mut file = TempFile::new().unwrap().into_file(); - guest_memory.dump_dirty(&mut file, &dirty_bitmap).unwrap(); - - // We can restore from this because this is the first dirty dump. - let restored_guest_memory = - GuestMemoryMmap::restore(Some(&file), &memory_state, false).unwrap(); - - // Check that the region contents are the same. 
- let mut actual_region = vec![0u8; page_size * 2]; - restored_guest_memory - .read(actual_region.as_mut_slice(), GuestAddress(0)) - .unwrap(); - assert_eq!(first_region, actual_region); - - restored_guest_memory - .read( - actual_region.as_mut_slice(), - GuestAddress(page_size as u64 * 3), - ) - .unwrap(); - assert_eq!(second_region, actual_region); - - // Dirty the memory and dump again - let file = TempFile::new().unwrap(); - let mut reader = file.into_file(); - let zeros = vec![0u8; page_size]; - let ones = vec![1u8; page_size]; - let twos = vec![2u8; page_size]; - - // Firecracker Bitmap - // First region pages: [dirty, clean] - // Second region pages: [clean, clean] - guest_memory - .write(&twos[..], GuestAddress(page_size as u64)) - .unwrap(); - - guest_memory.dump_dirty(&mut reader, &dirty_bitmap).unwrap(); - - // Check that only the dirty regions are dumped. - let mut diff_file_content = Vec::new(); - let expected_first_region = [ - ones.as_slice(), - twos.as_slice(), - zeros.as_slice(), - twos.as_slice(), - ] - .concat(); - reader.seek(SeekFrom::Start(0)).unwrap(); - reader.read_to_end(&mut diff_file_content).unwrap(); - assert_eq!(expected_first_region, diff_file_content); - } - } -} diff --git a/src/vmm/src/persist.rs b/src/vmm/src/persist.rs index 7a425ed8dbd..ccae052df09 100644 --- a/src/vmm/src/persist.rs +++ b/src/vmm/src/persist.rs @@ -33,7 +33,6 @@ use crate::device_manager::persist::{DevicePersistError, DeviceStates}; use crate::devices::virtio::gen::virtio_ring::VIRTIO_RING_F_EVENT_IDX; use crate::devices::virtio::TYPE_NET; use crate::logger::{info, warn}; -use crate::memory_snapshot::{GuestMemoryState, SnapshotMemory}; use crate::resources::VmResources; #[cfg(target_arch = "x86_64")] use crate::version_map::FC_V0_23_SNAP_VERSION; @@ -46,10 +45,12 @@ use crate::vmm_config::machine_config::MAX_SUPPORTED_VCPUS; use crate::vmm_config::snapshot::{ CreateSnapshotParams, LoadSnapshotParams, MemBackendType, SnapshotType, }; -use 
crate::vstate::memory::{GuestMemory, GuestMemoryMmap}; +use crate::vstate::memory::{ + GuestMemory, GuestMemoryMmap, GuestMemoryState, SnapshotMemory, SnapshotMemoryError, +}; use crate::vstate::vcpu::{VcpuSendEventError, VcpuState}; use crate::vstate::vm::VmState; -use crate::{mem_size_mib, memory_snapshot, vstate, EventManager, Vmm, VmmError}; +use crate::{mem_size_mib, vstate, EventManager, Vmm, VmmError}; #[cfg(target_arch = "x86_64")] const FC_V0_23_MAX_DEVICES: u32 = 11; @@ -194,7 +195,7 @@ pub enum CreateSnapshotError { /// Cannot translate microVM version to snapshot data version UnsupportedVersion, /// Cannot write memory file: {0} - Memory(memory_snapshot::SnapshotMemoryError), + Memory(SnapshotMemoryError), /// Cannot perform {0} on the memory backing file: {1} MemoryBackingFile(&'static str, io::Error), /// Cannot save the microVM state: {0} @@ -558,7 +559,7 @@ pub enum GuestMemoryFromFileError { /// Failed to load guest memory: {0} File(#[from] std::io::Error), /// Failed to restore guest memory: {0} - Restore(#[from] crate::memory_snapshot::SnapshotMemoryError), + Restore(#[from] SnapshotMemoryError), } fn guest_memory_from_file( @@ -575,7 +576,7 @@ fn guest_memory_from_file( #[derive(Debug, thiserror::Error, displaydoc::Display)] pub enum GuestMemoryFromUffdError { /// Failed to restore guest memory: {0} - Restore(#[from] crate::memory_snapshot::SnapshotMemoryError), + Restore(#[from] SnapshotMemoryError), /// Failed to UFFD object: {0} Create(userfaultfd::Error), /// Failed to register memory address range with the userfaultfd object: {0} @@ -688,7 +689,6 @@ mod tests { }; #[cfg(target_arch = "aarch64")] use crate::construct_kvm_mpidrs; - use crate::memory_snapshot::SnapshotMemory; use crate::version_map::{FC_VERSION_TO_SNAP_VERSION, VERSION_MAP}; use crate::vmm_config::balloon::BalloonDeviceConfig; use crate::vmm_config::drive::CacheType; @@ -839,7 +839,7 @@ mod tests { let err = UnsupportedVersion; let _ = format!("{}{:?}", err, err); - let err = 
Memory(memory_snapshot::SnapshotMemoryError::WriteMemory( + let err = Memory(SnapshotMemoryError::WriteMemory( GuestMemoryError::HostAddressNotAvailable, )); let _ = format!("{}{:?}", err, err); diff --git a/src/vmm/src/vstate/memory.rs b/src/vmm/src/vstate/memory.rs index d9fe2f5c780..6185e38b4b3 100644 --- a/src/vmm/src/vstate/memory.rs +++ b/src/vmm/src/vstate/memory.rs @@ -5,10 +5,13 @@ // Use of this source code is governed by a BSD-style license that can be // found in the THIRD-PARTY file. -use std::io::Error as IoError; +use std::fs::File; +use std::io::{Error as IoError, SeekFrom}; use std::os::unix::io::AsRawFd; -use utils::u64_to_usize; +use utils::{errno, get_page_size, u64_to_usize}; +use versionize::{VersionMap, Versionize, VersionizeResult}; +use versionize_derive::Versionize; pub use vm_memory::bitmap::{AtomicBitmap, Bitmap, BitmapSlice, BS}; use vm_memory::mmap::{check_file_offset, NewBitmap}; pub use vm_memory::mmap::{MmapRegionBuilder, MmapRegionError}; @@ -17,6 +20,9 @@ pub use vm_memory::{ GuestMemory, GuestMemoryError, GuestMemoryRegion, GuestUsize, MemoryRegionAddress, MmapRegion, }; +use crate::volatile::WriteVolatile; +use crate::DirtyBitmap; + /// Type of GuestMemoryMmap. pub type GuestMemoryMmap = vm_memory::GuestMemoryMmap>; /// Type of GuestRegionMmap. @@ -26,6 +32,174 @@ pub type GuestMmapRegion = vm_memory::MmapRegion>; const GUARD_PAGE_COUNT: usize = 1; +/// Errors associated with dumping guest memory to file. +#[derive(Debug, thiserror::Error, displaydoc::Display)] +pub enum SnapshotMemoryError { + /// Cannot access file: {0:?} + FileHandle(#[from] std::io::Error), + /// Cannot create memory: {0:?} + CreateMemory(#[from] VmMemoryError), + /// Cannot create memory region: {0:?} + CreateRegion(#[from] MmapRegionError), + /// Cannot fetch system's page size: {0:?} + PageSize(#[from] errno::Error), + /// Cannot dump memory: {0:?} + WriteMemory(#[from] GuestMemoryError), +} + +/// Defines the interface for snapshotting memory. 
+pub trait SnapshotMemory +where + Self: Sized, +{ + /// Describes GuestMemoryMmap through a GuestMemoryState struct. + fn describe(&self) -> GuestMemoryState; + /// Dumps all contents of GuestMemoryMmap to a writer. + fn dump(&self, writer: &mut T) -> Result<(), SnapshotMemoryError>; + /// Dumps all pages of GuestMemoryMmap present in `dirty_bitmap` to a writer. + fn dump_dirty( + &self, + writer: &mut T, + dirty_bitmap: &DirtyBitmap, + ) -> Result<(), SnapshotMemoryError>; + /// Creates a GuestMemoryMmap given a `file` containing the data + /// and a `state` containing mapping information. + fn restore( + file: Option<&File>, + state: &GuestMemoryState, + track_dirty_pages: bool, + ) -> Result; +} + +/// State of a guest memory region saved to file/buffer. +#[derive(Debug, PartialEq, Eq, Versionize)] +// NOTICE: Any changes to this structure require a snapshot version bump. +pub struct GuestMemoryRegionState { + // This should have been named `base_guest_addr` since it's _guest_ addr, but for + // backward compatibility we have to keep this name. At least this comment should help. + /// Base GuestAddress. + pub base_address: u64, + /// Region size. + pub size: usize, + /// Offset in file/buffer where the region is saved. + pub offset: u64, +} + +/// Describes guest memory regions and their snapshot file mappings. +#[derive(Debug, Default, PartialEq, Eq, Versionize)] +// NOTICE: Any changes to this structure require a snapshot version bump. +pub struct GuestMemoryState { + /// List of regions. + pub regions: Vec, +} + +impl SnapshotMemory for GuestMemoryMmap { + /// Describes GuestMemoryMmap through a GuestMemoryState struct. 
+ fn describe(&self) -> GuestMemoryState { + let mut guest_memory_state = GuestMemoryState::default(); + let mut offset = 0; + self.iter().for_each(|region| { + guest_memory_state.regions.push(GuestMemoryRegionState { + base_address: region.start_addr().0, + size: u64_to_usize(region.len()), + offset, + }); + + offset += region.len(); + }); + guest_memory_state + } + + /// Dumps all contents of GuestMemoryMmap to a writer. + fn dump(&self, writer: &mut T) -> Result<(), SnapshotMemoryError> { + self.iter() + .try_for_each(|region| Ok(writer.write_all_volatile(®ion.as_volatile_slice()?)?)) + .map_err(SnapshotMemoryError::WriteMemory) + } + + /// Dumps all pages of GuestMemoryMmap present in `dirty_bitmap` to a writer. + fn dump_dirty( + &self, + writer: &mut T, + dirty_bitmap: &DirtyBitmap, + ) -> Result<(), SnapshotMemoryError> { + let mut writer_offset = 0; + let page_size = get_page_size()?; + + self.iter() + .enumerate() + .try_for_each(|(slot, region)| { + let kvm_bitmap = dirty_bitmap.get(&slot).unwrap(); + let firecracker_bitmap = region.bitmap(); + let mut write_size = 0; + let mut dirty_batch_start: u64 = 0; + + for (i, v) in kvm_bitmap.iter().enumerate() { + for j in 0..64 { + let is_kvm_page_dirty = ((v >> j) & 1u64) != 0u64; + let page_offset = ((i * 64) + j) * page_size; + let is_firecracker_page_dirty = firecracker_bitmap.dirty_at(page_offset); + if is_kvm_page_dirty || is_firecracker_page_dirty { + // We are at the start of a new batch of dirty pages. + if write_size == 0 { + // Seek forward over the unmodified pages. + writer + .seek(SeekFrom::Start(writer_offset + page_offset as u64)) + .unwrap(); + dirty_batch_start = page_offset as u64; + } + write_size += page_size; + } else if write_size > 0 { + // We are at the end of a batch of dirty pages. 
+ writer.write_all_volatile( + ®ion.get_slice( + MemoryRegionAddress(dirty_batch_start), + write_size, + )?, + )?; + + write_size = 0; + } + } + } + + if write_size > 0 { + writer.write_all_volatile( + ®ion.get_slice(MemoryRegionAddress(dirty_batch_start), write_size)?, + )?; + } + writer_offset += region.len(); + if let Some(bitmap) = firecracker_bitmap { + bitmap.reset(); + } + + Ok(()) + }) + .map_err(SnapshotMemoryError::WriteMemory) + } + + /// Creates a GuestMemoryMmap backed by a `file` if present, otherwise backed + /// by anonymous memory. Memory layout and ranges are described in `state` param. + fn restore( + file: Option<&File>, + state: &GuestMemoryState, + track_dirty_pages: bool, + ) -> Result { + let mut regions = vec![]; + for region in state.regions.iter() { + let f = match file { + Some(f) => Some(FileOffset::new(f.try_clone()?, region.offset)), + None => None, + }; + + regions.push((f, GuestAddress(region.base_address), region.size)); + } + + crate::vstate::memory::create_guest_memory(®ions, track_dirty_pages) + .map_err(SnapshotMemoryError::CreateMemory) + } +} + /// Build a `MmapRegion` surrounded by guard pages. /// /// Initially, we map a `PROT_NONE` guard region of size: @@ -203,10 +377,14 @@ pub mod test_utils { mod tests { #![allow(clippy::undocumented_unsafe_blocks)] + use std::collections::HashMap; + use std::io::{Read, Seek}; + use utils::get_page_size; use utils::tempfile::TempFile; use super::*; + use crate::vstate::memory::{Bytes, GuestAddress}; #[derive(Debug)] enum AddrOp { @@ -456,4 +634,178 @@ mod tests { .unwrap(); } } + + #[test] + fn test_describe_state() { + let page_size: usize = get_page_size().unwrap(); + + // Two regions of one page each, with a one page gap between them. 
+ let mem_regions = [ + (None, GuestAddress(0), page_size), + (None, GuestAddress(page_size as u64 * 2), page_size), + ]; + let guest_memory = + crate::vstate::memory::create_guest_memory(&mem_regions[..], true).unwrap(); + + let expected_memory_state = GuestMemoryState { + regions: vec![ + GuestMemoryRegionState { + base_address: 0, + size: page_size, + offset: 0, + }, + GuestMemoryRegionState { + base_address: page_size as u64 * 2, + size: page_size, + offset: page_size as u64, + }, + ], + }; + + let actual_memory_state = guest_memory.describe(); + assert_eq!(expected_memory_state, actual_memory_state); + + // Two regions of three pages each, with a one page gap between them. + let mem_regions = [ + (None, GuestAddress(0), page_size * 3), + (None, GuestAddress(page_size as u64 * 4), page_size * 3), + ]; + let guest_memory = + crate::vstate::memory::create_guest_memory(&mem_regions[..], true).unwrap(); + + let expected_memory_state = GuestMemoryState { + regions: vec![ + GuestMemoryRegionState { + base_address: 0, + size: page_size * 3, + offset: 0, + }, + GuestMemoryRegionState { + base_address: page_size as u64 * 4, + size: page_size * 3, + offset: page_size as u64 * 3, + }, + ], + }; + + let actual_memory_state = guest_memory.describe(); + assert_eq!(expected_memory_state, actual_memory_state); + } + + #[test] + fn test_restore_memory() { + let page_size: usize = get_page_size().unwrap(); + + // Two regions of two pages each, with a one page gap between them. + let mem_regions = [ + (None, GuestAddress(0), page_size * 2), + (None, GuestAddress(page_size as u64 * 3), page_size * 2), + ]; + let guest_memory = + crate::vstate::memory::create_guest_memory(&mem_regions[..], true).unwrap(); + // Check that Firecracker bitmap is clean. + let _res: Result<(), SnapshotMemoryError> = guest_memory.iter().try_for_each(|r| { + assert!(!r.bitmap().dirty_at(0)); + assert!(!r.bitmap().dirty_at(1)); + Ok(()) + }); + + // Fill the first region with 1s and the second with 2s. 
+ let first_region = vec![1u8; page_size * 2]; + guest_memory + .write(&first_region[..], GuestAddress(0)) + .unwrap(); + + let second_region = vec![2u8; page_size * 2]; + guest_memory + .write(&second_region[..], GuestAddress(page_size as u64 * 3)) + .unwrap(); + + let memory_state = guest_memory.describe(); + + // Case 1: dump the full memory. + { + let mut memory_file = TempFile::new().unwrap().into_file(); + guest_memory.dump(&mut memory_file).unwrap(); + + let restored_guest_memory = + GuestMemoryMmap::restore(Some(&memory_file), &memory_state, false).unwrap(); + + // Check that the region contents are the same. + let mut actual_region = vec![0u8; page_size * 2]; + restored_guest_memory + .read(actual_region.as_mut_slice(), GuestAddress(0)) + .unwrap(); + assert_eq!(first_region, actual_region); + + restored_guest_memory + .read( + actual_region.as_mut_slice(), + GuestAddress(page_size as u64 * 3), + ) + .unwrap(); + assert_eq!(second_region, actual_region); + } + + // Case 2: dump only the dirty pages. + { + // KVM Bitmap + // First region pages: [dirty, clean] + // Second region pages: [clean, dirty] + let mut dirty_bitmap: DirtyBitmap = HashMap::new(); + dirty_bitmap.insert(0, vec![0b01; 1]); + dirty_bitmap.insert(1, vec![0b10; 1]); + + let mut file = TempFile::new().unwrap().into_file(); + guest_memory.dump_dirty(&mut file, &dirty_bitmap).unwrap(); + + // We can restore from this because this is the first dirty dump. + let restored_guest_memory = + GuestMemoryMmap::restore(Some(&file), &memory_state, false).unwrap(); + + // Check that the region contents are the same. 
+ let mut actual_region = vec![0u8; page_size * 2]; + restored_guest_memory + .read(actual_region.as_mut_slice(), GuestAddress(0)) + .unwrap(); + assert_eq!(first_region, actual_region); + + restored_guest_memory + .read( + actual_region.as_mut_slice(), + GuestAddress(page_size as u64 * 3), + ) + .unwrap(); + assert_eq!(second_region, actual_region); + + // Dirty the memory and dump again + let file = TempFile::new().unwrap(); + let mut reader = file.into_file(); + let zeros = vec![0u8; page_size]; + let ones = vec![1u8; page_size]; + let twos = vec![2u8; page_size]; + + // Firecracker Bitmap + // First region pages: [dirty, clean] + // Second region pages: [clean, clean] + guest_memory + .write(&twos[..], GuestAddress(page_size as u64)) + .unwrap(); + + guest_memory.dump_dirty(&mut reader, &dirty_bitmap).unwrap(); + + // Check that only the dirty regions are dumped. + let mut diff_file_content = Vec::new(); + let expected_first_region = [ + ones.as_slice(), + twos.as_slice(), + zeros.as_slice(), + twos.as_slice(), + ] + .concat(); + reader.seek(SeekFrom::Start(0)).unwrap(); + reader.read_to_end(&mut diff_file_content).unwrap(); + assert_eq!(expected_first_region, diff_file_content); + } + } } diff --git a/src/vmm/tests/integration_tests.rs b/src/vmm/tests/integration_tests.rs index 80db21e699e..ebc2f913529 100644 --- a/src/vmm/tests/integration_tests.rs +++ b/src/vmm/tests/integration_tests.rs @@ -233,8 +233,7 @@ fn verify_create_snapshot(is_diff: bool) -> (TempFile, TempFile) { } fn verify_load_snapshot(snapshot_file: TempFile, memory_file: TempFile) { - use vmm::memory_snapshot::SnapshotMemory; - use vmm::vstate::memory::GuestMemoryMmap; + use vmm::vstate::memory::{GuestMemoryMmap, SnapshotMemory}; let mut event_manager = EventManager::new().unwrap(); let empty_seccomp_filters = get_empty_filters(); From 996f1e308ac1f4e9e8be8a9643d26dfb1b323580 Mon Sep 17 00:00:00 2001 From: Egor Lazarchuk Date: Mon, 25 Sep 2023 22:54:01 +0100 Subject: [PATCH 06/14] 
refactor(memory): updated memory errors Removed reexports of `vm_memory` error types from `vstate/memory.rs` Renamed `SnapshotMemoryError` into `MemoryError`. Now `memory` only has 1 error type it exposes. Signed-off-by: Egor Lazarchuk --- src/vmm/src/arch/aarch64/fdt.rs | 5 +- src/vmm/src/builder.rs | 4 +- src/vmm/src/devices/virtio/balloon/mod.rs | 3 +- .../src/devices/virtio/block/io/async_io.rs | 3 +- .../src/devices/virtio/block/io/sync_io.rs | 4 +- src/vmm/src/devices/virtio/block/mod.rs | 3 +- src/vmm/src/devices/virtio/block/request.rs | 4 +- src/vmm/src/devices/virtio/iovec.rs | 3 +- src/vmm/src/devices/virtio/net/device.rs | 3 +- src/vmm/src/devices/virtio/queue.rs | 8 +- src/vmm/src/devices/virtio/rng/device.rs | 3 +- .../devices/virtio/vsock/csm/connection.rs | 3 +- src/vmm/src/devices/virtio/vsock/mod.rs | 3 +- src/vmm/src/devices/virtio/vsock/packet.rs | 5 +- src/vmm/src/io_uring/queue/mmap.rs | 4 +- src/vmm/src/persist.rs | 14 ++-- src/vmm/src/vstate/memory.rs | 80 +++++++++++-------- 17 files changed, 91 insertions(+), 61 deletions(-) diff --git a/src/vmm/src/arch/aarch64/fdt.rs b/src/vmm/src/arch/aarch64/fdt.rs index 4bcf0a0cdfe..a2367467d06 100644 --- a/src/vmm/src/arch/aarch64/fdt.rs +++ b/src/vmm/src/arch/aarch64/fdt.rs @@ -10,14 +10,13 @@ use std::ffi::CString; use std::fmt::Debug; use vm_fdt::{Error as VmFdtError, FdtWriter, FdtWriterNode}; +use vm_memory::GuestMemoryError; use super::super::{DeviceType, InitrdConfig}; use super::cache_info::{read_cache_config, CacheEntry}; use super::get_fdt_addr; use super::gic::GICDevice; -use crate::vstate::memory::{ - Address, Bytes, GuestAddress, GuestMemory, GuestMemoryError, GuestMemoryMmap, -}; +use crate::vstate::memory::{Address, Bytes, GuestAddress, GuestMemory, GuestMemoryMmap}; // This is a value for uniquely identifying the FDT node declaring the interrupt controller. 
const GIC_PHANDLE: u32 = 1; diff --git a/src/vmm/src/builder.rs b/src/vmm/src/builder.rs index 39954667376..a8ada9b828e 100644 --- a/src/vmm/src/builder.rs +++ b/src/vmm/src/builder.rs @@ -85,7 +85,7 @@ pub enum StartMicrovmError { CreateLegacyDevice(device_manager::legacy::LegacyDeviceError), /// Memory regions are overlapping or mmap fails. #[error("Invalid Memory Configuration: {}", format!("{:?}", .0).replace('\"', ""))] - GuestMemoryMmap(crate::vstate::memory::VmMemoryError), + GuestMemory(crate::vstate::memory::MemoryError), /// Cannot load initrd due to an invalid memory configuration. #[error("Cannot load initrd due to an invalid memory configuration.")] InitrdLoad, @@ -555,7 +555,7 @@ pub fn create_guest_memory( .collect::>()[..], track_dirty_pages, ) - .map_err(StartMicrovmError::GuestMemoryMmap) + .map_err(StartMicrovmError::GuestMemory) } fn load_kernel( diff --git a/src/vmm/src/devices/virtio/balloon/mod.rs b/src/vmm/src/devices/virtio/balloon/mod.rs index 2e622cdd3d1..8fa6b3a55b3 100644 --- a/src/vmm/src/devices/virtio/balloon/mod.rs +++ b/src/vmm/src/devices/virtio/balloon/mod.rs @@ -9,9 +9,10 @@ pub mod persist; pub mod test_utils; mod util; +use vm_memory::GuestMemoryError; + pub use self::device::{Balloon, BalloonConfig, BalloonStats}; use crate::devices::virtio::FIRECRACKER_MAX_QUEUE_SIZE; -use crate::vstate::memory::GuestMemoryError; /// Device ID used in MMIO device identification. /// Because Balloon is unique per-vm, this ID can be hardcoded. 
diff --git a/src/vmm/src/devices/virtio/block/io/async_io.rs b/src/vmm/src/devices/virtio/block/io/async_io.rs index c0d7771d0d4..5583aae09a3 100644 --- a/src/vmm/src/devices/virtio/block/io/async_io.rs +++ b/src/vmm/src/devices/virtio/block/io/async_io.rs @@ -7,6 +7,7 @@ use std::marker::PhantomData; use std::os::unix::io::AsRawFd; use utils::eventfd::EventFd; +use vm_memory::GuestMemoryError; use crate::devices::virtio::block::io::UserDataError; use crate::devices::virtio::block::IO_URING_NUM_ENTRIES; @@ -23,7 +24,7 @@ pub enum AsyncIoError { Submit(std::io::Error), SyncAll(std::io::Error), EventFd(std::io::Error), - GuestMemory(crate::vstate::memory::GuestMemoryError), + GuestMemory(GuestMemoryError), } #[derive(Debug)] diff --git a/src/vmm/src/devices/virtio/block/io/sync_io.rs b/src/vmm/src/devices/virtio/block/io/sync_io.rs index 8838f755ada..73d631ecfc8 100644 --- a/src/vmm/src/devices/virtio/block/io/sync_io.rs +++ b/src/vmm/src/devices/virtio/block/io/sync_io.rs @@ -4,8 +4,10 @@ use std::fs::File; use std::io::{Seek, SeekFrom, Write}; +use vm_memory::GuestMemoryError; + use crate::volatile::{ReadVolatile, WriteVolatile}; -use crate::vstate::memory::{GuestAddress, GuestMemory, GuestMemoryError, GuestMemoryMmap}; +use crate::vstate::memory::{GuestAddress, GuestMemory, GuestMemoryMmap}; #[derive(Debug)] pub enum SyncIoError { diff --git a/src/vmm/src/devices/virtio/block/mod.rs b/src/vmm/src/devices/virtio/block/mod.rs index 493b672057f..22c7c165a54 100644 --- a/src/vmm/src/devices/virtio/block/mod.rs +++ b/src/vmm/src/devices/virtio/block/mod.rs @@ -10,11 +10,12 @@ pub mod persist; pub mod request; pub mod test_utils; +use vm_memory::GuestMemoryError; + pub use self::device::{Block, CacheType}; pub use self::event_handler::*; pub use self::request::*; use crate::devices::virtio::FIRECRACKER_MAX_QUEUE_SIZE; -use crate::vstate::memory::GuestMemoryError; /// Size of config space for block device. 
pub const BLOCK_CONFIG_SPACE_SIZE: usize = 8; diff --git a/src/vmm/src/devices/virtio/block/request.rs b/src/vmm/src/devices/virtio/block/request.rs index 87477396df8..d2708038974 100644 --- a/src/vmm/src/devices/virtio/block/request.rs +++ b/src/vmm/src/devices/virtio/block/request.rs @@ -7,6 +7,8 @@ use std::convert::From; +use vm_memory::GuestMemoryError; + use super::super::DescriptorChain; use super::{io as block_io, BlockError, SECTOR_SHIFT}; use crate::devices::virtio::block::device::DiskProperties; @@ -17,7 +19,7 @@ pub use crate::devices::virtio::gen::virtio_blk::{ use crate::devices::virtio::SECTOR_SIZE; use crate::logger::{error, IncMetric, METRICS}; use crate::rate_limiter::{RateLimiter, TokenType}; -use crate::vstate::memory::{ByteValued, Bytes, GuestAddress, GuestMemoryError, GuestMemoryMmap}; +use crate::vstate::memory::{ByteValued, Bytes, GuestAddress, GuestMemoryMmap}; #[derive(Debug, derive_more::From)] pub enum IoErr { diff --git a/src/vmm/src/devices/virtio/iovec.rs b/src/vmm/src/devices/virtio/iovec.rs index 169d0b205dc..382168d895b 100644 --- a/src/vmm/src/devices/virtio/iovec.rs +++ b/src/vmm/src/devices/virtio/iovec.rs @@ -2,6 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 use libc::{c_void, iovec, size_t}; +use vm_memory::GuestMemoryError; use crate::devices::virtio::DescriptorChain; use crate::vstate::memory::{Bitmap, GuestMemory, GuestMemoryMmap}; @@ -13,7 +14,7 @@ pub enum IoVecError { /// Tried to create an 'IoVecMut` from a read-only descriptor chain ReadOnlyDescriptor, /// Guest memory error: {0} - GuestMemory(#[from] crate::vstate::memory::GuestMemoryError), + GuestMemory(#[from] GuestMemoryError), } /// This is essentially a wrapper of a `Vec` which can be passed to `libc::writev`. 
diff --git a/src/vmm/src/devices/virtio/net/device.rs b/src/vmm/src/devices/virtio/net/device.rs index dddf8c9c632..d4d3f8448ec 100755 --- a/src/vmm/src/devices/virtio/net/device.rs +++ b/src/vmm/src/devices/virtio/net/device.rs @@ -18,6 +18,7 @@ use log::{error, warn}; use utils::eventfd::EventFd; use utils::net::mac::MacAddr; use utils::u64_to_usize; +use vm_memory::GuestMemoryError; use crate::devices::virtio::gen::virtio_blk::VIRTIO_F_VERSION_1; use crate::devices::virtio::gen::virtio_net::{ @@ -31,7 +32,7 @@ use crate::logger::{IncMetric, METRICS}; use crate::mmds::data_store::Mmds; use crate::mmds::ns::MmdsNetworkStack; use crate::rate_limiter::{BucketUpdate, RateLimiter, TokenType}; -use crate::vstate::memory::{ByteValued, Bytes, GuestMemoryError, GuestMemoryMmap}; +use crate::vstate::memory::{ByteValued, Bytes, GuestMemoryMmap}; const FRAME_HEADER_MAX_LEN: usize = PAYLOAD_OFFSET + ETH_IPV4_FRAME_LEN; diff --git a/src/vmm/src/devices/virtio/queue.rs b/src/vmm/src/devices/virtio/queue.rs index 9e2d0e69248..c05dbe82c55 100644 --- a/src/vmm/src/devices/virtio/queue.rs +++ b/src/vmm/src/devices/virtio/queue.rs @@ -11,7 +11,7 @@ use std::sync::atomic::{fence, Ordering}; use crate::logger::error; use crate::vstate::memory::{ - Address, ByteValued, Bytes, GuestAddress, GuestMemory, GuestMemoryError, GuestMemoryMmap, + Address, ByteValued, Bytes, GuestAddress, GuestMemory, GuestMemoryMmap, }; pub(super) const VIRTQ_DESC_F_NEXT: u16 = 0x1; @@ -33,7 +33,7 @@ pub enum QueueError { /// Descriptor index out of bounds: {0}. DescIndexOutOfBounds(u16), /// Failed to write value into the virtio queue used ring: {0} - UsedRing(#[from] GuestMemoryError), + UsedRing(#[from] vm_memory::GuestMemoryError), } /// A virtio descriptor constraints with C representative. 
@@ -1420,7 +1420,9 @@ mod tests { #[test] fn test_queue_error_display() { - let err = UsedRing(GuestMemoryError::InvalidGuestAddress(GuestAddress(0))); + let err = UsedRing(vm_memory::GuestMemoryError::InvalidGuestAddress( + GuestAddress(0), + )); let _ = format!("{}{:?}", err, err); let err = DescIndexOutOfBounds(1); diff --git a/src/vmm/src/devices/virtio/rng/device.rs b/src/vmm/src/devices/virtio/rng/device.rs index e78f679137d..d43428ffc84 100644 --- a/src/vmm/src/devices/virtio/rng/device.rs +++ b/src/vmm/src/devices/virtio/rng/device.rs @@ -7,6 +7,7 @@ use std::sync::Arc; use aws_lc_rs::rand; use utils::eventfd::EventFd; +use vm_memory::GuestMemoryError; use super::{RNG_NUM_QUEUES, RNG_QUEUE}; use crate::devices::virtio::device::{IrqTrigger, IrqType}; @@ -18,7 +19,7 @@ use crate::devices::virtio::{ use crate::devices::DeviceError; use crate::logger::{debug, error, IncMetric, METRICS}; use crate::rate_limiter::{RateLimiter, TokenType}; -use crate::vstate::memory::{GuestMemoryError, GuestMemoryMmap}; +use crate::vstate::memory::GuestMemoryMmap; pub const ENTROPY_DEV_ID: &str = "rng"; diff --git a/src/vmm/src/devices/virtio/vsock/csm/connection.rs b/src/vmm/src/devices/virtio/vsock/csm/connection.rs index 28681472e26..ba69cba62a5 100644 --- a/src/vmm/src/devices/virtio/vsock/csm/connection.rs +++ b/src/vmm/src/devices/virtio/vsock/csm/connection.rs @@ -85,6 +85,7 @@ use std::time::{Duration, Instant}; use log::{debug, error, info, warn}; use utils::epoll::EventSet; use utils::wrap_usize_to_u32; +use vm_memory::GuestMemoryError; use super::super::defs::uapi; use super::super::packet::VsockPacket; @@ -93,7 +94,7 @@ use super::txbuf::TxBuf; use super::{defs, ConnState, PendingRx, PendingRxSet, VsockCsmError}; use crate::logger::{IncMetric, METRICS}; use crate::volatile::{ReadVolatile, WriteVolatile}; -use crate::vstate::memory::{GuestMemoryError, GuestMemoryMmap}; +use crate::vstate::memory::GuestMemoryMmap; /// Trait that vsock connection backends need to 
implement. /// diff --git a/src/vmm/src/devices/virtio/vsock/mod.rs b/src/vmm/src/devices/virtio/vsock/mod.rs index f6fa195fb35..e3907dbb8b1 100644 --- a/src/vmm/src/devices/virtio/vsock/mod.rs +++ b/src/vmm/src/devices/virtio/vsock/mod.rs @@ -23,13 +23,14 @@ use std::os::unix::io::AsRawFd; use packet::VsockPacket; use utils::epoll::EventSet; +use vm_memory::GuestMemoryError; pub use self::defs::uapi::VIRTIO_ID_VSOCK as TYPE_VSOCK; pub use self::defs::VSOCK_DEV_ID; pub use self::device::Vsock; pub use self::unix::{VsockUnixBackend, VsockUnixBackendError}; use crate::devices::virtio::persist::PersistError as VirtioStateError; -use crate::vstate::memory::{GuestMemoryError, GuestMemoryMmap}; +use crate::vstate::memory::GuestMemoryMmap; mod defs { use crate::devices::virtio::FIRECRACKER_MAX_QUEUE_SIZE; diff --git a/src/vmm/src/devices/virtio/vsock/packet.rs b/src/vmm/src/devices/virtio/vsock/packet.rs index 4261a1a1b82..68be5f1bb70 100644 --- a/src/vmm/src/devices/virtio/vsock/packet.rs +++ b/src/vmm/src/devices/virtio/vsock/packet.rs @@ -18,12 +18,13 @@ use std::fmt::Debug; use std::io::ErrorKind; +use vm_memory::GuestMemoryError; + use super::super::DescriptorChain; use super::{defs, VsockError}; use crate::volatile::{ReadVolatile, VolatileMemoryError, VolatileSlice, WriteVolatile}; use crate::vstate::memory::{ - Address, AtomicBitmap, ByteValued, Bytes, GuestAddress, GuestMemory, GuestMemoryError, - GuestMemoryMmap, BS, + Address, AtomicBitmap, ByteValued, Bytes, GuestAddress, GuestMemory, GuestMemoryMmap, BS, }; // The vsock packet header is defined by the C struct: diff --git a/src/vmm/src/io_uring/queue/mmap.rs b/src/vmm/src/io_uring/queue/mmap.rs index 202eb0a3c27..28c4709bbdf 100644 --- a/src/vmm/src/io_uring/queue/mmap.rs +++ b/src/vmm/src/io_uring/queue/mmap.rs @@ -4,7 +4,9 @@ use std::io::Error as IOError; use std::os::unix::io::RawFd; -use crate::vstate::memory::{MmapRegion, MmapRegionError}; +use vm_memory::mmap::MmapRegionError; + +use 
crate::vstate::memory::MmapRegion; #[derive(Debug)] pub enum MmapError { diff --git a/src/vmm/src/persist.rs b/src/vmm/src/persist.rs index ccae052df09..dbe5a3015de 100644 --- a/src/vmm/src/persist.rs +++ b/src/vmm/src/persist.rs @@ -46,7 +46,7 @@ use crate::vmm_config::snapshot::{ CreateSnapshotParams, LoadSnapshotParams, MemBackendType, SnapshotType, }; use crate::vstate::memory::{ - GuestMemory, GuestMemoryMmap, GuestMemoryState, SnapshotMemory, SnapshotMemoryError, + GuestMemory, GuestMemoryMmap, GuestMemoryState, MemoryError, SnapshotMemory, }; use crate::vstate::vcpu::{VcpuSendEventError, VcpuState}; use crate::vstate::vm::VmState; @@ -195,7 +195,7 @@ pub enum CreateSnapshotError { /// Cannot translate microVM version to snapshot data version UnsupportedVersion, /// Cannot write memory file: {0} - Memory(SnapshotMemoryError), + Memory(MemoryError), /// Cannot perform {0} on the memory backing file: {1} MemoryBackingFile(&'static str, io::Error), /// Cannot save the microVM state: {0} @@ -559,7 +559,7 @@ pub enum GuestMemoryFromFileError { /// Failed to load guest memory: {0} File(#[from] std::io::Error), /// Failed to restore guest memory: {0} - Restore(#[from] SnapshotMemoryError), + Restore(#[from] MemoryError), } fn guest_memory_from_file( @@ -576,7 +576,7 @@ fn guest_memory_from_file( #[derive(Debug, thiserror::Error, displaydoc::Display)] pub enum GuestMemoryFromUffdError { /// Failed to restore guest memory: {0} - Restore(#[from] SnapshotMemoryError), + Restore(#[from] MemoryError), /// Failed to UFFD object: {0} Create(userfaultfd::Error), /// Failed to register memory address range with the userfaultfd object: {0} @@ -828,7 +828,7 @@ mod tests { #[test] fn test_create_snapshot_error_display() { use crate::persist::CreateSnapshotError::*; - use crate::vstate::memory::GuestMemoryError; + use crate::vstate::memory::MemoryError; let err = DirtyBitmap(VmmError::DirtyBitmap(kvm_ioctls::Error::new(20))); let _ = format!("{}{:?}", err, err); @@ -839,8 +839,8 
@@ mod tests { let err = UnsupportedVersion; let _ = format!("{}{:?}", err, err); - let err = Memory(SnapshotMemoryError::WriteMemory( - GuestMemoryError::HostAddressNotAvailable, + let err = Memory(MemoryError::WriteMemory( + vm_memory::GuestMemoryError::HostAddressNotAvailable, )); let _ = format!("{}{:?}", err, err); diff --git a/src/vmm/src/vstate/memory.rs b/src/vmm/src/vstate/memory.rs index 6185e38b4b3..86881fcca15 100644 --- a/src/vmm/src/vstate/memory.rs +++ b/src/vmm/src/vstate/memory.rs @@ -13,12 +13,13 @@ use utils::{errno, get_page_size, u64_to_usize}; use versionize::{VersionMap, Versionize, VersionizeResult}; use versionize_derive::Versionize; pub use vm_memory::bitmap::{AtomicBitmap, Bitmap, BitmapSlice, BS}; -use vm_memory::mmap::{check_file_offset, NewBitmap}; -pub use vm_memory::mmap::{MmapRegionBuilder, MmapRegionError}; +pub use vm_memory::mmap::MmapRegionBuilder; +use vm_memory::mmap::{check_file_offset, MmapRegionError, NewBitmap}; pub use vm_memory::{ - address, Address, ByteValued, Bytes, Error as VmMemoryError, FileOffset, GuestAddress, - GuestMemory, GuestMemoryError, GuestMemoryRegion, GuestUsize, MemoryRegionAddress, MmapRegion, + address, Address, ByteValued, Bytes, FileOffset, GuestAddress, GuestMemory, GuestMemoryRegion, + GuestUsize, MemoryRegionAddress, MmapRegion, }; +use vm_memory::{Error as VmMemoryError, GuestMemoryError}; use crate::volatile::WriteVolatile; use crate::DirtyBitmap; @@ -34,17 +35,21 @@ const GUARD_PAGE_COUNT: usize = 1; /// Errors associated with dumping guest memory to file. 
#[derive(Debug, thiserror::Error, displaydoc::Display)] -pub enum SnapshotMemoryError { +pub enum MemoryError { /// Cannot access file: {0:?} - FileHandle(#[from] std::io::Error), + FileHandle(std::io::Error), /// Cannot create memory: {0:?} - CreateMemory(#[from] VmMemoryError), + CreateMemory(VmMemoryError), /// Cannot create memory region: {0:?} - CreateRegion(#[from] MmapRegionError), + CreateRegion(MmapRegionError), /// Cannot fetch system's page size: {0:?} - PageSize(#[from] errno::Error), + PageSize(errno::Error), /// Cannot dump memory: {0:?} - WriteMemory(#[from] GuestMemoryError), + WriteMemory(GuestMemoryError), + /// Cannot create mmap region: {0} + MmapRegionError(MmapRegionError), + /// Cannot create guest memory: {0} + VmMemoryError(VmMemoryError), } /// Defines the interface for snapshotting memory. @@ -55,20 +60,20 @@ where /// Describes GuestMemoryMmap through a GuestMemoryState struct. fn describe(&self) -> GuestMemoryState; /// Dumps all contents of GuestMemoryMmap to a writer. - fn dump(&self, writer: &mut T) -> Result<(), SnapshotMemoryError>; + fn dump(&self, writer: &mut T) -> Result<(), MemoryError>; /// Dumps all pages of GuestMemoryMmap present in `dirty_bitmap` to a writer. fn dump_dirty( &self, writer: &mut T, dirty_bitmap: &DirtyBitmap, - ) -> Result<(), SnapshotMemoryError>; + ) -> Result<(), MemoryError>; /// Creates a GuestMemoryMmap given a `file` containing the data /// and a `state` containing mapping information. fn restore( file: Option<&File>, state: &GuestMemoryState, track_dirty_pages: bool, - ) -> Result; + ) -> Result; } /// State of a guest memory region saved to file/buffer. @@ -111,10 +116,10 @@ impl SnapshotMemory for GuestMemoryMmap { } /// Dumps all contents of GuestMemoryMmap to a writer. 
- fn dump(&self, writer: &mut T) -> Result<(), SnapshotMemoryError> { + fn dump(&self, writer: &mut T) -> Result<(), MemoryError> { self.iter() .try_for_each(|region| Ok(writer.write_all_volatile(®ion.as_volatile_slice()?)?)) - .map_err(SnapshotMemoryError::WriteMemory) + .map_err(MemoryError::WriteMemory) } /// Dumps all pages of GuestMemoryMmap present in `dirty_bitmap` to a writer. @@ -122,9 +127,9 @@ impl SnapshotMemory for GuestMemoryMmap { &self, writer: &mut T, dirty_bitmap: &DirtyBitmap, - ) -> Result<(), SnapshotMemoryError> { + ) -> Result<(), MemoryError> { let mut writer_offset = 0; - let page_size = get_page_size()?; + let page_size = get_page_size().map_err(MemoryError::PageSize)?; self.iter() .enumerate() @@ -175,7 +180,7 @@ impl SnapshotMemory for GuestMemoryMmap { Ok(()) }) - .map_err(SnapshotMemoryError::WriteMemory) + .map_err(MemoryError::WriteMemory) } /// Creates a GuestMemoryMmap backed by a `file` if present, otherwise backed @@ -184,19 +189,21 @@ impl SnapshotMemory for GuestMemoryMmap { file: Option<&File>, state: &GuestMemoryState, track_dirty_pages: bool, - ) -> Result { + ) -> Result { let mut regions = vec![]; for region in state.regions.iter() { let f = match file { - Some(f) => Some(FileOffset::new(f.try_clone()?, region.offset)), + Some(f) => Some(FileOffset::new( + f.try_clone().map_err(MemoryError::FileHandle)?, + region.offset, + )), None => None, }; regions.push((f, GuestAddress(region.base_address), region.size)); } - crate::vstate::memory::create_guest_memory(®ions, track_dirty_pages) - .map_err(SnapshotMemoryError::CreateMemory) + create_guest_memory(®ions, track_dirty_pages) } } @@ -219,7 +226,7 @@ fn build_guarded_region( prot: i32, flags: i32, track_dirty_pages: bool, -) -> Result { +) -> Result { let page_size = utils::get_page_size().expect("Cannot retrieve page size."); // Create the guarded range size (received size + X pages), // where X is defined as a constant GUARD_PAGE_COUNT. 
@@ -239,12 +246,14 @@ fn build_guarded_region( }; if guard_addr == libc::MAP_FAILED { - return Err(MmapRegionError::Mmap(IoError::last_os_error())); + return Err(MemoryError::MmapRegionError(MmapRegionError::Mmap( + IoError::last_os_error(), + ))); } let (fd, offset) = match file_offset { Some(file_offset) => { - check_file_offset(file_offset, size)?; + check_file_offset(file_offset, size).map_err(MemoryError::MmapRegionError)?; (file_offset.file().as_raw_fd(), file_offset.start()) } None => (-1, 0), @@ -267,7 +276,9 @@ fn build_guarded_region( }; if region_addr == libc::MAP_FAILED { - return Err(MmapRegionError::Mmap(IoError::last_os_error())); + return Err(MemoryError::MmapRegionError(MmapRegionError::Mmap( + IoError::last_os_error(), + ))); } let bitmap = match track_dirty_pages { @@ -282,6 +293,7 @@ fn build_guarded_region( .with_mmap_prot(prot) .with_mmap_flags(flags) .build() + .map_err(MemoryError::MmapRegionError) } } @@ -289,7 +301,7 @@ fn build_guarded_region( pub fn create_guest_memory( regions: &[(Option, GuestAddress, usize)], track_dirty_pages: bool, -) -> Result { +) -> Result { let prot = libc::PROT_READ | libc::PROT_WRITE; let mut mmap_regions = Vec::with_capacity(regions.len()); @@ -305,13 +317,15 @@ pub fn create_guest_memory( prot, flags, track_dirty_pages, - ) - .map_err(VmMemoryError::MmapRegion)?; + )?; - mmap_regions.push(GuestRegionMmap::new(mmap_region, *guest_address)?); + mmap_regions.push( + GuestRegionMmap::new(mmap_region, *guest_address) + .map_err(MemoryError::VmMemoryError)?, + ); } - GuestMemoryMmap::from_regions(mmap_regions) + GuestMemoryMmap::from_regions(mmap_regions).map_err(MemoryError::VmMemoryError) } /// Mark memory range as dirty @@ -365,7 +379,7 @@ pub mod test_utils { pub fn create_anon_guest_memory( regions: &[(GuestAddress, usize)], track_dirty_pages: bool, - ) -> Result { + ) -> Result { create_guest_memory( ®ions.iter().map(|r| (None, r.0, r.1)).collect::>(), track_dirty_pages, @@ -704,7 +718,7 @@ mod tests { let 
guest_memory = crate::vstate::memory::create_guest_memory(&mem_regions[..], true).unwrap(); // Check that Firecracker bitmap is clean. - let _res: Result<(), SnapshotMemoryError> = guest_memory.iter().try_for_each(|r| { + let _res: Result<(), MemoryError> = guest_memory.iter().try_for_each(|r| { assert!(!r.bitmap().dirty_at(0)); assert!(!r.bitmap().dirty_at(1)); Ok(()) From eb6858f42682dcb2452a4806201b691037557f45 Mon Sep 17 00:00:00 2001 From: Egor Lazarchuk Date: Tue, 26 Sep 2023 15:14:58 +0000 Subject: [PATCH 07/14] refactor(memory): GuestMemoryExtension introduction Changed `SnapshotMemory` trait to `GuestMemoryExtension` and moved all public methods that create GuestMemoryMmap into it. This removed a need for a public `test_utils` module and it makes all methods that directly act on `GuestMemoryMmap` to be associated with it. Signed-off-by: Egor Lazarchuk --- src/vmm/src/arch/aarch64/fdt.rs | 13 +- src/vmm/src/arch/aarch64/mod.rs | 13 +- src/vmm/src/arch/aarch64/vcpu.rs | 5 +- src/vmm/src/arch/x86_64/mod.rs | 19 +- src/vmm/src/arch/x86_64/mptable.rs | 17 +- src/vmm/src/arch/x86_64/regs.rs | 15 +- src/vmm/src/builder.rs | 36 +- src/vmm/src/device_manager/legacy.rs | 9 +- src/vmm/src/device_manager/mmio.rs | 8 +- src/vmm/src/devices/virtio/balloon/device.rs | 9 +- .../src/devices/virtio/block/io/async_io.rs | 4 +- src/vmm/src/devices/virtio/block/io/mod.rs | 8 +- src/vmm/src/devices/virtio/block/request.rs | 7 +- src/vmm/src/devices/virtio/iovec.rs | 5 +- src/vmm/src/devices/virtio/mmio.rs | 32 +- src/vmm/src/devices/virtio/net/test_utils.rs | 6 +- src/vmm/src/devices/virtio/queue.rs | 5 +- src/vmm/src/devices/virtio/test_utils.rs | 17 +- .../src/devices/virtio/vsock/event_handler.rs | 4 +- src/vmm/src/devices/virtio/vsock/packet.rs | 5 +- src/vmm/src/lib.rs | 4 +- src/vmm/src/persist.rs | 6 +- src/vmm/src/vstate/memory.rs | 336 ++++++++++-------- src/vmm/src/vstate/vm.rs | 28 +- src/vmm/tests/integration_tests.rs | 4 +- 25 files changed, 279 insertions(+), 336 
deletions(-) diff --git a/src/vmm/src/arch/aarch64/fdt.rs b/src/vmm/src/arch/aarch64/fdt.rs index a2367467d06..fd482f82718 100644 --- a/src/vmm/src/arch/aarch64/fdt.rs +++ b/src/vmm/src/arch/aarch64/fdt.rs @@ -426,6 +426,7 @@ mod tests { use super::*; use crate::arch::aarch64::gic::create_gic; use crate::arch::aarch64::{arch_memory_regions, layout}; + use crate::vstate::memory::{GuestMemoryExtension, GuestMemoryMmap}; const LEN: u64 = 4096; @@ -458,8 +459,8 @@ mod tests { #[test] fn test_create_fdt_with_devices() { let regions = arch_memory_regions(layout::FDT_MAX_SIZE + 0x1000); - let mem = crate::vstate::memory::test_utils::create_anon_guest_memory(®ions, false) - .expect("Cannot initialize memory"); + let mem = + GuestMemoryMmap::from_raw_regions(®ions, false).expect("Cannot initialize memory"); let dev_info: HashMap<(DeviceType, std::string::String), MMIODeviceInfo> = [ ( @@ -498,8 +499,8 @@ mod tests { #[test] fn test_create_fdt() { let regions = arch_memory_regions(layout::FDT_MAX_SIZE + 0x1000); - let mem = crate::vstate::memory::test_utils::create_anon_guest_memory(®ions, false) - .expect("Cannot initialize memory"); + let mem = + GuestMemoryMmap::from_raw_regions(®ions, false).expect("Cannot initialize memory"); let kvm = Kvm::new().unwrap(); let vm = kvm.create_vm().unwrap(); let gic = create_gic(&vm, 1, None).unwrap(); @@ -556,8 +557,8 @@ mod tests { #[test] fn test_create_fdt_with_initrd() { let regions = arch_memory_regions(layout::FDT_MAX_SIZE + 0x1000); - let mem = crate::vstate::memory::test_utils::create_anon_guest_memory(®ions, false) - .expect("Cannot initialize memory"); + let mem = + GuestMemoryMmap::from_raw_regions(®ions, false).expect("Cannot initialize memory"); let kvm = Kvm::new().unwrap(); let vm = kvm.create_vm().unwrap(); let gic = create_gic(&vm, 1, None).unwrap(); diff --git a/src/vmm/src/arch/aarch64/mod.rs b/src/vmm/src/arch/aarch64/mod.rs index 7c28d554f8f..cb8dedf14c9 100644 --- a/src/vmm/src/arch/aarch64/mod.rs +++ 
b/src/vmm/src/arch/aarch64/mod.rs @@ -114,6 +114,7 @@ fn get_fdt_addr(mem: &GuestMemoryMmap) -> u64 { #[cfg(test)] mod tests { use super::*; + use crate::vstate::memory::GuestMemoryExtension; #[test] fn test_regions_lt_1024gb() { @@ -134,18 +135,18 @@ mod tests { #[test] fn test_get_fdt_addr() { let regions = arch_memory_regions(layout::FDT_MAX_SIZE - 0x1000); - let mem = crate::vstate::memory::test_utils::create_anon_guest_memory(®ions, false) - .expect("Cannot initialize memory"); + let mem = + GuestMemoryMmap::from_raw_regions(®ions, false).expect("Cannot initialize memory"); assert_eq!(get_fdt_addr(&mem), layout::DRAM_MEM_START); let regions = arch_memory_regions(layout::FDT_MAX_SIZE); - let mem = crate::vstate::memory::test_utils::create_anon_guest_memory(®ions, false) - .expect("Cannot initialize memory"); + let mem = + GuestMemoryMmap::from_raw_regions(®ions, false).expect("Cannot initialize memory"); assert_eq!(get_fdt_addr(&mem), layout::DRAM_MEM_START); let regions = arch_memory_regions(layout::FDT_MAX_SIZE + 0x1000); - let mem = crate::vstate::memory::test_utils::create_anon_guest_memory(®ions, false) - .expect("Cannot initialize memory"); + let mem = + GuestMemoryMmap::from_raw_regions(®ions, false).expect("Cannot initialize memory"); assert_eq!(get_fdt_addr(&mem), 0x1000 + layout::DRAM_MEM_START); } } diff --git a/src/vmm/src/arch/aarch64/vcpu.rs b/src/vmm/src/arch/aarch64/vcpu.rs index 48e7c92f556..ce27272d60d 100644 --- a/src/vmm/src/arch/aarch64/vcpu.rs +++ b/src/vmm/src/arch/aarch64/vcpu.rs @@ -201,6 +201,7 @@ mod tests { use super::*; use crate::arch::aarch64::{arch_memory_regions, layout}; + use crate::vstate::memory::GuestMemoryExtension; #[test] fn test_setup_regs() { @@ -208,8 +209,8 @@ mod tests { let vm = kvm.create_vm().unwrap(); let vcpu = vm.create_vcpu(0).unwrap(); let regions = arch_memory_regions(layout::FDT_MAX_SIZE + 0x1000); - let mem = crate::vstate::memory::test_utils::create_anon_guest_memory(®ions, false) - .expect("Cannot 
initialize memory"); + let mem = + GuestMemoryMmap::from_raw_regions(®ions, false).expect("Cannot initialize memory"); let res = setup_boot_regs(&vcpu, 0, 0x0, &mem); assert!(matches!( diff --git a/src/vmm/src/arch/x86_64/mod.rs b/src/vmm/src/arch/x86_64/mod.rs index 47af1ecd8d3..ded5d0db26f 100644 --- a/src/vmm/src/arch/x86_64/mod.rs +++ b/src/vmm/src/arch/x86_64/mod.rs @@ -210,6 +210,7 @@ mod tests { use linux_loader::loader::bootparam::boot_e820_entry; use super::*; + use crate::vstate::memory::GuestMemoryExtension; #[test] fn regions_lt_4gb() { @@ -230,11 +231,7 @@ mod tests { #[test] fn test_system_configuration() { let no_vcpus = 4; - let gm = crate::vstate::memory::test_utils::create_anon_guest_memory( - &[(GuestAddress(0), 0x10000)], - false, - ) - .unwrap(); + let gm = GuestMemoryMmap::from_raw_regions(&[(GuestAddress(0), 0x10000)], false).unwrap(); let config_err = configure_system(&gm, GuestAddress(0), 0, &None, 1); assert!(config_err.is_err()); assert_eq!( @@ -245,25 +242,19 @@ mod tests { // Now assigning some memory that falls before the 32bit memory hole. let mem_size = 128 << 20; let arch_mem_regions = arch_memory_regions(mem_size); - let gm = - crate::vstate::memory::test_utils::create_anon_guest_memory(&arch_mem_regions, false) - .unwrap(); + let gm = GuestMemoryMmap::from_raw_regions(&arch_mem_regions, false).unwrap(); configure_system(&gm, GuestAddress(0), 0, &None, no_vcpus).unwrap(); // Now assigning some memory that is equal to the start of the 32bit memory hole. let mem_size = 3328 << 20; let arch_mem_regions = arch_memory_regions(mem_size); - let gm = - crate::vstate::memory::test_utils::create_anon_guest_memory(&arch_mem_regions, false) - .unwrap(); + let gm = GuestMemoryMmap::from_raw_regions(&arch_mem_regions, false).unwrap(); configure_system(&gm, GuestAddress(0), 0, &None, no_vcpus).unwrap(); // Now assigning some memory that falls after the 32bit memory hole. 
let mem_size = 3330 << 20; let arch_mem_regions = arch_memory_regions(mem_size); - let gm = - crate::vstate::memory::test_utils::create_anon_guest_memory(&arch_mem_regions, false) - .unwrap(); + let gm = GuestMemoryMmap::from_raw_regions(&arch_mem_regions, false).unwrap(); configure_system(&gm, GuestAddress(0), 0, &None, no_vcpus).unwrap(); } diff --git a/src/vmm/src/arch/x86_64/mptable.rs b/src/vmm/src/arch/x86_64/mptable.rs index cd8ab5b97fa..45d3a1dc2df 100644 --- a/src/vmm/src/arch/x86_64/mptable.rs +++ b/src/vmm/src/arch/x86_64/mptable.rs @@ -288,8 +288,9 @@ pub fn setup_mptable(mem: &GuestMemoryMmap, num_cpus: u8) -> Result<(), MptableE #[cfg(test)] mod tests { + use super::*; - use crate::vstate::memory::Bytes; + use crate::vstate::memory::{Bytes, GuestMemoryExtension}; fn table_entry_size(type_: u8) -> usize { match u32::from(type_) { @@ -305,7 +306,7 @@ mod tests { #[test] fn bounds_check() { let num_cpus = 4; - let mem = crate::vstate::memory::test_utils::create_guest_memory_unguarded( + let mem = GuestMemoryMmap::from_raw_regions_unguarded( &[(GuestAddress(MPTABLE_START), compute_mp_size(num_cpus))], false, ) @@ -317,7 +318,7 @@ mod tests { #[test] fn bounds_check_fails() { let num_cpus = 4; - let mem = crate::vstate::memory::test_utils::create_guest_memory_unguarded( + let mem = GuestMemoryMmap::from_raw_regions_unguarded( &[(GuestAddress(MPTABLE_START), compute_mp_size(num_cpus) - 1)], false, ) @@ -329,7 +330,7 @@ mod tests { #[test] fn mpf_intel_checksum() { let num_cpus = 1; - let mem = crate::vstate::memory::test_utils::create_guest_memory_unguarded( + let mem = GuestMemoryMmap::from_raw_regions_unguarded( &[(GuestAddress(MPTABLE_START), compute_mp_size(num_cpus))], false, ) @@ -345,7 +346,7 @@ mod tests { #[test] fn mpc_table_checksum() { let num_cpus = 4; - let mem = crate::vstate::memory::test_utils::create_guest_memory_unguarded( + let mem = GuestMemoryMmap::from_raw_regions_unguarded( &[(GuestAddress(MPTABLE_START), compute_mp_size(num_cpus))], 
false, ) @@ -379,7 +380,7 @@ mod tests { #[test] fn cpu_entry_count() { - let mem = crate::vstate::memory::test_utils::create_guest_memory_unguarded( + let mem = GuestMemoryMmap::from_raw_regions_unguarded( &[( GuestAddress(MPTABLE_START), compute_mp_size(MAX_SUPPORTED_CPUS), @@ -417,8 +418,8 @@ mod tests { #[test] fn cpu_entry_count_max() { let cpus = MAX_SUPPORTED_CPUS + 1; - let mem = crate::vstate::memory::test_utils::create_guest_memory_unguarded( - &[(GuestAddress(MPTABLE_START), compute_mp_size(cpus as u8))], + let mem = GuestMemoryMmap::from_raw_regions_unguarded( + &[(GuestAddress(MPTABLE_START), compute_mp_size(cpus))], false, ) .unwrap(); diff --git a/src/vmm/src/arch/x86_64/regs.rs b/src/vmm/src/arch/x86_64/regs.rs index 226ba204c67..b1d344f4e35 100644 --- a/src/vmm/src/arch/x86_64/regs.rs +++ b/src/vmm/src/arch/x86_64/regs.rs @@ -242,23 +242,16 @@ mod tests { use utils::u64_to_usize; use super::*; - use crate::vstate::memory::{Bytes, GuestAddress, GuestMemoryMmap}; + use crate::vstate::memory::{Bytes, GuestAddress, GuestMemoryExtension, GuestMemoryMmap}; fn create_guest_mem(mem_size: Option) -> GuestMemoryMmap { let page_size = 0x10000usize; let mem_size = u64_to_usize(mem_size.unwrap_or(page_size as u64)); if mem_size % page_size == 0 { - crate::vstate::memory::test_utils::create_anon_guest_memory( - &[(GuestAddress(0), mem_size)], - false, - ) - .unwrap() + GuestMemoryMmap::from_raw_regions(&[(GuestAddress(0), mem_size)], false).unwrap() } else { - crate::vstate::memory::test_utils::create_guest_memory_unguarded( - &[(GuestAddress(0), mem_size)], - false, - ) - .unwrap() + GuestMemoryMmap::from_raw_regions_unguarded(&[(GuestAddress(0), mem_size)], false) + .unwrap() } } diff --git a/src/vmm/src/builder.rs b/src/vmm/src/builder.rs index a8ada9b828e..0c834366276 100644 --- a/src/vmm/src/builder.rs +++ b/src/vmm/src/builder.rs @@ -55,7 +55,7 @@ use crate::vmm_config::boot_source::BootConfig; use crate::vmm_config::instance_info::InstanceInfo; use 
crate::vmm_config::machine_config::{MachineConfigUpdate, VmConfig, VmConfigError}; use crate::volatile::ReadVolatile; -use crate::vstate::memory::{GuestAddress, GuestMemory, GuestMemoryMmap}; +use crate::vstate::memory::{GuestAddress, GuestMemory, GuestMemoryExtension, GuestMemoryMmap}; use crate::vstate::vcpu::{Vcpu, VcpuConfig}; use crate::vstate::vm::Vm; use crate::{device_manager, EventManager, RestoreVcpusError, Vmm, VmmError}; @@ -265,7 +265,9 @@ pub fn build_microvm_for_boot( .ok_or(MissingKernelConfig)?; let track_dirty_pages = vm_resources.track_dirty_pages(); - let guest_memory = create_guest_memory(vm_resources.vm_config.mem_size_mib, track_dirty_pages)?; + let guest_memory = + GuestMemoryMmap::with_size(vm_resources.vm_config.mem_size_mib, track_dirty_pages) + .map_err(StartMicrovmError::GuestMemory)?; let entry_addr = load_kernel(boot_config, &guest_memory)?; let initrd = load_initrd_from_config(boot_config, &guest_memory)?; // Clone the command-line so that a failed boot doesn't pollute the original. @@ -540,24 +542,6 @@ pub fn build_microvm_from_snapshot( Ok(vmm) } -/// Creates GuestMemory of `mem_size_mib` MiB in size. 
-pub fn create_guest_memory( - mem_size_mib: usize, - track_dirty_pages: bool, -) -> Result { - let mem_size = mem_size_mib << 20; - let arch_mem_regions = crate::arch::arch_memory_regions(mem_size); - - crate::vstate::memory::create_guest_memory( - &arch_mem_regions - .iter() - .map(|(addr, size)| (None, *addr, *size)) - .collect::>()[..], - track_dirty_pages, - ) - .map_err(StartMicrovmError::GuestMemory) -} - fn load_kernel( boot_config: &BootConfig, guest_memory: &GuestMemoryMmap, @@ -986,7 +970,6 @@ pub mod tests { use crate::vmm_config::net::{NetBuilder, NetworkInterfaceConfig}; use crate::vmm_config::vsock::tests::default_config; use crate::vmm_config::vsock::{VsockBuilder, VsockDeviceConfig}; - use crate::vstate::memory::GuestMemory; #[derive(Debug)] pub(crate) struct CustomBlockConfig { @@ -1051,7 +1034,7 @@ pub mod tests { } pub(crate) fn default_vmm() -> Vmm { - let guest_memory = create_guest_memory(128, false).unwrap(); + let guest_memory = GuestMemoryMmap::with_size(128, false).unwrap(); let vcpus_exit_evt = EventFd::new(libc::EFD_NONBLOCK) .map_err(VmmError::EventFd) @@ -1228,8 +1211,7 @@ pub mod tests { } fn create_guest_mem_at(at: GuestAddress, size: usize) -> GuestMemoryMmap { - crate::vstate::memory::test_utils::create_guest_memory_unguarded(&[(at, size)], false) - .unwrap() + GuestMemoryMmap::from_raw_regions_unguarded(&[(at, size)], false).unwrap() } pub(crate) fn create_guest_mem_with_size(size: usize) -> GuestMemoryMmap { @@ -1305,13 +1287,13 @@ pub mod tests { // Case 1: create guest memory without dirty page tracking { - let guest_memory = create_guest_memory(mem_size, false).unwrap(); + let guest_memory = GuestMemoryMmap::with_size(mem_size, false).unwrap(); assert!(!is_dirty_tracking_enabled(&guest_memory)); } // Case 2: create guest memory with dirty page tracking { - let guest_memory = create_guest_memory(mem_size, true).unwrap(); + let guest_memory = GuestMemoryMmap::with_size(mem_size, true).unwrap(); 
assert!(is_dirty_tracking_enabled(&guest_memory)); } } @@ -1319,7 +1301,7 @@ pub mod tests { #[test] fn test_create_vcpus() { let vcpu_count = 2; - let guest_memory = create_guest_memory(128, false).unwrap(); + let guest_memory = GuestMemoryMmap::with_size(128, false).unwrap(); #[allow(unused_mut)] let mut vm = Vm::new(vec![]).unwrap(); diff --git a/src/vmm/src/device_manager/legacy.rs b/src/vmm/src/device_manager/legacy.rs index 75dce565c76..8e8fe4e085b 100644 --- a/src/vmm/src/device_manager/legacy.rs +++ b/src/vmm/src/device_manager/legacy.rs @@ -172,16 +172,13 @@ impl PortIODeviceManager { #[cfg(test)] mod tests { use super::*; - use crate::vstate::memory::GuestAddress; + use crate::vstate::memory::{GuestAddress, GuestMemoryExtension, GuestMemoryMmap}; use crate::Vm; #[test] fn test_register_legacy_devices() { - let guest_mem = crate::vstate::memory::test_utils::create_anon_guest_memory( - &[(GuestAddress(0x0), 0x1000)], - false, - ) - .unwrap(); + let guest_mem = + GuestMemoryMmap::from_raw_regions(&[(GuestAddress(0x0), 0x1000)], false).unwrap(); let mut vm = Vm::new(vec![]).unwrap(); vm.memory_init(&guest_mem, false).unwrap(); crate::builder::setup_interrupt_controller(&mut vm).unwrap(); diff --git a/src/vmm/src/device_manager/mmio.rs b/src/vmm/src/device_manager/mmio.rs index 3f9258d8346..2c31aed8252 100644 --- a/src/vmm/src/device_manager/mmio.rs +++ b/src/vmm/src/device_manager/mmio.rs @@ -469,7 +469,7 @@ mod tests { use super::*; use crate::devices::virtio::{ActivateError, Queue, VirtioDevice}; - use crate::vstate::memory::{GuestAddress, GuestMemoryMmap}; + use crate::vstate::memory::{GuestAddress, GuestMemoryExtension, GuestMemoryMmap}; use crate::{builder, Vm}; const QUEUE_SIZES: &[u16] = &[64]; @@ -573,7 +573,7 @@ mod tests { fn test_register_virtio_device() { let start_addr1 = GuestAddress(0x0); let start_addr2 = GuestAddress(0x1000); - let guest_mem = crate::vstate::memory::test_utils::create_anon_guest_memory( + let guest_mem = 
GuestMemoryMmap::from_raw_regions( &[(start_addr1, 0x1000), (start_addr2, 0x1000)], false, ) @@ -603,7 +603,7 @@ mod tests { fn test_register_too_many_devices() { let start_addr1 = GuestAddress(0x0); let start_addr2 = GuestAddress(0x1000); - let guest_mem = crate::vstate::memory::test_utils::create_anon_guest_memory( + let guest_mem = GuestMemoryMmap::from_raw_regions( &[(start_addr1, 0x1000), (start_addr2, 0x1000)], false, ) @@ -663,7 +663,7 @@ mod tests { fn test_device_info() { let start_addr1 = GuestAddress(0x0); let start_addr2 = GuestAddress(0x1000); - let guest_mem = crate::vstate::memory::test_utils::create_anon_guest_memory( + let guest_mem = GuestMemoryMmap::from_raw_regions( &[(start_addr1, 0x1000), (start_addr2, 0x1000)], false, ) diff --git a/src/vmm/src/devices/virtio/balloon/device.rs b/src/vmm/src/devices/virtio/balloon/device.rs index 5e753af9f18..0f3efce8422 100644 --- a/src/vmm/src/devices/virtio/balloon/device.rs +++ b/src/vmm/src/devices/virtio/balloon/device.rs @@ -656,7 +656,7 @@ pub(crate) mod tests { }; use crate::devices::virtio::test_utils::{default_mem, VirtQueue}; use crate::devices::virtio::{VIRTQ_DESC_F_NEXT, VIRTQ_DESC_F_WRITE}; - use crate::vstate::memory::GuestAddress; + use crate::vstate::memory::{GuestAddress, GuestMemoryExtension}; impl Balloon { pub(crate) fn set_queue(&mut self, idx: usize, q: Queue) { @@ -1134,11 +1134,8 @@ pub(crate) mod tests { assert!(balloon.update_size(1).is_err()); // Switch the state to active. 
balloon.device_state = DeviceState::Activated( - crate::vstate::memory::test_utils::create_guest_memory_unguarded( - &[(GuestAddress(0x0), 0x1)], - false, - ) - .unwrap(), + GuestMemoryMmap::from_raw_regions_unguarded(&[(GuestAddress(0x0), 0x1)], false) + .unwrap(), ); assert_eq!(balloon.num_pages(), 0); diff --git a/src/vmm/src/devices/virtio/block/io/async_io.rs b/src/vmm/src/devices/virtio/block/io/async_io.rs index 5583aae09a3..f762fae6b7e 100644 --- a/src/vmm/src/devices/virtio/block/io/async_io.rs +++ b/src/vmm/src/devices/virtio/block/io/async_io.rs @@ -15,7 +15,7 @@ use crate::io_uring::operation::{Cqe, OpCode, Operation}; use crate::io_uring::restriction::Restriction; use crate::io_uring::{IoUring, IoUringError}; use crate::logger::log_dev_preview_warning; -use crate::vstate::memory::{mark_dirty_mem, GuestAddress, GuestMemory, GuestMemoryMmap}; +use crate::vstate::memory::{GuestAddress, GuestMemory, GuestMemoryExtension, GuestMemoryMmap}; #[derive(Debug)] pub enum AsyncIoError { @@ -58,7 +58,7 @@ impl WrappedUserData { fn mark_dirty_mem_and_unwrap(self, mem: &GuestMemoryMmap, count: u32) -> T { if let Some(addr) = self.addr { - mark_dirty_mem(mem, addr, count as usize) + mem.mark_dirty(addr, count as usize) } self.user_data diff --git a/src/vmm/src/devices/virtio/block/io/mod.rs b/src/vmm/src/devices/virtio/block/io/mod.rs index a8c48e29795..179e45bfae7 100644 --- a/src/vmm/src/devices/virtio/block/io/mod.rs +++ b/src/vmm/src/devices/virtio/block/io/mod.rs @@ -193,7 +193,7 @@ pub mod tests { use super::*; use crate::devices::virtio::block::device::FileEngineType; use crate::devices::virtio::block::request::PendingRequest; - use crate::vstate::memory::{Bitmap, Bytes, GuestMemory}; + use crate::vstate::memory::{Bitmap, Bytes, GuestMemory, GuestMemoryExtension}; const FILE_LEN: u32 = 1024; // 2 pages of memory should be enough to test read/write ops and also dirty tracking. 
@@ -243,11 +243,7 @@ pub mod tests { } fn create_mem() -> GuestMemoryMmap { - crate::vstate::memory::test_utils::create_anon_guest_memory( - &[(GuestAddress(0), MEM_LEN)], - true, - ) - .unwrap() + GuestMemoryMmap::from_raw_regions(&[(GuestAddress(0), MEM_LEN)], true).unwrap() } fn check_dirty_mem(mem: &GuestMemoryMmap, addr: GuestAddress, len: u32) { diff --git a/src/vmm/src/devices/virtio/block/request.rs b/src/vmm/src/devices/virtio/block/request.rs index d2708038974..4937dbba5c1 100644 --- a/src/vmm/src/devices/virtio/block/request.rs +++ b/src/vmm/src/devices/virtio/block/request.rs @@ -412,8 +412,7 @@ mod tests { use super::*; use crate::devices::virtio::test_utils::{default_mem, single_region_mem, VirtQueue}; use crate::devices::virtio::{Queue, VIRTQ_DESC_F_NEXT, VIRTQ_DESC_F_WRITE}; - use crate::vstate::memory::test_utils::create_anon_guest_memory; - use crate::vstate::memory::{Address, GuestAddress, GuestMemory}; + use crate::vstate::memory::{Address, GuestAddress, GuestMemory, GuestMemoryExtension}; const NUM_DISK_SECTORS: u64 = 1024; @@ -777,7 +776,7 @@ mod tests { // Randomize descriptor addresses. Assumed page size as max buffer len. let base_addr = sparsity & 0x0000_FFFF_FFFF_F000; // 48 bit base, page aligned. - let max_desc_len = 0x1000; + let max_desc_len: u32 = 0x1000; // First addr starts at page base + 1. 
let req_type_addr = GuestAddress(base_addr).checked_add(0x1000).unwrap(); @@ -793,7 +792,7 @@ mod tests { let status_addr = data_addr.checked_add(next_desc_dist).unwrap(); let mem_end = status_addr.checked_add(u64::from(max_desc_len)).unwrap(); - let mem: GuestMemoryMmap = create_anon_guest_memory( + let mem = GuestMemoryMmap::from_raw_regions( &[( GuestAddress(base_addr), (mem_end.0 - base_addr).try_into().unwrap(), diff --git a/src/vmm/src/devices/virtio/iovec.rs b/src/vmm/src/devices/virtio/iovec.rs index 382168d895b..a1e47daee18 100644 --- a/src/vmm/src/devices/virtio/iovec.rs +++ b/src/vmm/src/devices/virtio/iovec.rs @@ -275,8 +275,7 @@ mod tests { use super::{IoVecBuffer, IoVecBufferMut}; use crate::devices::virtio::queue::{Queue, VIRTQ_DESC_F_NEXT, VIRTQ_DESC_F_WRITE}; use crate::devices::virtio::test_utils::VirtQueue; - use crate::vstate::memory::test_utils::create_anon_guest_memory; - use crate::vstate::memory::{Bytes, GuestAddress, GuestMemoryMmap}; + use crate::vstate::memory::{Bytes, GuestAddress, GuestMemoryExtension, GuestMemoryMmap}; impl<'a> From<&'a [u8]> for IoVecBuffer { fn from(buf: &'a [u8]) -> Self { @@ -321,7 +320,7 @@ mod tests { } fn default_mem() -> GuestMemoryMmap { - create_anon_guest_memory( + GuestMemoryMmap::from_raw_regions( &[ (GuestAddress(0), 0x10000), (GuestAddress(0x20000), 0x10000), diff --git a/src/vmm/src/devices/virtio/mmio.rs b/src/vmm/src/devices/virtio/mmio.rs index f80cce8aec7..755650c3f9f 100644 --- a/src/vmm/src/devices/virtio/mmio.rs +++ b/src/vmm/src/devices/virtio/mmio.rs @@ -332,7 +332,7 @@ pub(crate) mod tests { use utils::u64_to_usize; use super::*; - use crate::vstate::memory::GuestMemoryMmap; + use crate::vstate::memory::{GuestMemoryExtension, GuestMemoryMmap}; #[derive(Debug)] pub(crate) struct DummyDevice { @@ -433,11 +433,7 @@ pub(crate) mod tests { #[test] fn test_new() { - let m = crate::vstate::memory::test_utils::create_anon_guest_memory( - &[(GuestAddress(0), 0x1000)], - false, - ) - .unwrap(); + let m 
= GuestMemoryMmap::from_raw_regions(&[(GuestAddress(0), 0x1000)], false).unwrap(); let mut dummy = DummyDevice::new(); // Validate reset is no-op. assert!(dummy.reset().is_none()); @@ -469,11 +465,7 @@ pub(crate) mod tests { #[test] fn test_bus_device_read() { - let m = crate::vstate::memory::test_utils::create_anon_guest_memory( - &[(GuestAddress(0), 0x1000)], - false, - ) - .unwrap(); + let m = GuestMemoryMmap::from_raw_regions(&[(GuestAddress(0), 0x1000)], false).unwrap(); let mut d = MmioTransport::new(m, Arc::new(Mutex::new(DummyDevice::new()))); let mut buf = vec![0xff, 0, 0xfe, 0]; @@ -551,11 +543,7 @@ pub(crate) mod tests { #[test] #[allow(clippy::cognitive_complexity)] fn test_bus_device_write() { - let m = crate::vstate::memory::test_utils::create_anon_guest_memory( - &[(GuestAddress(0), 0x1000)], - false, - ) - .unwrap(); + let m = GuestMemoryMmap::from_raw_regions(&[(GuestAddress(0), 0x1000)], false).unwrap(); let dummy_dev = Arc::new(Mutex::new(DummyDevice::new())); let mut d = MmioTransport::new(m, dummy_dev.clone()); let mut buf = vec![0; 5]; @@ -714,11 +702,7 @@ pub(crate) mod tests { #[test] fn test_bus_device_activate() { - let m = crate::vstate::memory::test_utils::create_anon_guest_memory( - &[(GuestAddress(0), 0x1000)], - false, - ) - .unwrap(); + let m = GuestMemoryMmap::from_raw_regions(&[(GuestAddress(0), 0x1000)], false).unwrap(); let mut d = MmioTransport::new(m, Arc::new(Mutex::new(DummyDevice::new()))); assert!(!d.are_queues_valid()); @@ -836,11 +820,7 @@ pub(crate) mod tests { #[test] fn test_bus_device_reset() { - let m = crate::vstate::memory::test_utils::create_anon_guest_memory( - &[(GuestAddress(0), 0x1000)], - false, - ) - .unwrap(); + let m = GuestMemoryMmap::from_raw_regions(&[(GuestAddress(0), 0x1000)], false).unwrap(); let mut d = MmioTransport::new(m, Arc::new(Mutex::new(DummyDevice::new()))); let mut buf = vec![0; 4]; diff --git a/src/vmm/src/devices/virtio/net/test_utils.rs b/src/vmm/src/devices/virtio/net/test_utils.rs 
index 1d868309c81..29ccaac9ac4 100644 --- a/src/vmm/src/devices/virtio/net/test_utils.rs +++ b/src/vmm/src/devices/virtio/net/test_utils.rs @@ -364,7 +364,9 @@ pub mod test { VIRTQ_DESC_F_WRITE, }; use crate::logger::{IncMetric, METRICS}; - use crate::vstate::memory::{Address, Bytes, GuestAddress, GuestMemoryMmap}; + use crate::vstate::memory::{ + Address, Bytes, GuestAddress, GuestMemoryExtension, GuestMemoryMmap, + }; pub struct TestHelper<'a> { pub event_manager: EventManager>>, @@ -394,7 +396,7 @@ pub mod test { pub fn get_default() -> TestHelper<'a> { let mut event_manager = EventManager::new().unwrap(); let mut net = default_net(); - let mem = crate::vstate::memory::test_utils::create_guest_memory_unguarded( + let mem = GuestMemoryMmap::from_raw_regions_unguarded( &[(GuestAddress(0), MAX_BUFFER_SIZE)], false, ) diff --git a/src/vmm/src/devices/virtio/queue.rs b/src/vmm/src/devices/virtio/queue.rs index c05dbe82c55..3192f85aabd 100644 --- a/src/vmm/src/devices/virtio/queue.rs +++ b/src/vmm/src/devices/virtio/queue.rs @@ -986,8 +986,7 @@ mod tests { pub use super::*; use crate::devices::virtio::test_utils::{default_mem, single_region_mem, VirtQueue}; use crate::devices::virtio::QueueError::{DescIndexOutOfBounds, UsedRing}; - use crate::vstate::memory::test_utils::create_anon_guest_memory; - use crate::vstate::memory::{GuestAddress, GuestMemoryMmap}; + use crate::vstate::memory::{GuestAddress, GuestMemoryExtension, GuestMemoryMmap}; impl Queue { fn avail_event(&self, mem: &GuestMemoryMmap) -> u16 { @@ -1001,7 +1000,7 @@ mod tests { #[test] fn test_checked_new_descriptor_chain() { - let m = &create_anon_guest_memory( + let m = &GuestMemoryMmap::from_raw_regions( &[(GuestAddress(0), 0x10000), (GuestAddress(0x20000), 0x2000)], false, ) diff --git a/src/vmm/src/devices/virtio/test_utils.rs b/src/vmm/src/devices/virtio/test_utils.rs index 498746c2247..564b59f7779 100644 --- a/src/vmm/src/devices/virtio/test_utils.rs +++ b/src/vmm/src/devices/virtio/test_utils.rs @@ 
-11,7 +11,7 @@ use std::sync::atomic::{AtomicUsize, Ordering}; use utils::u64_to_usize; use crate::devices::virtio::Queue; -use crate::vstate::memory::{Address, Bytes, GuestAddress, GuestMemoryMmap}; +use crate::vstate::memory::{Address, Bytes, GuestAddress, GuestMemoryExtension, GuestMemoryMmap}; #[macro_export] macro_rules! check_metric_after_block { @@ -25,11 +25,7 @@ macro_rules! check_metric_after_block { /// Creates a [`GuestMemoryMmap`] with a single region of the given size starting at guest physical /// address 0 pub fn single_region_mem(region_size: usize) -> GuestMemoryMmap { - crate::vstate::memory::test_utils::create_anon_guest_memory( - &[(GuestAddress(0), region_size)], - false, - ) - .unwrap() + GuestMemoryMmap::from_raw_regions_unguarded(&[(GuestAddress(0), region_size)], false).unwrap() } /// Creates a [`GuestMemoryMmap`] with a single region of size 65536 (= 0x10000 hex) starting at @@ -332,14 +328,11 @@ pub(crate) mod test { use crate::devices::virtio::test_utils::{VirtQueue, VirtqDesc}; use crate::devices::virtio::{Queue, VirtioDevice, MAX_BUFFER_SIZE, VIRTQ_DESC_F_NEXT}; - use crate::vstate::memory::{Address, GuestAddress, GuestMemoryMmap}; + use crate::vstate::memory::{Address, GuestAddress, GuestMemoryExtension, GuestMemoryMmap}; pub fn create_virtio_mem() -> GuestMemoryMmap { - crate::vstate::memory::test_utils::create_guest_memory_unguarded( - &[(GuestAddress(0), MAX_BUFFER_SIZE)], - false, - ) - .unwrap() + GuestMemoryMmap::from_raw_regions_unguarded(&[(GuestAddress(0), MAX_BUFFER_SIZE)], false) + .unwrap() } /// Provides functionality necessary for testing a VirtIO device with diff --git a/src/vmm/src/devices/virtio/vsock/event_handler.rs b/src/vmm/src/devices/virtio/vsock/event_handler.rs index b6cc65154f3..735ff80ae1b 100755 --- a/src/vmm/src/devices/virtio/vsock/event_handler.rs +++ b/src/vmm/src/devices/virtio/vsock/event_handler.rs @@ -213,7 +213,7 @@ mod tests { use super::*; use 
crate::devices::virtio::vsock::packet::VSOCK_PKT_HDR_SIZE; use crate::devices::virtio::vsock::test_utils::{EventHandlerContext, TestContext}; - use crate::vstate::memory::Bytes; + use crate::vstate::memory::{Bytes, GuestMemoryExtension}; #[test] fn test_txq_event() { @@ -461,7 +461,7 @@ mod tests { const MIB: usize = 1 << 20; let mut test_ctx = TestContext::new(); - test_ctx.mem = crate::vstate::memory::test_utils::create_anon_guest_memory( + test_ctx.mem = GuestMemoryMmap::from_raw_regions( &[ (GuestAddress(0), 8 * MIB), (GuestAddress((GAP_START_ADDR - MIB) as u64), MIB), diff --git a/src/vmm/src/devices/virtio/vsock/packet.rs b/src/vmm/src/devices/virtio/vsock/packet.rs index 68be5f1bb70..d54a94317c6 100644 --- a/src/vmm/src/devices/virtio/vsock/packet.rs +++ b/src/vmm/src/devices/virtio/vsock/packet.rs @@ -436,13 +436,14 @@ impl VsockPacket { #[cfg(test)] mod tests { + use super::*; use crate::devices::virtio::test_utils::VirtqDesc as GuestQDesc; use crate::devices::virtio::vsock::defs::MAX_PKT_BUF_SIZE; use crate::devices::virtio::vsock::device::{RXQ_INDEX, TXQ_INDEX}; use crate::devices::virtio::vsock::test_utils::TestContext; use crate::devices::virtio::VIRTQ_DESC_F_WRITE; - use crate::vstate::memory::{GuestAddress, GuestMemoryMmap}; + use crate::vstate::memory::{GuestAddress, GuestMemoryExtension, GuestMemoryMmap}; macro_rules! 
create_context { ($test_ctx:ident, $handler_ctx:ident) => { @@ -758,7 +759,7 @@ mod tests { fn test_check_bounds_for_buffer_access_edge_cases() { let mut test_ctx = TestContext::new(); - test_ctx.mem = crate::vstate::memory::test_utils::create_guest_memory_unguarded( + test_ctx.mem = GuestMemoryMmap::from_raw_regions_unguarded( &[ (GuestAddress(0), 500), (GuestAddress(500), 100), diff --git a/src/vmm/src/lib.rs b/src/vmm/src/lib.rs index e44fb748416..866aeb7a8d6 100644 --- a/src/vmm/src/lib.rs +++ b/src/vmm/src/lib.rs @@ -140,7 +140,9 @@ use crate::logger::{error, info, warn, MetricsError, METRICS}; use crate::persist::{MicrovmState, MicrovmStateError, VmInfo}; use crate::rate_limiter::BucketUpdate; use crate::vmm_config::instance_info::{InstanceInfo, VmState}; -use crate::vstate::memory::{GuestMemory, GuestMemoryMmap, GuestMemoryRegion, SnapshotMemory}; +use crate::vstate::memory::{ + GuestMemory, GuestMemoryExtension, GuestMemoryMmap, GuestMemoryRegion, +}; use crate::vstate::vcpu::VcpuState; pub use crate::vstate::vcpu::{Vcpu, VcpuConfig, VcpuEvent, VcpuHandle, VcpuResponse}; pub use crate::vstate::vm::Vm; diff --git a/src/vmm/src/persist.rs b/src/vmm/src/persist.rs index dbe5a3015de..1bcf2502c6a 100644 --- a/src/vmm/src/persist.rs +++ b/src/vmm/src/persist.rs @@ -46,7 +46,7 @@ use crate::vmm_config::snapshot::{ CreateSnapshotParams, LoadSnapshotParams, MemBackendType, SnapshotType, }; use crate::vstate::memory::{ - GuestMemory, GuestMemoryMmap, GuestMemoryState, MemoryError, SnapshotMemory, + GuestMemory, GuestMemoryExtension, GuestMemoryMmap, GuestMemoryState, MemoryError, }; use crate::vstate::vcpu::{VcpuSendEventError, VcpuState}; use crate::vstate::vm::VmState; @@ -568,7 +568,7 @@ fn guest_memory_from_file( track_dirty_pages: bool, ) -> Result { let mem_file = File::open(mem_file_path)?; - let guest_mem = GuestMemoryMmap::restore(Some(&mem_file), mem_state, track_dirty_pages)?; + let guest_mem = GuestMemoryMmap::from_state(Some(&mem_file), mem_state, 
track_dirty_pages)?; Ok(guest_mem) } @@ -593,7 +593,7 @@ fn guest_memory_from_uffd( track_dirty_pages: bool, enable_balloon: bool, ) -> Result<(GuestMemoryMmap, Option), GuestMemoryFromUffdError> { - let guest_memory = GuestMemoryMmap::restore(None, mem_state, track_dirty_pages)?; + let guest_memory = GuestMemoryMmap::from_state(None, mem_state, track_dirty_pages)?; let mut uffd_builder = UffdBuilder::new(); diff --git a/src/vmm/src/vstate/memory.rs b/src/vmm/src/vstate/memory.rs index 86881fcca15..7567ca93353 100644 --- a/src/vmm/src/vstate/memory.rs +++ b/src/vmm/src/vstate/memory.rs @@ -53,12 +53,39 @@ pub enum MemoryError { } /// Defines the interface for snapshotting memory. -pub trait SnapshotMemory +pub trait GuestMemoryExtension where Self: Sized, { + /// Creates a GuestMemoryMmap with `size` in MiB and guard pages. + fn with_size(size: usize, track_dirty_pages: bool) -> Result; + + /// Creates a GuestMemoryMmap from raw regions with guard pages. + fn from_raw_regions( + regions: &[(GuestAddress, usize)], + track_dirty_pages: bool, + ) -> Result; + + /// Creates a GuestMemoryMmap from raw regions with no guard pages. + fn from_raw_regions_unguarded( + regions: &[(GuestAddress, usize)], + track_dirty_pages: bool, + ) -> Result; + + /// Creates a GuestMemoryMmap given a `file` containing the data + /// and a `state` containing mapping information. + fn from_state( + file: Option<&File>, + state: &GuestMemoryState, + track_dirty_pages: bool, + ) -> Result; + /// Describes GuestMemoryMmap through a GuestMemoryState struct. fn describe(&self) -> GuestMemoryState; + + /// Mark memory range as dirty + fn mark_dirty(&self, addr: GuestAddress, len: usize); + /// Dumps all contents of GuestMemoryMmap to a writer. fn dump(&self, writer: &mut T) -> Result<(), MemoryError>; /// Dumps all pages of GuestMemoryMmap present in `dirty_bitmap` to a writer. 
@@ -67,13 +94,6 @@ where writer: &mut T, dirty_bitmap: &DirtyBitmap, ) -> Result<(), MemoryError>; - /// Creates a GuestMemoryMmap given a `file` containing the data - /// and a `state` containing mapping information. - fn restore( - file: Option<&File>, - state: &GuestMemoryState, - track_dirty_pages: bool, - ) -> Result; } /// State of a guest memory region saved to file/buffer. @@ -98,7 +118,115 @@ pub struct GuestMemoryState { pub regions: Vec, } -impl SnapshotMemory for GuestMemoryMmap { +impl GuestMemoryExtension for GuestMemoryMmap { + /// Creates a GuestMemoryMmap with `size` in MiB and guard pages. + fn with_size(size: usize, track_dirty_pages: bool) -> Result { + let mem_size = size << 20; + let regions = crate::arch::arch_memory_regions(mem_size); + + Self::from_raw_regions(®ions, track_dirty_pages) + } + + /// Creates a GuestMemoryMmap from raw regions with guard pages. + fn from_raw_regions( + regions: &[(GuestAddress, usize)], + track_dirty_pages: bool, + ) -> Result { + let prot = libc::PROT_READ | libc::PROT_WRITE; + let flags = libc::MAP_NORESERVE | libc::MAP_PRIVATE | libc::MAP_ANONYMOUS; + + let regions = regions + .iter() + .map(|(guest_address, region_size)| { + let region = + build_guarded_region(None, *region_size, prot, flags, track_dirty_pages)?; + GuestRegionMmap::new(region, *guest_address).map_err(MemoryError::VmMemoryError) + }) + .collect::, MemoryError>>()?; + + GuestMemoryMmap::from_regions(regions).map_err(MemoryError::VmMemoryError) + } + + /// Creates a GuestMemoryMmap from raw regions with no guard pages. 
+ fn from_raw_regions_unguarded( + regions: &[(GuestAddress, usize)], + track_dirty_pages: bool, + ) -> Result { + let prot = libc::PROT_READ | libc::PROT_WRITE; + let flags = libc::MAP_NORESERVE | libc::MAP_PRIVATE | libc::MAP_ANONYMOUS; + + let regions = regions + .iter() + .map(|(guest_address, region_size)| { + let region = MmapRegionBuilder::new_with_bitmap( + *region_size, + match track_dirty_pages { + true => Some(AtomicBitmap::with_len(*region_size)), + false => None, + }, + ) + .with_mmap_prot(prot) + .with_mmap_flags(flags) + .build() + .map_err(MemoryError::MmapRegionError)?; + GuestRegionMmap::new(region, *guest_address).map_err(MemoryError::VmMemoryError) + }) + .collect::, MemoryError>>()?; + + GuestMemoryMmap::from_regions(regions).map_err(MemoryError::VmMemoryError) + } + + /// Creates a GuestMemoryMmap backed by a `file` if present, otherwise backed + /// by anonymous memory. Memory layout and ranges are described in `state` param. + fn from_state( + file: Option<&File>, + state: &GuestMemoryState, + track_dirty_pages: bool, + ) -> Result { + match file { + Some(f) => { + let regions = state + .regions + .iter() + .map(|r| { + f.try_clone().map(|file_clone| { + let offset = FileOffset::new(file_clone, r.offset); + (offset, GuestAddress(r.base_address), r.size) + }) + }) + .collect::, std::io::Error>>() + .map_err(MemoryError::FileHandle)?; + + let prot = libc::PROT_READ | libc::PROT_WRITE; + let flags = libc::MAP_NORESERVE | libc::MAP_PRIVATE; + let regions = regions + .iter() + .map(|(file_offset, guest_address, region_size)| { + let region = build_guarded_region( + Some(file_offset), + *region_size, + prot, + flags, + track_dirty_pages, + )?; + GuestRegionMmap::new(region, *guest_address) + .map_err(MemoryError::VmMemoryError) + }) + .collect::, MemoryError>>()?; + + GuestMemoryMmap::from_regions(regions).map_err(MemoryError::VmMemoryError) + } + None => { + let regions = state + .regions + .iter() + .map(|r| (GuestAddress(r.base_address), 
r.size)) + .collect::>(); + Self::from_raw_regions(®ions, track_dirty_pages) + } + } + } + /// Describes GuestMemoryMmap through a GuestMemoryState struct. fn describe(&self) -> GuestMemoryState { let mut guest_memory_state = GuestMemoryState::default(); @@ -115,6 +243,16 @@ impl SnapshotMemory for GuestMemoryMmap { guest_memory_state } + /// Mark memory range as dirty + fn mark_dirty(&self, addr: GuestAddress, len: usize) { + let _ = self.try_access(len, addr, |_total, count, caddr, region| { + if let Some(bitmap) = region.bitmap() { + bitmap.mark_dirty(u64_to_usize(caddr.0), count); + } + Ok(count) + }); + } + /// Dumps all contents of GuestMemoryMmap to a writer. fn dump(&self, writer: &mut T) -> Result<(), MemoryError> { self.iter() @@ -182,29 +320,6 @@ impl SnapshotMemory for GuestMemoryMmap { }) .map_err(MemoryError::WriteMemory) } - - /// Creates a GuestMemoryMmap backed by a `file` if present, otherwise backed - /// by anonymous memory. Memory layout and ranges are described in `state` param. - fn restore( - file: Option<&File>, - state: &GuestMemoryState, - track_dirty_pages: bool, - ) -> Result { - let mut regions = vec![]; - for region in state.regions.iter() { - let f = match file { - Some(f) => Some(FileOffset::new( - f.try_clone().map_err(MemoryError::FileHandle)?, - region.offset, - )), - None => None, - }; - - regions.push((f, GuestAddress(region.base_address), region.size)); - } - - create_guest_memory(®ions, track_dirty_pages) - } } /// Build a `MmapRegion` surrounded by guard pages. @@ -297,96 +412,6 @@ fn build_guarded_region( } } -/// Helper for creating the guest memory. 
-pub fn create_guest_memory( - regions: &[(Option, GuestAddress, usize)], - track_dirty_pages: bool, -) -> Result { - let prot = libc::PROT_READ | libc::PROT_WRITE; - let mut mmap_regions = Vec::with_capacity(regions.len()); - - for (file_offset, guest_address, region_size) in regions { - let flags = match file_offset { - None => libc::MAP_NORESERVE | libc::MAP_PRIVATE | libc::MAP_ANONYMOUS, - Some(_) => libc::MAP_NORESERVE | libc::MAP_PRIVATE, - }; - - let mmap_region = build_guarded_region( - file_offset.as_ref(), - *region_size, - prot, - flags, - track_dirty_pages, - )?; - - mmap_regions.push( - GuestRegionMmap::new(mmap_region, *guest_address) - .map_err(MemoryError::VmMemoryError)?, - ); - } - - GuestMemoryMmap::from_regions(mmap_regions).map_err(MemoryError::VmMemoryError) -} - -/// Mark memory range as dirty -pub fn mark_dirty_mem(mem: &GuestMemoryMmap, addr: GuestAddress, len: usize) { - let _ = mem.try_access(len, addr, |_total, count, caddr, region| { - if let Some(bitmap) = region.bitmap() { - bitmap.mark_dirty(u64_to_usize(caddr.0), count); - } - Ok(count) - }); -} - -/// Public module with utilities used for testing. -pub mod test_utils { - use super::*; - - /// Test helper used to initialize the guest memory without adding guard pages. - /// This is needed because the default `create_guest_memory` - /// uses MmapRegionBuilder::build_raw() for setting up the memory with guard pages, which would - /// error if the size is not a multiple of the page size. - /// There are unit tests which need a custom memory size, not a multiple of the page size. 
- pub fn create_guest_memory_unguarded( - regions: &[(GuestAddress, usize)], - track_dirty_pages: bool, - ) -> Result { - let prot = libc::PROT_READ | libc::PROT_WRITE; - let flags = libc::MAP_NORESERVE | libc::MAP_PRIVATE | libc::MAP_ANONYMOUS; - let mut mmap_regions = Vec::with_capacity(regions.len()); - - for region in regions { - mmap_regions.push(GuestRegionMmap::new( - MmapRegionBuilder::new_with_bitmap( - region.1, - match track_dirty_pages { - true => Some(AtomicBitmap::with_len(region.1)), - false => None, - }, - ) - .with_mmap_prot(prot) - .with_mmap_flags(flags) - .build() - .map_err(VmMemoryError::MmapRegion)?, - region.0, - )?); - } - GuestMemoryMmap::from_regions(mmap_regions) - } - - /// Test helper used to initialize the guest memory, without the option of file-backed mmap. - /// It is just a little syntactic sugar that helps deduplicate test code. - pub fn create_anon_guest_memory( - regions: &[(GuestAddress, usize)], - track_dirty_pages: bool, - ) -> Result { - create_guest_memory( - ®ions.iter().map(|r| (None, r.0, r.1)).collect::>(), - track_dirty_pages, - ) - } -} - #[cfg(test)] mod tests { #![allow(clippy::undocumented_unsafe_blocks)] @@ -547,18 +572,18 @@ mod tests { } #[test] - fn test_create_guest_memory() { + fn test_from_raw_regions() { // Test that all regions are guarded. 
{ let region_size = 0x10000; let regions = vec![ - (None, GuestAddress(0x0), region_size), - (None, GuestAddress(0x10000), region_size), - (None, GuestAddress(0x20000), region_size), - (None, GuestAddress(0x30000), region_size), + (GuestAddress(0x0), region_size), + (GuestAddress(0x10000), region_size), + (GuestAddress(0x20000), region_size), + (GuestAddress(0x30000), region_size), ]; - let guest_memory = create_guest_memory(®ions, false).unwrap(); + let guest_memory = GuestMemoryMmap::from_raw_regions(®ions, false).unwrap(); guest_memory.iter().for_each(|region| { validate_guard_region(region); loop_guard_region_to_sigsegv(region); @@ -569,13 +594,13 @@ mod tests { { let region_size = 0x10000; let regions = vec![ - (None, GuestAddress(0x0), region_size), - (None, GuestAddress(0x10000), region_size), - (None, GuestAddress(0x20000), region_size), - (None, GuestAddress(0x30000), region_size), + (GuestAddress(0x0), region_size), + (GuestAddress(0x10000), region_size), + (GuestAddress(0x20000), region_size), + (GuestAddress(0x30000), region_size), ]; - let guest_memory = create_guest_memory(®ions, false).unwrap(); + let guest_memory = GuestMemoryMmap::from_raw_regions(®ions, false).unwrap(); guest_memory.iter().for_each(|region| { assert!(region.bitmap().is_none()); }); @@ -585,13 +610,13 @@ mod tests { { let region_size = 0x10000; let regions = vec![ - (None, GuestAddress(0x0), region_size), - (None, GuestAddress(0x10000), region_size), - (None, GuestAddress(0x20000), region_size), - (None, GuestAddress(0x30000), region_size), + (GuestAddress(0x0), region_size), + (GuestAddress(0x10000), region_size), + (GuestAddress(0x20000), region_size), + (GuestAddress(0x30000), region_size), ]; - let guest_memory = create_guest_memory(®ions, true).unwrap(); + let guest_memory = GuestMemoryMmap::from_raw_regions(®ions, true).unwrap(); guest_memory.iter().for_each(|region| { assert!(region.bitmap().is_some()); }); @@ -604,11 +629,11 @@ mod tests { let region_size = page_size * 3; 
let regions = vec![ - (None, GuestAddress(0), region_size), // pages 0-2 - (None, GuestAddress(region_size as u64), region_size), // pages 3-5 - (None, GuestAddress(region_size as u64 * 2), region_size), // pages 6-8 + (GuestAddress(0), region_size), // pages 0-2 + (GuestAddress(region_size as u64), region_size), // pages 3-5 + (GuestAddress(region_size as u64 * 2), region_size), // pages 6-8 ]; - let guest_memory = create_guest_memory(®ions, true).unwrap(); + let guest_memory = GuestMemoryMmap::from_raw_regions(®ions, true).unwrap(); let dirty_map = [ // page 0: not dirty @@ -626,7 +651,7 @@ mod tests { // Mark dirty memory for (addr, len, dirty) in &dirty_map { if *dirty { - mark_dirty_mem(&guest_memory, GuestAddress(*addr as u64), *len); + guest_memory.mark_dirty(GuestAddress(*addr as u64), *len); } } @@ -655,11 +680,10 @@ mod tests { // Two regions of one page each, with a one page gap between them. let mem_regions = [ - (None, GuestAddress(0), page_size), - (None, GuestAddress(page_size as u64 * 2), page_size), + (GuestAddress(0), page_size), + (GuestAddress(page_size as u64 * 2), page_size), ]; - let guest_memory = - crate::vstate::memory::create_guest_memory(&mem_regions[..], true).unwrap(); + let guest_memory = GuestMemoryMmap::from_raw_regions(&mem_regions[..], true).unwrap(); let expected_memory_state = GuestMemoryState { regions: vec![ @@ -681,11 +705,10 @@ mod tests { // Two regions of three pages each, with a one page gap between them. 
let mem_regions = [ - (None, GuestAddress(0), page_size * 3), - (None, GuestAddress(page_size as u64 * 4), page_size * 3), + (GuestAddress(0), page_size * 3), + (GuestAddress(page_size as u64 * 4), page_size * 3), ]; - let guest_memory = - crate::vstate::memory::create_guest_memory(&mem_regions[..], true).unwrap(); + let guest_memory = GuestMemoryMmap::from_raw_regions(&mem_regions[..], true).unwrap(); let expected_memory_state = GuestMemoryState { regions: vec![ @@ -712,11 +735,10 @@ mod tests { // Two regions of two pages each, with a one page gap between them. let mem_regions = [ - (None, GuestAddress(0), page_size * 2), - (None, GuestAddress(page_size as u64 * 3), page_size * 2), + (GuestAddress(0), page_size * 2), + (GuestAddress(page_size as u64 * 3), page_size * 2), ]; - let guest_memory = - crate::vstate::memory::create_guest_memory(&mem_regions[..], true).unwrap(); + let guest_memory = GuestMemoryMmap::from_raw_regions(&mem_regions[..], true).unwrap(); // Check that Firecracker bitmap is clean. let _res: Result<(), MemoryError> = guest_memory.iter().try_for_each(|r| { assert!(!r.bitmap().dirty_at(0)); @@ -743,7 +765,7 @@ mod tests { guest_memory.dump(&mut memory_file).unwrap(); let restored_guest_memory = - GuestMemoryMmap::restore(Some(&memory_file), &memory_state, false).unwrap(); + GuestMemoryMmap::from_state(Some(&memory_file), &memory_state, false).unwrap(); // Check that the region contents are the same. let mut actual_region = vec![0u8; page_size * 2]; @@ -775,7 +797,7 @@ mod tests { // We can restore from this because this is the first dirty dump. let restored_guest_memory = - GuestMemoryMmap::restore(Some(&file), &memory_state, false).unwrap(); + GuestMemoryMmap::from_state(Some(&file), &memory_state, false).unwrap(); // Check that the region contents are the same. 
let mut actual_region = vec![0u8; page_size * 2]; diff --git a/src/vmm/src/vstate/vm.rs b/src/vmm/src/vstate/vm.rs index 9f38707e957..e74982a1f6a 100644 --- a/src/vmm/src/vstate/vm.rs +++ b/src/vmm/src/vstate/vm.rs @@ -501,16 +501,13 @@ impl fmt::Debug for VmState { #[cfg(test)] pub(crate) mod tests { + use super::*; - use crate::vstate::memory::GuestAddress; + use crate::vstate::memory::{GuestAddress, GuestMemoryExtension, GuestMemoryMmap}; // Auxiliary function being used throughout the tests. pub(crate) fn setup_vm(mem_size: usize) -> (Vm, GuestMemoryMmap) { - let gm = crate::vstate::memory::test_utils::create_anon_guest_memory( - &[(GuestAddress(0), mem_size)], - false, - ) - .unwrap(); + let gm = GuestMemoryMmap::from_raw_regions(&[(GuestAddress(0), mem_size)], false).unwrap(); let vm = Vm::new(vec![]).expect("Cannot create new vm"); assert!(vm.memory_init(&gm, false).is_ok()); @@ -547,11 +544,7 @@ pub(crate) mod tests { let vm = Vm::new(vec![]).expect("Cannot create new vm"); // Create valid memory region and test that the initialization is successful. - let gm = crate::vstate::memory::test_utils::create_anon_guest_memory( - &[(GuestAddress(0), 0x1000)], - false, - ) - .unwrap(); + let gm = GuestMemoryMmap::from_raw_regions(&[(GuestAddress(0), 0x1000)], false).unwrap(); assert!(vm.memory_init(&gm, true).is_ok()); } @@ -614,21 +607,14 @@ pub(crate) mod tests { fn test_set_kvm_memory_regions() { let vm = Vm::new(vec![]).expect("Cannot create new vm"); - let gm = crate::vstate::memory::test_utils::create_anon_guest_memory( - &[(GuestAddress(0), 0x1000)], - false, - ) - .unwrap(); + let gm = GuestMemoryMmap::from_raw_regions(&[(GuestAddress(0), 0x1000)], false).unwrap(); let res = vm.set_kvm_memory_regions(&gm, false); assert!(res.is_ok()); // Trying to set a memory region with a size that is not a multiple of PAGE_SIZE // will result in error. 
- let gm = crate::vstate::memory::test_utils::create_guest_memory_unguarded( - &[(GuestAddress(0), 0x10)], - false, - ) - .unwrap(); + let gm = + GuestMemoryMmap::from_raw_regions_unguarded(&[(GuestAddress(0), 0x10)], false).unwrap(); let res = vm.set_kvm_memory_regions(&gm, false); assert_eq!( res.unwrap_err().to_string(), diff --git a/src/vmm/tests/integration_tests.rs b/src/vmm/tests/integration_tests.rs index ebc2f913529..06c7c036c8a 100644 --- a/src/vmm/tests/integration_tests.rs +++ b/src/vmm/tests/integration_tests.rs @@ -233,7 +233,7 @@ fn verify_create_snapshot(is_diff: bool) -> (TempFile, TempFile) { } fn verify_load_snapshot(snapshot_file: TempFile, memory_file: TempFile) { - use vmm::vstate::memory::{GuestMemoryMmap, SnapshotMemory}; + use vmm::vstate::memory::{GuestMemoryExtension, GuestMemoryMmap}; let mut event_manager = EventManager::new().unwrap(); let empty_seccomp_filters = get_empty_filters(); @@ -248,7 +248,7 @@ fn verify_load_snapshot(snapshot_file: TempFile, memory_file: TempFile) { VERSION_MAP.clone(), ) .unwrap(); - let mem = GuestMemoryMmap::restore( + let mem = GuestMemoryMmap::from_state( Some(memory_file.as_file()), µvm_state.memory_state, false, From 9cdcc108eeb17c341f94ed217a638f3786293337 Mon Sep 17 00:00:00 2001 From: Egor Lazarchuk Date: Tue, 26 Sep 2023 16:36:53 +0000 Subject: [PATCH 08/14] feat(memfd): added `memfd` backed guest memory Added method to create `memfd` file with needed size and seals. Added an ability to construct `GuestMemoryMmap` backed by a file. 
Changed expected error message for failed memory creation in `test_api` test Signed-off-by: Egor Lazarchuk --- Cargo.lock | 10 +++ src/vmm/Cargo.toml | 1 + src/vmm/src/builder.rs | 7 +- src/vmm/src/vstate/memory.rs | 89 +++++++++++++++++-- .../integration_tests/functional/test_api.py | 3 +- 5 files changed, 100 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 519d6bc0336..4543674013e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -767,6 +767,15 @@ version = "2.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f665ee40bc4a3c5590afb1e9677db74a508659dfd71e126420da8274909a0167" +[[package]] +name = "memfd" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2cffa4ad52c6f791f4f8b15f0c05f9824b2ced1160e88cc393d64fff9a8ac64" +dependencies = [ + "rustix 0.38.17", +] + [[package]] name = "micro_http" version = "0.1.0" @@ -1412,6 +1421,7 @@ dependencies = [ "libc", "linux-loader", "log", + "memfd", "micro_http", "proptest", "seccompiler", diff --git a/src/vmm/Cargo.toml b/src/vmm/Cargo.toml index ea9e0dc60aa..3050aa7dc97 100644 --- a/src/vmm/Cargo.toml +++ b/src/vmm/Cargo.toml @@ -18,6 +18,7 @@ kvm-ioctls = "0.15.0" lazy_static = "1.4.0" libc = "0.2.117" linux-loader = "0.9.0" +memfd = "0.6.3" serde = { version = "1.0.136", features = ["derive", "rc"] } semver = { version = "1.0.17", features = ["serde"] } serde_json = "1.0.78" diff --git a/src/vmm/src/builder.rs b/src/vmm/src/builder.rs index 0c834366276..d26070a4442 100644 --- a/src/vmm/src/builder.rs +++ b/src/vmm/src/builder.rs @@ -265,9 +265,10 @@ pub fn build_microvm_for_boot( .ok_or(MissingKernelConfig)?; let track_dirty_pages = vm_resources.track_dirty_pages(); - let guest_memory = - GuestMemoryMmap::with_size(vm_resources.vm_config.mem_size_mib, track_dirty_pages) - .map_err(StartMicrovmError::GuestMemory)?; + let memfd = crate::vstate::memory::create_memfd(vm_resources.vm_config.mem_size_mib) + 
.map_err(StartMicrovmError::GuestMemory)?; + let guest_memory = GuestMemoryMmap::with_file(memfd.as_file(), track_dirty_pages) + .map_err(StartMicrovmError::GuestMemory)?; let entry_addr = load_kernel(boot_config, &guest_memory)?; let initrd = load_initrd_from_config(boot_config, &guest_memory)?; // Clone the command-line so that a failed boot doesn't pollute the original. diff --git a/src/vmm/src/vstate/memory.rs b/src/vmm/src/vstate/memory.rs index 7567ca93353..e461d43e224 100644 --- a/src/vmm/src/vstate/memory.rs +++ b/src/vmm/src/vstate/memory.rs @@ -37,7 +37,7 @@ const GUARD_PAGE_COUNT: usize = 1; #[derive(Debug, thiserror::Error, displaydoc::Display)] pub enum MemoryError { /// Cannot access file: {0:?} - FileHandle(std::io::Error), + FileError(std::io::Error), /// Cannot create memory: {0:?} CreateMemory(VmMemoryError), /// Cannot create memory region: {0:?} @@ -50,6 +50,10 @@ pub enum MemoryError { MmapRegionError(MmapRegionError), /// Cannot create guest memory: {0} VmMemoryError(VmMemoryError), + /// Cannot create memfd: {0:?} + Memfd(memfd::Error), + /// Cannot resize memfd file: {0:?} + MemfdSetLen(std::io::Error), } /// Defines the interface for snapshotting memory. @@ -57,6 +61,9 @@ pub trait GuestMemoryExtension where Self: Sized, { + /// Creates a GuestMemoryMmap with `size` in MiB and guard pages backed by file. + fn with_file(file: &File, track_dirty_pages: bool) -> Result; + /// Creates a GuestMemoryMmap with `size` in MiB and guard pages. fn with_size(size: usize, track_dirty_pages: bool) -> Result; @@ -119,7 +126,37 @@ pub struct GuestMemoryState { } impl GuestMemoryExtension for GuestMemoryMmap { - /// Creates a GuestMemoryMmap with `size` in MiB and guard pages. + /// Creates a GuestMemoryMmap with `size` in MiB and guard pages backed by file. 
+ fn with_file(file: &File, track_dirty_pages: bool) -> Result { + let metadata = file.metadata().map_err(MemoryError::FileError)?; + let mem_size = u64_to_usize(metadata.len()); + let regions = crate::arch::arch_memory_regions(mem_size); + + let prot = libc::PROT_READ | libc::PROT_WRITE; + let flags = libc::MAP_NORESERVE | libc::MAP_SHARED; + + let mut offset: u64 = 0; + let regions = regions + .iter() + .map(|(guest_address, region_size)| { + let file_clone = file.try_clone().map_err(MemoryError::FileError)?; + let file_offset = FileOffset::new(file_clone, offset); + offset += *region_size as u64; + let region = build_guarded_region( + Some(&file_offset), + *region_size, + prot, + flags, + track_dirty_pages, + )?; + GuestRegionMmap::new(region, *guest_address).map_err(MemoryError::VmMemoryError) + }) + .collect::, MemoryError>>()?; + + GuestMemoryMmap::from_regions(regions).map_err(MemoryError::VmMemoryError) + } + + /// Creates a GuestMemoryMmap with `size` in MiB and guard pages backed by anonymous memory. fn with_size(size: usize, track_dirty_pages: bool) -> Result { let mem_size = size << 20; let regions = crate::arch::arch_memory_regions(mem_size); @@ -127,7 +164,7 @@ impl GuestMemoryExtension for GuestMemoryMmap { Self::from_raw_regions(®ions, track_dirty_pages) } - /// Creates a GuestMemoryMmap from raw regions with guard pages. + /// Creates a GuestMemoryMmap from raw regions with guard pages backed by anonymous memory. fn from_raw_regions( regions: &[(GuestAddress, usize)], track_dirty_pages: bool, @@ -147,7 +184,7 @@ impl GuestMemoryExtension for GuestMemoryMmap { GuestMemoryMmap::from_regions(regions).map_err(MemoryError::VmMemoryError) } - /// Creates a GuestMemoryMmap from raw regions with no guard pages. + /// Creates a GuestMemoryMmap from raw regions with no guard pages backed by anonymous memory. 
fn from_raw_regions_unguarded( regions: &[(GuestAddress, usize)], track_dirty_pages: bool, @@ -195,7 +232,7 @@ impl GuestMemoryExtension for GuestMemoryMmap { }) }) .collect::, std::io::Error>>() - .map_err(MemoryError::FileHandle)?; + .map_err(MemoryError::FileError)?; let prot = libc::PROT_READ | libc::PROT_WRITE; let flags = libc::MAP_NORESERVE | libc::MAP_PRIVATE; @@ -322,6 +359,33 @@ impl GuestMemoryExtension for GuestMemoryMmap { } } +/// Creates a memfd file with the `size` in MiB. +pub fn create_memfd(size: usize) -> Result { + let mem_size = size << 20; + // Create a memfd. + let opts = memfd::MemfdOptions::default().allow_sealing(true); + let mem_file = opts.create("guest_mem").map_err(MemoryError::Memfd)?; + + // Resize to guest mem size. + mem_file + .as_file() + .set_len(mem_size as u64) + .map_err(MemoryError::MemfdSetLen)?; + + // Add seals to prevent further resizing. + let mut seals = memfd::SealsHashSet::new(); + seals.insert(memfd::FileSeal::SealShrink); + seals.insert(memfd::FileSeal::SealGrow); + mem_file.add_seals(&seals).map_err(MemoryError::Memfd)?; + + // Prevent further sealing changes. + mem_file + .add_seal(memfd::FileSeal::SealSeal) + .map_err(MemoryError::Memfd)?; + + Ok(mem_file) +} + /// Build a `MmapRegion` surrounded by guard pages. 
/// /// Initially, we map a `PROT_NONE` guard region of size: @@ -844,4 +908,19 @@ mod tests { assert_eq!(expected_first_region, diff_file_content); } } + + #[test] + fn test_create_memfd() { + let size = 1; + let size_mb = 1 << 20; + + let memfd = create_memfd(size).unwrap(); + + assert_eq!(memfd.as_file().metadata().unwrap().len(), size_mb); + assert!(memfd.as_file().set_len(0x69).is_err()); + + let mut seals = memfd::SealsHashSet::new(); + seals.insert(memfd::FileSeal::SealGrow); + assert!(memfd.add_seals(&seals).is_err()); + } } diff --git a/tests/integration_tests/functional/test_api.py b/tests/integration_tests/functional/test_api.py index 9dae036db3e..4632076316c 100644 --- a/tests/integration_tests/functional/test_api.py +++ b/tests/integration_tests/functional/test_api.py @@ -389,8 +389,7 @@ def test_api_machine_config(test_microvm_with_api): test_microvm.api.machine_config.patch(mem_size_mib=bad_size) fail_msg = re.escape( - "Invalid Memory Configuration: MmapRegion(Mmap(Os { code: " - "12, kind: OutOfMemory, message: Out of memory }))" + "Invalid Memory Configuration: MemfdSetLen(Custom { kind: InvalidInput, error: TryFromIntError(()) })" ) with pytest.raises(RuntimeError, match=fail_msg): test_microvm.start() From 1df30db66d51e0364ab1ca6700a3659d1eaaa239 Mon Sep 17 00:00:00 2001 From: Egor Lazarchuk Date: Wed, 27 Sep 2023 11:18:33 +0000 Subject: [PATCH 09/14] refactor(memory): minor refactor of `memory` Added `from_raw_regions_file` method to the `GuestMemoryExtension` trait to remove duplicated code that creates file backed `GuestMemoryMmap`.
Signed-off-by: Egor Lazarchuk --- src/vmm/src/builder.rs | 6 ++- src/vmm/src/vstate/memory.rs | 91 +++++++++++++++++++----------------- 2 files changed, 53 insertions(+), 44 deletions(-) diff --git a/src/vmm/src/builder.rs b/src/vmm/src/builder.rs index d26070a4442..fb11f5d2935 100644 --- a/src/vmm/src/builder.rs +++ b/src/vmm/src/builder.rs @@ -55,7 +55,9 @@ use crate::vmm_config::boot_source::BootConfig; use crate::vmm_config::instance_info::InstanceInfo; use crate::vmm_config::machine_config::{MachineConfigUpdate, VmConfig, VmConfigError}; use crate::volatile::ReadVolatile; -use crate::vstate::memory::{GuestAddress, GuestMemory, GuestMemoryExtension, GuestMemoryMmap}; +use crate::vstate::memory::{ + create_memfd, GuestAddress, GuestMemory, GuestMemoryExtension, GuestMemoryMmap, +}; use crate::vstate::vcpu::{Vcpu, VcpuConfig}; use crate::vstate::vm::Vm; use crate::{device_manager, EventManager, RestoreVcpusError, Vmm, VmmError}; @@ -265,7 +267,7 @@ pub fn build_microvm_for_boot( .ok_or(MissingKernelConfig)?; let track_dirty_pages = vm_resources.track_dirty_pages(); - let memfd = crate::vstate::memory::create_memfd(vm_resources.vm_config.mem_size_mib) + let memfd = create_memfd(vm_resources.vm_config.mem_size_mib) .map_err(StartMicrovmError::GuestMemory)?; let guest_memory = GuestMemoryMmap::with_file(memfd.as_file(), track_dirty_pages) .map_err(StartMicrovmError::GuestMemory)?; diff --git a/src/vmm/src/vstate/memory.rs b/src/vmm/src/vstate/memory.rs index e461d43e224..8e59981cea8 100644 --- a/src/vmm/src/vstate/memory.rs +++ b/src/vmm/src/vstate/memory.rs @@ -79,6 +79,13 @@ where track_dirty_pages: bool, ) -> Result; + /// Creates a GuestMemoryMmap from raw regions with guard pages. + fn from_raw_regions_file( + regions: &[(FileOffset, GuestAddress, usize)], + track_dirty_pages: bool, + shared: bool, + ) -> Result; + /// Creates a GuestMemoryMmap given a `file` containing the data /// and a `state` containing mapping information. 
fn from_state( @@ -95,6 +102,7 @@ where /// Dumps all contents of GuestMemoryMmap to a writer. fn dump(&self, writer: &mut T) -> Result<(), MemoryError>; + /// Dumps all pages of GuestMemoryMmap present in `dirty_bitmap` to a writer. fn dump_dirty( &self, @@ -130,30 +138,19 @@ impl GuestMemoryExtension for GuestMemoryMmap { fn with_file(file: &File, track_dirty_pages: bool) -> Result { let metadata = file.metadata().map_err(MemoryError::FileError)?; let mem_size = u64_to_usize(metadata.len()); - let regions = crate::arch::arch_memory_regions(mem_size); - - let prot = libc::PROT_READ | libc::PROT_WRITE; - let flags = libc::MAP_NORESERVE | libc::MAP_SHARED; let mut offset: u64 = 0; - let regions = regions + let regions = crate::arch::arch_memory_regions(mem_size) .iter() .map(|(guest_address, region_size)| { let file_clone = file.try_clone().map_err(MemoryError::FileError)?; let file_offset = FileOffset::new(file_clone, offset); offset += *region_size as u64; - let region = build_guarded_region( - Some(&file_offset), - *region_size, - prot, - flags, - track_dirty_pages, - )?; - GuestRegionMmap::new(region, *guest_address).map_err(MemoryError::VmMemoryError) + Ok((file_offset, *guest_address, *region_size)) }) .collect::, MemoryError>>()?; - GuestMemoryMmap::from_regions(regions).map_err(MemoryError::VmMemoryError) + Self::from_raw_regions_file(®ions, track_dirty_pages, true) } /// Creates a GuestMemoryMmap with `size` in MiB and guard pages backed by anonymous memory. 
@@ -195,17 +192,44 @@ impl GuestMemoryExtension for GuestMemoryMmap { let regions = regions .iter() .map(|(guest_address, region_size)| { - let region = MmapRegionBuilder::new_with_bitmap( + let bitmap = match track_dirty_pages { + true => Some(AtomicBitmap::with_len(*region_size)), + false => None, + }; + let region = MmapRegionBuilder::new_with_bitmap(*region_size, bitmap) + .with_mmap_prot(prot) + .with_mmap_flags(flags) + .build() + .map_err(MemoryError::MmapRegionError)?; + GuestRegionMmap::new(region, *guest_address).map_err(MemoryError::VmMemoryError) + }) + .collect::, MemoryError>>()?; + + GuestMemoryMmap::from_regions(regions).map_err(MemoryError::VmMemoryError) + } + + /// Creates a GuestMemoryMmap from raw regions with guard pages backed by file. + fn from_raw_regions_file( + regions: &[(FileOffset, GuestAddress, usize)], + track_dirty_pages: bool, + shared: bool, + ) -> Result { + let prot = libc::PROT_READ | libc::PROT_WRITE; + let flags = if shared { + libc::MAP_NORESERVE | libc::MAP_SHARED + } else { + libc::MAP_NORESERVE | libc::MAP_PRIVATE + }; + let regions = regions + .iter() + .map(|(file_offset, guest_address, region_size)| { + let region = build_guarded_region( + Some(file_offset), *region_size, - match track_dirty_pages { - true => Some(AtomicBitmap::with_len(*region_size)), - false => None, - }, - ) - .with_mmap_prot(prot) - .with_mmap_flags(flags) - .build() - .map_err(MemoryError::MmapRegionError)?; + prot, + flags, + track_dirty_pages, + )?; GuestRegionMmap::new(region, *guest_address).map_err(MemoryError::VmMemoryError) }) .collect::, MemoryError>>()?; @@ -234,24 +258,7 @@ impl GuestMemoryExtension for GuestMemoryMmap { .collect::, std::io::Error>>() .map_err(MemoryError::FileError)?; - let prot = libc::PROT_READ | libc::PROT_WRITE; - let flags = libc::MAP_NORESERVE | libc::MAP_PRIVATE; - let regions = regions - .iter() - .map(|(file_offset, guest_address, region_size)| { - let region = build_guarded_region( - Some(file_offset), - 
*region_size, - prot, - flags, - track_dirty_pages, - )?; - GuestRegionMmap::new(region, *guest_address) - .map_err(MemoryError::VmMemoryError) - }) - .collect::, MemoryError>>()?; - - GuestMemoryMmap::from_regions(regions).map_err(MemoryError::VmMemoryError) + Self::from_raw_regions_file(®ions, track_dirty_pages, false) } None => { let regions = state From 6e850374e4cebda393b3702e2d9b18f1c0fffbf3 Mon Sep 17 00:00:00 2001 From: Egor Lazarchuk Date: Wed, 27 Sep 2023 14:40:33 +0000 Subject: [PATCH 10/14] refactor(memory): `memory` module tests refactoring Added missing unit tests for memory creation methods. Split some unit tests into multiple for better readability. Signed-off-by: Egor Lazarchuk --- src/vmm/src/vstate/memory.rs | 453 ++++++++++++++++++++--------------- 1 file changed, 261 insertions(+), 192 deletions(-) diff --git a/src/vmm/src/vstate/memory.rs b/src/vmm/src/vstate/memory.rs index 8e59981cea8..28dad5cffb1 100644 --- a/src/vmm/src/vstate/memory.rs +++ b/src/vmm/src/vstate/memory.rs @@ -494,28 +494,6 @@ mod tests { use utils::tempfile::TempFile; use super::*; - use crate::vstate::memory::{Bytes, GuestAddress}; - - #[derive(Debug)] - enum AddrOp { - Read, - Write, - } - - impl AddrOp { - fn apply_on_addr(&self, addr: *mut u8) { - match self { - AddrOp::Read => { - // We have to do something perform a read_volatile, otherwise - // the Release version will optimize it out, making the test fail. 
- unsafe { std::ptr::read_volatile(addr) }; - } - AddrOp::Write => unsafe { - std::ptr::write(addr, 0xFF); - }, - } - } - } fn fork_and_run(function: &dyn Fn(), expect_sigsegv: bool) { let pid = unsafe { libc::fork() }; @@ -544,102 +522,76 @@ mod tests { } fn validate_guard_region(region: &GuestMmapRegion) { + let read_mem = |addr| unsafe { std::ptr::read_volatile::(addr) }; + let write_mem = |addr, val| unsafe { + std::ptr::write(addr, val); + }; + let page_size = get_page_size().unwrap(); // Check that the created range allows us to write inside it - let addr = region.as_ptr(); + let region_first_byte = region.as_ptr(); + let region_last_byte = unsafe { region_first_byte.add(region.size() - 1) }; - unsafe { - std::ptr::write(addr, 0xFF); - assert_eq!(std::ptr::read(addr), 0xFF); - } + // Write and read from the start of the region + write_mem(region_first_byte, 0x69); + assert_eq!(read_mem(region_first_byte), 0x69); + + // Write and read from the end of the region + write_mem(region_last_byte, 0x69); + assert_eq!(read_mem(region_last_byte), 0x69); // Try a read/write operation against the left guard border of the range - let left_border = (addr as usize - page_size) as *mut u8; - fork_and_run(&|| AddrOp::Read.apply_on_addr(left_border), true); - fork_and_run(&|| AddrOp::Write.apply_on_addr(left_border), true); + let left_border_first_byte = unsafe { region_first_byte.sub(page_size) }; + fork_and_run(&|| write_mem(left_border_first_byte, 0x69), true); + fork_and_run(&|| _ = read_mem(left_border_first_byte), true); // Try a read/write operation against the right guard border of the range - let right_border = (addr as usize + region.size()) as *mut u8; - fork_and_run(&|| AddrOp::Read.apply_on_addr(right_border), true); - fork_and_run(&|| AddrOp::Write.apply_on_addr(right_border), true); + let right_border_first_byte = unsafe { region_last_byte.add(1) }; + fork_and_run(&|| write_mem(right_border_first_byte, 0x69), true); + fork_and_run(&|| _ = 
read_mem(right_border_first_byte), true); } - fn loop_guard_region_to_sigsegv(region: &GuestMmapRegion) { + #[test] + fn test_build_guarded_region() { let page_size = get_page_size().unwrap(); - let right_page_guard = region.as_ptr() as usize + region.size(); - - fork_and_run( - &|| { - let mut addr = region.as_ptr() as usize; - loop { - if addr >= right_page_guard { - break; - } - AddrOp::Write.apply_on_addr(addr as *mut u8); + let region_size = page_size * 10; - addr += page_size; - } - }, - false, - ); - - fork_and_run( - &|| { - AddrOp::Write.apply_on_addr(right_page_guard as *mut u8); - }, - true, - ); - } + let prot = libc::PROT_READ | libc::PROT_WRITE; + let flags = libc::MAP_ANONYMOUS | libc::MAP_NORESERVE | libc::MAP_PRIVATE; - #[test] - fn test_build_guarded_region() { - // Create anonymous guarded region. - { - let page_size = get_page_size().unwrap(); - let size = page_size * 10; - let prot = libc::PROT_READ | libc::PROT_WRITE; - let flags = libc::MAP_ANONYMOUS | libc::MAP_NORESERVE | libc::MAP_PRIVATE; + let region = build_guarded_region(None, region_size, prot, flags, false).unwrap(); - let region = build_guarded_region(None, size, prot, flags, false).unwrap(); + // Verify that the region was built correctly + assert_eq!(region.size(), region_size); + assert!(region.file_offset().is_none()); + assert_eq!(region.prot(), prot); + assert_eq!(region.flags(), flags); - // Verify that the region was built correctly - assert_eq!(region.size(), size); - assert!(region.file_offset().is_none()); - assert_eq!(region.prot(), prot); - assert_eq!(region.flags(), flags); + validate_guard_region(®ion); + } + #[test] + fn test_build_guarded_region_file() { + let page_size = get_page_size().unwrap(); + let region_size = page_size * 10; - validate_guard_region(®ion); - } + let prot = libc::PROT_READ | libc::PROT_WRITE; + let flags = libc::MAP_NORESERVE | libc::MAP_PRIVATE; - // Create guarded region from file. 
- { - let file = TempFile::new().unwrap().into_file(); - let page_size = get_page_size().unwrap(); - - let prot = libc::PROT_READ | libc::PROT_WRITE; - let flags = libc::MAP_NORESERVE | libc::MAP_PRIVATE; - let offset = 0; - let size = 10 * page_size; - assert_eq!(unsafe { libc::ftruncate(file.as_raw_fd(), 4096 * 10) }, 0); - - let region = build_guarded_region( - Some(&FileOffset::new(file, offset)), - size, - prot, - flags, - false, - ) - .unwrap(); + let file = TempFile::new().unwrap().into_file(); + file.set_len(region_size as u64).unwrap(); + let file_offset = FileOffset::new(file, 0); - // Verify that the region was built correctly - assert_eq!(region.size(), size); - // assert_eq!(region.file_offset().unwrap().start(), offset as u64); - assert_eq!(region.prot(), prot); - assert_eq!(region.flags(), flags); + let region = + build_guarded_region(Some(&file_offset), region_size, prot, flags, false).unwrap(); - validate_guard_region(®ion); - } + // Verify that the region was built correctly + assert_eq!(region.size(), region_size); + assert!(region.file_offset().is_none()); + assert_eq!(region.prot(), prot); + assert_eq!(region.flags(), flags); + + validate_guard_region(®ion); } #[test] @@ -657,7 +609,6 @@ mod tests { let guest_memory = GuestMemoryMmap::from_raw_regions(®ions, false).unwrap(); guest_memory.iter().for_each(|region| { validate_guard_region(region); - loop_guard_region_to_sigsegv(region); }); } @@ -695,7 +646,105 @@ mod tests { } #[test] - fn test_mark_dirty_mem() { + fn test_from_raw_regions_unguarded() { + // Check dirty page tracking is off. 
+ { + let region_size = 0x10000; + let regions = vec![ + (GuestAddress(0x0), region_size), + (GuestAddress(0x10000), region_size), + (GuestAddress(0x20000), region_size), + (GuestAddress(0x30000), region_size), + ]; + + let guest_memory = + GuestMemoryMmap::from_raw_regions_unguarded(®ions, false).unwrap(); + guest_memory.iter().for_each(|region| { + assert!(region.bitmap().is_none()); + }); + } + + // Check dirty page tracking is on. + { + let region_size = 0x10000; + let regions = vec![ + (GuestAddress(0x0), region_size), + (GuestAddress(0x10000), region_size), + (GuestAddress(0x20000), region_size), + (GuestAddress(0x30000), region_size), + ]; + + let guest_memory = GuestMemoryMmap::from_raw_regions_unguarded(®ions, true).unwrap(); + guest_memory.iter().for_each(|region| { + assert!(region.bitmap().is_some()); + }); + } + } + + #[test] + fn test_from_raw_regions_file() { + let region_size = 0x10000; + + let file = TempFile::new().unwrap().into_file(); + let file_size = 4 * region_size; + file.set_len(file_size as u64).unwrap(); + + let regions = vec![ + ( + FileOffset::new(file.try_clone().unwrap(), 0x0), + GuestAddress(0x0), + region_size, + ), + ( + FileOffset::new(file.try_clone().unwrap(), 0x10000), + GuestAddress(0x10000), + region_size, + ), + ( + FileOffset::new(file.try_clone().unwrap(), 0x20000), + GuestAddress(0x20000), + region_size, + ), + ( + FileOffset::new(file.try_clone().unwrap(), 0x30000), + GuestAddress(0x30000), + region_size, + ), + ]; + + // Test that all regions are guarded. + { + let guest_memory = + GuestMemoryMmap::from_raw_regions_file(®ions, false, false).unwrap(); + guest_memory.iter().for_each(|region| { + assert_eq!(region.size(), region_size); + assert!(region.file_offset().is_none()); + assert!(region.bitmap().is_none()); + validate_guard_region(region); + }); + } + + // Check dirty page tracking is off. 
+ { + let guest_memory = + GuestMemoryMmap::from_raw_regions_file(®ions, false, false).unwrap(); + guest_memory.iter().for_each(|region| { + assert!(region.bitmap().is_none()); + }); + } + + // Check dirty page tracking is on. + { + let guest_memory = + GuestMemoryMmap::from_raw_regions_file(®ions, true, false).unwrap(); + guest_memory.iter().for_each(|region| { + assert!(region.bitmap().is_some()); + }); + } + } + + #[test] + fn test_mark_dirty() { let page_size = get_page_size().unwrap(); let region_size = page_size * 3; @@ -746,7 +795,7 @@ mod tests { } #[test] - fn test_describe_state() { + fn test_describe() { let page_size: usize = get_page_size().unwrap(); // Two regions of one page each, with a one page gap between them. @@ -801,119 +850,139 @@ mod tests { } #[test] - fn test_restore_memory() { - let page_size: usize = get_page_size().unwrap(); + fn test_dump() { + let page_size = get_page_size().unwrap(); // Two regions of two pages each, with a one page gap between them. + let region_1_address = GuestAddress(0); + let region_2_address = GuestAddress(page_size as u64 * 3); + let region_size = page_size * 2; let mem_regions = [ - (GuestAddress(0), page_size * 2), - (GuestAddress(page_size as u64 * 3), page_size * 2), + (region_1_address, region_size), + (region_2_address, region_size), ]; - let guest_memory = GuestMemoryMmap::from_raw_regions(&mem_regions[..], true).unwrap(); + let guest_memory = GuestMemoryMmap::from_raw_regions(&mem_regions, true).unwrap(); // Check that Firecracker bitmap is clean. - let _res: Result<(), MemoryError> = guest_memory.iter().try_for_each(|r| { + guest_memory.iter().for_each(|r| { assert!(!r.bitmap().dirty_at(0)); assert!(!r.bitmap().dirty_at(1)); - Ok(()) }); // Fill the first region with 1s and the second with 2s. 
- let first_region = vec![1u8; page_size * 2]; + let first_region = vec![1u8; region_size]; + guest_memory.write(&first_region, region_1_address).unwrap(); + + let second_region = vec![2u8; region_size]; guest_memory - .write(&first_region[..], GuestAddress(0)) + .write(&second_region, region_2_address) + .unwrap(); + + let memory_state = guest_memory.describe(); + + // dump the full memory. + let mut memory_file = TempFile::new().unwrap().into_file(); + guest_memory.dump(&mut memory_file).unwrap(); + + let restored_guest_memory = + GuestMemoryMmap::from_state(Some(&memory_file), &memory_state, false).unwrap(); + + // Check that the region contents are the same. + let mut restored_region = vec![0u8; page_size * 2]; + restored_guest_memory + .read(restored_region.as_mut_slice(), region_1_address) .unwrap(); + assert_eq!(first_region, restored_region); - let second_region = vec![2u8; page_size * 2]; + restored_guest_memory + .read(restored_region.as_mut_slice(), region_2_address) + .unwrap(); + assert_eq!(second_region, restored_region); + } + + #[test] + fn test_dump_dirty() { + let page_size = get_page_size().unwrap(); + + // Two regions of two pages each, with a one page gap between them. + let region_1_address = GuestAddress(0); + let region_2_address = GuestAddress(page_size as u64 * 3); + let region_size = page_size * 2; + let mem_regions = [ + (region_1_address, region_size), + (region_2_address, region_size), + ]; + let guest_memory = GuestMemoryMmap::from_raw_regions(&mem_regions, true).unwrap(); + // Check that Firecracker bitmap is clean. + guest_memory.iter().for_each(|r| { + assert!(!r.bitmap().dirty_at(0)); + assert!(!r.bitmap().dirty_at(1)); + }); + + // Fill the first region with 1s and the second with 2s. 
+ let first_region = vec![1u8; region_size]; + guest_memory.write(&first_region, region_1_address).unwrap(); + + let second_region = vec![2u8; region_size]; guest_memory - .write(&second_region[..], GuestAddress(page_size as u64 * 3)) + .write(&second_region, region_2_address) .unwrap(); let memory_state = guest_memory.describe(); - // Case 1: dump the full memory. - { - let mut memory_file = TempFile::new().unwrap().into_file(); - guest_memory.dump(&mut memory_file).unwrap(); + // Dump only the dirty pages. + // First region pages: [dirty, clean] + // Second region pages: [clean, dirty] + let mut dirty_bitmap: DirtyBitmap = HashMap::new(); + dirty_bitmap.insert(0, vec![0b01]); + dirty_bitmap.insert(1, vec![0b10]); - let restored_guest_memory = - GuestMemoryMmap::from_state(Some(&memory_file), &memory_state, false).unwrap(); + let mut file = TempFile::new().unwrap().into_file(); + guest_memory.dump_dirty(&mut file, &dirty_bitmap).unwrap(); - // Check that the region contents are the same. - let mut actual_region = vec![0u8; page_size * 2]; - restored_guest_memory - .read(actual_region.as_mut_slice(), GuestAddress(0)) - .unwrap(); - assert_eq!(first_region, actual_region); - - restored_guest_memory - .read( - actual_region.as_mut_slice(), - GuestAddress(page_size as u64 * 3), - ) - .unwrap(); - assert_eq!(second_region, actual_region); - } + // We can restore from this because this is the first dirty dump. + let restored_guest_memory = + GuestMemoryMmap::from_state(Some(&file), &memory_state, false).unwrap(); - // Case 2: dump only the dirty pages. - { - // KVM Bitmap - // First region pages: [dirty, clean] - // Second region pages: [clean, dirty] - let mut dirty_bitmap: DirtyBitmap = HashMap::new(); - dirty_bitmap.insert(0, vec![0b01; 1]); - dirty_bitmap.insert(1, vec![0b10; 1]); - - let mut file = TempFile::new().unwrap().into_file(); - guest_memory.dump_dirty(&mut file, &dirty_bitmap).unwrap(); - - // We can restore from this because this is the first dirty dump. 
- let restored_guest_memory = - GuestMemoryMmap::from_state(Some(&file), &memory_state, false).unwrap(); - - // Check that the region contents are the same. - let mut actual_region = vec![0u8; page_size * 2]; - restored_guest_memory - .read(actual_region.as_mut_slice(), GuestAddress(0)) - .unwrap(); - assert_eq!(first_region, actual_region); + // Check that the region contents are the same. + let mut restored_region = vec![0u8; region_size]; + restored_guest_memory + .read(restored_region.as_mut_slice(), region_1_address) + .unwrap(); + assert_eq!(first_region, restored_region); - restored_guest_memory - .read( - actual_region.as_mut_slice(), - GuestAddress(page_size as u64 * 3), - ) - .unwrap(); - assert_eq!(second_region, actual_region); - - // Dirty the memory and dump again - let file = TempFile::new().unwrap(); - let mut reader = file.into_file(); - let zeros = vec![0u8; page_size]; - let ones = vec![1u8; page_size]; - let twos = vec![2u8; page_size]; - - // Firecracker Bitmap - // First region pages: [dirty, clean] - // Second region pages: [clean, clean] - guest_memory - .write(&twos[..], GuestAddress(page_size as u64)) - .unwrap(); + restored_guest_memory + .read(restored_region.as_mut_slice(), region_2_address) + .unwrap(); + assert_eq!(second_region, restored_region); + + // Dirty the memory and dump again + let file = TempFile::new().unwrap(); + let mut reader = file.into_file(); + let zeros = vec![0u8; page_size]; + let ones = vec![1u8; page_size]; + let twos = vec![2u8; page_size]; + + // Firecracker Bitmap + // First region pages: [dirty, clean] + // Second region pages: [clean, clean] + guest_memory + .write(&twos, GuestAddress(page_size as u64)) + .unwrap(); - guest_memory.dump_dirty(&mut reader, &dirty_bitmap).unwrap(); - - // Check that only the dirty regions are dumped. 
- let mut diff_file_content = Vec::new(); - let expected_first_region = [ - ones.as_slice(), - twos.as_slice(), - zeros.as_slice(), - twos.as_slice(), - ] - .concat(); - reader.seek(SeekFrom::Start(0)).unwrap(); - reader.read_to_end(&mut diff_file_content).unwrap(); - assert_eq!(expected_first_region, diff_file_content); - } + guest_memory.dump_dirty(&mut reader, &dirty_bitmap).unwrap(); + + // Check that only the dirty regions are dumped. + let mut diff_file_content = Vec::new(); + let expected_first_region = [ + ones.as_slice(), + twos.as_slice(), + zeros.as_slice(), + twos.as_slice(), + ] + .concat(); + reader.seek(SeekFrom::Start(0)).unwrap(); + reader.read_to_end(&mut diff_file_content).unwrap(); + assert_eq!(expected_first_region, diff_file_content); } #[test] From d15cfe76b0aed55c1518d4a3f24bc2196acf20ae Mon Sep 17 00:00:00 2001 From: Egor Lazarchuk Date: Thu, 28 Sep 2023 11:30:28 +0000 Subject: [PATCH 11/14] test(jailer): updated jailer file size limit tests Updated `test_negative_file_size_limit` to check file size limit by simply starting a uVM, because now FC creates a memfd file for guest memory. Added positive test to verify that vm starts if limit is not hit. Signed-off-by: Egor Lazarchuk --- tests/integration_tests/security/test_jail.py | 37 +++++++++++++------ 1 file changed, 26 insertions(+), 11 deletions(-) diff --git a/tests/integration_tests/security/test_jail.py b/tests/integration_tests/security/test_jail.py index 7eb5dc319cf..57bb5538220 100644 --- a/tests/integration_tests/security/test_jail.py +++ b/tests/integration_tests/security/test_jail.py @@ -514,25 +514,40 @@ def test_args_resource_limits(test_microvm_with_api): check_limits(pid, NOFILE, FSIZE) -def test_negative_file_size_limit(uvm_plain): +def test_positive_file_size_limit(uvm_plain): """ - Test creating snapshot file fails when size exceeds `fsize` limit. + Test creating vm succeeds when memory size is under `fsize` limit. 
""" + + vm_mem_size = 128 + jail_limit = (vm_mem_size + 1) << 20 + test_microvm = uvm_plain - # limit to 1MB, to account for logs and metrics - test_microvm.jailer.resource_limits = [f"fsize={2**20}"] + test_microvm.jailer.resource_limits = [f"fsize={jail_limit}"] test_microvm.spawn() - test_microvm.basic_config() + test_microvm.basic_config(mem_size_mib=vm_mem_size) + + # Attempt to start a vm. test_microvm.start() - test_microvm.pause() - # Attempt to create a snapshot. +def test_negative_file_size_limit(uvm_plain): + """ + Test creating vm fails when memory size exceeds `fsize` limit. + This is caused by the fact that we back guest memory by memfd. + """ + + vm_mem_size = 128 + jail_limit = (vm_mem_size - 1) << 20 + + test_microvm = uvm_plain + test_microvm.jailer.resource_limits = [f"fsize={jail_limit}"] + test_microvm.spawn() + test_microvm.basic_config(mem_size_mib=vm_mem_size) + + # Attempt to start a vm. try: - test_microvm.api.snapshot_create.put( - mem_file_path="/vm.mem", - snapshot_path="/vm.vmstate", - ) + test_microvm.start() except ( http_client.RemoteDisconnected, urllib3.exceptions.ProtocolError, From 6121b6c3b8f0665670bf0a5c3d59fbdb03455556 Mon Sep 17 00:00:00 2001 From: Egor Lazarchuk Date: Fri, 29 Sep 2023 01:30:05 +0100 Subject: [PATCH 12/14] test(memory): updated guard pages tests Updated testing for read/write of guard pages. This testing is only possible with gnu target, so all tests that verify guard pages are marked to be compiled only for gnu target. 
Signed-off-by: Egor Lazarchuk --- src/vmm/src/vstate/memory.rs | 87 +++++++++++++++++++++++------------- 1 file changed, 57 insertions(+), 30 deletions(-) diff --git a/src/vmm/src/vstate/memory.rs b/src/vmm/src/vstate/memory.rs index 28dad5cffb1..094e0a48bcf 100644 --- a/src/vmm/src/vstate/memory.rs +++ b/src/vmm/src/vstate/memory.rs @@ -495,38 +495,51 @@ mod tests { use super::*; - fn fork_and_run(function: &dyn Fn(), expect_sigsegv: bool) { - let pid = unsafe { libc::fork() }; - match pid { - 0 => { - function(); - } - child_pid => { - let mut child_status: i32 = -1; - let pid_done = unsafe { libc::waitpid(child_pid, &mut child_status, 0) }; - assert_eq!(pid_done, child_pid); - - if expect_sigsegv { - // Asserts that the child process terminated because - // it received a signal that was not handled. - assert!(libc::WIFSIGNALED(child_status)); - // Signal code should be a SIGSEGV - assert_eq!(libc::WTERMSIG(child_status), libc::SIGSEGV); - } else { - assert!(libc::WIFEXITED(child_status)); - // Signal code should be a SIGSEGV - assert_eq!(libc::WEXITSTATUS(child_status), 0); - } - } - }; - } - + // This method only works on gnu targets + #[cfg(target_env = "gnu")] fn validate_guard_region(region: &GuestMmapRegion) { let read_mem = |addr| unsafe { std::ptr::read_volatile::(addr) }; let write_mem = |addr, val| unsafe { std::ptr::write(addr, val); }; + // We utilize ability to catch panic from threads + // to verify the caught signal. 
+ unsafe extern "C" fn handler(signum: libc::c_int) { + panic!("{}", signum == libc::SIGSEGV); + } + + let read_threaded = |addr: *mut u8| { + let addr_usize = addr as usize; + std::thread::spawn(move || unsafe { std::ptr::read::(addr_usize as *mut u8) }) + .join() + .err() + .unwrap() + .downcast::() + .unwrap() + }; + + let write_threaded = |addr: *mut u8, val: u8| { + let addr_usize = addr as usize; + std::thread::spawn(move || unsafe { + std::ptr::write(addr_usize as *mut u8, val); + }) + .join() + .err() + .unwrap() + .downcast::() + .unwrap() + }; + + // Setting a signal handler for threads to panic + // with specific messages. + let previous_signal_handler = unsafe { + libc::signal( + libc::SIGSEGV, + handler as *const fn(libc::c_int) as libc::size_t, + ) + }; + let page_size = get_page_size().unwrap(); // Check that the created range allows us to write inside it @@ -543,16 +556,26 @@ mod tests { // Try a read/write operation against the left guard border of the range let left_border_first_byte = unsafe { region_first_byte.sub(page_size) }; - fork_and_run(&|| write_mem(left_border_first_byte, 0x69), true); - fork_and_run(&|| _ = read_mem(left_border_first_byte), true); + assert_eq!(read_threaded(left_border_first_byte).as_str(), "true"); + assert_eq!( + write_threaded(left_border_first_byte, 0x69).as_str(), + "true" + ); // Try a read/write operation against the right guard border of the range let right_border_first_byte = unsafe { region_last_byte.add(1) }; - fork_and_run(&|| write_mem(right_border_first_byte, 0x69), true); - fork_and_run(&|| _ = read_mem(right_border_first_byte), true); + assert_eq!(read_threaded(right_border_first_byte).as_str(), "true"); + assert_eq!( + write_threaded(right_border_first_byte, 0x69).as_str(), + "true" + ); + + // Restoring previous signal handler. 
+ unsafe { libc::signal(libc::SIGSEGV, previous_signal_handler) }; } #[test] + #[cfg(target_env = "gnu")] fn test_build_guarded_region() { let page_size = get_page_size().unwrap(); let region_size = page_size * 10; @@ -570,7 +593,9 @@ mod tests { validate_guard_region(®ion); } + #[test] + #[cfg(target_env = "gnu")] fn test_build_guarded_region_file() { let page_size = get_page_size().unwrap(); let region_size = page_size * 10; @@ -595,6 +620,7 @@ mod tests { } #[test] + #[cfg(target_env = "gnu")] fn test_from_raw_regions() { // Test that all regions are guarded. { @@ -682,6 +708,7 @@ mod tests { } #[test] + #[cfg(target_env = "gnu")] fn test_from_raw_regions_file() { let region_size = 0x10000; From c01fb7b186abee0c227593f1e8d87905f62e296f Mon Sep 17 00:00:00 2001 From: Egor Lazarchuk Date: Tue, 10 Oct 2023 22:55:01 +0100 Subject: [PATCH 13/14] feat(memory): added `FileOffset` to the `MmapRegion`s Now when memory is backed by the file we store `FileOffset` inside each region. This information will be used to set up vhost-user shared memory. Signed-off-by: Egor Lazarchuk --- src/vmm/src/vstate/memory.rs | 41 ++++++++++++++++++++---------------- 1 file changed, 23 insertions(+), 18 deletions(-) diff --git a/src/vmm/src/vstate/memory.rs b/src/vmm/src/vstate/memory.rs index 094e0a48bcf..ffc9a767513 100644 --- a/src/vmm/src/vstate/memory.rs +++ b/src/vmm/src/vstate/memory.rs @@ -81,7 +81,7 @@ where /// Creates a GuestMemoryMmap from raw regions with guard pages. fn from_raw_regions_file( - regions: &[(FileOffset, GuestAddress, usize)], + regions: Vec<(FileOffset, GuestAddress, usize)>, track_dirty_pages: bool, shared: bool, ) -> Result; @@ -150,7 +150,7 @@ impl GuestMemoryExtension for GuestMemoryMmap { }) .collect::, MemoryError>>()?; - Self::from_raw_regions_file(®ions, track_dirty_pages, true) + Self::from_raw_regions_file(regions, track_dirty_pages, true) } /// Creates a GuestMemoryMmap with `size` in MiB and guard pages backed by anonymous memory. 
@@ -210,7 +210,7 @@ impl GuestMemoryExtension for GuestMemoryMmap { /// Creates a GuestMemoryMmap from raw regions with guard pages backed by file. fn from_raw_regions_file( - regions: &[(FileOffset, GuestAddress, usize)], + regions: Vec<(FileOffset, GuestAddress, usize)>, track_dirty_pages: bool, shared: bool, ) -> Result { @@ -221,16 +221,16 @@ impl GuestMemoryExtension for GuestMemoryMmap { libc::MAP_NORESERVE | libc::MAP_PRIVATE }; let regions = regions - .iter() + .into_iter() .map(|(file_offset, guest_address, region_size)| { let region = build_guarded_region( Some(file_offset), - *region_size, + region_size, prot, flags, track_dirty_pages, )?; - GuestRegionMmap::new(region, *guest_address).map_err(MemoryError::VmMemoryError) + GuestRegionMmap::new(region, guest_address).map_err(MemoryError::VmMemoryError) }) .collect::, MemoryError>>()?; @@ -258,7 +258,7 @@ impl GuestMemoryExtension for GuestMemoryMmap { .collect::, std::io::Error>>() .map_err(MemoryError::FileError)?; - Self::from_raw_regions_file(®ions, track_dirty_pages, false) + Self::from_raw_regions_file(regions, track_dirty_pages, false) } None => { let regions = state @@ -407,7 +407,7 @@ pub fn create_memfd(size: usize) -> Result { /// acts as a safety net for accessing out-of-bounds addresses that are not allocated for the /// guest's memory. fn build_guarded_region( - file_offset: Option<&FileOffset>, + file_offset: Option, size: usize, prot: i32, flags: i32, @@ -438,7 +438,7 @@ fn build_guarded_region( } let (fd, offset) = match file_offset { - Some(file_offset) => { + Some(ref file_offset) => { check_file_offset(file_offset, size).map_err(MemoryError::MmapRegionError)?; (file_offset.file().as_raw_fd(), file_offset.start()) } @@ -473,14 +473,19 @@ fn build_guarded_region( }; // SAFETY: Safe because the parameters are valid. 
- unsafe { + let builder = unsafe { MmapRegionBuilder::new_with_bitmap(size, bitmap) .with_raw_mmap_pointer(region_addr.cast::()) .with_mmap_prot(prot) .with_mmap_flags(flags) - .build() - .map_err(MemoryError::MmapRegionError) + }; + + match file_offset { + Some(offset) => builder.with_file_offset(offset), + None => builder, } + .build() + .map_err(MemoryError::MmapRegionError) } #[cfg(test)] @@ -608,11 +613,11 @@ mod tests { let file_offset = FileOffset::new(file, 0); let region = - build_guarded_region(Some(&file_offset), region_size, prot, flags, false).unwrap(); + build_guarded_region(Some(file_offset), region_size, prot, flags, false).unwrap(); // Verify that the region was built correctly assert_eq!(region.size(), region_size); - assert!(region.file_offset().is_none()); + assert!(region.file_offset().is_some()); assert_eq!(region.prot(), prot); assert_eq!(region.flags(), flags); @@ -742,10 +747,10 @@ mod tests { // Test that all regions are guarded. { let guest_memory = - GuestMemoryMmap::from_raw_regions_file(®ions, false, false).unwrap(); + GuestMemoryMmap::from_raw_regions_file(regions.clone(), false, false).unwrap(); guest_memory.iter().for_each(|region| { assert_eq!(region.size(), region_size); - assert!(region.file_offset().is_none()); + assert!(region.file_offset().is_some()); assert!(region.bitmap().is_none()); validate_guard_region(region); }); @@ -754,7 +759,7 @@ mod tests { // Check dirty page tracking is off. { let guest_memory = - GuestMemoryMmap::from_raw_regions_file(®ions, false, false).unwrap(); + GuestMemoryMmap::from_raw_regions_file(regions.clone(), false, false).unwrap(); guest_memory.iter().for_each(|region| { assert!(region.bitmap().is_none()); }); @@ -763,7 +768,7 @@ mod tests { // Check dirty page tracking is on. 
{ let guest_memory = - GuestMemoryMmap::from_raw_regions_file(&regions, true, false).unwrap(); + GuestMemoryMmap::from_raw_regions_file(regions, true, false).unwrap(); guest_memory.iter().for_each(|region| { assert!(region.bitmap().is_some()); }); From ef94eaf7dbeb7dcee6d31ff6d25a651dcfa4c0b0 Mon Sep 17 00:00:00 2001 From: Egor Lazarchuk Date: Mon, 16 Oct 2023 18:45:14 +0100 Subject: [PATCH 14/14] feat: added GNU for unit tests targets Now unit tests are run for musl and gnu targets. This is done because we have some tests that only work with gnu. Signed-off-by: Egor Lazarchuk --- tests/integration_tests/build/test_unittests.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/tests/integration_tests/build/test_unittests.py b/tests/integration_tests/build/test_unittests.py index 8bcb80cb502..9fa2ef04b66 100644 --- a/tests/integration_tests/build/test_unittests.py +++ b/tests/integration_tests/build/test_unittests.py @@ -12,7 +12,10 @@ # Currently profiling with `aarch64-unknown-linux-musl` is unsupported (see # https://github.com/rust-lang/rustup/issues/3095#issuecomment-1280705619) therefore we profile and # run coverage with the `gnu` toolchains and run unit tests with the `musl` toolchains. -TARGET = "{}-unknown-linux-musl".format(MACHINE) +TARGETS = [ + "{}-unknown-linux-musl".format(MACHINE), + "{}-unknown-linux-gnu".format(MACHINE), +] @pytest.mark.timeout(600) @@ -20,11 +23,13 @@ def test_unittests(test_fc_session_root_path): """ Run unit and doc tests for all supported targets. """ - extra_args = "--target {} ".format(TARGET) - host.cargo_test(test_fc_session_root_path, extra_args=extra_args) + for target in TARGETS: + extra_args = "--target {} ".format(target) + host.cargo_test(test_fc_session_root_path, extra_args=extra_args) def test_benchmarks_compile(): """Checks that all benchmarks compile""" - host.cargo("bench", f"--all --no-run --target {TARGET}") + + host.cargo("bench", f"--all --no-run --target {TARGETS[0]}")