diff --git a/src/backend/libc/mm/syscalls.rs b/src/backend/libc/mm/syscalls.rs
index 4b23a58b7..ce5edb173 100644
--- a/src/backend/libc/mm/syscalls.rs
+++ b/src/backend/libc/mm/syscalls.rs
@@ -2,6 +2,8 @@
 
 #[cfg(not(target_os = "redox"))]
 use super::types::Advice;
+#[cfg(any(linux_kernel, freebsdlike, netbsdlike))]
+use super::types::MlockAllFlags;
 #[cfg(any(target_os = "emscripten", target_os = "linux"))]
 use super::types::MremapFlags;
 use super::types::{MapFlags, MprotectFlags, MsyncFlags, ProtFlags};
@@ -220,3 +222,22 @@ pub(crate) unsafe fn userfaultfd(flags: UserfaultfdFlags) -> io::Result<OwnedFd> {
     }
     ret_owned_fd(userfaultfd(bitflags_bits!(flags)))
 }
+
+/// Locks all pages mapped into the address space of the calling process.
+///
+/// This includes the pages of the code, data and stack segment, as well as shared libraries,
+/// user space kernel data, shared memory, and memory-mapped files. All mapped pages are
+/// guaranteed to be resident in RAM when the call returns successfully;
+/// the pages are guaranteed to stay in RAM until later unlocked.
+#[inline]
+#[cfg(any(linux_kernel, freebsdlike, netbsdlike))]
+pub(crate) fn mlockall(flags: MlockAllFlags) -> io::Result<()> {
+    unsafe { ret(c::mlockall(bitflags_bits!(flags))) }
+}
+
+/// Unlocks all pages mapped into the address space of the calling process.
+#[inline]
+#[cfg(any(linux_kernel, freebsdlike, netbsdlike))]
+pub(crate) fn munlockall() -> io::Result<()> {
+    unsafe { ret(c::munlockall()) }
+}
diff --git a/src/backend/libc/mm/types.rs b/src/backend/libc/mm/types.rs
index f0b4ad593..7ec328ca0 100644
--- a/src/backend/libc/mm/types.rs
+++ b/src/backend/libc/mm/types.rs
@@ -442,3 +442,28 @@ bitflags! {
         const _ = !0;
     }
 }
+
+#[cfg(any(linux_kernel, freebsdlike, netbsdlike))]
+bitflags! {
+    /// `MCL_*` flags for use with [`mlockall`].
+    ///
+    /// [`mlockall`]: crate::mm::mlockall
+    #[repr(transparent)]
+    #[derive(Copy, Clone, Eq, PartialEq, Hash, Debug)]
+    pub struct MlockAllFlags: u32 {
+        // libc doesn't define `MCL_ONFAULT` yet.
+        // const ONFAULT = libc::MCL_ONFAULT;
+        /// Lock all pages which will become mapped into the address
+        /// space of the process in the future. These could be, for
+        /// instance, new pages required by a growing heap and stack
+        /// as well as new memory-mapped files or shared memory
+        /// regions.
+        const FUTURE = bitcast!(libc::MCL_FUTURE);
+        /// Lock all pages which are currently mapped into the address
+        /// space of the process.
+        const CURRENT = bitcast!(libc::MCL_CURRENT);
+
+        /// <https://docs.rs/bitflags/*/bitflags/#externally-defined-flags>
+        const _ = !0;
+    }
+}
diff --git a/src/backend/linux_raw/conv.rs b/src/backend/linux_raw/conv.rs
index f915db140..dcd586e4c 100644
--- a/src/backend/linux_raw/conv.rs
+++ b/src/backend/linux_raw/conv.rs
@@ -621,6 +621,14 @@ impl<'a, Num: ArgNumber> From for ArgReg<
     }
 }
 
+#[cfg(feature = "mm")]
+impl<'a, Num: ArgNumber> From<crate::backend::mm::types::MlockAllFlags> for ArgReg<'a, Num> {
+    #[inline]
+    fn from(flags: crate::backend::mm::types::MlockAllFlags) -> Self {
+        c_uint(flags.bits())
+    }
+}
+
 #[cfg(feature = "mm")]
 impl<'a, Num: ArgNumber> From for ArgReg<'a, Num> {
     #[inline]
diff --git a/src/backend/linux_raw/mm/syscalls.rs b/src/backend/linux_raw/mm/syscalls.rs
index b51f826a9..a7069dd11 100644
--- a/src/backend/linux_raw/mm/syscalls.rs
+++ b/src/backend/linux_raw/mm/syscalls.rs
@@ -6,6 +6,8 @@
 #![allow(unsafe_code)]
 #![allow(clippy::undocumented_unsafe_blocks)]
 
+#[cfg(any(linux_kernel, freebsdlike, netbsdlike))]
+use super::types::MlockAllFlags;
 use super::types::{
     Advice, MapFlags, MlockFlags, MprotectFlags, MremapFlags, MsyncFlags, ProtFlags,
     UserfaultfdFlags,
@@ -210,3 +212,29 @@ pub(crate) unsafe fn munlock(addr: *mut c::c_void, length: usize) -> io::Result<()> {
 pub(crate) unsafe fn userfaultfd(flags: UserfaultfdFlags) -> io::Result<OwnedFd> {
     ret_owned_fd(syscall_readonly!(__NR_userfaultfd, flags))
 }
+
+/// Locks all pages mapped into the address space of the calling process.
+///
+/// This includes the pages of the code, data and stack segment, as well as shared libraries,
+/// user space kernel data, shared memory, and memory-mapped files. All mapped pages are
+/// guaranteed to be resident in RAM when the call returns successfully;
+/// the pages are guaranteed to stay in RAM until later unlocked.
+#[inline]
+#[cfg(any(linux_kernel, freebsdlike, netbsdlike))]
+pub(crate) fn mlockall(flags: MlockAllFlags) -> io::Result<()> {
+    // When `mlockall` is used with `MCL_ONFAULT | MCL_FUTURE`, the ordering
+    // of `mlockall` with respect to arbitrary loads may be significant,
+    // because if a load happens and evokes a fault before the `mlockall`,
+    // the memory doesn't get locked, but if the load and therefore
+    // the fault happens after, then the memory does get locked.
+    // So to be conservative in this regard, we use `syscall` instead
+    // of `syscall_readonly`.
+    unsafe { ret(syscall!(__NR_mlockall, flags)) }
+}
+
+/// Unlocks all pages mapped into the address space of the calling process.
+#[inline]
+#[cfg(any(linux_kernel, freebsdlike, netbsdlike))]
+pub(crate) fn munlockall() -> io::Result<()> {
+    unsafe { ret(syscall_readonly!(__NR_munlockall)) }
+}
diff --git a/src/backend/linux_raw/mm/types.rs b/src/backend/linux_raw/mm/types.rs
index 0dfb41050..7bd4afbde 100644
--- a/src/backend/linux_raw/mm/types.rs
+++ b/src/backend/linux_raw/mm/types.rs
@@ -262,3 +262,37 @@ bitflags! {
         const _ = !0;
     }
 }
+
+#[cfg(any(linux_kernel, freebsdlike, netbsdlike))]
+bitflags! {
+    /// `MCL_*` flags for use with [`mlockall`].
+    ///
+    /// [`mlockall`]: crate::mm::mlockall
+    #[repr(transparent)]
+    #[derive(Copy, Clone, Eq, PartialEq, Hash, Debug)]
+    pub struct MlockAllFlags: u32 {
+        /// Used together with `MCL_CURRENT`, `MCL_FUTURE`, or both. Mark
+        /// all current (with `MCL_CURRENT`) or future (with `MCL_FUTURE`)
+        /// mappings to lock pages when they are faulted in. When
+        /// used with `MCL_CURRENT`, all present pages are locked, but
+        /// `mlockall()` will not fault in non-present pages. When used
+        /// with `MCL_FUTURE`, all future mappings will be marked to
+        /// lock pages when they are faulted in, but they will not be
+        /// populated by the lock when the mapping is created.
+        /// `MCL_ONFAULT` must be used with either `MCL_CURRENT` or
+        /// `MCL_FUTURE` or both.
+        const ONFAULT = linux_raw_sys::general::MCL_ONFAULT;
+        /// Lock all pages which will become mapped into the address
+        /// space of the process in the future. These could be, for
+        /// instance, new pages required by a growing heap and stack
+        /// as well as new memory-mapped files or shared memory
+        /// regions.
+        const FUTURE = linux_raw_sys::general::MCL_FUTURE;
+        /// Lock all pages which are currently mapped into the address
+        /// space of the process.
+        const CURRENT = linux_raw_sys::general::MCL_CURRENT;
+
+        /// <https://docs.rs/bitflags/*/bitflags/#externally-defined-flags>
+        const _ = !0;
+    }
+}
diff --git a/src/mm/mmap.rs b/src/mm/mmap.rs
index f68a02b72..1c3f8cc5b 100644
--- a/src/mm/mmap.rs
+++ b/src/mm/mmap.rs
@@ -10,6 +10,8 @@ use crate::{backend, io};
 use backend::fd::AsFd;
 use core::ffi::c_void;
 
+#[cfg(any(linux_kernel, freebsdlike, netbsdlike))]
+pub use backend::mm::types::MlockAllFlags;
 #[cfg(linux_kernel)]
 pub use backend::mm::types::MlockFlags;
 #[cfg(any(target_os = "emscripten", target_os = "linux"))]
@@ -340,3 +342,60 @@ pub unsafe fn mlock_with(ptr: *mut c_void, len: usize, flags: MlockFlags) -> io::Result<()> {
 pub unsafe fn munlock(ptr: *mut c_void, len: usize) -> io::Result<()> {
     backend::mm::syscalls::munlock(ptr, len)
 }
+
+/// Locks all pages mapped into the address space of the calling process.
+///
+/// This includes the pages of the code, data and stack segment, as well as shared libraries,
+/// user space kernel data, shared memory, and memory-mapped files. All mapped pages are
+/// guaranteed to be resident in RAM when the call returns successfully;
+/// the pages are guaranteed to stay in RAM until later unlocked.
+///
+/// # References
+/// - [POSIX]
+/// - [Linux]
+/// - [FreeBSD]
+/// - [NetBSD]
+/// - [OpenBSD]
+/// - [DragonFly BSD]
+/// - [illumos]
+/// - [glibc]
+///
+/// [POSIX]: https://pubs.opengroup.org/onlinepubs/9699919799/functions/mlockall.html
+/// [Linux]: https://man7.org/linux/man-pages/man2/mlockall.2.html
+/// [FreeBSD]: https://man.freebsd.org/cgi/man.cgi?query=mlockall&sektion=2
+/// [NetBSD]: https://man.netbsd.org/mlockall.2
+/// [OpenBSD]: https://man.openbsd.org/mlockall.2
+/// [DragonFly BSD]: https://man.dragonflybsd.org/?command=mlockall&section=2
+/// [illumos]: https://illumos.org/man/3C/mlockall
+/// [glibc]: https://www.gnu.org/software/libc/manual/html_node/Page-Lock-Functions.html#index-mlockall
+#[cfg(any(linux_kernel, freebsdlike, netbsdlike))]
+#[inline]
+pub fn mlockall(flags: MlockAllFlags) -> io::Result<()> {
+    backend::mm::syscalls::mlockall(flags)
+}
+
+/// Unlocks all pages mapped into the address space of the calling process.
+///
+/// # References
+/// - [POSIX]
+/// - [Linux]
+/// - [FreeBSD]
+/// - [NetBSD]
+/// - [OpenBSD]
+/// - [DragonFly BSD]
+/// - [illumos]
+/// - [glibc]
+///
+/// [POSIX]: https://pubs.opengroup.org/onlinepubs/9699919799/functions/munlockall.html
+/// [Linux]: https://man7.org/linux/man-pages/man2/munlockall.2.html
+/// [FreeBSD]: https://man.freebsd.org/cgi/man.cgi?query=munlockall&sektion=2
+/// [NetBSD]: https://man.netbsd.org/munlockall.2
+/// [OpenBSD]: https://man.openbsd.org/munlockall.2
+/// [DragonFly BSD]: https://man.dragonflybsd.org/?command=munlockall&section=2
+/// [illumos]: https://illumos.org/man/3C/munlockall
+/// [glibc]: https://www.gnu.org/software/libc/manual/html_node/Page-Lock-Functions.html#index-munlockall
+#[cfg(any(linux_kernel, freebsdlike, netbsdlike))]
+#[inline]
+pub fn munlockall() -> io::Result<()> {
+    backend::mm::syscalls::munlockall()
+}
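Below is a brief usage sketch, not part of the patch, showing how the public API added in `src/mm/mmap.rs` might be called. It assumes rustix is built with the `mm` feature on one of the platforms covered by the `cfg` gates above; the remark about privileges and `RLIMIT_MEMLOCK` comes from the mlockall(2) man page rather than from this diff.

```rust
use rustix::mm::{mlockall, munlockall, MlockAllFlags};

fn main() -> rustix::io::Result<()> {
    // Lock everything currently mapped and everything mapped from now on
    // (MCL_CURRENT | MCL_FUTURE). Per mlockall(2), this can fail with
    // EPERM or ENOMEM if the process lacks the privilege or the
    // RLIMIT_MEMLOCK headroom to lock its whole address space.
    mlockall(MlockAllFlags::CURRENT | MlockAllFlags::FUTURE)?;

    // ... latency-sensitive work that must not take page faults to disk ...

    // Undo the lock for the entire address space.
    munlockall()
}
```

Note that `MlockAllFlags::ONFAULT` is only defined on the linux_raw backend in this patch; the libc backend leaves it commented out until libc exposes `MCL_ONFAULT`.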