From e8f07480ce88c0a2f3bfd24db939138cc9adbfbb Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=D0=90=D1=80=D1=82=D1=91=D0=BC=20=D0=9F=D0=B0=D0=B2=D0=BB?=
 =?UTF-8?q?=D0=BE=D0=B2=20=5BArtyom=20Pavlov=5D?=
Date: Mon, 9 Oct 2023 17:32:58 +0300
Subject: [PATCH] Add `mlockall` and `munlockall`

---
 src/backend/libc/mm/syscalls.rs      | 17 +++++++++++++++++
 src/backend/libc/mm/types.rs         | 19 +++++++++++++++++++
 src/backend/linux_raw/mm/syscalls.rs | 17 +++++++++++++++++
 src/backend/linux_raw/mm/types.rs    | 28 ++++++++++++++++++++++++++++
 4 files changed, 81 insertions(+)

diff --git a/src/backend/libc/mm/syscalls.rs b/src/backend/libc/mm/syscalls.rs
index 4b23a58b7..98d8a0a83 100644
--- a/src/backend/libc/mm/syscalls.rs
+++ b/src/backend/libc/mm/syscalls.rs
@@ -220,3 +220,20 @@ pub(crate) unsafe fn userfaultfd(flags: UserfaultfdFlags) -> io::Result<OwnedFd>
     }
     ret_owned_fd(userfaultfd(bitflags_bits!(flags)))
 }
+
+/// Locks all pages mapped into the address space of the calling process.
+///
+/// This includes the pages of the code, data and stack segment, as well as shared libraries,
+/// user space kernel data, shared memory, and memory-mapped files. All mapped pages are guaranteed
+/// to be resident in RAM when the call returns successfully; the pages are guaranteed to stay in RAM
+/// until later unlocked.
+#[inline]
+pub(crate) fn mlockall(flags: MlockallFlags) -> io::Result<()> {
+    unsafe { ret(c::mlockall(bitflags_bits!(flags))) }
+}
+
+/// Unlocks all pages mapped into the address space of the calling process.
+#[inline]
+pub(crate) fn munlockall() -> io::Result<()> {
+    unsafe { ret(c::munlockall()) }
+}
diff --git a/src/backend/libc/mm/types.rs b/src/backend/libc/mm/types.rs
index f0b4ad593..17a1b5629 100644
--- a/src/backend/libc/mm/types.rs
+++ b/src/backend/libc/mm/types.rs
@@ -442,3 +442,22 @@ bitflags! {
         const _ = !0;
     }
 }
+
+bitflags! {
+    /// `MCL_*` flags for use with [`mlockall`].
+    ///
+    /// [`mlockall`]: crate::mm::mlockall
+    pub struct MlockallFlags: i32 {
+        // libc doesn't define `MCL_ONFAULT` yet.
+        // const ONFAULT = libc::MCL_ONFAULT;
+        /// Lock all pages which will become mapped into the address
+        /// space of the process in the future. These could be, for
+        /// instance, new pages required by a growing heap and stack
+        /// as well as new memory-mapped files or shared memory
+        /// regions.
+        const FUTURE = libc::MCL_FUTURE;
+        /// Lock all pages which are currently mapped into the address
+        /// space of the process.
+        const CURRENT = libc::MCL_CURRENT;
+    }
+}
diff --git a/src/backend/linux_raw/mm/syscalls.rs b/src/backend/linux_raw/mm/syscalls.rs
index b51f826a9..665817835 100644
--- a/src/backend/linux_raw/mm/syscalls.rs
+++ b/src/backend/linux_raw/mm/syscalls.rs
@@ -210,3 +210,20 @@ pub(crate) unsafe fn munlock(addr: *mut c::c_void, length: usize) -> io::Result<
 pub(crate) unsafe fn userfaultfd(flags: UserfaultfdFlags) -> io::Result<OwnedFd> {
     ret_owned_fd(syscall_readonly!(__NR_userfaultfd, flags))
 }
+
+/// Locks all pages mapped into the address space of the calling process.
+///
+/// This includes the pages of the code, data and stack segment, as well as shared libraries,
+/// user space kernel data, shared memory, and memory-mapped files. All mapped pages are guaranteed
+/// to be resident in RAM when the call returns successfully; the pages are guaranteed to stay in RAM
+/// until later unlocked.
+#[inline]
+pub(crate) fn mlockall(flags: MlockallFlags) -> io::Result<()> {
+    unsafe { ret(syscall_readonly!(__NR_mlockall, flags)) }
+}
+
+/// Unlocks all pages mapped into the address space of the calling process.
+#[inline]
+pub(crate) fn munlockall() -> io::Result<()> {
+    unsafe { ret(syscall_readonly!(__NR_munlockall)) }
+}
diff --git a/src/backend/linux_raw/mm/types.rs b/src/backend/linux_raw/mm/types.rs
index 0dfb41050..7bf0ef112 100644
--- a/src/backend/linux_raw/mm/types.rs
+++ b/src/backend/linux_raw/mm/types.rs
@@ -262,3 +262,31 @@ bitflags! {
         const _ = !0;
     }
 }
+
+bitflags! {
+    /// `MCL_*` flags for use with [`mlockall`].
+    ///
+    /// [`mlockall`]: crate::mm::mlockall
+    pub struct MlockallFlags: u32 {
+        /// Used together with `MCL_CURRENT`, `MCL_FUTURE`, or both. Mark
+        /// all current (with `MCL_CURRENT`) or future (with `MCL_FUTURE`)
+        /// mappings to lock pages when they are faulted in. When
+        /// used with `MCL_CURRENT`, all present pages are locked, but
+        /// `mlockall()` will not fault in non-present pages. When used
+        /// with `MCL_FUTURE`, all future mappings will be marked to
+        /// lock pages when they are faulted in, but they will not be
+        /// populated by the lock when the mapping is created.
+        /// `MCL_ONFAULT` must be used with either `MCL_CURRENT` or
+        /// `MCL_FUTURE` or both.
+        const ONFAULT = linux_raw_sys::general::MCL_ONFAULT;
+        /// Lock all pages which will become mapped into the address
+        /// space of the process in the future. These could be, for
+        /// instance, new pages required by a growing heap and stack
+        /// as well as new memory-mapped files or shared memory
+        /// regions.
+        const FUTURE = linux_raw_sys::general::MCL_FUTURE;
+        /// Lock all pages which are currently mapped into the address
+        /// space of the process.
+        const CURRENT = linux_raw_sys::general::MCL_CURRENT;
+    }
+}