diff --git a/llama-cpp-2/src/lib.rs b/llama-cpp-2/src/lib.rs
index a341def4..78c7e74a 100644
--- a/llama-cpp-2/src/lib.rs
+++ b/llama-cpp-2/src/lib.rs
@@ -194,3 +194,19 @@ pub enum StringToTokenError {
 pub fn ggml_time_us() -> i64 {
     unsafe { llama_cpp_sys_2::ggml_time_us() }
 }
+
+/// checks if mlock is supported
+///
+/// ```
+/// # use llama_cpp_2::llama_supports_mlock;
+///
+/// if llama_supports_mlock() {
+///     println!("mlock is supported!");
+/// } else {
+///     println!("mlock is not supported!");
+/// }
+/// ```
+#[must_use]
+pub fn llama_supports_mlock() -> bool {
+    unsafe { llama_cpp_sys_2::llama_supports_mlock() }
+}
diff --git a/llama-cpp-2/src/model/params.rs b/llama-cpp-2/src/model/params.rs
index 99e89e53..dfd48583 100644
--- a/llama-cpp-2/src/model/params.rs
+++ b/llama-cpp-2/src/model/params.rs
@@ -62,6 +62,13 @@ impl LlamaModelParams {
         self.params.vocab_only = vocab_only;
         self
     }
+
+    /// sets `use_mlock`
+    #[must_use]
+    pub fn with_use_mlock(mut self, use_mlock: bool) -> Self {
+        self.params.use_mlock = use_mlock;
+        self
+    }
 }
 
 /// Default parameters for `LlamaModel`. (as defined in llama.cpp by `llama_model_default_params`)
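
Taken together, the two additions let callers gate `use_mlock` on runtime support. A minimal usage sketch, assuming `LlamaModelParams` implements `Default` (as the trailing doc comment in `params.rs` suggests) and that the resulting params are then handed to the crate's usual model-loading call:

```rust
use llama_cpp_2::llama_supports_mlock;
use llama_cpp_2::model::params::LlamaModelParams;

fn main() {
    // Only ask llama.cpp to mlock model memory when the build supports it.
    let params = LlamaModelParams::default().with_use_mlock(llama_supports_mlock());

    // `params` would then be passed to the model-loading API as usual;
    // that step is elided here to keep the sketch self-contained.
    let _ = params;
}
```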