diff --git a/Cargo.toml b/Cargo.toml index 12fb518..8363137 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -33,7 +33,7 @@ license = " EPL-2.0 OR Apache-2.0" categories = ["development-tools::ffi", "no-std::no-alloc"] repository = "https://github.com/ZettaScaleLabs/stabby" readme = "stabby/README.md" -version = "6.2.2" # Track +version = "6.2.2" # Track [workspace.dependencies] stabby-macros = { path = "./stabby-macros/", version = "6.2.2", default-features = false } # Track diff --git a/stabby-abi/Cargo.toml b/stabby-abi/Cargo.toml index 0c74148..7f62ea0 100644 --- a/stabby-abi/Cargo.toml +++ b/stabby-abi/Cargo.toml @@ -25,7 +25,9 @@ description = "stabby's core ABI, you shouldn't add this crate to your dependenc [features] default = ["std"] -std = ["libc"] +std = ["libc", "alloc-rs"] +alloc-rs = [] +ctypes = ["stabby-macros/ctypes"] libc = ["dep:libc"] test = [] serde = ["dep:serde"] diff --git a/stabby-abi/src/alloc/allocators/btree_alloc.rs b/stabby-abi/src/alloc/allocators/btree_alloc.rs new file mode 100644 index 0000000..7e9fc12 --- /dev/null +++ b/stabby-abi/src/alloc/allocators/btree_alloc.rs @@ -0,0 +1,202 @@ +use core::{ + cell::UnsafeCell, mem::MaybeUninit, ops::DerefMut, ptr::NonNull, sync::atomic::AtomicPtr, +}; + +use crate::num::NonMaxUsize; + +/// A simple btree based allocator. +#[crate::stabby] +#[derive(Clone, Copy, Default)] +pub struct BTreeAlloc { + inner: [u8; 0], +} +impl core::fmt::Debug for BTreeAlloc { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.write_str("BTreeAlloc") + } +} +impl BTreeAlloc { + /// Constructs the allocator. + pub const fn new() -> Self { + Self { inner: [] } + } +} +impl crate::alloc::IAlloc for BTreeAlloc { + fn alloc(&mut self, layout: crate::alloc::Layout) -> *mut () { + allock().expect("Allocator not found").alloc(layout) + } + unsafe fn realloc(&mut self, p: *mut (), new_layout: crate::alloc::Layout) -> *mut () { + allock() + .expect("Allocator not found") + .realloc(p, new_layout) + } + unsafe fn free(&mut self, p: *mut ()) { + allock().expect("Allocator not found").free(p); + } +} + +static ALLOC: AtomicPtr = AtomicPtr::new(core::ptr::null_mut()); +#[repr(transparent)] +struct BTreeAllocGuard<'a> { + root: &'a Node, +} +fn allock<'a>() -> Option> { + loop { + let ptr = ALLOC.swap(usize::MAX as *mut _, core::sync::atomic::Ordering::Acquire); + if ptr as usize == usize::MAX { + core::hint::spin_loop(); + continue; + } + if let Some(root) = unsafe { ptr.as_mut() } { + return Some(BTreeAllocGuard { root }); + } + let mut allocated = crate::alloc::allocators::paging::PAGESIZE; + let mut root = unsafe { + crate::alloc::allocators::paging::memmap(core::ptr::null(), &mut allocated)? 
+ .cast::>() + .as_mut() + }; + unsafe { + let start = NonNull::new_unchecked( + root.as_ptr().cast::().add(core::mem::size_of::()), + ); + let end = root.as_ptr().cast::().add(allocated); + let root = root.write(Node(UnsafeCell::new(NodeInner { + ends: [core::ptr::null(); NODE_SIZE], + blocks: [None; NODE_SIZE], + }))); + root.0.get_mut().ends[0] = NonNull::new(end.cast()); + root.0.get_mut().blocks[0] = Some(BlockRest { + start: start.cast(), + max_contiguous: end.offset_from(start) as usize, + children: None, + }); + return Some(BTreeAllocGuard { root }); + } + } +} +impl Drop for BTreeAllocGuard<'_> { + fn drop(&mut self) { + ALLOC.store(self.root, core::sync::atomic::Ordering::Release) + } +} + +const NODE_SIZE: usize = 8; +#[repr(C)] +#[derive(Debug, Copy, Clone)] +struct BlockRest { + start: NonNull<()>, + children: Option<&'static Node>, + max_contiguous: usize, +} +struct Block { + end: NonNull<()>, + rest: BlockRest, +} +#[repr(C)] +struct NodeInner { + ends: [Option>; NODE_SIZE], + blocks: [Option; NODE_SIZE], +} +#[repr(transparent)] +struct Node(UnsafeCell); + +impl crate::alloc::IAlloc for BTreeAllocGuard<'_> { + fn alloc(&mut self, layout: crate::alloc::Layout) -> *mut () { + let layout = layout.for_alloc(); + todo!() + } + unsafe fn free(&mut self, mut ptr: *mut ()) { + todo!() + } +} +macro_rules! segfault { + () => { + #[cfg(target_family = "unix")] + unsafe { + libc::signal(libc::SIGSEGV, libc::SIG_DFL) + }; + return None; + }; +} +impl Node { + fn alloc(&self, layout: crate::alloc::Layout, result: &mut *mut ()) -> Option { + let inner = unsafe { &mut *self.0.get() }; + for i in 0..NODE_SIZE { + { + let Some(block) = &inner.blocks[i] else { + break; + }; + if block.max_contiguous < layout.size { + continue; + } + if let Some(children) = block.children { + let extra_block = children.alloc(layout, result); + if result.is_null() { + continue; + } + block.max_contiguous = unsafe { (*children.0.get()).blocks.iter() } + .map_while(|x| x.as_ref()) + .fold(0, |acc, it| acc.max(it.max_contiguous)); + todo!() + } + let misalign = block.start.as_ptr() as usize % layout.align; + let new_block = if misalign != 0 { + // start isn't aligned as we want, let's split from the end to reduce fragmentation + todo!() + } else { + let end = unsafe { block.start.as_ptr().add(layout.size) }; + if core::ptr::eq(end, inner.ends[i]) { + // this block is just right, we take it + block.max_contiguous = 0; + *result = block.start.as_ptr(); + return Default::default(); + } else { + // start is aligned, let's split from the start to + todo!() + } + }; + } + } + Default::default() + } + fn free( + &self, + start: *mut (), + end: *mut (), + mut max_contig: usize, + root: &Self, + parent: Option<&Self>, + ) -> Option<&'static Self> { + let blocks = self.blocks.deref_mut(); + let Some(block_idx) = self.ends.iter().position(|block| block.end > start) else { + segfault!(); + }; + // SAFETY: Since we found the owner, we know its index to be valid and contain `Some(block)` + let mut block = unsafe { + self.blocks + .get_unchecked_mut(block_idx) + .as_mut() + .unwrap_unchecked() + }; + if let Some(children) = &block.children { + todo!("Free in children") + } else if core::ptr::eq(block.start.as_ptr(), start) { + if block.max_contiguous != 0 { + segfault!(); + } + block.max_contiguous = block.len(); + let (block, left, right) = self.merge_around(block_idx); + unsafe { + Some(NonMaxUsize::new_unchecked( + max_contig.max(block.max_contiguous), + )) + } + } else { + segfault!(); + } + } + fn merge_around(&mut self, 
index: usize) -> (&mut Block, Option<&mut Node>, Option<&mut Node>) { + todo!() + } +} diff --git a/stabby-abi/src/alloc/allocators/freelist_alloc.rs b/stabby-abi/src/alloc/allocators/freelist_alloc.rs new file mode 100644 index 0000000..f7f50dd --- /dev/null +++ b/stabby-abi/src/alloc/allocators/freelist_alloc.rs @@ -0,0 +1,287 @@ +use core::{ + ffi::c_void, + mem::MaybeUninit, + ptr::NonNull, + sync::atomic::{AtomicPtr, AtomicUsize, Ordering}, +}; + +/// A simple free-list based allocator. +/// +/// +#[crate::stabby] +#[derive(Clone, Copy, Default)] +pub struct FreelistAlloc { + inner: [u8; 0], +} +impl core::fmt::Debug for FreelistAlloc { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.write_str("FreelistAlloc") + } +} +impl FreelistAlloc { + /// Constructs the allocator. + pub const fn new() -> Self { + Self { inner: [] } + } +} +impl crate::alloc::IAlloc for FreelistAlloc { + fn alloc(&mut self, layout: crate::alloc::Layout) -> *mut () { + let layout = layout.for_alloc(); + let crate::alloc::Layout { + mut size, + mut align, + } = layout; + match ALLOC.lock().take(size, align) { + Some(slot) => { + let this = unsafe { (slot as *mut Slot).add(1).cast() }; + unsafe { this.write_bytes(0, size) }; + this + } + None => core::ptr::null_mut(), + } + } + unsafe fn realloc(&mut self, p: *mut c_void, new_size: usize) -> *mut c_void { + let slot = p.cast::().sub(1); + let this = unsafe { &mut *slot }; + let prev_size = this.size; + let align = this.align; + let alloc = ALLOC.lock(); + if alloc.try_extend(this, new_size).is_ok() { + p + } else { + let new_ptr = self.alloc(crate::alloc::Layout { + size: new_size, + align, + }); + unsafe { core::ptr::copy_nonoverlapping(p.cast::(), this.cast(), this.size) }; + new_ptr + } + } + unsafe fn free(&mut self, p: *mut ()) { + let slot = p.cast::().sub(1); + ALLOC.lock().insert(&mut *slot); + } +} + +#[repr(C)] +struct Slot { + size: usize, + lower: Option<&'static mut Slot>, + padding: usize, + align: usize, +} +impl core::cmp::Ord for Slot { + fn cmp(&self, other: &Self) -> core::cmp::Ordering { + (self as *const Self).cmp(&(other as *const Self)) + } +} +impl core::cmp::PartialOrd for Slot { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} +impl core::cmp::Eq for Slot {} +impl core::cmp::PartialEq for Slot { + fn eq(&self, other: &Self) -> bool { + core::ptr::eq(self, other) + } +} +impl Slot { + const fn full_size(&self) -> usize { + core::mem::size_of::() + self.size + } + const fn start(&self) -> *const u8 { + unsafe { (self as *const Self).cast::().sub(self.padding) } + } + const fn end(&self) -> *const Slot { + unsafe { (self as *const Self).cast::().add(self.full_size()) } + } + fn shift(&'static mut self, target_align: usize) -> &'static mut Self { + let required_padding = target_align - core::mem::size_of::(); + let padding = self.padding; + if padding == required_padding { + return self; + } + self.size += padding; + self.align = target_align; + self.padding = 0; + let new_addr = unsafe { + (self as *mut Self) + .cast::() + .offset(padding as isize - required_padding as isize) + }; + unsafe { + core::ptr::copy( + (self as *const Self).cast(), + new_addr, + core::mem::size_of::(), + ); + &mut *new_addr.cast() + } + } + fn split(self: &mut &'static mut Self, at: usize) -> Option<&'static mut Self> { + let size = self.size; + (size > at + core::mem::size_of::()).then(move || { + self.size = at; + let slot = unsafe { &mut *(self.end() as *mut MaybeUninit) }; + slot.write(Slot { + size: 
size - at + core::mem::size_of::(), + lower: None, + padding: 0, + align: 8, + }) + }) + } +} + +#[repr(C)] +struct Allocator { + free_list: AtomicPtr, + end: AtomicPtr, +} +struct Slots { + list: Option<&'static mut Slot>, +} +impl Drop for Slots { + fn drop(&mut self) { + ALLOC.free_list.store( + unsafe { + core::mem::transmute::, *mut Slot>(self.list.take()) + }, + Ordering::Release, + ); + } +} +impl Slots { + fn insert(&mut self, mut slot: &'static mut Slot) { + slot = slot.shift(core::mem::size_of::()); + let mut head = &mut self.list; + while let Some(h) = head { + if *h < slot { + if core::ptr::eq(h.end(), slot.start()) { + h.size += slot.full_size(); + return; + } + break; + } + head = unsafe { + core::mem::transmute::<&mut Option<&'static mut Slot>, &mut Option<&'static mut Slot>>( + &mut h.lower, + ) + }; + } + slot.lower = head.take(); + *head = Some(slot) + } + fn take(&mut self, size: usize, align: usize) -> Option<&'static mut Slot> { + let req = size + align; + let slot_owner = self.select_slot(req)?; + let mut slot = slot_owner.take()?; + let lower = slot.lower.take(); + *slot_owner = slot.split(size); + match slot_owner { + Some(owner) => owner.lower = lower, + None => *slot_owner = lower, + } + Some(slot) + } + fn select_slot(&mut self, size: usize) -> Option<&mut Option<&'static mut Slot>> { + let mut head = unsafe { + core::mem::transmute::<&mut Option<&'static mut Slot>, &mut Option<&'static mut Slot>>( + &mut self.list, + ) + }; + while let Some(h) = head { + if h.size < size { + head = unsafe { + core::mem::transmute::< + &mut Option<&'static mut Slot>, + &mut Option<&'static mut Slot>, + >(&mut h.lower) + }; + } else { + return Some(head); + } + } + let alloc_end = ALLOC.end.load(Ordering::Relaxed); + self.grow_take(alloc_end, size) + } + fn grow_take( + &mut self, + alloc_end: *mut Slot, + mut size: usize, + ) -> Option<&mut Option<&'static mut Slot>> { + let slot = unsafe { + crate::alloc::allocators::paging::memmap(alloc_end.cast(), &mut size)? 
+ .cast::>() + .as_mut() + }; + let slot = slot.write(Slot { + size: size - core::mem::size_of::(), + lower: None, + padding: 0, + align: 8, + }); + ALLOC.end.store(slot.end().cast_mut(), Ordering::Relaxed); + self.insert(slot); + Some(&mut self.list) + } + fn try_extend(&mut self, slot: &'static mut Slot, new_size: usize) -> Result<(), ()> { + let alloc_end = ALLOC.end.load(Ordering::Relaxed); + let prev_size = slot.size; + if core::ptr::eq(alloc_end, slot.end()) { + if self.grow_take(alloc_end, new_size - prev_size).is_some() { + slot.size = new_size; + return Ok(()); + } + } + let mut head = unsafe { + core::mem::transmute::<&mut Option<&'static mut Slot>, &mut Option<&'static mut Slot>>( + &mut self.list, + ) + }; + while let Some(h) = head { + match h.start().cmp(&slot.end()) { + core::cmp::Ordering::Less => return Err(()), + core::cmp::Ordering::Equal => { + let extension_size = unsafe { h.end().offset_from(h.start()) }; + if prev_size + extension_size >= new_size { + todo!("just extending the slot may steal too much capacity, yield some back if so") + } else if core::ptr::eq(alloc_end, h.end()) { + todo!("we might still be able to extend the newly acquired slot") + } + } + core::cmp::Ordering::Greater => { + head = unsafe { + core::mem::transmute::< + &mut Option<&'static mut Slot>, + &mut Option<&'static mut Slot>, + >(&mut h.lower) + }; + } + } + } + Err(()) + } +} +impl Allocator { + const fn new() -> Self { + Self { + free_list: AtomicPtr::new(core::ptr::null_mut()), + end: AtomicPtr::new(core::ptr::null_mut()), + } + } + fn lock(&self) -> Slots { + loop { + let list = self + .free_list + .swap(usize::MAX as *mut Slot, Ordering::AcqRel); + if list as usize != usize::MAX { + return Slots { + list: unsafe { list.as_mut() }, + }; + } + core::hint::spin_loop(); + } + } +} diff --git a/stabby-abi/src/alloc/allocators/libc_alloc.rs b/stabby-abi/src/alloc/allocators/libc_alloc.rs new file mode 100644 index 0000000..5b1d70c --- /dev/null +++ b/stabby-abi/src/alloc/allocators/libc_alloc.rs @@ -0,0 +1,106 @@ +// +// Copyright (c) 2023 ZettaScale Technology +// +// This program and the accompanying materials are made available under the +// terms of the Eclipse Public License 2.0 which is available at +// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 +// which is available at https://www.apache.org/licenses/LICENSE-2.0. +// +// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 +// +// Contributors: +// Pierre Avital, +// + +use std::io::Write; + +use crate::alloc::Layout; + +#[cfg(not(windows))] +use libc::posix_memalign; +#[cfg(windows)] +unsafe fn posix_memalign(this: &mut *mut core::ffi::c_void, size: usize, align: usize) -> i32 { + let ptr = unsafe { libc::aligned_malloc(size, align) }; + if ptr.is_null() { + return libc::ENOMEM; + } + *this = ptr; + 0 +} +#[cfg(windows)] +use libc::aligned_free; +#[cfg(not(windows))] +use libc::free as aligned_free; +use libc::{malloc, realloc}; + +/// An allocator based on `libc::posix_memalign` or `libc::aligned_malloc` depending on the platform. +/// +/// It has all of `malloc`'s usual properties. +#[crate::stabby] +#[derive(Clone, Copy, Default)] +pub struct LibcAlloc { + inner: [u8; 0], +} +impl core::fmt::Debug for LibcAlloc { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.write_str("LibcAlloc") + } +} +impl LibcAlloc { + /// Constructs the allocator. 
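// A minimal, standalone sketch (not the crate's code) of the locking scheme shared by
// `allock()` in btree_alloc.rs and `Allocator::lock()`/`Slots::drop()` above: the atomic
// head pointer doubles as a spinlock, with `usize::MAX` swapped in as a "locked" sentinel
// and the real pointer published again on release.
use core::sync::atomic::{AtomicPtr, Ordering};

struct LockedHead<T>(AtomicPtr<T>);

impl<T> LockedHead<T> {
    // Acquire exclusive ownership of the head by swapping in the sentinel.
    fn lock(&self) -> *mut T {
        loop {
            let head = self.0.swap(usize::MAX as *mut T, Ordering::AcqRel);
            if head as usize != usize::MAX {
                return head; // we own the structure until `unlock` stores a pointer back
            }
            core::hint::spin_loop(); // another thread holds the sentinel, retry
        }
    }
    // Release by publishing the (possibly updated) head pointer.
    fn unlock(&self, head: *mut T) {
        self.0.store(head, Ordering::Release);
    }
}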
+ pub const fn new() -> Self { + Self { inner: [] } + } +} + +impl crate::alloc::IAlloc for LibcAlloc { + fn alloc(&mut self, layout: Layout) -> *mut () { + if layout.size == 0 { + return core::ptr::null_mut(); + } + let mut ptr = core::ptr::null_mut(); + let err = unsafe { posix_memalign(&mut ptr, layout.align, layout.size) }; + if err != 0 && (ptr as usize % layout.align != 0) { + ptr = core::ptr::null_mut(); + } + ptr.cast() + } + unsafe fn free(&mut self, ptr: *mut ()) { + unsafe { aligned_free(ptr.cast()) } + } + unsafe fn realloc(&mut self, ptr: *mut (), prev: Layout, new_size: usize) -> *mut () { + dbg!(prev); + if new_size == 0 { + return core::ptr::null_mut(); + } + let mut new_ptr = if prev.align <= 8 { + eprintln!( + "Previous ({ptr:?}): {:?}", + core::slice::from_raw_parts(ptr.cast::(), prev.size) + ); + let new_ptr = unsafe { realloc(ptr.cast(), new_size) }; + eprintln!( + "Reallocd ({new_ptr:?}): {:?}", + core::slice::from_raw_parts(new_ptr.cast::(), prev.size) + ); + new_ptr + } else { + core::ptr::null_mut() + }; + if new_ptr.is_null() { + new_ptr = core::ptr::null_mut(); + let err = unsafe { posix_memalign(&mut new_ptr, prev.align, new_size) }; + if err == 0 { + unsafe { + core::ptr::copy_nonoverlapping( + ptr.cast::(), + new_ptr.cast::(), + prev.size, + ) + } + self.free(ptr.cast()); + } + } + new_ptr.cast() + } +} diff --git a/stabby-abi/src/alloc/allocators/mod.rs b/stabby-abi/src/alloc/allocators/mod.rs new file mode 100644 index 0000000..aa82ac6 --- /dev/null +++ b/stabby-abi/src/alloc/allocators/mod.rs @@ -0,0 +1,62 @@ +/// A simple cross-platform allocator implementation +/// +/// This allocator is based on maintaining a btree of free memory blocks, +/// allowing rather predictable alloc/dealloc times. +// pub mod btree_alloc; +/// A simple cross-platform allocator implementation +/// +/// This allocator is based on an ordered linked list of free memory blocks. 
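// Sketch of the alignment-aware reallocation strategy used by `LibcAlloc::realloc` above,
// with the `dbg!`/`eprintln!` debugging left out (unix-only here; the names are stand-ins,
// not the crate's API): libc's `realloc` only guarantees malloc's default alignment, so
// over-aligned blocks are re-obtained through `posix_memalign` and copied.
#[cfg(unix)]
unsafe fn realloc_aligned(
    ptr: *mut core::ffi::c_void,
    prev_size: usize,
    align: usize,
    new_size: usize,
) -> *mut core::ffi::c_void {
    if align <= 8 {
        // malloc alignment suffices: let libc move the block in place when it can.
        return libc::realloc(ptr, new_size);
    }
    let mut new_ptr: *mut core::ffi::c_void = core::ptr::null_mut();
    if libc::posix_memalign(&mut new_ptr, align, new_size) != 0 {
        return core::ptr::null_mut();
    }
    // Only the previously valid bytes may be read from the old block.
    core::ptr::copy_nonoverlapping(ptr.cast::<u8>(), new_ptr.cast::<u8>(), prev_size.min(new_size));
    libc::free(ptr);
    new_ptr
}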
+// pub mod freelist_alloc; + +#[cfg(not(any(target_arch = "wasm32")))] +/// [`IAlloc`](crate::alloc::IAlloc) bindings for `libc::malloc` +pub mod libc_alloc; + +#[cfg(feature = "alloc-rs")] +/// Rust's GlobalAlloc, accessed through a vtable to ensure no incompatible function calls are performed +pub mod rust_alloc; + +#[cfg(target_arch = "wasm32")] +pub(crate) mod paging { + use core::ptr::NonNull; + pub(crate) const PAGESIZE: usize = 65536; + pub(crate) fn memmap(hint: *const (), requested_capacity: &mut usize) -> Option> { + let added_pages = (*requested_capacity / PAGESIZE) + 1; + let start = core::arch::wasm32::memory_grow(0, added_pages); + if start == usize::MAX { + return None; + } + *requested_capacity = added_pages * PAGESIZE; + unsafe { core::mem::transmute::>>(start * PAGESIZE) } + } + pub(crate) fn memunmap(hint: *mut (), max_unmap: usize) {} +} + +// #[cfg(all(target_family = "unix", feature = "libc"))] +// pub(crate) mod paging { +// use core::ptr::NonNull; +// pub(crate) const PAGESIZE: usize = 65536; +// pub(crate) fn memmap(hint: *const (), requested_capacity: &mut usize) -> Option> { +// const PAGESIZE: usize = 65536; +// let added_pages = (*requested_capacity / PAGESIZE) + 1; +// *requested_capacity = added_pages * PAGESIZE; +// let start = unsafe { +// libc::mmap( +// hint.cast_mut().cast(), +// *requested_capacity, +// libc::PROT_READ | libc::PROT_WRITE, +// libc::MAP_PRIVATE, +// -1, +// 0, +// ) +// }; +// if start as isize == -1 { +// return None; +// } +// NonNull::new(start.cast()) +// } +// pub(crate) fn memunmap(addr: *mut (), mut len: usize) { +// len -= len % PAGESIZE; +// unsafe { libc::munmap(addr.cast(), len) }; +// } +// } diff --git a/stabby-abi/src/alloc/allocators/rust_alloc.rs b/stabby-abi/src/alloc/allocators/rust_alloc.rs new file mode 100644 index 0000000..dce3d5c --- /dev/null +++ b/stabby-abi/src/alloc/allocators/rust_alloc.rs @@ -0,0 +1,88 @@ +use crate::alloc::{IAlloc, Layout}; + +/// Rust's GlobalAlloc, called via an FFI-safe vtable. 
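// Standalone sketch of the wasm32 `paging::memmap` above: linear memory can only grow in
// whole 64 KiB pages, so the helper rounds the request up, grows memory, and reports back
// how many bytes were actually obtained (`grow_pages` is a stand-in name).
#[cfg(target_arch = "wasm32")]
fn grow_pages(requested_bytes: &mut usize) -> Option<core::ptr::NonNull<()>> {
    const PAGESIZE: usize = 65536; // wasm32 page size
    let added_pages = (*requested_bytes / PAGESIZE) + 1;
    // `memory_grow` returns the previous size in pages, or usize::MAX on failure.
    let previous_pages = core::arch::wasm32::memory_grow(0, added_pages);
    if previous_pages == usize::MAX {
        return None;
    }
    *requested_bytes = added_pages * PAGESIZE;
    core::ptr::NonNull::new((previous_pages * PAGESIZE) as *mut ())
}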
+#[crate::stabby] +#[derive(Clone, Copy)] +pub struct RustAlloc { + vtable: &'static RustAllocVt, +} +#[crate::stabby] +/// The VTable for [`RustAlloc`] +pub struct RustAllocVt { + alloc: extern "C" fn(crate::alloc::Layout) -> *mut (), + free: extern "C" fn(*mut ()), + realloc: extern "C" fn(*mut (), crate::alloc::Layout, usize) -> *mut (), +} +extern "C" fn alloc(requested: crate::alloc::Layout) -> *mut () { + let requested = Layout::of::().concat(requested); + let Ok(layout) = core::alloc::Layout::from_size_align(requested.size, requested.align) else { + return core::ptr::null_mut(); + }; + let alloc_start = unsafe { alloc_rs::alloc::alloc(layout) }; + let ret = unsafe { alloc_start.add(layout.align().max(core::mem::size_of::())) }; + unsafe { ret.cast::().sub(1).write(requested) }; + ret.cast() +} +extern "C" fn realloc( + ptr: *mut (), + _prev_layout: crate::alloc::Layout, + new_size: usize, +) -> *mut () { + let prev_layout = unsafe { ptr.cast::().sub(1).read() }; + let alloc_start = unsafe { ptr.cast::().sub(prev_layout.align) }; + let Ok(layout) = core::alloc::Layout::from_size_align(prev_layout.size, prev_layout.align) + else { + return core::ptr::null_mut(); + }; + unsafe { alloc_rs::alloc::realloc(alloc_start, layout, new_size).cast() } +} +extern "C" fn free(ptr: *mut ()) { + let prev_layout = unsafe { ptr.cast::().sub(1).read() }; + let alloc_start = unsafe { ptr.cast::().sub(prev_layout.align) }; + unsafe { + alloc_rs::alloc::dealloc( + alloc_start, + core::alloc::Layout::from_size_align_unchecked(prev_layout.size, prev_layout.align), + ) + } +} +const VTABLE: RustAllocVt = RustAllocVt { + alloc: alloc as extern "C" fn(crate::alloc::Layout) -> *mut (), + free: free as extern "C" fn(*mut ()), + realloc: realloc as extern "C" fn(*mut (), crate::alloc::Layout, usize) -> *mut (), +}; +static VT: RustAllocVt = VTABLE; +impl core::fmt::Debug for RustAlloc { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.write_str("RustAlloc") + } +} +impl RustAlloc { + /// Constructs the allocator. 
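// Standalone sketch (against std's allocator API, not the crate's exact code) of the
// bookkeeping the vtable functions above rely on: the requested layout is written just in
// front of the pointer handed to the caller, so `free`/`realloc` can recover the size and
// alignment without being told them.
use std::alloc::{alloc, dealloc, Layout};

unsafe fn alloc_with_header(user: Layout) -> *mut u8 {
    let header = Layout::new::<Layout>();
    // Reserve room for the header while keeping the user pointer suitably aligned.
    let offset = user.align().max(header.size());
    let Ok(whole) = Layout::from_size_align(offset + user.size(), user.align().max(header.align())) else {
        return core::ptr::null_mut();
    };
    let start = alloc(whole);
    if start.is_null() {
        return start;
    }
    let user_ptr = start.add(offset);
    user_ptr.cast::<Layout>().sub(1).write(whole); // stash the real layout
    user_ptr
}

unsafe fn free_with_header(user_ptr: *mut u8) {
    let whole = user_ptr.cast::<Layout>().sub(1).read();
    // The offset is recomputable from the stored layout, giving back the true start.
    let offset = whole.align().max(core::mem::size_of::<Layout>());
    dealloc(user_ptr.sub(offset), whole);
}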
+ pub const fn new() -> Self { + Self { vtable: &VTABLE } + } +} +impl Default for RustAlloc { + fn default() -> Self { + Self { vtable: &VT } + } +} +impl IAlloc for RustAlloc { + fn alloc(&mut self, layout: crate::alloc::Layout) -> *mut () { + (self.vtable.alloc)(layout) + } + + unsafe fn free(&mut self, ptr: *mut ()) { + (self.vtable.free)(ptr) + } + + unsafe fn realloc( + &mut self, + ptr: *mut (), + prev_layout: crate::alloc::Layout, + new_size: usize, + ) -> *mut () { + (self.vtable.realloc)(ptr, prev_layout, new_size) + } +} diff --git a/stabby-abi/src/alloc/collections/arc_btree.rs b/stabby-abi/src/alloc/collections/arc_btree.rs index 7278352..1eed364 100644 --- a/stabby-abi/src/alloc/collections/arc_btree.rs +++ b/stabby-abi/src/alloc/collections/arc_btree.rs @@ -175,6 +175,7 @@ where type UnusedBits = <*const T as IStable>::UnusedBits; type HasExactlyOneNiche = crate::B0; type ContainsIndirections = crate::B1; + #[cfg(feature = "ctypes")] type CType = <*const T as IStable>::CType; const REPORT: &'static crate::report::TypeReport = &crate::report::TypeReport { name: crate::str::Str::new("ArcBTreeSet"), @@ -631,12 +632,50 @@ impl } } #[test] -fn btree_insert() { +fn btree_insert_libc() { use rand::Rng; let mut rng = rand::thread_rng(); - for _ in 0..1000 { + for i in 0..1000 { + dbg!(i); let mut vec = crate::alloc::vec::Vec::new(); let mut btree = ArcBTreeSet::new(); + for _ in 0..rng.gen_range(0..800) { + eprintln!("btree: {btree:?}"); + eprintln!("vec: {vec:?}"); + let val = rng.gen_range(0..100u8); + if vec.binary_search(&val).is_ok() { + assert_eq!(btree.insert(val), Some(val)); + } else { + vec.push(val); + vec.sort(); + assert_eq!( + btree.insert(val), + None, + "The BTree contained an unexpected value: {btree:?}, {vec:?}" + ); + } + } + vec.sort(); + assert_eq!(vec.len(), btree.len()); + let mut iter = vec.into_iter(); + btree.for_each(|i| assert_eq!(Some(*i), iter.next())); + assert_eq!(iter.next(), None); + } +} + +#[test] +#[cfg(feature = "alloc-rs")] +fn btree_insert_rs() { + use rand::Rng; + let mut rng = rand::thread_rng(); + for i in 0..1000 { + dbg!(i); + let mut vec = crate::alloc::vec::Vec::new_in( + crate::alloc::allocators::rust_alloc::RustAlloc::default(), + ); + let mut btree = ArcBTreeSet::<_, _, false, 5>::new_in( + crate::alloc::allocators::rust_alloc::RustAlloc::default(), + ); for _ in 0..rng.gen_range(0..800) { let val = rng.gen_range(0..100); if vec.binary_search(&val).is_ok() { diff --git a/stabby-abi/src/alloc/libc_alloc.rs b/stabby-abi/src/alloc/libc_alloc.rs deleted file mode 100644 index e794ab1..0000000 --- a/stabby-abi/src/alloc/libc_alloc.rs +++ /dev/null @@ -1,307 +0,0 @@ -// -// Copyright (c) 2023 ZettaScale Technology -// -// This program and the accompanying materials are made available under the -// terms of the Eclipse Public License 2.0 which is available at -// http://www.eclipse.org/legal/epl-2.0, or the Apache License, Version 2.0 -// which is available at https://www.apache.org/licenses/LICENSE-2.0. 
-// -// SPDX-License-Identifier: EPL-2.0 OR Apache-2.0 -// -// Contributors: -// Pierre Avital, -// - -use super::Layout; - -#[cfg(not(any(windows, target_arch = "wasm32")))] -use libc::posix_memalign; -#[cfg(windows)] -unsafe fn posix_memalign(this: &mut *mut core::ffi::c_void, size: usize, align: usize) -> i32 { - let ptr = unsafe { libc::aligned_malloc(size, align) }; - if ptr.is_null() { - return libc::ENOMEM; - } - *this = ptr; - 0 -} -#[cfg(windows)] -use libc::aligned_free; -#[cfg(not(any(windows, target_arch = "wasm32")))] -use libc::free as aligned_free; -#[cfg(not(target_arch = "wasm32"))] -use libc::realloc; -#[cfg(target_arch = "wasm32")] -use wasm32_alloc::{free as aligned_free, posix_memalign, realloc}; - -/// An allocator based on `libc::posix_memalign` or `libc::aligned_malloc` depending on the platform. -/// -/// It has all of `malloc`'s usual properties. -#[crate::stabby] -#[derive(Clone, Copy, Debug, Default)] -pub struct LibcAlloc { - inner: [u8; 0], -} -impl LibcAlloc { - /// Constructs the allocator. - pub const fn new() -> Self { - Self { inner: [] } - } -} - -impl super::IAlloc for LibcAlloc { - fn alloc(&mut self, layout: Layout) -> *mut () { - if layout.size == 0 { - return core::ptr::null_mut(); - } - let mut ptr = core::ptr::null_mut(); - let err = unsafe { posix_memalign(&mut ptr, layout.align, layout.size) }; - if err != 0 && (ptr as usize % layout.align != 0) { - ptr = core::ptr::null_mut(); - } - ptr.cast() - } - unsafe fn free(&mut self, ptr: *mut ()) { - unsafe { aligned_free(ptr.cast()) } - } - unsafe fn realloc(&mut self, ptr: *mut (), new_layout: Layout) -> *mut () { - if new_layout.size == 0 { - return core::ptr::null_mut(); - } - let mut new_ptr = unsafe { realloc(ptr.cast(), new_layout.size) }; - if new_ptr.is_null() || new_ptr as usize % new_layout.align != 0 { - let mut ptr = core::ptr::null_mut(); - let err = unsafe { posix_memalign(&mut ptr, new_layout.align, new_layout.size) }; - if err == 0 { - unsafe { - core::ptr::copy_nonoverlapping( - new_ptr.cast::(), - ptr.cast::(), - new_layout.size, - ) - } - self.free(new_ptr.cast()); - new_ptr = ptr; - } - } - new_ptr.cast() - } -} - -#[cfg(target_arch = "wasm32")] -mod wasm32_alloc { - use core::{ - ffi::c_void, - mem::MaybeUninit, - sync::atomic::{AtomicPtr, Ordering}, - }; - - #[repr(C)] - struct Slot { - size: usize, - lower: Option<&'static mut Slot>, - padding: usize, - _reserved: usize, - } - impl core::cmp::Ord for Slot { - fn cmp(&self, other: &Self) -> core::cmp::Ordering { - (self as *const Self).cmp(&(other as *const Self)) - } - } - impl core::cmp::PartialOrd for Slot { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } - } - impl core::cmp::Eq for Slot {} - impl core::cmp::PartialEq for Slot { - fn eq(&self, other: &Self) -> bool { - core::ptr::eq(self, other) - } - } - impl Slot { - const fn full_size(&self) -> usize { - core::mem::size_of::() + self.size - } - const fn start(&self) -> *const u8 { - unsafe { (self as *const Self).cast::().sub(self.padding) } - } - const fn end(&self) -> *const u8 { - unsafe { (self as *const Self).cast::().add(self.full_size()) } - } - fn shift(&'static mut self, target_align: usize) -> &'static mut Self { - let required_padding = target_align - core::mem::size_of::(); - let padding = self.padding; - if padding == required_padding { - return self; - } - self.size += padding; - self.padding = 0; - let new_addr = unsafe { - (self as *mut Self) - .cast::() - .offset(padding as isize - required_padding as isize) - }; - unsafe { - 
core::ptr::copy( - (self as *const Self).cast(), - new_addr, - core::mem::size_of::(), - ); - &mut *new_addr.cast() - } - } - fn split(self: &mut &'static mut Self, at: usize) -> Option<&'static mut Self> { - let size = self.size; - (size > at + core::mem::size_of::()).then(move || { - self.size = at; - let slot = unsafe { &mut *(self.end() as *mut MaybeUninit) }; - slot.write(Slot { - size: size - at + core::mem::size_of::(), - lower: None, - padding: 0, - _reserved: 0, - }) - }) - } - } - - const PAGESIZE: usize = 65536; - #[repr(C)] - struct Allocator { - free_list: AtomicPtr, - } - struct Slots { - list: Option<&'static mut Slot>, - } - impl Drop for Slots { - fn drop(&mut self) { - ALLOC.free_list.store( - unsafe { - core::mem::transmute::, *mut Slot>(self.list.take()) - }, - Ordering::Release, - ); - } - } - impl Slots { - fn insert(&mut self, mut slot: &'static mut Slot) { - slot = slot.shift(core::mem::size_of::()); - let mut head = &mut self.list; - while let Some(h) = head { - if *h < slot { - if core::ptr::eq(h.end(), slot.start()) { - h.size += slot.full_size(); - return; - } - break; - } - head = unsafe { - core::mem::transmute::< - &mut Option<&'static mut Slot>, - &mut Option<&'static mut Slot>, - >(&mut h.lower) - }; - } - slot.lower = head.take(); - *head = Some(slot) - } - fn take(&mut self, size: usize, align: usize) -> Option<&'static mut Slot> { - let req = size + align; - let slot_owner = self.select_slot(req)?; - let mut slot = slot_owner.take()?; - let lower = slot.lower.take(); - *slot_owner = slot.split(size); - match slot_owner { - Some(owner) => owner.lower = lower, - None => *slot_owner = lower, - } - Some(slot) - } - fn select_slot(&mut self, size: usize) -> Option<&mut Option<&'static mut Slot>> { - let mut head = unsafe { - core::mem::transmute::<&mut Option<&'static mut Slot>, &mut Option<&'static mut Slot>>( - &mut self.list, - ) - }; - while let Some(h) = head { - if h.size < size { - head = unsafe { - core::mem::transmute::< - &mut Option<&'static mut Slot>, - &mut Option<&'static mut Slot>, - >(&mut h.lower) - }; - } else { - return Some(head); - } - } - self.grow_take(size) - } - fn grow_take(&mut self, size: usize) -> Option<&mut Option<&'static mut Slot>> { - let added_pages = (size / PAGESIZE) + 2; - let start = core::arch::wasm32::memory_grow(0, added_pages); - if start == usize::MAX { - return None; - } - let slot = unsafe { &mut *((start * PAGESIZE) as *mut MaybeUninit) }; - let slot = slot.write(Slot { - size: added_pages * PAGESIZE - core::mem::size_of::(), - lower: None, - padding: 0, - _reserved: 0, - }); - self.insert(slot); - Some(&mut self.list) - } - } - impl Allocator { - const fn new() -> Self { - Self { - free_list: AtomicPtr::new(core::ptr::null_mut()), - } - } - fn lock(&self) -> Slots { - loop { - let list = self - .free_list - .swap(usize::MAX as *mut Slot, Ordering::AcqRel); - if list as usize != usize::MAX { - return Slots { - list: unsafe { list.as_mut() }, - }; - } - core::hint::spin_loop(); - } - } - } - static ALLOC: Allocator = Allocator::new(); - pub unsafe fn posix_memalign( - this: &mut *mut core::ffi::c_void, - mut size: usize, - mut align: usize, - ) -> i32 { - size = size.max(64); - align = align.max(8); - match ALLOC.lock().take(size, align) { - Some(slot) => { - *this = (slot as *mut Slot).add(1).cast(); - 0 - } - None => -1, - } - } - pub unsafe fn realloc(p: *mut c_void, new_size: usize) -> *mut c_void { - let mut this = core::ptr::null_mut(); - if posix_memalign(&mut this, new_size, 8) != 0 { - return 
core::ptr::null_mut(); - } - let slot = p.cast::().sub(1); - unsafe { core::ptr::copy_nonoverlapping(p.cast::(), this.cast(), (*slot).size) }; - this - } - pub unsafe fn free(p: *mut c_void) { - let slot = p.cast::().sub(1); - ALLOC.lock().insert(&mut *slot); - } -} diff --git a/stabby-abi/src/alloc/mod.rs b/stabby-abi/src/alloc/mod.rs index 762d034..47ee0b2 100644 --- a/stabby-abi/src/alloc/mod.rs +++ b/stabby-abi/src/alloc/mod.rs @@ -12,13 +12,15 @@ // Pierre Avital, // +#![allow(deprecated)] use core::{marker::PhantomData, mem::MaybeUninit, ptr::NonNull, sync::atomic::AtomicUsize}; use self::vec::ptr_diff; -#[cfg(feature = "libc")] -/// A libc malloc based implementation of the [`IAlloc`] API -pub mod libc_alloc; +/// Allocators provided by `stabby` +pub mod allocators; +#[cfg(all(feature = "libc", not(any(target_arch = "wasm32"))))] +pub use allocators::libc_alloc; /// A generic allocation error. #[crate::stabby] @@ -46,9 +48,9 @@ pub mod sync; pub mod vec; /// The default allocator: libc malloc based if the libc feature is enabled, or unavailable otherwise. -#[cfg(feature = "libc")] +#[cfg(all(feature = "libc", not(any(target_arch = "wasm32"))))] pub type DefaultAllocator = libc_alloc::LibcAlloc; -#[cfg(not(feature = "libc"))] +#[cfg(not(all(feature = "libc", not(any(target_arch = "wasm32")))))] pub type DefaultAllocator = core::convert::Infallible; #[crate::stabby] @@ -103,6 +105,14 @@ impl Layout { } next_matching(self.align, ptr.cast()).cast() } + pub(crate) const fn for_alloc(mut self) -> Self { + if self.align >= 8 { + return self; + } + self.align = 8; + self.size = self.size + (8 - self.size % 8) * ((self.size % 8 != 0) as usize); + self + } } /// An interface to an allocator. @@ -127,11 +137,14 @@ pub trait IAlloc: Unpin { /// /// # Safety /// `ptr` MUST have been allocated through a succesful call to `Self::alloc` with the same instance of `Self` - unsafe fn realloc(&mut self, ptr: *mut (), new_layout: Layout) -> *mut () { - let ret = self.alloc(new_layout); + unsafe fn realloc(&mut self, ptr: *mut (), prev_layout: Layout, new_size: usize) -> *mut () { + let ret = self.alloc(Layout { + size: new_size, + align: prev_layout.align, + }); if !ret.is_null() { unsafe { - core::ptr::copy_nonoverlapping(ptr.cast::(), ret.cast(), new_layout.size); + core::ptr::copy_nonoverlapping(ptr.cast::(), ret.cast(), prev_layout.size); self.free(ptr); } } @@ -141,6 +154,7 @@ pub trait IAlloc: Unpin { /// An ABI stable equivalent to [`IAlloc`]. #[crate::stabby] +#[deprecated = "Stabby doesn't actually use this trait due to conflicts."] pub trait IStableAlloc: Unpin { /// Allocates at least as much memory as requested by layout, ensuring the requested alignment is respected. 
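// Sketch of the fallback that the new `IAlloc::realloc(ptr, prev_layout, new_size)` default
// above encodes (stand-in trait and names, not stabby's): grow by allocating with the old
// alignment and the new size, copy only the bytes that were valid in the old block, then
// free it. Passing the previous layout is what makes the copy length knowable.
trait RawAlloc {
    fn alloc(&mut self, size: usize, align: usize) -> *mut u8;
    unsafe fn free(&mut self, ptr: *mut u8);
}

unsafe fn realloc_fallback<A: RawAlloc>(
    a: &mut A,
    ptr: *mut u8,
    prev_size: usize,
    align: usize,
    new_size: usize,
) -> *mut u8 {
    let new_ptr = a.alloc(new_size, align);
    if !new_ptr.is_null() {
        // `prev_size.min(new_size)` guards the shrinking case; the trait default copies
        // `prev_layout.size` on the assumption that reallocations only ever grow.
        core::ptr::copy_nonoverlapping(ptr, new_ptr, prev_size.min(new_size));
        a.free(ptr);
    }
    new_ptr
}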
/// @@ -157,11 +171,19 @@ pub trait IStableAlloc: Unpin { /// /// # Safety /// `ptr` MUST have been allocated through a succesful call to `Self::alloc` with the same instance of `Self` - extern "C" fn realloc(&mut self, ptr: *mut (), new_layout: Layout) -> *mut () { - let ret = self.alloc(new_layout); + extern "C" fn realloc( + &mut self, + ptr: *mut (), + prev_layout: Layout, + new_size: usize, + ) -> *mut () { + let ret = self.alloc(Layout { + size: new_size, + align: prev_layout.align, + }); if !ret.is_null() { unsafe { - core::ptr::copy_nonoverlapping(ptr.cast::(), ret.cast(), new_layout.size); + core::ptr::copy_nonoverlapping(ptr.cast::(), ret.cast(), prev_layout.size); self.free(ptr); } } @@ -176,8 +198,13 @@ impl IStableAlloc for T { extern "C" fn free(&mut self, ptr: *mut ()) { unsafe { IAlloc::free(self, ptr) } } - extern "C" fn realloc(&mut self, ptr: *mut (), layout: Layout) -> *mut () { - unsafe { IAlloc::realloc(self, ptr, layout) } + extern "C" fn realloc( + &mut self, + ptr: *mut (), + prev_layout: Layout, + new_size: usize, + ) -> *mut () { + unsafe { IAlloc::realloc(self, ptr, prev_layout, new_size) } } } @@ -188,8 +215,8 @@ impl + Unpin> IAlloc for T { unsafe fn free(&mut self, ptr: *mut ()) { IStableAllocDynMut::free(self, ptr) } - unsafe fn realloc(&mut self, ptr: *mut (), new_layout: Layout) -> *mut () { - IStableAllocDynMut::realloc(self, ptr, new_layout) + unsafe fn realloc(&mut self, ptr: *mut (), prev_layout: Layout, new_size: usize) -> *mut () { + IStableAllocDynMut::realloc(self, ptr, prev_layout, new_size) } } impl IAlloc for core::convert::Infallible { @@ -323,28 +350,39 @@ impl AllocPtr { /// Allocates a pointer to a single element of `T`, prefixed by an [`AllocPrefix`] pub fn alloc(alloc: &mut Alloc) -> Option { let ptr = alloc.alloc(Layout::of::>().concat(Layout::of::())); - NonNull::new(ptr).map(|ptr| unsafe { - ptr.cast::>().as_mut().capacity = AtomicUsize::new(1); - Self { + NonNull::new(ptr).map(|prefix| unsafe { + prefix.cast::>().as_mut().capacity = AtomicUsize::new(1); + let this = Self { ptr: NonNull::new_unchecked( - ptr.as_ptr().cast::().add(Self::prefix_skip()).cast(), + prefix.as_ptr().cast::().add(Self::prefix_skip()).cast(), ), marker: PhantomData, - } + }; + assert!(core::ptr::eq( + prefix.as_ptr().cast(), + this.prefix() as *const _ + )); + dbg!(prefix); + this }) } /// Allocates a pointer to an array of `capacity` `T`, prefixed by an [`AllocPrefix`] pub fn alloc_array(alloc: &mut Alloc, capacity: usize) -> Option { let ptr = alloc.alloc(Layout::of::>().concat(Layout::array::(capacity))); - NonNull::new(ptr).map(|ptr| unsafe { - ptr.cast::>().as_mut().capacity = AtomicUsize::new(capacity); - Self { - ptr: NonNull::new_unchecked( - ptr.as_ptr().cast::().add(Self::prefix_skip()).cast(), - ), + NonNull::new(ptr).map(|prefix| unsafe { + prefix.cast::>().as_mut().capacity = AtomicUsize::new(capacity); + let ptr = prefix.as_ptr().cast::().add(Self::prefix_skip()); + let this = Self { + ptr: NonNull::new_unchecked(ptr.cast()), marker: PhantomData, - } + }; + assert!(core::ptr::eq( + prefix.as_ptr().cast(), + this.prefix() as *const _ + )); + dbg!(prefix); + this }) } /// Reallocates a pointer to an array of `capacity` `T`, prefixed by an [`AllocPrefix`]. 
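// Standalone illustration (using core's `Layout`, not stabby's) of the size/offset
// arithmetic behind `AllocPtr::alloc_array` above: one allocation holds an
// `AllocPrefix`-style header followed by the element array, and the offset to the array
// is what the crate calls `prefix_skip`.
use core::alloc::Layout;

fn header_plus_array<Header, T>(capacity: usize) -> Option<(Layout, usize)> {
    let header = Layout::new::<Header>();
    let array = Layout::array::<T>(capacity).ok()?;
    let (whole, offset_to_array) = header.extend(array).ok()?;
    Some((whole.pad_to_align(), offset_to_array))
}

#[test]
fn header_plus_array_layout() {
    // With an 8-byte header and four u32 elements, the array starts right after the header.
    let (layout, skip) = header_plus_array::<u64, u32>(4).unwrap();
    assert_eq!(skip, 8);
    assert_eq!(layout.size(), 8 + 4 * 4);
}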
@@ -353,19 +391,33 @@ impl AllocPtr { /// /// # Safety /// `self` must not be dangling - pub unsafe fn realloc(self, alloc: &mut Alloc, capacity: usize) -> Option { + pub unsafe fn realloc( + self, + alloc: &mut Alloc, + prev_capacity: usize, + new_capacity: usize, + ) -> Option { + let layout = Layout::of::>().concat(Layout::array::(prev_capacity)); let ptr = alloc.realloc( - self.prefix() as *const _ as *mut _, - Layout::of::>().concat(Layout::array::(capacity)), + dbg!(self.prefix() as *const AllocPrefix) + .cast_mut() + .cast(), + layout, + new_capacity, ); - NonNull::new(ptr).map(|ptr| unsafe { - ptr.cast::>().as_mut().capacity = AtomicUsize::new(capacity); - Self { - ptr: NonNull::new_unchecked( - ptr.as_ptr().cast::().add(Self::prefix_skip()).cast(), - ), + NonNull::new(ptr).map(|prefix| unsafe { + prefix.cast::>().as_mut().capacity = AtomicUsize::new(new_capacity); + let ptr = prefix.as_ptr().cast::().add(Self::prefix_skip()); + let this = Self { + ptr: NonNull::new_unchecked(ptr.cast()), marker: PhantomData, - } + }; + assert!(core::ptr::eq( + prefix.as_ptr().cast(), + this.prefix() as *const _ + )); + dbg!(prefix); + this }) } /// Reallocates a pointer to an array of `capacity` `T`, prefixed by an [`AllocPrefix`] diff --git a/stabby-abi/src/alloc/vec.rs b/stabby-abi/src/alloc/vec.rs index fc01145..5ba49b3 100644 --- a/stabby-abi/src/alloc/vec.rs +++ b/stabby-abi/src/alloc/vec.rs @@ -204,12 +204,14 @@ impl Vec { /// otherwise returns Err(AllocationError) pub fn try_reserve(&mut self, additional: usize) -> Result { if self.remaining_capacity() < additional { - let new_capacity = self.len() + additional; - let start = if self.capacity() != 0 { + let len = self.len(); + let new_capacity = len + additional; + let old_capacity = self.capacity(); + let start = if old_capacity != 0 { unsafe { self.inner .start - .realloc(&mut self.inner.alloc, new_capacity) + .realloc(&mut self.inner.alloc, old_capacity, new_capacity) } } else { AllocPtr::alloc_array(&mut self.inner.alloc, new_capacity) @@ -217,7 +219,7 @@ impl Vec { let Some(start) = start else { return Err(AllocationError()); }; - let end = ptr_add(*start, self.len()); + let end = ptr_add(*start, len); let capacity = ptr_add(*start, new_capacity); self.inner.start = start; self.inner.end = end; diff --git a/stabby-abi/src/enums/mod.rs b/stabby-abi/src/enums/mod.rs index 677768f..57d34a1 100644 --- a/stabby-abi/src/enums/mod.rs +++ b/stabby-abi/src/enums/mod.rs @@ -55,6 +55,7 @@ unsafe impl IStable for BitDeterminant { type UnusedBits = Array; type HasExactlyOneNiche = Saturator; type ContainsIndirections = B0; + #[cfg(feature = "ctypes")] type CType = u8; primitive_report!("BitDeterminant"); } @@ -96,6 +97,7 @@ unsafe impl IStable for ValueIsErr IStable for Not { type UnusedBits = Determinant::UnusedBits; type HasExactlyOneNiche = Determinant::HasExactlyOneNiche; type ContainsIndirections = Determinant::ContainsIndirections; + #[cfg(feature = "ctypes")] type CType = Determinant::CType; primitive_report!("Not", Determinant); } diff --git a/stabby-abi/src/istable.rs b/stabby-abi/src/istable.rs index a3c4b77..06b1a61 100644 --- a/stabby-abi/src/istable.rs +++ b/stabby-abi/src/istable.rs @@ -44,6 +44,7 @@ pub unsafe trait IStable: Sized { type HasExactlyOneNiche: ISaturatingAdd; /// Whether or not the type contains indirections (pointers, indices in independent data-structures...) 
type ContainsIndirections: Bit; + #[cfg(feature = "ctypes")] /// A support mechanism for [`safer-ffi`](https://crates.io/crates/safer-ffi), allowing all [`IStable`] types to also be `safer_ffi::ReprC` type CType: IStable; /// A compile-time generated report of the fields of the type, allowing for compatibility inspection. @@ -123,6 +124,7 @@ unsafe impl IStable for NotPod { type ForbiddenValues = T::ForbiddenValues; type HasExactlyOneNiche = T::HasExactlyOneNiche; type UnusedBits = T::UnusedBits; + #[cfg(feature = "ctypes")] type CType = T::CType; primitive_report!("NotPod", T); } @@ -188,6 +190,7 @@ unsafe impl< type UnusedBits = UnusedBits; type HasExactlyOneNiche = HasExactlyOneNiche; type ContainsIndirections = B0; + #[cfg(feature = "ctypes")] type CType = (); primitive_report!("NicheExporter"); } @@ -386,6 +389,7 @@ unsafe impl IStable for FieldPair { as IStable>::HasExactlyOneNiche, >; type ContainsIndirections = ::Or; + #[cfg(feature = "ctypes")] type CType = (); primitive_report!("FP"); } @@ -517,6 +521,7 @@ unsafe impl IStable for Union { type Align = ::Max; type HasExactlyOneNiche = B0; type ContainsIndirections = ::Or; + #[cfg(feature = "ctypes")] type CType = <::Divide as IUnsignedBase>::Array< ::AsUint, >; @@ -537,6 +542,7 @@ unsafe impl IStable for AlignedAfter { >; type HasExactlyOneNiche = T::HasExactlyOneNiche; type ContainsIndirections = T::ContainsIndirections; + #[cfg(feature = "ctypes")] type CType = (); primitive_report!("FP"); } @@ -549,6 +555,7 @@ unsafe impl IStable for Struct { <::NextMultipleOf - T::Size) as IUnsignedBase>::PaddingBitMask as IBitMask>::Shift>; type HasExactlyOneNiche = Saturator; type ContainsIndirections = T::ContainsIndirections; + #[cfg(feature = "ctypes")] type CType = (); primitive_report!("FP"); } diff --git a/stabby-abi/src/lib.rs b/stabby-abi/src/lib.rs index 43255dc..379bdfb 100644 --- a/stabby-abi/src/lib.rs +++ b/stabby-abi/src/lib.rs @@ -26,6 +26,9 @@ #![cfg_attr(not(feature = "std"), no_std)] #![cfg_attr(stabby_nightly, feature(freeze))] +#[cfg(feature = "alloc-rs")] +extern crate alloc as alloc_rs; + /// ABI-stable smart pointers and allocated data structures, with support for custom allocators. pub mod alloc; /// Extending [Non-Zero Types](core::num) to enable niches for other values than 0. @@ -261,6 +264,7 @@ unsafe impl IStable for StableLike { type UnusedBits = As::UnusedBits; type HasExactlyOneNiche = As::HasExactlyOneNiche; type ContainsIndirections = As::ContainsIndirections; + #[cfg(feature = "ctypes")] type CType = As::CType; const ID: u64 = crate::report::gen_id(Self::REPORT); const REPORT: &'static report::TypeReport = As::REPORT; @@ -294,6 +298,7 @@ unsafe impl< type UnusedBits = End; type HasExactlyOneNiche = HasExactlyOneNiche; type ContainsIndirections = ContainsIndirections; + #[cfg(feature = "ctypes")] type CType = (); primitive_report!("NoNiches"); } @@ -346,6 +351,7 @@ unsafe impl IStable for StableIf { type UnusedBits = T::UnusedBits; type HasExactlyOneNiche = T::HasExactlyOneNiche; type ContainsIndirections = T::ContainsIndirections; + #[cfg(feature = "ctypes")] type CType = T::CType; const REPORT: &'static report::TypeReport = T::REPORT; const ID: u64 = crate::report::gen_id(Self::REPORT); @@ -401,19 +407,6 @@ pub use istable::{Array, End, IStable}; /// The heart of `stabby`: the [`IStable`] trait. 
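// Minimal illustration of the pattern introduced throughout this diff (a stand-in trait,
// not stabby's `IStable`): the `CType` associated type only exists when the `ctypes`
// feature is enabled, so every implementation gates its `type CType = ...;` line behind
// the same `#[cfg]`, exactly as the one-line `#[cfg(feature = "ctypes")]` additions here do.
trait HasCType {
    #[cfg(feature = "ctypes")]
    type CType;
}

struct Example;

impl HasCType for Example {
    #[cfg(feature = "ctypes")]
    type CType = u8; // compiled only when the feature is active
}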
pub mod istable; -mod boundtests { - #[crate::stabby] - pub trait Test { - extern "C" fn test(&self); - extern "C" fn test2(&self); - } - #[crate::stabby] - pub struct Test2 { - a: usize, - b: usize, - } -} - /// Expands to [`unreachable!()`](core::unreachable) in debug builds or if `--cfg stabby_check_unreachable=true` has been set in the `RUST_FLAGS`, and to [`core::hint::unreachable_unchecked`] otherwise. /// /// This lets the compiler take advantage of the fact that the code is unreachable in release builds, and optimize accordingly, while giving you the opportunity to double check this at runtime in case of doubts. diff --git a/stabby-abi/src/num.rs b/stabby-abi/src/num.rs index d8d9ed3..84666a6 100644 --- a/stabby-abi/src/num.rs +++ b/stabby-abi/src/num.rs @@ -78,6 +78,7 @@ macro_rules! define_non_max { type UnusedBits = <$NonZeroU8 as crate::IStable>::UnusedBits; type HasExactlyOneNiche = <$NonZeroU8 as crate::IStable>::HasExactlyOneNiche; type ContainsIndirections = <$NonZeroU8 as crate::IStable>::ContainsIndirections; + #[cfg(feature = "ctypes")] type CType = <$NonZeroU8 as crate::IStable>::CType; const ID: u64 = $crate::report::gen_id(Self::REPORT); const REPORT: &'static $crate::report::TypeReport = &$crate::report::TypeReport { @@ -158,6 +159,7 @@ macro_rules! define_non_x { type UnusedBits = <$NonZeroU8 as crate::IStable>::UnusedBits; type HasExactlyOneNiche = <$NonZeroU8 as crate::IStable>::HasExactlyOneNiche; type ContainsIndirections = <$NonZeroU8 as crate::IStable>::ContainsIndirections; + #[cfg(feature = "ctypes")] type CType = <$NonZeroU8 as crate::IStable>::CType; const ID: u64 = $crate::report::gen_id(Self::REPORT); const REPORT: &'static $crate::report::TypeReport = &$crate::report::TypeReport { diff --git a/stabby-abi/src/padding.rs b/stabby-abi/src/padding.rs index fc5df07..b42cda9 100644 --- a/stabby-abi/src/padding.rs +++ b/stabby-abi/src/padding.rs @@ -35,6 +35,7 @@ unsafe impl IStable for Padded { >; type HasExactlyOneNiche = Saturator; type ContainsIndirections = T::ContainsIndirections; + #[cfg(feature = "ctypes")] type CType = Tuple<::CType, T::CType>; const REPORT: &'static report::TypeReport = T::REPORT; const ID: u64 = crate::report::gen_id(Self::REPORT); diff --git a/stabby-abi/src/result.rs b/stabby-abi/src/result.rs index f11e7a5..0815cdc 100644 --- a/stabby-abi/src/result.rs +++ b/stabby-abi/src/result.rs @@ -53,6 +53,7 @@ where <>::NicheExporter as IStable>::ForbiddenValues; type UnusedBits = <, ::AsUint> as IStable>::UnusedBits as IBitMask>::BitOr<<<>::NicheExporter as IStable>::UnusedBits as IBitMask>::Shift<< as IStable>::Size as Unsigned>::NextMultipleOf>>; type HasExactlyOneNiche = B0; + #[cfg(feature = "ctypes")] type CType = ::Size, ::Align> as IStable>::CType; const REPORT: &'static crate::report::TypeReport = &crate::report::TypeReport { name: Str::new("Result"), diff --git a/stabby-abi/src/stable_impls/mod.rs b/stabby-abi/src/stable_impls/mod.rs index 5dc7489..39721a7 100644 --- a/stabby-abi/src/stable_impls/mod.rs +++ b/stabby-abi/src/stable_impls/mod.rs @@ -21,6 +21,7 @@ macro_rules! same_as { type UnusedBits = <$t as IStable>::UnusedBits; type ForbiddenValues = <$t as IStable>::ForbiddenValues; type HasExactlyOneNiche = <$t as IStable>::HasExactlyOneNiche; + #[cfg(feature = "ctypes")] type CType = <$t as IStable>::CType; primitive_report!($($name)*); }; @@ -30,6 +31,7 @@ macro_rules! 
same_as { type UnusedBits = <$t as IStable>::UnusedBits; type ForbiddenValues = <$t as IStable>::ForbiddenValues; type HasExactlyOneNiche = <$t as IStable>::HasExactlyOneNiche; + #[cfg(feature = "ctypes")] type CType = <$t as IStable>::CType; }; } @@ -232,6 +234,7 @@ unsafe impl IStable for () { type UnusedBits = End; type HasExactlyOneNiche = B0; type ContainsIndirections = B0; + #[cfg(feature = "ctypes")] type CType = (); primitive_report!("()"); } @@ -242,6 +245,7 @@ unsafe impl IStable for core::marker::PhantomData { type UnusedBits = End; type HasExactlyOneNiche = B0; type ContainsIndirections = B0; + #[cfg(feature = "ctypes")] type CType = (); primitive_report!("core::marker::PhantomData"); } @@ -252,6 +256,7 @@ unsafe impl IStable for core::marker::PhantomPinned { type UnusedBits = End; type HasExactlyOneNiche = B0; type ContainsIndirections = B0; + #[cfg(feature = "ctypes")] type CType = (); primitive_report!("core::marker::PhantomPinned"); } @@ -262,6 +267,7 @@ unsafe impl IStable for bool { type UnusedBits = End; type HasExactlyOneNiche = B0; type ContainsIndirections = B0; + #[cfg(feature = "ctypes")] type CType = ::AsUint; primitive_report!("bool"); } @@ -273,6 +279,7 @@ unsafe impl IStable for u8 { type Size = U1; type HasExactlyOneNiche = B0; type ContainsIndirections = B0; + #[cfg(feature = "ctypes")] type CType = ::AsUint; primitive_report!("u8"); } @@ -284,6 +291,7 @@ unsafe impl IStable for core::num::NonZeroU8 { type ForbiddenValues = nz_holes!(U0); type HasExactlyOneNiche = B1; type ContainsIndirections = B0; + #[cfg(feature = "ctypes")] type CType = ::AsUint; primitive_report!("core::num::NonZeroU8"); } @@ -294,6 +302,7 @@ unsafe impl IStable for u16 { type Size = U2; type HasExactlyOneNiche = B0; type ContainsIndirections = B0; + #[cfg(feature = "ctypes")] type CType = ::AsUint; primitive_report!("u16"); } @@ -305,6 +314,7 @@ unsafe impl IStable for core::num::NonZeroU16 { type Size = U2; type HasExactlyOneNiche = B1; type ContainsIndirections = B0; + #[cfg(feature = "ctypes")] type CType = ::AsUint; primitive_report!("core::num::NonZeroU16"); } @@ -315,6 +325,7 @@ unsafe impl IStable for u32 { type Size = U4; type HasExactlyOneNiche = B0; type ContainsIndirections = B0; + #[cfg(feature = "ctypes")] type CType = ::AsUint; primitive_report!("u32"); } @@ -326,6 +337,7 @@ unsafe impl IStable for core::num::NonZeroU32 { type Size = U4; type HasExactlyOneNiche = B1; type ContainsIndirections = B0; + #[cfg(feature = "ctypes")] type CType = ::AsUint; primitive_report!("core::num::NonZeroU32"); } @@ -336,6 +348,7 @@ unsafe impl IStable for u64 { type Size = U8; type HasExactlyOneNiche = B0; type ContainsIndirections = B0; + #[cfg(feature = "ctypes")] type CType = ::AsUint; primitive_report!("u64"); } @@ -347,6 +360,7 @@ unsafe impl IStable for core::num::NonZeroU64 { type Size = U8; type HasExactlyOneNiche = B1; type ContainsIndirections = B0; + #[cfg(feature = "ctypes")] type CType = ::AsUint; primitive_report!("core::num::NonZeroU64"); } @@ -371,6 +385,7 @@ unsafe impl IStable for u128 { type Align = U8; type ContainsIndirections = B0; + #[cfg(feature = "ctypes")] type CType = ::AsUint; #[rustversion::before(1.77)] #[cfg(not(target_arch = "aarch64"))] @@ -395,6 +410,7 @@ unsafe impl IStable for core::num::NonZeroU128 { type Size = U16; type HasExactlyOneNiche = B1; type Align = ::Align; + #[cfg(feature = "ctypes")] type CType = ::AsUint; type ContainsIndirections = B0; primitive_report!("core::num::NonZeroU128"); @@ -582,6 +598,7 @@ unsafe impl IStable for 
HasExactlyOneNiche, type UnusedBits = End; type HasExactlyOneNiche = B0; type ContainsIndirections = T::ContainsIndirections; + #[cfg(feature = "ctypes")] type CType = T::CType; const REPORT: &'static report::TypeReport = &report::TypeReport { name: Str::new("Option"), @@ -622,6 +639,7 @@ unsafe impl IStable type ForbiddenValues = End; type UnusedBits = End; type HasExactlyOneNiche = B0; + #[cfg(feature = "ctypes")] type CType = Ok::CType; type ContainsIndirections = ::Or; const REPORT: &'static report::TypeReport = &report::TypeReport { @@ -656,6 +674,7 @@ unsafe impl IStable type ForbiddenValues = End; type UnusedBits = End; type HasExactlyOneNiche = B0; + #[cfg(feature = "ctypes")] type CType = Err::CType; type ContainsIndirections = ::Or; const REPORT: &'static report::TypeReport = &report::TypeReport { @@ -680,6 +699,7 @@ unsafe impl IStable for NameAggregator { type UnusedBits = End; type HasExactlyOneNiche = B0; type ContainsIndirections = B0; + #[cfg(feature = "ctypes")] type CType = (); const REPORT: &'static report::TypeReport = &report::TypeReport { name: Str::new("signature"), @@ -799,6 +819,7 @@ macro_rules! sliceimpl { <<$size as Unsigned>::Equal as Bit>::SaddTernary, >; type ContainsIndirections = T::ContainsIndirections; + #[cfg(feature = "ctypes")] type CType = [T::CType; <$size as Unsigned>::USIZE]; primitive_report!(ARRAY_NAME[<$size as Unsigned>::USIZE], T); } @@ -829,6 +850,7 @@ unsafe impl IStable for core::cmp::Ordering { type UnusedBits = End; type HasExactlyOneNiche = B0; type ContainsIndirections = B0; + #[cfg(feature = "ctypes")] type CType = u8; primitive_report!("core::cmp::Ordering"); } diff --git a/stabby-abi/src/typenum2/unsigned.rs b/stabby-abi/src/typenum2/unsigned.rs index 32bcc05..301c6d5 100644 --- a/stabby-abi/src/typenum2/unsigned.rs +++ b/stabby-abi/src/typenum2/unsigned.rs @@ -44,6 +44,7 @@ unsafe impl IStable for PadByte { type UnusedBits = Array; type HasExactlyOneNiche = B0; type ContainsIndirections = B0; + #[cfg(feature = "ctypes")] type CType = u8; primitive_report!("PadByte"); } @@ -433,6 +434,7 @@ unsafe impl IStable for OneMoreByte { type UnusedBits = ::BitOr>; type HasExactlyOneNiche = L::HasExactlyOneNiche; type ContainsIndirections = L::ContainsIndirections; + #[cfg(feature = "ctypes")] type CType = Tuple; primitive_report!("OneMoreByte"); } diff --git a/stabby-macros/Cargo.toml b/stabby-macros/Cargo.toml index 150c0e1..4116946 100644 --- a/stabby-macros/Cargo.toml +++ b/stabby-macros/Cargo.toml @@ -23,6 +23,9 @@ repository = { workspace = true } readme = { workspace = true } description = "the macros that make working with stabby possible, you shouldn't add this crate to your dependencies, only `stabby`." +[features] +ctypes = [] + [dependencies] proc-macro2 = { workspace = true } proc-macro-crate = { workspace = true } diff --git a/stabby-macros/src/enums.rs b/stabby-macros/src/enums.rs index afb8afb..09f35df 100644 --- a/stabby-macros/src/enums.rs +++ b/stabby-macros/src/enums.rs @@ -271,7 +271,10 @@ pub fn stabby( layout = quote!(#st::Tuple<#reprid, #layout>); report.tyty = crate::Tyty::Enum(trepr); let report_bounds = report.bounds(); - let ctype = report.crepr(); + let ctype = cfg!(feature = "ctypes").then(|| { + let ctype = report.crepr(); + quote! 
{type CType = #ctype;} + }); let size_bug = format!( "{ident}'s size was mis-evaluated by stabby, this is definitely a bug and may cause UB, please file an issue" ); @@ -281,6 +284,11 @@ pub fn stabby( let reprc_bug = format!( "{ident}'s CType was mis-evaluated by stabby, this is definitely a bug and may cause UB, please file an issue" ); + let ctype_assert = cfg!(feature = "ctypes").then(|| { + quote! {if core::mem::size_of::() != core::mem::size_of::<::CType>() || core::mem::align_of::() != core::mem::align_of::<::CType>() { + panic!(#reprc_bug) + }} + }); let assertion = generics .params .is_empty() @@ -302,12 +310,10 @@ pub fn stabby( type Align = <#layout as #st::IStable>::Align; type HasExactlyOneNiche = #st::B0; type ContainsIndirections = <#layout as #st::IStable>::ContainsIndirections; - type CType = #ctype; + #ctype const REPORT: &'static #st::report::TypeReport = & #report; const ID: u64 ={ - if core::mem::size_of::() != core::mem::size_of::<::CType>() || core::mem::align_of::() != core::mem::align_of::<::CType>() { - panic!(#reprc_bug) - } + #ctype_assert if core::mem::size_of::() != <::Size as #st::Unsigned>::USIZE { panic!(#size_bug) } @@ -551,6 +557,9 @@ pub(crate) fn repr_stabby( } } }); + let ctype = cfg!(feature = "ctypes").then(|| { + quote! {type CType = <#layout as #st::IStable>::CType;} + }); let assertions= generics.params.is_empty().then(||{ let check = check.is_some().then(||{ let sub_optimal_message = format!( @@ -591,7 +600,7 @@ pub(crate) fn repr_stabby( type Align = <#layout as #st::IStable>::Align; type HasExactlyOneNiche = #st::B0; type ContainsIndirections = <#layout as #st::IStable>::ContainsIndirections; - type CType = <#layout as #st::IStable>::CType; + #ctype const REPORT: &'static #st::report::TypeReport = & #report; const ID: u64 = #st::report::gen_id(Self::REPORT); } diff --git a/stabby-macros/src/gen_closures.rs b/stabby-macros/src/gen_closures.rs index a2200e3..6557141 100644 --- a/stabby-macros/src/gen_closures.rs +++ b/stabby-macros/src/gen_closures.rs @@ -38,11 +38,11 @@ pub fn gen_closures() -> proc_macro2::TokenStream { /// [`core::ops::FnOnce`], but ABI-stable pub trait #co { /// Call the function - extern "C" fn call_once(self: #st::boxed::Box #(, #args: #argtys)*) -> O; + extern "C" fn call_once(self: #st::alloc::boxed::Box #(, #args: #argtys)*) -> O; } impl O> #co for F { /// Call the function - extern "C" fn call_once(self: #st::boxed::Box #(, #args: #argtys)*) -> O { + extern "C" fn call_once(self: #st::alloc::boxed::Box #(, #args: #argtys)*) -> O { self(#(#args,)*) } } @@ -50,7 +50,7 @@ pub fn gen_closures() -> proc_macro2::TokenStream { /// The v-table for [`core::ops::FnOnce`] #[crate::stabby] pub struct #covt { - call_once: StableIf #(, #argtys)* ) -> O, &'static ()>, O>, + call_once: StableIf #(, #argtys)* ) -> O, &'static ()>, O>, } impl Copy for #covt {} impl Clone for #covt { @@ -82,7 +82,7 @@ pub fn gen_closures() -> proc_macro2::TokenStream { { const VTABLE: &'a Self = &Self { call_once: unsafe { - core::mem::transmute(>::call_once as extern "C" fn(#st::boxed::Box #(, #argtys)* ) -> O) + core::mem::transmute(>::call_once as extern "C" fn(#st::alloc::boxed::Box #(, #argtys)* ) -> O) }, }; } diff --git a/stabby-macros/src/structs.rs b/stabby-macros/src/structs.rs index 54c5aee..58cca0a 100644 --- a/stabby-macros/src/structs.rs +++ b/stabby-macros/src/structs.rs @@ -144,7 +144,15 @@ pub fn stabby( } }); let report_bounds = report.bounds(); - let ctype = report.crepr(); + let ctype = cfg!(feature = "ctypes").then(|| { + let ctype = 
report.crepr(); + quote! {type CType = #ctype;} + }); + let ctype_assert = cfg!(feature = "ctypes").then(|| { + quote! {if core::mem::size_of::() != core::mem::size_of::<::CType>() || core::mem::align_of::() != core::mem::align_of::<::CType>() { + panic!(#reprc_bug) + }} + }); let optdoc = format!("Returns true if the layout for [`{ident}`] is smaller or equal to that Rust would have generated for it."); quote! { #struct_code @@ -157,12 +165,10 @@ pub fn stabby( type Align = <#layout as #st::IStable>::Align; type HasExactlyOneNiche = <#layout as #st::IStable>::HasExactlyOneNiche; type ContainsIndirections = <#layout as #st::IStable>::ContainsIndirections; - type CType = #ctype; + #ctype const REPORT: &'static #st::report::TypeReport = &#report; const ID: u64 = { - if core::mem::size_of::() != core::mem::size_of::<::CType>() || core::mem::align_of::() != core::mem::align_of::<::CType>() { - panic!(#reprc_bug) - } + #ctype_assert if core::mem::size_of::() != <::Size as #st::Unsigned>::USIZE { panic!(#size_bug) } diff --git a/stabby-macros/src/unions.rs b/stabby-macros/src/unions.rs index 9588d6a..24d6eae 100644 --- a/stabby-macros/src/unions.rs +++ b/stabby-macros/src/unions.rs @@ -73,6 +73,9 @@ pub fn stabby( report.add_field(field.ident.as_ref().unwrap().to_string(), ty); } let report_bounds = report.bounds(); + let ctype = cfg!(feature = "ctypes").then(|| { + quote! {type CType = <#layout as #st::IStable>::CType;} + }); quote! { #(#attrs)* #[repr(C)] @@ -87,7 +90,7 @@ pub fn stabby( type Align = <#layout as #st::IStable>::Align; type HasExactlyOneNiche = #st::B0; type ContainsIndirections = <#layout as #st::IStable>::ContainsIndirections; - type CType = <#layout as #st::IStable>::CType; + #ctype const REPORT: &'static #st::report::TypeReport = & #report; const ID: u64 = #st::report::gen_id(Self::REPORT); } diff --git a/stabby/Cargo.toml b/stabby/Cargo.toml index 1402ec2..ab4654c 100644 --- a/stabby/Cargo.toml +++ b/stabby/Cargo.toml @@ -25,7 +25,9 @@ description = "A Stable ABI for Rust with compact sum-types." [features] default = ["std", "libc", "serde"] -std = ["libc", "stabby-abi/std"] +std = ["libc", "stabby-abi/std", "alloc-rs"] +alloc-rs = ["stabby-abi/alloc-rs"] +ctypes = ["stabby-abi/ctypes"] libloading = ["dep:libloading", "std"] libc = ["stabby-abi/libc"] serde = ["stabby-abi/serde"] diff --git a/stabby/src/tests/layouts.rs b/stabby/src/tests/layouts.rs index ca0aa21..858a879 100644 --- a/stabby/src/tests/layouts.rs +++ b/stabby/src/tests/layouts.rs @@ -250,6 +250,7 @@ unsafe impl stabby::abi::IStable for Align128 { type UnusedBits = End; type HasExactlyOneNiche = B0; type ContainsIndirections = B0; + #[cfg(feature = "ctypes")] type CType = Align128; const REPORT: &'static stabby::abi::report::TypeReport = &stabby::abi::report::TypeReport { name: stabby::abi::str::Str::new("Align128"),
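// Sketch of the conditional token emission the macro changes above rely on (stand-in names;
// `quote` and `proc_macro2` are already dependencies of stabby-macros): interpolating an
// `Option<TokenStream>` expands to nothing when it is `None`, so the generated impl simply
// omits `type CType = ...;` unless the macro crate was built with its `ctypes` feature,
// which the real code tests with `cfg!(feature = "ctypes")`.
use proc_macro2::TokenStream;
use quote::quote;

fn istable_impl_tokens(ctypes_enabled: bool) -> TokenStream {
    let ctype = ctypes_enabled.then(|| quote! { type CType = u8; });
    quote! {
        unsafe impl IStable for Example {
            #ctype
        }
    }
}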