diff --git a/CHANGELOG.md b/CHANGELOG.md
index 539b277..08055c9 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,10 @@
+# 6.4.1-rc1 (api=1.2.0, abi=1.0.0)
+- It turns out `libc` doesn't expose any allocation primitives for `wasm` targets:
+  - These targets now get a poor man's allocator. Be warned that while it is ABI-stable, this allocator is trivial and probably slow.
+    If `wasm` is a target you care a lot about and you use stabby's allocation primitives heavily, you should probably roll out a better one (and maybe even contribute it back).
+  - This allocator is not part of `stabby`'s cross-version contract: it may be swapped for a better one in a patch-level API bump.
+    If you need me _not_ to do so (i.e. you pass stabby's allocation primitives in `wasm` between multiple packages and can't pin `stabby` to a patch version for some reason), please reach out to me.
+
 # 6.2.1 (api=1.1.0, abi=1.0.0)
 - Add support for `#[stabby::stabby(version=10, module="my::module")]` to let you change the values in those fields without having to implement the whole trait yourself.
 - Add support for `serde` through the `serde` feature flag.
diff --git a/Cargo.toml b/Cargo.toml
index 80aeef7..c98b86d 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -33,12 +33,12 @@ license = " EPL-2.0 OR Apache-2.0"
 categories = ["development-tools::ffi", "no-std::no-alloc"]
 repository = "https://github.com/ZettaScaleLabs/stabby"
 readme = "stabby/README.md"
-version = "6.2.1" # Track
+version = "6.4.1-rc1" # Track
 
 [workspace.dependencies]
-stabby-macros = { path = "./stabby-macros/", version = "6.2.1", default-features = false } # Track
-stabby-abi = { path = "./stabby-abi/", version = "6.2.1", default-features = false } # Track
-stabby = { path = "./stabby/", version = "6.2.1", default-features = false } # Track
+stabby-macros = { path = "./stabby-macros/", version = "6.4.1-rc1", default-features = false } # Track
+stabby-abi = { path = "./stabby-abi/", version = "6.4.1-rc1", default-features = false } # Track
+stabby = { path = "./stabby/", version = "6.4.1-rc1", default-features = false } # Track
 
 abi_stable = "0.11.2"
 criterion = "0.5.1"
diff --git a/release.py b/release.py
index 582963c..0f8ba78 100644
--- a/release.py
+++ b/release.py
@@ -10,7 +10,8 @@ def factor(x, base):
     return n
 
 def factor_version(version, base):
-    return ".".join([str(factor(int(x), base)) for x in version.split(".")])
+    v = re.sub(r'([0-9\.]+).*', "\\g<1>", version)
+    return ".".join([str(factor(int(x), base)) for x in v.split(".")])
 
 if __name__ == "__main__":
     if len(sys.argv) > 1 and sys.argv[1] == "publish":
@@ -27,13 +28,13 @@ def factor_version(version, base):
     with open(changelog) as clog:
        changelog_text = clog.read()
     for line in changelog_text.splitlines():
-        versions = re.findall(r"^#\s+([\d\.]+)", line)
+        versions = re.findall(r"^#\s+([\.\w\-]+)", line)
        version = versions[0] if len(versions) else None
        if version is not None:
            break
 
     header = f"# {version} (api={factor_version(version, 2)}, abi={factor_version(version, 3)})"
     print(header)
-    changelog_text = re.sub(r"^#\s+([\d\.]+)\s*(\(api[^\)]+\))?", header, changelog_text)
+    changelog_text = re.sub(r"^#\s+([\.\w\-]+)(\s*\(api[^\)]+\))?", header, changelog_text)
     with open(changelog, "w") as clog:
         clog.write(changelog_text)
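The `api=`/`abi=` numbers in the changelog headers above are derived, not hand-picked: `release.py` factors each component of the crate version by 2 (API track) and by 3 (ABI track), and the regex changes in this hunk only make the script tolerate a pre-release suffix like `-rc1`. A minimal sketch of that scheme in Rust (the port and the names are mine; only the behaviour is taken from `release.py`):

```rust
/// How many times `base` divides `x` -- mirrors release.py's `factor`.
fn factor(mut x: u64, base: u64) -> u64 {
    let mut n = 0;
    while x != 0 && x % base == 0 {
        x /= base;
        n += 1;
    }
    n
}

/// Factors every component of `version`, stripping any pre-release
/// suffix first, like the new `re.sub` in release.py does.
fn factor_version(version: &str, base: u64) -> String {
    let numeric = version.split('-').next().unwrap_or(version);
    numeric
        .split('.')
        .map(|x| factor(x.parse().unwrap(), base).to_string())
        .collect::<Vec<_>>()
        .join(".")
}

fn main() {
    assert_eq!(factor_version("6.4.1-rc1", 2), "1.2.0"); // api
    assert_eq!(factor_version("6.4.1-rc1", 3), "1.0.0"); // abi
}
```

This is why the jump from 6.2.1 to 6.4.1-rc1 bumps the API track from 1.1.0 to 1.2.0 while the ABI track stays at 1.0.0: the second component gains a factor of 2, but no component gains or loses a factor of 3.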
diff --git a/stabby-abi/src/alloc/libc_alloc.rs b/stabby-abi/src/alloc/libc_alloc.rs
index a35ef85..e794ab1 100644
--- a/stabby-abi/src/alloc/libc_alloc.rs
+++ b/stabby-abi/src/alloc/libc_alloc.rs
@@ -14,7 +14,7 @@
 use super::Layout;
 
-#[cfg(not(windows))]
+#[cfg(not(any(windows, target_arch = "wasm32")))]
 use libc::posix_memalign;
 #[cfg(windows)]
 unsafe fn posix_memalign(this: &mut *mut core::ffi::c_void, size: usize, align: usize) -> i32 {
@@ -27,8 +27,12 @@ unsafe fn posix_memalign(this: &mut *mut core::ffi::c_void, size: usize, align:
 }
 #[cfg(windows)]
 use libc::aligned_free;
-#[cfg(not(windows))]
+#[cfg(not(any(windows, target_arch = "wasm32")))]
 use libc::free as aligned_free;
+#[cfg(not(target_arch = "wasm32"))]
+use libc::realloc;
+#[cfg(target_arch = "wasm32")]
+use wasm32_alloc::{free as aligned_free, posix_memalign, realloc};
 
 /// An allocator based on `libc::posix_memalign` or `libc::aligned_malloc` depending on the platform.
 ///
@@ -64,8 +68,8 @@ impl super::IAlloc for LibcAlloc {
         if new_layout.size == 0 {
             return core::ptr::null_mut();
         }
-        let mut new_ptr = unsafe { libc::realloc(ptr.cast(), new_layout.size) };
-        if new_ptr as usize % new_layout.align != 0 {
+        let mut new_ptr = unsafe { realloc(ptr.cast(), new_layout.size) };
+        if new_ptr.is_null() || new_ptr as usize % new_layout.align != 0 {
             let mut ptr = core::ptr::null_mut();
             let err = unsafe { posix_memalign(&mut ptr, new_layout.align, new_layout.size) };
             if err == 0 {
@@ -83,3 +87,221 @@ impl super::IAlloc for LibcAlloc {
         new_ptr.cast()
     }
 }
+
+#[cfg(target_arch = "wasm32")]
+mod wasm32_alloc {
+    use core::{
+        ffi::c_void,
+        mem::MaybeUninit,
+        sync::atomic::{AtomicPtr, Ordering},
+    };
+
+    #[repr(C)]
+    struct Slot {
+        size: usize,
+        lower: Option<&'static mut Slot>,
+        padding: usize,
+        _reserved: usize,
+    }
+    impl core::cmp::Ord for Slot {
+        fn cmp(&self, other: &Self) -> core::cmp::Ordering {
+            (self as *const Self).cmp(&(other as *const Self))
+        }
+    }
+    impl core::cmp::PartialOrd for Slot {
+        fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
+            Some(self.cmp(other))
+        }
+    }
+    impl core::cmp::Eq for Slot {}
+    impl core::cmp::PartialEq for Slot {
+        fn eq(&self, other: &Self) -> bool {
+            core::ptr::eq(self, other)
+        }
+    }
+    impl Slot {
+        const fn full_size(&self) -> usize {
+            core::mem::size_of::<Self>() + self.size
+        }
+        const fn start(&self) -> *const u8 {
+            unsafe { (self as *const Self).cast::<u8>().sub(self.padding) }
+        }
+        const fn end(&self) -> *const u8 {
+            unsafe { (self as *const Self).cast::<u8>().add(self.full_size()) }
+        }
+        fn shift(&'static mut self, target_align: usize) -> &'static mut Self {
+            let required_padding = target_align - core::mem::size_of::<Self>();
+            let padding = self.padding;
+            if padding == required_padding {
+                return self;
+            }
+            self.size += padding;
+            self.padding = 0;
+            let new_addr = unsafe {
+                (self as *mut Self)
+                    .cast::<u8>()
+                    .offset(required_padding as isize - padding as isize)
+            };
+            unsafe {
+                core::ptr::copy(
+                    (self as *const Self).cast(),
+                    new_addr,
+                    core::mem::size_of::<Self>(),
+                );
+                &mut *new_addr.cast()
+            }
+        }
+        fn split(self: &mut &'static mut Self, at: usize) -> Option<&'static mut Self> {
+            let size = self.size;
+            (size > at + core::mem::size_of::<Self>()).then(move || {
+                self.size = at;
+                let slot = unsafe { &mut *(self.end() as *mut MaybeUninit<Self>) };
+                slot.write(Slot {
+                    size: size - at - core::mem::size_of::<Self>(),
+                    lower: None,
+                    padding: 0,
+                    _reserved: 0,
+                })
+            })
+        }
+    }
+
+    const PAGESIZE: usize = 65536;
+    #[repr(C)]
+    struct Allocator {
+        free_list: AtomicPtr<Slot>,
+    }
+    struct Slots {
+        list: Option<&'static mut Slot>,
+    }
+    impl Drop for Slots {
+        fn drop(&mut self) {
+            ALLOC.free_list.store(
+                unsafe {
+                    core::mem::transmute::<Option<&'static mut Slot>, *mut Slot>(self.list.take())
+                },
+                Ordering::Release,
+            );
+        }
+    }
+    impl Slots {
+        fn insert(&mut self, mut slot: &'static mut Slot) {
+            slot = slot.shift(core::mem::size_of::<Slot>());
+            let mut head = &mut self.list;
+            while let Some(h) = head {
+                if *h < slot {
+                    if core::ptr::eq(h.end(), slot.start()) {
+                        h.size += slot.full_size();
+                        return;
+                    }
+                    break;
+                }
+                head = unsafe {
+                    core::mem::transmute::<
+                        &mut Option<&'static mut Slot>,
+                        &mut Option<&'static mut Slot>,
+                    >(&mut h.lower)
+                };
+            }
+            slot.lower = head.take();
+            *head = Some(slot)
+        }
+        fn take(&mut self, size: usize, align: usize) -> Option<&'static mut Slot> {
+            let req = size + align;
+            let slot_owner = self.select_slot(req)?;
+            let mut slot = slot_owner.take()?;
+            let lower = slot.lower.take();
+            *slot_owner = slot.split(size);
+            match slot_owner {
+                Some(owner) => owner.lower = lower,
+                None => *slot_owner = lower,
+            }
+            Some(slot)
+        }
+        fn select_slot(&mut self, size: usize) -> Option<&mut Option<&'static mut Slot>> {
+            let mut head = unsafe {
+                core::mem::transmute::<&mut Option<&'static mut Slot>, &mut Option<&'static mut Slot>>(
+                    &mut self.list,
+                )
+            };
+            while let Some(h) = head {
+                if h.size < size {
+                    head = unsafe {
+                        core::mem::transmute::<
+                            &mut Option<&'static mut Slot>,
+                            &mut Option<&'static mut Slot>,
+                        >(&mut h.lower)
+                    };
+                } else {
+                    return Some(head);
+                }
+            }
+            self.grow_take(size)
+        }
+        fn grow_take(&mut self, size: usize) -> Option<&mut Option<&'static mut Slot>> {
+            let added_pages = (size / PAGESIZE) + 2;
+            let start = core::arch::wasm32::memory_grow(0, added_pages);
+            if start == usize::MAX {
+                return None;
+            }
+            let slot = unsafe { &mut *((start * PAGESIZE) as *mut MaybeUninit<Slot>) };
+            let slot = slot.write(Slot {
+                size: added_pages * PAGESIZE - core::mem::size_of::<Slot>(),
+                lower: None,
+                padding: 0,
+                _reserved: 0,
+            });
+            self.insert(slot);
+            Some(&mut self.list)
+        }
+    }
+    impl Allocator {
+        const fn new() -> Self {
+            Self {
+                free_list: AtomicPtr::new(core::ptr::null_mut()),
+            }
+        }
+        fn lock(&self) -> Slots {
+            loop {
+                let list = self
+                    .free_list
+                    .swap(usize::MAX as *mut Slot, Ordering::AcqRel);
+                if list as usize != usize::MAX {
+                    return Slots {
+                        list: unsafe { list.as_mut() },
+                    };
+                }
+                core::hint::spin_loop();
+            }
+        }
+    }
+    static ALLOC: Allocator = Allocator::new();
+    pub unsafe fn posix_memalign(
+        this: &mut *mut core::ffi::c_void,
+        mut align: usize,
+        mut size: usize,
+    ) -> i32 {
+        size = size.max(64);
+        align = align.max(8);
+        match ALLOC.lock().take(size, align) {
+            Some(slot) => {
+                *this = (slot as *mut Slot).add(1).cast();
+                0
+            }
+            None => -1,
+        }
+    }
+    pub unsafe fn realloc(p: *mut c_void, new_size: usize) -> *mut c_void {
+        let mut this = core::ptr::null_mut();
+        if posix_memalign(&mut this, 8, new_size) != 0 {
+            return core::ptr::null_mut();
+        }
+        let slot = p.cast::<Slot>().sub(1);
+        unsafe {
+            core::ptr::copy_nonoverlapping(p.cast::<u8>(), this.cast(), (*slot).size.min(new_size))
+        };
+        free(p);
+        this
+    }
+    pub unsafe fn free(p: *mut c_void) {
+        let slot = p.cast::<Slot>().sub(1);
+        ALLOC.lock().insert(&mut *slot);
+    }
+}
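A note on the new allocator's synchronization, since it has no OS mutex to lean on: the whole free list lives behind a single `AtomicPtr<Slot>`, `Allocator::lock` takes exclusive ownership by swapping in a `usize::MAX` sentinel, and `Slots`' `Drop` impl publishes the (possibly modified) list back. Below is a self-contained sketch of that pattern with a plain payload type so it runs on any target; the names are mine, the real implementation is the `wasm32_alloc` module above.

```rust
use core::sync::atomic::{AtomicPtr, Ordering};

/// Sentinel meaning "the list is currently checked out by some thread".
fn locked<T>() -> *mut T {
    usize::MAX as *mut T
}

/// A free list whose head pointer doubles as the lock.
struct FreeList<T>(AtomicPtr<T>);

impl<T> FreeList<T> {
    /// Spin until we swap out the real head; holding the head means owning the list.
    fn lock(&self) -> Guard<'_, T> {
        loop {
            let head = self.0.swap(locked(), Ordering::AcqRel);
            if head != locked() {
                return Guard { list: &self.0, head };
            }
            core::hint::spin_loop();
        }
    }
}

/// While alive, the guard owns the list; `head` may be rewritten freely.
struct Guard<'a, T> {
    list: &'a AtomicPtr<T>,
    head: *mut T,
}

impl<T> Drop for Guard<'_, T> {
    fn drop(&mut self) {
        // Publish the head back, releasing the "lock".
        self.list.store(self.head, Ordering::Release);
    }
}

fn main() {
    let list: FreeList<i32> = FreeList(AtomicPtr::new(core::ptr::null_mut()));
    let guard = list.lock(); // any other thread calling lock() would now spin
    assert!(guard.head.is_null());
    drop(guard); // the list becomes available again
}
```

The trade-off is the same as in the diff: contended callers burn CPU in `spin_loop`, which is tolerable on a mostly single-threaded target like `wasm32` but would be a poor choice where real contention is expected.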
same_as { #[allow(dead_code)] const ARCH: &[u8] = _ARCH; -#[cfg(target_arch = "x86")] -const _ARCH: &[u8] = b"x86"; -#[cfg(target_arch = "x86_64")] -const _ARCH: &[u8] = b"x86_64"; -#[cfg(target_arch = "arm")] -const _ARCH: &[u8] = b"arm"; #[cfg(target_arch = "aarch64")] const _ARCH: &[u8] = b"aarch64"; +#[cfg(target_arch = "arm")] +const _ARCH: &[u8] = b"arm"; +#[cfg(target_arch = "arm64ec")] +const _ARCH: &[u8] = b"arm64ec"; +#[cfg(target_arch = "avr")] +const _ARCH: &[u8] = b"avr"; +#[cfg(target_arch = "bpf")] +const _ARCH: &[u8] = b"bpf"; +#[cfg(target_arch = "csky")] +const _ARCH: &[u8] = b"csky"; +#[cfg(target_arch = "hexagon")] +const _ARCH: &[u8] = b"hexagon"; #[cfg(target_arch = "loongarch64")] const _ARCH: &[u8] = b"loongarch64"; #[cfg(target_arch = "m68k")] const _ARCH: &[u8] = b"m68k"; #[cfg(target_arch = "mips")] const _ARCH: &[u8] = b"mips"; +#[cfg(target_arch = "mips32r6")] +const _ARCH: &[u8] = b"mips32r6"; #[cfg(target_arch = "mips64")] const _ARCH: &[u8] = b"mips64"; +#[cfg(target_arch = "mips64r6")] +const _ARCH: &[u8] = b"mips64r6"; +#[cfg(target_arch = "msp430")] +const _ARCH: &[u8] = b"msp430"; +#[cfg(target_arch = "nvptx64")] +const _ARCH: &[u8] = b"nvptx64"; #[cfg(target_arch = "powerpc")] const _ARCH: &[u8] = b"powerpc"; #[cfg(target_arch = "powerpc64")] const _ARCH: &[u8] = b"powerpc64"; +#[cfg(target_arch = "riscv32")] +const _ARCH: &[u8] = b"riscv32"; #[cfg(target_arch = "riscv64")] const _ARCH: &[u8] = b"riscv64"; #[cfg(target_arch = "s390x")] const _ARCH: &[u8] = b"s390x"; +#[cfg(target_arch = "sparc")] +const _ARCH: &[u8] = b"sparc"; #[cfg(target_arch = "sparc64")] const _ARCH: &[u8] = b"sparc64"; +#[cfg(target_arch = "wasm32")] +const _ARCH: &[u8] = b"wasm32"; +#[cfg(target_arch = "wasm64")] +const _ARCH: &[u8] = b"wasm64"; +#[cfg(target_arch = "x86")] +const _ARCH: &[u8] = b"x86"; +#[cfg(target_arch = "x86_64")] +const _ARCH: &[u8] = b"x86_64"; +#[cfg(target_arch = "xtensa")] +const _ARCH: &[u8] = b"xtensa"; +#[cfg(not(any( + target_arch = "aarch64", + target_arch = "arm", + target_arch = "arm64ec", + target_arch = "avr", + target_arch = "bpf", + target_arch = "csky", + target_arch = "hexagon", + target_arch = "loongarch64", + target_arch = "m68k", + target_arch = "mips", + target_arch = "mips32r6", + target_arch = "mips64", + target_arch = "mips64r6", + target_arch = "msp430", + target_arch = "nvptx64", + target_arch = "powerpc", + target_arch = "powerpc64", + target_arch = "riscv32", + target_arch = "riscv64", + target_arch = "s390x", + target_arch = "sparc", + target_arch = "sparc64", + target_arch = "wasm32", + target_arch = "wasm64", + target_arch = "x86", + target_arch = "x86_64", + target_arch = "xtensa" +)))] +const _ARCH: &[u8] = b"unknown_arch"; macro_rules! 
check { ($t: ty) => { @@ -298,24 +356,34 @@ unsafe impl IStable for u128 { type ForbiddenValues = End; type Size = U16; type HasExactlyOneNiche = B0; + #[rustversion::before(1.77)] #[cfg(not(target_arch = "aarch64"))] type Align = U8; - #[rustversion::since(1.77)] - type Align = U16; #[rustversion::before(1.77)] #[cfg(target_arch = "aarch64")] type Align = U16; + #[rustversion::since(1.77)] + #[cfg(not(target_arch = "wasm32"))] + type Align = U16; + #[rustversion::since(1.77)] + #[cfg(target_arch = "wasm32")] + type Align = U8; + type ContainsIndirections = B0; type CType = ::AsUint; #[rustversion::before(1.77)] #[cfg(not(target_arch = "aarch64"))] primitive_report!("u128(8)"); - #[rustversion::since(1.77)] - primitive_report!("u128(16)"); #[rustversion::before(1.77)] #[cfg(target_arch = "aarch64")] primitive_report!("u128(16)"); + #[rustversion::since(1.77)] + #[cfg(not(target_arch = "wasm32"))] + primitive_report!("u128(16)"); + #[rustversion::since(1.77)] + #[cfg(target_arch = "wasm32")] + primitive_report!("u128(8)"); } check!(u128);