Merge pull request #80 from ZettaScaleLabs/wasm-alloc
v6.4.1-rc1 (api=1.2.0, abi=1.0.0)
p-avital authored Jun 27, 2024
2 parents ae5c57e + 3ead8f3 commit e08c8f3
Showing 5 changed files with 319 additions and 21 deletions.
7 changes: 7 additions & 0 deletions CHANGELOG.md
@@ -1,3 +1,10 @@
# 6.4.1-rc1 (api=1.2.0, abi=1.0.0)
- Found out that `libc` doesn't expose any allocation primitives for `wasm` targets.
- These targets now have a poor man's allocator. Be warned that while ABI-stable, this allocator is trivial and probably bad.
If `wasm` is indeed a target which you care a lot about, and you use stabby's allocation primitives a lot, you should probably roll out a better one (maybe even contribute it back).
- This allocator is not included in `stabby`'s cross-version contract: it may be swapped out for a better one in a patch-level API-bump.
If you need me _not_ to do so (i.e. you pass stabby's allocation primitives in `wasm` between multiple packages and can't pin your version of `stabby` down to patch for X reason), please reach out to me.

# 6.2.1 (api=1.1.0, abi=1.0.0)
- Add support for `#[stabby::stabby(version=10, module="my::module")]` to let you change the values in those fields without having to implement the whole trait yourself.
- Add support for `serde` through the `serde` feature flag.
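A hypothetical sketch of the attribute quoted above in use (the struct and its field are made up; the argument syntax is the one from the changelog entry):

#[stabby::stabby(version=10, module="my::module")] // overrides the reported version/module fields
pub struct MyHandle {
    pub id: u32,
}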
8 changes: 4 additions & 4 deletions Cargo.toml
@@ -33,12 +33,12 @@ license = " EPL-2.0 OR Apache-2.0"
categories = ["development-tools::ffi", "no-std::no-alloc"]
repository = "https://github.com/ZettaScaleLabs/stabby"
readme = "stabby/README.md"
version = "6.2.1" # Track
version = "6.4.1-rc1" # Track

[workspace.dependencies]
stabby-macros = { path = "./stabby-macros/", version = "6.2.1", default-features = false } # Track
stabby-abi = { path = "./stabby-abi/", version = "6.2.1", default-features = false } # Track
stabby = { path = "./stabby/", version = "6.2.1", default-features = false } # Track
stabby-macros = { path = "./stabby-macros/", version = "6.4.1-rc1", default-features = false } # Track
stabby-abi = { path = "./stabby-abi/", version = "6.4.1-rc1", default-features = false } # Track
stabby = { path = "./stabby/", version = "6.4.1-rc1", default-features = false } # Track

abi_stable = "0.11.2"
criterion = "0.5.1"
7 changes: 4 additions & 3 deletions release.py
@@ -10,7 +10,8 @@ def factor(x, base):
return n

def factor_version(version, base):
return ".".join([str(factor(int(x), base)) for x in version.split(".")])
v = re.sub(r'([0-9\.]+).*', "\\g<1>", version)
return ".".join([str(factor(int(x), base)) for x in v.split(".")])

if __name__ == "__main__":
if len(sys.argv) > 1 and sys.argv[1] == "publish":
@@ -27,13 +28,13 @@ def factor_version(version, base):
with open(changelog) as clog:
changelog_text = clog.read()
for line in changelog_text.splitlines():
versions = re.findall(r"^#\s+([\d\.]+)", line)
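        # Version headers may now carry pre-release tags such as "6.4.1-rc1".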
versions = re.findall(r"^#\s+([\.\w\-]+)", line)
version = versions[0] if len(versions) else None
if version is not None:
break
header = f"# {version} (api={factor_version(version, 2)}, abi={factor_version(version, 3)})"
print(header)
changelog_text = re.sub(r"^#\s+([\d\.]+)\s*(\(api[^\)]+\))?", header, changelog_text)
changelog_text = re.sub(r"^#\s+([\.\w\-]+)(\s*\(api[^\)]+\))?", header, changelog_text)
with open(changelog, "w") as clog:
clog.write(changelog_text)

230 changes: 226 additions & 4 deletions stabby-abi/src/alloc/libc_alloc.rs
@@ -14,7 +14,7 @@

use super::Layout;

#[cfg(not(windows))]
#[cfg(not(any(windows, target_arch = "wasm32")))]
use libc::posix_memalign;
#[cfg(windows)]
unsafe fn posix_memalign(this: &mut *mut core::ffi::c_void, size: usize, align: usize) -> i32 {
@@ -27,8 +27,12 @@ unsafe fn posix_memalign(this: &mut *mut core::ffi::c_void, size: usize, align:
}
#[cfg(windows)]
use libc::aligned_free;
#[cfg(not(windows))]
#[cfg(not(any(windows, target_arch = "wasm32")))]
use libc::free as aligned_free;
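// `libc` exposes no allocation entry points on `wasm32`, so `realloc`,
// `posix_memalign` and `aligned_free` are routed to the fallback module below.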
#[cfg(not(target_arch = "wasm32"))]
use libc::realloc;
#[cfg(target_arch = "wasm32")]
use wasm32_alloc::{free as aligned_free, posix_memalign, realloc};

/// An allocator based on `libc::posix_memalign` or `libc::aligned_malloc` depending on the platform.
///
@@ -64,8 +68,8 @@ impl super::IAlloc for LibcAlloc {
if new_layout.size == 0 {
return core::ptr::null_mut();
}
let mut new_ptr = unsafe { libc::realloc(ptr.cast(), new_layout.size) };
if new_ptr as usize % new_layout.align != 0 {
let mut new_ptr = unsafe { realloc(ptr.cast(), new_layout.size) };
if new_ptr.is_null() || new_ptr as usize % new_layout.align != 0 {
let mut ptr = core::ptr::null_mut();
let err = unsafe { posix_memalign(&mut ptr, new_layout.align, new_layout.size) };
if err == 0 {
@@ -83,3 +87,221 @@ impl super::IAlloc for LibcAlloc {
new_ptr.cast()
}
}

#[cfg(target_arch = "wasm32")]
mod wasm32_alloc {
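// Fallback allocator for wasm32, where `libc` provides no allocation primitives:
// memory is obtained with `core::arch::wasm32::memory_grow` and recycled through
// an address-sorted, coalescing free list guarded by a spin lock.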
use core::{
ffi::c_void,
mem::MaybeUninit,
sync::atomic::{AtomicPtr, Ordering},
};

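// Header of a free block: `size` is the number of usable bytes following the
// header, `lower` links to the next (lower-addressed) free slot, and `padding`
// counts the bytes between the block's true start and the header.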
#[repr(C)]
struct Slot {
size: usize,
lower: Option<&'static mut Slot>,
padding: usize,
_reserved: usize,
}
impl core::cmp::Ord for Slot {
fn cmp(&self, other: &Self) -> core::cmp::Ordering {
(self as *const Self).cmp(&(other as *const Self))
}
}
impl core::cmp::PartialOrd for Slot {
fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> {
Some(self.cmp(other))
}
}
impl core::cmp::Eq for Slot {}
impl core::cmp::PartialEq for Slot {
fn eq(&self, other: &Self) -> bool {
core::ptr::eq(self, other)
}
}
impl Slot {
const fn full_size(&self) -> usize {
core::mem::size_of::<Self>() + self.size
}
const fn start(&self) -> *const u8 {
unsafe { (self as *const Self).cast::<u8>().sub(self.padding) }
}
const fn end(&self) -> *const u8 {
unsafe { (self as *const Self).cast::<u8>().add(self.full_size()) }
}
fn shift(&'static mut self, target_align: usize) -> &'static mut Self {
let required_padding = target_align - core::mem::size_of::<Self>();
let padding = self.padding;
if padding == required_padding {
return self;
}
self.size += padding;
self.padding = 0;
let new_addr = unsafe {
(self as *mut Self)
.cast::<u8>()
.offset(padding as isize - required_padding as isize)
};
unsafe {
core::ptr::copy(
(self as *const Self).cast(),
new_addr,
core::mem::size_of::<Self>(),
);
&mut *new_addr.cast()
}
}
fn split(self: &mut &'static mut Self, at: usize) -> Option<&'static mut Self> {
let size = self.size;
(size > at + core::mem::size_of::<Self>()).then(move || {
self.size = at;
let slot = unsafe { &mut *(self.end() as *mut MaybeUninit<Slot>) };
slot.write(Slot {
size: size - at + core::mem::size_of::<Self>(),
lower: None,
padding: 0,
_reserved: 0,
})
})
}
}

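// Wasm linear memory grows in 64 KiB pages.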
const PAGESIZE: usize = 65536;
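// Global free-list head; the pointer doubles as a spin lock, with `usize::MAX`
// marking the list as currently borrowed.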
#[repr(C)]
struct Allocator {
free_list: AtomicPtr<Slot>,
}
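// Guard over the free list: dropping it stores the (possibly modified) list back
// into `ALLOC.free_list`, releasing the lock.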
struct Slots {
list: Option<&'static mut Slot>,
}
impl Drop for Slots {
fn drop(&mut self) {
ALLOC.free_list.store(
unsafe {
core::mem::transmute::<Option<&'static mut Slot>, *mut Slot>(self.list.take())
},
Ordering::Release,
);
}
}
impl Slots {
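// Re-inserts a freed slot, keeping the list sorted by descending address and
// merging it into the adjacent lower-addressed slot when the two are contiguous.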
fn insert(&mut self, mut slot: &'static mut Slot) {
slot = slot.shift(core::mem::size_of::<Slot>());
let mut head = &mut self.list;
while let Some(h) = head {
if *h < slot {
if core::ptr::eq(h.end(), slot.start()) {
h.size += slot.full_size();
return;
}
break;
}
head = unsafe {
core::mem::transmute::<
&mut Option<&'static mut Slot>,
&mut Option<&'static mut Slot>,
>(&mut h.lower)
};
}
slot.lower = head.take();
*head = Some(slot)
}
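// Unlinks a slot able to hold `size + align` bytes; if the slot can be split,
// the unused tail takes its place in the list.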
fn take(&mut self, size: usize, align: usize) -> Option<&'static mut Slot> {
let req = size + align;
let slot_owner = self.select_slot(req)?;
let mut slot = slot_owner.take()?;
let lower = slot.lower.take();
*slot_owner = slot.split(size);
match slot_owner {
Some(owner) => owner.lower = lower,
None => *slot_owner = lower,
}
Some(slot)
}
fn select_slot(&mut self, size: usize) -> Option<&mut Option<&'static mut Slot>> {
let mut head = unsafe {
core::mem::transmute::<&mut Option<&'static mut Slot>, &mut Option<&'static mut Slot>>(
&mut self.list,
)
};
while let Some(h) = head {
if h.size < size {
head = unsafe {
core::mem::transmute::<
&mut Option<&'static mut Slot>,
&mut Option<&'static mut Slot>,
>(&mut h.lower)
};
} else {
return Some(head);
}
}
self.grow_take(size)
}
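// Grows linear memory (memory 0) by enough pages for `size` and inserts the new
// region as a single free slot.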
fn grow_take(&mut self, size: usize) -> Option<&mut Option<&'static mut Slot>> {
let added_pages = (size / PAGESIZE) + 2;
let start = core::arch::wasm32::memory_grow(0, added_pages);
if start == usize::MAX {
return None;
}
let slot = unsafe { &mut *((start * PAGESIZE) as *mut MaybeUninit<Slot>) };
let slot = slot.write(Slot {
size: added_pages * PAGESIZE - core::mem::size_of::<Slot>(),
lower: None,
padding: 0,
_reserved: 0,
});
self.insert(slot);
Some(&mut self.list)
}
}
impl Allocator {
const fn new() -> Self {
Self {
free_list: AtomicPtr::new(core::ptr::null_mut()),
}
}
fn lock(&self) -> Slots {
loop {
let list = self
.free_list
.swap(usize::MAX as *mut Slot, Ordering::AcqRel);
if list as usize != usize::MAX {
return Slots {
list: unsafe { list.as_mut() },
};
}
core::hint::spin_loop();
}
}
}
static ALLOC: Allocator = Allocator::new();
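// libc-style shims consumed by `LibcAlloc` above: on success, `posix_memalign`
// returns 0 and stores a pointer just past the block's `Slot` header in `this`.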
pub unsafe fn posix_memalign(
this: &mut *mut core::ffi::c_void,
mut size: usize,
mut align: usize,
) -> i32 {
size = size.max(64);
align = align.max(8);
match ALLOC.lock().take(size, align) {
Some(slot) => {
*this = (slot as *mut Slot).add(1).cast();
0
}
None => -1,
}
}
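// `realloc` always takes a fresh block and copies the byte count recorded in the
// old block's `Slot` header.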
pub unsafe fn realloc(p: *mut c_void, new_size: usize) -> *mut c_void {
let mut this = core::ptr::null_mut();
if posix_memalign(&mut this, new_size, 8) != 0 {
return core::ptr::null_mut();
}
let slot = p.cast::<Slot>().sub(1);
unsafe { core::ptr::copy_nonoverlapping(p.cast::<u8>(), this.cast(), (*slot).size) };
this
}
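// Returns the `Slot` header located just before `p` to the free list.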
pub unsafe fn free(p: *mut c_void) {
let slot = p.cast::<Slot>().sub(1);
ALLOC.lock().insert(&mut *slot);
}
}