Skip to content

Commit

Permalink
Merge pull request #9 from axiom-crypto/feat/native
Browse files Browse the repository at this point in the history
chore: Make external function names more unique
  • Loading branch information
arayikhalatyan authored Dec 11, 2024
2 parents c9400f6 + ca59257 commit 155074b
Show file tree
Hide file tree
Showing 5 changed files with 51 additions and 33 deletions.
8 changes: 4 additions & 4 deletions src/add.rs
Original file line number Diff line number Diff line change
Expand Up @@ -155,10 +155,10 @@ impl<const BITS: usize, const LIMBS: usize> Uint<BITS, LIMBS> {
#[inline(always)]
#[must_use]
pub fn wrapping_add(mut self, rhs: Self) -> Self {
use crate::support::zkvm::wrapping_add_impl;
use crate::support::zkvm::zkvm_u256_wrapping_add_impl;
if BITS == 256 {
unsafe {
wrapping_add_impl(
zkvm_u256_wrapping_add_impl(
self.limbs.as_mut_ptr() as *mut u8,
self.limbs.as_ptr() as *const u8,
rhs.limbs.as_ptr() as *const u8,
Expand Down Expand Up @@ -198,10 +198,10 @@ impl<const BITS: usize, const LIMBS: usize> Uint<BITS, LIMBS> {
#[inline(always)]
#[must_use]
pub fn wrapping_sub(mut self, rhs: Self) -> Self {
use crate::support::zkvm::wrapping_sub_impl;
use crate::support::zkvm::zkvm_u256_wrapping_sub_impl;
if BITS == 256 {
unsafe {
wrapping_sub_impl(
zkvm_u256_wrapping_sub_impl(
self.limbs.as_mut_ptr() as *mut u8,
self.limbs.as_ptr() as *const u8,
rhs.limbs.as_ptr() as *const u8,
Expand Down
40 changes: 29 additions & 11 deletions src/bits.rs
Original file line number Diff line number Diff line change
Expand Up @@ -299,10 +299,10 @@ impl<const BITS: usize, const LIMBS: usize> Uint<BITS, LIMBS> {
if rhs >= 256 {
return Self::ZERO;
}
use crate::support::zkvm::wrapping_shl_impl;
use crate::support::zkvm::zkvm_u256_wrapping_shl_impl;
let rhs = rhs as u64;
unsafe {
wrapping_shl_impl(
zkvm_u256_wrapping_shl_impl(
self.limbs.as_mut_ptr() as *mut u8,
self.limbs.as_ptr() as *const u8,
[rhs].as_ptr() as *const u8,
Expand Down Expand Up @@ -398,10 +398,10 @@ impl<const BITS: usize, const LIMBS: usize> Uint<BITS, LIMBS> {
if rhs >= 256 {
return Self::ZERO;
}
use crate::support::zkvm::wrapping_shr_impl;
use crate::support::zkvm::zkvm_u256_wrapping_shr_impl;
let rhs = rhs as u64;
unsafe {
wrapping_shr_impl(
zkvm_u256_wrapping_shr_impl(
self.limbs.as_mut_ptr() as *mut u8,
self.limbs.as_ptr() as *const u8,
[rhs].as_ptr() as *const u8,
Expand Down Expand Up @@ -436,10 +436,10 @@ impl<const BITS: usize, const LIMBS: usize> Uint<BITS, LIMBS> {
pub fn arithmetic_shr(mut self, rhs: usize) -> Self {
if BITS == 256 {
let rhs = if rhs >= 256 { 255 } else { rhs };
use crate::support::zkvm::arithmetic_shr_impl;
use crate::support::zkvm::zkvm_u256_arithmetic_shr_impl;
let rhs = rhs as u64;
unsafe {
arithmetic_shr_impl(
zkvm_u256_arithmetic_shr_impl(
self.limbs.as_mut_ptr() as *mut u8,
self.limbs.as_ptr() as *const u8,
[rhs].as_ptr() as *const u8,
Expand Down Expand Up @@ -504,10 +504,10 @@ impl<const BITS: usize, const LIMBS: usize> Not for Uint<BITS, LIMBS> {
#[cfg(target_os = "zkvm")]
#[inline(always)]
fn not(mut self) -> Self::Output {
use crate::support::zkvm::wrapping_sub_impl;
use crate::support::zkvm::zkvm_u256_wrapping_sub_impl;
if BITS == 256 {
unsafe {
wrapping_sub_impl(
zkvm_u256_wrapping_sub_impl(
self.limbs.as_mut_ptr() as *mut u8,
Self::MAX.limbs.as_ptr() as *const u8,
self.limbs.as_ptr() as *const u8,
Expand Down Expand Up @@ -621,9 +621,27 @@ macro_rules! impl_bit_op {
};
}

impl_bit_op!(BitOr, bitor, BitOrAssign, bitor_assign, bitor_impl);
impl_bit_op!(BitAnd, bitand, BitAndAssign, bitand_assign, bitand_impl);
impl_bit_op!(BitXor, bitxor, BitXorAssign, bitxor_assign, bitxor_impl);
impl_bit_op!(
BitOr,
bitor,
BitOrAssign,
bitor_assign,
zkvm_u256_bitor_impl
);
impl_bit_op!(
BitAnd,
bitand,
BitAndAssign,
bitand_assign,
zkvm_u256_bitand_impl
);
impl_bit_op!(
BitXor,
bitxor,
BitXorAssign,
bitxor_assign,
zkvm_u256_bitxor_impl
);

impl<const BITS: usize, const LIMBS: usize> Shl<Self> for Uint<BITS, LIMBS> {
type Output = Self;
Expand Down
4 changes: 2 additions & 2 deletions src/cmp.rs
Original file line number Diff line number Diff line change
Expand Up @@ -18,10 +18,10 @@ impl<const BITS: usize, const LIMBS: usize> Ord for Uint<BITS, LIMBS> {
#[cfg(target_os = "zkvm")]
#[inline]
fn cmp(&self, rhs: &Self) -> Ordering {
use crate::support::zkvm::cmp_impl;
use crate::support::zkvm::zkvm_u256_cmp_impl;
if BITS == 256 {
return unsafe {
cmp_impl(
zkvm_u256_cmp_impl(
self.limbs.as_ptr() as *const u8,
rhs.limbs.as_ptr() as *const u8,
)
Expand Down
4 changes: 2 additions & 2 deletions src/mul.rs
Original file line number Diff line number Diff line change
Expand Up @@ -75,10 +75,10 @@ impl<const BITS: usize, const LIMBS: usize> Uint<BITS, LIMBS> {
#[inline(always)]
#[must_use]
pub fn wrapping_mul(mut self, rhs: Self) -> Self {
use crate::support::zkvm::wrapping_mul_impl;
use crate::support::zkvm::zkvm_u256_wrapping_mul_impl;
if BITS == 256 {
unsafe {
wrapping_mul_impl(
zkvm_u256_wrapping_mul_impl(
self.limbs.as_mut_ptr() as *mut u8,
self.limbs.as_ptr() as *const u8,
rhs.limbs.as_ptr() as *const u8,
Expand Down
28 changes: 14 additions & 14 deletions src/support/zkvm.rs
Original file line number Diff line number Diff line change
Expand Up @@ -14,29 +14,29 @@ use crate::Uint;

extern "C" {
/// Add two 256-bit numbers and store in `result`.
pub fn wrapping_add_impl(result: *mut u8, a: *const u8, b: *const u8);
pub fn zkvm_u256_wrapping_add_impl(result: *mut u8, a: *const u8, b: *const u8);
/// Subtract two 256-bit numbers and store in `result`.
pub fn wrapping_sub_impl(result: *mut u8, a: *const u8, b: *const u8);
pub fn zkvm_u256_wrapping_sub_impl(result: *mut u8, a: *const u8, b: *const u8);
/// Multiply two 256-bit numbers and store in `result`.
pub fn wrapping_mul_impl(result: *mut u8, a: *const u8, b: *const u8);
pub fn zkvm_u256_wrapping_mul_impl(result: *mut u8, a: *const u8, b: *const u8);
/// Bitwise XOR two 256-bit numbers and store in `result`.
pub fn bitxor_impl(result: *mut u8, a: *const u8, b: *const u8);
pub fn zkvm_u256_bitxor_impl(result: *mut u8, a: *const u8, b: *const u8);
/// Bitwise AND two 256-bit numbers and store in `result`.
pub fn bitand_impl(result: *mut u8, a: *const u8, b: *const u8);
pub fn zkvm_u256_bitand_impl(result: *mut u8, a: *const u8, b: *const u8);
/// Bitwise OR two 256-bit numbers and store in `result`.
pub fn bitor_impl(result: *mut u8, a: *const u8, b: *const u8);
pub fn zkvm_u256_bitor_impl(result: *mut u8, a: *const u8, b: *const u8);
/// Shift left two 256-bit numbers and store in `result`.
pub fn wrapping_shl_impl(result: *mut u8, a: *const u8, b: *const u8);
pub fn zkvm_u256_wrapping_shl_impl(result: *mut u8, a: *const u8, b: *const u8);
/// Shift right two 256-bit numbers and store in `result`.
pub fn wrapping_shr_impl(result: *mut u8, a: *const u8, b: *const u8);
pub fn zkvm_u256_wrapping_shr_impl(result: *mut u8, a: *const u8, b: *const u8);
/// Arithmetic shift right two 256-bit numbers and store in `result`.
pub fn arithmetic_shr_impl(result: *mut u8, a: *const u8, b: *const u8);
pub fn zkvm_u256_arithmetic_shr_impl(result: *mut u8, a: *const u8, b: *const u8);
/// Check if two 256-bit numbers are equal.
pub fn eq_impl(a: *const u8, b: *const u8) -> bool;
pub fn zkvm_u256_eq_impl(a: *const u8, b: *const u8) -> bool;
/// Compare two 256-bit numbers.
pub fn cmp_impl(a: *const u8, b: *const u8) -> Ordering;
pub fn zkvm_u256_cmp_impl(a: *const u8, b: *const u8) -> Ordering;
/// Clone a 256-bit number into `result`. `zero` has to point to a 256-bit value equal to zero (the caller passes `Self::ZERO`).
pub fn clone_impl(result: *mut u8, a: *const u8, zero: *const u8);
pub fn zkvm_u256_clone_impl(result: *mut u8, a: *const u8, zero: *const u8);
}

impl<const BITS: usize, const LIMBS: usize> Copy for Uint<BITS, LIMBS> {}
Expand All @@ -46,7 +46,7 @@ impl<const BITS: usize, const LIMBS: usize> Clone for Uint<BITS, LIMBS> {
if BITS == 256 {
let mut uninit: MaybeUninit<Self> = MaybeUninit::uninit();
unsafe {
clone_impl(
zkvm_u256_clone_impl(
(*uninit.as_mut_ptr()).limbs.as_mut_ptr() as *mut u8,
self.limbs.as_ptr() as *const u8,
Self::ZERO.limbs.as_ptr() as *const u8,
Expand All @@ -62,7 +62,7 @@ impl<const BITS: usize, const LIMBS: usize> PartialEq for Uint<BITS, LIMBS> {
fn eq(&self, other: &Self) -> bool {
if BITS == 256 {
unsafe {
eq_impl(
zkvm_u256_eq_impl(
self.limbs.as_ptr() as *const u8,
other.limbs.as_ptr() as *const u8,
)
Expand Down

0 comments on commit 155074b

Please sign in to comment.