diff --git a/src/snark/Makefile b/src/snark/Makefile
index 3ef82ab8..42348d80 100644
--- a/src/snark/Makefile
+++ b/src/snark/Makefile
@@ -18,9 +18,9 @@ CURVE = BN128
 OPTFLAGS = -O2 -march=x86-64 -g -mtune=x86-64
 
 ifneq ($(PLATFORM),darwin)
-  FEATUREFLAGS = -DUSE_ASM -DMONTGOMERY_OUTPUT
+  FEATUREFLAGS = -DUSE_ASM_SNARK -DMONTGOMERY_OUTPUT
 else
-  FEATUREFLAGS = -DUSE_ASM -DMONTGOMERY_OUTPUT -D__SIZE_TYPE__="unsigned long long"
+  FEATUREFLAGS = -DUSE_ASM_SNARK -DMONTGOMERY_OUTPUT -D__SIZE_TYPE__="unsigned long long"
 endif
 
 # Initialize this using "CXXFLAGS=... make". The makefile appends to that.
diff --git a/src/snark/README.md b/src/snark/README.md
index d5aa3400..b39d7ead 100644
--- a/src/snark/README.md
+++ b/src/snark/README.md
@@ -426,7 +426,7 @@ The following flags change the behavior of the compiled code.
    of the corresponding algebraic objects. This option works for all curves except bn128.
 
-* define `USE_ASM` (on by default)
+* define `USE_ASM_SNARK` (on by default)
 
    Use unrolled assembly routines for F[p] arithmetic and faster heap in multi-exponentiation.
    (When not set, use GMP's `mpn_*` routines instead.)
@@ -472,7 +472,7 @@ with respect to portability. Specifically:
    tested with g++ 4.7, g++ 4.8, and clang 3.4.
 
 6. On x86-64, we by default use highly optimized assembly implementations for some
-   operations (see `USE_ASM` above). On other architectures we fall back to a
+   operations (see `USE_ASM_SNARK` above). On other architectures we fall back to a
    portable C++ implementation, which is slower.
 
 Tested configurations include:
diff --git a/src/snark/libsnark/algebra/fields/fp.tcc b/src/snark/libsnark/algebra/fields/fp.tcc
index f1d97153..0b0f4439 100644
--- a/src/snark/libsnark/algebra/fields/fp.tcc
+++ b/src/snark/libsnark/algebra/fields/fp.tcc
@@ -23,7 +23,7 @@ template<mp_size_t n, const bigint<n>& modulus>
 void Fp_model<n,modulus>::mul_reduce(const bigint<n> &other)
 {
     /* stupid pre-processor tricks; beware */
-#if defined(__x86_64__) && defined(USE_ASM)
+#if defined(__x86_64__) && defined(USE_ASM_SNARK)
     if (n == 3)
     { // Use asm-optimized Comba multiplication and reduction
         mp_limb_t res[2*n];
@@ -293,7 +293,7 @@ Fp_model<n,modulus>& Fp_model<n,modulus>::operator+=(const Fp_model<n,modulus>&
 #ifdef PROFILE_OP_COUNTS
     this->add_cnt++;
 #endif
-#if defined(__x86_64__) && defined(USE_ASM)
+#if defined(__x86_64__) && defined(USE_ASM_SNARK)
     if (n == 3)
     {
         __asm__
@@ -406,7 +406,7 @@ Fp_model<n,modulus>& Fp_model<n,modulus>::operator-=(const Fp_model<n,modulus>&
 #ifdef PROFILE_OP_COUNTS
     this->sub_cnt++;
 #endif
-#if defined(__x86_64__) && defined(USE_ASM)
+#if defined(__x86_64__) && defined(USE_ASM_SNARK)
     if (n == 3)
     {
         __asm__
@@ -579,7 +579,7 @@ Fp_model<n,modulus> Fp_model<n,modulus>::squared() const
     this->mul_cnt--; // zero out the upcoming mul
 #endif
     /* stupid pre-processor tricks; beware */
-#if defined(__x86_64__) && defined(USE_ASM)
+#if defined(__x86_64__) && defined(USE_ASM_SNARK)
     if (n == 3)
     { // use asm-optimized Comba squaring
         mp_limb_t res[2*n];
diff --git a/src/snark/libsnark/algebra/fields/fp_aux.tcc b/src/snark/libsnark/algebra/fields/fp_aux.tcc
index 7f8a3ead..89c7438e 100644
--- a/src/snark/libsnark/algebra/fields/fp_aux.tcc
+++ b/src/snark/libsnark/algebra/fields/fp_aux.tcc
@@ -1,8 +1,8 @@
 /** @file
  *****************************************************************************
  Assembly code snippets for F[p] finite field arithmetic, used by fp.tcc .
- Specific to x86-64, and used only if USE_ASM is defined.
- On other architectures or without USE_ASM, fp.tcc uses a portable
+ Specific to x86-64, and used only if USE_ASM_SNARK is defined.
+ On other architectures or without USE_ASM_SNARK, fp.tcc uses a portable
  C++ implementation instead.
  *****************************************************************************
  * @author This file is part of libsnark, developed by SCIPR Lab
diff --git a/src/snark/libsnark/algebra/scalar_multiplication/multiexp.tcc b/src/snark/libsnark/algebra/scalar_multiplication/multiexp.tcc
index 5dd19a65..7cd32f84 100644
--- a/src/snark/libsnark/algebra/scalar_multiplication/multiexp.tcc
+++ b/src/snark/libsnark/algebra/scalar_multiplication/multiexp.tcc
@@ -38,7 +38,7 @@ public:
 
     bool operator<(const ordered_exponent<n> &other) const
     {
-#if defined(__x86_64__) && defined(USE_ASM)
+#if defined(__x86_64__) && defined(USE_ASM_SNARK)
         if (n == 3)
         {
             int64_t res;
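For context on what the renamed flag gates, here is a minimal sketch, not part of the patch, of the compile-time pattern used throughout `fp.tcc`: on x86-64 with `USE_ASM_SNARK` defined, the `n == 3` case runs unrolled assembly, and otherwise the code falls back to GMP's `mpn_*` routines, as the README describes. The helper name `add_mod3` and the constant `N_LIMBS` are hypothetical; the real code lives in `Fp_model`'s operators. The assembly branch is elided here, so the sketch always computes via the portable path.

```cpp
#include <gmp.h>

static const mp_size_t N_LIMBS = 3; // bn128's Fq and Fr fit in three 64-bit limbs

// r = (a + b) mod m, with a and b already reduced modulo m.
// Hypothetical helper illustrating the gate; not libsnark's actual API.
void add_mod3(mp_limb_t r[N_LIMBS],
              const mp_limb_t a[N_LIMBS],
              const mp_limb_t b[N_LIMBS],
              const mp_limb_t m[N_LIMBS])
{
#if defined(__x86_64__) && defined(USE_ASM_SNARK)
    // In fp.tcc the n == 3 case runs an unrolled __asm__ block at this point;
    // it is elided from this sketch, so both configurations use the code below.
#endif
    // Portable fallback, per the README: GMP's mpn_* routines.
    mp_limb_t tmp[N_LIMBS + 1];
    tmp[N_LIMBS] = mpn_add_n(tmp, a, b, N_LIMBS);      // carry lands in the top limb
    if (tmp[N_LIMBS] != 0 || mpn_cmp(tmp, m, N_LIMBS) >= 0)
    {
        mpn_sub_n(tmp, tmp, m, N_LIMBS);               // single conditional reduction
    }
    for (mp_size_t i = 0; i < N_LIMBS; ++i)
    {
        r[i] = tmp[i];                                 // copy the reduced low limbs out
    }
}
```

The suffix in `USE_ASM_SNARK` presumably scopes the flag to this vendored snark code, so that enabling or disabling it cannot collide with any other `USE_ASM` macro defined elsewhere in the surrounding build.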