From 2f61f960c81e4a45f3849baa7563812e7e526436 Mon Sep 17 00:00:00 2001
From: Jim Jagielski
Date: Mon, 17 Sep 2018 15:50:19 +0000
Subject: [PATCH] Add in Atomics for 64bit ints

git-svn-id: https://svn.apache.org/repos/asf/apr/apr/trunk@1841078 13f79535-47bb-0310-9956-ffa450edef68
---
 CHANGES                             |   2 +
 apr.dsp                             |   4 +
 atomic/unix/builtins64.c            |  64 +++++
 atomic/unix/mutex64.c               | 177 +++++++++++++
 atomic/win32/apr_atomic64.c         |  94 +++++++
 include/apr_atomic.h                |  67 +++++
 include/arch/unix/apr_arch_atomic.h |   4 +
 test/testatomic.c                   | 387 ++++++++++++++++++++++++++++
 8 files changed, 799 insertions(+)
 create mode 100644 atomic/unix/builtins64.c
 create mode 100644 atomic/unix/mutex64.c
 create mode 100644 atomic/win32/apr_atomic64.c

diff --git a/CHANGES b/CHANGES
index 1cc69afaedd..88134dede72 100644
--- a/CHANGES
+++ b/CHANGES
@@ -1,6 +1,8 @@
  -*- coding: utf-8 -*-
 Changes for APR 2.0.0
 
+  *) Atomics: Support for 64bit ints. [Jim Jagielski]
+
   *) apr_jose: Add support for encoding and decoding of JSON Object
      Signing and Encryption messages as per RFC7515, RFC7516, RFC7517
      and RFC7519. [Graham Leggett]

diff --git a/apr.dsp b/apr.dsp
index c5b3f480d3f..38f3f1141c8 100644
--- a/apr.dsp
+++ b/apr.dsp
@@ -142,6 +142,10 @@ LIB32=link.exe -lib
 
 SOURCE=.\atomic\win32\apr_atomic.c
 # End Source File
+# Begin Source File
+
+SOURCE=.\atomic\win32\apr_atomic64.c
+# End Source File
 # End Group
 
 # Begin Group "buckets"
diff --git a/atomic/unix/builtins64.c b/atomic/unix/builtins64.c
new file mode 100644
index 00000000000..4a4b685c7ac
--- /dev/null
+++ b/atomic/unix/builtins64.c
@@ -0,0 +1,64 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "apr_arch_atomic.h"
+
+#ifdef USE_ATOMICS_BUILTINS
+
+APR_DECLARE(apr_uint64_t) apr_atomic_read64(volatile apr_uint64_t *mem)
+{
+    return *mem;
+}
+
+APR_DECLARE(void) apr_atomic_set64(volatile apr_uint64_t *mem, apr_uint64_t val)
+{
+    *mem = val;
+}
+
+APR_DECLARE(apr_uint64_t) apr_atomic_add64(volatile apr_uint64_t *mem, apr_uint64_t val)
+{
+    return __sync_fetch_and_add(mem, val);
+}
+
+APR_DECLARE(void) apr_atomic_sub64(volatile apr_uint64_t *mem, apr_uint64_t val)
+{
+    __sync_fetch_and_sub(mem, val);
+}
+
+APR_DECLARE(apr_uint64_t) apr_atomic_inc64(volatile apr_uint64_t *mem)
+{
+    return __sync_fetch_and_add(mem, 1);
+}
+
+APR_DECLARE(int) apr_atomic_dec64(volatile apr_uint64_t *mem)
+{
+    /* test against zero explicitly so the 64-bit result is not
+     * truncated when narrowed to the int return type */
+    return __sync_sub_and_fetch(mem, 1) != 0;
+}
+
+APR_DECLARE(apr_uint64_t) apr_atomic_cas64(volatile apr_uint64_t *mem, apr_uint64_t with,
+                                           apr_uint64_t cmp)
+{
+    return __sync_val_compare_and_swap(mem, cmp, with);
+}
+
+APR_DECLARE(apr_uint64_t) apr_atomic_xchg64(volatile apr_uint64_t *mem, apr_uint64_t val)
+{
+    __sync_synchronize();
+
+    return __sync_lock_test_and_set(mem, val);
+}
+
+#endif /* USE_ATOMICS_BUILTINS */
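A note for reviewers unfamiliar with the GCC __sync builtins this file leans on: __sync_fetch_and_add() returns the value *before* the addition, __sync_sub_and_fetch() returns the value *after* the subtraction (which is what lets apr_atomic_dec64() report whether the counter reached zero), and __sync_lock_test_and_set() is an exchange with only acquire semantics, which is why apr_atomic_xchg64() issues an explicit __sync_synchronize() barrier first. A minimal standalone sketch (illustrative only, not part of the patch) exercising the same builtins:

    /* builtins_sketch.c -- build with: gcc builtins_sketch.c */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t v = 10;

        /* fetch-and-add returns the old value, like apr_atomic_add64() */
        uint64_t old = __sync_fetch_and_add(&v, 5);      /* old == 10, v == 15 */

        /* sub-and-fetch returns the new value, like apr_atomic_dec64() */
        uint64_t now = __sync_sub_and_fetch(&v, 15);     /* now == 0, v == 0 */

        /* val-compare-and-swap returns the value found in memory */
        uint64_t seen = __sync_val_compare_and_swap(&v, 0, 42); /* seen == 0, v == 42 */

        printf("%llu %llu %llu %llu\n", (unsigned long long)old,
               (unsigned long long)now, (unsigned long long)seen,
               (unsigned long long)v);
        return 0;
    }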
diff --git a/atomic/unix/mutex64.c b/atomic/unix/mutex64.c
new file mode 100644
index 00000000000..10452413b4c
--- /dev/null
+++ b/atomic/unix/mutex64.c
@@ -0,0 +1,177 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "apr_arch_atomic.h"
+
+#if defined(USE_ATOMICS_GENERIC) || defined (NEED_ATOMICS_GENERIC64)
+
+#include <stdlib.h>
+
+#if APR_HAS_THREADS
+#   define DECLARE_MUTEX_LOCKED(name, mem)  \
+        apr_thread_mutex_t *name = mutex_hash(mem)
+#   define MUTEX_UNLOCK(name)                                   \
+        do {                                                    \
+            if (apr_thread_mutex_unlock(name) != APR_SUCCESS)   \
+                abort();                                        \
+        } while (0)
+#else
+#   define DECLARE_MUTEX_LOCKED(name, mem)
+#   define MUTEX_UNLOCK(name)
+#   warning Be warned: using stubs for all atomic operations
+#endif
+
+#if APR_HAS_THREADS
+
+static apr_thread_mutex_t **hash_mutex;
+
+#define NUM_ATOMIC_HASH 7
+/* shift by 2 to get rid of alignment issues */
+#define ATOMIC_HASH(x) (unsigned int)(((unsigned long)(x)>>2)%(unsigned int)NUM_ATOMIC_HASH)
+
+static apr_status_t atomic_cleanup(void *data)
+{
+    if (hash_mutex == data)
+        hash_mutex = NULL;
+
+    return APR_SUCCESS;
+}
+
+APR_DECLARE(apr_status_t) apr_atomic_init(apr_pool_t *p)
+{
+    int i;
+    apr_status_t rv;
+
+    if (hash_mutex != NULL)
+        return APR_SUCCESS;
+
+    hash_mutex = apr_palloc(p, sizeof(apr_thread_mutex_t*) * NUM_ATOMIC_HASH);
+    apr_pool_cleanup_register(p, hash_mutex, atomic_cleanup,
+                              apr_pool_cleanup_null);
+
+    for (i = 0; i < NUM_ATOMIC_HASH; i++) {
+        rv = apr_thread_mutex_create(&(hash_mutex[i]),
+                                     APR_THREAD_MUTEX_DEFAULT, p);
+        if (rv != APR_SUCCESS) {
+            return rv;
+        }
+    }
+
+    return APR_SUCCESS;
+}
+
+static APR_INLINE apr_thread_mutex_t *mutex_hash(volatile apr_uint64_t *mem)
+{
+    apr_thread_mutex_t *mutex = hash_mutex[ATOMIC_HASH(mem)];
+
+    if (apr_thread_mutex_lock(mutex) != APR_SUCCESS) {
+        abort();
+    }
+
+    return mutex;
+}
+
+#else
+
+APR_DECLARE(apr_status_t) apr_atomic_init(apr_pool_t *p)
+{
+    return APR_SUCCESS;
+}
+
+#endif /* APR_HAS_THREADS */
+
+APR_DECLARE(apr_uint64_t) apr_atomic_read64(volatile apr_uint64_t *mem)
+{
+    return *mem;
+}
+
+APR_DECLARE(void) apr_atomic_set64(volatile apr_uint64_t *mem, apr_uint64_t val)
+{
+    DECLARE_MUTEX_LOCKED(mutex, mem);
+
+    *mem = val;
+
+    MUTEX_UNLOCK(mutex);
+}
+
+APR_DECLARE(apr_uint64_t) apr_atomic_add64(volatile apr_uint64_t *mem, apr_uint64_t val)
+{
+    apr_uint64_t old_value;
+    DECLARE_MUTEX_LOCKED(mutex, mem);
+
+    old_value = *mem;
+    *mem += val;
+
+    MUTEX_UNLOCK(mutex);
+
+    return old_value;
+}
+
+APR_DECLARE(void) apr_atomic_sub64(volatile apr_uint64_t *mem, apr_uint64_t val)
+{
+    DECLARE_MUTEX_LOCKED(mutex, mem);
+    *mem -= val;
+    MUTEX_UNLOCK(mutex);
+}
+
+APR_DECLARE(apr_uint64_t) apr_atomic_inc64(volatile apr_uint64_t *mem)
+{
+    return apr_atomic_add64(mem, 1);
+}
+
+APR_DECLARE(int) apr_atomic_dec64(volatile apr_uint64_t *mem)
+{
+    apr_uint64_t new;
+    DECLARE_MUTEX_LOCKED(mutex, mem);
+
+    (*mem)--;
+    new = *mem;
+
+    MUTEX_UNLOCK(mutex);
+
+    /* test against zero explicitly so the 64-bit result is not
+     * truncated when narrowed to the int return type */
+    return new != 0;
+}
+
+APR_DECLARE(apr_uint64_t) apr_atomic_cas64(volatile apr_uint64_t *mem, apr_uint64_t with,
+                                           apr_uint64_t cmp)
+{
+    apr_uint64_t prev;
+    DECLARE_MUTEX_LOCKED(mutex, mem);
+
+    prev = *mem;
+    if (prev == cmp) {
+        *mem = with;
+    }
+
+    MUTEX_UNLOCK(mutex);
+
+    return prev;
+}
+
+APR_DECLARE(apr_uint64_t) apr_atomic_xchg64(volatile apr_uint64_t *mem, apr_uint64_t val)
+{
+    apr_uint64_t prev;
+    DECLARE_MUTEX_LOCKED(mutex, mem);
+
+    prev = *mem;
+    *mem = val;
+
+    MUTEX_UNLOCK(mutex);
+
+    return prev;
+}
+
+#endif /* USE_ATOMICS_GENERIC || NEED_ATOMICS_GENERIC64 */
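The generic fallback above serializes every operation through a small pool of mutexes keyed by the address of the target value: ATOMIC_HASH() shifts the pointer right by 2 to discard bits that are constant due to alignment, then takes it modulo NUM_ATOMIC_HASH, so operations on distinct values usually take distinct locks while the lock pool stays bounded. A standalone sketch of the same bucketing idea (illustrative only; bucket_of() and the bucket count are made up for the example):

    /* hash_sketch.c -- address-keyed lock bucketing, as in mutex64.c */
    #include <stdint.h>
    #include <stdio.h>

    #define NUM_BUCKETS 7                  /* mirrors NUM_ATOMIC_HASH */

    /* same shape as ATOMIC_HASH(): shift out alignment bits, then mod */
    static unsigned int bucket_of(const void *addr)
    {
        return (unsigned int)(((uintptr_t)addr >> 2) % NUM_BUCKETS);
    }

    int main(void)
    {
        uint64_t counters[10];
        unsigned int i;

        /* adjacent 8-byte counters spread across buckets, so threads
         * working on different counters rarely share a mutex */
        for (i = 0; i < 10; i++)
            printf("counter %u -> bucket %u\n", i, bucket_of(&counters[i]));
        return 0;
    }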
diff --git a/atomic/win32/apr_atomic64.c b/atomic/win32/apr_atomic64.c
new file mode 100644
index 00000000000..a5acc945e40
--- /dev/null
+++ b/atomic/win32/apr_atomic64.c
@@ -0,0 +1,94 @@
+/* Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "apr.h"
+#include "apr_atomic.h"
+#include "apr_thread_mutex.h"
+
+APR_DECLARE(apr_uint64_t) apr_atomic_add64(volatile apr_uint64_t *mem, apr_uint64_t val)
+{
+#if (defined(_M_IA64) || defined(_M_AMD64))
+    return InterlockedExchangeAdd64(mem, val);
+#else
+    /* the Interlocked*64() functions operate on a volatile LONG64 *;
+     * keep the full 64-bit width in the cast */
+    return InterlockedExchangeAdd64((volatile LONG64 *)mem, val);
+#endif
+}
+
+/* Of course we want the two's complement of the unsigned value, val */
+#ifdef _MSC_VER
+#pragma warning(disable: 4146)
+#endif
+
+APR_DECLARE(void) apr_atomic_sub64(volatile apr_uint64_t *mem, apr_uint64_t val)
+{
+#if (defined(_M_IA64) || defined(_M_AMD64))
+    InterlockedExchangeAdd64(mem, -val);
+#else
+    InterlockedExchangeAdd64((volatile LONG64 *)mem, -val);
+#endif
+}
+
+APR_DECLARE(apr_uint64_t) apr_atomic_inc64(volatile apr_uint64_t *mem)
+{
+    /* we return old value, win64 returns new value :( */
+#if (defined(_M_IA64) || defined(_M_AMD64)) && !defined(RC_INVOKED)
+    return InterlockedIncrement64(mem) - 1;
+#else
+    return InterlockedIncrement64((volatile LONG64 *)mem) - 1;
+#endif
+}
+
+APR_DECLARE(int) apr_atomic_dec64(volatile apr_uint64_t *mem)
+{
+    /* test against zero explicitly so the 64-bit result is not
+     * truncated when narrowed to the int return type */
+#if (defined(_M_IA64) || defined(_M_AMD64)) && !defined(RC_INVOKED)
+    return InterlockedDecrement64(mem) != 0;
+#else
+    return InterlockedDecrement64((volatile LONG64 *)mem) != 0;
+#endif
+}
+
+APR_DECLARE(void) apr_atomic_set64(volatile apr_uint64_t *mem, apr_uint64_t val)
+{
+#if (defined(_M_IA64) || defined(_M_AMD64)) && !defined(RC_INVOKED)
+    InterlockedExchange64(mem, val);
+#else
+    InterlockedExchange64((volatile LONG64 *)mem, val);
+#endif
+}
+
+APR_DECLARE(apr_uint64_t) apr_atomic_read64(volatile apr_uint64_t *mem)
+{
+    return *mem;
+}
+
+APR_DECLARE(apr_uint64_t) apr_atomic_cas64(volatile apr_uint64_t *mem, apr_uint64_t with,
+                                           apr_uint64_t cmp)
+{
+#if (defined(_M_IA64) || defined(_M_AMD64)) && !defined(RC_INVOKED)
+    return InterlockedCompareExchange64(mem, with, cmp);
+#else
+    return InterlockedCompareExchange64((volatile LONG64 *)mem, with, cmp);
+#endif
+}
+
+APR_DECLARE(apr_uint64_t) apr_atomic_xchg64(volatile apr_uint64_t *mem, apr_uint64_t val)
+{
+#if (defined(_M_IA64) || defined(_M_AMD64)) && !defined(RC_INVOKED)
+    return InterlockedExchange64(mem, val);
+#else
+    return InterlockedExchange64((volatile LONG64 *)mem, val);
+#endif
+}
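As the inc64 comment notes, the Win32 Interlocked increment/decrement primitives return the *new* value while the APR contract for apr_atomic_inc64() is to return the *old* one, hence the "- 1" correction. In isolation the adaptation looks like this (a sketch only; fake_interlocked_increment64() and fetch_inc64() are made-up names, and the stand-in is deliberately not atomic):

    #include <stdint.h>

    /* stand-in for InterlockedIncrement64(), which returns the NEW value */
    static int64_t fake_interlocked_increment64(volatile int64_t *mem)
    {
        return ++*mem;                 /* not atomic; illustration only */
    }

    /* APR-style contract: return the OLD value */
    static int64_t fetch_inc64(volatile int64_t *mem)
    {
        return fake_interlocked_increment64(mem) - 1;
    }

    int main(void)
    {
        volatile int64_t v = 7;
        return (fetch_inc64(&v) == 7 && v == 8) ? 0 : 1;
    }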
diff --git a/include/apr_atomic.h b/include/apr_atomic.h
index 4e7879000e9..7ac9aafc66c 100644
--- a/include/apr_atomic.h
+++ b/include/apr_atomic.h
@@ -113,6 +113,73 @@ APR_DECLARE(apr_uint32_t) apr_atomic_cas32(volatile apr_uint32_t *mem, apr_uint3
  */
 APR_DECLARE(apr_uint32_t) apr_atomic_xchg32(volatile apr_uint32_t *mem, apr_uint32_t val);
 
+/*
+ * Atomic operations on 64-bit values
+ * Note: Each of these functions internally implements a memory barrier
+ * on platforms that require it
+ */
+
+/**
+ * atomically read an apr_uint64_t from memory
+ * @param mem the pointer
+ */
+APR_DECLARE(apr_uint64_t) apr_atomic_read64(volatile apr_uint64_t *mem);
+
+/**
+ * atomically set an apr_uint64_t in memory
+ * @param mem pointer to the object
+ * @param val value that the object will assume
+ */
+APR_DECLARE(void) apr_atomic_set64(volatile apr_uint64_t *mem, apr_uint64_t val);
+
+/**
+ * atomically add 'val' to an apr_uint64_t
+ * @param mem pointer to the object
+ * @param val amount to add
+ * @return old value pointed to by mem
+ */
+APR_DECLARE(apr_uint64_t) apr_atomic_add64(volatile apr_uint64_t *mem, apr_uint64_t val);
+
+/**
+ * atomically subtract 'val' from an apr_uint64_t
+ * @param mem pointer to the object
+ * @param val amount to subtract
+ */
+APR_DECLARE(void) apr_atomic_sub64(volatile apr_uint64_t *mem, apr_uint64_t val);
+
+/**
+ * atomically increment an apr_uint64_t by 1
+ * @param mem pointer to the object
+ * @return old value pointed to by mem
+ */
+APR_DECLARE(apr_uint64_t) apr_atomic_inc64(volatile apr_uint64_t *mem);
+
+/**
+ * atomically decrement an apr_uint64_t by 1
+ * @param mem pointer to the atomic value
+ * @return zero if the value becomes zero on decrement, otherwise non-zero
+ */
+APR_DECLARE(int) apr_atomic_dec64(volatile apr_uint64_t *mem);
+
+/**
+ * compare an apr_uint64_t's value with 'cmp'.
+ * If they are the same swap the value with 'with'
+ * @param mem pointer to the value
+ * @param with what to swap it with
+ * @param cmp the value to compare it to
+ * @return the old value of *mem
+ */
+APR_DECLARE(apr_uint64_t) apr_atomic_cas64(volatile apr_uint64_t *mem, apr_uint64_t with,
+                                           apr_uint64_t cmp);
+
+/**
+ * exchange an apr_uint64_t's value with 'val'.
+ * @param mem pointer to the value
+ * @param val what to swap it with
+ * @return the old value of *mem
+ */
+APR_DECLARE(apr_uint64_t) apr_atomic_xchg64(volatile apr_uint64_t *mem, apr_uint64_t val);
+
 /**
  * compare the pointer's value with cmp.
  * If they are the same swap the value with 'with'
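Since the header only states the contract, a short usage sketch may help: apr_atomic_cas64() returns the value it found in memory, which enables the classic compare-and-swap retry loop. Here it maintains a running maximum (update_max64() is a made-up helper for illustration, not part of the patch; on platforms that fall back to the mutex implementation, apr_atomic_init() must have been called first):

    #include "apr_atomic.h"

    /* atomically raise *max to val using the cas64 retry idiom */
    static void update_max64(volatile apr_uint64_t *max, apr_uint64_t val)
    {
        apr_uint64_t cur = apr_atomic_read64(max);

        while (cur < val) {
            apr_uint64_t seen = apr_atomic_cas64(max, val, cur);
            if (seen == cur)
                break;      /* our swap won */
            cur = seen;     /* lost a race; retry against the fresh value */
        }
    }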
diff --git a/include/arch/unix/apr_arch_atomic.h b/include/arch/unix/apr_arch_atomic.h
index f8019060e50..b590ab239db 100644
--- a/include/arch/unix/apr_arch_atomic.h
+++ b/include/arch/unix/apr_arch_atomic.h
@@ -34,12 +34,16 @@
 #   define USE_ATOMICS_SOLARIS
 #elif defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
 #   define USE_ATOMICS_IA32
+#   define NEED_ATOMICS_GENERIC64
 #elif defined(__GNUC__) && (defined(__PPC__) || defined(__ppc__))
 #   define USE_ATOMICS_PPC
+#   define NEED_ATOMICS_GENERIC64
 #elif defined(__GNUC__) && (defined(__s390__) || defined(__s390x__))
 #   define USE_ATOMICS_S390
+#   define NEED_ATOMICS_GENERIC64
 #else
 #   define USE_ATOMICS_GENERIC
+#   define NEED_ATOMICS_GENERIC64
 #endif
 
 #endif /* ATOMIC_H */

diff --git a/test/testatomic.c b/test/testatomic.c
index 4bf2caa4688..e1213647794 100644
--- a/test/testatomic.c
+++ b/test/testatomic.c
@@ -230,15 +230,136 @@ static void test_inc_neg1(abts_case *tc, void *data)
     ABTS_ASSERT(tc, str, y32 == 0);
 }
 
+static void test_set64(abts_case *tc, void *data)
+{
+    apr_uint64_t y64;
+    apr_atomic_set64(&y64, 2);
+    ABTS_INT_EQUAL(tc, 2, y64);
+}
+
+static void test_read64(abts_case *tc, void *data)
+{
+    apr_uint64_t y64;
+    apr_atomic_set64(&y64, 2);
+    ABTS_INT_EQUAL(tc, 2, apr_atomic_read64(&y64));
+}
+
+static void test_dec64(abts_case *tc, void *data)
+{
+    apr_uint64_t y64;
+    int rv;
+
+    apr_atomic_set64(&y64, 2);
+
+    rv = apr_atomic_dec64(&y64);
+    ABTS_INT_EQUAL(tc, 1, y64);
+    ABTS_ASSERT(tc, "apr_atomic_dec64 returned zero when it shouldn't", rv != 0);
+
+    rv = apr_atomic_dec64(&y64);
+    ABTS_INT_EQUAL(tc, 0, y64);
+    ABTS_ASSERT(tc, "apr_atomic_dec64 didn't return zero when it should", rv == 0);
+}
+
+static void test_xchg64(abts_case *tc, void *data)
+{
+    apr_uint64_t oldval;
+    apr_uint64_t y64;
+
+    apr_atomic_set64(&y64, 100);
+    oldval = apr_atomic_xchg64(&y64, 50);
+
+    ABTS_INT_EQUAL(tc, 100, oldval);
+    ABTS_INT_EQUAL(tc, 50, y64);
+}
+
+static void test_add64(abts_case *tc, void *data)
+{
+    apr_uint64_t oldval;
+    apr_uint64_t y64;
+
+    apr_atomic_set64(&y64, 23);
+    oldval = apr_atomic_add64(&y64, 4);
+    ABTS_INT_EQUAL(tc, 23, oldval);
+    ABTS_INT_EQUAL(tc, 27, y64);
+}
+
+static void test_add64_neg(abts_case *tc, void *data)
+{
+    apr_uint64_t oldval;
+    apr_uint64_t y64;
+
+    apr_atomic_set64(&y64, 23);
+    oldval = apr_atomic_add64(&y64, -10);
+    ABTS_INT_EQUAL(tc, 23, oldval);
+    ABTS_INT_EQUAL(tc, 13, y64);
+}
+
+static void test_inc64(abts_case *tc, void *data)
+{
+    apr_uint64_t oldval;
+    apr_uint64_t y64;
+
+    apr_atomic_set64(&y64, 23);
+    oldval = apr_atomic_inc64(&y64);
+    ABTS_INT_EQUAL(tc, 23, oldval);
+    ABTS_INT_EQUAL(tc, 24, y64);
+}
+
+static void test_set_add_inc_sub64(abts_case *tc, void *data)
+{
+    apr_uint64_t y64;
+
+    apr_atomic_set64(&y64, 0);
+    apr_atomic_add64(&y64, 20);
+    apr_atomic_inc64(&y64);
+    apr_atomic_sub64(&y64, 10);
+
+    ABTS_INT_EQUAL(tc, 11, y64);
+}
+
+static void test_wrap_zero64(abts_case *tc, void *data)
+{
+    apr_uint64_t y64;
+    apr_uint64_t rv;
+    apr_uint64_t minus1 = (apr_uint64_t)-1;
+    char *str;
+
+    apr_atomic_set64(&y64, 0);
+    rv = apr_atomic_dec64(&y64);
+
+    ABTS_ASSERT(tc, "apr_atomic_dec64 on zero returned zero.", rv != 0);
+    str = apr_psprintf(p, "zero wrap failed: 0 - 1 = %" APR_UINT64_T_FMT, y64);
+    ABTS_ASSERT(tc, str, y64 == minus1);
+}
+
+static void test_inc_neg164(abts_case *tc, void *data)
+{
+    apr_uint64_t y64 = (apr_uint64_t)-1;
+    apr_uint64_t minus1 = (apr_uint64_t)-1;
+    apr_uint64_t rv;
+    char *str;
+
+    rv = apr_atomic_inc64(&y64);
+
+    ABTS_ASSERT(tc, "apr_atomic_inc64 didn't return the old value.", rv == minus1);
+    str = apr_psprintf(p, "zero wrap failed: -1 + 1 = %" APR_UINT64_T_FMT, y64);
+    ABTS_ASSERT(tc, str, y64 == 0);
+}
+
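test_wrap_zero64 and test_inc_neg164 pin down the unsigned wrap-around contract: decrementing 0 must yield 2^64 - 1, and incrementing 2^64 - 1 must yield 0. The same arithmetic can be checked standalone (illustrative only, not part of the patch):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t zero = 0;
        uint64_t max = UINT64_MAX;      /* same value as (apr_uint64_t)-1 */

        assert(zero - 1 == UINT64_MAX); /* 0 - 1 wraps to 2^64 - 1 */
        assert(max + 1 == 0);           /* and 2^64 - 1 + 1 wraps to 0 */
        return 0;
    }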
"apr_atomic_inc64 didn't return the old value.", rv == minus1); + str = apr_psprintf(p, "zero wrap failed: -1 + 1 = %lu", y64); + ABTS_ASSERT(tc, str, y64 == 0); +} + #if APR_HAS_THREADS void *APR_THREAD_FUNC thread_func_mutex(apr_thread_t *thd, void *data); +void *APR_THREAD_FUNC thread_func_mutex64(apr_thread_t *thd, void *data); void *APR_THREAD_FUNC thread_func_atomic(apr_thread_t *thd, void *data); +void *APR_THREAD_FUNC thread_func_atomic64(apr_thread_t *thd, void *data); apr_thread_mutex_t *thread_lock; +apr_thread_mutex_t *thread_lock64; volatile apr_uint32_t mutex_locks = 0; +volatile apr_uint64_t mutex_locks64 = 0; volatile apr_uint32_t atomic_ops = 0; +volatile apr_uint64_t atomic_ops64 = 0; apr_status_t exit_ret_val = 123; /* just some made up number to check on later */ #define NUM_THREADS 40 @@ -313,6 +434,7 @@ static void test_atomics_threaded(abts_case *tc, void *data) #define NUM_THREADS 7 typedef struct tbox_t tbox_t; +typedef struct tbox_t64 tbox_t64; struct tbox_t { abts_case *tc; @@ -501,6 +623,259 @@ static void test_atomics_busyloop_threaded(abts_case *tc, void *data) ABTS_ASSERT(tc, "Failed creating threads", rv == APR_SUCCESS); } +void *APR_THREAD_FUNC thread_func_mutex64(apr_thread_t *thd, void *data) +{ + int i; + + for (i = 0; i < NUM_ITERATIONS; i++) { + apr_thread_mutex_lock(thread_lock64); + mutex_locks64++; + apr_thread_mutex_unlock(thread_lock64); + } + apr_thread_exit(thd, exit_ret_val); + return NULL; +} + + +void *APR_THREAD_FUNC thread_func_atomic64(apr_thread_t *thd, void *data) +{ + int i; + + for (i = 0; i < NUM_ITERATIONS ; i++) { + apr_atomic_inc64(&atomic_ops64); + apr_atomic_add64(&atomic_ops64, 2); + apr_atomic_dec64(&atomic_ops64); + apr_atomic_dec64(&atomic_ops64); + } + apr_thread_exit(thd, exit_ret_val); + return NULL; +} + +static void test_atomics_threaded64(abts_case *tc, void *data) +{ + apr_thread_t *t1[NUM_THREADS]; + apr_thread_t *t2[NUM_THREADS]; + apr_status_t rv; + int i; + +#ifdef HAVE_PTHREAD_SETCONCURRENCY + pthread_setconcurrency(8); +#endif + + rv = apr_thread_mutex_create(&thread_lock64, APR_THREAD_MUTEX_DEFAULT, p); + APR_ASSERT_SUCCESS(tc, "Could not create lock", rv); + + for (i = 0; i < NUM_THREADS; i++) { + apr_status_t r1, r2; + r1 = apr_thread_create(&t1[i], NULL, thread_func_mutex64, NULL, p); + r2 = apr_thread_create(&t2[i], NULL, thread_func_atomic64, NULL, p); + ABTS_ASSERT(tc, "Failed creating threads", !r1 && !r2); + } + + for (i = 0; i < NUM_THREADS; i++) { + apr_status_t s1, s2; + apr_thread_join(&s1, t1[i]); + apr_thread_join(&s2, t2[i]); + + ABTS_ASSERT(tc, "Invalid return value from thread_join", + s1 == exit_ret_val && s2 == exit_ret_val); + } + + ABTS_INT_EQUAL(tc, NUM_THREADS * NUM_ITERATIONS, mutex_locks64); + ABTS_INT_EQUAL(tc, NUM_THREADS * NUM_ITERATIONS, + apr_atomic_read64(&atomic_ops64)); + + rv = apr_thread_mutex_destroy(thread_lock64); + ABTS_ASSERT(tc, "Failed creating threads", rv == APR_SUCCESS); +} + +struct tbox_t64 { + abts_case *tc; + apr_uint64_t *mem; + apr_uint64_t preval; + apr_uint64_t postval; + apr_uint64_t loop; + void (*func)(tbox_t64 *box); +}; + +static APR_INLINE void busyloop_read64(tbox_t64 *tbox) +{ + apr_uint64_t val; + + do { + val = apr_atomic_read64(tbox->mem); + + if (val != tbox->preval) + apr_thread_yield(); + else + break; + } while (1); +} + +static void busyloop_set64(tbox_t64 *tbox) +{ + do { + busyloop_read64(tbox); + apr_atomic_set64(tbox->mem, tbox->postval); + } while (--tbox->loop); +} + +static void busyloop_add64(tbox_t64 *tbox) +{ + apr_uint64_t val; + + 
+struct tbox_t64 {
+    abts_case *tc;
+    apr_uint64_t *mem;
+    apr_uint64_t preval;
+    apr_uint64_t postval;
+    apr_uint64_t loop;
+    void (*func)(tbox_t64 *box);
+};
+
+static APR_INLINE void busyloop_read64(tbox_t64 *tbox)
+{
+    apr_uint64_t val;
+
+    do {
+        val = apr_atomic_read64(tbox->mem);
+
+        if (val != tbox->preval)
+            apr_thread_yield();
+        else
+            break;
+    } while (1);
+}
+
+static void busyloop_set64(tbox_t64 *tbox)
+{
+    do {
+        busyloop_read64(tbox);
+        apr_atomic_set64(tbox->mem, tbox->postval);
+    } while (--tbox->loop);
+}
+
+static void busyloop_add64(tbox_t64 *tbox)
+{
+    apr_uint64_t val;
+
+    do {
+        busyloop_read64(tbox);
+        val = apr_atomic_add64(tbox->mem, tbox->postval);
+        apr_thread_mutex_lock(thread_lock64);
+        ABTS_INT_EQUAL(tbox->tc, val, tbox->preval);
+        apr_thread_mutex_unlock(thread_lock64);
+    } while (--tbox->loop);
+}
+
+static void busyloop_sub64(tbox_t64 *tbox)
+{
+    do {
+        busyloop_read64(tbox);
+        apr_atomic_sub64(tbox->mem, tbox->postval);
+    } while (--tbox->loop);
+}
+
+static void busyloop_inc64(tbox_t64 *tbox)
+{
+    apr_uint64_t val;
+
+    do {
+        busyloop_read64(tbox);
+        val = apr_atomic_inc64(tbox->mem);
+        apr_thread_mutex_lock(thread_lock64);
+        ABTS_INT_EQUAL(tbox->tc, val, tbox->preval);
+        apr_thread_mutex_unlock(thread_lock64);
+    } while (--tbox->loop);
+}
+
+static void busyloop_dec64(tbox_t64 *tbox)
+{
+    apr_uint64_t val;
+
+    do {
+        busyloop_read64(tbox);
+        val = apr_atomic_dec64(tbox->mem);
+        apr_thread_mutex_lock(thread_lock64);
+        ABTS_INT_NEQUAL(tbox->tc, 0, val);
+        apr_thread_mutex_unlock(thread_lock64);
+    } while (--tbox->loop);
+}
+
+static void busyloop_cas64(tbox_t64 *tbox)
+{
+    apr_uint64_t val;
+
+    do {
+        do {
+            val = apr_atomic_cas64(tbox->mem, tbox->postval, tbox->preval);
+
+            if (val != tbox->preval)
+                apr_thread_yield();
+            else
+                break;
+        } while (1);
+    } while (--tbox->loop);
+}
+
+static void busyloop_xchg64(tbox_t64 *tbox)
+{
+    apr_uint64_t val;
+
+    do {
+        busyloop_read64(tbox);
+        val = apr_atomic_xchg64(tbox->mem, tbox->postval);
+        apr_thread_mutex_lock(thread_lock64);
+        ABTS_INT_EQUAL(tbox->tc, val, tbox->preval);
+        apr_thread_mutex_unlock(thread_lock64);
+    } while (--tbox->loop);
+}
+
+static void *APR_THREAD_FUNC thread_func_busyloop64(apr_thread_t *thd, void *data)
+{
+    tbox_t64 *tbox = data;
+
+    tbox->func(tbox);
+
+    apr_thread_exit(thd, 0);
+
+    return NULL;
+}
+
+static void test_atomics_busyloop_threaded64(abts_case *tc, void *data)
+{
+    unsigned int i;
+    apr_status_t rv;
+    apr_uint64_t count = 0;
+    tbox_t64 tbox[NUM_THREADS];
+    apr_thread_t *thread[NUM_THREADS];
+
+    rv = apr_thread_mutex_create(&thread_lock64, APR_THREAD_MUTEX_DEFAULT, p);
+    APR_ASSERT_SUCCESS(tc, "Could not create lock", rv);
+
+    /* get ready */
+    for (i = 0; i < NUM_THREADS; i++) {
+        tbox[i].tc = tc;
+        tbox[i].mem = &count;
+        tbox[i].loop = 50;
+    }
+
+    /* The boxes form a ring: each thread busy-waits until the counter
+     * reads its preval, applies its op, and thereby produces the next
+     * thread's preval:
+     *   98 -> (add 3891) 3989 -> (sub 1010) 2979 -> (inc) 2980
+     *      -> (set) 16384 -> (dec) 16383 -> (cas) 1048576
+     *      -> (xchg) 98, and around again, 50 times.
+     */
+    tbox[0].preval = 98;
+    tbox[0].postval = 3891;
+    tbox[0].func = busyloop_add64;
+
+    tbox[1].preval = 3989;
+    tbox[1].postval = 1010;
+    tbox[1].func = busyloop_sub64;
+
+    tbox[2].preval = 2979;
+    tbox[2].postval = 0; /* not used */
+    tbox[2].func = busyloop_inc64;
+
+    tbox[3].preval = 2980;
+    tbox[3].postval = 16384;
+    tbox[3].func = busyloop_set64;
+
+    tbox[4].preval = 16384;
+    tbox[4].postval = 0; /* not used */
+    tbox[4].func = busyloop_dec64;
+
+    tbox[5].preval = 16383;
+    tbox[5].postval = 1048576;
+    tbox[5].func = busyloop_cas64;
+
+    tbox[6].preval = 1048576;
+    tbox[6].postval = 98; /* goto tbox[0] */
+    tbox[6].func = busyloop_xchg64;
+
+    /* get set */
+    for (i = 0; i < NUM_THREADS; i++) {
+        rv = apr_thread_create(&thread[i], NULL, thread_func_busyloop64,
+                               &tbox[i], p);
+        ABTS_ASSERT(tc, "Failed creating thread", rv == APR_SUCCESS);
+    }
+
+    /* go! */
+    apr_atomic_set64(tbox->mem, 98);
+
+    for (i = 0; i < NUM_THREADS; i++) {
+        apr_status_t retval;
+        rv = apr_thread_join(&retval, thread[i]);
+        ABTS_ASSERT(tc, "Thread join failed", rv == APR_SUCCESS);
+        ABTS_ASSERT(tc, "Invalid return value from thread_join", retval == 0);
+    }
+
+    ABTS_INT_EQUAL(tbox->tc, 98, count);
+
+    rv = apr_thread_mutex_destroy(thread_lock64);
+    ABTS_ASSERT(tc, "Failed to destroy mutex", rv == APR_SUCCESS);
+}
+
 #endif /* !APR_HAS_THREADS */
 
 abts_suite *testatomic(abts_suite *suite)
@@ -525,10 +900,22 @@
     abts_run_test(suite, test_set_add_inc_sub, NULL);
     abts_run_test(suite, test_wrap_zero, NULL);
     abts_run_test(suite, test_inc_neg1, NULL);
+    abts_run_test(suite, test_set64, NULL);
+    abts_run_test(suite, test_read64, NULL);
+    abts_run_test(suite, test_dec64, NULL);
+    abts_run_test(suite, test_xchg64, NULL);
+    abts_run_test(suite, test_add64, NULL);
+    abts_run_test(suite, test_add64_neg, NULL);
+    abts_run_test(suite, test_inc64, NULL);
+    abts_run_test(suite, test_set_add_inc_sub64, NULL);
+    abts_run_test(suite, test_wrap_zero64, NULL);
+    abts_run_test(suite, test_inc_neg164, NULL);
 
 #if APR_HAS_THREADS
     abts_run_test(suite, test_atomics_threaded, NULL);
+    abts_run_test(suite, test_atomics_threaded64, NULL);
     abts_run_test(suite, test_atomics_busyloop_threaded, NULL);
+    abts_run_test(suite, test_atomics_busyloop_threaded64, NULL);
 #endif
 
     return suite;
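Finally, an end-to-end sketch of the new API from an application's point of view (illustrative only, not part of the patch; assumes APR built with this change, and error checking is elided):

    #include <stdio.h>
    #include "apr_general.h"
    #include "apr_pools.h"
    #include "apr_atomic.h"

    int main(void)
    {
        apr_pool_t *pool;
        volatile apr_uint64_t hits;

        apr_initialize();
        apr_pool_create(&pool, NULL);

        /* required before any atomic op on platforms that fall back to
         * the mutex-based implementation in atomic/unix/mutex64.c */
        apr_atomic_init(pool);

        apr_atomic_set64(&hits, 0);
        apr_atomic_add64(&hits, 41);
        apr_atomic_inc64(&hits);

        printf("hits = %" APR_UINT64_T_FMT "\n", apr_atomic_read64(&hits));

        apr_pool_destroy(pool);
        apr_terminate();
        return 0;
    }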