diff --git a/examples/phil/phil.c b/examples/phil/phil.c index e2e0bcb1..57d06c31 100644 --- a/examples/phil/phil.c +++ b/examples/phil/phil.c @@ -30,7 +30,7 @@ a number of seconds equal to the philosopher's order number around the table The two solutions implemented here are 1) a ticket-based one, and 2) a claim-based one. The ticket-based solution is fair in the sense that all philosophers get to access the resource for the same amount of time in average -whether they think quick (or shallowy...) or long. The drawback is that the +whether they think quick (or shallow...) or long. The drawback is that the faster thinkers get to wait more for accessing the resource. The claim-based solution is addressing this by letting the faster thinkers access the resource as long as it is not claimed by another philosopher (in which case the diff --git a/lib/core/hash/cne_fbk_hash.h b/lib/core/hash/cne_fbk_hash.h index fa92a0b6..db7a6ffb 100644 --- a/lib/core/hash/cne_fbk_hash.h +++ b/lib/core/hash/cne_fbk_hash.h @@ -325,9 +325,9 @@ struct cne_fbk_hash_table *cne_fbk_hash_find_existing(const char *name); * - E_CNE_NO_CONFIG - function could not get pointer to cne_config structure * - E_CNE_SECONDARY - function was called from a secondary process instance * - EINVAL - invalid parameter value passed to function - * - ENOSPC - the maximum number of memzones has already been allocated - * - EEXIST - a memzone with the same name already exists - * - ENOMEM - no appropriate memory area found in which to create memzone + * - ENOSPC - the maximum amount of memory has already been allocated + * - EEXIST - a mempool with the same name already exists + * - ENOMEM - no appropriate memory area found in which to allocate memory */ struct cne_fbk_hash_table *cne_fbk_hash_create(const struct cne_fbk_hash_params *params); diff --git a/lib/core/hash/cne_hash.h b/lib/core/hash/cne_hash.h index a1ac7818..c88ed184 100644 --- a/lib/core/hash/cne_hash.h +++ b/lib/core/hash/cne_hash.h @@ -93,9 +93,9 @@
struct cne_hash; * - E_CNE_SECONDARY - function was called from a secondary process instance * - ENOENT - missing entry * - EINVAL - invalid parameter passed to function - * - ENOSPC - the maximum number of memzones has already been allocated - * - EEXIST - a memzone with the same name already exists - * - ENOMEM - no appropriate memory area found in which to create memzone + * - ENOSPC - the maximum amount of memory has already been allocated + * - EEXIST - a mempool with the same name already exists + * - ENOMEM - no appropriate memory area found in which to allocate memory */ struct cne_hash *cne_hash_create(const struct cne_hash_parameters *params); diff --git a/lib/core/mempool/mempool.h b/lib/core/mempool/mempool.h index 034516e8..5d0dd921 100644 --- a/lib/core/mempool/mempool.h +++ b/lib/core/mempool/mempool.h @@ -112,9 +112,9 @@ typedef struct mempool_cfg { * @return * The pointer to the new allocated mempool, on success. NULL on error * with errno set appropriately. Possible errno values include: - * - ENOSPC - the maximum number of memzones has already been allocated - * - EEXIST - a memzone with the same name already exists - * - ENOMEM - no appropriate memory area found in which to create memzone + * - ENOSPC - the maximum number of mempools has already been allocated + * - EEXIST - a mempool with the same name already exists + * - ENOMEM - no appropriate memory area found in which to create the mempool */ CNDP_API mempool_t *mempool_create(struct mempool_cfg *cinfo); diff --git a/lib/include/cne_lock_annotations.h b/lib/include/cne_lock_annotations.h new file mode 100644 index 00000000..37f4b592 --- /dev/null +++ b/lib/include/cne_lock_annotations.h @@ -0,0 +1,62 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * Copyright(c) 2022 Red Hat, Inc.
+ */ + +#ifndef CNE_LOCK_ANNOTATIONS_H +#define CNE_LOCK_ANNOTATIONS_H + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef CNE_ANNOTATE_LOCKS + +#define __cne_lockable __attribute__((lockable)) + +#define __cne_guarded_by(...) __attribute__((guarded_by(__VA_ARGS__))) +#define __cne_guarded_var __attribute__((guarded_var)) + +#define __cne_exclusive_locks_required(...) __attribute__((exclusive_locks_required(__VA_ARGS__))) +#define __cne_exclusive_lock_function(...) __attribute__((exclusive_lock_function(__VA_ARGS__))) +#define __cne_exclusive_trylock_function(ret, ...) \ + __attribute__((exclusive_trylock_function(ret, __VA_ARGS__))) +#define __cne_assert_exclusive_lock(...) __attribute__((assert_exclusive_lock(__VA_ARGS__))) + +#define __cne_shared_locks_required(...) __attribute__((shared_locks_required(__VA_ARGS__))) +#define __cne_shared_lock_function(...) __attribute__((shared_lock_function(__VA_ARGS__))) +#define __cne_shared_trylock_function(ret, ...) \ + __attribute__((shared_trylock_function(ret, __VA_ARGS__))) +#define __cne_assert_shared_lock(...) __attribute__((assert_shared_lock(__VA_ARGS__))) + +#define __cne_unlock_function(...) __attribute__((unlock_function(__VA_ARGS__))) + +#define __cne_no_thread_safety_analysis __attribute__((no_thread_safety_analysis)) + +#else /* ! CNE_ANNOTATE_LOCKS */ + +#define __cne_lockable + +#define __cne_guarded_by(...) +#define __cne_guarded_var + +#define __cne_exclusive_locks_required(...) +#define __cne_exclusive_lock_function(...) +#define __cne_exclusive_trylock_function(...) +#define __cne_assert_exclusive_lock(...) + +#define __cne_shared_locks_required(...) +#define __cne_shared_lock_function(...) +#define __cne_shared_trylock_function(...) +#define __cne_assert_shared_lock(...) + +#define __cne_unlock_function(...) 
+ +#define __cne_no_thread_safety_analysis + +#endif /* CNE_ANNOTATE_LOCKS */ + +#ifdef __cplusplus +} +#endif + +#endif /* CNE_LOCK_ANNOTATIONS_H */ diff --git a/lib/include/cne_rwlock.h b/lib/include/cne_rwlock.h index 6094ed30..76425338 100644 --- a/lib/include/cne_rwlock.h +++ b/lib/include/cne_rwlock.h @@ -15,11 +15,17 @@ * one writer. All readers are blocked until the writer is finished * writing. * + * This version does not give preference to readers or writers + * and does not starve either readers or writers. + * + * See also: + * https://locklessinc.com/articles/locks/ */ #include #include #include +#include #include #ifdef __cplusplus @@ -29,10 +35,28 @@ extern "C" { /** * The cne_rwlock_t type. * - * cnt is -1 when write lock is held, and > 0 when read locks are held. + * Readers increment the counter by CNE_RWLOCK_READ (4) + * Writers set the CNE_RWLOCK_WRITE bit when lock is held + * and set the CNE_RWLOCK_WAIT bit while waiting. + * + * 31 2 1 0 + * +-------------------+-+-+ + * | readers | | | + * +-------------------+-+-+ + * ^ ^ + * | | + * WRITE: lock held ----/ | + * WAIT: writer pending --/ */ -typedef struct { - volatile int32_t cnt; /**< -1 when W lock held, > 0 when R locks held. 
*/ + +#define CNE_RWLOCK_WAIT 0x1 /* Writer is waiting */ +#define CNE_RWLOCK_WRITE 0x2 /* Writer has the lock */ +#define CNE_RWLOCK_MASK (CNE_RWLOCK_WAIT | CNE_RWLOCK_WRITE) +/* Writer is waiting or has lock */ +#define CNE_RWLOCK_READ 0x4 /* Reader increment */ + +typedef struct __cne_lockable { + int32_t cnt; } cne_rwlock_t; /** @@ -63,24 +87,29 @@ cne_rwlock_init(cne_rwlock_t *rwl) */ static inline void cne_rwlock_read_lock(cne_rwlock_t *rwl) + __cne_shared_lock_function(rwl) __cne_no_thread_safety_analysis { int32_t x; - int success = 0; - while (success == 0) { - x = __atomic_load_n(&rwl->cnt, __ATOMIC_RELAXED); - /* write lock is held */ - if (x < 0) { + while (1) { + /* Wait while writer is present or pending */ + while (__atomic_load_n(&rwl->cnt, __ATOMIC_RELAXED) & CNE_RWLOCK_MASK) cne_pause(); - continue; - } - success = __atomic_compare_exchange_n(&rwl->cnt, &x, x + 1, 1, __ATOMIC_ACQUIRE, - __ATOMIC_RELAXED); + + /* Try to get read lock */ + x = __atomic_fetch_add(&rwl->cnt, CNE_RWLOCK_READ, __ATOMIC_ACQUIRE) + CNE_RWLOCK_READ; + + /* If no writer, then acquire was successful */ + if (likely(!(x & CNE_RWLOCK_MASK))) + return; + + /* Lost race with writer, backout the change. */ + __atomic_fetch_sub(&rwl->cnt, CNE_RWLOCK_READ, __ATOMIC_RELAXED); } } /** - * try to take a read lock. + * Try to take a read lock. * * @param rwl * A pointer to a rwlock structure. 
@@ -91,19 +120,25 @@ cne_rwlock_read_lock(cne_rwlock_t *rwl) */ static inline int cne_rwlock_read_trylock(cne_rwlock_t *rwl) + __cne_shared_trylock_function(0, rwl) __cne_no_thread_safety_analysis { int32_t x; - int success = 0; - while (success == 0) { - x = __atomic_load_n(&rwl->cnt, __ATOMIC_RELAXED); - /* write lock is held */ - if (x < 0) - return -EBUSY; - success = __atomic_compare_exchange_n(&rwl->cnt, &x, x + 1, 1, __ATOMIC_ACQUIRE, - __ATOMIC_RELAXED); - } + x = __atomic_load_n(&rwl->cnt, __ATOMIC_RELAXED); + + /* fail if write lock is held or writer is pending */ + if (x & CNE_RWLOCK_MASK) + return -EBUSY; + + /* Try to get read lock */ + x = __atomic_fetch_add(&rwl->cnt, CNE_RWLOCK_READ, __ATOMIC_ACQUIRE) + CNE_RWLOCK_READ; + /* Back out if writer raced in */ + if (unlikely(x & CNE_RWLOCK_MASK)) { + __atomic_fetch_sub(&rwl->cnt, CNE_RWLOCK_READ, __ATOMIC_RELEASE); + + return -EBUSY; + } return 0; } @@ -114,13 +149,13 @@ cne_rwlock_read_trylock(cne_rwlock_t *rwl) * A pointer to the rwlock structure. */ static inline void -cne_rwlock_read_unlock(cne_rwlock_t *rwl) +cne_rwlock_read_unlock(cne_rwlock_t *rwl) __cne_unlock_function(rwl) __cne_no_thread_safety_analysis { - __atomic_fetch_sub(&rwl->cnt, 1, __ATOMIC_RELEASE); + __atomic_fetch_sub(&rwl->cnt, CNE_RWLOCK_READ, __ATOMIC_RELEASE); } /** - * try to take a write lock. + * Try to take a write lock. * * @param rwl * A pointer to a rwlock structure. 
@@ -131,15 +166,16 @@ cne_rwlock_read_unlock(cne_rwlock_t *rwl) */ static inline int cne_rwlock_write_trylock(cne_rwlock_t *rwl) + __cne_exclusive_trylock_function(0, rwl) __cne_no_thread_safety_analysis { int32_t x; x = __atomic_load_n(&rwl->cnt, __ATOMIC_RELAXED); - if (x != 0 || - __atomic_compare_exchange_n(&rwl->cnt, &x, -1, 1, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED) == 0) + if (x < CNE_RWLOCK_WRITE && __atomic_compare_exchange_n(&rwl->cnt, &x, x + CNE_RWLOCK_WRITE, 1, + __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) + return 0; + else return -EBUSY; - - return 0; } /** @@ -150,19 +186,28 @@ cne_rwlock_write_trylock(cne_rwlock_t *rwl) */ static inline void cne_rwlock_write_lock(cne_rwlock_t *rwl) + __cne_exclusive_lock_function(rwl) __cne_no_thread_safety_analysis { int32_t x; - int success = 0; - while (success == 0) { + while (1) { x = __atomic_load_n(&rwl->cnt, __ATOMIC_RELAXED); - /* a lock is held */ - if (x != 0) { - cne_pause(); - continue; + + /* No readers or writers? */ + if (likely(x < CNE_RWLOCK_WRITE)) { + /* Turn off CNE_RWLOCK_WAIT, turn on CNE_RWLOCK_WRITE */ + if (__atomic_compare_exchange_n(&rwl->cnt, &x, CNE_RWLOCK_WRITE, 1, __ATOMIC_ACQUIRE, + __ATOMIC_RELAXED)) + return; } - success = - __atomic_compare_exchange_n(&rwl->cnt, &x, -1, 1, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED); + + /* Turn on writer wait bit */ + if (!(x & CNE_RWLOCK_WAIT)) + __atomic_fetch_or(&rwl->cnt, CNE_RWLOCK_WAIT, __ATOMIC_RELAXED); + + /* Wait until no readers before trying again */ + while (__atomic_load_n(&rwl->cnt, __ATOMIC_RELAXED) > CNE_RWLOCK_WAIT) + cne_pause(); } } @@ -174,8 +219,26 @@ cne_rwlock_write_lock(cne_rwlock_t *rwl) */ static inline void cne_rwlock_write_unlock(cne_rwlock_t *rwl) + __cne_unlock_function(rwl) __cne_no_thread_safety_analysis { - __atomic_store_n(&rwl->cnt, 0, __ATOMIC_RELEASE); + __atomic_fetch_sub(&rwl->cnt, CNE_RWLOCK_WRITE, __ATOMIC_RELEASE); +} + +/** + * Test if the write lock is taken. + * + * @param rwl + * A pointer to a rwlock structure. 
+ * @return + * 1 if the write lock is currently taken; 0 otherwise. + */ +static inline int +cne_rwlock_write_is_locked(cne_rwlock_t *rwl) +{ + if (__atomic_load_n(&rwl->cnt, __ATOMIC_RELAXED) & CNE_RWLOCK_WRITE) + return 1; + + return 0; } /** diff --git a/lib/include/cne_tailq.h b/lib/include/cne_tailq.h index 738488f8..ca140951 100644 --- a/lib/include/cne_tailq.h +++ b/lib/include/cne_tailq.h @@ -77,10 +77,8 @@ struct cne_tailq_elem { * first parameter passed to TAILQ_HEAD macro) * * @return - * The return value, typecast to the appropriate - * structure pointer type. - * NULL on error, since the tailq_head is the first - * element in the cne_tailq_head structure. + * The return value, typecast to the appropriate structure pointer type. NULL on error, + * since the tailq_head is the first element in the cne_tailq_head structure. */ #define CNE_TAILQ_LOOKUP(name, struct_name) CNE_TAILQ_CAST(cne_tailq_lookup(name), struct_name) @@ -123,22 +121,6 @@ CNDP_API void cne_dump_tailq(void); */ CNDP_API struct cne_tailq_head *cne_tailq_lookup(const char *name); -/** - * Register a tail queue. - * - * Register a tail queue from shared memory. - * This function is mainly used by some, which is used to - * register tailq from the different cndp libraries. Since this macro is a - * constructor. - * - * @param t - * The tailq element which contains the name of the tailq you want to - * create (/retrieve when in secondary process). - * @return - * 0 on success or -1 in case of an error. - */ -CNDP_API int cne_eal_tailq_register(struct cne_tailq_elem *t); - /** * This macro permits both remove and free var within the loop safely. 
*/ diff --git a/lib/include/meson.build b/lib/include/meson.build index 21859ca0..544356de 100644 --- a/lib/include/meson.build +++ b/lib/include/meson.build @@ -15,6 +15,7 @@ headers = files( 'cne_inet4.h', 'cne_inet6.h', 'cne_isa.h', + 'cne_lock_annotations.h', 'cne_lport.h', 'cne_mutex_helper.h', 'cne_pause.h', diff --git a/lib/usr/clib/graph/graph_private.h b/lib/usr/clib/graph/graph_private.h index bf5f098a..48d9b7c8 100644 --- a/lib/usr/clib/graph/graph_private.h +++ b/lib/usr/clib/graph/graph_private.h @@ -266,7 +266,7 @@ int graph_fp_mem_create(struct graph *graph); * * @return * - 0: Success. - * - <0: Graph memzone related error. + * - <0: Graph memory related error. */ int graph_fp_mem_destroy(struct graph *graph); diff --git a/test/testcne/graph_perf_test.c b/test/testcne/graph_perf_test.c index 13058ef1..bd90a88b 100644 --- a/test/testcne/graph_perf_test.c +++ b/test/testcne/graph_perf_test.c @@ -346,8 +346,8 @@ graph_init(const char *gname, uint8_t nb_srcs, uint8_t nb_sinks, uint32_t stages graph_data->node_data = malloc(sizeof(struct test_node_data) * (nb_srcs + nb_sinks + stages * nodes_per_stage)); if (graph_data->node_data == NULL) { - tst_error("Failed to reserve memzone for graph data"); - goto memzone_free; + tst_error("Failed to reserve memory for graph data"); + goto memory_free; } node_patterns = calloc(1, sizeof(char *) * (nb_srcs + nb_sinks + stages * nodes_per_stage + 1)); @@ -621,7 +621,7 @@ graph_init(const char *gname, uint8_t nb_srcs, uint8_t nb_sinks, uint32_t stages free(node_patterns); data_free: free(graph_data->node_data); -memzone_free: +memory_free: free(graph_data); return -ENOMEM; } diff --git a/usrtools/txgen/app/capture.c b/usrtools/txgen/app/capture.c index 231d87ea..bf2aeb29 100644 --- a/usrtools/txgen/app/capture.c +++ b/usrtools/txgen/app/capture.c @@ -45,7 +45,7 @@ * storage. 
* * PARAMETERS: - * capture: capture_t struct that will keep a pointer to the allocated memzone + * capture: capture_t struct that will keep a pointer to the allocated memory * * RETURNS: N/A * diff --git a/usrtools/txgen/app/capture.h b/usrtools/txgen/app/capture.h index 24c03d9c..c39a16bc 100644 --- a/usrtools/txgen/app/capture.h +++ b/usrtools/txgen/app/capture.h @@ -29,7 +29,7 @@ typedef struct capture_s { cap_hdr_t *tail; /**< Current tail pointer in the pkt buffer */ cap_hdr_t *end; /**< Points to just before the end[-1] of the buffer */ size_t used; /**< Memory used by captured packets */ - uint16_t port; /**< port for this memzone */ + uint16_t port; /**< port for this capture buffer */ } capture_t; /* Capture initialization */ diff --git a/usrtools/txgen/app/cksum.c b/usrtools/txgen/app/cksum.c index 71256f46..61c968bb 100644 --- a/usrtools/txgen/app/cksum.c +++ b/usrtools/txgen/app/cksum.c @@ -35,7 +35,7 @@ cksum(void *pBuf, int32_t size, uint32_t cksum) } /** - * cksumUpdate - Calaculate an 16 bit checksum and return the 32 bit value + * cksumUpdate - Calculate a 16 bit checksum and return the 32 bit value * * DESCRIPTION * Will need to call txgen_cksumDone to finish computing the checksum. The