#ifndef _CNE_RING_GENERIC_H_
#define _CNE_RING_GENERIC_H_
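#include <stdint.h>        /* uint32_t/uint64_t used throughout this header */

/*
 * Note: struct cne_ring, struct cne_ring_headtail, the likely()/unlikely()
 * hints, cne_pause(), __cne_always_inline and the CNE_MEMORY_ORDER()/
 * atomic_*_explicit wrappers are assumed to be provided by the companion
 * CNDP headers (e.g. cne_common.h and the ring definitions) included before
 * this file.
 */

/*
 * The actual copy of object pointers into the ring.  Kept as a macro because
 * the identical code is needed by both the single- and multi-producer enqueue
 * paths; 'obj_type' lets one body serve different element types.
 */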
#define ENQUEUE_PTRS(r, ring_start, prod_head, obj_table, n, obj_type) do { \
        unsigned int i; \
        const uint32_t size = (r)->size; \
        uint32_t idx = prod_head & (r)->mask; \
        obj_type *ring = (obj_type *)ring_start; \
        if (likely(idx + n < size)) { \
            for (i = 0; i < (n & ((~(unsigned)0x3))); i += 4, idx += 4) { \
                ring[idx] = obj_table[i]; \
                ring[idx + 1] = obj_table[i + 1]; \
                ring[idx + 2] = obj_table[i + 2]; \
                ring[idx + 3] = obj_table[i + 3]; \
            } \
            switch (n & 0x3) { \
            case 3: \
                ring[idx++] = obj_table[i++]; /* fallthrough */ \
            case 2: \
                ring[idx++] = obj_table[i++]; /* fallthrough */ \
            case 1: \
                ring[idx++] = obj_table[i++]; \
            } \
        } else { \
            for (i = 0; idx < size; i++, idx++) \
                ring[idx] = obj_table[i]; \
            for (idx = 0; i < n; i++, idx++) \
                ring[idx] = obj_table[i]; \
        } \
    } while (0)
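/*
 * The actual copy of object pointers out of the ring.  Mirrors ENQUEUE_PTRS:
 * the same body serves both the single- and multi-consumer dequeue paths.
 */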
#define DEQUEUE_PTRS(r, ring_start, cons_head, obj_table, n, obj_type) do { \
        unsigned int i; \
        uint32_t idx = cons_head & (r)->mask; \
        const uint32_t size = (r)->size; \
        obj_type *ring = (obj_type *)ring_start; \
        if (likely(idx + n < size)) { \
            for (i = 0; i < (n & (~(unsigned)0x3)); i += 4, idx += 4) { \
                obj_table[i] = ring[idx]; \
                obj_table[i + 1] = ring[idx + 1]; \
                obj_table[i + 2] = ring[idx + 2]; \
                obj_table[i + 3] = ring[idx + 3]; \
            } \
            switch (n & 0x3) { \
            case 3: \
                obj_table[i++] = ring[idx++]; /* fallthrough */ \
            case 2: \
                obj_table[i++] = ring[idx++]; /* fallthrough */ \
            case 1: \
                obj_table[i++] = ring[idx++]; \
            } \
        } else { \
            for (i = 0; idx < size; i++, idx++) \
                obj_table[i] = ring[idx]; \
            for (idx = 0; i < n; i++, idx++) \
                obj_table[i] = ring[idx]; \
        } \
    } while (0)
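/*
 * Publish a new tail value after a copy to/from the ring has completed.
 * If other enqueues/dequeues preceded this one, wait for them to finish so
 * the tails always advance in order; the final store-release makes the copied
 * objects visible to the other side of the ring.
 */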
static __cne_always_inline void
update_tail(struct cne_ring_headtail *ht, uint32_t old_val, uint32_t new_val, uint32_t single)
{
    if (!single) {
        uint64_t timo = 1000;

        while (unlikely(atomic_load_explicit(&ht->tail, CNE_MEMORY_ORDER(relaxed)) != old_val)) {
            if (--timo == 0) { /* spin body reconstructed: periodic backoff, cne_pause() assumed */
                timo = 1000;
                cne_pause();
            }
        }
    }
    atomic_store_explicit(&ht->tail, new_val, CNE_MEMORY_ORDER(release));
}
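/*
 * Move prod.head forward to reserve space for an enqueue of up to 'n' objects.
 * Returns the number of slots actually reserved: with
 * CNE_RING_QUEUE_FIXED_ITEMS behavior this is either 'n' or 0, otherwise it
 * may be anything up to 'n'.  *old_head/*new_head bracket the reserved region
 * and *free_entries reports the free space seen before the reservation.
 */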
static __cne_always_inline unsigned int
__cne_ring_move_prod_head(struct cne_ring *r, unsigned int is_sp, unsigned int n,
                          enum cne_ring_queue_behavior behavior, uint32_t *old_head,
                          uint32_t *new_head, uint32_t *free_entries)
{
    const uint32_t capacity = r->capacity;
    uint32_t cons_tail;
    unsigned int max = n;
    int success;

    do {
        n = max; /* reset n to the initial burst count on each retry */

        *old_head = atomic_load_explicit(&r->prod.head, CNE_MEMORY_ORDER(relaxed));

        /* keep the tail load below from being reordered before the head load
         * on weakly ordered machines (no-op on x86) */
        atomic_thread_fence(CNE_MEMORY_ORDER(acquire));

        /* load-acquire pairs with the store-release of the tail in update_tail() */
        cons_tail = atomic_load_explicit(&r->cons.tail, CNE_MEMORY_ORDER(acquire));

        /* unsigned 32-bit arithmetic keeps the result in [0, capacity] even
         * when *old_head has wrapped past cons_tail */
        *free_entries = (capacity + cons_tail - *old_head);

        /* check that we have enough room in the ring */
        if (unlikely(n > *free_entries))
            n = (behavior == CNE_RING_QUEUE_FIXED_ITEMS) ? 0 : *free_entries;

        if (n == 0)
            return 0;

        *new_head = *old_head + n;
        if (is_sp) {
            r->prod.head = *new_head;
            success      = 1;
        } else
            success = atomic_compare_exchange_strong_explicit(&r->prod.head, old_head, *new_head,
                                                              CNE_MEMORY_ORDER(relaxed),
                                                              CNE_MEMORY_ORDER(relaxed));
    } while (unlikely(success == 0));

    return n;
}
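/*
 * Move cons.head forward to reserve up to 'n' objects for a dequeue.
 * Same contract as __cne_ring_move_prod_head(), except *entries reports how
 * many objects were available before the reservation.
 */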
static __cne_always_inline unsigned int
__cne_ring_move_cons_head(struct cne_ring *r, unsigned int is_sc, unsigned int n,
                          enum cne_ring_queue_behavior behavior, uint32_t *old_head,
                          uint32_t *new_head, uint32_t *entries)
{
    unsigned int max = n;
    uint32_t prod_tail;
    int success;

    do {
        n = max; /* restart from the initial burst count on each retry */

        *old_head = atomic_load_explicit(&r->cons.head, CNE_MEMORY_ORDER(relaxed));

        /* keep the tail load below from being reordered before the head load
         * on weakly ordered machines (no-op on x86) */
        atomic_thread_fence(CNE_MEMORY_ORDER(acquire));

        /* load-acquire pairs with the store-release of the tail in update_tail() */
        prod_tail = atomic_load_explicit(&r->prod.tail, CNE_MEMORY_ORDER(acquire));

        /* unsigned 32-bit subtraction keeps the result in [0, capacity] even
         * when *old_head has wrapped past prod_tail */
        *entries = (prod_tail - *old_head);

        /* set the actual number of entries to dequeue */
        if (n > *entries)
            n = (behavior == CNE_RING_QUEUE_FIXED_ITEMS) ? 0 : *entries;

        if (unlikely(n == 0))
            return 0;

        *new_head = *old_head + n;
        if (is_sc) {
            r->cons.head = *new_head;
            success      = 1;
        } else
            success = atomic_compare_exchange_strong_explicit(&r->cons.head, old_head, *new_head,
                                                              CNE_MEMORY_ORDER(relaxed),
                                                              CNE_MEMORY_ORDER(relaxed));
    } while (unlikely(success == 0));

    return n;
}
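/*
 * Common enqueue path: reserve space, copy the object pointers into the ring,
 * then publish the new producer tail.  'is_sp' selects the single-producer
 * fast path and 'behavior' selects fixed versus variable burst semantics.
 */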
static __cne_always_inline unsigned int
__cne_ring_do_enqueue(struct cne_ring *r, void * const *obj_table, unsigned int n,
                      enum cne_ring_queue_behavior behavior, unsigned int is_sp,
                      unsigned int *free_space)
{
    uint32_t prod_head, prod_next;
    uint32_t free_entries;

    n = __cne_ring_move_prod_head(r, is_sp, n, behavior, &prod_head, &prod_next, &free_entries);
    if (n == 0)
        goto end;

    /* object storage begins just past the ring header */
    ENQUEUE_PTRS(r, &r[1], prod_head, obj_table, n, void *);

    update_tail(&r->prod, prod_head, prod_next, is_sp);
end:
    if (free_space != NULL)
        *free_space = free_entries - n;
    return n;
}
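/*
 * Common dequeue path: reserve objects, copy the object pointers out of the
 * ring, then publish the new consumer tail.  'is_sc' selects the
 * single-consumer fast path and 'behavior' selects fixed versus variable
 * burst semantics.
 */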
static __cne_always_inline unsigned int
__cne_ring_do_dequeue(struct cne_ring *r, void **obj_table, unsigned int n,
                      enum cne_ring_queue_behavior behavior, unsigned int is_sc,
                      unsigned int *available)
{
    uint32_t cons_head, cons_next;
    uint32_t entries;

    n = __cne_ring_move_cons_head(r, (int)is_sc, n, behavior, &cons_head, &cons_next, &entries);
    if (n == 0)
        goto end;

    /* object storage begins just past the ring header */
    DEQUEUE_PTRS(r, &r[1], cons_head, obj_table, n, void *);

    update_tail(&r->cons, cons_head, cons_next, is_sc);
end:
    if (available != NULL)
        *available = entries - n;
    return n;
}
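/*
 * Usage sketch (hypothetical wrapper, for illustration only): a multi-producer,
 * fixed-count bulk enqueue built on the helpers above would look roughly like
 *
 *   static inline unsigned int
 *   my_ring_mp_enqueue_bulk(struct cne_ring *r, void * const *objs,
 *                           unsigned int n, unsigned int *free_space)
 *   {
 *       return __cne_ring_do_enqueue(r, objs, n, CNE_RING_QUEUE_FIXED_ITEMS,
 *                                    0, free_space);
 *   }
 *
 * with is_sp passed as 0 so the multi-producer CAS path is taken; the real
 * public wrappers are provided by the CNDP ring API headers.
 */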
#endif /* _CNE_RING_GENERIC_H_ */