96 #include <sys/queue.h> 99 #include <rte_config.h> 107 #define RTE_TAILQ_RING_NAME "RTE_RING" 109 enum rte_ring_queue_behavior {
110 RTE_RING_QUEUE_FIXED = 0,
111 RTE_RING_QUEUE_VARIABLE
114 #define RTE_RING_MZ_PREFIX "RG_" 116 #define RTE_RING_NAMESIZE (RTE_MEMZONE_NAMESIZE - \ 117 sizeof(RTE_RING_MZ_PREFIX) + 1) 121 #if RTE_CACHE_LINE_SIZE < 128 122 #define PROD_ALIGN (RTE_CACHE_LINE_SIZE * 2) 123 #define CONS_ALIGN (RTE_CACHE_LINE_SIZE * 2) 125 #define PROD_ALIGN RTE_CACHE_LINE_SIZE 126 #define CONS_ALIGN RTE_CACHE_LINE_SIZE 130 struct rte_ring_headtail {
131 volatile uint32_t head;
132 volatile uint32_t tail;
167 #define RING_F_SP_ENQ 0x0001 168 #define RING_F_SC_DEQ 0x0002 177 #define RING_F_EXACT_SZ 0x0004 178 #define RTE_RING_SZ_MASK (0x7fffffffU) 279 int socket_id,
unsigned flags);
301 #define ENQUEUE_PTRS(r, ring_start, prod_head, obj_table, n, obj_type) do { \ 303 const uint32_t size = (r)->size; \ 304 uint32_t idx = prod_head & (r)->mask; \ 305 obj_type *ring = (obj_type *)ring_start; \ 306 if (likely(idx + n < size)) { \ 307 for (i = 0; i < (n & ((~(unsigned)0x3))); i+=4, idx+=4) { \ 308 ring[idx] = obj_table[i]; \ 309 ring[idx+1] = obj_table[i+1]; \ 310 ring[idx+2] = obj_table[i+2]; \ 311 ring[idx+3] = obj_table[i+3]; \ 315 ring[idx++] = obj_table[i++]; \ 317 ring[idx++] = obj_table[i++]; \ 319 ring[idx++] = obj_table[i++]; \ 322 for (i = 0; idx < size; i++, idx++)\ 323 ring[idx] = obj_table[i]; \ 324 for (idx = 0; i < n; i++, idx++) \ 325 ring[idx] = obj_table[i]; \ 332 #define DEQUEUE_PTRS(r, ring_start, cons_head, obj_table, n, obj_type) do { \ 334 uint32_t idx = cons_head & (r)->mask; \ 335 const uint32_t size = (r)->size; \ 336 obj_type *ring = (obj_type *)ring_start; \ 337 if (likely(idx + n < size)) { \ 338 for (i = 0; i < (n & (~(unsigned)0x3)); i+=4, idx+=4) {\ 339 obj_table[i] = ring[idx]; \ 340 obj_table[i+1] = ring[idx+1]; \ 341 obj_table[i+2] = ring[idx+2]; \ 342 obj_table[i+3] = ring[idx+3]; \ 346 obj_table[i++] = ring[idx++]; \ 348 obj_table[i++] = ring[idx++]; \ 350 obj_table[i++] = ring[idx++]; \ 353 for (i = 0; idx < size; i++, idx++) \ 354 obj_table[i] = ring[idx]; \ 355 for (idx = 0; i < n; i++, idx++) \ 356 obj_table[i] = ring[idx]; \ 361 update_tail(
struct rte_ring_headtail *ht, uint32_t old_val, uint32_t new_val,
369 while (
unlikely(ht->tail != old_val))
399 __rte_ring_move_prod_head(
struct rte_ring *r,
int is_sp,
400 unsigned int n,
enum rte_ring_queue_behavior behavior,
401 uint32_t *old_head, uint32_t *new_head,
402 uint32_t *free_entries)
405 unsigned int max = n;
412 *old_head = r->prod.head;
419 const uint32_t cons_tail = r->cons.tail;
426 *free_entries = (capacity + cons_tail - *old_head);
430 n = (behavior == RTE_RING_QUEUE_FIXED) ?
436 *new_head = *old_head + n;
438 r->prod.head = *new_head, success = 1;
441 *old_head, *new_head);
467 __rte_ring_do_enqueue(
struct rte_ring *r,
void *
const *obj_table,
468 unsigned int n,
enum rte_ring_queue_behavior behavior,
469 int is_sp,
unsigned int *free_space)
471 uint32_t prod_head, prod_next;
472 uint32_t free_entries;
474 n = __rte_ring_move_prod_head(r, is_sp, n, behavior,
475 &prod_head, &prod_next, &free_entries);
479 ENQUEUE_PTRS(r, &r[1], prod_head, obj_table, n,
void *);
482 update_tail(&r->prod, prod_head, prod_next, is_sp);
484 if (free_space != NULL)
485 *free_space = free_entries - n;
513 __rte_ring_move_cons_head(
struct rte_ring *r,
int is_sc,
514 unsigned int n,
enum rte_ring_queue_behavior behavior,
515 uint32_t *old_head, uint32_t *new_head,
518 unsigned int max = n;
526 *old_head = r->cons.head;
533 const uint32_t prod_tail = r->prod.tail;
538 *entries = (prod_tail - *old_head);
542 n = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : *entries;
547 *new_head = *old_head + n;
549 r->cons.head = *new_head, success = 1;
578 __rte_ring_do_dequeue(
struct rte_ring *r,
void **obj_table,
579 unsigned int n,
enum rte_ring_queue_behavior behavior,
580 int is_sc,
unsigned int *available)
582 uint32_t cons_head, cons_next;
585 n = __rte_ring_move_cons_head(r, is_sc, n, behavior,
586 &cons_head, &cons_next, &entries);
590 DEQUEUE_PTRS(r, &r[1], cons_head, obj_table, n,
void *);
593 update_tail(&r->cons, cons_head, cons_next, is_sc);
596 if (available != NULL)
597 *available = entries - n;
621 unsigned int n,
unsigned int *free_space)
623 return __rte_ring_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
624 __IS_MP, free_space);
644 unsigned int n,
unsigned int *free_space)
646 return __rte_ring_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
647 __IS_SP, free_space);
671 unsigned int n,
unsigned int *free_space)
673 return __rte_ring_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
674 r->prod.single, free_space);
755 unsigned int n,
unsigned int *available)
757 return __rte_ring_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
779 unsigned int n,
unsigned int *available)
781 return __rte_ring_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
806 unsigned int *available)
808 return __rte_ring_do_dequeue(r, obj_table, n, RTE_RING_QUEUE_FIXED,
809 r->cons.single, available);
881 static inline unsigned 884 uint32_t prod_tail = r->prod.tail;
885 uint32_t cons_tail = r->cons.tail;
886 uint32_t count = (prod_tail - cons_tail) & r->
mask;
898 static inline unsigned 944 static inline unsigned int 958 static inline unsigned int 1004 unsigned int n,
unsigned int *free_space)
1006 return __rte_ring_do_enqueue(r, obj_table, n,
1007 RTE_RING_QUEUE_VARIABLE, __IS_MP, free_space);
1027 unsigned int n,
unsigned int *free_space)
1029 return __rte_ring_do_enqueue(r, obj_table, n,
1030 RTE_RING_QUEUE_VARIABLE, __IS_SP, free_space);
1054 unsigned int n,
unsigned int *free_space)
1056 return __rte_ring_do_enqueue(r, obj_table, n, RTE_RING_QUEUE_VARIABLE,
1057 r->prod.single, free_space);
1082 unsigned int n,
unsigned int *available)
1084 return __rte_ring_do_dequeue(r, obj_table, n,
1085 RTE_RING_QUEUE_VARIABLE, __IS_MC, available);
1107 unsigned int n,
unsigned int *available)
1109 return __rte_ring_do_dequeue(r, obj_table, n,
1110 RTE_RING_QUEUE_VARIABLE, __IS_SC, available);
1134 unsigned int n,
unsigned int *available)
1136 return __rte_ring_do_dequeue(r, obj_table, n,
1137 RTE_RING_QUEUE_VARIABLE,
1138 r->cons.single, available);
/*
 * NOTE(review): the lines below appear to be an auto-generated symbol
 * index appended by an extraction tool, not compilable C: each entry is
 * a signature, macro name, or struct-field fragment with no terminating
 * ';' or body.  They list the public API of this ring header plus the
 * external helpers it relies on (rte_pause, rte_smp_rmb/wmb,
 * rte_atomic32_cmpset) and fields of struct rte_ring (memzone, name).
 * Left byte-identical; verify against the original header before editing.
 */
static void rte_smp_rmb(void)
static int rte_atomic32_cmpset(volatile uint32_t *dst, uint32_t exp, uint32_t src)
#define __rte_always_inline
const struct rte_memzone * memzone
static __rte_always_inline unsigned int rte_ring_enqueue_bulk(struct rte_ring *r, void *const *obj_table, unsigned int n, unsigned int *free_space)
char name[RTE_MEMZONE_NAMESIZE] __rte_cache_aligned
static __rte_always_inline int rte_ring_dequeue(struct rte_ring *r, void **obj_p)
static __rte_always_inline unsigned rte_ring_mc_dequeue_burst(struct rte_ring *r, void **obj_table, unsigned int n, unsigned int *available)
static int rte_ring_empty(const struct rte_ring *r)
static __rte_always_inline unsigned rte_ring_mp_enqueue_burst(struct rte_ring *r, void *const *obj_table, unsigned int n, unsigned int *free_space)
static __rte_always_inline int rte_ring_mc_dequeue(struct rte_ring *r, void **obj_p)
void rte_ring_list_dump(FILE *f)
static __rte_always_inline unsigned int rte_ring_mp_enqueue_bulk(struct rte_ring *r, void *const *obj_table, unsigned int n, unsigned int *free_space)
static __rte_always_inline int rte_ring_sp_enqueue(struct rte_ring *r, void *obj)
static __rte_always_inline int rte_ring_mp_enqueue(struct rte_ring *r, void *obj)
static unsigned int rte_ring_get_capacity(const struct rte_ring *r)
static unsigned int rte_ring_get_size(const struct rte_ring *r)
static __rte_always_inline int rte_ring_sc_dequeue(struct rte_ring *r, void **obj_p)
void rte_ring_free(struct rte_ring *r)
static void rte_pause(void)
static __rte_always_inline unsigned rte_ring_sp_enqueue_burst(struct rte_ring *r, void *const *obj_table, unsigned int n, unsigned int *free_space)
static void rte_smp_wmb(void)
static __rte_always_inline unsigned rte_ring_sc_dequeue_burst(struct rte_ring *r, void **obj_table, unsigned int n, unsigned int *available)
static __rte_always_inline unsigned rte_ring_dequeue_burst(struct rte_ring *r, void **obj_table, unsigned int n, unsigned int *available)
static __rte_always_inline unsigned int rte_ring_sp_enqueue_bulk(struct rte_ring *r, void *const *obj_table, unsigned int n, unsigned int *free_space)
void rte_ring_dump(FILE *f, const struct rte_ring *r)
static unsigned rte_ring_count(const struct rte_ring *r)
struct rte_ring * rte_ring_create(const char *name, unsigned count, int socket_id, unsigned flags)
static __rte_always_inline unsigned int rte_ring_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned int n, unsigned int *available)
static __rte_always_inline unsigned int rte_ring_sc_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned int n, unsigned int *available)
struct rte_ring * rte_ring_lookup(const char *name)
static unsigned rte_ring_free_count(const struct rte_ring *r)
static __rte_always_inline int rte_ring_enqueue(struct rte_ring *r, void *obj)
static __rte_always_inline unsigned rte_ring_enqueue_burst(struct rte_ring *r, void *const *obj_table, unsigned int n, unsigned int *free_space)
static __rte_always_inline unsigned int rte_ring_mc_dequeue_bulk(struct rte_ring *r, void **obj_table, unsigned int n, unsigned int *available)
int rte_ring_init(struct rte_ring *r, const char *name, unsigned count, unsigned flags)
static int rte_ring_full(const struct rte_ring *r)
ssize_t rte_ring_get_memsize(unsigned count)
#define RTE_MEMZONE_NAMESIZE