/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2019 Arm Limited
 * Copyright (c) 2010-2017 Intel Corporation
 * Copyright (c) 2007-2009 Kip Macy kmacy@freebsd.org
 * All rights reserved.
 * Derived from FreeBSD's bufring.h
 * Used as BSD-3 Licensed with permission from Kip Macy.
 */

#ifndef _RTE_RING_ELEM_H_
#define _RTE_RING_ELEM_H_

/**
 * @file
 * RTE Ring with user defined element size
 */

#ifdef __cplusplus
extern "C" {
#endif

#include <rte_ring_core.h>

/**
 * Calculate the memory size needed for a ring with given element size
 *
 * This function returns the number of bytes needed for a ring, given
 * the number of elements in it and the size of the element. This value
 * is the sum of the size of the structure rte_ring and the size of the
 * memory needed for storing the elements. The value is aligned to a cache
 * line size.
 *
 * @param esize
 *   The size of ring element, in bytes. It must be a multiple of 4.
 * @param count
 *   The number of elements in the ring (must be a power of 2).
 * @return
 *   - The memory size needed for the ring on success.
 *   - -EINVAL - esize is not a multiple of 4 or count provided is not a
 *     power of 2.
 */
ssize_t rte_ring_get_memsize_elem(unsigned int esize, unsigned int count);
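
/*
 * Usage sketch (illustrative, not part of the API): size and allocate
 * the memory for a ring of 1024 16-byte elements by hand. rte_zmalloc()
 * comes from <rte_malloc.h>; error handling is elided.
 *
 *	ssize_t sz = rte_ring_get_memsize_elem(16, 1024);
 *	void *mem = NULL;
 *
 *	if (sz >= 0)
 *		mem = rte_zmalloc(NULL, sz, RTE_CACHE_LINE_SIZE);
 */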

/**
 * Create a new ring named *name* that stores elements with given size.
 *
 * This function uses ``rte_memzone_reserve()`` to allocate memory. Then it
 * calls rte_ring_init() to initialize an empty ring.
 *
 * The new ring size is set to *count*, which must be a power of
 * two. The real usable ring size is *count-1* instead of *count* to
 * differentiate a full ring from an empty ring.
 *
 * The ring is added to the RTE_TAILQ_RING list.
 *
 * @param name
 *   The name of the ring.
 * @param esize
 *   The size of ring element, in bytes. It must be a multiple of 4.
 * @param count
 *   The number of elements in the ring (must be a power of 2).
 * @param socket_id
 *   The *socket_id* argument is the socket identifier in case of
 *   NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA
 *   constraint for the reserved zone.
 * @param flags
 *   An OR of the following:
 *   - One of mutually exclusive flags that define producer behavior:
 *      - RING_F_SP_ENQ: If this flag is set, the default behavior when
 *        using ``rte_ring_enqueue()`` or ``rte_ring_enqueue_bulk()``
 *        is "single-producer".
 *      - RING_F_MP_RTS_ENQ: If this flag is set, the default behavior when
 *        using ``rte_ring_enqueue()`` or ``rte_ring_enqueue_bulk()``
 *        is "multi-producer RTS mode".
 *      - RING_F_MP_HTS_ENQ: If this flag is set, the default behavior when
 *        using ``rte_ring_enqueue()`` or ``rte_ring_enqueue_bulk()``
 *        is "multi-producer HTS mode".
 *     If none of these flags is set, then default "multi-producer"
 *     behavior is selected.
 *   - One of mutually exclusive flags that define consumer behavior:
 *      - RING_F_SC_DEQ: If this flag is set, the default behavior when
 *        using ``rte_ring_dequeue()`` or ``rte_ring_dequeue_bulk()``
 *        is "single-consumer".
 *      - RING_F_MC_RTS_DEQ: If this flag is set, the default behavior when
 *        using ``rte_ring_dequeue()`` or ``rte_ring_dequeue_bulk()``
 *        is "multi-consumer RTS mode".
 *      - RING_F_MC_HTS_DEQ: If this flag is set, the default behavior when
 *        using ``rte_ring_dequeue()`` or ``rte_ring_dequeue_bulk()``
 *        is "multi-consumer HTS mode".
 *     If none of these flags is set, then default "multi-consumer"
 *     behavior is selected.
 * @return
 *   On success, the pointer to the new allocated ring. NULL on error with
 *   rte_errno set appropriately. Possible errno values include:
 *   - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure
 *   - E_RTE_SECONDARY - function was called from a secondary process instance
 *   - EINVAL - esize is not a multiple of 4 or count provided is not a
 *     power of 2.
 *   - ENOSPC - the maximum number of memzones has already been allocated
 *   - EEXIST - a memzone with the same name already exists
 *   - ENOMEM - no appropriate memory area found in which to create memzone
 */
struct rte_ring *rte_ring_create_elem(const char *name, unsigned int esize,
		unsigned int count, int socket_id, unsigned int flags);
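
/*
 * Usage sketch (illustrative; the ring name "ring16" is made up):
 * create a single-producer/single-consumer ring of 4096 16-byte
 * elements on any NUMA socket. rte_strerror() and rte_errno come from
 * <rte_errno.h>.
 *
 *	struct rte_ring *r = rte_ring_create_elem("ring16", 16, 4096,
 *			SOCKET_ID_ANY, RING_F_SP_ENQ | RING_F_SC_DEQ);
 *
 *	if (r == NULL)
 *		printf("ring creation failed: %s\n",
 *				rte_strerror(rte_errno));
 */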

static __rte_always_inline void
__rte_ring_enqueue_elems_32(struct rte_ring *r, const uint32_t size,
		uint32_t idx, const void *obj_table, uint32_t n)
{
	unsigned int i;
	uint32_t *ring = (uint32_t *)&r[1];
	const uint32_t *obj = (const uint32_t *)obj_table;
	if (likely(idx + n < size)) {
		for (i = 0; i < (n & ~0x7); i += 8, idx += 8) {
			ring[idx] = obj[i];
			ring[idx + 1] = obj[i + 1];
			ring[idx + 2] = obj[i + 2];
			ring[idx + 3] = obj[i + 3];
			ring[idx + 4] = obj[i + 4];
			ring[idx + 5] = obj[i + 5];
			ring[idx + 6] = obj[i + 6];
			ring[idx + 7] = obj[i + 7];
		}
		switch (n & 0x7) {
		case 7:
			ring[idx++] = obj[i++]; /* fallthrough */
		case 6:
			ring[idx++] = obj[i++]; /* fallthrough */
		case 5:
			ring[idx++] = obj[i++]; /* fallthrough */
		case 4:
			ring[idx++] = obj[i++]; /* fallthrough */
		case 3:
			ring[idx++] = obj[i++]; /* fallthrough */
		case 2:
			ring[idx++] = obj[i++]; /* fallthrough */
		case 1:
			ring[idx++] = obj[i++]; /* fallthrough */
		}
	} else {
		for (i = 0; idx < size; i++, idx++)
			ring[idx] = obj[i];
		/* Start at the beginning */
		for (idx = 0; i < n; i++, idx++)
			ring[idx] = obj[i];
	}
}

static __rte_always_inline void
__rte_ring_enqueue_elems_64(struct rte_ring *r, uint32_t prod_head,
		const void *obj_table, uint32_t n)
{
	unsigned int i;
	const uint32_t size = r->size;
	uint32_t idx = prod_head & r->mask;
	uint64_t *ring = (uint64_t *)&r[1];
	const unaligned_uint64_t *obj = (const unaligned_uint64_t *)obj_table;
	if (likely(idx + n < size)) {
		for (i = 0; i < (n & ~0x3); i += 4, idx += 4) {
			ring[idx] = obj[i];
			ring[idx + 1] = obj[i + 1];
			ring[idx + 2] = obj[i + 2];
			ring[idx + 3] = obj[i + 3];
		}
		switch (n & 0x3) {
		case 3:
			ring[idx++] = obj[i++]; /* fallthrough */
		case 2:
			ring[idx++] = obj[i++]; /* fallthrough */
		case 1:
			ring[idx++] = obj[i++];
		}
	} else {
		for (i = 0; idx < size; i++, idx++)
			ring[idx] = obj[i];
		/* Start at the beginning */
		for (idx = 0; i < n; i++, idx++)
			ring[idx] = obj[i];
	}
}

static __rte_always_inline void
__rte_ring_enqueue_elems_128(struct rte_ring *r, uint32_t prod_head,
		const void *obj_table, uint32_t n)
{
	unsigned int i;
	const uint32_t size = r->size;
	uint32_t idx = prod_head & r->mask;
	rte_int128_t *ring = (rte_int128_t *)&r[1];
	const rte_int128_t *obj = (const rte_int128_t *)obj_table;
	if (likely(idx + n < size)) {
		for (i = 0; i < (n & ~0x1); i += 2, idx += 2)
			memcpy((void *)(ring + idx),
				(const void *)(obj + i), 32);
		switch (n & 0x1) {
		case 1:
			memcpy((void *)(ring + idx),
				(const void *)(obj + i), 16);
		}
	} else {
		for (i = 0; idx < size; i++, idx++)
			memcpy((void *)(ring + idx),
				(const void *)(obj + i), 16);
		/* Start at the beginning */
		for (idx = 0; i < n; i++, idx++)
			memcpy((void *)(ring + idx),
				(const void *)(obj + i), 16);
	}
}

/* the actual enqueue of elements on the ring.
 * Placed here since identical code needed in both
 * single and multi producer enqueue functions.
 */
static __rte_always_inline void
__rte_ring_enqueue_elems(struct rte_ring *r, uint32_t prod_head,
		const void *obj_table, uint32_t esize, uint32_t num)
{
	/* 8B and 16B copies implemented individually to retain
	 * the current performance.
	 */
	if (esize == 8)
		__rte_ring_enqueue_elems_64(r, prod_head, obj_table, num);
	else if (esize == 16)
		__rte_ring_enqueue_elems_128(r, prod_head, obj_table, num);
	else {
		uint32_t idx, scale, nr_idx, nr_num, nr_size;

		/* Normalize to uint32_t */
		scale = esize / sizeof(uint32_t);
		nr_num = num * scale;
		idx = prod_head & r->mask;
		nr_idx = idx * scale;
		nr_size = r->size * scale;
		__rte_ring_enqueue_elems_32(r, nr_size, nr_idx,
			obj_table, nr_num);
	}
}
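
/*
 * Worked example of the normalization above (assumed values): with
 * esize = 12, scale = 3, so the element at ring index idx begins at
 * 32-bit word idx * 3 and num elements span num * 3 words. This is why
 * esize must be a multiple of 4: the copy loops operate on whole
 * uint32_t words.
 */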

static __rte_always_inline void
__rte_ring_dequeue_elems_32(struct rte_ring *r, const uint32_t size,
		uint32_t idx, void *obj_table, uint32_t n)
{
	unsigned int i;
	uint32_t *ring = (uint32_t *)&r[1];
	uint32_t *obj = (uint32_t *)obj_table;
	if (likely(idx + n < size)) {
		for (i = 0; i < (n & ~0x7); i += 8, idx += 8) {
			obj[i] = ring[idx];
			obj[i + 1] = ring[idx + 1];
			obj[i + 2] = ring[idx + 2];
			obj[i + 3] = ring[idx + 3];
			obj[i + 4] = ring[idx + 4];
			obj[i + 5] = ring[idx + 5];
			obj[i + 6] = ring[idx + 6];
			obj[i + 7] = ring[idx + 7];
		}
		switch (n & 0x7) {
		case 7:
			obj[i++] = ring[idx++]; /* fallthrough */
		case 6:
			obj[i++] = ring[idx++]; /* fallthrough */
		case 5:
			obj[i++] = ring[idx++]; /* fallthrough */
		case 4:
			obj[i++] = ring[idx++]; /* fallthrough */
		case 3:
			obj[i++] = ring[idx++]; /* fallthrough */
		case 2:
			obj[i++] = ring[idx++]; /* fallthrough */
		case 1:
			obj[i++] = ring[idx++]; /* fallthrough */
		}
	} else {
		for (i = 0; idx < size; i++, idx++)
			obj[i] = ring[idx];
		/* Start at the beginning */
		for (idx = 0; i < n; i++, idx++)
			obj[i] = ring[idx];
	}
}

static __rte_always_inline void
__rte_ring_dequeue_elems_64(struct rte_ring *r, uint32_t cons_head,
		void *obj_table, uint32_t n)
{
	unsigned int i;
	const uint32_t size = r->size;
	uint32_t idx = cons_head & r->mask;
	uint64_t *ring = (uint64_t *)&r[1];
	unaligned_uint64_t *obj = (unaligned_uint64_t *)obj_table;
	if (likely(idx + n < size)) {
		for (i = 0; i < (n & ~0x3); i += 4, idx += 4) {
			obj[i] = ring[idx];
			obj[i + 1] = ring[idx + 1];
			obj[i + 2] = ring[idx + 2];
			obj[i + 3] = ring[idx + 3];
		}
		switch (n & 0x3) {
		case 3:
			obj[i++] = ring[idx++]; /* fallthrough */
		case 2:
			obj[i++] = ring[idx++]; /* fallthrough */
		case 1:
			obj[i++] = ring[idx++]; /* fallthrough */
		}
	} else {
		for (i = 0; idx < size; i++, idx++)
			obj[i] = ring[idx];
		/* Start at the beginning */
		for (idx = 0; i < n; i++, idx++)
			obj[i] = ring[idx];
	}
}

static __rte_always_inline void
__rte_ring_dequeue_elems_128(struct rte_ring *r, uint32_t cons_head,
		void *obj_table, uint32_t n)
{
	unsigned int i;
	const uint32_t size = r->size;
	uint32_t idx = cons_head & r->mask;
	rte_int128_t *ring = (rte_int128_t *)&r[1];
	rte_int128_t *obj = (rte_int128_t *)obj_table;
	if (likely(idx + n < size)) {
		for (i = 0; i < (n & ~0x1); i += 2, idx += 2)
			memcpy((void *)(obj + i), (void *)(ring + idx), 32);
		switch (n & 0x1) {
		case 1:
			memcpy((void *)(obj + i), (void *)(ring + idx), 16);
		}
	} else {
		for (i = 0; idx < size; i++, idx++)
			memcpy((void *)(obj + i), (void *)(ring + idx), 16);
		/* Start at the beginning */
		for (idx = 0; i < n; i++, idx++)
			memcpy((void *)(obj + i), (void *)(ring + idx), 16);
	}
}

/* the actual dequeue of elements from the ring.
 * Placed here since identical code needed in both
 * single and multi consumer dequeue functions.
 */
static __rte_always_inline void
__rte_ring_dequeue_elems(struct rte_ring *r, uint32_t cons_head,
		void *obj_table, uint32_t esize, uint32_t num)
{
	/* 8B and 16B copies implemented individually to retain
	 * the current performance.
	 */
	if (esize == 8)
		__rte_ring_dequeue_elems_64(r, cons_head, obj_table, num);
	else if (esize == 16)
		__rte_ring_dequeue_elems_128(r, cons_head, obj_table, num);
	else {
		uint32_t idx, scale, nr_idx, nr_num, nr_size;

		/* Normalize to uint32_t */
		scale = esize / sizeof(uint32_t);
		nr_num = num * scale;
		idx = cons_head & r->mask;
		nr_idx = idx * scale;
		nr_size = r->size * scale;
		__rte_ring_dequeue_elems_32(r, nr_size, nr_idx,
			obj_table, nr_num);
	}
}

/* Between two loads, the CPU may reorder memory accesses on weakly
 * ordered architectures (e.g. PowerPC/Arm).
 * There are two choices for the user:
 * 1. use an rmb() memory barrier
 * 2. use one-direction load-acquire/store-release barriers
 * The choice depends on performance test results.
 * By default, the common functions live in rte_ring_generic.h.
 */
#ifdef RTE_USE_C11_MEM_MODEL
#include "rte_ring_c11_mem.h"
#else
#include "rte_ring_generic.h"
#endif

/**
 * @internal Enqueue several objects on the ring
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of objects.
 * @param esize
 *   The size of ring element, in bytes. It must be a multiple of 4.
 *   This must be the same value used while creating the ring. Otherwise
 *   the results are undefined.
 * @param n
 *   The number of objects to add in the ring from the obj_table.
 * @param behavior
 *   RTE_RING_QUEUE_FIXED:    Enqueue a fixed number of items to the ring
 *   RTE_RING_QUEUE_VARIABLE: Enqueue as many items as possible to the ring
 * @param is_sp
 *   Indicates whether to use single producer or multi-producer head update
 * @param free_space
 *   returns the amount of space after the enqueue operation has finished
 * @return
 *   Actual number of objects enqueued.
 *   If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
 */
static __rte_always_inline unsigned int
__rte_ring_do_enqueue_elem(struct rte_ring *r, const void *obj_table,
		unsigned int esize, unsigned int n,
		enum rte_ring_queue_behavior behavior, unsigned int is_sp,
		unsigned int *free_space)
{
	uint32_t prod_head, prod_next;
	uint32_t free_entries;

	n = __rte_ring_move_prod_head(r, is_sp, n, behavior,
			&prod_head, &prod_next, &free_entries);
	if (n == 0)
		goto end;

	__rte_ring_enqueue_elems(r, prod_head, obj_table, esize, n);

	update_tail(&r->prod, prod_head, prod_next, is_sp, 1);
end:
	if (free_space != NULL)
		*free_space = free_entries - n;
	return n;
}

/**
 * @internal Dequeue several objects from the ring
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of objects.
 * @param esize
 *   The size of ring element, in bytes. It must be a multiple of 4.
 *   This must be the same value used while creating the ring. Otherwise
 *   the results are undefined.
 * @param n
 *   The number of objects to pull from the ring.
 * @param behavior
 *   RTE_RING_QUEUE_FIXED:    Dequeue a fixed number of items from the ring
 *   RTE_RING_QUEUE_VARIABLE: Dequeue as many items as possible from the ring
 * @param is_sc
 *   Indicates whether to use single consumer or multi-consumer head update
 * @param available
 *   returns the number of remaining ring entries after the dequeue has finished
 * @return
 *   - Actual number of objects dequeued.
 *     If behavior == RTE_RING_QUEUE_FIXED, this will be 0 or n only.
 */
static __rte_always_inline unsigned int
__rte_ring_do_dequeue_elem(struct rte_ring *r, void *obj_table,
		unsigned int esize, unsigned int n,
		enum rte_ring_queue_behavior behavior, unsigned int is_sc,
		unsigned int *available)
{
	uint32_t cons_head, cons_next;
	uint32_t entries;

	n = __rte_ring_move_cons_head(r, (int)is_sc, n, behavior,
			&cons_head, &cons_next, &entries);
	if (n == 0)
		goto end;

	__rte_ring_dequeue_elems(r, cons_head, obj_table, esize, n);

	update_tail(&r->cons, cons_head, cons_next, is_sc, 0);

end:
	if (available != NULL)
		*available = entries - n;
	return n;
}

/**
 * Enqueue several objects on the ring (multi-producers safe).
 *
 * This function uses a "compare and set" instruction to move the
 * producer index atomically.
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of objects.
 * @param esize
 *   The size of ring element, in bytes. It must be a multiple of 4.
 *   This must be the same value used while creating the ring. Otherwise
 *   the results are undefined.
 * @param n
 *   The number of objects to add in the ring from the obj_table.
 * @param free_space
 *   if non-NULL, returns the amount of space in the ring after the
 *   enqueue operation has finished.
 * @return
 *   The number of objects enqueued, either 0 or n
 */
static __rte_always_inline unsigned int
rte_ring_mp_enqueue_bulk_elem(struct rte_ring *r, const void *obj_table,
		unsigned int esize, unsigned int n, unsigned int *free_space)
{
	return __rte_ring_do_enqueue_elem(r, obj_table, esize, n,
			RTE_RING_QUEUE_FIXED, RTE_RING_SYNC_MT, free_space);
}

/**
 * Enqueue several objects on a ring
 *
 * @warning This API is NOT multi-producers safe
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of objects.
 * @param esize
 *   The size of ring element, in bytes. It must be a multiple of 4.
 *   This must be the same value used while creating the ring. Otherwise
 *   the results are undefined.
 * @param n
 *   The number of objects to add in the ring from the obj_table.
 * @param free_space
 *   if non-NULL, returns the amount of space in the ring after the
 *   enqueue operation has finished.
 * @return
 *   The number of objects enqueued, either 0 or n
 */
static __rte_always_inline unsigned int
rte_ring_sp_enqueue_bulk_elem(struct rte_ring *r, const void *obj_table,
		unsigned int esize, unsigned int n, unsigned int *free_space)
{
	return __rte_ring_do_enqueue_elem(r, obj_table, esize, n,
			RTE_RING_QUEUE_FIXED, RTE_RING_SYNC_ST, free_space);
}

#ifdef ALLOW_EXPERIMENTAL_API
#include <rte_ring_hts.h>
#include <rte_ring_rts.h>
#endif

/**
 * Enqueue several objects on a ring.
 *
 * This function calls the multi-producer or the single-producer
 * version depending on the default behavior that was specified at
 * ring creation time (see flags).
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of objects.
 * @param esize
 *   The size of ring element, in bytes. It must be a multiple of 4.
 *   This must be the same value used while creating the ring. Otherwise
 *   the results are undefined.
 * @param n
 *   The number of objects to add in the ring from the obj_table.
 * @param free_space
 *   if non-NULL, returns the amount of space in the ring after the
 *   enqueue operation has finished.
 * @return
 *   The number of objects enqueued, either 0 or n
 */
static __rte_always_inline unsigned int
rte_ring_enqueue_bulk_elem(struct rte_ring *r, const void *obj_table,
		unsigned int esize, unsigned int n, unsigned int *free_space)
{
	switch (r->prod.sync_type) {
	case RTE_RING_SYNC_MT:
		return rte_ring_mp_enqueue_bulk_elem(r, obj_table, esize, n,
			free_space);
	case RTE_RING_SYNC_ST:
		return rte_ring_sp_enqueue_bulk_elem(r, obj_table, esize, n,
			free_space);
#ifdef ALLOW_EXPERIMENTAL_API
	case RTE_RING_SYNC_MT_RTS:
		return rte_ring_mp_rts_enqueue_bulk_elem(r, obj_table, esize, n,
			free_space);
	case RTE_RING_SYNC_MT_HTS:
		return rte_ring_mp_hts_enqueue_bulk_elem(r, obj_table, esize, n,
			free_space);
#endif
	}

	/* valid ring should never reach this point */
	RTE_ASSERT(0);
	if (free_space != NULL)
		*free_space = 0;
	return 0;
}
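
/*
 * Usage sketch (illustrative): with RTE_RING_QUEUE_FIXED semantics the
 * call either enqueues all eight elements or none. "struct elem16" and
 * handle_full_ring() are hypothetical names.
 *
 *	struct elem16 { uint64_t lo, hi; };
 *	struct elem16 batch[8];
 *	unsigned int free_space;
 *
 *	if (rte_ring_enqueue_bulk_elem(r, batch, sizeof(batch[0]), 8,
 *			&free_space) == 0)
 *		handle_full_ring();
 */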

/**
 * Enqueue one object on a ring (multi-producers safe).
 *
 * This function uses a "compare and set" instruction to move the
 * producer index atomically.
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj
 *   A pointer to the object to be added.
 * @param esize
 *   The size of ring element, in bytes. It must be a multiple of 4.
 *   This must be the same value used while creating the ring. Otherwise
 *   the results are undefined.
 * @return
 *   - 0: Success; objects enqueued.
 *   - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
 */
static __rte_always_inline int
rte_ring_mp_enqueue_elem(struct rte_ring *r, void *obj, unsigned int esize)
{
	return rte_ring_mp_enqueue_bulk_elem(r, obj, esize, 1, NULL) ? 0 :
								-ENOBUFS;
}

/**
 * Enqueue one object on a ring
 *
 * @warning This API is NOT multi-producers safe
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj
 *   A pointer to the object to be added.
 * @param esize
 *   The size of ring element, in bytes. It must be a multiple of 4.
 *   This must be the same value used while creating the ring. Otherwise
 *   the results are undefined.
 * @return
 *   - 0: Success; objects enqueued.
 *   - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
 */
static __rte_always_inline int
rte_ring_sp_enqueue_elem(struct rte_ring *r, void *obj, unsigned int esize)
{
	return rte_ring_sp_enqueue_bulk_elem(r, obj, esize, 1, NULL) ? 0 :
								-ENOBUFS;
}

/**
 * Enqueue one object on a ring.
 *
 * This function calls the multi-producer or the single-producer
 * version, depending on the default behaviour that was specified at
 * ring creation time (see flags).
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj
 *   A pointer to the object to be added.
 * @param esize
 *   The size of ring element, in bytes. It must be a multiple of 4.
 *   This must be the same value used while creating the ring. Otherwise
 *   the results are undefined.
 * @return
 *   - 0: Success; objects enqueued.
 *   - -ENOBUFS: Not enough room in the ring to enqueue; no object is enqueued.
 */
static __rte_always_inline int
rte_ring_enqueue_elem(struct rte_ring *r, void *obj, unsigned int esize)
{
	return rte_ring_enqueue_bulk_elem(r, obj, esize, 1, NULL) ? 0 :
								-ENOBUFS;
}
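
/*
 * Usage sketch (illustrative): enqueue a single 8-byte element; the
 * esize passed here must match the one used at ring creation.
 *
 *	uint64_t v = 42;
 *
 *	if (rte_ring_enqueue_elem(r, &v, sizeof(v)) == -ENOBUFS)
 *		... the ring was full, nothing was enqueued ...
 */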

/**
 * Dequeue several objects from a ring (multi-consumers safe).
 *
 * This function uses a "compare and set" instruction to move the
 * consumer index atomically.
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of objects that will be filled.
 * @param esize
 *   The size of ring element, in bytes. It must be a multiple of 4.
 *   This must be the same value used while creating the ring. Otherwise
 *   the results are undefined.
 * @param n
 *   The number of objects to dequeue from the ring to the obj_table.
 * @param available
 *   If non-NULL, returns the number of remaining ring entries after the
 *   dequeue has finished.
 * @return
 *   The number of objects dequeued, either 0 or n
 */
static __rte_always_inline unsigned int
rte_ring_mc_dequeue_bulk_elem(struct rte_ring *r, void *obj_table,
		unsigned int esize, unsigned int n, unsigned int *available)
{
	return __rte_ring_do_dequeue_elem(r, obj_table, esize, n,
			RTE_RING_QUEUE_FIXED, RTE_RING_SYNC_MT, available);
}

/**
 * Dequeue several objects from a ring (NOT multi-consumers safe).
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of objects that will be filled.
 * @param esize
 *   The size of ring element, in bytes. It must be a multiple of 4.
 *   This must be the same value used while creating the ring. Otherwise
 *   the results are undefined.
 * @param n
 *   The number of objects to dequeue from the ring to the obj_table,
 *   must be strictly positive.
 * @param available
 *   If non-NULL, returns the number of remaining ring entries after the
 *   dequeue has finished.
 * @return
 *   The number of objects dequeued, either 0 or n
 */
static __rte_always_inline unsigned int
rte_ring_sc_dequeue_bulk_elem(struct rte_ring *r, void *obj_table,
		unsigned int esize, unsigned int n, unsigned int *available)
{
	return __rte_ring_do_dequeue_elem(r, obj_table, esize, n,
			RTE_RING_QUEUE_FIXED, RTE_RING_SYNC_ST, available);
}

/**
 * Dequeue several objects from a ring.
 *
 * This function calls the multi-consumers or the single-consumer
 * version, depending on the default behaviour that was specified at
 * ring creation time (see flags).
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of objects that will be filled.
 * @param esize
 *   The size of ring element, in bytes. It must be a multiple of 4.
 *   This must be the same value used while creating the ring. Otherwise
 *   the results are undefined.
 * @param n
 *   The number of objects to dequeue from the ring to the obj_table.
 * @param available
 *   If non-NULL, returns the number of remaining ring entries after the
 *   dequeue has finished.
 * @return
 *   The number of objects dequeued, either 0 or n
 */
static __rte_always_inline unsigned int
rte_ring_dequeue_bulk_elem(struct rte_ring *r, void *obj_table,
		unsigned int esize, unsigned int n, unsigned int *available)
{
	switch (r->cons.sync_type) {
	case RTE_RING_SYNC_MT:
		return rte_ring_mc_dequeue_bulk_elem(r, obj_table, esize, n,
			available);
	case RTE_RING_SYNC_ST:
		return rte_ring_sc_dequeue_bulk_elem(r, obj_table, esize, n,
			available);
#ifdef ALLOW_EXPERIMENTAL_API
	case RTE_RING_SYNC_MT_RTS:
		return rte_ring_mc_rts_dequeue_bulk_elem(r, obj_table, esize,
			n, available);
	case RTE_RING_SYNC_MT_HTS:
		return rte_ring_mc_hts_dequeue_bulk_elem(r, obj_table, esize,
			n, available);
#endif
	}

	/* valid ring should never reach this point */
	RTE_ASSERT(0);
	if (available != NULL)
		*available = 0;
	return 0;
}
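
/*
 * Usage sketch (illustrative; struct elem16 as in the earlier sketch is
 * hypothetical): a fixed-size dequeue either fills all eight slots or
 * none.
 *
 *	struct elem16 batch[8];
 *	unsigned int avail;
 *
 *	if (rte_ring_dequeue_bulk_elem(r, batch, sizeof(batch[0]), 8,
 *			&avail) == 0)
 *		... fewer than eight entries were available ...
 */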

/**
 * Dequeue one object from a ring (multi-consumers safe).
 *
 * This function uses a "compare and set" instruction to move the
 * consumer index atomically.
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_p
 *   A pointer to the object that will be filled.
 * @param esize
 *   The size of ring element, in bytes. It must be a multiple of 4.
 *   This must be the same value used while creating the ring. Otherwise
 *   the results are undefined.
 * @return
 *   - 0: Success; objects dequeued.
 *   - -ENOENT: Not enough entries in the ring to dequeue; no object is
 *     dequeued.
 */
static __rte_always_inline int
rte_ring_mc_dequeue_elem(struct rte_ring *r, void *obj_p,
		unsigned int esize)
{
	return rte_ring_mc_dequeue_bulk_elem(r, obj_p, esize, 1, NULL) ? 0 :
								-ENOENT;
}

/**
 * Dequeue one object from a ring (NOT multi-consumers safe).
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_p
 *   A pointer to the object that will be filled.
 * @param esize
 *   The size of ring element, in bytes. It must be a multiple of 4.
 *   This must be the same value used while creating the ring. Otherwise
 *   the results are undefined.
 * @return
 *   - 0: Success; objects dequeued.
 *   - -ENOENT: Not enough entries in the ring to dequeue, no object is
 *     dequeued.
 */
static __rte_always_inline int
rte_ring_sc_dequeue_elem(struct rte_ring *r, void *obj_p,
		unsigned int esize)
{
	return rte_ring_sc_dequeue_bulk_elem(r, obj_p, esize, 1, NULL) ? 0 :
								-ENOENT;
}

/**
 * Dequeue one object from a ring.
 *
 * This function calls the multi-consumers or the single-consumer
 * version depending on the default behaviour that was specified at
 * ring creation time (see flags).
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_p
 *   A pointer to the object that will be filled.
 * @param esize
 *   The size of ring element, in bytes. It must be a multiple of 4.
 *   This must be the same value used while creating the ring. Otherwise
 *   the results are undefined.
 * @return
 *   - 0: Success, objects dequeued.
 *   - -ENOENT: Not enough entries in the ring to dequeue, no object is
 *     dequeued.
 */
static __rte_always_inline int
rte_ring_dequeue_elem(struct rte_ring *r, void *obj_p, unsigned int esize)
{
	return rte_ring_dequeue_bulk_elem(r, obj_p, esize, 1, NULL) ? 0 :
								-ENOENT;
}
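
/*
 * Usage sketch (illustrative): dequeue one 8-byte element into v.
 *
 *	uint64_t v;
 *
 *	if (rte_ring_dequeue_elem(r, &v, sizeof(v)) == -ENOENT)
 *		... the ring was empty, v is left unmodified ...
 */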

/**
 * Enqueue several objects on the ring (multi-producers safe).
 *
 * This function uses a "compare and set" instruction to move the
 * producer index atomically.
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of objects.
 * @param esize
 *   The size of ring element, in bytes. It must be a multiple of 4.
 *   This must be the same value used while creating the ring. Otherwise
 *   the results are undefined.
 * @param n
 *   The number of objects to add in the ring from the obj_table.
 * @param free_space
 *   if non-NULL, returns the amount of space in the ring after the
 *   enqueue operation has finished.
 * @return
 *   - n: Actual number of objects enqueued.
 */
static __rte_always_inline unsigned int
rte_ring_mp_enqueue_burst_elem(struct rte_ring *r, const void *obj_table,
		unsigned int esize, unsigned int n, unsigned int *free_space)
{
	return __rte_ring_do_enqueue_elem(r, obj_table, esize, n,
			RTE_RING_QUEUE_VARIABLE, RTE_RING_SYNC_MT, free_space);
}

/**
 * Enqueue several objects on a ring
 *
 * @warning This API is NOT multi-producers safe
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of objects.
 * @param esize
 *   The size of ring element, in bytes. It must be a multiple of 4.
 *   This must be the same value used while creating the ring. Otherwise
 *   the results are undefined.
 * @param n
 *   The number of objects to add in the ring from the obj_table.
 * @param free_space
 *   if non-NULL, returns the amount of space in the ring after the
 *   enqueue operation has finished.
 * @return
 *   - n: Actual number of objects enqueued.
 */
static __rte_always_inline unsigned int
rte_ring_sp_enqueue_burst_elem(struct rte_ring *r, const void *obj_table,
		unsigned int esize, unsigned int n, unsigned int *free_space)
{
	return __rte_ring_do_enqueue_elem(r, obj_table, esize, n,
			RTE_RING_QUEUE_VARIABLE, RTE_RING_SYNC_ST, free_space);
}

/**
 * Enqueue several objects on a ring.
 *
 * This function calls the multi-producer or the single-producer
 * version depending on the default behavior that was specified at
 * ring creation time (see flags).
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of objects.
 * @param esize
 *   The size of ring element, in bytes. It must be a multiple of 4.
 *   This must be the same value used while creating the ring. Otherwise
 *   the results are undefined.
 * @param n
 *   The number of objects to add in the ring from the obj_table.
 * @param free_space
 *   if non-NULL, returns the amount of space in the ring after the
 *   enqueue operation has finished.
 * @return
 *   - n: Actual number of objects enqueued.
 */
static __rte_always_inline unsigned int
rte_ring_enqueue_burst_elem(struct rte_ring *r, const void *obj_table,
		unsigned int esize, unsigned int n, unsigned int *free_space)
{
	switch (r->prod.sync_type) {
	case RTE_RING_SYNC_MT:
		return rte_ring_mp_enqueue_burst_elem(r, obj_table, esize, n,
			free_space);
	case RTE_RING_SYNC_ST:
		return rte_ring_sp_enqueue_burst_elem(r, obj_table, esize, n,
			free_space);
#ifdef ALLOW_EXPERIMENTAL_API
	case RTE_RING_SYNC_MT_RTS:
		return rte_ring_mp_rts_enqueue_burst_elem(r, obj_table, esize,
			n, free_space);
	case RTE_RING_SYNC_MT_HTS:
		return rte_ring_mp_hts_enqueue_burst_elem(r, obj_table, esize,
			n, free_space);
#endif
	}

	/* valid ring should never reach this point */
	RTE_ASSERT(0);
	if (free_space != NULL)
		*free_space = 0;
	return 0;
}
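
/*
 * Usage sketch (illustrative; struct elem16 as in the earlier sketch is
 * hypothetical): unlike the bulk API, a burst enqueue may store only
 * part of the batch; the return value says how many were accepted.
 *
 *	struct elem16 batch[8];
 *	unsigned int sent;
 *
 *	sent = rte_ring_enqueue_burst_elem(r, batch, sizeof(batch[0]),
 *			8, NULL);
 *	if (sent < 8)
 *		... 8 - sent elements did not fit and remain unsent ...
 */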

/**
 * Dequeue several objects from a ring (multi-consumers safe). When the
 * requested number of objects exceeds the number available, only the
 * available objects are dequeued.
 *
 * This function uses a "compare and set" instruction to move the
 * consumer index atomically.
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of objects that will be filled.
 * @param esize
 *   The size of ring element, in bytes. It must be a multiple of 4.
 *   This must be the same value used while creating the ring. Otherwise
 *   the results are undefined.
 * @param n
 *   The number of objects to dequeue from the ring to the obj_table.
 * @param available
 *   If non-NULL, returns the number of remaining ring entries after the
 *   dequeue has finished.
 * @return
 *   - n: Actual number of objects dequeued, 0 if ring is empty
 */
static __rte_always_inline unsigned int
rte_ring_mc_dequeue_burst_elem(struct rte_ring *r, void *obj_table,
		unsigned int esize, unsigned int n, unsigned int *available)
{
	return __rte_ring_do_dequeue_elem(r, obj_table, esize, n,
			RTE_RING_QUEUE_VARIABLE, RTE_RING_SYNC_MT, available);
}

/**
 * Dequeue several objects from a ring (NOT multi-consumers safe). When
 * the requested number of objects exceeds the number available, only
 * the available objects are dequeued.
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of objects that will be filled.
 * @param esize
 *   The size of ring element, in bytes. It must be a multiple of 4.
 *   This must be the same value used while creating the ring. Otherwise
 *   the results are undefined.
 * @param n
 *   The number of objects to dequeue from the ring to the obj_table.
 * @param available
 *   If non-NULL, returns the number of remaining ring entries after the
 *   dequeue has finished.
 * @return
 *   - n: Actual number of objects dequeued, 0 if ring is empty
 */
static __rte_always_inline unsigned int
rte_ring_sc_dequeue_burst_elem(struct rte_ring *r, void *obj_table,
		unsigned int esize, unsigned int n, unsigned int *available)
{
	return __rte_ring_do_dequeue_elem(r, obj_table, esize, n,
			RTE_RING_QUEUE_VARIABLE, RTE_RING_SYNC_ST, available);
}

/**
 * Dequeue multiple objects from a ring up to a maximum number.
 *
 * This function calls the multi-consumers or the single-consumer
 * version, depending on the default behaviour that was specified at
 * ring creation time (see flags).
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of objects that will be filled.
 * @param esize
 *   The size of ring element, in bytes. It must be a multiple of 4.
 *   This must be the same value used while creating the ring. Otherwise
 *   the results are undefined.
 * @param n
 *   The number of objects to dequeue from the ring to the obj_table.
 * @param available
 *   If non-NULL, returns the number of remaining ring entries after the
 *   dequeue has finished.
 * @return
 *   - Number of objects dequeued
 */
static __rte_always_inline unsigned int
rte_ring_dequeue_burst_elem(struct rte_ring *r, void *obj_table,
		unsigned int esize, unsigned int n, unsigned int *available)
{
	switch (r->cons.sync_type) {
	case RTE_RING_SYNC_MT:
		return rte_ring_mc_dequeue_burst_elem(r, obj_table, esize, n,
			available);
	case RTE_RING_SYNC_ST:
		return rte_ring_sc_dequeue_burst_elem(r, obj_table, esize, n,
			available);
#ifdef ALLOW_EXPERIMENTAL_API
	case RTE_RING_SYNC_MT_RTS:
		return rte_ring_mc_rts_dequeue_burst_elem(r, obj_table, esize,
			n, available);
	case RTE_RING_SYNC_MT_HTS:
		return rte_ring_mc_hts_dequeue_burst_elem(r, obj_table, esize,
			n, available);
#endif
	}

	/* valid ring should never reach this point */
	RTE_ASSERT(0);
	if (available != NULL)
		*available = 0;
	return 0;
}
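
/*
 * Usage sketch (illustrative; struct elem16 and process() are
 * hypothetical): drain a ring in bursts of up to 32 elements until a
 * short count indicates it is empty. RTE_DIM() comes from
 * <rte_common.h>.
 *
 *	struct elem16 buf[32];
 *	unsigned int got;
 *
 *	do {
 *		got = rte_ring_dequeue_burst_elem(r, buf, sizeof(buf[0]),
 *				RTE_DIM(buf), NULL);
 *		process(buf, got);
 *	} while (got == RTE_DIM(buf));
 */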

#ifdef ALLOW_EXPERIMENTAL_API
#include <rte_ring_peek.h>
#include <rte_ring_peek_zc.h>
#endif

#include <rte_ring.h>

#ifdef __cplusplus
}
#endif

#endif /* _RTE_RING_ELEM_H_ */