/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 6WIND S.A.
 * Copyright 2015 Mellanox Technologies, Ltd
 */

#ifndef RTE_PMD_MLX5_UTILS_H_
#define RTE_PMD_MLX5_UTILS_H_

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <limits.h>
#include <errno.h>

#include <rte_spinlock.h>
#include <rte_rwlock.h>
#include <rte_memory.h>
#include <rte_bitmap.h>

#include <mlx5_common.h>
#include <mlx5_common_utils.h>

#include "mlx5_defs.h"

/* Convert a bit number to the corresponding 64-bit mask. */
#define MLX5_BITSHIFT(v) (UINT64_C(1) << (v))

/* Save and restore errno around argument evaluation. */
#define ERRNO_SAFE(x) ((errno = (int []){ errno, ((x), 0) }[0]))
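
/*
 * Usage sketch (illustrative only): evaluate an expression that may
 * clobber errno without losing the current value.
 *
 *   errno = EBUSY;
 *   ERRNO_SAFE(close(fd));  // close() may set errno; EBUSY is restored.
 *   // errno == EBUSY here regardless of what close() did.
 */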

extern int mlx5_logtype;

#define MLX5_NET_LOG_PREFIX "mlx5_net"

/* Generic printf()-like logging macro with automatic line feed. */
#define DRV_LOG(level, ...) \
	PMD_DRV_LOG_(level, mlx5_logtype, MLX5_NET_LOG_PREFIX, \
		__VA_ARGS__ PMD_DRV_LOG_STRIP PMD_DRV_LOG_OPAREN, \
		PMD_DRV_LOG_CPAREN)

/* Convenience macros for accessing mbuf fields. */
#define NEXT(m) ((m)->next)
#define DATA_LEN(m) ((m)->data_len)
#define PKT_LEN(m) ((m)->pkt_len)
#define DATA_OFF(m) ((m)->data_off)
#define SET_DATA_OFF(m, o) ((m)->data_off = (o))
#define NB_SEGS(m) ((m)->nb_segs)
#define PORT(m) ((m)->port)

/* Transpose flags. Useful to convert IBV to DPDK flags. */
#define TRANSPOSE(val, from, to) \
	(((from) >= (to)) ? \
	 (((val) & (from)) / ((from) / (to))) : \
	 (((val) & (from)) * ((to) / (from))))
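
/*
 * Worked example (values are illustrative, not real flag definitions):
 * moving a flag from bit 3 (from == 0x8) to bit 1 (to == 0x2) divides the
 * masked value by from / to:
 *
 *   TRANSPOSE(0x8, 0x8, 0x2) == (0x8 & 0x8) / (0x8 / 0x2) == 0x2
 *
 * while moving it the other way multiplies instead:
 *
 *   TRANSPOSE(0x2, 0x2, 0x8) == (0x2 & 0x2) * (0x8 / 0x2) == 0x8
 */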

/*
 * When data entries are tied to a sequentially increasing index, an array
 * table is more efficient than a hash table for looking up one entry among
 * a large number of entries. A traditional hash table also has a fixed
 * size, so storing a huge number of entries causes many hash collisions.
 *
 * But a simple array table has a fixed size as well, and allocating all
 * the needed memory at once wastes a lot of it. When the exact number of
 * entries is unknown in advance, sizing the array is impossible.
 *
 * A multi-level table balances these two disadvantages. A global top-level
 * table holding sub-table entries is allocated first, and each sub-table
 * is allocated only once an index falling into it needs to be saved.
 * E.g. for an index of up to 32 bits, a three-level table with a 10-10-12
 * bit split and a sequentially increasing index grows its memory by one
 * 4K-entry sub-table at a time.
 *
 * The current implementation is a three-level table with a 10-10-12 split
 * of the 32-bit index, intended for cases with millions of entries to
 * save. Entries are addressed directly by index; no search is needed.
 */
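
/*
 * Index decomposition sketch: with the 10-10-12 split defined below, the
 * 32-bit index 0x00ABCDEF is addressed as
 *
 *   global table slot: (0x00ABCDEF >> 22) & 0x3ff == 0x2
 *   middle table slot: (0x00ABCDEF >> 12) & 0x3ff == 0x2bc
 *   entry table slot:   0x00ABCDEF        & 0xfff == 0xdef
 */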

/* L3 table global table define. */
#define MLX5_L3T_GT_OFFSET 22
#define MLX5_L3T_GT_SIZE (1 << 10)
#define MLX5_L3T_GT_MASK (MLX5_L3T_GT_SIZE - 1)

/* L3 table middle table define. */
#define MLX5_L3T_MT_OFFSET 12
#define MLX5_L3T_MT_SIZE (1 << 10)
#define MLX5_L3T_MT_MASK (MLX5_L3T_MT_SIZE - 1)

/* L3 table entry table define. */
#define MLX5_L3T_ET_OFFSET 0
#define MLX5_L3T_ET_SIZE (1 << 12)
#define MLX5_L3T_ET_MASK (MLX5_L3T_ET_SIZE - 1)

/* L3 table type. */
enum mlx5_l3t_type {
	MLX5_L3T_TYPE_WORD = 0,
	MLX5_L3T_TYPE_DWORD,
	MLX5_L3T_TYPE_QWORD,
	MLX5_L3T_TYPE_PTR,
	MLX5_L3T_TYPE_MAX,
};

struct mlx5_indexed_pool;

/* Generic data struct. */
union mlx5_l3t_data {
	uint16_t word;
	uint32_t dword;
	uint64_t qword;
	void *ptr;
};

/* L3 level table data structure. */
struct mlx5_l3t_level_tbl {
	uint64_t ref_cnt; /* Table ref_cnt. */
	void *tbl[]; /* Table array. */
};

/* L3 word entry table data structure. */
struct mlx5_l3t_entry_word {
	uint32_t idx; /* Table index. */
	uint64_t ref_cnt; /* Table ref_cnt. */
	struct {
		uint16_t data;
		uint32_t ref_cnt;
	} entry[MLX5_L3T_ET_SIZE]; /* Entry array. */
} __rte_packed;

/* L3 double word entry table data structure. */
struct mlx5_l3t_entry_dword {
	uint32_t idx; /* Table index. */
	uint64_t ref_cnt; /* Table ref_cnt. */
	struct {
		uint32_t data;
		int32_t ref_cnt;
	} entry[MLX5_L3T_ET_SIZE]; /* Entry array. */
} __rte_packed;

/* L3 quad word entry table data structure. */
struct mlx5_l3t_entry_qword {
	uint32_t idx; /* Table index. */
	uint64_t ref_cnt; /* Table ref_cnt. */
	struct {
		uint64_t data;
		uint32_t ref_cnt;
	} entry[MLX5_L3T_ET_SIZE]; /* Entry array. */
} __rte_packed;

/* L3 pointer entry table data structure. */
struct mlx5_l3t_entry_ptr {
	uint32_t idx; /* Table index. */
	uint64_t ref_cnt; /* Table ref_cnt. */
	struct {
		void *data;
		uint32_t ref_cnt;
	} entry[MLX5_L3T_ET_SIZE]; /* Entry array. */
} __rte_packed;

/* L3 table data structure. */
struct mlx5_l3t_tbl {
	enum mlx5_l3t_type type; /* Table type. */
	struct mlx5_indexed_pool *eip;
	/* Table index pool handles. */
	struct mlx5_l3t_level_tbl *tbl; /* Global table index. */
	rte_spinlock_t sl; /* The table lock. */
};

/** Type of function that is used to handle the data before freeing. */
typedef int32_t (*mlx5_l3t_alloc_callback_fn)(void *ctx,
					      union mlx5_l3t_data *data);

/*
 * An indexed memory entry index is made up of a trunk index and the
 * offset of the entry within the trunk. Since the entry index is 32 bits,
 * a user who prefers small trunks can increase the macro below: more
 * bits then select the trunk, letting the pool hold more trunks while
 * still addressing a large number of allocated entries.
 */
#define TRUNK_IDX_BITS 16
#define TRUNK_MAX_IDX ((1 << TRUNK_IDX_BITS) - 1)
#define TRUNK_INVALID TRUNK_MAX_IDX
#define MLX5_IPOOL_DEFAULT_TRUNK_SIZE (1 << (28 - TRUNK_IDX_BITS))
#ifdef RTE_LIBRTE_MLX5_DEBUG
#define POOL_DEBUG 1
#endif
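
/*
 * Layout sketch (illustrative only; the precise index encoding is
 * internal to the pool implementation): with a fixed trunk size, an
 * entry index can be thought of as
 *
 *   idx = trunk_idx * trunk_size + offset_in_trunk
 *
 * so with the default trunk size of (1 << 12) entries, idx 0x5007 would
 * mean trunk 5, offset 7.
 */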

struct mlx5_indexed_pool_config {
	uint32_t size; /* Pool entry size. */
	uint32_t trunk_size:22;
	/*
	 * Trunk entry number. Must be a power of 2. It can grow if trunk
	 * growing is enabled: the entry number increases by left-shifting
	 * with grow_shift, and trunks with an index beyond grow_trunk
	 * keep the same entry number as the last grow trunk.
	 */
	uint32_t grow_trunk:4;
	/*
	 * Number of trunks with a growing entry number in the pool. Set
	 * it to 0 to make the pool work as a fixed trunk-entry pool. It
	 * takes effect only if grow_shift is not 0.
	 */
	uint32_t grow_shift:4;
	/*
	 * Shift value by which the trunk entry number grows, stopping
	 * after grow_trunk. It takes effect only if grow_trunk is not 0.
	 */
	uint32_t need_lock:1;
	/* Lock is needed for multiple thread usage. */
	uint32_t release_mem_en:1; /* Release a trunk when it is free. */
	uint32_t max_idx; /* The maximum index that can be allocated. */
	uint32_t per_core_cache;
	/*
	 * Cache entry number per core for performance. Should not be
	 * set together with release_mem_en.
	 */
	const char *type; /* Memory allocation type name. */
	void *(*malloc)(uint32_t flags, size_t size, unsigned int align,
			int socket);
	/* User defined memory allocator. */
	void (*free)(void *addr); /* User defined memory release. */
};
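
/*
 * Configuration sketch (field values are illustrative, not recommended
 * defaults): a lock-protected pool of 64-byte entries with trunk growth
 * enabled.
 *
 *   struct mlx5_indexed_pool_config cfg = {
 *           .size = 64,              // 64-byte entries.
 *           .trunk_size = 64,        // First trunk holds 64 entries.
 *           .grow_trunk = 3,         // Three trunks with growing size.
 *           .grow_shift = 2,         // Entry number grows by << 2.
 *           .need_lock = 1,          // Multi-thread safe.
 *           .release_mem_en = 1,     // Free a trunk once it is empty.
 *           .type = "example_pool",  // Hypothetical type name.
 *   };
 */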

struct mlx5_indexed_trunk {
	uint32_t idx; /* Trunk id. */
	uint32_t prev; /* Previous free trunk in free list. */
	uint32_t next; /* Next free trunk in free list. */
	uint32_t free; /* Free entries available. */
	struct rte_bitmap *bmp;
	uint8_t data[] __rte_cache_aligned; /* Entry data start. */
};

struct mlx5_indexed_cache {
	struct mlx5_indexed_trunk **trunks;
	volatile uint32_t n_trunk_valid; /* Trunks allocated. */
	uint32_t n_trunk; /* Trunk pointer array size. */
	uint32_t ref_cnt;
	uint32_t len;
	uint32_t idx[];
};

struct mlx5_ipool_per_lcore {
	struct mlx5_indexed_cache *lc;
	uint32_t len; /**< Current cache count. */
	uint32_t idx[]; /**< Cache objects. */
};

struct mlx5_indexed_pool {
	struct mlx5_indexed_pool_config cfg; /* Indexed pool configuration. */
	rte_spinlock_t rsz_lock; /* Pool lock for multiple thread usage. */
	rte_spinlock_t lcore_lock;
	/* Dim of trunk pointer array. */
	union {
		struct {
			uint32_t n_trunk_valid; /* Trunks allocated. */
			uint32_t n_trunk; /* Trunk pointer array size. */
			struct mlx5_indexed_trunk **trunks;
			uint32_t free_list; /* Index to first free trunk. */
		};
		struct {
			struct mlx5_indexed_cache *gc;
			/* Global cache. */
			struct mlx5_ipool_per_lcore *cache[RTE_MAX_LCORE + 1];
			/* Local cache. */
			struct rte_bitmap *ibmp;
			void *bmp_mem;
			/* Allocated objects bitmap. Used during flush. */
		};
	};
#ifdef POOL_DEBUG
	uint32_t n_entry;
	uint32_t trunk_new;
	uint32_t trunk_avail;
	uint32_t trunk_empty;
	uint32_t trunk_free;
#endif
	uint32_t grow_tbl[]; /* Save the index offset for the grow trunks. */
};

/**
 * Return logarithm of the nearest power of two above input value.
 *
 * @param v
 *   Input value.
 *
 * @return
 *   Logarithm of the nearest power of two above input value.
 */
static inline unsigned int
log2above(unsigned int v)
{
	unsigned int l;
	unsigned int r;

	for (l = 0, r = 0; (v >> 1); ++l, v >>= 1)
		r |= (v & 1);
	return l + r;
}
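
/*
 * Worked examples: log2above(1) == 0, log2above(4) == 2, and
 * log2above(5) == 3, since the nearest power of two at or above 5 is 8.
 */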

/********************************* indexed pool *************************/

/**
 * This function allocates a non-initialized memory entry from the pool.
 * In NUMA systems, the memory entry allocated resides on the same
 * NUMA socket as the core that calls this function.
 *
 * The memory entry is allocated from a memory trunk, with no alignment.
 *
 * @param pool
 *   Pointer to the indexed memory entry pool.
 *   No initialization required.
 * @param[out] idx
 *   Pointer to memory to save the allocated index.
 *   The memory index is always a positive value.
 * @return
 *   - Pointer to the allocated memory entry.
 *   - NULL on error. Not enough memory, or invalid arguments.
 */
void *mlx5_ipool_malloc(struct mlx5_indexed_pool *pool, uint32_t *idx);

/**
 * This function allocates a zero-initialized memory entry from the pool.
 * In NUMA systems, the memory entry allocated resides on the same
 * NUMA socket as the core that calls this function.
 *
 * The memory entry is allocated from a memory trunk, with no alignment.
 *
 * @param pool
 *   Pointer to the indexed memory pool.
 *   No initialization required.
 * @param[out] idx
 *   Pointer to memory to save the allocated index.
 *   The memory index is always a positive value.
 * @return
 *   - Pointer to the allocated memory entry.
 *   - NULL on error. Not enough memory, or invalid arguments.
 */
void *mlx5_ipool_zmalloc(struct mlx5_indexed_pool *pool, uint32_t *idx);

/**
 * This function frees an indexed memory entry back to the pool.
 * The caller has to make sure that the index was allocated from the
 * same pool.
 *
 * @param pool
 *   Pointer to the indexed memory pool.
 * @param idx
 *   Allocated memory entry index.
 */
void mlx5_ipool_free(struct mlx5_indexed_pool *pool, uint32_t idx);

/**
 * This function returns a pointer to the indexed memory entry for an
 * index. The caller has to make sure that the index is valid and was
 * allocated from the same pool.
 *
 * @param pool
 *   Pointer to the indexed memory pool.
 * @param idx
 *   Allocated memory index.
 * @return
 *   - Pointer to the indexed memory entry.
 */
void *mlx5_ipool_get(struct mlx5_indexed_pool *pool, uint32_t idx);

/**
 * This function creates an indexed memory pool.
 * The caller has to fill in the configuration accordingly.
 *
 * @param cfg
 *   Pointer to the indexed memory pool configuration.
 * @return
 *   - Pointer to the created indexed memory pool.
 *   - NULL on error.
 */
struct mlx5_indexed_pool *
mlx5_ipool_create(struct mlx5_indexed_pool_config *cfg);
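
/*
 * Lifecycle sketch (hypothetical entry type and config, for illustration
 * only):
 *
 *   struct mlx5_indexed_pool_config cfg = {
 *           .size = sizeof(struct my_entry),  // Hypothetical type.
 *           .trunk_size = 64,
 *           .need_lock = 1,
 *           .type = "my_entry_pool",          // Hypothetical name.
 *   };
 *   struct mlx5_indexed_pool *pool = mlx5_ipool_create(&cfg);
 *   uint32_t idx;
 *   struct my_entry *e = mlx5_ipool_zmalloc(pool, &idx);
 *
 *   ... use e, or look it up again later with mlx5_ipool_get(pool, idx) ...
 *
 *   mlx5_ipool_free(pool, idx);
 *   mlx5_ipool_destroy(pool);
 */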

/**
 * This function releases all resources of the pool.
 * The caller has to make sure that no index or memory allocated
 * from this pool is referenced anymore.
 *
 * @param pool
 *   Pointer to the indexed memory pool.
 * @return
 *   - non-zero value on error.
 *   - 0 on success.
 */
int mlx5_ipool_destroy(struct mlx5_indexed_pool *pool);

/**
 * This function dumps debug info of the pool.
 *
 * @param pool
 *   Pointer to the indexed memory pool.
 */
void mlx5_ipool_dump(struct mlx5_indexed_pool *pool);
/**
 * This function flushes all the cached indices back to the pool trunks.
 *
 * @param pool
 *   Pointer to the indexed memory pool handler.
 */
void mlx5_ipool_flush_cache(struct mlx5_indexed_pool *pool);

/**
 * This function gets the next available entry, starting from pos.
 *
 * @param pool
 *   Pointer to the indexed memory pool handler.
 * @param pos
 *   Pointer to the index position to start from.
 *
 * @return
 *   - Pointer to the next available entry.
 */
void *mlx5_ipool_get_next(struct mlx5_indexed_pool *pool, uint32_t *pos);

/**
 * This function allocates a new empty three-level table.
 *
 * @param type
 *   The l3t entry type: word, double word, quad word or pointer.
 *
 * @return
 *   - Pointer to the allocated l3t.
 *   - NULL on error. Not enough memory, or invalid arguments.
 */
struct mlx5_l3t_tbl *mlx5_l3t_create(enum mlx5_l3t_type type);

/**
 * This function destroys a three-level table.
 *
 * @param tbl
 *   Pointer to the l3t.
 */
void mlx5_l3t_destroy(struct mlx5_l3t_tbl *tbl);

/**
 * This function gets an index entry from the three-level table.
 *
 * @param tbl
 *   Pointer to the l3t.
 * @param idx
 *   Index of the entry.
 * @param data
 *   Pointer to the memory which saves the entry data.
 *   When the function returns 0, data contains the entry data read from
 *   the l3t.
 *   When the function returns -1, data is not modified.
 *
 * @return
 *   0 on success, -1 on error.
 */
int32_t mlx5_l3t_get_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx,
			   union mlx5_l3t_data *data);

/**
 * This function decreases the reference counter of an index entry in the
 * three-level table and clears the entry once the counter reaches 0.
 *
 * @param tbl
 *   Pointer to the l3t.
 * @param idx
 *   Index of the entry.
 *
 * @return
 *   The remaining reference count; 0 means the entry was cleared, -1 on
 *   error.
 */
int32_t mlx5_l3t_clear_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx);

/**
 * This function sets an index entry in the three-level table.
 * If the entry is already set, errno is set to EEXIST and the existing
 * data is written back to data.
 *
 * @param[in] tbl
 *   Pointer to the l3t.
 * @param[in] idx
 *   Index of the entry.
 * @param[in,out] data
 *   Pointer to the memory which contains the entry data to save to the
 *   l3t. If the entry is already set, it is filled with the existing
 *   data instead.
 *
 * @return
 *   0 on success, -1 on error.
 */
int32_t mlx5_l3t_set_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx,
			   union mlx5_l3t_data *data);
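
/*
 * Usage sketch (values are illustrative): store and retrieve a 64-bit
 * value under index 42 in a quad word table.
 *
 *   struct mlx5_l3t_tbl *tbl = mlx5_l3t_create(MLX5_L3T_TYPE_QWORD);
 *   union mlx5_l3t_data data = { .qword = 0x1234 };
 *
 *   if (tbl && !mlx5_l3t_set_entry(tbl, 42, &data)) {
 *           data.qword = 0;
 *           mlx5_l3t_get_entry(tbl, 42, &data); // data.qword == 0x1234
 *           mlx5_l3t_clear_entry(tbl, 42);
 *   }
 *   mlx5_l3t_destroy(tbl);
 */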

static inline void *
mlx5_l3t_get_next(struct mlx5_l3t_tbl *tbl, uint32_t *pos)
{
	struct mlx5_l3t_level_tbl *g_tbl, *m_tbl;
	uint32_t i, j, k, g_start, m_start, e_start;
	uint32_t idx = *pos;
	void *e_tbl;
	struct mlx5_l3t_entry_word *w_e_tbl;
	struct mlx5_l3t_entry_dword *dw_e_tbl;
	struct mlx5_l3t_entry_qword *qw_e_tbl;
	struct mlx5_l3t_entry_ptr *ptr_e_tbl;

	if (!tbl)
		return NULL;
	g_tbl = tbl->tbl;
	if (!g_tbl)
		return NULL;
	g_start = (idx >> MLX5_L3T_GT_OFFSET) & MLX5_L3T_GT_MASK;
	m_start = (idx >> MLX5_L3T_MT_OFFSET) & MLX5_L3T_MT_MASK;
	e_start = idx & MLX5_L3T_ET_MASK;
	for (i = g_start; i < MLX5_L3T_GT_SIZE; i++) {
		m_tbl = g_tbl->tbl[i];
		if (!m_tbl) {
			/* Jump to new table, reset the sub table start. */
			m_start = 0;
			e_start = 0;
			continue;
		}
		for (j = m_start; j < MLX5_L3T_MT_SIZE; j++) {
			if (!m_tbl->tbl[j]) {
				/*
				 * Jump to new table, reset the sub table
				 * start.
				 */
				e_start = 0;
				continue;
			}
			e_tbl = m_tbl->tbl[j];
			switch (tbl->type) {
			case MLX5_L3T_TYPE_WORD:
				w_e_tbl = (struct mlx5_l3t_entry_word *)e_tbl;
				for (k = e_start; k < MLX5_L3T_ET_SIZE; k++) {
					if (!w_e_tbl->entry[k].data)
						continue;
					*pos = (i << MLX5_L3T_GT_OFFSET) |
					       (j << MLX5_L3T_MT_OFFSET) | k;
					return (void *)&w_e_tbl->entry[k].data;
				}
				break;
			case MLX5_L3T_TYPE_DWORD:
				dw_e_tbl = (struct mlx5_l3t_entry_dword *)e_tbl;
				for (k = e_start; k < MLX5_L3T_ET_SIZE; k++) {
					if (!dw_e_tbl->entry[k].data)
						continue;
					*pos = (i << MLX5_L3T_GT_OFFSET) |
					       (j << MLX5_L3T_MT_OFFSET) | k;
					return (void *)&dw_e_tbl->entry[k].data;
				}
				break;
			case MLX5_L3T_TYPE_QWORD:
				qw_e_tbl = (struct mlx5_l3t_entry_qword *)e_tbl;
				for (k = e_start; k < MLX5_L3T_ET_SIZE; k++) {
					if (!qw_e_tbl->entry[k].data)
						continue;
					*pos = (i << MLX5_L3T_GT_OFFSET) |
					       (j << MLX5_L3T_MT_OFFSET) | k;
					return (void *)&qw_e_tbl->entry[k].data;
				}
				break;
			default:
				ptr_e_tbl = (struct mlx5_l3t_entry_ptr *)e_tbl;
				for (k = e_start; k < MLX5_L3T_ET_SIZE; k++) {
					if (!ptr_e_tbl->entry[k].data)
						continue;
					*pos = (i << MLX5_L3T_GT_OFFSET) |
					       (j << MLX5_L3T_MT_OFFSET) | k;
					return ptr_e_tbl->entry[k].data;
				}
				break;
			}
			/* Scan the next entry table from its beginning. */
			e_start = 0;
		}
		/* Scan the next middle table from its beginning. */
		m_start = 0;
	}
	return NULL;
}

/*
 * Macros for linked list based on indexed memory.
 * Example data structure:
 * struct Foo {
 *	ILIST_ENTRY(uint16_t) next;
 *	...
 * }
 */
#define ILIST_ENTRY(type) \
struct { \
	type prev; /* Index of previous element. */ \
	type next; /* Index of next element. */ \
}

#define ILIST_INSERT(pool, head, idx, elem, field) \
	do { \
		typeof(elem) peer; \
		MLX5_ASSERT((elem) && (idx)); \
		(elem)->field.next = *(head); \
		(elem)->field.prev = 0; \
		if (*(head)) { \
			(peer) = mlx5_ipool_get(pool, *(head)); \
			if (peer) \
				(peer)->field.prev = (idx); \
		} \
		*(head) = (idx); \
	} while (0)

#define ILIST_REMOVE(pool, head, idx, elem, field) \
	do { \
		typeof(elem) peer; \
		MLX5_ASSERT(elem); \
		MLX5_ASSERT(head); \
		if ((elem)->field.prev) { \
			(peer) = mlx5_ipool_get \
				 (pool, (elem)->field.prev); \
			if (peer) \
				(peer)->field.next = (elem)->field.next; \
		} \
		if ((elem)->field.next) { \
			(peer) = mlx5_ipool_get \
				 (pool, (elem)->field.next); \
			if (peer) \
				(peer)->field.prev = (elem)->field.prev; \
		} \
		if (*(head) == (idx)) \
			*(head) = (elem)->field.next; \
	} while (0)

#define ILIST_FOREACH(pool, head, idx, elem, field) \
	for ((idx) = (head), (elem) = \
	     (idx) ? mlx5_ipool_get(pool, (idx)) : NULL; (elem); \
	     idx = (elem)->field.next, (elem) = \
	     (idx) ? mlx5_ipool_get(pool, idx) : NULL)
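
/*
 * Usage sketch (hypothetical struct and pool, for illustration only):
 * insert an element into an index-linked list and walk it.
 *
 *   struct foo {
 *           ILIST_ENTRY(uint32_t) next;
 *           uint32_t value;
 *   };
 *   uint32_t head = 0, idx;
 *   struct foo *elem;
 *
 *   elem = mlx5_ipool_zmalloc(pool, &idx);  // Pool of struct foo entries.
 *   if (elem)
 *           ILIST_INSERT(pool, &head, idx, elem, next);
 *   ILIST_FOREACH(pool, head, idx, elem, next)
 *           do_something(elem);             // Hypothetical consumer.
 */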

/* Single index list. */
#define SILIST_ENTRY(type) \
struct { \
	type next; /* Index of next element. */ \
}

#define SILIST_INSERT(head, idx, elem, field) \
	do { \
		MLX5_ASSERT((elem) && (idx)); \
		(elem)->field.next = *(head); \
		*(head) = (idx); \
	} while (0)

#define SILIST_FOREACH(pool, head, idx, elem, field) \
	for ((idx) = (head), (elem) = \
	     (idx) ? mlx5_ipool_get(pool, (idx)) : NULL; (elem); \
	     idx = (elem)->field.next, (elem) = \
	     (idx) ? mlx5_ipool_get(pool, idx) : NULL)

#define MLX5_L3T_FOREACH(tbl, idx, entry) \
	for (idx = 0, (entry) = mlx5_l3t_get_next((tbl), &idx); \
	     (entry); \
	     idx++, (entry) = mlx5_l3t_get_next((tbl), &idx))

#define MLX5_IPOOL_FOREACH(ipool, idx, entry) \
	for ((idx) = 0, mlx5_ipool_flush_cache((ipool)), \
	     (entry) = mlx5_ipool_get_next((ipool), &idx); \
	     (entry); idx++, (entry) = mlx5_ipool_get_next((ipool), &idx))
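
/*
 * Iteration sketch (hypothetical entry type, for illustration only): the
 * per-core caches are flushed first, then every allocated entry is
 * visited in index order.
 *
 *   uint32_t idx;
 *   struct my_entry *entry;  // Hypothetical type.
 *
 *   MLX5_IPOOL_FOREACH(pool, idx, entry)
 *           dump_entry(idx, entry);  // Hypothetical consumer.
 */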

#endif /* RTE_PMD_MLX5_UTILS_H_ */