1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2010-2014 Intel Corporation. 3 * Copyright(c) 2016 6WIND S.A. 4 */ 5 6 #ifndef _RTE_MEMPOOL_H_ 7 #define _RTE_MEMPOOL_H_ 8 9 /** 10 * @file 11 * RTE Mempool. 12 * 13 * A memory pool is an allocator of fixed-size object. It is 14 * identified by its name, and uses a ring to store free objects. It 15 * provides some other optional services, like a per-core object 16 * cache, and an alignment helper to ensure that objects are padded 17 * to spread them equally on all RAM channels, ranks, and so on. 18 * 19 * Objects owned by a mempool should never be added in another 20 * mempool. When an object is freed using rte_mempool_put() or 21 * equivalent, the object data is not modified; the user can save some 22 * meta-data in the object data and retrieve them when allocating a 23 * new object. 24 * 25 * Note: the mempool implementation is not preemptible. An lcore must not be 26 * interrupted by another task that uses the same mempool (because it uses a 27 * ring which is not preemptible). Also, usual mempool functions like 28 * rte_mempool_get() or rte_mempool_put() are designed to be called from an EAL 29 * thread due to the internal per-lcore cache. Due to the lack of caching, 30 * rte_mempool_get() or rte_mempool_put() performance will suffer when called 31 * by unregistered non-EAL threads. Instead, unregistered non-EAL threads 32 * should call rte_mempool_generic_get() or rte_mempool_generic_put() with a 33 * user cache created with rte_mempool_cache_create(). 34 */ 35 36 #include <stdio.h> 37 #include <stdint.h> 38 #include <inttypes.h> 39 40 #include <rte_config.h> 41 #include <rte_spinlock.h> 42 #include <rte_debug.h> 43 #include <rte_lcore.h> 44 #include <rte_branch_prediction.h> 45 #include <rte_ring.h> 46 #include <rte_memcpy.h> 47 #include <rte_common.h> 48 49 #include "rte_mempool_trace_fp.h" 50 51 #ifdef __cplusplus 52 extern "C" { 53 #endif 54 55 #define RTE_MEMPOOL_HEADER_COOKIE1 0xbadbadbadadd2e55ULL /**< Header cookie. */ 56 #define RTE_MEMPOOL_HEADER_COOKIE2 0xf2eef2eedadd2e55ULL /**< Header cookie. */ 57 #define RTE_MEMPOOL_TRAILER_COOKIE 0xadd2e55badbadbadULL /**< Trailer cookie.*/ 58 59 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG 60 /** 61 * A structure that stores the mempool statistics (per-lcore). 62 * Note: Cache stats (put_cache_bulk/objs, get_cache_bulk/objs) are not 63 * captured since they can be calculated from other stats. 64 * For example: put_cache_objs = put_objs - put_common_pool_objs. 65 */ 66 struct rte_mempool_debug_stats { 67 uint64_t put_bulk; /**< Number of puts. */ 68 uint64_t put_objs; /**< Number of objects successfully put. */ 69 uint64_t put_common_pool_bulk; /**< Number of bulks enqueued in common pool. */ 70 uint64_t put_common_pool_objs; /**< Number of objects enqueued in common pool. */ 71 uint64_t get_common_pool_bulk; /**< Number of bulks dequeued from common pool. */ 72 uint64_t get_common_pool_objs; /**< Number of objects dequeued from common pool. */ 73 uint64_t get_success_bulk; /**< Successful allocation number. */ 74 uint64_t get_success_objs; /**< Objects successfully allocated. */ 75 uint64_t get_fail_bulk; /**< Failed allocation number. */ 76 uint64_t get_fail_objs; /**< Objects that failed to be allocated. */ 77 uint64_t get_success_blks; /**< Successful allocation number of contiguous blocks. */ 78 uint64_t get_fail_blks; /**< Failed allocation number of contiguous blocks. */ 79 } __rte_cache_aligned; 80 #endif 81 82 /** 83 * A structure that stores a per-core object cache. 
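 *
 * A user-owned cache of this type is what an unregistered non-EAL thread
 * should pass to rte_mempool_generic_get()/rte_mempool_generic_put(), since
 * such a thread has no per-lcore default cache. A minimal usage sketch
 * follows; it assumes a previously created mempool pointer ``mp`` and trims
 * most error handling:
 *
 * @code{.c}
 * struct rte_mempool_cache *cache;
 * void *objs[32];
 *
 * cache = rte_mempool_cache_create(32, SOCKET_ID_ANY);
 * if (cache == NULL)
 *     return;                             // cache allocation failed
 * if (rte_mempool_generic_get(mp, objs, 32, cache) == 0) {
 *     // ... use the 32 objects ...
 *     rte_mempool_generic_put(mp, objs, 32, cache);
 * }
 * rte_mempool_cache_flush(cache, mp);     // give cached objects back
 * rte_mempool_cache_free(cache);
 * @endcode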
 */
struct rte_mempool_cache {
	uint32_t size;	      /**< Size of the cache */
	uint32_t flushthresh; /**< Threshold before we flush excess elements */
	uint32_t len;	      /**< Current cache count */
	/*
	 * Cache is allocated to this size to allow it to overflow in certain
	 * cases to avoid needless emptying of cache.
	 */
	void *objs[RTE_MEMPOOL_CACHE_MAX_SIZE * 3]; /**< Cache objects */
} __rte_cache_aligned;

/**
 * A structure that stores the size of mempool elements.
 */
struct rte_mempool_objsz {
	uint32_t elt_size;     /**< Size of an element. */
	uint32_t header_size;  /**< Size of header (before elt). */
	uint32_t trailer_size; /**< Size of trailer (after elt). */
	uint32_t total_size;
	/**< Total size of an object (header + elt + trailer). */
};

/**< Maximum length of a memory pool's name. */
#define RTE_MEMPOOL_NAMESIZE (RTE_RING_NAMESIZE - \
			      sizeof(RTE_MEMPOOL_MZ_PREFIX) + 1)
#define RTE_MEMPOOL_MZ_PREFIX "MP_"

/* "MP_<name>" */
#define RTE_MEMPOOL_MZ_FORMAT	RTE_MEMPOOL_MZ_PREFIX "%s"

#define MEMPOOL_PG_SHIFT_MAX \
	RTE_DEPRECATED(MEMPOOL_PG_SHIFT_MAX) (sizeof(uintptr_t) * CHAR_BIT - 1)

/** Deprecated. Mempool over one chunk of physically contiguous memory */
#define MEMPOOL_PG_NUM_DEFAULT	RTE_DEPRECATED(MEMPOOL_PG_NUM_DEFAULT) 1

#ifndef RTE_MEMPOOL_ALIGN
/**
 * Alignment of elements inside mempool.
 */
#define RTE_MEMPOOL_ALIGN	RTE_CACHE_LINE_SIZE
#endif

#define RTE_MEMPOOL_ALIGN_MASK	(RTE_MEMPOOL_ALIGN - 1)

/**
 * Mempool object header structure
 *
 * Each object stored in a mempool is prefixed by this header structure.
 * It makes it possible to retrieve the mempool pointer from the object
 * and to iterate over all objects attached to a mempool. When debug is
 * enabled, a cookie is also added in this structure to help detect
 * corruptions and double-frees.
 */
struct rte_mempool_objhdr {
	RTE_STAILQ_ENTRY(rte_mempool_objhdr) next; /**< Next in list. */
	struct rte_mempool *mp;          /**< The mempool owning the object. */
	rte_iova_t iova;                 /**< IO address of the object. */
#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
	uint64_t cookie;                 /**< Debug cookie. */
#endif
};

/**
 * A list type for object headers.
 */
RTE_STAILQ_HEAD(rte_mempool_objhdr_list, rte_mempool_objhdr);

#ifdef RTE_LIBRTE_MEMPOOL_DEBUG

/**
 * Mempool object trailer structure
 *
 * In debug mode, each object stored in a mempool is suffixed by this
 * trailer structure, which contains a cookie used to detect memory
 * corruptions.
 */
struct rte_mempool_objtlr {
	uint64_t cookie;                 /**< Debug cookie. */
};

#endif

/**
 * A list of memory chunks where objects are stored.
 */
RTE_STAILQ_HEAD(rte_mempool_memhdr_list, rte_mempool_memhdr);

/**
 * Callback used to free a memory chunk.
 */
typedef void (rte_mempool_memchunk_free_cb_t)(struct rte_mempool_memhdr *memhdr,
	void *opaque);

/**
 * Mempool objects memory header structure
 *
 * The memory chunks where objects are stored. Each chunk is virtually
 * and physically contiguous.
 */
struct rte_mempool_memhdr {
	RTE_STAILQ_ENTRY(rte_mempool_memhdr) next; /**< Next in list.
*/ 186 struct rte_mempool *mp; /**< The mempool owning the chunk */ 187 void *addr; /**< Virtual address of the chunk */ 188 rte_iova_t iova; /**< IO address of the chunk */ 189 size_t len; /**< length of the chunk */ 190 rte_mempool_memchunk_free_cb_t *free_cb; /**< Free callback */ 191 void *opaque; /**< Argument passed to the free callback */ 192 }; 193 194 /** 195 * Additional information about the mempool 196 * 197 * The structure is cache-line aligned to avoid ABI breakages in 198 * a number of cases when something small is added. 199 */ 200 struct rte_mempool_info { 201 /** Number of objects in the contiguous block */ 202 unsigned int contig_block_size; 203 } __rte_cache_aligned; 204 205 /** 206 * The RTE mempool structure. 207 */ 208 struct rte_mempool { 209 char name[RTE_MEMPOOL_NAMESIZE]; /**< Name of mempool. */ 210 RTE_STD_C11 211 union { 212 void *pool_data; /**< Ring or pool to store objects. */ 213 uint64_t pool_id; /**< External mempool identifier. */ 214 }; 215 void *pool_config; /**< optional args for ops alloc. */ 216 const struct rte_memzone *mz; /**< Memzone where pool is alloc'd. */ 217 unsigned int flags; /**< Flags of the mempool. */ 218 int socket_id; /**< Socket id passed at create. */ 219 uint32_t size; /**< Max size of the mempool. */ 220 uint32_t cache_size; 221 /**< Size of per-lcore default local cache. */ 222 223 uint32_t elt_size; /**< Size of an element. */ 224 uint32_t header_size; /**< Size of header (before elt). */ 225 uint32_t trailer_size; /**< Size of trailer (after elt). */ 226 227 unsigned private_data_size; /**< Size of private data. */ 228 /** 229 * Index into rte_mempool_ops_table array of mempool ops 230 * structs, which contain callback function pointers. 231 * We're using an index here rather than pointers to the callbacks 232 * to facilitate any secondary processes that may want to use 233 * this mempool. 234 */ 235 int32_t ops_index; 236 237 struct rte_mempool_cache *local_cache; /**< Per-lcore local cache */ 238 239 uint32_t populated_size; /**< Number of populated objects. */ 240 struct rte_mempool_objhdr_list elt_list; /**< List of objects in pool */ 241 uint32_t nb_mem_chunks; /**< Number of memory chunks */ 242 struct rte_mempool_memhdr_list mem_list; /**< List of memory chunks */ 243 244 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG 245 /** Per-lcore statistics. */ 246 struct rte_mempool_debug_stats stats[RTE_MAX_LCORE]; 247 #endif 248 } __rte_cache_aligned; 249 250 /** Spreading among memory channels not required. */ 251 #define RTE_MEMPOOL_F_NO_SPREAD 0x0001 252 /** 253 * Backward compatibility synonym for RTE_MEMPOOL_F_NO_SPREAD. 254 * To be deprecated. 255 */ 256 #define MEMPOOL_F_NO_SPREAD RTE_MEMPOOL_F_NO_SPREAD 257 /** Do not align objects on cache lines. */ 258 #define RTE_MEMPOOL_F_NO_CACHE_ALIGN 0x0002 259 /** 260 * Backward compatibility synonym for RTE_MEMPOOL_F_NO_CACHE_ALIGN. 261 * To be deprecated. 262 */ 263 #define MEMPOOL_F_NO_CACHE_ALIGN RTE_MEMPOOL_F_NO_CACHE_ALIGN 264 /** Default put is "single-producer". */ 265 #define RTE_MEMPOOL_F_SP_PUT 0x0004 266 /** 267 * Backward compatibility synonym for RTE_MEMPOOL_F_SP_PUT. 268 * To be deprecated. 269 */ 270 #define MEMPOOL_F_SP_PUT RTE_MEMPOOL_F_SP_PUT 271 /** Default get is "single-consumer". */ 272 #define RTE_MEMPOOL_F_SC_GET 0x0008 273 /** 274 * Backward compatibility synonym for RTE_MEMPOOL_F_SC_GET. 275 * To be deprecated. 276 */ 277 #define MEMPOOL_F_SC_GET RTE_MEMPOOL_F_SC_GET 278 /** Internal: pool is created. 
*/ 279 #define RTE_MEMPOOL_F_POOL_CREATED 0x0010 280 /** Don't need IOVA contiguous objects. */ 281 #define RTE_MEMPOOL_F_NO_IOVA_CONTIG 0x0020 282 /** 283 * Backward compatibility synonym for RTE_MEMPOOL_F_NO_IOVA_CONTIG. 284 * To be deprecated. 285 */ 286 #define MEMPOOL_F_NO_IOVA_CONTIG RTE_MEMPOOL_F_NO_IOVA_CONTIG 287 /** Internal: no object from the pool can be used for device IO (DMA). */ 288 #define RTE_MEMPOOL_F_NON_IO 0x0040 289 290 /** 291 * This macro lists all the mempool flags an application may request. 292 */ 293 #define RTE_MEMPOOL_VALID_USER_FLAGS (RTE_MEMPOOL_F_NO_SPREAD \ 294 | RTE_MEMPOOL_F_NO_CACHE_ALIGN \ 295 | RTE_MEMPOOL_F_SP_PUT \ 296 | RTE_MEMPOOL_F_SC_GET \ 297 | RTE_MEMPOOL_F_NO_IOVA_CONTIG \ 298 ) 299 /** 300 * @internal When debug is enabled, store some statistics. 301 * 302 * @param mp 303 * Pointer to the memory pool. 304 * @param name 305 * Name of the statistics field to increment in the memory pool. 306 * @param n 307 * Number to add to the object-oriented statistics. 308 */ 309 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG 310 #define RTE_MEMPOOL_STAT_ADD(mp, name, n) do { \ 311 unsigned __lcore_id = rte_lcore_id(); \ 312 if (__lcore_id < RTE_MAX_LCORE) { \ 313 mp->stats[__lcore_id].name += n; \ 314 } \ 315 } while (0) 316 #else 317 #define RTE_MEMPOOL_STAT_ADD(mp, name, n) do {} while (0) 318 #endif 319 320 /** 321 * @internal Calculate the size of the mempool header. 322 * 323 * @param mp 324 * Pointer to the memory pool. 325 * @param cs 326 * Size of the per-lcore cache. 327 */ 328 #define RTE_MEMPOOL_HEADER_SIZE(mp, cs) \ 329 (sizeof(*(mp)) + (((cs) == 0) ? 0 : \ 330 (sizeof(struct rte_mempool_cache) * RTE_MAX_LCORE))) 331 332 /** Deprecated. Use RTE_MEMPOOL_HEADER_SIZE() for internal purposes only. */ 333 #define MEMPOOL_HEADER_SIZE(mp, cs) \ 334 RTE_DEPRECATED(MEMPOOL_HEADER_SIZE) RTE_MEMPOOL_HEADER_SIZE(mp, cs) 335 336 /* return the header of a mempool object (internal) */ 337 static inline struct rte_mempool_objhdr * 338 rte_mempool_get_header(void *obj) 339 { 340 return (struct rte_mempool_objhdr *)RTE_PTR_SUB(obj, 341 sizeof(struct rte_mempool_objhdr)); 342 } 343 344 /** 345 * Return a pointer to the mempool owning this object. 346 * 347 * @param obj 348 * An object that is owned by a pool. If this is not the case, 349 * the behavior is undefined. 350 * @return 351 * A pointer to the mempool structure. 352 */ 353 static inline struct rte_mempool *rte_mempool_from_obj(void *obj) 354 { 355 struct rte_mempool_objhdr *hdr = rte_mempool_get_header(obj); 356 return hdr->mp; 357 } 358 359 /* return the trailer of a mempool object (internal) */ 360 static inline struct rte_mempool_objtlr *rte_mempool_get_trailer(void *obj) 361 { 362 struct rte_mempool *mp = rte_mempool_from_obj(obj); 363 return (struct rte_mempool_objtlr *)RTE_PTR_ADD(obj, mp->elt_size); 364 } 365 366 /** 367 * @internal Check and update cookies or panic. 368 * 369 * @param mp 370 * Pointer to the memory pool. 371 * @param obj_table_const 372 * Pointer to a table of void * pointers (objects). 373 * @param n 374 * Index of object in object table. 
375 * @param free 376 * - 0: object is supposed to be allocated, mark it as free 377 * - 1: object is supposed to be free, mark it as allocated 378 * - 2: just check that cookie is valid (free or allocated) 379 */ 380 void rte_mempool_check_cookies(const struct rte_mempool *mp, 381 void * const *obj_table_const, unsigned n, int free); 382 383 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG 384 #define RTE_MEMPOOL_CHECK_COOKIES(mp, obj_table_const, n, free) \ 385 rte_mempool_check_cookies(mp, obj_table_const, n, free) 386 #else 387 #define RTE_MEMPOOL_CHECK_COOKIES(mp, obj_table_const, n, free) do {} while (0) 388 #endif /* RTE_LIBRTE_MEMPOOL_DEBUG */ 389 390 /** 391 * @internal Check contiguous object blocks and update cookies or panic. 392 * 393 * @param mp 394 * Pointer to the memory pool. 395 * @param first_obj_table_const 396 * Pointer to a table of void * pointers (first object of the contiguous 397 * object blocks). 398 * @param n 399 * Number of contiguous object blocks. 400 * @param free 401 * - 0: object is supposed to be allocated, mark it as free 402 * - 1: object is supposed to be free, mark it as allocated 403 * - 2: just check that cookie is valid (free or allocated) 404 */ 405 void rte_mempool_contig_blocks_check_cookies(const struct rte_mempool *mp, 406 void * const *first_obj_table_const, unsigned int n, int free); 407 408 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG 409 #define RTE_MEMPOOL_CONTIG_BLOCKS_CHECK_COOKIES(mp, first_obj_table_const, n, \ 410 free) \ 411 rte_mempool_contig_blocks_check_cookies(mp, first_obj_table_const, n, \ 412 free) 413 #else 414 #define RTE_MEMPOOL_CONTIG_BLOCKS_CHECK_COOKIES(mp, first_obj_table_const, n, \ 415 free) \ 416 do {} while (0) 417 #endif /* RTE_LIBRTE_MEMPOOL_DEBUG */ 418 419 #define RTE_MEMPOOL_OPS_NAMESIZE 32 /**< Max length of ops struct name. */ 420 421 /** 422 * Prototype for implementation specific data provisioning function. 423 * 424 * The function should provide the implementation specific memory for 425 * use by the other mempool ops functions in a given mempool ops struct. 426 * E.g. the default ops provides an instance of the rte_ring for this purpose. 427 * it will most likely point to a different type of data structure, and 428 * will be transparent to the application programmer. 429 * This function should set mp->pool_data. 430 */ 431 typedef int (*rte_mempool_alloc_t)(struct rte_mempool *mp); 432 433 /** 434 * Free the opaque private data pointed to by mp->pool_data pointer. 435 */ 436 typedef void (*rte_mempool_free_t)(struct rte_mempool *mp); 437 438 /** 439 * Enqueue an object into the external pool. 440 */ 441 typedef int (*rte_mempool_enqueue_t)(struct rte_mempool *mp, 442 void * const *obj_table, unsigned int n); 443 444 /** 445 * Dequeue an object from the external pool. 446 */ 447 typedef int (*rte_mempool_dequeue_t)(struct rte_mempool *mp, 448 void **obj_table, unsigned int n); 449 450 /** 451 * Dequeue a number of contiguous object blocks from the external pool. 452 */ 453 typedef int (*rte_mempool_dequeue_contig_blocks_t)(struct rte_mempool *mp, 454 void **first_obj_table, unsigned int n); 455 456 /** 457 * Return the number of available objects in the external pool. 458 */ 459 typedef unsigned (*rte_mempool_get_count)(const struct rte_mempool *mp); 460 461 /** 462 * Calculate memory size required to store given number of objects. 463 * 464 * If mempool objects are not required to be IOVA-contiguous 465 * (the flag RTE_MEMPOOL_F_NO_IOVA_CONTIG is set), min_chunk_size defines 466 * virtually contiguous chunk size. 
Otherwise, if mempool objects must 467 * be IOVA-contiguous (the flag RTE_MEMPOOL_F_NO_IOVA_CONTIG is clear), 468 * min_chunk_size defines IOVA-contiguous chunk size. 469 * 470 * @param[in] mp 471 * Pointer to the memory pool. 472 * @param[in] obj_num 473 * Number of objects. 474 * @param[in] pg_shift 475 * LOG2 of the physical pages size. If set to 0, ignore page boundaries. 476 * @param[out] min_chunk_size 477 * Location for minimum size of the memory chunk which may be used to 478 * store memory pool objects. 479 * @param[out] align 480 * Location for required memory chunk alignment. 481 * @return 482 * Required memory size. 483 */ 484 typedef ssize_t (*rte_mempool_calc_mem_size_t)(const struct rte_mempool *mp, 485 uint32_t obj_num, uint32_t pg_shift, 486 size_t *min_chunk_size, size_t *align); 487 488 /** 489 * @internal Helper to calculate memory size required to store given 490 * number of objects. 491 * 492 * This function is internal to mempool library and mempool drivers. 493 * 494 * If page boundaries may be ignored, it is just a product of total 495 * object size including header and trailer and number of objects. 496 * Otherwise, it is a number of pages required to store given number of 497 * objects without crossing page boundary. 498 * 499 * Note that if object size is bigger than page size, then it assumes 500 * that pages are grouped in subsets of physically continuous pages big 501 * enough to store at least one object. 502 * 503 * Minimum size of memory chunk is the total element size. 504 * Required memory chunk alignment is the cache line size. 505 * 506 * @param[in] mp 507 * A pointer to the mempool structure. 508 * @param[in] obj_num 509 * Number of objects to be added in mempool. 510 * @param[in] pg_shift 511 * LOG2 of the physical pages size. If set to 0, ignore page boundaries. 512 * @param[in] chunk_reserve 513 * Amount of memory that must be reserved at the beginning of each page, 514 * or at the beginning of the memory area if pg_shift is 0. 515 * @param[out] min_chunk_size 516 * Location for minimum size of the memory chunk which may be used to 517 * store memory pool objects. 518 * @param[out] align 519 * Location for required memory chunk alignment. 520 * @return 521 * Required memory size. 522 */ 523 ssize_t rte_mempool_op_calc_mem_size_helper(const struct rte_mempool *mp, 524 uint32_t obj_num, uint32_t pg_shift, size_t chunk_reserve, 525 size_t *min_chunk_size, size_t *align); 526 527 /** 528 * Default way to calculate memory size required to store given number of 529 * objects. 530 * 531 * Equivalent to rte_mempool_op_calc_mem_size_helper(mp, obj_num, pg_shift, 532 * 0, min_chunk_size, align). 533 */ 534 ssize_t rte_mempool_op_calc_mem_size_default(const struct rte_mempool *mp, 535 uint32_t obj_num, uint32_t pg_shift, 536 size_t *min_chunk_size, size_t *align); 537 538 /** 539 * Function to be called for each populated object. 540 * 541 * @param[in] mp 542 * A pointer to the mempool structure. 543 * @param[in] opaque 544 * An opaque pointer passed to iterator. 545 * @param[in] vaddr 546 * Object virtual address. 547 * @param[in] iova 548 * Input/output virtual address of the object or RTE_BAD_IOVA. 549 */ 550 typedef void (rte_mempool_populate_obj_cb_t)(struct rte_mempool *mp, 551 void *opaque, void *vaddr, rte_iova_t iova); 552 553 /** 554 * Populate memory pool objects using provided memory chunk. 555 * 556 * Populated objects should be enqueued to the pool, e.g. using 557 * rte_mempool_ops_enqueue_bulk(). 
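 *
 * As an illustration only (not a callback shipped by the library), a driver
 * whose hardware has no special placement requirement could implement this
 * operation by delegating to the generic helper; the function name
 * my_pool_populate below is hypothetical:
 *
 * @code{.c}
 * static int
 * my_pool_populate(struct rte_mempool *mp, unsigned int max_objs,
 *                  void *vaddr, rte_iova_t iova, size_t len,
 *                  rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg)
 * {
 *     // Slice objects out of the chunk one by one, without crossing
 *     // page boundaries, and report each one through obj_cb().
 *     return rte_mempool_op_populate_helper(mp, 0, max_objs,
 *             vaddr, iova, len, obj_cb, obj_cb_arg);
 * }
 * @endcode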
558 * 559 * If the given IO address is unknown (iova = RTE_BAD_IOVA), 560 * the chunk doesn't need to be physically contiguous (only virtually), 561 * and allocated objects may span two pages. 562 * 563 * @param[in] mp 564 * A pointer to the mempool structure. 565 * @param[in] max_objs 566 * Maximum number of objects to be populated. 567 * @param[in] vaddr 568 * The virtual address of memory that should be used to store objects. 569 * @param[in] iova 570 * The IO address 571 * @param[in] len 572 * The length of memory in bytes. 573 * @param[in] obj_cb 574 * Callback function to be executed for each populated object. 575 * @param[in] obj_cb_arg 576 * An opaque pointer passed to the callback function. 577 * @return 578 * The number of objects added on success. 579 * On error, no objects are populated and a negative errno is returned. 580 */ 581 typedef int (*rte_mempool_populate_t)(struct rte_mempool *mp, 582 unsigned int max_objs, 583 void *vaddr, rte_iova_t iova, size_t len, 584 rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg); 585 586 /** 587 * Align objects on addresses multiple of total_elt_sz. 588 */ 589 #define RTE_MEMPOOL_POPULATE_F_ALIGN_OBJ 0x0001 590 591 /** 592 * @internal Helper to populate memory pool object using provided memory 593 * chunk: just slice objects one by one, taking care of not 594 * crossing page boundaries. 595 * 596 * If RTE_MEMPOOL_POPULATE_F_ALIGN_OBJ is set in flags, the addresses 597 * of object headers will be aligned on a multiple of total_elt_sz. 598 * This feature is used by octeontx hardware. 599 * 600 * This function is internal to mempool library and mempool drivers. 601 * 602 * @param[in] mp 603 * A pointer to the mempool structure. 604 * @param[in] flags 605 * Logical OR of following flags: 606 * - RTE_MEMPOOL_POPULATE_F_ALIGN_OBJ: align objects on addresses 607 * multiple of total_elt_sz. 608 * @param[in] max_objs 609 * Maximum number of objects to be added in mempool. 610 * @param[in] vaddr 611 * The virtual address of memory that should be used to store objects. 612 * @param[in] iova 613 * The IO address corresponding to vaddr, or RTE_BAD_IOVA. 614 * @param[in] len 615 * The length of memory in bytes. 616 * @param[in] obj_cb 617 * Callback function to be executed for each populated object. 618 * @param[in] obj_cb_arg 619 * An opaque pointer passed to the callback function. 620 * @return 621 * The number of objects added in mempool. 622 */ 623 int rte_mempool_op_populate_helper(struct rte_mempool *mp, 624 unsigned int flags, unsigned int max_objs, 625 void *vaddr, rte_iova_t iova, size_t len, 626 rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg); 627 628 /** 629 * Default way to populate memory pool object using provided memory chunk. 630 * 631 * Equivalent to rte_mempool_op_populate_helper(mp, 0, max_objs, vaddr, iova, 632 * len, obj_cb, obj_cb_arg). 633 */ 634 int rte_mempool_op_populate_default(struct rte_mempool *mp, 635 unsigned int max_objs, 636 void *vaddr, rte_iova_t iova, size_t len, 637 rte_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg); 638 639 /** 640 * Get some additional information about a mempool. 641 */ 642 typedef int (*rte_mempool_get_info_t)(const struct rte_mempool *mp, 643 struct rte_mempool_info *info); 644 645 646 /** Structure defining mempool operations structure */ 647 struct rte_mempool_ops { 648 char name[RTE_MEMPOOL_OPS_NAMESIZE]; /**< Name of mempool ops struct. */ 649 rte_mempool_alloc_t alloc; /**< Allocate private data. */ 650 rte_mempool_free_t free; /**< Free the external pool. 
*/ 651 rte_mempool_enqueue_t enqueue; /**< Enqueue an object. */ 652 rte_mempool_dequeue_t dequeue; /**< Dequeue an object. */ 653 rte_mempool_get_count get_count; /**< Get qty of available objs. */ 654 /** 655 * Optional callback to calculate memory size required to 656 * store specified number of objects. 657 */ 658 rte_mempool_calc_mem_size_t calc_mem_size; 659 /** 660 * Optional callback to populate mempool objects using 661 * provided memory chunk. 662 */ 663 rte_mempool_populate_t populate; 664 /** 665 * Get mempool info 666 */ 667 rte_mempool_get_info_t get_info; 668 /** 669 * Dequeue a number of contiguous object blocks. 670 */ 671 rte_mempool_dequeue_contig_blocks_t dequeue_contig_blocks; 672 } __rte_cache_aligned; 673 674 #define RTE_MEMPOOL_MAX_OPS_IDX 16 /**< Max registered ops structs */ 675 676 /** 677 * Structure storing the table of registered ops structs, each of which contain 678 * the function pointers for the mempool ops functions. 679 * Each process has its own storage for this ops struct array so that 680 * the mempools can be shared across primary and secondary processes. 681 * The indices used to access the array are valid across processes, whereas 682 * any function pointers stored directly in the mempool struct would not be. 683 * This results in us simply having "ops_index" in the mempool struct. 684 */ 685 struct rte_mempool_ops_table { 686 rte_spinlock_t sl; /**< Spinlock for add/delete. */ 687 uint32_t num_ops; /**< Number of used ops structs in the table. */ 688 /** 689 * Storage for all possible ops structs. 690 */ 691 struct rte_mempool_ops ops[RTE_MEMPOOL_MAX_OPS_IDX]; 692 } __rte_cache_aligned; 693 694 /** Array of registered ops structs. */ 695 extern struct rte_mempool_ops_table rte_mempool_ops_table; 696 697 /** 698 * @internal Get the mempool ops struct from its index. 699 * 700 * @param ops_index 701 * The index of the ops struct in the ops struct table. It must be a valid 702 * index: (0 <= idx < num_ops). 703 * @return 704 * The pointer to the ops struct in the table. 705 */ 706 static inline struct rte_mempool_ops * 707 rte_mempool_get_ops(int ops_index) 708 { 709 RTE_VERIFY((ops_index >= 0) && (ops_index < RTE_MEMPOOL_MAX_OPS_IDX)); 710 711 return &rte_mempool_ops_table.ops[ops_index]; 712 } 713 714 /** 715 * @internal Wrapper for mempool_ops alloc callback. 716 * 717 * @param mp 718 * Pointer to the memory pool. 719 * @return 720 * - 0: Success; successfully allocated mempool pool_data. 721 * - <0: Error; code of alloc function. 722 */ 723 int 724 rte_mempool_ops_alloc(struct rte_mempool *mp); 725 726 /** 727 * @internal Wrapper for mempool_ops dequeue callback. 728 * 729 * @param mp 730 * Pointer to the memory pool. 731 * @param obj_table 732 * Pointer to a table of void * pointers (objects). 733 * @param n 734 * Number of objects to get. 735 * @return 736 * - 0: Success; got n objects. 737 * - <0: Error; code of dequeue function. 738 */ 739 static inline int 740 rte_mempool_ops_dequeue_bulk(struct rte_mempool *mp, 741 void **obj_table, unsigned n) 742 { 743 struct rte_mempool_ops *ops; 744 int ret; 745 746 rte_mempool_trace_ops_dequeue_bulk(mp, obj_table, n); 747 ops = rte_mempool_get_ops(mp->ops_index); 748 ret = ops->dequeue(mp, obj_table, n); 749 if (ret == 0) { 750 RTE_MEMPOOL_STAT_ADD(mp, get_common_pool_bulk, 1); 751 RTE_MEMPOOL_STAT_ADD(mp, get_common_pool_objs, n); 752 } 753 return ret; 754 } 755 756 /** 757 * @internal Wrapper for mempool_ops dequeue_contig_blocks callback. 758 * 759 * @param[in] mp 760 * Pointer to the memory pool. 
761 * @param[out] first_obj_table 762 * Pointer to a table of void * pointers (first objects). 763 * @param[in] n 764 * Number of blocks to get. 765 * @return 766 * - 0: Success; got n objects. 767 * - <0: Error; code of dequeue function. 768 */ 769 static inline int 770 rte_mempool_ops_dequeue_contig_blocks(struct rte_mempool *mp, 771 void **first_obj_table, unsigned int n) 772 { 773 struct rte_mempool_ops *ops; 774 775 ops = rte_mempool_get_ops(mp->ops_index); 776 RTE_ASSERT(ops->dequeue_contig_blocks != NULL); 777 rte_mempool_trace_ops_dequeue_contig_blocks(mp, first_obj_table, n); 778 return ops->dequeue_contig_blocks(mp, first_obj_table, n); 779 } 780 781 /** 782 * @internal wrapper for mempool_ops enqueue callback. 783 * 784 * @param mp 785 * Pointer to the memory pool. 786 * @param obj_table 787 * Pointer to a table of void * pointers (objects). 788 * @param n 789 * Number of objects to put. 790 * @return 791 * - 0: Success; n objects supplied. 792 * - <0: Error; code of enqueue function. 793 */ 794 static inline int 795 rte_mempool_ops_enqueue_bulk(struct rte_mempool *mp, void * const *obj_table, 796 unsigned n) 797 { 798 struct rte_mempool_ops *ops; 799 800 RTE_MEMPOOL_STAT_ADD(mp, put_common_pool_bulk, 1); 801 RTE_MEMPOOL_STAT_ADD(mp, put_common_pool_objs, n); 802 rte_mempool_trace_ops_enqueue_bulk(mp, obj_table, n); 803 ops = rte_mempool_get_ops(mp->ops_index); 804 return ops->enqueue(mp, obj_table, n); 805 } 806 807 /** 808 * @internal wrapper for mempool_ops get_count callback. 809 * 810 * @param mp 811 * Pointer to the memory pool. 812 * @return 813 * The number of available objects in the external pool. 814 */ 815 unsigned 816 rte_mempool_ops_get_count(const struct rte_mempool *mp); 817 818 /** 819 * @internal wrapper for mempool_ops calc_mem_size callback. 820 * API to calculate size of memory required to store specified number of 821 * object. 822 * 823 * @param[in] mp 824 * Pointer to the memory pool. 825 * @param[in] obj_num 826 * Number of objects. 827 * @param[in] pg_shift 828 * LOG2 of the physical pages size. If set to 0, ignore page boundaries. 829 * @param[out] min_chunk_size 830 * Location for minimum size of the memory chunk which may be used to 831 * store memory pool objects. 832 * @param[out] align 833 * Location for required memory chunk alignment. 834 * @return 835 * Required memory size aligned at page boundary. 836 */ 837 ssize_t rte_mempool_ops_calc_mem_size(const struct rte_mempool *mp, 838 uint32_t obj_num, uint32_t pg_shift, 839 size_t *min_chunk_size, size_t *align); 840 841 /** 842 * @internal wrapper for mempool_ops populate callback. 843 * 844 * Populate memory pool objects using provided memory chunk. 845 * 846 * @param[in] mp 847 * A pointer to the mempool structure. 848 * @param[in] max_objs 849 * Maximum number of objects to be populated. 850 * @param[in] vaddr 851 * The virtual address of memory that should be used to store objects. 852 * @param[in] iova 853 * The IO address 854 * @param[in] len 855 * The length of memory in bytes. 856 * @param[in] obj_cb 857 * Callback function to be executed for each populated object. 858 * @param[in] obj_cb_arg 859 * An opaque pointer passed to the callback function. 860 * @return 861 * The number of objects added on success. 862 * On error, no objects are populated and a negative errno is returned. 
863 */ 864 int rte_mempool_ops_populate(struct rte_mempool *mp, unsigned int max_objs, 865 void *vaddr, rte_iova_t iova, size_t len, 866 rte_mempool_populate_obj_cb_t *obj_cb, 867 void *obj_cb_arg); 868 869 /** 870 * Wrapper for mempool_ops get_info callback. 871 * 872 * @param[in] mp 873 * Pointer to the memory pool. 874 * @param[out] info 875 * Pointer to the rte_mempool_info structure 876 * @return 877 * - 0: Success; The mempool driver supports retrieving supplementary 878 * mempool information 879 * - -ENOTSUP - doesn't support get_info ops (valid case). 880 */ 881 int rte_mempool_ops_get_info(const struct rte_mempool *mp, 882 struct rte_mempool_info *info); 883 884 /** 885 * @internal wrapper for mempool_ops free callback. 886 * 887 * @param mp 888 * Pointer to the memory pool. 889 */ 890 void 891 rte_mempool_ops_free(struct rte_mempool *mp); 892 893 /** 894 * Set the ops of a mempool. 895 * 896 * This can only be done on a mempool that is not populated, i.e. just after 897 * a call to rte_mempool_create_empty(). 898 * 899 * @param mp 900 * Pointer to the memory pool. 901 * @param name 902 * Name of the ops structure to use for this mempool. 903 * @param pool_config 904 * Opaque data that can be passed by the application to the ops functions. 905 * @return 906 * - 0: Success; the mempool is now using the requested ops functions. 907 * - -EINVAL - Invalid ops struct name provided. 908 * - -EEXIST - mempool already has an ops struct assigned. 909 */ 910 int 911 rte_mempool_set_ops_byname(struct rte_mempool *mp, const char *name, 912 void *pool_config); 913 914 /** 915 * Register mempool operations. 916 * 917 * @param ops 918 * Pointer to an ops structure to register. 919 * @return 920 * - >=0: Success; return the index of the ops struct in the table. 921 * - -EINVAL - some missing callbacks while registering ops struct. 922 * - -ENOSPC - the maximum number of ops structs has been reached. 923 */ 924 int rte_mempool_register_ops(const struct rte_mempool_ops *ops); 925 926 /** 927 * Macro to statically register the ops of a mempool handler. 928 * Note that the rte_mempool_register_ops fails silently here when 929 * more than RTE_MEMPOOL_MAX_OPS_IDX is registered. 930 */ 931 #define RTE_MEMPOOL_REGISTER_OPS(ops) \ 932 RTE_INIT(mp_hdlr_init_##ops) \ 933 { \ 934 rte_mempool_register_ops(&ops); \ 935 } 936 937 /** Deprecated. Use RTE_MEMPOOL_REGISTER_OPS() instead. */ 938 #define MEMPOOL_REGISTER_OPS(ops) \ 939 RTE_DEPRECATED(MEMPOOL_REGISTER_OPS) RTE_MEMPOOL_REGISTER_OPS(ops) 940 941 /** 942 * An object callback function for mempool. 943 * 944 * Used by rte_mempool_create() and rte_mempool_obj_iter(). 945 */ 946 typedef void (rte_mempool_obj_cb_t)(struct rte_mempool *mp, 947 void *opaque, void *obj, unsigned obj_idx); 948 typedef rte_mempool_obj_cb_t rte_mempool_obj_ctor_t; /* compat */ 949 950 /** 951 * A memory callback function for mempool. 952 * 953 * Used by rte_mempool_mem_iter(). 954 */ 955 typedef void (rte_mempool_mem_cb_t)(struct rte_mempool *mp, 956 void *opaque, struct rte_mempool_memhdr *memhdr, 957 unsigned mem_idx); 958 959 /** 960 * A mempool constructor callback function. 961 * 962 * Arguments are the mempool and the opaque pointer given by the user in 963 * rte_mempool_create(). 964 */ 965 typedef void (rte_mempool_ctor_t)(struct rte_mempool *, void *); 966 967 /** 968 * Create a new mempool named *name* in memory. 969 * 970 * This function uses ``rte_memzone_reserve()`` to allocate memory. The 971 * pool contains n elements of elt_size. Its size is set to n. 
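 *
 * For example, the following sketch creates a pool of 1023 fixed-size
 * buffers. The element count, element size and cache size are illustrative
 * values only, chosen to respect the constraints described below:
 *
 * @code{.c}
 * struct rte_mempool *mp;
 *
 * mp = rte_mempool_create("example_pool",
 *                         1023,           // n: 2^10 - 1 elements
 *                         2048,           // elt_size in bytes
 *                         31,             // cache_size (divides n evenly)
 *                         0,              // no private data
 *                         NULL, NULL,     // no mempool constructor
 *                         NULL, NULL,     // no per-object constructor
 *                         SOCKET_ID_ANY,
 *                         0);             // default flags
 * if (mp == NULL)
 *     rte_panic("cannot create mempool: %s\n", rte_strerror(rte_errno));
 * @endcode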
972 * 973 * @param name 974 * The name of the mempool. 975 * @param n 976 * The number of elements in the mempool. The optimum size (in terms of 977 * memory usage) for a mempool is when n is a power of two minus one: 978 * n = (2^q - 1). 979 * @param elt_size 980 * The size of each element. 981 * @param cache_size 982 * If cache_size is non-zero, the rte_mempool library will try to 983 * limit the accesses to the common lockless pool, by maintaining a 984 * per-lcore object cache. This argument must be lower or equal to 985 * RTE_MEMPOOL_CACHE_MAX_SIZE and n / 1.5. It is advised to choose 986 * cache_size to have "n modulo cache_size == 0": if this is 987 * not the case, some elements will always stay in the pool and will 988 * never be used. The access to the per-lcore table is of course 989 * faster than the multi-producer/consumer pool. The cache can be 990 * disabled if the cache_size argument is set to 0; it can be useful to 991 * avoid losing objects in cache. 992 * @param private_data_size 993 * The size of the private data appended after the mempool 994 * structure. This is useful for storing some private data after the 995 * mempool structure, as is done for rte_mbuf_pool for example. 996 * @param mp_init 997 * A function pointer that is called for initialization of the pool, 998 * before object initialization. The user can initialize the private 999 * data in this function if needed. This parameter can be NULL if 1000 * not needed. 1001 * @param mp_init_arg 1002 * An opaque pointer to data that can be used in the mempool 1003 * constructor function. 1004 * @param obj_init 1005 * A function pointer that is called for each object at 1006 * initialization of the pool. The user can set some meta data in 1007 * objects if needed. This parameter can be NULL if not needed. 1008 * The obj_init() function takes the mempool pointer, the init_arg, 1009 * the object pointer and the object number as parameters. 1010 * @param obj_init_arg 1011 * An opaque pointer to data that can be used as an argument for 1012 * each call to the object constructor function. 1013 * @param socket_id 1014 * The *socket_id* argument is the socket identifier in the case of 1015 * NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA 1016 * constraint for the reserved zone. 1017 * @param flags 1018 * The *flags* arguments is an OR of following flags: 1019 * - RTE_MEMPOOL_F_NO_SPREAD: By default, objects addresses are spread 1020 * between channels in RAM: the pool allocator will add padding 1021 * between objects depending on the hardware configuration. See 1022 * Memory alignment constraints for details. If this flag is set, 1023 * the allocator will just align them to a cache line. 1024 * - RTE_MEMPOOL_F_NO_CACHE_ALIGN: By default, the returned objects are 1025 * cache-aligned. This flag removes this constraint, and no 1026 * padding will be present between objects. This flag implies 1027 * RTE_MEMPOOL_F_NO_SPREAD. 1028 * - RTE_MEMPOOL_F_SP_PUT: If this flag is set, the default behavior 1029 * when using rte_mempool_put() or rte_mempool_put_bulk() is 1030 * "single-producer". Otherwise, it is "multi-producers". 1031 * - RTE_MEMPOOL_F_SC_GET: If this flag is set, the default behavior 1032 * when using rte_mempool_get() or rte_mempool_get_bulk() is 1033 * "single-consumer". Otherwise, it is "multi-consumers". 1034 * - RTE_MEMPOOL_F_NO_IOVA_CONTIG: If set, allocated objects won't 1035 * necessarily be contiguous in IO memory. 1036 * @return 1037 * The pointer to the new allocated mempool, on success. 
NULL on error 1038 * with rte_errno set appropriately. Possible rte_errno values include: 1039 * - E_RTE_NO_CONFIG - function could not get pointer to rte_config structure 1040 * - E_RTE_SECONDARY - function was called from a secondary process instance 1041 * - EINVAL - cache size provided is too large or an unknown flag was passed 1042 * - ENOSPC - the maximum number of memzones has already been allocated 1043 * - EEXIST - a memzone with the same name already exists 1044 * - ENOMEM - no appropriate memory area found in which to create memzone 1045 */ 1046 struct rte_mempool * 1047 rte_mempool_create(const char *name, unsigned n, unsigned elt_size, 1048 unsigned cache_size, unsigned private_data_size, 1049 rte_mempool_ctor_t *mp_init, void *mp_init_arg, 1050 rte_mempool_obj_cb_t *obj_init, void *obj_init_arg, 1051 int socket_id, unsigned flags); 1052 1053 /** 1054 * Create an empty mempool 1055 * 1056 * The mempool is allocated and initialized, but it is not populated: no 1057 * memory is allocated for the mempool elements. The user has to call 1058 * rte_mempool_populate_*() to add memory chunks to the pool. Once 1059 * populated, the user may also want to initialize each object with 1060 * rte_mempool_obj_iter(). 1061 * 1062 * @param name 1063 * The name of the mempool. 1064 * @param n 1065 * The maximum number of elements that can be added in the mempool. 1066 * The optimum size (in terms of memory usage) for a mempool is when n 1067 * is a power of two minus one: n = (2^q - 1). 1068 * @param elt_size 1069 * The size of each element. 1070 * @param cache_size 1071 * Size of the cache. See rte_mempool_create() for details. 1072 * @param private_data_size 1073 * The size of the private data appended after the mempool 1074 * structure. This is useful for storing some private data after the 1075 * mempool structure, as is done for rte_mbuf_pool for example. 1076 * @param socket_id 1077 * The *socket_id* argument is the socket identifier in the case of 1078 * NUMA. The value can be *SOCKET_ID_ANY* if there is no NUMA 1079 * constraint for the reserved zone. 1080 * @param flags 1081 * Flags controlling the behavior of the mempool. See 1082 * rte_mempool_create() for details. 1083 * @return 1084 * The pointer to the new allocated mempool, on success. NULL on error 1085 * with rte_errno set appropriately. See rte_mempool_create() for details. 1086 */ 1087 struct rte_mempool * 1088 rte_mempool_create_empty(const char *name, unsigned n, unsigned elt_size, 1089 unsigned cache_size, unsigned private_data_size, 1090 int socket_id, unsigned flags); 1091 /** 1092 * Free a mempool 1093 * 1094 * Unlink the mempool from global list, free the memory chunks, and all 1095 * memory referenced by the mempool. The objects must not be used by 1096 * other cores as they will be freed. 1097 * 1098 * @param mp 1099 * A pointer to the mempool structure. 1100 * If NULL then, the function does nothing. 1101 */ 1102 void 1103 rte_mempool_free(struct rte_mempool *mp); 1104 1105 /** 1106 * Add physically contiguous memory for objects in the pool at init 1107 * 1108 * Add a virtually and physically contiguous memory chunk in the pool 1109 * where objects can be instantiated. 1110 * 1111 * If the given IO address is unknown (iova = RTE_BAD_IOVA), 1112 * the chunk doesn't need to be physically contiguous (only virtually), 1113 * and allocated objects may span two pages. 1114 * 1115 * @param mp 1116 * A pointer to the mempool structure. 
 * @param vaddr
 *   The virtual address of memory that should be used to store objects.
 * @param iova
 *   The IO address corresponding to vaddr, or RTE_BAD_IOVA if unknown.
 * @param len
 *   The length of memory in bytes.
 * @param free_cb
 *   The callback used to free this chunk when destroying the mempool.
 * @param opaque
 *   An opaque argument passed to free_cb.
 * @return
 *   The number of objects added on success (strictly positive).
 *   On error, the chunk is not added to the memory list of the
 *   mempool, and one of the following codes is returned:
 *     (0): not enough room in chunk for one object.
 *     (-ENOSPC): mempool is already populated.
 *     (-ENOMEM): allocation failure.
 */
int rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
	rte_iova_t iova, size_t len, rte_mempool_memchunk_free_cb_t *free_cb,
	void *opaque);

/**
 * Add virtually contiguous memory for objects in the pool at init
 *
 * Add a virtually contiguous memory chunk in the pool where objects can
 * be instantiated.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @param addr
 *   The virtual address of memory that should be used to store objects.
 * @param len
 *   The length of memory in bytes.
 * @param pg_sz
 *   The size of memory pages in this virtual area.
 * @param free_cb
 *   The callback used to free this chunk when destroying the mempool.
 * @param opaque
 *   An opaque argument passed to free_cb.
 * @return
 *   The number of objects added on success (strictly positive).
 *   On error, the chunk is not added to the memory list of the
 *   mempool, and one of the following codes is returned:
 *     (0): not enough room in chunk for one object.
 *     (-ENOSPC): mempool is already populated.
 *     (-ENOMEM): allocation failure.
 */
int
rte_mempool_populate_virt(struct rte_mempool *mp, char *addr,
	size_t len, size_t pg_sz, rte_mempool_memchunk_free_cb_t *free_cb,
	void *opaque);

/**
 * Add memory for objects in the pool at init
 *
 * This is the default function used by rte_mempool_create() to populate
 * the mempool. It adds memory allocated using rte_memzone_reserve().
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @return
 *   The number of objects added on success.
 *   On error, the chunk is not added to the memory list of the
 *   mempool and a negative errno is returned.
 */
int rte_mempool_populate_default(struct rte_mempool *mp);

/**
 * Add memory from anonymous mapping for objects in the pool at init
 *
 * This function mmaps an anonymous memory zone that is locked in
 * memory to store the objects of the mempool.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @return
 *   The number of objects added on success.
 *   On error, 0 is returned, rte_errno is set, and the chunk is not added to
 *   the memory list of the mempool.
 */
int rte_mempool_populate_anon(struct rte_mempool *mp);

/**
 * Call a function for each mempool element
 *
 * Iterate across all objects attached to a rte_mempool and call the
 * callback function on each of them.
 *
 * @param mp
 *   A pointer to an initialized mempool.
 * @param obj_cb
 *   A function pointer that is called for each object.
 * @param obj_cb_arg
 *   An opaque pointer passed to the callback function.
 * @return
 *   Number of objects iterated.
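 *
 * For example, a sketch that zeroes every element right after population;
 * the callback name obj_zero_cb is illustrative, not part of the API:
 *
 * @code{.c}
 * static void
 * obj_zero_cb(struct rte_mempool *mp, void *opaque __rte_unused,
 *             void *obj, unsigned obj_idx __rte_unused)
 * {
 *     memset(obj, 0, mp->elt_size); // element body, without header/trailer
 * }
 *
 * // ... after rte_mempool_populate_default(mp) or equivalent:
 * uint32_t n = rte_mempool_obj_iter(mp, obj_zero_cb, NULL);
 * @endcode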
1214 */ 1215 uint32_t rte_mempool_obj_iter(struct rte_mempool *mp, 1216 rte_mempool_obj_cb_t *obj_cb, void *obj_cb_arg); 1217 1218 /** 1219 * Call a function for each mempool memory chunk 1220 * 1221 * Iterate across all memory chunks attached to a rte_mempool and call 1222 * the callback function on it. 1223 * 1224 * @param mp 1225 * A pointer to an initialized mempool. 1226 * @param mem_cb 1227 * A function pointer that is called for each memory chunk. 1228 * @param mem_cb_arg 1229 * An opaque pointer passed to the callback function. 1230 * @return 1231 * Number of memory chunks iterated. 1232 */ 1233 uint32_t rte_mempool_mem_iter(struct rte_mempool *mp, 1234 rte_mempool_mem_cb_t *mem_cb, void *mem_cb_arg); 1235 1236 /** 1237 * Dump the status of the mempool to a file. 1238 * 1239 * @param f 1240 * A pointer to a file for output 1241 * @param mp 1242 * A pointer to the mempool structure. 1243 */ 1244 void rte_mempool_dump(FILE *f, struct rte_mempool *mp); 1245 1246 /** 1247 * Create a user-owned mempool cache. 1248 * 1249 * This can be used by unregistered non-EAL threads to enable caching when they 1250 * interact with a mempool. 1251 * 1252 * @param size 1253 * The size of the mempool cache. See rte_mempool_create()'s cache_size 1254 * parameter description for more information. The same limits and 1255 * considerations apply here too. 1256 * @param socket_id 1257 * The socket identifier in the case of NUMA. The value can be 1258 * SOCKET_ID_ANY if there is no NUMA constraint for the reserved zone. 1259 */ 1260 struct rte_mempool_cache * 1261 rte_mempool_cache_create(uint32_t size, int socket_id); 1262 1263 /** 1264 * Free a user-owned mempool cache. 1265 * 1266 * @param cache 1267 * A pointer to the mempool cache. 1268 */ 1269 void 1270 rte_mempool_cache_free(struct rte_mempool_cache *cache); 1271 1272 /** 1273 * Get a pointer to the per-lcore default mempool cache. 1274 * 1275 * @param mp 1276 * A pointer to the mempool structure. 1277 * @param lcore_id 1278 * The logical core id. 1279 * @return 1280 * A pointer to the mempool cache or NULL if disabled or unregistered non-EAL 1281 * thread. 1282 */ 1283 static __rte_always_inline struct rte_mempool_cache * 1284 rte_mempool_default_cache(struct rte_mempool *mp, unsigned lcore_id) 1285 { 1286 if (mp->cache_size == 0) 1287 return NULL; 1288 1289 if (lcore_id >= RTE_MAX_LCORE) 1290 return NULL; 1291 1292 rte_mempool_trace_default_cache(mp, lcore_id, 1293 &mp->local_cache[lcore_id]); 1294 return &mp->local_cache[lcore_id]; 1295 } 1296 1297 /** 1298 * Flush a user-owned mempool cache to the specified mempool. 1299 * 1300 * @param cache 1301 * A pointer to the mempool cache. 1302 * @param mp 1303 * A pointer to the mempool. 1304 */ 1305 static __rte_always_inline void 1306 rte_mempool_cache_flush(struct rte_mempool_cache *cache, 1307 struct rte_mempool *mp) 1308 { 1309 if (cache == NULL) 1310 cache = rte_mempool_default_cache(mp, rte_lcore_id()); 1311 if (cache == NULL || cache->len == 0) 1312 return; 1313 rte_mempool_trace_cache_flush(cache, mp); 1314 rte_mempool_ops_enqueue_bulk(mp, cache->objs, cache->len); 1315 cache->len = 0; 1316 } 1317 1318 /** 1319 * @internal Put several objects back in the mempool; used internally. 1320 * @param mp 1321 * A pointer to the mempool structure. 1322 * @param obj_table 1323 * A pointer to a table of void * pointers (objects). 1324 * @param n 1325 * The number of objects to store back in the mempool, must be strictly 1326 * positive. 1327 * @param cache 1328 * A pointer to a mempool cache structure. 
May be NULL if not needed. 1329 */ 1330 static __rte_always_inline void 1331 rte_mempool_do_generic_put(struct rte_mempool *mp, void * const *obj_table, 1332 unsigned int n, struct rte_mempool_cache *cache) 1333 { 1334 void **cache_objs; 1335 1336 /* increment stat now, adding in mempool always success */ 1337 RTE_MEMPOOL_STAT_ADD(mp, put_bulk, 1); 1338 RTE_MEMPOOL_STAT_ADD(mp, put_objs, n); 1339 1340 /* No cache provided or if put would overflow mem allocated for cache */ 1341 if (unlikely(cache == NULL || n > RTE_MEMPOOL_CACHE_MAX_SIZE)) 1342 goto ring_enqueue; 1343 1344 cache_objs = &cache->objs[cache->len]; 1345 1346 /* 1347 * The cache follows the following algorithm 1348 * 1. Add the objects to the cache 1349 * 2. Anything greater than the cache min value (if it crosses the 1350 * cache flush threshold) is flushed to the ring. 1351 */ 1352 1353 /* Add elements back into the cache */ 1354 rte_memcpy(&cache_objs[0], obj_table, sizeof(void *) * n); 1355 1356 cache->len += n; 1357 1358 if (cache->len >= cache->flushthresh) { 1359 rte_mempool_ops_enqueue_bulk(mp, &cache->objs[cache->size], 1360 cache->len - cache->size); 1361 cache->len = cache->size; 1362 } 1363 1364 return; 1365 1366 ring_enqueue: 1367 1368 /* push remaining objects in ring */ 1369 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG 1370 if (rte_mempool_ops_enqueue_bulk(mp, obj_table, n) < 0) 1371 rte_panic("cannot put objects in mempool\n"); 1372 #else 1373 rte_mempool_ops_enqueue_bulk(mp, obj_table, n); 1374 #endif 1375 } 1376 1377 1378 /** 1379 * Put several objects back in the mempool. 1380 * 1381 * @param mp 1382 * A pointer to the mempool structure. 1383 * @param obj_table 1384 * A pointer to a table of void * pointers (objects). 1385 * @param n 1386 * The number of objects to add in the mempool from the obj_table. 1387 * @param cache 1388 * A pointer to a mempool cache structure. May be NULL if not needed. 1389 */ 1390 static __rte_always_inline void 1391 rte_mempool_generic_put(struct rte_mempool *mp, void * const *obj_table, 1392 unsigned int n, struct rte_mempool_cache *cache) 1393 { 1394 rte_mempool_trace_generic_put(mp, obj_table, n, cache); 1395 RTE_MEMPOOL_CHECK_COOKIES(mp, obj_table, n, 0); 1396 rte_mempool_do_generic_put(mp, obj_table, n, cache); 1397 } 1398 1399 /** 1400 * Put several objects back in the mempool. 1401 * 1402 * This function calls the multi-producer or the single-producer 1403 * version depending on the default behavior that was specified at 1404 * mempool creation time (see flags). 1405 * 1406 * @param mp 1407 * A pointer to the mempool structure. 1408 * @param obj_table 1409 * A pointer to a table of void * pointers (objects). 1410 * @param n 1411 * The number of objects to add in the mempool from obj_table. 1412 */ 1413 static __rte_always_inline void 1414 rte_mempool_put_bulk(struct rte_mempool *mp, void * const *obj_table, 1415 unsigned int n) 1416 { 1417 struct rte_mempool_cache *cache; 1418 cache = rte_mempool_default_cache(mp, rte_lcore_id()); 1419 rte_mempool_trace_put_bulk(mp, obj_table, n, cache); 1420 rte_mempool_generic_put(mp, obj_table, n, cache); 1421 } 1422 1423 /** 1424 * Put one object back in the mempool. 1425 * 1426 * This function calls the multi-producer or the single-producer 1427 * version depending on the default behavior that was specified at 1428 * mempool creation time (see flags). 1429 * 1430 * @param mp 1431 * A pointer to the mempool structure. 1432 * @param obj 1433 * A pointer to the object to be added. 
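 *
 * Typically paired with rte_mempool_get(); a minimal sketch, assuming only
 * an existing mempool pointer ``mp``:
 *
 * @code{.c}
 * void *obj;
 *
 * if (rte_mempool_get(mp, &obj) < 0)
 *     return; // pool is empty, nothing was allocated
 * // ... use the object ...
 * rte_mempool_put(mp, obj);
 * @endcode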
1434 */ 1435 static __rte_always_inline void 1436 rte_mempool_put(struct rte_mempool *mp, void *obj) 1437 { 1438 rte_mempool_put_bulk(mp, &obj, 1); 1439 } 1440 1441 /** 1442 * @internal Get several objects from the mempool; used internally. 1443 * @param mp 1444 * A pointer to the mempool structure. 1445 * @param obj_table 1446 * A pointer to a table of void * pointers (objects). 1447 * @param n 1448 * The number of objects to get, must be strictly positive. 1449 * @param cache 1450 * A pointer to a mempool cache structure. May be NULL if not needed. 1451 * @return 1452 * - >=0: Success; number of objects supplied. 1453 * - <0: Error; code of ring dequeue function. 1454 */ 1455 static __rte_always_inline int 1456 rte_mempool_do_generic_get(struct rte_mempool *mp, void **obj_table, 1457 unsigned int n, struct rte_mempool_cache *cache) 1458 { 1459 int ret; 1460 uint32_t index, len; 1461 void **cache_objs; 1462 1463 /* No cache provided or cannot be satisfied from cache */ 1464 if (unlikely(cache == NULL || n >= cache->size)) 1465 goto ring_dequeue; 1466 1467 cache_objs = cache->objs; 1468 1469 /* Can this be satisfied from the cache? */ 1470 if (cache->len < n) { 1471 /* No. Backfill the cache first, and then fill from it */ 1472 uint32_t req = n + (cache->size - cache->len); 1473 1474 /* How many do we require i.e. number to fill the cache + the request */ 1475 ret = rte_mempool_ops_dequeue_bulk(mp, 1476 &cache->objs[cache->len], req); 1477 if (unlikely(ret < 0)) { 1478 /* 1479 * In the off chance that we are buffer constrained, 1480 * where we are not able to allocate cache + n, go to 1481 * the ring directly. If that fails, we are truly out of 1482 * buffers. 1483 */ 1484 goto ring_dequeue; 1485 } 1486 1487 cache->len += req; 1488 } 1489 1490 /* Now fill in the response ... */ 1491 for (index = 0, len = cache->len - 1; index < n; ++index, len--, obj_table++) 1492 *obj_table = cache_objs[len]; 1493 1494 cache->len -= n; 1495 1496 RTE_MEMPOOL_STAT_ADD(mp, get_success_bulk, 1); 1497 RTE_MEMPOOL_STAT_ADD(mp, get_success_objs, n); 1498 1499 return 0; 1500 1501 ring_dequeue: 1502 1503 /* get remaining objects from ring */ 1504 ret = rte_mempool_ops_dequeue_bulk(mp, obj_table, n); 1505 1506 if (ret < 0) { 1507 RTE_MEMPOOL_STAT_ADD(mp, get_fail_bulk, 1); 1508 RTE_MEMPOOL_STAT_ADD(mp, get_fail_objs, n); 1509 } else { 1510 RTE_MEMPOOL_STAT_ADD(mp, get_success_bulk, 1); 1511 RTE_MEMPOOL_STAT_ADD(mp, get_success_objs, n); 1512 } 1513 1514 return ret; 1515 } 1516 1517 /** 1518 * Get several objects from the mempool. 1519 * 1520 * If cache is enabled, objects will be retrieved first from cache, 1521 * subsequently from the common pool. Note that it can return -ENOENT when 1522 * the local cache and common pool are empty, even if cache from other 1523 * lcores are full. 1524 * 1525 * @param mp 1526 * A pointer to the mempool structure. 1527 * @param obj_table 1528 * A pointer to a table of void * pointers (objects) that will be filled. 1529 * @param n 1530 * The number of objects to get from mempool to obj_table. 1531 * @param cache 1532 * A pointer to a mempool cache structure. May be NULL if not needed. 1533 * @return 1534 * - 0: Success; objects taken. 1535 * - -ENOENT: Not enough entries in the mempool; no object is retrieved. 
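 *
 * A sketch of explicit cache handling, which is essentially what
 * rte_mempool_get_bulk() does with the caller's default cache:
 *
 * @code{.c}
 * void *burst[32];
 * struct rte_mempool_cache *cache;
 *
 * // NULL for unregistered non-EAL threads or when caching is disabled
 * cache = rte_mempool_default_cache(mp, rte_lcore_id());
 * if (rte_mempool_generic_get(mp, burst, 32, cache) == 0) {
 *     // ... use the 32 objects ...
 *     rte_mempool_generic_put(mp, burst, 32, cache);
 * }
 * @endcode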
1536 */ 1537 static __rte_always_inline int 1538 rte_mempool_generic_get(struct rte_mempool *mp, void **obj_table, 1539 unsigned int n, struct rte_mempool_cache *cache) 1540 { 1541 int ret; 1542 ret = rte_mempool_do_generic_get(mp, obj_table, n, cache); 1543 if (ret == 0) 1544 RTE_MEMPOOL_CHECK_COOKIES(mp, obj_table, n, 1); 1545 rte_mempool_trace_generic_get(mp, obj_table, n, cache); 1546 return ret; 1547 } 1548 1549 /** 1550 * Get several objects from the mempool. 1551 * 1552 * This function calls the multi-consumers or the single-consumer 1553 * version, depending on the default behaviour that was specified at 1554 * mempool creation time (see flags). 1555 * 1556 * If cache is enabled, objects will be retrieved first from cache, 1557 * subsequently from the common pool. Note that it can return -ENOENT when 1558 * the local cache and common pool are empty, even if cache from other 1559 * lcores are full. 1560 * 1561 * @param mp 1562 * A pointer to the mempool structure. 1563 * @param obj_table 1564 * A pointer to a table of void * pointers (objects) that will be filled. 1565 * @param n 1566 * The number of objects to get from the mempool to obj_table. 1567 * @return 1568 * - 0: Success; objects taken 1569 * - -ENOENT: Not enough entries in the mempool; no object is retrieved. 1570 */ 1571 static __rte_always_inline int 1572 rte_mempool_get_bulk(struct rte_mempool *mp, void **obj_table, unsigned int n) 1573 { 1574 struct rte_mempool_cache *cache; 1575 cache = rte_mempool_default_cache(mp, rte_lcore_id()); 1576 rte_mempool_trace_get_bulk(mp, obj_table, n, cache); 1577 return rte_mempool_generic_get(mp, obj_table, n, cache); 1578 } 1579 1580 /** 1581 * Get one object from the mempool. 1582 * 1583 * This function calls the multi-consumers or the single-consumer 1584 * version, depending on the default behavior that was specified at 1585 * mempool creation (see flags). 1586 * 1587 * If cache is enabled, objects will be retrieved first from cache, 1588 * subsequently from the common pool. Note that it can return -ENOENT when 1589 * the local cache and common pool are empty, even if cache from other 1590 * lcores are full. 1591 * 1592 * @param mp 1593 * A pointer to the mempool structure. 1594 * @param obj_p 1595 * A pointer to a void * pointer (object) that will be filled. 1596 * @return 1597 * - 0: Success; objects taken. 1598 * - -ENOENT: Not enough entries in the mempool; no object is retrieved. 1599 */ 1600 static __rte_always_inline int 1601 rte_mempool_get(struct rte_mempool *mp, void **obj_p) 1602 { 1603 return rte_mempool_get_bulk(mp, obj_p, 1); 1604 } 1605 1606 /** 1607 * Get a contiguous blocks of objects from the mempool. 1608 * 1609 * If cache is enabled, consider to flush it first, to reuse objects 1610 * as soon as possible. 1611 * 1612 * The application should check that the driver supports the operation 1613 * by calling rte_mempool_ops_get_info() and checking that `contig_block_size` 1614 * is not zero. 1615 * 1616 * @param mp 1617 * A pointer to the mempool structure. 1618 * @param first_obj_table 1619 * A pointer to a pointer to the first object in each block. 1620 * @param n 1621 * The number of blocks to get from mempool. 1622 * @return 1623 * - 0: Success; blocks taken. 1624 * - -ENOBUFS: Not enough entries in the mempool; no object is retrieved. 
/**
 * Get contiguous blocks of objects from the mempool.
 *
 * If cache is enabled, consider flushing it first, so that objects are
 * reused as soon as possible.
 *
 * The application should check that the driver supports the operation
 * by calling rte_mempool_ops_get_info() and checking that `contig_block_size`
 * is not zero.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @param first_obj_table
 *   A pointer to a pointer to the first object in each block.
 * @param n
 *   The number of blocks to get from the mempool.
 * @return
 *   - 0: Success; blocks taken.
 *   - -ENOBUFS: Not enough entries in the mempool; no object is retrieved.
 *   - -EOPNOTSUPP: The mempool driver does not support block dequeue.
 */
static __rte_always_inline int
rte_mempool_get_contig_blocks(struct rte_mempool *mp,
			      void **first_obj_table, unsigned int n)
{
	int ret;

	ret = rte_mempool_ops_dequeue_contig_blocks(mp, first_obj_table, n);
	if (ret == 0) {
		RTE_MEMPOOL_STAT_ADD(mp, get_success_bulk, 1);
		RTE_MEMPOOL_STAT_ADD(mp, get_success_blks, n);
		RTE_MEMPOOL_CONTIG_BLOCKS_CHECK_COOKIES(mp, first_obj_table, n,
							1);
	} else {
		RTE_MEMPOOL_STAT_ADD(mp, get_fail_bulk, 1);
		RTE_MEMPOOL_STAT_ADD(mp, get_fail_blks, n);
	}

	rte_mempool_trace_get_contig_blocks(mp, first_obj_table, n);
	return ret;
}

/**
 * Return the number of entries in the mempool.
 *
 * When cache is enabled, this function has to browse the caches of all
 * lcores, so it should not be used in a data path, but only for debug
 * purposes. User-owned mempool caches are not accounted for.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @return
 *   The number of entries in the mempool.
 */
unsigned int rte_mempool_avail_count(const struct rte_mempool *mp);

/**
 * Return the number of elements which have been allocated from the mempool.
 *
 * When cache is enabled, this function has to browse the caches of all
 * lcores, so it should not be used in a data path, but only for debug
 * purposes.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @return
 *   The number of entries allocated (in use) from the mempool.
 */
unsigned int
rte_mempool_in_use_count(const struct rte_mempool *mp);

/**
 * Test if the mempool is full.
 *
 * When cache is enabled, this function has to browse the caches of all
 * lcores, so it should not be used in a data path, but only for debug
 * purposes. User-owned mempool caches are not accounted for.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @return
 *   - 1: The mempool is full.
 *   - 0: The mempool is not full.
 */
static inline int
rte_mempool_full(const struct rte_mempool *mp)
{
	return rte_mempool_avail_count(mp) == mp->size;
}

/**
 * Test if the mempool is empty.
 *
 * When cache is enabled, this function has to browse the caches of all
 * lcores, so it should not be used in a data path, but only for debug
 * purposes. User-owned mempool caches are not accounted for.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @return
 *   - 1: The mempool is empty.
 *   - 0: The mempool is not empty.
 */
static inline int
rte_mempool_empty(const struct rte_mempool *mp)
{
	return rte_mempool_avail_count(mp) == 0;
}

/**
 * Return the IO address of elt, which is an element of the pool mp.
 *
 * @param elt
 *   A pointer (virtual address) to the element of the pool.
 * @return
 *   The IO address of the elt element.
 *   If the mempool was created with RTE_MEMPOOL_F_NO_IOVA_CONTIG, the
 *   returned value is RTE_BAD_IOVA.
 */
static inline rte_iova_t
rte_mempool_virt2iova(const void *elt)
{
	const struct rte_mempool_objhdr *hdr;
	hdr = (const struct rte_mempool_objhdr *)RTE_PTR_SUB(elt,
		sizeof(*hdr));
	return hdr->iova;
}
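
/*
 * Debug-time accounting sketch (illustrative only, not part of this API).
 * By construction, the available and in-use counts add up to the pool size;
 * like the helpers above, this is not meant for the data path. "pool" is a
 * hypothetical mempool created elsewhere.
 *
 *	unsigned int avail = rte_mempool_avail_count(pool);
 *	unsigned int used = rte_mempool_in_use_count(pool);
 *
 *	RTE_ASSERT(avail + used == pool->size);
 *	if (rte_mempool_full(pool))
 *		printf("%s: all %u objects are free\n", pool->name, pool->size);
 *	else if (rte_mempool_empty(pool))
 *		printf("%s: pool exhausted\n", pool->name);
 */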
/**
 * Check the consistency of mempool objects.
 *
 * Verify the coherency of fields in the mempool structure. Also check
 * that the cookies of mempool objects (even the ones that are not
 * present in the pool) have a correct value. If not, a panic will occur.
 *
 * @param mp
 *   A pointer to the mempool structure.
 */
void rte_mempool_audit(struct rte_mempool *mp);

/**
 * Return a pointer to the private data in a mempool structure.
 *
 * @param mp
 *   A pointer to the mempool structure.
 * @return
 *   A pointer to the private data.
 */
static inline void *rte_mempool_get_priv(struct rte_mempool *mp)
{
	return (char *)mp +
		RTE_MEMPOOL_HEADER_SIZE(mp, mp->cache_size);
}

/**
 * Dump the status of all mempools on the console.
 *
 * @param f
 *   A pointer to a file for output.
 */
void rte_mempool_list_dump(FILE *f);

/**
 * Search for a mempool by its name.
 *
 * @param name
 *   The name of the mempool.
 * @return
 *   The pointer to the mempool matching the name, or NULL if not found,
 *   with rte_errno set appropriately. Possible rte_errno values include:
 *    - ENOENT - required entry not available to return.
 */
struct rte_mempool *rte_mempool_lookup(const char *name);

/**
 * Get the header, trailer and total size of a mempool element.
 *
 * Given a desired size of the mempool element and mempool flags,
 * calculates header, trailer, body and total sizes of the mempool object.
 *
 * @param elt_size
 *   The size of each element, without header and trailer.
 * @param flags
 *   The flags used for the mempool creation.
 *   Consult rte_mempool_create() for more information about possible values.
 * @param sz
 *   The calculated detailed size of the mempool object. May be NULL.
 * @return
 *   Total size of the mempool object.
 */
uint32_t rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags,
	struct rte_mempool_objsz *sz);

/**
 * Walk the list of all memory pools.
 *
 * @param func
 *   Iterator function.
 * @param arg
 *   Argument passed to the iterator.
 */
void rte_mempool_walk(void (*func)(struct rte_mempool *, void *arg),
		      void *arg);
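
/*
 * Illustrative sketch for the lookup and walk helpers above (not part of
 * this API): look one pool up by name, then dump every pool registered in
 * the process. The callback matches the rte_mempool_walk() signature and
 * uses rte_mempool_dump(), declared earlier in this file; "my_pool" is a
 * hypothetical pool name.
 *
 *	static void
 *	dump_cb(struct rte_mempool *mp, void *arg)
 *	{
 *		rte_mempool_dump((FILE *)arg, mp);
 *	}
 *
 *	struct rte_mempool *mp = rte_mempool_lookup("my_pool");
 *
 *	if (mp != NULL)
 *		rte_mempool_dump(stdout, mp);
 *	rte_mempool_walk(dump_cb, stdout);
 */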
/**
 * @internal Get page size used for mempool object allocation.
 * This function is internal to the mempool library and mempool drivers.
 */
int
rte_mempool_get_page_size(struct rte_mempool *mp, size_t *pg_sz);

/**
 * Mempool event type.
 * @internal
 */
enum rte_mempool_event {
	/** Occurs after a mempool is fully populated. */
	RTE_MEMPOOL_EVENT_READY = 0,
	/** Occurs before the destruction of a mempool begins. */
	RTE_MEMPOOL_EVENT_DESTROY = 1,
};

/**
 * @internal
 * Mempool event callback.
 *
 * rte_mempool_event_callback_register() may be called from within the callback,
 * but the callbacks registered this way will not be invoked for the same event.
 * rte_mempool_event_callback_unregister() may only be safely called
 * to remove the running callback.
 */
typedef void (rte_mempool_event_callback)(
		enum rte_mempool_event event,
		struct rte_mempool *mp,
		void *user_data);

/**
 * @internal
 * Register a callback function invoked on mempool life cycle events.
 * The function will be invoked in the process
 * that performs an action which triggers the callback.
 *
 * @param func
 *   Callback function.
 * @param user_data
 *   User data.
 *
 * @return
 *   0 on success, negative on failure and rte_errno is set.
 */
__rte_internal
int
rte_mempool_event_callback_register(rte_mempool_event_callback *func,
				    void *user_data);

/**
 * @internal
 * Unregister a callback added with rte_mempool_event_callback_register().
 * @p func and @p user_data must exactly match registration parameters.
 *
 * @param func
 *   Callback function.
 * @param user_data
 *   User data.
 *
 * @return
 *   0 on success, negative on failure and rte_errno is set.
 */
__rte_internal
int
rte_mempool_event_callback_unregister(rte_mempool_event_callback *func,
				      void *user_data);

#ifdef __cplusplus
}
#endif

#endif /* _RTE_MEMPOOL_H_ */