xref: /dpdk/lib/mempool/rte_mempool.c (revision ff4e52ef)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2014 Intel Corporation.
3  * Copyright(c) 2016 6WIND S.A.
4  */
5 
6 #include <stdbool.h>
7 #include <stdio.h>
8 #include <string.h>
9 #include <stdint.h>
10 #include <stdarg.h>
11 #include <unistd.h>
12 #include <inttypes.h>
13 #include <errno.h>
14 #include <sys/queue.h>
15 
16 #include <rte_common.h>
17 #include <rte_log.h>
18 #include <rte_debug.h>
19 #include <rte_memory.h>
20 #include <rte_memzone.h>
21 #include <rte_malloc.h>
22 #include <rte_atomic.h>
23 #include <rte_launch.h>
24 #include <rte_eal.h>
25 #include <rte_eal_memconfig.h>
26 #include <rte_per_lcore.h>
27 #include <rte_lcore.h>
28 #include <rte_branch_prediction.h>
29 #include <rte_errno.h>
30 #include <rte_string_fns.h>
31 #include <rte_spinlock.h>
32 #include <rte_tailq.h>
33 #include <rte_eal_paging.h>
34 
35 #include "rte_mempool.h"
36 #include "rte_mempool_trace.h"
37 
38 TAILQ_HEAD(rte_mempool_list, rte_tailq_entry);
39 
40 static struct rte_tailq_elem rte_mempool_tailq = {
41 	.name = "RTE_MEMPOOL",
42 };
43 EAL_REGISTER_TAILQ(rte_mempool_tailq)
44 
45 TAILQ_HEAD(mempool_callback_list, rte_tailq_entry);
46 
47 static struct rte_tailq_elem callback_tailq = {
48 	.name = "RTE_MEMPOOL_CALLBACK",
49 };
50 EAL_REGISTER_TAILQ(callback_tailq)
51 
52 /* Invoke all registered mempool event callbacks. */
53 static void
54 mempool_event_callback_invoke(enum rte_mempool_event event,
55 			      struct rte_mempool *mp);
56 
57 #define CACHE_FLUSHTHRESH_MULTIPLIER 1.5
58 #define CALC_CACHE_FLUSHTHRESH(c)	\
59 	((typeof(c))((c) * CACHE_FLUSHTHRESH_MULTIPLIER))
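/*
 * A quick worked example of the threshold above (illustrative note, not part
 * of the original file): with a per-lcore cache size of 256 objects, the
 * flush threshold is 256 * 1.5 = 384, i.e. a local cache is spilled back to
 * the common pool once it reaches 384 cached objects (see the put path in
 * rte_mempool.h).
 */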
60 
61 #if defined(RTE_ARCH_X86)
62 /*
63  * Return the greatest common divisor of a and b
64  * (Euclidean algorithm).
65  */
66 static unsigned get_gcd(unsigned a, unsigned b)
67 {
68 	unsigned c;
69 
70 	if (0 == a)
71 		return b;
72 	if (0 == b)
73 		return a;
74 
75 	if (a < b) {
76 		c = a;
77 		a = b;
78 		b = c;
79 	}
80 
81 	while (b != 0) {
82 		c = a % b;
83 		a = b;
84 		b = c;
85 	}
86 
87 	return a;
88 }
89 
90 /*
91  * Depending on the memory configuration on x86, object addresses are spread
92  * across channels and ranks in RAM: the pool allocator adds padding
93  * between objects. This function returns the new, padded size of the
94  * object.
95  */
96 static unsigned int
97 arch_mem_object_align(unsigned int obj_size)
98 {
99 	unsigned nrank, nchan;
100 	unsigned new_obj_size;
101 
102 	/* get number of channels */
103 	nchan = rte_memory_get_nchannel();
104 	if (nchan == 0)
105 		nchan = 4;
106 
107 	nrank = rte_memory_get_nrank();
108 	if (nrank == 0)
109 		nrank = 1;
110 
111 	/* process new object size */
112 	new_obj_size = (obj_size + RTE_MEMPOOL_ALIGN_MASK) / RTE_MEMPOOL_ALIGN;
113 	while (get_gcd(new_obj_size, nrank * nchan) != 1)
114 		new_obj_size++;
115 	return new_obj_size * RTE_MEMPOOL_ALIGN;
116 }
117 #else
118 static unsigned int
119 arch_mem_object_align(unsigned int obj_size)
120 {
121 	return obj_size;
122 }
123 #endif
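/*
 * Worked example for the x86 path above (illustrative, not from the original
 * source), assuming RTE_MEMPOOL_ALIGN is 64 bytes, 4 channels and 1 rank are
 * detected, and obj_size is 2048 bytes:
 *
 *	new_obj_size = (2048 + 63) / 64 = 32 units
 *	gcd(32, 4) = 4 -> bump to 33; gcd(33, 4) = 1 -> stop
 *	padded size  = 33 * 64 = 2112 bytes
 *
 * i.e. one extra cache line of padding per object is traded for a better
 * spread of object start addresses across channels/ranks.
 */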
124 
125 struct pagesz_walk_arg {
126 	int socket_id;
127 	size_t min;
128 };
129 
130 static int
131 find_min_pagesz(const struct rte_memseg_list *msl, void *arg)
132 {
133 	struct pagesz_walk_arg *wa = arg;
134 	bool valid;
135 
136 	/*
137 	 * we need to only look at page sizes available for a particular socket
138 	 * ID.  so, we either need an exact match on socket ID (can match both
139 	 * native and external memory), or, if SOCKET_ID_ANY was specified as a
140 	 * socket ID argument, we must only look at native memory and ignore any
141 	 * page sizes associated with external memory.
142 	 */
143 	valid = msl->socket_id == wa->socket_id;
144 	valid |= wa->socket_id == SOCKET_ID_ANY && msl->external == 0;
145 
146 	if (valid && msl->page_sz < wa->min)
147 		wa->min = msl->page_sz;
148 
149 	return 0;
150 }
151 
152 static size_t
153 get_min_page_size(int socket_id)
154 {
155 	struct pagesz_walk_arg wa;
156 
157 	wa.min = SIZE_MAX;
158 	wa.socket_id = socket_id;
159 
160 	rte_memseg_list_walk(find_min_pagesz, &wa);
161 
162 	return wa.min == SIZE_MAX ? (size_t) rte_mem_page_size() : wa.min;
163 }
164 
165 
166 static void
167 mempool_add_elem(struct rte_mempool *mp, __rte_unused void *opaque,
168 		 void *obj, rte_iova_t iova)
169 {
170 	struct rte_mempool_objhdr *hdr;
171 	struct rte_mempool_objtlr *tlr __rte_unused;
172 
173 	/* set mempool ptr in header */
174 	hdr = RTE_PTR_SUB(obj, sizeof(*hdr));
175 	hdr->mp = mp;
176 	hdr->iova = iova;
177 	STAILQ_INSERT_TAIL(&mp->elt_list, hdr, next);
178 	mp->populated_size++;
179 
180 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
181 	hdr->cookie = RTE_MEMPOOL_HEADER_COOKIE2;
182 	tlr = rte_mempool_get_trailer(obj);
183 	tlr->cookie = RTE_MEMPOOL_TRAILER_COOKIE;
184 #endif
185 }
186 
187 /* call obj_cb() for each mempool element */
188 uint32_t
189 rte_mempool_obj_iter(struct rte_mempool *mp,
190 	rte_mempool_obj_cb_t *obj_cb, void *obj_cb_arg)
191 {
192 	struct rte_mempool_objhdr *hdr;
193 	void *obj;
194 	unsigned n = 0;
195 
196 	STAILQ_FOREACH(hdr, &mp->elt_list, next) {
197 		obj = (char *)hdr + sizeof(*hdr);
198 		obj_cb(mp, obj_cb_arg, obj, n);
199 		n++;
200 	}
201 
202 	return n;
203 }
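/*
 * Minimal usage sketch for the iterator above (illustrative only; the
 * callback name and body are made up):
 *
 *	static void
 *	zero_obj(struct rte_mempool *mp, void *arg __rte_unused,
 *		 void *obj, unsigned idx __rte_unused)
 *	{
 *		memset(obj, 0, mp->elt_size);
 *	}
 *
 *	...
 *	uint32_t n = rte_mempool_obj_iter(mp, zero_obj, NULL);
 *
 * where n equals mp->populated_size on return.
 */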
204 
205 /* call mem_cb() for each mempool memory chunk */
206 uint32_t
207 rte_mempool_mem_iter(struct rte_mempool *mp,
208 	rte_mempool_mem_cb_t *mem_cb, void *mem_cb_arg)
209 {
210 	struct rte_mempool_memhdr *hdr;
211 	unsigned n = 0;
212 
213 	STAILQ_FOREACH(hdr, &mp->mem_list, next) {
214 		mem_cb(mp, mem_cb_arg, hdr, n);
215 		n++;
216 	}
217 
218 	return n;
219 }
220 
221 /* get the header, trailer and total size of a mempool element. */
222 uint32_t
223 rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags,
224 	struct rte_mempool_objsz *sz)
225 {
226 	struct rte_mempool_objsz lsz;
227 
228 	sz = (sz != NULL) ? sz : &lsz;
229 
230 	sz->header_size = sizeof(struct rte_mempool_objhdr);
231 	if ((flags & RTE_MEMPOOL_F_NO_CACHE_ALIGN) == 0)
232 		sz->header_size = RTE_ALIGN_CEIL(sz->header_size,
233 			RTE_MEMPOOL_ALIGN);
234 
235 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
236 	sz->trailer_size = sizeof(struct rte_mempool_objtlr);
237 #else
238 	sz->trailer_size = 0;
239 #endif
240 
241 	/* element size is aligned to at least 8 bytes */
242 	sz->elt_size = RTE_ALIGN_CEIL(elt_size, sizeof(uint64_t));
243 
244 	/* expand trailer to next cache line */
245 	if ((flags & RTE_MEMPOOL_F_NO_CACHE_ALIGN) == 0) {
246 		sz->total_size = sz->header_size + sz->elt_size +
247 			sz->trailer_size;
248 		sz->trailer_size += ((RTE_MEMPOOL_ALIGN -
249 				  (sz->total_size & RTE_MEMPOOL_ALIGN_MASK)) &
250 				 RTE_MEMPOOL_ALIGN_MASK);
251 	}
252 
253 	/*
254 	 * increase trailer to add padding between objects in order to
255 	 * spread them across memory channels/ranks
256 	 */
257 	if ((flags & RTE_MEMPOOL_F_NO_SPREAD) == 0) {
258 		unsigned new_size;
259 		new_size = arch_mem_object_align
260 			    (sz->header_size + sz->elt_size + sz->trailer_size);
261 		sz->trailer_size = new_size - sz->header_size - sz->elt_size;
262 	}
263 
264 	/* this is the size of an object, including header and trailer */
265 	sz->total_size = sz->header_size + sz->elt_size + sz->trailer_size;
266 
267 	return sz->total_size;
268 }
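/*
 * Illustrative numbers for the calculation above, assuming a 64-byte
 * RTE_MEMPOOL_ALIGN, no debug cookies and RTE_MEMPOOL_F_NO_SPREAD set:
 * for elt_size = 2000, the (sub-cache-line) header rounds up to 64 bytes,
 * the element stays at 2000 (already 8-byte aligned) and the trailer grows
 * to 48 bytes, so total_size = 64 + 2000 + 48 = 2112, a multiple of the
 * cache line size. Without RTE_MEMPOOL_F_NO_SPREAD the trailer may grow
 * further, as computed by arch_mem_object_align() above.
 */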
269 
270 /* free a memchunk allocated with rte_memzone_reserve() */
271 static void
272 rte_mempool_memchunk_mz_free(__rte_unused struct rte_mempool_memhdr *memhdr,
273 	void *opaque)
274 {
275 	const struct rte_memzone *mz = opaque;
276 	rte_memzone_free(mz);
277 }
278 
279 /* Free memory chunks used by a mempool. All objects must be in the pool. */
280 static void
281 rte_mempool_free_memchunks(struct rte_mempool *mp)
282 {
283 	struct rte_mempool_memhdr *memhdr;
284 	void *elt;
285 
286 	while (!STAILQ_EMPTY(&mp->elt_list)) {
287 		rte_mempool_ops_dequeue_bulk(mp, &elt, 1);
288 		(void)elt;
289 		STAILQ_REMOVE_HEAD(&mp->elt_list, next);
290 		mp->populated_size--;
291 	}
292 
293 	while (!STAILQ_EMPTY(&mp->mem_list)) {
294 		memhdr = STAILQ_FIRST(&mp->mem_list);
295 		STAILQ_REMOVE_HEAD(&mp->mem_list, next);
296 		if (memhdr->free_cb != NULL)
297 			memhdr->free_cb(memhdr, memhdr->opaque);
298 		rte_free(memhdr);
299 		mp->nb_mem_chunks--;
300 	}
301 }
302 
303 static int
304 mempool_ops_alloc_once(struct rte_mempool *mp)
305 {
306 	int ret;
307 
308 	/* create the internal ring if not already done */
309 	if ((mp->flags & RTE_MEMPOOL_F_POOL_CREATED) == 0) {
310 		ret = rte_mempool_ops_alloc(mp);
311 		if (ret != 0)
312 			return ret;
313 		mp->flags |= RTE_MEMPOOL_F_POOL_CREATED;
314 	}
315 	return 0;
316 }
317 
318 /* Add objects to the pool, using a physically contiguous memory
319  * zone. Return the number of objects added, or a negative value
320  * on error.
321  */
322 int
323 rte_mempool_populate_iova(struct rte_mempool *mp, char *vaddr,
324 	rte_iova_t iova, size_t len, rte_mempool_memchunk_free_cb_t *free_cb,
325 	void *opaque)
326 {
327 	unsigned i = 0;
328 	size_t off;
329 	struct rte_mempool_memhdr *memhdr;
330 	int ret;
331 
332 	ret = mempool_ops_alloc_once(mp);
333 	if (ret != 0)
334 		return ret;
335 
336 	/* mempool is already populated */
337 	if (mp->populated_size >= mp->size)
338 		return -ENOSPC;
339 
340 	memhdr = rte_zmalloc("MEMPOOL_MEMHDR", sizeof(*memhdr), 0);
341 	if (memhdr == NULL)
342 		return -ENOMEM;
343 
344 	memhdr->mp = mp;
345 	memhdr->addr = vaddr;
346 	memhdr->iova = iova;
347 	memhdr->len = len;
348 	memhdr->free_cb = free_cb;
349 	memhdr->opaque = opaque;
350 
351 	if (mp->flags & RTE_MEMPOOL_F_NO_CACHE_ALIGN)
352 		off = RTE_PTR_ALIGN_CEIL(vaddr, 8) - vaddr;
353 	else
354 		off = RTE_PTR_ALIGN_CEIL(vaddr, RTE_MEMPOOL_ALIGN) - vaddr;
355 
356 	if (off > len) {
357 		ret = 0;
358 		goto fail;
359 	}
360 
361 	i = rte_mempool_ops_populate(mp, mp->size - mp->populated_size,
362 		(char *)vaddr + off,
363 		(iova == RTE_BAD_IOVA) ? RTE_BAD_IOVA : (iova + off),
364 		len - off, mempool_add_elem, NULL);
365 
366 	/* not enough room to store one object */
367 	if (i == 0) {
368 		ret = 0;
369 		goto fail;
370 	}
371 
372 	STAILQ_INSERT_TAIL(&mp->mem_list, memhdr, next);
373 	mp->nb_mem_chunks++;
374 
375 	/* At least some objects in the pool can now be used for IO. */
376 	if (iova != RTE_BAD_IOVA)
377 		mp->flags &= ~RTE_MEMPOOL_F_NON_IO;
378 
379 	/* Report the mempool as ready only when fully populated. */
380 	if (mp->populated_size >= mp->size)
381 		mempool_event_callback_invoke(RTE_MEMPOOL_EVENT_READY, mp);
382 
383 	rte_mempool_trace_populate_iova(mp, vaddr, iova, len, free_cb, opaque);
384 	return i;
385 
386 fail:
387 	rte_free(memhdr);
388 	return ret;
389 }
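/*
 * Hypothetical caller sketch (not part of this file): populating a pool from
 * a buffer obtained with rte_malloc(), whose IOVA is resolved with
 * rte_malloc_virt2iova(). Passing a NULL free_cb simply means the chunk is
 * not freed by rte_mempool_free_memchunks().
 *
 *	char *buf = rte_malloc(NULL, chunk_len, RTE_MEMPOOL_ALIGN);
 *	rte_iova_t iova = rte_malloc_virt2iova(buf);
 *	int n = rte_mempool_populate_iova(mp, buf, iova, chunk_len,
 *					  NULL, NULL);
 *
 * where n > 0 is the number of objects added from this chunk.
 */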
390 
391 static rte_iova_t
392 get_iova(void *addr)
393 {
394 	struct rte_memseg *ms;
395 
396 	/* try registered memory first */
397 	ms = rte_mem_virt2memseg(addr, NULL);
398 	if (ms == NULL || ms->iova == RTE_BAD_IOVA)
399 		/* fall back to actual physical address */
400 		return rte_mem_virt2iova(addr);
401 	return ms->iova + RTE_PTR_DIFF(addr, ms->addr);
402 }
403 
404 /* Populate the mempool with a virtual area. Return the number of
405  * objects added, or a negative value on error.
406  */
407 int
408 rte_mempool_populate_virt(struct rte_mempool *mp, char *addr,
409 	size_t len, size_t pg_sz, rte_mempool_memchunk_free_cb_t *free_cb,
410 	void *opaque)
411 {
412 	rte_iova_t iova;
413 	size_t off, phys_len;
414 	int ret, cnt = 0;
415 
416 	if (mp->flags & RTE_MEMPOOL_F_NO_IOVA_CONTIG)
417 		return rte_mempool_populate_iova(mp, addr, RTE_BAD_IOVA,
418 			len, free_cb, opaque);
419 
420 	for (off = 0; off < len &&
421 		     mp->populated_size < mp->size; off += phys_len) {
422 
423 		iova = get_iova(addr + off);
424 
425 		/* populate with the largest group of contiguous pages */
426 		for (phys_len = RTE_MIN(
427 			(size_t)(RTE_PTR_ALIGN_CEIL(addr + off + 1, pg_sz) -
428 				(addr + off)),
429 			len - off);
430 		     off + phys_len < len;
431 		     phys_len = RTE_MIN(phys_len + pg_sz, len - off)) {
432 			rte_iova_t iova_tmp;
433 
434 			iova_tmp = get_iova(addr + off + phys_len);
435 
436 			if (iova_tmp == RTE_BAD_IOVA ||
437 					iova_tmp != iova + phys_len)
438 				break;
439 		}
440 
441 		ret = rte_mempool_populate_iova(mp, addr + off, iova,
442 			phys_len, free_cb, opaque);
443 		if (ret == 0)
444 			continue;
445 		if (ret < 0)
446 			goto fail;
447 		/* no need to call the free callback for next chunks */
448 		free_cb = NULL;
449 		cnt += ret;
450 	}
451 
452 	rte_mempool_trace_populate_virt(mp, addr, len, pg_sz, free_cb, opaque);
453 	return cnt;
454 
455  fail:
456 	rte_mempool_free_memchunks(mp);
457 	return ret;
458 }
459 
460 /* Get the minimal page size used in a mempool before populating it. */
461 int
462 rte_mempool_get_page_size(struct rte_mempool *mp, size_t *pg_sz)
463 {
464 	bool need_iova_contig_obj;
465 	bool alloc_in_ext_mem;
466 	int ret;
467 
468 	/* check if we can retrieve a valid socket ID */
469 	ret = rte_malloc_heap_socket_is_external(mp->socket_id);
470 	if (ret < 0)
471 		return -EINVAL;
472 	alloc_in_ext_mem = (ret == 1);
473 	need_iova_contig_obj = !(mp->flags & RTE_MEMPOOL_F_NO_IOVA_CONTIG);
474 
475 	if (!need_iova_contig_obj)
476 		*pg_sz = 0;
477 	else if (rte_eal_has_hugepages() || alloc_in_ext_mem)
478 		*pg_sz = get_min_page_size(mp->socket_id);
479 	else
480 		*pg_sz = rte_mem_page_size();
481 
482 	rte_mempool_trace_get_page_size(mp, *pg_sz);
483 	return 0;
484 }
485 
486 /* Default function to populate the mempool: allocate memory in memzones,
487  * and populate them. Return the number of objects added, or a negative
488  * value on error.
489  */
490 int
491 rte_mempool_populate_default(struct rte_mempool *mp)
492 {
493 	unsigned int mz_flags = RTE_MEMZONE_1GB|RTE_MEMZONE_SIZE_HINT_ONLY;
494 	char mz_name[RTE_MEMZONE_NAMESIZE];
495 	const struct rte_memzone *mz;
496 	ssize_t mem_size;
497 	size_t align, pg_sz, pg_shift = 0;
498 	rte_iova_t iova;
499 	unsigned mz_id, n;
500 	int ret;
501 	bool need_iova_contig_obj;
502 	size_t max_alloc_size = SIZE_MAX;
503 
504 	ret = mempool_ops_alloc_once(mp);
505 	if (ret != 0)
506 		return ret;
507 
508 	/* mempool must not be populated */
509 	if (mp->nb_mem_chunks != 0)
510 		return -EEXIST;
511 
512 	/*
513 	 * the following section calculates page shift and page size values.
514 	 *
515 	 * these values impact the result of the calc_mem_size operation, which
516 	 * returns the amount of memory that should be allocated to store the
517 	 * desired number of objects. when the page size is not zero, extra
518 	 * memory is allocated for padding between objects, to ensure that an
519 	 * object does not cross a page boundary. in other words, page size/shift
520 	 * are set to zero if mempool elements need not care about page boundaries.
521 	 * there are several considerations for page size and page shift here.
522 	 *
523 	 * if we don't need our mempools to have physically contiguous objects,
524 	 * then just set page shift and page size to 0, because the user has
525 	 * indicated that there's no need to care about anything.
526 	 *
527 	 * if we do need contiguous objects (if a mempool driver has its
528 	 * own calc_size() method returning min_chunk_size = mem_size),
529 	 * there is also an option to reserve the entire mempool memory
530 	 * as one contiguous block of memory.
531 	 *
532 	 * if we require contiguous objects, but not necessarily the entire
533 	 * mempool reserved space to be contiguous, pg_sz will be != 0,
534 	 * and the default ops->populate() will take care of not placing
535 	 * objects across pages.
536 	 *
537 	 * if our IO addresses are physical, we may get memory from bigger
538 	 * pages, or we might get memory from smaller pages, and how much of it
539 	 * we require depends on whether we want bigger or smaller pages.
540 	 * However, requesting each and every memory size is too much work, so
541 	 * what we'll do instead is walk through the page sizes available, pick
542 	 * the smallest one and set up page shift to match that one. We will be
543 	 * wasting some space this way, but it's much nicer than looping around
544 	 * trying to reserve each and every page size.
545 	 *
546 	 * If we fail to get enough contiguous memory, then we'll go and
547 	 * reserve space in smaller chunks.
548 	 */
549 
550 	need_iova_contig_obj = !(mp->flags & RTE_MEMPOOL_F_NO_IOVA_CONTIG);
551 	ret = rte_mempool_get_page_size(mp, &pg_sz);
552 	if (ret < 0)
553 		return ret;
554 
555 	if (pg_sz != 0)
556 		pg_shift = rte_bsf32(pg_sz);
557 
558 	for (mz_id = 0, n = mp->size; n > 0; mz_id++, n -= ret) {
559 		size_t min_chunk_size;
560 
561 		mem_size = rte_mempool_ops_calc_mem_size(
562 			mp, n, pg_shift, &min_chunk_size, &align);
563 
564 		if (mem_size < 0) {
565 			ret = mem_size;
566 			goto fail;
567 		}
568 
569 		ret = snprintf(mz_name, sizeof(mz_name),
570 			RTE_MEMPOOL_MZ_FORMAT "_%d", mp->name, mz_id);
571 		if (ret < 0 || ret >= (int)sizeof(mz_name)) {
572 			ret = -ENAMETOOLONG;
573 			goto fail;
574 		}
575 
576 		/* if we're trying to reserve contiguous memory, add appropriate
577 		 * memzone flag.
578 		 */
579 		if (min_chunk_size == (size_t)mem_size)
580 			mz_flags |= RTE_MEMZONE_IOVA_CONTIG;
581 
582 		/* Allocate a memzone, retrying with a smaller area on ENOMEM */
583 		do {
584 			mz = rte_memzone_reserve_aligned(mz_name,
585 				RTE_MIN((size_t)mem_size, max_alloc_size),
586 				mp->socket_id, mz_flags, align);
587 
588 			if (mz != NULL || rte_errno != ENOMEM)
589 				break;
590 
591 			max_alloc_size = RTE_MIN(max_alloc_size,
592 						(size_t)mem_size) / 2;
593 		} while (mz == NULL && max_alloc_size >= min_chunk_size);
594 
595 		if (mz == NULL) {
596 			ret = -rte_errno;
597 			goto fail;
598 		}
599 
600 		if (need_iova_contig_obj)
601 			iova = mz->iova;
602 		else
603 			iova = RTE_BAD_IOVA;
604 
605 		if (pg_sz == 0 || (mz_flags & RTE_MEMZONE_IOVA_CONTIG))
606 			ret = rte_mempool_populate_iova(mp, mz->addr,
607 				iova, mz->len,
608 				rte_mempool_memchunk_mz_free,
609 				(void *)(uintptr_t)mz);
610 		else
611 			ret = rte_mempool_populate_virt(mp, mz->addr,
612 				mz->len, pg_sz,
613 				rte_mempool_memchunk_mz_free,
614 				(void *)(uintptr_t)mz);
615 		if (ret == 0) /* should not happen */
616 			ret = -ENOBUFS;
617 		if (ret < 0) {
618 			rte_memzone_free(mz);
619 			goto fail;
620 		}
621 	}
622 
623 	rte_mempool_trace_populate_default(mp);
624 	return mp->size;
625 
626  fail:
627 	rte_mempool_free_memchunks(mp);
628 	return ret;
629 }
630 
631 /* return the memory size required for mempool objects in anonymous mem */
632 static ssize_t
633 get_anon_size(const struct rte_mempool *mp)
634 {
635 	ssize_t size;
636 	size_t pg_sz, pg_shift;
637 	size_t min_chunk_size;
638 	size_t align;
639 
640 	pg_sz = rte_mem_page_size();
641 	pg_shift = rte_bsf32(pg_sz);
642 	size = rte_mempool_ops_calc_mem_size(mp, mp->size, pg_shift,
643 					     &min_chunk_size, &align);
644 
645 	return size;
646 }
647 
648 /* unmap a memory zone mapped by rte_mempool_populate_anon() */
649 static void
650 rte_mempool_memchunk_anon_free(struct rte_mempool_memhdr *memhdr,
651 	void *opaque)
652 {
653 	ssize_t size;
654 
655 	/*
656 	 * Calculate the size here: memhdr->len holds one contiguous chunk's
657 	 * length, which may be smaller if the anon map was split into several
658 	 * chunks. The result must match the size calculated at populate time.
659 	 */
660 	size = get_anon_size(memhdr->mp);
661 	if (size < 0)
662 		return;
663 
664 	rte_mem_unmap(opaque, size);
665 }
666 
667 /* populate the mempool with an anonymous mapping */
668 int
669 rte_mempool_populate_anon(struct rte_mempool *mp)
670 {
671 	ssize_t size;
672 	int ret;
673 	char *addr;
674 
675 	/* mempool is already populated, error */
676 	if ((!STAILQ_EMPTY(&mp->mem_list)) || mp->nb_mem_chunks != 0) {
677 		rte_errno = EINVAL;
678 		return 0;
679 	}
680 
681 	ret = mempool_ops_alloc_once(mp);
682 	if (ret < 0) {
683 		rte_errno = -ret;
684 		return 0;
685 	}
686 
687 	size = get_anon_size(mp);
688 	if (size < 0) {
689 		rte_errno = -size;
690 		return 0;
691 	}
692 
693 	/* get a chunk of virtually contiguous memory */
694 	addr = rte_mem_map(NULL, size, RTE_PROT_READ | RTE_PROT_WRITE,
695 		RTE_MAP_SHARED | RTE_MAP_ANONYMOUS, -1, 0);
696 	if (addr == NULL)
697 		return 0;
698 	/* can't use MAP_LOCKED, it does not exist on BSD */
699 	if (rte_mem_lock(addr, size) < 0) {
700 		rte_mem_unmap(addr, size);
701 		return 0;
702 	}
703 
704 	ret = rte_mempool_populate_virt(mp, addr, size, rte_mem_page_size(),
705 		rte_mempool_memchunk_anon_free, addr);
706 	if (ret == 0) /* should not happen */
707 		ret = -ENOBUFS;
708 	if (ret < 0) {
709 		rte_errno = -ret;
710 		goto fail;
711 	}
712 
713 	rte_mempool_trace_populate_anon(mp);
714 	return mp->populated_size;
715 
716  fail:
717 	rte_mempool_free_memchunks(mp);
718 	return 0;
719 }
720 
721 /* free a mempool */
722 void
723 rte_mempool_free(struct rte_mempool *mp)
724 {
725 	struct rte_mempool_list *mempool_list = NULL;
726 	struct rte_tailq_entry *te;
727 
728 	if (mp == NULL)
729 		return;
730 
731 	mempool_list = RTE_TAILQ_CAST(rte_mempool_tailq.head, rte_mempool_list);
732 	rte_mcfg_tailq_write_lock();
733 	/* find out tailq entry */
734 	TAILQ_FOREACH(te, mempool_list, next) {
735 		if (te->data == (void *)mp)
736 			break;
737 	}
738 
739 	if (te != NULL) {
740 		TAILQ_REMOVE(mempool_list, te, next);
741 		rte_free(te);
742 	}
743 	rte_mcfg_tailq_write_unlock();
744 
745 	mempool_event_callback_invoke(RTE_MEMPOOL_EVENT_DESTROY, mp);
746 	rte_mempool_trace_free(mp);
747 	rte_mempool_free_memchunks(mp);
748 	rte_mempool_ops_free(mp);
749 	rte_memzone_free(mp->mz);
750 }
751 
752 static void
753 mempool_cache_init(struct rte_mempool_cache *cache, uint32_t size)
754 {
755 	cache->size = size;
756 	cache->flushthresh = CALC_CACHE_FLUSHTHRESH(size);
757 	cache->len = 0;
758 }
759 
760 /*
761  * Create and initialize a cache for objects that are retrieved from and
762  * returned to an underlying mempool. This structure is identical to the
763  * local_cache[lcore_id] pointed to by the mempool structure.
764  */
765 struct rte_mempool_cache *
766 rte_mempool_cache_create(uint32_t size, int socket_id)
767 {
768 	struct rte_mempool_cache *cache;
769 
770 	if (size == 0 || size > RTE_MEMPOOL_CACHE_MAX_SIZE) {
771 		rte_errno = EINVAL;
772 		return NULL;
773 	}
774 
775 	cache = rte_zmalloc_socket("MEMPOOL_CACHE", sizeof(*cache),
776 				  RTE_CACHE_LINE_SIZE, socket_id);
777 	if (cache == NULL) {
778 		RTE_LOG(ERR, MEMPOOL, "Cannot allocate mempool cache.\n");
779 		rte_errno = ENOMEM;
780 		return NULL;
781 	}
782 
783 	mempool_cache_init(cache, size);
784 
785 	rte_mempool_trace_cache_create(size, socket_id, cache);
786 	return cache;
787 }
788 
789 /*
790  * Free a cache. It's the responsibility of the user to make sure that any
791  * remaining objects in the cache are flushed to the corresponding
792  * mempool.
793  */
794 void
795 rte_mempool_cache_free(struct rte_mempool_cache *cache)
796 {
797 	rte_mempool_trace_cache_free(cache);
798 	rte_free(cache);
799 }
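/*
 * Typical use of a user-owned cache (illustrative sketch; the bulk get/put
 * and flush helpers live in rte_mempool.h):
 *
 *	struct rte_mempool_cache *c;
 *	void *objs[32];
 *
 *	c = rte_mempool_cache_create(256, SOCKET_ID_ANY);
 *	if (c == NULL)
 *		return -rte_errno;
 *	if (rte_mempool_generic_get(mp, objs, 32, c) == 0) {
 *		... use the objects ...
 *		rte_mempool_generic_put(mp, objs, 32, c);
 *	}
 *	rte_mempool_cache_flush(c, mp);
 *	rte_mempool_cache_free(c);
 *
 * The flush returns any objects still held in the cache to the pool before
 * the cache itself is freed.
 */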
800 
801 /* create an empty mempool */
802 struct rte_mempool *
803 rte_mempool_create_empty(const char *name, unsigned n, unsigned elt_size,
804 	unsigned cache_size, unsigned private_data_size,
805 	int socket_id, unsigned flags)
806 {
807 	char mz_name[RTE_MEMZONE_NAMESIZE];
808 	struct rte_mempool_list *mempool_list;
809 	struct rte_mempool *mp = NULL;
810 	struct rte_tailq_entry *te = NULL;
811 	const struct rte_memzone *mz = NULL;
812 	size_t mempool_size;
813 	unsigned int mz_flags = RTE_MEMZONE_1GB|RTE_MEMZONE_SIZE_HINT_ONLY;
814 	struct rte_mempool_objsz objsz;
815 	unsigned lcore_id;
816 	int ret;
817 
818 	/* compile-time checks */
819 	RTE_BUILD_BUG_ON((sizeof(struct rte_mempool) &
820 			  RTE_CACHE_LINE_MASK) != 0);
821 	RTE_BUILD_BUG_ON((sizeof(struct rte_mempool_cache) &
822 			  RTE_CACHE_LINE_MASK) != 0);
823 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
824 	RTE_BUILD_BUG_ON((sizeof(struct rte_mempool_debug_stats) &
825 			  RTE_CACHE_LINE_MASK) != 0);
826 	RTE_BUILD_BUG_ON((offsetof(struct rte_mempool, stats) &
827 			  RTE_CACHE_LINE_MASK) != 0);
828 #endif
829 
830 	mempool_list = RTE_TAILQ_CAST(rte_mempool_tailq.head, rte_mempool_list);
831 
832 	/* asked for zero items */
833 	if (n == 0) {
834 		rte_errno = EINVAL;
835 		return NULL;
836 	}
837 
838 	/* requested cache size is too big */
839 	if (cache_size > RTE_MEMPOOL_CACHE_MAX_SIZE ||
840 	    CALC_CACHE_FLUSHTHRESH(cache_size) > n) {
841 		rte_errno = EINVAL;
842 		return NULL;
843 	}
844 
845 	/* enforce only user flags are passed by the application */
846 	if ((flags & ~RTE_MEMPOOL_VALID_USER_FLAGS) != 0) {
847 		rte_errno = EINVAL;
848 		return NULL;
849 	}
850 
851 	/*
852 	 * No objects in the pool can be used for IO until it's populated
853 	 * with at least some objects with valid IOVA.
854 	 */
855 	flags |= RTE_MEMPOOL_F_NON_IO;
856 
857 	/* "no cache align" implies "no spread" */
858 	if (flags & RTE_MEMPOOL_F_NO_CACHE_ALIGN)
859 		flags |= RTE_MEMPOOL_F_NO_SPREAD;
860 
861 	/* calculate mempool object sizes. */
862 	if (!rte_mempool_calc_obj_size(elt_size, flags, &objsz)) {
863 		rte_errno = EINVAL;
864 		return NULL;
865 	}
866 
867 	rte_mcfg_mempool_write_lock();
868 
869 	/*
870 	 * reserve a memory zone for this mempool: private data is
871 	 * cache-aligned
872 	 */
873 	private_data_size = (private_data_size +
874 			     RTE_MEMPOOL_ALIGN_MASK) & (~RTE_MEMPOOL_ALIGN_MASK);
875 
876 
877 	/* try to allocate tailq entry */
878 	te = rte_zmalloc("MEMPOOL_TAILQ_ENTRY", sizeof(*te), 0);
879 	if (te == NULL) {
880 		RTE_LOG(ERR, MEMPOOL, "Cannot allocate tailq entry!\n");
881 		goto exit_unlock;
882 	}
883 
884 	mempool_size = RTE_MEMPOOL_HEADER_SIZE(mp, cache_size);
885 	mempool_size += private_data_size;
886 	mempool_size = RTE_ALIGN_CEIL(mempool_size, RTE_MEMPOOL_ALIGN);
887 
888 	ret = snprintf(mz_name, sizeof(mz_name), RTE_MEMPOOL_MZ_FORMAT, name);
889 	if (ret < 0 || ret >= (int)sizeof(mz_name)) {
890 		rte_errno = ENAMETOOLONG;
891 		goto exit_unlock;
892 	}
893 
894 	mz = rte_memzone_reserve(mz_name, mempool_size, socket_id, mz_flags);
895 	if (mz == NULL)
896 		goto exit_unlock;
897 
898 	/* init the mempool structure */
899 	mp = mz->addr;
900 	memset(mp, 0, RTE_MEMPOOL_HEADER_SIZE(mp, cache_size));
901 	ret = strlcpy(mp->name, name, sizeof(mp->name));
902 	if (ret < 0 || ret >= (int)sizeof(mp->name)) {
903 		rte_errno = ENAMETOOLONG;
904 		goto exit_unlock;
905 	}
906 	mp->mz = mz;
907 	mp->size = n;
908 	mp->flags = flags;
909 	mp->socket_id = socket_id;
910 	mp->elt_size = objsz.elt_size;
911 	mp->header_size = objsz.header_size;
912 	mp->trailer_size = objsz.trailer_size;
913 	/* Size of default caches, zero means disabled. */
914 	mp->cache_size = cache_size;
915 	mp->private_data_size = private_data_size;
916 	STAILQ_INIT(&mp->elt_list);
917 	STAILQ_INIT(&mp->mem_list);
918 
919 	/*
920 	 * local_cache pointer is set even if cache_size is zero.
921 	 * The local_cache points to just past the elt_pa[] array.
922 	 */
923 	mp->local_cache = (struct rte_mempool_cache *)
924 		RTE_PTR_ADD(mp, RTE_MEMPOOL_HEADER_SIZE(mp, 0));
925 
926 	/* Init all default caches. */
927 	if (cache_size != 0) {
928 		for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++)
929 			mempool_cache_init(&mp->local_cache[lcore_id],
930 					   cache_size);
931 	}
932 
933 	te->data = mp;
934 
935 	rte_mcfg_tailq_write_lock();
936 	TAILQ_INSERT_TAIL(mempool_list, te, next);
937 	rte_mcfg_tailq_write_unlock();
938 	rte_mcfg_mempool_write_unlock();
939 
940 	rte_mempool_trace_create_empty(name, n, elt_size, cache_size,
941 		private_data_size, flags, mp);
942 	return mp;
943 
944 exit_unlock:
945 	rte_mcfg_mempool_write_unlock();
946 	rte_free(te);
947 	rte_mempool_free(mp);
948 	return NULL;
949 }
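/*
 * The usual "manual" creation flow built on top of the function above
 * (illustrative sketch; my_obj_init is a made-up object initializer):
 *
 *	struct rte_mempool *mp;
 *
 *	mp = rte_mempool_create_empty("my_pool", 8192, 2048, 256, 0,
 *				      SOCKET_ID_ANY, 0);
 *	if (mp == NULL)
 *		return -rte_errno;
 *	rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL);
 *	if (rte_mempool_populate_default(mp) < 0)
 *		return -rte_errno;
 *	rte_mempool_obj_iter(mp, my_obj_init, NULL);
 */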
950 
951 /* create the mempool */
952 struct rte_mempool *
953 rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
954 	unsigned cache_size, unsigned private_data_size,
955 	rte_mempool_ctor_t *mp_init, void *mp_init_arg,
956 	rte_mempool_obj_cb_t *obj_init, void *obj_init_arg,
957 	int socket_id, unsigned flags)
958 {
959 	int ret;
960 	struct rte_mempool *mp;
961 
962 	mp = rte_mempool_create_empty(name, n, elt_size, cache_size,
963 		private_data_size, socket_id, flags);
964 	if (mp == NULL)
965 		return NULL;
966 
967 	/*
968 	 * Since we have 4 combinations of the SP/SC/MP/MC flags, examine them
969 	 * to set the correct index into the table of ops structs.
970 	 */
971 	if ((flags & RTE_MEMPOOL_F_SP_PUT) && (flags & RTE_MEMPOOL_F_SC_GET))
972 		ret = rte_mempool_set_ops_byname(mp, "ring_sp_sc", NULL);
973 	else if (flags & RTE_MEMPOOL_F_SP_PUT)
974 		ret = rte_mempool_set_ops_byname(mp, "ring_sp_mc", NULL);
975 	else if (flags & RTE_MEMPOOL_F_SC_GET)
976 		ret = rte_mempool_set_ops_byname(mp, "ring_mp_sc", NULL);
977 	else
978 		ret = rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL);
979 
980 	if (ret)
981 		goto fail;
982 
983 	/* call the mempool priv initializer */
984 	if (mp_init)
985 		mp_init(mp, mp_init_arg);
986 
987 	if (rte_mempool_populate_default(mp) < 0)
988 		goto fail;
989 
990 	/* call the object initializers */
991 	if (obj_init)
992 		rte_mempool_obj_iter(mp, obj_init, obj_init_arg);
993 
994 	rte_mempool_trace_create(name, n, elt_size, cache_size,
995 		private_data_size, mp_init, mp_init_arg, obj_init,
996 		obj_init_arg, flags, mp);
997 	return mp;
998 
999  fail:
1000 	rte_mempool_free(mp);
1001 	return NULL;
1002 }
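/*
 * One-shot creation and basic get/put usage (illustrative sketch only; the
 * pool name and sizes are arbitrary):
 *
 *	struct rte_mempool *mp;
 *	void *obj;
 *
 *	mp = rte_mempool_create("my_pool", 8192, 2048, 256, 0,
 *				NULL, NULL, NULL, NULL,
 *				SOCKET_ID_ANY, 0);
 *	if (mp != NULL && rte_mempool_get(mp, &obj) == 0) {
 *		... use obj ...
 *		rte_mempool_put(mp, obj);
 *	}
 */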
1003 
1004 /* Return the number of available (free) entries in the mempool */
1005 unsigned int
1006 rte_mempool_avail_count(const struct rte_mempool *mp)
1007 {
1008 	unsigned count;
1009 	unsigned lcore_id;
1010 
1011 	count = rte_mempool_ops_get_count(mp);
1012 
1013 	if (mp->cache_size == 0)
1014 		return count;
1015 
1016 	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++)
1017 		count += mp->local_cache[lcore_id].len;
1018 
1019 	/*
1020 	 * due to a race condition (access to len is not locked), the
1021 	 * total can be greater than size... so clamp the result
1022 	 */
1023 	if (count > mp->size)
1024 		return mp->size;
1025 	return count;
1026 }
1027 
1028 /* return the number of entries allocated from the mempool */
1029 unsigned int
1030 rte_mempool_in_use_count(const struct rte_mempool *mp)
1031 {
1032 	return mp->size - rte_mempool_avail_count(mp);
1033 }
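/*
 * Note: modulo the per-lcore cache race mentioned above,
 * rte_mempool_avail_count(mp) + rte_mempool_in_use_count(mp) == mp->size,
 * since the in-use count is simply size minus the available count.
 */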
1034 
1035 /* dump the cache status */
1036 static unsigned
1037 rte_mempool_dump_cache(FILE *f, const struct rte_mempool *mp)
1038 {
1039 	unsigned lcore_id;
1040 	unsigned count = 0;
1041 	unsigned cache_count;
1042 
1043 	fprintf(f, "  internal cache infos:\n");
1044 	fprintf(f, "    cache_size=%"PRIu32"\n", mp->cache_size);
1045 
1046 	if (mp->cache_size == 0)
1047 		return count;
1048 
1049 	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
1050 		cache_count = mp->local_cache[lcore_id].len;
1051 		fprintf(f, "    cache_count[%u]=%"PRIu32"\n",
1052 			lcore_id, cache_count);
1053 		count += cache_count;
1054 	}
1055 	fprintf(f, "    total_cache_count=%u\n", count);
1056 	return count;
1057 }
1058 
1059 #ifndef __INTEL_COMPILER
1060 #pragma GCC diagnostic ignored "-Wcast-qual"
1061 #endif
1062 
1063 /* check and update cookies or panic (internal) */
1064 void rte_mempool_check_cookies(const struct rte_mempool *mp,
1065 	void * const *obj_table_const, unsigned n, int free)
1066 {
1067 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
1068 	struct rte_mempool_objhdr *hdr;
1069 	struct rte_mempool_objtlr *tlr;
1070 	uint64_t cookie;
1071 	void *tmp;
1072 	void *obj;
1073 	void **obj_table;
1074 
1075 	/* Forcibly drop the "const" qualifier. This is done only when
1076 	 * DEBUG is enabled. */
1077 	tmp = (void *) obj_table_const;
1078 	obj_table = tmp;
1079 
1080 	while (n--) {
1081 		obj = obj_table[n];
1082 
1083 		if (rte_mempool_from_obj(obj) != mp)
1084 			rte_panic("MEMPOOL: object is owned by another "
1085 				  "mempool\n");
1086 
1087 		hdr = rte_mempool_get_header(obj);
1088 		cookie = hdr->cookie;
1089 
1090 		if (free == 0) {
1091 			if (cookie != RTE_MEMPOOL_HEADER_COOKIE1) {
1092 				RTE_LOG(CRIT, MEMPOOL,
1093 					"obj=%p, mempool=%p, cookie=%" PRIx64 "\n",
1094 					obj, (const void *) mp, cookie);
1095 				rte_panic("MEMPOOL: bad header cookie (put)\n");
1096 			}
1097 			hdr->cookie = RTE_MEMPOOL_HEADER_COOKIE2;
1098 		} else if (free == 1) {
1099 			if (cookie != RTE_MEMPOOL_HEADER_COOKIE2) {
1100 				RTE_LOG(CRIT, MEMPOOL,
1101 					"obj=%p, mempool=%p, cookie=%" PRIx64 "\n",
1102 					obj, (const void *) mp, cookie);
1103 				rte_panic("MEMPOOL: bad header cookie (get)\n");
1104 			}
1105 			hdr->cookie = RTE_MEMPOOL_HEADER_COOKIE1;
1106 		} else if (free == 2) {
1107 			if (cookie != RTE_MEMPOOL_HEADER_COOKIE1 &&
1108 			    cookie != RTE_MEMPOOL_HEADER_COOKIE2) {
1109 				RTE_LOG(CRIT, MEMPOOL,
1110 					"obj=%p, mempool=%p, cookie=%" PRIx64 "\n",
1111 					obj, (const void *) mp, cookie);
1112 				rte_panic("MEMPOOL: bad header cookie (audit)\n");
1113 			}
1114 		}
1115 		tlr = rte_mempool_get_trailer(obj);
1116 		cookie = tlr->cookie;
1117 		if (cookie != RTE_MEMPOOL_TRAILER_COOKIE) {
1118 			RTE_LOG(CRIT, MEMPOOL,
1119 				"obj=%p, mempool=%p, cookie=%" PRIx64 "\n",
1120 				obj, (const void *) mp, cookie);
1121 			rte_panic("MEMPOOL: bad trailer cookie\n");
1122 		}
1123 	}
1124 #else
1125 	RTE_SET_USED(mp);
1126 	RTE_SET_USED(obj_table_const);
1127 	RTE_SET_USED(n);
1128 	RTE_SET_USED(free);
1129 #endif
1130 }
1131 
1132 void
1133 rte_mempool_contig_blocks_check_cookies(const struct rte_mempool *mp,
1134 	void * const *first_obj_table_const, unsigned int n, int free)
1135 {
1136 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
1137 	struct rte_mempool_info info;
1138 	const size_t total_elt_sz =
1139 		mp->header_size + mp->elt_size + mp->trailer_size;
1140 	unsigned int i, j;
1141 
1142 	rte_mempool_ops_get_info(mp, &info);
1143 
1144 	for (i = 0; i < n; ++i) {
1145 		void *first_obj = first_obj_table_const[i];
1146 
1147 		for (j = 0; j < info.contig_block_size; ++j) {
1148 			void *obj;
1149 
1150 			obj = (void *)((uintptr_t)first_obj + j * total_elt_sz);
1151 			rte_mempool_check_cookies(mp, &obj, 1, free);
1152 		}
1153 	}
1154 #else
1155 	RTE_SET_USED(mp);
1156 	RTE_SET_USED(first_obj_table_const);
1157 	RTE_SET_USED(n);
1158 	RTE_SET_USED(free);
1159 #endif
1160 }
1161 
1162 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
1163 static void
1164 mempool_obj_audit(struct rte_mempool *mp, __rte_unused void *opaque,
1165 	void *obj, __rte_unused unsigned idx)
1166 {
1167 	RTE_MEMPOOL_CHECK_COOKIES(mp, &obj, 1, 2);
1168 }
1169 
1170 static void
1171 mempool_audit_cookies(struct rte_mempool *mp)
1172 {
1173 	unsigned num;
1174 
1175 	num = rte_mempool_obj_iter(mp, mempool_obj_audit, NULL);
1176 	if (num != mp->size) {
1177 		rte_panic("rte_mempool_obj_iter(mempool=%p, size=%u) "
1178 			"iterated only over %u elements\n",
1179 			mp, mp->size, num);
1180 	}
1181 }
1182 #else
1183 #define mempool_audit_cookies(mp) do {} while(0)
1184 #endif
1185 
1186 #ifndef __INTEL_COMPILER
1187 #pragma GCC diagnostic error "-Wcast-qual"
1188 #endif
1189 
1190 /* check the consistency of the per-lcore caches */
1191 static void
1192 mempool_audit_cache(const struct rte_mempool *mp)
1193 {
1194 	/* check cache size consistency */
1195 	unsigned lcore_id;
1196 
1197 	if (mp->cache_size == 0)
1198 		return;
1199 
1200 	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
1201 		const struct rte_mempool_cache *cache;
1202 		cache = &mp->local_cache[lcore_id];
1203 		if (cache->len > RTE_DIM(cache->objs)) {
1204 			RTE_LOG(CRIT, MEMPOOL, "badness on cache[%u]\n",
1205 				lcore_id);
1206 			rte_panic("MEMPOOL: invalid cache len\n");
1207 		}
1208 	}
1209 }
1210 
1211 /* check the consistency of mempool (size, cookies, ...) */
1212 void
1213 rte_mempool_audit(struct rte_mempool *mp)
1214 {
1215 	mempool_audit_cache(mp);
1216 	mempool_audit_cookies(mp);
1217 
1218 	/* For case where mempool DEBUG is not set, and cache size is 0 */
1219 	RTE_SET_USED(mp);
1220 }
1221 
1222 /* dump the status of the mempool on the console */
1223 void
1224 rte_mempool_dump(FILE *f, struct rte_mempool *mp)
1225 {
1226 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
1227 	struct rte_mempool_info info;
1228 	struct rte_mempool_debug_stats sum;
1229 	unsigned lcore_id;
1230 #endif
1231 	struct rte_mempool_memhdr *memhdr;
1232 	struct rte_mempool_ops *ops;
1233 	unsigned common_count;
1234 	unsigned cache_count;
1235 	size_t mem_len = 0;
1236 
1237 	RTE_ASSERT(f != NULL);
1238 	RTE_ASSERT(mp != NULL);
1239 
1240 	fprintf(f, "mempool <%s>@%p\n", mp->name, mp);
1241 	fprintf(f, "  flags=%x\n", mp->flags);
1242 	fprintf(f, "  socket_id=%d\n", mp->socket_id);
1243 	fprintf(f, "  pool=%p\n", mp->pool_data);
1244 	fprintf(f, "  iova=0x%" PRIx64 "\n", mp->mz->iova);
1245 	fprintf(f, "  nb_mem_chunks=%u\n", mp->nb_mem_chunks);
1246 	fprintf(f, "  size=%"PRIu32"\n", mp->size);
1247 	fprintf(f, "  populated_size=%"PRIu32"\n", mp->populated_size);
1248 	fprintf(f, "  header_size=%"PRIu32"\n", mp->header_size);
1249 	fprintf(f, "  elt_size=%"PRIu32"\n", mp->elt_size);
1250 	fprintf(f, "  trailer_size=%"PRIu32"\n", mp->trailer_size);
1251 	fprintf(f, "  total_obj_size=%"PRIu32"\n",
1252 	       mp->header_size + mp->elt_size + mp->trailer_size);
1253 
1254 	fprintf(f, "  private_data_size=%"PRIu32"\n", mp->private_data_size);
1255 
1256 	fprintf(f, "  ops_index=%d\n", mp->ops_index);
1257 	ops = rte_mempool_get_ops(mp->ops_index);
1258 	fprintf(f, "  ops_name: <%s>\n", (ops != NULL) ? ops->name : "NA");
1259 
1260 	STAILQ_FOREACH(memhdr, &mp->mem_list, next)
1261 		mem_len += memhdr->len;
1262 	if (mem_len != 0) {
1263 		fprintf(f, "  avg bytes/object=%#Lf\n",
1264 			(long double)mem_len / mp->size);
1265 	}
1266 
1267 	cache_count = rte_mempool_dump_cache(f, mp);
1268 	common_count = rte_mempool_ops_get_count(mp);
1269 	if ((cache_count + common_count) > mp->size)
1270 		common_count = mp->size - cache_count;
1271 	fprintf(f, "  common_pool_count=%u\n", common_count);
1272 
1273 	/* sum and dump statistics */
1274 #ifdef RTE_LIBRTE_MEMPOOL_DEBUG
1275 	rte_mempool_ops_get_info(mp, &info);
1276 	memset(&sum, 0, sizeof(sum));
1277 	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
1278 		sum.put_bulk += mp->stats[lcore_id].put_bulk;
1279 		sum.put_objs += mp->stats[lcore_id].put_objs;
1280 		sum.put_common_pool_bulk += mp->stats[lcore_id].put_common_pool_bulk;
1281 		sum.put_common_pool_objs += mp->stats[lcore_id].put_common_pool_objs;
1282 		sum.get_common_pool_bulk += mp->stats[lcore_id].get_common_pool_bulk;
1283 		sum.get_common_pool_objs += mp->stats[lcore_id].get_common_pool_objs;
1284 		sum.get_success_bulk += mp->stats[lcore_id].get_success_bulk;
1285 		sum.get_success_objs += mp->stats[lcore_id].get_success_objs;
1286 		sum.get_fail_bulk += mp->stats[lcore_id].get_fail_bulk;
1287 		sum.get_fail_objs += mp->stats[lcore_id].get_fail_objs;
1288 		sum.get_success_blks += mp->stats[lcore_id].get_success_blks;
1289 		sum.get_fail_blks += mp->stats[lcore_id].get_fail_blks;
1290 	}
1291 	fprintf(f, "  stats:\n");
1292 	fprintf(f, "    put_bulk=%"PRIu64"\n", sum.put_bulk);
1293 	fprintf(f, "    put_objs=%"PRIu64"\n", sum.put_objs);
1294 	fprintf(f, "    put_common_pool_bulk=%"PRIu64"\n", sum.put_common_pool_bulk);
1295 	fprintf(f, "    put_common_pool_objs=%"PRIu64"\n", sum.put_common_pool_objs);
1296 	fprintf(f, "    get_common_pool_bulk=%"PRIu64"\n", sum.get_common_pool_bulk);
1297 	fprintf(f, "    get_common_pool_objs=%"PRIu64"\n", sum.get_common_pool_objs);
1298 	fprintf(f, "    get_success_bulk=%"PRIu64"\n", sum.get_success_bulk);
1299 	fprintf(f, "    get_success_objs=%"PRIu64"\n", sum.get_success_objs);
1300 	fprintf(f, "    get_fail_bulk=%"PRIu64"\n", sum.get_fail_bulk);
1301 	fprintf(f, "    get_fail_objs=%"PRIu64"\n", sum.get_fail_objs);
1302 	if (info.contig_block_size > 0) {
1303 		fprintf(f, "    get_success_blks=%"PRIu64"\n",
1304 			sum.get_success_blks);
1305 		fprintf(f, "    get_fail_blks=%"PRIu64"\n", sum.get_fail_blks);
1306 	}
1307 #else
1308 	fprintf(f, "  no statistics available\n");
1309 #endif
1310 
1311 	rte_mempool_audit(mp);
1312 }
1313 
1314 /* dump the status of all mempools on the console */
1315 void
1316 rte_mempool_list_dump(FILE *f)
1317 {
1318 	struct rte_mempool *mp = NULL;
1319 	struct rte_tailq_entry *te;
1320 	struct rte_mempool_list *mempool_list;
1321 
1322 	mempool_list = RTE_TAILQ_CAST(rte_mempool_tailq.head, rte_mempool_list);
1323 
1324 	rte_mcfg_mempool_read_lock();
1325 
1326 	TAILQ_FOREACH(te, mempool_list, next) {
1327 		mp = (struct rte_mempool *) te->data;
1328 		rte_mempool_dump(f, mp);
1329 	}
1330 
1331 	rte_mcfg_mempool_read_unlock();
1332 }
1333 
1334 /* search a mempool from its name */
1335 struct rte_mempool *
1336 rte_mempool_lookup(const char *name)
1337 {
1338 	struct rte_mempool *mp = NULL;
1339 	struct rte_tailq_entry *te;
1340 	struct rte_mempool_list *mempool_list;
1341 
1342 	mempool_list = RTE_TAILQ_CAST(rte_mempool_tailq.head, rte_mempool_list);
1343 
1344 	rte_mcfg_mempool_read_lock();
1345 
1346 	TAILQ_FOREACH(te, mempool_list, next) {
1347 		mp = (struct rte_mempool *) te->data;
1348 		if (strncmp(name, mp->name, RTE_MEMPOOL_NAMESIZE) == 0)
1349 			break;
1350 	}
1351 
1352 	rte_mcfg_mempool_read_unlock();
1353 
1354 	if (te == NULL) {
1355 		rte_errno = ENOENT;
1356 		return NULL;
1357 	}
1358 
1359 	return mp;
1360 }
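/*
 * Illustrative lookup-and-dump sketch (the pool name is hypothetical):
 *
 *	struct rte_mempool *mp = rte_mempool_lookup("my_pool");
 *
 *	if (mp == NULL)
 *		printf("not found: %s\n", rte_strerror(rte_errno));
 *	else
 *		rte_mempool_dump(stdout, mp);
 */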
1361 
1362 void rte_mempool_walk(void (*func)(struct rte_mempool *, void *),
1363 		      void *arg)
1364 {
1365 	struct rte_tailq_entry *te = NULL;
1366 	struct rte_mempool_list *mempool_list;
1367 	void *tmp_te;
1368 
1369 	mempool_list = RTE_TAILQ_CAST(rte_mempool_tailq.head, rte_mempool_list);
1370 
1371 	rte_mcfg_mempool_read_lock();
1372 
1373 	RTE_TAILQ_FOREACH_SAFE(te, mempool_list, next, tmp_te) {
1374 		(*func)((struct rte_mempool *) te->data, arg);
1375 	}
1376 
1377 	rte_mcfg_mempool_read_unlock();
1378 }
1379 
1380 struct mempool_callback_data {
1381 	rte_mempool_event_callback *func;
1382 	void *user_data;
1383 };
1384 
1385 static void
1386 mempool_event_callback_invoke(enum rte_mempool_event event,
1387 			      struct rte_mempool *mp)
1388 {
1389 	struct mempool_callback_list *list;
1390 	struct rte_tailq_entry *te;
1391 	void *tmp_te;
1392 
1393 	rte_mcfg_tailq_read_lock();
1394 	list = RTE_TAILQ_CAST(callback_tailq.head, mempool_callback_list);
1395 	RTE_TAILQ_FOREACH_SAFE(te, list, next, tmp_te) {
1396 		struct mempool_callback_data *cb = te->data;
1397 		rte_mcfg_tailq_read_unlock();
1398 		cb->func(event, mp, cb->user_data);
1399 		rte_mcfg_tailq_read_lock();
1400 	}
1401 	rte_mcfg_tailq_read_unlock();
1402 }
1403 
1404 int
1405 rte_mempool_event_callback_register(rte_mempool_event_callback *func,
1406 				    void *user_data)
1407 {
1408 	struct mempool_callback_list *list;
1409 	struct rte_tailq_entry *te = NULL;
1410 	struct mempool_callback_data *cb;
1411 	void *tmp_te;
1412 	int ret;
1413 
1414 	if (func == NULL) {
1415 		rte_errno = EINVAL;
1416 		return -rte_errno;
1417 	}
1418 
1419 	rte_mcfg_tailq_write_lock();
1420 	list = RTE_TAILQ_CAST(callback_tailq.head, mempool_callback_list);
1421 	RTE_TAILQ_FOREACH_SAFE(te, list, next, tmp_te) {
1422 		cb = te->data;
1423 		if (cb->func == func && cb->user_data == user_data) {
1424 			ret = -EEXIST;
1425 			goto exit;
1426 		}
1427 	}
1428 
1429 	te = rte_zmalloc("mempool_cb_tail_entry", sizeof(*te), 0);
1430 	if (te == NULL) {
1431 		RTE_LOG(ERR, MEMPOOL,
1432 			"Cannot allocate event callback tailq entry!\n");
1433 		ret = -ENOMEM;
1434 		goto exit;
1435 	}
1436 
1437 	cb = rte_malloc("mempool_cb_data", sizeof(*cb), 0);
1438 	if (cb == NULL) {
1439 		RTE_LOG(ERR, MEMPOOL,
1440 			"Cannot allocate event callback!\n");
1441 		rte_free(te);
1442 		ret = -ENOMEM;
1443 		goto exit;
1444 	}
1445 
1446 	cb->func = func;
1447 	cb->user_data = user_data;
1448 	te->data = cb;
1449 	TAILQ_INSERT_TAIL(list, te, next);
1450 	ret = 0;
1451 
1452 exit:
1453 	rte_mcfg_tailq_write_unlock();
1454 	rte_errno = -ret;
1455 	return ret;
1456 }
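/*
 * Sketch of a mempool event callback and its registration (illustrative;
 * the callback body is made up). Callbacks are invoked with
 * RTE_MEMPOOL_EVENT_READY and RTE_MEMPOOL_EVENT_DESTROY, see
 * mempool_event_callback_invoke() above.
 *
 *	static void
 *	my_mp_event(enum rte_mempool_event event, struct rte_mempool *mp,
 *		    void *user_data __rte_unused)
 *	{
 *		if (event == RTE_MEMPOOL_EVENT_READY)
 *			printf("mempool %s is fully populated\n", mp->name);
 *	}
 *
 *	...
 *	rte_mempool_event_callback_register(my_mp_event, NULL);
 */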
1457 
1458 int
1459 rte_mempool_event_callback_unregister(rte_mempool_event_callback *func,
1460 				      void *user_data)
1461 {
1462 	struct mempool_callback_list *list;
1463 	struct rte_tailq_entry *te = NULL;
1464 	struct mempool_callback_data *cb;
1465 	int ret = -ENOENT;
1466 
1467 	rte_mcfg_tailq_write_lock();
1468 	list = RTE_TAILQ_CAST(callback_tailq.head, mempool_callback_list);
1469 	TAILQ_FOREACH(te, list, next) {
1470 		cb = te->data;
1471 		if (cb->func == func && cb->user_data == user_data) {
1472 			TAILQ_REMOVE(list, te, next);
1473 			ret = 0;
1474 			break;
1475 		}
1476 	}
1477 	rte_mcfg_tailq_write_unlock();
1478 
1479 	if (ret == 0) {
1480 		rte_free(te);
1481 		rte_free(cb);
1482 	}
1483 	rte_errno = -ret;
1484 	return ret;
1485 }
1486