/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <inttypes.h>
#include <stdarg.h>
#include <errno.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_eal_paging.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_errno.h>
#include <rte_memory.h>
#include <rte_launch.h>
#include <rte_cycles.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_spinlock.h>
#include <rte_malloc.h>
#include <rte_mbuf_pool_ops.h>
#include <rte_mbuf.h>

#include "test.h"

/*
 * Mempool
 * =======
 *
 * Basic tests: done on one core with and without cache:
 *
 *    - Get one object, put one object
 *    - Get two objects, put two objects
 *    - Get all objects, test that their content is not modified and
 *      put them back in the pool.
 */

#define MEMPOOL_ELT_SIZE 2048
#define MAX_KEEP 16
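/*
 * Size the pool so that each lcore can hold MAX_KEEP objects plus a
 * completely filled default cache at once; the -1 presumably keeps the
 * size from being a round multiple (the original code does not document
 * the rationale).
 */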
#define MEMPOOL_SIZE ((rte_lcore_count()*(MAX_KEEP+RTE_MEMPOOL_CACHE_MAX_SIZE))-1)

#define LOG_ERR() printf("test failed at %s():%d\n", __func__, __LINE__)
#define RET_ERR() do { \
		LOG_ERR(); \
		return -1; \
	} while (0)
#define GOTO_ERR(var, label) do { \
		LOG_ERR(); \
		var = -1; \
		goto label; \
	} while (0)

/*
 * save the object number in the first 4 bytes of object data. All
 * other bytes are set to 0.
 */
static void
my_obj_init(struct rte_mempool *mp, __rte_unused void *arg,
	    void *obj, unsigned i)
{
	uint32_t *objnum = obj;

	memset(obj, 0, mp->elt_size);
	*objnum = i;
}

/* basic tests (done on one core) */
static int
test_mempool_basic(struct rte_mempool *mp, int use_external_cache)
{
	uint32_t *objnum;
	void **objtable;
	void *obj, *obj2;
	char *obj_data;
	int ret = 0;
	unsigned i, j;
	int offset;
	struct rte_mempool_cache *cache;

	if (use_external_cache) {
		/* Create a user-owned mempool cache. */
		cache = rte_mempool_cache_create(RTE_MEMPOOL_CACHE_MAX_SIZE,
						 SOCKET_ID_ANY);
		if (cache == NULL)
			RET_ERR();
	} else {
		/* May be NULL if cache is disabled. */
		cache = rte_mempool_default_cache(mp, rte_lcore_id());
	}
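	/*
	 * Note: the generic get/put calls below take the cache explicitly;
	 * a NULL cache makes them bypass caching and work directly on the
	 * shared pool, so one code path covers both configurations.
	 */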

	/* dump the mempool status */
	rte_mempool_dump(stdout, mp);

	printf("get an object\n");
	if (rte_mempool_generic_get(mp, &obj, 1, cache) < 0)
		GOTO_ERR(ret, out);
	rte_mempool_dump(stdout, mp);

	/* tests that improve coverage */
	printf("get object count\n");
	/* We have to count the extra caches, one in this case. */
	offset = use_external_cache ? 1 * cache->len : 0;
	if (rte_mempool_avail_count(mp) + offset != MEMPOOL_SIZE - 1)
		GOTO_ERR(ret, out);

	printf("get private data\n");
	if (rte_mempool_get_priv(mp) != (char *)mp +
			RTE_MEMPOOL_HEADER_SIZE(mp, mp->cache_size))
		GOTO_ERR(ret, out);

#ifndef RTE_EXEC_ENV_FREEBSD /* rte_mem_virt2iova() not supported on FreeBSD */
	printf("get physical address of an object\n");
	if (rte_mempool_virt2iova(obj) != rte_mem_virt2iova(obj))
		GOTO_ERR(ret, out);
#endif

	printf("put the object back\n");
	rte_mempool_generic_put(mp, &obj, 1, cache);
	rte_mempool_dump(stdout, mp);

	printf("get 2 objects\n");
	if (rte_mempool_generic_get(mp, &obj, 1, cache) < 0)
		GOTO_ERR(ret, out);
	if (rte_mempool_generic_get(mp, &obj2, 1, cache) < 0) {
		rte_mempool_generic_put(mp, &obj, 1, cache);
		GOTO_ERR(ret, out);
	}
	rte_mempool_dump(stdout, mp);

	printf("put the objects back\n");
	rte_mempool_generic_put(mp, &obj, 1, cache);
	rte_mempool_generic_put(mp, &obj2, 1, cache);
	rte_mempool_dump(stdout, mp);

	/*
	 * get many objects: we cannot get them all because the cache
	 * on other cores may not be empty.
	 */
	objtable = malloc(MEMPOOL_SIZE * sizeof(void *));
	if (objtable == NULL)
		GOTO_ERR(ret, out);

	for (i = 0; i < MEMPOOL_SIZE; i++) {
		if (rte_mempool_generic_get(mp, &objtable[i], 1, cache) < 0)
			break;
	}

	/*
	 * for each object, check that its content was not modified,
	 * and put objects back in pool
	 */
	while (i--) {
		obj = objtable[i];
		obj_data = obj;
		objnum = obj;
		if (*objnum > MEMPOOL_SIZE) {
			printf("bad object number(%u)\n", *objnum);
			ret = -1;
			break;
		}
		for (j = sizeof(*objnum); j < mp->elt_size; j++) {
			if (obj_data[j] != 0)
				ret = -1;
		}

		rte_mempool_generic_put(mp, &objtable[i], 1, cache);
	}

	free(objtable);
	if (ret == -1)
		printf("objects were modified!\n");

out:
	if (use_external_cache) {
		rte_mempool_cache_flush(cache, mp);
		rte_mempool_cache_free(cache);
	}

	return ret;
}

static int test_mempool_creation_with_exceeded_cache_size(void)
{
	struct rte_mempool *mp_cov;

	mp_cov = rte_mempool_create("test_mempool_cache_too_big",
		MEMPOOL_SIZE,
		MEMPOOL_ELT_SIZE,
		RTE_MEMPOOL_CACHE_MAX_SIZE + 32, 0,
		NULL, NULL,
		my_obj_init, NULL,
		SOCKET_ID_ANY, 0);

	if (mp_cov != NULL) {
		rte_mempool_free(mp_cov);
		RET_ERR();
	}

	return 0;
}

static int test_mempool_creation_with_invalid_flags(void)
{
	struct rte_mempool *mp_cov;

	mp_cov = rte_mempool_create("test_mempool_invalid_flags", MEMPOOL_SIZE,
		MEMPOOL_ELT_SIZE, 0, 0,
		NULL, NULL,
		NULL, NULL,
		SOCKET_ID_ANY, ~RTE_MEMPOOL_VALID_USER_FLAGS);

	if (mp_cov != NULL) {
		rte_mempool_free(mp_cov);
		RET_ERR();
	}

	return 0;
}

static struct rte_mempool *mp_spsc;
static rte_spinlock_t scsp_spinlock;
static void *scsp_obj_table[MAX_KEEP];
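/*
 * scsp_obj_table is the exchange buffer between the two lcores: the
 * consumer takes objects from the pool and stores them in empty slots,
 * while the producer returns stored objects to the pool and clears the
 * slots; scsp_spinlock serializes access to the table.
 */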

/*
 * single producer function
 */
static int test_mempool_single_producer(void)
{
	unsigned int i;
	void *obj = NULL;
	uint64_t start_cycles, end_cycles;
	uint64_t duration = rte_get_timer_hz() / 4;

	start_cycles = rte_get_timer_cycles();
	while (1) {
		end_cycles = rte_get_timer_cycles();
		/* duration has elapsed, stop producing */
		if (start_cycles + duration < end_cycles)
			break;
		rte_spinlock_lock(&scsp_spinlock);
		for (i = 0; i < MAX_KEEP; i++) {
			if (NULL != scsp_obj_table[i]) {
				obj = scsp_obj_table[i];
				break;
			}
		}
		rte_spinlock_unlock(&scsp_spinlock);
		if (i >= MAX_KEEP)
			continue;
		if (rte_mempool_from_obj(obj) != mp_spsc) {
			printf("obj not owned by this mempool\n");
			RET_ERR();
		}
		rte_mempool_put(mp_spsc, obj);
		rte_spinlock_lock(&scsp_spinlock);
		scsp_obj_table[i] = NULL;
		rte_spinlock_unlock(&scsp_spinlock);
	}

	return 0;
}

/*
 * single consumer function
 */
static int test_mempool_single_consumer(void)
{
	unsigned int i;
	void *obj;
	uint64_t start_cycles, end_cycles;
	uint64_t duration = rte_get_timer_hz() / 8;

	start_cycles = rte_get_timer_cycles();
	while (1) {
		end_cycles = rte_get_timer_cycles();
		/* duration has elapsed, stop consuming */
		if (start_cycles + duration < end_cycles)
			break;
		rte_spinlock_lock(&scsp_spinlock);
		for (i = 0; i < MAX_KEEP; i++) {
			if (NULL == scsp_obj_table[i])
				break;
		}
		rte_spinlock_unlock(&scsp_spinlock);
		if (i >= MAX_KEEP)
			continue;
		if (rte_mempool_get(mp_spsc, &obj) < 0)
			break;
		rte_spinlock_lock(&scsp_spinlock);
		scsp_obj_table[i] = obj;
		rte_spinlock_unlock(&scsp_spinlock);
	}

	return 0;
}

/*
 * launcher for the consumer half of the single-producer/single-consumer
 * test; producer and consumer each run on their own lcore
 */
static int
test_mempool_launch_single_consumer(__rte_unused void *arg)
{
	return test_mempool_single_consumer();
}

static void
my_mp_init(struct rte_mempool *mp, __rte_unused void *arg)
{
	printf("mempool name is %s\n", mp->name);
	/* nothing else to be implemented here */
}

/*
 * test mempool operations based on a single producer and a single consumer
 */
static int
test_mempool_sp_sc(void)
{
	int ret = 0;
	unsigned lcore_id = rte_lcore_id();
	unsigned lcore_next;

	/* create a mempool with single producer/consumer ring */
	if (mp_spsc == NULL) {
		mp_spsc = rte_mempool_create("test_mempool_sp_sc", MEMPOOL_SIZE,
			MEMPOOL_ELT_SIZE, 0, 0,
			my_mp_init, NULL,
			my_obj_init, NULL,
			SOCKET_ID_ANY,
			RTE_MEMPOOL_F_NO_CACHE_ALIGN | RTE_MEMPOOL_F_SP_PUT |
			RTE_MEMPOOL_F_SC_GET);
		if (mp_spsc == NULL)
			RET_ERR();
	}
	if (rte_mempool_lookup("test_mempool_sp_sc") != mp_spsc) {
		printf("Cannot lookup mempool from its name\n");
		ret = -1;
		goto err;
	}
	lcore_next = rte_get_next_lcore(lcore_id, 0, 1);
	if (lcore_next >= RTE_MAX_LCORE) {
		ret = -1;
		goto err;
	}
	if (rte_eal_lcore_role(lcore_next) != ROLE_RTE) {
		ret = -1;
		goto err;
	}
	rte_spinlock_init(&scsp_spinlock);
	memset(scsp_obj_table, 0, sizeof(scsp_obj_table));
	rte_eal_remote_launch(test_mempool_launch_single_consumer, NULL,
		lcore_next);
	if (test_mempool_single_producer() < 0)
		ret = -1;

	if (rte_eal_wait_lcore(lcore_next) < 0)
		ret = -1;

err:
	rte_mempool_free(mp_spsc);
	mp_spsc = NULL;

	return ret;
}

/*
 * some more basic tests of the mempool API
 */
static int
test_mempool_basic_ex(struct rte_mempool *mp)
{
	unsigned i;
	void **obj;
	void *err_obj;
	int ret = -1;

	if (mp == NULL)
		return ret;

	obj = rte_calloc("test_mempool_basic_ex", MEMPOOL_SIZE,
		sizeof(void *), 0);
	if (obj == NULL) {
		printf("test_mempool_basic_ex failed to allocate with rte_calloc\n");
		return ret;
	}
	printf("test_mempool_basic_ex now mempool (%s) has %u entries in use\n",
		mp->name, rte_mempool_in_use_count(mp));
	if (rte_mempool_full(mp) != 1) {
		printf("test_mempool_basic_ex the mempool should be full\n");
		goto fail_mp_basic_ex;
	}

	for (i = 0; i < MEMPOOL_SIZE; i++) {
		if (rte_mempool_get(mp, &obj[i]) < 0) {
			printf("test_mempool_basic_ex failed to get object [%u]\n",
				i);
			goto fail_mp_basic_ex;
		}
	}
	if (rte_mempool_get(mp, &err_obj) == 0) {
		printf("test_mempool_basic_ex got an impossible object\n");
		goto fail_mp_basic_ex;
	}
	printf("got %u objects\n", i);
	if (rte_mempool_empty(mp) != 1) {
		printf("test_mempool_basic_ex the mempool should be empty\n");
		goto fail_mp_basic_ex;
	}

	for (i = 0; i < MEMPOOL_SIZE; i++)
		rte_mempool_put(mp, obj[i]);

	if (rte_mempool_full(mp) != 1) {
		printf("test_mempool_basic_ex the mempool should be full\n");
		goto fail_mp_basic_ex;
	}

	ret = 0;

fail_mp_basic_ex:
	/* rte_free() is a no-op on NULL, no need to check */
	rte_free(obj);

	return ret;
}

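/*
 * Mempool names are registered globally: creating a second pool with an
 * existing name must fail (the library sets rte_errno to EEXIST).
 */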
static int
test_mempool_same_name_twice_creation(void)
{
	struct rte_mempool *mp_tc, *mp_tc2;

	mp_tc = rte_mempool_create("test_mempool_same_name", MEMPOOL_SIZE,
		MEMPOOL_ELT_SIZE, 0, 0,
		NULL, NULL,
		NULL, NULL,
		SOCKET_ID_ANY, 0);

	if (mp_tc == NULL)
		RET_ERR();

	mp_tc2 = rte_mempool_create("test_mempool_same_name", MEMPOOL_SIZE,
		MEMPOOL_ELT_SIZE, 0, 0,
		NULL, NULL,
		NULL, NULL,
		SOCKET_ID_ANY, 0);

	if (mp_tc2 != NULL) {
		rte_mempool_free(mp_tc);
		rte_mempool_free(mp_tc2);
		RET_ERR();
	}

	rte_mempool_free(mp_tc);
	return 0;
}

static void
walk_cb(struct rte_mempool *mp, void *userdata __rte_unused)
{
	printf("\t%s\n", mp->name);
}

struct mp_data {
	int16_t ret;
};

static void
test_mp_mem_init(struct rte_mempool *mp,
		void *opaque,
		__rte_unused struct rte_mempool_memhdr *memhdr,
		__rte_unused unsigned int mem_idx)
{
	struct mp_data *data = opaque;

	if (mp == NULL) {
		data->ret = -1;
		return;
	}
	/* nothing else to be implemented here */
	data->ret = 0;
}

struct test_mempool_events_data {
	struct rte_mempool *mp;
	enum rte_mempool_event event;
	bool invoked;
};

static void
test_mempool_events_cb(enum rte_mempool_event event,
		       struct rte_mempool *mp, void *user_data)
{
	struct test_mempool_events_data *data = user_data;

	data->mp = mp;
	data->event = event;
	data->invoked = true;
}

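/*
 * Check that registered callbacks observe RTE_MEMPOOL_EVENT_READY when a
 * pool becomes fully populated and RTE_MEMPOOL_EVENT_DESTROY when it is
 * freed, and that unregistered callbacks observe nothing.
 */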
static int
test_mempool_events(int (*populate)(struct rte_mempool *mp))
{
#pragma push_macro("RTE_TEST_TRACE_FAILURE")
#undef RTE_TEST_TRACE_FAILURE
#define RTE_TEST_TRACE_FAILURE(...) do { goto fail; } while (0)
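/*
 * RTE_TEST_TRACE_FAILURE is the hook the RTE_TEST_ASSERT_* macros invoke
 * on failure; it is overridden here so that a failed assertion jumps to
 * the local cleanup label, and restored by pop_macro at the end of the
 * function.
 */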

	static const size_t CB_NUM = 3;
	static const size_t MP_NUM = 2;

	struct test_mempool_events_data data[CB_NUM];
	struct rte_mempool *mp[MP_NUM], *freed;
	char name[RTE_MEMPOOL_NAMESIZE];
	size_t i, j;
	int ret;

	memset(mp, 0, sizeof(mp));
	for (i = 0; i < CB_NUM; i++) {
		ret = rte_mempool_event_callback_register
				(test_mempool_events_cb, &data[i]);
		RTE_TEST_ASSERT_EQUAL(ret, 0, "Failed to register the callback %zu: %s",
				      i, rte_strerror(rte_errno));
	}
	ret = rte_mempool_event_callback_unregister(test_mempool_events_cb, mp);
	RTE_TEST_ASSERT_NOT_EQUAL(ret, 0, "Unregistered a non-registered callback");
	/* NULL argument has no special meaning in this API. */
	ret = rte_mempool_event_callback_unregister(test_mempool_events_cb,
						    NULL);
	RTE_TEST_ASSERT_NOT_EQUAL(ret, 0, "Unregistered a non-registered callback with NULL argument");

	/* Create mempool 0 that will be observed by all callbacks. */
	memset(&data, 0, sizeof(data));
	strcpy(name, "empty0");
	mp[0] = rte_mempool_create_empty(name, MEMPOOL_SIZE,
					 MEMPOOL_ELT_SIZE, 0, 0,
					 SOCKET_ID_ANY, 0);
	RTE_TEST_ASSERT_NOT_NULL(mp[0], "Cannot create mempool %s: %s",
				 name, rte_strerror(rte_errno));
	for (j = 0; j < CB_NUM; j++)
		RTE_TEST_ASSERT_EQUAL(data[j].invoked, false,
				      "Callback %zu invoked on %s mempool creation",
				      j, name);

	rte_mempool_set_ops_byname(mp[0], rte_mbuf_best_mempool_ops(), NULL);
	ret = populate(mp[0]);
	RTE_TEST_ASSERT_EQUAL(ret, (int)mp[0]->size, "Failed to populate mempool %s: %s",
			      name, rte_strerror(-ret));
	for (j = 0; j < CB_NUM; j++) {
		RTE_TEST_ASSERT_EQUAL(data[j].invoked, true,
					"Callback %zu not invoked on mempool %s population",
					j, name);
		RTE_TEST_ASSERT_EQUAL(data[j].event,
					RTE_MEMPOOL_EVENT_READY,
					"Wrong callback invoked, expected READY");
		RTE_TEST_ASSERT_EQUAL(data[j].mp, mp[0],
					"Callback %zu invoked for a wrong mempool instead of %s",
					j, name);
	}

	/* Check that unregistered callback 0 observes no events. */
	ret = rte_mempool_event_callback_unregister(test_mempool_events_cb,
						    &data[0]);
	RTE_TEST_ASSERT_EQUAL(ret, 0, "Failed to unregister callback 0: %s",
			      rte_strerror(rte_errno));
	memset(&data, 0, sizeof(data));
	strcpy(name, "empty1");
	mp[1] = rte_mempool_create_empty(name, MEMPOOL_SIZE,
					 MEMPOOL_ELT_SIZE, 0, 0,
					 SOCKET_ID_ANY, 0);
	RTE_TEST_ASSERT_NOT_NULL(mp[1], "Cannot create mempool %s: %s",
				 name, rte_strerror(rte_errno));
	rte_mempool_set_ops_byname(mp[1], rte_mbuf_best_mempool_ops(), NULL);
	ret = populate(mp[1]);
	RTE_TEST_ASSERT_EQUAL(ret, (int)mp[1]->size, "Failed to populate mempool %s: %s",
			      name, rte_strerror(-ret));
	RTE_TEST_ASSERT_EQUAL(data[0].invoked, false,
			      "Unregistered callback 0 invoked on %s mempool population",
			      name);

	for (i = 0; i < MP_NUM; i++) {
		memset(&data, 0, sizeof(data));
		sprintf(name, "empty%zu", i);
		rte_mempool_free(mp[i]);
		/*
		 * Save pointer to check that it was passed to the callback,
		 * but put NULL into the array in case cleanup is called early.
		 */
		freed = mp[i];
		mp[i] = NULL;
		for (j = 1; j < CB_NUM; j++) {
			RTE_TEST_ASSERT_EQUAL(data[j].invoked, true,
					      "Callback %zu not invoked on mempool %s destruction",
					      j, name);
			RTE_TEST_ASSERT_EQUAL(data[j].event,
					      RTE_MEMPOOL_EVENT_DESTROY,
					      "Wrong callback invoked, expected DESTROY");
			RTE_TEST_ASSERT_EQUAL(data[j].mp, freed,
					      "Callback %zu invoked for a wrong mempool instead of %s",
					      j, name);
		}
		RTE_TEST_ASSERT_EQUAL(data[0].invoked, false,
				      "Unregistered callback 0 invoked on %s mempool destruction",
				      name);
	}

	for (j = 1; j < CB_NUM; j++) {
		ret = rte_mempool_event_callback_unregister
					(test_mempool_events_cb, &data[j]);
		RTE_TEST_ASSERT_EQUAL(ret, 0, "Failed to unregister the callback %zu: %s",
				      j, rte_strerror(rte_errno));
	}
	return TEST_SUCCESS;

fail:
	for (j = 0; j < CB_NUM; j++)
		rte_mempool_event_callback_unregister
					(test_mempool_events_cb, &data[j]);
	for (i = 0; i < MP_NUM; i++)
		rte_mempool_free(mp[i]);
	return TEST_FAILED;

#pragma pop_macro("RTE_TEST_TRACE_FAILURE")
}

struct test_mempool_events_safety_data {
	bool invoked;
	int (*api_func)(rte_mempool_event_callback *func, void *user_data);
	rte_mempool_event_callback *cb_func;
	void *cb_user_data;
	int ret;
};

static void
test_mempool_events_safety_cb(enum rte_mempool_event event,
			      struct rte_mempool *mp, void *user_data)
{
	struct test_mempool_events_safety_data *data = user_data;

	RTE_SET_USED(event);
	RTE_SET_USED(mp);
	data->invoked = true;
	data->ret = data->api_func(data->cb_func, data->cb_user_data);
}

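/*
 * Check that the callback list tolerates modification from within a
 * callback: one callback unregisters itself while another registers a
 * new callback during event delivery.
 */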
static int
test_mempool_events_safety(void)
{
#pragma push_macro("RTE_TEST_TRACE_FAILURE")
#undef RTE_TEST_TRACE_FAILURE
#define RTE_TEST_TRACE_FAILURE(...) do { \
		ret = TEST_FAILED; \
		goto exit; \
	} while (0)

	struct test_mempool_events_data data;
	struct test_mempool_events_safety_data sdata[2];
	struct rte_mempool *mp;
	size_t i;
	int ret;

	/* removes itself */
	sdata[0].api_func = rte_mempool_event_callback_unregister;
	sdata[0].cb_func = test_mempool_events_safety_cb;
	sdata[0].cb_user_data = &sdata[0];
	sdata[0].ret = -1;
	rte_mempool_event_callback_register(test_mempool_events_safety_cb,
					    &sdata[0]);
	/* inserts a callback after itself */
	sdata[1].api_func = rte_mempool_event_callback_register;
	sdata[1].cb_func = test_mempool_events_cb;
	sdata[1].cb_user_data = &data;
	sdata[1].ret = -1;
	rte_mempool_event_callback_register(test_mempool_events_safety_cb,
					    &sdata[1]);

	mp = rte_mempool_create_empty("empty", MEMPOOL_SIZE,
				      MEMPOOL_ELT_SIZE, 0, 0,
				      SOCKET_ID_ANY, 0);
	RTE_TEST_ASSERT_NOT_NULL(mp, "Cannot create mempool: %s",
				 rte_strerror(rte_errno));
	memset(&data, 0, sizeof(data));
	ret = rte_mempool_populate_default(mp);
	RTE_TEST_ASSERT_EQUAL(ret, (int)mp->size, "Failed to populate mempool: %s",
			      rte_strerror(-ret));

	RTE_TEST_ASSERT_EQUAL(sdata[0].ret, 0, "Callback failed to unregister itself: %s",
			      rte_strerror(rte_errno));
	RTE_TEST_ASSERT_EQUAL(sdata[1].ret, 0, "Failed to insert a new callback: %s",
			      rte_strerror(rte_errno));
	RTE_TEST_ASSERT_EQUAL(data.invoked, false,
			      "Inserted callback is invoked on mempool population");

	memset(&data, 0, sizeof(data));
	sdata[0].invoked = false;
	rte_mempool_free(mp);
	mp = NULL;
	RTE_TEST_ASSERT_EQUAL(sdata[0].invoked, false,
			      "Callback that unregistered itself was called");
	RTE_TEST_ASSERT_EQUAL(sdata[1].ret, -EEXIST,
			      "New callback inserted twice");
	RTE_TEST_ASSERT_EQUAL(data.invoked, true,
			      "Inserted callback is not invoked on mempool destruction");

	rte_mempool_event_callback_unregister(test_mempool_events_cb, &data);
	for (i = 0; i < RTE_DIM(sdata); i++)
		rte_mempool_event_callback_unregister
				(test_mempool_events_safety_cb, &sdata[i]);
	ret = TEST_SUCCESS;

exit:
	/* cleanup, don't care which callbacks are already removed */
	rte_mempool_event_callback_unregister(test_mempool_events_cb, &data);
	for (i = 0; i < RTE_DIM(sdata); i++)
		rte_mempool_event_callback_unregister
				(test_mempool_events_safety_cb, &sdata[i]);
	/* in case of failure before the planned destruction */
	rte_mempool_free(mp);
	return ret;

#pragma pop_macro("RTE_TEST_TRACE_FAILURE")
}

#pragma push_macro("RTE_TEST_TRACE_FAILURE")
#undef RTE_TEST_TRACE_FAILURE
#define RTE_TEST_TRACE_FAILURE(...) do { \
		ret = TEST_FAILED; \
		goto exit; \
	} while (0)
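
/*
 * RTE_MEMPOOL_F_NON_IO is inferred by the library as a best-effort hint
 * that the pool cannot be used for device IO: it is set when
 * RTE_MEMPOOL_F_NO_IOVA_CONTIG is requested or when every populated
 * chunk has RTE_BAD_IOVA, and it is cleared as soon as any chunk with a
 * valid IOVA is added. The three tests below exercise those rules.
 */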
static int
test_mempool_flag_non_io_set_when_no_iova_contig_set(void)
{
	struct rte_mempool *mp = NULL;
	int ret;

	mp = rte_mempool_create_empty("empty", MEMPOOL_SIZE,
				      MEMPOOL_ELT_SIZE, 0, 0,
				      SOCKET_ID_ANY, RTE_MEMPOOL_F_NO_IOVA_CONTIG);
	RTE_TEST_ASSERT_NOT_NULL(mp, "Cannot create mempool: %s",
				 rte_strerror(rte_errno));
	rte_mempool_set_ops_byname(mp, rte_mbuf_best_mempool_ops(), NULL);
	ret = rte_mempool_populate_default(mp);
	RTE_TEST_ASSERT(ret > 0, "Failed to populate mempool: %s",
			rte_strerror(-ret));
	RTE_TEST_ASSERT(mp->flags & RTE_MEMPOOL_F_NON_IO,
			"NON_IO flag is not set when NO_IOVA_CONTIG is set");
	ret = TEST_SUCCESS;
exit:
	rte_mempool_free(mp);
	return ret;
}

static int
test_mempool_flag_non_io_unset_when_populated_with_valid_iova(void)
{
	void *virt = NULL;
	rte_iova_t iova;
	size_t total_size = MEMPOOL_ELT_SIZE * MEMPOOL_SIZE;
	size_t block_size = total_size / 3;
	struct rte_mempool *mp = NULL;
	int ret;

	/*
	 * Since objects from the pool are never used in the test,
	 * we don't care about contiguous IOVA; on the other hand,
	 * requiring it could cause spurious test failures.
	 */
	virt = rte_malloc("test_mempool", total_size, rte_mem_page_size());
	RTE_TEST_ASSERT_NOT_NULL(virt, "Cannot allocate memory");
	iova = rte_mem_virt2iova(virt);
	RTE_TEST_ASSERT_NOT_EQUAL(iova, RTE_BAD_IOVA, "Cannot get IOVA");
	mp = rte_mempool_create_empty("empty", MEMPOOL_SIZE,
				      MEMPOOL_ELT_SIZE, 0, 0,
				      SOCKET_ID_ANY, 0);
	RTE_TEST_ASSERT_NOT_NULL(mp, "Cannot create mempool: %s",
				 rte_strerror(rte_errno));

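	/*
	 * Populate in three steps: a chunk with RTE_BAD_IOVA must leave
	 * NON_IO set, a chunk with a valid IOVA must clear it, and it must
	 * stay cleared even when another RTE_BAD_IOVA chunk is added.
	 */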
	ret = rte_mempool_populate_iova(mp, RTE_PTR_ADD(virt, 1 * block_size),
					RTE_BAD_IOVA, block_size, NULL, NULL);
	RTE_TEST_ASSERT(ret > 0, "Failed to populate mempool: %s",
			rte_strerror(-ret));
	RTE_TEST_ASSERT(mp->flags & RTE_MEMPOOL_F_NON_IO,
			"NON_IO flag is not set when mempool is populated with only RTE_BAD_IOVA");

	ret = rte_mempool_populate_iova(mp, virt, iova, block_size, NULL, NULL);
	RTE_TEST_ASSERT(ret > 0, "Failed to populate mempool: %s",
			rte_strerror(-ret));
	RTE_TEST_ASSERT(!(mp->flags & RTE_MEMPOOL_F_NON_IO),
			"NON_IO flag is not unset when mempool is populated with valid IOVA");

	ret = rte_mempool_populate_iova(mp, RTE_PTR_ADD(virt, 2 * block_size),
					RTE_BAD_IOVA, block_size, NULL, NULL);
	RTE_TEST_ASSERT(ret > 0, "Failed to populate mempool: %s",
			rte_strerror(-ret));
	RTE_TEST_ASSERT(!(mp->flags & RTE_MEMPOOL_F_NON_IO),
			"NON_IO flag is set even when some objects have valid IOVA");
	ret = TEST_SUCCESS;

exit:
	rte_mempool_free(mp);
	rte_free(virt);
	return ret;
}

static int
test_mempool_flag_non_io_unset_by_default(void)
{
	struct rte_mempool *mp;
	int ret;

	mp = rte_mempool_create_empty("empty", MEMPOOL_SIZE,
				      MEMPOOL_ELT_SIZE, 0, 0,
				      SOCKET_ID_ANY, 0);
	RTE_TEST_ASSERT_NOT_NULL(mp, "Cannot create mempool: %s",
				 rte_strerror(rte_errno));
	ret = rte_mempool_populate_default(mp);
	RTE_TEST_ASSERT_EQUAL(ret, (int)mp->size, "Failed to populate mempool: %s",
			      rte_strerror(-ret));
	RTE_TEST_ASSERT(!(mp->flags & RTE_MEMPOOL_F_NON_IO),
			"NON_IO flag is set by default");
	ret = TEST_SUCCESS;
exit:
	rte_mempool_free(mp);
	return ret;
}

#pragma pop_macro("RTE_TEST_TRACE_FAILURE")

static int
test_mempool(void)
{
	int ret = -1;
	uint32_t nb_objs = 0;
	uint32_t nb_mem_chunks = 0;
	struct rte_mempool *mp_cache = NULL;
	struct rte_mempool *mp_nocache = NULL;
	struct rte_mempool *mp_stack_anon = NULL;
	struct rte_mempool *mp_stack_mempool_iter = NULL;
	struct rte_mempool *mp_stack = NULL;
	struct rte_mempool *default_pool = NULL;
	struct mp_data cb_arg = {
		.ret = -1
	};
	const char *default_pool_ops = rte_mbuf_best_mempool_ops();

	/* create a mempool (without cache) */
	mp_nocache = rte_mempool_create("test_nocache", MEMPOOL_SIZE,
		MEMPOOL_ELT_SIZE, 0, 0,
		NULL, NULL,
		my_obj_init, NULL,
		SOCKET_ID_ANY, 0);

	if (mp_nocache == NULL) {
		printf("cannot allocate mp_nocache mempool\n");
		GOTO_ERR(ret, err);
	}

	/* create a mempool (with cache) */
	mp_cache = rte_mempool_create("test_cache", MEMPOOL_SIZE,
		MEMPOOL_ELT_SIZE,
		RTE_MEMPOOL_CACHE_MAX_SIZE, 0,
		NULL, NULL,
		my_obj_init, NULL,
		SOCKET_ID_ANY, 0);

	if (mp_cache == NULL) {
		printf("cannot allocate mp_cache mempool\n");
		GOTO_ERR(ret, err);
	}

	/* create an empty mempool */
	mp_stack_anon = rte_mempool_create_empty("test_stack_anon",
		MEMPOOL_SIZE,
		MEMPOOL_ELT_SIZE,
		RTE_MEMPOOL_CACHE_MAX_SIZE, 0,
		SOCKET_ID_ANY, 0);

	if (mp_stack_anon == NULL)
		GOTO_ERR(ret, err);

	/* populate an empty mempool */
	ret = rte_mempool_populate_anon(mp_stack_anon);
	printf("%s ret = %d\n", __func__, ret);
	if (ret < 0)
		GOTO_ERR(ret, err);

	/* Try to populate when already populated */
	ret = rte_mempool_populate_anon(mp_stack_anon);
	if (ret != 0)
		GOTO_ERR(ret, err);

	/* create a mempool */
	mp_stack_mempool_iter = rte_mempool_create("test_iter_obj",
		MEMPOOL_SIZE,
		MEMPOOL_ELT_SIZE,
		RTE_MEMPOOL_CACHE_MAX_SIZE, 0,
		NULL, NULL,
		my_obj_init, NULL,
		SOCKET_ID_ANY, 0);

	if (mp_stack_mempool_iter == NULL)
		GOTO_ERR(ret, err);

	/* test to initialize mempool objects and memory */
	nb_objs = rte_mempool_obj_iter(mp_stack_mempool_iter, my_obj_init,
			NULL);
	if (nb_objs == 0)
		GOTO_ERR(ret, err);

	nb_mem_chunks = rte_mempool_mem_iter(mp_stack_mempool_iter,
			test_mp_mem_init, &cb_arg);
	if (nb_mem_chunks == 0 || cb_arg.ret < 0)
		GOTO_ERR(ret, err);

	/* create a mempool with an external handler */
	mp_stack = rte_mempool_create_empty("test_stack",
		MEMPOOL_SIZE,
		MEMPOOL_ELT_SIZE,
		RTE_MEMPOOL_CACHE_MAX_SIZE, 0,
		SOCKET_ID_ANY, 0);

	if (mp_stack == NULL) {
		printf("cannot allocate mp_stack mempool\n");
		GOTO_ERR(ret, err);
	}
	if (rte_mempool_set_ops_byname(mp_stack, "stack", NULL) < 0) {
		printf("cannot set stack handler\n");
		GOTO_ERR(ret, err);
	}
	if (rte_mempool_populate_default(mp_stack) < 0) {
		printf("cannot populate mp_stack mempool\n");
		GOTO_ERR(ret, err);
	}
	rte_mempool_obj_iter(mp_stack, my_obj_init, NULL);

	/* Create a mempool based on Default handler */
	printf("Testing %s mempool handler\n", default_pool_ops);
	default_pool = rte_mempool_create_empty("default_pool",
						MEMPOOL_SIZE,
						MEMPOOL_ELT_SIZE,
						RTE_MEMPOOL_CACHE_MAX_SIZE, 0,
						SOCKET_ID_ANY, 0);

	if (default_pool == NULL) {
		printf("cannot allocate default mempool\n");
		GOTO_ERR(ret, err);
	}
	if (rte_mempool_set_ops_byname(default_pool,
				default_pool_ops, NULL) < 0) {
		printf("cannot set %s handler\n", default_pool_ops);
		GOTO_ERR(ret, err);
	}
	if (rte_mempool_populate_default(default_pool) < 0) {
		printf("cannot populate %s mempool\n", default_pool_ops);
		GOTO_ERR(ret, err);
	}
	rte_mempool_obj_iter(default_pool, my_obj_init, NULL);

	/* retrieve the mempool from its name */
	if (rte_mempool_lookup("test_nocache") != mp_nocache) {
		printf("Cannot lookup mempool from its name\n");
		GOTO_ERR(ret, err);
	}

	printf("Walk into mempools:\n");
	rte_mempool_walk(walk_cb, NULL);

	rte_mempool_list_dump(stdout);

	/* basic tests without cache */
	if (test_mempool_basic(mp_nocache, 0) < 0)
		GOTO_ERR(ret, err);

	/* basic tests with cache */
	if (test_mempool_basic(mp_cache, 0) < 0)
		GOTO_ERR(ret, err);

	/* basic tests with user-owned cache */
	if (test_mempool_basic(mp_nocache, 1) < 0)
		GOTO_ERR(ret, err);

	/* more basic tests without cache */
	if (test_mempool_basic_ex(mp_nocache) < 0)
		GOTO_ERR(ret, err);

	/* mempool operation test based on a single producer and single consumer */
	if (test_mempool_sp_sc() < 0)
		GOTO_ERR(ret, err);

	if (test_mempool_creation_with_exceeded_cache_size() < 0)
		GOTO_ERR(ret, err);

	if (test_mempool_creation_with_invalid_flags() < 0)
		GOTO_ERR(ret, err);

	if (test_mempool_same_name_twice_creation() < 0)
		GOTO_ERR(ret, err);

	/* test the stack handler */
	if (test_mempool_basic(mp_stack, 1) < 0)
		GOTO_ERR(ret, err);

	if (test_mempool_basic(default_pool, 1) < 0)
		GOTO_ERR(ret, err);

	/* test mempool event callbacks */
	if (test_mempool_events(rte_mempool_populate_default) < 0)
		GOTO_ERR(ret, err);
	if (test_mempool_events(rte_mempool_populate_anon) < 0)
		GOTO_ERR(ret, err);
	if (test_mempool_events_safety() < 0)
		GOTO_ERR(ret, err);

	/* test NON_IO flag inference */
	if (test_mempool_flag_non_io_unset_by_default() < 0)
		GOTO_ERR(ret, err);
	if (test_mempool_flag_non_io_set_when_no_iova_contig_set() < 0)
		GOTO_ERR(ret, err);
	if (test_mempool_flag_non_io_unset_when_populated_with_valid_iova() < 0)
		GOTO_ERR(ret, err);

	rte_mempool_list_dump(stdout);

	ret = 0;

err:
	rte_mempool_free(mp_nocache);
	rte_mempool_free(mp_cache);
	rte_mempool_free(mp_stack_anon);
	rte_mempool_free(mp_stack_mempool_iter);
	rte_mempool_free(mp_stack);
	rte_mempool_free(default_pool);

	return ret;
}

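/* Registered as "mempool_autotest"; run it from the dpdk-test application. */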
REGISTER_TEST_COMMAND(mempool_autotest, test_mempool);