/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <inttypes.h>
#include <stdarg.h>
#include <errno.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_memory.h>
#include <rte_launch.h>
#include <rte_cycles.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_spinlock.h>
#include <rte_malloc.h>

#ifdef RTE_LIB_HASH
#include <rte_hash.h>
#include <rte_fbk_hash.h>
#include <rte_jhash.h>
#endif /* RTE_LIB_HASH */

#ifdef RTE_LIB_LPM
#include <rte_lpm.h>
#endif /* RTE_LIB_LPM */

#include <rte_string_fns.h>

#include "test.h"

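/* Per-case worker body run on each lcore, and optional per-lcore cleanup hook. */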
typedef int (*case_func_t)(void *arg);
typedef void (*case_clean_t)(unsigned lcore_id);

#define MAX_STRING_SIZE     (256)
#define MAX_ITER_MULTI      (16)
#define MAX_ITER_ONCE       (4)
#define MAX_LPM_ITER_TIMES  (6)

#define MEMPOOL_ELT_SIZE    (sizeof(uint32_t))
#define MEMPOOL_SIZE        (4)

#define MAX_LCORES	(RTE_MAX_MEMZONE / (MAX_ITER_MULTI * 4U))

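/*
 * obj_count counts how many lcores managed to create the shared
 * "fr_test_once" object; launch_test() expects exactly one success.
 * synchro is the start flag the main lcore raises to release the workers.
 */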
static uint32_t obj_count;
static uint32_t synchro;

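/*
 * Worker lcores spin here until the main lcore raises the synchro flag,
 * so that all threads enter the create/lookup calls at (roughly) the same time.
 */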
#define WAIT_SYNCHRO_FOR_WORKERS()   do { \
	if (lcore_self != rte_get_main_lcore())                  \
		rte_wait_until_equal_32(&synchro, 1, __ATOMIC_RELAXED); \
} while(0)

/*
 * rte_eal_init() may only be called once; a second call must fail
 */
static int
test_eal_init_once(__rte_unused void *arg)
{
	unsigned lcore_self = rte_lcore_id();

	WAIT_SYNCHRO_FOR_WORKERS();

	__atomic_store_n(&obj_count, 1, __ATOMIC_RELAXED); /* silence the check in the caller */
	if (rte_eal_init(0, NULL) != -1)
		return -1;

	return 0;
}

/*
 * ring create/lookup reentrancy test
 */
static void
ring_clean(unsigned int lcore_id)
{
	struct rte_ring *rp;
	char ring_name[MAX_STRING_SIZE];
	int i;

	rp = rte_ring_lookup("fr_test_once");
	rte_ring_free(rp);

	for (i = 0; i < MAX_ITER_MULTI; i++) {
		snprintf(ring_name, sizeof(ring_name),
			 "fr_test_%d_%d", lcore_id, i);
		rp = rte_ring_lookup(ring_name);
		rte_ring_free(rp);
	}
}

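/*
 * Every participating lcore first races to create the shared ring
 * "fr_test_once" (only one creation should succeed), then creates and
 * looks up a set of per-lcore rings. Cleanup is deferred to ring_clean().
 */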
static int
ring_create_lookup(__rte_unused void *arg)
{
	unsigned lcore_self = rte_lcore_id();
	struct rte_ring *rp;
	char ring_name[MAX_STRING_SIZE];
	int i;

	WAIT_SYNCHRO_FOR_WORKERS();

	/* create the same ring simultaneously on all threads */
	for (i = 0; i < MAX_ITER_ONCE; i++) {
		rp = rte_ring_create("fr_test_once", 4096, SOCKET_ID_ANY, 0);
		if (rp != NULL)
			__atomic_fetch_add(&obj_count, 1, __ATOMIC_RELAXED);
	}

	/* create/lookup new rings several times */
	for (i = 0; i < MAX_ITER_MULTI; i++) {
		snprintf(ring_name, sizeof(ring_name), "fr_test_%d_%d", lcore_self, i);
		rp = rte_ring_create(ring_name, 4096, SOCKET_ID_ANY, 0);
		if (NULL == rp)
			return -1;
		if (rte_ring_lookup(ring_name) != rp)
			return -1;

		/* verify the ring was created successfully */
		if (rte_ring_lookup(ring_name) == NULL)
			return -1;
	}

	return 0;
}

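/* Mempool element init callback: zero the element and store its index in it. */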
static void
my_obj_init(struct rte_mempool *mp, __rte_unused void *arg,
	    void *obj, unsigned i)
{
	uint32_t *objnum = obj;
	memset(obj, 0, mp->elt_size);
	*objnum = i;
}

static void
mempool_clean(unsigned int lcore_id)
{
	struct rte_mempool *mp;
	char mempool_name[MAX_STRING_SIZE];
	int i;

	mp = rte_mempool_lookup("fr_test_once");
	rte_mempool_free(mp);

	for (i = 0; i < MAX_ITER_MULTI; i++) {
		snprintf(mempool_name, sizeof(mempool_name), "fr_test_%d_%d",
			 lcore_id, i);
		mp = rte_mempool_lookup(mempool_name);
		rte_mempool_free(mp);
	}
}

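/*
 * Same pattern as the ring test: race to create the shared mempool
 * "fr_test_once", then create and look up per-lcore mempools.
 */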
static int
mempool_create_lookup(__rte_unused void *arg)
{
	unsigned lcore_self = rte_lcore_id();
	struct rte_mempool *mp;
	char mempool_name[MAX_STRING_SIZE];
	int i;

	WAIT_SYNCHRO_FOR_WORKERS();

	/* create the same mempool simultaneously on all threads */
	for (i = 0; i < MAX_ITER_ONCE; i++) {
		mp = rte_mempool_create("fr_test_once", MEMPOOL_SIZE,
					MEMPOOL_ELT_SIZE, 0, 0,
					NULL, NULL,
					my_obj_init, NULL,
					SOCKET_ID_ANY, 0);
		if (mp != NULL)
			__atomic_fetch_add(&obj_count, 1, __ATOMIC_RELAXED);
	}

	/* create/lookup new mempools several times */
	for (i = 0; i < MAX_ITER_MULTI; i++) {
		snprintf(mempool_name, sizeof(mempool_name), "fr_test_%d_%d", lcore_self, i);
		mp = rte_mempool_create(mempool_name, MEMPOOL_SIZE,
					MEMPOOL_ELT_SIZE, 0, 0,
					NULL, NULL,
					my_obj_init, NULL,
					SOCKET_ID_ANY, 0);
		if (NULL == mp)
			return -1;
		if (rte_mempool_lookup(mempool_name) != mp)
			return -1;

		/* verify the mempool was created successfully */
		if (rte_mempool_lookup(mempool_name) == NULL)
			return -1;
	}

	return 0;
}

#ifdef RTE_LIB_HASH
static void
hash_clean(unsigned lcore_id)
{
	char hash_name[MAX_STRING_SIZE];
	struct rte_hash *handle;
	int i;

	handle = rte_hash_find_existing("fr_test_once");
	rte_hash_free(handle);

	for (i = 0; i < MAX_ITER_MULTI; i++) {
		snprintf(hash_name, sizeof(hash_name), "fr_test_%d_%d", lcore_id, i);

		if ((handle = rte_hash_find_existing(hash_name)) != NULL)
			rte_hash_free(handle);
	}
}

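/*
 * Race to create the shared hash "fr_test_once", then exercise per-lcore
 * create/find/free cycles.
 */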
static int
hash_create_free(__rte_unused void *arg)
{
	unsigned lcore_self = rte_lcore_id();
	struct rte_hash *handle;
	char hash_name[MAX_STRING_SIZE];
	int i;
	struct rte_hash_parameters hash_params = {
		.name = NULL,
		.entries = 16,
		.key_len = 4,
		.hash_func = (rte_hash_function)rte_jhash_32b,
		.hash_func_init_val = 0,
		.socket_id = 0,
	};

	WAIT_SYNCHRO_FOR_WORKERS();

	/* create the same hash simultaneously on all threads */
	hash_params.name = "fr_test_once";
	for (i = 0; i < MAX_ITER_ONCE; i++) {
		handle = rte_hash_create(&hash_params);
		if (handle != NULL)
			__atomic_fetch_add(&obj_count, 1, __ATOMIC_RELAXED);
	}

	/* create multiple times simultaneously */
	for (i = 0; i < MAX_ITER_MULTI; i++) {
		snprintf(hash_name, sizeof(hash_name), "fr_test_%d_%d", lcore_self, i);
		hash_params.name = hash_name;

		handle = rte_hash_create(&hash_params);
		if (NULL == handle)
			return -1;

		/* verify the hash can be found, then free it */
		if (handle != rte_hash_find_existing(hash_name))
			return -1;

		rte_hash_free(handle);

		/* verify the free took effect */
		if (NULL != rte_hash_find_existing(hash_name))
			return -1;
	}

	return 0;
}

static void
fbk_clean(unsigned lcore_id)
{
	char fbk_name[MAX_STRING_SIZE];
	struct rte_fbk_hash_table *handle;
	int i;

	handle = rte_fbk_hash_find_existing("fr_test_once");
	if (handle != NULL)
		rte_fbk_hash_free(handle);

	for (i = 0; i < MAX_ITER_MULTI; i++) {
		snprintf(fbk_name, sizeof(fbk_name), "fr_test_%d_%d", lcore_id, i);

		if ((handle = rte_fbk_hash_find_existing(fbk_name)) != NULL)
			rte_fbk_hash_free(handle);
	}
}

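/* Same create/find/free reentrancy checks for the four-byte-key (FBK) hash. */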
static int
fbk_create_free(__rte_unused void *arg)
{
	unsigned lcore_self = rte_lcore_id();
	struct rte_fbk_hash_table *handle;
	char fbk_name[MAX_STRING_SIZE];
	int i;
	struct rte_fbk_hash_params fbk_params = {
		.name = NULL,
		.entries = 4,
		.entries_per_bucket = 4,
		.socket_id = 0,
		.hash_func = rte_jhash_1word,
		.init_val = RTE_FBK_HASH_INIT_VAL_DEFAULT,
	};

	WAIT_SYNCHRO_FOR_WORKERS();

	/* create the same fbk hash table simultaneously on all threads */
	fbk_params.name = "fr_test_once";
	for (i = 0; i < MAX_ITER_ONCE; i++) {
		handle = rte_fbk_hash_create(&fbk_params);
		if (handle != NULL)
			__atomic_fetch_add(&obj_count, 1, __ATOMIC_RELAXED);
	}

	/* create multiple fbk tables simultaneously */
	for (i = 0; i < MAX_ITER_MULTI; i++) {
		snprintf(fbk_name, sizeof(fbk_name), "fr_test_%d_%d", lcore_self, i);
		fbk_params.name = fbk_name;

		handle = rte_fbk_hash_create(&fbk_params);
		if (NULL == handle)
			return -1;

		/* verify the table can be found, then free it */
		if (handle != rte_fbk_hash_find_existing(fbk_name))
			return -1;

		rte_fbk_hash_free(handle);

		/* verify the free took effect */
		if (NULL != rte_fbk_hash_find_existing(fbk_name))
			return -1;
	}

	return 0;
}
#endif /* RTE_LIB_HASH */

#ifdef RTE_LIB_LPM
static void
lpm_clean(unsigned int lcore_id)
{
	char lpm_name[MAX_STRING_SIZE];
	struct rte_lpm *lpm;
	int i;

	lpm = rte_lpm_find_existing("fr_test_once");
	if (lpm != NULL)
		rte_lpm_free(lpm);

	for (i = 0; i < MAX_LPM_ITER_TIMES; i++) {
		snprintf(lpm_name, sizeof(lpm_name), "fr_test_%d_%d", lcore_id, i);

		if ((lpm = rte_lpm_find_existing(lpm_name)) != NULL)
			rte_lpm_free(lpm);
	}
}

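/*
 * Same create/find/free reentrancy checks for LPM tables, using the smaller
 * MAX_LPM_ITER_TIMES iteration count.
 */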
static int
lpm_create_free(__rte_unused void *arg)
{
	unsigned lcore_self = rte_lcore_id();
	struct rte_lpm *lpm;
	struct rte_lpm_config config;

	config.max_rules = 4;
	config.number_tbl8s = 256;
	config.flags = 0;
	char lpm_name[MAX_STRING_SIZE];
	int i;

	WAIT_SYNCHRO_FOR_WORKERS();

	/* create the same lpm simultaneously on all threads */
	for (i = 0; i < MAX_ITER_ONCE; i++) {
		lpm = rte_lpm_create("fr_test_once", SOCKET_ID_ANY, &config);
		if (lpm != NULL)
			__atomic_fetch_add(&obj_count, 1, __ATOMIC_RELAXED);
	}

	/* create multiple lpm tables simultaneously */
	for (i = 0; i < MAX_LPM_ITER_TIMES; i++) {
		snprintf(lpm_name, sizeof(lpm_name), "fr_test_%d_%d", lcore_self, i);
		lpm = rte_lpm_create(lpm_name, SOCKET_ID_ANY, &config);
		if (NULL == lpm)
			return -1;

		/* verify the table can be found, then free it */
		if (lpm != rte_lpm_find_existing(lpm_name))
			return -1;

		rte_lpm_free(lpm);

		/* verify the free took effect */
		if (NULL != rte_lpm_find_existing(lpm_name))
			return -1;
	}

	return 0;
}
#endif /* RTE_LIB_LPM */

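/*
 * Descriptor for one reentrancy test case: the worker body run on every
 * lcore, its argument, an optional per-lcore cleanup hook and a name.
 */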
struct test_case {
	case_func_t func;
	void *arg;
	case_clean_t clean;
	char name[MAX_STRING_SIZE];
};

/* All test cases in the test suite */
struct test_case test_cases[] = {
	{ test_eal_init_once, NULL, NULL, "eal init once" },
	{ ring_create_lookup, NULL, ring_clean, "ring create/lookup" },
	{ mempool_create_lookup, NULL, mempool_clean,
			"mempool create/lookup" },
#ifdef RTE_LIB_HASH
	{ hash_create_free, NULL, hash_clean, "hash create/free" },
	{ fbk_create_free, NULL, fbk_clean, "fbk create/free" },
#endif /* RTE_LIB_HASH */
#ifdef RTE_LIB_LPM
	{ lpm_create_free, NULL, lpm_clean, "lpm create/free" },
#endif /* RTE_LIB_LPM */
};

/**
 * Launch one test case on the main lcore and on all available worker lcores,
 * then verify that the shared object was created exactly once.
 */
static int
launch_test(struct test_case *pt_case)
{
	unsigned int lcore_id;
	unsigned int cores;
	unsigned int count;
	int ret = 0;

	if (pt_case->func == NULL)
		return -1;

	__atomic_store_n(&obj_count, 0, __ATOMIC_RELAXED);
	__atomic_store_n(&synchro, 0, __ATOMIC_RELAXED);

	cores = RTE_MIN(rte_lcore_count(), MAX_LCORES);
	RTE_LCORE_FOREACH_WORKER(lcore_id) {
		if (cores == 1)
			break;
		cores--;
		rte_eal_remote_launch(pt_case->func, pt_case->arg, lcore_id);
	}

	/* release the workers and run the same test body on the main lcore */
	__atomic_store_n(&synchro, 1, __ATOMIC_RELAXED);

	if (pt_case->func(pt_case->arg) < 0)
		ret = -1;

	RTE_LCORE_FOREACH_WORKER(lcore_id) {
		if (rte_eal_wait_lcore(lcore_id) < 0)
			ret = -1;
	}

	RTE_LCORE_FOREACH(lcore_id) {
		if (pt_case->clean != NULL)
			pt_case->clean(lcore_id);
	}

	count = __atomic_load_n(&obj_count, __ATOMIC_RELAXED);
	if (count != 1) {
		printf("%s: common object allocated %d times (should be 1)\n",
			pt_case->name, count);
		ret = -1;
	}

	return ret;
}

/**
 * Main entry of func_reentrancy test
 */
static int
test_func_reentrancy(void)
{
	uint32_t case_id;
	struct test_case *pt_case = NULL;

	if (RTE_EXEC_ENV_IS_WINDOWS)
		return TEST_SKIPPED;

	if (rte_lcore_count() < 2) {
		printf("Not enough cores for func_reentrancy_autotest, expecting at least 2\n");
		return TEST_SKIPPED;
	} else if (rte_lcore_count() > MAX_LCORES)
		printf("Too many lcores, some cores will be disabled\n");

	for (case_id = 0; case_id < RTE_DIM(test_cases); case_id++) {
		pt_case = &test_cases[case_id];
		if (pt_case->func == NULL)
			continue;

		if (launch_test(pt_case) < 0) {
			printf("Func-ReEnt CASE %"PRIu32": %s FAIL\n", case_id, pt_case->name);
			return -1;
		}
		printf("Func-ReEnt CASE %"PRIu32": %s PASS\n", case_id, pt_case->name);
	}

	return 0;
}

REGISTER_TEST_COMMAND(func_reentrancy_autotest, test_func_reentrancy);