xref: /f-stack/dpdk/app/test/test_spinlock.c (revision 2d9fd380)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>
#include <string.h>
#include <unistd.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_memory.h>
#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_lcore.h>
#include <rte_cycles.h>
#include <rte_spinlock.h>
#include <rte_atomic.h>

#include "test.h"

/*
 * Spinlock test
 * =============
 *
 * - There is a global spinlock and a table of spinlocks (one per lcore).
 *
 * - The test function takes all of these locks and launches the
 *   ``test_spinlock_per_core()`` function on each core (except the main).
 *
 *   - The function takes the global lock, displays something, then releases
 *     the global lock.
 *   - The function takes the per-lcore lock, displays something, then releases
 *     the per-lcore lock.
 *
 * - The main function unlocks the per-lcore locks sequentially and
 *   waits between each unlock. This triggers the display of a message
 *   for each core, in the correct order. The autotest script checks that
 *   this order is correct.
 *
 * - A load test is carried out, with all cores attempting to lock a single
 *   lock multiple times.
 */

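/*
 * Locks and state shared by the tests below: "sl" is the global lock,
 * "sl_try" is the lock exercised by the trylock test, "sl_tab" holds one
 * lock per lcore, "slr" is the recursive lock, "count" counts failed
 * trylock attempts and "synchro" is the start flag for the load test.
 */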
static rte_spinlock_t sl, sl_try;
static rte_spinlock_t sl_tab[RTE_MAX_LCORE];
static rte_spinlock_recursive_t slr;
static unsigned count = 0;

static rte_atomic32_t synchro;

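/*
 * Take and release the global lock, then the calling lcore's own lock,
 * printing a message under each one.
 */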
static int
test_spinlock_per_core(__rte_unused void *arg)
{
	rte_spinlock_lock(&sl);
	printf("Global lock taken on core %u\n", rte_lcore_id());
	rte_spinlock_unlock(&sl);

	rte_spinlock_lock(&sl_tab[rte_lcore_id()]);
	printf("Hello from core %u !\n", rte_lcore_id());
	rte_spinlock_unlock(&sl_tab[rte_lcore_id()]);

	return 0;
}

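/*
 * Take the global recursive lock three times, then release it three times,
 * printing the nesting count at each step.
 */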
static int
test_spinlock_recursive_per_core(__rte_unused void *arg)
{
	unsigned id = rte_lcore_id();

	rte_spinlock_recursive_lock(&slr);
	printf("Global recursive lock taken on core %u - count = %d\n",
	       id, slr.count);
	rte_spinlock_recursive_lock(&slr);
	printf("Global recursive lock taken on core %u - count = %d\n",
	       id, slr.count);
	rte_spinlock_recursive_lock(&slr);
	printf("Global recursive lock taken on core %u - count = %d\n",
	       id, slr.count);

	printf("Hello from within recursive locks from core %u !\n", id);

	rte_spinlock_recursive_unlock(&slr);
	printf("Global recursive lock released on core %u - count = %d\n",
	       id, slr.count);
	rte_spinlock_recursive_unlock(&slr);
	printf("Global recursive lock released on core %u - count = %d\n",
	       id, slr.count);
	rte_spinlock_recursive_unlock(&slr);
	printf("Global recursive lock released on core %u - count = %d\n",
	       id, slr.count);

	return 0;
}

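/*
 * State for the load test: "lk" is the lock that all lcores contend on,
 * "time_count" records each lcore's elapsed time in microseconds and
 * MAX_LOOP is the number of iterations each lcore performs.
 */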
static rte_spinlock_t lk = RTE_SPINLOCK_INITIALIZER;
static uint64_t time_count[RTE_MAX_LCORE] = {0};

#define MAX_LOOP 10000

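/*
 * Increment a counter MAX_LOOP times, taking and releasing "lk" around each
 * increment when *func_param is non-zero, and record the elapsed time for
 * this lcore in microseconds. Workers spin on "synchro" before starting so
 * that all lcores begin at roughly the same time.
 */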
static int
load_loop_fn(void *func_param)
{
	uint64_t time_diff = 0, begin;
	uint64_t hz = rte_get_timer_hz();
	volatile uint64_t lcount = 0;
	const int use_lock = *(int *)func_param;
	const unsigned lcore = rte_lcore_id();

	/* wait synchro for workers */
	if (lcore != rte_get_main_lcore())
		while (rte_atomic32_read(&synchro) == 0);

	begin = rte_get_timer_cycles();
	while (lcount < MAX_LOOP) {
		if (use_lock)
			rte_spinlock_lock(&lk);
		lcount++;
		if (use_lock)
			rte_spinlock_unlock(&lk);
	}
	time_diff = rte_get_timer_cycles() - begin;
	time_count[lcore] = time_diff * 1000000 / hz;
	return 0;
}

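/*
 * Measure the cost of the counting loop without a lock and with an
 * uncontended lock on a single core, then with the lock contended by
 * every lcore, and print the per-core and total times.
 */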
static int
test_spinlock_perf(void)
{
	unsigned int i;
	uint64_t total = 0;
	int lock = 0;
	const unsigned lcore = rte_lcore_id();

	printf("\nTest with no lock on single core...\n");
	load_loop_fn(&lock);
	printf("Core [%u] Cost Time = %"PRIu64" us\n", lcore,
						time_count[lcore]);
	memset(time_count, 0, sizeof(time_count));

	printf("\nTest with lock on single core...\n");
	lock = 1;
	load_loop_fn(&lock);
	printf("Core [%u] Cost Time = %"PRIu64" us\n", lcore,
						time_count[lcore]);
	memset(time_count, 0, sizeof(time_count));

	printf("\nTest with lock on %u cores...\n", rte_lcore_count());

	/* Clear synchro and start workers */
	rte_atomic32_set(&synchro, 0);
	rte_eal_mp_remote_launch(load_loop_fn, &lock, SKIP_MAIN);

	/* start synchro and launch test on main */
	rte_atomic32_set(&synchro, 1);
	load_loop_fn(&lock);

	rte_eal_mp_wait_lcore();

	RTE_LCORE_FOREACH(i) {
		printf("Core [%u] Cost Time = %"PRIu64" us\n", i,
						time_count[i]);
		total += time_count[i];
	}

	printf("Total Cost Time = %"PRIu64" us\n", total);

	return 0;
}

/*
 * Use rte_spinlock_trylock() to try to take a spinlock that is already
 * held. The call should fail and return immediately, and each failing
 * lcore increments the global "count" (under the global lock). The final
 * value of "count" is checked afterwards.
 */
static int
test_spinlock_try(__rte_unused void *arg)
{
	if (rte_spinlock_trylock(&sl_try) == 0) {
		rte_spinlock_lock(&sl);
		count++;
		rte_spinlock_unlock(&sl);
	}

	return 0;
}


/*
 * Test rte_eal_get_lcore_state() in addition to spinlocks
 * as we have "waiting" then "running" lcores.
 */
static int
test_spinlock(void)
{
	int ret = 0;
	int i;

	/* worker cores should be waiting: print it */
	RTE_LCORE_FOREACH_WORKER(i) {
		printf("lcore %d state: %d\n", i,
		       (int) rte_eal_get_lcore_state(i));
	}

	rte_spinlock_init(&sl);
	rte_spinlock_init(&sl_try);
	rte_spinlock_recursive_init(&slr);
	for (i = 0; i < RTE_MAX_LCORE; i++)
		rte_spinlock_init(&sl_tab[i]);

	rte_spinlock_lock(&sl);

	RTE_LCORE_FOREACH_WORKER(i) {
		rte_spinlock_lock(&sl_tab[i]);
		rte_eal_remote_launch(test_spinlock_per_core, NULL, i);
	}

	/* worker cores should be busy: print it */
	RTE_LCORE_FOREACH_WORKER(i) {
		printf("lcore %d state: %d\n", i,
		       (int) rte_eal_get_lcore_state(i));
	}
	rte_spinlock_unlock(&sl);

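	/*
	 * Release the per-lcore locks one by one; the 10 ms delay lets each
	 * worker print its message before the next lock is released, so the
	 * output order can be checked.
	 */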
	RTE_LCORE_FOREACH_WORKER(i) {
		rte_spinlock_unlock(&sl_tab[i]);
		rte_delay_ms(10);
	}

	rte_eal_mp_wait_lcore();

	rte_spinlock_recursive_lock(&slr);

	/*
	 * Try to acquire a lock that we already own
	 */
	if (!rte_spinlock_recursive_trylock(&slr)) {
		printf("rte_spinlock_recursive_trylock failed on a lock that "
		       "we already own\n");
		ret = -1;
	} else
		rte_spinlock_recursive_unlock(&slr);

	RTE_LCORE_FOREACH_WORKER(i) {
		rte_eal_remote_launch(test_spinlock_recursive_per_core, NULL, i);
	}
	rte_spinlock_recursive_unlock(&slr);
	rte_eal_mp_wait_lcore();

	/*
	 * Check that try-locking an already locked spinlock returns
	 * immediately. Lock the spinlock first, then launch all the worker
	 * lcores to trylock the same spinlock.
	 * Every worker should fail to take the lock, return immediately and
	 * increment "count" (initialized to zero) once.
	 * If "count" finally equals the number of worker lcores, try-locking
	 * a locked spinlock behaves correctly.
	 */
	if (rte_spinlock_trylock(&sl_try) == 0) {
		return -1;
	}
	count = 0;
	RTE_LCORE_FOREACH_WORKER(i) {
		rte_eal_remote_launch(test_spinlock_try, NULL, i);
	}
	rte_eal_mp_wait_lcore();
	rte_spinlock_unlock(&sl_try);
	if (rte_spinlock_is_locked(&sl)) {
		printf("spinlock is locked but it should not be\n");
		return -1;
	}
	rte_spinlock_lock(&sl);
	if (count != (rte_lcore_count() - 1)) {
		ret = -1;
	}
	rte_spinlock_unlock(&sl);

	/*
	 * Test recursive try-locking.
	 * Use rte_spinlock_recursive_trylock() to check that a recursive
	 * spinlock can be taken recursively: try to lock the same object twice.
	 */
	if (rte_spinlock_recursive_trylock(&slr) == 0) {
		printf("The first spinlock_recursive_trylock failed but it should have succeeded\n");
		return -1;
	}
	if (rte_spinlock_recursive_trylock(&slr) == 0) {
		printf("The second spinlock_recursive_trylock failed but it should have succeeded\n");
		return -1;
	}
	rte_spinlock_recursive_unlock(&slr);
	rte_spinlock_recursive_unlock(&slr);

	if (test_spinlock_perf() < 0)
		return -1;

	return ret;
}

REGISTER_TEST_COMMAND(spinlock_autotest, test_spinlock);