xref: /f-stack/dpdk/app/test/test_mcslock.c (revision 2d9fd380)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Arm Limited
 */
44418919fSjohnjiang 
54418919fSjohnjiang #include <stdio.h>
64418919fSjohnjiang #include <stdint.h>
74418919fSjohnjiang #include <inttypes.h>
84418919fSjohnjiang #include <string.h>
94418919fSjohnjiang #include <unistd.h>
104418919fSjohnjiang #include <sys/queue.h>
114418919fSjohnjiang 
124418919fSjohnjiang #include <rte_common.h>
134418919fSjohnjiang #include <rte_memory.h>
144418919fSjohnjiang #include <rte_per_lcore.h>
154418919fSjohnjiang #include <rte_launch.h>
164418919fSjohnjiang #include <rte_eal.h>
174418919fSjohnjiang #include <rte_lcore.h>
184418919fSjohnjiang #include <rte_cycles.h>
194418919fSjohnjiang #include <rte_mcslock.h>
204418919fSjohnjiang #include <rte_atomic.h>
214418919fSjohnjiang 
224418919fSjohnjiang #include "test.h"
234418919fSjohnjiang 
/*
 * RTE MCS lock test
 * =================
 *
 * These tests are derived from spin lock test cases.
 *
 * - The functional test takes all of these locks and launches the
 *   ''test_mcslock_per_core()'' function on each core (except the main).
 *
 *   - The function takes the global lock, display something, then releases
 *     the global lock on each core.
 *
 * - A load test is carried out, with all cores attempting to lock a single
 *   lock multiple times.
 */
394418919fSjohnjiang 
/* Per-lcore MCS queue nodes; each core contributes its own node when it
 * joins a lock's wait queue (one node per lock under test).
 */
RTE_DEFINE_PER_LCORE(rte_mcslock_t, _ml_me);
RTE_DEFINE_PER_LCORE(rte_mcslock_t, _ml_try_me);
RTE_DEFINE_PER_LCORE(rte_mcslock_t, _ml_perf_me);

/* The three MCS locks under test (queue-tail pointers shared by all cores). */
rte_mcslock_t *p_ml;		/* functional lock/unlock test */
rte_mcslock_t *p_ml_try;	/* trylock test */
rte_mcslock_t *p_ml_perf;	/* performance test */

/* Incremented once by each worker whose trylock fails as expected. */
static unsigned int count;

/* Start gate for the perf test: workers spin until it becomes non-zero. */
static rte_atomic32_t synchro;
514418919fSjohnjiang 
524418919fSjohnjiang static int
test_mcslock_per_core(__rte_unused void * arg)53*2d9fd380Sjfb8856606 test_mcslock_per_core(__rte_unused void *arg)
544418919fSjohnjiang {
554418919fSjohnjiang 	/* Per core me node. */
564418919fSjohnjiang 	rte_mcslock_t ml_me = RTE_PER_LCORE(_ml_me);
574418919fSjohnjiang 
584418919fSjohnjiang 	rte_mcslock_lock(&p_ml, &ml_me);
594418919fSjohnjiang 	printf("MCS lock taken on core %u\n", rte_lcore_id());
604418919fSjohnjiang 	rte_mcslock_unlock(&p_ml, &ml_me);
614418919fSjohnjiang 	printf("MCS lock released on core %u\n", rte_lcore_id());
624418919fSjohnjiang 
634418919fSjohnjiang 	return 0;
644418919fSjohnjiang }
654418919fSjohnjiang 
/* Per-lcore loop duration in microseconds, filled in by load_loop_fn(). */
static uint64_t time_count[RTE_MAX_LCORE] = {0};

/* Number of lock/unlock iterations each core performs in the perf test. */
#define MAX_LOOP 1000000
694418919fSjohnjiang 
704418919fSjohnjiang static int
load_loop_fn(void * func_param)714418919fSjohnjiang load_loop_fn(void *func_param)
724418919fSjohnjiang {
734418919fSjohnjiang 	uint64_t time_diff = 0, begin;
744418919fSjohnjiang 	uint64_t hz = rte_get_timer_hz();
754418919fSjohnjiang 	volatile uint64_t lcount = 0;
764418919fSjohnjiang 	const int use_lock = *(int *)func_param;
774418919fSjohnjiang 	const unsigned int lcore = rte_lcore_id();
784418919fSjohnjiang 
794418919fSjohnjiang 	/**< Per core me node. */
804418919fSjohnjiang 	rte_mcslock_t ml_perf_me = RTE_PER_LCORE(_ml_perf_me);
814418919fSjohnjiang 
824418919fSjohnjiang 	/* wait synchro */
834418919fSjohnjiang 	while (rte_atomic32_read(&synchro) == 0)
844418919fSjohnjiang 		;
854418919fSjohnjiang 
864418919fSjohnjiang 	begin = rte_get_timer_cycles();
874418919fSjohnjiang 	while (lcount < MAX_LOOP) {
884418919fSjohnjiang 		if (use_lock)
894418919fSjohnjiang 			rte_mcslock_lock(&p_ml_perf, &ml_perf_me);
904418919fSjohnjiang 
914418919fSjohnjiang 		lcount++;
924418919fSjohnjiang 		if (use_lock)
934418919fSjohnjiang 			rte_mcslock_unlock(&p_ml_perf, &ml_perf_me);
944418919fSjohnjiang 	}
954418919fSjohnjiang 	time_diff = rte_get_timer_cycles() - begin;
964418919fSjohnjiang 	time_count[lcore] = time_diff * 1000000 / hz;
974418919fSjohnjiang 	return 0;
984418919fSjohnjiang }
994418919fSjohnjiang 
1004418919fSjohnjiang static int
test_mcslock_perf(void)1014418919fSjohnjiang test_mcslock_perf(void)
1024418919fSjohnjiang {
1034418919fSjohnjiang 	unsigned int i;
1044418919fSjohnjiang 	uint64_t total = 0;
1054418919fSjohnjiang 	int lock = 0;
1064418919fSjohnjiang 	const unsigned int lcore = rte_lcore_id();
1074418919fSjohnjiang 
1084418919fSjohnjiang 	printf("\nTest with no lock on single core...\n");
1094418919fSjohnjiang 	rte_atomic32_set(&synchro, 1);
1104418919fSjohnjiang 	load_loop_fn(&lock);
1114418919fSjohnjiang 	printf("Core [%u] Cost Time = %"PRIu64" us\n",
1124418919fSjohnjiang 			lcore, time_count[lcore]);
1134418919fSjohnjiang 	memset(time_count, 0, sizeof(time_count));
1144418919fSjohnjiang 
1154418919fSjohnjiang 	printf("\nTest with lock on single core...\n");
1164418919fSjohnjiang 	lock = 1;
1174418919fSjohnjiang 	rte_atomic32_set(&synchro, 1);
1184418919fSjohnjiang 	load_loop_fn(&lock);
1194418919fSjohnjiang 	printf("Core [%u] Cost Time = %"PRIu64" us\n",
1204418919fSjohnjiang 			lcore, time_count[lcore]);
1214418919fSjohnjiang 	memset(time_count, 0, sizeof(time_count));
1224418919fSjohnjiang 
1234418919fSjohnjiang 	printf("\nTest with lock on %u cores...\n", (rte_lcore_count()));
1244418919fSjohnjiang 
1254418919fSjohnjiang 	rte_atomic32_set(&synchro, 0);
126*2d9fd380Sjfb8856606 	rte_eal_mp_remote_launch(load_loop_fn, &lock, SKIP_MAIN);
1274418919fSjohnjiang 
128*2d9fd380Sjfb8856606 	/* start synchro and launch test on main */
1294418919fSjohnjiang 	rte_atomic32_set(&synchro, 1);
1304418919fSjohnjiang 	load_loop_fn(&lock);
1314418919fSjohnjiang 
1324418919fSjohnjiang 	rte_eal_mp_wait_lcore();
1334418919fSjohnjiang 
1344418919fSjohnjiang 	RTE_LCORE_FOREACH(i) {
1354418919fSjohnjiang 		printf("Core [%u] Cost Time = %"PRIu64" us\n",
1364418919fSjohnjiang 				i, time_count[i]);
1374418919fSjohnjiang 		total += time_count[i];
1384418919fSjohnjiang 	}
1394418919fSjohnjiang 
1404418919fSjohnjiang 	printf("Total Cost Time = %"PRIu64" us\n", total);
1414418919fSjohnjiang 
1424418919fSjohnjiang 	return 0;
1434418919fSjohnjiang }
1444418919fSjohnjiang 
1454418919fSjohnjiang /*
1464418919fSjohnjiang  * Use rte_mcslock_trylock() to trylock a mcs lock object,
1474418919fSjohnjiang  * If it could not lock the object successfully, it would
1484418919fSjohnjiang  * return immediately.
1494418919fSjohnjiang  */
1504418919fSjohnjiang static int
test_mcslock_try(__rte_unused void * arg)151*2d9fd380Sjfb8856606 test_mcslock_try(__rte_unused void *arg)
1524418919fSjohnjiang {
1534418919fSjohnjiang 	/**< Per core me node. */
1544418919fSjohnjiang 	rte_mcslock_t ml_me     = RTE_PER_LCORE(_ml_me);
1554418919fSjohnjiang 	rte_mcslock_t ml_try_me = RTE_PER_LCORE(_ml_try_me);
1564418919fSjohnjiang 
157*2d9fd380Sjfb8856606 	/* Locked ml_try in the main lcore, so it should fail
158*2d9fd380Sjfb8856606 	 * when trying to lock it in the worker lcore.
1594418919fSjohnjiang 	 */
1604418919fSjohnjiang 	if (rte_mcslock_trylock(&p_ml_try, &ml_try_me) == 0) {
1614418919fSjohnjiang 		rte_mcslock_lock(&p_ml, &ml_me);
1624418919fSjohnjiang 		count++;
1634418919fSjohnjiang 		rte_mcslock_unlock(&p_ml, &ml_me);
1644418919fSjohnjiang 	}
1654418919fSjohnjiang 
1664418919fSjohnjiang 	return 0;
1674418919fSjohnjiang }
1684418919fSjohnjiang 
1694418919fSjohnjiang 
/*
 * Functional MCS lock test, executed on the main lcore.
 *
 * Exercises lock/unlock across all cores, trylock on a held lock, and
 * rte_mcslock_is_locked(); also samples rte_eal_get_lcore_state() while
 * the workers are waiting and then running.
 *
 * Returns 0 on success, -1 if any check fails.
 */
static int
test_mcslock(void)
{
	int ret = 0;
	int i;

	/* Define per core me node. */
	rte_mcslock_t ml_me     = RTE_PER_LCORE(_ml_me);
	rte_mcslock_t ml_try_me = RTE_PER_LCORE(_ml_try_me);

	/*
	 * Test mcs lock & unlock on each core
	 */

	/* worker cores should be waiting: print it */
	RTE_LCORE_FOREACH_WORKER(i) {
		printf("lcore %d state: %d\n", i,
				(int) rte_eal_get_lcore_state(i));
	}

	/* Hold the lock so every launched worker blocks inside its lock
	 * call until we release it below.
	 */
	rte_mcslock_lock(&p_ml, &ml_me);

	RTE_LCORE_FOREACH_WORKER(i) {
		rte_eal_remote_launch(test_mcslock_per_core, NULL, i);
	}

	/* worker cores should be busy: print it */
	RTE_LCORE_FOREACH_WORKER(i) {
		printf("lcore %d state: %d\n", i,
				(int) rte_eal_get_lcore_state(i));
	}

	/* Release the lock; the workers take and drop it one by one. */
	rte_mcslock_unlock(&p_ml, &ml_me);

	rte_eal_mp_wait_lcore();

	/*
	 * Test if it could return immediately from try-locking a locked object.
	 * Here it will lock the mcs lock object first, then launch all the
	 * worker lcores to trylock the same mcs lock object.
	 * All the worker lcores should give up try-locking a locked object and
	 * return immediately, and then increase the "count" initialized with
	 * zero by one per times.
	 * We can check if the "count" is finally equal to the number of all
	 * worker lcores to see if the behavior of try-locking a locked
	 * mcslock object is correct.
	 */
	if (rte_mcslock_trylock(&p_ml_try, &ml_try_me) == 0)
		return -1;

	count = 0;
	RTE_LCORE_FOREACH_WORKER(i) {
		rte_eal_remote_launch(test_mcslock_try, NULL, i);
	}
	rte_eal_mp_wait_lcore();
	rte_mcslock_unlock(&p_ml_try, &ml_try_me);

	/* Test is_locked API */
	if (rte_mcslock_is_locked(p_ml)) {
		printf("mcslock is locked but it should not be\n");
		return -1;
	}

	/* Counting the locked times in each core */
	rte_mcslock_lock(&p_ml, &ml_me);
	/* Every worker (lcore_count - 1 of them) must have bumped count once. */
	if (count != (rte_lcore_count() - 1))
		ret = -1;
	rte_mcslock_unlock(&p_ml, &ml_me);

	/* mcs lock perf test */
	if (test_mcslock_perf() < 0)
		return -1;

	return ret;
}
2494418919fSjohnjiang 
2504418919fSjohnjiang REGISTER_TEST_COMMAND(mcslock_autotest, test_mcslock);
251