xref: /dpdk/app/test/test_mcslock.c (revision b6a7e6852e9ab82ae0e05e2d2a0b83abca17de3b)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Arm Limited
 */

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>
#include <string.h>
#include <unistd.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_memory.h>
#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_lcore.h>
#include <rte_cycles.h>
#include <rte_mcslock.h>

#include "test.h"

/*
 * RTE MCS lock test
 * =================
 *
 * These tests are derived from spin lock test cases.
 *
 * - The functional test takes the global lock and launches the
 *   ``test_mcslock_per_core()`` function on each core (except the main).
 *
 *   - On each core, the function takes the global lock, displays something,
 *     then releases the global lock.
 *
 * - A load test is carried out, with all cores attempting to lock a single
 *   lock multiple times.
 */

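/*
 * Illustrative sketch (not part of the test) of the MCS lock calls the
 * cases below exercise: every lcore supplies its own queue node, which must
 * stay valid from lock to unlock, and rte_mcslock_trylock() returns 1 on
 * success and 0 when the lock is already held.
 *
 *	rte_mcslock_t me;                  // per-lcore queue node
 *	rte_mcslock_lock(&p_ml, &me);      // spin until the lock is granted
 *	// ... critical section ...
 *	rte_mcslock_unlock(&p_ml, &me);    // hand the lock to the next waiter
 */
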
RTE_ATOMIC(rte_mcslock_t *) p_ml;
RTE_ATOMIC(rte_mcslock_t *) p_ml_try;
RTE_ATOMIC(rte_mcslock_t *) p_ml_perf;

static unsigned int count;

static RTE_ATOMIC(uint32_t) synchro;

static int
test_mcslock_per_core(__rte_unused void *arg)
{
	/* Per core me node. */
	rte_mcslock_t ml_me;

	rte_mcslock_lock(&p_ml, &ml_me);
	printf("MCS lock taken on core %u\n", rte_lcore_id());
	rte_mcslock_unlock(&p_ml, &ml_me);
	printf("MCS lock released on core %u\n", rte_lcore_id());

	return 0;
}

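/* Per-lcore elapsed time of the timed loop in load_loop_fn(), in microseconds. */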
static uint64_t time_count[RTE_MAX_LCORE] = {0};

#define MAX_LOOP 1000000

static int
load_loop_fn(void *func_param)
{
	uint64_t time_diff = 0, begin;
	uint64_t hz = rte_get_timer_hz();
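	/* volatile so the loop is not optimized away in the no-lock case. */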
	volatile uint64_t lcount = 0;
	const int use_lock = *(int *)func_param;
	const unsigned int lcore = rte_lcore_id();

	/* Per core me node. */
	rte_mcslock_t ml_perf_me;

	/* Wait for the main lcore to signal the start. */
	rte_wait_until_equal_32((uint32_t *)(uintptr_t)&synchro, 1, rte_memory_order_relaxed);

	begin = rte_get_timer_cycles();
	while (lcount < MAX_LOOP) {
		if (use_lock)
			rte_mcslock_lock(&p_ml_perf, &ml_perf_me);

		lcount++;
		if (use_lock)
			rte_mcslock_unlock(&p_ml_perf, &ml_perf_me);
	}
	time_diff = rte_get_timer_cycles() - begin;
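	/* Convert elapsed cycles to microseconds (hz is timer cycles per second). */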
	time_count[lcore] = time_diff * 1000000 / hz;
	return 0;
}

static int
test_mcslock_perf(void)
{
	unsigned int i;
	uint64_t total = 0;
	int lock = 0;
	const unsigned int lcore = rte_lcore_id();

	printf("\nTest with no lock on single core...\n");
	rte_atomic_store_explicit(&synchro, 1, rte_memory_order_relaxed);
	load_loop_fn(&lock);
	printf("Core [%u] Cost Time = %"PRIu64" us\n",
			lcore, time_count[lcore]);
	memset(time_count, 0, sizeof(time_count));

	printf("\nTest with lock on single core...\n");
	rte_atomic_store_explicit(&synchro, 1, rte_memory_order_relaxed);
	lock = 1;
	load_loop_fn(&lock);
	printf("Core [%u] Cost Time = %"PRIu64" us\n",
			lcore, time_count[lcore]);
	memset(time_count, 0, sizeof(time_count));

	printf("\nTest with lock on %u cores...\n", (rte_lcore_count()));

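	/* Keep the start flag clear so the workers launched below spin in
	 * load_loop_fn() until the main lcore releases them all at once.
	 */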
	rte_atomic_store_explicit(&synchro, 0, rte_memory_order_relaxed);
	rte_eal_mp_remote_launch(load_loop_fn, &lock, SKIP_MAIN);

	/* start synchro and launch test on main */
	rte_atomic_store_explicit(&synchro, 1, rte_memory_order_relaxed);
	load_loop_fn(&lock);

	rte_eal_mp_wait_lcore();

	RTE_LCORE_FOREACH(i) {
		printf("Core [%u] Cost Time = %"PRIu64" us\n",
				i, time_count[i]);
		total += time_count[i];
	}

	printf("Total Cost Time = %"PRIu64" us\n", total);

	return 0;
}

/*
 * Use rte_mcslock_trylock() to try to take an MCS lock object.
 * If the lock cannot be taken, the call returns immediately
 * instead of queueing behind the current holder.
 */
static int
test_mcslock_try(__rte_unused void *arg)
{
	/* Per core me node. */
	rte_mcslock_t ml_me;
	rte_mcslock_t ml_try_me;

	/* p_ml_try is already held by the main lcore, so the trylock
	 * should fail on every worker lcore.
	 */
	if (rte_mcslock_trylock(&p_ml_try, &ml_try_me) == 0) {
		rte_mcslock_lock(&p_ml, &ml_me);
		count++;
		rte_mcslock_unlock(&p_ml, &ml_me);
	}

	return 0;
}


/*
 * Test rte_eal_get_lcore_state() in addition to mcs locks
 * as we have "waiting" then "running" lcores.
 */
static int
test_mcslock(void)
{
	int ret = 0;
	int i;

	/* Define per core me node. */
	rte_mcslock_t ml_me;
	rte_mcslock_t ml_try_me;

	/*
	 * Test mcs lock & unlock on each core
	 */

	/* worker cores should be waiting: print it */
	RTE_LCORE_FOREACH_WORKER(i) {
		printf("lcore %d state: %d\n", i,
				(int) rte_eal_get_lcore_state(i));
	}

	rte_mcslock_lock(&p_ml, &ml_me);

	RTE_LCORE_FOREACH_WORKER(i) {
		rte_eal_remote_launch(test_mcslock_per_core, NULL, i);
	}

	/* worker cores should be busy: print it */
	RTE_LCORE_FOREACH_WORKER(i) {
		printf("lcore %d state: %d\n", i,
				(int) rte_eal_get_lcore_state(i));
	}

	rte_mcslock_unlock(&p_ml, &ml_me);

	rte_eal_mp_wait_lcore();

	/*
	 * Test that try-locking a locked object returns immediately.
	 * The main lcore locks the MCS lock object first, then launches all
	 * the worker lcores to trylock the same object.
	 * Every worker lcore should give up immediately and, on that failure,
	 * increment "count" (initialized to zero) exactly once.
	 * If "count" finally equals the number of worker lcores, try-locking
	 * a locked mcslock object behaves correctly.
	 */
	if (rte_mcslock_trylock(&p_ml_try, &ml_try_me) == 0)
		return -1;

	count = 0;
	RTE_LCORE_FOREACH_WORKER(i) {
		rte_eal_remote_launch(test_mcslock_try, NULL, i);
	}
	rte_eal_mp_wait_lcore();
	rte_mcslock_unlock(&p_ml_try, &ml_try_me);

	/* Test is_locked API */
	if (rte_mcslock_is_locked(p_ml)) {
		printf("mcslock is locked but it should not be\n");
		return -1;
	}

	/* Each worker should have incremented "count" exactly once. */
	rte_mcslock_lock(&p_ml, &ml_me);
	if (count != (rte_lcore_count() - 1))
		ret = -1;
	rte_mcslock_unlock(&p_ml, &ml_me);

	/* mcs lock perf test */
	if (test_mcslock_perf() < 0)
		return -1;

	return ret;
}

REGISTER_FAST_TEST(mcslock_autotest, false, true, test_mcslock);