/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018-2019 Arm Limited
 */

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/queue.h>
#include <unistd.h>

#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_lcore.h>
#include <rte_memory.h>
#include <rte_per_lcore.h>
#include <rte_ticketlock.h>

#include "test.h"

/*
 * Ticketlock test
 * ===============
 *
 * - There is a global ticketlock and a table of ticketlocks (one per lcore).
 *
 * - The test function takes all of these locks and launches the
 *   ``test_ticketlock_per_core()`` function on each core (except the main).
 *
 *   - The function takes the global lock, displays a message, then releases
 *     the global lock.
 *   - The function takes the per-lcore lock, displays a message, then
 *     releases the per-lcore lock.
 *
 * - The main function unlocks the per-lcore locks sequentially and
 *   waits between each unlock. This triggers the display of a message
 *   for each core, in the correct order. The autotest script checks that
 *   this order is correct.
 *
 * - A load test is carried out, with all cores attempting to take a single
 *   lock multiple times.
 */
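
/*
 * Background: a ticketlock grants the lock in FIFO order. Conceptually
 * (a sketch only, not the actual rte_ticketlock implementation):
 *
 *   my_ticket = atomic_fetch_add(&lock->next, 1);  // take a ticket
 *   while (atomic_load(&lock->current) != my_ticket)
 *       ;                                          // spin until served
 *   ... critical section ...
 *   atomic_store(&lock->current, my_ticket + 1);   // serve next waiter
 */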

static rte_ticketlock_t tl, tl_try;
static rte_ticketlock_t tl_tab[RTE_MAX_LCORE];
static rte_ticketlock_recursive_t tlr;
static unsigned int count;

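/* Set by the main lcore to release the workers in the perf test. */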
static RTE_ATOMIC(uint32_t) synchro;

static int
test_ticketlock_per_core(__rte_unused void *arg)
{
	rte_ticketlock_lock(&tl);
	printf("Global lock taken on core %u\n", rte_lcore_id());
	rte_ticketlock_unlock(&tl);

	rte_ticketlock_lock(&tl_tab[rte_lcore_id()]);
	printf("Hello from core %u !\n", rte_lcore_id());
	rte_ticketlock_unlock(&tl_tab[rte_lcore_id()]);

	return 0;
}

static int
test_ticketlock_recursive_per_core(__rte_unused void *arg)
{
	unsigned int id = rte_lcore_id();

	rte_ticketlock_recursive_lock(&tlr);
	printf("Global recursive lock taken on core %u - count = %d\n",
	       id, tlr.count);
	rte_ticketlock_recursive_lock(&tlr);
	printf("Global recursive lock taken on core %u - count = %d\n",
	       id, tlr.count);
	rte_ticketlock_recursive_lock(&tlr);
	printf("Global recursive lock taken on core %u - count = %d\n",
	       id, tlr.count);

	printf("Hello from within recursive locks from core %u !\n", id);

	rte_ticketlock_recursive_unlock(&tlr);
	printf("Global recursive lock released on core %u - count = %d\n",
	       id, tlr.count);
	rte_ticketlock_recursive_unlock(&tlr);
	printf("Global recursive lock released on core %u - count = %d\n",
	       id, tlr.count);
	rte_ticketlock_recursive_unlock(&tlr);
	printf("Global recursive lock released on core %u - count = %d\n",
	       id, tlr.count);

	return 0;
}

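/*
 * State for the load test: a shared counter protected by "lk",
 * per-lcore loop counters and per-lcore time costs; the counters
 * are cache-line aligned to reduce false sharing.
 */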
static rte_ticketlock_t lk = RTE_TICKETLOCK_INITIALIZER;
static alignas(RTE_CACHE_LINE_SIZE) uint64_t lcount;
static alignas(RTE_CACHE_LINE_SIZE) uint64_t lcore_count[RTE_MAX_LCORE];
static uint64_t time_cost[RTE_MAX_LCORE];

#define MAX_LOOP 10000

static int
load_loop_fn(void *func_param)
{
	uint64_t time_diff = 0, begin;
	uint64_t hz = rte_get_timer_hz();
	const int use_lock = *(int *)func_param;
	const unsigned int lcore = rte_lcore_id();

	/* wait synchro for workers */
	if (lcore != rte_get_main_lcore())
		rte_wait_until_equal_32((uint32_t *)(uintptr_t)&synchro, 1,
				rte_memory_order_relaxed);

	begin = rte_rdtsc_precise();
	while (lcore_count[lcore] < MAX_LOOP) {
		if (use_lock)
			rte_ticketlock_lock(&lk);
		lcore_count[lcore]++;
		lcount++;
		if (use_lock)
			rte_ticketlock_unlock(&lk);
	}
	time_diff = rte_rdtsc_precise() - begin;
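	/* Convert elapsed TSC cycles to microseconds. */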
	time_cost[lcore] = time_diff * 1000000 / hz;
	return 0;
}

static int
test_ticketlock_perf(void)
{
	unsigned int i;
	uint64_t tcount = 0;
	uint64_t total_time = 0;
	int lock = 0;
	const unsigned int lcore = rte_lcore_id();

	printf("\nTest with no lock on single core...\n");
	load_loop_fn(&lock);
	printf("Core [%u] cost time = %"PRIu64" us\n", lcore, time_cost[lcore]);
	memset(lcore_count, 0, sizeof(lcore_count));
	memset(time_cost, 0, sizeof(time_cost));

	printf("\nTest with lock on single core...\n");
	lock = 1;
	load_loop_fn(&lock);
	printf("Core [%u] cost time = %"PRIu64" us\n", lcore, time_cost[lcore]);
	memset(lcore_count, 0, sizeof(lcore_count));
	memset(time_cost, 0, sizeof(time_cost));

	lcount = 0;
	printf("\nTest with lock on %u cores...\n", rte_lcore_count());

	/* Clear synchro and start workers */
	rte_atomic_store_explicit(&synchro, 0, rte_memory_order_relaxed);
	rte_eal_mp_remote_launch(load_loop_fn, &lock, SKIP_MAIN);

	/* start synchro and launch test on main */
	rte_atomic_store_explicit(&synchro, 1, rte_memory_order_relaxed);
	load_loop_fn(&lock);

	rte_eal_mp_wait_lcore();

	RTE_LCORE_FOREACH(i) {
		printf("Core [%u] cost time = %"PRIu64" us\n", i, time_cost[i]);
		tcount += lcore_count[i];
		total_time += time_cost[i];
	}

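	/*
	 * Since every increment was done under the lock, the sum of the
	 * per-lcore counters must equal the shared counter; a mismatch
	 * means updates were lost.
	 */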
	if (tcount != lcount)
		return -1;

	printf("Total cost time = %"PRIu64" us\n", total_time);

	return 0;
}

/*
 * Use rte_ticketlock_trylock() to try to take a ticketlock object.
 * If the object cannot be locked, the call returns immediately and
 * the "count" variable is incremented once per failed attempt. The
 * final value of "count" is checked later as the test result.
 */
static int
test_ticketlock_try(__rte_unused void *arg)
{
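	/* tl_try is held by the main lcore, so trylock fails (returns 0)
	 * and the failed attempt is counted under the global lock.
	 */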
	if (rte_ticketlock_trylock(&tl_try) == 0) {
		rte_ticketlock_lock(&tl);
		count++;
		rte_ticketlock_unlock(&tl);
	}

	return 0;
}

/*
 * Test rte_eal_get_lcore_state() in addition to ticketlocks
 * as we have "waiting" then "running" lcores.
 */
static int
test_ticketlock(void)
{
	int ret = 0;
	int i;

	/* worker cores should be waiting: print it */
	RTE_LCORE_FOREACH_WORKER(i) {
		printf("lcore %d state: %d\n", i,
		       (int) rte_eal_get_lcore_state(i));
	}

	rte_ticketlock_init(&tl);
	rte_ticketlock_init(&tl_try);
	rte_ticketlock_recursive_init(&tlr);
	RTE_LCORE_FOREACH_WORKER(i) {
		rte_ticketlock_init(&tl_tab[i]);
	}

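	/* Hold the global lock so each worker blocks on it first. */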
	rte_ticketlock_lock(&tl);

	RTE_LCORE_FOREACH_WORKER(i) {
		rte_ticketlock_lock(&tl_tab[i]);
		rte_eal_remote_launch(test_ticketlock_per_core, NULL, i);
	}

	/* worker cores should be busy: print it */
	RTE_LCORE_FOREACH_WORKER(i) {
		printf("lcore %d state: %d\n", i,
		       (int) rte_eal_get_lcore_state(i));
	}
	rte_ticketlock_unlock(&tl);

	RTE_LCORE_FOREACH_WORKER(i) {
		rte_ticketlock_unlock(&tl_tab[i]);
		rte_delay_ms(10);
	}

	rte_eal_mp_wait_lcore();

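	/*
	 * A recursive ticketlock can be taken again by the lcore that
	 * already owns it; each lock call must be paired with an unlock.
	 */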
	rte_ticketlock_recursive_lock(&tlr);

	/*
	 * Try to acquire a lock that we already own.
	 */
	if (!rte_ticketlock_recursive_trylock(&tlr)) {
		printf("rte_ticketlock_recursive_trylock failed on a lock that "
		       "we already own\n");
		ret = -1;
	} else
		rte_ticketlock_recursive_unlock(&tlr);

	RTE_LCORE_FOREACH_WORKER(i) {
		rte_eal_remote_launch(test_ticketlock_recursive_per_core,
					NULL, i);
	}
	rte_ticketlock_recursive_unlock(&tlr);
	rte_eal_mp_wait_lcore();

	/*
	 * Test that try-locking a locked object returns immediately.
	 * Lock the ticketlock object first, then launch all the worker
	 * lcores to trylock the same object. Each worker should give up
	 * immediately and increment "count" (initialized to zero) once.
	 * Check that "count" finally equals the number of worker lcores
	 * to verify the trylock behavior on a locked object.
	 */
	if (rte_ticketlock_trylock(&tl_try) == 0)
		return -1;

	count = 0;
	RTE_LCORE_FOREACH_WORKER(i) {
		rte_eal_remote_launch(test_ticketlock_try, NULL, i);
	}
	rte_eal_mp_wait_lcore();
	rte_ticketlock_unlock(&tl_try);
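	/* The global lock must be free here; is_locked() only inspects it. */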
	if (rte_ticketlock_is_locked(&tl)) {
		printf("ticketlock is locked but it should not be\n");
		return -1;
	}
	rte_ticketlock_lock(&tl);
	if (count != (rte_lcore_count() - 1))
		ret = -1;

	rte_ticketlock_unlock(&tl);

	/*
	 * Test if it can trylock recursively.
	 * Use rte_ticketlock_recursive_trylock() to check if it can lock
	 * a ticketlock object recursively. Here it will try to lock the
	 * object twice.
	 */
	if (rte_ticketlock_recursive_trylock(&tlr) == 0) {
		printf("The first ticketlock_recursive_trylock failed "
		       "but it should have succeeded\n");
		return -1;
	}
	if (rte_ticketlock_recursive_trylock(&tlr) == 0) {
		printf("The second ticketlock_recursive_trylock failed "
		       "but it should have succeeded\n");
		return -1;
	}
	rte_ticketlock_recursive_unlock(&tlr);
	rte_ticketlock_recursive_unlock(&tlr);

	if (test_ticketlock_perf() < 0)
		return -1;

	return ret;
}

REGISTER_FAST_TEST(ticketlock_autotest, true, true, test_ticketlock);