/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>
#include <string.h>
#include <unistd.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_memory.h>
#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_lcore.h>
#include <rte_cycles.h>
#include <rte_spinlock.h>

#include "test.h"

/*
 * Spinlock test
 * =============
 *
 * - There is a global spinlock and a table of spinlocks (one per lcore).
 *
 * - The test function takes all of these locks and launches the
 *   ``test_spinlock_per_core()`` function on each core (except the main).
 *
 *   - The function takes the global lock, displays something, then releases
 *     the global lock.
 *   - The function takes the per-lcore lock, displays something, then
 *     releases the per-lcore lock.
 *
 * - The main function unlocks the per-lcore locks sequentially and
 *   waits between each unlock. This triggers the display of a message
 *   for each core, in the correct order. The autotest script checks that
 *   this order is correct.
 *
 * - A load test is carried out, with all cores attempting to lock a single
 *   lock multiple times.
 */

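/*
 * sl is the global lock and sl_tab holds one lock per lcore; sl_try and
 * count are used by the trylock test, and slr by the recursive-lock test.
 * synchro releases the worker lcores at the start of the perf test.
 */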
static rte_spinlock_t sl, sl_try;
static rte_spinlock_t sl_tab[RTE_MAX_LCORE];
static rte_spinlock_recursive_t slr;
static unsigned int count;

static RTE_ATOMIC(uint32_t) synchro;

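/*
 * Worker routine: take and release the global lock, then take and
 * release this lcore's entry in sl_tab, printing a message each time.
 */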
static int
test_spinlock_per_core(__rte_unused void *arg)
{
	rte_spinlock_lock(&sl);
	printf("Global lock taken on core %u\n", rte_lcore_id());
	rte_spinlock_unlock(&sl);

	rte_spinlock_lock(&sl_tab[rte_lcore_id()]);
	printf("Hello from core %u !\n", rte_lcore_id());
	rte_spinlock_unlock(&sl_tab[rte_lcore_id()]);

	return 0;
}

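/*
 * Worker routine for the recursive lock: acquire slr three times, then
 * release it three times, printing the nesting count at each step.
 */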
static int
test_spinlock_recursive_per_core(__rte_unused void *arg)
{
	unsigned int id = rte_lcore_id();

	rte_spinlock_recursive_lock(&slr);
	printf("Global recursive lock taken on core %u - count = %d\n",
	       id, slr.count);
	rte_spinlock_recursive_lock(&slr);
	printf("Global recursive lock taken on core %u - count = %d\n",
	       id, slr.count);
	rte_spinlock_recursive_lock(&slr);
	printf("Global recursive lock taken on core %u - count = %d\n",
	       id, slr.count);

	printf("Hello from within recursive locks from core %u !\n", id);

	rte_spinlock_recursive_unlock(&slr);
	printf("Global recursive lock released on core %u - count = %d\n",
	       id, slr.count);
	rte_spinlock_recursive_unlock(&slr);
	printf("Global recursive lock released on core %u - count = %d\n",
	       id, slr.count);
	rte_spinlock_recursive_unlock(&slr);
	printf("Global recursive lock released on core %u - count = %d\n",
	       id, slr.count);

	return 0;
}

static rte_spinlock_t lk = RTE_SPINLOCK_INITIALIZER;
static uint64_t time_count[RTE_MAX_LCORE] = {0};

#define MAX_LOOP 10000

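/*
 * Body of the load test: spin MAX_LOOP times, taking and releasing lk on
 * each iteration when *func_param is set, and record the elapsed time in
 * microseconds in time_count[] for this lcore. Workers wait on synchro
 * so that all cores start the loop together.
 */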
static int
load_loop_fn(void *func_param)
{
	uint64_t time_diff = 0, begin;
	uint64_t hz = rte_get_timer_hz();
	volatile uint64_t lcount = 0;
	const int use_lock = *(int *)func_param;
	const unsigned int lcore = rte_lcore_id();

	/* wait synchro for workers */
	if (lcore != rte_get_main_lcore())
		rte_wait_until_equal_32((uint32_t *)(uintptr_t)&synchro, 1,
				rte_memory_order_relaxed);

	begin = rte_get_timer_cycles();
	while (lcount < MAX_LOOP) {
		if (use_lock)
			rte_spinlock_lock(&lk);
		lcount++;
		if (use_lock)
			rte_spinlock_unlock(&lk);
	}
	time_diff = rte_get_timer_cycles() - begin;
	time_count[lcore] = time_diff * 1000000 / hz;
	return 0;
}

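/*
 * Measure the cost of the lock/unlock loop: first without the lock, then
 * with the lock on a single core, and finally with the lock contended by
 * every lcore at once.
 */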
static int
test_spinlock_perf(void)
{
	unsigned int i;
	uint64_t total = 0;
	int lock = 0;
	const unsigned int lcore = rte_lcore_id();

	printf("\nTest with no lock on single core...\n");
	load_loop_fn(&lock);
	printf("Core [%u] Cost Time = %"PRIu64" us\n", lcore,
						time_count[lcore]);
	memset(time_count, 0, sizeof(time_count));

	printf("\nTest with lock on single core...\n");
	lock = 1;
	load_loop_fn(&lock);
	printf("Core [%u] Cost Time = %"PRIu64" us\n", lcore,
						time_count[lcore]);
	memset(time_count, 0, sizeof(time_count));

	printf("\nTest with lock on %u cores...\n", rte_lcore_count());

	/* Clear synchro and start workers */
	rte_atomic_store_explicit(&synchro, 0, rte_memory_order_relaxed);
	rte_eal_mp_remote_launch(load_loop_fn, &lock, SKIP_MAIN);

	/* start synchro and launch test on main */
	rte_atomic_store_explicit(&synchro, 1, rte_memory_order_relaxed);
	load_loop_fn(&lock);

	rte_eal_mp_wait_lcore();

	RTE_LCORE_FOREACH(i) {
		printf("Core [%u] Cost Time = %"PRIu64" us\n", i,
						time_count[i]);
		total += time_count[i];
	}

	printf("Total Cost Time = %"PRIu64" us\n", total);

	return 0;
}

/*
 * Use rte_spinlock_trylock() to try to take a spinlock object.
 * If the lock cannot be acquired, the function returns immediately,
 * and the "count" variable is incremented by one. The final value of
 * "count" is checked later as the test result.
 */
static int
test_spinlock_try(__rte_unused void *arg)
{
	if (rte_spinlock_trylock(&sl_try) == 0) {
		rte_spinlock_lock(&sl);
		count++;
		rte_spinlock_unlock(&sl);
	}

	return 0;
}

/*
 * Test rte_eal_get_lcore_state() in addition to spinlocks
 * as we have "waiting" then "running" lcores.
 */
static int
test_spinlock(void)
{
	int ret = 0;
	int i;

	/* worker cores should be waiting: print it */
	RTE_LCORE_FOREACH_WORKER(i) {
		printf("lcore %d state: %d\n", i,
		       (int) rte_eal_get_lcore_state(i));
	}

	rte_spinlock_init(&sl);
	rte_spinlock_init(&sl_try);
	rte_spinlock_recursive_init(&slr);
	for (i = 0; i < RTE_MAX_LCORE; i++)
		rte_spinlock_init(&sl_tab[i]);

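	/*
	 * Take the global lock, then take each per-lcore lock just before
	 * launching its worker: the workers all block on the global lock
	 * first, then on their own sl_tab entry.
	 */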
	rte_spinlock_lock(&sl);

	RTE_LCORE_FOREACH_WORKER(i) {
		rte_spinlock_lock(&sl_tab[i]);
		rte_eal_remote_launch(test_spinlock_per_core, NULL, i);
	}

	/* worker cores should be busy: print it */
	RTE_LCORE_FOREACH_WORKER(i) {
		printf("lcore %d state: %d\n", i,
		       (int) rte_eal_get_lcore_state(i));
	}
	rte_spinlock_unlock(&sl);

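	/*
	 * Release the per-lcore locks one at a time with a short delay so
	 * that the workers print their "Hello" messages in lcore order.
	 */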
	RTE_LCORE_FOREACH_WORKER(i) {
		rte_spinlock_unlock(&sl_tab[i]);
		rte_delay_ms(10);
	}

	rte_eal_mp_wait_lcore();

	rte_spinlock_recursive_lock(&slr);

	/*
	 * Try to acquire a lock that we already own
	 */
	if (!rte_spinlock_recursive_trylock(&slr)) {
		printf("rte_spinlock_recursive_trylock failed on a lock that "
		       "we already own\n");
		ret = -1;
	} else
		rte_spinlock_recursive_unlock(&slr);

	RTE_LCORE_FOREACH_WORKER(i) {
		rte_eal_remote_launch(test_spinlock_recursive_per_core, NULL, i);
	}
	rte_spinlock_recursive_unlock(&slr);
	rte_eal_mp_wait_lcore();

	/*
	 * Check that try-locking an already-locked object returns
	 * immediately. Lock the spinlock object first, then launch all the
	 * worker lcores to trylock the same object. Every worker should
	 * give up immediately and increment "count" (initialized to zero)
	 * by one. If "count" ends up equal to the number of worker lcores,
	 * the behavior of try-locking a locked spinlock object is correct.
	 */
	if (rte_spinlock_trylock(&sl_try) == 0) {
		return -1;
	}
	count = 0;
	RTE_LCORE_FOREACH_WORKER(i) {
		rte_eal_remote_launch(test_spinlock_try, NULL, i);
	}
	rte_eal_mp_wait_lcore();
	rte_spinlock_unlock(&sl_try);
	if (rte_spinlock_is_locked(&sl)) {
		printf("spinlock is locked but it should not be\n");
		return -1;
	}
	rte_spinlock_lock(&sl);
	if (count != (rte_lcore_count() - 1)) {
		ret = -1;
	}
	rte_spinlock_unlock(&sl);

	/*
	 * Test that trylock works recursively.
	 * Use rte_spinlock_recursive_trylock() to check that a recursive
	 * spinlock object can be locked twice from the same lcore.
	 */
	if (rte_spinlock_recursive_trylock(&slr) == 0) {
		printf("First rte_spinlock_recursive_trylock failed but should have succeeded\n");
		return -1;
	}
	if (rte_spinlock_recursive_trylock(&slr) == 0) {
		printf("Second rte_spinlock_recursive_trylock failed but should have succeeded\n");
		return -1;
	}
	rte_spinlock_recursive_unlock(&slr);
	rte_spinlock_recursive_unlock(&slr);

	if (test_spinlock_perf() < 0)
		return -1;

	return ret;
}

REGISTER_FAST_TEST(spinlock_autotest, true, true, test_spinlock);