/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk/stdinc.h"

#include "spdk/env.h"
#include "spdk/event.h"
#include "spdk/string.h"
#include "spdk/thread.h"
#include "spdk/util.h"
#include "spdk_internal/thread.h"
#include "spdk/barrier.h"

#include "thread/thread.c"

/*
 * Used by multiple tests
 */

typedef void (*test_setup_fn)(void);

struct test {
        /* Initialized in g_tests array */
        const char *name;
        uint32_t thread_count;
        test_setup_fn setup_fn;
        spdk_poller_fn end_fn;
        uint32_t poller_thread_number;
        /* State set while a test is running */
        struct spdk_poller *poller;
};

#define ASSERT(cond) do { \
        if (cond) { \
                g_pass++; \
        } else { \
                g_fail++; \
                printf("FAIL: %s:%d %s %s\n", __FILE__, __LINE__, __func__, #cond); \
        } \
} while (0)

#define WORKER_COUNT 2

static uint32_t g_pass;
static uint32_t g_fail;
static struct spdk_thread *g_thread[WORKER_COUNT];
/* Protects g_lock_error_count during updates by spin_abort_fn(). */
pthread_mutex_t g_lock_lock = PTHREAD_MUTEX_INITIALIZER;
uint32_t g_lock_error_count[SPIN_ERR_LAST];

static void launch_next_test(void *arg);

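/*
 * Compare the spin errors tallied so far against a table of expected counts,
 * one entry per spin_error value, and report any mismatch.
 */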
static bool
check_spin_err_count(enum spin_error *expect)
{
        enum spin_error i;
        bool ret = true;

        for (i = SPIN_ERR_NONE; i < SPIN_ERR_LAST; i++) {
                if (g_lock_error_count[i] != expect[i]) {
                        printf("FAIL: %s: Error %d expected %u, got %u\n", __func__, i,
                               expect[i], g_lock_error_count[i]);
                        ret = false;
                }
        }

        return ret;
}

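/*
 * g_spin_abort_fn is the hook the thread library invokes when it detects
 * spinlock misuse. Installing this handler keeps the test process alive and
 * simply tallies each error so the tests can verify exactly which errors
 * were raised.
 */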
/* A spin_abort_fn() implementation */
static void
do_not_abort(enum spin_error error)
{
        struct spdk_thread *thread = spdk_get_thread();
        uint32_t i;

        /*
         * Only count on threads for the current test. Those from a previous test may continue to
         * rack up errors in their death throes. A real application will abort() or exit() on the
         * first error.
         */
        for (i = 0; i < SPDK_COUNTOF(g_thread); i++) {
                if (g_thread[i] != thread) {
                        continue;
                }
                ASSERT(error >= SPIN_ERR_NONE && error < SPIN_ERR_LAST);
                if (error >= SPIN_ERR_NONE && error < SPIN_ERR_LAST) {
                        pthread_mutex_lock(&g_lock_lock);
                        g_lock_error_count[error]++;
                        pthread_mutex_unlock(&g_lock_lock);
                }
        }
}

/*
 * contend - make sure that two concurrent threads can take turns at getting the lock
 */
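/*
 * Each worker's poller takes the spinlock and flips its own bit in the shared
 * g_contend_word, first asserting that the bit is in the state its previous
 * pass left it. Both workers read-modify-write the same word, so if the lock
 * failed to serialize them an update could be lost and a later assertion
 * would fire. Wait and hold times are accumulated so contend_end() can print
 * per-worker contention statistics.
 */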

struct contend_worker_data {
        struct spdk_poller *poller;
        uint64_t wait_time;
        uint64_t hold_time;
        uint32_t increments;
        uint32_t delay_us;
        uint32_t bit;
};

static struct spdk_spinlock g_contend_spinlock;
static uint32_t g_contend_remaining;
static uint32_t g_get_lock_times = 50000;
static struct contend_worker_data g_contend_data[WORKER_COUNT] = {
        { .bit = 0, .delay_us = 3 },
        { .bit = 1, .delay_us = 5 },
};

static inline uint64_t
timediff(struct timespec *ts0, struct timespec *ts1)
{
        return (ts1->tv_sec - ts0->tv_sec) * SPDK_SEC_TO_NSEC + ts1->tv_nsec - ts0->tv_nsec;
}

static uint32_t g_contend_word;

static int
contend_worker_fn(void *arg)
{
        struct contend_worker_data *data = arg;
        struct timespec ts0, ts1, ts2;
        const uint32_t mask = 1 << data->bit;

        clock_gettime(CLOCK_MONOTONIC, &ts0);
        spdk_spin_lock(&g_contend_spinlock);
        clock_gettime(CLOCK_MONOTONIC, &ts1);
        data->wait_time += timediff(&ts0, &ts1);

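        /* Even passes set this worker's bit (it must currently be clear); odd passes clear it. */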
        switch (data->increments & 0x1) {
        case 0:
                ASSERT((g_contend_word & mask) == 0);
                g_contend_word |= mask;
                break;
        case 1:
                ASSERT((g_contend_word & mask) == mask);
                g_contend_word ^= mask;
                break;
        default:
                abort();
        }
        data->increments++;
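        /* Linger inside the critical section so the other worker has to spin for the lock. */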
        spdk_delay_us(data->delay_us);

        if (data->increments == g_get_lock_times) {
                g_contend_remaining--;
                spdk_poller_unregister(&data->poller);
                assert(data->poller == NULL);
        }

        spdk_spin_unlock(&g_contend_spinlock);
        clock_gettime(CLOCK_MONOTONIC, &ts2);
        data->hold_time += timediff(&ts1, &ts2);

        return SPDK_POLLER_BUSY;
}

static void
contend_start_worker_poller(void *ctx)
{
        struct contend_worker_data *data = ctx;

        data->poller = SPDK_POLLER_REGISTER(contend_worker_fn, data, 0);
        if (data->poller == NULL) {
                fprintf(stderr, "Failed to start poller\n");
                abort();
        }
}

static void
contend_setup(void)
{
        uint32_t i;

        memset(&g_contend_spinlock, 0, sizeof(g_contend_spinlock));
        spdk_spin_init(&g_contend_spinlock);
        g_contend_remaining = SPDK_COUNTOF(g_contend_data);

        /* Add a poller to each thread */
        for (i = 0; i < SPDK_COUNTOF(g_contend_data); i++) {
                spdk_thread_send_msg(g_thread[i], contend_start_worker_poller, &g_contend_data[i]);
        }
}

static int
contend_end(void *arg)
{
        struct test *test = arg;
        enum spin_error expect[SPIN_ERR_LAST] = { 0 };
        uint32_t i;

        if (g_contend_remaining != 0) {
                return SPDK_POLLER_IDLE;
        }

        ASSERT(check_spin_err_count(expect));
        ASSERT(g_get_lock_times == g_contend_data[0].increments);
        ASSERT(g_get_lock_times == g_contend_data[1].increments);

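        /* Times are accumulated in nanoseconds; report them in microseconds. */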
        printf("%8s %8s %8s %8s %8s\n", "Worker", "Delay", "Wait us", "Hold us", "Total us");
        for (i = 0; i < SPDK_COUNTOF(g_contend_data); i++) {
                printf("%8" PRIu32 " %8" PRIu32 " %8" PRIu64 " %8" PRIu64 " %8" PRIu64 "\n",
                       i, g_contend_data[i].delay_us,
                       g_contend_data[i].wait_time / 1000, g_contend_data[i].hold_time / 1000,
                       (g_contend_data[i].wait_time + g_contend_data[i].hold_time) / 1000);
        }

        spdk_poller_unregister(&test->poller);
        spdk_thread_send_msg(spdk_thread_get_app_thread(), launch_next_test, NULL);
        return SPDK_POLLER_BUSY;
}

/*
 * hold_by_poller - returning from a poller while still holding a lock triggers an error
 */

static struct spdk_spinlock g_hold_by_poller_spinlock;
struct spdk_poller *g_hold_by_poller_poller;
static bool g_hold_by_poller_done;

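/*
 * Expected sequence: the first call returns while still holding the lock, so
 * SPIN_ERR_HOLD_DURING_SWITCH is recorded when the poller goes off CPU. The
 * second call takes the lock again while already holding it, which records
 * SPIN_ERR_DEADLOCK; it then unlocks and unregisters the poller.
 */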
static int
hold_by_poller(void *arg)
{
        static int times_called = 0;
        enum spin_error expect[SPIN_ERR_LAST] = { 0 };

        /* This poller expects to be called exactly twice, taking the lock on each call. */
        switch (times_called) {
        case 0:
                ASSERT(check_spin_err_count(expect));
                break;
        case 1:
                expect[SPIN_ERR_HOLD_DURING_SWITCH] = 1;
                ASSERT(check_spin_err_count(expect));
                break;
        default:
                abort();
        }

        spdk_spin_lock(&g_hold_by_poller_spinlock);

        memset(expect, 0, sizeof(expect));
        switch (times_called) {
        case 0:
                ASSERT(check_spin_err_count(expect));
                break;
        case 1:
                expect[SPIN_ERR_DEADLOCK] = 1;
                expect[SPIN_ERR_HOLD_DURING_SWITCH] = 1;
                ASSERT(check_spin_err_count(expect));
                /*
                 * Unlock so that future polls don't continue to increase the "hold during switch"
                 * count. Without this, the SPIN_ERR_HOLD_DURING_SWITCH count would be indeterminate.
                 */
                spdk_spin_unlock(&g_hold_by_poller_spinlock);
                ASSERT(check_spin_err_count(expect));
                spdk_poller_unregister(&g_hold_by_poller_poller);
                g_hold_by_poller_done = true;
                break;
        default:
                abort();
        }

        times_called++;

        return SPDK_POLLER_BUSY;
}

static void
hold_by_poller_start(void *arg)
{
        memset(g_lock_error_count, 0, sizeof(g_lock_error_count));
        spdk_spin_init(&g_hold_by_poller_spinlock);

        g_hold_by_poller_poller = spdk_poller_register(hold_by_poller, NULL, 0);
}

static void
hold_by_poller_setup(void)
{
        spdk_thread_send_msg(g_thread[0], hold_by_poller_start, NULL);
}

static int
hold_by_poller_end(void *arg)
{
        struct test *test = arg;
        enum spin_error expect[SPIN_ERR_LAST] = { 0 };

        /* Wait for hold_by_poller() to complete its work. */
        if (!g_hold_by_poller_done) {
                return SPDK_POLLER_IDLE;
        }

        /* Some final checks to be sure all the expected errors were seen */
        expect[SPIN_ERR_DEADLOCK] = 1;
        expect[SPIN_ERR_HOLD_DURING_SWITCH] = 1;
        ASSERT(check_spin_err_count(expect));

        /* All done, move on to next test */
        spdk_poller_unregister(&test->poller);
        spdk_thread_send_msg(spdk_thread_get_app_thread(), launch_next_test, NULL);

        return SPDK_POLLER_BUSY;
}

/*
 * hold_by_message - a message handler that returns while holding a lock triggers an error
 */

static struct spdk_spinlock g_hold_by_message_spinlock;
static bool g_hold_by_message_done;

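/*
 * Message handler that deliberately returns without releasing the lock;
 * SPIN_ERR_HOLD_DURING_SWITCH should be recorded once the message completes.
 */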
static void
hold_by_message(void *ctx)
{
        spdk_spin_lock(&g_hold_by_message_spinlock);

        g_hold_by_message_done = true;
}

static void
hold_by_message_setup(void)
{
        memset(g_lock_error_count, 0, sizeof(g_lock_error_count));
        spdk_spin_init(&g_hold_by_message_spinlock);

        spdk_thread_send_msg(g_thread[0], hold_by_message, NULL);
}

static int
hold_by_message_end(void *arg)
{
        struct test *test = arg;
        enum spin_error expect[SPIN_ERR_LAST] = { 0 };

        /* Wait for the message to be processed */
        if (!g_hold_by_message_done) {
                return SPDK_POLLER_IDLE;
        }

        /* Verify an error was seen */
        expect[SPIN_ERR_HOLD_DURING_SWITCH] = 1;
        ASSERT(check_spin_err_count(expect));

        /* All done, move on to next test */
        spdk_poller_unregister(&test->poller);
        spdk_thread_send_msg(spdk_thread_get_app_thread(), launch_next_test, NULL);

        return SPDK_POLLER_BUSY;
}

/*
 * Test definitions
 */

static void
start_threads(uint32_t count)
{
        struct spdk_cpuset *cpuset;
        uint32_t i;

        cpuset = spdk_cpuset_alloc();
        if (cpuset == NULL) {
                fprintf(stderr, "failed to allocate cpuset\n");
                abort();
        }

        assert(count <= SPDK_COUNTOF(g_thread));

        for (i = 0; i < count; i++) {
                spdk_cpuset_zero(cpuset);
                spdk_cpuset_set_cpu(cpuset, i, true);
                g_thread[i] = spdk_thread_create("worker", cpuset);
                if (g_thread[i] == NULL) {
                        fprintf(stderr, "failed to create thread\n");
                        abort();
                }
        }
        spdk_cpuset_free(cpuset);
}

static void
stop_thread(void *arg)
{
        struct spdk_thread *thread = arg;

        spdk_thread_exit(thread);
}

static void
stop_threads(void)
{
        uint32_t i;

        for (i = 0; i < SPDK_COUNTOF(g_thread); i++) {
                if (g_thread[i] == NULL) {
                        break;
                }
                spdk_thread_send_msg(g_thread[i], stop_thread, g_thread[i]);
                g_thread[i] = NULL;
        }
}

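/*
 * Initializer order matches struct test: name, thread_count, setup_fn, end_fn,
 * poller_thread_number. A poller_thread_number of 0 registers the end poller
 * on the app thread; N > 0 registers it on worker thread N - 1.
 */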
static struct test g_tests[] = {
        {"contend", 2, contend_setup, contend_end, 0},
        {"hold_by_poller", 1, hold_by_poller_setup, hold_by_poller_end, 0},
        {"hold_by_message", 1, hold_by_message_setup, hold_by_message_end, 1},
};

static void
launch_end_poller(void *arg)
{
        struct test *test = arg;

        test->poller = SPDK_POLLER_REGISTER(test->end_fn, test, 100);
}

static void
launch_next_test(void *arg)
{
        struct test *test;
        static uint32_t last_fail_count = 0;
        static uint32_t current_test = 0;

        assert(spdk_thread_is_app_thread(NULL));

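        /* On every call after the first, report the previous test's result and stop its threads. */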
        if (current_test != 0) {
                const char *name = g_tests[current_test - 1].name;
                if (g_fail == last_fail_count) {
                        printf("PASS test %s\n", name);
                } else {
                        printf("FAIL test %s (%u failed assertions)\n", name,
                               g_fail - last_fail_count);
                }
                /* Remember the running failure count so the next test reports only its own failures. */
                last_fail_count = g_fail;
                stop_threads();
        }

        if (current_test == SPDK_COUNTOF(g_tests)) {
                spdk_app_stop(g_fail);
                return;
        }

        test = &g_tests[current_test];

        printf("Starting test %s\n", test->name);
        start_threads(test->thread_count);

        if (test->poller_thread_number == 0) {
                launch_end_poller(test);
        } else {
                /*
                 * A test may set a done flag then return, expecting the error to be generated
                 * when the poller or message goes off CPU. To ensure that we don't check for the
                 * error between the time that "done" is set and the time the error is registered,
                 * check for the error on the thread that runs the poller or handles the message.
                 */
                spdk_thread_send_msg(g_thread[test->poller_thread_number - 1],
                                     launch_end_poller, test);
        }

        /*
         * The setup function starts after the end poller. If it's not done this way, the setup
         * function may trigger an error condition (thread->lock_count != 0) that would cause
         * extraneous calls to spin_abort_fn() as the end poller is registered.
         */
        test->setup_fn();

        current_test++;
}

static void
start_tests(void *arg)
{
        g_spin_abort_fn = do_not_abort;
        spdk_thread_send_msg(spdk_thread_get_app_thread(), launch_next_test, NULL);
}

int
main(int argc, char **argv)
{
        struct spdk_app_opts opts;
        char *me = argv[0];
        int ret;
        char mask[8];

        spdk_app_opts_init(&opts, sizeof(opts));
        opts.name = "spdk_lock_test";
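        /* One reactor per worker thread: 0x3 for WORKER_COUNT == 2. */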
        snprintf(mask, sizeof(mask), "0x%x", (1 << SPDK_COUNTOF(g_thread)) - 1);
        opts.reactor_mask = mask;
        opts.rpc_addr = NULL;

        spdk_app_start(&opts, start_tests, NULL);

        spdk_app_fini();

        printf("%s summary:\n", me);
        printf(" %8u assertions passed\n", g_pass);
        printf(" %8u assertions failed\n", g_fail);

        if (g_pass + g_fail == 0) {
                ret = 1;
        } else {
                ret = spdk_min(g_fail, 127);
        }
        return ret;
}