1 /*- 2 * BSD LICENSE 3 * 4 * Copyright (c) Intel Corporation. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 11 * * Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * * Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in 15 * the documentation and/or other materials provided with the 16 * distribution. 17 * * Neither the name of Intel Corporation nor the names of its 18 * contributors may be used to endorse or promote products derived 19 * from this software without specific prior written permission. 20 * 21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
32 */ 33 34 #include "spdk/stdinc.h" 35 36 #include "spdk_cunit.h" 37 38 #include "spdk_internal/thread.h" 39 40 #include "thread/thread.c" 41 #include "common/lib/ut_multithread.c" 42 43 static int g_sched_rc = 0; 44 45 static int 46 _thread_schedule(struct spdk_thread *thread) 47 { 48 return g_sched_rc; 49 } 50 51 static bool 52 _thread_op_supported(enum spdk_thread_op op) 53 { 54 switch (op) { 55 case SPDK_THREAD_OP_NEW: 56 return true; 57 default: 58 return false; 59 } 60 } 61 62 static int 63 _thread_op(struct spdk_thread *thread, enum spdk_thread_op op) 64 { 65 switch (op) { 66 case SPDK_THREAD_OP_NEW: 67 return _thread_schedule(thread); 68 default: 69 return -ENOTSUP; 70 } 71 } 72 73 static void 74 thread_alloc(void) 75 { 76 struct spdk_thread *thread; 77 78 /* No schedule callback */ 79 spdk_thread_lib_init(NULL, 0); 80 thread = spdk_thread_create(NULL, NULL); 81 SPDK_CU_ASSERT_FATAL(thread != NULL); 82 spdk_set_thread(thread); 83 spdk_thread_exit(thread); 84 while (!spdk_thread_is_exited(thread)) { 85 spdk_thread_poll(thread, 0, 0); 86 } 87 spdk_thread_destroy(thread); 88 spdk_thread_lib_fini(); 89 90 /* Schedule callback exists */ 91 spdk_thread_lib_init(_thread_schedule, 0); 92 93 /* Scheduling succeeds */ 94 g_sched_rc = 0; 95 thread = spdk_thread_create(NULL, NULL); 96 SPDK_CU_ASSERT_FATAL(thread != NULL); 97 spdk_set_thread(thread); 98 spdk_thread_exit(thread); 99 while (!spdk_thread_is_exited(thread)) { 100 spdk_thread_poll(thread, 0, 0); 101 } 102 spdk_thread_destroy(thread); 103 104 /* Scheduling fails */ 105 g_sched_rc = -1; 106 thread = spdk_thread_create(NULL, NULL); 107 SPDK_CU_ASSERT_FATAL(thread == NULL); 108 109 spdk_thread_lib_fini(); 110 111 /* Scheduling callback exists with extended thread library initialization. 
*/ 112 spdk_thread_lib_init_ext(_thread_op, _thread_op_supported, 0); 113 114 /* Scheduling succeeds */ 115 g_sched_rc = 0; 116 thread = spdk_thread_create(NULL, NULL); 117 SPDK_CU_ASSERT_FATAL(thread != NULL); 118 spdk_set_thread(thread); 119 spdk_thread_exit(thread); 120 while (!spdk_thread_is_exited(thread)) { 121 spdk_thread_poll(thread, 0, 0); 122 } 123 spdk_thread_destroy(thread); 124 125 /* Scheduling fails */ 126 g_sched_rc = -1; 127 thread = spdk_thread_create(NULL, NULL); 128 SPDK_CU_ASSERT_FATAL(thread == NULL); 129 130 spdk_thread_lib_fini(); 131 } 132 133 static void 134 send_msg_cb(void *ctx) 135 { 136 bool *done = ctx; 137 138 *done = true; 139 } 140 141 static void 142 thread_send_msg(void) 143 { 144 struct spdk_thread *thread0; 145 bool done = false; 146 147 allocate_threads(2); 148 set_thread(0); 149 thread0 = spdk_get_thread(); 150 151 set_thread(1); 152 /* Simulate thread 1 sending a message to thread 0. */ 153 spdk_thread_send_msg(thread0, send_msg_cb, &done); 154 155 /* We have not polled thread 0 yet, so done should be false. */ 156 CU_ASSERT(!done); 157 158 /* 159 * Poll thread 1. The message was sent to thread 0, so this should be 160 * a nop and done should still be false. 161 */ 162 poll_thread(1); 163 CU_ASSERT(!done); 164 165 /* 166 * Poll thread 0. This should execute the message and done should then 167 * be true. 
168 */ 169 poll_thread(0); 170 CU_ASSERT(done); 171 172 free_threads(); 173 } 174 175 static int 176 poller_run_done(void *ctx) 177 { 178 bool *poller_run = ctx; 179 180 *poller_run = true; 181 182 return -1; 183 } 184 185 static void 186 thread_poller(void) 187 { 188 struct spdk_poller *poller = NULL; 189 bool poller_run = false; 190 191 allocate_threads(1); 192 193 set_thread(0); 194 MOCK_SET(spdk_get_ticks, 0); 195 /* Register a poller with no-wait time and test execution */ 196 poller = spdk_poller_register(poller_run_done, &poller_run, 0); 197 CU_ASSERT(poller != NULL); 198 199 poll_threads(); 200 CU_ASSERT(poller_run == true); 201 202 spdk_poller_unregister(&poller); 203 CU_ASSERT(poller == NULL); 204 205 /* Register a poller with 1000us wait time and test single execution */ 206 poller_run = false; 207 poller = spdk_poller_register(poller_run_done, &poller_run, 1000); 208 CU_ASSERT(poller != NULL); 209 210 poll_threads(); 211 CU_ASSERT(poller_run == false); 212 213 spdk_delay_us(1000); 214 poll_threads(); 215 CU_ASSERT(poller_run == true); 216 217 poller_run = false; 218 poll_threads(); 219 CU_ASSERT(poller_run == false); 220 221 spdk_delay_us(1000); 222 poll_threads(); 223 CU_ASSERT(poller_run == true); 224 225 spdk_poller_unregister(&poller); 226 CU_ASSERT(poller == NULL); 227 228 free_threads(); 229 } 230 231 struct poller_ctx { 232 struct spdk_poller *poller; 233 bool run; 234 }; 235 236 static int 237 poller_run_pause(void *ctx) 238 { 239 struct poller_ctx *poller_ctx = ctx; 240 241 poller_ctx->run = true; 242 spdk_poller_pause(poller_ctx->poller); 243 244 return 0; 245 } 246 247 /* Verify the same poller can be switched multiple times between 248 * pause and resume while it runs. 
249 */ 250 static int 251 poller_run_pause_resume_pause(void *ctx) 252 { 253 struct poller_ctx *poller_ctx = ctx; 254 255 poller_ctx->run = true; 256 257 spdk_poller_pause(poller_ctx->poller); 258 spdk_poller_resume(poller_ctx->poller); 259 spdk_poller_pause(poller_ctx->poller); 260 261 return 0; 262 } 263 264 static void 265 poller_msg_pause_cb(void *ctx) 266 { 267 struct spdk_poller *poller = ctx; 268 269 spdk_poller_pause(poller); 270 } 271 272 static void 273 poller_msg_resume_cb(void *ctx) 274 { 275 struct spdk_poller *poller = ctx; 276 277 spdk_poller_resume(poller); 278 } 279 280 static void 281 poller_pause(void) 282 { 283 struct poller_ctx poller_ctx = {}; 284 unsigned int delay[] = { 0, 1000 }; 285 unsigned int i; 286 287 allocate_threads(1); 288 set_thread(0); 289 290 /* Register a poller that pauses itself */ 291 poller_ctx.poller = spdk_poller_register(poller_run_pause, &poller_ctx, 0); 292 CU_ASSERT_PTR_NOT_NULL(poller_ctx.poller); 293 294 poller_ctx.run = false; 295 poll_threads(); 296 CU_ASSERT_EQUAL(poller_ctx.run, true); 297 298 poller_ctx.run = false; 299 poll_threads(); 300 CU_ASSERT_EQUAL(poller_ctx.run, false); 301 302 spdk_poller_unregister(&poller_ctx.poller); 303 CU_ASSERT_PTR_NULL(poller_ctx.poller); 304 305 /* Register a poller that switches between pause and resume itself */ 306 poller_ctx.poller = spdk_poller_register(poller_run_pause_resume_pause, &poller_ctx, 0); 307 CU_ASSERT_PTR_NOT_NULL(poller_ctx.poller); 308 309 poller_ctx.run = false; 310 poll_threads(); 311 CU_ASSERT_EQUAL(poller_ctx.run, true); 312 313 poller_ctx.run = false; 314 poll_threads(); 315 CU_ASSERT_EQUAL(poller_ctx.run, false); 316 317 spdk_poller_unregister(&poller_ctx.poller); 318 CU_ASSERT_PTR_NULL(poller_ctx.poller); 319 320 /* Verify that resuming an unpaused poller doesn't do anything */ 321 poller_ctx.poller = spdk_poller_register(poller_run_done, &poller_ctx.run, 0); 322 CU_ASSERT_PTR_NOT_NULL(poller_ctx.poller); 323 324 
spdk_poller_resume(poller_ctx.poller); 325 326 poller_ctx.run = false; 327 poll_threads(); 328 CU_ASSERT_EQUAL(poller_ctx.run, true); 329 330 /* Verify that pausing the same poller twice works too */ 331 spdk_poller_pause(poller_ctx.poller); 332 333 poller_ctx.run = false; 334 poll_threads(); 335 CU_ASSERT_EQUAL(poller_ctx.run, false); 336 337 spdk_poller_pause(poller_ctx.poller); 338 poll_threads(); 339 CU_ASSERT_EQUAL(poller_ctx.run, false); 340 341 spdk_poller_resume(poller_ctx.poller); 342 poll_threads(); 343 CU_ASSERT_EQUAL(poller_ctx.run, true); 344 345 /* Verify that a poller is run when it's resumed immediately after pausing */ 346 poller_ctx.run = false; 347 spdk_poller_pause(poller_ctx.poller); 348 spdk_poller_resume(poller_ctx.poller); 349 poll_threads(); 350 CU_ASSERT_EQUAL(poller_ctx.run, true); 351 352 spdk_poller_unregister(&poller_ctx.poller); 353 CU_ASSERT_PTR_NULL(poller_ctx.poller); 354 355 /* Poll the thread to make sure the previous poller gets unregistered */ 356 poll_threads(); 357 CU_ASSERT_EQUAL(spdk_thread_has_pollers(spdk_get_thread()), false); 358 359 /* Verify that it's possible to unregister a paused poller */ 360 poller_ctx.poller = spdk_poller_register(poller_run_done, &poller_ctx.run, 0); 361 CU_ASSERT_PTR_NOT_NULL(poller_ctx.poller); 362 363 poller_ctx.run = false; 364 poll_threads(); 365 CU_ASSERT_EQUAL(poller_ctx.run, true); 366 367 spdk_poller_pause(poller_ctx.poller); 368 369 poller_ctx.run = false; 370 poll_threads(); 371 CU_ASSERT_EQUAL(poller_ctx.run, false); 372 373 spdk_poller_unregister(&poller_ctx.poller); 374 375 poll_threads(); 376 CU_ASSERT_EQUAL(poller_ctx.run, false); 377 CU_ASSERT_EQUAL(spdk_thread_has_pollers(spdk_get_thread()), false); 378 379 /* Register pollers with 0 and 1000us wait time and pause/resume them */ 380 for (i = 0; i < SPDK_COUNTOF(delay); ++i) { 381 poller_ctx.poller = spdk_poller_register(poller_run_done, &poller_ctx.run, delay[i]); 382 CU_ASSERT_PTR_NOT_NULL(poller_ctx.poller); 383 384 
spdk_delay_us(delay[i]); 385 poller_ctx.run = false; 386 poll_threads(); 387 CU_ASSERT_EQUAL(poller_ctx.run, true); 388 389 spdk_poller_pause(poller_ctx.poller); 390 391 spdk_delay_us(delay[i]); 392 poller_ctx.run = false; 393 poll_threads(); 394 CU_ASSERT_EQUAL(poller_ctx.run, false); 395 396 spdk_poller_resume(poller_ctx.poller); 397 398 spdk_delay_us(delay[i]); 399 poll_threads(); 400 CU_ASSERT_EQUAL(poller_ctx.run, true); 401 402 /* Verify that the poller can be paused/resumed from spdk_thread_send_msg */ 403 spdk_thread_send_msg(spdk_get_thread(), poller_msg_pause_cb, poller_ctx.poller); 404 405 spdk_delay_us(delay[i]); 406 poller_ctx.run = false; 407 poll_threads(); 408 CU_ASSERT_EQUAL(poller_ctx.run, false); 409 410 spdk_thread_send_msg(spdk_get_thread(), poller_msg_resume_cb, poller_ctx.poller); 411 412 poll_threads(); 413 if (delay[i] > 0) { 414 spdk_delay_us(delay[i]); 415 poll_threads(); 416 } 417 CU_ASSERT_EQUAL(poller_ctx.run, true); 418 419 spdk_poller_unregister(&poller_ctx.poller); 420 CU_ASSERT_PTR_NULL(poller_ctx.poller); 421 422 /* Register a timed poller that pauses itself */ 423 poller_ctx.poller = spdk_poller_register(poller_run_pause, &poller_ctx, delay[i]); 424 CU_ASSERT_PTR_NOT_NULL(poller_ctx.poller); 425 426 spdk_delay_us(delay[i]); 427 poller_ctx.run = false; 428 poll_threads(); 429 CU_ASSERT_EQUAL(poller_ctx.run, true); 430 431 poller_ctx.run = false; 432 spdk_delay_us(delay[i]); 433 poll_threads(); 434 CU_ASSERT_EQUAL(poller_ctx.run, false); 435 436 spdk_poller_resume(poller_ctx.poller); 437 438 CU_ASSERT_EQUAL(poller_ctx.run, false); 439 spdk_delay_us(delay[i]); 440 poll_threads(); 441 CU_ASSERT_EQUAL(poller_ctx.run, true); 442 443 spdk_poller_unregister(&poller_ctx.poller); 444 CU_ASSERT_PTR_NULL(poller_ctx.poller); 445 446 /* Register a timed poller that switches between pause and resume itself */ 447 poller_ctx.poller = spdk_poller_register(poller_run_pause_resume_pause, 448 &poller_ctx, delay[i]); 449 
CU_ASSERT_PTR_NOT_NULL(poller_ctx.poller); 450 451 spdk_delay_us(delay[i]); 452 poller_ctx.run = false; 453 poll_threads(); 454 CU_ASSERT_EQUAL(poller_ctx.run, true); 455 456 poller_ctx.run = false; 457 spdk_delay_us(delay[i]); 458 poll_threads(); 459 CU_ASSERT_EQUAL(poller_ctx.run, false); 460 461 spdk_poller_resume(poller_ctx.poller); 462 463 CU_ASSERT_EQUAL(poller_ctx.run, false); 464 spdk_delay_us(delay[i]); 465 poll_threads(); 466 CU_ASSERT_EQUAL(poller_ctx.run, true); 467 468 spdk_poller_unregister(&poller_ctx.poller); 469 CU_ASSERT_PTR_NULL(poller_ctx.poller); 470 } 471 472 free_threads(); 473 } 474 475 static void 476 for_each_cb(void *ctx) 477 { 478 int *count = ctx; 479 480 (*count)++; 481 } 482 483 static void 484 thread_for_each(void) 485 { 486 int count = 0; 487 int i; 488 489 allocate_threads(3); 490 set_thread(0); 491 492 spdk_for_each_thread(for_each_cb, &count, for_each_cb); 493 494 /* We have not polled thread 0 yet, so count should be 0 */ 495 CU_ASSERT(count == 0); 496 497 /* Poll each thread to verify the message is passed to each */ 498 for (i = 0; i < 3; i++) { 499 poll_thread(i); 500 CU_ASSERT(count == (i + 1)); 501 } 502 503 /* 504 * After each thread is called, the completion calls it 505 * one more time. 
506 */ 507 poll_thread(0); 508 CU_ASSERT(count == 4); 509 510 free_threads(); 511 } 512 513 static int 514 channel_create(void *io_device, void *ctx_buf) 515 { 516 int *ch_count = io_device; 517 518 (*ch_count)++; 519 return 0; 520 } 521 522 static void 523 channel_destroy(void *io_device, void *ctx_buf) 524 { 525 int *ch_count = io_device; 526 527 (*ch_count)--; 528 } 529 530 static void 531 channel_msg(struct spdk_io_channel_iter *i) 532 { 533 int *msg_count = spdk_io_channel_iter_get_ctx(i); 534 535 (*msg_count)++; 536 spdk_for_each_channel_continue(i, 0); 537 } 538 539 static void 540 channel_cpl(struct spdk_io_channel_iter *i, int status) 541 { 542 int *msg_count = spdk_io_channel_iter_get_ctx(i); 543 544 (*msg_count)++; 545 } 546 547 static void 548 for_each_channel_remove(void) 549 { 550 struct spdk_io_channel *ch0, *ch1, *ch2; 551 int ch_count = 0; 552 int msg_count = 0; 553 554 allocate_threads(3); 555 set_thread(0); 556 spdk_io_device_register(&ch_count, channel_create, channel_destroy, sizeof(int), NULL); 557 ch0 = spdk_get_io_channel(&ch_count); 558 set_thread(1); 559 ch1 = spdk_get_io_channel(&ch_count); 560 set_thread(2); 561 ch2 = spdk_get_io_channel(&ch_count); 562 CU_ASSERT(ch_count == 3); 563 564 /* 565 * Test that io_channel handles the case where we start to iterate through 566 * the channels, and during the iteration, one of the channels is deleted. 567 * This is done in some different and sometimes non-intuitive orders, because 568 * some operations are deferred and won't execute until their threads are 569 * polled. 570 * 571 * Case #1: Put the I/O channel before spdk_for_each_channel. 
572 */ 573 set_thread(0); 574 spdk_put_io_channel(ch0); 575 CU_ASSERT(ch_count == 3); 576 poll_threads(); 577 CU_ASSERT(ch_count == 2); 578 spdk_for_each_channel(&ch_count, channel_msg, &msg_count, channel_cpl); 579 CU_ASSERT(msg_count == 0); 580 poll_threads(); 581 CU_ASSERT(msg_count == 3); 582 583 msg_count = 0; 584 585 /* 586 * Case #2: Put the I/O channel after spdk_for_each_channel, but before 587 * thread 0 is polled. 588 */ 589 ch0 = spdk_get_io_channel(&ch_count); 590 CU_ASSERT(ch_count == 3); 591 spdk_for_each_channel(&ch_count, channel_msg, &msg_count, channel_cpl); 592 spdk_put_io_channel(ch0); 593 CU_ASSERT(ch_count == 3); 594 595 poll_threads(); 596 CU_ASSERT(ch_count == 2); 597 CU_ASSERT(msg_count == 4); 598 set_thread(1); 599 spdk_put_io_channel(ch1); 600 CU_ASSERT(ch_count == 2); 601 set_thread(2); 602 spdk_put_io_channel(ch2); 603 CU_ASSERT(ch_count == 2); 604 poll_threads(); 605 CU_ASSERT(ch_count == 0); 606 607 spdk_io_device_unregister(&ch_count, NULL); 608 poll_threads(); 609 610 free_threads(); 611 } 612 613 struct unreg_ctx { 614 bool ch_done; 615 bool foreach_done; 616 }; 617 618 static void 619 unreg_ch_done(struct spdk_io_channel_iter *i) 620 { 621 struct unreg_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 622 623 ctx->ch_done = true; 624 625 SPDK_CU_ASSERT_FATAL(i->cur_thread != NULL); 626 spdk_for_each_channel_continue(i, 0); 627 } 628 629 static void 630 unreg_foreach_done(struct spdk_io_channel_iter *i, int status) 631 { 632 struct unreg_ctx *ctx = spdk_io_channel_iter_get_ctx(i); 633 634 ctx->foreach_done = true; 635 } 636 637 static void 638 for_each_channel_unreg(void) 639 { 640 struct spdk_io_channel *ch0; 641 struct io_device *dev; 642 struct unreg_ctx ctx = {}; 643 int io_target = 0; 644 645 allocate_threads(1); 646 set_thread(0); 647 CU_ASSERT(TAILQ_EMPTY(&g_io_devices)); 648 spdk_io_device_register(&io_target, channel_create, channel_destroy, sizeof(int), NULL); 649 CU_ASSERT(!TAILQ_EMPTY(&g_io_devices)); 650 dev = 
TAILQ_FIRST(&g_io_devices); 651 SPDK_CU_ASSERT_FATAL(dev != NULL); 652 CU_ASSERT(TAILQ_NEXT(dev, tailq) == NULL); 653 ch0 = spdk_get_io_channel(&io_target); 654 spdk_for_each_channel(&io_target, unreg_ch_done, &ctx, unreg_foreach_done); 655 656 spdk_io_device_unregister(&io_target, NULL); 657 /* 658 * There is an outstanding foreach call on the io_device, so the unregister should not 659 * have removed the device. 660 */ 661 CU_ASSERT(dev == TAILQ_FIRST(&g_io_devices)); 662 spdk_io_device_register(&io_target, channel_create, channel_destroy, sizeof(int), NULL); 663 /* 664 * There is already a device registered at &io_target, so a new io_device should not 665 * have been added to g_io_devices. 666 */ 667 CU_ASSERT(dev == TAILQ_FIRST(&g_io_devices)); 668 CU_ASSERT(TAILQ_NEXT(dev, tailq) == NULL); 669 670 poll_thread(0); 671 CU_ASSERT(ctx.ch_done == true); 672 CU_ASSERT(ctx.foreach_done == true); 673 /* 674 * There are no more foreach operations outstanding, so we can unregister the device, 675 * even though a channel still exists for the device. 
676 */ 677 spdk_io_device_unregister(&io_target, NULL); 678 CU_ASSERT(TAILQ_EMPTY(&g_io_devices)); 679 680 set_thread(0); 681 spdk_put_io_channel(ch0); 682 683 poll_threads(); 684 685 free_threads(); 686 } 687 688 static void 689 thread_name(void) 690 { 691 struct spdk_thread *thread; 692 const char *name; 693 694 spdk_thread_lib_init(NULL, 0); 695 696 /* Create thread with no name, which automatically generates one */ 697 thread = spdk_thread_create(NULL, NULL); 698 spdk_set_thread(thread); 699 thread = spdk_get_thread(); 700 SPDK_CU_ASSERT_FATAL(thread != NULL); 701 name = spdk_thread_get_name(thread); 702 CU_ASSERT(name != NULL); 703 spdk_thread_exit(thread); 704 while (!spdk_thread_is_exited(thread)) { 705 spdk_thread_poll(thread, 0, 0); 706 } 707 spdk_thread_destroy(thread); 708 709 /* Create thread named "test_thread" */ 710 thread = spdk_thread_create("test_thread", NULL); 711 spdk_set_thread(thread); 712 thread = spdk_get_thread(); 713 SPDK_CU_ASSERT_FATAL(thread != NULL); 714 name = spdk_thread_get_name(thread); 715 SPDK_CU_ASSERT_FATAL(name != NULL); 716 CU_ASSERT(strcmp(name, "test_thread") == 0); 717 spdk_thread_exit(thread); 718 while (!spdk_thread_is_exited(thread)) { 719 spdk_thread_poll(thread, 0, 0); 720 } 721 spdk_thread_destroy(thread); 722 723 spdk_thread_lib_fini(); 724 } 725 726 static uint64_t g_device1; 727 static uint64_t g_device2; 728 static uint64_t g_device3; 729 730 static uint64_t g_ctx1 = 0x1111; 731 static uint64_t g_ctx2 = 0x2222; 732 733 static int g_create_cb_calls = 0; 734 static int g_destroy_cb_calls = 0; 735 736 static int 737 create_cb_1(void *io_device, void *ctx_buf) 738 { 739 CU_ASSERT(io_device == &g_device1); 740 *(uint64_t *)ctx_buf = g_ctx1; 741 g_create_cb_calls++; 742 return 0; 743 } 744 745 static void 746 destroy_cb_1(void *io_device, void *ctx_buf) 747 { 748 CU_ASSERT(io_device == &g_device1); 749 CU_ASSERT(*(uint64_t *)ctx_buf == g_ctx1); 750 g_destroy_cb_calls++; 751 } 752 753 static int 754 create_cb_2(void 
*io_device, void *ctx_buf) 755 { 756 CU_ASSERT(io_device == &g_device2); 757 *(uint64_t *)ctx_buf = g_ctx2; 758 g_create_cb_calls++; 759 return 0; 760 } 761 762 static void 763 destroy_cb_2(void *io_device, void *ctx_buf) 764 { 765 CU_ASSERT(io_device == &g_device2); 766 CU_ASSERT(*(uint64_t *)ctx_buf == g_ctx2); 767 g_destroy_cb_calls++; 768 } 769 770 static void 771 channel(void) 772 { 773 struct spdk_io_channel *ch1, *ch2; 774 void *ctx; 775 776 allocate_threads(1); 777 set_thread(0); 778 779 spdk_io_device_register(&g_device1, create_cb_1, destroy_cb_1, sizeof(g_ctx1), NULL); 780 spdk_io_device_register(&g_device2, create_cb_2, destroy_cb_2, sizeof(g_ctx2), NULL); 781 782 g_create_cb_calls = 0; 783 ch1 = spdk_get_io_channel(&g_device1); 784 CU_ASSERT(g_create_cb_calls == 1); 785 SPDK_CU_ASSERT_FATAL(ch1 != NULL); 786 CU_ASSERT(spdk_io_channel_get_io_device(ch1) == &g_device1); 787 788 g_create_cb_calls = 0; 789 ch2 = spdk_get_io_channel(&g_device1); 790 CU_ASSERT(g_create_cb_calls == 0); 791 CU_ASSERT(ch1 == ch2); 792 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 793 CU_ASSERT(spdk_io_channel_get_io_device(ch2) == &g_device1); 794 795 g_destroy_cb_calls = 0; 796 spdk_put_io_channel(ch2); 797 poll_threads(); 798 CU_ASSERT(g_destroy_cb_calls == 0); 799 800 g_create_cb_calls = 0; 801 ch2 = spdk_get_io_channel(&g_device2); 802 CU_ASSERT(g_create_cb_calls == 1); 803 CU_ASSERT(ch1 != ch2); 804 SPDK_CU_ASSERT_FATAL(ch2 != NULL); 805 CU_ASSERT(spdk_io_channel_get_io_device(ch2) == &g_device2); 806 807 ctx = spdk_io_channel_get_ctx(ch2); 808 CU_ASSERT(*(uint64_t *)ctx == g_ctx2); 809 810 g_destroy_cb_calls = 0; 811 spdk_put_io_channel(ch1); 812 poll_threads(); 813 CU_ASSERT(g_destroy_cb_calls == 1); 814 815 g_destroy_cb_calls = 0; 816 spdk_put_io_channel(ch2); 817 poll_threads(); 818 CU_ASSERT(g_destroy_cb_calls == 1); 819 820 ch1 = spdk_get_io_channel(&g_device3); 821 CU_ASSERT(ch1 == NULL); 822 823 spdk_io_device_unregister(&g_device1, NULL); 824 poll_threads(); 825 
spdk_io_device_unregister(&g_device2, NULL); 826 poll_threads(); 827 CU_ASSERT(TAILQ_EMPTY(&g_io_devices)); 828 free_threads(); 829 CU_ASSERT(TAILQ_EMPTY(&g_threads)); 830 } 831 832 static int 833 create_cb(void *io_device, void *ctx_buf) 834 { 835 uint64_t *refcnt = (uint64_t *)ctx_buf; 836 837 CU_ASSERT(*refcnt == 0); 838 *refcnt = 1; 839 840 return 0; 841 } 842 843 static void 844 destroy_cb(void *io_device, void *ctx_buf) 845 { 846 uint64_t *refcnt = (uint64_t *)ctx_buf; 847 848 CU_ASSERT(*refcnt == 1); 849 *refcnt = 0; 850 } 851 852 /** 853 * This test is checking that a sequence of get, put, get, put without allowing 854 * the deferred put operation to complete doesn't result in releasing the memory 855 * for the channel twice. 856 */ 857 static void 858 channel_destroy_races(void) 859 { 860 uint64_t device; 861 struct spdk_io_channel *ch; 862 863 allocate_threads(1); 864 set_thread(0); 865 866 spdk_io_device_register(&device, create_cb, destroy_cb, sizeof(uint64_t), NULL); 867 868 ch = spdk_get_io_channel(&device); 869 SPDK_CU_ASSERT_FATAL(ch != NULL); 870 871 spdk_put_io_channel(ch); 872 873 ch = spdk_get_io_channel(&device); 874 SPDK_CU_ASSERT_FATAL(ch != NULL); 875 876 spdk_put_io_channel(ch); 877 poll_threads(); 878 879 spdk_io_device_unregister(&device, NULL); 880 poll_threads(); 881 882 CU_ASSERT(TAILQ_EMPTY(&g_io_devices)); 883 free_threads(); 884 CU_ASSERT(TAILQ_EMPTY(&g_threads)); 885 } 886 887 static void 888 thread_exit_test(void) 889 { 890 struct spdk_thread *thread; 891 struct spdk_io_channel *ch; 892 struct spdk_poller *poller1, *poller2; 893 void *ctx; 894 bool done1 = false, done2 = false, poller1_run = false, poller2_run = false; 895 int rc __attribute__((unused)); 896 897 MOCK_SET(spdk_get_ticks, 10); 898 MOCK_SET(spdk_get_ticks_hz, 1); 899 900 allocate_threads(4); 901 902 /* Test if all pending messages are reaped for the exiting thread, and the 903 * thread moves to the exited state. 
904 */ 905 set_thread(0); 906 thread = spdk_get_thread(); 907 908 /* Sending message to thread 0 will be accepted. */ 909 rc = spdk_thread_send_msg(thread, send_msg_cb, &done1); 910 CU_ASSERT(rc == 0); 911 CU_ASSERT(!done1); 912 913 /* Move thread 0 to the exiting state. */ 914 spdk_thread_exit(thread); 915 916 CU_ASSERT(spdk_thread_is_exited(thread) == false); 917 918 /* Sending message to thread 0 will be still accepted. */ 919 rc = spdk_thread_send_msg(thread, send_msg_cb, &done2); 920 CU_ASSERT(rc == 0); 921 922 /* Thread 0 will reap pending messages. */ 923 poll_thread(0); 924 CU_ASSERT(done1 == true); 925 CU_ASSERT(done2 == true); 926 927 /* Thread 0 will move to the exited state. */ 928 CU_ASSERT(spdk_thread_is_exited(thread) == true); 929 930 /* Test releasing I/O channel is reaped even after the thread moves to 931 * the exiting state 932 */ 933 set_thread(1); 934 935 spdk_io_device_register(&g_device1, create_cb_1, destroy_cb_1, sizeof(g_ctx1), NULL); 936 937 g_create_cb_calls = 0; 938 ch = spdk_get_io_channel(&g_device1); 939 CU_ASSERT(g_create_cb_calls == 1); 940 SPDK_CU_ASSERT_FATAL(ch != NULL); 941 942 ctx = spdk_io_channel_get_ctx(ch); 943 CU_ASSERT(*(uint64_t *)ctx == g_ctx1); 944 945 g_destroy_cb_calls = 0; 946 spdk_put_io_channel(ch); 947 948 thread = spdk_get_thread(); 949 spdk_thread_exit(thread); 950 951 /* Thread 1 will not move to the exited state yet because I/O channel release 952 * does not complete yet. 953 */ 954 CU_ASSERT(spdk_thread_is_exited(thread) == false); 955 956 /* Thread 1 will be able to get the another reference of I/O channel 957 * even after the thread moves to the exiting state. 
958 */ 959 g_create_cb_calls = 0; 960 ch = spdk_get_io_channel(&g_device1); 961 962 CU_ASSERT(g_create_cb_calls == 0); 963 SPDK_CU_ASSERT_FATAL(ch != NULL); 964 965 ctx = spdk_io_channel_get_ctx(ch); 966 CU_ASSERT(*(uint64_t *)ctx == g_ctx1); 967 968 spdk_put_io_channel(ch); 969 970 poll_threads(); 971 CU_ASSERT(g_destroy_cb_calls == 1); 972 973 /* Thread 1 will move to the exited state after I/O channel is released. 974 * are released. 975 */ 976 CU_ASSERT(spdk_thread_is_exited(thread) == true); 977 978 spdk_io_device_unregister(&g_device1, NULL); 979 poll_threads(); 980 981 /* Test if unregistering poller is reaped for the exiting thread, and the 982 * thread moves to the exited thread. 983 */ 984 set_thread(2); 985 thread = spdk_get_thread(); 986 987 poller1 = spdk_poller_register(poller_run_done, &poller1_run, 0); 988 CU_ASSERT(poller1 != NULL); 989 990 spdk_poller_unregister(&poller1); 991 992 spdk_thread_exit(thread); 993 994 poller2 = spdk_poller_register(poller_run_done, &poller2_run, 0); 995 996 poll_threads(); 997 998 CU_ASSERT(poller1_run == false); 999 CU_ASSERT(poller2_run == true); 1000 1001 CU_ASSERT(spdk_thread_is_exited(thread) == false); 1002 1003 spdk_poller_unregister(&poller2); 1004 1005 poll_threads(); 1006 1007 CU_ASSERT(spdk_thread_is_exited(thread) == true); 1008 1009 /* Test if the exiting thread is exited forcefully after timeout. */ 1010 set_thread(3); 1011 thread = spdk_get_thread(); 1012 1013 poller1 = spdk_poller_register(poller_run_done, &poller1_run, 0); 1014 CU_ASSERT(poller1 != NULL); 1015 1016 spdk_thread_exit(thread); 1017 1018 CU_ASSERT(spdk_thread_is_exited(thread) == false); 1019 1020 MOCK_SET(spdk_get_ticks, 11); 1021 1022 poll_threads(); 1023 1024 CU_ASSERT(spdk_thread_is_exited(thread) == false); 1025 1026 /* Cause timeout forcefully. 
*/ 1027 MOCK_SET(spdk_get_ticks, 15); 1028 1029 poll_threads(); 1030 1031 CU_ASSERT(spdk_thread_is_exited(thread) == true); 1032 1033 spdk_poller_unregister(&poller1); 1034 1035 poll_threads(); 1036 1037 MOCK_CLEAR(spdk_get_ticks); 1038 MOCK_CLEAR(spdk_get_ticks_hz); 1039 1040 free_threads(); 1041 } 1042 1043 static int 1044 poller_run_idle(void *ctx) 1045 { 1046 uint64_t delay_us = (uint64_t)ctx; 1047 1048 spdk_delay_us(delay_us); 1049 1050 return 0; 1051 } 1052 1053 static int 1054 poller_run_busy(void *ctx) 1055 { 1056 uint64_t delay_us = (uint64_t)ctx; 1057 1058 spdk_delay_us(delay_us); 1059 1060 return 1; 1061 } 1062 1063 static void 1064 thread_update_stats_test(void) 1065 { 1066 struct spdk_poller *poller; 1067 struct spdk_thread *thread; 1068 1069 MOCK_SET(spdk_get_ticks, 10); 1070 1071 allocate_threads(1); 1072 1073 set_thread(0); 1074 thread = spdk_get_thread(); 1075 1076 CU_ASSERT(thread->tsc_last == 10); 1077 CU_ASSERT(thread->stats.idle_tsc == 0); 1078 CU_ASSERT(thread->stats.busy_tsc == 0); 1079 1080 /* Test if idle_tsc is updated expectedly. */ 1081 poller = spdk_poller_register(poller_run_idle, (void *)1000, 0); 1082 CU_ASSERT(poller != NULL); 1083 1084 spdk_delay_us(100); 1085 1086 poll_thread_times(0, 1); 1087 1088 CU_ASSERT(thread->tsc_last == 1110); 1089 CU_ASSERT(thread->stats.idle_tsc == 1000); 1090 CU_ASSERT(thread->stats.busy_tsc == 0); 1091 1092 spdk_delay_us(100); 1093 1094 poll_thread_times(0, 1); 1095 1096 CU_ASSERT(thread->tsc_last == 2210); 1097 CU_ASSERT(thread->stats.idle_tsc == 2000); 1098 CU_ASSERT(thread->stats.busy_tsc == 0); 1099 1100 spdk_poller_unregister(&poller); 1101 1102 /* Test if busy_tsc is updated expectedly. 
*/ 1103 poller = spdk_poller_register(poller_run_busy, (void *)100000, 0); 1104 CU_ASSERT(poller != NULL); 1105 1106 spdk_delay_us(10000); 1107 1108 poll_thread_times(0, 1); 1109 1110 CU_ASSERT(thread->tsc_last == 112210); 1111 CU_ASSERT(thread->stats.idle_tsc == 2000); 1112 CU_ASSERT(thread->stats.busy_tsc == 100000); 1113 1114 spdk_delay_us(10000); 1115 1116 poll_thread_times(0, 1); 1117 1118 CU_ASSERT(thread->tsc_last == 222210); 1119 CU_ASSERT(thread->stats.idle_tsc == 2000); 1120 CU_ASSERT(thread->stats.busy_tsc == 200000); 1121 1122 spdk_poller_unregister(&poller); 1123 1124 MOCK_CLEAR(spdk_get_ticks); 1125 1126 free_threads(); 1127 } 1128 1129 struct ut_nested_ch { 1130 struct spdk_io_channel *child; 1131 struct spdk_poller *poller; 1132 }; 1133 1134 struct ut_nested_dev { 1135 struct ut_nested_dev *child; 1136 }; 1137 1138 static struct io_device * 1139 ut_get_io_device(void *dev) 1140 { 1141 struct io_device *tmp; 1142 1143 TAILQ_FOREACH(tmp, &g_io_devices, tailq) { 1144 if (tmp->io_device == dev) { 1145 return tmp; 1146 } 1147 } 1148 1149 return NULL; 1150 } 1151 1152 static int 1153 ut_null_poll(void *ctx) 1154 { 1155 return -1; 1156 } 1157 1158 static int 1159 ut_nested_ch_create_cb(void *io_device, void *ctx_buf) 1160 { 1161 struct ut_nested_ch *_ch = ctx_buf; 1162 struct ut_nested_dev *_dev = io_device; 1163 struct ut_nested_dev *_child; 1164 1165 _child = _dev->child; 1166 1167 if (_child != NULL) { 1168 _ch->child = spdk_get_io_channel(_child); 1169 SPDK_CU_ASSERT_FATAL(_ch->child != NULL); 1170 } else { 1171 _ch->child = NULL; 1172 } 1173 1174 _ch->poller = spdk_poller_register(ut_null_poll, NULL, 0); 1175 SPDK_CU_ASSERT_FATAL(_ch->poller != NULL); 1176 1177 return 0; 1178 } 1179 1180 static void 1181 ut_nested_ch_destroy_cb(void *io_device, void *ctx_buf) 1182 { 1183 struct ut_nested_ch *_ch = ctx_buf; 1184 struct spdk_io_channel *child; 1185 1186 child = _ch->child; 1187 if (child != NULL) { 1188 spdk_put_io_channel(child); 1189 } 1190 1191 
	/* NOTE(review): tail of the nested-channel destroy callback whose
	 * definition starts above this chunk — presumably ut_nested_ch_destroy_cb,
	 * given the later spdk_io_device_register() calls; confirm against the
	 * full file.
	 */
	spdk_poller_unregister(&_ch->poller);
}

/* Assert the state of a just-created nested channel: one channel reference
 * and one device reference.
 */
static void
ut_check_nested_ch_create(struct spdk_io_channel *ch, struct io_device *dev)
{
	CU_ASSERT(ch->ref == 1);
	CU_ASSERT(ch->dev == dev);
	CU_ASSERT(dev->refcnt == 1);
}

/* Assert the state of a nested channel after spdk_put_io_channel() but before
 * its deferred destroy message has been polled: refcount dropped to zero,
 * destroy in flight, device still referenced.
 */
static void
ut_check_nested_ch_destroy_pre(struct spdk_io_channel *ch, struct io_device *dev)
{
	CU_ASSERT(ch->ref == 0);
	CU_ASSERT(ch->destroy_ref == 1);
	CU_ASSERT(dev->refcnt == 1);
}

/* Assert that the device holds no channel references once destroy completed. */
static void
ut_check_nested_ch_destroy_post(struct io_device *dev)
{
	CU_ASSERT(dev->refcnt == 0);
}

/* Fatal-assert that a poller registered from a channel create callback exists. */
static void
ut_check_nested_poller_register(struct spdk_poller *poller)
{
	SPDK_CU_ASSERT_FATAL(poller != NULL);
}

/* Test that nested I/O channels (dev1 -> dev2 -> dev3, each create callback
 * getting a channel to its child device) are created and destroyed correctly,
 * and that thread exit only completes after every nested channel is released.
 */
static void
nested_channel(void)
{
	struct ut_nested_dev _dev1, _dev2, _dev3;
	struct ut_nested_ch *_ch1, *_ch2, *_ch3;
	struct io_device *dev1, *dev2, *dev3;
	struct spdk_io_channel *ch1, *ch2, *ch3;
	struct spdk_poller *poller;
	struct spdk_thread *thread;

	allocate_threads(1);
	set_thread(0);

	thread = spdk_get_thread();
	SPDK_CU_ASSERT_FATAL(thread != NULL);

	/* Chain the devices so each create callback opens a channel to its child. */
	_dev1.child = &_dev2;
	_dev2.child = &_dev3;
	_dev3.child = NULL;

	spdk_io_device_register(&_dev1, ut_nested_ch_create_cb, ut_nested_ch_destroy_cb,
				sizeof(struct ut_nested_ch), "dev1");
	spdk_io_device_register(&_dev2, ut_nested_ch_create_cb, ut_nested_ch_destroy_cb,
				sizeof(struct ut_nested_ch), "dev2");
	spdk_io_device_register(&_dev3, ut_nested_ch_create_cb, ut_nested_ch_destroy_cb,
				sizeof(struct ut_nested_ch), "dev3");

	dev1 = ut_get_io_device(&_dev1);
	SPDK_CU_ASSERT_FATAL(dev1 != NULL);
	dev2 = ut_get_io_device(&_dev2);
	SPDK_CU_ASSERT_FATAL(dev2 != NULL);
	dev3 = ut_get_io_device(&_dev3);
	SPDK_CU_ASSERT_FATAL(dev3 != NULL);

	/* A single call spdk_get_io_channel() to dev1 will also create channels
	 * to dev2 and dev3 continuously. Pollers will be registered together.
	 */
	ch1 = spdk_get_io_channel(&_dev1);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	_ch1 = spdk_io_channel_get_ctx(ch1);
	ch2 = _ch1->child;
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	_ch2 = spdk_io_channel_get_ctx(ch2);
	ch3 = _ch2->child;
	SPDK_CU_ASSERT_FATAL(ch3 != NULL);

	_ch3 = spdk_io_channel_get_ctx(ch3);
	CU_ASSERT(_ch3->child == NULL);

	ut_check_nested_ch_create(ch1, dev1);
	ut_check_nested_ch_create(ch2, dev2);
	ut_check_nested_ch_create(ch3, dev3);

	/* Also register one non-nested poller directly on the thread. */
	poller = spdk_poller_register(ut_null_poll, NULL, 0);

	ut_check_nested_poller_register(poller);
	ut_check_nested_poller_register(_ch1->poller);
	ut_check_nested_poller_register(_ch2->poller);
	ut_check_nested_poller_register(_ch3->poller);

	spdk_poller_unregister(&poller);
	poll_thread_times(0, 1);

	/* A single call spdk_put_io_channel() to dev1 will also destroy channels
	 * to dev2 and dev3 continuously. Pollers will be unregistered together.
	 */
	spdk_put_io_channel(ch1);

	/* Start exiting the current thread after unregistering the non-nested
	 * I/O channel.
	 */
	spdk_thread_exit(thread);

	/* Each poll_thread_times(0, 1) processes one deferred destroy message;
	 * the channels tear down outermost-first: ch1, then ch2, then ch3.
	 */
	ut_check_nested_ch_destroy_pre(ch1, dev1);
	poll_thread_times(0, 1);
	ut_check_nested_ch_destroy_post(dev1);

	CU_ASSERT(spdk_thread_is_exited(thread) == false);

	ut_check_nested_ch_destroy_pre(ch2, dev2);
	poll_thread_times(0, 1);
	ut_check_nested_ch_destroy_post(dev2);

	CU_ASSERT(spdk_thread_is_exited(thread) == false);

	ut_check_nested_ch_destroy_pre(ch3, dev3);
	poll_thread_times(0, 1);
	ut_check_nested_ch_destroy_post(dev3);

	/* Only after the innermost channel is gone can the thread finish exiting. */
	CU_ASSERT(spdk_thread_is_exited(thread) == true);

	spdk_io_device_unregister(&_dev1, NULL);
	spdk_io_device_unregister(&_dev2, NULL);
	spdk_io_device_unregister(&_dev3, NULL);
	CU_ASSERT(TAILQ_EMPTY(&g_io_devices));

	free_threads();
	CU_ASSERT(TAILQ_EMPTY(&g_threads));
}

/* Channel create callback: the io_device pointer itself is a uint64_t counter
 * of live channels; bump it on create.
 */
static int
create_cb2(void *io_device, void *ctx_buf)
{
	uint64_t *devcnt = (uint64_t *)io_device;

	*devcnt += 1;

	return 0;
}

/* Channel destroy callback: decrement the live-channel counter. */
static void
destroy_cb2(void *io_device, void *ctx_buf)
{
	uint64_t *devcnt = (uint64_t *)io_device;

	CU_ASSERT(*devcnt > 0);
	*devcnt -= 1;
}

/* Device unregister callback: all channels must already be destroyed. */
static void
unregister_cb2(void *io_device)
{
	uint64_t *devcnt = (uint64_t *)io_device;

	CU_ASSERT(*devcnt == 0);
}

/* Test the race between unregistering a device and exiting the threads that
 * hold channels to it: neither thread may complete its exit until its own
 * channel (and, for the unregistering thread, the device itself) is released.
 */
static void
device_unregister_and_thread_exit_race(void)
{
	uint64_t device = 0;
	struct spdk_io_channel *ch1, *ch2;
	struct spdk_thread *thread1, *thread2;

	/* Create two threads and each thread gets a channel from the same device.
	 */
	allocate_threads(2);
	set_thread(0);

	thread1 = spdk_get_thread();
	SPDK_CU_ASSERT_FATAL(thread1 != NULL);

	spdk_io_device_register(&device, create_cb2, destroy_cb2, sizeof(uint64_t), NULL);

	ch1 = spdk_get_io_channel(&device);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	set_thread(1);

	thread2 = spdk_get_thread();
	SPDK_CU_ASSERT_FATAL(thread2 != NULL);

	ch2 = spdk_get_io_channel(&device);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	set_thread(0);

	/* Move thread 0 to the exiting state, but it should keep exiting until two channels
	 * and a device are released.
	 */
	spdk_thread_exit(thread1);
	poll_thread(0);

	spdk_put_io_channel(ch1);

	spdk_io_device_unregister(&device, unregister_cb2);
	poll_thread(0);

	/* thread 1 still holds ch2, so the device is alive and thread 0 cannot finish. */
	CU_ASSERT(spdk_thread_is_exited(thread1) == false);

	set_thread(1);

	/* Move thread 1 to the exiting state, but it should keep exiting until its channel
	 * is released.
	 */
	spdk_thread_exit(thread2);
	poll_thread(1);

	CU_ASSERT(spdk_thread_is_exited(thread2) == false);

	spdk_put_io_channel(ch2);
	poll_thread(1);

	/* thread 1 exits first; thread 0 must still poll once more to observe the
	 * device unregister completing.
	 */
	CU_ASSERT(spdk_thread_is_exited(thread1) == false);
	CU_ASSERT(spdk_thread_is_exited(thread2) == true);

	poll_thread(0);

	CU_ASSERT(spdk_thread_is_exited(thread1) == true);

	free_threads();
}

/* Timed poller callback that does nothing and reports idle. */
static int
dummy_poller(void *arg)
{
	return SPDK_POLLER_IDLE;
}

/* White-box test (thread.c is #included) of thread->first_timed_poller, the
 * cached closest-expiring timed poller: it must track register, expiration,
 * deferred unregister, and deferred pause correctly.
 */
static void
cache_closest_timed_poller(void)
{
	struct spdk_thread *thread;
	struct spdk_poller *poller1, *poller2, *poller3, *tmp;

	allocate_threads(1);
	set_thread(0);

	thread = spdk_get_thread();
	SPDK_CU_ASSERT_FATAL(thread != NULL);

	poller1 = spdk_poller_register(dummy_poller, NULL, 1000);
	SPDK_CU_ASSERT_FATAL(poller1 != NULL);

	poller2 = spdk_poller_register(dummy_poller, NULL, 1500);
	SPDK_CU_ASSERT_FATAL(poller2 != NULL);

	poller3 = spdk_poller_register(dummy_poller, NULL, 1800);
	SPDK_CU_ASSERT_FATAL(poller3 != NULL);

	poll_threads();

	/* When multiple timed pollers are inserted, the cache should
	 * have the closest timed poller.
	 */
	CU_ASSERT(thread->first_timed_poller == poller1);
	CU_ASSERT(RB_MIN(timed_pollers_tree, &thread->timed_pollers) == poller1);

	spdk_delay_us(1000);
	poll_threads();

	CU_ASSERT(thread->first_timed_poller == poller2);
	CU_ASSERT(RB_MIN(timed_pollers_tree, &thread->timed_pollers) == poller2);

	/* If we unregister a timed poller by spdk_poller_unregister()
	 * when it is waiting, it is marked as being unregistered and
	 * is actually unregistered when it is expired.
	 *
	 * Hence if we unregister the closest timed poller when it is waiting,
	 * the cache is not updated to the next timed poller until it is expired.
	 */
	tmp = poller2;

	spdk_poller_unregister(&poller2);
	CU_ASSERT(poller2 == NULL);

	spdk_delay_us(499);
	poll_threads();

	CU_ASSERT(thread->first_timed_poller == tmp);
	CU_ASSERT(RB_MIN(timed_pollers_tree, &thread->timed_pollers) == tmp);

	spdk_delay_us(1);
	poll_threads();

	CU_ASSERT(thread->first_timed_poller == poller3);
	CU_ASSERT(RB_MIN(timed_pollers_tree, &thread->timed_pollers) == poller3);

	/* If we pause a timed poller by spdk_poller_pause() when it is waiting,
	 * it is marked as being paused and is actually paused when it is expired.
	 *
	 * Hence if we pause the closest timed poller when it is waiting, the cache
	 * is not updated to the next timed poller until it is expired.
	 */
	spdk_poller_pause(poller3);

	spdk_delay_us(299);
	poll_threads();

	CU_ASSERT(thread->first_timed_poller == poller3);
	CU_ASSERT(RB_MIN(timed_pollers_tree, &thread->timed_pollers) == poller3);

	spdk_delay_us(1);
	poll_threads();

	CU_ASSERT(thread->first_timed_poller == poller1);
	CU_ASSERT(RB_MIN(timed_pollers_tree, &thread->timed_pollers) == poller1);

	/* After unregistering all timed pollers, the cache should
	 * be NULL.
	 */
	spdk_poller_unregister(&poller1);
	spdk_poller_unregister(&poller3);

	spdk_delay_us(200);
	poll_threads();

	CU_ASSERT(thread->first_timed_poller == NULL);
	CU_ASSERT(RB_EMPTY(&thread->timed_pollers));

	free_threads();
}

/* White-box test of the first_timed_poller cache and the timed-poller tree when
 * several pollers share the same next_run_tick: ties are broken by insertion
 * (or re-insertion) order, and deferred unregister is honored at expiration.
 */
static void
multi_timed_pollers_have_same_expiration(void)
{
	struct spdk_thread *thread;
	struct spdk_poller *poller1, *poller2, *poller3, *poller4, *tmp;
	uint64_t start_ticks;

	allocate_threads(1);
	set_thread(0);

	thread = spdk_get_thread();
	SPDK_CU_ASSERT_FATAL(thread != NULL);

	/*
	 * case 1: multiple timed pollers have the same next_run_tick.
	 */
	start_ticks = spdk_get_ticks();

	poller1 = spdk_poller_register(dummy_poller, NULL, 500);
	SPDK_CU_ASSERT_FATAL(poller1 != NULL);

	poller2 = spdk_poller_register(dummy_poller, NULL, 500);
	SPDK_CU_ASSERT_FATAL(poller2 != NULL);

	poller3 = spdk_poller_register(dummy_poller, NULL, 1000);
	SPDK_CU_ASSERT_FATAL(poller3 != NULL);

	poller4 = spdk_poller_register(dummy_poller, NULL, 1500);
	SPDK_CU_ASSERT_FATAL(poller4 != NULL);

	/* poller1 and poller2 have the same next_run_tick but cache has poller1
	 * because poller1 is registered earlier than poller2.
	 */
	CU_ASSERT(thread->first_timed_poller == poller1);
	CU_ASSERT(poller1->next_run_tick == start_ticks + 500);
	CU_ASSERT(poller2->next_run_tick == start_ticks + 500);
	CU_ASSERT(poller3->next_run_tick == start_ticks + 1000);
	CU_ASSERT(poller4->next_run_tick == start_ticks + 1500);

	/* after 500 usec, poller1 and poller2 are expired. */
	spdk_delay_us(500);
	CU_ASSERT(spdk_get_ticks() == start_ticks + 500);
	poll_threads();

	/* poller1, poller2, and poller3 have the same next_run_tick but cache
	 * has poller3 because poller3 is not expired yet.
	 */
	CU_ASSERT(thread->first_timed_poller == poller3);
	CU_ASSERT(poller1->next_run_tick == start_ticks + 1000);
	CU_ASSERT(poller2->next_run_tick == start_ticks + 1000);
	CU_ASSERT(poller3->next_run_tick == start_ticks + 1000);
	CU_ASSERT(poller4->next_run_tick == start_ticks + 1500);

	/* after 500 usec, poller1, poller2, and poller3 are expired. */
	spdk_delay_us(500);
	CU_ASSERT(spdk_get_ticks() == start_ticks + 1000);
	poll_threads();

	/* poller1, poller2, and poller4 have the same next_run_tick but cache
	 * has poller4 because poller4 is not expired yet.
	 */
	CU_ASSERT(thread->first_timed_poller == poller4);
	CU_ASSERT(poller1->next_run_tick == start_ticks + 1500);
	CU_ASSERT(poller2->next_run_tick == start_ticks + 1500);
	CU_ASSERT(poller3->next_run_tick == start_ticks + 2000);
	CU_ASSERT(poller4->next_run_tick == start_ticks + 1500);

	/* after 500 usec, poller1, poller2, and poller4 are expired. */
	spdk_delay_us(500);
	CU_ASSERT(spdk_get_ticks() == start_ticks + 1500);
	poll_threads();

	/* poller1, poller2, and poller3 have the same next_run_tick but cache
	 * has poller3 because poller3 is updated earlier than poller1 and poller2.
	 */
	CU_ASSERT(thread->first_timed_poller == poller3);
	CU_ASSERT(poller1->next_run_tick == start_ticks + 2000);
	CU_ASSERT(poller2->next_run_tick == start_ticks + 2000);
	CU_ASSERT(poller3->next_run_tick == start_ticks + 2000);
	CU_ASSERT(poller4->next_run_tick == start_ticks + 3000);

	spdk_poller_unregister(&poller1);
	spdk_poller_unregister(&poller2);
	spdk_poller_unregister(&poller3);
	spdk_poller_unregister(&poller4);

	/* Deferred unregister: all four are removed once each has expired. */
	spdk_delay_us(1500);
	CU_ASSERT(spdk_get_ticks() == start_ticks + 3000);
	poll_threads();

	CU_ASSERT(thread->first_timed_poller == NULL);
	CU_ASSERT(RB_EMPTY(&thread->timed_pollers));

	/*
	 * case 2: unregister timed pollers while multiple timed pollers are registered.
	 */
	start_ticks = spdk_get_ticks();

	poller1 = spdk_poller_register(dummy_poller, NULL, 500);
	SPDK_CU_ASSERT_FATAL(poller1 != NULL);

	CU_ASSERT(thread->first_timed_poller == poller1);
	CU_ASSERT(poller1->next_run_tick == start_ticks + 500);

	/* after 250 usec, register poller2 and poller3.
	 */
	spdk_delay_us(250);
	CU_ASSERT(spdk_get_ticks() == start_ticks + 250);

	poller2 = spdk_poller_register(dummy_poller, NULL, 500);
	SPDK_CU_ASSERT_FATAL(poller2 != NULL);

	poller3 = spdk_poller_register(dummy_poller, NULL, 750);
	SPDK_CU_ASSERT_FATAL(poller3 != NULL);

	CU_ASSERT(thread->first_timed_poller == poller1);
	CU_ASSERT(poller1->next_run_tick == start_ticks + 500);
	CU_ASSERT(poller2->next_run_tick == start_ticks + 750);
	CU_ASSERT(poller3->next_run_tick == start_ticks + 1000);

	/* unregister poller2 which is not the closest. */
	tmp = poller2;
	spdk_poller_unregister(&poller2);

	/* after 250 usec, poller1 is expired. */
	spdk_delay_us(250);
	CU_ASSERT(spdk_get_ticks() == start_ticks + 500);
	poll_threads();

	/* poller2 is not unregistered yet because it is not expired. */
	CU_ASSERT(thread->first_timed_poller == tmp);
	CU_ASSERT(poller1->next_run_tick == start_ticks + 1000);
	CU_ASSERT(tmp->next_run_tick == start_ticks + 750);
	CU_ASSERT(poller3->next_run_tick == start_ticks + 1000);

	spdk_delay_us(250);
	CU_ASSERT(spdk_get_ticks() == start_ticks + 750);
	poll_threads();

	/* poller2 expired and was actually unregistered; poller3 is now closest. */
	CU_ASSERT(thread->first_timed_poller == poller3);
	CU_ASSERT(poller1->next_run_tick == start_ticks + 1000);
	CU_ASSERT(poller3->next_run_tick == start_ticks + 1000);

	spdk_poller_unregister(&poller3);

	spdk_delay_us(250);
	CU_ASSERT(spdk_get_ticks() == start_ticks + 1000);
	poll_threads();

	CU_ASSERT(thread->first_timed_poller == poller1);
	CU_ASSERT(poller1->next_run_tick == start_ticks + 1500);

	spdk_poller_unregister(&poller1);

	spdk_delay_us(500);
	CU_ASSERT(spdk_get_ticks() == start_ticks + 1500);
	poll_threads();

	CU_ASSERT(thread->first_timed_poller == NULL);
	CU_ASSERT(RB_EMPTY(&thread->timed_pollers));

	free_threads();
}

/* Register every test in the "io_channel" suite and run them; the process
 * exit code is the number of CUnit failures.
 */
int
main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	CU_set_error_action(CUEA_ABORT);
	CU_initialize_registry();

	suite = CU_add_suite("io_channel", NULL, NULL);

	CU_ADD_TEST(suite, thread_alloc);
	CU_ADD_TEST(suite, thread_send_msg);
	CU_ADD_TEST(suite, thread_poller);
	CU_ADD_TEST(suite, poller_pause);
	CU_ADD_TEST(suite, thread_for_each);
	CU_ADD_TEST(suite, for_each_channel_remove);
	CU_ADD_TEST(suite, for_each_channel_unreg);
	CU_ADD_TEST(suite, thread_name);
	CU_ADD_TEST(suite, channel);
	CU_ADD_TEST(suite, channel_destroy_races);
	CU_ADD_TEST(suite, thread_exit_test);
	CU_ADD_TEST(suite, thread_update_stats_test);
	CU_ADD_TEST(suite, nested_channel);
	CU_ADD_TEST(suite, device_unregister_and_thread_exit_race);
	CU_ADD_TEST(suite, cache_closest_timed_poller);
	CU_ADD_TEST(suite, multi_timed_pollers_have_same_expiration);

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	return num_failures;
}