/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "spdk_cunit.h"

#include "thread/thread.c"
#include "common/lib/test_env.c"
#include "common/lib/ut_multithread.c"

static void
_send_msg(spdk_thread_fn fn, void *ctx, void *thread_ctx)
{
	fn(ctx);
}

static void
thread_alloc(void)
{
	CU_ASSERT(TAILQ_EMPTY(&g_threads));
	allocate_threads(1);
	CU_ASSERT(!TAILQ_EMPTY(&g_threads));
	free_threads();
	CU_ASSERT(TAILQ_EMPTY(&g_threads));
}

static void
send_msg_cb(void *ctx)
{
	bool *done = ctx;

	*done = true;
}

static void
thread_send_msg(void)
{
	struct spdk_thread *thread0;
	bool done = false;

	allocate_threads(2);
	set_thread(0);
	thread0 = spdk_get_thread();

	set_thread(1);
	/* Simulate thread 1 sending a message to thread 0. */
	spdk_thread_send_msg(thread0, send_msg_cb, &done);

	/* We have not polled thread 0 yet, so done should be false. */
	CU_ASSERT(!done);

	/*
	 * Poll thread 1. The message was sent to thread 0, so this should be
	 * a nop and done should still be false.
	 */
	poll_thread(1);
	CU_ASSERT(!done);

	/*
	 * Poll thread 0. This should execute the message and done should then
	 * be true.
	 */
	poll_thread(0);
	CU_ASSERT(done);

	free_threads();
}
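
/*
 * Poller callback used by thread_poller(): it sets the flag passed via ctx so
 * the test can verify that the poller actually ran.
 */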
static int
poller_run_done(void *ctx)
{
	bool *poller_run = ctx;

	*poller_run = true;

	return -1;
}

static void
thread_poller(void)
{
	struct spdk_poller *poller = NULL;
	bool poller_run = false;

	allocate_threads(1);

	set_thread(0);
	reset_time();
	/* Register a poller with no-wait time and test execution */
	poller = spdk_poller_register(poller_run_done, &poller_run, 0);
	CU_ASSERT(poller != NULL);

	poll_threads();
	CU_ASSERT(poller_run == true);

	spdk_poller_unregister(&poller);
	CU_ASSERT(poller == NULL);

	/* Register a poller with 1000us wait time and test single execution */
	poller_run = false;
	poller = spdk_poller_register(poller_run_done, &poller_run, 1000);
	CU_ASSERT(poller != NULL);

	poll_threads();
	CU_ASSERT(poller_run == false);

	increment_time(1000);
	poll_threads();
	CU_ASSERT(poller_run == true);

	reset_time();
	poller_run = false;
	poll_threads();
	CU_ASSERT(poller_run == false);

	increment_time(1000);
	poll_threads();
	CU_ASSERT(poller_run == true);

	spdk_poller_unregister(&poller);
	CU_ASSERT(poller == NULL);

	free_threads();
}

static void
for_each_cb(void *ctx)
{
	int *count = ctx;

	(*count)++;
}

static void
thread_for_each(void)
{
	int count = 0;
	int i;

	allocate_threads(3);
	set_thread(0);

	spdk_for_each_thread(for_each_cb, &count, for_each_cb);

	/* We have not polled thread 0 yet, so count should be 0 */
	CU_ASSERT(count == 0);

	/* Poll each thread to verify the message is passed to each */
	for (i = 0; i < 3; i++) {
		poll_thread(i);
		CU_ASSERT(count == (i + 1));
	}

	/*
	 * After each thread is called, the completion calls it
	 * one more time.
	 */
	poll_thread(0);
	CU_ASSERT(count == 4);

	free_threads();
}
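
/*
 * I/O channel callbacks shared by the for_each_channel test cases below. Each
 * channel's context is a single int that channel_msg() increments, so the
 * tests can tell which channels the iteration visited.
 */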
static int
channel_create(void *io_device, void *ctx_buf)
{
	return 0;
}

static void
channel_destroy(void *io_device, void *ctx_buf)
{
}

static void
channel_msg(struct spdk_io_channel_iter *i)
{
	struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(i);
	int *count = spdk_io_channel_get_ctx(ch);

	(*count)++;

	spdk_for_each_channel_continue(i, 0);
}

static void
channel_cpl(struct spdk_io_channel_iter *i, int status)
{
}

static void
for_each_channel_remove(void)
{
	struct spdk_io_channel *ch0, *ch1, *ch2;
	int io_target;
	int count = 0;

	allocate_threads(3);
	spdk_io_device_register(&io_target, channel_create, channel_destroy, sizeof(int), NULL);
	set_thread(0);
	ch0 = spdk_get_io_channel(&io_target);
	set_thread(1);
	ch1 = spdk_get_io_channel(&io_target);
	set_thread(2);
	ch2 = spdk_get_io_channel(&io_target);

	/*
	 * Test that io_channel handles the case where we start to iterate through
	 * the channels, and during the iteration, one of the channels is deleted.
	 * This is done in a few different and sometimes non-intuitive orders, because
	 * some operations are deferred and won't execute until their threads are
	 * polled.
	 *
	 * Case #1: Put the I/O channel before spdk_for_each_channel.
	 */
	set_thread(0);
	spdk_put_io_channel(ch0);
	spdk_for_each_channel(&io_target, channel_msg, &count, channel_cpl);
	poll_threads();

	/*
	 * Case #2: Put the I/O channel after spdk_for_each_channel, but before
	 * thread 0 is polled.
	 */
	ch0 = spdk_get_io_channel(&io_target);
	spdk_for_each_channel(&io_target, channel_msg, &count, channel_cpl);
	spdk_put_io_channel(ch0);
	poll_threads();

	set_thread(1);
	spdk_put_io_channel(ch1);
	set_thread(2);
	spdk_put_io_channel(ch2);
	spdk_io_device_unregister(&io_target, NULL);
	poll_threads();

	free_threads();
}
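
/*
 * Context and callbacks for the for_each_channel_unreg test. The two flags
 * record whether the per-channel message and the final completion callback
 * have executed.
 */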
struct unreg_ctx {
	bool	ch_done;
	bool	foreach_done;
};

static void
unreg_ch_done(struct spdk_io_channel_iter *i)
{
	struct unreg_ctx *ctx = spdk_io_channel_iter_get_ctx(i);

	ctx->ch_done = true;

	SPDK_CU_ASSERT_FATAL(i->cur_thread != NULL);
	spdk_for_each_channel_continue(i, 0);
}

static void
unreg_foreach_done(struct spdk_io_channel_iter *i, int status)
{
	struct unreg_ctx *ctx = spdk_io_channel_iter_get_ctx(i);

	ctx->foreach_done = true;
}

static void
for_each_channel_unreg(void)
{
	struct spdk_io_channel *ch0;
	struct io_device *dev;
	struct unreg_ctx ctx = {};
	int io_target;

	allocate_threads(1);
	CU_ASSERT(TAILQ_EMPTY(&g_io_devices));
	spdk_io_device_register(&io_target, channel_create, channel_destroy, sizeof(int), NULL);
	CU_ASSERT(!TAILQ_EMPTY(&g_io_devices));
	dev = TAILQ_FIRST(&g_io_devices);
	SPDK_CU_ASSERT_FATAL(dev != NULL);
	CU_ASSERT(TAILQ_NEXT(dev, tailq) == NULL);
	set_thread(0);
	ch0 = spdk_get_io_channel(&io_target);
	spdk_for_each_channel(&io_target, unreg_ch_done, &ctx, unreg_foreach_done);

	spdk_io_device_unregister(&io_target, NULL);
	/*
	 * There is an outstanding foreach call on the io_device, so the unregister should not
	 * have removed the device.
	 */
	CU_ASSERT(dev == TAILQ_FIRST(&g_io_devices));
	spdk_io_device_register(&io_target, channel_create, channel_destroy, sizeof(int), NULL);
	/*
	 * There is already a device registered at &io_target, so a new io_device should not
	 * have been added to g_io_devices.
	 */
	CU_ASSERT(dev == TAILQ_FIRST(&g_io_devices));
	CU_ASSERT(TAILQ_NEXT(dev, tailq) == NULL);

	poll_thread(0);
	CU_ASSERT(ctx.ch_done == true);
	CU_ASSERT(ctx.foreach_done == true);
	/*
	 * There are no more foreach operations outstanding, so we can unregister the device,
	 * even though a channel still exists for the device.
	 */
	spdk_io_device_unregister(&io_target, NULL);
	CU_ASSERT(TAILQ_EMPTY(&g_io_devices));

	set_thread(0);
	spdk_put_io_channel(ch0);

	poll_threads();

	free_threads();
}

static void
thread_name(void)
{
	struct spdk_thread *thread;
	const char *name;

	/* Create thread with no name, which automatically generates one */
	spdk_allocate_thread(_send_msg, NULL, NULL, NULL, NULL);
	thread = spdk_get_thread();
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	name = spdk_thread_get_name(thread);
	CU_ASSERT(name != NULL);
	spdk_free_thread();

	/* Create thread named "test_thread" */
	spdk_allocate_thread(_send_msg, NULL, NULL, NULL, "test_thread");
	thread = spdk_get_thread();
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	name = spdk_thread_get_name(thread);
	SPDK_CU_ASSERT_FATAL(name != NULL);
	CU_ASSERT(strcmp(name, "test_thread") == 0);
	spdk_free_thread();
}

static uint64_t device1;
static uint64_t device2;
static uint64_t device3;

static uint64_t ctx1 = 0x1111;
static uint64_t ctx2 = 0x2222;

static int g_create_cb_calls = 0;
static int g_destroy_cb_calls = 0;

static int
create_cb_1(void *io_device, void *ctx_buf)
{
	CU_ASSERT(io_device == &device1);
	*(uint64_t *)ctx_buf = ctx1;
	g_create_cb_calls++;
	return 0;
}

static void
destroy_cb_1(void *io_device, void *ctx_buf)
{
	CU_ASSERT(io_device == &device1);
	CU_ASSERT(*(uint64_t *)ctx_buf == ctx1);
	g_destroy_cb_calls++;
}

static int
create_cb_2(void *io_device, void *ctx_buf)
{
	CU_ASSERT(io_device == &device2);
	*(uint64_t *)ctx_buf = ctx2;
	g_create_cb_calls++;
	return 0;
}

static void
destroy_cb_2(void *io_device, void *ctx_buf)
{
	CU_ASSERT(io_device == &device2);
	CU_ASSERT(*(uint64_t *)ctx_buf == ctx2);
	g_destroy_cb_calls++;
}

static void
channel(void)
{
	struct spdk_io_channel *ch1, *ch2;
	void *ctx;

	spdk_allocate_thread(_send_msg, NULL, NULL, NULL, "thread0");
	spdk_io_device_register(&device1, create_cb_1, destroy_cb_1, sizeof(ctx1), NULL);
	spdk_io_device_register(&device2, create_cb_2, destroy_cb_2, sizeof(ctx2), NULL);

	g_create_cb_calls = 0;
	ch1 = spdk_get_io_channel(&device1);
	CU_ASSERT(g_create_cb_calls == 1);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	g_create_cb_calls = 0;
	ch2 = spdk_get_io_channel(&device1);
	CU_ASSERT(g_create_cb_calls == 0);
	CU_ASSERT(ch1 == ch2);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	g_destroy_cb_calls = 0;
	spdk_put_io_channel(ch2);
	CU_ASSERT(g_destroy_cb_calls == 0);

	g_create_cb_calls = 0;
	ch2 = spdk_get_io_channel(&device2);
	CU_ASSERT(g_create_cb_calls == 1);
	CU_ASSERT(ch1 != ch2);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	ctx = spdk_io_channel_get_ctx(ch2);
	CU_ASSERT(*(uint64_t *)ctx == ctx2);

	g_destroy_cb_calls = 0;
	spdk_put_io_channel(ch1);
	CU_ASSERT(g_destroy_cb_calls == 1);

	g_destroy_cb_calls = 0;
	spdk_put_io_channel(ch2);
	CU_ASSERT(g_destroy_cb_calls == 1);

	ch1 = spdk_get_io_channel(&device3);
	CU_ASSERT(ch1 == NULL);

	spdk_io_device_unregister(&device1, NULL);
	spdk_io_device_unregister(&device2, NULL);
	CU_ASSERT(TAILQ_EMPTY(&g_io_devices));
	spdk_free_thread();
	CU_ASSERT(TAILQ_EMPTY(&g_threads));
}
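
/*
 * Register every test case with a single CUnit suite, run them, and return
 * the number of failed assertions as the process exit code.
 */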
int
main(int argc, char **argv)
{
	CU_pSuite	suite = NULL;
	unsigned int	num_failures;

	if (CU_initialize_registry() != CUE_SUCCESS) {
		return CU_get_error();
	}

	suite = CU_add_suite("io_channel", NULL, NULL);
	if (suite == NULL) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	if (
		CU_add_test(suite, "thread_alloc", thread_alloc) == NULL ||
		CU_add_test(suite, "thread_send_msg", thread_send_msg) == NULL ||
		CU_add_test(suite, "thread_poller", thread_poller) == NULL ||
		CU_add_test(suite, "thread_for_each", thread_for_each) == NULL ||
		CU_add_test(suite, "for_each_channel_remove", for_each_channel_remove) == NULL ||
		CU_add_test(suite, "for_each_channel_unreg", for_each_channel_unreg) == NULL ||
		CU_add_test(suite, "thread_name", thread_name) == NULL ||
		CU_add_test(suite, "channel", channel) == NULL
	) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	return num_failures;
}