/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Unit tests for the SPDK thread and I/O channel abstractions.
 *
 * The implementation under test (thread/thread.c) is #included directly so
 * that the assertions below can inspect its file-scope state (g_threads,
 * g_io_devices).  ut_multithread.c supplies a simulated multi-thread harness
 * (allocate_threads/set_thread/poll_thread/poll_threads) in which deferred
 * messages only execute when their target thread is explicitly polled.
 */

#include "spdk/stdinc.h"

#include "spdk_cunit.h"

#include "thread/thread.c"
#include "common/lib/test_env.c"
#include "common/lib/ut_multithread.c"

/*
 * Message-passing stub for spdk_allocate_thread(): runs the message
 * immediately on the calling thread instead of queueing it.
 */
static void
_send_msg(spdk_thread_fn fn, void *ctx, void *thread_ctx)
{
	fn(ctx);
}

/*
 * Verify that allocate_threads()/free_threads() add entries to and remove
 * entries from the global thread list.
 */
static void
thread_alloc(void)
{
	CU_ASSERT(TAILQ_EMPTY(&g_threads));
	allocate_threads(1);
	CU_ASSERT(!TAILQ_EMPTY(&g_threads));
	free_threads();
	CU_ASSERT(TAILQ_EMPTY(&g_threads));
}

/* Message callback: record that the message was delivered. */
static void
send_msg_cb(void *ctx)
{
	bool *done = ctx;

	*done = true;
}

/*
 * Verify that a message sent with spdk_thread_send_msg() executes only when
 * the *target* thread is polled, not the sender.
 */
static void
thread_send_msg(void)
{
	struct spdk_thread *thread0;
	bool done = false;

	allocate_threads(2);
	set_thread(0);
	thread0 = spdk_get_thread();

	set_thread(1);
	/* Simulate thread 1 sending a message to thread 0. */
	spdk_thread_send_msg(thread0, send_msg_cb, &done);

	/* We have not polled thread 0 yet, so done should be false. */
	CU_ASSERT(!done);

	/*
	 * Poll thread 1. The message was sent to thread 0, so this should be
	 * a nop and done should still be false.
	 */
	poll_thread(1);
	CU_ASSERT(!done);

	/*
	 * Poll thread 0. This should execute the message and done should then
	 * be true.
	 */
	poll_thread(0);
	CU_ASSERT(done);

	free_threads();
}

/* Per-thread callback for spdk_for_each_thread(): count invocations. */
static void
for_each_cb(void *ctx)
{
	int *count = ctx;

	(*count)++;
}

/*
 * Verify that spdk_for_each_thread() visits each thread exactly once, in
 * polling order, and then runs the completion callback on the originating
 * thread.  for_each_cb is used as both the per-thread and the completion
 * callback, so the final count is thread_count + 1.
 */
static void
thread_for_each(void)
{
	int count = 0;
	int i;

	allocate_threads(3);
	set_thread(0);

	spdk_for_each_thread(for_each_cb, &count, for_each_cb);

	/* We have not polled thread 0 yet, so count should be 0 */
	CU_ASSERT(count == 0);

	/* Poll each thread to verify the message is passed to each */
	for (i = 0; i < 3; i++) {
		poll_thread(i);
		CU_ASSERT(count == (i + 1));
	}

	/*
	 * After each thread is called, the completion calls it
	 * one more time.
	 */
	poll_thread(0);
	CU_ASSERT(count == 4);

	free_threads();
}

/* Minimal channel create callback: no per-channel setup needed. */
static int
channel_create(void *io_device, void *ctx_buf)
{
	return 0;
}

/* Minimal channel destroy callback: nothing to tear down. */
static void
channel_destroy(void *io_device, void *ctx_buf)
{
}

/*
 * Per-channel callback for spdk_for_each_channel(): bump the iteration
 * context's counter, then advance the iterator.
 */
static void
channel_msg(struct spdk_io_channel_iter *i)
{
	struct spdk_io_channel *ch = spdk_io_channel_iter_get_channel(i);
	int *count = spdk_io_channel_get_ctx(ch);

	(*count)++;

	spdk_for_each_channel_continue(i, 0);
}

/* Completion callback for spdk_for_each_channel(): nothing to do. */
static void
channel_cpl(struct spdk_io_channel_iter *i, int status)
{
}

/*
 * Exercise spdk_for_each_channel() when a channel is released around the
 * same time the iteration is started; see the comment block inside for the
 * specific orderings tested.
 */
static void
for_each_channel_remove(void)
{
	struct spdk_io_channel *ch0, *ch1, *ch2;
	int io_target;
	int count = 0;

	allocate_threads(3);
	spdk_io_device_register(&io_target, channel_create, channel_destroy, sizeof(int));
	set_thread(0);
	ch0 = spdk_get_io_channel(&io_target);
	set_thread(1);
	ch1 = spdk_get_io_channel(&io_target);
	set_thread(2);
	ch2 = spdk_get_io_channel(&io_target);

	/*
	 * Test that io_channel handles the case where we start to iterate through
	 * the channels, and during the iteration, one of the channels is deleted.
	 * This is done in some different and sometimes non-intuitive orders, because
	 * some operations are deferred and won't execute until their threads are
	 * polled.
	 *
	 * Case #1: Put the I/O channel before spdk_for_each_channel.
	 */
	set_thread(0);
	spdk_put_io_channel(ch0);
	spdk_for_each_channel(&io_target, channel_msg, &count, channel_cpl);
	poll_threads();

	/*
	 * Case #2: Put the I/O channel after spdk_for_each_channel, but before
	 * thread 0 is polled.
	 */
	ch0 = spdk_get_io_channel(&io_target);
	spdk_for_each_channel(&io_target, channel_msg, &count, channel_cpl);
	spdk_put_io_channel(ch0);
	poll_threads();

	set_thread(1);
	spdk_put_io_channel(ch1);
	set_thread(2);
	spdk_put_io_channel(ch2);
	spdk_io_device_unregister(&io_target, NULL);
	poll_threads();

	free_threads();
}

/* Shared context for the for_each_channel_unreg test callbacks. */
struct unreg_ctx {
	bool ch_done;		/* set when the per-channel callback ran */
	bool foreach_done;	/* set when the foreach completion ran */
};

/* Per-channel callback: flag that the channel was visited and continue. */
static void
unreg_ch_done(struct spdk_io_channel_iter *i)
{
	struct unreg_ctx *ctx = spdk_io_channel_iter_get_ctx(i);

	ctx->ch_done = true;

	spdk_for_each_channel_continue(i, 0);
}

/* Foreach completion callback: flag that the whole iteration finished. */
static void
unreg_foreach_done(struct spdk_io_channel_iter *i, int status)
{
	struct unreg_ctx *ctx = spdk_io_channel_iter_get_ctx(i);

	ctx->foreach_done = true;
}

/*
 * Verify unregister semantics while a spdk_for_each_channel() iteration is
 * outstanding: the device must stay on g_io_devices until the iteration
 * completes, and a re-register at the same address must not add a duplicate.
 */
static void
for_each_channel_unreg(void)
{
	struct spdk_io_channel *ch0;
	struct io_device *dev;
	struct unreg_ctx ctx = {};
	int io_target;

	allocate_threads(1);
	CU_ASSERT(TAILQ_EMPTY(&g_io_devices));
	spdk_io_device_register(&io_target, channel_create, channel_destroy, sizeof(int));
	CU_ASSERT(!TAILQ_EMPTY(&g_io_devices));
	dev = TAILQ_FIRST(&g_io_devices);
	SPDK_CU_ASSERT_FATAL(dev != NULL);
	CU_ASSERT(TAILQ_NEXT(dev, tailq) == NULL);
	set_thread(0);
	ch0 = spdk_get_io_channel(&io_target);
	spdk_for_each_channel(&io_target, unreg_ch_done, &ctx, unreg_foreach_done);

	spdk_io_device_unregister(&io_target, NULL);
	/*
	 * There is an outstanding foreach call on the io_device, so the unregister should not
	 * have removed the device.
	 */
	CU_ASSERT(dev == TAILQ_FIRST(&g_io_devices));
	spdk_io_device_register(&io_target, channel_create, channel_destroy, sizeof(int));
	/*
	 * There is already a device registered at &io_target, so a new io_device should not
	 * have been added to g_io_devices.
	 */
	CU_ASSERT(dev == TAILQ_FIRST(&g_io_devices));
	CU_ASSERT(TAILQ_NEXT(dev, tailq) == NULL);

	poll_thread(0);
	CU_ASSERT(ctx.ch_done == true);
	CU_ASSERT(ctx.foreach_done == true);
	/*
	 * There are no more foreach operations outstanding, so we can unregister the device,
	 * even though a channel still exists for the device.
	 */
	spdk_io_device_unregister(&io_target, NULL);
	CU_ASSERT(TAILQ_EMPTY(&g_io_devices));

	set_thread(0);
	spdk_put_io_channel(ch0);

	poll_threads();

	free_threads();
}

/*
 * Verify spdk_thread_get_name(): NULL when the thread was allocated without
 * a name, and an exact match when a name was supplied.
 */
static void
thread_name(void)
{
	struct spdk_thread *thread;
	const char *name;

	/* Create thread with no name */
	spdk_allocate_thread(_send_msg, NULL, NULL, NULL, NULL);
	thread = spdk_get_thread();
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	name = spdk_thread_get_name(thread);
	CU_ASSERT(name == NULL);
	spdk_free_thread();

	/* Create thread named "test_thread" */
	spdk_allocate_thread(_send_msg, NULL, NULL, NULL, "test_thread");
	thread = spdk_get_thread();
	SPDK_CU_ASSERT_FATAL(thread != NULL);
	name = spdk_thread_get_name(thread);
	SPDK_CU_ASSERT_FATAL(name != NULL);
	CU_ASSERT(strcmp(name, "test_thread") == 0);
	spdk_free_thread();
}

/*
 * I/O device handles for the channel() test.  Only their addresses matter;
 * device3 is deliberately never registered.
 */
static uint64_t device1;
static uint64_t device2;
static uint64_t device3;

/* Sentinel values written into each device's per-channel context buffer. */
static uint64_t ctx1 = 0x1111;
static uint64_t ctx2 = 0x2222;

/* Counters reset before each get/put to observe callback invocations. */
static int g_create_cb_calls = 0;
static int g_destroy_cb_calls = 0;

/* Channel create callback for device1: stamp its context with ctx1. */
static int
create_cb_1(void *io_device, void *ctx_buf)
{
	CU_ASSERT(io_device == &device1);
	*(uint64_t *)ctx_buf = ctx1;
	g_create_cb_calls++;
	return 0;
}

/* Channel destroy callback for device1: verify the ctx1 stamp survived. */
static void
destroy_cb_1(void *io_device, void *ctx_buf)
{
	CU_ASSERT(io_device == &device1);
	CU_ASSERT(*(uint64_t *)ctx_buf == ctx1);
	g_destroy_cb_calls++;
}

/* Channel create callback for device2: stamp its context with ctx2. */
static int
create_cb_2(void *io_device, void *ctx_buf)
{
	CU_ASSERT(io_device == &device2);
	*(uint64_t *)ctx_buf = ctx2;
	g_create_cb_calls++;
	return 0;
}

/* Channel destroy callback for device2: verify the ctx2 stamp survived. */
static void
destroy_cb_2(void *io_device, void *ctx_buf)
{
	CU_ASSERT(io_device == &device2);
	CU_ASSERT(*(uint64_t *)ctx_buf == ctx2);
	g_destroy_cb_calls++;
}

/*
 * Verify I/O channel reference counting on a single thread: a second get on
 * the same device returns the same channel without a create callback, the
 * destroy callback fires only when the last reference is put, distinct
 * devices yield distinct channels, and getting a channel for an unregistered
 * device fails.
 */
static void
channel(void)
{
	struct spdk_io_channel *ch1, *ch2;
	void *ctx;

	spdk_allocate_thread(_send_msg, NULL, NULL, NULL, "thread0");
	spdk_io_device_register(&device1, create_cb_1, destroy_cb_1, sizeof(ctx1));
	spdk_io_device_register(&device2, create_cb_2, destroy_cb_2, sizeof(ctx2));

	/* First get on device1 must invoke the create callback. */
	g_create_cb_calls = 0;
	ch1 = spdk_get_io_channel(&device1);
	CU_ASSERT(g_create_cb_calls == 1);
	SPDK_CU_ASSERT_FATAL(ch1 != NULL);

	/* Second get returns the same channel with no new create call. */
	g_create_cb_calls = 0;
	ch2 = spdk_get_io_channel(&device1);
	CU_ASSERT(g_create_cb_calls == 0);
	CU_ASSERT(ch1 == ch2);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	/* Putting one of two references must not destroy the channel. */
	g_destroy_cb_calls = 0;
	spdk_put_io_channel(ch2);
	CU_ASSERT(g_destroy_cb_calls == 0);

	/* A different device gets its own, distinct channel. */
	g_create_cb_calls = 0;
	ch2 = spdk_get_io_channel(&device2);
	CU_ASSERT(g_create_cb_calls == 1);
	CU_ASSERT(ch1 != ch2);
	SPDK_CU_ASSERT_FATAL(ch2 != NULL);

	ctx = spdk_io_channel_get_ctx(ch2);
	CU_ASSERT(*(uint64_t *)ctx == ctx2);

	/* Dropping the last reference triggers the destroy callback. */
	g_destroy_cb_calls = 0;
	spdk_put_io_channel(ch1);
	CU_ASSERT(g_destroy_cb_calls == 1);

	g_destroy_cb_calls = 0;
	spdk_put_io_channel(ch2);
	CU_ASSERT(g_destroy_cb_calls == 1);

	/* device3 was never registered, so no channel can be obtained. */
	ch1 = spdk_get_io_channel(&device3);
	CU_ASSERT(ch1 == NULL);

	spdk_io_device_unregister(&device1, NULL);
	spdk_io_device_unregister(&device2, NULL);
	CU_ASSERT(TAILQ_EMPTY(&g_io_devices));
	spdk_free_thread();
	CU_ASSERT(TAILQ_EMPTY(&g_threads));
}

/*
 * Register and run the test suite under the CUnit basic interface.
 * Returns the number of failed assertions (0 on full success) or a CUnit
 * error code if registry/suite/test setup fails.
 */
int
main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	if (CU_initialize_registry() != CUE_SUCCESS) {
		return CU_get_error();
	}

	suite = CU_add_suite("io_channel", NULL, NULL);
	if (suite == NULL) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	if (
		CU_add_test(suite, "thread_alloc", thread_alloc) == NULL ||
		CU_add_test(suite, "thread_send_msg", thread_send_msg) == NULL ||
		CU_add_test(suite, "thread_for_each", thread_for_each) == NULL ||
		CU_add_test(suite, "for_each_channel_remove", for_each_channel_remove) == NULL ||
		CU_add_test(suite, "for_each_channel_unreg", for_each_channel_unreg) == NULL ||
		CU_add_test(suite, "thread_name", thread_name) == NULL ||
		CU_add_test(suite, "channel", channel) == NULL
	) {
		CU_cleanup_registry();
		return CU_get_error();
	}

	CU_basic_set_mode(CU_BRM_VERBOSE);
	CU_basic_run_tests();
	num_failures = CU_get_number_of_failures();
	CU_cleanup_registry();
	return num_failures;
}