/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2022 Intel Corporation.
 * All rights reserved.
 */

#include "spdk/stdinc.h"
#include "spdk_internal/cunit.h"
#include "spdk/env.h"
#include "thread/thread_internal.h"
#include "spdk_internal/mock.h"

#include "bdev/raid/bdev_raid.h"
#include "bdev/raid/concat.c"
#include "../common.c"

DEFINE_STUB(spdk_bdev_readv_blocks_with_md, int, (struct spdk_bdev_desc *desc,
		struct spdk_io_channel *ch,
		struct iovec *iov, int iovcnt, void *md,
		uint64_t offset_blocks, uint64_t num_blocks,
		spdk_bdev_io_completion_cb cb, void *cb_arg), 0);
DEFINE_STUB(spdk_bdev_writev_blocks_with_md, int, (struct spdk_bdev_desc *desc,
		struct spdk_io_channel *ch,
		struct iovec *iov, int iovcnt, void *md,
		uint64_t offset_blocks, uint64_t num_blocks,
		spdk_bdev_io_completion_cb cb, void *cb_arg), 0);

#define BLOCK_LEN (4096)

enum CONCAT_IO_TYPE {
	CONCAT_NONE = 0,
	CONCAT_WRITEV,
	CONCAT_READV,
	CONCAT_FLUSH,
	CONCAT_UNMAP,
};

#define MAX_RECORDS (10)
/*
 * Store the information of the IO requests sent to the underlying bdevs.
 * A single null-payload request to the concat bdev may fan out into
 * multiple requests to the underlying bdevs, so the request information
 * is stored in arrays.
 */
struct req_records {
	uint64_t offset_blocks[MAX_RECORDS];
	uint64_t num_blocks[MAX_RECORDS];
	enum CONCAT_IO_TYPE io_type[MAX_RECORDS];
	int count;
	void *md;
} g_req_records;
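/*
 * For illustration only (hypothetical values, not used by the tests):
 * a single concat unmap that straddles two base bdevs of 8 blocks each,
 * covering concat blocks [9, 18), would leave g_req_records looking like:
 *
 *   count            == 2
 *   offset_blocks[0] == 1, num_blocks[0] == 7, io_type[0] == CONCAT_UNMAP
 *   offset_blocks[1] == 0, num_blocks[1] == 2, io_type[1] == CONCAT_UNMAP
 */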
/*
 * When g_succeed is true, the spdk_bdev_readv/writev/unmap/flush_blocks
 * stubs below return 0; when it is false, they return -ENOMEM.
 * We always set it to false before submitting an IO request, so the
 * request gets queued via raid_bdev_queue_io_wait. Our stubbed
 * raid_bdev_queue_io_wait sets g_succeed to true and re-submits the
 * request, which then succeeds on the second attempt.
 */
bool g_succeed;

DEFINE_STUB_V(raid_bdev_module_list_add, (struct raid_bdev_module *raid_module));
DEFINE_STUB_V(raid_bdev_io_complete, (struct raid_bdev_io *raid_io,
				      enum spdk_bdev_io_status status));
DEFINE_STUB_V(spdk_bdev_free_io, (struct spdk_bdev_io *bdev_io));
DEFINE_STUB(raid_bdev_io_complete_part, bool,
	    (struct raid_bdev_io *raid_io, uint64_t completed,
	     enum spdk_bdev_io_status status),
	    true);

int
spdk_bdev_readv_blocks_ext(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
			   struct iovec *iov, int iovcnt, uint64_t offset_blocks, uint64_t num_blocks,
			   spdk_bdev_io_completion_cb cb, void *cb_arg, struct spdk_bdev_ext_io_opts *opts)
{
	if (g_succeed) {
		int i = g_req_records.count;

		g_req_records.offset_blocks[i] = offset_blocks;
		g_req_records.num_blocks[i] = num_blocks;
		g_req_records.io_type[i] = CONCAT_READV;
		g_req_records.md = opts->metadata;
		g_req_records.count++;
		cb(NULL, true, cb_arg);
		return 0;
	} else {
		return -ENOMEM;
	}
}

int
spdk_bdev_writev_blocks_ext(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
			    struct iovec *iov, int iovcnt, uint64_t offset_blocks, uint64_t num_blocks,
			    spdk_bdev_io_completion_cb cb, void *cb_arg, struct spdk_bdev_ext_io_opts *opts)
{
	if (g_succeed) {
		int i = g_req_records.count;

		g_req_records.offset_blocks[i] = offset_blocks;
		g_req_records.num_blocks[i] = num_blocks;
		g_req_records.io_type[i] = CONCAT_WRITEV;
		g_req_records.md = opts->metadata;
		g_req_records.count++;
		cb(NULL, true, cb_arg);
		return 0;
	} else {
		return -ENOMEM;
	}
}

int
spdk_bdev_unmap_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		       uint64_t offset_blocks, uint64_t num_blocks,
		       spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	if (g_succeed) {
		int i = g_req_records.count;

		g_req_records.offset_blocks[i] = offset_blocks;
		g_req_records.num_blocks[i] = num_blocks;
		g_req_records.io_type[i] = CONCAT_UNMAP;
		g_req_records.count++;
		cb(NULL, true, cb_arg);
		return 0;
	} else {
		return -ENOMEM;
	}
}

int
spdk_bdev_flush_blocks(struct spdk_bdev_desc *desc, struct spdk_io_channel *ch,
		       uint64_t offset_blocks, uint64_t num_blocks,
		       spdk_bdev_io_completion_cb cb, void *cb_arg)
{
	if (g_succeed) {
		int i = g_req_records.count;

		g_req_records.offset_blocks[i] = offset_blocks;
		g_req_records.num_blocks[i] = num_blocks;
		g_req_records.io_type[i] = CONCAT_FLUSH;
		g_req_records.count++;
		cb(NULL, true, cb_arg);
		return 0;
	} else {
		return -ENOMEM;
	}
}

void
raid_bdev_queue_io_wait(struct raid_bdev_io *raid_io, struct spdk_bdev *bdev,
			struct spdk_io_channel *ch, spdk_bdev_io_wait_cb cb_fn)
{
	g_succeed = true;
	cb_fn(raid_io);
}

static void
init_globals(void)
{
	int i;

	for (i = 0; i < MAX_RECORDS; i++) {
		g_req_records.offset_blocks[i] = 0;
		g_req_records.num_blocks[i] = 0;
		g_req_records.io_type[i] = CONCAT_NONE;
	}
	g_req_records.count = 0;
	g_req_records.md = NULL;
	g_succeed = false;
}
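/*
 * Sketch of the ENOMEM retry flow the stubs above model (illustrative;
 * it assumes concat queues the IO via raid_bdev_queue_io_wait() when a
 * base bdev submission returns -ENOMEM, which is the behavior these
 * tests exercise):
 *
 *   init_globals();                      // g_succeed = false
 *   concat_submit_rw_request(raid_io);   // base bdev stub returns -ENOMEM,
 *                                        // so concat queues the IO
 *   // -> our raid_bdev_queue_io_wait() stub sets g_succeed = true and
 *   //    immediately invokes cb_fn(raid_io), re-submitting the request
 *   // -> the retried stub call records the IO in g_req_records,
 *   //    invokes the completion callback, and returns 0
 */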
static int
test_setup(void)
{
	uint8_t num_base_bdevs_values[] = { 3, 4, 5 };
	uint64_t base_bdev_blockcnt_values[] = { 1, 1024, 1024 * 1024 };
	uint32_t base_bdev_blocklen_values[] = { 512, 4096 };
	uint32_t strip_size_kb_values[] = { 1, 4, 128 };
	uint8_t *num_base_bdevs;
	uint64_t *base_bdev_blockcnt;
	uint32_t *base_bdev_blocklen;
	uint32_t *strip_size_kb;
	struct raid_params params;
	uint64_t params_count;
	int rc;

	params_count = SPDK_COUNTOF(num_base_bdevs_values) *
		       SPDK_COUNTOF(base_bdev_blockcnt_values) *
		       SPDK_COUNTOF(base_bdev_blocklen_values) *
		       SPDK_COUNTOF(strip_size_kb_values);
	rc = raid_test_params_alloc(params_count);
	if (rc) {
		return rc;
	}

	ARRAY_FOR_EACH(num_base_bdevs_values, num_base_bdevs) {
		ARRAY_FOR_EACH(base_bdev_blockcnt_values, base_bdev_blockcnt) {
			ARRAY_FOR_EACH(base_bdev_blocklen_values, base_bdev_blocklen) {
				ARRAY_FOR_EACH(strip_size_kb_values, strip_size_kb) {
					params.num_base_bdevs = *num_base_bdevs;
					params.base_bdev_blockcnt = *base_bdev_blockcnt;
					params.base_bdev_blocklen = *base_bdev_blocklen;
					params.strip_size = *strip_size_kb * 1024 / *base_bdev_blocklen;
					params.md_len = 0;
					if (params.strip_size == 0 ||
					    params.strip_size > *base_bdev_blockcnt) {
						continue;
					}
					raid_test_params_add(&params);
				}
			}
		}
	}

	return 0;
}

static int
test_cleanup(void)
{
	raid_test_params_free();
	return 0;
}

static struct raid_bdev *
create_concat(struct raid_params *params)
{
	struct raid_bdev *raid_bdev = raid_test_create_raid_bdev(params, &g_concat_module);

	CU_ASSERT(concat_start(raid_bdev) == 0);
	return raid_bdev;
}

static void
delete_concat(struct raid_bdev *raid_bdev)
{
	concat_stop(raid_bdev);
	raid_test_delete_raid_bdev(raid_bdev);
}

static void
test_concat_start(void)
{
	struct raid_bdev *raid_bdev;
	struct raid_params *params;
	struct concat_block_range *block_range;
	uint64_t total_blockcnt;
	int i;

	RAID_PARAMS_FOR_EACH(params) {
		raid_bdev = create_concat(params);
		block_range = raid_bdev->module_private;
		total_blockcnt = 0;
		for (i = 0; i < params->num_base_bdevs; i++) {
			CU_ASSERT(block_range[i].start == total_blockcnt);
			CU_ASSERT(block_range[i].length == params->base_bdev_blockcnt);
			total_blockcnt += params->base_bdev_blockcnt;
		}
		delete_concat(raid_bdev);
	}
}

static void
bdev_io_cleanup(struct spdk_bdev_io *bdev_io)
{
	if (bdev_io->u.bdev.iovs) {
		if (bdev_io->u.bdev.iovs->iov_base) {
			free(bdev_io->u.bdev.iovs->iov_base);
		}
		free(bdev_io->u.bdev.iovs);
	}

	free(bdev_io);
}

static void
bdev_io_initialize(struct spdk_bdev_io *bdev_io, struct spdk_io_channel *ch, struct spdk_bdev *bdev,
		   uint64_t lba, uint64_t blocks, int16_t iotype)
{
	struct spdk_bdev_channel *channel = spdk_io_channel_get_ctx(ch);

	bdev_io->bdev = bdev;
	bdev_io->u.bdev.offset_blocks = lba;
	bdev_io->u.bdev.num_blocks = blocks;
	bdev_io->type = iotype;

	if (bdev_io->type == SPDK_BDEV_IO_TYPE_UNMAP || bdev_io->type == SPDK_BDEV_IO_TYPE_FLUSH) {
		return;
	}

	bdev_io->u.bdev.iovcnt = 1;
	bdev_io->u.bdev.iovs = calloc(1, sizeof(struct iovec));
	SPDK_CU_ASSERT_FATAL(bdev_io->u.bdev.iovs != NULL);
	bdev_io->u.bdev.iovs->iov_base = calloc(1, bdev_io->u.bdev.num_blocks * BLOCK_LEN);
	SPDK_CU_ASSERT_FATAL(bdev_io->u.bdev.iovs->iov_base != NULL);
	bdev_io->u.bdev.iovs->iov_len = bdev_io->u.bdev.num_blocks * BLOCK_LEN;
	bdev_io->internal.ch = channel;
	bdev_io->u.bdev.md_buf = (void *)0xAEDFEBAC;
}
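/*
 * Illustrative helper, not part of the concat module's API and not called
 * by the tests: for the equal-sized base bdevs used here, the base bdev
 * index and in-bdev offset for a concat LBA reduce to a simple div/mod.
 * concat itself walks its block_range table instead, but for equal-sized
 * base bdevs the arithmetic below matches what the assertions in
 * submit_and_verify_rw() expect.
 */
static inline void
concat_map_lba_example(uint64_t lba, uint64_t base_blockcnt,
		       uint8_t *base_idx, uint64_t *base_offset)
{
	/* Which base bdev holds this concat LBA... */
	*base_idx = (uint8_t)(lba / base_blockcnt);
	/* ...and at which offset within that base bdev. */
	*base_offset = lba % base_blockcnt;
}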
static void
submit_and_verify_rw(enum CONCAT_IO_TYPE io_type, struct raid_params *params)
{
	struct raid_bdev *raid_bdev;
	struct spdk_bdev_io *bdev_io;
	struct spdk_io_channel *ch;
	struct raid_bdev_io *raid_io;
	struct raid_bdev_io_channel *raid_ch;
	uint64_t lba, blocks;
	int i;

	lba = 0;
	blocks = 1;
	for (i = 0; i < params->num_base_bdevs; i++) {
		init_globals();
		raid_bdev = create_concat(params);
		bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
		SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
		raid_io = (struct raid_bdev_io *)bdev_io->driver_ctx;
		raid_ch = calloc(1, sizeof(struct raid_bdev_io_channel));
		SPDK_CU_ASSERT_FATAL(raid_ch != NULL);
		raid_ch->base_channel = calloc(params->num_base_bdevs,
					       sizeof(struct spdk_io_channel));
		SPDK_CU_ASSERT_FATAL(raid_ch->base_channel != NULL);
		raid_io->raid_ch = raid_ch;
		raid_io->raid_bdev = raid_bdev;
		ch = calloc(1, sizeof(struct spdk_io_channel));
		SPDK_CU_ASSERT_FATAL(ch != NULL);

		switch (io_type) {
		case CONCAT_WRITEV:
			bdev_io_initialize(bdev_io, ch, &raid_bdev->bdev, lba, blocks, SPDK_BDEV_IO_TYPE_WRITE);
			concat_submit_rw_request(raid_io);
			break;
		case CONCAT_READV:
			bdev_io_initialize(bdev_io, ch, &raid_bdev->bdev, lba, blocks, SPDK_BDEV_IO_TYPE_READ);
			concat_submit_rw_request(raid_io);
			break;
		case CONCAT_UNMAP:
			bdev_io_initialize(bdev_io, ch, &raid_bdev->bdev, lba, blocks, SPDK_BDEV_IO_TYPE_UNMAP);
			concat_submit_null_payload_request(raid_io);
			break;
		case CONCAT_FLUSH:
			bdev_io_initialize(bdev_io, ch, &raid_bdev->bdev, lba, blocks, SPDK_BDEV_IO_TYPE_FLUSH);
			concat_submit_null_payload_request(raid_io);
			break;
		default:
			CU_ASSERT(false);
		}

		/*
		 * We submit the request to the first LBA of each underlying device,
		 * so the offset on the underlying device should always be 0.
		 */
		CU_ASSERT(g_req_records.offset_blocks[0] == 0);
		CU_ASSERT(g_req_records.num_blocks[0] == blocks);
		CU_ASSERT(g_req_records.io_type[0] == io_type);
		CU_ASSERT(g_req_records.count == 1);
		CU_ASSERT(g_req_records.md == (void *)0xAEDFEBAC);
		bdev_io_cleanup(bdev_io);
		free(ch);
		free(raid_ch->base_channel);
		free(raid_ch);
		delete_concat(raid_bdev);
		lba += params->base_bdev_blockcnt;
	}
}

static void
test_concat_rw(void)
{
	struct raid_params *params;
	enum CONCAT_IO_TYPE io_type_list[] = { CONCAT_WRITEV, CONCAT_READV };
	enum CONCAT_IO_TYPE io_type;
	size_t i;

	RAID_PARAMS_FOR_EACH(params) {
		for (i = 0; i < SPDK_COUNTOF(io_type_list); i++) {
			io_type = io_type_list[i];
			submit_and_verify_rw(io_type, params);
		}
	}
}
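/*
 * Worked example of the split verified below (illustrative numbers):
 * with base_bdev_blockcnt == 8, submit_and_verify_null_payload() uses
 * lba == 9 and blocks == 9, i.e. concat blocks [9, 18). Base bdev 1
 * holds concat blocks [8, 16) and receives offset 1, length 7
 * (base_bdev_blockcnt - 1); base bdev 2 holds [16, 24) and receives
 * offset 0, length 2.
 */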
static void
submit_and_verify_null_payload(enum CONCAT_IO_TYPE io_type, struct raid_params *params)
{
	struct raid_bdev *raid_bdev;
	struct spdk_bdev_io *bdev_io;
	struct spdk_io_channel *ch;
	struct raid_bdev_io *raid_io;
	struct raid_bdev_io_channel *raid_ch;
	uint64_t lba, blocks;

	/*
	 * In this unit test, all base bdevs have the same block count.
	 * If base_bdev_blockcnt > 1, the request starts on the second
	 * bdev and spans two bdevs.
	 * If base_bdev_blockcnt == 1, the request starts on the third
	 * bdev. In that case, if there are only 3 bdevs, we cannot set
	 * blocks to base_bdev_blockcnt + 1 because the request would run
	 * beyond the end of the last bdev, so we set blocks to 1.
	 */
	lba = params->base_bdev_blockcnt + 1;
	if (params->base_bdev_blockcnt == 1 && params->num_base_bdevs == 3) {
		blocks = 1;
	} else {
		blocks = params->base_bdev_blockcnt + 1;
	}
	init_globals();
	raid_bdev = create_concat(params);
	bdev_io = calloc(1, sizeof(struct spdk_bdev_io) + sizeof(struct raid_bdev_io));
	SPDK_CU_ASSERT_FATAL(bdev_io != NULL);
	raid_io = (struct raid_bdev_io *)bdev_io->driver_ctx;
	raid_ch = calloc(1, sizeof(struct raid_bdev_io_channel));
	SPDK_CU_ASSERT_FATAL(raid_ch != NULL);
	raid_ch->base_channel = calloc(params->num_base_bdevs,
				       sizeof(struct spdk_io_channel));
	SPDK_CU_ASSERT_FATAL(raid_ch->base_channel != NULL);
	raid_io->raid_ch = raid_ch;
	raid_io->raid_bdev = raid_bdev;
	ch = calloc(1, sizeof(struct spdk_io_channel));
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	switch (io_type) {
	case CONCAT_UNMAP:
		bdev_io_initialize(bdev_io, ch, &raid_bdev->bdev, lba, blocks, SPDK_BDEV_IO_TYPE_UNMAP);
		concat_submit_null_payload_request(raid_io);
		break;
	case CONCAT_FLUSH:
		bdev_io_initialize(bdev_io, ch, &raid_bdev->bdev, lba, blocks, SPDK_BDEV_IO_TYPE_FLUSH);
		concat_submit_null_payload_request(raid_io);
		break;
	default:
		CU_ASSERT(false);
	}

	if (params->base_bdev_blockcnt == 1) {
		if (params->num_base_bdevs == 3) {
			CU_ASSERT(g_req_records.count == 1);
			CU_ASSERT(g_req_records.offset_blocks[0] == 0);
			CU_ASSERT(g_req_records.num_blocks[0] == 1);
			CU_ASSERT(g_req_records.io_type[0] == io_type);
		} else {
			CU_ASSERT(g_req_records.count == 2);
			CU_ASSERT(g_req_records.offset_blocks[0] == 0);
			CU_ASSERT(g_req_records.num_blocks[0] == 1);
			CU_ASSERT(g_req_records.io_type[0] == io_type);
			CU_ASSERT(g_req_records.offset_blocks[1] == 0);
			CU_ASSERT(g_req_records.num_blocks[1] == 1);
			CU_ASSERT(g_req_records.io_type[1] == io_type);
		}
	} else {
		CU_ASSERT(g_req_records.count == 2);
		CU_ASSERT(g_req_records.offset_blocks[0] == 1);
		CU_ASSERT(g_req_records.num_blocks[0] == params->base_bdev_blockcnt - 1);
		CU_ASSERT(g_req_records.io_type[0] == io_type);
		CU_ASSERT(g_req_records.offset_blocks[1] == 0);
		CU_ASSERT(g_req_records.num_blocks[1] == 2);
		CU_ASSERT(g_req_records.io_type[1] == io_type);
	}
	bdev_io_cleanup(bdev_io);
	free(ch);
	free(raid_ch->base_channel);
	free(raid_ch);
	delete_concat(raid_bdev);
}

static void
test_concat_null_payload(void)
{
	struct raid_params *params;
	enum CONCAT_IO_TYPE io_type_list[] = { CONCAT_FLUSH, CONCAT_UNMAP };
	enum CONCAT_IO_TYPE io_type;
	size_t i;

	RAID_PARAMS_FOR_EACH(params) {
		for (i = 0; i < SPDK_COUNTOF(io_type_list); i++) {
			io_type = io_type_list[i];
			submit_and_verify_null_payload(io_type, params);
		}
	}
}

int
main(int argc, char **argv)
{
	CU_pSuite suite = NULL;
	unsigned int num_failures;

	CU_initialize_registry();

	suite = CU_add_suite("concat", test_setup, test_cleanup);
	CU_ADD_TEST(suite, test_concat_start);
	CU_ADD_TEST(suite, test_concat_rw);
	CU_ADD_TEST(suite, test_concat_null_payload);

	num_failures = spdk_ut_run_tests(argc, argv, NULL);
	CU_cleanup_registry();
	return num_failures;
}