/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2019 Intel Corporation.
 * All rights reserved.
 * Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "bdev_raid.h"

#include "spdk/env.h"
#include "spdk/thread.h"
#include "spdk/string.h"
#include "spdk/util.h"

#include "spdk/log.h"

/*
 * brief:
 * raid0_bdev_io_completion function is called by the lower layers to notify
 * the raid module that a particular bdev_io has completed.
 * params:
 * bdev_io - pointer to the bdev io submitted to the lower layers, i.e. the child io
 * success - bdev_io status
 * cb_arg - function callback context (parent raid_bdev_io)
 * returns:
 * none
 */
static void
raid0_bdev_io_completion(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	struct raid_bdev_io *raid_io = cb_arg;

	spdk_bdev_free_io(bdev_io);

	if (success) {
		raid_bdev_io_complete(raid_io, SPDK_BDEV_IO_STATUS_SUCCESS);
	} else {
		raid_bdev_io_complete(raid_io, SPDK_BDEV_IO_STATUS_FAILED);
	}
}

static void raid0_submit_rw_request(struct raid_bdev_io *raid_io);

static void
_raid0_submit_rw_request(void *_raid_io)
{
	struct raid_bdev_io *raid_io = _raid_io;

	raid0_submit_rw_request(raid_io);
}

/*
 * brief:
 * raid0_submit_rw_request function submits an I/O request to the correct
 * member disk of a raid0 bdev.
 * params:
 * raid_io - pointer to the parent raid_bdev_io
 * returns:
 * none
 */
static void
raid0_submit_rw_request(struct raid_bdev_io *raid_io)
{
	struct spdk_bdev_io *bdev_io = spdk_bdev_io_from_ctx(raid_io);
	struct raid_bdev_io_channel *raid_ch = raid_io->raid_ch;
	struct raid_bdev *raid_bdev = raid_io->raid_bdev;
	uint64_t pd_strip;
	uint32_t offset_in_strip;
	uint64_t pd_lba;
	uint64_t pd_blocks;
	uint8_t pd_idx;
	int ret = 0;
	uint64_t start_strip;
	uint64_t end_strip;
	struct raid_base_bdev_info *base_info;
	struct spdk_io_channel *base_ch;

	start_strip = bdev_io->u.bdev.offset_blocks >> raid_bdev->strip_size_shift;
	end_strip = (bdev_io->u.bdev.offset_blocks + bdev_io->u.bdev.num_blocks - 1) >>
		    raid_bdev->strip_size_shift;
	if (start_strip != end_strip && raid_bdev->num_base_bdevs > 1) {
		assert(false);
		SPDK_ERRLOG("I/O spans strip boundary!\n");
		raid_bdev_io_complete(raid_io, SPDK_BDEV_IO_STATUS_FAILED);
		return;
	}

	pd_strip = start_strip / raid_bdev->num_base_bdevs;
	pd_idx = start_strip % raid_bdev->num_base_bdevs;
	offset_in_strip = bdev_io->u.bdev.offset_blocks & (raid_bdev->strip_size - 1);
	pd_lba = (pd_strip << raid_bdev->strip_size_shift) + offset_in_strip;
	pd_blocks = bdev_io->u.bdev.num_blocks;
	base_info = &raid_bdev->base_bdev_info[pd_idx];
	if (base_info->desc == NULL) {
		/* Fail the parent I/O here rather than falling through with a
		 * NULL descriptor when asserts are compiled out. */
		SPDK_ERRLOG("base bdev desc null for pd_idx %u\n", pd_idx);
		assert(0);
		raid_bdev_io_complete(raid_io, SPDK_BDEV_IO_STATUS_FAILED);
		return;
	}
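
	/*
	 * Illustrative worked example of the mapping above (hypothetical
	 * numbers, not taken from any real configuration): with
	 * num_base_bdevs = 4 and strip_size = 32 blocks (strip_size_shift = 5),
	 * an I/O at offset_blocks = 100 maps as:
	 *   start_strip     = 100 >> 5        = 3
	 *   pd_strip        = 3 / 4           = 0  (strip index within the member disk)
	 *   pd_idx          = 3 % 4           = 3  (member disk holding the strip)
	 *   offset_in_strip = 100 & (32 - 1)  = 4
	 *   pd_lba          = (0 << 5) + 4    = 4  (LBA submitted to member disk 3)
	 */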

	/*
	 * Submit the child io to the bdev layer using the base bdev
	 * descriptor, base bdev lba, child io length in blocks, buffer,
	 * completion function and function callback context.
	 */
	assert(raid_ch != NULL);
	assert(raid_ch->base_channel);
	base_ch = raid_ch->base_channel[pd_idx];
	if (bdev_io->type == SPDK_BDEV_IO_TYPE_READ) {
		if (bdev_io->u.bdev.ext_opts != NULL) {
			ret = spdk_bdev_readv_blocks_ext(base_info->desc, base_ch,
							 bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
							 pd_lba, pd_blocks, raid0_bdev_io_completion,
							 raid_io, bdev_io->u.bdev.ext_opts);
		} else {
			ret = spdk_bdev_readv_blocks_with_md(base_info->desc, base_ch,
							     bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
							     bdev_io->u.bdev.md_buf,
							     pd_lba, pd_blocks,
							     raid0_bdev_io_completion, raid_io);
		}
	} else if (bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) {
		if (bdev_io->u.bdev.ext_opts != NULL) {
			ret = spdk_bdev_writev_blocks_ext(base_info->desc, base_ch,
							  bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
							  pd_lba, pd_blocks, raid0_bdev_io_completion,
							  raid_io, bdev_io->u.bdev.ext_opts);
		} else {
			ret = spdk_bdev_writev_blocks_with_md(base_info->desc, base_ch,
							      bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
							      bdev_io->u.bdev.md_buf,
							      pd_lba, pd_blocks,
							      raid0_bdev_io_completion, raid_io);
		}
	} else {
		SPDK_ERRLOG("Received unsupported io type %u\n", bdev_io->type);
		assert(0);
	}

	if (ret == -ENOMEM) {
		raid_bdev_queue_io_wait(raid_io, base_info->bdev, base_ch,
					_raid0_submit_rw_request);
	} else if (ret != 0) {
		SPDK_ERRLOG("bdev io submit error not due to ENOMEM, it should not happen\n");
		assert(false);
		raid_bdev_io_complete(raid_io, SPDK_BDEV_IO_STATUS_FAILED);
	}
}

/* raid0 IO range */
struct raid_bdev_io_range {
	uint64_t strip_size;
	uint64_t start_strip_in_disk;
	uint64_t end_strip_in_disk;
	uint64_t start_offset_in_strip;
	uint64_t end_offset_in_strip;
	uint8_t start_disk;
	uint8_t end_disk;
	uint8_t n_disks_involved;
};

static inline void
_raid0_get_io_range(struct raid_bdev_io_range *io_range,
		    uint8_t num_base_bdevs, uint64_t strip_size, uint64_t strip_size_shift,
		    uint64_t offset_blocks, uint64_t num_blocks)
{
	uint64_t start_strip;
	uint64_t end_strip;
	uint64_t total_blocks;

	io_range->strip_size = strip_size;
	/* total_blocks is the inclusive end block of the range; the
	 * subtraction is skipped when num_blocks is 0. */
	total_blocks = offset_blocks + num_blocks - (num_blocks > 0);

	/* The start and end strip index in raid0 bdev scope */
	start_strip = offset_blocks >> strip_size_shift;
	end_strip = total_blocks >> strip_size_shift;
	io_range->start_strip_in_disk = start_strip / num_base_bdevs;
	io_range->end_strip_in_disk = end_strip / num_base_bdevs;

	/* The first strip may have an unaligned start LBA offset.
	 * The end strip may have an unaligned end LBA offset.
	 * Strips between them certainly have offset and length aligned to
	 * strip boundaries.
	 */
	io_range->start_offset_in_strip = offset_blocks % strip_size;
	io_range->end_offset_in_strip = total_blocks % strip_size;

	/* The base bdev indexes in which the start and end strips are located */
	io_range->start_disk = start_strip % num_base_bdevs;
	io_range->end_disk = end_strip % num_base_bdevs;

	/* Calculate how many base_bdevs are involved in the io operation.
	 * The number of base bdevs involved is between 1 and num_base_bdevs.
	 * It will be 1 if the first strip and the last strip are the same one.
	 */
	io_range->n_disks_involved = spdk_min((end_strip - start_strip + 1), num_base_bdevs);
}
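
/*
 * Illustrative worked example for _raid0_get_io_range() (hypothetical numbers,
 * not taken from the surrounding code): with num_base_bdevs = 3 and
 * strip_size = 8 blocks (strip_size_shift = 3), an I/O with offset_blocks = 10
 * and num_blocks = 30 yields:
 *   total_blocks          = 10 + 30 - 1 = 39  (inclusive end block)
 *   start_strip           = 10 >> 3 = 1,  end_strip = 39 >> 3 = 4
 *   start_strip_in_disk   = 1 / 3 = 0,    end_strip_in_disk = 4 / 3 = 1
 *   start_offset_in_strip = 10 % 8 = 2,   end_offset_in_strip = 39 % 8 = 7
 *   start_disk            = 1 % 3 = 1,    end_disk = 4 % 3 = 1
 *   n_disks_involved      = spdk_min(4 - 1 + 1, 3) = 3
 */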

static inline void
_raid0_split_io_range(struct raid_bdev_io_range *io_range, uint8_t disk_idx,
		      uint64_t *_offset_in_disk, uint64_t *_nblocks_in_disk)
{
	uint64_t n_strips_in_disk;
	uint64_t start_offset_in_disk;
	uint64_t end_offset_in_disk;
	uint64_t offset_in_disk;
	uint64_t nblocks_in_disk;
	uint64_t start_strip_in_disk;
	uint64_t end_strip_in_disk;

	start_strip_in_disk = io_range->start_strip_in_disk;
	if (disk_idx < io_range->start_disk) {
		start_strip_in_disk += 1;
	}

	end_strip_in_disk = io_range->end_strip_in_disk;
	if (disk_idx > io_range->end_disk) {
		end_strip_in_disk -= 1;
	}

	assert(end_strip_in_disk >= start_strip_in_disk);
	n_strips_in_disk = end_strip_in_disk - start_strip_in_disk + 1;

	if (disk_idx == io_range->start_disk) {
		start_offset_in_disk = io_range->start_offset_in_strip;
	} else {
		start_offset_in_disk = 0;
	}

	if (disk_idx == io_range->end_disk) {
		end_offset_in_disk = io_range->end_offset_in_strip;
	} else {
		end_offset_in_disk = io_range->strip_size - 1;
	}

	offset_in_disk = start_offset_in_disk + start_strip_in_disk * io_range->strip_size;
	nblocks_in_disk = (n_strips_in_disk - 1) * io_range->strip_size
			  + end_offset_in_disk - start_offset_in_disk + 1;

	SPDK_DEBUGLOG(bdev_raid0,
		      "raid_bdev (strip_size 0x%" PRIx64 ") splits IO to base_bdev (%u) at (0x%" PRIx64 ", 0x%" PRIx64 ").\n",
		      io_range->strip_size, disk_idx, offset_in_disk, nblocks_in_disk);

	*_offset_in_disk = offset_in_disk;
	*_nblocks_in_disk = nblocks_in_disk;
}
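
/*
 * Continuing the hypothetical example above (3 disks, strip_size = 8,
 * offset_blocks = 10, num_blocks = 30), _raid0_split_io_range() produces:
 *   disk 1 (start_disk and end_disk): strips 0..1, offset_in_disk = 2,
 *     nblocks_in_disk = (2 - 1) * 8 + 7 - 2 + 1 = 14
 *   disk 2 (> end_disk):   strip 0 only, offset_in_disk = 0, nblocks_in_disk = 8
 *   disk 0 (< start_disk): strip 1 only, offset_in_disk = 8, nblocks_in_disk = 8
 * The per-disk block counts sum to the original num_blocks: 14 + 8 + 8 = 30.
 */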

static void raid0_submit_null_payload_request(struct raid_bdev_io *raid_io);

static void
_raid0_submit_null_payload_request(void *_raid_io)
{
	struct raid_bdev_io *raid_io = _raid_io;

	raid0_submit_null_payload_request(raid_io);
}

static void
raid0_base_io_complete(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	struct raid_bdev_io *raid_io = cb_arg;

	raid_bdev_io_complete_part(raid_io, 1, success ?
				   SPDK_BDEV_IO_STATUS_SUCCESS :
				   SPDK_BDEV_IO_STATUS_FAILED);

	spdk_bdev_free_io(bdev_io);
}

/*
 * brief:
 * raid0_submit_null_payload_request function submits the next batch of
 * io requests that carry a range but no payload, like FLUSH and UNMAP, to the
 * member disks; it submits as many as possible unless one base io request
 * fails with -ENOMEM, in which case it queues itself for later submission.
 * params:
 * raid_io - pointer to the parent raid_bdev_io on the raid bdev device
 * returns:
 * none
 */
static void
raid0_submit_null_payload_request(struct raid_bdev_io *raid_io)
{
	struct spdk_bdev_io *bdev_io;
	struct raid_bdev *raid_bdev;
	struct raid_bdev_io_range io_range;
	int ret;
	struct raid_base_bdev_info *base_info;
	struct spdk_io_channel *base_ch;

	bdev_io = spdk_bdev_io_from_ctx(raid_io);
	raid_bdev = raid_io->raid_bdev;

	_raid0_get_io_range(&io_range, raid_bdev->num_base_bdevs,
			    raid_bdev->strip_size, raid_bdev->strip_size_shift,
			    bdev_io->u.bdev.offset_blocks, bdev_io->u.bdev.num_blocks);

	if (raid_io->base_bdev_io_remaining == 0) {
		raid_io->base_bdev_io_remaining = io_range.n_disks_involved;
	}

	while (raid_io->base_bdev_io_submitted < io_range.n_disks_involved) {
		uint8_t disk_idx;
		uint64_t offset_in_disk;
		uint64_t nblocks_in_disk;

		/* Base bdevs are walked from start_disk to end_disk, wrapping
		 * around modulo num_base_bdevs; the index of start_disk may be
		 * larger than end_disk's.
		 */
		disk_idx = (io_range.start_disk + raid_io->base_bdev_io_submitted) % raid_bdev->num_base_bdevs;
		base_info = &raid_bdev->base_bdev_info[disk_idx];
		base_ch = raid_io->raid_ch->base_channel[disk_idx];

		_raid0_split_io_range(&io_range, disk_idx, &offset_in_disk, &nblocks_in_disk);

		switch (bdev_io->type) {
		case SPDK_BDEV_IO_TYPE_UNMAP:
			ret = spdk_bdev_unmap_blocks(base_info->desc, base_ch,
						     offset_in_disk, nblocks_in_disk,
						     raid0_base_io_complete, raid_io);
			break;

		case SPDK_BDEV_IO_TYPE_FLUSH:
			ret = spdk_bdev_flush_blocks(base_info->desc, base_ch,
						     offset_in_disk, nblocks_in_disk,
						     raid0_base_io_complete, raid_io);
			break;

		default:
			SPDK_ERRLOG("submit request, invalid io type with null payload %u\n", bdev_io->type);
			assert(false);
			ret = -EIO;
		}

		if (ret == 0) {
			raid_io->base_bdev_io_submitted++;
		} else if (ret == -ENOMEM) {
			/* base_bdev_io_submitted is preserved, so the queued
			 * retry resumes from the first unsubmitted member disk. */
			raid_bdev_queue_io_wait(raid_io, base_info->bdev, base_ch,
						_raid0_submit_null_payload_request);
			return;
		} else {
			SPDK_ERRLOG("bdev io submit error not due to ENOMEM, it should not happen\n");
			assert(false);
			raid_bdev_io_complete(raid_io, SPDK_BDEV_IO_STATUS_FAILED);
			return;
		}
	}
}

static uint64_t
raid0_calculate_blockcnt(struct raid_bdev *raid_bdev)
{
	uint64_t min_blockcnt = UINT64_MAX;
	struct raid_base_bdev_info *base_info;

	RAID_FOR_EACH_BASE_BDEV(raid_bdev, base_info) {
		/* Calculate the minimum block count across all base bdevs */
		min_blockcnt = spdk_min(min_blockcnt, base_info->bdev->blockcnt);
	}

	/*
	 * Take the minimum block count based approach: the total block count
	 * of the raid bdev is the minimum block count of any base bdev,
	 * rounded down to a strip boundary, times the number of base bdevs.
	 */
	SPDK_DEBUGLOG(bdev_raid0, "min blockcount %" PRIu64 ", numbasedev %u, strip size shift %u\n",
		      min_blockcnt, raid_bdev->num_base_bdevs, raid_bdev->strip_size_shift);

	return ((min_blockcnt >> raid_bdev->strip_size_shift) <<
		raid_bdev->strip_size_shift) * raid_bdev->num_base_bdevs;
}
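
/*
 * Illustrative arithmetic (hypothetical numbers): with num_base_bdevs = 4,
 * strip_size_shift = 5 (32-block strips) and a smallest base bdev of
 * min_blockcnt = 1000 blocks, the raid0 bdev exposes
 * ((1000 >> 5) << 5) * 4 = 992 * 4 = 3968 blocks; the 8 trailing blocks of
 * each member that do not fill a whole strip are left unused.
 */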

static int
raid0_start(struct raid_bdev *raid_bdev)
{
	raid_bdev->bdev.blockcnt = raid0_calculate_blockcnt(raid_bdev);

	if (raid_bdev->num_base_bdevs > 1) {
		raid_bdev->bdev.optimal_io_boundary = raid_bdev->strip_size;
		raid_bdev->bdev.split_on_optimal_io_boundary = true;
	} else {
		/* Do not need to split reads/writes on single bdev RAID modules. */
		raid_bdev->bdev.optimal_io_boundary = 0;
		raid_bdev->bdev.split_on_optimal_io_boundary = false;
	}

	return 0;
}

static struct raid_bdev_module g_raid0_module = {
	.level = RAID0,
	.base_bdevs_min = 1,
	.start = raid0_start,
	.submit_rw_request = raid0_submit_rw_request,
	.submit_null_payload_request = raid0_submit_null_payload_request,
};
RAID_MODULE_REGISTER(&g_raid0_module)

SPDK_LOG_REGISTER_COMPONENT(bdev_raid0)
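
/*
 * Usage sketch (illustrative, not part of this module): a raid0 bdev on top
 * of two NVMe bdevs can be created at runtime with SPDK's bdev_raid_create
 * RPC, e.g.:
 *
 *   scripts/rpc.py bdev_raid_create -n Raid0 -z 64 -r 0 -b "Nvme0n1 Nvme1n1"
 *
 * where -z is the strip size in KiB; the bdev names here are hypothetical and
 * the exact flags may vary between SPDK versions.
 */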