/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) Intel Corporation.
 * All rights reserved.
 * Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "bdev_raid.h"

#include "spdk/env.h"
#include "spdk/thread.h"
#include "spdk/string.h"
#include "spdk/util.h"

#include "spdk/log.h"

/*
 * brief:
 * raid0_bdev_io_completion function is called by lower layers to notify the raid
 * module that a particular bdev_io is completed.
 * params:
 * bdev_io - pointer to bdev io submitted to lower layers, like child io
 * success - bdev_io status
 * cb_arg - function callback context (parent raid_bdev_io)
 * returns:
 * none
 */
static void
raid0_bdev_io_completion(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	struct raid_bdev_io *raid_io = cb_arg;

	spdk_bdev_free_io(bdev_io);

	if (success) {
		raid_bdev_io_complete(raid_io, SPDK_BDEV_IO_STATUS_SUCCESS);
	} else {
		raid_bdev_io_complete(raid_io, SPDK_BDEV_IO_STATUS_FAILED);
	}
}

static void raid0_submit_rw_request(struct raid_bdev_io *raid_io);

static void
_raid0_submit_rw_request(void *_raid_io)
{
	struct raid_bdev_io *raid_io = _raid_io;

	raid0_submit_rw_request(raid_io);
}

/*
 * brief:
 * raid0_submit_rw_request function is used to submit I/O to the correct
 * member disk for raid0 bdevs.
 * params:
 * raid_io
 * returns:
 * none
 */
static void
raid0_submit_rw_request(struct raid_bdev_io *raid_io)
{
	struct spdk_bdev_io *bdev_io = spdk_bdev_io_from_ctx(raid_io);
	struct raid_bdev_io_channel *raid_ch = raid_io->raid_ch;
	struct raid_bdev *raid_bdev = raid_io->raid_bdev;
	uint64_t pd_strip;
	uint32_t offset_in_strip;
	uint64_t pd_lba;
	uint64_t pd_blocks;
	uint8_t pd_idx;
	int ret = 0;
	uint64_t start_strip;
	uint64_t end_strip;
	struct raid_base_bdev_info *base_info;
	struct spdk_io_channel *base_ch;

	start_strip = bdev_io->u.bdev.offset_blocks >> raid_bdev->strip_size_shift;
	end_strip = (bdev_io->u.bdev.offset_blocks + bdev_io->u.bdev.num_blocks - 1) >>
		    raid_bdev->strip_size_shift;
	if (start_strip != end_strip && raid_bdev->num_base_bdevs > 1) {
		assert(false);
		SPDK_ERRLOG("I/O spans strip boundary!\n");
		raid_bdev_io_complete(raid_io, SPDK_BDEV_IO_STATUS_FAILED);
		return;
	}

	pd_strip = start_strip / raid_bdev->num_base_bdevs;
	pd_idx = start_strip % raid_bdev->num_base_bdevs;
	offset_in_strip = bdev_io->u.bdev.offset_blocks & (raid_bdev->strip_size - 1);
	pd_lba = (pd_strip << raid_bdev->strip_size_shift) + offset_in_strip;
	pd_blocks = bdev_io->u.bdev.num_blocks;
	base_info = &raid_bdev->base_bdev_info[pd_idx];
	if (base_info->desc == NULL) {
		SPDK_ERRLOG("base bdev desc null for pd_idx %u\n", pd_idx);
		assert(0);
	}

	/*
	 * Submit child io to the bdev layer using base bdev descriptors, base
	 * bdev lba, base bdev child io length in blocks, buffer, completion
	 * function and function callback context
	 */
	assert(raid_ch != NULL);
	assert(raid_ch->base_channel);
	base_ch = raid_ch->base_channel[pd_idx];
	if (bdev_io->type == SPDK_BDEV_IO_TYPE_READ) {
		ret = spdk_bdev_readv_blocks_ext(base_info->desc, base_ch,
						 bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
						 pd_lba, pd_blocks, raid0_bdev_io_completion,
						 raid_io, bdev_io->u.bdev.ext_opts);
	} else if (bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) {
		ret = spdk_bdev_writev_blocks_ext(base_info->desc, base_ch,
						  bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
						  pd_lba, pd_blocks, raid0_bdev_io_completion,
						  raid_io, bdev_io->u.bdev.ext_opts);
	} else {
		SPDK_ERRLOG("Received unsupported io type %u\n", bdev_io->type);
		assert(0);
	}

	if (ret == -ENOMEM) {
		raid_bdev_queue_io_wait(raid_io, base_info->bdev, base_ch,
					_raid0_submit_rw_request);
	} else if (ret != 0) {
		SPDK_ERRLOG("bdev io submit error not due to ENOMEM; this should not happen\n");
		assert(false);
		raid_bdev_io_complete(raid_io, SPDK_BDEV_IO_STATUS_FAILED);
	}
}

/* raid0 IO range */
struct raid_bdev_io_range {
	uint64_t strip_size;
	uint64_t start_strip_in_disk;
	uint64_t end_strip_in_disk;
	uint64_t start_offset_in_strip;
	uint64_t end_offset_in_strip;
	uint8_t start_disk;
	uint8_t end_disk;
	uint8_t n_disks_involved;
};

static inline void
_raid0_get_io_range(struct raid_bdev_io_range *io_range,
		    uint8_t num_base_bdevs, uint64_t strip_size, uint64_t strip_size_shift,
		    uint64_t offset_blocks, uint64_t num_blocks)
{
	uint64_t start_strip;
	uint64_t end_strip;

	io_range->strip_size = strip_size;

	/* The start and end strip index in raid0 bdev scope */
	start_strip = offset_blocks >> strip_size_shift;
	end_strip = (offset_blocks + num_blocks - 1) >> strip_size_shift;
	io_range->start_strip_in_disk = start_strip / num_base_bdevs;
	io_range->end_strip_in_disk = end_strip / num_base_bdevs;

	/* The first strip may have an unaligned start LBA offset.
	 * The end strip may have an unaligned end LBA offset.
	 * Strips between them are guaranteed to have offset and length aligned
	 * to strip boundaries.
	 */
	io_range->start_offset_in_strip = offset_blocks % strip_size;
	io_range->end_offset_in_strip = (offset_blocks + num_blocks - 1) % strip_size;

	/* The base bdev indexes in which start and end strips are located */
	io_range->start_disk = start_strip % num_base_bdevs;
	io_range->end_disk = end_strip % num_base_bdevs;

	/* Calculate how many base bdevs are involved in the io operation.
	 * The number of base bdevs involved is between 1 and num_base_bdevs.
	 * It will be 1 if the first strip and the last strip are the same one.
	 */
	io_range->n_disks_involved = spdk_min((end_strip - start_strip + 1), num_base_bdevs);
}
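
/*
 * Illustrative example with assumed values: for num_base_bdevs = 4,
 * strip_size = 128 (strip_size_shift = 7), offset_blocks = 100 and
 * num_blocks = 600, the last block is 699, so start_strip = 100 >> 7 = 0 and
 * end_strip = 699 >> 7 = 5. This yields start_strip_in_disk = 0,
 * end_strip_in_disk = 1, start_offset_in_strip = 100,
 * end_offset_in_strip = 699 % 128 = 59, start_disk = 0, end_disk = 1 and
 * n_disks_involved = spdk_min(6, 4) = 4.
 */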

static inline void
_raid0_split_io_range(struct raid_bdev_io_range *io_range, uint8_t disk_idx,
		      uint64_t *_offset_in_disk, uint64_t *_nblocks_in_disk)
{
	uint64_t n_strips_in_disk;
	uint64_t start_offset_in_disk;
	uint64_t end_offset_in_disk;
	uint64_t offset_in_disk;
	uint64_t nblocks_in_disk;
	uint64_t start_strip_in_disk;
	uint64_t end_strip_in_disk;

	start_strip_in_disk = io_range->start_strip_in_disk;
	if (disk_idx < io_range->start_disk) {
		start_strip_in_disk += 1;
	}

	end_strip_in_disk = io_range->end_strip_in_disk;
	if (disk_idx > io_range->end_disk) {
		end_strip_in_disk -= 1;
	}

	assert(end_strip_in_disk >= start_strip_in_disk);
	n_strips_in_disk = end_strip_in_disk - start_strip_in_disk + 1;

	if (disk_idx == io_range->start_disk) {
		start_offset_in_disk = io_range->start_offset_in_strip;
	} else {
		start_offset_in_disk = 0;
	}

	if (disk_idx == io_range->end_disk) {
		end_offset_in_disk = io_range->end_offset_in_strip;
	} else {
		end_offset_in_disk = io_range->strip_size - 1;
	}

	offset_in_disk = start_offset_in_disk + start_strip_in_disk * io_range->strip_size;
	nblocks_in_disk = (n_strips_in_disk - 1) * io_range->strip_size
			  + end_offset_in_disk - start_offset_in_disk + 1;

	SPDK_DEBUGLOG(bdev_raid0,
		      "raid_bdev (strip_size 0x%" PRIx64 ") splits IO to base_bdev (%u) at (0x%" PRIx64 ", 0x%" PRIx64 ").\n",
		      io_range->strip_size, disk_idx, offset_in_disk, nblocks_in_disk);

	*_offset_in_disk = offset_in_disk;
	*_nblocks_in_disk = nblocks_in_disk;
}
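
/*
 * Continuing the example above: for disk_idx = 0 (the start_disk),
 * start_strip_in_disk = 0, end_strip_in_disk = 1, start_offset_in_disk = 100
 * and end_offset_in_disk = 127, so that disk is asked for offset_in_disk = 100
 * and nblocks_in_disk = 1 * 128 + (127 - 100 + 1) = 156. For disk_idx = 2,
 * which lies past the end_disk, end_strip_in_disk drops to 0 and the disk is
 * asked for exactly its one full strip: offset 0, 128 blocks.
 */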

static void raid0_submit_null_payload_request(struct raid_bdev_io *raid_io);

static void
_raid0_submit_null_payload_request(void *_raid_io)
{
	struct raid_bdev_io *raid_io = _raid_io;

	raid0_submit_null_payload_request(raid_io);
}

static void
raid0_base_io_complete(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	struct raid_bdev_io *raid_io = cb_arg;

	raid_bdev_io_complete_part(raid_io, 1, success ?
				   SPDK_BDEV_IO_STATUS_SUCCESS :
				   SPDK_BDEV_IO_STATUS_FAILED);

	spdk_bdev_free_io(bdev_io);
}

/*
 * brief:
 * raid0_submit_null_payload_request function submits the next batch of
 * io requests with range but without payload, like FLUSH and UNMAP, to member disks;
 * it will submit as many as possible unless a base io request fails with -ENOMEM,
 * in which case it will queue itself for later submission.
 * params:
 * raid_io - pointer to the parent raid_bdev_io on the raid bdev device
 * returns:
 * none
 */
static void
raid0_submit_null_payload_request(struct raid_bdev_io *raid_io)
{
	struct spdk_bdev_io *bdev_io;
	struct raid_bdev *raid_bdev;
	struct raid_bdev_io_range io_range;
	int ret;
	struct raid_base_bdev_info *base_info;
	struct spdk_io_channel *base_ch;

	bdev_io = spdk_bdev_io_from_ctx(raid_io);
	raid_bdev = raid_io->raid_bdev;

	_raid0_get_io_range(&io_range, raid_bdev->num_base_bdevs,
			    raid_bdev->strip_size, raid_bdev->strip_size_shift,
			    bdev_io->u.bdev.offset_blocks, bdev_io->u.bdev.num_blocks);

	if (raid_io->base_bdev_io_remaining == 0) {
		raid_io->base_bdev_io_remaining = io_range.n_disks_involved;
	}

	while (raid_io->base_bdev_io_submitted < io_range.n_disks_involved) {
		uint8_t disk_idx;
		uint64_t offset_in_disk;
		uint64_t nblocks_in_disk;

		/* Base bdevs are walked from start_disk to end_disk, wrapping
		 * around the array, so the index of start_disk may be larger
		 * than end_disk's.
		 */
		disk_idx = (io_range.start_disk + raid_io->base_bdev_io_submitted) % raid_bdev->num_base_bdevs;
		base_info = &raid_bdev->base_bdev_info[disk_idx];
		base_ch = raid_io->raid_ch->base_channel[disk_idx];

		_raid0_split_io_range(&io_range, disk_idx, &offset_in_disk, &nblocks_in_disk);

		switch (bdev_io->type) {
		case SPDK_BDEV_IO_TYPE_UNMAP:
			ret = spdk_bdev_unmap_blocks(base_info->desc, base_ch,
						     offset_in_disk, nblocks_in_disk,
						     raid0_base_io_complete, raid_io);
			break;

		case SPDK_BDEV_IO_TYPE_FLUSH:
			ret = spdk_bdev_flush_blocks(base_info->desc, base_ch,
						     offset_in_disk, nblocks_in_disk,
						     raid0_base_io_complete, raid_io);
			break;

		default:
			SPDK_ERRLOG("submit request, invalid io type with null payload %u\n", bdev_io->type);
			assert(false);
			ret = -EIO;
		}

		if (ret == 0) {
			raid_io->base_bdev_io_submitted++;
		} else if (ret == -ENOMEM) {
			raid_bdev_queue_io_wait(raid_io, base_info->bdev, base_ch,
						_raid0_submit_null_payload_request);
			return;
		} else {
			SPDK_ERRLOG("bdev io submit error not due to ENOMEM; this should not happen\n");
			assert(false);
			raid_bdev_io_complete(raid_io, SPDK_BDEV_IO_STATUS_FAILED);
			return;
		}
	}
}
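
/*
 * Note on resubmission, as implemented above: when a child request fails with
 * -ENOMEM, raid_io->base_bdev_io_submitted is left untouched, so when the io
 * is re-driven via _raid0_submit_null_payload_request() the loop resumes at
 * the first disk that was not yet submitted. For example, with
 * n_disks_involved = 4, an -ENOMEM on the third submission leaves
 * base_bdev_io_submitted at 2 and the retry starts from disk index
 * (start_disk + 2) % num_base_bdevs. base_bdev_io_remaining is initialized
 * only while it is still zero, so the per-child completion accounting done by
 * raid_bdev_io_complete_part() in raid0_base_io_complete() stays correct
 * across retries.
 */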

static int
raid0_start(struct raid_bdev *raid_bdev)
{
	uint64_t min_blockcnt = UINT64_MAX;
	struct raid_base_bdev_info *base_info;

	RAID_FOR_EACH_BASE_BDEV(raid_bdev, base_info) {
		/* Calculate minimum block count from all base bdevs */
		min_blockcnt = spdk_min(min_blockcnt, base_info->bdev->blockcnt);
	}

	/*
	 * Take the minimum block count based approach where the total block
	 * count of the raid bdev is the number of base bdevs times the minimum
	 * block count of any base bdev, rounded down to a strip boundary.
	 */
	SPDK_DEBUGLOG(bdev_raid0, "min blockcount %" PRIu64 ", numbasedev %u, strip size shift %u\n",
		      min_blockcnt, raid_bdev->num_base_bdevs, raid_bdev->strip_size_shift);
	raid_bdev->bdev.blockcnt = ((min_blockcnt >> raid_bdev->strip_size_shift) <<
				    raid_bdev->strip_size_shift) * raid_bdev->num_base_bdevs;

	if (raid_bdev->num_base_bdevs > 1) {
		raid_bdev->bdev.optimal_io_boundary = raid_bdev->strip_size;
		raid_bdev->bdev.split_on_optimal_io_boundary = true;
	} else {
		/* No need to split reads/writes on a single-bdev RAID level. */
		raid_bdev->bdev.optimal_io_boundary = 0;
		raid_bdev->bdev.split_on_optimal_io_boundary = false;
	}

	return 0;
}

static struct raid_bdev_module g_raid0_module = {
	.level = RAID0,
	.base_bdevs_min = 1,
	.start = raid0_start,
	.submit_rw_request = raid0_submit_rw_request,
	.submit_null_payload_request = raid0_submit_null_payload_request,
};
RAID_MODULE_REGISTER(&g_raid0_module)

SPDK_LOG_REGISTER_COMPONENT(bdev_raid0)