/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2019 Intel Corporation.
 * All rights reserved.
 * Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "bdev_raid.h"

#include "spdk/env.h"
#include "spdk/thread.h"
#include "spdk/string.h"
#include "spdk/util.h"

#include "spdk/log.h"

/*
 * brief:
 * raid0_bdev_io_completion function is called by lower layers to notify raid
 * module that particular bdev_io is completed.
 * params:
 * bdev_io - pointer to bdev io submitted to lower layers, like child io
 * success - bdev_io status
 * cb_arg - function callback context (parent raid_bdev_io)
 * returns:
 * none
 */
static void
raid0_bdev_io_completion(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	struct raid_bdev_io *raid_io = cb_arg;

	spdk_bdev_free_io(bdev_io);

	if (success) {
		raid_bdev_io_complete(raid_io, SPDK_BDEV_IO_STATUS_SUCCESS);
	} else {
		raid_bdev_io_complete(raid_io, SPDK_BDEV_IO_STATUS_FAILED);
	}
}

static void raid0_submit_rw_request(struct raid_bdev_io *raid_io);

static void
_raid0_submit_rw_request(void *_raid_io)
{
	struct raid_bdev_io *raid_io = _raid_io;

	raid0_submit_rw_request(raid_io);
}

/*
 * brief:
 * raid0_submit_rw_request function is used to submit I/O to the correct
 * member disk for raid0 bdevs.
 * params:
 * raid_io
 * returns:
 * none
 */
static void
raid0_submit_rw_request(struct raid_bdev_io *raid_io)
{
	struct spdk_bdev_io *bdev_io = spdk_bdev_io_from_ctx(raid_io);
	struct raid_bdev_io_channel *raid_ch = raid_io->raid_ch;
	struct raid_bdev *raid_bdev = raid_io->raid_bdev;
	uint64_t pd_strip;
	uint32_t offset_in_strip;
	uint64_t pd_lba;
	uint64_t pd_blocks;
	uint8_t pd_idx;
	int ret = 0;
	uint64_t start_strip;
	uint64_t end_strip;
	struct raid_base_bdev_info *base_info;
	struct spdk_io_channel *base_ch;

	start_strip = bdev_io->u.bdev.offset_blocks >> raid_bdev->strip_size_shift;
	end_strip = (bdev_io->u.bdev.offset_blocks + bdev_io->u.bdev.num_blocks - 1) >>
		    raid_bdev->strip_size_shift;
	if (start_strip != end_strip && raid_bdev->num_base_bdevs > 1) {
		assert(false);
		SPDK_ERRLOG("I/O spans strip boundary!\n");
		raid_bdev_io_complete(raid_io, SPDK_BDEV_IO_STATUS_FAILED);
		return;
	}

	pd_strip = start_strip / raid_bdev->num_base_bdevs;
	pd_idx = start_strip % raid_bdev->num_base_bdevs;
	offset_in_strip = bdev_io->u.bdev.offset_blocks & (raid_bdev->strip_size - 1);
	pd_lba = (pd_strip << raid_bdev->strip_size_shift) + offset_in_strip;
	pd_blocks = bdev_io->u.bdev.num_blocks;
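	/*
	 * Worked example of the mapping above (hypothetical numbers, not taken
	 * from this code): with strip_size = 128 blocks (strip_size_shift = 7)
	 * and 4 base bdevs, an 8-block I/O at offset_blocks = 650 falls in
	 * start_strip = 650 >> 7 = 5, so pd_idx = 5 % 4 = 1, pd_strip = 5 / 4 = 1,
	 * offset_in_strip = 650 & 127 = 10 and pd_lba = (1 << 7) + 10 = 138,
	 * i.e. 8 blocks at LBA 138 on base bdev 1.
	 */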
	base_info = &raid_bdev->base_bdev_info[pd_idx];
	if (base_info->desc == NULL) {
		SPDK_ERRLOG("base bdev desc null for pd_idx %u\n", pd_idx);
		assert(0);
	}

	/*
	 * Submit child io to bdev layer using base bdev descriptors, base
	 * bdev lba, base bdev child io length in blocks, buffer, completion
	 * function and function callback context
	 */
	assert(raid_ch != NULL);
	assert(raid_ch->base_channel);
	base_ch = raid_ch->base_channel[pd_idx];
	if (bdev_io->type == SPDK_BDEV_IO_TYPE_READ) {
		ret = spdk_bdev_readv_blocks_ext(base_info->desc, base_ch,
						 bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
						 pd_lba, pd_blocks, raid0_bdev_io_completion,
						 raid_io, bdev_io->u.bdev.ext_opts);
	} else if (bdev_io->type == SPDK_BDEV_IO_TYPE_WRITE) {
		ret = spdk_bdev_writev_blocks_ext(base_info->desc, base_ch,
						  bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
						  pd_lba, pd_blocks, raid0_bdev_io_completion,
						  raid_io, bdev_io->u.bdev.ext_opts);
	} else {
		SPDK_ERRLOG("Received unsupported io type %u\n", bdev_io->type);
		assert(0);
	}

	if (ret == -ENOMEM) {
		raid_bdev_queue_io_wait(raid_io, base_info->bdev, base_ch,
					_raid0_submit_rw_request);
	} else if (ret != 0) {
		SPDK_ERRLOG("bdev io submit error not due to ENOMEM, it should not happen\n");
		assert(false);
		raid_bdev_io_complete(raid_io, SPDK_BDEV_IO_STATUS_FAILED);
	}
}

/* raid0 IO range */
struct raid_bdev_io_range {
	uint64_t strip_size;
	uint64_t start_strip_in_disk;
	uint64_t end_strip_in_disk;
	uint64_t start_offset_in_strip;
	uint64_t end_offset_in_strip;
	uint8_t start_disk;
	uint8_t end_disk;
	uint8_t n_disks_involved;
};

static inline void
_raid0_get_io_range(struct raid_bdev_io_range *io_range,
		    uint8_t num_base_bdevs, uint64_t strip_size, uint64_t strip_size_shift,
		    uint64_t offset_blocks, uint64_t num_blocks)
{
	uint64_t start_strip;
	uint64_t end_strip;
	uint64_t total_blocks;

	io_range->strip_size = strip_size;
	total_blocks = offset_blocks + num_blocks - (num_blocks > 0);

	/* The start and end strip index in raid0 bdev scope */
	start_strip = offset_blocks >> strip_size_shift;
	end_strip = total_blocks >> strip_size_shift;
	io_range->start_strip_in_disk = start_strip / num_base_bdevs;
	io_range->end_strip_in_disk = end_strip / num_base_bdevs;

	/* The first strip may have an unaligned start LBA offset.
	 * The end strip may have an unaligned end LBA offset.
	 * Strips between them certainly have offset and length aligned to boundaries.
	 */
	io_range->start_offset_in_strip = offset_blocks % strip_size;
	io_range->end_offset_in_strip = total_blocks % strip_size;

	/* The base bdev indexes in which start and end strips are located */
	io_range->start_disk = start_strip % num_base_bdevs;
	io_range->end_disk = end_strip % num_base_bdevs;

	/* Calculate how many base_bdevs are involved in the io operation.
	 * Number of base bdevs involved is between 1 and num_base_bdevs.
	 * It will be 1 if the first strip and last strip are the same one.
	 */
	io_range->n_disks_involved = spdk_min((end_strip - start_strip + 1), num_base_bdevs);
}
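/*
 * Worked example for _raid0_get_io_range() (hypothetical numbers): with
 * strip_size = 128 (strip_size_shift = 7), num_base_bdevs = 3, offset_blocks = 100
 * and num_blocks = 400, total_blocks = 499, so start_strip = 0 and end_strip = 3.
 * That yields start_disk = 0, end_disk = 0, start_strip_in_disk = 0,
 * end_strip_in_disk = 1, start_offset_in_strip = 100, end_offset_in_strip = 115
 * and n_disks_involved = spdk_min(4, 3) = 3.
 */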
static inline void
_raid0_split_io_range(struct raid_bdev_io_range *io_range, uint8_t disk_idx,
		      uint64_t *_offset_in_disk, uint64_t *_nblocks_in_disk)
{
	uint64_t n_strips_in_disk;
	uint64_t start_offset_in_disk;
	uint64_t end_offset_in_disk;
	uint64_t offset_in_disk;
	uint64_t nblocks_in_disk;
	uint64_t start_strip_in_disk;
	uint64_t end_strip_in_disk;

	start_strip_in_disk = io_range->start_strip_in_disk;
	if (disk_idx < io_range->start_disk) {
		start_strip_in_disk += 1;
	}

	end_strip_in_disk = io_range->end_strip_in_disk;
	if (disk_idx > io_range->end_disk) {
		end_strip_in_disk -= 1;
	}

	assert(end_strip_in_disk >= start_strip_in_disk);
	n_strips_in_disk = end_strip_in_disk - start_strip_in_disk + 1;

	if (disk_idx == io_range->start_disk) {
		start_offset_in_disk = io_range->start_offset_in_strip;
	} else {
		start_offset_in_disk = 0;
	}

	if (disk_idx == io_range->end_disk) {
		end_offset_in_disk = io_range->end_offset_in_strip;
	} else {
		end_offset_in_disk = io_range->strip_size - 1;
	}

	offset_in_disk = start_offset_in_disk + start_strip_in_disk * io_range->strip_size;
	nblocks_in_disk = (n_strips_in_disk - 1) * io_range->strip_size
			  + end_offset_in_disk - start_offset_in_disk + 1;

	SPDK_DEBUGLOG(bdev_raid0,
		      "raid_bdev (strip_size 0x%" PRIx64 ") splits IO to base_bdev (%u) at (0x%" PRIx64 ", 0x%" PRIx64
		      ").\n",
		      io_range->strip_size, disk_idx, offset_in_disk, nblocks_in_disk);

	*_offset_in_disk = offset_in_disk;
	*_nblocks_in_disk = nblocks_in_disk;
}
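/*
 * Continuing the hypothetical example above for _raid0_split_io_range():
 * disk_idx = 0 (both start_disk and end_disk) owns raid strips 0 and 3, so it
 * gets offset_in_disk = 100 and nblocks_in_disk = 1 * 128 + 115 - 100 + 1 = 144;
 * disk_idx = 1 owns only raid strip 1 and gets a full local strip,
 * offset_in_disk = 0 and nblocks_in_disk = 128 (likewise disk_idx = 2).
 * The per-disk block counts (144 + 128 + 128) add back up to the original 400.
 */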
static void raid0_submit_null_payload_request(struct raid_bdev_io *raid_io);

static void
_raid0_submit_null_payload_request(void *_raid_io)
{
	struct raid_bdev_io *raid_io = _raid_io;

	raid0_submit_null_payload_request(raid_io);
}

static void
raid0_base_io_complete(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	struct raid_bdev_io *raid_io = cb_arg;

	raid_bdev_io_complete_part(raid_io, 1, success ?
				   SPDK_BDEV_IO_STATUS_SUCCESS :
				   SPDK_BDEV_IO_STATUS_FAILED);

	spdk_bdev_free_io(bdev_io);
}

/*
 * brief:
 * raid0_submit_null_payload_request function submits the next batch of
 * io requests with range but without payload, like FLUSH and UNMAP, to member disks;
 * it will submit as many as possible unless one base io request fails with -ENOMEM,
 * in which case it will queue itself for later submission.
 * params:
 * raid_io - pointer to the parent raid_bdev_io on the raid bdev device
 * returns:
 * none
 */
static void
raid0_submit_null_payload_request(struct raid_bdev_io *raid_io)
{
	struct spdk_bdev_io *bdev_io;
	struct raid_bdev *raid_bdev;
	struct raid_bdev_io_range io_range;
	int ret;
	struct raid_base_bdev_info *base_info;
	struct spdk_io_channel *base_ch;

	bdev_io = spdk_bdev_io_from_ctx(raid_io);
	raid_bdev = raid_io->raid_bdev;

	_raid0_get_io_range(&io_range, raid_bdev->num_base_bdevs,
			    raid_bdev->strip_size, raid_bdev->strip_size_shift,
			    bdev_io->u.bdev.offset_blocks, bdev_io->u.bdev.num_blocks);

	if (raid_io->base_bdev_io_remaining == 0) {
		raid_io->base_bdev_io_remaining = io_range.n_disks_involved;
	}

	while (raid_io->base_bdev_io_submitted < io_range.n_disks_involved) {
		uint8_t disk_idx;
		uint64_t offset_in_disk;
		uint64_t nblocks_in_disk;

		/* base_bdevs are processed from start_disk to end_disk.
		 * It is possible that the index of start_disk is larger than end_disk's.
		 */
		disk_idx = (io_range.start_disk + raid_io->base_bdev_io_submitted) % raid_bdev->num_base_bdevs;
		base_info = &raid_bdev->base_bdev_info[disk_idx];
		base_ch = raid_io->raid_ch->base_channel[disk_idx];

		_raid0_split_io_range(&io_range, disk_idx, &offset_in_disk, &nblocks_in_disk);

		switch (bdev_io->type) {
		case SPDK_BDEV_IO_TYPE_UNMAP:
			ret = spdk_bdev_unmap_blocks(base_info->desc, base_ch,
						     offset_in_disk, nblocks_in_disk,
						     raid0_base_io_complete, raid_io);
			break;

		case SPDK_BDEV_IO_TYPE_FLUSH:
			ret = spdk_bdev_flush_blocks(base_info->desc, base_ch,
						     offset_in_disk, nblocks_in_disk,
						     raid0_base_io_complete, raid_io);
			break;

		default:
			SPDK_ERRLOG("submit request, invalid io type with null payload %u\n", bdev_io->type);
			assert(false);
			ret = -EIO;
		}

		if (ret == 0) {
			raid_io->base_bdev_io_submitted++;
		} else if (ret == -ENOMEM) {
			raid_bdev_queue_io_wait(raid_io, base_info->bdev, base_ch,
						_raid0_submit_null_payload_request);
			return;
		} else {
			SPDK_ERRLOG("bdev io submit error not due to ENOMEM, it should not happen\n");
			assert(false);
			raid_bdev_io_complete(raid_io, SPDK_BDEV_IO_STATUS_FAILED);
			return;
		}
	}
}

static int
raid0_start(struct raid_bdev *raid_bdev)
{
	uint64_t min_blockcnt = UINT64_MAX;
	struct raid_base_bdev_info *base_info;

	RAID_FOR_EACH_BASE_BDEV(raid_bdev, base_info) {
		/* Calculate minimum block count from all base bdevs */
		min_blockcnt = spdk_min(min_blockcnt, base_info->bdev->blockcnt);
	}

	/*
	 * Take the minimum block count based approach, where the total block count
	 * of the raid bdev is the number of base bdevs times the minimum block count
	 * of any base bdev.
	 */
	SPDK_DEBUGLOG(bdev_raid0, "min blockcount %" PRIu64 ", numbasedev %u, strip size shift %u\n",
		      min_blockcnt, raid_bdev->num_base_bdevs, raid_bdev->strip_size_shift);
	raid_bdev->bdev.blockcnt = ((min_blockcnt >> raid_bdev->strip_size_shift) <<
				    raid_bdev->strip_size_shift) * raid_bdev->num_base_bdevs;
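	/*
	 * Hypothetical example: with min_blockcnt = 1000, strip_size_shift = 7
	 * (128-block strips) and 4 base bdevs, each base bdev contributes
	 * (1000 >> 7) << 7 = 896 usable blocks, so bdev.blockcnt = 896 * 4 = 3584.
	 */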
	if (raid_bdev->num_base_bdevs > 1) {
		raid_bdev->bdev.optimal_io_boundary = raid_bdev->strip_size;
		raid_bdev->bdev.split_on_optimal_io_boundary = true;
	} else {
		/* Do not need to split reads/writes on single bdev RAID modules. */
		raid_bdev->bdev.optimal_io_boundary = 0;
		raid_bdev->bdev.split_on_optimal_io_boundary = false;
	}

	return 0;
}

static struct raid_bdev_module g_raid0_module = {
	.level = RAID0,
	.base_bdevs_min = 1,
	.start = raid0_start,
	.submit_rw_request = raid0_submit_rw_request,
	.submit_null_payload_request = raid0_submit_null_payload_request,
};
RAID_MODULE_REGISTER(&g_raid0_module)

SPDK_LOG_REGISTER_COMPONENT(bdev_raid0)