/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2022 Intel Corporation.
 * All rights reserved.
 */

#include "bdev_raid.h"

#include "spdk/likely.h"
#include "spdk/log.h"

struct raid1_info {
	/* The parent raid bdev */
	struct raid_bdev *raid_bdev;
};

/* Completion callback for an I/O submitted to a base bdev */
static void
raid1_bdev_io_completion(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	struct raid_bdev_io *raid_io = cb_arg;

	spdk_bdev_free_io(bdev_io);

	raid_bdev_io_complete_part(raid_io, 1, success ?
				   SPDK_BDEV_IO_STATUS_SUCCESS :
				   SPDK_BDEV_IO_STATUS_FAILED);
}

static void raid1_submit_rw_request(struct raid_bdev_io *raid_io);

static void
_raid1_submit_rw_request(void *_raid_io)
{
	struct raid_bdev_io *raid_io = _raid_io;

	raid1_submit_rw_request(raid_io);
}

static void
raid1_init_ext_io_opts(struct spdk_bdev_io *bdev_io, struct spdk_bdev_ext_io_opts *opts)
{
	memset(opts, 0, sizeof(*opts));
	opts->size = sizeof(*opts);
	opts->memory_domain = bdev_io->u.bdev.memory_domain;
	opts->memory_domain_ctx = bdev_io->u.bdev.memory_domain_ctx;
	opts->metadata = bdev_io->u.bdev.md_buf;
}

static int
raid1_submit_read_request(struct raid_bdev_io *raid_io)
{
	struct raid_bdev *raid_bdev = raid_io->raid_bdev;
	struct spdk_bdev_io *bdev_io = spdk_bdev_io_from_ctx(raid_io);
	struct spdk_bdev_ext_io_opts io_opts;
	uint8_t idx = 0;
	struct raid_base_bdev_info *base_info;
	struct spdk_io_channel *base_ch = NULL;
	uint64_t pd_lba, pd_blocks;
	int ret;

	/* Read from the first base bdev that has an open channel */
	RAID_FOR_EACH_BASE_BDEV(raid_bdev, base_info) {
		base_ch = raid_io->raid_ch->base_channel[idx];
		if (base_ch != NULL) {
			break;
		}
		idx++;
	}

	if (base_ch == NULL) {
		raid_bdev_io_complete(raid_io, SPDK_BDEV_IO_STATUS_FAILED);
		return 0;
	}

	pd_lba = bdev_io->u.bdev.offset_blocks;
	pd_blocks = bdev_io->u.bdev.num_blocks;

	raid_io->base_bdev_io_remaining = 1;

	raid1_init_ext_io_opts(bdev_io, &io_opts);
	ret = spdk_bdev_readv_blocks_ext(base_info->desc, base_ch,
					 bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
					 pd_lba, pd_blocks, raid1_bdev_io_completion,
					 raid_io, &io_opts);

	if (spdk_likely(ret == 0)) {
		raid_io->base_bdev_io_submitted++;
	} else if (spdk_unlikely(ret == -ENOMEM)) {
		raid_bdev_queue_io_wait(raid_io, spdk_bdev_desc_get_bdev(base_info->desc),
					base_ch, _raid1_submit_rw_request);
		return 0;
	}

	return ret;
}

static int
raid1_submit_write_request(struct raid_bdev_io *raid_io)
{
	struct raid_bdev *raid_bdev = raid_io->raid_bdev;
	struct spdk_bdev_io *bdev_io = spdk_bdev_io_from_ctx(raid_io);
	struct spdk_bdev_ext_io_opts io_opts;
	struct raid_base_bdev_info *base_info;
	struct spdk_io_channel *base_ch;
	uint64_t pd_lba, pd_blocks;
	uint8_t idx;
	uint64_t base_bdev_io_not_submitted;
	int ret = 0;

	pd_lba = bdev_io->u.bdev.offset_blocks;
	pd_blocks = bdev_io->u.bdev.num_blocks;

	if (raid_io->base_bdev_io_submitted == 0) {
		raid_io->base_bdev_io_remaining = raid_bdev->num_base_bdevs;
	}

	/* Mirror the write to every base bdev, resuming from the last
	 * submitted index if this is a retry after -ENOMEM */
	raid1_init_ext_io_opts(bdev_io, &io_opts);
	for (idx = raid_io->base_bdev_io_submitted; idx < raid_bdev->num_base_bdevs; idx++) {
		base_info = &raid_bdev->base_bdev_info[idx];
		base_ch = raid_io->raid_ch->base_channel[idx];

		if (base_ch == NULL) {
			/* skip a missing base bdev's slot */
			raid_io->base_bdev_io_submitted++;
			raid_bdev_io_complete_part(raid_io, 1, SPDK_BDEV_IO_STATUS_SUCCESS);
			continue;
		}

		ret = spdk_bdev_writev_blocks_ext(base_info->desc, base_ch,
						  bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
						  pd_lba, pd_blocks, raid1_bdev_io_completion,
						  raid_io, &io_opts);
		if (spdk_unlikely(ret != 0)) {
			if (spdk_unlikely(ret == -ENOMEM)) {
				raid_bdev_queue_io_wait(raid_io, spdk_bdev_desc_get_bdev(base_info->desc),
							base_ch, _raid1_submit_rw_request);
				return 0;
			}

			base_bdev_io_not_submitted = raid_bdev->num_base_bdevs -
						     raid_io->base_bdev_io_submitted;
			raid_bdev_io_complete_part(raid_io, base_bdev_io_not_submitted,
						   SPDK_BDEV_IO_STATUS_FAILED);
			return 0;
		}

		raid_io->base_bdev_io_submitted++;
	}

	if (raid_io->base_bdev_io_submitted == 0) {
		ret = -ENODEV;
	}

	return ret;
}

static void
raid1_submit_rw_request(struct raid_bdev_io *raid_io)
{
	struct spdk_bdev_io *bdev_io = spdk_bdev_io_from_ctx(raid_io);
	int ret;

	switch (bdev_io->type) {
	case SPDK_BDEV_IO_TYPE_READ:
		ret = raid1_submit_read_request(raid_io);
		break;
	case SPDK_BDEV_IO_TYPE_WRITE:
		ret = raid1_submit_write_request(raid_io);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (spdk_unlikely(ret != 0)) {
		raid_bdev_io_complete(raid_io, SPDK_BDEV_IO_STATUS_FAILED);
	}
}

static int
raid1_start(struct raid_bdev *raid_bdev)
{
	uint64_t min_blockcnt = UINT64_MAX;
	struct raid_base_bdev_info *base_info;
	struct raid1_info *r1info;

	r1info = calloc(1, sizeof(*r1info));
	if (!r1info) {
		SPDK_ERRLOG("Failed to allocate RAID1 info device structure\n");
		return -ENOMEM;
	}
	r1info->raid_bdev = raid_bdev;

	/* The usable capacity is limited by the smallest base bdev */
	RAID_FOR_EACH_BASE_BDEV(raid_bdev, base_info) {
		min_blockcnt = spdk_min(min_blockcnt, spdk_bdev_desc_get_bdev(base_info->desc)->blockcnt);
	}

	raid_bdev->bdev.blockcnt = min_blockcnt;
	raid_bdev->module_private = r1info;

	return 0;
}

static bool
raid1_stop(struct raid_bdev *raid_bdev)
{
	struct raid1_info *r1info = raid_bdev->module_private;

	free(r1info);

	return true;
}

static struct raid_bdev_module g_raid1_module = {
	.level = RAID1,
	.base_bdevs_min = 2,
	.base_bdevs_constraint = {CONSTRAINT_MIN_BASE_BDEVS_OPERATIONAL, 1},
	.memory_domains_supported = true,
	.start = raid1_start,
	.stop = raid1_stop,
	.submit_rw_request = raid1_submit_rw_request,
};
RAID_MODULE_REGISTER(&g_raid1_module)

SPDK_LOG_REGISTER_COMPONENT(bdev_raid1)