/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This is a simple example of a virtual block device module that passes IO
 * down to a bdev (or bdevs) that it's configured to attach to.
 */

#include "spdk/stdinc.h"

#include "vbdev_passthru.h"
#include "spdk/rpc.h"
#include "spdk/env.h"
#include "spdk/endian.h"
#include "spdk/string.h"
#include "spdk/thread.h"
#include "spdk/util.h"

#include "spdk/bdev_module.h"
#include "spdk/log.h"


static int vbdev_passthru_init(void);
static int vbdev_passthru_get_ctx_size(void);
static void vbdev_passthru_examine(struct spdk_bdev *bdev);
static void vbdev_passthru_finish(void);
static int vbdev_passthru_config_json(struct spdk_json_write_ctx *w);

static struct spdk_bdev_module passthru_if = {
	.name = "passthru",
	.module_init = vbdev_passthru_init,
	.get_ctx_size = vbdev_passthru_get_ctx_size,
	.examine_config = vbdev_passthru_examine,
	.module_fini = vbdev_passthru_finish,
	.config_json = vbdev_passthru_config_json
};

SPDK_BDEV_MODULE_REGISTER(passthru, &passthru_if)

/* List of pt_bdev names and their base bdevs via configuration file.
 * Used so we can parse the conf once at init and use this list in examine().
 */
struct bdev_names {
	char *vbdev_name;
	char *bdev_name;
	TAILQ_ENTRY(bdev_names) link;
};
static TAILQ_HEAD(, bdev_names) g_bdev_names = TAILQ_HEAD_INITIALIZER(g_bdev_names);

/* List of virtual bdevs and associated info for each. */
struct vbdev_passthru {
	struct spdk_bdev *base_bdev;		/* the thing we're attaching to */
	struct spdk_bdev_desc *base_desc;	/* its descriptor we get from open */
	struct spdk_bdev pt_bdev;		/* the PT virtual bdev */
	TAILQ_ENTRY(vbdev_passthru) link;
	struct spdk_thread *thread;		/* thread where base device is opened */
};
static TAILQ_HEAD(, vbdev_passthru) g_pt_nodes = TAILQ_HEAD_INITIALIZER(g_pt_nodes);

/* The pt vbdev channel struct. It is allocated and freed on my behalf by the io channel code.
 * If this vbdev needed to implement a poller or a queue for IO, this is where those things
 * would be defined. This passthru bdev doesn't actually need to allocate a channel; it could
 * simply pass back the channel of the bdev underneath it, but for example purposes we will
 * present its own to the upper layers.
 */
struct pt_io_channel {
	struct spdk_io_channel *base_ch;	/* IO channel of base device */
};

/* Just for fun, this pt_bdev module doesn't need it, but this is essentially a per IO
 * context that we get handed by the bdev layer.
 */
struct passthru_bdev_io {
	uint8_t test;

	/* bdev related */
	struct spdk_io_channel *ch;

	/* for bdev_io_wait */
	struct spdk_bdev_io_wait_entry bdev_io_wait;
};

static void
vbdev_passthru_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io);


/* Callback for unregistering the IO device. */
static void
_device_unregister_cb(void *io_device)
{
	struct vbdev_passthru *pt_node = io_device;

	/* Done with this pt_node. */
	free(pt_node->pt_bdev.name);
	free(pt_node);
}

/* Wrapper for the bdev close operation. */
static void
_vbdev_passthru_destruct(void *ctx)
{
	struct spdk_bdev_desc *desc = ctx;

	spdk_bdev_close(desc);
}

/* Called after we've unregistered following a hot remove callback.
 * Our finish entry point will be called next.
 */
static int
vbdev_passthru_destruct(void *ctx)
{
	struct vbdev_passthru *pt_node = (struct vbdev_passthru *)ctx;

	/* It is important to follow this exact sequence of steps for destroying
	 * a vbdev...
	 */

	TAILQ_REMOVE(&g_pt_nodes, pt_node, link);

	/* Unclaim the underlying bdev. */
	spdk_bdev_module_release_bdev(pt_node->base_bdev);

	/* Close the underlying bdev on its same opened thread. */
	if (pt_node->thread && pt_node->thread != spdk_get_thread()) {
		spdk_thread_send_msg(pt_node->thread, _vbdev_passthru_destruct, pt_node->base_desc);
	} else {
		spdk_bdev_close(pt_node->base_desc);
	}

	/* Unregister the io_device. */
	spdk_io_device_unregister(pt_node, _device_unregister_cb);

	return 0;
}

/* Completion callback for IOs that were issued from this bdev. The original bdev_io
 * is passed in as an arg so we'll complete that one with the appropriate status
 * and then free the one that this module issued.
 */
static void
_pt_complete_io(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	struct spdk_bdev_io *orig_io = cb_arg;
	int status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
	struct passthru_bdev_io *io_ctx = (struct passthru_bdev_io *)orig_io->driver_ctx;

	/* We set this value up in the submission routine, just showing here that it is
	 * passed back to us.
	 */
	if (io_ctx->test != 0x5a) {
		SPDK_ERRLOG("Error, original IO device_ctx is wrong! 0x%x\n",
			    io_ctx->test);
	}

	/* Complete the original IO and then free the one that we created here
	 * as a result of issuing an IO via submit_request.
	 */
	spdk_bdev_io_complete(orig_io, status);
	spdk_bdev_free_io(bdev_io);
}

/* Completion callback for zcopy IOs issued to the base bdev; hands the zcopy buffer
 * back to the original IO before completing it.
 */
static void
_pt_complete_zcopy_io(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	struct spdk_bdev_io *orig_io = cb_arg;
	int status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
	struct passthru_bdev_io *io_ctx = (struct passthru_bdev_io *)orig_io->driver_ctx;

	/* We set this value up in the submission routine, just showing here that it is
	 * passed back to us.
	 */
	if (io_ctx->test != 0x5a) {
		SPDK_ERRLOG("Error, original IO device_ctx is wrong! 0x%x\n",
			    io_ctx->test);
	}

	/* Complete the original IO and then free the one that we created here
	 * as a result of issuing an IO via submit_request.
	 */
	spdk_bdev_io_set_buf(orig_io, bdev_io->u.bdev.iovs[0].iov_base, bdev_io->u.bdev.iovs[0].iov_len);
	spdk_bdev_io_complete(orig_io, status);
	spdk_bdev_free_io(bdev_io);
}

/* Resubmit an IO that was previously queued while waiting for bdev resources. */
static void
vbdev_passthru_resubmit_io(void *arg)
{
	struct spdk_bdev_io *bdev_io = (struct spdk_bdev_io *)arg;
	struct passthru_bdev_io *io_ctx = (struct passthru_bdev_io *)bdev_io->driver_ctx;

	vbdev_passthru_submit_request(io_ctx->ch, bdev_io);
}

/* Queue an IO until the base bdev has resources to accept it again. */
static void
vbdev_passthru_queue_io(struct spdk_bdev_io *bdev_io)
{
	struct passthru_bdev_io *io_ctx = (struct passthru_bdev_io *)bdev_io->driver_ctx;
	struct pt_io_channel *pt_ch = spdk_io_channel_get_ctx(io_ctx->ch);
	int rc;

	io_ctx->bdev_io_wait.bdev = bdev_io->bdev;
	io_ctx->bdev_io_wait.cb_fn = vbdev_passthru_resubmit_io;
	io_ctx->bdev_io_wait.cb_arg = bdev_io;

	/* Queue the IO using the channel of the base device. */
	rc = spdk_bdev_queue_io_wait(bdev_io->bdev, pt_ch->base_ch, &io_ctx->bdev_io_wait);
	if (rc != 0) {
		SPDK_ERRLOG("Queue io failed in vbdev_passthru_queue_io, rc=%d.\n", rc);
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
	}
}

/* Callback for getting a buf from the bdev pool in the event that the caller passed
 * in NULL. We need to own the buffer so it doesn't get freed by another vbdev module
 * beneath us before we're done with it. That won't happen in this example but it could
 * if this example were used as a template for something more complex.
 */
static void
pt_read_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io, bool success)
{
	struct vbdev_passthru *pt_node = SPDK_CONTAINEROF(bdev_io->bdev, struct vbdev_passthru,
					 pt_bdev);
	struct pt_io_channel *pt_ch = spdk_io_channel_get_ctx(ch);
	struct passthru_bdev_io *io_ctx = (struct passthru_bdev_io *)bdev_io->driver_ctx;
	int rc;

	if (!success) {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
		return;
	}

	if (bdev_io->u.bdev.md_buf == NULL) {
		rc = spdk_bdev_readv_blocks(pt_node->base_desc, pt_ch->base_ch, bdev_io->u.bdev.iovs,
					    bdev_io->u.bdev.iovcnt, bdev_io->u.bdev.offset_blocks,
					    bdev_io->u.bdev.num_blocks, _pt_complete_io,
					    bdev_io);
	} else {
		rc = spdk_bdev_readv_blocks_with_md(pt_node->base_desc, pt_ch->base_ch,
						    bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
						    bdev_io->u.bdev.md_buf,
						    bdev_io->u.bdev.offset_blocks,
						    bdev_io->u.bdev.num_blocks,
						    _pt_complete_io, bdev_io);
	}

	if (rc != 0) {
		if (rc == -ENOMEM) {
			SPDK_ERRLOG("No memory, start to queue io for passthru.\n");
			io_ctx->ch = ch;
			vbdev_passthru_queue_io(bdev_io);
		} else {
			SPDK_ERRLOG("ERROR on bdev_io submission!\n");
			spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
		}
	}
}

/* Called when someone above submits IO to this pt vbdev. We're simply passing it on here
 * via SPDK IO calls which in turn allocate another bdev IO and call our cpl callback provided
 * below along with the original bdev_io so that we can complete it once this IO completes.
 */
static void
vbdev_passthru_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
{
	struct vbdev_passthru *pt_node = SPDK_CONTAINEROF(bdev_io->bdev, struct vbdev_passthru, pt_bdev);
	struct pt_io_channel *pt_ch = spdk_io_channel_get_ctx(ch);
	struct passthru_bdev_io *io_ctx = (struct passthru_bdev_io *)bdev_io->driver_ctx;
	int rc = 0;

	/* Set up a per IO context value; we don't do anything with it in the vbdev other
	 * than confirm we get the same thing back in the completion callback, just to
	 * demonstrate.
	 */
	io_ctx->test = 0x5a;

	switch (bdev_io->type) {
	case SPDK_BDEV_IO_TYPE_READ:
		spdk_bdev_io_get_buf(bdev_io, pt_read_get_buf_cb,
				     bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
		break;
	case SPDK_BDEV_IO_TYPE_WRITE:
		if (bdev_io->u.bdev.md_buf == NULL) {
			rc = spdk_bdev_writev_blocks(pt_node->base_desc, pt_ch->base_ch, bdev_io->u.bdev.iovs,
						     bdev_io->u.bdev.iovcnt, bdev_io->u.bdev.offset_blocks,
						     bdev_io->u.bdev.num_blocks, _pt_complete_io,
						     bdev_io);
		} else {
			rc = spdk_bdev_writev_blocks_with_md(pt_node->base_desc, pt_ch->base_ch,
							     bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
							     bdev_io->u.bdev.md_buf,
							     bdev_io->u.bdev.offset_blocks,
							     bdev_io->u.bdev.num_blocks,
							     _pt_complete_io, bdev_io);
		}
		break;
	case SPDK_BDEV_IO_TYPE_WRITE_ZEROES:
		rc = spdk_bdev_write_zeroes_blocks(pt_node->base_desc, pt_ch->base_ch,
						   bdev_io->u.bdev.offset_blocks,
						   bdev_io->u.bdev.num_blocks,
						   _pt_complete_io, bdev_io);
		break;
	case SPDK_BDEV_IO_TYPE_UNMAP:
		rc = spdk_bdev_unmap_blocks(pt_node->base_desc, pt_ch->base_ch,
					    bdev_io->u.bdev.offset_blocks,
					    bdev_io->u.bdev.num_blocks,
					    _pt_complete_io, bdev_io);
		break;
	case SPDK_BDEV_IO_TYPE_FLUSH:
		rc = spdk_bdev_flush_blocks(pt_node->base_desc, pt_ch->base_ch,
					    bdev_io->u.bdev.offset_blocks,
					    bdev_io->u.bdev.num_blocks,
					    _pt_complete_io, bdev_io);
		break;
	case SPDK_BDEV_IO_TYPE_RESET:
		rc = spdk_bdev_reset(pt_node->base_desc, pt_ch->base_ch,
				     _pt_complete_io, bdev_io);
		break;
	case SPDK_BDEV_IO_TYPE_ZCOPY:
		rc = spdk_bdev_zcopy_start(pt_node->base_desc, pt_ch->base_ch, NULL, 0,
					   bdev_io->u.bdev.offset_blocks,
					   bdev_io->u.bdev.num_blocks, bdev_io->u.bdev.zcopy.populate,
					   _pt_complete_zcopy_io, bdev_io);
		break;
	case SPDK_BDEV_IO_TYPE_ABORT:
		rc = spdk_bdev_abort(pt_node->base_desc, pt_ch->base_ch, bdev_io->u.abort.bio_to_abort,
				     _pt_complete_io, bdev_io);
		break;
	default:
		SPDK_ERRLOG("passthru: unknown I/O type %d\n", bdev_io->type);
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
		return;
	}
	if (rc != 0) {
		if (rc == -ENOMEM) {
			SPDK_ERRLOG("No memory, start to queue io for passthru.\n");
			io_ctx->ch = ch;
			vbdev_passthru_queue_io(bdev_io);
		} else {
			SPDK_ERRLOG("ERROR on bdev_io submission!\n");
			spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
		}
	}
}

/* We'll just call the base bdev and let it answer. However, if we were more
 * restrictive for some reason (or less) we could get the response back
 * and modify according to our purposes.
 */
static bool
vbdev_passthru_io_type_supported(void *ctx, enum spdk_bdev_io_type io_type)
{
	struct vbdev_passthru *pt_node = (struct vbdev_passthru *)ctx;

	return spdk_bdev_io_type_supported(pt_node->base_bdev, io_type);
}

/* We supplied this as an entry point for upper layers who want to communicate to this
 * bdev. This is how they get a channel. We are passed the same context we provided when
 * we created our PT vbdev in examine() which, for this bdev, is the address of one of
 * our context nodes. From here we'll ask the SPDK channel code to fill out our channel
 * struct and we'll keep it in our PT node.
 */
static struct spdk_io_channel *
vbdev_passthru_get_io_channel(void *ctx)
{
	struct vbdev_passthru *pt_node = (struct vbdev_passthru *)ctx;
	struct spdk_io_channel *pt_ch = NULL;

	/* The IO channel code will allocate a channel for us which consists of
	 * the SPDK channel structure plus the size of our pt_io_channel struct
	 * that we passed in when we registered our IO device. It will then call
	 * our channel create callback to populate any elements that we need to
	 * update.
	 */
	pt_ch = spdk_get_io_channel(pt_node);

	return pt_ch;
}

/* This is the output for bdev_get_bdevs() for this vbdev */
static int
vbdev_passthru_dump_info_json(void *ctx, struct spdk_json_write_ctx *w)
{
	struct vbdev_passthru *pt_node = (struct vbdev_passthru *)ctx;

	spdk_json_write_name(w, "passthru");
	spdk_json_write_object_begin(w);
	spdk_json_write_named_string(w, "name", spdk_bdev_get_name(&pt_node->pt_bdev));
	spdk_json_write_named_string(w, "base_bdev_name", spdk_bdev_get_name(pt_node->base_bdev));
	spdk_json_write_object_end(w);

	return 0;
}

/* This is used to generate JSON that can configure this module to its current state. */
static int
vbdev_passthru_config_json(struct spdk_json_write_ctx *w)
{
	struct vbdev_passthru *pt_node;

	TAILQ_FOREACH(pt_node, &g_pt_nodes, link) {
		spdk_json_write_object_begin(w);
		spdk_json_write_named_string(w, "method", "bdev_passthru_create");
		spdk_json_write_named_object_begin(w, "params");
		spdk_json_write_named_string(w, "base_bdev_name", spdk_bdev_get_name(pt_node->base_bdev));
		spdk_json_write_named_string(w, "name", spdk_bdev_get_name(&pt_node->pt_bdev));
		spdk_json_write_object_end(w);
		spdk_json_write_object_end(w);
	}
	return 0;
}

/* We provide this callback for the SPDK channel code to create a channel using
 * the channel struct we provided in our module get_io_channel() entry point. Here
 * we get and save off an underlying base channel of the device below us so that
 * we can communicate with the base bdev on a per channel basis. If we needed
 * our own poller for this vbdev, we'd register it here.
 */
static int
pt_bdev_ch_create_cb(void *io_device, void *ctx_buf)
{
	struct pt_io_channel *pt_ch = ctx_buf;
	struct vbdev_passthru *pt_node = io_device;

	pt_ch->base_ch = spdk_bdev_get_io_channel(pt_node->base_desc);

	return 0;
}

/* We provide this callback for the SPDK channel code to destroy a channel
 * created with our create callback. We just need to undo anything we did
 * when we created. If this bdev used its own poller, we'd unregister it here.
 */
static void
pt_bdev_ch_destroy_cb(void *io_device, void *ctx_buf)
{
	struct pt_io_channel *pt_ch = ctx_buf;

	spdk_put_io_channel(pt_ch->base_ch);
}

/* Create the passthru association from the bdev and vbdev name and insert
 * on the global list.
 */
static int
vbdev_passthru_insert_name(const char *bdev_name, const char *vbdev_name)
{
	struct bdev_names *name;

	TAILQ_FOREACH(name, &g_bdev_names, link) {
		if (strcmp(vbdev_name, name->vbdev_name) == 0) {
			SPDK_ERRLOG("passthru bdev %s already exists\n", vbdev_name);
			return -EEXIST;
		}
	}

	name = calloc(1, sizeof(struct bdev_names));
	if (!name) {
		SPDK_ERRLOG("could not allocate bdev_names\n");
		return -ENOMEM;
	}

	name->bdev_name = strdup(bdev_name);
	if (!name->bdev_name) {
		SPDK_ERRLOG("could not allocate name->bdev_name\n");
		free(name);
		return -ENOMEM;
	}

	name->vbdev_name = strdup(vbdev_name);
	if (!name->vbdev_name) {
		SPDK_ERRLOG("could not allocate name->vbdev_name\n");
		free(name->bdev_name);
		free(name);
		return -ENOMEM;
	}

	TAILQ_INSERT_TAIL(&g_bdev_names, name, link);

	return 0;
}

/* On init, just perform bdev module specific initialization. */
static int
vbdev_passthru_init(void)
{
	return 0;
}

/* Called when the entire module is being torn down. */
static void
vbdev_passthru_finish(void)
{
	struct bdev_names *name;

	while ((name = TAILQ_FIRST(&g_bdev_names))) {
		TAILQ_REMOVE(&g_bdev_names, name, link);
		free(name->bdev_name);
		free(name->vbdev_name);
		free(name);
	}
}

/* During init we'll be asked how much memory we'd like passed to us
 * in bdev_io structures as context. Here's where we specify how
 * much context we want per IO.
 */
static int
vbdev_passthru_get_ctx_size(void)
{
	return sizeof(struct passthru_bdev_io);
}

/* Whereas vbdev_passthru_config_json() is used to generate per module JSON config data, this
 * function is called to output any per bdev specific methods. For the PT module, there are
 * none.
 */
static void
vbdev_passthru_write_config_json(struct spdk_bdev *bdev, struct spdk_json_write_ctx *w)
{
	/* No config per bdev needed */
}

/* When we register our bdev this is how we specify our entry points. */
static const struct spdk_bdev_fn_table vbdev_passthru_fn_table = {
	.destruct = vbdev_passthru_destruct,
	.submit_request = vbdev_passthru_submit_request,
	.io_type_supported = vbdev_passthru_io_type_supported,
	.get_io_channel = vbdev_passthru_get_io_channel,
	.dump_info_json = vbdev_passthru_dump_info_json,
	.write_config_json = vbdev_passthru_write_config_json,
};

/* Unregister any passthru vbdevs that are stacked on the base bdev being hot-removed. */
static void
vbdev_passthru_base_bdev_hotremove_cb(struct spdk_bdev *bdev_find)
{
	struct vbdev_passthru *pt_node, *tmp;

	TAILQ_FOREACH_SAFE(pt_node, &g_pt_nodes, link, tmp) {
		if (bdev_find == pt_node->base_bdev) {
			spdk_bdev_unregister(&pt_node->pt_bdev, NULL, NULL);
		}
	}
}

/* Called when the underlying base bdev triggers asynchronous event such as bdev removal. */
static void
vbdev_passthru_base_bdev_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev,
				  void *event_ctx)
{
	switch (type) {
	case SPDK_BDEV_EVENT_REMOVE:
		vbdev_passthru_base_bdev_hotremove_cb(bdev);
		break;
	default:
		SPDK_NOTICELOG("Unsupported bdev event: type %d\n", type);
		break;
	}
}

/* Create and register the passthru vbdev if we find it in our list of bdev names.
 * This can be called either by the examine path or RPC method.
 */
static int
vbdev_passthru_register(const char *bdev_name)
{
	struct bdev_names *name;
	struct vbdev_passthru *pt_node;
	struct spdk_bdev *bdev;
	int rc = 0;

	/* Check our list of names from config versus this bdev and if
	 * there's a match, create the pt_node & bdev accordingly.
	 */
	TAILQ_FOREACH(name, &g_bdev_names, link) {
		if (strcmp(name->bdev_name, bdev_name) != 0) {
			continue;
		}

		SPDK_NOTICELOG("Match on %s\n", bdev_name);
		pt_node = calloc(1, sizeof(struct vbdev_passthru));
		if (!pt_node) {
			rc = -ENOMEM;
			SPDK_ERRLOG("could not allocate pt_node\n");
			break;
		}

		pt_node->pt_bdev.name = strdup(name->vbdev_name);
		if (!pt_node->pt_bdev.name) {
			rc = -ENOMEM;
			SPDK_ERRLOG("could not allocate pt_bdev name\n");
			free(pt_node);
			break;
		}
		pt_node->pt_bdev.product_name = "passthru";

		/* The base bdev that we're attaching to. */
		rc = spdk_bdev_open_ext(bdev_name, true, vbdev_passthru_base_bdev_event_cb,
					NULL, &pt_node->base_desc);
		if (rc) {
			if (rc != -ENODEV) {
				SPDK_ERRLOG("could not open bdev %s\n", bdev_name);
			}
			free(pt_node->pt_bdev.name);
			free(pt_node);
			break;
		}
		SPDK_NOTICELOG("base bdev opened\n");

		bdev = spdk_bdev_desc_get_bdev(pt_node->base_desc);
		pt_node->base_bdev = bdev;

		/* Copy some properties from the underlying base bdev. */
		pt_node->pt_bdev.write_cache = bdev->write_cache;
		pt_node->pt_bdev.required_alignment = bdev->required_alignment;
		pt_node->pt_bdev.optimal_io_boundary = bdev->optimal_io_boundary;
		pt_node->pt_bdev.blocklen = bdev->blocklen;
		pt_node->pt_bdev.blockcnt = bdev->blockcnt;

		pt_node->pt_bdev.md_interleave = bdev->md_interleave;
		pt_node->pt_bdev.md_len = bdev->md_len;
		pt_node->pt_bdev.dif_type = bdev->dif_type;
		pt_node->pt_bdev.dif_is_head_of_md = bdev->dif_is_head_of_md;
		pt_node->pt_bdev.dif_check_flags = bdev->dif_check_flags;

		/* This is the context that is passed to us when the bdev
		 * layer calls in so we'll save our pt_bdev node here.
		 */
		pt_node->pt_bdev.ctxt = pt_node;
		pt_node->pt_bdev.fn_table = &vbdev_passthru_fn_table;
		pt_node->pt_bdev.module = &passthru_if;
		TAILQ_INSERT_TAIL(&g_pt_nodes, pt_node, link);

		spdk_io_device_register(pt_node, pt_bdev_ch_create_cb, pt_bdev_ch_destroy_cb,
					sizeof(struct pt_io_channel),
					name->vbdev_name);
		SPDK_NOTICELOG("io_device created at: 0x%p\n", pt_node);

		/* Save the thread where the base device is opened */
		pt_node->thread = spdk_get_thread();

		rc = spdk_bdev_module_claim_bdev(bdev, pt_node->base_desc, pt_node->pt_bdev.module);
		if (rc) {
			SPDK_ERRLOG("could not claim bdev %s\n", bdev_name);
			spdk_bdev_close(pt_node->base_desc);
			TAILQ_REMOVE(&g_pt_nodes, pt_node, link);
			spdk_io_device_unregister(pt_node, NULL);
			free(pt_node->pt_bdev.name);
			free(pt_node);
			break;
		}
		SPDK_NOTICELOG("bdev claimed\n");

		rc = spdk_bdev_register(&pt_node->pt_bdev);
		if (rc) {
			SPDK_ERRLOG("could not register pt_bdev\n");
			spdk_bdev_module_release_bdev(&pt_node->pt_bdev);
			spdk_bdev_close(pt_node->base_desc);
			TAILQ_REMOVE(&g_pt_nodes, pt_node, link);
			spdk_io_device_unregister(pt_node, NULL);
			free(pt_node->pt_bdev.name);
			free(pt_node);
			break;
		}
		SPDK_NOTICELOG("pt_bdev registered\n");
		SPDK_NOTICELOG("created pt_bdev for: %s\n", name->vbdev_name);
	}

	return rc;
}

/* Create the passthru disk from the given bdev and vbdev name. */
int
bdev_passthru_create_disk(const char *bdev_name, const char *vbdev_name)
{
	int rc;

	/* Insert the bdev name into our global name list even if it doesn't exist yet;
	 * it may show up soon...
	 */
	rc = vbdev_passthru_insert_name(bdev_name, vbdev_name);
	if (rc) {
		return rc;
	}

	rc = vbdev_passthru_register(bdev_name);
	if (rc == -ENODEV) {
		/* This is not an error, we tracked the name above and it still
		 * may show up later.
		 */
		SPDK_NOTICELOG("vbdev creation deferred pending base bdev arrival\n");
		rc = 0;
	}

	return rc;
}

void
bdev_passthru_delete_disk(struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn, void *cb_arg)
{
	struct bdev_names *name;

	if (!bdev || bdev->module != &passthru_if) {
		cb_fn(cb_arg, -ENODEV);
		return;
	}

	/* Remove the association (vbdev, bdev) from g_bdev_names. This is required so that the
	 * vbdev does not get re-created if the same bdev is constructed at some other time,
	 * unless the underlying bdev was hot-removed.
	 */
	TAILQ_FOREACH(name, &g_bdev_names, link) {
		if (strcmp(name->vbdev_name, bdev->name) == 0) {
			TAILQ_REMOVE(&g_bdev_names, name, link);
			free(name->bdev_name);
			free(name->vbdev_name);
			free(name);
			break;
		}
	}

	/* Additional cleanup happens in the destruct callback. */
	spdk_bdev_unregister(bdev, cb_fn, cb_arg);
}

/* Because we registered this function as our module's examine_config callback,
 * we'll get this call anytime a new bdev shows up. Here we need to decide if we
 * care about it and if so what to do. We parsed the config file at init so we
 * check the new bdev against the list we built up at that time and if the user
 * configured us to attach to this bdev, here's where we do it.
 */
static void
vbdev_passthru_examine(struct spdk_bdev *bdev)
{
	vbdev_passthru_register(bdev->name);

	spdk_bdev_module_examine_done(&passthru_if);
}

SPDK_LOG_REGISTER_COMPONENT(vbdev_passthru)
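
/*
 * Example usage (illustrative sketch, not part of the module): once this module is
 * built into an SPDK application, a passthru vbdev can be layered on top of an
 * existing bdev with the bdev_passthru_create RPC. The method and parameter names
 * match the ones emitted by vbdev_passthru_config_json() above; the bdev names are
 * placeholders.
 *
 *   {
 *     "jsonrpc": "2.0",
 *     "id": 1,
 *     "method": "bdev_passthru_create",
 *     "params": {
 *       "base_bdev_name": "Malloc0",
 *       "name": "TestPassthru0"
 *     }
 *   }
 */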