/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * This is a simple example of a virtual block device module that passes IO
 * down to a bdev (or bdevs) that it's configured to attach to.
 */

#include "spdk/stdinc.h"

#include "vbdev_passthru.h"
#include "spdk/rpc.h"
#include "spdk/env.h"
#include "spdk/conf.h"
#include "spdk/endian.h"
#include "spdk/string.h"
#include "spdk/thread.h"
#include "spdk/util.h"

#include "spdk/bdev_module.h"
#include "spdk_internal/log.h"

static int vbdev_passthru_init(void);
static void vbdev_passthru_get_spdk_running_config(FILE *fp);
static int vbdev_passthru_get_ctx_size(void);
static void vbdev_passthru_examine(struct spdk_bdev *bdev);
static void vbdev_passthru_finish(void);
static int vbdev_passthru_config_json(struct spdk_json_write_ctx *w);

static struct spdk_bdev_module passthru_if = {
	.name = "passthru",
	.module_init = vbdev_passthru_init,
	.config_text = vbdev_passthru_get_spdk_running_config,
	.get_ctx_size = vbdev_passthru_get_ctx_size,
	.examine_config = vbdev_passthru_examine,
	.module_fini = vbdev_passthru_finish,
	.config_json = vbdev_passthru_config_json
};

SPDK_BDEV_MODULE_REGISTER(passthru, &passthru_if)

/* List of pt_bdev names and their base bdevs via configuration file.
 * Used so we can parse the conf once at init and use this list in examine().
 */
struct bdev_names {
	char			*vbdev_name;
	char			*bdev_name;
	TAILQ_ENTRY(bdev_names)	link;
};
static TAILQ_HEAD(, bdev_names) g_bdev_names = TAILQ_HEAD_INITIALIZER(g_bdev_names);
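
/* For illustration only: after parsing a (hypothetical) config entry such as
 * "PT Malloc0 PT0", the list above would hold one node with
 * bdev_name = "Malloc0" and vbdev_name = "PT0".
 */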

/* List of virtual bdevs and associated info for each. */
struct vbdev_passthru {
	struct spdk_bdev		*base_bdev; /* the thing we're attaching to */
	struct spdk_bdev_desc		*base_desc; /* its descriptor we get from open */
	struct spdk_bdev		pt_bdev;    /* the PT virtual bdev */
	TAILQ_ENTRY(vbdev_passthru)	link;
	struct spdk_thread		*thread;    /* thread where base device is opened */
};
static TAILQ_HEAD(, vbdev_passthru) g_pt_nodes = TAILQ_HEAD_INITIALIZER(g_pt_nodes);

/* The pt vbdev channel struct. It is allocated and freed on my behalf by the io channel code.
 * If this vbdev needed to implement a poller or a queue for IO, this is where those things
 * would be defined. This passthru bdev doesn't actually need to allocate a channel, it could
 * simply pass back the channel of the bdev underneath it, but for example purposes we will
 * present its own to the upper layers.
 */
struct pt_io_channel {
	struct spdk_io_channel	*base_ch; /* IO channel of base device */
};

/* This pt_bdev module doesn't strictly need it, but this is essentially the per-IO
 * context that the bdev layer hands us.
 */
struct passthru_bdev_io {
	uint8_t test;

	/* bdev related */
	struct spdk_io_channel *ch;

	/* for bdev_io_wait */
	struct spdk_bdev_io_wait_entry bdev_io_wait;
};

static void
vbdev_passthru_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io);

/* Callback for unregistering the IO device. */
static void
_device_unregister_cb(void *io_device)
{
	struct vbdev_passthru *pt_node = io_device;

	/* Done with this pt_node. */
	free(pt_node->pt_bdev.name);
	free(pt_node);
}

/* Wrapper for the bdev close operation. */
static void
_vbdev_passthru_destruct(void *ctx)
{
	struct spdk_bdev_desc *desc = ctx;

	spdk_bdev_close(desc);
}

/* Called after we've unregistered following a hot remove callback.
 * Our finish entry point will be called next.
 */
static int
vbdev_passthru_destruct(void *ctx)
{
	struct vbdev_passthru *pt_node = (struct vbdev_passthru *)ctx;

	/* It is important to follow this exact sequence of steps for destroying
	 * a vbdev...
	 */

	TAILQ_REMOVE(&g_pt_nodes, pt_node, link);

	/* Unclaim the underlying bdev. */
	spdk_bdev_module_release_bdev(pt_node->base_bdev);

	/* Close the underlying bdev on its same opened thread. */
	if (pt_node->thread && pt_node->thread != spdk_get_thread()) {
		spdk_thread_send_msg(pt_node->thread, _vbdev_passthru_destruct, pt_node->base_desc);
	} else {
		spdk_bdev_close(pt_node->base_desc);
	}

	/* Unregister the io_device. */
	spdk_io_device_unregister(pt_node, _device_unregister_cb);

	return 0;
}

/* Completion callback for IOs that were issued from this bdev. The original bdev_io
 * is passed in as an arg so we'll complete that one with the appropriate status
 * and then free the one that this module issued.
 */
static void
_pt_complete_io(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	struct spdk_bdev_io *orig_io = cb_arg;
	int status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
	struct passthru_bdev_io *io_ctx = (struct passthru_bdev_io *)orig_io->driver_ctx;

	/* We set up this value in the submission routine, just showing here that it is
	 * passed back to us.
	 */
	if (io_ctx->test != 0x5a) {
		SPDK_ERRLOG("Error, original IO device_ctx is wrong! 0x%x\n",
			    io_ctx->test);
	}

	/* Complete the original IO and then free the one that we created here
	 * as a result of issuing an IO via submit_request.
	 */
	spdk_bdev_io_complete(orig_io, status);
	spdk_bdev_free_io(bdev_io);
}

static void
_pt_complete_zcopy_io(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	struct spdk_bdev_io *orig_io = cb_arg;
	int status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
	struct passthru_bdev_io *io_ctx = (struct passthru_bdev_io *)orig_io->driver_ctx;

	/* We set up this value in the submission routine, just showing here that it is
	 * passed back to us.
	 */
	if (io_ctx->test != 0x5a) {
		SPDK_ERRLOG("Error, original IO device_ctx is wrong! 0x%x\n",
			    io_ctx->test);
	}

	/* Complete the original IO and then free the one that we created here
	 * as a result of issuing an IO via submit_request.
	 */
	spdk_bdev_io_set_buf(orig_io, bdev_io->u.bdev.iovs[0].iov_base, bdev_io->u.bdev.iovs[0].iov_len);
	spdk_bdev_io_complete(orig_io, status);
	spdk_bdev_free_io(bdev_io);
}

static void
vbdev_passthru_resubmit_io(void *arg)
{
	struct spdk_bdev_io *bdev_io = (struct spdk_bdev_io *)arg;
	struct passthru_bdev_io *io_ctx = (struct passthru_bdev_io *)bdev_io->driver_ctx;

	vbdev_passthru_submit_request(io_ctx->ch, bdev_io);
}

static void
vbdev_passthru_queue_io(struct spdk_bdev_io *bdev_io)
{
	struct passthru_bdev_io *io_ctx = (struct passthru_bdev_io *)bdev_io->driver_ctx;
	struct pt_io_channel *pt_ch = spdk_io_channel_get_ctx(io_ctx->ch);
	int rc;

	io_ctx->bdev_io_wait.bdev = bdev_io->bdev;
	io_ctx->bdev_io_wait.cb_fn = vbdev_passthru_resubmit_io;
	io_ctx->bdev_io_wait.cb_arg = bdev_io;

	/* Queue the IO using the channel of the base device. */
	rc = spdk_bdev_queue_io_wait(bdev_io->bdev, pt_ch->base_ch, &io_ctx->bdev_io_wait);
	if (rc != 0) {
		SPDK_ERRLOG("Queue io failed in vbdev_passthru_queue_io, rc=%d.\n", rc);
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
	}
}
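
/* Note on the retry path above: when a submission to the base bdev fails with
 * -ENOMEM, the submit paths below stash the upper-layer channel in io_ctx->ch
 * and call vbdev_passthru_queue_io(), which parks the IO on the base channel via
 * spdk_bdev_queue_io_wait(). Once the base bdev has resources again, it invokes
 * vbdev_passthru_resubmit_io(), which simply re-enters vbdev_passthru_submit_request().
 */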

/* Callback for getting a buf from the bdev pool in the event that the caller passed
 * in NULL. We need to own the buffer so it doesn't get freed by another vbdev module
 * beneath us before we're done with it. That won't happen in this example, but it could
 * if this example were used as a template for something more complex.
 */
static void
pt_read_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io, bool success)
{
	struct vbdev_passthru *pt_node = SPDK_CONTAINEROF(bdev_io->bdev, struct vbdev_passthru,
					 pt_bdev);
	struct pt_io_channel *pt_ch = spdk_io_channel_get_ctx(ch);
	struct passthru_bdev_io *io_ctx = (struct passthru_bdev_io *)bdev_io->driver_ctx;
	int rc;

	if (!success) {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
		return;
	}

	if (bdev_io->u.bdev.md_buf == NULL) {
		rc = spdk_bdev_readv_blocks(pt_node->base_desc, pt_ch->base_ch, bdev_io->u.bdev.iovs,
					    bdev_io->u.bdev.iovcnt, bdev_io->u.bdev.offset_blocks,
					    bdev_io->u.bdev.num_blocks, _pt_complete_io,
					    bdev_io);
	} else {
		rc = spdk_bdev_readv_blocks_with_md(pt_node->base_desc, pt_ch->base_ch,
						    bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
						    bdev_io->u.bdev.md_buf,
						    bdev_io->u.bdev.offset_blocks,
						    bdev_io->u.bdev.num_blocks,
						    _pt_complete_io, bdev_io);
	}

	if (rc != 0) {
		if (rc == -ENOMEM) {
			SPDK_ERRLOG("No memory, start to queue io for passthru.\n");
			io_ctx->ch = ch;
			vbdev_passthru_queue_io(bdev_io);
		} else {
			SPDK_ERRLOG("ERROR on bdev_io submission!\n");
			spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
		}
	}
}

/* Called when someone above submits IO to this pt vbdev. We're simply passing it on here
 * via SPDK IO calls which in turn allocate another bdev IO and call our cpl callback provided
 * below along with the original bdev_io so that we can complete it once this IO completes.
 */
static void
vbdev_passthru_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
{
	struct vbdev_passthru *pt_node = SPDK_CONTAINEROF(bdev_io->bdev, struct vbdev_passthru, pt_bdev);
	struct pt_io_channel *pt_ch = spdk_io_channel_get_ctx(ch);
	struct passthru_bdev_io *io_ctx = (struct passthru_bdev_io *)bdev_io->driver_ctx;
	int rc = 0;

	/* Set up a per-IO context value; we don't do anything with it in the vbdev other
	 * than confirm we get the same thing back in the completion callback just to
	 * demonstrate.
	 */
	io_ctx->test = 0x5a;

	switch (bdev_io->type) {
	case SPDK_BDEV_IO_TYPE_READ:
		spdk_bdev_io_get_buf(bdev_io, pt_read_get_buf_cb,
				     bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
		break;
	case SPDK_BDEV_IO_TYPE_WRITE:
		if (bdev_io->u.bdev.md_buf == NULL) {
			rc = spdk_bdev_writev_blocks(pt_node->base_desc, pt_ch->base_ch, bdev_io->u.bdev.iovs,
						     bdev_io->u.bdev.iovcnt, bdev_io->u.bdev.offset_blocks,
						     bdev_io->u.bdev.num_blocks, _pt_complete_io,
						     bdev_io);
		} else {
			rc = spdk_bdev_writev_blocks_with_md(pt_node->base_desc, pt_ch->base_ch,
							     bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
							     bdev_io->u.bdev.md_buf,
							     bdev_io->u.bdev.offset_blocks,
							     bdev_io->u.bdev.num_blocks,
							     _pt_complete_io, bdev_io);
		}
		break;
	case SPDK_BDEV_IO_TYPE_WRITE_ZEROES:
		rc = spdk_bdev_write_zeroes_blocks(pt_node->base_desc, pt_ch->base_ch,
						   bdev_io->u.bdev.offset_blocks,
						   bdev_io->u.bdev.num_blocks,
						   _pt_complete_io, bdev_io);
		break;
	case SPDK_BDEV_IO_TYPE_UNMAP:
		rc = spdk_bdev_unmap_blocks(pt_node->base_desc, pt_ch->base_ch,
					    bdev_io->u.bdev.offset_blocks,
					    bdev_io->u.bdev.num_blocks,
					    _pt_complete_io, bdev_io);
		break;
	case SPDK_BDEV_IO_TYPE_FLUSH:
		rc = spdk_bdev_flush_blocks(pt_node->base_desc, pt_ch->base_ch,
					    bdev_io->u.bdev.offset_blocks,
					    bdev_io->u.bdev.num_blocks,
					    _pt_complete_io, bdev_io);
		break;
	case SPDK_BDEV_IO_TYPE_RESET:
		rc = spdk_bdev_reset(pt_node->base_desc, pt_ch->base_ch,
				     _pt_complete_io, bdev_io);
		break;
	case SPDK_BDEV_IO_TYPE_ZCOPY:
		rc = spdk_bdev_zcopy_start(pt_node->base_desc, pt_ch->base_ch, bdev_io->u.bdev.offset_blocks,
					   bdev_io->u.bdev.num_blocks, bdev_io->u.bdev.zcopy.populate,
					   _pt_complete_zcopy_io, bdev_io);
		break;
	case SPDK_BDEV_IO_TYPE_ABORT:
		rc = spdk_bdev_abort(pt_node->base_desc, pt_ch->base_ch, bdev_io->u.abort.bio_to_abort,
				     _pt_complete_io, bdev_io);
		break;
	default:
		SPDK_ERRLOG("passthru: unknown I/O type %d\n", bdev_io->type);
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
		return;
	}

	if (rc != 0) {
		if (rc == -ENOMEM) {
			SPDK_ERRLOG("No memory, start to queue io for passthru.\n");
			io_ctx->ch = ch;
			vbdev_passthru_queue_io(bdev_io);
		} else {
			SPDK_ERRLOG("ERROR on bdev_io submission!\n");
			spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
		}
	}
}

/* We'll just ask the base bdev and let it answer. However, if we were more
 * restrictive for some reason (or less restrictive) we could get the response
 * back and modify it according to our purposes.
 */
static bool
vbdev_passthru_io_type_supported(void *ctx, enum spdk_bdev_io_type io_type)
{
	struct vbdev_passthru *pt_node = (struct vbdev_passthru *)ctx;

	return spdk_bdev_io_type_supported(pt_node->base_bdev, io_type);
}
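
/* For example (purely hypothetical, not part of this module), a more restrictive
 * variant of the function above could hide a capability of the base bdev:
 *
 *	if (io_type == SPDK_BDEV_IO_TYPE_UNMAP) {
 *		return false;
 *	}
 *	return spdk_bdev_io_type_supported(pt_node->base_bdev, io_type);
 */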

/* We supplied this as an entry point for upper layers that want to communicate with this
 * bdev. This is how they get a channel. We are passed the same context we provided when
 * we created our PT vbdev in examine() which, for this bdev, is the address of one of
 * our context nodes. From here we'll ask the SPDK channel code to fill out our channel
 * struct and we'll keep it in our PT node.
 */
static struct spdk_io_channel *
vbdev_passthru_get_io_channel(void *ctx)
{
	struct vbdev_passthru *pt_node = (struct vbdev_passthru *)ctx;
	struct spdk_io_channel *pt_ch = NULL;

	/* The IO channel code will allocate a channel for us which consists of
	 * the SPDK channel structure plus the size of our pt_io_channel struct
	 * that we passed in when we registered our IO device. It will then call
	 * our channel create callback to populate any elements that we need to
	 * update.
	 */
	pt_ch = spdk_get_io_channel(pt_node);

	return pt_ch;
}

/* This is the output for bdev_get_bdevs() for this vbdev */
static int
vbdev_passthru_dump_info_json(void *ctx, struct spdk_json_write_ctx *w)
{
	struct vbdev_passthru *pt_node = (struct vbdev_passthru *)ctx;

	spdk_json_write_name(w, "passthru");
	spdk_json_write_object_begin(w);
	spdk_json_write_named_string(w, "name", spdk_bdev_get_name(&pt_node->pt_bdev));
	spdk_json_write_named_string(w, "base_bdev_name", spdk_bdev_get_name(pt_node->base_bdev));
	spdk_json_write_object_end(w);

	return 0;
}

/* This is used to generate JSON that can configure this module to its current state. */
static int
vbdev_passthru_config_json(struct spdk_json_write_ctx *w)
{
	struct vbdev_passthru *pt_node;

	TAILQ_FOREACH(pt_node, &g_pt_nodes, link) {
		spdk_json_write_object_begin(w);
		spdk_json_write_named_string(w, "method", "bdev_passthru_create");
		spdk_json_write_named_object_begin(w, "params");
		spdk_json_write_named_string(w, "base_bdev_name", spdk_bdev_get_name(pt_node->base_bdev));
		spdk_json_write_named_string(w, "name", spdk_bdev_get_name(&pt_node->pt_bdev));
		spdk_json_write_object_end(w);
		spdk_json_write_object_end(w);
	}
	return 0;
}

/* We provide this callback for the SPDK channel code to create a channel using
 * the channel struct we provided in our module get_io_channel() entry point. Here
 * we get and save off an underlying base channel of the device below us so that
 * we can communicate with the base bdev on a per-channel basis. If we needed
 * our own poller for this vbdev, we'd register it here.
 */
static int
pt_bdev_ch_create_cb(void *io_device, void *ctx_buf)
{
	struct pt_io_channel *pt_ch = ctx_buf;
	struct vbdev_passthru *pt_node = io_device;

	pt_ch->base_ch = spdk_bdev_get_io_channel(pt_node->base_desc);

	return 0;
}

/* We provide this callback for the SPDK channel code to destroy a channel
 * created with our create callback. We just need to undo anything we did
 * when we created. If this bdev used its own poller, we'd unregister it here.
 */
static void
pt_bdev_ch_destroy_cb(void *io_device, void *ctx_buf)
{
	struct pt_io_channel *pt_ch = ctx_buf;

	spdk_put_io_channel(pt_ch->base_ch);
}
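
/* For reference, each entry emitted by vbdev_passthru_config_json() above is a JSON-RPC
 * call of the following shape (the bdev names here are hypothetical):
 *
 *	{
 *		"method": "bdev_passthru_create",
 *		"params": {
 *			"base_bdev_name": "Malloc0",
 *			"name": "PT0"
 *		}
 *	}
 */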

/* Create the passthru association from the bdev and vbdev name and insert
 * on the global list.
 */
static int
vbdev_passthru_insert_name(const char *bdev_name, const char *vbdev_name)
{
	struct bdev_names *name;

	TAILQ_FOREACH(name, &g_bdev_names, link) {
		if (strcmp(vbdev_name, name->vbdev_name) == 0) {
			SPDK_ERRLOG("passthru bdev %s already exists\n", vbdev_name);
			return -EEXIST;
		}
	}

	name = calloc(1, sizeof(struct bdev_names));
	if (!name) {
		SPDK_ERRLOG("could not allocate bdev_names\n");
		return -ENOMEM;
	}

	name->bdev_name = strdup(bdev_name);
	if (!name->bdev_name) {
		SPDK_ERRLOG("could not allocate name->bdev_name\n");
		free(name);
		return -ENOMEM;
	}

	name->vbdev_name = strdup(vbdev_name);
	if (!name->vbdev_name) {
		SPDK_ERRLOG("could not allocate name->vbdev_name\n");
		free(name->bdev_name);
		free(name);
		return -ENOMEM;
	}

	TAILQ_INSERT_TAIL(&g_bdev_names, name, link);

	return 0;
}

/* On init, just parse the config file and build the list of pt vbdev and base bdev name pairs. */
static int
vbdev_passthru_init(void)
{
	struct spdk_conf_section *sp = NULL;
	const char *conf_bdev_name = NULL;
	const char *conf_vbdev_name = NULL;
	struct bdev_names *name;
	int i, rc;

	sp = spdk_conf_find_section(NULL, "Passthru");
	if (sp == NULL) {
		return 0;
	}

	for (i = 0; ; i++) {
		if (!spdk_conf_section_get_nval(sp, "PT", i)) {
			break;
		}

		conf_bdev_name = spdk_conf_section_get_nmval(sp, "PT", i, 0);
		if (!conf_bdev_name) {
			SPDK_ERRLOG("Passthru configuration missing bdev name\n");
			break;
		}

		conf_vbdev_name = spdk_conf_section_get_nmval(sp, "PT", i, 1);
		if (!conf_vbdev_name) {
			SPDK_ERRLOG("Passthru configuration missing pt_bdev name\n");
			break;
		}

		rc = vbdev_passthru_insert_name(conf_bdev_name, conf_vbdev_name);
		if (rc != 0) {
			return rc;
		}
	}

	TAILQ_FOREACH(name, &g_bdev_names, link) {
		SPDK_NOTICELOG("conf parse matched: %s\n", name->bdev_name);
	}

	return 0;
}

/* Called when the entire module is being torn down. */
static void
vbdev_passthru_finish(void)
{
	struct bdev_names *name;

	while ((name = TAILQ_FIRST(&g_bdev_names))) {
		TAILQ_REMOVE(&g_bdev_names, name, link);
		free(name->bdev_name);
		free(name->vbdev_name);
		free(name);
	}
}

/* During init we'll be asked how much memory we'd like passed to us
 * in bdev_io structures as context. Here's where we specify how
 * much context we want per IO.
 */
static int
vbdev_passthru_get_ctx_size(void)
{
	return sizeof(struct passthru_bdev_io);
}

/* Called when SPDK wants to save the current config of this vbdev module to
 * a file.
 */
static void
vbdev_passthru_get_spdk_running_config(FILE *fp)
{
	struct bdev_names *names = NULL;

	fprintf(fp, "\n[Passthru]\n");
	TAILQ_FOREACH(names, &g_bdev_names, link) {
		fprintf(fp, " PT %s %s\n", names->bdev_name, names->vbdev_name);
	}
	fprintf(fp, "\n");
}

/* While vbdev_passthru_config_json() is used to generate per-module JSON config data, this
 * function is called to output any per-bdev-specific methods. For the PT module, there are
 * none.
 */
static void
vbdev_passthru_write_config_json(struct spdk_bdev *bdev, struct spdk_json_write_ctx *w)
{
	/* No config per bdev needed */
}
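
/* For reference, the legacy INI-style section parsed by vbdev_passthru_init() above and
 * written out by vbdev_passthru_get_spdk_running_config() looks like this (the bdev and
 * vbdev names are hypothetical):
 *
 * [Passthru]
 *  PT Malloc0 PT0
 */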

/* When we register our bdev this is how we specify our entry points. */
static const struct spdk_bdev_fn_table vbdev_passthru_fn_table = {
	.destruct		= vbdev_passthru_destruct,
	.submit_request		= vbdev_passthru_submit_request,
	.io_type_supported	= vbdev_passthru_io_type_supported,
	.get_io_channel		= vbdev_passthru_get_io_channel,
	.dump_info_json		= vbdev_passthru_dump_info_json,
	.write_config_json	= vbdev_passthru_write_config_json,
};

/* Called when the underlying base bdev goes away. */
static void
vbdev_passthru_base_bdev_hotremove_cb(void *ctx)
{
	struct vbdev_passthru *pt_node, *tmp;
	struct spdk_bdev *bdev_find = ctx;

	TAILQ_FOREACH_SAFE(pt_node, &g_pt_nodes, link, tmp) {
		if (bdev_find == pt_node->base_bdev) {
			spdk_bdev_unregister(&pt_node->pt_bdev, NULL, NULL);
		}
	}
}

/* Create and register the passthru vbdev if we find it in our list of bdev names.
 * This can be called either by the examine path or the RPC method.
 */
static int
vbdev_passthru_register(struct spdk_bdev *bdev)
{
	struct bdev_names *name;
	struct vbdev_passthru *pt_node;
	int rc = 0;

	/* Check our list of names from config versus this bdev and if
	 * there's a match, create the pt_node & bdev accordingly.
	 */
	TAILQ_FOREACH(name, &g_bdev_names, link) {
		if (strcmp(name->bdev_name, bdev->name) != 0) {
			continue;
		}

		SPDK_NOTICELOG("Match on %s\n", bdev->name);
		pt_node = calloc(1, sizeof(struct vbdev_passthru));
		if (!pt_node) {
			rc = -ENOMEM;
			SPDK_ERRLOG("could not allocate pt_node\n");
			break;
		}

		/* The base bdev that we're attaching to. */
		pt_node->base_bdev = bdev;
		pt_node->pt_bdev.name = strdup(name->vbdev_name);
		if (!pt_node->pt_bdev.name) {
			rc = -ENOMEM;
			SPDK_ERRLOG("could not allocate pt_bdev name\n");
			free(pt_node);
			break;
		}
		pt_node->pt_bdev.product_name = "passthru";

		/* Copy some properties from the underlying base bdev. */
		pt_node->pt_bdev.write_cache = bdev->write_cache;
		pt_node->pt_bdev.required_alignment = bdev->required_alignment;
		pt_node->pt_bdev.optimal_io_boundary = bdev->optimal_io_boundary;
		pt_node->pt_bdev.blocklen = bdev->blocklen;
		pt_node->pt_bdev.blockcnt = bdev->blockcnt;

		pt_node->pt_bdev.md_interleave = bdev->md_interleave;
		pt_node->pt_bdev.md_len = bdev->md_len;
		pt_node->pt_bdev.dif_type = bdev->dif_type;
		pt_node->pt_bdev.dif_is_head_of_md = bdev->dif_is_head_of_md;
		pt_node->pt_bdev.dif_check_flags = bdev->dif_check_flags;

		/* This is the context that is passed to us when the bdev
		 * layer calls in, so we'll save our pt_bdev node here.
		 */
		pt_node->pt_bdev.ctxt = pt_node;
		pt_node->pt_bdev.fn_table = &vbdev_passthru_fn_table;
		pt_node->pt_bdev.module = &passthru_if;
		TAILQ_INSERT_TAIL(&g_pt_nodes, pt_node, link);

		spdk_io_device_register(pt_node, pt_bdev_ch_create_cb, pt_bdev_ch_destroy_cb,
					sizeof(struct pt_io_channel),
					name->vbdev_name);
		SPDK_NOTICELOG("io_device created at: 0x%p\n", pt_node);

		rc = spdk_bdev_open(bdev, true, vbdev_passthru_base_bdev_hotremove_cb,
				    bdev, &pt_node->base_desc);
		if (rc) {
			SPDK_ERRLOG("could not open bdev %s\n", spdk_bdev_get_name(bdev));
			TAILQ_REMOVE(&g_pt_nodes, pt_node, link);
			spdk_io_device_unregister(pt_node, NULL);
			free(pt_node->pt_bdev.name);
			free(pt_node);
			break;
		}
		SPDK_NOTICELOG("bdev opened\n");

		/* Save the thread where the base device is opened. */
		pt_node->thread = spdk_get_thread();

		rc = spdk_bdev_module_claim_bdev(bdev, pt_node->base_desc, pt_node->pt_bdev.module);
		if (rc) {
			SPDK_ERRLOG("could not claim bdev %s\n", spdk_bdev_get_name(bdev));
			spdk_bdev_close(pt_node->base_desc);
			TAILQ_REMOVE(&g_pt_nodes, pt_node, link);
			spdk_io_device_unregister(pt_node, NULL);
			free(pt_node->pt_bdev.name);
			free(pt_node);
			break;
		}
		SPDK_NOTICELOG("bdev claimed\n");

		rc = spdk_bdev_register(&pt_node->pt_bdev);
		if (rc) {
			SPDK_ERRLOG("could not register pt_bdev\n");
			/* Release the claim on the base bdev, matching the claim taken above. */
			spdk_bdev_module_release_bdev(pt_node->base_bdev);
			spdk_bdev_close(pt_node->base_desc);
			TAILQ_REMOVE(&g_pt_nodes, pt_node, link);
			spdk_io_device_unregister(pt_node, NULL);
			free(pt_node->pt_bdev.name);
			free(pt_node);
			break;
		}
		SPDK_NOTICELOG("pt_bdev registered\n");
		SPDK_NOTICELOG("created pt_bdev for: %s\n", name->vbdev_name);
	}

	return rc;
}

/* Create the passthru disk from the given bdev and vbdev name. */
int
bdev_passthru_create_disk(const char *bdev_name, const char *vbdev_name)
{
	struct spdk_bdev *bdev = NULL;
	int rc = 0;

	/* Insert the bdev name into our global list even if the bdev doesn't exist yet;
	 * it may show up soon...
	 */
	rc = vbdev_passthru_insert_name(bdev_name, vbdev_name);
	if (rc) {
		return rc;
	}

	bdev = spdk_bdev_get_by_name(bdev_name);
	if (!bdev) {
		/* This is not an error, we tracked the name above and it still
		 * may show up later.
		 */
		SPDK_NOTICELOG("vbdev creation deferred pending base bdev arrival\n");
		return 0;
	}

	return vbdev_passthru_register(bdev);
}
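
/* A minimal usage sketch (hypothetical caller, e.g. an RPC handler; not part of this
 * file) for the create path above:
 *
 *	int rc = bdev_passthru_create_disk("Malloc0", "PT0");
 *	if (rc != 0) {
 *		SPDK_ERRLOG("could not create passthru vbdev: %d\n", rc);
 *	}
 */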

void
bdev_passthru_delete_disk(struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn, void *cb_arg)
{
	struct bdev_names *name;

	if (!bdev || bdev->module != &passthru_if) {
		cb_fn(cb_arg, -ENODEV);
		return;
	}

	/* Remove the association (vbdev, bdev) from g_bdev_names. This is required so that the
	 * vbdev does not get re-created if the same bdev is constructed at some other time,
	 * unless the underlying bdev was hot-removed.
	 */
	TAILQ_FOREACH(name, &g_bdev_names, link) {
		if (strcmp(name->vbdev_name, bdev->name) == 0) {
			TAILQ_REMOVE(&g_bdev_names, name, link);
			free(name->bdev_name);
			free(name->vbdev_name);
			free(name);
			break;
		}
	}

	/* Additional cleanup happens in the destruct callback. */
	spdk_bdev_unregister(bdev, cb_fn, cb_arg);
}

/* Because we specified this function as the examine_config entry point when we
 * registered our module, we'll get this call anytime a new bdev shows up.
 * Here we need to decide if we care about it and if so what to do. We
 * parsed the config file at init so we check the new bdev against the list
 * we built up at that time and if the user configured us to attach to this
 * bdev, here's where we do it.
 */
static void
vbdev_passthru_examine(struct spdk_bdev *bdev)
{
	vbdev_passthru_register(bdev);

	spdk_bdev_module_examine_done(&passthru_if);
}

SPDK_LOG_REGISTER_COMPONENT("vbdev_passthru", SPDK_LOG_VBDEV_PASSTHRU)