/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) Intel Corporation.
 * All rights reserved.
 */

/*
 * This is a simple example of a virtual block device module that passes IO
 * down to a bdev (or bdevs) that it's configured to attach to.
 */

#include "spdk/stdinc.h"

#include "vbdev_passthru.h"
#include "spdk/rpc.h"
#include "spdk/env.h"
#include "spdk/endian.h"
#include "spdk/string.h"
#include "spdk/thread.h"
#include "spdk/util.h"

#include "spdk/bdev_module.h"
#include "spdk/log.h"


static int vbdev_passthru_init(void);
static int vbdev_passthru_get_ctx_size(void);
static void vbdev_passthru_examine(struct spdk_bdev *bdev);
static void vbdev_passthru_finish(void);
static int vbdev_passthru_config_json(struct spdk_json_write_ctx *w);

static struct spdk_bdev_module passthru_if = {
	.name = "passthru_external",
	.module_init = vbdev_passthru_init,
	.get_ctx_size = vbdev_passthru_get_ctx_size,
	.examine_config = vbdev_passthru_examine,
	.module_fini = vbdev_passthru_finish,
	.config_json = vbdev_passthru_config_json
};

SPDK_BDEV_MODULE_REGISTER(ext_passthru, &passthru_if)

/* List of pt_bdev names and their base bdevs via configuration file.
 * Used so we can parse the conf once at init and use this list in examine().
 */
struct bdev_names {
	char			*vbdev_name;
	char			*bdev_name;
	TAILQ_ENTRY(bdev_names)	link;
};
static TAILQ_HEAD(, bdev_names) g_bdev_names = TAILQ_HEAD_INITIALIZER(g_bdev_names);

/* List of virtual bdevs and associated info for each. */
struct vbdev_passthru {
	struct spdk_bdev		*base_bdev; /* the thing we're attaching to */
	struct spdk_bdev_desc		*base_desc; /* its descriptor we get from open */
	struct spdk_bdev		pt_bdev;    /* the PT virtual bdev */
	TAILQ_ENTRY(vbdev_passthru)	link;
	struct spdk_thread		*thread;    /* thread where base device is opened */
};
static TAILQ_HEAD(, vbdev_passthru) g_pt_nodes = TAILQ_HEAD_INITIALIZER(g_pt_nodes);

/* The pt vbdev channel struct. It is allocated and freed on my behalf by the io channel code.
 * If this vbdev needed to implement a poller or a queue for IO, this is where those things
 * would be defined. This passthru bdev doesn't actually need to allocate a channel; it could
 * simply pass back the channel of the bdev underneath it, but for example purposes we present
 * one of its own to the upper layers.
 */
struct pt_io_channel {
	struct spdk_io_channel	*base_ch; /* IO channel of base device */
};

/* Just for fun; this pt_bdev module doesn't need it, but this is essentially a per IO
 * context that we get handed by the bdev layer.
 */
struct passthru_bdev_io {
	uint8_t test;

	/* bdev related */
	struct spdk_io_channel *ch;

	/* for bdev_io_wait */
	struct spdk_bdev_io_wait_entry bdev_io_wait;
};

static void vbdev_passthru_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io);


/* Callback for unregistering the IO device. */
static void
_device_unregister_cb(void *io_device)
{
	struct vbdev_passthru *pt_node = io_device;

	/* Done with this pt_node. */
	free(pt_node->pt_bdev.name);
	free(pt_node);
}

/* Wrapper for the bdev close operation. */
static void
_vbdev_passthru_destruct(void *ctx)
{
	struct spdk_bdev_desc *desc = ctx;

	spdk_bdev_close(desc);
}
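
/* Note on the per-IO context: the bdev layer reserves get_ctx_size() bytes in every
 * bdev_io it hands this module, and the submit and completion paths below recover that
 * context with a cast such as:
 *
 *	struct passthru_bdev_io *io_ctx = (struct passthru_bdev_io *)bdev_io->driver_ctx;
 *
 * (Illustrative only; the actual casts appear in the functions that follow.)
 */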

/* Called after we've unregistered following a hot remove callback.
 * Our finish entry point will be called next.
 */
static int
vbdev_passthru_destruct(void *ctx)
{
	struct vbdev_passthru *pt_node = (struct vbdev_passthru *)ctx;

	/* It is important to follow this exact sequence of steps for destroying
	 * a vbdev...
	 */

	TAILQ_REMOVE(&g_pt_nodes, pt_node, link);

	/* Unclaim the underlying bdev. */
	spdk_bdev_module_release_bdev(pt_node->base_bdev);

	/* Close the underlying bdev on its same opened thread. */
	if (pt_node->thread && pt_node->thread != spdk_get_thread()) {
		spdk_thread_send_msg(pt_node->thread, _vbdev_passthru_destruct, pt_node->base_desc);
	} else {
		spdk_bdev_close(pt_node->base_desc);
	}

	/* Unregister the io_device. */
	spdk_io_device_unregister(pt_node, _device_unregister_cb);

	return 0;
}

/* Completion callback for IOs that were issued from this bdev. The original bdev_io
 * is passed in as an arg so we'll complete that one with the appropriate status
 * and then free the one that this module issued.
 */
static void
_pt_complete_io(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	struct spdk_bdev_io *orig_io = cb_arg;
	int status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
	struct passthru_bdev_io *io_ctx = (struct passthru_bdev_io *)orig_io->driver_ctx;

	/* We set up this value in the submission routine, just showing here that it is
	 * passed back to us.
	 */
	if (io_ctx->test != 0x5a) {
		SPDK_ERRLOG("Error, original IO device_ctx is wrong! 0x%x\n",
			    io_ctx->test);
	}

	/* Complete the original IO and then free the one that we created here
	 * as a result of issuing an IO via submit_request.
	 */
	spdk_bdev_io_complete(orig_io, status);
	spdk_bdev_free_io(bdev_io);
}

static void
_pt_complete_zcopy_io(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	struct spdk_bdev_io *orig_io = cb_arg;
	int status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
	struct passthru_bdev_io *io_ctx = (struct passthru_bdev_io *)orig_io->driver_ctx;

	/* We set up this value in the submission routine, just showing here that it is
	 * passed back to us.
	 */
	if (io_ctx->test != 0x5a) {
		SPDK_ERRLOG("Error, original IO device_ctx is wrong! 0x%x\n",
			    io_ctx->test);
	}

	/* Complete the original IO and then free the one that we created here
	 * as a result of issuing an IO via submit_request.
	 */
	spdk_bdev_io_set_buf(orig_io, bdev_io->u.bdev.iovs[0].iov_base, bdev_io->u.bdev.iovs[0].iov_len);
	spdk_bdev_io_complete(orig_io, status);
	spdk_bdev_free_io(bdev_io);
}

static void
vbdev_passthru_resubmit_io(void *arg)
{
	struct spdk_bdev_io *bdev_io = (struct spdk_bdev_io *)arg;
	struct passthru_bdev_io *io_ctx = (struct passthru_bdev_io *)bdev_io->driver_ctx;

	vbdev_passthru_submit_request(io_ctx->ch, bdev_io);
}

static void
vbdev_passthru_queue_io(struct spdk_bdev_io *bdev_io)
{
	struct passthru_bdev_io *io_ctx = (struct passthru_bdev_io *)bdev_io->driver_ctx;
	struct pt_io_channel *pt_ch = spdk_io_channel_get_ctx(io_ctx->ch);
	int rc;

	io_ctx->bdev_io_wait.bdev = bdev_io->bdev;
	io_ctx->bdev_io_wait.cb_fn = vbdev_passthru_resubmit_io;
	io_ctx->bdev_io_wait.cb_arg = bdev_io;

	/* Queue the IO using the channel of the base device. */
	rc = spdk_bdev_queue_io_wait(bdev_io->bdev, pt_ch->base_ch, &io_ctx->bdev_io_wait);
	if (rc != 0) {
		SPDK_ERRLOG("Queue io failed in vbdev_passthru_queue_io, rc=%d.\n", rc);
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
	}
}
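
/* Both submission paths below use the same retry pattern when the base bdev runs out of
 * bdev_io resources: on -ENOMEM they stash the submitting channel and queue the IO, e.g.
 *
 *	if (rc == -ENOMEM) {
 *		io_ctx->ch = ch;
 *		vbdev_passthru_queue_io(bdev_io);
 *	}
 *
 * so that vbdev_passthru_resubmit_io() can replay it once the base bdev has room again.
 */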

/* Callback for getting a buf from the bdev pool in the event that the caller passed
 * in NULL. We need to own the buffer so it doesn't get freed by another vbdev module
 * beneath us before we're done with it. That won't happen in this example but it could
 * if this example were used as a template for something more complex.
 */
static void
pt_read_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io, bool success)
{
	struct vbdev_passthru *pt_node = SPDK_CONTAINEROF(bdev_io->bdev, struct vbdev_passthru,
					 pt_bdev);
	struct pt_io_channel *pt_ch = spdk_io_channel_get_ctx(ch);
	struct passthru_bdev_io *io_ctx = (struct passthru_bdev_io *)bdev_io->driver_ctx;
	int rc;

	if (!success) {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
		return;
	}

	if (bdev_io->u.bdev.md_buf == NULL) {
		rc = spdk_bdev_readv_blocks(pt_node->base_desc, pt_ch->base_ch, bdev_io->u.bdev.iovs,
					    bdev_io->u.bdev.iovcnt, bdev_io->u.bdev.offset_blocks,
					    bdev_io->u.bdev.num_blocks, _pt_complete_io,
					    bdev_io);
	} else {
		rc = spdk_bdev_readv_blocks_with_md(pt_node->base_desc, pt_ch->base_ch,
						    bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
						    bdev_io->u.bdev.md_buf,
						    bdev_io->u.bdev.offset_blocks,
						    bdev_io->u.bdev.num_blocks,
						    _pt_complete_io, bdev_io);
	}

	if (rc != 0) {
		if (rc == -ENOMEM) {
			SPDK_ERRLOG("No memory, start to queue io for passthru.\n");
			io_ctx->ch = ch;
			vbdev_passthru_queue_io(bdev_io);
		} else {
			SPDK_ERRLOG("ERROR on bdev_io submission!\n");
			spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
		}
	}
}
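
/* For reference, the read path flows as:
 *   vbdev_passthru_submit_request(READ) -> spdk_bdev_io_get_buf() -> pt_read_get_buf_cb()
 *   -> spdk_bdev_readv_blocks[_with_md]() on the base bdev -> _pt_complete_io()
 * which completes the original bdev_io once the base device finishes.
 */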

/* Called when someone above submits IO to this pt vbdev. We're simply passing it on here
 * via SPDK IO calls which, in turn, allocate another bdev IO and call our completion callback
 * provided below along with the original bdev_io so that we can complete it once this IO
 * completes.
 */
static void
vbdev_passthru_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
{
	struct vbdev_passthru *pt_node = SPDK_CONTAINEROF(bdev_io->bdev, struct vbdev_passthru, pt_bdev);
	struct pt_io_channel *pt_ch = spdk_io_channel_get_ctx(ch);
	struct passthru_bdev_io *io_ctx = (struct passthru_bdev_io *)bdev_io->driver_ctx;
	int rc = 0;

	/* Set up a per IO context value; we don't do anything with it in the vbdev other
	 * than confirm we get the same thing back in the completion callback just to
	 * demonstrate.
	 */
	io_ctx->test = 0x5a;

	switch (bdev_io->type) {
	case SPDK_BDEV_IO_TYPE_READ:
		spdk_bdev_io_get_buf(bdev_io, pt_read_get_buf_cb,
				     bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
		break;
	case SPDK_BDEV_IO_TYPE_WRITE:
		if (bdev_io->u.bdev.md_buf == NULL) {
			rc = spdk_bdev_writev_blocks(pt_node->base_desc, pt_ch->base_ch, bdev_io->u.bdev.iovs,
						     bdev_io->u.bdev.iovcnt, bdev_io->u.bdev.offset_blocks,
						     bdev_io->u.bdev.num_blocks, _pt_complete_io,
						     bdev_io);
		} else {
			rc = spdk_bdev_writev_blocks_with_md(pt_node->base_desc, pt_ch->base_ch,
							     bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
							     bdev_io->u.bdev.md_buf,
							     bdev_io->u.bdev.offset_blocks,
							     bdev_io->u.bdev.num_blocks,
							     _pt_complete_io, bdev_io);
		}
		break;
	case SPDK_BDEV_IO_TYPE_WRITE_ZEROES:
		rc = spdk_bdev_write_zeroes_blocks(pt_node->base_desc, pt_ch->base_ch,
						   bdev_io->u.bdev.offset_blocks,
						   bdev_io->u.bdev.num_blocks,
						   _pt_complete_io, bdev_io);
		break;
	case SPDK_BDEV_IO_TYPE_UNMAP:
		rc = spdk_bdev_unmap_blocks(pt_node->base_desc, pt_ch->base_ch,
					    bdev_io->u.bdev.offset_blocks,
					    bdev_io->u.bdev.num_blocks,
					    _pt_complete_io, bdev_io);
		break;
	case SPDK_BDEV_IO_TYPE_FLUSH:
		rc = spdk_bdev_flush_blocks(pt_node->base_desc, pt_ch->base_ch,
					    bdev_io->u.bdev.offset_blocks,
					    bdev_io->u.bdev.num_blocks,
					    _pt_complete_io, bdev_io);
		break;
	case SPDK_BDEV_IO_TYPE_RESET:
		rc = spdk_bdev_reset(pt_node->base_desc, pt_ch->base_ch,
				     _pt_complete_io, bdev_io);
		break;
	case SPDK_BDEV_IO_TYPE_ZCOPY:
		rc = spdk_bdev_zcopy_start(pt_node->base_desc, pt_ch->base_ch, bdev_io->u.bdev.iovs,
					   bdev_io->u.bdev.iovcnt, bdev_io->u.bdev.offset_blocks,
					   bdev_io->u.bdev.num_blocks, bdev_io->u.bdev.zcopy.populate,
					   _pt_complete_zcopy_io, bdev_io);
		break;
	case SPDK_BDEV_IO_TYPE_ABORT:
		rc = spdk_bdev_abort(pt_node->base_desc, pt_ch->base_ch, bdev_io->u.abort.bio_to_abort,
				     _pt_complete_io, bdev_io);
		break;
	default:
		SPDK_ERRLOG("passthru: unknown I/O type %d\n", bdev_io->type);
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
		return;
	}
	if (rc != 0) {
		if (rc == -ENOMEM) {
			SPDK_ERRLOG("No memory, start to queue io for passthru.\n");
			io_ctx->ch = ch;
			vbdev_passthru_queue_io(bdev_io);
		} else {
			SPDK_ERRLOG("ERROR on bdev_io submission!\n");
			spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
		}
	}
}

/* We'll just call the base bdev and let it answer; however, if we were more (or less)
 * restrictive for some reason, we could get the response back and modify it according
 * to our purposes.
 */
static bool
vbdev_passthru_io_type_supported(void *ctx, enum spdk_bdev_io_type io_type)
{
	struct vbdev_passthru *pt_node = (struct vbdev_passthru *)ctx;

	return spdk_bdev_io_type_supported(pt_node->base_bdev, io_type);
}
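
/* A more restrictive vbdev could filter here instead of passing the answer straight
 * through; a hypothetical module that refuses to pass zcopy down might do, for example:
 *
 *	return io_type != SPDK_BDEV_IO_TYPE_ZCOPY &&
 *	       spdk_bdev_io_type_supported(pt_node->base_bdev, io_type);
 *
 * (Sketch only; this passthru module deliberately mirrors the base bdev.)
 */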
364 */ 365 static struct spdk_io_channel * 366 vbdev_passthru_get_io_channel(void *ctx) 367 { 368 struct vbdev_passthru *pt_node = (struct vbdev_passthru *)ctx; 369 struct spdk_io_channel *pt_ch = NULL; 370 371 /* The IO channel code will allocate a channel for us which consists of 372 * the SPDK channel structure plus the size of our pt_io_channel struct 373 * that we passed in when we registered our IO device. It will then call 374 * our channel create callback to populate any elements that we need to 375 * update. 376 */ 377 pt_ch = spdk_get_io_channel(pt_node); 378 379 return pt_ch; 380 } 381 382 /* This is the output for bdev_get_bdevs() for this vbdev */ 383 static int 384 vbdev_passthru_dump_info_json(void *ctx, struct spdk_json_write_ctx *w) 385 { 386 struct vbdev_passthru *pt_node = (struct vbdev_passthru *)ctx; 387 388 spdk_json_write_name(w, "passthru_external"); 389 spdk_json_write_object_begin(w); 390 spdk_json_write_named_string(w, "name", spdk_bdev_get_name(&pt_node->pt_bdev)); 391 spdk_json_write_named_string(w, "base_bdev_name", spdk_bdev_get_name(pt_node->base_bdev)); 392 spdk_json_write_object_end(w); 393 394 return 0; 395 } 396 397 /* This is used to generate JSON that can configure this module to its current state. */ 398 static int 399 vbdev_passthru_config_json(struct spdk_json_write_ctx *w) 400 { 401 struct vbdev_passthru *pt_node; 402 403 TAILQ_FOREACH(pt_node, &g_pt_nodes, link) { 404 spdk_json_write_object_begin(w); 405 spdk_json_write_named_string(w, "method", "construct_ext_passthru_bdev"); 406 spdk_json_write_named_object_begin(w, "params"); 407 spdk_json_write_named_string(w, "base_bdev_name", spdk_bdev_get_name(pt_node->base_bdev)); 408 spdk_json_write_named_string(w, "name", spdk_bdev_get_name(&pt_node->pt_bdev)); 409 spdk_json_write_object_end(w); 410 spdk_json_write_object_end(w); 411 } 412 return 0; 413 } 414 415 /* We provide this callback for the SPDK channel code to create a channel using 416 * the channel struct we provided in our module get_io_channel() entry point. Here 417 * we get and save off an underlying base channel of the device below us so that 418 * we can communicate with the base bdev on a per channel basis. If we needed 419 * our own poller for this vbdev, we'd register it here. 420 */ 421 static int 422 pt_bdev_ch_create_cb(void *io_device, void *ctx_buf) 423 { 424 struct pt_io_channel *pt_ch = ctx_buf; 425 struct vbdev_passthru *pt_node = io_device; 426 427 pt_ch->base_ch = spdk_bdev_get_io_channel(pt_node->base_desc); 428 429 return 0; 430 } 431 432 /* We provide this callback for the SPDK channel code to destroy a channel 433 * created with our create callback. We just need to undo anything we did 434 * when we created. If this bdev used its own poller, we'd unregister it here. 435 */ 436 static void 437 pt_bdev_ch_destroy_cb(void *io_device, void *ctx_buf) 438 { 439 struct pt_io_channel *pt_ch = ctx_buf; 440 441 spdk_put_io_channel(pt_ch->base_ch); 442 } 443 444 /* Create the passthru association from the bdev and vbdev name and insert 445 * on the global list. 
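
/* If this vbdev did need its own per-channel poller, the two callbacks above would be
 * the place to manage it; a sketch (names are hypothetical, not part of this module):
 *
 *	pt_ch->poller = SPDK_POLLER_REGISTER(pt_channel_poll, pt_ch, 0);	// in create_cb
 *	spdk_poller_unregister(&pt_ch->poller);					// in destroy_cb
 */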

/* Create the passthru association from the bdev and vbdev name and insert
 * on the global list.
 */
static int
vbdev_passthru_insert_name(const char *bdev_name, const char *vbdev_name)
{
	struct bdev_names *name;

	TAILQ_FOREACH(name, &g_bdev_names, link) {
		if (strcmp(vbdev_name, name->vbdev_name) == 0) {
			SPDK_ERRLOG("passthru bdev %s already exists\n", vbdev_name);
			return -EEXIST;
		}
	}

	name = calloc(1, sizeof(struct bdev_names));
	if (!name) {
		SPDK_ERRLOG("could not allocate bdev_names\n");
		return -ENOMEM;
	}

	name->bdev_name = strdup(bdev_name);
	if (!name->bdev_name) {
		SPDK_ERRLOG("could not allocate name->bdev_name\n");
		free(name);
		return -ENOMEM;
	}

	name->vbdev_name = strdup(vbdev_name);
	if (!name->vbdev_name) {
		SPDK_ERRLOG("could not allocate name->vbdev_name\n");
		free(name->bdev_name);
		free(name);
		return -ENOMEM;
	}

	TAILQ_INSERT_TAIL(&g_bdev_names, name, link);

	return 0;
}

/* On init, just perform bdev module specific initialization. */
static int
vbdev_passthru_init(void)
{
	return 0;
}

/* Called when the entire module is being torn down. */
static void
vbdev_passthru_finish(void)
{
	struct bdev_names *name;

	while ((name = TAILQ_FIRST(&g_bdev_names))) {
		TAILQ_REMOVE(&g_bdev_names, name, link);
		free(name->bdev_name);
		free(name->vbdev_name);
		free(name);
	}
}

/* During init we'll be asked how much memory we'd like passed to us
 * in bdev_io structures as context. Here's where we specify how
 * much context we want per IO.
 */
static int
vbdev_passthru_get_ctx_size(void)
{
	return sizeof(struct passthru_bdev_io);
}

/* While vbdev_passthru_config_json() generates the per-module JSON config data, this
 * function is called to output any per-bdev specific methods. For the PT module, there
 * are none.
 */
static void
vbdev_passthru_write_config_json(struct spdk_bdev *bdev, struct spdk_json_write_ctx *w)
{
	/* No config per bdev needed */
}

/* When we register our bdev this is how we specify our entry points. */
static const struct spdk_bdev_fn_table vbdev_passthru_fn_table = {
	.destruct = vbdev_passthru_destruct,
	.submit_request = vbdev_passthru_submit_request,
	.io_type_supported = vbdev_passthru_io_type_supported,
	.get_io_channel = vbdev_passthru_get_io_channel,
	.dump_info_json = vbdev_passthru_dump_info_json,
	.write_config_json = vbdev_passthru_write_config_json,
};

static void
vbdev_passthru_base_bdev_hotremove_cb(struct spdk_bdev *bdev_find)
{
	struct vbdev_passthru *pt_node, *tmp;

	TAILQ_FOREACH_SAFE(pt_node, &g_pt_nodes, link, tmp) {
		if (bdev_find == pt_node->base_bdev) {
			spdk_bdev_unregister(&pt_node->pt_bdev, NULL, NULL);
		}
	}
}

/* Called when the underlying base bdev triggers an asynchronous event such as bdev removal. */
static void
vbdev_passthru_base_bdev_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev,
				  void *event_ctx)
{
	switch (type) {
	case SPDK_BDEV_EVENT_REMOVE:
		vbdev_passthru_base_bdev_hotremove_cb(bdev);
		break;
	default:
		SPDK_NOTICELOG("Unsupported bdev event: type %d\n", type);
		break;
	}
}
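
/* Only removal is handled above. A fuller module might also react to other events;
 * for example, SPDK_BDEV_EVENT_RESIZE could (sketch, not implemented here) propagate
 * the base bdev's new size to the passthru bdev with something like
 * spdk_bdev_notify_blockcnt_change(&pt_node->pt_bdev, new_block_count).
 */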

/* Create and register the passthru vbdev if we find it in our list of bdev names.
 * This can be called either by the examine path or RPC method.
 */
static int
vbdev_passthru_register(const char *bdev_name)
{
	struct bdev_names *name;
	struct vbdev_passthru *pt_node;
	struct spdk_bdev *bdev;
	int rc = 0;

	/* Check our list of names from config versus this bdev and if
	 * there's a match, create the pt_node & bdev accordingly.
	 */
	TAILQ_FOREACH(name, &g_bdev_names, link) {
		if (strcmp(name->bdev_name, bdev_name) != 0) {
			continue;
		}

		SPDK_NOTICELOG("Match on %s\n", bdev_name);
		pt_node = calloc(1, sizeof(struct vbdev_passthru));
		if (!pt_node) {
			rc = -ENOMEM;
			SPDK_ERRLOG("could not allocate pt_node\n");
			break;
		}

		pt_node->pt_bdev.name = strdup(name->vbdev_name);
		if (!pt_node->pt_bdev.name) {
			rc = -ENOMEM;
			SPDK_ERRLOG("could not allocate pt_bdev name\n");
			free(pt_node);
			break;
		}
		pt_node->pt_bdev.product_name = "passthru";

		/* The base bdev that we're attaching to. */
		rc = spdk_bdev_open_ext(bdev_name, true, vbdev_passthru_base_bdev_event_cb,
					NULL, &pt_node->base_desc);
		if (rc) {
			if (rc != -ENODEV) {
				SPDK_ERRLOG("could not open bdev %s\n", bdev_name);
			}
			free(pt_node->pt_bdev.name);
			free(pt_node);
			break;
		}
		SPDK_NOTICELOG("base bdev opened\n");

		bdev = spdk_bdev_desc_get_bdev(pt_node->base_desc);
		pt_node->base_bdev = bdev;

		/* Copy some properties from the underlying base bdev. */
		pt_node->pt_bdev.write_cache = bdev->write_cache;
		pt_node->pt_bdev.required_alignment = bdev->required_alignment;
		pt_node->pt_bdev.optimal_io_boundary = bdev->optimal_io_boundary;
		pt_node->pt_bdev.blocklen = bdev->blocklen;
		pt_node->pt_bdev.blockcnt = bdev->blockcnt;

		pt_node->pt_bdev.md_interleave = bdev->md_interleave;
		pt_node->pt_bdev.md_len = bdev->md_len;
		pt_node->pt_bdev.dif_type = bdev->dif_type;
		pt_node->pt_bdev.dif_is_head_of_md = bdev->dif_is_head_of_md;
		pt_node->pt_bdev.dif_check_flags = bdev->dif_check_flags;

		/* This is the context that is passed to us when the bdev
		 * layer calls in so we'll save our pt_bdev node here.
		 */
629 */ 630 pt_node->pt_bdev.ctxt = pt_node; 631 pt_node->pt_bdev.fn_table = &vbdev_passthru_fn_table; 632 pt_node->pt_bdev.module = &passthru_if; 633 TAILQ_INSERT_TAIL(&g_pt_nodes, pt_node, link); 634 635 spdk_io_device_register(pt_node, pt_bdev_ch_create_cb, pt_bdev_ch_destroy_cb, 636 sizeof(struct pt_io_channel), 637 name->vbdev_name); 638 SPDK_NOTICELOG("io_device created at: 0x%p\n", pt_node); 639 640 /* Save the thread where the base device is opened */ 641 pt_node->thread = spdk_get_thread(); 642 643 rc = spdk_bdev_module_claim_bdev(bdev, pt_node->base_desc, pt_node->pt_bdev.module); 644 if (rc) { 645 SPDK_ERRLOG("could not claim bdev %s\n", bdev_name); 646 spdk_bdev_close(pt_node->base_desc); 647 TAILQ_REMOVE(&g_pt_nodes, pt_node, link); 648 spdk_io_device_unregister(pt_node, NULL); 649 free(pt_node->pt_bdev.name); 650 free(pt_node); 651 break; 652 } 653 SPDK_NOTICELOG("bdev claimed\n"); 654 655 rc = spdk_bdev_register(&pt_node->pt_bdev); 656 if (rc) { 657 SPDK_ERRLOG("could not register pt_bdev\n"); 658 spdk_bdev_module_release_bdev(&pt_node->pt_bdev); 659 spdk_bdev_close(pt_node->base_desc); 660 TAILQ_REMOVE(&g_pt_nodes, pt_node, link); 661 spdk_io_device_unregister(pt_node, NULL); 662 free(pt_node->pt_bdev.name); 663 free(pt_node); 664 break; 665 } 666 SPDK_NOTICELOG("ext_pt_bdev registered\n"); 667 SPDK_NOTICELOG("created ext_pt_bdev for: %s\n", name->vbdev_name); 668 } 669 670 return rc; 671 } 672 673 /* Create the passthru disk from the given bdev and vbdev name. */ 674 int 675 bdev_passthru_external_create_disk(const char *bdev_name, const char *vbdev_name) 676 { 677 int rc; 678 679 /* Insert the bdev name into our global name list even if it doesn't exist yet, 680 * it may show up soon... 681 */ 682 rc = vbdev_passthru_insert_name(bdev_name, vbdev_name); 683 if (rc) { 684 return rc; 685 } 686 687 rc = vbdev_passthru_register(bdev_name); 688 if (rc == -ENODEV) { 689 /* This is not an error, we tracked the name above and it still 690 * may show up later. 691 */ 692 SPDK_NOTICELOG("vbdev creation deferred pending base bdev arrival\n"); 693 rc = 0; 694 } 695 696 return rc; 697 } 698 699 void 700 bdev_passthru_external_delete_disk(struct spdk_bdev *bdev, spdk_bdev_unregister_cb cb_fn, 701 void *cb_arg) 702 { 703 struct bdev_names *name; 704 705 if (!bdev || bdev->module != &passthru_if) { 706 cb_fn(cb_arg, -ENODEV); 707 return; 708 } 709 710 /* Remove the association (vbdev, bdev) from g_bdev_names. This is required so that the 711 * vbdev does not get re-created if the same bdev is constructed at some other time, 712 * unless the underlying bdev was hot-removed. 713 */ 714 TAILQ_FOREACH(name, &g_bdev_names, link) { 715 if (strcmp(name->vbdev_name, bdev->name) == 0) { 716 TAILQ_REMOVE(&g_bdev_names, name, link); 717 free(name->bdev_name); 718 free(name->vbdev_name); 719 free(name); 720 break; 721 } 722 } 723 724 /* Additional cleanup happens in the destruct callback. */ 725 spdk_bdev_unregister(bdev, cb_fn, cb_arg); 726 } 727 728 /* Because we specified this function in our pt bdev function table when we 729 * registered our pt bdev, we'll get this call anytime a new bdev shows up. 730 * Here we need to decide if we care about it and if so what to do. We 731 * parsed the config file at init so we check the new bdev against the list 732 * we built up at that time and if the user configured us to attach to this 733 * bdev, here's where we do it. 
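
/* bdev_passthru_external_create_disk()/..._delete_disk() are the entry points a companion
 * RPC file would wire up. Based on the method name written out by
 * vbdev_passthru_config_json(), a creation request over JSON-RPC looks roughly like
 * ("Malloc0" and "TestPT0" are placeholder names):
 *
 *	{ "method": "construct_ext_passthru_bdev",
 *	  "params": { "base_bdev_name": "Malloc0", "name": "TestPT0" } }
 */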
734 */ 735 static void 736 vbdev_passthru_examine(struct spdk_bdev *bdev) 737 { 738 vbdev_passthru_register(bdev->name); 739 740 spdk_bdev_module_examine_done(&passthru_if); 741 } 742
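
/* Note: an examine_config callback must always report completion back to the bdev layer,
 * which is why spdk_bdev_module_examine_done() is called above regardless of whether a
 * passthru bdev was created. Creation requests made before the base bdev exists are
 * simply remembered in g_bdev_names and satisfied here once the base bdev appears.
 */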