/* SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2018 Intel Corporation.
 *   All rights reserved.
 *   Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

/*
 * This is a simple example of a virtual block device module that passes IO
 * down to a bdev (or bdevs) that it's configured to attach to.
 */

#include "spdk/stdinc.h"

#include "vbdev_passthru.h"
#include "spdk/rpc.h"
#include "spdk/env.h"
#include "spdk/endian.h"
#include "spdk/string.h"
#include "spdk/thread.h"
#include "spdk/util.h"

#include "spdk/bdev_module.h"
#include "spdk/log.h"

/* This namespace UUID was generated using the uuid_generate() method. */
#define BDEV_PASSTHRU_NAMESPACE_UUID "7e25812e-c8c0-4d3f-8599-16d790555b85"

static int vbdev_passthru_init(void);
static int vbdev_passthru_get_ctx_size(void);
static void vbdev_passthru_examine(struct spdk_bdev *bdev);
static void vbdev_passthru_finish(void);
static int vbdev_passthru_config_json(struct spdk_json_write_ctx *w);

static struct spdk_bdev_module passthru_if = {
	.name = "passthru",
	.module_init = vbdev_passthru_init,
	.get_ctx_size = vbdev_passthru_get_ctx_size,
	.examine_config = vbdev_passthru_examine,
	.module_fini = vbdev_passthru_finish,
	.config_json = vbdev_passthru_config_json
};

SPDK_BDEV_MODULE_REGISTER(passthru, &passthru_if)

/* List of pt_bdev names and their base bdevs via configuration file.
 * Used so we can parse the conf once at init and use this list in examine().
 */
struct bdev_names {
	char *vbdev_name;
	char *bdev_name;
	struct spdk_uuid uuid;
	TAILQ_ENTRY(bdev_names) link;
};
static TAILQ_HEAD(, bdev_names) g_bdev_names = TAILQ_HEAD_INITIALIZER(g_bdev_names);

/* List of virtual bdevs and associated info for each. */
struct vbdev_passthru {
	struct spdk_bdev *base_bdev;       /* the thing we're attaching to */
	struct spdk_bdev_desc *base_desc;  /* its descriptor we get from open */
	struct spdk_bdev pt_bdev;          /* the PT virtual bdev */
	TAILQ_ENTRY(vbdev_passthru) link;
	struct spdk_thread *thread;        /* thread where base device is opened */
};
static TAILQ_HEAD(, vbdev_passthru) g_pt_nodes = TAILQ_HEAD_INITIALIZER(g_pt_nodes);

/* The pt vbdev channel struct. It is allocated and freed on my behalf by the io channel code.
 * If this vbdev needed to implement a poller or a queue for IO, this is where those things
 * would be defined. This passthru bdev doesn't actually need to allocate a channel, it could
 * simply pass back the channel of the bdev underneath it but for example purposes we will
 * present its own to the upper layers.
 */
struct pt_io_channel {
	struct spdk_io_channel *base_ch; /* IO channel of base device */
};

/* Just for fun, this pt_bdev module doesn't need it but this is essentially a per IO
 * context that we get handed by the bdev layer.
 */
struct passthru_bdev_io {
	uint8_t test;

	/* bdev related */
	struct spdk_io_channel *ch;

	/* for bdev_io_wait */
	struct spdk_bdev_io_wait_entry bdev_io_wait;
};

static void vbdev_passthru_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io);


/* Callback for unregistering the IO device. */
static void
_device_unregister_cb(void *io_device)
{
	struct vbdev_passthru *pt_node = io_device;

	/* Done with this pt_node. */
	free(pt_node->pt_bdev.name);
	free(pt_node);
}

/* Wrapper for the bdev close operation. */
static void
_vbdev_passthru_destruct(void *ctx)
{
	struct spdk_bdev_desc *desc = ctx;

	spdk_bdev_close(desc);
}

/* Called after we've unregistered following a hot remove callback.
 * Our finish entry point will be called next.
 */
static int
vbdev_passthru_destruct(void *ctx)
{
	struct vbdev_passthru *pt_node = (struct vbdev_passthru *)ctx;

	/* It is important to follow this exact sequence of steps for destroying
	 * a vbdev...
	 */

	TAILQ_REMOVE(&g_pt_nodes, pt_node, link);

	/* Unclaim the underlying bdev. */
	spdk_bdev_module_release_bdev(pt_node->base_bdev);

	/* Close the underlying bdev on its same opened thread. */
	if (pt_node->thread && pt_node->thread != spdk_get_thread()) {
		spdk_thread_send_msg(pt_node->thread, _vbdev_passthru_destruct, pt_node->base_desc);
	} else {
		spdk_bdev_close(pt_node->base_desc);
	}

	/* Unregister the io_device. */
	spdk_io_device_unregister(pt_node, _device_unregister_cb);

	return 0;
}

/* Completion callback for IOs that were issued from this bdev. The original bdev_io
 * is passed in as an arg so we'll complete that one with the appropriate status
 * and then free the one that this module issued.
 */
static void
_pt_complete_io(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	struct spdk_bdev_io *orig_io = cb_arg;
	int status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
	struct passthru_bdev_io *io_ctx = (struct passthru_bdev_io *)orig_io->driver_ctx;

	/* We set up this value in the submission routine, just showing here that it is
	 * passed back to us.
	 */
	if (io_ctx->test != 0x5a) {
		SPDK_ERRLOG("Error, original IO device_ctx is wrong! 0x%x\n",
			    io_ctx->test);
	}

	/* Complete the original IO and then free the one that we created here
	 * as a result of issuing an IO via submit_request.
	 */
	spdk_bdev_io_complete(orig_io, status);
	spdk_bdev_free_io(bdev_io);
}

static void
_pt_complete_zcopy_io(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	struct spdk_bdev_io *orig_io = cb_arg;
	int status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;
	struct passthru_bdev_io *io_ctx = (struct passthru_bdev_io *)orig_io->driver_ctx;

	/* We set up this value in the submission routine, just showing here that it is
	 * passed back to us.
	 */
	if (io_ctx->test != 0x5a) {
		SPDK_ERRLOG("Error, original IO device_ctx is wrong! 0x%x\n",
			    io_ctx->test);
	}

	/* Complete the original IO and then free the one that we created here
	 * as a result of issuing an IO via submit_request.
	 */
	spdk_bdev_io_set_buf(orig_io, bdev_io->u.bdev.iovs[0].iov_base, bdev_io->u.bdev.iovs[0].iov_len);
	spdk_bdev_io_complete(orig_io, status);
	spdk_bdev_free_io(bdev_io);
}

static void
vbdev_passthru_resubmit_io(void *arg)
{
	struct spdk_bdev_io *bdev_io = (struct spdk_bdev_io *)arg;
	struct passthru_bdev_io *io_ctx = (struct passthru_bdev_io *)bdev_io->driver_ctx;

	vbdev_passthru_submit_request(io_ctx->ch, bdev_io);
}

static void
vbdev_passthru_queue_io(struct spdk_bdev_io *bdev_io)
{
	struct passthru_bdev_io *io_ctx = (struct passthru_bdev_io *)bdev_io->driver_ctx;
	struct pt_io_channel *pt_ch = spdk_io_channel_get_ctx(io_ctx->ch);
	int rc;

	io_ctx->bdev_io_wait.bdev = bdev_io->bdev;
	io_ctx->bdev_io_wait.cb_fn = vbdev_passthru_resubmit_io;
	io_ctx->bdev_io_wait.cb_arg = bdev_io;

	/* Queue the IO using the channel of the base device. */
	rc = spdk_bdev_queue_io_wait(bdev_io->bdev, pt_ch->base_ch, &io_ctx->bdev_io_wait);
	if (rc != 0) {
		SPDK_ERRLOG("Queue io failed in vbdev_passthru_queue_io, rc=%d.\n", rc);
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
	}
}

static void
pt_init_ext_io_opts(struct spdk_bdev_io *bdev_io, struct spdk_bdev_ext_io_opts *opts)
{
	memset(opts, 0, sizeof(*opts));
	opts->size = sizeof(*opts);
	opts->memory_domain = bdev_io->u.bdev.memory_domain;
	opts->memory_domain_ctx = bdev_io->u.bdev.memory_domain_ctx;
	opts->metadata = bdev_io->u.bdev.md_buf;
}

/* Callback for getting a buf from the bdev pool in the event that the caller passed
 * in NULL, we need to own the buffer so it doesn't get freed by another vbdev module
 * beneath us before we're done with it. That won't happen in this example but it could
 * if this example were used as a template for something more complex.
 */
static void
pt_read_get_buf_cb(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io, bool success)
{
	struct vbdev_passthru *pt_node = SPDK_CONTAINEROF(bdev_io->bdev, struct vbdev_passthru,
					 pt_bdev);
	struct pt_io_channel *pt_ch = spdk_io_channel_get_ctx(ch);
	struct passthru_bdev_io *io_ctx = (struct passthru_bdev_io *)bdev_io->driver_ctx;
	struct spdk_bdev_ext_io_opts io_opts;
	int rc;

	if (!success) {
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
		return;
	}

	pt_init_ext_io_opts(bdev_io, &io_opts);
	rc = spdk_bdev_readv_blocks_ext(pt_node->base_desc, pt_ch->base_ch, bdev_io->u.bdev.iovs,
					bdev_io->u.bdev.iovcnt, bdev_io->u.bdev.offset_blocks,
					bdev_io->u.bdev.num_blocks, _pt_complete_io,
					bdev_io, &io_opts);
	if (rc != 0) {
		if (rc == -ENOMEM) {
			SPDK_ERRLOG("No memory, start to queue io for passthru.\n");
			io_ctx->ch = ch;
			vbdev_passthru_queue_io(bdev_io);
		} else {
			SPDK_ERRLOG("ERROR on bdev_io submission!\n");
			spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
		}
	}
}

/* Called when someone above submits IO to this pt vbdev. We're simply passing it on here
 * via SPDK IO calls which in turn allocate another bdev IO and call our cpl callback provided
 * below along with the original bdev_io so that we can complete it once this IO completes.
 */
static void
vbdev_passthru_submit_request(struct spdk_io_channel *ch, struct spdk_bdev_io *bdev_io)
{
	struct vbdev_passthru *pt_node = SPDK_CONTAINEROF(bdev_io->bdev, struct vbdev_passthru, pt_bdev);
	struct pt_io_channel *pt_ch = spdk_io_channel_get_ctx(ch);
	struct passthru_bdev_io *io_ctx = (struct passthru_bdev_io *)bdev_io->driver_ctx;
	struct spdk_bdev_ext_io_opts io_opts;
	int rc = 0;

	/* Setup a per IO context value; we don't do anything with it in the vbdev other
	 * than confirm we get the same thing back in the completion callback just to
	 * demonstrate.
	 */
	io_ctx->test = 0x5a;

	switch (bdev_io->type) {
	case SPDK_BDEV_IO_TYPE_READ:
		spdk_bdev_io_get_buf(bdev_io, pt_read_get_buf_cb,
				     bdev_io->u.bdev.num_blocks * bdev_io->bdev->blocklen);
		break;
	case SPDK_BDEV_IO_TYPE_WRITE:
		pt_init_ext_io_opts(bdev_io, &io_opts);
		rc = spdk_bdev_writev_blocks_ext(pt_node->base_desc, pt_ch->base_ch, bdev_io->u.bdev.iovs,
						 bdev_io->u.bdev.iovcnt, bdev_io->u.bdev.offset_blocks,
						 bdev_io->u.bdev.num_blocks, _pt_complete_io,
						 bdev_io, &io_opts);
		break;
	case SPDK_BDEV_IO_TYPE_WRITE_ZEROES:
		rc = spdk_bdev_write_zeroes_blocks(pt_node->base_desc, pt_ch->base_ch,
						   bdev_io->u.bdev.offset_blocks,
						   bdev_io->u.bdev.num_blocks,
						   _pt_complete_io, bdev_io);
		break;
	case SPDK_BDEV_IO_TYPE_UNMAP:
		rc = spdk_bdev_unmap_blocks(pt_node->base_desc, pt_ch->base_ch,
					    bdev_io->u.bdev.offset_blocks,
					    bdev_io->u.bdev.num_blocks,
					    _pt_complete_io, bdev_io);
		break;
	case SPDK_BDEV_IO_TYPE_FLUSH:
		rc = spdk_bdev_flush_blocks(pt_node->base_desc, pt_ch->base_ch,
					    bdev_io->u.bdev.offset_blocks,
					    bdev_io->u.bdev.num_blocks,
					    _pt_complete_io, bdev_io);
		break;
	case SPDK_BDEV_IO_TYPE_RESET:
		rc = spdk_bdev_reset(pt_node->base_desc, pt_ch->base_ch,
				     _pt_complete_io, bdev_io);
		break;
	case SPDK_BDEV_IO_TYPE_ZCOPY:
		rc = spdk_bdev_zcopy_start(pt_node->base_desc, pt_ch->base_ch, NULL, 0,
					   bdev_io->u.bdev.offset_blocks,
					   bdev_io->u.bdev.num_blocks, bdev_io->u.bdev.zcopy.populate,
					   _pt_complete_zcopy_io, bdev_io);
		break;
	case SPDK_BDEV_IO_TYPE_ABORT:
		rc = spdk_bdev_abort(pt_node->base_desc, pt_ch->base_ch, bdev_io->u.abort.bio_to_abort,
				     _pt_complete_io, bdev_io);
		break;
	case SPDK_BDEV_IO_TYPE_COPY:
		rc = spdk_bdev_copy_blocks(pt_node->base_desc, pt_ch->base_ch,
					   bdev_io->u.bdev.offset_blocks,
					   bdev_io->u.bdev.copy.src_offset_blocks,
					   bdev_io->u.bdev.num_blocks,
					   _pt_complete_io, bdev_io);
		break;
	default:
		SPDK_ERRLOG("passthru: unknown I/O type %d\n", bdev_io->type);
		spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
		return;
	}
	if (rc != 0) {
		if (rc == -ENOMEM) {
			SPDK_ERRLOG("No memory, start to queue io for passthru.\n");
			io_ctx->ch = ch;
			vbdev_passthru_queue_io(bdev_io);
		} else {
			SPDK_ERRLOG("ERROR on bdev_io submission!\n");
			spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
		}
	}
}

/* We'll just call the base bdev and let it answer; however, if we were more
 * restrictive for some reason (or less) we could get the response back
 * and modify it according to our purposes.
 */
static bool
vbdev_passthru_io_type_supported(void *ctx, enum spdk_bdev_io_type io_type)
{
	struct vbdev_passthru *pt_node = (struct vbdev_passthru *)ctx;

	return spdk_bdev_io_type_supported(pt_node->base_bdev, io_type);
}

/* We supplied this as an entry point for upper layers that want to communicate with this
 * bdev. This is how they get a channel. We are passed the same context we provided when
 * we created our PT vbdev in examine() which, for this bdev, is the address of one of
 * our context nodes. From here we'll ask the SPDK channel code to fill out our channel
 * struct and we'll keep it in our PT node.
 */
static struct spdk_io_channel *
vbdev_passthru_get_io_channel(void *ctx)
{
	struct vbdev_passthru *pt_node = (struct vbdev_passthru *)ctx;
	struct spdk_io_channel *pt_ch = NULL;

	/* The IO channel code will allocate a channel for us which consists of
	 * the SPDK channel structure plus the size of our pt_io_channel struct
	 * that we passed in when we registered our IO device. It will then call
	 * our channel create callback to populate any elements that we need to
	 * update.
	 */
	pt_ch = spdk_get_io_channel(pt_node);

	return pt_ch;
}

/* This is the output for bdev_get_bdevs() for this vbdev */
static int
vbdev_passthru_dump_info_json(void *ctx, struct spdk_json_write_ctx *w)
{
	struct vbdev_passthru *pt_node = (struct vbdev_passthru *)ctx;

	spdk_json_write_name(w, "passthru");
	spdk_json_write_object_begin(w);
	spdk_json_write_named_string(w, "name", spdk_bdev_get_name(&pt_node->pt_bdev));
	spdk_json_write_named_string(w, "base_bdev_name", spdk_bdev_get_name(pt_node->base_bdev));
	spdk_json_write_object_end(w);

	return 0;
}

/* This is used to generate JSON that can configure this module to its current state. */
static int
vbdev_passthru_config_json(struct spdk_json_write_ctx *w)
{
	struct vbdev_passthru *pt_node;

	TAILQ_FOREACH(pt_node, &g_pt_nodes, link) {
		const struct spdk_uuid *uuid = spdk_bdev_get_uuid(&pt_node->pt_bdev);

		spdk_json_write_object_begin(w);
		spdk_json_write_named_string(w, "method", "bdev_passthru_create");
		spdk_json_write_named_object_begin(w, "params");
		spdk_json_write_named_string(w, "base_bdev_name", spdk_bdev_get_name(pt_node->base_bdev));
		spdk_json_write_named_string(w, "name", spdk_bdev_get_name(&pt_node->pt_bdev));
		if (!spdk_uuid_is_null(uuid)) {
			char uuid_str[SPDK_UUID_STRING_LEN];

			spdk_uuid_fmt_lower(uuid_str, sizeof(uuid_str), uuid);
			spdk_json_write_named_string(w, "uuid", uuid_str);
		}
		spdk_json_write_object_end(w);
		spdk_json_write_object_end(w);
	}
	return 0;
}
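
/* For reference, each object emitted by vbdev_passthru_config_json() above takes
 * roughly this shape (the bdev names here are illustrative only, and "uuid" is
 * written only when a non-null UUID was explicitly assigned):
 *
 *	{
 *	  "method": "bdev_passthru_create",
 *	  "params": {
 *	    "base_bdev_name": "Malloc0",
 *	    "name": "TestPassthru"
 *	  }
 *	}
 */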

/* We provide this callback for the SPDK channel code to create a channel using
 * the channel struct we provided in our module get_io_channel() entry point. Here
 * we get and save off an underlying base channel of the device below us so that
 * we can communicate with the base bdev on a per channel basis. If we needed
 * our own poller for this vbdev, we'd register it here.
 */
static int
pt_bdev_ch_create_cb(void *io_device, void *ctx_buf)
{
	struct pt_io_channel *pt_ch = ctx_buf;
	struct vbdev_passthru *pt_node = io_device;

	pt_ch->base_ch = spdk_bdev_get_io_channel(pt_node->base_desc);

	return 0;
}

/* We provide this callback for the SPDK channel code to destroy a channel
 * created with our create callback. We just need to undo anything we did
 * when we created. If this bdev used its own poller, we'd unregister it here.
 */
static void
pt_bdev_ch_destroy_cb(void *io_device, void *ctx_buf)
{
	struct pt_io_channel *pt_ch = ctx_buf;

	spdk_put_io_channel(pt_ch->base_ch);
}

/* Create the passthru association from the bdev and vbdev name and insert
 * on the global list. */
static int
vbdev_passthru_insert_name(const char *bdev_name, const char *vbdev_name,
			   const struct spdk_uuid *uuid)
{
	struct bdev_names *name;

	TAILQ_FOREACH(name, &g_bdev_names, link) {
		if (strcmp(vbdev_name, name->vbdev_name) == 0) {
			SPDK_ERRLOG("passthru bdev %s already exists\n", vbdev_name);
			return -EEXIST;
		}
	}

	name = calloc(1, sizeof(struct bdev_names));
	if (!name) {
		SPDK_ERRLOG("could not allocate bdev_names\n");
		return -ENOMEM;
	}

	name->bdev_name = strdup(bdev_name);
	if (!name->bdev_name) {
		SPDK_ERRLOG("could not allocate name->bdev_name\n");
		free(name);
		return -ENOMEM;
	}

	name->vbdev_name = strdup(vbdev_name);
	if (!name->vbdev_name) {
		SPDK_ERRLOG("could not allocate name->vbdev_name\n");
		free(name->bdev_name);
		free(name);
		return -ENOMEM;
	}

	spdk_uuid_copy(&name->uuid, uuid);
	TAILQ_INSERT_TAIL(&g_bdev_names, name, link);

	return 0;
}

/* On init, just perform bdev module specific initialization. */
static int
vbdev_passthru_init(void)
{
	return 0;
}

/* Called when the entire module is being torn down. */
static void
vbdev_passthru_finish(void)
{
	struct bdev_names *name;

	while ((name = TAILQ_FIRST(&g_bdev_names))) {
		TAILQ_REMOVE(&g_bdev_names, name, link);
		free(name->bdev_name);
		free(name->vbdev_name);
		free(name);
	}
}

/* During init we'll be asked how much memory we'd like passed to us
 * in bdev_io structures as context. Here's where we specify how
 * much context we want per IO.
 */
static int
vbdev_passthru_get_ctx_size(void)
{
	return sizeof(struct passthru_bdev_io);
}

/* While vbdev_passthru_config_json() is used to generate per module JSON config data, this
 * function is called to output any per bdev specific methods. For the PT module, there are
 * none.
 */
static void
vbdev_passthru_write_config_json(struct spdk_bdev *bdev, struct spdk_json_write_ctx *w)
{
	/* No config per bdev needed */
}

static int
vbdev_passthru_get_memory_domains(void *ctx, struct spdk_memory_domain **domains, int array_size)
{
	struct vbdev_passthru *pt_node = (struct vbdev_passthru *)ctx;

	/* Passthru bdev doesn't work with data buffers, so it supports any memory domain used by base_bdev */
	return spdk_bdev_get_memory_domains(pt_node->base_bdev, domains, array_size);
}

/* When we register our bdev this is how we specify our entry points. */
static const struct spdk_bdev_fn_table vbdev_passthru_fn_table = {
	.destruct = vbdev_passthru_destruct,
	.submit_request = vbdev_passthru_submit_request,
	.io_type_supported = vbdev_passthru_io_type_supported,
	.get_io_channel = vbdev_passthru_get_io_channel,
	.dump_info_json = vbdev_passthru_dump_info_json,
	.write_config_json = vbdev_passthru_write_config_json,
	.get_memory_domains = vbdev_passthru_get_memory_domains,
};

static void
vbdev_passthru_base_bdev_hotremove_cb(struct spdk_bdev *bdev_find)
{
	struct vbdev_passthru *pt_node, *tmp;

	TAILQ_FOREACH_SAFE(pt_node, &g_pt_nodes, link, tmp) {
		if (bdev_find == pt_node->base_bdev) {
			spdk_bdev_unregister(&pt_node->pt_bdev, NULL, NULL);
		}
	}
}

/* Called when the underlying base bdev triggers asynchronous event such as bdev removal. */
static void
vbdev_passthru_base_bdev_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev,
				  void *event_ctx)
{
	switch (type) {
	case SPDK_BDEV_EVENT_REMOVE:
		vbdev_passthru_base_bdev_hotremove_cb(bdev);
		break;
	default:
		SPDK_NOTICELOG("Unsupported bdev event: type %d\n", type);
		break;
	}
}

/* Create and register the passthru vbdev if we find it in our list of bdev names.
 * This can be called either by the examine path or RPC method.
 */
static int
vbdev_passthru_register(const char *bdev_name)
{
	struct bdev_names *name;
	struct vbdev_passthru *pt_node;
	struct spdk_bdev *bdev;
	struct spdk_uuid ns_uuid;
	int rc = 0;

	spdk_uuid_parse(&ns_uuid, BDEV_PASSTHRU_NAMESPACE_UUID);

	/* Check our list of names from config versus this bdev and if
	 * there's a match, create the pt_node & bdev accordingly.
	 */
	TAILQ_FOREACH(name, &g_bdev_names, link) {
		if (strcmp(name->bdev_name, bdev_name) != 0) {
			continue;
		}

		SPDK_NOTICELOG("Match on %s\n", bdev_name);
		pt_node = calloc(1, sizeof(struct vbdev_passthru));
		if (!pt_node) {
			rc = -ENOMEM;
			SPDK_ERRLOG("could not allocate pt_node\n");
			break;
		}

		pt_node->pt_bdev.name = strdup(name->vbdev_name);
		if (!pt_node->pt_bdev.name) {
			rc = -ENOMEM;
			SPDK_ERRLOG("could not allocate pt_bdev name\n");
			free(pt_node);
			break;
		}
		pt_node->pt_bdev.product_name = "passthru";
		spdk_uuid_copy(&pt_node->pt_bdev.uuid, &name->uuid);

		/* The base bdev that we're attaching to. */
		rc = spdk_bdev_open_ext(bdev_name, true, vbdev_passthru_base_bdev_event_cb,
					NULL, &pt_node->base_desc);
		if (rc) {
			if (rc != -ENODEV) {
				SPDK_ERRLOG("could not open bdev %s\n", bdev_name);
			}
			free(pt_node->pt_bdev.name);
			free(pt_node);
			break;
		}
		SPDK_NOTICELOG("base bdev opened\n");

		bdev = spdk_bdev_desc_get_bdev(pt_node->base_desc);
		pt_node->base_bdev = bdev;

		/* Generate UUID based on namespace UUID + base bdev UUID. */
		rc = spdk_uuid_generate_sha1(&pt_node->pt_bdev.uuid, &ns_uuid,
					     (const char *)&pt_node->base_bdev->uuid, sizeof(struct spdk_uuid));
		if (rc) {
			SPDK_ERRLOG("Unable to generate new UUID for passthru bdev\n");
			spdk_bdev_close(pt_node->base_desc);
			free(pt_node->pt_bdev.name);
			free(pt_node);
			break;
		}

		/* Copy some properties from the underlying base bdev. */
		pt_node->pt_bdev.write_cache = bdev->write_cache;
		pt_node->pt_bdev.required_alignment = bdev->required_alignment;
		pt_node->pt_bdev.optimal_io_boundary = bdev->optimal_io_boundary;
		pt_node->pt_bdev.blocklen = bdev->blocklen;
		pt_node->pt_bdev.blockcnt = bdev->blockcnt;

		pt_node->pt_bdev.md_interleave = bdev->md_interleave;
		pt_node->pt_bdev.md_len = bdev->md_len;
		pt_node->pt_bdev.dif_type = bdev->dif_type;
		pt_node->pt_bdev.dif_is_head_of_md = bdev->dif_is_head_of_md;
		pt_node->pt_bdev.dif_check_flags = bdev->dif_check_flags;

		/* This is the context that is passed to us when the bdev
		 * layer calls in so we'll save our pt_bdev node here.
		 */
		pt_node->pt_bdev.ctxt = pt_node;
		pt_node->pt_bdev.fn_table = &vbdev_passthru_fn_table;
		pt_node->pt_bdev.module = &passthru_if;
		TAILQ_INSERT_TAIL(&g_pt_nodes, pt_node, link);

		spdk_io_device_register(pt_node, pt_bdev_ch_create_cb, pt_bdev_ch_destroy_cb,
					sizeof(struct pt_io_channel),
					name->vbdev_name);
		SPDK_NOTICELOG("io_device created at: 0x%p\n", pt_node);

		/* Save the thread where the base device is opened */
		pt_node->thread = spdk_get_thread();

		rc = spdk_bdev_module_claim_bdev(bdev, pt_node->base_desc, pt_node->pt_bdev.module);
		if (rc) {
			SPDK_ERRLOG("could not claim bdev %s\n", bdev_name);
			spdk_bdev_close(pt_node->base_desc);
			TAILQ_REMOVE(&g_pt_nodes, pt_node, link);
			spdk_io_device_unregister(pt_node, NULL);
			free(pt_node->pt_bdev.name);
			free(pt_node);
			break;
		}
		SPDK_NOTICELOG("bdev claimed\n");

		rc = spdk_bdev_register(&pt_node->pt_bdev);
		if (rc) {
			SPDK_ERRLOG("could not register pt_bdev\n");
			spdk_bdev_module_release_bdev(&pt_node->pt_bdev);
			spdk_bdev_close(pt_node->base_desc);
			TAILQ_REMOVE(&g_pt_nodes, pt_node, link);
			spdk_io_device_unregister(pt_node, NULL);
			free(pt_node->pt_bdev.name);
			free(pt_node);
			break;
		}
		SPDK_NOTICELOG("pt_bdev registered\n");
		SPDK_NOTICELOG("created pt_bdev for: %s\n", name->vbdev_name);
	}

	return rc;
}

/* Create the passthru disk from the given bdev and vbdev name. */
int
bdev_passthru_create_disk(const char *bdev_name, const char *vbdev_name,
			  const struct spdk_uuid *uuid)
{
	int rc;

	/* Insert the bdev name into our global name list even if it doesn't exist yet,
	 * it may show up soon...
	 */
	rc = vbdev_passthru_insert_name(bdev_name, vbdev_name, uuid);
	if (rc) {
		return rc;
	}

	rc = vbdev_passthru_register(bdev_name);
	if (rc == -ENODEV) {
		/* This is not an error, we tracked the name above and it still
		 * may show up later.
		 */
		SPDK_NOTICELOG("vbdev creation deferred pending base bdev arrival\n");
		rc = 0;
	}

	return rc;
}
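
/* Illustrative only (not part of this module): an RPC handler, e.g. in a companion
 * RPC source file, would typically drive the two public entry points in this file
 * roughly as sketched below. The bdev names and the completion callback names are
 * made up for the example:
 *
 *	struct spdk_uuid uuid = {};
 *	int rc;
 *
 *	rc = bdev_passthru_create_disk("Malloc0", "TestPassthru", &uuid);
 *	if (rc != 0) {
 *		SPDK_ERRLOG("failed to create passthru vbdev: %d\n", rc);
 *	}
 *
 *	... and later, to tear it down (the name passed in is the passthru vbdev name):
 *
 *	bdev_passthru_delete_disk("TestPassthru", delete_done_cb, delete_done_ctx);
 */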
void
bdev_passthru_delete_disk(const char *bdev_name, spdk_bdev_unregister_cb cb_fn, void *cb_arg)
{
	struct bdev_names *name;
	int rc;

	/* Some cleanup happens in the destruct callback. */
	rc = spdk_bdev_unregister_by_name(bdev_name, &passthru_if, cb_fn, cb_arg);
	if (rc == 0) {
		/* Remove the association (vbdev, bdev) from g_bdev_names. This is required so that the
		 * vbdev does not get re-created if the same bdev is constructed at some other time,
		 * unless the underlying bdev was hot-removed.
		 */
		TAILQ_FOREACH(name, &g_bdev_names, link) {
			if (strcmp(name->vbdev_name, bdev_name) == 0) {
				TAILQ_REMOVE(&g_bdev_names, name, link);
				free(name->bdev_name);
				free(name->vbdev_name);
				free(name);
				break;
			}
		}
	} else {
		cb_fn(cb_arg, rc);
	}
}

/* Because we specified this function as the examine_config callback in our module
 * structure when we registered our module, we'll get this call anytime a new bdev
 * shows up. Here we need to decide if we care about it and if so what to do. We
 * parsed the config file at init so we check the new bdev against the list
 * we built up at that time and if the user configured us to attach to this
 * bdev, here's where we do it.
 */
static void
vbdev_passthru_examine(struct spdk_bdev *bdev)
{
	vbdev_passthru_register(bdev->name);

	spdk_bdev_module_examine_done(&passthru_if);
}

SPDK_LOG_REGISTER_COMPONENT(vbdev_passthru)
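
/* A quick way to exercise this module is through the JSON-RPC interface, e.g. via
 * scripts/rpc.py. "bdev_passthru_create" is the method name this file emits in its
 * config_json output; the option spellings, the delete method name, and the bdev
 * names below are assumptions for illustration and may differ between releases:
 *
 *	scripts/rpc.py bdev_passthru_create -b Malloc0 -p TestPassthru
 *	scripts/rpc.py bdev_passthru_delete TestPassthru
 */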