/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2018 Intel Corporation.
 * Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES.
 * All rights reserved.
 */

/*
 * Common code for partition-like virtual bdevs.
 */

#include "spdk/bdev.h"
#include "spdk/likely.h"
#include "spdk/log.h"
#include "spdk/string.h"
#include "spdk/thread.h"

#include "spdk/bdev_module.h"

/* This namespace UUID was generated using uuid_generate(). */
#define BDEV_PART_NAMESPACE_UUID "976b899e-3e1e-4d71-ab69-c2b08e9df8b8"

struct spdk_bdev_part_base {
	struct spdk_bdev		*bdev;
	struct spdk_bdev_desc		*desc;
	uint32_t			ref;
	uint32_t			channel_size;
	spdk_bdev_part_base_free_fn	base_free_fn;
	void				*ctx;
	bool				claimed;
	struct spdk_bdev_module		*module;
	struct spdk_bdev_fn_table	*fn_table;
	struct bdev_part_tailq		*tailq;
	spdk_io_channel_create_cb	ch_create_cb;
	spdk_io_channel_destroy_cb	ch_destroy_cb;
	spdk_bdev_remove_cb_t		remove_cb;
	struct spdk_thread		*thread;
};

struct spdk_bdev *
spdk_bdev_part_base_get_bdev(struct spdk_bdev_part_base *part_base)
{
	return part_base->bdev;
}

struct spdk_bdev_desc *
spdk_bdev_part_base_get_desc(struct spdk_bdev_part_base *part_base)
{
	return part_base->desc;
}

struct bdev_part_tailq *
spdk_bdev_part_base_get_tailq(struct spdk_bdev_part_base *part_base)
{
	return part_base->tailq;
}

void *
spdk_bdev_part_base_get_ctx(struct spdk_bdev_part_base *part_base)
{
	return part_base->ctx;
}

const char *
spdk_bdev_part_base_get_bdev_name(struct spdk_bdev_part_base *part_base)
{
	return part_base->bdev->name;
}

static void
bdev_part_base_free(void *ctx)
{
	struct spdk_bdev_desc *desc = ctx;

	spdk_bdev_close(desc);
}

void
spdk_bdev_part_base_free(struct spdk_bdev_part_base *base)
{
	if (base->desc) {
		/* Close the underlying bdev on the same thread that opened it. */
		if (base->thread && base->thread != spdk_get_thread()) {
			spdk_thread_send_msg(base->thread, bdev_part_base_free, base->desc);
		} else {
			spdk_bdev_close(base->desc);
		}
	}

	if (base->base_free_fn != NULL) {
		base->base_free_fn(base->ctx);
	}

	free(base);
}
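
/*
 * Each constructed part holds a reference on its shared base. When the last
 * part is freed (see bdev_part_free_cb), the module's claim on the underlying
 * bdev is released and the base itself is freed.
 */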

static void
bdev_part_free_cb(void *io_device)
{
	struct spdk_bdev_part *part = io_device;
	struct spdk_bdev_part_base *base;

	assert(part);
	assert(part->internal.base);

	base = part->internal.base;

	TAILQ_REMOVE(base->tailq, part, tailq);

	if (--base->ref == 0) {
		spdk_bdev_module_release_bdev(base->bdev);
		spdk_bdev_part_base_free(base);
	}

	spdk_bdev_destruct_done(&part->internal.bdev, 0);
	free(part->internal.bdev.name);
	free(part->internal.bdev.product_name);
	free(part);
}

int
spdk_bdev_part_free(struct spdk_bdev_part *part)
{
	spdk_io_device_unregister(part, bdev_part_free_cb);

	/* Return 1 to indicate that this is an asynchronous operation that isn't complete
	 * until spdk_bdev_destruct_done is called */
	return 1;
}

void
spdk_bdev_part_base_hotremove(struct spdk_bdev_part_base *part_base, struct bdev_part_tailq *tailq)
{
	struct spdk_bdev_part *part, *tmp;

	TAILQ_FOREACH_SAFE(part, tailq, tailq, tmp) {
		if (part->internal.base == part_base) {
			spdk_bdev_unregister(&part->internal.bdev, NULL, NULL);
		}
	}
}

static bool
bdev_part_io_type_supported(void *_part, enum spdk_bdev_io_type io_type)
{
	struct spdk_bdev_part *part = _part;

	/* We can't decode/modify passthrough NVMe commands, so don't report
	 * that a partition supports these io types, even if the underlying
	 * bdev does.
	 */
	switch (io_type) {
	case SPDK_BDEV_IO_TYPE_NVME_ADMIN:
	case SPDK_BDEV_IO_TYPE_NVME_IO:
	case SPDK_BDEV_IO_TYPE_NVME_IO_MD:
		return false;
	default:
		break;
	}

	return part->internal.base->bdev->fn_table->io_type_supported(part->internal.base->bdev->ctxt,
			io_type);
}

static struct spdk_io_channel *
bdev_part_get_io_channel(void *_part)
{
	struct spdk_bdev_part *part = _part;

	return spdk_get_io_channel(part);
}

struct spdk_bdev *
spdk_bdev_part_get_bdev(struct spdk_bdev_part *part)
{
	return &part->internal.bdev;
}

struct spdk_bdev_part_base *
spdk_bdev_part_get_base(struct spdk_bdev_part *part)
{
	return part->internal.base;
}

struct spdk_bdev *
spdk_bdev_part_get_base_bdev(struct spdk_bdev_part *part)
{
	return part->internal.base->bdev;
}

uint64_t
spdk_bdev_part_get_offset_blocks(struct spdk_bdev_part *part)
{
	return part->internal.offset_blocks;
}
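
/*
 * DIF/DIX reference tags are computed from LBAs on the base bdev, while a
 * partition exposes its own zero-based LBA space. When reference tag checking
 * is enabled, tags must therefore be shifted by the partition offset: e.g. for
 * a part starting at base LBA 2048, the block at part LBA 0 is stored with the
 * tag for base LBA 2048. Writes are remapped part->base before submission, and
 * reads are remapped base->part on completion.
 */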

static int
bdev_part_remap_dif(struct spdk_bdev_io *bdev_io, uint32_t offset,
		    uint32_t remapped_offset)
{
	struct spdk_bdev *bdev = bdev_io->bdev;
	struct spdk_dif_ctx dif_ctx;
	struct spdk_dif_error err_blk = {};
	int rc;
	struct spdk_dif_ctx_init_ext_opts dif_opts;

	if (spdk_likely(!(bdev->dif_check_flags & SPDK_DIF_FLAGS_REFTAG_CHECK))) {
		return 0;
	}

	dif_opts.size = SPDK_SIZEOF(&dif_opts, dif_pi_format);
	dif_opts.dif_pi_format = SPDK_DIF_PI_FORMAT_16;
	rc = spdk_dif_ctx_init(&dif_ctx,
			       bdev->blocklen, bdev->md_len, bdev->md_interleave,
			       bdev->dif_is_head_of_md, bdev->dif_type, bdev->dif_check_flags,
			       offset, 0, 0, 0, 0, &dif_opts);
	if (rc != 0) {
		SPDK_ERRLOG("Initialization of DIF context failed\n");
		return rc;
	}

	spdk_dif_ctx_set_remapped_init_ref_tag(&dif_ctx, remapped_offset);

	if (bdev->md_interleave) {
		rc = spdk_dif_remap_ref_tag(bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
					    bdev_io->u.bdev.num_blocks, &dif_ctx, &err_blk, true);
	} else {
		struct iovec md_iov = {
			.iov_base	= bdev_io->u.bdev.md_buf,
			.iov_len	= bdev_io->u.bdev.num_blocks * bdev->md_len,
		};

		rc = spdk_dix_remap_ref_tag(&md_iov, bdev_io->u.bdev.num_blocks, &dif_ctx, &err_blk, true);
	}

	if (rc != 0) {
		SPDK_ERRLOG("Remapping reference tag failed. type=%d, offset=%" PRIu32 "\n",
			    err_blk.err_type, err_blk.err_offset);
	}

	return rc;
}
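
/*
 * Completion callback for I/O submitted to the base bdev. On a successful
 * read, DIF reference tags are remapped from base LBAs back to part LBAs.
 * For zcopy, the buffer obtained from the base is propagated to the part's
 * bdev_io before that I/O is completed.
 */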

static void
bdev_part_complete_io(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	struct spdk_bdev_io *part_io = cb_arg;
	uint32_t offset, remapped_offset;
	int rc;

	switch (bdev_io->type) {
	case SPDK_BDEV_IO_TYPE_READ:
		if (success) {
			offset = bdev_io->u.bdev.offset_blocks;
			remapped_offset = part_io->u.bdev.offset_blocks;

			rc = bdev_part_remap_dif(bdev_io, offset, remapped_offset);
			if (rc != 0) {
				success = false;
			}
		}
		break;
	case SPDK_BDEV_IO_TYPE_ZCOPY:
		spdk_bdev_io_set_buf(part_io, bdev_io->u.bdev.iovs[0].iov_base,
				     bdev_io->u.bdev.iovs[0].iov_len);
		break;
	default:
		break;
	}

	if (part_io->internal.f.split) {
		part_io->internal.split.stored_user_cb(part_io, success, NULL);
	} else {
		spdk_bdev_io_complete_base_io_status(part_io, bdev_io);
	}

	spdk_bdev_free_io(bdev_io);
}

static inline void
bdev_part_init_ext_io_opts(struct spdk_bdev_io *bdev_io, struct spdk_bdev_ext_io_opts *opts)
{
	memset(opts, 0, sizeof(*opts));
	opts->size = sizeof(*opts);
	opts->memory_domain = bdev_io->u.bdev.memory_domain;
	opts->memory_domain_ctx = bdev_io->u.bdev.memory_domain_ctx;
	opts->metadata = bdev_io->u.bdev.md_buf;
}
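
/*
 * Resubmit a part's bdev_io to the base bdev, shifting the LBA range by the
 * part's starting offset. Block counts are unchanged; only offsets are
 * translated (including the source offset of a copy).
 */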

int
spdk_bdev_part_submit_request_ext(struct spdk_bdev_part_channel *ch, struct spdk_bdev_io *bdev_io,
				  spdk_bdev_io_completion_cb cb)
{
	struct spdk_bdev_part *part = ch->part;
	struct spdk_io_channel *base_ch = ch->base_ch;
	struct spdk_bdev_desc *base_desc = part->internal.base->desc;
	struct spdk_bdev_ext_io_opts io_opts;
	uint64_t offset, remapped_offset, remapped_src_offset;
	int rc = 0;

	if (cb != NULL) {
		bdev_io->internal.f.split = true;
		bdev_io->internal.split.stored_user_cb = cb;
	}

	offset = bdev_io->u.bdev.offset_blocks;
	remapped_offset = offset + part->internal.offset_blocks;

	/* Modify the I/O to adjust for the offset within the base bdev. */
	switch (bdev_io->type) {
	case SPDK_BDEV_IO_TYPE_READ:
		bdev_part_init_ext_io_opts(bdev_io, &io_opts);
		rc = spdk_bdev_readv_blocks_ext(base_desc, base_ch, bdev_io->u.bdev.iovs,
						bdev_io->u.bdev.iovcnt, remapped_offset,
						bdev_io->u.bdev.num_blocks,
						bdev_part_complete_io, bdev_io, &io_opts);
		break;
	case SPDK_BDEV_IO_TYPE_WRITE:
		rc = bdev_part_remap_dif(bdev_io, offset, remapped_offset);
		if (rc != 0) {
			return SPDK_BDEV_IO_STATUS_FAILED;
		}
		bdev_part_init_ext_io_opts(bdev_io, &io_opts);
		rc = spdk_bdev_writev_blocks_ext(base_desc, base_ch, bdev_io->u.bdev.iovs,
						 bdev_io->u.bdev.iovcnt, remapped_offset,
						 bdev_io->u.bdev.num_blocks,
						 bdev_part_complete_io, bdev_io, &io_opts);
		break;
	case SPDK_BDEV_IO_TYPE_WRITE_ZEROES:
		rc = spdk_bdev_write_zeroes_blocks(base_desc, base_ch, remapped_offset,
						   bdev_io->u.bdev.num_blocks, bdev_part_complete_io,
						   bdev_io);
		break;
	case SPDK_BDEV_IO_TYPE_UNMAP:
		rc = spdk_bdev_unmap_blocks(base_desc, base_ch, remapped_offset,
					    bdev_io->u.bdev.num_blocks, bdev_part_complete_io,
					    bdev_io);
		break;
	case SPDK_BDEV_IO_TYPE_FLUSH:
		rc = spdk_bdev_flush_blocks(base_desc, base_ch, remapped_offset,
					    bdev_io->u.bdev.num_blocks, bdev_part_complete_io,
					    bdev_io);
		break;
	case SPDK_BDEV_IO_TYPE_RESET:
		rc = spdk_bdev_reset(base_desc, base_ch,
				     bdev_part_complete_io, bdev_io);
		break;
	case SPDK_BDEV_IO_TYPE_ZCOPY:
		rc = spdk_bdev_zcopy_start(base_desc, base_ch, NULL, 0, remapped_offset,
					   bdev_io->u.bdev.num_blocks, bdev_io->u.bdev.zcopy.populate,
					   bdev_part_complete_io, bdev_io);
		break;
	case SPDK_BDEV_IO_TYPE_COMPARE:
		if (!bdev_io->u.bdev.md_buf) {
			rc = spdk_bdev_comparev_blocks(base_desc, base_ch,
						       bdev_io->u.bdev.iovs,
						       bdev_io->u.bdev.iovcnt,
						       remapped_offset,
						       bdev_io->u.bdev.num_blocks,
						       bdev_part_complete_io, bdev_io);
		} else {
			rc = spdk_bdev_comparev_blocks_with_md(base_desc, base_ch,
							       bdev_io->u.bdev.iovs,
							       bdev_io->u.bdev.iovcnt,
							       bdev_io->u.bdev.md_buf,
							       remapped_offset,
							       bdev_io->u.bdev.num_blocks,
							       bdev_part_complete_io, bdev_io);
		}
		break;
	case SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE:
		rc = spdk_bdev_comparev_and_writev_blocks(base_desc, base_ch, bdev_io->u.bdev.iovs,
				bdev_io->u.bdev.iovcnt,
				bdev_io->u.bdev.fused_iovs,
				bdev_io->u.bdev.fused_iovcnt,
				remapped_offset,
				bdev_io->u.bdev.num_blocks,
				bdev_part_complete_io, bdev_io);
		break;
	case SPDK_BDEV_IO_TYPE_COPY:
		remapped_src_offset = bdev_io->u.bdev.copy.src_offset_blocks + part->internal.offset_blocks;
		rc = spdk_bdev_copy_blocks(base_desc, base_ch, remapped_offset, remapped_src_offset,
					   bdev_io->u.bdev.num_blocks, bdev_part_complete_io,
					   bdev_io);
		break;
	default:
		SPDK_ERRLOG("unknown I/O type %d\n", bdev_io->type);
		return SPDK_BDEV_IO_STATUS_FAILED;
	}

	return rc;
}

int
spdk_bdev_part_submit_request(struct spdk_bdev_part_channel *ch, struct spdk_bdev_io *bdev_io)
{
	return spdk_bdev_part_submit_request_ext(ch, bdev_io, NULL);
}
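
/*
 * Per-thread channel callbacks: a part channel holds an I/O channel to the
 * base bdev and then invokes the module's optional create/destroy callbacks,
 * which may initialize additional per-channel context (channel_size bytes
 * were reserved when the io_device was registered).
 */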

static int
bdev_part_channel_create_cb(void *io_device, void *ctx_buf)
{
	struct spdk_bdev_part *part = (struct spdk_bdev_part *)io_device;
	struct spdk_bdev_part_channel *ch = ctx_buf;

	ch->part = part;
	ch->base_ch = spdk_bdev_get_io_channel(part->internal.base->desc);
	if (ch->base_ch == NULL) {
		return -1;
	}

	if (part->internal.base->ch_create_cb) {
		return part->internal.base->ch_create_cb(io_device, ctx_buf);
	} else {
		return 0;
	}
}

static void
bdev_part_channel_destroy_cb(void *io_device, void *ctx_buf)
{
	struct spdk_bdev_part *part = (struct spdk_bdev_part *)io_device;
	struct spdk_bdev_part_channel *ch = ctx_buf;

	if (part->internal.base->ch_destroy_cb) {
		part->internal.base->ch_destroy_cb(io_device, ctx_buf);
	}
	spdk_put_io_channel(ch->base_ch);
}

static void
bdev_part_base_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev,
			void *event_ctx)
{
	struct spdk_bdev_part_base *base = event_ctx;

	switch (type) {
	case SPDK_BDEV_EVENT_REMOVE:
		base->remove_cb(base);
		break;
	default:
		SPDK_NOTICELOG("Unsupported bdev event: type %d\n", type);
		break;
	}
}

int
spdk_bdev_part_base_construct_ext(const char *bdev_name,
				  spdk_bdev_remove_cb_t remove_cb, struct spdk_bdev_module *module,
				  struct spdk_bdev_fn_table *fn_table, struct bdev_part_tailq *tailq,
				  spdk_bdev_part_base_free_fn free_fn, void *ctx,
				  uint32_t channel_size, spdk_io_channel_create_cb ch_create_cb,
				  spdk_io_channel_destroy_cb ch_destroy_cb,
				  struct spdk_bdev_part_base **_base)
{
	int rc;
	struct spdk_bdev_part_base *base;

	if (_base == NULL) {
		return -EINVAL;
	}

	base = calloc(1, sizeof(*base));
	if (!base) {
		SPDK_ERRLOG("Memory allocation failure\n");
		return -ENOMEM;
	}
	fn_table->get_io_channel = bdev_part_get_io_channel;
	fn_table->io_type_supported = bdev_part_io_type_supported;

	base->desc = NULL;
	base->ref = 0;
	base->module = module;
	base->fn_table = fn_table;
	base->tailq = tailq;
	base->base_free_fn = free_fn;
	base->ctx = ctx;
	base->claimed = false;
	base->channel_size = channel_size;
	base->ch_create_cb = ch_create_cb;
	base->ch_destroy_cb = ch_destroy_cb;
	base->remove_cb = remove_cb;

	rc = spdk_bdev_open_ext(bdev_name, false, bdev_part_base_event_cb, base, &base->desc);
	if (rc) {
		if (rc == -ENODEV) {
			free(base);
		} else {
			SPDK_ERRLOG("could not open bdev %s: %s\n", bdev_name, spdk_strerror(-rc));
			spdk_bdev_part_base_free(base);
		}
		return rc;
	}

	base->bdev = spdk_bdev_desc_get_bdev(base->desc);

	/* Save the thread where the base device is opened */
	base->thread = spdk_get_thread();

	*_base = base;

	return 0;
}
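
/*
 * The construct opts follow the usual SPDK ABI-compatibility pattern: callers
 * record the size of the structure they were compiled against in opts_size,
 * and the library copies only the fields that fit within that size. This lets
 * new fields be appended without breaking older callers.
 */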

void
spdk_bdev_part_construct_opts_init(struct spdk_bdev_part_construct_opts *opts, uint64_t size)
{
	if (opts == NULL) {
		SPDK_ERRLOG("opts should not be NULL\n");
		assert(opts != NULL);
		return;
	}
	if (size == 0) {
		SPDK_ERRLOG("size should not be zero\n");
		assert(size != 0);
		return;
	}

	memset(opts, 0, size);
	opts->opts_size = size;
}

static void
part_construct_opts_copy(const struct spdk_bdev_part_construct_opts *src,
			 struct spdk_bdev_part_construct_opts *dst)
{
	if (src->opts_size == 0) {
		SPDK_ERRLOG("size should not be zero\n");
		assert(false);
	}

	memset(dst, 0, sizeof(*dst));
	dst->opts_size = src->opts_size;

#define FIELD_OK(field) \
	offsetof(struct spdk_bdev_part_construct_opts, field) + sizeof(src->field) <= src->opts_size

#define SET_FIELD(field) \
	if (FIELD_OK(field)) { \
		dst->field = src->field; \
	} \

	SET_FIELD(uuid);

	/* Do not remove this statement. When adding a new field, update the expected
	 * size in this assert and add a corresponding SET_FIELD statement above. */
	SPDK_STATIC_ASSERT(sizeof(struct spdk_bdev_part_construct_opts) == 24, "Incorrect size");

#undef FIELD_OK
#undef SET_FIELD
}
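
/*
 * Typical construction flow for a partition module (a minimal sketch; the
 * my_part and my_base names below are hypothetical, not part of this API):
 *
 *	struct spdk_bdev_part *my_part = calloc(1, sizeof(*my_part));
 *
 *	rc = spdk_bdev_part_construct(my_part, my_base, "Base0p0",
 *				      0, 1024, "My Partition Disk");
 *
 * On success the part is registered as a bdev, inserted into the base's
 * tailq, and holds a reference on the base until spdk_bdev_part_free() is
 * called from the module's destruct path.
 */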

int
spdk_bdev_part_construct_ext(struct spdk_bdev_part *part, struct spdk_bdev_part_base *base,
			     char *name, uint64_t offset_blocks, uint64_t num_blocks,
			     char *product_name, const struct spdk_bdev_part_construct_opts *_opts)
{
	int rc;
	bool first_claimed = false;
	struct spdk_bdev_part_construct_opts opts;
	struct spdk_uuid ns_uuid;

	if (_opts == NULL) {
		spdk_bdev_part_construct_opts_init(&opts, sizeof(opts));
	} else {
		part_construct_opts_copy(_opts, &opts);
	}

	part->internal.bdev.blocklen = base->bdev->blocklen;
	part->internal.bdev.blockcnt = num_blocks;
	part->internal.offset_blocks = offset_blocks;

	part->internal.bdev.write_cache = base->bdev->write_cache;
	part->internal.bdev.required_alignment = base->bdev->required_alignment;
	part->internal.bdev.ctxt = part;
	part->internal.bdev.module = base->module;
	part->internal.bdev.fn_table = base->fn_table;

	part->internal.bdev.md_interleave = base->bdev->md_interleave;
	part->internal.bdev.md_len = base->bdev->md_len;
	part->internal.bdev.dif_type = base->bdev->dif_type;
	part->internal.bdev.dif_is_head_of_md = base->bdev->dif_is_head_of_md;
	part->internal.bdev.dif_check_flags = base->bdev->dif_check_flags;

	part->internal.bdev.name = strdup(name);
	if (part->internal.bdev.name == NULL) {
		SPDK_ERRLOG("Failed to allocate name for new part of bdev %s\n", spdk_bdev_get_name(base->bdev));
		return -1;
	}

	part->internal.bdev.product_name = strdup(product_name);
	if (part->internal.bdev.product_name == NULL) {
		free(part->internal.bdev.name);
		SPDK_ERRLOG("Failed to allocate product name for new part of bdev %s\n",
			    spdk_bdev_get_name(base->bdev));
		return -1;
	}

	/* The caller may have already specified a UUID. If not, we'll generate one
	 * based on the namespace UUID, the base bdev's UUID and the block range of the
	 * partition.
	 */
	if (!spdk_uuid_is_null(&opts.uuid)) {
		spdk_uuid_copy(&part->internal.bdev.uuid, &opts.uuid);
	} else {
		struct {
			struct spdk_uuid	uuid;
			uint64_t		offset_blocks;
			uint64_t		num_blocks;
		} base_name;

		/* We need to create a unique base name for this partition. We can't just use
		 * the base bdev's UUID, since it may be used for multiple partitions. So
		 * construct a binary name consisting of the uuid + the block range for this
		 * partition.
		 */
		spdk_uuid_copy(&base_name.uuid, &base->bdev->uuid);
		base_name.offset_blocks = offset_blocks;
		base_name.num_blocks = num_blocks;

		spdk_uuid_parse(&ns_uuid, BDEV_PART_NAMESPACE_UUID);
		rc = spdk_uuid_generate_sha1(&part->internal.bdev.uuid, &ns_uuid,
					     (const char *)&base_name, sizeof(base_name));
		if (rc) {
			SPDK_ERRLOG("Could not generate new UUID\n");
			free(part->internal.bdev.name);
			free(part->internal.bdev.product_name);
			return -1;
		}
	}

	base->ref++;
	part->internal.base = base;

	if (!base->claimed) {
		int rc;

		rc = spdk_bdev_module_claim_bdev(base->bdev, base->desc, base->module);
		if (rc) {
			SPDK_ERRLOG("could not claim bdev %s\n", spdk_bdev_get_name(base->bdev));
			free(part->internal.bdev.name);
			free(part->internal.bdev.product_name);
			base->ref--;
			return -1;
		}
		base->claimed = true;
		first_claimed = true;
	}

	spdk_io_device_register(part, bdev_part_channel_create_cb,
				bdev_part_channel_destroy_cb,
				base->channel_size,
				name);

	rc = spdk_bdev_register(&part->internal.bdev);
	if (rc == 0) {
		TAILQ_INSERT_TAIL(base->tailq, part, tailq);
	} else {
		spdk_io_device_unregister(part, NULL);
		if (--base->ref == 0) {
			spdk_bdev_module_release_bdev(base->bdev);
		}
		free(part->internal.bdev.name);
		free(part->internal.bdev.product_name);
		if (first_claimed == true) {
			base->claimed = false;
		}
	}

	return rc;
}

int
spdk_bdev_part_construct(struct spdk_bdev_part *part, struct spdk_bdev_part_base *base,
			 char *name, uint64_t offset_blocks, uint64_t num_blocks,
			 char *product_name)
{
	return spdk_bdev_part_construct_ext(part, base, name, offset_blocks, num_blocks,
					    product_name, NULL);
}