/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2018 Intel Corporation.
 * Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES.
 * All rights reserved.
 */

/*
 * Common code for partition-like virtual bdevs.
 */
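/*
 * Overview: a struct spdk_bdev_part_base wraps a single opened base bdev, and each
 * struct spdk_bdev_part layered on top of it exposes a contiguous block range of
 * that base bdev as its own registered bdev. Virtual bdev modules in the SPDK tree,
 * e.g. GPT (module/bdev/gpt) and split (module/bdev/split), are built on this code.
 */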
#include "spdk/bdev.h"
#include "spdk/likely.h"
#include "spdk/log.h"
#include "spdk/string.h"
#include "spdk/thread.h"

#include "spdk/bdev_module.h"

/* This namespace UUID was generated using uuid_generate(). */
#define BDEV_PART_NAMESPACE_UUID "976b899e-3e1e-4d71-ab69-c2b08e9df8b8"

struct spdk_bdev_part_base {
	struct spdk_bdev		*bdev;
	struct spdk_bdev_desc		*desc;
	uint32_t			ref;
	uint32_t			channel_size;
	spdk_bdev_part_base_free_fn	base_free_fn;
	void				*ctx;
	bool				claimed;
	struct spdk_bdev_module		*module;
	struct spdk_bdev_fn_table	*fn_table;
	struct bdev_part_tailq		*tailq;
	spdk_io_channel_create_cb	ch_create_cb;
	spdk_io_channel_destroy_cb	ch_destroy_cb;
	spdk_bdev_remove_cb_t		remove_cb;
	struct spdk_thread		*thread;
};

struct spdk_bdev *
spdk_bdev_part_base_get_bdev(struct spdk_bdev_part_base *part_base)
{
	return part_base->bdev;
}

struct spdk_bdev_desc *
spdk_bdev_part_base_get_desc(struct spdk_bdev_part_base *part_base)
{
	return part_base->desc;
}

struct bdev_part_tailq *
spdk_bdev_part_base_get_tailq(struct spdk_bdev_part_base *part_base)
{
	return part_base->tailq;
}

void *
spdk_bdev_part_base_get_ctx(struct spdk_bdev_part_base *part_base)
{
	return part_base->ctx;
}

const char *
spdk_bdev_part_base_get_bdev_name(struct spdk_bdev_part_base *part_base)
{
	return part_base->bdev->name;
}

static void
bdev_part_base_free(void *ctx)
{
	struct spdk_bdev_desc *desc = ctx;

	spdk_bdev_close(desc);
}

void
spdk_bdev_part_base_free(struct spdk_bdev_part_base *base)
{
	if (base->desc) {
		/* Close the underlying bdev on the same thread on which it was opened. */
		if (base->thread && base->thread != spdk_get_thread()) {
			spdk_thread_send_msg(base->thread, bdev_part_base_free, base->desc);
		} else {
			spdk_bdev_close(base->desc);
		}
	}

	if (base->base_free_fn != NULL) {
		base->base_free_fn(base->ctx);
	}

	free(base);
}
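/*
 * Parts are reference counted against their base: each successfully constructed
 * part takes a reference on the base, and bdev_part_free_cb below runs once a
 * part's io_device has been fully unregistered. When the last reference is
 * dropped, the module's claim on the base bdev is released and the base is freed.
 */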
static void
bdev_part_free_cb(void *io_device)
{
	struct spdk_bdev_part *part = io_device;
	struct spdk_bdev_part_base *base;

	assert(part);
	assert(part->internal.base);

	base = part->internal.base;

	TAILQ_REMOVE(base->tailq, part, tailq);

	if (--base->ref == 0) {
		spdk_bdev_module_release_bdev(base->bdev);
		spdk_bdev_part_base_free(base);
	}

	spdk_bdev_destruct_done(&part->internal.bdev, 0);
	free(part->internal.bdev.name);
	free(part->internal.bdev.product_name);
	free(part);
}

int
spdk_bdev_part_free(struct spdk_bdev_part *part)
{
	spdk_io_device_unregister(part, bdev_part_free_cb);

	/* Return 1 to indicate that this is an asynchronous operation that isn't complete
	 * until spdk_bdev_destruct_done is called */
	return 1;
}

void
spdk_bdev_part_base_hotremove(struct spdk_bdev_part_base *part_base, struct bdev_part_tailq *tailq)
{
	struct spdk_bdev_part *part, *tmp;

	TAILQ_FOREACH_SAFE(part, tailq, tailq, tmp) {
		if (part->internal.base == part_base) {
			spdk_bdev_unregister(&part->internal.bdev, NULL, NULL);
		}
	}
}

static bool
bdev_part_io_type_supported(void *_part, enum spdk_bdev_io_type io_type)
{
	struct spdk_bdev_part *part = _part;

	/* We can't decode/modify passthrough NVMe commands, so don't report
	 * that a partition supports these io types, even if the underlying
	 * bdev does.
	 */
	switch (io_type) {
	case SPDK_BDEV_IO_TYPE_NVME_ADMIN:
	case SPDK_BDEV_IO_TYPE_NVME_IO:
	case SPDK_BDEV_IO_TYPE_NVME_IO_MD:
		return false;
	default:
		break;
	}

	return part->internal.base->bdev->fn_table->io_type_supported(part->internal.base->bdev->ctxt,
			io_type);
}

static struct spdk_io_channel *
bdev_part_get_io_channel(void *_part)
{
	struct spdk_bdev_part *part = _part;

	return spdk_get_io_channel(part);
}

struct spdk_bdev *
spdk_bdev_part_get_bdev(struct spdk_bdev_part *part)
{
	return &part->internal.bdev;
}

struct spdk_bdev_part_base *
spdk_bdev_part_get_base(struct spdk_bdev_part *part)
{
	return part->internal.base;
}

struct spdk_bdev *
spdk_bdev_part_get_base_bdev(struct spdk_bdev_part *part)
{
	return part->internal.base->bdev;
}

uint64_t
spdk_bdev_part_get_offset_blocks(struct spdk_bdev_part *part)
{
	return part->internal.offset_blocks;
}
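/*
 * DIF reference tags are derived from the logical block address, so an I/O that a
 * part remaps by offset_blocks would fail reference tag checks against the base
 * bdev's LBAs. When reference tag checking is enabled, the helper below rewrites
 * the reference tags between the part-relative and base-relative block offsets.
 */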
static int
bdev_part_remap_dif(struct spdk_bdev_io *bdev_io, uint32_t offset,
		    uint32_t remapped_offset)
{
	struct spdk_bdev *bdev = bdev_io->bdev;
	struct spdk_dif_ctx dif_ctx;
	struct spdk_dif_error err_blk = {};
	int rc;
	struct spdk_dif_ctx_init_ext_opts dif_opts;

	if (spdk_likely(!(bdev->dif_check_flags & SPDK_DIF_FLAGS_REFTAG_CHECK))) {
		return 0;
	}

	dif_opts.size = SPDK_SIZEOF(&dif_opts, dif_pi_format);
	dif_opts.dif_pi_format = SPDK_DIF_PI_FORMAT_16;
	rc = spdk_dif_ctx_init(&dif_ctx,
			       bdev->blocklen, bdev->md_len, bdev->md_interleave,
			       bdev->dif_is_head_of_md, bdev->dif_type, bdev->dif_check_flags,
			       offset, 0, 0, 0, 0, &dif_opts);
	if (rc != 0) {
		SPDK_ERRLOG("Initialization of DIF context failed\n");
		return rc;
	}

	spdk_dif_ctx_set_remapped_init_ref_tag(&dif_ctx, remapped_offset);

	if (bdev->md_interleave) {
		rc = spdk_dif_remap_ref_tag(bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
					    bdev_io->u.bdev.num_blocks, &dif_ctx, &err_blk);
	} else {
		struct iovec md_iov = {
			.iov_base = bdev_io->u.bdev.md_buf,
			.iov_len = bdev_io->u.bdev.num_blocks * bdev->md_len,
		};

		rc = spdk_dix_remap_ref_tag(&md_iov, bdev_io->u.bdev.num_blocks, &dif_ctx, &err_blk);
	}

	if (rc != 0) {
		SPDK_ERRLOG("Remapping reference tag failed. type=%d, offset=%" PRIu32 "\n",
			    err_blk.err_type, err_blk.err_offset);
	}

	return rc;
}

static void
bdev_part_complete_io(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	struct spdk_bdev_io *part_io = cb_arg;
	uint32_t offset, remapped_offset;
	spdk_bdev_io_completion_cb cb;
	int rc, status;

	switch (bdev_io->type) {
	case SPDK_BDEV_IO_TYPE_READ:
		if (success) {
			offset = bdev_io->u.bdev.offset_blocks;
			remapped_offset = part_io->u.bdev.offset_blocks;

			rc = bdev_part_remap_dif(bdev_io, offset, remapped_offset);
			if (rc != 0) {
				success = false;
			}
		}
		break;
	case SPDK_BDEV_IO_TYPE_ZCOPY:
		spdk_bdev_io_set_buf(part_io, bdev_io->u.bdev.iovs[0].iov_base,
				     bdev_io->u.bdev.iovs[0].iov_len);
		break;
	default:
		break;
	}

	cb = part_io->u.bdev.stored_user_cb;
	if (cb != NULL) {
		cb(part_io, success, NULL);
	} else {
		status = success ? SPDK_BDEV_IO_STATUS_SUCCESS : SPDK_BDEV_IO_STATUS_FAILED;

		spdk_bdev_io_complete(part_io, status);
	}

	spdk_bdev_free_io(bdev_io);
}

static inline void
bdev_part_init_ext_io_opts(struct spdk_bdev_io *bdev_io, struct spdk_bdev_ext_io_opts *opts)
{
	memset(opts, 0, sizeof(*opts));
	opts->size = sizeof(*opts);
	opts->memory_domain = bdev_io->u.bdev.memory_domain;
	opts->memory_domain_ctx = bdev_io->u.bdev.memory_domain_ctx;
	opts->metadata = bdev_io->u.bdev.md_buf;
}

int
spdk_bdev_part_submit_request_ext(struct spdk_bdev_part_channel *ch, struct spdk_bdev_io *bdev_io,
				  spdk_bdev_io_completion_cb cb)
{
	struct spdk_bdev_part *part = ch->part;
	struct spdk_io_channel *base_ch = ch->base_ch;
	struct spdk_bdev_desc *base_desc = part->internal.base->desc;
	struct spdk_bdev_ext_io_opts io_opts;
	uint64_t offset, remapped_offset, remapped_src_offset;
	int rc = 0;

	bdev_io->u.bdev.stored_user_cb = cb;

	offset = bdev_io->u.bdev.offset_blocks;
	remapped_offset = offset + part->internal.offset_blocks;

	/* Modify the I/O to adjust for the offset within the base bdev. */
	switch (bdev_io->type) {
	case SPDK_BDEV_IO_TYPE_READ:
		bdev_part_init_ext_io_opts(bdev_io, &io_opts);
		rc = spdk_bdev_readv_blocks_ext(base_desc, base_ch, bdev_io->u.bdev.iovs,
						bdev_io->u.bdev.iovcnt, remapped_offset,
						bdev_io->u.bdev.num_blocks,
						bdev_part_complete_io, bdev_io, &io_opts);
		break;
	case SPDK_BDEV_IO_TYPE_WRITE:
		rc = bdev_part_remap_dif(bdev_io, offset, remapped_offset);
		if (rc != 0) {
			return SPDK_BDEV_IO_STATUS_FAILED;
		}
		bdev_part_init_ext_io_opts(bdev_io, &io_opts);
		rc = spdk_bdev_writev_blocks_ext(base_desc, base_ch, bdev_io->u.bdev.iovs,
						 bdev_io->u.bdev.iovcnt, remapped_offset,
						 bdev_io->u.bdev.num_blocks,
						 bdev_part_complete_io, bdev_io, &io_opts);
		break;
	case SPDK_BDEV_IO_TYPE_WRITE_ZEROES:
		rc = spdk_bdev_write_zeroes_blocks(base_desc, base_ch, remapped_offset,
						   bdev_io->u.bdev.num_blocks, bdev_part_complete_io,
						   bdev_io);
		break;
	case SPDK_BDEV_IO_TYPE_UNMAP:
		rc = spdk_bdev_unmap_blocks(base_desc, base_ch, remapped_offset,
					    bdev_io->u.bdev.num_blocks, bdev_part_complete_io,
					    bdev_io);
		break;
	case SPDK_BDEV_IO_TYPE_FLUSH:
		rc = spdk_bdev_flush_blocks(base_desc, base_ch, remapped_offset,
					    bdev_io->u.bdev.num_blocks, bdev_part_complete_io,
					    bdev_io);
		break;
	case SPDK_BDEV_IO_TYPE_RESET:
		rc = spdk_bdev_reset(base_desc, base_ch,
				     bdev_part_complete_io, bdev_io);
		break;
	case SPDK_BDEV_IO_TYPE_ZCOPY:
		rc = spdk_bdev_zcopy_start(base_desc, base_ch, NULL, 0, remapped_offset,
					   bdev_io->u.bdev.num_blocks, bdev_io->u.bdev.zcopy.populate,
					   bdev_part_complete_io, bdev_io);
		break;
	case SPDK_BDEV_IO_TYPE_COMPARE:
		if (!bdev_io->u.bdev.md_buf) {
			rc = spdk_bdev_comparev_blocks(base_desc, base_ch,
						       bdev_io->u.bdev.iovs,
						       bdev_io->u.bdev.iovcnt,
						       remapped_offset,
						       bdev_io->u.bdev.num_blocks,
						       bdev_part_complete_io, bdev_io);
		} else {
			rc = spdk_bdev_comparev_blocks_with_md(base_desc, base_ch,
							       bdev_io->u.bdev.iovs,
							       bdev_io->u.bdev.iovcnt,
							       bdev_io->u.bdev.md_buf,
							       remapped_offset,
							       bdev_io->u.bdev.num_blocks,
							       bdev_part_complete_io, bdev_io);
		}
		break;
	case SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE:
		rc = spdk_bdev_comparev_and_writev_blocks(base_desc, base_ch, bdev_io->u.bdev.iovs,
				bdev_io->u.bdev.iovcnt,
				bdev_io->u.bdev.fused_iovs,
				bdev_io->u.bdev.fused_iovcnt,
				remapped_offset,
				bdev_io->u.bdev.num_blocks,
				bdev_part_complete_io, bdev_io);
		break;
	case SPDK_BDEV_IO_TYPE_COPY:
		remapped_src_offset = bdev_io->u.bdev.copy.src_offset_blocks + part->internal.offset_blocks;
		rc = spdk_bdev_copy_blocks(base_desc, base_ch, remapped_offset, remapped_src_offset,
					   bdev_io->u.bdev.num_blocks, bdev_part_complete_io,
					   bdev_io);
		break;
	default:
		SPDK_ERRLOG("unknown I/O type %d\n", bdev_io->type);
		return SPDK_BDEV_IO_STATUS_FAILED;
	}

	return rc;
}

int
spdk_bdev_part_submit_request(struct spdk_bdev_part_channel *ch, struct spdk_bdev_io *bdev_io)
{
	return spdk_bdev_part_submit_request_ext(ch, bdev_io, NULL);
}
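/*
 * A minimal sketch of how a partition module's submit_request callback might
 * forward I/O through this API. The struct my_channel type and its part_ch member
 * are illustrative assumptions, not part of this API, and -ENOMEM retry handling
 * is omitted:
 *
 *	static void
 *	my_part_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
 *	{
 *		struct my_channel *ch = spdk_io_channel_get_ctx(_ch);
 *
 *		if (spdk_bdev_part_submit_request(&ch->part_ch, bdev_io) != 0) {
 *			spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
 *		}
 *	}
 */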
static int
bdev_part_channel_create_cb(void *io_device, void *ctx_buf)
{
	struct spdk_bdev_part *part = (struct spdk_bdev_part *)io_device;
	struct spdk_bdev_part_channel *ch = ctx_buf;

	ch->part = part;
	ch->base_ch = spdk_bdev_get_io_channel(part->internal.base->desc);
	if (ch->base_ch == NULL) {
		return -1;
	}

	if (part->internal.base->ch_create_cb) {
		return part->internal.base->ch_create_cb(io_device, ctx_buf);
	} else {
		return 0;
	}
}

static void
bdev_part_channel_destroy_cb(void *io_device, void *ctx_buf)
{
	struct spdk_bdev_part *part = (struct spdk_bdev_part *)io_device;
	struct spdk_bdev_part_channel *ch = ctx_buf;

	if (part->internal.base->ch_destroy_cb) {
		part->internal.base->ch_destroy_cb(io_device, ctx_buf);
	}
	spdk_put_io_channel(ch->base_ch);
}

static void
bdev_part_base_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev,
			void *event_ctx)
{
	struct spdk_bdev_part_base *base = event_ctx;

	switch (type) {
	case SPDK_BDEV_EVENT_REMOVE:
		base->remove_cb(base);
		break;
	default:
		SPDK_NOTICELOG("Unsupported bdev event: type %d\n", type);
		break;
	}
}

int
spdk_bdev_part_base_construct_ext(const char *bdev_name,
				  spdk_bdev_remove_cb_t remove_cb, struct spdk_bdev_module *module,
				  struct spdk_bdev_fn_table *fn_table, struct bdev_part_tailq *tailq,
				  spdk_bdev_part_base_free_fn free_fn, void *ctx,
				  uint32_t channel_size, spdk_io_channel_create_cb ch_create_cb,
				  spdk_io_channel_destroy_cb ch_destroy_cb,
				  struct spdk_bdev_part_base **_base)
{
	int rc;
	struct spdk_bdev_part_base *base;

	if (_base == NULL) {
		return -EINVAL;
	}

	base = calloc(1, sizeof(*base));
	if (!base) {
		SPDK_ERRLOG("Memory allocation failure\n");
		return -ENOMEM;
	}
	fn_table->get_io_channel = bdev_part_get_io_channel;
	fn_table->io_type_supported = bdev_part_io_type_supported;

	base->desc = NULL;
	base->ref = 0;
	base->module = module;
	base->fn_table = fn_table;
	base->tailq = tailq;
	base->base_free_fn = free_fn;
	base->ctx = ctx;
	base->claimed = false;
	base->channel_size = channel_size;
	base->ch_create_cb = ch_create_cb;
	base->ch_destroy_cb = ch_destroy_cb;
	base->remove_cb = remove_cb;

	rc = spdk_bdev_open_ext(bdev_name, false, bdev_part_base_event_cb, base, &base->desc);
	if (rc) {
		if (rc == -ENODEV) {
			free(base);
		} else {
			SPDK_ERRLOG("could not open bdev %s: %s\n", bdev_name, spdk_strerror(-rc));
			spdk_bdev_part_base_free(base);
		}
		return rc;
	}

	base->bdev = spdk_bdev_desc_get_bdev(base->desc);

	/* Save the thread where the base device is opened */
	base->thread = spdk_get_thread();

	*_base = base;

	return 0;
}
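/*
 * A minimal sketch of constructing a part base over an existing bdev named
 * "Nvme0n1". The my_* module, fn_table, tailq and channel type are assumed to be
 * defined by the calling module:
 *
 *	struct spdk_bdev_part_base *base = NULL;
 *	int rc;
 *
 *	rc = spdk_bdev_part_base_construct_ext("Nvme0n1", my_hotremove_cb,
 *					       &my_module, &my_fn_table, &my_tailq,
 *					       NULL, NULL, sizeof(struct my_channel),
 *					       NULL, NULL, &base);
 */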
void
spdk_bdev_part_construct_opts_init(struct spdk_bdev_part_construct_opts *opts, uint64_t size)
{
	if (opts == NULL) {
		SPDK_ERRLOG("opts should not be NULL\n");
		assert(opts != NULL);
		return;
	}
	if (size == 0) {
		SPDK_ERRLOG("size should not be zero\n");
		assert(size != 0);
		return;
	}

	memset(opts, 0, size);
	opts->opts_size = size;
}

static void
part_construct_opts_copy(const struct spdk_bdev_part_construct_opts *src,
			 struct spdk_bdev_part_construct_opts *dst)
{
	if (src->opts_size == 0) {
		SPDK_ERRLOG("size should not be zero\n");
		assert(false);
	}

	memset(dst, 0, sizeof(*dst));
	dst->opts_size = src->opts_size;

#define FIELD_OK(field) \
	offsetof(struct spdk_bdev_part_construct_opts, field) + sizeof(src->field) <= src->opts_size

#define SET_FIELD(field) \
	if (FIELD_OK(field)) { \
		dst->field = src->field; \
	} \

	SET_FIELD(uuid);

	/* Do not remove this statement. When adding a new field, update the assert
	 * below and add a corresponding SET_FIELD statement. */
	SPDK_STATIC_ASSERT(sizeof(struct spdk_bdev_part_construct_opts) == 24, "Incorrect size");

#undef FIELD_OK
#undef SET_FIELD
}
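/*
 * A minimal sketch of supplying an explicit UUID to spdk_bdev_part_construct_ext()
 * instead of letting it derive one (the UUID string is purely illustrative):
 *
 *	struct spdk_bdev_part_construct_opts opts;
 *
 *	spdk_bdev_part_construct_opts_init(&opts, sizeof(opts));
 *	spdk_uuid_parse(&opts.uuid, "c9e31a4a-45ae-4b52-8e27-a89048a2f73a");
 */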
int
spdk_bdev_part_construct_ext(struct spdk_bdev_part *part, struct spdk_bdev_part_base *base,
			     char *name, uint64_t offset_blocks, uint64_t num_blocks,
			     char *product_name, const struct spdk_bdev_part_construct_opts *_opts)
{
	int rc;
	bool first_claimed = false;
	struct spdk_bdev_part_construct_opts opts;
	struct spdk_uuid ns_uuid;

	if (_opts == NULL) {
		spdk_bdev_part_construct_opts_init(&opts, sizeof(opts));
	} else {
		part_construct_opts_copy(_opts, &opts);
	}

	part->internal.bdev.blocklen = base->bdev->blocklen;
	part->internal.bdev.blockcnt = num_blocks;
	part->internal.offset_blocks = offset_blocks;

	part->internal.bdev.write_cache = base->bdev->write_cache;
	part->internal.bdev.required_alignment = base->bdev->required_alignment;
	part->internal.bdev.ctxt = part;
	part->internal.bdev.module = base->module;
	part->internal.bdev.fn_table = base->fn_table;

	part->internal.bdev.md_interleave = base->bdev->md_interleave;
	part->internal.bdev.md_len = base->bdev->md_len;
	part->internal.bdev.dif_type = base->bdev->dif_type;
	part->internal.bdev.dif_is_head_of_md = base->bdev->dif_is_head_of_md;
	part->internal.bdev.dif_check_flags = base->bdev->dif_check_flags;

	part->internal.bdev.name = strdup(name);
	if (part->internal.bdev.name == NULL) {
		SPDK_ERRLOG("Failed to allocate name for new part of bdev %s\n", spdk_bdev_get_name(base->bdev));
		return -1;
	}

	part->internal.bdev.product_name = strdup(product_name);
	if (part->internal.bdev.product_name == NULL) {
		free(part->internal.bdev.name);
		SPDK_ERRLOG("Failed to allocate product name for new part of bdev %s\n",
			    spdk_bdev_get_name(base->bdev));
		return -1;
	}

	/* The caller may have already specified a UUID. If not, we'll generate one
	 * based on the namespace UUID, the base bdev's UUID and the block range of the
	 * partition.
	 */
	if (!spdk_uuid_is_null(&opts.uuid)) {
		spdk_uuid_copy(&part->internal.bdev.uuid, &opts.uuid);
	} else {
		struct {
			struct spdk_uuid	uuid;
			uint64_t		offset_blocks;
			uint64_t		num_blocks;
		} base_name;

		/* We need to create a unique base name for this partition. We can't just use
		 * the base bdev's UUID, since it may be used for multiple partitions. So
		 * construct a binary name consisting of the uuid + the block range for this
		 * partition.
		 */
		spdk_uuid_copy(&base_name.uuid, &base->bdev->uuid);
		base_name.offset_blocks = offset_blocks;
		base_name.num_blocks = num_blocks;

		spdk_uuid_parse(&ns_uuid, BDEV_PART_NAMESPACE_UUID);
		rc = spdk_uuid_generate_sha1(&part->internal.bdev.uuid, &ns_uuid,
					     (const char *)&base_name, sizeof(base_name));
		if (rc) {
			SPDK_ERRLOG("Could not generate new UUID\n");
			free(part->internal.bdev.name);
			free(part->internal.bdev.product_name);
			return -1;
		}
	}

	base->ref++;
	part->internal.base = base;

	if (!base->claimed) {
		int rc;

		rc = spdk_bdev_module_claim_bdev(base->bdev, base->desc, base->module);
		if (rc) {
			SPDK_ERRLOG("could not claim bdev %s\n", spdk_bdev_get_name(base->bdev));
			free(part->internal.bdev.name);
			free(part->internal.bdev.product_name);
			base->ref--;
			return -1;
		}
		base->claimed = true;
		first_claimed = true;
	}

	spdk_io_device_register(part, bdev_part_channel_create_cb,
				bdev_part_channel_destroy_cb,
				base->channel_size,
				name);

	rc = spdk_bdev_register(&part->internal.bdev);
	if (rc == 0) {
		TAILQ_INSERT_TAIL(base->tailq, part, tailq);
	} else {
		spdk_io_device_unregister(part, NULL);
		if (--base->ref == 0) {
			spdk_bdev_module_release_bdev(base->bdev);
		}
		free(part->internal.bdev.name);
		free(part->internal.bdev.product_name);
		if (first_claimed == true) {
			base->claimed = false;
		}
	}

	return rc;
}

int
spdk_bdev_part_construct(struct spdk_bdev_part *part, struct spdk_bdev_part_base *base,
			 char *name, uint64_t offset_blocks, uint64_t num_blocks,
			 char *product_name)
{
	return spdk_bdev_part_construct_ext(part, base, name, offset_blocks, num_blocks,
					    product_name, NULL);
}
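/*
 * A minimal end-to-end sketch, assuming a module-defined type that embeds
 * struct spdk_bdev_part as its first member (all my_* names are illustrative):
 *
 *	struct my_part {
 *		struct spdk_bdev_part	part;
 *	};
 *
 *	struct my_part *d = calloc(1, sizeof(*d));
 *
 *	rc = spdk_bdev_part_construct(&d->part, base, "Nvme0n1p0", 0, num_blocks,
 *				      "My Partition");
 *
 * On an SPDK_BDEV_EVENT_REMOVE event, the module's remove_cb would typically call
 * spdk_bdev_part_base_hotremove() to unregister every part built on the base, and
 * spdk_bdev_part_free() is expected to be called from the parts' destruct callback.
 */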