/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (C) 2018 Intel Corporation.
 * Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES.
 * All rights reserved.
 */

/*
 * Common code for partition-like virtual bdevs.
 */

#include "spdk/bdev.h"
#include "spdk/likely.h"
#include "spdk/log.h"
#include "spdk/string.h"
#include "spdk/thread.h"

#include "spdk/bdev_module.h"

/* This namespace UUID was generated using the uuid_generate() method. */
#define BDEV_PART_NAMESPACE_UUID "976b899e-3e1e-4d71-ab69-c2b08e9df8b8"

struct spdk_bdev_part_base {
	struct spdk_bdev		*bdev;
	struct spdk_bdev_desc		*desc;
	uint32_t			ref;
	uint32_t			channel_size;
	spdk_bdev_part_base_free_fn	base_free_fn;
	void				*ctx;
	bool				claimed;
	struct spdk_bdev_module		*module;
	struct spdk_bdev_fn_table	*fn_table;
	struct bdev_part_tailq		*tailq;
	spdk_io_channel_create_cb	ch_create_cb;
	spdk_io_channel_destroy_cb	ch_destroy_cb;
	spdk_bdev_remove_cb_t		remove_cb;
	struct spdk_thread		*thread;
};

struct spdk_bdev *
spdk_bdev_part_base_get_bdev(struct spdk_bdev_part_base *part_base)
{
	return part_base->bdev;
}

struct spdk_bdev_desc *
spdk_bdev_part_base_get_desc(struct spdk_bdev_part_base *part_base)
{
	return part_base->desc;
}

struct bdev_part_tailq *
spdk_bdev_part_base_get_tailq(struct spdk_bdev_part_base *part_base)
{
	return part_base->tailq;
}

void *
spdk_bdev_part_base_get_ctx(struct spdk_bdev_part_base *part_base)
{
	return part_base->ctx;
}

const char *
spdk_bdev_part_base_get_bdev_name(struct spdk_bdev_part_base *part_base)
{
	return part_base->bdev->name;
}

static void
bdev_part_base_free(void *ctx)
{
	struct spdk_bdev_desc *desc = ctx;

	spdk_bdev_close(desc);
}
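/*
 * A bdev descriptor must be closed on the same SPDK thread that opened it.
 * When the base is freed from a different thread, closing the descriptor is
 * deferred to base->thread via spdk_thread_send_msg() using the helper above.
 */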
void
spdk_bdev_part_base_free(struct spdk_bdev_part_base *base)
{
	if (base->desc) {
		/* Close the underlying bdev on its same opened thread. */
		if (base->thread && base->thread != spdk_get_thread()) {
			spdk_thread_send_msg(base->thread, bdev_part_base_free, base->desc);
		} else {
			spdk_bdev_close(base->desc);
		}
	}

	if (base->base_free_fn != NULL) {
		base->base_free_fn(base->ctx);
	}

	free(base);
}

static void
bdev_part_free_cb(void *io_device)
{
	struct spdk_bdev_part *part = io_device;
	struct spdk_bdev_part_base *base;

	assert(part);
	assert(part->internal.base);

	base = part->internal.base;

	TAILQ_REMOVE(base->tailq, part, tailq);

	if (--base->ref == 0) {
		spdk_bdev_module_release_bdev(base->bdev);
		spdk_bdev_part_base_free(base);
	}

	spdk_bdev_destruct_done(&part->internal.bdev, 0);
	free(part->internal.bdev.name);
	free(part->internal.bdev.product_name);
	free(part);
}

int
spdk_bdev_part_free(struct spdk_bdev_part *part)
{
	spdk_io_device_unregister(part, bdev_part_free_cb);

	/* Return 1 to indicate that this is an asynchronous operation that isn't complete
	 * until spdk_bdev_destruct_done is called */
	return 1;
}

void
spdk_bdev_part_base_hotremove(struct spdk_bdev_part_base *part_base, struct bdev_part_tailq *tailq)
{
	struct spdk_bdev_part *part, *tmp;

	TAILQ_FOREACH_SAFE(part, tailq, tailq, tmp) {
		if (part->internal.base == part_base) {
			spdk_bdev_unregister(&part->internal.bdev, NULL, NULL);
		}
	}
}
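/*
 * The callbacks below back every partition's fn_table:
 * spdk_bdev_part_base_construct_ext() overwrites the module-supplied
 * get_io_channel and io_type_supported entries with these wrappers, so
 * channel creation and I/O-type queries are routed through the part layer.
 */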
static bool
bdev_part_io_type_supported(void *_part, enum spdk_bdev_io_type io_type)
{
	struct spdk_bdev_part *part = _part;

	/* We can't decode/modify passthrough NVMe commands, so don't report
	 * that a partition supports these io types, even if the underlying
	 * bdev does.
	 */
	switch (io_type) {
	case SPDK_BDEV_IO_TYPE_NVME_ADMIN:
	case SPDK_BDEV_IO_TYPE_NVME_IO:
	case SPDK_BDEV_IO_TYPE_NVME_IO_MD:
		return false;
	default:
		break;
	}

	return part->internal.base->bdev->fn_table->io_type_supported(part->internal.base->bdev->ctxt,
			io_type);
}

static struct spdk_io_channel *
bdev_part_get_io_channel(void *_part)
{
	struct spdk_bdev_part *part = _part;

	return spdk_get_io_channel(part);
}

struct spdk_bdev *
spdk_bdev_part_get_bdev(struct spdk_bdev_part *part)
{
	return &part->internal.bdev;
}

struct spdk_bdev_part_base *
spdk_bdev_part_get_base(struct spdk_bdev_part *part)
{
	return part->internal.base;
}

struct spdk_bdev *
spdk_bdev_part_get_base_bdev(struct spdk_bdev_part *part)
{
	return part->internal.base->bdev;
}

uint64_t
spdk_bdev_part_get_offset_blocks(struct spdk_bdev_part *part)
{
	return part->internal.offset_blocks;
}
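/*
 * DIF reference tags are derived from the logical block address. Because a
 * partition's LBA space is shifted by offset_blocks relative to the base
 * bdev, reference tags must be remapped whenever REFTAG_CHECK is enabled:
 * on writes before submitting to the base bdev, and on reads after the base
 * bdev completes them.
 */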
static int
bdev_part_remap_dif(struct spdk_bdev_io *bdev_io, uint32_t offset,
		    uint32_t remapped_offset)
{
	struct spdk_bdev *bdev = bdev_io->bdev;
	struct spdk_dif_ctx dif_ctx;
	struct spdk_dif_error err_blk = {};
	int rc;
	struct spdk_dif_ctx_init_ext_opts dif_opts;

	if (spdk_likely(!(bdev->dif_check_flags & SPDK_DIF_FLAGS_REFTAG_CHECK))) {
		return 0;
	}

	dif_opts.size = SPDK_SIZEOF(&dif_opts, dif_pi_format);
	dif_opts.dif_pi_format = SPDK_DIF_PI_FORMAT_16;
	rc = spdk_dif_ctx_init(&dif_ctx,
			       bdev->blocklen, bdev->md_len, bdev->md_interleave,
			       bdev->dif_is_head_of_md, bdev->dif_type, bdev->dif_check_flags,
			       offset, 0, 0, 0, 0, &dif_opts);
	if (rc != 0) {
		SPDK_ERRLOG("Initialization of DIF context failed\n");
		return rc;
	}

	spdk_dif_ctx_set_remapped_init_ref_tag(&dif_ctx, remapped_offset);

	if (bdev->md_interleave) {
		rc = spdk_dif_remap_ref_tag(bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
					    bdev_io->u.bdev.num_blocks, &dif_ctx, &err_blk, true);
	} else {
		struct iovec md_iov = {
			.iov_base = bdev_io->u.bdev.md_buf,
			.iov_len = bdev_io->u.bdev.num_blocks * bdev->md_len,
		};

		rc = spdk_dix_remap_ref_tag(&md_iov, bdev_io->u.bdev.num_blocks, &dif_ctx, &err_blk, true);
	}

	if (rc != 0) {
		SPDK_ERRLOG("Remapping reference tag failed. type=%d, offset=%" PRIu32 "\n",
			    err_blk.err_type, err_blk.err_offset);
	}

	return rc;
}

static void
bdev_part_complete_io(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
{
	struct spdk_bdev_io *part_io = cb_arg;
	uint32_t offset, remapped_offset;
	int rc;

	switch (bdev_io->type) {
	case SPDK_BDEV_IO_TYPE_READ:
		if (success) {
			offset = bdev_io->u.bdev.offset_blocks;
			remapped_offset = part_io->u.bdev.offset_blocks;

			rc = bdev_part_remap_dif(bdev_io, offset, remapped_offset);
			if (rc != 0) {
				success = false;
			}
		}
		break;
	case SPDK_BDEV_IO_TYPE_ZCOPY:
		spdk_bdev_io_set_buf(part_io, bdev_io->u.bdev.iovs[0].iov_base,
				     bdev_io->u.bdev.iovs[0].iov_len);
		break;
	default:
		break;
	}

	if (part_io->internal.f.split) {
		part_io->internal.split.stored_user_cb(part_io, success, NULL);
	} else {
		spdk_bdev_io_complete_base_io_status(part_io, bdev_io);
	}

	spdk_bdev_free_io(bdev_io);
}

static inline void
bdev_part_init_ext_io_opts(struct spdk_bdev_io *bdev_io, struct spdk_bdev_ext_io_opts *opts)
{
	memset(opts, 0, sizeof(*opts));
	opts->size = sizeof(*opts);
	opts->memory_domain = bdev_io->u.bdev.memory_domain;
	opts->memory_domain_ctx = bdev_io->u.bdev.memory_domain_ctx;
	opts->metadata = bdev_io->u.bdev.md_buf;
}
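/*
 * Forward an I/O to the base bdev, translating offset_blocks by the
 * partition's starting offset. When a completion callback is supplied, it
 * is stashed in the I/O's split state (internal.split.stored_user_cb) so
 * that bdev_part_complete_io() invokes it rather than completing the I/O
 * directly, letting the caller chain further processing after the base I/O
 * finishes.
 */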
int
spdk_bdev_part_submit_request_ext(struct spdk_bdev_part_channel *ch, struct spdk_bdev_io *bdev_io,
				  spdk_bdev_io_completion_cb cb)
{
	struct spdk_bdev_part *part = ch->part;
	struct spdk_io_channel *base_ch = ch->base_ch;
	struct spdk_bdev_desc *base_desc = part->internal.base->desc;
	struct spdk_bdev_ext_io_opts io_opts;
	uint64_t offset, remapped_offset, remapped_src_offset;
	int rc = 0;

	if (cb != NULL) {
		bdev_io->internal.f.split = true;
		bdev_io->internal.split.stored_user_cb = cb;
	}

	offset = bdev_io->u.bdev.offset_blocks;
	remapped_offset = offset + part->internal.offset_blocks;

	/* Modify the I/O to adjust for the offset within the base bdev. */
	switch (bdev_io->type) {
	case SPDK_BDEV_IO_TYPE_READ:
		bdev_part_init_ext_io_opts(bdev_io, &io_opts);
		rc = spdk_bdev_readv_blocks_ext(base_desc, base_ch, bdev_io->u.bdev.iovs,
						bdev_io->u.bdev.iovcnt, remapped_offset,
						bdev_io->u.bdev.num_blocks,
						bdev_part_complete_io, bdev_io, &io_opts);
		break;
	case SPDK_BDEV_IO_TYPE_WRITE:
		rc = bdev_part_remap_dif(bdev_io, offset, remapped_offset);
		if (rc != 0) {
			return SPDK_BDEV_IO_STATUS_FAILED;
		}
		bdev_part_init_ext_io_opts(bdev_io, &io_opts);
		rc = spdk_bdev_writev_blocks_ext(base_desc, base_ch, bdev_io->u.bdev.iovs,
						 bdev_io->u.bdev.iovcnt, remapped_offset,
						 bdev_io->u.bdev.num_blocks,
						 bdev_part_complete_io, bdev_io, &io_opts);
		break;
	case SPDK_BDEV_IO_TYPE_WRITE_ZEROES:
		rc = spdk_bdev_write_zeroes_blocks(base_desc, base_ch, remapped_offset,
						   bdev_io->u.bdev.num_blocks, bdev_part_complete_io,
						   bdev_io);
		break;
	case SPDK_BDEV_IO_TYPE_UNMAP:
		rc = spdk_bdev_unmap_blocks(base_desc, base_ch, remapped_offset,
					    bdev_io->u.bdev.num_blocks, bdev_part_complete_io,
					    bdev_io);
		break;
	case SPDK_BDEV_IO_TYPE_FLUSH:
		rc = spdk_bdev_flush_blocks(base_desc, base_ch, remapped_offset,
					    bdev_io->u.bdev.num_blocks, bdev_part_complete_io,
					    bdev_io);
		break;
	case SPDK_BDEV_IO_TYPE_RESET:
		rc = spdk_bdev_reset(base_desc, base_ch,
				     bdev_part_complete_io, bdev_io);
		break;
	case SPDK_BDEV_IO_TYPE_ABORT:
		rc = spdk_bdev_abort(base_desc, base_ch, bdev_io->u.abort.bio_to_abort,
				     bdev_part_complete_io, bdev_io);
		break;
	case SPDK_BDEV_IO_TYPE_ZCOPY:
		rc = spdk_bdev_zcopy_start(base_desc, base_ch, NULL, 0, remapped_offset,
					   bdev_io->u.bdev.num_blocks, bdev_io->u.bdev.zcopy.populate,
					   bdev_part_complete_io, bdev_io);
		break;
	case SPDK_BDEV_IO_TYPE_COMPARE:
		if (!bdev_io->u.bdev.md_buf) {
			rc = spdk_bdev_comparev_blocks(base_desc, base_ch,
						       bdev_io->u.bdev.iovs,
						       bdev_io->u.bdev.iovcnt,
						       remapped_offset,
						       bdev_io->u.bdev.num_blocks,
						       bdev_part_complete_io, bdev_io);
		} else {
			rc = spdk_bdev_comparev_blocks_with_md(base_desc, base_ch,
							       bdev_io->u.bdev.iovs,
							       bdev_io->u.bdev.iovcnt,
							       bdev_io->u.bdev.md_buf,
							       remapped_offset,
							       bdev_io->u.bdev.num_blocks,
							       bdev_part_complete_io, bdev_io);
		}
		break;
	case SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE:
		rc = spdk_bdev_comparev_and_writev_blocks(base_desc, base_ch, bdev_io->u.bdev.iovs,
				bdev_io->u.bdev.iovcnt,
				bdev_io->u.bdev.fused_iovs,
				bdev_io->u.bdev.fused_iovcnt,
				remapped_offset,
				bdev_io->u.bdev.num_blocks,
				bdev_part_complete_io, bdev_io);
		break;
	case SPDK_BDEV_IO_TYPE_COPY:
		remapped_src_offset = bdev_io->u.bdev.copy.src_offset_blocks + part->internal.offset_blocks;
		rc = spdk_bdev_copy_blocks(base_desc, base_ch, remapped_offset, remapped_src_offset,
					   bdev_io->u.bdev.num_blocks, bdev_part_complete_io,
					   bdev_io);
		break;
	default:
		SPDK_ERRLOG("unknown I/O type %d\n", bdev_io->type);
		return SPDK_BDEV_IO_STATUS_FAILED;
	}

	return rc;
}

int
spdk_bdev_part_submit_request(struct spdk_bdev_part_channel *ch, struct spdk_bdev_io *bdev_io)
{
	return spdk_bdev_part_submit_request_ext(ch, bdev_io, NULL);
}
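/*
 * Each part is registered as an io_device; the per-thread channel context
 * holds a channel to the base bdev plus any module-specific state managed
 * by the optional ch_create_cb/ch_destroy_cb hooks.
 */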
static int
bdev_part_channel_create_cb(void *io_device, void *ctx_buf)
{
	struct spdk_bdev_part *part = (struct spdk_bdev_part *)io_device;
	struct spdk_bdev_part_channel *ch = ctx_buf;

	ch->part = part;
	ch->base_ch = spdk_bdev_get_io_channel(part->internal.base->desc);
	if (ch->base_ch == NULL) {
		return -1;
	}

	if (part->internal.base->ch_create_cb) {
		return part->internal.base->ch_create_cb(io_device, ctx_buf);
	} else {
		return 0;
	}
}

static void
bdev_part_channel_destroy_cb(void *io_device, void *ctx_buf)
{
	struct spdk_bdev_part *part = (struct spdk_bdev_part *)io_device;
	struct spdk_bdev_part_channel *ch = ctx_buf;

	if (part->internal.base->ch_destroy_cb) {
		part->internal.base->ch_destroy_cb(io_device, ctx_buf);
	}
	spdk_put_io_channel(ch->base_ch);
}

static void
bdev_part_base_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev,
			void *event_ctx)
{
	struct spdk_bdev_part_base *base = event_ctx;

	switch (type) {
	case SPDK_BDEV_EVENT_REMOVE:
		base->remove_cb(base);
		break;
	default:
		SPDK_NOTICELOG("Unsupported bdev event: type %d\n", type);
		break;
	}
}

int
spdk_bdev_part_base_construct_ext(const char *bdev_name,
				  spdk_bdev_remove_cb_t remove_cb, struct spdk_bdev_module *module,
				  struct spdk_bdev_fn_table *fn_table, struct bdev_part_tailq *tailq,
				  spdk_bdev_part_base_free_fn free_fn, void *ctx,
				  uint32_t channel_size, spdk_io_channel_create_cb ch_create_cb,
				  spdk_io_channel_destroy_cb ch_destroy_cb,
				  struct spdk_bdev_part_base **_base)
{
	int rc;
	struct spdk_bdev_part_base *base;

	if (_base == NULL) {
		return -EINVAL;
	}

	base = calloc(1, sizeof(*base));
	if (!base) {
		SPDK_ERRLOG("Memory allocation failure\n");
		return -ENOMEM;
	}
	fn_table->get_io_channel = bdev_part_get_io_channel;
	fn_table->io_type_supported = bdev_part_io_type_supported;

	base->desc = NULL;
	base->ref = 0;
	base->module = module;
	base->fn_table = fn_table;
	base->tailq = tailq;
	base->base_free_fn = free_fn;
	base->ctx = ctx;
	base->claimed = false;
	base->channel_size = channel_size;
	base->ch_create_cb = ch_create_cb;
	base->ch_destroy_cb = ch_destroy_cb;
	base->remove_cb = remove_cb;

	rc = spdk_bdev_open_ext(bdev_name, false, bdev_part_base_event_cb, base, &base->desc);
	if (rc) {
		if (rc == -ENODEV) {
			free(base);
		} else {
			SPDK_ERRLOG("could not open bdev %s: %s\n", bdev_name, spdk_strerror(-rc));
			spdk_bdev_part_base_free(base);
		}
		return rc;
	}

	base->bdev = spdk_bdev_desc_get_bdev(base->desc);

	/* Save the thread where the base device is opened */
	base->thread = spdk_get_thread();

	*_base = base;

	return 0;
}

void
spdk_bdev_part_construct_opts_init(struct spdk_bdev_part_construct_opts *opts, uint64_t size)
{
	if (opts == NULL) {
		SPDK_ERRLOG("opts should not be NULL\n");
		assert(opts != NULL);
		return;
	}
	if (size == 0) {
		SPDK_ERRLOG("size should not be zero\n");
		assert(size != 0);
		return;
	}

	memset(opts, 0, size);
	opts->opts_size = size;
}
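/*
 * Options structs are versioned by size for ABI compatibility: FIELD_OK()
 * only copies a field if the caller's opts_size shows that the field was
 * present in the caller's (possibly older) definition of the struct.
 */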
static void
part_construct_opts_copy(const struct spdk_bdev_part_construct_opts *src,
			 struct spdk_bdev_part_construct_opts *dst)
{
	if (src->opts_size == 0) {
		SPDK_ERRLOG("size should not be zero\n");
		assert(false);
	}

	memset(dst, 0, sizeof(*dst));
	dst->opts_size = src->opts_size;

#define FIELD_OK(field) \
	offsetof(struct spdk_bdev_part_construct_opts, field) + sizeof(src->field) <= src->opts_size

#define SET_FIELD(field) \
	if (FIELD_OK(field)) { \
		dst->field = src->field; \
	} \

	SET_FIELD(uuid);

	/* Do not remove this statement. When a new field is added, update this
	 * assert and add a corresponding SET_FIELD statement. */
	SPDK_STATIC_ASSERT(sizeof(struct spdk_bdev_part_construct_opts) == 24, "Incorrect size");

#undef FIELD_OK
#undef SET_FIELD
}
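/*
 * Build a partition bdev on top of the base: copy the base bdev's geometry
 * and DIF settings, assign (or derive) a UUID, claim the base bdev for the
 * module on first use, then register the part as both an io_device and a
 * bdev.
 */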
int
spdk_bdev_part_construct_ext(struct spdk_bdev_part *part, struct spdk_bdev_part_base *base,
			     char *name, uint64_t offset_blocks, uint64_t num_blocks,
			     char *product_name, const struct spdk_bdev_part_construct_opts *_opts)
{
	int rc;
	bool first_claimed = false;
	struct spdk_bdev_part_construct_opts opts;
	struct spdk_uuid ns_uuid;

	if (_opts == NULL) {
		spdk_bdev_part_construct_opts_init(&opts, sizeof(opts));
	} else {
		part_construct_opts_copy(_opts, &opts);
	}

	part->internal.bdev.blocklen = base->bdev->blocklen;
	part->internal.bdev.blockcnt = num_blocks;
	part->internal.offset_blocks = offset_blocks;

	part->internal.bdev.write_cache = base->bdev->write_cache;
	part->internal.bdev.required_alignment = base->bdev->required_alignment;
	part->internal.bdev.ctxt = part;
	part->internal.bdev.module = base->module;
	part->internal.bdev.fn_table = base->fn_table;

	part->internal.bdev.md_interleave = base->bdev->md_interleave;
	part->internal.bdev.md_len = base->bdev->md_len;
	part->internal.bdev.dif_type = base->bdev->dif_type;
	part->internal.bdev.dif_is_head_of_md = base->bdev->dif_is_head_of_md;
	part->internal.bdev.dif_check_flags = base->bdev->dif_check_flags;

	part->internal.bdev.name = strdup(name);
	if (part->internal.bdev.name == NULL) {
		SPDK_ERRLOG("Failed to allocate name for new part of bdev %s\n", spdk_bdev_get_name(base->bdev));
		return -1;
	}

	part->internal.bdev.product_name = strdup(product_name);
	if (part->internal.bdev.product_name == NULL) {
		free(part->internal.bdev.name);
		SPDK_ERRLOG("Failed to allocate product name for new part of bdev %s\n",
			    spdk_bdev_get_name(base->bdev));
		return -1;
	}

	/* The caller may have already specified a UUID. If not, we'll generate one
	 * based on the namespace UUID, the base bdev's UUID and the block range of the
	 * partition.
	 */
	if (!spdk_uuid_is_null(&opts.uuid)) {
		spdk_uuid_copy(&part->internal.bdev.uuid, &opts.uuid);
	} else {
		struct {
			struct spdk_uuid uuid;
			uint64_t offset_blocks;
			uint64_t num_blocks;
		} base_name;

		/* We need to create a unique base name for this partition. We can't just use
		 * the base bdev's UUID, since it may be used for multiple partitions. So
		 * construct a binary name consisting of the uuid + the block range for this
		 * partition.
		 */
		spdk_uuid_copy(&base_name.uuid, &base->bdev->uuid);
		base_name.offset_blocks = offset_blocks;
		base_name.num_blocks = num_blocks;

		spdk_uuid_parse(&ns_uuid, BDEV_PART_NAMESPACE_UUID);
		rc = spdk_uuid_generate_sha1(&part->internal.bdev.uuid, &ns_uuid,
					     (const char *)&base_name, sizeof(base_name));
		if (rc) {
			SPDK_ERRLOG("Could not generate new UUID\n");
			free(part->internal.bdev.name);
			free(part->internal.bdev.product_name);
			return -1;
		}
	}

	base->ref++;
	part->internal.base = base;

	if (!base->claimed) {
		int rc;

		rc = spdk_bdev_module_claim_bdev(base->bdev, base->desc, base->module);
		if (rc) {
			SPDK_ERRLOG("could not claim bdev %s\n", spdk_bdev_get_name(base->bdev));
			free(part->internal.bdev.name);
			free(part->internal.bdev.product_name);
			base->ref--;
			return -1;
		}
		base->claimed = true;
		first_claimed = true;
	}

	spdk_io_device_register(part, bdev_part_channel_create_cb,
				bdev_part_channel_destroy_cb,
				base->channel_size,
				name);

	rc = spdk_bdev_register(&part->internal.bdev);
	if (rc == 0) {
		TAILQ_INSERT_TAIL(base->tailq, part, tailq);
	} else {
		spdk_io_device_unregister(part, NULL);
		if (--base->ref == 0) {
			spdk_bdev_module_release_bdev(base->bdev);
		}
		free(part->internal.bdev.name);
		free(part->internal.bdev.product_name);
		if (first_claimed == true) {
			base->claimed = false;
		}
	}

	return rc;
}

int
spdk_bdev_part_construct(struct spdk_bdev_part *part, struct spdk_bdev_part_base *base,
			 char *name, uint64_t offset_blocks, uint64_t num_blocks,
			 char *product_name)
{
	return spdk_bdev_part_construct_ext(part, base, name, offset_blocks, num_blocks,
					    product_name, NULL);
}
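/*
 * Illustrative usage sketch (hypothetical consumer code, not part of this
 * file): a vbdev module allocates struct spdk_bdev_part, builds a shared
 * part base over an existing bdev, then constructs partitions on top of it.
 * Names such as my_module, my_fn_table, my_hotremove_cb and my_parts are
 * placeholders supplied by the module.
 *
 *	static struct bdev_part_tailq my_parts = TAILQ_HEAD_INITIALIZER(my_parts);
 *	struct spdk_bdev_part_base *base;
 *	struct spdk_bdev_part *part;
 *	int rc;
 *
 *	rc = spdk_bdev_part_base_construct_ext("Nvme0n1", my_hotremove_cb,
 *					       &my_module, &my_fn_table, &my_parts,
 *					       NULL, NULL,
 *					       sizeof(struct spdk_bdev_part_channel),
 *					       NULL, NULL, &base);
 *	if (rc == 0) {
 *		part = calloc(1, sizeof(*part));
 *		rc = spdk_bdev_part_construct(part, base, "Nvme0n1p0", 0, 1024,
 *					      "Example Partition");
 *	}
 */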