/* xref: /spdk/lib/bdev/part.c (revision 66289a6dbe28217365daa40fd92dcf327871c2e8) */
/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2018 Intel Corporation.
 *   Copyright (c) 2022 NVIDIA CORPORATION & AFFILIATES.
 *   All rights reserved.
 */

/*
 * Common code for partition-like virtual bdevs.
 */

#include "spdk/bdev.h"
#include "spdk/likely.h"
#include "spdk/log.h"
#include "spdk/string.h"
#include "spdk/thread.h"

#include "spdk/bdev_module.h"

/* This namespace UUID was generated using the uuid_generate() function. */
#define BDEV_PART_NAMESPACE_UUID "976b899e-3e1e-4d71-ab69-c2b08e9df8b8"

struct spdk_bdev_part_base {
	struct spdk_bdev		*bdev;
	struct spdk_bdev_desc		*desc;
	uint32_t			ref;
	uint32_t			channel_size;
	spdk_bdev_part_base_free_fn	base_free_fn;
	void				*ctx;
	bool				claimed;
	struct spdk_bdev_module		*module;
	struct spdk_bdev_fn_table	*fn_table;
	struct bdev_part_tailq		*tailq;
	spdk_io_channel_create_cb	ch_create_cb;
	spdk_io_channel_destroy_cb	ch_destroy_cb;
	spdk_bdev_remove_cb_t		remove_cb;
	struct spdk_thread		*thread;
};

struct spdk_bdev *
spdk_bdev_part_base_get_bdev(struct spdk_bdev_part_base *part_base)
{
	return part_base->bdev;
}

struct spdk_bdev_desc *
spdk_bdev_part_base_get_desc(struct spdk_bdev_part_base *part_base)
{
	return part_base->desc;
}

struct bdev_part_tailq *
spdk_bdev_part_base_get_tailq(struct spdk_bdev_part_base *part_base)
{
	return part_base->tailq;
}

void *
spdk_bdev_part_base_get_ctx(struct spdk_bdev_part_base *part_base)
{
	return part_base->ctx;
}

const char *
spdk_bdev_part_base_get_bdev_name(struct spdk_bdev_part_base *part_base)
{
	return part_base->bdev->name;
}

static void
bdev_part_base_free(void *ctx)
{
	struct spdk_bdev_desc *desc = ctx;

	spdk_bdev_close(desc);
}

void
spdk_bdev_part_base_free(struct spdk_bdev_part_base *base)
{
	if (base->desc) {
		/* Close the underlying bdev on the same thread on which it was opened. */
		if (base->thread && base->thread != spdk_get_thread()) {
			spdk_thread_send_msg(base->thread, bdev_part_base_free, base->desc);
		} else {
			spdk_bdev_close(base->desc);
		}
	}

	if (base->base_free_fn != NULL) {
		base->base_free_fn(base->ctx);
	}

	free(base);
}

static void
bdev_part_free_cb(void *io_device)
{
	struct spdk_bdev_part *part = io_device;
	struct spdk_bdev_part_base *base;

	assert(part);
	assert(part->internal.base);

	base = part->internal.base;

	TAILQ_REMOVE(base->tailq, part, tailq);

	if (--base->ref == 0) {
		spdk_bdev_module_release_bdev(base->bdev);
		spdk_bdev_part_base_free(base);
	}

	spdk_bdev_destruct_done(&part->internal.bdev, 0);
	free(part->internal.bdev.name);
	free(part->internal.bdev.product_name);
	free(part);
}

int
spdk_bdev_part_free(struct spdk_bdev_part *part)
{
	spdk_io_device_unregister(part, bdev_part_free_cb);

	/* Return 1 to indicate that this is an asynchronous operation that isn't complete
	 * until spdk_bdev_destruct_done is called */
	return 1;
}

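/*
 * Example (illustrative sketch, hypothetical name my_part_destruct): the
 * destruct callback in a partition module's spdk_bdev_fn_table typically just
 * delegates to spdk_bdev_part_free(). The ctx argument is the struct
 * spdk_bdev_part itself, because spdk_bdev_part_construct_ext() sets
 * part->internal.bdev.ctxt to the part.
 *
 *	static int
 *	my_part_destruct(void *ctx)
 *	{
 *		struct spdk_bdev_part *part = ctx;
 *
 *		return spdk_bdev_part_free(part);
 *	}
 *
 * The return value of 1 propagates the asynchronous-destruct convention up to
 * the bdev layer, which then waits for spdk_bdev_destruct_done().
 */
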
void
spdk_bdev_part_base_hotremove(struct spdk_bdev_part_base *part_base, struct bdev_part_tailq *tailq)
{
	struct spdk_bdev_part *part, *tmp;

	TAILQ_FOREACH_SAFE(part, tailq, tailq, tmp) {
		if (part->internal.base == part_base) {
			spdk_bdev_unregister(&part->internal.bdev, NULL, NULL);
		}
	}
}

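/*
 * Example (illustrative sketch): the remove_cb given to
 * spdk_bdev_part_base_construct_ext() receives the part base itself as its
 * argument. A module typically reacts to hot removal of the base bdev by
 * unregistering every partition carved out of it. The names
 * my_base_hotremove_cb and g_my_parts are hypothetical; SPDK_BDEV_PART_TAILQ
 * is the tailq type from spdk/bdev_module.h.
 *
 *	static SPDK_BDEV_PART_TAILQ g_my_parts = TAILQ_HEAD_INITIALIZER(g_my_parts);
 *
 *	static void
 *	my_base_hotremove_cb(void *_part_base)
 *	{
 *		struct spdk_bdev_part_base *part_base = _part_base;
 *
 *		spdk_bdev_part_base_hotremove(part_base, &g_my_parts);
 *	}
 */
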
static bool
bdev_part_io_type_supported(void *_part, enum spdk_bdev_io_type io_type)
{
	struct spdk_bdev_part *part = _part;

	/* We can't decode/modify passthrough NVMe commands, so don't report
	 *  that a partition supports these io types, even if the underlying
	 *  bdev does.
	 */
	switch (io_type) {
	case SPDK_BDEV_IO_TYPE_NVME_ADMIN:
	case SPDK_BDEV_IO_TYPE_NVME_IO:
	case SPDK_BDEV_IO_TYPE_NVME_IO_MD:
		return false;
	default:
		break;
	}

	return part->internal.base->bdev->fn_table->io_type_supported(part->internal.base->bdev->ctxt,
			io_type);
}

static struct spdk_io_channel *
bdev_part_get_io_channel(void *_part)
{
	struct spdk_bdev_part *part = _part;

	return spdk_get_io_channel(part);
}

struct spdk_bdev *
spdk_bdev_part_get_bdev(struct spdk_bdev_part *part)
{
	return &part->internal.bdev;
}

struct spdk_bdev_part_base *
spdk_bdev_part_get_base(struct spdk_bdev_part *part)
{
	return part->internal.base;
}

struct spdk_bdev *
spdk_bdev_part_get_base_bdev(struct spdk_bdev_part *part)
{
	return part->internal.base->bdev;
}

uint64_t
spdk_bdev_part_get_offset_blocks(struct spdk_bdev_part *part)
{
	return part->internal.offset_blocks;
}

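/*
 * DIF/DIX reference tags on the base bdev are computed from base-relative
 * LBAs, while the host generates and verifies tags against partition-relative
 * LBAs. For example, with a partition starting at offset_blocks = 2048, a
 * write to partition LBA 0 lands on base LBA 2048, so its reference tags are
 * remapped from 0 to 2048 on submission; on the read completion path they are
 * remapped back from base to partition LBAs. bdev_part_remap_dif() performs
 * that remapping whenever reference tag checking is enabled on the I/O.
 */
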
static int
bdev_part_remap_dif(struct spdk_bdev_io *bdev_io, uint32_t offset,
		    uint32_t remapped_offset)
{
	struct spdk_bdev *bdev = bdev_io->bdev;
	struct spdk_dif_ctx dif_ctx;
	struct spdk_dif_error err_blk = {};
	int rc;
	struct spdk_dif_ctx_init_ext_opts dif_opts;

	if (spdk_likely(!(bdev_io->u.bdev.dif_check_flags & SPDK_DIF_FLAGS_REFTAG_CHECK))) {
		return 0;
	}

	dif_opts.size = SPDK_SIZEOF(&dif_opts, dif_pi_format);
	dif_opts.dif_pi_format = SPDK_DIF_PI_FORMAT_16;
	rc = spdk_dif_ctx_init(&dif_ctx,
			       bdev->blocklen, bdev->md_len, bdev->md_interleave,
			       bdev->dif_is_head_of_md, bdev->dif_type, bdev_io->u.bdev.dif_check_flags,
			       offset, 0, 0, 0, 0, &dif_opts);
	if (rc != 0) {
		SPDK_ERRLOG("Initialization of DIF context failed\n");
		return rc;
	}

	spdk_dif_ctx_set_remapped_init_ref_tag(&dif_ctx, remapped_offset);

	if (bdev->md_interleave) {
		rc = spdk_dif_remap_ref_tag(bdev_io->u.bdev.iovs, bdev_io->u.bdev.iovcnt,
					    bdev_io->u.bdev.num_blocks, &dif_ctx, &err_blk, true);
	} else {
		struct iovec md_iov = {
			.iov_base	= bdev_io->u.bdev.md_buf,
			.iov_len	= bdev_io->u.bdev.num_blocks * bdev->md_len,
		};

		rc = spdk_dix_remap_ref_tag(&md_iov, bdev_io->u.bdev.num_blocks, &dif_ctx, &err_blk, true);
	}

	if (rc != 0) {
		SPDK_ERRLOG("Remapping reference tag failed. type=%d, offset=%" PRIu32 "\n",
			    err_blk.err_type, err_blk.err_offset);
	}

	return rc;
}

242 
243 static void
244 bdev_part_complete_io(struct spdk_bdev_io *bdev_io, bool success, void *cb_arg)
245 {
246 	struct spdk_bdev_io *part_io = cb_arg;
247 	uint32_t offset, remapped_offset;
248 	int rc;
249 
250 	switch (bdev_io->type) {
251 	case SPDK_BDEV_IO_TYPE_READ:
252 		if (success) {
253 			offset = bdev_io->u.bdev.offset_blocks;
254 			remapped_offset = part_io->u.bdev.offset_blocks;
255 
256 			rc = bdev_part_remap_dif(bdev_io, offset, remapped_offset);
257 			if (rc != 0) {
258 				success = false;
259 			}
260 		}
261 		break;
262 	case SPDK_BDEV_IO_TYPE_ZCOPY:
263 		spdk_bdev_io_set_buf(part_io, bdev_io->u.bdev.iovs[0].iov_base,
264 				     bdev_io->u.bdev.iovs[0].iov_len);
265 		break;
266 	default:
267 		break;
268 	}
269 
270 	if (part_io->internal.f.split) {
271 		part_io->internal.split.stored_user_cb(part_io, success, NULL);
272 	} else {
273 		spdk_bdev_io_complete_base_io_status(part_io, bdev_io);
274 	}
275 
276 	spdk_bdev_free_io(bdev_io);
277 }
278 
279 static inline void
280 bdev_part_init_ext_io_opts(struct spdk_bdev_io *bdev_io, struct spdk_bdev_ext_io_opts *opts)
281 {
282 	memset(opts, 0, sizeof(*opts));
283 	opts->size = sizeof(*opts);
284 	opts->memory_domain = bdev_io->u.bdev.memory_domain;
285 	opts->memory_domain_ctx = bdev_io->u.bdev.memory_domain_ctx;
286 	opts->metadata = bdev_io->u.bdev.md_buf;
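	/* Excluding the complement of this I/O's dif_check_flags makes the base
	 * bdev perform only the DIF checks that were requested on this I/O.
	 */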
	opts->dif_check_flags_exclude_mask = ~bdev_io->u.bdev.dif_check_flags;
}

int
spdk_bdev_part_submit_request_ext(struct spdk_bdev_part_channel *ch, struct spdk_bdev_io *bdev_io,
				  spdk_bdev_io_completion_cb cb)
{
	struct spdk_bdev_part *part = ch->part;
	struct spdk_io_channel *base_ch = ch->base_ch;
	struct spdk_bdev_desc *base_desc = part->internal.base->desc;
	struct spdk_bdev_ext_io_opts io_opts;
	uint64_t offset, remapped_offset, remapped_src_offset;
	int rc = 0;

	if (cb != NULL) {
		bdev_io->internal.f.split = true;
		bdev_io->internal.split.stored_user_cb = cb;
	}

	offset = bdev_io->u.bdev.offset_blocks;
	remapped_offset = offset + part->internal.offset_blocks;

	/* Modify the I/O to adjust for the offset within the base bdev. */
	switch (bdev_io->type) {
	case SPDK_BDEV_IO_TYPE_READ:
		bdev_part_init_ext_io_opts(bdev_io, &io_opts);
		rc = spdk_bdev_readv_blocks_ext(base_desc, base_ch, bdev_io->u.bdev.iovs,
						bdev_io->u.bdev.iovcnt, remapped_offset,
						bdev_io->u.bdev.num_blocks,
						bdev_part_complete_io, bdev_io, &io_opts);
		break;
	case SPDK_BDEV_IO_TYPE_WRITE:
		rc = bdev_part_remap_dif(bdev_io, offset, remapped_offset);
		if (rc != 0) {
			return SPDK_BDEV_IO_STATUS_FAILED;
		}
		bdev_part_init_ext_io_opts(bdev_io, &io_opts);
		rc = spdk_bdev_writev_blocks_ext(base_desc, base_ch, bdev_io->u.bdev.iovs,
						 bdev_io->u.bdev.iovcnt, remapped_offset,
						 bdev_io->u.bdev.num_blocks,
						 bdev_part_complete_io, bdev_io, &io_opts);
		break;
	case SPDK_BDEV_IO_TYPE_WRITE_ZEROES:
		rc = spdk_bdev_write_zeroes_blocks(base_desc, base_ch, remapped_offset,
						   bdev_io->u.bdev.num_blocks, bdev_part_complete_io,
						   bdev_io);
		break;
	case SPDK_BDEV_IO_TYPE_UNMAP:
		rc = spdk_bdev_unmap_blocks(base_desc, base_ch, remapped_offset,
					    bdev_io->u.bdev.num_blocks, bdev_part_complete_io,
					    bdev_io);
		break;
	case SPDK_BDEV_IO_TYPE_FLUSH:
		rc = spdk_bdev_flush_blocks(base_desc, base_ch, remapped_offset,
					    bdev_io->u.bdev.num_blocks, bdev_part_complete_io,
					    bdev_io);
		break;
	case SPDK_BDEV_IO_TYPE_RESET:
		rc = spdk_bdev_reset(base_desc, base_ch,
				     bdev_part_complete_io, bdev_io);
		break;
	case SPDK_BDEV_IO_TYPE_ABORT:
		rc = spdk_bdev_abort(base_desc, base_ch, bdev_io->u.abort.bio_to_abort,
				     bdev_part_complete_io, bdev_io);
		break;
	case SPDK_BDEV_IO_TYPE_ZCOPY:
		rc = spdk_bdev_zcopy_start(base_desc, base_ch, NULL, 0, remapped_offset,
					   bdev_io->u.bdev.num_blocks, bdev_io->u.bdev.zcopy.populate,
					   bdev_part_complete_io, bdev_io);
		break;
	case SPDK_BDEV_IO_TYPE_COMPARE:
		if (!bdev_io->u.bdev.md_buf) {
			rc = spdk_bdev_comparev_blocks(base_desc, base_ch,
						       bdev_io->u.bdev.iovs,
						       bdev_io->u.bdev.iovcnt,
						       remapped_offset,
						       bdev_io->u.bdev.num_blocks,
						       bdev_part_complete_io, bdev_io);
		} else {
			rc = spdk_bdev_comparev_blocks_with_md(base_desc, base_ch,
							       bdev_io->u.bdev.iovs,
							       bdev_io->u.bdev.iovcnt,
							       bdev_io->u.bdev.md_buf,
							       remapped_offset,
							       bdev_io->u.bdev.num_blocks,
							       bdev_part_complete_io, bdev_io);
		}
		break;
	case SPDK_BDEV_IO_TYPE_COMPARE_AND_WRITE:
		rc = spdk_bdev_comparev_and_writev_blocks(base_desc, base_ch, bdev_io->u.bdev.iovs,
				bdev_io->u.bdev.iovcnt,
				bdev_io->u.bdev.fused_iovs,
				bdev_io->u.bdev.fused_iovcnt,
				remapped_offset,
				bdev_io->u.bdev.num_blocks,
				bdev_part_complete_io, bdev_io);
		break;
	case SPDK_BDEV_IO_TYPE_COPY:
		remapped_src_offset = bdev_io->u.bdev.copy.src_offset_blocks + part->internal.offset_blocks;
		rc = spdk_bdev_copy_blocks(base_desc, base_ch, remapped_offset, remapped_src_offset,
					   bdev_io->u.bdev.num_blocks, bdev_part_complete_io,
					   bdev_io);
		break;
	default:
		SPDK_ERRLOG("unknown I/O type %d\n", bdev_io->type);
		return SPDK_BDEV_IO_STATUS_FAILED;
	}

	return rc;
}

int
spdk_bdev_part_submit_request(struct spdk_bdev_part_channel *ch, struct spdk_bdev_io *bdev_io)
{
	return spdk_bdev_part_submit_request_ext(ch, bdev_io, NULL);
}

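/*
 * Example (illustrative sketch): a partition module's submit_request callback
 * usually just forwards each I/O through its part channel. The names
 * my_part_channel and my_part_submit_request are hypothetical; the channel
 * context embeds a struct spdk_bdev_part_channel.
 *
 *	struct my_part_channel {
 *		struct spdk_bdev_part_channel	part_ch;
 *	};
 *
 *	static void
 *	my_part_submit_request(struct spdk_io_channel *_ch, struct spdk_bdev_io *bdev_io)
 *	{
 *		struct my_part_channel *ch = spdk_io_channel_get_ctx(_ch);
 *		int rc;
 *
 *		rc = spdk_bdev_part_submit_request(&ch->part_ch, bdev_io);
 *		if (rc == -ENOMEM) {
 *			spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_NOMEM);
 *		} else if (rc != 0) {
 *			spdk_bdev_io_complete(bdev_io, SPDK_BDEV_IO_STATUS_FAILED);
 *		}
 *	}
 */
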
static int
bdev_part_channel_create_cb(void *io_device, void *ctx_buf)
{
	struct spdk_bdev_part *part = (struct spdk_bdev_part *)io_device;
	struct spdk_bdev_part_channel *ch = ctx_buf;

	ch->part = part;
	ch->base_ch = spdk_bdev_get_io_channel(part->internal.base->desc);
	if (ch->base_ch == NULL) {
		return -1;
	}

	if (part->internal.base->ch_create_cb) {
		return part->internal.base->ch_create_cb(io_device, ctx_buf);
	} else {
		return 0;
	}
}

static void
bdev_part_channel_destroy_cb(void *io_device, void *ctx_buf)
{
	struct spdk_bdev_part *part = (struct spdk_bdev_part *)io_device;
	struct spdk_bdev_part_channel *ch = ctx_buf;

	if (part->internal.base->ch_destroy_cb) {
		part->internal.base->ch_destroy_cb(io_device, ctx_buf);
	}
	spdk_put_io_channel(ch->base_ch);
}

static void
bdev_part_base_event_cb(enum spdk_bdev_event_type type, struct spdk_bdev *bdev,
			void *event_ctx)
{
	struct spdk_bdev_part_base *base = event_ctx;

	switch (type) {
	case SPDK_BDEV_EVENT_REMOVE:
		base->remove_cb(base);
		break;
	default:
		SPDK_NOTICELOG("Unsupported bdev event: type %d\n", type);
		break;
	}
}

int
spdk_bdev_part_base_construct_ext(const char *bdev_name,
				  spdk_bdev_remove_cb_t remove_cb, struct spdk_bdev_module *module,
				  struct spdk_bdev_fn_table *fn_table, struct bdev_part_tailq *tailq,
				  spdk_bdev_part_base_free_fn free_fn, void *ctx,
				  uint32_t channel_size, spdk_io_channel_create_cb ch_create_cb,
				  spdk_io_channel_destroy_cb ch_destroy_cb,
				  struct spdk_bdev_part_base **_base)
{
	int rc;
	struct spdk_bdev_part_base *base;

	if (_base == NULL) {
		return -EINVAL;
	}

	base = calloc(1, sizeof(*base));
	if (!base) {
		SPDK_ERRLOG("Memory allocation failure\n");
		return -ENOMEM;
	}
	fn_table->get_io_channel = bdev_part_get_io_channel;
	fn_table->io_type_supported = bdev_part_io_type_supported;

	base->desc = NULL;
	base->ref = 0;
	base->module = module;
	base->fn_table = fn_table;
	base->tailq = tailq;
	base->base_free_fn = free_fn;
	base->ctx = ctx;
	base->claimed = false;
	base->channel_size = channel_size;
	base->ch_create_cb = ch_create_cb;
	base->ch_destroy_cb = ch_destroy_cb;
	base->remove_cb = remove_cb;

	rc = spdk_bdev_open_ext(bdev_name, false, bdev_part_base_event_cb, base, &base->desc);
	if (rc) {
		if (rc == -ENODEV) {
			free(base);
		} else {
			SPDK_ERRLOG("could not open bdev %s: %s\n", bdev_name, spdk_strerror(-rc));
			spdk_bdev_part_base_free(base);
		}
		return rc;
	}

	base->bdev = spdk_bdev_desc_get_bdev(base->desc);

	/* Save the thread on which the base device was opened. */
	base->thread = spdk_get_thread();

	*_base = base;

	return 0;
}

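/*
 * Example (illustrative sketch): constructing a part base on top of an
 * existing bdev named "Nvme0n1". my_if, my_fn_table, my_base_hotremove_cb,
 * g_my_parts and struct my_part_channel are hypothetical module definitions,
 * as in the earlier sketches; the free_fn and ctx arguments are left NULL.
 *
 *	struct spdk_bdev_part_base *base;
 *	int rc;
 *
 *	rc = spdk_bdev_part_base_construct_ext("Nvme0n1", my_base_hotremove_cb,
 *					       &my_if, &my_fn_table, &g_my_parts,
 *					       NULL, NULL,
 *					       sizeof(struct my_part_channel),
 *					       NULL, NULL, &base);
 *	if (rc != 0) {
 *		SPDK_ERRLOG("could not construct part base: %s\n", spdk_strerror(-rc));
 *	}
 *
 * Note that this function overwrites fn_table->get_io_channel and
 * fn_table->io_type_supported, so the module's fn_table only needs to provide
 * the destruct and submit_request callbacks.
 */
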
void
spdk_bdev_part_construct_opts_init(struct spdk_bdev_part_construct_opts *opts, uint64_t size)
{
	if (opts == NULL) {
		SPDK_ERRLOG("opts should not be NULL\n");
		assert(opts != NULL);
		return;
	}
	if (size == 0) {
		SPDK_ERRLOG("size should not be zero\n");
		assert(size != 0);
		return;
	}

	memset(opts, 0, size);
	opts->opts_size = size;
}

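/*
 * Example (illustrative sketch): initializing construct opts to give a
 * partition an explicit UUID instead of the derived one. The UUID string is a
 * placeholder; part, base, name, offset_blocks, num_blocks and product_name
 * are assumed to come from the caller.
 *
 *	struct spdk_bdev_part_construct_opts opts;
 *	int rc;
 *
 *	spdk_bdev_part_construct_opts_init(&opts, sizeof(opts));
 *	spdk_uuid_parse(&opts.uuid, "f0e155a5-3f4b-44e1-a6cb-b7cf13b5a7c5");
 *	rc = spdk_bdev_part_construct_ext(part, base, name, offset_blocks,
 *					  num_blocks, product_name, &opts);
 */
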
static void
part_construct_opts_copy(const struct spdk_bdev_part_construct_opts *src,
			 struct spdk_bdev_part_construct_opts *dst)
{
	if (src->opts_size == 0) {
		SPDK_ERRLOG("size should not be zero\n");
		assert(false);
	}

	memset(dst, 0, sizeof(*dst));
	dst->opts_size = src->opts_size;

#define FIELD_OK(field) \
        offsetof(struct spdk_bdev_part_construct_opts, field) + sizeof(src->field) <= src->opts_size

#define SET_FIELD(field) \
        if (FIELD_OK(field)) { \
                dst->field = src->field; \
        } \

	SET_FIELD(uuid);

	/* Do not remove this statement.  When a new field is added, update the size
	 * in this assert and add a corresponding SET_FIELD statement above. */
	SPDK_STATIC_ASSERT(sizeof(struct spdk_bdev_part_construct_opts) == 24, "Incorrect size");

#undef FIELD_OK
#undef SET_FIELD
}

int
spdk_bdev_part_construct_ext(struct spdk_bdev_part *part, struct spdk_bdev_part_base *base,
			     char *name, uint64_t offset_blocks, uint64_t num_blocks,
			     char *product_name, const struct spdk_bdev_part_construct_opts *_opts)
{
	int rc;
	bool first_claimed = false;
	struct spdk_bdev_part_construct_opts opts;
	struct spdk_uuid ns_uuid;

	if (_opts == NULL) {
		spdk_bdev_part_construct_opts_init(&opts, sizeof(opts));
	} else {
		part_construct_opts_copy(_opts, &opts);
	}

	part->internal.bdev.blocklen = base->bdev->blocklen;
	part->internal.bdev.blockcnt = num_blocks;
	part->internal.offset_blocks = offset_blocks;

	part->internal.bdev.write_cache = base->bdev->write_cache;
	part->internal.bdev.required_alignment = base->bdev->required_alignment;
	part->internal.bdev.ctxt = part;
	part->internal.bdev.module = base->module;
	part->internal.bdev.fn_table = base->fn_table;

	part->internal.bdev.md_interleave = base->bdev->md_interleave;
	part->internal.bdev.md_len = base->bdev->md_len;
	part->internal.bdev.dif_type = base->bdev->dif_type;
	part->internal.bdev.dif_is_head_of_md = base->bdev->dif_is_head_of_md;
	part->internal.bdev.dif_check_flags = base->bdev->dif_check_flags;

	part->internal.bdev.name = strdup(name);
	if (part->internal.bdev.name == NULL) {
		SPDK_ERRLOG("Failed to allocate name for new part of bdev %s\n", spdk_bdev_get_name(base->bdev));
		return -1;
	}

	part->internal.bdev.product_name = strdup(product_name);
	if (part->internal.bdev.product_name == NULL) {
		free(part->internal.bdev.name);
		SPDK_ERRLOG("Failed to allocate product name for new part of bdev %s\n",
			    spdk_bdev_get_name(base->bdev));
		return -1;
	}

	/* The caller may have already specified a UUID.  If not, we'll generate one
	 * based on the namespace UUID, the base bdev's UUID and the block range of the
	 * partition.
	 */
	if (!spdk_uuid_is_null(&opts.uuid)) {
		spdk_uuid_copy(&part->internal.bdev.uuid, &opts.uuid);
	} else {
		struct {
			struct spdk_uuid	uuid;
			uint64_t		offset_blocks;
			uint64_t		num_blocks;
		} base_name;

		/* We need to create a unique base name for this partition.  We can't just use
		 * the base bdev's UUID, since it may be used for multiple partitions.  So
		 * construct a binary name consisting of the uuid + the block range for this
		 * partition.
		 */
		spdk_uuid_copy(&base_name.uuid, &base->bdev->uuid);
		base_name.offset_blocks = offset_blocks;
		base_name.num_blocks = num_blocks;

		spdk_uuid_parse(&ns_uuid, BDEV_PART_NAMESPACE_UUID);
		rc = spdk_uuid_generate_sha1(&part->internal.bdev.uuid, &ns_uuid,
					     (const char *)&base_name, sizeof(base_name));
		if (rc) {
			SPDK_ERRLOG("Could not generate new UUID\n");
			free(part->internal.bdev.name);
			free(part->internal.bdev.product_name);
			return -1;
		}
	}

	base->ref++;
	part->internal.base = base;

	if (!base->claimed) {
		int rc;

		rc = spdk_bdev_module_claim_bdev(base->bdev, base->desc, base->module);
		if (rc) {
			SPDK_ERRLOG("could not claim bdev %s\n", spdk_bdev_get_name(base->bdev));
			free(part->internal.bdev.name);
			free(part->internal.bdev.product_name);
			base->ref--;
			return -1;
		}
		base->claimed = true;
		first_claimed = true;
	}

	spdk_io_device_register(part, bdev_part_channel_create_cb,
				bdev_part_channel_destroy_cb,
				base->channel_size,
				name);

	rc = spdk_bdev_register(&part->internal.bdev);
	if (rc == 0) {
		TAILQ_INSERT_TAIL(base->tailq, part, tailq);
	} else {
		spdk_io_device_unregister(part, NULL);
		if (--base->ref == 0) {
			spdk_bdev_module_release_bdev(base->bdev);
		}
		free(part->internal.bdev.name);
		free(part->internal.bdev.product_name);
		if (first_claimed == true) {
			base->claimed = false;
		}
	}

	return rc;
}

int
spdk_bdev_part_construct(struct spdk_bdev_part *part, struct spdk_bdev_part_base *base,
			 char *name, uint64_t offset_blocks, uint64_t num_blocks,
			 char *product_name)
{
	return spdk_bdev_part_construct_ext(part, base, name, offset_blocks, num_blocks,
					    product_name, NULL);
}
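
/*
 * Example (illustrative sketch): tying the pieces together, a module can
 * carve a partition out of the base constructed above. Embedding struct
 * spdk_bdev_part at the start of a module-specific struct is common; a plain
 * allocation is shown here for brevity. "Nvme0n1p0", the 2048-block offset
 * and the product name are placeholders.
 *
 *	struct spdk_bdev_part *part;
 *	int rc;
 *
 *	part = calloc(1, sizeof(*part));
 *	if (part == NULL) {
 *		return -ENOMEM;
 *	}
 *
 *	rc = spdk_bdev_part_construct(part, base, "Nvme0n1p0", 2048, num_blocks,
 *				      "My Partition Disk");
 *	if (rc != 0) {
 *		free(part);
 *	}
 */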