/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2017 Intel Corporation.
 *   All rights reserved.
 *   Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk/stdinc.h"

#include "spdk/blob.h"
#include "spdk/crc32.h"
#include "spdk/env.h"
#include "spdk/queue.h"
#include "spdk/thread.h"
#include "spdk/bit_array.h"
#include "spdk/bit_pool.h"
#include "spdk/likely.h"
#include "spdk/util.h"
#include "spdk/string.h"

#include "spdk_internal/assert.h"
#include "spdk/log.h"

#include "blobstore.h"

#define BLOB_CRC32C_INITIAL    0xffffffffUL

static int bs_register_md_thread(struct spdk_blob_store *bs);
static int bs_unregister_md_thread(struct spdk_blob_store *bs);
static void blob_close_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno);
static void blob_insert_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num,
		uint64_t cluster, uint32_t extent, struct spdk_blob_md_page *page,
		spdk_blob_op_complete cb_fn, void *cb_arg);
static void blob_free_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num,
		uint32_t extent_page, struct spdk_blob_md_page *page, spdk_blob_op_complete cb_fn, void *cb_arg);

static int blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value,
			  uint16_t value_len, bool internal);
static int blob_get_xattr_value(struct spdk_blob *blob, const char *name,
				const void **value, size_t *value_len, bool internal);
static int blob_remove_xattr(struct spdk_blob *blob, const char *name, bool internal);

static void blob_write_extent_page(struct spdk_blob *blob, uint32_t extent, uint64_t cluster_num,
				   struct spdk_blob_md_page *page, spdk_blob_op_complete cb_fn, void *cb_arg);

static void bs_shallow_copy_cluster_find_next(void *cb_arg);

/*
 * External snapshots require a channel per thread per esnap bdev.  The tree
 * is populated lazily as blob IOs are handled by the back_bs_dev. When this
 * channel is destroyed, all the channels in the tree are destroyed.
 */

struct blob_esnap_channel {
	RB_ENTRY(blob_esnap_channel)	node;
	spdk_blob_id			blob_id;
	struct spdk_io_channel		*channel;
};

static int blob_esnap_channel_compare(struct blob_esnap_channel *c1, struct blob_esnap_channel *c2);
static void blob_esnap_destroy_bs_dev_channels(struct spdk_blob *blob, bool abort_io,
		spdk_blob_op_with_handle_complete cb_fn, void *cb_arg);
static void blob_esnap_destroy_bs_channel(struct spdk_bs_channel *ch);
RB_GENERATE_STATIC(blob_esnap_channel_tree, blob_esnap_channel, node, blob_esnap_channel_compare)
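
/*
 * A minimal lookup sketch (illustrative, not part of this file): the
 * comparator keys the tree solely by blob_id, so finding the channel for a
 * given blob only needs a stack key. The esnap_channels field name is
 * assumed from the declaration in blobstore.h.
 *
 *	struct blob_esnap_channel find = { .blob_id = blob->id };
 *	struct blob_esnap_channel *entry;
 *
 *	entry = RB_FIND(blob_esnap_channel_tree, &ch->esnap_channels, &find);
 */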

static inline bool
blob_is_esnap_clone(const struct spdk_blob *blob)
{
	assert(blob != NULL);
	return !!(blob->invalid_flags & SPDK_BLOB_EXTERNAL_SNAPSHOT);
}

static int
blob_id_cmp(struct spdk_blob *blob1, struct spdk_blob *blob2)
{
	assert(blob1 != NULL && blob2 != NULL);
	return (blob1->id < blob2->id ? -1 : blob1->id > blob2->id);
}

RB_GENERATE_STATIC(spdk_blob_tree, spdk_blob, link, blob_id_cmp);

static void
blob_verify_md_op(struct spdk_blob *blob)
{
	assert(blob != NULL);
	assert(spdk_get_thread() == blob->bs->md_thread);
	assert(blob->state != SPDK_BLOB_STATE_LOADING);
}

static struct spdk_blob_list *
bs_get_snapshot_entry(struct spdk_blob_store *bs, spdk_blob_id blobid)
{
	struct spdk_blob_list *snapshot_entry = NULL;

	TAILQ_FOREACH(snapshot_entry, &bs->snapshots, link) {
		if (snapshot_entry->id == blobid) {
			break;
		}
	}

	return snapshot_entry;
}

static void
bs_claim_md_page(struct spdk_blob_store *bs, uint32_t page)
{
	assert(spdk_spin_held(&bs->used_lock));
	assert(page < spdk_bit_array_capacity(bs->used_md_pages));
	assert(spdk_bit_array_get(bs->used_md_pages, page) == false);

	spdk_bit_array_set(bs->used_md_pages, page);
}

static void
bs_release_md_page(struct spdk_blob_store *bs, uint32_t page)
{
	assert(spdk_spin_held(&bs->used_lock));
	assert(page < spdk_bit_array_capacity(bs->used_md_pages));
	assert(spdk_bit_array_get(bs->used_md_pages, page) == true);

	spdk_bit_array_clear(bs->used_md_pages, page);
}

static uint32_t
bs_claim_cluster(struct spdk_blob_store *bs)
{
	uint32_t cluster_num;

	assert(spdk_spin_held(&bs->used_lock));

	cluster_num = spdk_bit_pool_allocate_bit(bs->used_clusters);
	if (cluster_num == UINT32_MAX) {
		return UINT32_MAX;
	}

	SPDK_DEBUGLOG(blob, "Claiming cluster %u\n", cluster_num);
	bs->num_free_clusters--;

	return cluster_num;
}

static void
bs_release_cluster(struct spdk_blob_store *bs, uint32_t cluster_num)
{
	assert(spdk_spin_held(&bs->used_lock));
	assert(cluster_num < spdk_bit_pool_capacity(bs->used_clusters));
	assert(spdk_bit_pool_is_allocated(bs->used_clusters, cluster_num) == true);
	assert(bs->num_free_clusters < bs->total_clusters);

	SPDK_DEBUGLOG(blob, "Releasing cluster %u\n", cluster_num);

	spdk_bit_pool_free_bit(bs->used_clusters, cluster_num);
	bs->num_free_clusters++;
}

static int
blob_insert_cluster(struct spdk_blob *blob, uint32_t cluster_num, uint64_t cluster)
{
	uint64_t *cluster_lba = &blob->active.clusters[cluster_num];

	blob_verify_md_op(blob);

	if (*cluster_lba != 0) {
		return -EEXIST;
	}

	*cluster_lba = bs_cluster_to_lba(blob->bs, cluster);
	blob->active.num_allocated_clusters++;

	return 0;
}

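/*
 * Claim a free cluster for cluster_num of the given blob. For extent-table
 * blobs whose extent page is not yet allocated, also claim an md page for
 * it, searching upward from *lowest_free_md_page. The caller must hold
 * bs->used_lock. When update_map is true, the new cluster is additionally
 * recorded in the blob's active cluster map.
 */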
static int
bs_allocate_cluster(struct spdk_blob *blob, uint32_t cluster_num,
		    uint64_t *cluster, uint32_t *lowest_free_md_page, bool update_map)
{
	uint32_t *extent_page = 0;

	assert(spdk_spin_held(&blob->bs->used_lock));

	*cluster = bs_claim_cluster(blob->bs);
	if (*cluster == UINT32_MAX) {
		/* No more free clusters. Cannot satisfy the request. */
		return -ENOSPC;
	}

	if (blob->use_extent_table) {
		extent_page = bs_cluster_to_extent_page(blob, cluster_num);
		if (*extent_page == 0) {
			/* An extent page shall never occupy md_page 0, so start the search from 1 */
			if (*lowest_free_md_page == 0) {
				*lowest_free_md_page = 1;
			}
			/* No extent_page is allocated for the cluster */
			*lowest_free_md_page = spdk_bit_array_find_first_clear(blob->bs->used_md_pages,
					       *lowest_free_md_page);
			if (*lowest_free_md_page == UINT32_MAX) {
				/* No more free md pages. Cannot satisfy the request. */
				bs_release_cluster(blob->bs, *cluster);
				return -ENOSPC;
			}
			bs_claim_md_page(blob->bs, *lowest_free_md_page);
		}
	}

	SPDK_DEBUGLOG(blob, "Claiming cluster %" PRIu64 " for blob 0x%" PRIx64 "\n", *cluster,
		      blob->id);

	if (update_map) {
		blob_insert_cluster(blob, cluster_num, *cluster);
		if (blob->use_extent_table && *extent_page == 0) {
			*extent_page = *lowest_free_md_page;
		}
	}

	return 0;
}

static void
blob_xattrs_init(struct spdk_blob_xattr_opts *xattrs)
{
	xattrs->count = 0;
	xattrs->names = NULL;
	xattrs->ctx = NULL;
	xattrs->get_value = NULL;
}

void
spdk_blob_opts_init(struct spdk_blob_opts *opts, size_t opts_size)
{
	if (!opts) {
		SPDK_ERRLOG("opts should not be NULL\n");
		return;
	}

	if (!opts_size) {
		SPDK_ERRLOG("opts_size should not be zero\n");
		return;
	}

	memset(opts, 0, opts_size);
	opts->opts_size = opts_size;

#define FIELD_OK(field) \
        offsetof(struct spdk_blob_opts, field) + sizeof(opts->field) <= opts_size

#define SET_FIELD(field, value) \
        if (FIELD_OK(field)) { \
                opts->field = value; \
        } \

	SET_FIELD(num_clusters, 0);
	SET_FIELD(thin_provision, false);
	SET_FIELD(clear_method, BLOB_CLEAR_WITH_DEFAULT);

	if (FIELD_OK(xattrs)) {
		blob_xattrs_init(&opts->xattrs);
	}

	SET_FIELD(use_extent_table, true);

#undef FIELD_OK
#undef SET_FIELD
}
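
/*
 * Usage sketch for the initializer above (illustrative; the callback name is
 * caller-provided). Passing sizeof() of the struct the caller was compiled
 * against lets newer library versions skip fields the caller's struct is too
 * small to hold:
 *
 *	struct spdk_blob_opts opts;
 *
 *	spdk_blob_opts_init(&opts, sizeof(opts));
 *	opts.num_clusters = 16;
 *	opts.thin_provision = true;
 *	spdk_bs_create_blob_ext(bs, &opts, blob_create_done, cb_arg);
 */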

void
spdk_blob_open_opts_init(struct spdk_blob_open_opts *opts, size_t opts_size)
{
	if (!opts) {
		SPDK_ERRLOG("opts should not be NULL\n");
		return;
	}

	if (!opts_size) {
		SPDK_ERRLOG("opts_size should not be zero\n");
		return;
	}

	memset(opts, 0, opts_size);
	opts->opts_size = opts_size;

#define FIELD_OK(field) \
        offsetof(struct spdk_blob_open_opts, field) + sizeof(opts->field) <= opts_size

#define SET_FIELD(field, value) \
        if (FIELD_OK(field)) { \
                opts->field = value; \
        } \

	SET_FIELD(clear_method, BLOB_CLEAR_WITH_DEFAULT);

#undef FIELD_OK
#undef SET_FIELD
}

static struct spdk_blob *
blob_alloc(struct spdk_blob_store *bs, spdk_blob_id id)
{
	struct spdk_blob *blob;

	blob = calloc(1, sizeof(*blob));
	if (!blob) {
		return NULL;
	}

	blob->id = id;
	blob->bs = bs;

	blob->parent_id = SPDK_BLOBID_INVALID;

	blob->state = SPDK_BLOB_STATE_DIRTY;
	blob->extent_rle_found = false;
	blob->extent_table_found = false;
	blob->active.num_pages = 1;
	blob->active.pages = calloc(1, sizeof(*blob->active.pages));
	if (!blob->active.pages) {
		free(blob);
		return NULL;
	}

	blob->active.pages[0] = bs_blobid_to_page(id);

	TAILQ_INIT(&blob->xattrs);
	TAILQ_INIT(&blob->xattrs_internal);
	TAILQ_INIT(&blob->pending_persists);
	TAILQ_INIT(&blob->persists_to_complete);

	return blob;
}

static void
xattrs_free(struct spdk_xattr_tailq *xattrs)
{
	struct spdk_xattr	*xattr, *xattr_tmp;

	TAILQ_FOREACH_SAFE(xattr, xattrs, link, xattr_tmp) {
		TAILQ_REMOVE(xattrs, xattr, link);
		free(xattr->name);
		free(xattr->value);
		free(xattr);
	}
}

static void
blob_free(struct spdk_blob *blob)
{
	assert(blob != NULL);
	assert(TAILQ_EMPTY(&blob->pending_persists));
	assert(TAILQ_EMPTY(&blob->persists_to_complete));

	free(blob->active.extent_pages);
	free(blob->clean.extent_pages);
	free(blob->active.clusters);
	free(blob->clean.clusters);
	free(blob->active.pages);
	free(blob->clean.pages);

	xattrs_free(&blob->xattrs);
	xattrs_free(&blob->xattrs_internal);

	if (blob->back_bs_dev) {
		blob->back_bs_dev->destroy(blob->back_bs_dev);
	}

	free(blob);
}

static void
blob_back_bs_destroy_esnap_done(void *ctx, struct spdk_blob *blob, int bserrno)
{
	struct spdk_bs_dev	*bs_dev = ctx;

	if (bserrno != 0) {
		/*
		 * This is probably due to a memory allocation failure when creating the
		 * blob_esnap_destroy_ctx before iterating threads.
		 */
		SPDK_ERRLOG("blob 0x%" PRIx64 ": Unable to destroy bs dev channels: error %d\n",
			    blob->id, bserrno);
		assert(false);
	}

	if (bs_dev == NULL) {
		/*
		 * This check exists to make scan-build happy.
		 *
		 * blob->back_bs_dev for an esnap is NULL during the first iteration of blobs while
		 * the blobstore is being loaded. It could also be NULL if there was an error
		 * opening the esnap device. In each of these cases, no channels could have been
		 * created because back_bs_dev->create_channel() would have led to a NULL pointer
		 * deref.
		 */
		assert(false);
		return;
	}

	SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": calling destroy on back_bs_dev\n", blob->id);
	bs_dev->destroy(bs_dev);
}

static void
blob_back_bs_destroy(struct spdk_blob *blob)
{
	SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": preparing to destroy back_bs_dev\n",
		      blob->id);

	blob_esnap_destroy_bs_dev_channels(blob, false, blob_back_bs_destroy_esnap_done,
					   blob->back_bs_dev);
	blob->back_bs_dev = NULL;
}

struct freeze_io_ctx {
	struct spdk_bs_cpl cpl;
	struct spdk_blob *blob;
};

static void
blob_io_sync(struct spdk_io_channel_iter *i)
{
	spdk_for_each_channel_continue(i, 0);
}

static void
blob_execute_queued_io(struct spdk_io_channel_iter *i)
{
	struct spdk_io_channel *_ch = spdk_io_channel_iter_get_channel(i);
	struct spdk_bs_channel *ch = spdk_io_channel_get_ctx(_ch);
	struct freeze_io_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
	struct spdk_bs_request_set	*set;
	struct spdk_bs_user_op_args	*args;
	spdk_bs_user_op_t *op, *tmp;

	TAILQ_FOREACH_SAFE(op, &ch->queued_io, link, tmp) {
		set = (struct spdk_bs_request_set *)op;
		args = &set->u.user_op;

		if (args->blob == ctx->blob) {
			TAILQ_REMOVE(&ch->queued_io, op, link);
			bs_user_op_execute(op);
		}
	}

	spdk_for_each_channel_continue(i, 0);
}

static void
blob_io_cpl(struct spdk_io_channel_iter *i, int status)
{
	struct freeze_io_ctx *ctx = spdk_io_channel_iter_get_ctx(i);

	ctx->cpl.u.blob_basic.cb_fn(ctx->cpl.u.blob_basic.cb_arg, 0);

	free(ctx);
}

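/*
 * Freezing bumps blob->frozen_refcnt; while it is non-zero, submission paths
 * park new I/O for this blob on each channel's queued_io list instead of
 * issuing it. The spdk_for_each_channel() pass with the no-op blob_io_sync
 * only guarantees that every thread has observed the updated refcount before
 * the completion fires; unfreezing decrements the refcount and replays the
 * parked I/O via blob_execute_queued_io.
 */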
static void
blob_freeze_io(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
{
	struct freeze_io_ctx *ctx;

	blob_verify_md_op(blob);

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	ctx->cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
	ctx->cpl.u.blob_basic.cb_fn = cb_fn;
	ctx->cpl.u.blob_basic.cb_arg = cb_arg;
	ctx->blob = blob;

	/* Freeze I/O on blob */
	blob->frozen_refcnt++;

	spdk_for_each_channel(blob->bs, blob_io_sync, ctx, blob_io_cpl);
}

static void
blob_unfreeze_io(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
{
	struct freeze_io_ctx *ctx;

	blob_verify_md_op(blob);

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	ctx->cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
	ctx->cpl.u.blob_basic.cb_fn = cb_fn;
	ctx->cpl.u.blob_basic.cb_arg = cb_arg;
	ctx->blob = blob;

	assert(blob->frozen_refcnt > 0);

	blob->frozen_refcnt--;

	spdk_for_each_channel(blob->bs, blob_execute_queued_io, ctx, blob_io_cpl);
}

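/*
 * Move the active metadata arrays into the clean set, which mirrors what is
 * persisted on disk, and install freshly allocated copies as the new active
 * arrays. Only a blob in the LOADING state transitions to CLEAN here; a blob
 * dirtied again mid-persist stays DIRTY.
 */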
static int
blob_mark_clean(struct spdk_blob *blob)
{
	uint32_t *extent_pages = NULL;
	uint64_t *clusters = NULL;
	uint32_t *pages = NULL;

	assert(blob != NULL);

	if (blob->active.num_extent_pages) {
		assert(blob->active.extent_pages);
		extent_pages = calloc(blob->active.num_extent_pages, sizeof(*blob->active.extent_pages));
		if (!extent_pages) {
			return -ENOMEM;
		}
		memcpy(extent_pages, blob->active.extent_pages,
		       blob->active.num_extent_pages * sizeof(*extent_pages));
	}

	if (blob->active.num_clusters) {
		assert(blob->active.clusters);
		clusters = calloc(blob->active.num_clusters, sizeof(*blob->active.clusters));
		if (!clusters) {
			free(extent_pages);
			return -ENOMEM;
		}
		memcpy(clusters, blob->active.clusters, blob->active.num_clusters * sizeof(*blob->active.clusters));
	}

	if (blob->active.num_pages) {
		assert(blob->active.pages);
		pages = calloc(blob->active.num_pages, sizeof(*blob->active.pages));
		if (!pages) {
			free(extent_pages);
			free(clusters);
			return -ENOMEM;
		}
		memcpy(pages, blob->active.pages, blob->active.num_pages * sizeof(*blob->active.pages));
	}

	free(blob->clean.extent_pages);
	free(blob->clean.clusters);
	free(blob->clean.pages);

	blob->clean.num_extent_pages = blob->active.num_extent_pages;
	blob->clean.extent_pages = blob->active.extent_pages;
	blob->clean.num_clusters = blob->active.num_clusters;
	blob->clean.clusters = blob->active.clusters;
	blob->clean.num_allocated_clusters = blob->active.num_allocated_clusters;
	blob->clean.num_pages = blob->active.num_pages;
	blob->clean.pages = blob->active.pages;

	blob->active.extent_pages = extent_pages;
	blob->active.clusters = clusters;
	blob->active.pages = pages;

	/* If the metadata was dirtied again while the metadata was being written to disk,
	 *  we do not want to revert the DIRTY state back to CLEAN here.
	 */
	if (blob->state == SPDK_BLOB_STATE_LOADING) {
		blob->state = SPDK_BLOB_STATE_CLEAN;
	}

	return 0;
}

static int
blob_deserialize_xattr(struct spdk_blob *blob,
		       struct spdk_blob_md_descriptor_xattr *desc_xattr, bool internal)
{
	struct spdk_xattr                       *xattr;

	if (desc_xattr->length != sizeof(desc_xattr->name_length) +
	    sizeof(desc_xattr->value_length) +
	    desc_xattr->name_length + desc_xattr->value_length) {
		return -EINVAL;
	}

	xattr = calloc(1, sizeof(*xattr));
	if (xattr == NULL) {
		return -ENOMEM;
	}

	xattr->name = malloc(desc_xattr->name_length + 1);
	if (xattr->name == NULL) {
		free(xattr);
		return -ENOMEM;
	}

	xattr->value = malloc(desc_xattr->value_length);
	if (xattr->value == NULL) {
		free(xattr->name);
		free(xattr);
		return -ENOMEM;
	}

	memcpy(xattr->name, desc_xattr->name, desc_xattr->name_length);
	xattr->name[desc_xattr->name_length] = '\0';
	xattr->value_len = desc_xattr->value_length;
	memcpy(xattr->value,
	       (void *)((uintptr_t)desc_xattr->name + desc_xattr->name_length),
	       desc_xattr->value_length);

	TAILQ_INSERT_TAIL(internal ? &blob->xattrs_internal : &blob->xattrs, xattr, link);

	return 0;
}


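/*
 * Metadata pages hold a packed sequence of descriptors, each a type/length
 * header followed by a payload. Parsing walks the page by advancing
 * sizeof(header) + length per descriptor and stops at a zero-length PADDING
 * descriptor or at the end of the descriptor area.
 */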
static int
blob_parse_page(const struct spdk_blob_md_page *page, struct spdk_blob *blob)
{
	struct spdk_blob_md_descriptor *desc;
	size_t	cur_desc = 0;
	void *tmp;

	desc = (struct spdk_blob_md_descriptor *)page->descriptors;
	while (cur_desc < sizeof(page->descriptors)) {
		if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) {
			if (desc->length == 0) {
				/* If padding and length are 0, this terminates the page */
				break;
			}
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) {
			struct spdk_blob_md_descriptor_flags	*desc_flags;

			desc_flags = (struct spdk_blob_md_descriptor_flags *)desc;

			if (desc_flags->length != sizeof(*desc_flags) - sizeof(*desc)) {
				return -EINVAL;
			}

			if ((desc_flags->invalid_flags | SPDK_BLOB_INVALID_FLAGS_MASK) !=
			    SPDK_BLOB_INVALID_FLAGS_MASK) {
				return -EINVAL;
			}

			if ((desc_flags->data_ro_flags | SPDK_BLOB_DATA_RO_FLAGS_MASK) !=
			    SPDK_BLOB_DATA_RO_FLAGS_MASK) {
				blob->data_ro = true;
				blob->md_ro = true;
			}

			if ((desc_flags->md_ro_flags | SPDK_BLOB_MD_RO_FLAGS_MASK) !=
			    SPDK_BLOB_MD_RO_FLAGS_MASK) {
				blob->md_ro = true;
			}

			if ((desc_flags->data_ro_flags & SPDK_BLOB_READ_ONLY)) {
				blob->data_ro = true;
				blob->md_ro = true;
			}

			blob->invalid_flags = desc_flags->invalid_flags;
			blob->data_ro_flags = desc_flags->data_ro_flags;
			blob->md_ro_flags = desc_flags->md_ro_flags;

		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE) {
			struct spdk_blob_md_descriptor_extent_rle	*desc_extent_rle;
			unsigned int				i, j;
			unsigned int				cluster_count = blob->active.num_clusters;

			if (blob->extent_table_found) {
				/* An Extent Table is already present in the md;
				 * both descriptors should never appear at the same time. */
				return -EINVAL;
			}
			blob->extent_rle_found = true;

			desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)desc;

			if (desc_extent_rle->length == 0 ||
			    (desc_extent_rle->length % sizeof(desc_extent_rle->extents[0]) != 0)) {
				return -EINVAL;
			}

			for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) {
				for (j = 0; j < desc_extent_rle->extents[i].length; j++) {
					if (desc_extent_rle->extents[i].cluster_idx != 0) {
						if (!spdk_bit_pool_is_allocated(blob->bs->used_clusters,
										desc_extent_rle->extents[i].cluster_idx + j)) {
							return -EINVAL;
						}
					}
					cluster_count++;
				}
			}

			if (cluster_count == 0) {
				return -EINVAL;
			}
			tmp = realloc(blob->active.clusters, cluster_count * sizeof(*blob->active.clusters));
			if (tmp == NULL) {
				return -ENOMEM;
			}
			blob->active.clusters = tmp;
			blob->active.cluster_array_size = cluster_count;

			for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) {
				for (j = 0; j < desc_extent_rle->extents[i].length; j++) {
					if (desc_extent_rle->extents[i].cluster_idx != 0) {
						blob->active.clusters[blob->active.num_clusters++] = bs_cluster_to_lba(blob->bs,
								desc_extent_rle->extents[i].cluster_idx + j);
						blob->active.num_allocated_clusters++;
					} else if (spdk_blob_is_thin_provisioned(blob)) {
						blob->active.clusters[blob->active.num_clusters++] = 0;
					} else {
						return -EINVAL;
					}
				}
			}
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE) {
			struct spdk_blob_md_descriptor_extent_table *desc_extent_table;
			uint32_t num_extent_pages = blob->active.num_extent_pages;
			uint32_t i, j;
			size_t extent_pages_length;

			desc_extent_table = (struct spdk_blob_md_descriptor_extent_table *)desc;
			extent_pages_length = desc_extent_table->length - sizeof(desc_extent_table->num_clusters);

			if (blob->extent_rle_found) {
				/* This means that Extent RLE is present in the md;
				 * both descriptors should never appear at the same time. */
				return -EINVAL;
			} else if (blob->extent_table_found &&
				   desc_extent_table->num_clusters != blob->remaining_clusters_in_et) {
				/* Number of clusters in this ET does not match number
				 * from previously read EXTENT_TABLE. */
				return -EINVAL;
			}

			if (desc_extent_table->length == 0 ||
			    (extent_pages_length % sizeof(desc_extent_table->extent_page[0]) != 0)) {
				return -EINVAL;
			}

			blob->extent_table_found = true;

			for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) {
				num_extent_pages += desc_extent_table->extent_page[i].num_pages;
			}

			if (num_extent_pages > 0) {
				tmp = realloc(blob->active.extent_pages, num_extent_pages * sizeof(uint32_t));
				if (tmp == NULL) {
					return -ENOMEM;
				}
				blob->active.extent_pages = tmp;
			}
			blob->active.extent_pages_array_size = num_extent_pages;

			blob->remaining_clusters_in_et = desc_extent_table->num_clusters;

			/* Extent table entries contain md page numbers for extent pages.
			 * Zeroes represent unallocated extent pages; those are run-length-encoded.
			 */
			for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) {
				if (desc_extent_table->extent_page[i].page_idx != 0) {
					assert(desc_extent_table->extent_page[i].num_pages == 1);
					blob->active.extent_pages[blob->active.num_extent_pages++] =
						desc_extent_table->extent_page[i].page_idx;
				} else if (spdk_blob_is_thin_provisioned(blob)) {
					for (j = 0; j < desc_extent_table->extent_page[i].num_pages; j++) {
						blob->active.extent_pages[blob->active.num_extent_pages++] = 0;
					}
				} else {
					return -EINVAL;
				}
			}
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) {
			struct spdk_blob_md_descriptor_extent_page	*desc_extent;
			unsigned int					i;
			unsigned int					cluster_count = 0;
			size_t						cluster_idx_length;

			if (blob->extent_rle_found) {
				/* This means that Extent RLE is present in the md;
				 * both descriptors should never appear at the same time. */
				return -EINVAL;
			}

			desc_extent = (struct spdk_blob_md_descriptor_extent_page *)desc;
			cluster_idx_length = desc_extent->length - sizeof(desc_extent->start_cluster_idx);

			if (desc_extent->length <= sizeof(desc_extent->start_cluster_idx) ||
			    (cluster_idx_length % sizeof(desc_extent->cluster_idx[0]) != 0)) {
				return -EINVAL;
			}

			for (i = 0; i < cluster_idx_length / sizeof(desc_extent->cluster_idx[0]); i++) {
				if (desc_extent->cluster_idx[i] != 0) {
					if (!spdk_bit_pool_is_allocated(blob->bs->used_clusters, desc_extent->cluster_idx[i])) {
						return -EINVAL;
					}
				}
				cluster_count++;
			}

			if (cluster_count == 0) {
				return -EINVAL;
			}

			/* When reading extent pages sequentially, the starting cluster idx should match
			 * the current size of the blob.
			 * If this is changed to batch reading, this check shall be removed. */
			if (desc_extent->start_cluster_idx != blob->active.num_clusters) {
				return -EINVAL;
			}

			tmp = realloc(blob->active.clusters,
				      (cluster_count + blob->active.num_clusters) * sizeof(*blob->active.clusters));
			if (tmp == NULL) {
				return -ENOMEM;
			}
			blob->active.clusters = tmp;
			blob->active.cluster_array_size = (cluster_count + blob->active.num_clusters);

			for (i = 0; i < cluster_idx_length / sizeof(desc_extent->cluster_idx[0]); i++) {
				if (desc_extent->cluster_idx[i] != 0) {
					blob->active.clusters[blob->active.num_clusters++] = bs_cluster_to_lba(blob->bs,
							desc_extent->cluster_idx[i]);
					blob->active.num_allocated_clusters++;
				} else if (spdk_blob_is_thin_provisioned(blob)) {
					blob->active.clusters[blob->active.num_clusters++] = 0;
				} else {
					return -EINVAL;
				}
			}
			assert(desc_extent->start_cluster_idx + cluster_count == blob->active.num_clusters);
			assert(blob->remaining_clusters_in_et >= cluster_count);
			blob->remaining_clusters_in_et -= cluster_count;
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) {
			int rc;

			rc = blob_deserialize_xattr(blob,
						    (struct spdk_blob_md_descriptor_xattr *) desc, false);
			if (rc != 0) {
				return rc;
			}
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) {
			int rc;

			rc = blob_deserialize_xattr(blob,
						    (struct spdk_blob_md_descriptor_xattr *) desc, true);
			if (rc != 0) {
				return rc;
			}
		} else {
			/* Unrecognized descriptor type.  Do not fail - just continue to the
			 *  next descriptor.  If this descriptor is associated with some feature
			 *  defined in a newer version of blobstore, that version of blobstore
			 *  should create and set an associated feature flag to specify if this
			 *  blob can be loaded or not.
			 */
		}

		/* Advance to the next descriptor */
		cur_desc += sizeof(*desc) + desc->length;
		if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) {
			break;
		}
		desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc);
	}

	return 0;
}

static bool bs_load_cur_extent_page_valid(struct spdk_blob_md_page *page);

static int
blob_parse_extent_page(struct spdk_blob_md_page *extent_page, struct spdk_blob *blob)
{
	assert(blob != NULL);
	assert(blob->state == SPDK_BLOB_STATE_LOADING);

	if (bs_load_cur_extent_page_valid(extent_page) == false) {
		return -ENOENT;
	}

	return blob_parse_page(extent_page, blob);
}

static int
blob_parse(const struct spdk_blob_md_page *pages, uint32_t page_count,
	   struct spdk_blob *blob)
{
	const struct spdk_blob_md_page *page;
	uint32_t i;
	int rc;
	void *tmp;

	assert(page_count > 0);
	assert(pages[0].sequence_num == 0);
	assert(blob != NULL);
	assert(blob->state == SPDK_BLOB_STATE_LOADING);
	assert(blob->active.clusters == NULL);

	/* The blobid provided doesn't match what's in the MD; this can
	 * happen, for example, if a bogus blobid is passed in through open.
	 */
	if (blob->id != pages[0].id) {
		SPDK_ERRLOG("Blobid (0x%" PRIx64 ") doesn't match what's in metadata "
			    "(0x%" PRIx64 ")\n", blob->id, pages[0].id);
		return -ENOENT;
	}

	tmp = realloc(blob->active.pages, page_count * sizeof(*blob->active.pages));
	if (!tmp) {
		return -ENOMEM;
	}
	blob->active.pages = tmp;

	blob->active.pages[0] = pages[0].id;

	for (i = 1; i < page_count; i++) {
		assert(spdk_bit_array_get(blob->bs->used_md_pages, pages[i - 1].next));
		blob->active.pages[i] = pages[i - 1].next;
	}
	blob->active.num_pages = page_count;

	for (i = 0; i < page_count; i++) {
		page = &pages[i];

		assert(page->id == blob->id);
		assert(page->sequence_num == i);

		rc = blob_parse_page(page, blob);
		if (rc != 0) {
			return rc;
		}
	}

	return 0;
}

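/*
 * Metadata pages for a single blob form a chain linked through page->next.
 * Serialization grows one DMA-able allocation a page at a time; each new
 * page is zeroed and stamped with the blob id and its sequence number
 * before descriptors are packed into it.
 */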
static int
blob_serialize_add_page(const struct spdk_blob *blob,
			struct spdk_blob_md_page **pages,
			uint32_t *page_count,
			struct spdk_blob_md_page **last_page)
{
	struct spdk_blob_md_page *page, *tmp_pages;

	assert(pages != NULL);
	assert(page_count != NULL);

	*last_page = NULL;
	if (*page_count == 0) {
		assert(*pages == NULL);
		*pages = spdk_malloc(SPDK_BS_PAGE_SIZE, 0,
				     NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
		if (*pages == NULL) {
			return -ENOMEM;
		}
		*page_count = 1;
	} else {
		assert(*pages != NULL);
		tmp_pages = spdk_realloc(*pages, SPDK_BS_PAGE_SIZE * (*page_count + 1), 0);
		if (tmp_pages == NULL) {
			return -ENOMEM;
		}
		(*page_count)++;
		*pages = tmp_pages;
	}

	page = &(*pages)[*page_count - 1];
	memset(page, 0, sizeof(*page));
	page->id = blob->id;
	page->sequence_num = *page_count - 1;
	page->next = SPDK_INVALID_MD_PAGE;
	*last_page = page;

	return 0;
}

/* Transform the in-memory representation 'xattr' into an on-disk xattr descriptor.
 * Update required_sz on both success and failure.
 */
static int
blob_serialize_xattr(const struct spdk_xattr *xattr,
		     uint8_t *buf, size_t buf_sz,
		     size_t *required_sz, bool internal)
{
	struct spdk_blob_md_descriptor_xattr	*desc;

	*required_sz = sizeof(struct spdk_blob_md_descriptor_xattr) +
		       strlen(xattr->name) +
		       xattr->value_len;

	if (buf_sz < *required_sz) {
		return -1;
	}

	desc = (struct spdk_blob_md_descriptor_xattr *)buf;

	desc->type = internal ? SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL : SPDK_MD_DESCRIPTOR_TYPE_XATTR;
	desc->length = sizeof(desc->name_length) +
		       sizeof(desc->value_length) +
		       strlen(xattr->name) +
		       xattr->value_len;
	desc->name_length = strlen(xattr->name);
	desc->value_length = xattr->value_len;

	memcpy(desc->name, xattr->name, desc->name_length);
	memcpy((void *)((uintptr_t)desc->name + desc->name_length),
	       xattr->value,
	       desc->value_length);

	return 0;
}

static void
blob_serialize_extent_table_entry(const struct spdk_blob *blob,
				  uint64_t start_ep, uint64_t *next_ep,
				  uint8_t **buf, size_t *remaining_sz)
{
	struct spdk_blob_md_descriptor_extent_table *desc;
	size_t cur_sz;
	uint64_t i, et_idx;
	uint32_t extent_page, ep_len;

	/* The buffer must have room for at least the num_clusters entry */
	cur_sz = sizeof(struct spdk_blob_md_descriptor) + sizeof(desc->num_clusters);
	if (*remaining_sz < cur_sz) {
		*next_ep = start_ep;
		return;
	}

	desc = (struct spdk_blob_md_descriptor_extent_table *)*buf;
	desc->type = SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE;

	desc->num_clusters = blob->active.num_clusters;

	ep_len = 1;
	et_idx = 0;
	for (i = start_ep; i < blob->active.num_extent_pages; i++) {
		if (*remaining_sz < cur_sz + sizeof(desc->extent_page[0])) {
			/* If we ran out of buffer space, return */
			break;
		}

		extent_page = blob->active.extent_pages[i];
		/* Verify that next extent_page is unallocated */
		if (extent_page == 0 &&
		    (i + 1 < blob->active.num_extent_pages && blob->active.extent_pages[i + 1] == 0)) {
			ep_len++;
			continue;
		}
		desc->extent_page[et_idx].page_idx = extent_page;
		desc->extent_page[et_idx].num_pages = ep_len;
		et_idx++;

		ep_len = 1;
		cur_sz += sizeof(desc->extent_page[et_idx]);
	}
	*next_ep = i;

	desc->length = sizeof(desc->num_clusters) + sizeof(desc->extent_page[0]) * et_idx;
	*remaining_sz -= sizeof(struct spdk_blob_md_descriptor) + desc->length;
	*buf += sizeof(struct spdk_blob_md_descriptor) + desc->length;
}

static int
blob_serialize_extent_table(const struct spdk_blob *blob,
			    struct spdk_blob_md_page **pages,
			    struct spdk_blob_md_page *cur_page,
			    uint32_t *page_count, uint8_t **buf,
			    size_t *remaining_sz)
{
	uint64_t				last_extent_page;
	int					rc;

	last_extent_page = 0;
	/* At least one extent table entry always has to be persisted.
	 * Such a case occurs with num_extent_pages == 0. */
	while (last_extent_page <= blob->active.num_extent_pages) {
		blob_serialize_extent_table_entry(blob, last_extent_page, &last_extent_page, buf,
						  remaining_sz);

		if (last_extent_page == blob->active.num_extent_pages) {
			break;
		}

		rc = blob_serialize_add_page(blob, pages, page_count, &cur_page);
		if (rc < 0) {
			return rc;
		}

		*buf = (uint8_t *)cur_page->descriptors;
		*remaining_sz = sizeof(cur_page->descriptors);
	}

	return 0;
}

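/*
 * Worked example for the RLE encoding below (illustrative): with
 * lba_per_cluster == c, an active cluster array holding LBAs
 * {8c, 9c, 10c, 0, 0, 4c} serializes to three extents:
 * {cluster_idx 8, length 3}, {cluster_idx 0, length 2} and
 * {cluster_idx 4, length 1}. Runs of sequential non-zero LBAs and runs of
 * unallocated (zero) clusters each collapse into a single extent.
 */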
static void
blob_serialize_extent_rle(const struct spdk_blob *blob,
			  uint64_t start_cluster, uint64_t *next_cluster,
			  uint8_t **buf, size_t *buf_sz)
{
	struct spdk_blob_md_descriptor_extent_rle *desc_extent_rle;
	size_t cur_sz;
	uint64_t i, extent_idx;
	uint64_t lba, lba_per_cluster, lba_count;

	/* The buffer must have room for at least one extent */
	cur_sz = sizeof(struct spdk_blob_md_descriptor) + sizeof(desc_extent_rle->extents[0]);
	if (*buf_sz < cur_sz) {
		*next_cluster = start_cluster;
		return;
	}

	desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)*buf;
	desc_extent_rle->type = SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE;

	lba_per_cluster = bs_cluster_to_lba(blob->bs, 1);
	/* Assert for scan-build false positive */
	assert(lba_per_cluster > 0);

	lba = blob->active.clusters[start_cluster];
	lba_count = lba_per_cluster;
	extent_idx = 0;
	for (i = start_cluster + 1; i < blob->active.num_clusters; i++) {
		if ((lba + lba_count) == blob->active.clusters[i] && lba != 0) {
			/* Run-length encode sequential non-zero LBA */
			lba_count += lba_per_cluster;
			continue;
		} else if (lba == 0 && blob->active.clusters[i] == 0) {
			/* Run-length encode unallocated clusters */
			lba_count += lba_per_cluster;
			continue;
		}
		desc_extent_rle->extents[extent_idx].cluster_idx = lba / lba_per_cluster;
		desc_extent_rle->extents[extent_idx].length = lba_count / lba_per_cluster;
		extent_idx++;

		cur_sz += sizeof(desc_extent_rle->extents[extent_idx]);

		if (*buf_sz < cur_sz) {
			/* If we ran out of buffer space, return */
			*next_cluster = i;
			break;
		}

		lba = blob->active.clusters[i];
		lba_count = lba_per_cluster;
	}

	if (*buf_sz >= cur_sz) {
		desc_extent_rle->extents[extent_idx].cluster_idx = lba / lba_per_cluster;
		desc_extent_rle->extents[extent_idx].length = lba_count / lba_per_cluster;
		extent_idx++;

		*next_cluster = blob->active.num_clusters;
	}

	desc_extent_rle->length = sizeof(desc_extent_rle->extents[0]) * extent_idx;
	*buf_sz -= sizeof(struct spdk_blob_md_descriptor) + desc_extent_rle->length;
	*buf += sizeof(struct spdk_blob_md_descriptor) + desc_extent_rle->length;
}

static int
blob_serialize_extents_rle(const struct spdk_blob *blob,
			   struct spdk_blob_md_page **pages,
			   struct spdk_blob_md_page *cur_page,
			   uint32_t *page_count, uint8_t **buf,
			   size_t *remaining_sz)
{
	uint64_t				last_cluster;
	int					rc;

	last_cluster = 0;
	while (last_cluster < blob->active.num_clusters) {
		blob_serialize_extent_rle(blob, last_cluster, &last_cluster, buf, remaining_sz);

		if (last_cluster == blob->active.num_clusters) {
			break;
		}

		rc = blob_serialize_add_page(blob, pages, page_count, &cur_page);
		if (rc < 0) {
			return rc;
		}

		*buf = (uint8_t *)cur_page->descriptors;
		*remaining_sz = sizeof(cur_page->descriptors);
	}

	return 0;
}

static void
blob_serialize_extent_page(const struct spdk_blob *blob,
			   uint64_t cluster, struct spdk_blob_md_page *page)
{
	struct spdk_blob_md_descriptor_extent_page *desc_extent;
	uint64_t i, extent_idx;
	uint64_t lba, lba_per_cluster;
	uint64_t start_cluster_idx = (cluster / SPDK_EXTENTS_PER_EP) * SPDK_EXTENTS_PER_EP;

	desc_extent = (struct spdk_blob_md_descriptor_extent_page *) page->descriptors;
	desc_extent->type = SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE;

	lba_per_cluster = bs_cluster_to_lba(blob->bs, 1);

	desc_extent->start_cluster_idx = start_cluster_idx;
	extent_idx = 0;
	for (i = start_cluster_idx; i < blob->active.num_clusters; i++) {
		lba = blob->active.clusters[i];
		desc_extent->cluster_idx[extent_idx++] = lba / lba_per_cluster;
		if (extent_idx >= SPDK_EXTENTS_PER_EP) {
			break;
		}
	}
	desc_extent->length = sizeof(desc_extent->start_cluster_idx) +
			      sizeof(desc_extent->cluster_idx[0]) * extent_idx;
}

static void
blob_serialize_flags(const struct spdk_blob *blob,
		     uint8_t *buf, size_t *buf_sz)
{
	struct spdk_blob_md_descriptor_flags *desc;

	/*
	 * Flags get serialized first, so we should always have room for the flags
	 *  descriptor.
	 */
	assert(*buf_sz >= sizeof(*desc));

	desc = (struct spdk_blob_md_descriptor_flags *)buf;
	desc->type = SPDK_MD_DESCRIPTOR_TYPE_FLAGS;
	desc->length = sizeof(*desc) - sizeof(struct spdk_blob_md_descriptor);
	desc->invalid_flags = blob->invalid_flags;
	desc->data_ro_flags = blob->data_ro_flags;
	desc->md_ro_flags = blob->md_ro_flags;

	*buf_sz -= sizeof(*desc);
}

static int
blob_serialize_xattrs(const struct spdk_blob *blob,
		      const struct spdk_xattr_tailq *xattrs, bool internal,
		      struct spdk_blob_md_page **pages,
		      struct spdk_blob_md_page *cur_page,
		      uint32_t *page_count, uint8_t **buf,
		      size_t *remaining_sz)
{
	const struct spdk_xattr	*xattr;
	int	rc;

	TAILQ_FOREACH(xattr, xattrs, link) {
		size_t required_sz = 0;

		rc = blob_serialize_xattr(xattr,
					  *buf, *remaining_sz,
					  &required_sz, internal);
		if (rc < 0) {
			/* Need to add a new page to the chain */
			rc = blob_serialize_add_page(blob, pages, page_count,
						     &cur_page);
			if (rc < 0) {
				spdk_free(*pages);
				*pages = NULL;
				*page_count = 0;
				return rc;
			}

			*buf = (uint8_t *)cur_page->descriptors;
			*remaining_sz = sizeof(cur_page->descriptors);

			/* Try again */
			required_sz = 0;
			rc = blob_serialize_xattr(xattr,
						  *buf, *remaining_sz,
						  &required_sz, internal);

			if (rc < 0) {
				spdk_free(*pages);
				*pages = NULL;
				*page_count = 0;
				return rc;
			}
		}

		*remaining_sz -= required_sz;
		*buf += required_sz;
	}

	return 0;
}

static int
blob_serialize(const struct spdk_blob *blob, struct spdk_blob_md_page **pages,
	       uint32_t *page_count)
{
	struct spdk_blob_md_page		*cur_page;
	int					rc;
	uint8_t					*buf;
	size_t					remaining_sz;

	assert(pages != NULL);
	assert(page_count != NULL);
	assert(blob != NULL);
	assert(blob->state == SPDK_BLOB_STATE_DIRTY);

	*pages = NULL;
	*page_count = 0;

	/* A blob always has at least 1 page, even if it has no descriptors */
	rc = blob_serialize_add_page(blob, pages, page_count, &cur_page);
	if (rc < 0) {
		return rc;
	}

	buf = (uint8_t *)cur_page->descriptors;
	remaining_sz = sizeof(cur_page->descriptors);

	/* Serialize flags */
	blob_serialize_flags(blob, buf, &remaining_sz);
	buf += sizeof(struct spdk_blob_md_descriptor_flags);

	/* Serialize xattrs */
	rc = blob_serialize_xattrs(blob, &blob->xattrs, false,
				   pages, cur_page, page_count, &buf, &remaining_sz);
	if (rc < 0) {
		return rc;
	}

	/* Serialize internal xattrs */
	rc = blob_serialize_xattrs(blob, &blob->xattrs_internal, true,
				   pages, cur_page, page_count, &buf, &remaining_sz);
	if (rc < 0) {
		return rc;
	}

	if (blob->use_extent_table) {
		/* Serialize extent table */
		rc = blob_serialize_extent_table(blob, pages, cur_page, page_count, &buf, &remaining_sz);
	} else {
		/* Serialize extents */
		rc = blob_serialize_extents_rle(blob, pages, cur_page, page_count, &buf, &remaining_sz);
	}

	return rc;
}

struct spdk_blob_load_ctx {
	struct spdk_blob		*blob;

	struct spdk_blob_md_page	*pages;
	uint32_t			num_pages;
	uint32_t			next_extent_page;
	spdk_bs_sequence_t		*seq;

	spdk_bs_sequence_cpl		cb_fn;
	void				*cb_arg;
};

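/*
 * The CRC is computed over the entire 4 KiB page except its final 4 bytes,
 * which hold the stored crc itself. The 0xffffffff seed and final xor
 * follow the usual CRC-32C convention.
 */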
static uint32_t
blob_md_page_calc_crc(void *page)
{
	uint32_t		crc;

	crc = BLOB_CRC32C_INITIAL;
	crc = spdk_crc32c_update(page, SPDK_BS_PAGE_SIZE - 4, crc);
	crc ^= BLOB_CRC32C_INITIAL;

	return crc;
}

static void
blob_load_final(struct spdk_blob_load_ctx *ctx, int bserrno)
{
	struct spdk_blob		*blob = ctx->blob;

	if (bserrno == 0) {
		blob_mark_clean(blob);
	}

	ctx->cb_fn(ctx->seq, ctx->cb_arg, bserrno);

	/* Free the memory */
	spdk_free(ctx->pages);
	free(ctx);
}

static void
blob_load_snapshot_cpl(void *cb_arg, struct spdk_blob *snapshot, int bserrno)
{
	struct spdk_blob_load_ctx	*ctx = cb_arg;
	struct spdk_blob		*blob = ctx->blob;

	if (bserrno == 0) {
		blob->back_bs_dev = bs_create_blob_bs_dev(snapshot);
		if (blob->back_bs_dev == NULL) {
			bserrno = -ENOMEM;
		}
	}
	if (bserrno != 0) {
		SPDK_ERRLOG("Snapshot load failed\n");
	}

	blob_load_final(ctx, bserrno);
}

static void blob_update_clear_method(struct spdk_blob *blob);

static int
blob_load_esnap(struct spdk_blob *blob, void *blob_ctx)
{
	struct spdk_blob_store *bs = blob->bs;
	struct spdk_bs_dev *bs_dev = NULL;
	const void *esnap_id = NULL;
	size_t id_len = 0;
	int rc;

	if (bs->esnap_bs_dev_create == NULL) {
		SPDK_NOTICELOG("blob 0x%" PRIx64 " is an esnap clone but the blobstore was opened "
			       "without support for esnap clones\n", blob->id);
		return -ENOTSUP;
	}
	assert(blob->back_bs_dev == NULL);

	rc = blob_get_xattr_value(blob, BLOB_EXTERNAL_SNAPSHOT_ID, &esnap_id, &id_len, true);
	if (rc != 0) {
		SPDK_ERRLOG("blob 0x%" PRIx64 " is an esnap clone but has no esnap ID\n", blob->id);
		return -EINVAL;
	}
	assert(id_len > 0 && id_len < UINT32_MAX);

	SPDK_INFOLOG(blob, "Creating external snapshot device\n");

	rc = bs->esnap_bs_dev_create(bs->esnap_ctx, blob_ctx, blob, esnap_id, (uint32_t)id_len,
				     &bs_dev);
	if (rc != 0) {
		SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": failed to load back_bs_dev "
			      "with error %d\n", blob->id, rc);
		return rc;
	}

	/*
	 * Note: bs_dev might be NULL if the consumer chose to not open the external snapshot.
	 * This can especially happen during spdk_bs_load() iteration.
	 */
	if (bs_dev != NULL) {
		SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": loaded back_bs_dev\n", blob->id);
		if ((bs->io_unit_size % bs_dev->blocklen) != 0) {
			SPDK_NOTICELOG("blob 0x%" PRIx64 " external snapshot device block size %u "
				       "is not compatible with blobstore block size %u\n",
				       blob->id, bs_dev->blocklen, bs->io_unit_size);
			bs_dev->destroy(bs_dev);
			return -EINVAL;
		}
	}

	blob->back_bs_dev = bs_dev;
	blob->parent_id = SPDK_BLOBID_EXTERNAL_SNAPSHOT;

	return 0;
}

static void
blob_load_backing_dev(spdk_bs_sequence_t *seq, void *cb_arg)
{
	struct spdk_blob_load_ctx	*ctx = cb_arg;
	struct spdk_blob		*blob = ctx->blob;
	const void			*value;
	size_t				len;
	int				rc;

	if (blob_is_esnap_clone(blob)) {
		rc = blob_load_esnap(blob, seq->cpl.u.blob_handle.esnap_ctx);
		blob_load_final(ctx, rc);
		return;
	}

	if (spdk_blob_is_thin_provisioned(blob)) {
		rc = blob_get_xattr_value(blob, BLOB_SNAPSHOT, &value, &len, true);
		if (rc == 0) {
			if (len != sizeof(spdk_blob_id)) {
				blob_load_final(ctx, -EINVAL);
				return;
			}
			/* open snapshot blob and continue in the callback function */
			blob->parent_id = *(spdk_blob_id *)value;
			spdk_bs_open_blob(blob->bs, blob->parent_id,
					  blob_load_snapshot_cpl, ctx);
			return;
		} else {
			/* add zeroes_dev for thin provisioned blob */
			blob->back_bs_dev = bs_create_zeroes_dev();
		}
	} else {
		/* standard blob */
		blob->back_bs_dev = NULL;
	}
	blob_load_final(ctx, 0);
}

static void
blob_load_cpl_extents_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_load_ctx	*ctx = cb_arg;
	struct spdk_blob		*blob = ctx->blob;
	struct spdk_blob_md_page	*page;
	uint64_t			i;
	uint32_t			crc;
	uint64_t			lba;
	void				*tmp;
	uint64_t			sz;

	if (bserrno) {
		SPDK_ERRLOG("Extent page read failed: %d\n", bserrno);
		blob_load_final(ctx, bserrno);
		return;
	}

	if (ctx->pages == NULL) {
		/* First iteration of this function, allocate buffer for single EXTENT_PAGE */
		ctx->pages = spdk_zmalloc(SPDK_BS_PAGE_SIZE, 0,
					  NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
		if (!ctx->pages) {
			blob_load_final(ctx, -ENOMEM);
			return;
		}
		ctx->num_pages = 1;
		ctx->next_extent_page = 0;
	} else {
		page = &ctx->pages[0];
		crc = blob_md_page_calc_crc(page);
		if (crc != page->crc) {
			blob_load_final(ctx, -EINVAL);
			return;
		}

		if (page->next != SPDK_INVALID_MD_PAGE) {
			blob_load_final(ctx, -EINVAL);
			return;
		}

		bserrno = blob_parse_extent_page(page, blob);
		if (bserrno) {
			blob_load_final(ctx, bserrno);
			return;
		}
	}

	for (i = ctx->next_extent_page; i < blob->active.num_extent_pages; i++) {
		if (blob->active.extent_pages[i] != 0) {
			/* Extent page was allocated, read and parse it. */
			lba = bs_md_page_to_lba(blob->bs, blob->active.extent_pages[i]);
			ctx->next_extent_page = i + 1;

			bs_sequence_read_dev(seq, &ctx->pages[0], lba,
					     bs_byte_to_lba(blob->bs, SPDK_BS_PAGE_SIZE),
					     blob_load_cpl_extents_cpl, ctx);
			return;
		} else {
			/* Thin provisioned blobs can point to unallocated extent pages.
			 * In this case the blob size should be increased by up to the amount left
			 * in remaining_clusters_in_et. */

			sz = spdk_min(blob->remaining_clusters_in_et, SPDK_EXTENTS_PER_EP);
			blob->active.num_clusters += sz;
			blob->remaining_clusters_in_et -= sz;

			assert(spdk_blob_is_thin_provisioned(blob));
			assert(i + 1 < blob->active.num_extent_pages || blob->remaining_clusters_in_et == 0);

			tmp = realloc(blob->active.clusters, blob->active.num_clusters * sizeof(*blob->active.clusters));
			if (tmp == NULL) {
				blob_load_final(ctx, -ENOMEM);
				return;
			}
			memset(tmp + sizeof(*blob->active.clusters) * blob->active.cluster_array_size, 0,
			       sizeof(*blob->active.clusters) * (blob->active.num_clusters - blob->active.cluster_array_size));
			blob->active.clusters = tmp;
			blob->active.cluster_array_size = blob->active.num_clusters;
		}
	}

	blob_load_backing_dev(seq, ctx);
}

static void
blob_load_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_load_ctx	*ctx = cb_arg;
	struct spdk_blob		*blob = ctx->blob;
	struct spdk_blob_md_page	*page;
	int				rc;
	uint32_t			crc;
	uint32_t			current_page;

	if (ctx->num_pages == 1) {
		current_page = bs_blobid_to_page(blob->id);
	} else {
		assert(ctx->num_pages != 0);
		page = &ctx->pages[ctx->num_pages - 2];
		current_page = page->next;
	}

	if (bserrno) {
		SPDK_ERRLOG("Metadata page %d read failed for blobid 0x%" PRIx64 ": %d\n",
			    current_page, blob->id, bserrno);
		blob_load_final(ctx, bserrno);
		return;
	}

	page = &ctx->pages[ctx->num_pages - 1];
	crc = blob_md_page_calc_crc(page);
	if (crc != page->crc) {
		SPDK_ERRLOG("Metadata page %d crc mismatch for blobid 0x%" PRIx64 "\n",
			    current_page, blob->id);
		blob_load_final(ctx, -EINVAL);
		return;
	}

	if (page->next != SPDK_INVALID_MD_PAGE) {
		struct spdk_blob_md_page *tmp_pages;
		uint32_t next_page = page->next;
		uint64_t next_lba = bs_md_page_to_lba(blob->bs, next_page);

		/* Read the next page */
		tmp_pages = spdk_realloc(ctx->pages, (sizeof(*page) * (ctx->num_pages + 1)), 0);
		if (tmp_pages == NULL) {
			blob_load_final(ctx, -ENOMEM);
			return;
		}
		ctx->num_pages++;
		ctx->pages = tmp_pages;

		bs_sequence_read_dev(seq, &ctx->pages[ctx->num_pages - 1],
				     next_lba,
				     bs_byte_to_lba(blob->bs, sizeof(*page)),
				     blob_load_cpl, ctx);
		return;
	}

	/* Parse the pages */
	rc = blob_parse(ctx->pages, ctx->num_pages, blob);
	if (rc) {
		blob_load_final(ctx, rc);
		return;
	}

	if (blob->extent_table_found == true) {
		/* If EXTENT_TABLE was found, that means support for it should be enabled. */
		assert(blob->extent_rle_found == false);
		blob->use_extent_table = true;
	} else {
		/* If EXTENT_RLE or no extent_* descriptor was found, disable support for
		 * the extent table. No extent_* descriptors means that the blob has a length
		 * of 0 and no extent_rle descriptors were persisted for it.
		 * EXTENT_TABLE, if used, is always present in metadata regardless of length. */
		blob->use_extent_table = false;
	}

	/* Check the clear_method stored in metadata vs what may have been passed
	 * via spdk_bs_open_blob_ext() and update accordingly.
	 */
	blob_update_clear_method(blob);

	spdk_free(ctx->pages);
	ctx->pages = NULL;

	if (blob->extent_table_found) {
		blob_load_cpl_extents_cpl(seq, ctx, 0);
	} else {
		blob_load_backing_dev(seq, ctx);
	}
}

/* Load a blob from disk given a blobid */
static void
blob_load(spdk_bs_sequence_t *seq, struct spdk_blob *blob,
	  spdk_bs_sequence_cpl cb_fn, void *cb_arg)
{
	struct spdk_blob_load_ctx *ctx;
	struct spdk_blob_store *bs;
	uint32_t page_num;
	uint64_t lba;

	blob_verify_md_op(blob);

	bs = blob->bs;

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(seq, cb_arg, -ENOMEM);
		return;
	}

	ctx->blob = blob;
	ctx->pages = spdk_realloc(ctx->pages, SPDK_BS_PAGE_SIZE, 0);
	if (!ctx->pages) {
		free(ctx);
		cb_fn(seq, cb_arg, -ENOMEM);
		return;
	}
	ctx->num_pages = 1;
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;
	ctx->seq = seq;

	page_num = bs_blobid_to_page(blob->id);
	lba = bs_md_page_to_lba(blob->bs, page_num);

	blob->state = SPDK_BLOB_STATE_LOADING;

	bs_sequence_read_dev(seq, &ctx->pages[0], lba,
			     bs_byte_to_lba(bs, SPDK_BS_PAGE_SIZE),
			     blob_load_cpl, ctx);
}

struct spdk_blob_persist_ctx {
	struct spdk_blob		*blob;

	struct spdk_blob_md_page	*pages;
	uint32_t			next_extent_page;
	struct spdk_blob_md_page	*extent_page;

	spdk_bs_sequence_t		*seq;
	spdk_bs_sequence_cpl		cb_fn;
	void				*cb_arg;
	TAILQ_ENTRY(spdk_blob_persist_ctx) link;
};

static void
bs_batch_clear_dev(struct spdk_blob *blob, spdk_bs_batch_t *batch, uint64_t lba,
		   uint64_t lba_count)
{
	switch (blob->clear_method) {
	case BLOB_CLEAR_WITH_DEFAULT:
	case BLOB_CLEAR_WITH_UNMAP:
		bs_batch_unmap_dev(batch, lba, lba_count);
		break;
	case BLOB_CLEAR_WITH_WRITE_ZEROES:
		bs_batch_write_zeroes_dev(batch, lba, lba_count);
		break;
	case BLOB_CLEAR_WITH_NONE:
	default:
		break;
	}
}

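/*
 * Sanity-check a super block read from disk: version window, signature,
 * page CRC, bstype (exact match, or all-zero wildcard that matches any
 * bstype), and that the recorded blobstore size fits on the underlying
 * device.
 */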
1752 static int
1753 bs_super_validate(struct spdk_bs_super_block *super, struct spdk_blob_store *bs)
1754 {
1755 	uint32_t	crc;
1756 	static const char zeros[SPDK_BLOBSTORE_TYPE_LENGTH];
1757 
1758 	if (super->version > SPDK_BS_VERSION ||
1759 	    super->version < SPDK_BS_INITIAL_VERSION) {
1760 		return -EILSEQ;
1761 	}
1762 
1763 	if (memcmp(super->signature, SPDK_BS_SUPER_BLOCK_SIG,
1764 		   sizeof(super->signature)) != 0) {
1765 		return -EILSEQ;
1766 	}
1767 
1768 	crc = blob_md_page_calc_crc(super);
1769 	if (crc != super->crc) {
1770 		return -EILSEQ;
1771 	}
1772 
1773 	if (memcmp(&bs->bstype, &super->bstype, SPDK_BLOBSTORE_TYPE_LENGTH) == 0) {
1774 		SPDK_DEBUGLOG(blob, "Bstype matched - loading blobstore\n");
1775 	} else if (memcmp(&bs->bstype, zeros, SPDK_BLOBSTORE_TYPE_LENGTH) == 0) {
1776 		SPDK_DEBUGLOG(blob, "Bstype wildcard used - loading blobstore regardless bstype\n");
1777 	} else {
1778 		SPDK_DEBUGLOG(blob, "Unexpected bstype\n");
1779 		SPDK_LOGDUMP(blob, "Expected:", bs->bstype.bstype, SPDK_BLOBSTORE_TYPE_LENGTH);
1780 		SPDK_LOGDUMP(blob, "Found:", super->bstype.bstype, SPDK_BLOBSTORE_TYPE_LENGTH);
1781 		return -ENXIO;
1782 	}
1783 
1784 	if (super->size > bs->dev->blockcnt * bs->dev->blocklen) {
1785 		SPDK_NOTICELOG("Size mismatch, dev size: %" PRIu64 ", blobstore size: %" PRIu64 "\n",
1786 			       bs->dev->blockcnt * bs->dev->blocklen, super->size);
1787 		return -EILSEQ;
1788 	}
1789 
1790 	return 0;
1791 }
1792 
1793 static void bs_mark_dirty(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs,
1794 			  spdk_bs_sequence_cpl cb_fn, void *cb_arg);
1795 
1796 static void
1797 blob_persist_complete_cb(void *arg)
1798 {
1799 	struct spdk_blob_persist_ctx *ctx = arg;
1800 
1801 	/* Call user callback */
1802 	ctx->cb_fn(ctx->seq, ctx->cb_arg, 0);
1803 
1804 	/* Free the memory */
1805 	spdk_free(ctx->pages);
1806 	free(ctx);
1807 }
1808 
1809 static void blob_persist_start(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno);
1810 
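/*
 * Complete the persist that just finished along with every persist that was
 * queued behind it while it was in flight. Completions are deferred with
 * spdk_thread_send_msg, presumably so user callbacks cannot re-enter blob
 * state while this function is still walking the queue. If more persists
 * arrived in the meantime, the pending queue is swapped in and the cycle
 * restarts with a fresh bs_mark_dirty.
 */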
1811 static void
1812 blob_persist_complete(spdk_bs_sequence_t *seq, struct spdk_blob_persist_ctx *ctx, int bserrno)
1813 {
1814 	struct spdk_blob_persist_ctx	*next_persist, *tmp;
1815 	struct spdk_blob		*blob = ctx->blob;
1816 
1817 	if (bserrno == 0) {
1818 		blob_mark_clean(blob);
1819 	}
1820 
1821 	assert(ctx == TAILQ_FIRST(&blob->persists_to_complete));
1822 
1823 	/* Complete all persists that were pending when the current persist started */
1824 	TAILQ_FOREACH_SAFE(next_persist, &blob->persists_to_complete, link, tmp) {
1825 		TAILQ_REMOVE(&blob->persists_to_complete, next_persist, link);
1826 		spdk_thread_send_msg(spdk_get_thread(), blob_persist_complete_cb, next_persist);
1827 	}
1828 
1829 	if (TAILQ_EMPTY(&blob->pending_persists)) {
1830 		return;
1831 	}
1832 
1833 	/* Queue up all pending persists for completion and start the blob persist with the first one */
1834 	TAILQ_SWAP(&blob->persists_to_complete, &blob->pending_persists, spdk_blob_persist_ctx, link);
1835 	next_persist = TAILQ_FIRST(&blob->persists_to_complete);
1836 
1837 	blob->state = SPDK_BLOB_STATE_DIRTY;
1838 	bs_mark_dirty(seq, blob->bs, blob_persist_start, next_persist);
1839 }
1840 
1841 static void
1842 blob_persist_clear_extents_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
1843 {
1844 	struct spdk_blob_persist_ctx	*ctx = cb_arg;
1845 	struct spdk_blob		*blob = ctx->blob;
1846 	struct spdk_blob_store		*bs = blob->bs;
1847 	size_t				i;
1848 
1849 	if (bserrno != 0) {
1850 		blob_persist_complete(seq, ctx, bserrno);
1851 		return;
1852 	}
1853 
1854 	spdk_spin_lock(&bs->used_lock);
1855 
1856 	/* Release all extent_pages that were truncated */
1857 	for (i = blob->active.num_extent_pages; i < blob->active.extent_pages_array_size; i++) {
1858 		/* Nothing to release if it was not allocated */
1859 		if (blob->active.extent_pages[i] != 0) {
1860 			bs_release_md_page(bs, blob->active.extent_pages[i]);
1861 		}
1862 	}
1863 
1864 	spdk_spin_unlock(&bs->used_lock);
1865 
1866 	if (blob->active.num_extent_pages == 0) {
1867 		free(blob->active.extent_pages);
1868 		blob->active.extent_pages = NULL;
1869 		blob->active.extent_pages_array_size = 0;
1870 	} else if (blob->active.num_extent_pages != blob->active.extent_pages_array_size) {
1871 #ifndef __clang_analyzer__
1872 		void *tmp;
1873 
1874 		/* scan-build really can't figure out reallocs; work around it */
1875 		tmp = realloc(blob->active.extent_pages, sizeof(uint32_t) * blob->active.num_extent_pages);
1876 		assert(tmp != NULL);
1877 		blob->active.extent_pages = tmp;
1878 #endif
1879 		blob->active.extent_pages_array_size = blob->active.num_extent_pages;
1880 	}
1881 
1882 	blob_persist_complete(seq, ctx, bserrno);
1883 }
1884 
1885 static void
1886 blob_persist_clear_extents(spdk_bs_sequence_t *seq, struct spdk_blob_persist_ctx *ctx)
1887 {
1888 	struct spdk_blob		*blob = ctx->blob;
1889 	struct spdk_blob_store		*bs = blob->bs;
1890 	size_t				i;
1891 	uint64_t                        lba;
1892 	uint64_t                        lba_count;
1893 	spdk_bs_batch_t                 *batch;
1894 
1895 	batch = bs_sequence_to_batch(seq, blob_persist_clear_extents_cpl, ctx);
1896 	lba_count = bs_byte_to_lba(bs, SPDK_BS_PAGE_SIZE);
1897 
1898 	/* Clear all extent_pages that were truncated */
1899 	for (i = blob->active.num_extent_pages; i < blob->active.extent_pages_array_size; i++) {
1900 		/* Nothing to clear if it was not allocated */
1901 		if (blob->active.extent_pages[i] != 0) {
1902 			lba = bs_md_page_to_lba(bs, blob->active.extent_pages[i]);
1903 			bs_batch_write_zeroes_dev(batch, lba, lba_count);
1904 		}
1905 	}
1906 
1907 	bs_batch_close(batch);
1908 }
1909 
1910 static void
1911 blob_persist_clear_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
1912 {
1913 	struct spdk_blob_persist_ctx	*ctx = cb_arg;
1914 	struct spdk_blob		*blob = ctx->blob;
1915 	struct spdk_blob_store		*bs = blob->bs;
1916 	size_t				i;
1917 
1918 	if (bserrno != 0) {
1919 		blob_persist_complete(seq, ctx, bserrno);
1920 		return;
1921 	}
1922 
1923 	spdk_spin_lock(&bs->used_lock);
1924 	/* Release all clusters that were truncated */
1925 	for (i = blob->active.num_clusters; i < blob->active.cluster_array_size; i++) {
1926 		uint32_t cluster_num = bs_lba_to_cluster(bs, blob->active.clusters[i]);
1927 
1928 		/* Nothing to release if it was not allocated */
1929 		if (blob->active.clusters[i] != 0) {
1930 			bs_release_cluster(bs, cluster_num);
1931 		}
1932 	}
1933 	spdk_spin_unlock(&bs->used_lock);
1934 
1935 	if (blob->active.num_clusters == 0) {
1936 		free(blob->active.clusters);
1937 		blob->active.clusters = NULL;
1938 		blob->active.cluster_array_size = 0;
1939 	} else if (blob->active.num_clusters != blob->active.cluster_array_size) {
1940 #ifndef __clang_analyzer__
1941 		void *tmp;
1942 
1943 		/* scan-build really can't figure out reallocs; work around it */
1944 		tmp = realloc(blob->active.clusters, sizeof(*blob->active.clusters) * blob->active.num_clusters);
1945 		assert(tmp != NULL);
1946 		blob->active.clusters = tmp;
1947 
1948 #endif
1949 		blob->active.cluster_array_size = blob->active.num_clusters;
1950 	}
1951 
1952 	/* Move on to clearing extent pages */
1953 	blob_persist_clear_extents(seq, ctx);
1954 }
1955 
1956 static void
1957 blob_persist_clear_clusters(spdk_bs_sequence_t *seq, struct spdk_blob_persist_ctx *ctx)
1958 {
1959 	struct spdk_blob		*blob = ctx->blob;
1960 	struct spdk_blob_store		*bs = blob->bs;
1961 	spdk_bs_batch_t			*batch;
1962 	size_t				i;
1963 	uint64_t			lba;
1964 	uint64_t			lba_count;
1965 
1966 	/* Clusters don't move around in blobs. The list shrinks or grows
1967 	 * at the end, but no changes ever occur in the middle of the list.
1968 	 */
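	/*
	 * The loop below coalesces physically contiguous truncated clusters
	 * into a single clear operation. A purely illustrative example with
	 * hypothetical LBAs: if bs_cluster_to_lba(bs, 1) == 256 and the
	 * truncated cluster LBAs are [1024, 1280, 0, 4096], the first two
	 * merge into one clear of lba=1024/lba_count=512, the unallocated
	 * entry (0) is skipped, and 4096 gets its own clear of 256 blocks.
	 */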
1969 
1970 	batch = bs_sequence_to_batch(seq, blob_persist_clear_clusters_cpl, ctx);
1971 
1972 	/* Clear all clusters that were truncated */
1973 	lba = 0;
1974 	lba_count = 0;
1975 	for (i = blob->active.num_clusters; i < blob->active.cluster_array_size; i++) {
1976 		uint64_t next_lba = blob->active.clusters[i];
1977 		uint64_t next_lba_count = bs_cluster_to_lba(bs, 1);
1978 
1979 		if (next_lba > 0 && (lba + lba_count) == next_lba) {
1980 			/* This cluster is contiguous with the previous one. */
1981 			lba_count += next_lba_count;
1982 			continue;
1983 		} else if (next_lba == 0) {
1984 			continue;
1985 		}
1986 
1987 		/* This cluster is not contiguous with the previous one. */
1988 
1989 		/* If a run of LBAs previously existed, clear it now */
1990 		if (lba_count > 0) {
1991 			bs_batch_clear_dev(ctx->blob, batch, lba, lba_count);
1992 		}
1993 
1994 		/* Start building the next batch */
1995 		lba = next_lba;
1996 		if (next_lba > 0) {
1997 			lba_count = next_lba_count;
1998 		} else {
1999 			lba_count = 0;
2000 		}
2001 	}
2002 
2003 	/* If we ended with a contiguous set of LBAs, clear them now */
2004 	if (lba_count > 0) {
2005 		bs_batch_clear_dev(ctx->blob, batch, lba, lba_count);
2006 	}
2007 
2008 	bs_batch_close(batch);
2009 }
2010 
2011 static void
2012 blob_persist_zero_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2013 {
2014 	struct spdk_blob_persist_ctx	*ctx = cb_arg;
2015 	struct spdk_blob		*blob = ctx->blob;
2016 	struct spdk_blob_store		*bs = blob->bs;
2017 	size_t				i;
2018 
2019 	if (bserrno != 0) {
2020 		blob_persist_complete(seq, ctx, bserrno);
2021 		return;
2022 	}
2023 
2024 	spdk_spin_lock(&bs->used_lock);
2025 
2026 	/* This loop starts at 1 because the first page is special and handled
2027 	 * below. The pages (except the first) are never written in place, so
2028 	 * the old page locations recorded in the clean list can now be released.
2029 	 */
2030 	for (i = 1; i < blob->clean.num_pages; i++) {
2031 		bs_release_md_page(bs, blob->clean.pages[i]);
2032 	}
2033 
2034 	if (blob->active.num_pages == 0) {
2035 		uint32_t page_num;
2036 
2037 		page_num = bs_blobid_to_page(blob->id);
2038 		bs_release_md_page(bs, page_num);
2039 	}
2040 
2041 	spdk_spin_unlock(&bs->used_lock);
2042 
2043 	/* Move on to clearing clusters */
2044 	blob_persist_clear_clusters(seq, ctx);
2045 }
2046 
2047 static void
2048 blob_persist_zero_pages(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2049 {
2050 	struct spdk_blob_persist_ctx	*ctx = cb_arg;
2051 	struct spdk_blob		*blob = ctx->blob;
2052 	struct spdk_blob_store		*bs = blob->bs;
2053 	uint64_t			lba;
2054 	uint64_t			lba_count;
2055 	spdk_bs_batch_t			*batch;
2056 	size_t				i;
2057 
2058 	if (bserrno != 0) {
2059 		blob_persist_complete(seq, ctx, bserrno);
2060 		return;
2061 	}
2062 
2063 	batch = bs_sequence_to_batch(seq, blob_persist_zero_pages_cpl, ctx);
2064 
2065 	lba_count = bs_byte_to_lba(bs, SPDK_BS_PAGE_SIZE);
2066 
2067 	/* This loop starts at 1 because the first page is special and handled
2068 	 * below. The pages (except the first) are never written in place,
2069 	 * so any pages in the clean list must be zeroed.
2070 	 */
2071 	for (i = 1; i < blob->clean.num_pages; i++) {
2072 		lba = bs_md_page_to_lba(bs, blob->clean.pages[i]);
2073 
2074 		bs_batch_write_zeroes_dev(batch, lba, lba_count);
2075 	}
2076 
2077 	/* The first page will only be zeroed if this is a delete. */
2078 	if (blob->active.num_pages == 0) {
2079 		uint32_t page_num;
2080 
2081 		/* The first page in the metadata goes where the blobid indicates */
2082 		page_num = bs_blobid_to_page(blob->id);
2083 		lba = bs_md_page_to_lba(bs, page_num);
2084 
2085 		bs_batch_write_zeroes_dev(batch, lba, lba_count);
2086 	}
2087 
2088 	bs_batch_close(batch);
2089 }
2090 
2091 static void
2092 blob_persist_write_page_root(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2093 {
2094 	struct spdk_blob_persist_ctx	*ctx = cb_arg;
2095 	struct spdk_blob		*blob = ctx->blob;
2096 	struct spdk_blob_store		*bs = blob->bs;
2097 	uint64_t			lba;
2098 	uint32_t			lba_count;
2099 	struct spdk_blob_md_page	*page;
2100 
2101 	if (bserrno != 0) {
2102 		blob_persist_complete(seq, ctx, bserrno);
2103 		return;
2104 	}
2105 
2106 	if (blob->active.num_pages == 0) {
2107 		/* Move on to the next step */
2108 		blob_persist_zero_pages(seq, ctx, 0);
2109 		return;
2110 	}
2111 
2112 	lba_count = bs_byte_to_lba(bs, sizeof(*page));
2113 
2114 	page = &ctx->pages[0];
2115 	/* The first page in the metadata goes where the blobid indicates */
2116 	lba = bs_md_page_to_lba(bs, bs_blobid_to_page(blob->id));
2117 
2118 	bs_sequence_write_dev(seq, page, lba, lba_count,
2119 			      blob_persist_zero_pages, ctx);
2120 }
2121 
2122 static void
2123 blob_persist_write_page_chain(spdk_bs_sequence_t *seq, struct spdk_blob_persist_ctx *ctx)
2124 {
2125 	struct spdk_blob		*blob = ctx->blob;
2126 	struct spdk_blob_store		*bs = blob->bs;
2127 	uint64_t			lba;
2128 	uint32_t			lba_count;
2129 	struct spdk_blob_md_page	*page;
2130 	spdk_bs_batch_t			*batch;
2131 	size_t				i;
2132 
2133 	/* Metadata pages (except the root, whose location is fixed by the
2134 	 * blob ID) are written to freshly claimed slots, never in place.
2135 	 */
2136 
2137 	lba_count = bs_byte_to_lba(bs, sizeof(*page));
2138 
2139 	batch = bs_sequence_to_batch(seq, blob_persist_write_page_root, ctx);
2140 
2141 	/* This loop starts at 1. The root page is not written until
2142 	 * all of the others have finished.
2143 	 */
2144 	for (i = 1; i < blob->active.num_pages; i++) {
2145 		page = &ctx->pages[i];
2146 		assert(page->sequence_num == i);
2147 
2148 		lba = bs_md_page_to_lba(bs, blob->active.pages[i]);
2149 
2150 		bs_batch_write_dev(batch, page, lba, lba_count);
2151 	}
2152 
2153 	bs_batch_close(batch);
2154 }
2155 
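/*
 * Resize the blob's active cluster count to sz. Growth extends the cluster
 * array (and, when extent tables are used, the extent page array) and, for
 * thick-provisioned blobs, claims the new clusters immediately under
 * bs->used_lock. Shrinking only adjusts the counts here; the arrays are
 * trimmed and the on-disk clusters released later, at persist time.
 * Returns 0 on success, -ENOSPC when free clusters or metadata pages run
 * out, or -ENOMEM on allocation failure.
 *
 * Worked example of the extent page math (SPDK_EXTENTS_PER_EP's real value
 * is defined in blobstore.h; 512 is only an assumed figure here): if
 * SPDK_EXTENTS_PER_EP were 512, resizing to sz == 1025 clusters would need
 * spdk_divide_round_up(1025, 512) == 3 extent pages.
 */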
2156 static int
2157 blob_resize(struct spdk_blob *blob, uint64_t sz)
2158 {
2159 	uint64_t	i;
2160 	uint64_t	*tmp;
2161 	uint64_t	cluster;
2162 	uint32_t	lfmd; /*  lowest free md page */
2163 	uint64_t	num_clusters;
2164 	uint32_t	*ep_tmp;
2165 	uint64_t	new_num_ep = 0, current_num_ep = 0;
2166 	struct spdk_blob_store *bs;
2167 	int		rc;
2168 
2169 	bs = blob->bs;
2170 
2171 	blob_verify_md_op(blob);
2172 
2173 	if (blob->active.num_clusters == sz) {
2174 		return 0;
2175 	}
2176 
2177 	if (blob->active.num_clusters < blob->active.cluster_array_size) {
2178 		/* If this blob was resized to be larger, then smaller, then
2179 		 * larger without syncing, then the cluster array already
2180 		 * contains spare assigned clusters we can use.
2181 		 */
2182 		num_clusters = spdk_min(blob->active.cluster_array_size,
2183 					sz);
2184 	} else {
2185 		num_clusters = blob->active.num_clusters;
2186 	}
2187 
2188 	if (blob->use_extent_table) {
2189 		/* Round up, since every cluster beyond the current extent table
2190 		 * size requires a new extent page. */
2191 		new_num_ep = spdk_divide_round_up(sz, SPDK_EXTENTS_PER_EP);
2192 		current_num_ep = spdk_divide_round_up(num_clusters, SPDK_EXTENTS_PER_EP);
2193 	}
2194 
2195 	assert(!spdk_spin_held(&bs->used_lock));
2196 
2197 	/* Check first that we have enough clusters and md pages before we start claiming them.
2198 	 * bs->used_lock is held to ensure that clusters we think are free are still free when we go
2199 	 * to claim them later in this function.
2200 	 */
2201 	if (sz > num_clusters && spdk_blob_is_thin_provisioned(blob) == false) {
2202 		spdk_spin_lock(&bs->used_lock);
2203 		if ((sz - num_clusters) > bs->num_free_clusters) {
2204 			rc = -ENOSPC;
2205 			goto out;
2206 		}
2207 		lfmd = 0;
2208 		for (i = current_num_ep; i < new_num_ep ; i++) {
2209 			lfmd = spdk_bit_array_find_first_clear(blob->bs->used_md_pages, lfmd);
2210 			if (lfmd == UINT32_MAX) {
2211 				/* No more free md pages. Cannot satisfy the request */
2212 				rc = -ENOSPC;
2213 				goto out;
2214 			}
2215 		}
2216 	}
2217 
2218 	if (sz > num_clusters) {
2219 		/* Expand the cluster array if necessary.
2220 		 * We only shrink the array when persisting.
2221 		 */
2222 		tmp = realloc(blob->active.clusters, sizeof(*blob->active.clusters) * sz);
2223 		if (sz > 0 && tmp == NULL) {
2224 			rc = -ENOMEM;
2225 			goto out;
2226 		}
2227 		memset(tmp + blob->active.cluster_array_size, 0,
2228 		       sizeof(*blob->active.clusters) * (sz - blob->active.cluster_array_size));
2229 		blob->active.clusters = tmp;
2230 		blob->active.cluster_array_size = sz;
2231 
2232 		/* Expand the extent table, but only if enough clusters were added to require new extent pages */
2233 		if (new_num_ep > current_num_ep && blob->use_extent_table) {
2234 			ep_tmp = realloc(blob->active.extent_pages, sizeof(*blob->active.extent_pages) * new_num_ep);
2235 			if (new_num_ep > 0 && ep_tmp == NULL) {
2236 				rc = -ENOMEM;
2237 				goto out;
2238 			}
2239 			memset(ep_tmp + blob->active.extent_pages_array_size, 0,
2240 			       sizeof(*blob->active.extent_pages) * (new_num_ep - blob->active.extent_pages_array_size));
2241 			blob->active.extent_pages = ep_tmp;
2242 			blob->active.extent_pages_array_size = new_num_ep;
2243 		}
2244 	}
2245 
2246 	blob->state = SPDK_BLOB_STATE_DIRTY;
2247 
2248 	if (spdk_blob_is_thin_provisioned(blob) == false) {
2249 		cluster = 0;
2250 		lfmd = 0;
2251 		for (i = num_clusters; i < sz; i++) {
2252 			bs_allocate_cluster(blob, i, &cluster, &lfmd, true);
2253 			/* Do not increment lfmd here.  lfmd will get updated
2254 			 * to the md_page allocated (if any) when a new extent
2255 			 * page is needed.  Just pass that value again;
2256 			 * bs_allocate_cluster will start at that index to
2257 			 * find the next free md_page when needed.
2258 			 */
2259 		}
2260 	}
2261 
2262 	/* If we are shrinking the blob, we must adjust num_allocated_clusters */
2263 	for (i = sz; i < num_clusters; i++) {
2264 		if (blob->active.clusters[i] != 0) {
2265 			blob->active.num_allocated_clusters--;
2266 		}
2267 	}
2268 
2269 	blob->active.num_clusters = sz;
2270 	blob->active.num_extent_pages = new_num_ep;
2271 
2272 	rc = 0;
2273 out:
2274 	if (spdk_spin_held(&bs->used_lock)) {
2275 		spdk_spin_unlock(&bs->used_lock);
2276 	}
2277 
2278 	return rc;
2279 }
2280 
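/*
 * Serialize the blob's mutable state into a fresh chain of metadata pages.
 * Page claiming is done in two passes under bs->used_lock: the first pass
 * only verifies that enough free metadata pages exist, the second actually
 * claims them, links each page to the next, and seals it with a CRC. The
 * root page's location is fixed by the blob ID, so only pages 1..N-1 get
 * newly claimed slots.
 */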
2281 static void
2282 blob_persist_generate_new_md(struct spdk_blob_persist_ctx *ctx)
2283 {
2284 	spdk_bs_sequence_t *seq = ctx->seq;
2285 	struct spdk_blob *blob = ctx->blob;
2286 	struct spdk_blob_store *bs = blob->bs;
2287 	uint64_t i;
2288 	uint32_t page_num;
2289 	void *tmp;
2290 	int rc;
2291 
2292 	/* Generate the new metadata */
2293 	rc = blob_serialize(blob, &ctx->pages, &blob->active.num_pages);
2294 	if (rc < 0) {
2295 		blob_persist_complete(seq, ctx, rc);
2296 		return;
2297 	}
2298 
2299 	assert(blob->active.num_pages >= 1);
2300 
2301 	/* Resize the cache of page indices */
2302 	tmp = realloc(blob->active.pages, blob->active.num_pages * sizeof(*blob->active.pages));
2303 	if (!tmp) {
2304 		blob_persist_complete(seq, ctx, -ENOMEM);
2305 		return;
2306 	}
2307 	blob->active.pages = tmp;
2308 
2309 	/* Assign this metadata to pages. This requires two passes - one to verify that there are
2310 	 * enough pages and a second to actually claim them. The used_lock is held across
2311 	 * both passes to ensure things don't change in the middle.
2312 	 */
2313 	spdk_spin_lock(&bs->used_lock);
2314 	page_num = 0;
2315 	/* Note that this loop starts at one. The first page location is fixed by the blobid. */
2316 	for (i = 1; i < blob->active.num_pages; i++) {
2317 		page_num = spdk_bit_array_find_first_clear(bs->used_md_pages, page_num);
2318 		if (page_num == UINT32_MAX) {
2319 			spdk_spin_unlock(&bs->used_lock);
2320 			blob_persist_complete(seq, ctx, -ENOMEM);
2321 			return;
2322 		}
2323 		page_num++;
2324 	}
2325 
2326 	page_num = 0;
2327 	blob->active.pages[0] = bs_blobid_to_page(blob->id);
2328 	for (i = 1; i < blob->active.num_pages; i++) {
2329 		page_num = spdk_bit_array_find_first_clear(bs->used_md_pages, page_num);
2330 		ctx->pages[i - 1].next = page_num;
2331 		/* Now that previous metadata page is complete, calculate the crc for it. */
2332 		ctx->pages[i - 1].crc = blob_md_page_calc_crc(&ctx->pages[i - 1]);
2333 		blob->active.pages[i] = page_num;
2334 		bs_claim_md_page(bs, page_num);
2335 		SPDK_DEBUGLOG(blob, "Claiming page %u for blob 0x%" PRIx64 "\n", page_num,
2336 			      blob->id);
2337 		page_num++;
2338 	}
2339 	spdk_spin_unlock(&bs->used_lock);
2340 	ctx->pages[i - 1].crc = blob_md_page_calc_crc(&ctx->pages[i - 1]);
2341 	/* Start writing the metadata from last page to first */
2342 	blob->state = SPDK_BLOB_STATE_CLEAN;
2343 	blob_persist_write_page_chain(seq, ctx);
2344 }
2345 
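/*
 * Write out one dirty extent page per invocation, starting from
 * ctx->next_extent_page; the function re-enters itself as the write
 * completion callback until the resized region of the extent page array is
 * exhausted, then falls through to generating the new metadata chain.
 * Unallocated slots (id 0) belong to thin-provisioned blobs and are skipped.
 */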
2346 static void
2347 blob_persist_write_extent_pages(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2348 {
2349 	struct spdk_blob_persist_ctx	*ctx = cb_arg;
2350 	struct spdk_blob		*blob = ctx->blob;
2351 	size_t				i;
2352 	uint32_t			extent_page_id;
2353 	uint32_t                        page_count = 0;
2354 	int				rc;
2355 
2356 	if (ctx->extent_page != NULL) {
2357 		spdk_free(ctx->extent_page);
2358 		ctx->extent_page = NULL;
2359 	}
2360 
2361 	if (bserrno != 0) {
2362 		blob_persist_complete(seq, ctx, bserrno);
2363 		return;
2364 	}
2365 
2366 	/* Only write out Extent Pages when the blob was resized. */
2367 	for (i = ctx->next_extent_page; i < blob->active.extent_pages_array_size; i++) {
2368 		extent_page_id = blob->active.extent_pages[i];
2369 		if (extent_page_id == 0) {
2370 			/* No Extent Page to persist */
2371 			assert(spdk_blob_is_thin_provisioned(blob));
2372 			continue;
2373 		}
2374 		assert(spdk_bit_array_get(blob->bs->used_md_pages, extent_page_id));
2375 		ctx->next_extent_page = i + 1;
2376 		rc = blob_serialize_add_page(ctx->blob, &ctx->extent_page, &page_count, &ctx->extent_page);
2377 		if (rc < 0) {
2378 			blob_persist_complete(seq, ctx, rc);
2379 			return;
2380 		}
2381 
2382 		blob->state = SPDK_BLOB_STATE_DIRTY;
2383 		blob_serialize_extent_page(blob, i * SPDK_EXTENTS_PER_EP, ctx->extent_page);
2384 
2385 		ctx->extent_page->crc = blob_md_page_calc_crc(ctx->extent_page);
2386 
2387 		bs_sequence_write_dev(seq, ctx->extent_page, bs_md_page_to_lba(blob->bs, extent_page_id),
2388 				      bs_byte_to_lba(blob->bs, SPDK_BS_PAGE_SIZE),
2389 				      blob_persist_write_extent_pages, ctx);
2390 		return;
2391 	}
2392 
2393 	blob_persist_generate_new_md(ctx);
2394 }
2395 
2396 static void
2397 blob_persist_start(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2398 {
2399 	struct spdk_blob_persist_ctx *ctx = cb_arg;
2400 	struct spdk_blob *blob = ctx->blob;
2401 
2402 	if (bserrno != 0) {
2403 		blob_persist_complete(seq, ctx, bserrno);
2404 		return;
2405 	}
2406 
2407 	if (blob->active.num_pages == 0) {
2408 		/* This is the signal that the blob should be deleted.
2409 		 * Immediately jump to the cleanup routine. */
2410 		assert(blob->clean.num_pages > 0);
2411 		blob->state = SPDK_BLOB_STATE_CLEAN;
2412 		blob_persist_zero_pages(seq, ctx, 0);
2413 		return;
2414 
2415 	}
2416 
2417 	if (blob->clean.num_clusters < blob->active.num_clusters) {
2418 		/* Blob was resized up */
2419 		assert(blob->clean.num_extent_pages <= blob->active.num_extent_pages);
2420 		ctx->next_extent_page = spdk_max(1, blob->clean.num_extent_pages) - 1;
2421 	} else if (blob->active.num_clusters < blob->active.cluster_array_size) {
2422 		/* Blob was resized down */
2423 		assert(blob->clean.num_extent_pages >= blob->active.num_extent_pages);
2424 		ctx->next_extent_page = spdk_max(1, blob->active.num_extent_pages) - 1;
2425 	} else {
2426 		/* No change in size occurred */
2427 		blob_persist_generate_new_md(ctx);
2428 		return;
2429 	}
2430 
2431 	blob_persist_write_extent_pages(seq, ctx, 0);
2432 }
2433 
2434 struct spdk_bs_mark_dirty {
2435 	struct spdk_blob_store		*bs;
2436 	struct spdk_bs_super_block	*super;
2437 	spdk_bs_sequence_cpl		cb_fn;
2438 	void				*cb_arg;
2439 };
2440 
2441 static void
2442 bs_mark_dirty_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2443 {
2444 	struct spdk_bs_mark_dirty *ctx = cb_arg;
2445 
2446 	if (bserrno == 0) {
2447 		ctx->bs->clean = 0;
2448 	}
2449 
2450 	ctx->cb_fn(seq, ctx->cb_arg, bserrno);
2451 
2452 	spdk_free(ctx->super);
2453 	free(ctx);
2454 }
2455 
2456 static void bs_write_super(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs,
2457 			   struct spdk_bs_super_block *super, spdk_bs_sequence_cpl cb_fn, void *cb_arg);
2458 
2459 
2460 static void
2461 bs_mark_dirty_write(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2462 {
2463 	struct spdk_bs_mark_dirty *ctx = cb_arg;
2464 	int rc;
2465 
2466 	if (bserrno != 0) {
2467 		bs_mark_dirty_write_cpl(seq, ctx, bserrno);
2468 		return;
2469 	}
2470 
2471 	rc = bs_super_validate(ctx->super, ctx->bs);
2472 	if (rc != 0) {
2473 		bs_mark_dirty_write_cpl(seq, ctx, rc);
2474 		return;
2475 	}
2476 
2477 	ctx->super->clean = 0;
2478 	if (ctx->super->size == 0) {
2479 		ctx->super->size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen;
2480 	}
2481 
2482 	bs_write_super(seq, ctx->bs, ctx->super, bs_mark_dirty_write_cpl, ctx);
2483 }
2484 
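/*
 * Transition the blobstore to the "dirty" state on disk before the first
 * metadata mutation: read the super block back in, validate it, clear its
 * clean flag (filling in the size field if it was never recorded), and
 * write it out again. If the blobstore is already dirty, this is a no-op
 * and the callback fires immediately.
 */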
2485 static void
2486 bs_mark_dirty(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs,
2487 	      spdk_bs_sequence_cpl cb_fn, void *cb_arg)
2488 {
2489 	struct spdk_bs_mark_dirty *ctx;
2490 
2491 	/* Blobstore is already marked dirty */
2492 	if (bs->clean == 0) {
2493 		cb_fn(seq, cb_arg, 0);
2494 		return;
2495 	}
2496 
2497 	ctx = calloc(1, sizeof(*ctx));
2498 	if (!ctx) {
2499 		cb_fn(seq, cb_arg, -ENOMEM);
2500 		return;
2501 	}
2502 	ctx->bs = bs;
2503 	ctx->cb_fn = cb_fn;
2504 	ctx->cb_arg = cb_arg;
2505 
2506 	ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL,
2507 				  SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
2508 	if (!ctx->super) {
2509 		free(ctx);
2510 		cb_fn(seq, cb_arg, -ENOMEM);
2511 		return;
2512 	}
2513 
2514 	bs_sequence_read_dev(seq, ctx->super, bs_page_to_lba(bs, 0),
2515 			     bs_byte_to_lba(bs, sizeof(*ctx->super)),
2516 			     bs_mark_dirty_write, ctx);
2517 }
2518 
2519 /* Write a blob to disk */
2520 static void
2521 blob_persist(spdk_bs_sequence_t *seq, struct spdk_blob *blob,
2522 	     spdk_bs_sequence_cpl cb_fn, void *cb_arg)
2523 {
2524 	struct spdk_blob_persist_ctx *ctx;
2525 
2526 	blob_verify_md_op(blob);
2527 
2528 	if (blob->state == SPDK_BLOB_STATE_CLEAN && TAILQ_EMPTY(&blob->persists_to_complete)) {
2529 		cb_fn(seq, cb_arg, 0);
2530 		return;
2531 	}
2532 
2533 	ctx = calloc(1, sizeof(*ctx));
2534 	if (!ctx) {
2535 		cb_fn(seq, cb_arg, -ENOMEM);
2536 		return;
2537 	}
2538 	ctx->blob = blob;
2539 	ctx->seq = seq;
2540 	ctx->cb_fn = cb_fn;
2541 	ctx->cb_arg = cb_arg;
2542 
2543 	/* Multiple blob persists can affect one another via blob->state or changes
2544 	 * to the blob's mutable data. To prevent that, queue up the persists. */
2545 	if (!TAILQ_EMPTY(&blob->persists_to_complete)) {
2546 		TAILQ_INSERT_TAIL(&blob->pending_persists, ctx, link);
2547 		return;
2548 	}
2549 	TAILQ_INSERT_HEAD(&blob->persists_to_complete, ctx, link);
2550 
2551 	bs_mark_dirty(seq, blob->bs, blob_persist_start, ctx);
2552 }
2553 
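/*
 * Context for copy-on-write cluster allocation: tracks the newly claimed
 * cluster (and extent page, when extent tables are in use), the cluster's
 * first page within the blob, and an optional bounce buffer used when the
 * backing dev contents must be read and rewritten rather than copied or
 * skipped.
 */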
2554 struct spdk_blob_copy_cluster_ctx {
2555 	struct spdk_blob *blob;
2556 	uint8_t *buf;
2557 	uint64_t page;
2558 	uint64_t new_cluster;
2559 	uint32_t new_extent_page;
2560 	spdk_bs_sequence_t *seq;
2561 	struct spdk_blob_md_page *new_cluster_page;
2562 };
2563 
2564 struct spdk_blob_free_cluster_ctx {
2565 	struct spdk_blob *blob;
2566 	uint64_t page;
2567 	struct spdk_blob_md_page *md_page;
2568 	uint64_t cluster_num;
2569 	uint32_t extent_page;
2570 	spdk_bs_sequence_t *seq;
2571 };
2572 
2573 static void
2574 blob_allocate_and_copy_cluster_cpl(void *cb_arg, int bserrno)
2575 {
2576 	struct spdk_blob_copy_cluster_ctx *ctx = cb_arg;
2577 	struct spdk_bs_request_set *set = (struct spdk_bs_request_set *)ctx->seq;
2578 	TAILQ_HEAD(, spdk_bs_request_set) requests;
2579 	spdk_bs_user_op_t *op;
2580 
2581 	TAILQ_INIT(&requests);
2582 	TAILQ_SWAP(&set->channel->need_cluster_alloc, &requests, spdk_bs_request_set, link);
2583 
2584 	while (!TAILQ_EMPTY(&requests)) {
2585 		op = TAILQ_FIRST(&requests);
2586 		TAILQ_REMOVE(&requests, op, link);
2587 		if (bserrno == 0) {
2588 			bs_user_op_execute(op);
2589 		} else {
2590 			bs_user_op_abort(op, bserrno);
2591 		}
2592 	}
2593 
2594 	spdk_free(ctx->buf);
2595 	free(ctx);
2596 }
2597 
2598 static void
2599 blob_free_cluster_cpl(void *cb_arg, int bserrno)
2600 {
2601 	struct spdk_blob_free_cluster_ctx *ctx = cb_arg;
2602 	spdk_bs_sequence_t *seq = ctx->seq;
2603 
2604 	bs_sequence_finish(seq, bserrno);
2605 
2606 	free(ctx);
2607 }
2608 
2609 static void
2610 blob_insert_cluster_revert(struct spdk_blob_copy_cluster_ctx *ctx)
2611 {
2612 	spdk_spin_lock(&ctx->blob->bs->used_lock);
2613 	bs_release_cluster(ctx->blob->bs, ctx->new_cluster);
2614 	if (ctx->new_extent_page != 0) {
2615 		bs_release_md_page(ctx->blob->bs, ctx->new_extent_page);
2616 	}
2617 	spdk_spin_unlock(&ctx->blob->bs->used_lock);
2618 }
2619 
2620 static void
2621 blob_insert_cluster_clear_cpl(void *cb_arg, int bserrno)
2622 {
2623 	struct spdk_blob_copy_cluster_ctx *ctx = cb_arg;
2624 
2625 	if (bserrno) {
2626 		SPDK_WARNLOG("Failed to clear cluster: %d\n", bserrno);
2627 	}
2628 
2629 	blob_insert_cluster_revert(ctx);
2630 	bs_sequence_finish(ctx->seq, bserrno);
2631 }
2632 
2633 static void
2634 blob_insert_cluster_clear(struct spdk_blob_copy_cluster_ctx *ctx)
2635 {
2636 	struct spdk_bs_cpl cpl;
2637 	spdk_bs_batch_t *batch;
2638 	struct spdk_io_channel *ch = spdk_io_channel_from_ctx(ctx->seq->channel);
2639 
2640 	/*
2641 	 * We allocated a cluster and copied data into it, but now we have realized that we don't
2642 	 * need this cluster after all and want to release it. We must ensure that we clear the
2643 	 * data on this cluster.
2644 	 * The cluster may later be re-allocated by a thick-provisioned blob, for example. When
2645 	 * reading from that thick-provisioned blob before writing data, we should read zeroes.
2646 	 */
2647 
2648 	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
2649 	cpl.u.blob_basic.cb_fn = blob_insert_cluster_clear_cpl;
2650 	cpl.u.blob_basic.cb_arg = ctx;
2651 
2652 	batch = bs_batch_open(ch, &cpl, ctx->blob);
2653 	if (!batch) {
2654 		blob_insert_cluster_clear_cpl(ctx, -ENOMEM);
2655 		return;
2656 	}
2657 
2658 	bs_batch_clear_dev(ctx->blob, batch, bs_cluster_to_lba(ctx->blob->bs, ctx->new_cluster),
2659 			   bs_cluster_to_lba(ctx->blob->bs, 1));
2660 	bs_batch_close(batch);
2661 }
2662 
2663 static void
2664 blob_insert_cluster_cpl(void *cb_arg, int bserrno)
2665 {
2666 	struct spdk_blob_copy_cluster_ctx *ctx = cb_arg;
2667 
2668 	if (bserrno) {
2669 		if (bserrno == -EEXIST) {
2670 			/* The metadata insert failed because another thread
2671 			 * allocated the cluster first. Clear and free our cluster
2672 			 * but continue without error. */
2673 			blob_insert_cluster_clear(ctx);
2674 			return;
2675 		}
2676 
2677 		blob_insert_cluster_revert(ctx);
2678 	}
2679 
2680 	bs_sequence_finish(ctx->seq, bserrno);
2681 }
2682 
2683 static void
2684 blob_write_copy_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2685 {
2686 	struct spdk_blob_copy_cluster_ctx *ctx = cb_arg;
2687 	uint32_t cluster_number;
2688 
2689 	if (bserrno) {
2690 		/* The write failed, so jump to the final completion handler */
2691 		bs_sequence_finish(seq, bserrno);
2692 		return;
2693 	}
2694 
2695 	cluster_number = bs_page_to_cluster(ctx->blob->bs, ctx->page);
2696 
2697 	blob_insert_cluster_on_md_thread(ctx->blob, cluster_number, ctx->new_cluster,
2698 					 ctx->new_extent_page, ctx->new_cluster_page, blob_insert_cluster_cpl, ctx);
2699 }
2700 
2701 static void
2702 blob_write_copy(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2703 {
2704 	struct spdk_blob_copy_cluster_ctx *ctx = cb_arg;
2705 
2706 	if (bserrno != 0) {
2707 		/* The read failed, so jump to the final completion handler */
2708 		bs_sequence_finish(seq, bserrno);
2709 		return;
2710 	}
2711 
2712 	/* Write whole cluster */
2713 	bs_sequence_write_dev(seq, ctx->buf,
2714 			      bs_cluster_to_lba(ctx->blob->bs, ctx->new_cluster),
2715 			      bs_cluster_to_lba(ctx->blob->bs, 1),
2716 			      blob_write_copy_cpl, ctx);
2717 }
2718 
2719 static bool
2720 blob_can_copy(struct spdk_blob *blob, uint32_t cluster_start_page, uint64_t *base_lba)
2721 {
2722 	uint64_t lba = bs_dev_page_to_lba(blob->back_bs_dev, cluster_start_page);
2723 
2724 	return (!blob_is_esnap_clone(blob) && blob->bs->dev->copy != NULL) &&
2725 	       blob->back_bs_dev->translate_lba(blob->back_bs_dev, lba, base_lba);
2726 }
2727 
2728 static void
2729 blob_copy(struct spdk_blob_copy_cluster_ctx *ctx, spdk_bs_user_op_t *op, uint64_t src_lba)
2730 {
2731 	struct spdk_blob *blob = ctx->blob;
2732 	uint64_t lba_count = bs_dev_byte_to_lba(blob->back_bs_dev, blob->bs->cluster_sz);
2733 
2734 	bs_sequence_copy_dev(ctx->seq,
2735 			     bs_cluster_to_lba(blob->bs, ctx->new_cluster),
2736 			     src_lba,
2737 			     lba_count,
2738 			     blob_write_copy_cpl, ctx);
2739 }
2740 
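/*
 * Allocate a cluster for a write that landed on an unallocated region and
 * populate it from the backing dev. Three population strategies exist, in
 * order of preference: skip the copy entirely when the backing range reads
 * as zeroes (or the blob has no parent), offload it via the device's copy
 * command when translate_lba can produce a source LBA on the same device,
 * or fall back to a read into a DMA bounce buffer followed by a full
 * cluster write. The triggering user op is queued on need_cluster_alloc
 * and re-executed once the metadata insert completes.
 */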
2741 static void
2742 bs_allocate_and_copy_cluster(struct spdk_blob *blob,
2743 			     struct spdk_io_channel *_ch,
2744 			     uint64_t io_unit, spdk_bs_user_op_t *op)
2745 {
2746 	struct spdk_bs_cpl cpl;
2747 	struct spdk_bs_channel *ch;
2748 	struct spdk_blob_copy_cluster_ctx *ctx;
2749 	uint32_t cluster_start_page;
2750 	uint32_t cluster_number;
2751 	bool is_zeroes;
2752 	bool can_copy;
2753 	bool is_valid_range;
2754 	uint64_t copy_src_lba;
2755 	int rc;
2756 
2757 	ch = spdk_io_channel_get_ctx(_ch);
2758 
2759 	if (!TAILQ_EMPTY(&ch->need_cluster_alloc)) {
2760 		/* There are already operations pending. Queue this user op
2761 		 * and return because it will be re-executed when the outstanding
2762 		 * cluster allocation completes. */
2763 		TAILQ_INSERT_TAIL(&ch->need_cluster_alloc, op, link);
2764 		return;
2765 	}
2766 
2767 	/* Round the io_unit offset down to the first page in the cluster */
2768 	cluster_start_page = bs_io_unit_to_cluster_start(blob, io_unit);
2769 
2770 	/* Calculate which index in the metadata cluster array the corresponding
2771 	 * cluster is supposed to be at. */
2772 	cluster_number = bs_io_unit_to_cluster_number(blob, io_unit);
2773 
2774 	ctx = calloc(1, sizeof(*ctx));
2775 	if (!ctx) {
2776 		bs_user_op_abort(op, -ENOMEM);
2777 		return;
2778 	}
2779 
2780 	assert(blob->bs->cluster_sz % blob->back_bs_dev->blocklen == 0);
2781 
2782 	ctx->blob = blob;
2783 	ctx->page = cluster_start_page;
2784 	ctx->new_cluster_page = ch->new_cluster_page;
2785 	memset(ctx->new_cluster_page, 0, SPDK_BS_PAGE_SIZE);
2786 
2787 	/* Check if the cluster that we intend to do CoW for is valid for
2788 	 * the backing dev. For a zeroes backing dev it is always valid.
2789 	 * For other backing devs, e.g. a snapshot, it could be invalid if
2790 	 * the blob has been resized after the snapshot was taken. */
2791 	is_valid_range = blob->back_bs_dev->is_range_valid(blob->back_bs_dev,
2792 			 bs_dev_page_to_lba(blob->back_bs_dev, cluster_start_page),
2793 			 bs_dev_byte_to_lba(blob->back_bs_dev, blob->bs->cluster_sz));
2794 
2795 	can_copy = is_valid_range && blob_can_copy(blob, cluster_start_page, &copy_src_lba);
2796 
2797 	is_zeroes = is_valid_range && blob->back_bs_dev->is_zeroes(blob->back_bs_dev,
2798 			bs_dev_page_to_lba(blob->back_bs_dev, cluster_start_page),
2799 			bs_dev_byte_to_lba(blob->back_bs_dev, blob->bs->cluster_sz));
2800 	if (blob->parent_id != SPDK_BLOBID_INVALID && !is_zeroes && !can_copy) {
2801 		ctx->buf = spdk_malloc(blob->bs->cluster_sz, blob->back_bs_dev->blocklen,
2802 				       NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
2803 		if (!ctx->buf) {
2804 			SPDK_ERRLOG("DMA allocation for cluster of size = %" PRIu32 " failed.\n",
2805 				    blob->bs->cluster_sz);
2806 			free(ctx);
2807 			bs_user_op_abort(op, -ENOMEM);
2808 			return;
2809 		}
2810 	}
2811 
2812 	spdk_spin_lock(&blob->bs->used_lock);
2813 	rc = bs_allocate_cluster(blob, cluster_number, &ctx->new_cluster, &ctx->new_extent_page,
2814 				 false);
2815 	spdk_spin_unlock(&blob->bs->used_lock);
2816 	if (rc != 0) {
2817 		spdk_free(ctx->buf);
2818 		free(ctx);
2819 		bs_user_op_abort(op, rc);
2820 		return;
2821 	}
2822 
2823 	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
2824 	cpl.u.blob_basic.cb_fn = blob_allocate_and_copy_cluster_cpl;
2825 	cpl.u.blob_basic.cb_arg = ctx;
2826 
2827 	ctx->seq = bs_sequence_start_blob(_ch, &cpl, blob);
2828 	if (!ctx->seq) {
2829 		spdk_spin_lock(&blob->bs->used_lock);
2830 		bs_release_cluster(blob->bs, ctx->new_cluster);
2831 		spdk_spin_unlock(&blob->bs->used_lock);
2832 		spdk_free(ctx->buf);
2833 		free(ctx);
2834 		bs_user_op_abort(op, -ENOMEM);
2835 		return;
2836 	}
2837 
2838 	/* Queue the user op to block other incoming operations */
2839 	TAILQ_INSERT_TAIL(&ch->need_cluster_alloc, op, link);
2840 
2841 	if (blob->parent_id != SPDK_BLOBID_INVALID && !is_zeroes) {
2842 		if (can_copy) {
2843 			blob_copy(ctx, op, copy_src_lba);
2844 		} else {
2845 			/* Read cluster from backing device */
2846 			bs_sequence_read_bs_dev(ctx->seq, blob->back_bs_dev, ctx->buf,
2847 						bs_dev_page_to_lba(blob->back_bs_dev, cluster_start_page),
2848 						bs_dev_byte_to_lba(blob->back_bs_dev, blob->bs->cluster_sz),
2849 						blob_write_copy, ctx);
2850 		}
2851 
2852 	} else {
2853 		blob_insert_cluster_on_md_thread(ctx->blob, cluster_number, ctx->new_cluster,
2854 						 ctx->new_extent_page, ctx->new_cluster_page, blob_insert_cluster_cpl, ctx);
2855 	}
2856 }
2857 
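/*
 * Translate a blob-relative io_unit offset into an LBA. Returns true when
 * the io_unit falls in an allocated cluster, in which case *lba is an LBA
 * on the blobstore device; returns false for unallocated regions, in which
 * case *lba and *lba_count are expressed in the backing dev's block size
 * instead.
 */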
2858 static inline bool
2859 blob_calculate_lba_and_lba_count(struct spdk_blob *blob, uint64_t io_unit, uint64_t length,
2860 				 uint64_t *lba,	uint64_t *lba_count)
2861 {
2862 	*lba_count = length;
2863 
2864 	if (!bs_io_unit_is_allocated(blob, io_unit)) {
2865 		assert(blob->back_bs_dev != NULL);
2866 		*lba = bs_io_unit_to_back_dev_lba(blob, io_unit);
2867 		*lba_count = bs_io_unit_to_back_dev_lba(blob, *lba_count);
2868 		return false;
2869 	} else {
2870 		*lba = bs_blob_io_unit_to_lba(blob, io_unit);
2871 		return true;
2872 	}
2873 }
2874 
2875 struct op_split_ctx {
2876 	struct spdk_blob *blob;
2877 	struct spdk_io_channel *channel;
2878 	uint64_t io_unit_offset;
2879 	uint64_t io_units_remaining;
2880 	void *curr_payload;
2881 	enum spdk_blob_op_type op_type;
2882 	spdk_bs_sequence_t *seq;
2883 	bool in_submit_ctx;
2884 	bool completed_in_submit_ctx;
2885 	bool done;
2886 };
2887 
2888 static void
2889 blob_request_submit_op_split_next(void *cb_arg, int bserrno)
2890 {
2891 	struct op_split_ctx	*ctx = cb_arg;
2892 	struct spdk_blob	*blob = ctx->blob;
2893 	struct spdk_io_channel	*ch = ctx->channel;
2894 	enum spdk_blob_op_type	op_type = ctx->op_type;
2895 	uint8_t			*buf;
2896 	uint64_t		offset;
2897 	uint64_t		length;
2898 	uint64_t		op_length;
2899 
2900 	if (bserrno != 0 || ctx->io_units_remaining == 0) {
2901 		bs_sequence_finish(ctx->seq, bserrno);
2902 		if (ctx->in_submit_ctx) {
2903 			/* Defer freeing of the ctx object, since it will be
2904 			 * accessed when this unwinds back to the submission
2905 			 * context.
2906 			 */
2907 			ctx->done = true;
2908 		} else {
2909 			free(ctx);
2910 		}
2911 		return;
2912 	}
2913 
2914 	if (ctx->in_submit_ctx) {
2915 		/* If this split operation completed in the context
2916 		 * of its submission, mark the flag and return immediately
2917 		 * to avoid recursion.
2918 		 */
2919 		ctx->completed_in_submit_ctx = true;
2920 		return;
2921 	}
2922 
2923 	while (true) {
2924 		ctx->completed_in_submit_ctx = false;
2925 
2926 		offset = ctx->io_unit_offset;
2927 		length = ctx->io_units_remaining;
2928 		buf = ctx->curr_payload;
2929 		op_length = spdk_min(length, bs_num_io_units_to_cluster_boundary(blob,
2930 				     offset));
2931 
2932 		/* Update length and payload for next operation */
2933 		ctx->io_units_remaining -= op_length;
2934 		ctx->io_unit_offset += op_length;
2935 		if (op_type == SPDK_BLOB_WRITE || op_type == SPDK_BLOB_READ) {
2936 			ctx->curr_payload += op_length * blob->bs->io_unit_size;
2937 		}
2938 
2939 		assert(!ctx->in_submit_ctx);
2940 		ctx->in_submit_ctx = true;
2941 
2942 		switch (op_type) {
2943 		case SPDK_BLOB_READ:
2944 			spdk_blob_io_read(blob, ch, buf, offset, op_length,
2945 					  blob_request_submit_op_split_next, ctx);
2946 			break;
2947 		case SPDK_BLOB_WRITE:
2948 			spdk_blob_io_write(blob, ch, buf, offset, op_length,
2949 					   blob_request_submit_op_split_next, ctx);
2950 			break;
2951 		case SPDK_BLOB_UNMAP:
2952 			spdk_blob_io_unmap(blob, ch, offset, op_length,
2953 					   blob_request_submit_op_split_next, ctx);
2954 			break;
2955 		case SPDK_BLOB_WRITE_ZEROES:
2956 			spdk_blob_io_write_zeroes(blob, ch, offset, op_length,
2957 						  blob_request_submit_op_split_next, ctx);
2958 			break;
2959 		case SPDK_BLOB_READV:
2960 		case SPDK_BLOB_WRITEV:
2961 			SPDK_ERRLOG("readv/writev not valid\n");
2962 			bs_sequence_finish(ctx->seq, -EINVAL);
2963 			free(ctx);
2964 			return;
2965 		}
2966 
2967 #ifndef __clang_analyzer__
2968 		/* scan-build reports a false positive around accessing the ctx here. It
2969 		 * forms a path that recursively calls this function, but then says
2970 		 * "assuming ctx->in_submit_ctx is false", when that isn't possible.
2971 		 * This path does free(ctx), returns to here, and reports a use-after-free
2972 		 * bug.  Wrapping this bit of code so that scan-build doesn't see it
2973 		 * works around the scan-build bug.
2974 		 */
2975 		assert(ctx->in_submit_ctx);
2976 		ctx->in_submit_ctx = false;
2977 
2978 		/* If the operation completed immediately, loop back and submit the
2979 		 * next operation.  Otherwise we can return and the next split
2980 		 * operation will get submitted when this current operation is
2981 		 * later completed asynchronously.
2982 		 */
2983 		if (ctx->completed_in_submit_ctx) {
2984 			continue;
2985 		} else if (ctx->done) {
2986 			free(ctx);
2987 		}
2988 #endif
2989 		break;
2990 	}
2991 }
2992 
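/*
 * Submit an operation that spans at least one cluster boundary by carving
 * it into per-cluster chunks; blob_request_submit_op_split_next issues the
 * chunks one at a time. Its loop-with-flags structure lets chunks that
 * complete synchronously continue iteratively rather than recursing once
 * per chunk.
 */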
2993 static void
2994 blob_request_submit_op_split(struct spdk_io_channel *ch, struct spdk_blob *blob,
2995 			     void *payload, uint64_t offset, uint64_t length,
2996 			     spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type)
2997 {
2998 	struct op_split_ctx *ctx;
2999 	spdk_bs_sequence_t *seq;
3000 	struct spdk_bs_cpl cpl;
3001 
3002 	assert(blob != NULL);
3003 
3004 	ctx = calloc(1, sizeof(struct op_split_ctx));
3005 	if (ctx == NULL) {
3006 		cb_fn(cb_arg, -ENOMEM);
3007 		return;
3008 	}
3009 
3010 	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
3011 	cpl.u.blob_basic.cb_fn = cb_fn;
3012 	cpl.u.blob_basic.cb_arg = cb_arg;
3013 
3014 	seq = bs_sequence_start_blob(ch, &cpl, blob);
3015 	if (!seq) {
3016 		free(ctx);
3017 		cb_fn(cb_arg, -ENOMEM);
3018 		return;
3019 	}
3020 
3021 	ctx->blob = blob;
3022 	ctx->channel = ch;
3023 	ctx->curr_payload = payload;
3024 	ctx->io_unit_offset = offset;
3025 	ctx->io_units_remaining = length;
3026 	ctx->op_type = op_type;
3027 	ctx->seq = seq;
3028 
3029 	blob_request_submit_op_split_next(ctx, 0);
3030 }
3031 
3032 static void
3033 spdk_free_cluster_unmap_complete(void *cb_arg, int bserrno)
3034 {
3035 	struct spdk_blob_free_cluster_ctx *ctx = cb_arg;
3036 
3037 	if (bserrno) {
3038 		bs_sequence_finish(ctx->seq, bserrno);
3039 		free(ctx);
3040 		return;
3041 	}
3042 
3043 	blob_free_cluster_on_md_thread(ctx->blob, ctx->cluster_num,
3044 				       ctx->extent_page, ctx->md_page, blob_free_cluster_cpl, ctx);
3045 }
3046 
3047 static void
3048 blob_request_submit_op_single(struct spdk_io_channel *_ch, struct spdk_blob *blob,
3049 			      void *payload, uint64_t offset, uint64_t length,
3050 			      spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type)
3051 {
3052 	struct spdk_bs_cpl cpl;
3053 	uint64_t lba;
3054 	uint64_t lba_count;
3055 	bool is_allocated;
3056 
3057 	assert(blob != NULL);
3058 
3059 	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
3060 	cpl.u.blob_basic.cb_fn = cb_fn;
3061 	cpl.u.blob_basic.cb_arg = cb_arg;
3062 
3063 	if (blob->frozen_refcnt) {
3064 		/* This blob I/O is frozen */
3065 		spdk_bs_user_op_t *op;
3066 		struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(_ch);
3067 
3068 		op = bs_user_op_alloc(_ch, &cpl, op_type, blob, payload, 0, offset, length);
3069 		if (!op) {
3070 			cb_fn(cb_arg, -ENOMEM);
3071 			return;
3072 		}
3073 
3074 		TAILQ_INSERT_TAIL(&bs_channel->queued_io, op, link);
3075 
3076 		return;
3077 	}
3078 
3079 	is_allocated = blob_calculate_lba_and_lba_count(blob, offset, length, &lba, &lba_count);
3080 
3081 	switch (op_type) {
3082 	case SPDK_BLOB_READ: {
3083 		spdk_bs_batch_t *batch;
3084 
3085 		batch = bs_batch_open(_ch, &cpl, blob);
3086 		if (!batch) {
3087 			cb_fn(cb_arg, -ENOMEM);
3088 			return;
3089 		}
3090 
3091 		if (is_allocated) {
3092 			/* Read from the blob */
3093 			bs_batch_read_dev(batch, payload, lba, lba_count);
3094 		} else {
3095 			/* Read from the backing block device */
3096 			bs_batch_read_bs_dev(batch, blob->back_bs_dev, payload, lba, lba_count);
3097 		}
3098 
3099 		bs_batch_close(batch);
3100 		break;
3101 	}
3102 	case SPDK_BLOB_WRITE:
3103 	case SPDK_BLOB_WRITE_ZEROES: {
3104 		if (is_allocated) {
3105 			/* Write to the blob */
3106 			spdk_bs_batch_t *batch;
3107 
3108 			if (lba_count == 0) {
3109 				cb_fn(cb_arg, 0);
3110 				return;
3111 			}
3112 
3113 			batch = bs_batch_open(_ch, &cpl, blob);
3114 			if (!batch) {
3115 				cb_fn(cb_arg, -ENOMEM);
3116 				return;
3117 			}
3118 
3119 			if (op_type == SPDK_BLOB_WRITE) {
3120 				bs_batch_write_dev(batch, payload, lba, lba_count);
3121 			} else {
3122 				bs_batch_write_zeroes_dev(batch, lba, lba_count);
3123 			}
3124 
3125 			bs_batch_close(batch);
3126 		} else {
3127 			/* Queue this operation and allocate the cluster */
3128 			spdk_bs_user_op_t *op;
3129 
3130 			op = bs_user_op_alloc(_ch, &cpl, op_type, blob, payload, 0, offset, length);
3131 			if (!op) {
3132 				cb_fn(cb_arg, -ENOMEM);
3133 				return;
3134 			}
3135 
3136 			bs_allocate_and_copy_cluster(blob, _ch, offset, op);
3137 		}
3138 		break;
3139 	}
3140 	case SPDK_BLOB_UNMAP: {
3141 		struct spdk_blob_free_cluster_ctx *ctx = NULL;
3142 		spdk_bs_batch_t *batch;
3143 
3144 		/* If the unmap covers a whole aligned cluster, release the cluster */
3145 		if (spdk_blob_is_thin_provisioned(blob) && is_allocated &&
3146 		    bs_io_units_per_cluster(blob) == length) {
3147 			struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(_ch);
3148 			uint32_t cluster_start_page;
3149 			uint32_t cluster_number;
3150 
3151 			assert(offset % bs_io_units_per_cluster(blob) == 0);
3152 
3153 			/* Round the io_unit offset down to the first page in the cluster */
3154 			cluster_start_page = bs_io_unit_to_cluster_start(blob, offset);
3155 
3156 			/* Calculate which index in the metadata cluster array the corresponding
3157 			 * cluster is supposed to be at. */
3158 			cluster_number = bs_io_unit_to_cluster_number(blob, offset);
3159 
3160 			ctx = calloc(1, sizeof(*ctx));
3161 			if (!ctx) {
3162 				cb_fn(cb_arg, -ENOMEM);
3163 				return;
3164 			}
3165 			/* When freeing a cluster the flow should be (in order):
3166 			 * 1. Unmap the underlying area (so if the cluster is reclaimed in the future, it won't leak
3167 			 * old data)
3168 			 * 2. Once the unmap completes (to avoid any races with incoming writes that may claim the
3169 			 * cluster), update and sync the metadata, freeing the cluster
3170 			 * 3. Once the metadata update is done, complete the user unmap request
3171 			 */
3172 			ctx->blob = blob;
3173 			ctx->page = cluster_start_page;
3174 			ctx->cluster_num = cluster_number;
3175 			ctx->md_page = bs_channel->new_cluster_page;
3176 			ctx->seq = bs_sequence_start_bs(_ch, &cpl);
3177 			if (!ctx->seq) {
3178 				free(ctx);
3179 				cb_fn(cb_arg, -ENOMEM);
3180 				return;
3181 			}
3182 
3183 			if (blob->use_extent_table) {
3184 				ctx->extent_page = *bs_cluster_to_extent_page(blob, cluster_number);
3185 			}
3186 
3187 			cpl.u.blob_basic.cb_fn = spdk_free_cluster_unmap_complete;
3188 			cpl.u.blob_basic.cb_arg = ctx;
3189 		}
3190 
3191 		batch = bs_batch_open(_ch, &cpl, blob);
3192 		if (!batch) {
3193 			free(ctx);
3194 			cb_fn(cb_arg, -ENOMEM);
3195 			return;
3196 		}
3197 
3198 		if (is_allocated) {
3199 			bs_batch_unmap_dev(batch, lba, lba_count);
3200 		}
3201 
3202 		bs_batch_close(batch);
3203 		break;
3204 	}
3205 	case SPDK_BLOB_READV:
3206 	case SPDK_BLOB_WRITEV:
3207 		SPDK_ERRLOG("readv/writev not valid\n");
3208 		cb_fn(cb_arg, -EINVAL);
3209 		break;
3210 	}
3211 }
3212 
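/*
 * Common entry point for single-buffer reads, writes, unmaps, and
 * write-zeroes. Rejects writes to read-only blobs (-EPERM) and
 * out-of-bounds ranges (-EINVAL), completes zero-length requests
 * immediately, and otherwise routes to the single-cluster fast path or the
 * boundary-splitting slow path. For example (hypothetical values), a call
 * such as:
 *
 *     spdk_blob_io_write(blob, channel, payload, 0, 8, write_done, NULL);
 *
 * arrives here with op_type == SPDK_BLOB_WRITE and, if those 8 io_units do
 * not cross a cluster boundary, is submitted as a single operation.
 */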
3213 static void
3214 blob_request_submit_op(struct spdk_blob *blob, struct spdk_io_channel *_channel,
3215 		       void *payload, uint64_t offset, uint64_t length,
3216 		       spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type)
3217 {
3218 	assert(blob != NULL);
3219 
3220 	if (blob->data_ro && op_type != SPDK_BLOB_READ) {
3221 		cb_fn(cb_arg, -EPERM);
3222 		return;
3223 	}
3224 
3225 	if (length == 0) {
3226 		cb_fn(cb_arg, 0);
3227 		return;
3228 	}
3229 
3230 	if (offset + length > bs_cluster_to_lba(blob->bs, blob->active.num_clusters)) {
3231 		cb_fn(cb_arg, -EINVAL);
3232 		return;
3233 	}
3234 	if (length <= bs_num_io_units_to_cluster_boundary(blob, offset)) {
3235 		blob_request_submit_op_single(_channel, blob, payload, offset, length,
3236 					      cb_fn, cb_arg, op_type);
3237 	} else {
3238 		blob_request_submit_op_split(_channel, blob, payload, offset, length,
3239 					     cb_fn, cb_arg, op_type);
3240 	}
3241 }
3242 
3243 struct rw_iov_ctx {
3244 	struct spdk_blob *blob;
3245 	struct spdk_io_channel *channel;
3246 	spdk_blob_op_complete cb_fn;
3247 	void *cb_arg;
3248 	bool read;
3249 	int iovcnt;
3250 	struct iovec *orig_iov;
3251 	uint64_t io_unit_offset;
3252 	uint64_t io_units_remaining;
3253 	uint64_t io_units_done;
3254 	struct spdk_blob_ext_io_opts *ext_io_opts;
3255 	struct iovec iov[0];
3256 };
3257 
3258 static void
3259 rw_iov_done(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
3260 {
3261 	assert(cb_arg == NULL);
3262 	bs_sequence_finish(seq, bserrno);
3263 }
3264 
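/*
 * Issue the next cluster-bounded slice of a split readv/writev. Each pass
 * walks the original iov array to the current byte position, then builds a
 * sub-iov (in ctx->iov, sized for the worst case at allocation time)
 * covering exactly the io_units up to the next cluster boundary.
 */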
3265 static void
3266 rw_iov_split_next(void *cb_arg, int bserrno)
3267 {
3268 	struct rw_iov_ctx *ctx = cb_arg;
3269 	struct spdk_blob *blob = ctx->blob;
3270 	struct iovec *iov, *orig_iov;
3271 	int iovcnt;
3272 	size_t orig_iovoff;
3273 	uint64_t io_units_count, io_units_to_boundary, io_unit_offset;
3274 	uint64_t byte_count;
3275 
3276 	if (bserrno != 0 || ctx->io_units_remaining == 0) {
3277 		ctx->cb_fn(ctx->cb_arg, bserrno);
3278 		free(ctx);
3279 		return;
3280 	}
3281 
3282 	io_unit_offset = ctx->io_unit_offset;
3283 	io_units_to_boundary = bs_num_io_units_to_cluster_boundary(blob, io_unit_offset);
3284 	io_units_count = spdk_min(ctx->io_units_remaining, io_units_to_boundary);
3285 	/*
3286 	 * Get the index and offset into the original iov array for our current position in the I/O sequence.
3287 	 *  byte_count keeps track of how many bytes remain until orig_iov and orig_iovoff
3288 	 *  point to the current position in the I/O sequence.
3289 	 */
3290 	byte_count = ctx->io_units_done * blob->bs->io_unit_size;
3291 	orig_iov = &ctx->orig_iov[0];
3292 	orig_iovoff = 0;
3293 	while (byte_count > 0) {
3294 		if (byte_count >= orig_iov->iov_len) {
3295 			byte_count -= orig_iov->iov_len;
3296 			orig_iov++;
3297 		} else {
3298 			orig_iovoff = byte_count;
3299 			byte_count = 0;
3300 		}
3301 	}
3302 
3303 	/*
3304 	 * Build an iov array for the next I/O in the sequence.  byte_count will keep track of how many
3305 	 *  bytes of this next I/O remain to be accounted for in the new iov array.
3306 	 */
3307 	byte_count = io_units_count * blob->bs->io_unit_size;
3308 	iov = &ctx->iov[0];
3309 	iovcnt = 0;
3310 	while (byte_count > 0) {
3311 		assert(iovcnt < ctx->iovcnt);
3312 		iov->iov_len = spdk_min(byte_count, orig_iov->iov_len - orig_iovoff);
3313 		iov->iov_base = orig_iov->iov_base + orig_iovoff;
3314 		byte_count -= iov->iov_len;
3315 		orig_iovoff = 0;
3316 		orig_iov++;
3317 		iov++;
3318 		iovcnt++;
3319 	}
3320 
3321 	ctx->io_unit_offset += io_units_count;
3322 	ctx->io_units_remaining -= io_units_count;
3323 	ctx->io_units_done += io_units_count;
3324 	iov = &ctx->iov[0];
3325 
3326 	if (ctx->read) {
3327 		spdk_blob_io_readv_ext(ctx->blob, ctx->channel, iov, iovcnt, io_unit_offset,
3328 				       io_units_count, rw_iov_split_next, ctx, ctx->ext_io_opts);
3329 	} else {
3330 		spdk_blob_io_writev_ext(ctx->blob, ctx->channel, iov, iovcnt, io_unit_offset,
3331 					io_units_count, rw_iov_split_next, ctx, ctx->ext_io_opts);
3332 	}
3333 }
3334 
3335 static void
3336 blob_request_submit_rw_iov(struct spdk_blob *blob, struct spdk_io_channel *_channel,
3337 			   struct iovec *iov, int iovcnt,
3338 			   uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg, bool read,
3339 			   struct spdk_blob_ext_io_opts *ext_io_opts)
3340 {
3341 	struct spdk_bs_cpl	cpl;
3342 
3343 	assert(blob != NULL);
3344 
3345 	if (!read && blob->data_ro) {
3346 		cb_fn(cb_arg, -EPERM);
3347 		return;
3348 	}
3349 
3350 	if (length == 0) {
3351 		cb_fn(cb_arg, 0);
3352 		return;
3353 	}
3354 
3355 	if (offset + length > bs_cluster_to_lba(blob->bs, blob->active.num_clusters)) {
3356 		cb_fn(cb_arg, -EINVAL);
3357 		return;
3358 	}
3359 
3360 	/*
3361 	 * For now, we implement readv/writev using a sequence (instead of a batch) to account for having
3362 	 *  to split a request that spans a cluster boundary.  For I/O that do not span a cluster boundary,
3363 	 *  there will be no noticeable difference compared to using a batch.  For I/O that do span a cluster
3364 	 *  boundary, the target LBAs (after blob offset to LBA translation) may not be contiguous, so we need
3365 	 *  to allocate a separate iov array and split the I/O such that none of the resulting
3366 	 *  smaller I/O cross a cluster boundary.  These smaller I/O will be issued in sequence (not in parallel)
3367 	 *  but since this case happens very infrequently, any performance impact will be negligible.
3368 	 *
3369 	 * This could be optimized in the future to allocate a big enough iov array to account for all of the iovs
3370 	 *  for all of the smaller I/Os, pre-build all of the iov arrays for the smaller I/Os, then issue them
3371 	 *  in a batch.  That would also require creating an intermediate spdk_bs_cpl that would get called
3372 	 *  when the batch was completed, to allow for freeing the memory for the iov arrays.
3373 	 */
3374 	if (spdk_likely(length <= bs_num_io_units_to_cluster_boundary(blob, offset))) {
3375 		uint64_t lba_count;
3376 		uint64_t lba;
3377 		bool is_allocated;
3378 
3379 		cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
3380 		cpl.u.blob_basic.cb_fn = cb_fn;
3381 		cpl.u.blob_basic.cb_arg = cb_arg;
3382 
3383 		if (blob->frozen_refcnt) {
3384 			/* This blob I/O is frozen */
3385 			enum spdk_blob_op_type op_type;
3386 			spdk_bs_user_op_t *op;
3387 			struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(_channel);
3388 
3389 			op_type = read ? SPDK_BLOB_READV : SPDK_BLOB_WRITEV;
3390 			op = bs_user_op_alloc(_channel, &cpl, op_type, blob, iov, iovcnt, offset, length);
3391 			if (!op) {
3392 				cb_fn(cb_arg, -ENOMEM);
3393 				return;
3394 			}
3395 
3396 			TAILQ_INSERT_TAIL(&bs_channel->queued_io, op, link);
3397 
3398 			return;
3399 		}
3400 
3401 		is_allocated = blob_calculate_lba_and_lba_count(blob, offset, length, &lba, &lba_count);
3402 
3403 		if (read) {
3404 			spdk_bs_sequence_t *seq;
3405 
3406 			seq = bs_sequence_start_blob(_channel, &cpl, blob);
3407 			if (!seq) {
3408 				cb_fn(cb_arg, -ENOMEM);
3409 				return;
3410 			}
3411 
3412 			seq->ext_io_opts = ext_io_opts;
3413 
3414 			if (is_allocated) {
3415 				bs_sequence_readv_dev(seq, iov, iovcnt, lba, lba_count, rw_iov_done, NULL);
3416 			} else {
3417 				bs_sequence_readv_bs_dev(seq, blob->back_bs_dev, iov, iovcnt, lba, lba_count,
3418 							 rw_iov_done, NULL);
3419 			}
3420 		} else {
3421 			if (is_allocated) {
3422 				spdk_bs_sequence_t *seq;
3423 
3424 				seq = bs_sequence_start_blob(_channel, &cpl, blob);
3425 				if (!seq) {
3426 					cb_fn(cb_arg, -ENOMEM);
3427 					return;
3428 				}
3429 
3430 				seq->ext_io_opts = ext_io_opts;
3431 
3432 				bs_sequence_writev_dev(seq, iov, iovcnt, lba, lba_count, rw_iov_done, NULL);
3433 			} else {
3434 				/* Queue this operation and allocate the cluster */
3435 				spdk_bs_user_op_t *op;
3436 
3437 				op = bs_user_op_alloc(_channel, &cpl, SPDK_BLOB_WRITEV, blob, iov, iovcnt, offset,
3438 						      length);
3439 				if (!op) {
3440 					cb_fn(cb_arg, -ENOMEM);
3441 					return;
3442 				}
3443 
3444 				op->ext_io_opts = ext_io_opts;
3445 
3446 				bs_allocate_and_copy_cluster(blob, _channel, offset, op);
3447 			}
3448 		}
3449 	} else {
3450 		struct rw_iov_ctx *ctx;
3451 
3452 		ctx = calloc(1, sizeof(struct rw_iov_ctx) + iovcnt * sizeof(struct iovec));
3453 		if (ctx == NULL) {
3454 			cb_fn(cb_arg, -ENOMEM);
3455 			return;
3456 		}
3457 
3458 		ctx->blob = blob;
3459 		ctx->channel = _channel;
3460 		ctx->cb_fn = cb_fn;
3461 		ctx->cb_arg = cb_arg;
3462 		ctx->read = read;
3463 		ctx->orig_iov = iov;
3464 		ctx->iovcnt = iovcnt;
3465 		ctx->io_unit_offset = offset;
3466 		ctx->io_units_remaining = length;
3467 		ctx->io_units_done = 0;
3468 		ctx->ext_io_opts = ext_io_opts;
3469 
3470 		rw_iov_split_next(ctx, 0);
3471 	}
3472 }
3473 
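/* Look up an open blob by ID. The open_blobids bit array acts as a cheap
 * existence filter so the common miss case avoids the RB tree search
 * entirely. */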
3474 static struct spdk_blob *
3475 blob_lookup(struct spdk_blob_store *bs, spdk_blob_id blobid)
3476 {
3477 	struct spdk_blob find;
3478 
3479 	if (spdk_bit_array_get(bs->open_blobids, blobid) == 0) {
3480 		return NULL;
3481 	}
3482 
3483 	find.id = blobid;
3484 	return RB_FIND(spdk_blob_tree, &bs->open_blobs, &find);
3485 }
3486 
3487 static void
3488 blob_get_snapshot_and_clone_entries(struct spdk_blob *blob,
3489 				    struct spdk_blob_list **snapshot_entry, struct spdk_blob_list **clone_entry)
3490 {
3491 	assert(blob != NULL);
3492 	*snapshot_entry = NULL;
3493 	*clone_entry = NULL;
3494 
3495 	if (blob->parent_id == SPDK_BLOBID_INVALID) {
3496 		return;
3497 	}
3498 
3499 	TAILQ_FOREACH(*snapshot_entry, &blob->bs->snapshots, link) {
3500 		if ((*snapshot_entry)->id == blob->parent_id) {
3501 			break;
3502 		}
3503 	}
3504 
3505 	if (*snapshot_entry != NULL) {
3506 		TAILQ_FOREACH(*clone_entry, &(*snapshot_entry)->clones, link) {
3507 			if ((*clone_entry)->id == blob->id) {
3508 				break;
3509 			}
3510 		}
3511 
3512 		assert(*clone_entry != NULL);
3513 	}
3514 }
3515 
3516 static int
3517 bs_channel_create(void *io_device, void *ctx_buf)
3518 {
3519 	struct spdk_blob_store		*bs = io_device;
3520 	struct spdk_bs_channel		*channel = ctx_buf;
3521 	struct spdk_bs_dev		*dev;
3522 	uint32_t			max_ops = bs->max_channel_ops;
3523 	uint32_t			i;
3524 
3525 	dev = bs->dev;
3526 
3527 	channel->req_mem = calloc(max_ops, sizeof(struct spdk_bs_request_set));
3528 	if (!channel->req_mem) {
3529 		return -1;
3530 	}
3531 
3532 	TAILQ_INIT(&channel->reqs);
3533 
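	/*
	 * Carve the preallocated request memory into a per-channel free list.
	 * Request sets are drawn from channel->reqs as operations run and
	 * allocation fails with -ENOMEM once all max_ops entries are in flight.
	 */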
3534 	for (i = 0; i < max_ops; i++) {
3535 		TAILQ_INSERT_TAIL(&channel->reqs, &channel->req_mem[i], link);
3536 	}
3537 
3538 	channel->bs = bs;
3539 	channel->dev = dev;
3540 	channel->dev_channel = dev->create_channel(dev);
3541 
3542 	if (!channel->dev_channel) {
3543 		SPDK_ERRLOG("Failed to create device channel.\n");
3544 		free(channel->req_mem);
3545 		return -1;
3546 	}
3547 
3548 	channel->new_cluster_page = spdk_zmalloc(SPDK_BS_PAGE_SIZE, 0, NULL, SPDK_ENV_SOCKET_ID_ANY,
3549 				    SPDK_MALLOC_DMA);
3550 	if (!channel->new_cluster_page) {
3551 		SPDK_ERRLOG("Failed to allocate new cluster page\n");
3552 		free(channel->req_mem);
3553 		channel->dev->destroy_channel(channel->dev, channel->dev_channel);
3554 		return -1;
3555 	}
3556 
3557 	TAILQ_INIT(&channel->need_cluster_alloc);
3558 	TAILQ_INIT(&channel->queued_io);
3559 	RB_INIT(&channel->esnap_channels);
3560 
3561 	return 0;
3562 }
3563 
3564 static void
3565 bs_channel_destroy(void *io_device, void *ctx_buf)
3566 {
3567 	struct spdk_bs_channel *channel = ctx_buf;
3568 	spdk_bs_user_op_t *op;
3569 
3570 	while (!TAILQ_EMPTY(&channel->need_cluster_alloc)) {
3571 		op = TAILQ_FIRST(&channel->need_cluster_alloc);
3572 		TAILQ_REMOVE(&channel->need_cluster_alloc, op, link);
3573 		bs_user_op_abort(op, -EIO);
3574 	}
3575 
3576 	while (!TAILQ_EMPTY(&channel->queued_io)) {
3577 		op = TAILQ_FIRST(&channel->queued_io);
3578 		TAILQ_REMOVE(&channel->queued_io, op, link);
3579 		bs_user_op_abort(op, -EIO);
3580 	}
3581 
3582 	blob_esnap_destroy_bs_channel(channel);
3583 
3584 	free(channel->req_mem);
3585 	spdk_free(channel->new_cluster_page);
3586 	channel->dev->destroy_channel(channel->dev, channel->dev_channel);
3587 }
3588 
3589 static void
3590 bs_dev_destroy(void *io_device)
3591 {
3592 	struct spdk_blob_store *bs = io_device;
3593 	struct spdk_blob	*blob, *blob_tmp;
3594 
3595 	bs->dev->destroy(bs->dev);
3596 
3597 	RB_FOREACH_SAFE(blob, spdk_blob_tree, &bs->open_blobs, blob_tmp) {
3598 		RB_REMOVE(spdk_blob_tree, &bs->open_blobs, blob);
3599 		spdk_bit_array_clear(bs->open_blobids, blob->id);
3600 		blob_free(blob);
3601 	}
3602 
3603 	spdk_spin_destroy(&bs->used_lock);
3604 
3605 	spdk_bit_array_free(&bs->open_blobids);
3606 	spdk_bit_array_free(&bs->used_blobids);
3607 	spdk_bit_array_free(&bs->used_md_pages);
3608 	spdk_bit_pool_free(&bs->used_clusters);
3609 	/*
3610 	 * If this function is called for any reason except a successful unload,
3611 	 * the unload_cpl type will be NONE and this will be a nop.
3612 	 */
3613 	bs_call_cpl(&bs->unload_cpl, bs->unload_err);
3614 
3615 	free(bs);
3616 }
3617 
3618 static int
3619 bs_blob_list_add(struct spdk_blob *blob)
3620 {
3621 	spdk_blob_id snapshot_id;
3622 	struct spdk_blob_list *snapshot_entry = NULL;
3623 	struct spdk_blob_list *clone_entry = NULL;
3624 
3625 	assert(blob != NULL);
3626 
3627 	snapshot_id = blob->parent_id;
3628 	if (snapshot_id == SPDK_BLOBID_INVALID ||
3629 	    snapshot_id == SPDK_BLOBID_EXTERNAL_SNAPSHOT) {
3630 		return 0;
3631 	}
3632 
3633 	snapshot_entry = bs_get_snapshot_entry(blob->bs, snapshot_id);
3634 	if (snapshot_entry == NULL) {
3635 		/* Snapshot not found */
3636 		snapshot_entry = calloc(1, sizeof(struct spdk_blob_list));
3637 		if (snapshot_entry == NULL) {
3638 			return -ENOMEM;
3639 		}
3640 		snapshot_entry->id = snapshot_id;
3641 		TAILQ_INIT(&snapshot_entry->clones);
3642 		TAILQ_INSERT_TAIL(&blob->bs->snapshots, snapshot_entry, link);
3643 	} else {
3644 		TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) {
3645 			if (clone_entry->id == blob->id) {
3646 				break;
3647 			}
3648 		}
3649 	}
3650 
3651 	if (clone_entry == NULL) {
3652 		/* Clone not found */
3653 		clone_entry = calloc(1, sizeof(struct spdk_blob_list));
3654 		if (clone_entry == NULL) {
3655 			return -ENOMEM;
3656 		}
3657 		clone_entry->id = blob->id;
3658 		TAILQ_INIT(&clone_entry->clones);
3659 		TAILQ_INSERT_TAIL(&snapshot_entry->clones, clone_entry, link);
3660 		snapshot_entry->clone_count++;
3661 	}
3662 
3663 	return 0;
3664 }
3665 
3666 static void
3667 bs_blob_list_remove(struct spdk_blob *blob)
3668 {
3669 	struct spdk_blob_list *snapshot_entry = NULL;
3670 	struct spdk_blob_list *clone_entry = NULL;
3671 
3672 	blob_get_snapshot_and_clone_entries(blob, &snapshot_entry, &clone_entry);
3673 
3674 	if (snapshot_entry == NULL) {
3675 		return;
3676 	}
3677 
3678 	blob->parent_id = SPDK_BLOBID_INVALID;
3679 	TAILQ_REMOVE(&snapshot_entry->clones, clone_entry, link);
3680 	free(clone_entry);
3681 
3682 	snapshot_entry->clone_count--;
3683 }
3684 
3685 static int
3686 bs_blob_list_free(struct spdk_blob_store *bs)
3687 {
3688 	struct spdk_blob_list *snapshot_entry;
3689 	struct spdk_blob_list *snapshot_entry_tmp;
3690 	struct spdk_blob_list *clone_entry;
3691 	struct spdk_blob_list *clone_entry_tmp;
3692 
3693 	TAILQ_FOREACH_SAFE(snapshot_entry, &bs->snapshots, link, snapshot_entry_tmp) {
3694 		TAILQ_FOREACH_SAFE(clone_entry, &snapshot_entry->clones, link, clone_entry_tmp) {
3695 			TAILQ_REMOVE(&snapshot_entry->clones, clone_entry, link);
3696 			free(clone_entry);
3697 		}
3698 		TAILQ_REMOVE(&bs->snapshots, snapshot_entry, link);
3699 		free(snapshot_entry);
3700 	}
3701 
3702 	return 0;
3703 }
3704 
3705 static void
3706 bs_free(struct spdk_blob_store *bs)
3707 {
3708 	bs_blob_list_free(bs);
3709 
3710 	bs_unregister_md_thread(bs);
3711 	spdk_io_device_unregister(bs, bs_dev_destroy);
3712 }
3713 
3714 void
3715 spdk_bs_opts_init(struct spdk_bs_opts *opts, size_t opts_size)
3716 {
3718 	if (!opts) {
3719 		SPDK_ERRLOG("opts should not be NULL\n");
3720 		return;
3721 	}
3722 
3723 	if (!opts_size) {
3724 		SPDK_ERRLOG("opts_size should not be zero\n");
3725 		return;
3726 	}
3727 
3728 	memset(opts, 0, opts_size);
3729 	opts->opts_size = opts_size;
3730 
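/*
 * Versioned options: FIELD_OK() checks whether the caller's structure, whose
 * size is opts_size, is large enough to contain a given field.  This lets a
 * newer library initialize options for callers compiled against an older,
 * smaller struct spdk_bs_opts.
 */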
3731 #define FIELD_OK(field) \
3732 	offsetof(struct spdk_bs_opts, field) + sizeof(opts->field) <= opts_size
3733 
3734 #define SET_FIELD(field, value) \
3735 	if (FIELD_OK(field)) { \
3736 		opts->field = value; \
3737 	} \
3738 
3739 	SET_FIELD(cluster_sz, SPDK_BLOB_OPTS_CLUSTER_SZ);
3740 	SET_FIELD(num_md_pages, SPDK_BLOB_OPTS_NUM_MD_PAGES);
3741 	SET_FIELD(max_md_ops, SPDK_BLOB_OPTS_MAX_MD_OPS);
3742 	SET_FIELD(max_channel_ops, SPDK_BLOB_OPTS_DEFAULT_CHANNEL_OPS);
3743 	SET_FIELD(clear_method, BS_CLEAR_WITH_UNMAP);
3744 
3745 	if (FIELD_OK(bstype)) {
3746 		memset(&opts->bstype, 0, sizeof(opts->bstype));
3747 	}
3748 
3749 	SET_FIELD(iter_cb_fn, NULL);
3750 	SET_FIELD(iter_cb_arg, NULL);
3751 	SET_FIELD(force_recover, false);
3752 	SET_FIELD(esnap_bs_dev_create, NULL);
3753 	SET_FIELD(esnap_ctx, NULL);
3754 
3755 #undef FIELD_OK
3756 #undef SET_FIELD
3757 }
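
/*
 * Example usage (a minimal sketch; "dev", "init_done_cb" and its argument are
 * placeholders supplied by the caller):
 *
 *	struct spdk_bs_opts opts;
 *
 *	spdk_bs_opts_init(&opts, sizeof(opts));
 *	opts.cluster_sz = 64 * 1024;	// override the 1 MiB default
 *	spdk_bs_init(dev, &opts, init_done_cb, init_done_arg);
 */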
3758 
3759 static int
3760 bs_opts_verify(struct spdk_bs_opts *opts)
3761 {
3762 	if (opts->cluster_sz == 0 || opts->num_md_pages == 0 || opts->max_md_ops == 0 ||
3763 	    opts->max_channel_ops == 0) {
3764 		SPDK_ERRLOG("Blobstore options cannot be set to 0\n");
3765 		return -1;
3766 	}
3767 
3768 	return 0;
3769 }
3770 
3771 /* START spdk_bs_load */
3772 
3773 /* spdk_bs_load_ctx is used for init, load, unload and dump code paths. */
3774 
3775 struct spdk_bs_load_ctx {
3776 	struct spdk_blob_store		*bs;
3777 	struct spdk_bs_super_block	*super;
3778 
3779 	struct spdk_bs_md_mask		*mask;
3780 	bool				in_page_chain;
3781 	uint32_t			page_index;
3782 	uint32_t			cur_page;
3783 	struct spdk_blob_md_page	*page;
3784 
3785 	uint64_t			num_extent_pages;
3786 	uint32_t			*extent_page_num;
3787 	struct spdk_blob_md_page	*extent_pages;
3788 	struct spdk_bit_array		*used_clusters;
3789 
3790 	spdk_bs_sequence_t			*seq;
3791 	spdk_blob_op_with_handle_complete	iter_cb_fn;
3792 	void					*iter_cb_arg;
3793 	struct spdk_blob			*blob;
3794 	spdk_blob_id				blobid;
3795 
3796 	bool					force_recover;
3797 
3798 	/* These fields are used in the spdk_bs_dump path. */
3799 	bool					dumping;
3800 	FILE					*fp;
3801 	spdk_bs_dump_print_xattr		print_xattr_fn;
3802 	char					xattr_name[4096];
3803 };
3804 
3805 static int
3806 bs_alloc(struct spdk_bs_dev *dev, struct spdk_bs_opts *opts, struct spdk_blob_store **_bs,
3807 	 struct spdk_bs_load_ctx **_ctx)
3808 {
3809 	struct spdk_blob_store	*bs;
3810 	struct spdk_bs_load_ctx	*ctx;
3811 	uint64_t dev_size;
3812 	int rc;
3813 
3814 	dev_size = dev->blocklen * dev->blockcnt;
3815 	if (dev_size < opts->cluster_sz) {
3816 		/* Device size cannot be smaller than cluster size of blobstore */
3817 		SPDK_INFOLOG(blob, "Device size %" PRIu64 " is smaller than cluster size %" PRIu32 "\n",
3818 			     dev_size, opts->cluster_sz);
3819 		return -ENOSPC;
3820 	}
3821 	if (opts->cluster_sz < SPDK_BS_PAGE_SIZE) {
3822 		/* Cluster size cannot be smaller than page size */
3823 		SPDK_ERRLOG("Cluster size %" PRIu32 " is smaller than page size %d\n",
3824 			    opts->cluster_sz, SPDK_BS_PAGE_SIZE);
3825 		return -EINVAL;
3826 	}
3827 	bs = calloc(1, sizeof(struct spdk_blob_store));
3828 	if (!bs) {
3829 		return -ENOMEM;
3830 	}
3831 
3832 	ctx = calloc(1, sizeof(struct spdk_bs_load_ctx));
3833 	if (!ctx) {
3834 		free(bs);
3835 		return -ENOMEM;
3836 	}
3837 
3838 	ctx->bs = bs;
3839 	ctx->iter_cb_fn = opts->iter_cb_fn;
3840 	ctx->iter_cb_arg = opts->iter_cb_arg;
3841 	ctx->force_recover = opts->force_recover;
3842 
3843 	ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL,
3844 				  SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
3845 	if (!ctx->super) {
3846 		free(ctx);
3847 		free(bs);
3848 		return -ENOMEM;
3849 	}
3850 
3851 	RB_INIT(&bs->open_blobs);
3852 	TAILQ_INIT(&bs->snapshots);
3853 	bs->dev = dev;
3854 	bs->md_thread = spdk_get_thread();
3855 	assert(bs->md_thread != NULL);
3856 
3857 	/*
3858 	 * Do not use bs_lba_to_cluster() here since blockcnt may not be an
3859 	 *  even multiple of the cluster size.
3860 	 */
3861 	bs->cluster_sz = opts->cluster_sz;
3862 	bs->total_clusters = dev->blockcnt / (bs->cluster_sz / dev->blocklen);
3863 	ctx->used_clusters = spdk_bit_array_create(bs->total_clusters);
3864 	if (!ctx->used_clusters) {
3865 		spdk_free(ctx->super);
3866 		free(ctx);
3867 		free(bs);
3868 		return -ENOMEM;
3869 	}
3870 
3871 	bs->pages_per_cluster = bs->cluster_sz / SPDK_BS_PAGE_SIZE;
3872 	if (spdk_u32_is_pow2(bs->pages_per_cluster)) {
3873 		bs->pages_per_cluster_shift = spdk_u32log2(bs->pages_per_cluster);
3874 	}
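	/*
	 * For example, a 512-byte-block device with the default 1 MiB cluster
	 * size gives total_clusters = blockcnt / 2048, pages_per_cluster =
	 * 1 MiB / 4 KiB = 256 and pages_per_cluster_shift = 8.
	 */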
3875 	bs->num_free_clusters = bs->total_clusters;
3876 	bs->io_unit_size = dev->blocklen;
3877 
3878 	bs->max_channel_ops = opts->max_channel_ops;
3879 	bs->super_blob = SPDK_BLOBID_INVALID;
3880 	memcpy(&bs->bstype, &opts->bstype, sizeof(opts->bstype));
3881 	bs->esnap_bs_dev_create = opts->esnap_bs_dev_create;
3882 	bs->esnap_ctx = opts->esnap_ctx;
3883 
3884 	/* The metadata is assumed to be at least 1 page */
3885 	bs->used_md_pages = spdk_bit_array_create(1);
3886 	bs->used_blobids = spdk_bit_array_create(0);
3887 	bs->open_blobids = spdk_bit_array_create(0);
3888 
3889 	spdk_spin_init(&bs->used_lock);
3890 
3891 	spdk_io_device_register(bs, bs_channel_create, bs_channel_destroy,
3892 				sizeof(struct spdk_bs_channel), "blobstore");
3893 	rc = bs_register_md_thread(bs);
3894 	if (rc == -1) {
3895 		spdk_io_device_unregister(bs, NULL);
3896 		spdk_spin_destroy(&bs->used_lock);
3897 		spdk_bit_array_free(&bs->open_blobids);
3898 		spdk_bit_array_free(&bs->used_blobids);
3899 		spdk_bit_array_free(&bs->used_md_pages);
3900 		spdk_bit_array_free(&ctx->used_clusters);
3901 		spdk_free(ctx->super);
3902 		free(ctx);
3903 		free(bs);
3904 		/* FIXME: this is a lie, but we don't know how to get a proper error code here. */
3905 		return -ENOMEM;
3906 	}
3907 
3908 	*_ctx = ctx;
3909 	*_bs = bs;
3910 	return 0;
3911 }
3912 
3913 static void
3914 bs_load_ctx_fail(struct spdk_bs_load_ctx *ctx, int bserrno)
3915 {
3916 	assert(bserrno != 0);
3917 
3918 	spdk_free(ctx->super);
3919 	bs_sequence_finish(ctx->seq, bserrno);
3920 	bs_free(ctx->bs);
3921 	spdk_bit_array_free(&ctx->used_clusters);
3922 	free(ctx);
3923 }
3924 
3925 static void
3926 bs_write_super(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs,
3927 	       struct spdk_bs_super_block *super, spdk_bs_sequence_cpl cb_fn, void *cb_arg)
3928 {
3929 	/* Update the values in the super block */
3930 	super->super_blob = bs->super_blob;
3931 	memcpy(&super->bstype, &bs->bstype, sizeof(bs->bstype));
3932 	super->crc = blob_md_page_calc_crc(super);
3933 	bs_sequence_write_dev(seq, super, bs_page_to_lba(bs, 0),
3934 			      bs_byte_to_lba(bs, sizeof(*super)),
3935 			      cb_fn, cb_arg);
3936 }
3937 
3938 static void
3939 bs_write_used_clusters(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn)
3940 {
3941 	struct spdk_bs_load_ctx	*ctx = arg;
3942 	uint64_t	mask_size, lba, lba_count;
3943 
3944 	/* Write out the used clusters mask */
3945 	mask_size = ctx->super->used_cluster_mask_len * SPDK_BS_PAGE_SIZE;
3946 	ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL,
3947 				 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
3948 	if (!ctx->mask) {
3949 		bs_load_ctx_fail(ctx, -ENOMEM);
3950 		return;
3951 	}
3952 
3953 	ctx->mask->type = SPDK_MD_MASK_TYPE_USED_CLUSTERS;
3954 	ctx->mask->length = ctx->bs->total_clusters;
3955 	/* We could get here through the normal unload path, or through dirty
3956 	 * shutdown recovery.  For the normal unload path, we use the mask from
3957 	 * the bit pool.  For dirty shutdown recovery, we don't have a bit pool yet -
3958 	 * only the bit array from the load ctx.
3959 	 */
3960 	if (ctx->bs->used_clusters) {
3961 		assert(ctx->mask->length == spdk_bit_pool_capacity(ctx->bs->used_clusters));
3962 		spdk_bit_pool_store_mask(ctx->bs->used_clusters, ctx->mask->mask);
3963 	} else {
3964 		assert(ctx->mask->length == spdk_bit_array_capacity(ctx->used_clusters));
3965 		spdk_bit_array_store_mask(ctx->used_clusters, ctx->mask->mask);
3966 	}
3967 	lba = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start);
3968 	lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len);
3969 	bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg);
3970 }
3971 
3972 static void
3973 bs_write_used_md(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn)
3974 {
3975 	struct spdk_bs_load_ctx	*ctx = arg;
3976 	uint64_t	mask_size, lba, lba_count;
3977 
3978 	mask_size = ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE;
3979 	ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL,
3980 				 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
3981 	if (!ctx->mask) {
3982 		bs_load_ctx_fail(ctx, -ENOMEM);
3983 		return;
3984 	}
3985 
3986 	ctx->mask->type = SPDK_MD_MASK_TYPE_USED_PAGES;
3987 	ctx->mask->length = ctx->super->md_len;
3988 	assert(ctx->mask->length == spdk_bit_array_capacity(ctx->bs->used_md_pages));
3989 
3990 	spdk_bit_array_store_mask(ctx->bs->used_md_pages, ctx->mask->mask);
3991 	lba = bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_start);
3992 	lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_len);
3993 	bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg);
3994 }
3995 
3996 static void
3997 bs_write_used_blobids(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn)
3998 {
3999 	struct spdk_bs_load_ctx	*ctx = arg;
4000 	uint64_t	mask_size, lba, lba_count;
4001 
4002 	if (ctx->super->used_blobid_mask_len == 0) {
4003 		/*
4004 		 * This is a pre-v3 on-disk format where the blobid mask does not get
4005 		 *  written to disk.
4006 		 */
4007 		cb_fn(seq, arg, 0);
4008 		return;
4009 	}
4010 
4011 	mask_size = ctx->super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE;
4012 	ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_SOCKET_ID_ANY,
4013 				 SPDK_MALLOC_DMA);
4014 	if (!ctx->mask) {
4015 		bs_load_ctx_fail(ctx, -ENOMEM);
4016 		return;
4017 	}
4018 
4019 	ctx->mask->type = SPDK_MD_MASK_TYPE_USED_BLOBIDS;
4020 	ctx->mask->length = ctx->super->md_len;
4021 	assert(ctx->mask->length == spdk_bit_array_capacity(ctx->bs->used_blobids));
4022 
4023 	spdk_bit_array_store_mask(ctx->bs->used_blobids, ctx->mask->mask);
4024 	lba = bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_start);
4025 	lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_len);
4026 	bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg);
4027 }
4028 
4029 static void
4030 blob_set_thin_provision(struct spdk_blob *blob)
4031 {
4032 	blob_verify_md_op(blob);
4033 	blob->invalid_flags |= SPDK_BLOB_THIN_PROV;
4034 	blob->state = SPDK_BLOB_STATE_DIRTY;
4035 }
4036 
4037 static void
4038 blob_set_clear_method(struct spdk_blob *blob, enum blob_clear_method clear_method)
4039 {
4040 	blob_verify_md_op(blob);
4041 	blob->clear_method = clear_method;
4042 	blob->md_ro_flags |= (clear_method << SPDK_BLOB_CLEAR_METHOD_SHIFT);
4043 	blob->state = SPDK_BLOB_STATE_DIRTY;
4044 }
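
/*
 * The clear method is persisted by shifting it into the blob's md_ro_flags
 * (see SPDK_BLOB_CLEAR_METHOD_SHIFT), so, for example, a blob created with
 * BLOB_CLEAR_WITH_UNMAP keeps that behavior across a blobstore reload.
 */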
4045 
4046 static void bs_load_iter(void *arg, struct spdk_blob *blob, int bserrno);
4047 
4048 static void
4049 bs_delete_corrupted_blob_cpl(void *cb_arg, int bserrno)
4050 {
4051 	struct spdk_bs_load_ctx *ctx = cb_arg;
4052 	spdk_blob_id id;
4053 	int64_t page_num;
4054 
4055 	/* Iterate to the next blob (we can't use the spdk_bs_iter_next function
4056 	 * since our last blob has been removed). */
4057 	page_num = bs_blobid_to_page(ctx->blobid);
4058 	page_num++;
4059 	page_num = spdk_bit_array_find_first_set(ctx->bs->used_blobids, page_num);
4060 	if (page_num >= spdk_bit_array_capacity(ctx->bs->used_blobids)) {
4061 		bs_load_iter(ctx, NULL, -ENOENT);
4062 		return;
4063 	}
4064 
4065 	id = bs_page_to_blobid(page_num);
4066 
4067 	spdk_bs_open_blob(ctx->bs, id, bs_load_iter, ctx);
4068 }
4069 
4070 static void
4071 bs_delete_corrupted_close_cb(void *cb_arg, int bserrno)
4072 {
4073 	struct spdk_bs_load_ctx *ctx = cb_arg;
4074 
4075 	if (bserrno != 0) {
4076 		SPDK_ERRLOG("Failed to close corrupted blob\n");
4077 		spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx);
4078 		return;
4079 	}
4080 
4081 	spdk_bs_delete_blob(ctx->bs, ctx->blobid, bs_delete_corrupted_blob_cpl, ctx);
4082 }
4083 
4084 static void
4085 bs_delete_corrupted_blob(void *cb_arg, int bserrno)
4086 {
4087 	struct spdk_bs_load_ctx *ctx = cb_arg;
4088 	uint64_t i;
4089 
4090 	if (bserrno != 0) {
4091 		SPDK_ERRLOG("Failed to close clone of a corrupted blob\n");
4092 		spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx);
4093 		return;
4094 	}
4095 
4096 	/* The snapshot and the clone have identical copies of the cluster map and
4097 	 * extent pages at this point. Clear both for the snapshot now, so that they
4098 	 * won't be cleared for the clone later when we remove the snapshot.
4099 	 * Also set thin provisioning to pass the data corruption check. */
4100 	for (i = 0; i < ctx->blob->active.num_clusters; i++) {
4101 		ctx->blob->active.clusters[i] = 0;
4102 	}
4103 	for (i = 0; i < ctx->blob->active.num_extent_pages; i++) {
4104 		ctx->blob->active.extent_pages[i] = 0;
4105 	}
4106 
4107 	ctx->blob->active.num_allocated_clusters = 0;
4108 
4109 	ctx->blob->md_ro = false;
4110 
4111 	blob_set_thin_provision(ctx->blob);
4112 
4113 	ctx->blobid = ctx->blob->id;
4114 
4115 	spdk_blob_close(ctx->blob, bs_delete_corrupted_close_cb, ctx);
4116 }
4117 
4118 static void
4119 bs_update_corrupted_blob(void *cb_arg, int bserrno)
4120 {
4121 	struct spdk_bs_load_ctx *ctx = cb_arg;
4122 
4123 	if (bserrno != 0) {
4124 		SPDK_ERRLOG("Failed to close clone of a corrupted blob\n");
4125 		spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx);
4126 		return;
4127 	}
4128 
4129 	ctx->blob->md_ro = false;
4130 	blob_remove_xattr(ctx->blob, SNAPSHOT_PENDING_REMOVAL, true);
4131 	blob_remove_xattr(ctx->blob, SNAPSHOT_IN_PROGRESS, true);
4132 	spdk_blob_set_read_only(ctx->blob);
4133 
4134 	if (ctx->iter_cb_fn) {
4135 		ctx->iter_cb_fn(ctx->iter_cb_arg, ctx->blob, 0);
4136 	}
4137 	bs_blob_list_add(ctx->blob);
4138 
4139 	spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx);
4140 }
4141 
4142 static void
4143 bs_examine_clone(void *cb_arg, struct spdk_blob *blob, int bserrno)
4144 {
4145 	struct spdk_bs_load_ctx *ctx = cb_arg;
4146 
4147 	if (bserrno != 0) {
4148 		SPDK_ERRLOG("Failed to open clone of a corrupted blob\n");
4149 		spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx);
4150 		return;
4151 	}
4152 
4153 	if (blob->parent_id == ctx->blob->id) {
4154 		/* Power failure occurred before updating clone (snapshot delete case)
4155 		 * or after updating clone (creating snapshot case) - keep snapshot */
4156 		spdk_blob_close(blob, bs_update_corrupted_blob, ctx);
4157 	} else {
4158 		/* Power failure occurred after updating clone (snapshot delete case)
4159 		 * or before updating clone (creating snapshot case) - remove snapshot */
4160 		spdk_blob_close(blob, bs_delete_corrupted_blob, ctx);
4161 	}
4162 }
4163 
4164 static void
4165 bs_load_iter(void *arg, struct spdk_blob *blob, int bserrno)
4166 {
4167 	struct spdk_bs_load_ctx *ctx = arg;
4168 	const void *value;
4169 	size_t len;
4170 	int rc = 0;
4171 
4172 	if (bserrno == 0) {
4173 		/* Examine whether the blob was corrupted by a power failure. Fix
4174 		 * the ones that can be fixed and remove any others. If the blob
4175 		 * is not corrupted, just process it. */
4176 		rc = blob_get_xattr_value(blob, SNAPSHOT_PENDING_REMOVAL, &value, &len, true);
4177 		if (rc != 0) {
4178 			rc = blob_get_xattr_value(blob, SNAPSHOT_IN_PROGRESS, &value, &len, true);
4179 			if (rc != 0) {
4180 				/* Not corrupted - process it and continue with iterating through blobs */
4181 				if (ctx->iter_cb_fn) {
4182 					ctx->iter_cb_fn(ctx->iter_cb_arg, blob, 0);
4183 				}
4184 				bs_blob_list_add(blob);
4185 				spdk_bs_iter_next(ctx->bs, blob, bs_load_iter, ctx);
4186 				return;
4187 			}
4189 		}
4190 
4191 		assert(len == sizeof(spdk_blob_id));
4192 
4193 		ctx->blob = blob;
4194 
4195 		/* Open clone to check if we are able to fix this blob or should we remove it */
4196 		spdk_bs_open_blob(ctx->bs, *(spdk_blob_id *)value, bs_examine_clone, ctx);
4197 		return;
4198 	} else if (bserrno == -ENOENT) {
4199 		bserrno = 0;
4200 	} else {
4201 		/*
4202 		 * This case needs to be looked at further.  Same problem
4203 		 *  exists with applications that rely on explicit blob
4204 		 *  iteration.  We should just skip the blob that failed
4205 		 *  to load and continue on to the next one.
4206 		 */
4207 		SPDK_ERRLOG("Error in iterating blobs\n");
4208 	}
4209 
4210 	ctx->iter_cb_fn = NULL;
4211 
4212 	spdk_free(ctx->super);
4213 	spdk_free(ctx->mask);
4214 	bs_sequence_finish(ctx->seq, bserrno);
4215 	free(ctx);
4216 }
4217 
4218 static void bs_dump_read_md_page(spdk_bs_sequence_t *seq, void *cb_arg);
4219 
4220 static void
4221 bs_load_complete(struct spdk_bs_load_ctx *ctx)
4222 {
4223 	ctx->bs->used_clusters = spdk_bit_pool_create_from_array(ctx->used_clusters);
4224 	if (ctx->dumping) {
4225 		bs_dump_read_md_page(ctx->seq, ctx);
4226 		return;
4227 	}
4228 	spdk_bs_iter_first(ctx->bs, bs_load_iter, ctx);
4229 }
4230 
4231 static void
4232 bs_load_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4233 {
4234 	struct spdk_bs_load_ctx *ctx = cb_arg;
4235 	int rc;
4236 
4237 	/* The type must be correct */
4238 	assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_BLOBIDS);
4239 
4240 	/* The length of the mask (in bits) must not be greater than
4241 	 * the length of the buffer (converted to bits) */
4242 	assert(ctx->mask->length <= (ctx->super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE * 8));
4243 
4244 	/* The length of the mask must be exactly equal to the size
4245 	 * (in pages) of the metadata region */
4246 	assert(ctx->mask->length == ctx->super->md_len);
4247 
4248 	rc = spdk_bit_array_resize(&ctx->bs->used_blobids, ctx->mask->length);
4249 	if (rc < 0) {
4250 		spdk_free(ctx->mask);
4251 		bs_load_ctx_fail(ctx, rc);
4252 		return;
4253 	}
4254 
4255 	spdk_bit_array_load_mask(ctx->bs->used_blobids, ctx->mask->mask);
4256 	bs_load_complete(ctx);
4257 }
4258 
4259 static void
4260 bs_load_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4261 {
4262 	struct spdk_bs_load_ctx *ctx = cb_arg;
4263 	uint64_t		lba, lba_count, mask_size;
4264 	int			rc;
4265 
4266 	if (bserrno != 0) {
4267 		bs_load_ctx_fail(ctx, bserrno);
4268 		return;
4269 	}
4270 
4271 	/* The type must be correct */
4272 	assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_CLUSTERS);
4273 	/* The length of the mask (in bits) must not be greater than the length of the buffer (converted to bits) */
4274 	assert(ctx->mask->length <= (ctx->super->used_cluster_mask_len * sizeof(
4275 					     struct spdk_blob_md_page) * 8));
4276 	/*
4277 	 * The length of the mask must be equal to or larger than the total number of clusters. It may be
4278 	 * larger than the total number of clusters due to a failed spdk_bs_grow.
4279 	 */
4280 	assert(ctx->mask->length >= ctx->bs->total_clusters);
4281 	if (ctx->mask->length > ctx->bs->total_clusters) {
4282 		SPDK_WARNLOG("Shrinking the used_clusters mask length to total_clusters\n");
4283 		ctx->mask->length = ctx->bs->total_clusters;
4284 	}
4285 
4286 	rc = spdk_bit_array_resize(&ctx->used_clusters, ctx->mask->length);
4287 	if (rc < 0) {
4288 		spdk_free(ctx->mask);
4289 		bs_load_ctx_fail(ctx, rc);
4290 		return;
4291 	}
4292 
4293 	spdk_bit_array_load_mask(ctx->used_clusters, ctx->mask->mask);
4294 	ctx->bs->num_free_clusters = spdk_bit_array_count_clear(ctx->used_clusters);
4295 	assert(ctx->bs->num_free_clusters <= ctx->bs->total_clusters);
4296 
4297 	spdk_free(ctx->mask);
4298 
4299 	/* Read the used blobids mask */
4300 	mask_size = ctx->super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE;
4301 	ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_SOCKET_ID_ANY,
4302 				 SPDK_MALLOC_DMA);
4303 	if (!ctx->mask) {
4304 		bs_load_ctx_fail(ctx, -ENOMEM);
4305 		return;
4306 	}
4307 	lba = bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_start);
4308 	lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_len);
4309 	bs_sequence_read_dev(seq, ctx->mask, lba, lba_count,
4310 			     bs_load_used_blobids_cpl, ctx);
4311 }
4312 
4313 static void
4314 bs_load_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4315 {
4316 	struct spdk_bs_load_ctx *ctx = cb_arg;
4317 	uint64_t		lba, lba_count, mask_size;
4318 	int			rc;
4319 
4320 	if (bserrno != 0) {
4321 		bs_load_ctx_fail(ctx, bserrno);
4322 		return;
4323 	}
4324 
4325 	/* The type must be correct */
4326 	assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_PAGES);
4327 	/* The length of the mask (in bits) must not be greater than the length of the buffer (converted to bits) */
4328 	assert(ctx->mask->length <= (ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE *
4329 				     8));
4330 	/* The length of the mask must be exactly equal to the size (in pages) of the metadata region */
4331 	if (ctx->mask->length != ctx->super->md_len) {
4332 		SPDK_ERRLOG("mismatched md_len in used_pages mask: "
4333 			    "mask->length=%" PRIu32 " super->md_len=%" PRIu32 "\n",
4334 			    ctx->mask->length, ctx->super->md_len);
4335 		assert(false);
4336 	}
4337 
4338 	rc = spdk_bit_array_resize(&ctx->bs->used_md_pages, ctx->mask->length);
4339 	if (rc < 0) {
4340 		spdk_free(ctx->mask);
4341 		bs_load_ctx_fail(ctx, rc);
4342 		return;
4343 	}
4344 
4345 	spdk_bit_array_load_mask(ctx->bs->used_md_pages, ctx->mask->mask);
4346 	spdk_free(ctx->mask);
4347 
4348 	/* Read the used clusters mask */
4349 	mask_size = ctx->super->used_cluster_mask_len * SPDK_BS_PAGE_SIZE;
4350 	ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_SOCKET_ID_ANY,
4351 				 SPDK_MALLOC_DMA);
4352 	if (!ctx->mask) {
4353 		bs_load_ctx_fail(ctx, -ENOMEM);
4354 		return;
4355 	}
4356 	lba = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start);
4357 	lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len);
4358 	bs_sequence_read_dev(seq, ctx->mask, lba, lba_count,
4359 			     bs_load_used_clusters_cpl, ctx);
4360 }
4361 
4362 static void
4363 bs_load_read_used_pages(struct spdk_bs_load_ctx *ctx)
4364 {
4365 	uint64_t lba, lba_count, mask_size;
4366 
4367 	/* Read the used pages mask */
4368 	mask_size = ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE;
4369 	ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL,
4370 				 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
4371 	if (!ctx->mask) {
4372 		bs_load_ctx_fail(ctx, -ENOMEM);
4373 		return;
4374 	}
4375 
4376 	lba = bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_start);
4377 	lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_len);
4378 	bs_sequence_read_dev(ctx->seq, ctx->mask, lba, lba_count,
4379 			     bs_load_used_pages_cpl, ctx);
4380 }
4381 
4382 static int
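/*
 * Replay a single md page into the load context.  For example, an EXTENT_RLE
 * run of {cluster_idx = 5, length = 3} marks clusters 5, 6 and 7 as allocated
 * in ctx->used_clusters and decrements bs->num_free_clusters for each one.
 */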
4383 bs_load_replay_md_parse_page(struct spdk_bs_load_ctx *ctx, struct spdk_blob_md_page *page)
4384 {
4385 	struct spdk_blob_store *bs = ctx->bs;
4386 	struct spdk_blob_md_descriptor *desc;
4387 	size_t	cur_desc = 0;
4388 
4389 	desc = (struct spdk_blob_md_descriptor *)page->descriptors;
4390 	while (cur_desc < sizeof(page->descriptors)) {
4391 		if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) {
4392 			if (desc->length == 0) {
4393 				/* If padding and length are 0, this terminates the page */
4394 				break;
4395 			}
4396 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE) {
4397 			struct spdk_blob_md_descriptor_extent_rle	*desc_extent_rle;
4398 			unsigned int				i, j;
4399 			unsigned int				cluster_count = 0;
4400 			uint32_t				cluster_idx;
4401 
4402 			desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)desc;
4403 
4404 			for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) {
4405 				for (j = 0; j < desc_extent_rle->extents[i].length; j++) {
4406 					cluster_idx = desc_extent_rle->extents[i].cluster_idx;
4407 					/*
4408 					 * cluster_idx = 0 means an unallocated cluster - don't mark that
4409 					 * in the used cluster map.
4410 					 */
4411 					if (cluster_idx != 0) {
4412 						SPDK_NOTICELOG("Recover: cluster %" PRIu32 "\n", cluster_idx + j);
4413 						spdk_bit_array_set(ctx->used_clusters, cluster_idx + j);
4414 						if (bs->num_free_clusters == 0) {
4415 							return -ENOSPC;
4416 						}
4417 						bs->num_free_clusters--;
4418 					}
4419 					cluster_count++;
4420 				}
4421 			}
4422 			if (cluster_count == 0) {
4423 				return -EINVAL;
4424 			}
4425 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) {
4426 			struct spdk_blob_md_descriptor_extent_page	*desc_extent;
4427 			uint32_t					i;
4428 			uint32_t					cluster_count = 0;
4429 			uint32_t					cluster_idx;
4430 			size_t						cluster_idx_length;
4431 
4432 			desc_extent = (struct spdk_blob_md_descriptor_extent_page *)desc;
4433 			cluster_idx_length = desc_extent->length - sizeof(desc_extent->start_cluster_idx);
4434 
4435 			if (desc_extent->length <= sizeof(desc_extent->start_cluster_idx) ||
4436 			    (cluster_idx_length % sizeof(desc_extent->cluster_idx[0]) != 0)) {
4437 				return -EINVAL;
4438 			}
4439 
4440 			for (i = 0; i < cluster_idx_length / sizeof(desc_extent->cluster_idx[0]); i++) {
4441 				cluster_idx = desc_extent->cluster_idx[i];
4442 				/*
4443 				 * cluster_idx = 0 means an unallocated cluster - don't mark that
4444 				 * in the used cluster map.
4445 				 */
4446 				if (cluster_idx != 0) {
4447 					if (cluster_idx < desc_extent->start_cluster_idx &&
4448 					    cluster_idx >= desc_extent->start_cluster_idx + cluster_count) {
4449 						return -EINVAL;
4450 					}
4451 					spdk_bit_array_set(ctx->used_clusters, cluster_idx);
4452 					if (bs->num_free_clusters == 0) {
4453 						return -ENOSPC;
4454 					}
4455 					bs->num_free_clusters--;
4456 				}
4457 				cluster_count++;
4458 			}
4459 
4460 			if (cluster_count == 0) {
4461 				return -EINVAL;
4462 			}
4463 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) {
4464 			/* Skip this item */
4465 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) {
4466 			/* Skip this item */
4467 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) {
4468 			/* Skip this item */
4469 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE) {
4470 			struct spdk_blob_md_descriptor_extent_table *desc_extent_table;
4471 			uint32_t num_extent_pages = ctx->num_extent_pages;
4472 			uint32_t i;
4473 			size_t extent_pages_length;
4474 			void *tmp;
4475 
4476 			desc_extent_table = (struct spdk_blob_md_descriptor_extent_table *)desc;
4477 			extent_pages_length = desc_extent_table->length - sizeof(desc_extent_table->num_clusters);
4478 
4479 			if (desc_extent_table->length == 0 ||
4480 			    (extent_pages_length % sizeof(desc_extent_table->extent_page[0]) != 0)) {
4481 				return -EINVAL;
4482 			}
4483 
4484 			for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) {
4485 				if (desc_extent_table->extent_page[i].page_idx != 0) {
4486 					if (desc_extent_table->extent_page[i].num_pages != 1) {
4487 						return -EINVAL;
4488 					}
4489 					num_extent_pages += 1;
4490 				}
4491 			}
4492 
4493 			if (num_extent_pages > 0) {
4494 				tmp = realloc(ctx->extent_page_num, num_extent_pages * sizeof(uint32_t));
4495 				if (tmp == NULL) {
4496 					return -ENOMEM;
4497 				}
4498 				ctx->extent_page_num = tmp;
4499 
4500 				/* Extent table entries contain md page numbers for extent pages.
4501 				 * Zeroes represent unallocated extent pages, those are run-length-encoded.
4502 				 */
4503 				for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) {
4504 					if (desc_extent_table->extent_page[i].page_idx != 0) {
4505 						ctx->extent_page_num[ctx->num_extent_pages] = desc_extent_table->extent_page[i].page_idx;
4506 						ctx->num_extent_pages += 1;
4507 					}
4508 				}
4509 			}
4510 		} else {
4511 			/* Error */
4512 			return -EINVAL;
4513 		}
4514 		/* Advance to the next descriptor */
4515 		cur_desc += sizeof(*desc) + desc->length;
4516 		if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) {
4517 			break;
4518 		}
4519 		desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc);
4520 	}
4521 	return 0;
4522 }
4523 
4524 static bool
4525 bs_load_cur_extent_page_valid(struct spdk_blob_md_page *page)
4526 {
4527 	uint32_t crc;
4528 	struct spdk_blob_md_descriptor *desc = (struct spdk_blob_md_descriptor *)page->descriptors;
4529 	size_t desc_len;
4530 
4531 	crc = blob_md_page_calc_crc(page);
4532 	if (crc != page->crc) {
4533 		return false;
4534 	}
4535 
4536 	/* An extent page always has sequence_num 0. */
4537 	if (page->sequence_num != 0) {
4538 		return false;
4539 	}
4540 
4541 	/* Descriptor type must be EXTENT_PAGE. */
4542 	if (desc->type != SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) {
4543 		return false;
4544 	}
4545 
4546 	/* Descriptor length cannot exceed the page. */
4547 	desc_len = sizeof(*desc) + desc->length;
4548 	if (desc_len > sizeof(page->descriptors)) {
4549 		return false;
4550 	}
4551 
4552 	/* It has to be the only descriptor in the page. */
4553 	if (desc_len + sizeof(*desc) <= sizeof(page->descriptors)) {
4554 		desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + desc_len);
4555 		if (desc->length != 0) {
4556 			return false;
4557 		}
4558 	}
4559 
4560 	return true;
4561 }
4562 
4563 static bool
4564 bs_load_cur_md_page_valid(struct spdk_bs_load_ctx *ctx)
4565 {
4566 	uint32_t crc;
4567 	struct spdk_blob_md_page *page = ctx->page;
4568 
4569 	crc = blob_md_page_calc_crc(page);
4570 	if (crc != page->crc) {
4571 		return false;
4572 	}
4573 
4574 	/* First page of a sequence should match the blobid. */
4575 	if (page->sequence_num == 0 &&
4576 	    bs_page_to_blobid(ctx->cur_page) != page->id) {
4577 		return false;
4578 	}
4579 	assert(bs_load_cur_extent_page_valid(page) == false);
4580 
4581 	return true;
4582 }
4583 
4584 static void bs_load_replay_cur_md_page(struct spdk_bs_load_ctx *ctx);
4585 
4586 static void
4587 bs_load_write_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4588 {
4589 	struct spdk_bs_load_ctx	*ctx = cb_arg;
4590 
4591 	if (bserrno != 0) {
4592 		bs_load_ctx_fail(ctx, bserrno);
4593 		return;
4594 	}
4595 
4596 	bs_load_complete(ctx);
4597 }
4598 
4599 static void
4600 bs_load_write_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4601 {
4602 	struct spdk_bs_load_ctx	*ctx = cb_arg;
4603 
4604 	spdk_free(ctx->mask);
4605 	ctx->mask = NULL;
4606 
4607 	if (bserrno != 0) {
4608 		bs_load_ctx_fail(ctx, bserrno);
4609 		return;
4610 	}
4611 
4612 	bs_write_used_clusters(seq, ctx, bs_load_write_used_clusters_cpl);
4613 }
4614 
4615 static void
4616 bs_load_write_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4617 {
4618 	struct spdk_bs_load_ctx	*ctx = cb_arg;
4619 
4620 	spdk_free(ctx->mask);
4621 	ctx->mask = NULL;
4622 
4623 	if (bserrno != 0) {
4624 		bs_load_ctx_fail(ctx, bserrno);
4625 		return;
4626 	}
4627 
4628 	bs_write_used_blobids(seq, ctx, bs_load_write_used_blobids_cpl);
4629 }
4630 
4631 static void
4632 bs_load_write_used_md(struct spdk_bs_load_ctx *ctx)
4633 {
4634 	bs_write_used_md(ctx->seq, ctx, bs_load_write_used_pages_cpl);
4635 }
4636 
4637 static void
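/*
 * Called when a whole md page chain has been replayed.  Once every chain has
 * been visited, the recovered state is persisted in order: used md pages,
 * then used blobids, then used clusters, finishing with bs_load_complete().
 */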
4638 bs_load_replay_md_chain_cpl(struct spdk_bs_load_ctx *ctx)
4639 {
4640 	uint64_t num_md_clusters;
4641 	uint64_t i;
4642 
4643 	ctx->in_page_chain = false;
4644 
4645 	do {
4646 		ctx->page_index++;
4647 	} while (spdk_bit_array_get(ctx->bs->used_md_pages, ctx->page_index) == true);
4648 
4649 	if (ctx->page_index < ctx->super->md_len) {
4650 		ctx->cur_page = ctx->page_index;
4651 		bs_load_replay_cur_md_page(ctx);
4652 	} else {
4653 		/* Claim all of the clusters used by the metadata */
4654 		num_md_clusters = spdk_divide_round_up(
4655 					  ctx->super->md_start + ctx->super->md_len, ctx->bs->pages_per_cluster);
4656 		for (i = 0; i < num_md_clusters; i++) {
4657 			spdk_bit_array_set(ctx->used_clusters, i);
4658 		}
4659 		ctx->bs->num_free_clusters -= num_md_clusters;
4660 		spdk_free(ctx->page);
4661 		bs_load_write_used_md(ctx);
4662 	}
4663 }
4664 
4665 static void
4666 bs_load_replay_extent_page_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4667 {
4668 	struct spdk_bs_load_ctx *ctx = cb_arg;
4669 	uint32_t page_num;
4670 	uint64_t i;
4671 
4672 	if (bserrno != 0) {
4673 		spdk_free(ctx->extent_pages);
4674 		bs_load_ctx_fail(ctx, bserrno);
4675 		return;
4676 	}
4677 
4678 	for (i = 0; i < ctx->num_extent_pages; i++) {
4679 		/* Extent pages are only read when present within the md page chain.
4680 		 * The md integrity is broken if such a page is not a valid extent page. */
4681 		if (bs_load_cur_extent_page_valid(&ctx->extent_pages[i]) != true) {
4682 			spdk_free(ctx->extent_pages);
4683 			bs_load_ctx_fail(ctx, -EILSEQ);
4684 			return;
4685 		}
4686 
4687 		page_num = ctx->extent_page_num[i];
4688 		spdk_bit_array_set(ctx->bs->used_md_pages, page_num);
4689 		if (bs_load_replay_md_parse_page(ctx, &ctx->extent_pages[i])) {
4690 			spdk_free(ctx->extent_pages);
4691 			bs_load_ctx_fail(ctx, -EILSEQ);
4692 			return;
4693 		}
4694 	}
4695 
4696 	spdk_free(ctx->extent_pages);
4697 	free(ctx->extent_page_num);
4698 	ctx->extent_page_num = NULL;
4699 	ctx->num_extent_pages = 0;
4700 
4701 	bs_load_replay_md_chain_cpl(ctx);
4702 }
4703 
4704 static void
4705 bs_load_replay_extent_pages(struct spdk_bs_load_ctx *ctx)
4706 {
4707 	spdk_bs_batch_t *batch;
4708 	uint32_t page;
4709 	uint64_t lba;
4710 	uint64_t i;
4711 
4712 	ctx->extent_pages = spdk_zmalloc(SPDK_BS_PAGE_SIZE * ctx->num_extent_pages, 0,
4713 					 NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
4714 	if (!ctx->extent_pages) {
4715 		bs_load_ctx_fail(ctx, -ENOMEM);
4716 		return;
4717 	}
4718 
4719 	batch = bs_sequence_to_batch(ctx->seq, bs_load_replay_extent_page_cpl, ctx);
4720 
4721 	for (i = 0; i < ctx->num_extent_pages; i++) {
4722 		page = ctx->extent_page_num[i];
4723 		assert(page < ctx->super->md_len);
4724 		lba = bs_md_page_to_lba(ctx->bs, page);
4725 		bs_batch_read_dev(batch, &ctx->extent_pages[i], lba,
4726 				  bs_byte_to_lba(ctx->bs, SPDK_BS_PAGE_SIZE));
4727 	}
4728 
4729 	bs_batch_close(batch);
4730 }
4731 
4732 static void
4733 bs_load_replay_md_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4734 {
4735 	struct spdk_bs_load_ctx *ctx = cb_arg;
4736 	uint32_t page_num;
4737 	struct spdk_blob_md_page *page;
4738 
4739 	if (bserrno != 0) {
4740 		bs_load_ctx_fail(ctx, bserrno);
4741 		return;
4742 	}
4743 
4744 	page_num = ctx->cur_page;
4745 	page = ctx->page;
4746 	if (bs_load_cur_md_page_valid(ctx) == true) {
4747 		if (page->sequence_num == 0 || ctx->in_page_chain == true) {
4748 			spdk_spin_lock(&ctx->bs->used_lock);
4749 			bs_claim_md_page(ctx->bs, page_num);
4750 			spdk_spin_unlock(&ctx->bs->used_lock);
4751 			if (page->sequence_num == 0) {
4752 				SPDK_NOTICELOG("Recover: blob 0x%" PRIx32 "\n", page_num);
4753 				spdk_bit_array_set(ctx->bs->used_blobids, page_num);
4754 			}
4755 			if (bs_load_replay_md_parse_page(ctx, page)) {
4756 				bs_load_ctx_fail(ctx, -EILSEQ);
4757 				return;
4758 			}
4759 			if (page->next != SPDK_INVALID_MD_PAGE) {
4760 				ctx->in_page_chain = true;
4761 				ctx->cur_page = page->next;
4762 				bs_load_replay_cur_md_page(ctx);
4763 				return;
4764 			}
4765 			if (ctx->num_extent_pages != 0) {
4766 				bs_load_replay_extent_pages(ctx);
4767 				return;
4768 			}
4769 		}
4770 	}
4771 	bs_load_replay_md_chain_cpl(ctx);
4772 }
4773 
4774 static void
4775 bs_load_replay_cur_md_page(struct spdk_bs_load_ctx *ctx)
4776 {
4777 	uint64_t lba;
4778 
4779 	assert(ctx->cur_page < ctx->super->md_len);
4780 	lba = bs_md_page_to_lba(ctx->bs, ctx->cur_page);
4781 	bs_sequence_read_dev(ctx->seq, ctx->page, lba,
4782 			     bs_byte_to_lba(ctx->bs, SPDK_BS_PAGE_SIZE),
4783 			     bs_load_replay_md_cpl, ctx);
4784 }
4785 
4786 static void
4787 bs_load_replay_md(struct spdk_bs_load_ctx *ctx)
4788 {
4789 	ctx->page_index = 0;
4790 	ctx->cur_page = 0;
4791 	ctx->page = spdk_zmalloc(SPDK_BS_PAGE_SIZE, 0,
4792 				 NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
4793 	if (!ctx->page) {
4794 		bs_load_ctx_fail(ctx, -ENOMEM);
4795 		return;
4796 	}
4797 	bs_load_replay_cur_md_page(ctx);
4798 }
4799 
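/*
 * Dirty shutdown (or forced) recovery: resize the in-memory bitmaps to match
 * the super block, assume every cluster is free, then rebuild the used
 * md-page, blobid and cluster state by replaying all valid md page chains.
 */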
4800 static void
4801 bs_recover(struct spdk_bs_load_ctx *ctx)
4802 {
4803 	int		rc;
4804 
4805 	SPDK_NOTICELOG("Performing recovery on blobstore\n");
4806 	rc = spdk_bit_array_resize(&ctx->bs->used_md_pages, ctx->super->md_len);
4807 	if (rc < 0) {
4808 		bs_load_ctx_fail(ctx, -ENOMEM);
4809 		return;
4810 	}
4811 
4812 	rc = spdk_bit_array_resize(&ctx->bs->used_blobids, ctx->super->md_len);
4813 	if (rc < 0) {
4814 		bs_load_ctx_fail(ctx, -ENOMEM);
4815 		return;
4816 	}
4817 
4818 	rc = spdk_bit_array_resize(&ctx->used_clusters, ctx->bs->total_clusters);
4819 	if (rc < 0) {
4820 		bs_load_ctx_fail(ctx, -ENOMEM);
4821 		return;
4822 	}
4823 
4824 	rc = spdk_bit_array_resize(&ctx->bs->open_blobids, ctx->super->md_len);
4825 	if (rc < 0) {
4826 		bs_load_ctx_fail(ctx, -ENOMEM);
4827 		return;
4828 	}
4829 
4830 	ctx->bs->num_free_clusters = ctx->bs->total_clusters;
4831 	bs_load_replay_md(ctx);
4832 }
4833 
4834 static int
4835 bs_parse_super(struct spdk_bs_load_ctx *ctx)
4836 {
4837 	int rc;
4838 
4839 	if (ctx->super->size == 0) {
4840 		ctx->super->size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen;
4841 	}
4842 
4843 	if (ctx->super->io_unit_size == 0) {
4844 		ctx->super->io_unit_size = SPDK_BS_PAGE_SIZE;
4845 	}
4846 
4847 	ctx->bs->clean = 1;
4848 	ctx->bs->cluster_sz = ctx->super->cluster_size;
4849 	ctx->bs->total_clusters = ctx->super->size / ctx->super->cluster_size;
4850 	ctx->bs->pages_per_cluster = ctx->bs->cluster_sz / SPDK_BS_PAGE_SIZE;
4851 	if (spdk_u32_is_pow2(ctx->bs->pages_per_cluster)) {
4852 		ctx->bs->pages_per_cluster_shift = spdk_u32log2(ctx->bs->pages_per_cluster);
4853 	}
4854 	ctx->bs->io_unit_size = ctx->super->io_unit_size;
4855 	rc = spdk_bit_array_resize(&ctx->used_clusters, ctx->bs->total_clusters);
4856 	if (rc < 0) {
4857 		return -ENOMEM;
4858 	}
4859 	ctx->bs->md_start = ctx->super->md_start;
4860 	ctx->bs->md_len = ctx->super->md_len;
4861 	rc = spdk_bit_array_resize(&ctx->bs->open_blobids, ctx->bs->md_len);
4862 	if (rc < 0) {
4863 		return -ENOMEM;
4864 	}
4865 
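	/*
	 * For example, with 256 pages per cluster and md_start + md_len = 1024
	 * pages, the metadata region occupies 4 clusters, which are subtracted
	 * from total_clusters here.
	 */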
4866 	ctx->bs->total_data_clusters = ctx->bs->total_clusters - spdk_divide_round_up(
4867 					       ctx->bs->md_start + ctx->bs->md_len, ctx->bs->pages_per_cluster);
4868 	ctx->bs->super_blob = ctx->super->super_blob;
4869 	memcpy(&ctx->bs->bstype, &ctx->super->bstype, sizeof(ctx->super->bstype));
4870 
4871 	return 0;
4872 }
4873 
4874 static void
4875 bs_load_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4876 {
4877 	struct spdk_bs_load_ctx *ctx = cb_arg;
4878 	int rc;
4879 
4880 	rc = bs_super_validate(ctx->super, ctx->bs);
4881 	if (rc != 0) {
4882 		bs_load_ctx_fail(ctx, rc);
4883 		return;
4884 	}
4885 
4886 	rc = bs_parse_super(ctx);
4887 	if (rc < 0) {
4888 		bs_load_ctx_fail(ctx, rc);
4889 		return;
4890 	}
4891 
4892 	if (ctx->super->used_blobid_mask_len == 0 || ctx->super->clean == 0 || ctx->force_recover) {
4893 		bs_recover(ctx);
4894 	} else {
4895 		bs_load_read_used_pages(ctx);
4896 	}
4897 }
4898 
4899 static inline int
4900 bs_opts_copy(struct spdk_bs_opts *src, struct spdk_bs_opts *dst)
4901 {
4902 
4903 	if (!src->opts_size) {
4904 		SPDK_ERRLOG("opts_size should not be zero value\n");
4905 		SPDK_ERRLOG("opts_size should not be zero\n");
4906 	}
4907 
4908 #define FIELD_OK(field) \
4909 	offsetof(struct spdk_bs_opts, field) + sizeof(src->field) <= src->opts_size
4910 
4911 #define SET_FIELD(field) \
4912 	if (FIELD_OK(field)) { \
4913 		dst->field = src->field; \
4914 	} \
4915 
4916 	SET_FIELD(cluster_sz);
4917 	SET_FIELD(num_md_pages);
4918 	SET_FIELD(max_md_ops);
4919 	SET_FIELD(max_channel_ops);
4920 	SET_FIELD(clear_method);
4921 
4922 	if (FIELD_OK(bstype)) {
4923 		memcpy(&dst->bstype, &src->bstype, sizeof(dst->bstype));
4924 	}
4925 	SET_FIELD(iter_cb_fn);
4926 	SET_FIELD(iter_cb_arg);
4927 	SET_FIELD(force_recover);
4928 	SET_FIELD(esnap_bs_dev_create);
4929 	SET_FIELD(esnap_ctx);
4930 
4931 	dst->opts_size = src->opts_size;
4932 
4933 	/* Do not remove this statement. If you add a new field, update the assert
4934 	 * below and add a corresponding SET_FIELD statement above. */
4935 	SPDK_STATIC_ASSERT(sizeof(struct spdk_bs_opts) == 88, "Incorrect size");
4936 
4937 #undef FIELD_OK
4938 #undef SET_FIELD
4939 
4940 	return 0;
4941 }
4942 
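/*
 * Example usage (a minimal sketch; "dev" would come from an spdk_bs_dev
 * provider such as spdk_bdev_create_bs_dev_ext(), and load_done is a
 * caller-supplied placeholder):
 *
 *	static void
 *	load_done(void *cb_arg, struct spdk_blob_store *bs, int bserrno)
 *	{
 *		if (bserrno != 0) {
 *			SPDK_ERRLOG("Blobstore load failed: %d\n", bserrno);
 *			return;
 *		}
 *		// bs is now ready for spdk_bs_open_blob() and friends.
 *	}
 *
 *	spdk_bs_load(dev, NULL, load_done, NULL);
 */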
4943 void
4944 spdk_bs_load(struct spdk_bs_dev *dev, struct spdk_bs_opts *o,
4945 	     spdk_bs_op_with_handle_complete cb_fn, void *cb_arg)
4946 {
4947 	struct spdk_blob_store	*bs;
4948 	struct spdk_bs_cpl	cpl;
4949 	struct spdk_bs_load_ctx *ctx;
4950 	struct spdk_bs_opts	opts = {};
4951 	int err;
4952 
4953 	SPDK_DEBUGLOG(blob, "Loading blobstore from dev %p\n", dev);
4954 
4955 	if ((SPDK_BS_PAGE_SIZE % dev->blocklen) != 0) {
4956 		SPDK_DEBUGLOG(blob, "unsupported dev block length of %d\n", dev->blocklen);
4957 		dev->destroy(dev);
4958 		cb_fn(cb_arg, NULL, -EINVAL);
4959 		return;
4960 	}
4961 
4962 	spdk_bs_opts_init(&opts, sizeof(opts));
4963 	if (o) {
4964 		if (bs_opts_copy(o, &opts)) {
			dev->destroy(dev);
			cb_fn(cb_arg, NULL, -EINVAL);
4965 			return;
4966 		}
4967 	}
4968 
4969 	if (opts.max_md_ops == 0 || opts.max_channel_ops == 0) {
4970 		dev->destroy(dev);
4971 		cb_fn(cb_arg, NULL, -EINVAL);
4972 		return;
4973 	}
4974 
4975 	err = bs_alloc(dev, &opts, &bs, &ctx);
4976 	if (err) {
4977 		dev->destroy(dev);
4978 		cb_fn(cb_arg, NULL, err);
4979 		return;
4980 	}
4981 
4982 	cpl.type = SPDK_BS_CPL_TYPE_BS_HANDLE;
4983 	cpl.u.bs_handle.cb_fn = cb_fn;
4984 	cpl.u.bs_handle.cb_arg = cb_arg;
4985 	cpl.u.bs_handle.bs = bs;
4986 
4987 	ctx->seq = bs_sequence_start_bs(bs->md_channel, &cpl);
4988 	if (!ctx->seq) {
4989 		spdk_free(ctx->super);
4990 		free(ctx);
4991 		bs_free(bs);
4992 		cb_fn(cb_arg, NULL, -ENOMEM);
4993 		return;
4994 	}
4995 
4996 	/* Read the super block */
4997 	bs_sequence_read_dev(ctx->seq, ctx->super, bs_page_to_lba(bs, 0),
4998 			     bs_byte_to_lba(bs, sizeof(*ctx->super)),
4999 			     bs_load_super_cpl, ctx);
5000 }
5001 
5002 /* END spdk_bs_load */
5003 
5004 /* START spdk_bs_dump */
5005 
5006 static void
5007 bs_dump_finish(spdk_bs_sequence_t *seq, struct spdk_bs_load_ctx *ctx, int bserrno)
5008 {
5009 	spdk_free(ctx->super);
5010 
5011 	/*
5012 	 * We need to defer calling bs_call_cpl() until after
5013 	 * dev destruction, so tuck these away for later use.
5014 	 */
5015 	ctx->bs->unload_err = bserrno;
5016 	memcpy(&ctx->bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl));
5017 	seq->cpl.type = SPDK_BS_CPL_TYPE_NONE;
5018 
5019 	bs_sequence_finish(seq, 0);
5020 	bs_free(ctx->bs);
5021 	free(ctx);
5022 }
5023 
5024 static void
5025 bs_dump_print_xattr(struct spdk_bs_load_ctx *ctx, struct spdk_blob_md_descriptor *desc)
5026 {
5027 	struct spdk_blob_md_descriptor_xattr *desc_xattr;
5028 	uint32_t i;
5029 	const char *type;
5030 
5031 	desc_xattr = (struct spdk_blob_md_descriptor_xattr *)desc;
5032 
5033 	if (desc_xattr->length !=
5034 	    sizeof(desc_xattr->name_length) + sizeof(desc_xattr->value_length) +
5035 	    desc_xattr->name_length + desc_xattr->value_length) {
		fprintf(ctx->fp, "Warning: xattr descriptor length mismatch\n");
5036 	}
5037 
5038 	memcpy(ctx->xattr_name, desc_xattr->name, desc_xattr->name_length);
5039 	ctx->xattr_name[desc_xattr->name_length] = '\0';
5040 	if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) {
5041 		type = "XATTR";
5042 	} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) {
5043 		type = "XATTR_INTERNAL";
5044 	} else {
5045 		assert(false);
5046 		type = "XATTR_?";
5047 	}
5048 	fprintf(ctx->fp, "%s: name = \"%s\"\n", type, ctx->xattr_name);
5049 	fprintf(ctx->fp, "       value = \"");
5050 	ctx->print_xattr_fn(ctx->fp, ctx->super->bstype.bstype, ctx->xattr_name,
5051 			    (void *)((uintptr_t)desc_xattr->name + desc_xattr->name_length),
5052 			    desc_xattr->value_length);
5053 	fprintf(ctx->fp, "\"\n");
5054 	for (i = 0; i < desc_xattr->value_length; i++) {
5055 		if (i % 16 == 0) {
5056 			fprintf(ctx->fp, "               ");
5057 		}
5058 		fprintf(ctx->fp, "%02" PRIx8 " ", *((uint8_t *)desc_xattr->name + desc_xattr->name_length + i));
5059 		if ((i + 1) % 16 == 0) {
5060 			fprintf(ctx->fp, "\n");
5061 		}
5062 	}
5063 	if (i % 16 != 0) {
5064 		fprintf(ctx->fp, "\n");
5065 	}
5066 }
5067 
5068 struct type_flag_desc {
5069 	uint64_t mask;
5070 	uint64_t val;
5071 	const char *name;
5072 };
5073 
5074 static void
5075 bs_dump_print_type_bits(struct spdk_bs_load_ctx *ctx, uint64_t flags,
5076 			struct type_flag_desc *desc, size_t numflags)
5077 {
5078 	uint64_t covered = 0;
5079 	size_t i;
5080 
5081 	for (i = 0; i < numflags; i++) {
5082 		if ((desc[i].mask & flags) != desc[i].val) {
5083 			continue;
5084 		}
5085 		fprintf(ctx->fp, "\t\t 0x%016" PRIx64 " %s", desc[i].val, desc[i].name);
5086 		if (desc[i].mask != desc[i].val) {
5087 			fprintf(ctx->fp, " (mask 0x%" PRIx64 " value 0x%" PRIx64 ")",
5088 				desc[i].mask, desc[i].val);
5089 		}
5090 		fprintf(ctx->fp, "\n");
5091 		covered |= desc[i].mask;
5092 	}
5093 	if ((flags & ~covered) != 0) {
5094 		fprintf(ctx->fp, "\t\t 0x%016" PRIx64 " Unknown\n", flags & ~covered);
5095 	}
5096 }
5097 
5098 static void
5099 bs_dump_print_type_flags(struct spdk_bs_load_ctx *ctx, struct spdk_blob_md_descriptor *desc)
5100 {
5101 	struct spdk_blob_md_descriptor_flags *type_desc;
5102 #define ADD_FLAG(f) { f, f, #f }
5103 #define ADD_MASK_VAL(m, v) { m, v, #v }
5104 	static struct type_flag_desc invalid[] = {
5105 		ADD_FLAG(SPDK_BLOB_THIN_PROV),
5106 		ADD_FLAG(SPDK_BLOB_INTERNAL_XATTR),
5107 		ADD_FLAG(SPDK_BLOB_EXTENT_TABLE),
5108 	};
5109 	static struct type_flag_desc data_ro[] = {
5110 		ADD_FLAG(SPDK_BLOB_READ_ONLY),
5111 	};
5112 	static struct type_flag_desc md_ro[] = {
5113 		ADD_MASK_VAL(SPDK_BLOB_MD_RO_FLAGS_MASK, BLOB_CLEAR_WITH_DEFAULT),
5114 		ADD_MASK_VAL(SPDK_BLOB_MD_RO_FLAGS_MASK, BLOB_CLEAR_WITH_NONE),
5115 		ADD_MASK_VAL(SPDK_BLOB_MD_RO_FLAGS_MASK, BLOB_CLEAR_WITH_UNMAP),
5116 		ADD_MASK_VAL(SPDK_BLOB_MD_RO_FLAGS_MASK, BLOB_CLEAR_WITH_WRITE_ZEROES),
5117 	};
5118 #undef ADD_FLAG
5119 #undef ADD_MASK_VAL
5120 
5121 	type_desc = (struct spdk_blob_md_descriptor_flags *)desc;
5122 	fprintf(ctx->fp, "Flags:\n");
5123 	fprintf(ctx->fp, "\tinvalid: 0x%016" PRIx64 "\n", type_desc->invalid_flags);
5124 	bs_dump_print_type_bits(ctx, type_desc->invalid_flags, invalid,
5125 				SPDK_COUNTOF(invalid));
5126 	fprintf(ctx->fp, "\tdata_ro: 0x%016" PRIx64 "\n", type_desc->data_ro_flags);
5127 	bs_dump_print_type_bits(ctx, type_desc->data_ro_flags, data_ro,
5128 				SPDK_COUNTOF(data_ro));
5129 	fprintf(ctx->fp, "\t  md_ro: 0x%016" PRIx64 "\n", type_desc->md_ro_flags);
5130 	bs_dump_print_type_bits(ctx, type_desc->md_ro_flags, md_ro,
5131 				SPDK_COUNTOF(md_ro));
5132 }
5133 
5134 static void
5135 bs_dump_print_extent_table(struct spdk_bs_load_ctx *ctx, struct spdk_blob_md_descriptor *desc)
5136 {
5137 	struct spdk_blob_md_descriptor_extent_table *et_desc;
5138 	uint64_t num_extent_pages;
5139 	uint32_t et_idx;
5140 
5141 	et_desc = (struct spdk_blob_md_descriptor_extent_table *)desc;
5142 	num_extent_pages = (et_desc->length - sizeof(et_desc->num_clusters)) /
5143 			   sizeof(et_desc->extent_page[0]);
5144 
5145 	fprintf(ctx->fp, "Extent table:\n");
5146 	for (et_idx = 0; et_idx < num_extent_pages; et_idx++) {
5147 		if (et_desc->extent_page[et_idx].page_idx == 0) {
5148 			/* Zeroes represent unallocated extent pages. */
5149 			continue;
5150 		}
5151 		fprintf(ctx->fp, "\tExtent page: %5" PRIu32 " length %3" PRIu32
5152 			" at LBA %" PRIu64 "\n", et_desc->extent_page[et_idx].page_idx,
5153 			et_desc->extent_page[et_idx].num_pages,
5154 			bs_md_page_to_lba(ctx->bs, et_desc->extent_page[et_idx].page_idx));
5155 	}
5156 }
5157 
5158 static void
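/*
 * Print one md page of the dump.  bs_dump_read_md_page() walks every page in
 * the metadata region; only pages whose blob id is nonzero are printed, and
 * descriptors are decoded with the same layout rules as the replay path.
 */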
5159 bs_dump_print_md_page(struct spdk_bs_load_ctx *ctx)
5160 {
5161 	uint32_t page_idx = ctx->cur_page;
5162 	struct spdk_blob_md_page *page = ctx->page;
5163 	struct spdk_blob_md_descriptor *desc;
5164 	size_t cur_desc = 0;
5165 	uint32_t crc;
5166 
5167 	fprintf(ctx->fp, "=========\n");
5168 	fprintf(ctx->fp, "Metadata Page Index: %" PRIu32 " (0x%" PRIx32 ")\n", page_idx, page_idx);
5169 	fprintf(ctx->fp, "Start LBA: %" PRIu64 "\n", bs_md_page_to_lba(ctx->bs, page_idx));
5170 	fprintf(ctx->fp, "Blob ID: 0x%" PRIx64 "\n", page->id);
5171 	fprintf(ctx->fp, "Sequence: %" PRIu32 "\n", page->sequence_num);
5172 	if (page->next == SPDK_INVALID_MD_PAGE) {
5173 		fprintf(ctx->fp, "Next: None\n");
5174 	} else {
5175 		fprintf(ctx->fp, "Next: %" PRIu32 "\n", page->next);
5176 	}
5177 	fprintf(ctx->fp, "In used bit array%s:", ctx->super->clean ? "" : " (not clean: dubious)");
5178 	if (spdk_bit_array_get(ctx->bs->used_md_pages, page_idx)) {
5179 		fprintf(ctx->fp, " md");
5180 	}
5181 	if (spdk_bit_array_get(ctx->bs->used_blobids, page_idx)) {
5182 		fprintf(ctx->fp, " blob");
5183 	}
5184 	fprintf(ctx->fp, "\n");
5185 
5186 	crc = blob_md_page_calc_crc(page);
5187 	fprintf(ctx->fp, "CRC: 0x%" PRIx32 " (%s)\n", page->crc, crc == page->crc ? "OK" : "Mismatch");
5188 
5189 	desc = (struct spdk_blob_md_descriptor *)page->descriptors;
5190 	while (cur_desc < sizeof(page->descriptors)) {
5191 		if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) {
5192 			if (desc->length == 0) {
5193 				/* If padding and length are 0, this terminates the page */
5194 				break;
5195 			}
5196 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE) {
5197 			struct spdk_blob_md_descriptor_extent_rle	*desc_extent_rle;
5198 			unsigned int				i;
5199 
5200 			desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)desc;
5201 
5202 			for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) {
5203 				if (desc_extent_rle->extents[i].cluster_idx != 0) {
5204 					fprintf(ctx->fp, "Allocated Extent - Start: %" PRIu32,
5205 						desc_extent_rle->extents[i].cluster_idx);
5206 				} else {
5207 					fprintf(ctx->fp, "Unallocated Extent - ");
5208 				}
5209 				fprintf(ctx->fp, " Length: %" PRIu32, desc_extent_rle->extents[i].length);
5210 				fprintf(ctx->fp, "\n");
5211 			}
5212 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) {
5213 			struct spdk_blob_md_descriptor_extent_page	*desc_extent;
5214 			unsigned int					i;
5215 
5216 			desc_extent = (struct spdk_blob_md_descriptor_extent_page *)desc;
5217 
5218 			for (i = 0; i < desc_extent->length / sizeof(desc_extent->cluster_idx[0]); i++) {
5219 				if (desc_extent->cluster_idx[i] != 0) {
5220 					fprintf(ctx->fp, "Allocated Extent - Start: %" PRIu32,
5221 						desc_extent->cluster_idx[i]);
5222 				} else {
5223 					fprintf(ctx->fp, "Unallocated Extent");
5224 				}
5225 				fprintf(ctx->fp, "\n");
5226 			}
5227 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) {
5228 			bs_dump_print_xattr(ctx, desc);
5229 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) {
5230 			bs_dump_print_xattr(ctx, desc);
5231 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) {
5232 			bs_dump_print_type_flags(ctx, desc);
5233 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE) {
5234 			bs_dump_print_extent_table(ctx, desc);
5235 		} else {
5236 			/* Error */
5237 			fprintf(ctx->fp, "Unknown descriptor type %" PRIu8 "\n", desc->type);
5238 		}
5239 		/* Advance to the next descriptor */
5240 		cur_desc += sizeof(*desc) + desc->length;
5241 		if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) {
5242 			break;
5243 		}
5244 		desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc);
5245 	}
5246 }
5247 
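/*
 * Descriptors are packed back to back in page->descriptors: each one is a
 * fixed header (type and length) followed by 'length' bytes of payload. A
 * PADDING descriptor with length 0 terminates the page early, and the walk
 * also stops once the remaining space cannot hold another header. A minimal
 * iteration sketch using the same idiom as the dump loop above ('visit' is
 * a hypothetical per-descriptor callback, not part of this file):
 *
 *	static void
 *	md_page_for_each_desc(struct spdk_blob_md_page *page,
 *			      void (*visit)(struct spdk_blob_md_descriptor *))
 *	{
 *		struct spdk_blob_md_descriptor *desc;
 *		size_t cur_desc = 0;
 *
 *		desc = (struct spdk_blob_md_descriptor *)page->descriptors;
 *		while (cur_desc < sizeof(page->descriptors)) {
 *			if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING && desc->length == 0) {
 *				break;
 *			}
 *			visit(desc);
 *			cur_desc += sizeof(*desc) + desc->length;
 *			if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) {
 *				break;
 *			}
 *			desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc);
 *		}
 *	}
 */
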
5248 static void
5249 bs_dump_read_md_page_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
5250 {
5251 	struct spdk_bs_load_ctx *ctx = cb_arg;
5252 
5253 	if (bserrno != 0) {
5254 		bs_dump_finish(seq, ctx, bserrno);
5255 		return;
5256 	}
5257 
5258 	if (ctx->page->id != 0) {
5259 		bs_dump_print_md_page(ctx);
5260 	}
5261 
5262 	ctx->cur_page++;
5263 
5264 	if (ctx->cur_page < ctx->super->md_len) {
5265 		bs_dump_read_md_page(seq, ctx);
5266 	} else {
5267 		spdk_free(ctx->page);
5268 		bs_dump_finish(seq, ctx, 0);
5269 	}
5270 }
5271 
5272 static void
5273 bs_dump_read_md_page(spdk_bs_sequence_t *seq, void *cb_arg)
5274 {
5275 	struct spdk_bs_load_ctx *ctx = cb_arg;
5276 	uint64_t lba;
5277 
5278 	assert(ctx->cur_page < ctx->super->md_len);
5279 	lba = bs_page_to_lba(ctx->bs, ctx->super->md_start + ctx->cur_page);
5280 	bs_sequence_read_dev(seq, ctx->page, lba,
5281 			     bs_byte_to_lba(ctx->bs, SPDK_BS_PAGE_SIZE),
5282 			     bs_dump_read_md_page_cpl, ctx);
5283 }
5284 
5285 static void
5286 bs_dump_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
5287 {
5288 	struct spdk_bs_load_ctx *ctx = cb_arg;
5289 	int rc;
5290 
5291 	fprintf(ctx->fp, "Signature: \"%.8s\" ", ctx->super->signature);
5292 	if (memcmp(ctx->super->signature, SPDK_BS_SUPER_BLOCK_SIG,
5293 		   sizeof(ctx->super->signature)) != 0) {
5294 		fprintf(ctx->fp, "(Mismatch)\n");
5295 		bs_dump_finish(seq, ctx, bserrno);
5296 		return;
5297 	} else {
5298 		fprintf(ctx->fp, "(OK)\n");
5299 	}
5300 	fprintf(ctx->fp, "Version: %" PRIu32 "\n", ctx->super->version);
5301 	fprintf(ctx->fp, "CRC: 0x%x (%s)\n", ctx->super->crc,
5302 		(ctx->super->crc == blob_md_page_calc_crc(ctx->super)) ? "OK" : "Mismatch");
5303 	fprintf(ctx->fp, "Blobstore Type: %.*s\n", SPDK_BLOBSTORE_TYPE_LENGTH, ctx->super->bstype.bstype);
5304 	fprintf(ctx->fp, "Cluster Size: %" PRIu32 "\n", ctx->super->cluster_size);
5305 	fprintf(ctx->fp, "Super Blob ID: ");
5306 	if (ctx->super->super_blob == SPDK_BLOBID_INVALID) {
5307 		fprintf(ctx->fp, "(None)\n");
5308 	} else {
5309 		fprintf(ctx->fp, "0x%" PRIx64 "\n", ctx->super->super_blob);
5310 	}
5311 	fprintf(ctx->fp, "Clean: %" PRIu32 "\n", ctx->super->clean);
5312 	fprintf(ctx->fp, "Used Metadata Page Mask Start: %" PRIu32 "\n", ctx->super->used_page_mask_start);
5313 	fprintf(ctx->fp, "Used Metadata Page Mask Length: %" PRIu32 "\n", ctx->super->used_page_mask_len);
5314 	fprintf(ctx->fp, "Used Cluster Mask Start: %" PRIu32 "\n", ctx->super->used_cluster_mask_start);
5315 	fprintf(ctx->fp, "Used Cluster Mask Length: %" PRIu32 "\n", ctx->super->used_cluster_mask_len);
5316 	fprintf(ctx->fp, "Used Blob ID Mask Start: %" PRIu32 "\n", ctx->super->used_blobid_mask_start);
5317 	fprintf(ctx->fp, "Used Blob ID Mask Length: %" PRIu32 "\n", ctx->super->used_blobid_mask_len);
5318 	fprintf(ctx->fp, "Metadata Start: %" PRIu32 "\n", ctx->super->md_start);
5319 	fprintf(ctx->fp, "Metadata Length: %" PRIu32 "\n", ctx->super->md_len);
5320 
5321 	ctx->cur_page = 0;
5322 	ctx->page = spdk_zmalloc(SPDK_BS_PAGE_SIZE, 0,
5323 				 NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
5324 	if (!ctx->page) {
5325 		bs_dump_finish(seq, ctx, -ENOMEM);
5326 		return;
5327 	}
5328 
5329 	rc = bs_parse_super(ctx);
5330 	if (rc < 0) {
5331 		bs_load_ctx_fail(ctx, rc);
5332 		return;
5333 	}
5334 
5335 	bs_load_read_used_pages(ctx);
5336 }
5337 
5338 void
5339 spdk_bs_dump(struct spdk_bs_dev *dev, FILE *fp, spdk_bs_dump_print_xattr print_xattr_fn,
5340 	     spdk_bs_op_complete cb_fn, void *cb_arg)
5341 {
5342 	struct spdk_blob_store	*bs;
5343 	struct spdk_bs_cpl	cpl;
5344 	struct spdk_bs_load_ctx *ctx;
5345 	struct spdk_bs_opts	opts = {};
5346 	int err;
5347 
5348 	SPDK_DEBUGLOG(blob, "Dumping blobstore from dev %p\n", dev);
5349 
5350 	spdk_bs_opts_init(&opts, sizeof(opts));
5351 
5352 	err = bs_alloc(dev, &opts, &bs, &ctx);
5353 	if (err) {
5354 		dev->destroy(dev);
5355 		cb_fn(cb_arg, err);
5356 		return;
5357 	}
5358 
5359 	ctx->dumping = true;
5360 	ctx->fp = fp;
5361 	ctx->print_xattr_fn = print_xattr_fn;
5362 
5363 	cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
5364 	cpl.u.bs_basic.cb_fn = cb_fn;
5365 	cpl.u.bs_basic.cb_arg = cb_arg;
5366 
5367 	ctx->seq = bs_sequence_start_bs(bs->md_channel, &cpl);
5368 	if (!ctx->seq) {
5369 		spdk_free(ctx->super);
5370 		free(ctx);
5371 		bs_free(bs);
5372 		cb_fn(cb_arg, -ENOMEM);
5373 		return;
5374 	}
5375 
5376 	/* Read the super block */
5377 	bs_sequence_read_dev(ctx->seq, ctx->super, bs_page_to_lba(bs, 0),
5378 			     bs_byte_to_lba(bs, sizeof(*ctx->super)),
5379 			     bs_dump_super_cpl, ctx);
5380 }
5381 
5382 /* END spdk_bs_dump */
5383 
5384 /* START spdk_bs_init */
5385 
5386 static void
5387 bs_init_persist_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
5388 {
5389 	struct spdk_bs_load_ctx *ctx = cb_arg;
5390 
5391 	ctx->bs->used_clusters = spdk_bit_pool_create_from_array(ctx->used_clusters);
5392 	spdk_free(ctx->super);
5393 	free(ctx);
5394 
5395 	bs_sequence_finish(seq, bserrno);
5396 }
5397 
5398 static void
5399 bs_init_trim_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
5400 {
5401 	struct spdk_bs_load_ctx *ctx = cb_arg;
5402 
5403 	/* Write super block */
5404 	bs_sequence_write_dev(seq, ctx->super, bs_page_to_lba(ctx->bs, 0),
5405 			      bs_byte_to_lba(ctx->bs, sizeof(*ctx->super)),
5406 			      bs_init_persist_super_cpl, ctx);
5407 }
5408 
5409 void
5410 spdk_bs_init(struct spdk_bs_dev *dev, struct spdk_bs_opts *o,
5411 	     spdk_bs_op_with_handle_complete cb_fn, void *cb_arg)
5412 {
5413 	struct spdk_bs_load_ctx *ctx;
5414 	struct spdk_blob_store	*bs;
5415 	struct spdk_bs_cpl	cpl;
5416 	spdk_bs_sequence_t	*seq;
5417 	spdk_bs_batch_t		*batch;
5418 	uint64_t		num_md_lba;
5419 	uint64_t		num_md_pages;
5420 	uint64_t		num_md_clusters;
5421 	uint64_t		max_used_cluster_mask_len;
5422 	uint32_t		i;
5423 	struct spdk_bs_opts	opts = {};
5424 	int			rc;
5425 	uint64_t		lba, lba_count;
5426 
5427 	SPDK_DEBUGLOG(blob, "Initializing blobstore on dev %p\n", dev);
5428 
5429 	if ((SPDK_BS_PAGE_SIZE % dev->blocklen) != 0) {
5430 		SPDK_ERRLOG("unsupported dev block length of %" PRIu32 "\n",
5431 			    dev->blocklen);
5432 		dev->destroy(dev);
5433 		cb_fn(cb_arg, NULL, -EINVAL);
5434 		return;
5435 	}
5436 
5437 	spdk_bs_opts_init(&opts, sizeof(opts));
5438 	if (o) {
5439 		if (bs_opts_copy(o, &opts)) {
5440 			dev->destroy(dev);
			cb_fn(cb_arg, NULL, -EINVAL);
			return;
5441 		}
5442 	}
5443 
5444 	if (bs_opts_verify(&opts) != 0) {
5445 		dev->destroy(dev);
5446 		cb_fn(cb_arg, NULL, -EINVAL);
5447 		return;
5448 	}
5449 
5450 	rc = bs_alloc(dev, &opts, &bs, &ctx);
5451 	if (rc) {
5452 		dev->destroy(dev);
5453 		cb_fn(cb_arg, NULL, rc);
5454 		return;
5455 	}
5456 
5457 	if (opts.num_md_pages == SPDK_BLOB_OPTS_NUM_MD_PAGES) {
5458 		/* By default, allocate 1 page per cluster.
5459 		 * Technically, this over-allocates metadata, because
5460 		 * reserving pages for metadata reduces the number of
5461 		 * usable data clusters. This can be addressed with
5462 		 * more complex math in the future.
5463 		 */
5464 		bs->md_len = bs->total_clusters;
5465 	} else {
5466 		bs->md_len = opts.num_md_pages;
5467 	}
5468 	rc = spdk_bit_array_resize(&bs->used_md_pages, bs->md_len);
5469 	if (rc < 0) {
5470 		spdk_free(ctx->super);
5471 		free(ctx);
5472 		bs_free(bs);
5473 		cb_fn(cb_arg, NULL, -ENOMEM);
5474 		return;
5475 	}
5476 
5477 	rc = spdk_bit_array_resize(&bs->used_blobids, bs->md_len);
5478 	if (rc < 0) {
5479 		spdk_free(ctx->super);
5480 		free(ctx);
5481 		bs_free(bs);
5482 		cb_fn(cb_arg, NULL, -ENOMEM);
5483 		return;
5484 	}
5485 
5486 	rc = spdk_bit_array_resize(&bs->open_blobids, bs->md_len);
5487 	if (rc < 0) {
5488 		spdk_free(ctx->super);
5489 		free(ctx);
5490 		bs_free(bs);
5491 		cb_fn(cb_arg, NULL, -ENOMEM);
5492 		return;
5493 	}
5494 
5495 	memcpy(ctx->super->signature, SPDK_BS_SUPER_BLOCK_SIG,
5496 	       sizeof(ctx->super->signature));
5497 	ctx->super->version = SPDK_BS_VERSION;
5498 	ctx->super->length = sizeof(*ctx->super);
5499 	ctx->super->super_blob = bs->super_blob;
5500 	ctx->super->clean = 0;
5501 	ctx->super->cluster_size = bs->cluster_sz;
5502 	ctx->super->io_unit_size = bs->io_unit_size;
5503 	memcpy(&ctx->super->bstype, &bs->bstype, sizeof(bs->bstype));
5504 
5505 	/* Calculate how many pages the metadata consumes at the front
5506 	 * of the disk.
5507 	 */
5508 
5509 	/* The super block uses 1 page */
5510 	num_md_pages = 1;
5511 
5512 	/* The used_md_pages mask requires 1 bit per metadata page, rounded
5513 	 * up to the nearest page, plus a header.
5514 	 */
5515 	ctx->super->used_page_mask_start = num_md_pages;
5516 	ctx->super->used_page_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) +
5517 					 spdk_divide_round_up(bs->md_len, 8),
5518 					 SPDK_BS_PAGE_SIZE);
5519 	num_md_pages += ctx->super->used_page_mask_len;
5520 
5521 	/* The used_clusters mask requires 1 bit per cluster, rounded
5522 	 * up to the nearest page, plus a header.
5523 	 */
5524 	ctx->super->used_cluster_mask_start = num_md_pages;
5525 	ctx->super->used_cluster_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) +
5526 					    spdk_divide_round_up(bs->total_clusters, 8),
5527 					    SPDK_BS_PAGE_SIZE);
5528 	/* If the blobstore is extended later, the used_cluster bitmap will need more space.
5529 	 * Calculate the maximum number of clusters we could support according to
5530 	 * num_md_pages (bs->md_len) and reserve mask space for that.
5531 	 */
5532 	max_used_cluster_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) +
5533 				    spdk_divide_round_up(bs->md_len, 8),
5534 				    SPDK_BS_PAGE_SIZE);
5535 	max_used_cluster_mask_len = spdk_max(max_used_cluster_mask_len,
5536 					     ctx->super->used_cluster_mask_len);
5537 	num_md_pages += max_used_cluster_mask_len;
5538 
5539 	/* The used_blobids mask requires 1 bit per metadata page, rounded
5540 	 * up to the nearest page, plus a header.
5541 	 */
5542 	ctx->super->used_blobid_mask_start = num_md_pages;
5543 	ctx->super->used_blobid_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) +
5544 					   spdk_divide_round_up(bs->md_len, 8),
5545 					   SPDK_BS_PAGE_SIZE);
5546 	num_md_pages += ctx->super->used_blobid_mask_len;
5547 
5548 	/* The metadata region size was chosen above */
5549 	ctx->super->md_start = bs->md_start = num_md_pages;
5550 	ctx->super->md_len = bs->md_len;
5551 	num_md_pages += bs->md_len;
5552 
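/*
 * A worked example of the layout computed above (illustrative numbers,
 * assuming the usual 4 KiB SPDK_BS_PAGE_SIZE and a struct spdk_bs_md_mask
 * header much smaller than a page): a 4 GiB device with 1 MiB clusters has
 * total_clusters = 4096, so by default md_len = 4096 pages. Each mask needs
 * ceil(4096 / 8) = 512 bytes plus the header, which rounds up to one page:
 *
 *	page 0           super block
 *	page 1           used_md_pages mask
 *	page 2           used_clusters mask (already at its maximum length)
 *	page 3           used_blobids mask
 *	pages 4..4099    metadata region (md_len pages)
 *
 * With 256 pages per 1 MiB cluster, these 4100 pages are claimed below as
 * ceil(4100 / 256) = 17 clusters, leaving 4096 - 17 = 4079 data clusters.
 */
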
5553 	num_md_lba = bs_page_to_lba(bs, num_md_pages);
5554 
5555 	ctx->super->size = dev->blockcnt * dev->blocklen;
5556 
5557 	ctx->super->crc = blob_md_page_calc_crc(ctx->super);
5558 
5559 	num_md_clusters = spdk_divide_round_up(num_md_pages, bs->pages_per_cluster);
5560 	if (num_md_clusters > bs->total_clusters) {
5561 		SPDK_ERRLOG("Blobstore metadata cannot use more clusters than are available; "
5562 			    "please decrease the number of pages reserved for metadata "
5563 			    "or increase the cluster size.\n");
5564 		spdk_free(ctx->super);
5565 		spdk_bit_array_free(&ctx->used_clusters);
5566 		free(ctx);
5567 		bs_free(bs);
5568 		cb_fn(cb_arg, NULL, -ENOMEM);
5569 		return;
5570 	}
5571 	/* Claim all of the clusters used by the metadata */
5572 	for (i = 0; i < num_md_clusters; i++) {
5573 		spdk_bit_array_set(ctx->used_clusters, i);
5574 	}
5575 
5576 	bs->num_free_clusters -= num_md_clusters;
5577 	bs->total_data_clusters = bs->num_free_clusters;
5578 
5579 	cpl.type = SPDK_BS_CPL_TYPE_BS_HANDLE;
5580 	cpl.u.bs_handle.cb_fn = cb_fn;
5581 	cpl.u.bs_handle.cb_arg = cb_arg;
5582 	cpl.u.bs_handle.bs = bs;
5583 
5584 	seq = bs_sequence_start_bs(bs->md_channel, &cpl);
5585 	if (!seq) {
5586 		spdk_free(ctx->super);
5587 		free(ctx);
5588 		bs_free(bs);
5589 		cb_fn(cb_arg, NULL, -ENOMEM);
5590 		return;
5591 	}
5592 
5593 	batch = bs_sequence_to_batch(seq, bs_init_trim_cpl, ctx);
5594 
5595 	/* Clear metadata space */
5596 	bs_batch_write_zeroes_dev(batch, 0, num_md_lba);
5597 
5598 	lba = num_md_lba;
5599 	lba_count = ctx->bs->dev->blockcnt - lba;
5600 	switch (opts.clear_method) {
5601 	case BS_CLEAR_WITH_UNMAP:
5602 		/* Trim data clusters */
5603 		bs_batch_unmap_dev(batch, lba, lba_count);
5604 		break;
5605 	case BS_CLEAR_WITH_WRITE_ZEROES:
5606 		/* Write_zeroes to data clusters */
5607 		bs_batch_write_zeroes_dev(batch, lba, lba_count);
5608 		break;
5609 	case BS_CLEAR_WITH_NONE:
5610 	default:
5611 		break;
5612 	}
5613 
5614 	bs_batch_close(batch);
5615 }
5616 
5617 /* END spdk_bs_init */
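
/*
 * A minimal initialization sketch (hypothetical caller; 'init_done' and the
 * already-constructed 'dev' are assumptions, not part of this file):
 *
 *	static void
 *	init_done(void *cb_arg, struct spdk_blob_store *bs, int bserrno)
 *	{
 *		if (bserrno == 0) {
 *			SPDK_NOTICELOG("blobstore ready, %" PRIu64 " free clusters\n",
 *				       spdk_bs_free_cluster_count(bs));
 *		}
 *	}
 *
 *	struct spdk_bs_opts opts;
 *
 *	spdk_bs_opts_init(&opts, sizeof(opts));
 *	opts.cluster_sz = 1024 * 1024;
 *	spdk_bs_init(dev, &opts, init_done, NULL);
 *
 * On failure the device is destroyed and the callback receives a NULL
 * blobstore with a negative errno.
 */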
5618 
5619 /* START spdk_bs_destroy */
5620 
5621 static void
5622 bs_destroy_trim_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
5623 {
5624 	struct spdk_bs_load_ctx *ctx = cb_arg;
5625 	struct spdk_blob_store *bs = ctx->bs;
5626 
5627 	/*
5628 	 * We need to defer calling bs_call_cpl() until after
5629 	 * dev destruction, so tuck these away for later use.
5630 	 */
5631 	bs->unload_err = bserrno;
5632 	memcpy(&bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl));
5633 	seq->cpl.type = SPDK_BS_CPL_TYPE_NONE;
5634 
5635 	bs_sequence_finish(seq, bserrno);
5636 
5637 	bs_free(bs);
5638 	free(ctx);
5639 }
5640 
5641 void
5642 spdk_bs_destroy(struct spdk_blob_store *bs, spdk_bs_op_complete cb_fn,
5643 		void *cb_arg)
5644 {
5645 	struct spdk_bs_cpl	cpl;
5646 	spdk_bs_sequence_t	*seq;
5647 	struct spdk_bs_load_ctx *ctx;
5648 
5649 	SPDK_DEBUGLOG(blob, "Destroying blobstore\n");
5650 
5651 	if (!RB_EMPTY(&bs->open_blobs)) {
5652 		SPDK_ERRLOG("Blobstore still has open blobs\n");
5653 		cb_fn(cb_arg, -EBUSY);
5654 		return;
5655 	}
5656 
5657 	cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
5658 	cpl.u.bs_basic.cb_fn = cb_fn;
5659 	cpl.u.bs_basic.cb_arg = cb_arg;
5660 
5661 	ctx = calloc(1, sizeof(*ctx));
5662 	if (!ctx) {
5663 		cb_fn(cb_arg, -ENOMEM);
5664 		return;
5665 	}
5666 
5667 	ctx->bs = bs;
5668 
5669 	seq = bs_sequence_start_bs(bs->md_channel, &cpl);
5670 	if (!seq) {
5671 		free(ctx);
5672 		cb_fn(cb_arg, -ENOMEM);
5673 		return;
5674 	}
5675 
5676 	/* Write zeroes to the super block */
5677 	bs_sequence_write_zeroes_dev(seq,
5678 				     bs_page_to_lba(bs, 0),
5679 				     bs_byte_to_lba(bs, sizeof(struct spdk_bs_super_block)),
5680 				     bs_destroy_trim_cpl, ctx);
5681 }
5682 
5683 /* END spdk_bs_destroy */
5684 
5685 /* START spdk_bs_unload */
5686 
5687 static void
5688 bs_unload_finish(struct spdk_bs_load_ctx *ctx, int bserrno)
5689 {
5690 	spdk_bs_sequence_t *seq = ctx->seq;
5691 
5692 	spdk_free(ctx->super);
5693 
5694 	/*
5695 	 * We need to defer calling bs_call_cpl() until after
5696 	 * dev destruction, so tuck these away for later use.
5697 	 */
5698 	ctx->bs->unload_err = bserrno;
5699 	memcpy(&ctx->bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl));
5700 	seq->cpl.type = SPDK_BS_CPL_TYPE_NONE;
5701 
5702 	bs_sequence_finish(seq, bserrno);
5703 
5704 	bs_free(ctx->bs);
5705 	free(ctx);
5706 }
5707 
5708 static void
5709 bs_unload_write_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
5710 {
5711 	struct spdk_bs_load_ctx	*ctx = cb_arg;
5712 
5713 	bs_unload_finish(ctx, bserrno);
5714 }
5715 
5716 static void
5717 bs_unload_write_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
5718 {
5719 	struct spdk_bs_load_ctx	*ctx = cb_arg;
5720 
5721 	spdk_free(ctx->mask);
5722 
5723 	if (bserrno != 0) {
5724 		bs_unload_finish(ctx, bserrno);
5725 		return;
5726 	}
5727 
5728 	ctx->super->clean = 1;
5729 
5730 	bs_write_super(seq, ctx->bs, ctx->super, bs_unload_write_super_cpl, ctx);
5731 }
5732 
5733 static void
5734 bs_unload_write_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
5735 {
5736 	struct spdk_bs_load_ctx	*ctx = cb_arg;
5737 
5738 	spdk_free(ctx->mask);
5739 	ctx->mask = NULL;
5740 
5741 	if (bserrno != 0) {
5742 		bs_unload_finish(ctx, bserrno);
5743 		return;
5744 	}
5745 
5746 	bs_write_used_clusters(seq, ctx, bs_unload_write_used_clusters_cpl);
5747 }
5748 
5749 static void
5750 bs_unload_write_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
5751 {
5752 	struct spdk_bs_load_ctx	*ctx = cb_arg;
5753 
5754 	spdk_free(ctx->mask);
5755 	ctx->mask = NULL;
5756 
5757 	if (bserrno != 0) {
5758 		bs_unload_finish(ctx, bserrno);
5759 		return;
5760 	}
5761 
5762 	bs_write_used_blobids(seq, ctx, bs_unload_write_used_blobids_cpl);
5763 }
5764 
5765 static void
5766 bs_unload_read_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
5767 {
5768 	struct spdk_bs_load_ctx	*ctx = cb_arg;
5769 	int rc;
5770 
5771 	if (bserrno != 0) {
5772 		bs_unload_finish(ctx, bserrno);
5773 		return;
5774 	}
5775 
5776 	rc = bs_super_validate(ctx->super, ctx->bs);
5777 	if (rc != 0) {
5778 		bs_unload_finish(ctx, rc);
5779 		return;
5780 	}
5781 
5782 	bs_write_used_md(seq, cb_arg, bs_unload_write_used_pages_cpl);
5783 }
5784 
5785 void
5786 spdk_bs_unload(struct spdk_blob_store *bs, spdk_bs_op_complete cb_fn, void *cb_arg)
5787 {
5788 	struct spdk_bs_cpl	cpl;
5789 	struct spdk_bs_load_ctx *ctx;
5790 
5791 	SPDK_DEBUGLOG(blob, "Syncing blobstore\n");
5792 
5793 	/*
5794 	 * If external snapshot channels are being destroyed while the blobstore is being
5795 	 * unloaded, the unload is deferred until the channel destruction completes.
5796 	 */
5797 	if (bs->esnap_channels_unloading != 0) {
5798 		if (bs->esnap_unload_cb_fn != NULL) {
5799 			SPDK_ERRLOG("Blobstore unload in progress\n");
5800 			cb_fn(cb_arg, -EBUSY);
5801 			return;
5802 		}
5803 		SPDK_DEBUGLOG(blob_esnap, "Blobstore unload deferred: %" PRIu32
5804 			      " esnap clones are unloading\n", bs->esnap_channels_unloading);
5805 		bs->esnap_unload_cb_fn = cb_fn;
5806 		bs->esnap_unload_cb_arg = cb_arg;
5807 		return;
5808 	}
5809 	if (bs->esnap_unload_cb_fn != NULL) {
5810 		SPDK_DEBUGLOG(blob_esnap, "Blobstore deferred unload progressing\n");
5811 		assert(bs->esnap_unload_cb_fn == cb_fn);
5812 		assert(bs->esnap_unload_cb_arg == cb_arg);
5813 		bs->esnap_unload_cb_fn = NULL;
5814 		bs->esnap_unload_cb_arg = NULL;
5815 	}
5816 
5817 	if (!RB_EMPTY(&bs->open_blobs)) {
5818 		SPDK_ERRLOG("Blobstore still has open blobs\n");
5819 		cb_fn(cb_arg, -EBUSY);
5820 		return;
5821 	}
5822 
5823 	ctx = calloc(1, sizeof(*ctx));
5824 	if (!ctx) {
5825 		cb_fn(cb_arg, -ENOMEM);
5826 		return;
5827 	}
5828 
5829 	ctx->bs = bs;
5830 
5831 	ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL,
5832 				  SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
5833 	if (!ctx->super) {
5834 		free(ctx);
5835 		cb_fn(cb_arg, -ENOMEM);
5836 		return;
5837 	}
5838 
5839 	cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
5840 	cpl.u.bs_basic.cb_fn = cb_fn;
5841 	cpl.u.bs_basic.cb_arg = cb_arg;
5842 
5843 	ctx->seq = bs_sequence_start_bs(bs->md_channel, &cpl);
5844 	if (!ctx->seq) {
5845 		spdk_free(ctx->super);
5846 		free(ctx);
5847 		cb_fn(cb_arg, -ENOMEM);
5848 		return;
5849 	}
5850 
5851 	/* Read super block */
5852 	bs_sequence_read_dev(ctx->seq, ctx->super, bs_page_to_lba(bs, 0),
5853 			     bs_byte_to_lba(bs, sizeof(*ctx->super)),
5854 			     bs_unload_read_super_cpl, ctx);
5855 }
5856 
5857 /* END spdk_bs_unload */
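
/*
 * The unload path above persists state in a fixed order: the super block is
 * re-read and validated, then the used_md_pages, used_blobids, and
 * used_clusters masks are written, and only then is the super block
 * rewritten with clean set to 1 so the next load can skip recovery. A
 * minimal usage sketch (hypothetical 'unload_done' callback; all blobs must
 * be closed first or the call fails with -EBUSY):
 *
 *	static void
 *	unload_done(void *cb_arg, int bserrno)
 *	{
 *		assert(bserrno == 0);
 *	}
 *
 *	spdk_bs_unload(bs, unload_done, NULL);
 */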
5858 
5859 /* START spdk_bs_set_super */
5860 
5861 struct spdk_bs_set_super_ctx {
5862 	struct spdk_blob_store		*bs;
5863 	struct spdk_bs_super_block	*super;
5864 };
5865 
5866 static void
5867 bs_set_super_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
5868 {
5869 	struct spdk_bs_set_super_ctx	*ctx = cb_arg;
5870 
5871 	if (bserrno != 0) {
5872 		SPDK_ERRLOG("Unable to write to super block of blobstore\n");
5873 	}
5874 
5875 	spdk_free(ctx->super);
5876 
5877 	bs_sequence_finish(seq, bserrno);
5878 
5879 	free(ctx);
5880 }
5881 
5882 static void
5883 bs_set_super_read_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
5884 {
5885 	struct spdk_bs_set_super_ctx	*ctx = cb_arg;
5886 	int rc;
5887 
5888 	if (bserrno != 0) {
5889 		SPDK_ERRLOG("Unable to read super block of blobstore\n");
5890 		spdk_free(ctx->super);
5891 		bs_sequence_finish(seq, bserrno);
5892 		free(ctx);
5893 		return;
5894 	}
5895 
5896 	rc = bs_super_validate(ctx->super, ctx->bs);
5897 	if (rc != 0) {
5898 		SPDK_ERRLOG("Not a valid super block\n");
5899 		spdk_free(ctx->super);
5900 		bs_sequence_finish(seq, rc);
5901 		free(ctx);
5902 		return;
5903 	}
5904 
5905 	bs_write_super(seq, ctx->bs, ctx->super, bs_set_super_write_cpl, ctx);
5906 }
5907 
5908 void
5909 spdk_bs_set_super(struct spdk_blob_store *bs, spdk_blob_id blobid,
5910 		  spdk_bs_op_complete cb_fn, void *cb_arg)
5911 {
5912 	struct spdk_bs_cpl		cpl;
5913 	spdk_bs_sequence_t		*seq;
5914 	struct spdk_bs_set_super_ctx	*ctx;
5915 
5916 	SPDK_DEBUGLOG(blob, "Setting super blob id on blobstore\n");
5917 
5918 	ctx = calloc(1, sizeof(*ctx));
5919 	if (!ctx) {
5920 		cb_fn(cb_arg, -ENOMEM);
5921 		return;
5922 	}
5923 
5924 	ctx->bs = bs;
5925 
5926 	ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL,
5927 				  SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
5928 	if (!ctx->super) {
5929 		free(ctx);
5930 		cb_fn(cb_arg, -ENOMEM);
5931 		return;
5932 	}
5933 
5934 	cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
5935 	cpl.u.bs_basic.cb_fn = cb_fn;
5936 	cpl.u.bs_basic.cb_arg = cb_arg;
5937 
5938 	seq = bs_sequence_start_bs(bs->md_channel, &cpl);
5939 	if (!seq) {
5940 		spdk_free(ctx->super);
5941 		free(ctx);
5942 		cb_fn(cb_arg, -ENOMEM);
5943 		return;
5944 	}
5945 
5946 	bs->super_blob = blobid;
5947 
5948 	/* Read super block */
5949 	bs_sequence_read_dev(seq, ctx->super, bs_page_to_lba(bs, 0),
5950 			     bs_byte_to_lba(bs, sizeof(*ctx->super)),
5951 			     bs_set_super_read_cpl, ctx);
5952 }
5953 
5954 /* END spdk_bs_set_super */
5955 
5956 void
5957 spdk_bs_get_super(struct spdk_blob_store *bs,
5958 		  spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
5959 {
5960 	if (bs->super_blob == SPDK_BLOBID_INVALID) {
5961 		cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOENT);
5962 	} else {
5963 		cb_fn(cb_arg, bs->super_blob, 0);
5964 	}
5965 }
5966 
5967 uint64_t
5968 spdk_bs_get_cluster_size(struct spdk_blob_store *bs)
5969 {
5970 	return bs->cluster_sz;
5971 }
5972 
5973 uint64_t
5974 spdk_bs_get_page_size(struct spdk_blob_store *bs)
5975 {
5976 	return SPDK_BS_PAGE_SIZE;
5977 }
5978 
5979 uint64_t
5980 spdk_bs_get_io_unit_size(struct spdk_blob_store *bs)
5981 {
5982 	return bs->io_unit_size;
5983 }
5984 
5985 uint64_t
5986 spdk_bs_free_cluster_count(struct spdk_blob_store *bs)
5987 {
5988 	return bs->num_free_clusters;
5989 }
5990 
5991 uint64_t
5992 spdk_bs_total_data_cluster_count(struct spdk_blob_store *bs)
5993 {
5994 	return bs->total_data_clusters;
5995 }
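
/*
 * The getters above are sufficient to compute blobstore capacity. A sketch
 * (hypothetical helper, not part of this file):
 *
 *	static uint64_t
 *	bs_usable_bytes(struct spdk_blob_store *bs)
 *	{
 *		return spdk_bs_total_data_cluster_count(bs) *
 *		       spdk_bs_get_cluster_size(bs);
 *	}
 *
 * The same product with spdk_bs_free_cluster_count() gives the number of
 * bytes still available for new allocations.
 */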
5996 
5997 static int
5998 bs_register_md_thread(struct spdk_blob_store *bs)
5999 {
6000 	bs->md_channel = spdk_get_io_channel(bs);
6001 	if (!bs->md_channel) {
6002 		SPDK_ERRLOG("Failed to get IO channel.\n");
6003 		return -1;
6004 	}
6005 
6006 	return 0;
6007 }
6008 
6009 static int
6010 bs_unregister_md_thread(struct spdk_blob_store *bs)
6011 {
6012 	spdk_put_io_channel(bs->md_channel);
6013 
6014 	return 0;
6015 }
6016 
6017 spdk_blob_id
6018 spdk_blob_get_id(struct spdk_blob *blob)
6019 {
6020 	assert(blob != NULL);
6021 
6022 	return blob->id;
6023 }
6024 
6025 uint64_t
6026 spdk_blob_get_num_pages(struct spdk_blob *blob)
6027 {
6028 	assert(blob != NULL);
6029 
6030 	return bs_cluster_to_page(blob->bs, blob->active.num_clusters);
6031 }
6032 
6033 uint64_t
6034 spdk_blob_get_num_io_units(struct spdk_blob *blob)
6035 {
6036 	assert(blob != NULL);
6037 
6038 	return spdk_blob_get_num_pages(blob) * bs_io_unit_per_page(blob->bs);
6039 }
6040 
6041 uint64_t
6042 spdk_blob_get_num_clusters(struct spdk_blob *blob)
6043 {
6044 	assert(blob != NULL);
6045 
6046 	return blob->active.num_clusters;
6047 }
6048 
6049 uint64_t
6050 spdk_blob_get_num_allocated_clusters(struct spdk_blob *blob)
6051 {
6052 	assert(blob != NULL);
6053 
6054 	return blob->active.num_allocated_clusters;
6055 }
6056 
6057 static uint64_t
6058 blob_find_io_unit(struct spdk_blob *blob, uint64_t offset, bool is_allocated)
6059 {
6060 	uint64_t blob_io_unit_num = spdk_blob_get_num_io_units(blob);
6061 
6062 	while (offset < blob_io_unit_num) {
6063 		if (bs_io_unit_is_allocated(blob, offset) == is_allocated) {
6064 			return offset;
6065 		}
6066 
6067 		offset += bs_num_io_units_to_cluster_boundary(blob, offset);
6068 	}
6069 
6070 	return UINT64_MAX;
6071 }
6072 
6073 uint64_t
6074 spdk_blob_get_next_allocated_io_unit(struct spdk_blob *blob, uint64_t offset)
6075 {
6076 	return blob_find_io_unit(blob, offset, true);
6077 }
6078 
6079 uint64_t
6080 spdk_blob_get_next_unallocated_io_unit(struct spdk_blob *blob, uint64_t offset)
6081 {
6082 	return blob_find_io_unit(blob, offset, false);
6083 }
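
/*
 * blob_find_io_unit() advances in whole-cluster steps, so these lookups
 * resolve at cluster granularity. A sketch that walks the allocated ranges
 * of a blob (hypothetical caller, not part of this file):
 *
 *	uint64_t start = spdk_blob_get_next_allocated_io_unit(blob, 0);
 *
 *	while (start != UINT64_MAX) {
 *		// io units [start, end) are backed by allocated clusters;
 *		// end == UINT64_MAX means the run extends to the end of the blob
 *		uint64_t end = spdk_blob_get_next_unallocated_io_unit(blob, start);
 *
 *		if (end == UINT64_MAX) {
 *			break;
 *		}
 *		start = spdk_blob_get_next_allocated_io_unit(blob, end);
 *	}
 */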
6084 
6085 /* START spdk_bs_create_blob */
6086 
6087 static void
6088 bs_create_blob_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
6089 {
6090 	struct spdk_blob *blob = cb_arg;
6091 	uint32_t page_idx = bs_blobid_to_page(blob->id);
6092 
6093 	if (bserrno != 0) {
6094 		spdk_spin_lock(&blob->bs->used_lock);
6095 		spdk_bit_array_clear(blob->bs->used_blobids, page_idx);
6096 		bs_release_md_page(blob->bs, page_idx);
6097 		spdk_spin_unlock(&blob->bs->used_lock);
6098 	}
6099 
6100 	blob_free(blob);
6101 
6102 	bs_sequence_finish(seq, bserrno);
6103 }
6104 
6105 static int
6106 blob_set_xattrs(struct spdk_blob *blob, const struct spdk_blob_xattr_opts *xattrs,
6107 		bool internal)
6108 {
6109 	uint64_t i;
6110 	size_t value_len = 0;
6111 	int rc;
6112 	const void *value = NULL;
6113 	if (xattrs->count > 0 && xattrs->get_value == NULL) {
6114 		return -EINVAL;
6115 	}
6116 	for (i = 0; i < xattrs->count; i++) {
6117 		xattrs->get_value(xattrs->ctx, xattrs->names[i], &value, &value_len);
6118 		if (value == NULL || value_len == 0) {
6119 			return -EINVAL;
6120 		}
6121 		rc = blob_set_xattr(blob, xattrs->names[i], value, value_len, internal);
6122 		if (rc < 0) {
6123 			return rc;
6124 		}
6125 	}
6126 	return 0;
6127 }
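
/*
 * Xattr values are pulled through the caller-supplied get_value callback
 * rather than stored in the opts, so they can be produced lazily. A minimal
 * callback sketch (hypothetical; the same shape as bs_xattr_snapshot and
 * bs_xattr_clone later in this file):
 *
 *	static void
 *	my_xattr_get_value(void *ctx, const char *name,
 *			   const void **value, size_t *value_len)
 *	{
 *		static const char buf[] = "example";
 *
 *		// Must produce a non-NULL value with a non-zero length,
 *		// or blob_set_xattrs() fails with -EINVAL.
 *		*value = buf;
 *		*value_len = sizeof(buf);
 *	}
 */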
6128 
6129 static void
6130 blob_opts_copy(const struct spdk_blob_opts *src, struct spdk_blob_opts *dst)
6131 {
6132 #define FIELD_OK(field) \
6133         offsetof(struct spdk_blob_opts, field) + sizeof(src->field) <= src->opts_size
6134 
6135 #define SET_FIELD(field) \
6136         if (FIELD_OK(field)) { \
6137                 dst->field = src->field; \
6138         } \
6139 
6140 	SET_FIELD(num_clusters);
6141 	SET_FIELD(thin_provision);
6142 	SET_FIELD(clear_method);
6143 
6144 	if (FIELD_OK(xattrs)) {
6145 		memcpy(&dst->xattrs, &src->xattrs, sizeof(src->xattrs));
6146 	}
6147 
6148 	SET_FIELD(use_extent_table);
6149 	SET_FIELD(esnap_id);
6150 	SET_FIELD(esnap_id_len);
6151 
6152 	dst->opts_size = src->opts_size;
6153 
6154 	/* Do not remove this statement. If you add a new field, update the assert
6155 	 * below and add a corresponding SET_FIELD statement above. */
6156 	SPDK_STATIC_ASSERT(sizeof(struct spdk_blob_opts) == 80, "Incorrect size");
6157 
6158 #undef FIELD_OK
6159 #undef SET_FIELD
6160 }
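
/*
 * FIELD_OK/SET_FIELD make the copy safe across ABI versions: only fields
 * that fit within the caller-declared opts_size are read, so a caller
 * compiled against an older, smaller struct spdk_blob_opts keeps the
 * defaults for any newer fields. This is why callers initialize opts with
 * the size they were compiled against:
 *
 *	struct spdk_blob_opts opts;
 *
 *	spdk_blob_opts_init(&opts, sizeof(opts));
 */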
6161 
6162 static void
6163 bs_create_blob(struct spdk_blob_store *bs,
6164 	       const struct spdk_blob_opts *opts,
6165 	       const struct spdk_blob_xattr_opts *internal_xattrs,
6166 	       spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
6167 {
6168 	struct spdk_blob	*blob;
6169 	uint32_t		page_idx;
6170 	struct spdk_bs_cpl	cpl;
6171 	struct spdk_blob_opts	opts_local;
6172 	struct spdk_blob_xattr_opts internal_xattrs_default;
6173 	spdk_bs_sequence_t	*seq;
6174 	spdk_blob_id		id;
6175 	int rc;
6176 
6177 	assert(spdk_get_thread() == bs->md_thread);
6178 
6179 	spdk_spin_lock(&bs->used_lock);
6180 	page_idx = spdk_bit_array_find_first_clear(bs->used_md_pages, 0);
6181 	if (page_idx == UINT32_MAX) {
6182 		spdk_spin_unlock(&bs->used_lock);
6183 		cb_fn(cb_arg, 0, -ENOMEM);
6184 		return;
6185 	}
6186 	spdk_bit_array_set(bs->used_blobids, page_idx);
6187 	bs_claim_md_page(bs, page_idx);
6188 	spdk_spin_unlock(&bs->used_lock);
6189 
6190 	id = bs_page_to_blobid(page_idx);
6191 
6192 	SPDK_DEBUGLOG(blob, "Creating blob with id 0x%" PRIx64 " at page %u\n", id, page_idx);
6193 
6194 	spdk_blob_opts_init(&opts_local, sizeof(opts_local));
6195 	if (opts) {
6196 		blob_opts_copy(opts, &opts_local);
6197 	}
6198 
6199 	blob = blob_alloc(bs, id);
6200 	if (!blob) {
6201 		rc = -ENOMEM;
6202 		goto error;
6203 	}
6204 
6205 	blob->use_extent_table = opts_local.use_extent_table;
6206 	if (blob->use_extent_table) {
6207 		blob->invalid_flags |= SPDK_BLOB_EXTENT_TABLE;
6208 	}
6209 
6210 	if (!internal_xattrs) {
6211 		blob_xattrs_init(&internal_xattrs_default);
6212 		internal_xattrs = &internal_xattrs_default;
6213 	}
6214 
6215 	rc = blob_set_xattrs(blob, &opts_local.xattrs, false);
6216 	if (rc < 0) {
6217 		goto error;
6218 	}
6219 
6220 	rc = blob_set_xattrs(blob, internal_xattrs, true);
6221 	if (rc < 0) {
6222 		goto error;
6223 	}
6224 
6225 	if (opts_local.thin_provision) {
6226 		blob_set_thin_provision(blob);
6227 	}
6228 
6229 	blob_set_clear_method(blob, opts_local.clear_method);
6230 
6231 	if (opts_local.esnap_id != NULL) {
6232 		if (opts_local.esnap_id_len > UINT16_MAX) {
6233 			SPDK_ERRLOG("esnap id length %" PRIu64 " is too long\n",
6234 				    opts_local.esnap_id_len);
6235 			rc = -EINVAL;
6236 			goto error;
6237 
6238 		}
6239 		blob_set_thin_provision(blob);
6240 		blob->invalid_flags |= SPDK_BLOB_EXTERNAL_SNAPSHOT;
6241 		rc = blob_set_xattr(blob, BLOB_EXTERNAL_SNAPSHOT_ID,
6242 				    opts_local.esnap_id, opts_local.esnap_id_len, true);
6243 		if (rc != 0) {
6244 			goto error;
6245 		}
6246 	}
6247 
6248 	rc = blob_resize(blob, opts_local.num_clusters);
6249 	if (rc < 0) {
6250 		goto error;
6251 	}
6252 	cpl.type = SPDK_BS_CPL_TYPE_BLOBID;
6253 	cpl.u.blobid.cb_fn = cb_fn;
6254 	cpl.u.blobid.cb_arg = cb_arg;
6255 	cpl.u.blobid.blobid = blob->id;
6256 
6257 	seq = bs_sequence_start_bs(bs->md_channel, &cpl);
6258 	if (!seq) {
6259 		rc = -ENOMEM;
6260 		goto error;
6261 	}
6262 
6263 	blob_persist(seq, blob, bs_create_blob_cpl, blob);
6264 	return;
6265 
6266 error:
6267 	SPDK_ERRLOG("Failed to create blob: %s, size in clusters: %" PRIu64 "\n",
6268 		    spdk_strerror(rc), opts_local.num_clusters);
6269 	if (blob != NULL) {
6270 		blob_free(blob);
6271 	}
6272 	spdk_spin_lock(&bs->used_lock);
6273 	spdk_bit_array_clear(bs->used_blobids, page_idx);
6274 	bs_release_md_page(bs, page_idx);
6275 	spdk_spin_unlock(&bs->used_lock);
6276 	cb_fn(cb_arg, 0, rc);
6277 }
6278 
6279 void
6280 spdk_bs_create_blob(struct spdk_blob_store *bs,
6281 		    spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
6282 {
6283 	bs_create_blob(bs, NULL, NULL, cb_fn, cb_arg);
6284 }
6285 
6286 void
6287 spdk_bs_create_blob_ext(struct spdk_blob_store *bs, const struct spdk_blob_opts *opts,
6288 			spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
6289 {
6290 	bs_create_blob(bs, opts, NULL, cb_fn, cb_arg);
6291 }
6292 
6293 /* END spdk_bs_create_blob */
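
/*
 * A minimal creation sketch (hypothetical 'create_done' callback; must be
 * called from the blobstore's metadata thread):
 *
 *	static void
 *	create_done(void *cb_arg, spdk_blob_id blobid, int bserrno)
 *	{
 *		if (bserrno == 0) {
 *			SPDK_NOTICELOG("created blob 0x%" PRIx64 "\n", blobid);
 *		}
 *	}
 *
 *	struct spdk_blob_opts opts;
 *
 *	spdk_blob_opts_init(&opts, sizeof(opts));
 *	opts.thin_provision = true;
 *	opts.num_clusters = 10;
 *	spdk_bs_create_blob_ext(bs, &opts, create_done, NULL);
 */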
6294 
6295 /* START blob_cleanup */
6296 
6297 struct spdk_clone_snapshot_ctx {
6298 	struct spdk_bs_cpl      cpl;
6299 	int bserrno;
6300 	bool frozen;
6301 
6302 	struct spdk_io_channel *channel;
6303 
6304 	/* Current cluster for inflate operation */
6305 	uint64_t cluster;
6306 
6307 	/* For inflation, force allocation of all unallocated clusters and remove
6308 	 * thin provisioning. Otherwise, only decouple the parent and keep the clone thin. */
6309 	bool allocate_all;
6310 
6311 	struct {
6312 		spdk_blob_id id;
6313 		struct spdk_blob *blob;
6314 		bool md_ro;
6315 	} original;
6316 	struct {
6317 		spdk_blob_id id;
6318 		struct spdk_blob *blob;
6319 	} new;
6320 
6321 	/* xattrs specified for snapshot/clones only. They have no impact on
6322 	 * the original blob's xattrs. */
6323 	const struct spdk_blob_xattr_opts *xattrs;
6324 };
6325 
6326 static void
6327 bs_clone_snapshot_cleanup_finish(void *cb_arg, int bserrno)
6328 {
6329 	struct spdk_clone_snapshot_ctx *ctx = cb_arg;
6330 	struct spdk_bs_cpl *cpl = &ctx->cpl;
6331 
6332 	if (bserrno != 0) {
6333 		if (ctx->bserrno != 0) {
6334 			SPDK_ERRLOG("Cleanup error %d\n", bserrno);
6335 		} else {
6336 			ctx->bserrno = bserrno;
6337 		}
6338 	}
6339 
6340 	switch (cpl->type) {
6341 	case SPDK_BS_CPL_TYPE_BLOBID:
6342 		cpl->u.blobid.cb_fn(cpl->u.blobid.cb_arg, cpl->u.blobid.blobid, ctx->bserrno);
6343 		break;
6344 	case SPDK_BS_CPL_TYPE_BLOB_BASIC:
6345 		cpl->u.blob_basic.cb_fn(cpl->u.blob_basic.cb_arg, ctx->bserrno);
6346 		break;
6347 	default:
6348 		SPDK_UNREACHABLE();
6349 		break;
6350 	}
6351 
6352 	free(ctx);
6353 }
6354 
6355 static void
6356 bs_snapshot_unfreeze_cpl(void *cb_arg, int bserrno)
6357 {
6358 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
6359 	struct spdk_blob *origblob = ctx->original.blob;
6360 
6361 	if (bserrno != 0) {
6362 		if (ctx->bserrno != 0) {
6363 			SPDK_ERRLOG("Unfreeze error %d\n", bserrno);
6364 		} else {
6365 			ctx->bserrno = bserrno;
6366 		}
6367 	}
6368 
6369 	ctx->original.id = origblob->id;
6370 	origblob->locked_operation_in_progress = false;
6371 
6372 	/* Revert md_ro to original state */
6373 	origblob->md_ro = ctx->original.md_ro;
6374 
6375 	spdk_blob_close(origblob, bs_clone_snapshot_cleanup_finish, ctx);
6376 }
6377 
6378 static void
6379 bs_clone_snapshot_origblob_cleanup(void *cb_arg, int bserrno)
6380 {
6381 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
6382 	struct spdk_blob *origblob = ctx->original.blob;
6383 
6384 	if (bserrno != 0) {
6385 		if (ctx->bserrno != 0) {
6386 			SPDK_ERRLOG("Cleanup error %d\n", bserrno);
6387 		} else {
6388 			ctx->bserrno = bserrno;
6389 		}
6390 	}
6391 
6392 	if (ctx->frozen) {
6393 		/* Unfreeze any outstanding I/O */
6394 		blob_unfreeze_io(origblob, bs_snapshot_unfreeze_cpl, ctx);
6395 	} else {
6396 		bs_snapshot_unfreeze_cpl(ctx, 0);
6397 	}
6398 
6399 }
6400 
6401 static void
6402 bs_clone_snapshot_newblob_cleanup(struct spdk_clone_snapshot_ctx *ctx, int bserrno)
6403 {
6404 	struct spdk_blob *newblob = ctx->new.blob;
6405 
6406 	if (bserrno != 0) {
6407 		if (ctx->bserrno != 0) {
6408 			SPDK_ERRLOG("Cleanup error %d\n", bserrno);
6409 		} else {
6410 			ctx->bserrno = bserrno;
6411 		}
6412 	}
6413 
6414 	ctx->new.id = newblob->id;
6415 	spdk_blob_close(newblob, bs_clone_snapshot_origblob_cleanup, ctx);
6416 }
6417 
6418 /* END blob_cleanup */
6419 
6420 /* START spdk_bs_create_snapshot */
6421 
6422 static void
6423 bs_snapshot_swap_cluster_maps(struct spdk_blob *blob1, struct spdk_blob *blob2)
6424 {
6425 	uint64_t *cluster_temp;
6426 	uint64_t num_allocated_clusters_temp;
6427 	uint32_t *extent_page_temp;
6428 
6429 	cluster_temp = blob1->active.clusters;
6430 	blob1->active.clusters = blob2->active.clusters;
6431 	blob2->active.clusters = cluster_temp;
6432 
6433 	num_allocated_clusters_temp = blob1->active.num_allocated_clusters;
6434 	blob1->active.num_allocated_clusters = blob2->active.num_allocated_clusters;
6435 	blob2->active.num_allocated_clusters = num_allocated_clusters_temp;
6436 
6437 	extent_page_temp = blob1->active.extent_pages;
6438 	blob1->active.extent_pages = blob2->active.extent_pages;
6439 	blob2->active.extent_pages = extent_page_temp;
6440 }
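
/*
 * The swap is what makes snapshot creation cheap: the new snapshot blob
 * takes over the original blob's allocated clusters and extent pages, while
 * the original is left with all-zero (unallocated) maps and continues as a
 * thin-provisioned clone of the snapshot. No cluster data is copied; on
 * error the callers swap back to restore the original state.
 */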
6441 
6442 /* Copies an internal xattr */
6443 static int
6444 bs_snapshot_copy_xattr(struct spdk_blob *toblob, struct spdk_blob *fromblob, const char *name)
6445 {
6446 	const void	*val = NULL;
6447 	size_t		len;
6448 	int		bserrno;
6449 
6450 	bserrno = blob_get_xattr_value(fromblob, name, &val, &len, true);
6451 	if (bserrno != 0) {
6452 		SPDK_ERRLOG("blob 0x%" PRIx64 " missing %s XATTR\n", fromblob->id, name);
6453 		return bserrno;
6454 	}
6455 
6456 	bserrno = blob_set_xattr(toblob, name, val, len, true);
6457 	if (bserrno != 0) {
6458 		SPDK_ERRLOG("could not set %s XATTR on blob 0x%" PRIx64 "\n",
6459 			    name, toblob->id);
6460 		return bserrno;
6461 	}
6462 	return 0;
6463 }
6464 
6465 static void
6466 bs_snapshot_origblob_sync_cpl(void *cb_arg, int bserrno)
6467 {
6468 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
6469 	struct spdk_blob *origblob = ctx->original.blob;
6470 	struct spdk_blob *newblob = ctx->new.blob;
6471 
6472 	if (bserrno != 0) {
6473 		bs_snapshot_swap_cluster_maps(newblob, origblob);
6474 		if (blob_is_esnap_clone(newblob)) {
6475 			bs_snapshot_copy_xattr(origblob, newblob, BLOB_EXTERNAL_SNAPSHOT_ID);
6476 			origblob->invalid_flags |= SPDK_BLOB_EXTERNAL_SNAPSHOT;
6477 		}
6478 		bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
6479 		return;
6480 	}
6481 
6482 	/* Remove metadata descriptor SNAPSHOT_IN_PROGRESS */
6483 	bserrno = blob_remove_xattr(newblob, SNAPSHOT_IN_PROGRESS, true);
6484 	if (bserrno != 0) {
6485 		bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
6486 		return;
6487 	}
6488 
6489 	bs_blob_list_add(ctx->original.blob);
6490 
6491 	spdk_blob_set_read_only(newblob);
6492 
6493 	/* sync snapshot metadata */
6494 	spdk_blob_sync_md(newblob, bs_clone_snapshot_origblob_cleanup, ctx);
6495 }
6496 
6497 static void
6498 bs_snapshot_newblob_sync_cpl(void *cb_arg, int bserrno)
6499 {
6500 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
6501 	struct spdk_blob *origblob = ctx->original.blob;
6502 	struct spdk_blob *newblob = ctx->new.blob;
6503 
6504 	if (bserrno != 0) {
6505 		/* return cluster map back to original */
6506 		bs_snapshot_swap_cluster_maps(newblob, origblob);
6507 
6508 		/* Newblob md sync failed. Valid clusters are only present in origblob.
6509 		 * Since I/O is frozen on origblob, no changes to the zeroed-out cluster map should have occurred.
6510 		 * Newblob needs to be reverted to the thin-provisioned state it had at creation to close properly. */
6511 		blob_set_thin_provision(newblob);
6512 		assert(spdk_mem_all_zero(newblob->active.clusters,
6513 					 newblob->active.num_clusters * sizeof(*newblob->active.clusters)));
6514 		assert(spdk_mem_all_zero(newblob->active.extent_pages,
6515 					 newblob->active.num_extent_pages * sizeof(*newblob->active.extent_pages)));
6516 
6517 		bs_clone_snapshot_newblob_cleanup(ctx, bserrno);
6518 		return;
6519 	}
6520 
6521 	/* Set internal xattr for snapshot id */
6522 	bserrno = blob_set_xattr(origblob, BLOB_SNAPSHOT, &newblob->id, sizeof(spdk_blob_id), true);
6523 	if (bserrno != 0) {
6524 		/* return cluster map back to original */
6525 		bs_snapshot_swap_cluster_maps(newblob, origblob);
6526 		blob_set_thin_provision(newblob);
6527 		bs_clone_snapshot_newblob_cleanup(ctx, bserrno);
6528 		return;
6529 	}
6530 
6531 	/* Create new back_bs_dev for snapshot */
6532 	origblob->back_bs_dev = bs_create_blob_bs_dev(newblob);
6533 	if (origblob->back_bs_dev == NULL) {
6534 		/* return cluster map back to original */
6535 		bs_snapshot_swap_cluster_maps(newblob, origblob);
6536 		blob_set_thin_provision(newblob);
6537 		bs_clone_snapshot_newblob_cleanup(ctx, -EINVAL);
6538 		return;
6539 	}
6540 
6541 	/* Remove the xattr that references an external snapshot */
6542 	if (blob_is_esnap_clone(origblob)) {
6543 		origblob->invalid_flags &= ~SPDK_BLOB_EXTERNAL_SNAPSHOT;
6544 		bserrno = blob_remove_xattr(origblob, BLOB_EXTERNAL_SNAPSHOT_ID, true);
6545 		if (bserrno != 0) {
6546 			if (bserrno == -ENOENT) {
6547 				SPDK_ERRLOG("blob 0x%" PRIx64 " has no " BLOB_EXTERNAL_SNAPSHOT_ID
6548 					    " xattr to remove\n", origblob->id);
6549 				assert(false);
6550 			} else {
6551 				/* return cluster map back to original */
6552 				bs_snapshot_swap_cluster_maps(newblob, origblob);
6553 				blob_set_thin_provision(newblob);
6554 				bs_clone_snapshot_newblob_cleanup(ctx, bserrno);
6555 				return;
6556 			}
6557 		}
6558 	}
6559 
6560 	bs_blob_list_remove(origblob);
6561 	origblob->parent_id = newblob->id;
6562 	/* set clone blob as thin provisioned */
6563 	blob_set_thin_provision(origblob);
6564 
6565 	bs_blob_list_add(newblob);
6566 
6567 	/* sync clone metadata */
6568 	spdk_blob_sync_md(origblob, bs_snapshot_origblob_sync_cpl, ctx);
6569 }
6570 
6571 static void
6572 bs_snapshot_freeze_cpl(void *cb_arg, int rc)
6573 {
6574 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
6575 	struct spdk_blob *origblob = ctx->original.blob;
6576 	struct spdk_blob *newblob = ctx->new.blob;
6577 	int bserrno;
6578 
6579 	if (rc != 0) {
6580 		bs_clone_snapshot_newblob_cleanup(ctx, rc);
6581 		return;
6582 	}
6583 
6584 	ctx->frozen = true;
6585 
6586 	if (blob_is_esnap_clone(origblob)) {
6587 		/* Clean up any channels associated with the original blob id, because future
6588 		 * I/O will be performed using the snapshot blob_id.
6589 		 */
6590 		blob_esnap_destroy_bs_dev_channels(origblob, false, NULL, NULL);
6591 	}
6592 	if (newblob->back_bs_dev) {
6593 		blob_back_bs_destroy(newblob);
6594 	}
6595 	/* set new back_bs_dev for snapshot */
6596 	newblob->back_bs_dev = origblob->back_bs_dev;
6597 	/* Set invalid flags from origblob */
6598 	newblob->invalid_flags = origblob->invalid_flags;
6599 
6600 	/* inherit parent from original blob if set */
6601 	newblob->parent_id = origblob->parent_id;
6602 	switch (origblob->parent_id) {
6603 	case SPDK_BLOBID_EXTERNAL_SNAPSHOT:
6604 		bserrno = bs_snapshot_copy_xattr(newblob, origblob, BLOB_EXTERNAL_SNAPSHOT_ID);
6605 		if (bserrno != 0) {
6606 			bs_clone_snapshot_newblob_cleanup(ctx, bserrno);
6607 			return;
6608 		}
6609 		break;
6610 	case SPDK_BLOBID_INVALID:
6611 		break;
6612 	default:
6613 		/* Set internal xattr for snapshot id */
6614 		bserrno = blob_set_xattr(newblob, BLOB_SNAPSHOT,
6615 					 &origblob->parent_id, sizeof(spdk_blob_id), true);
6616 		if (bserrno != 0) {
6617 			bs_clone_snapshot_newblob_cleanup(ctx, bserrno);
6618 			return;
6619 		}
6620 	}
6621 
6622 	/* swap cluster maps */
6623 	bs_snapshot_swap_cluster_maps(newblob, origblob);
6624 
6625 	/* Set the clear method on the new blob to match the original. */
6626 	blob_set_clear_method(newblob, origblob->clear_method);
6627 
6628 	/* sync snapshot metadata */
6629 	spdk_blob_sync_md(newblob, bs_snapshot_newblob_sync_cpl, ctx);
6630 }
6631 
6632 static void
6633 bs_snapshot_newblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
6634 {
6635 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
6636 	struct spdk_blob *origblob = ctx->original.blob;
6637 	struct spdk_blob *newblob = _blob;
6638 
6639 	if (bserrno != 0) {
6640 		bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
6641 		return;
6642 	}
6643 
6644 	ctx->new.blob = newblob;
6645 	assert(spdk_blob_is_thin_provisioned(newblob));
6646 	assert(spdk_mem_all_zero(newblob->active.clusters,
6647 				 newblob->active.num_clusters * sizeof(*newblob->active.clusters)));
6648 	assert(spdk_mem_all_zero(newblob->active.extent_pages,
6649 				 newblob->active.num_extent_pages * sizeof(*newblob->active.extent_pages)));
6650 
6651 	blob_freeze_io(origblob, bs_snapshot_freeze_cpl, ctx);
6652 }
6653 
6654 static void
6655 bs_snapshot_newblob_create_cpl(void *cb_arg, spdk_blob_id blobid, int bserrno)
6656 {
6657 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
6658 	struct spdk_blob *origblob = ctx->original.blob;
6659 
6660 	if (bserrno != 0) {
6661 		bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
6662 		return;
6663 	}
6664 
6665 	ctx->new.id = blobid;
6666 	ctx->cpl.u.blobid.blobid = blobid;
6667 
6668 	spdk_bs_open_blob(origblob->bs, ctx->new.id, bs_snapshot_newblob_open_cpl, ctx);
6669 }
6670 
6671 
6672 static void
6673 bs_xattr_snapshot(void *arg, const char *name,
6674 		  const void **value, size_t *value_len)
6675 {
6676 	assert(strncmp(name, SNAPSHOT_IN_PROGRESS, sizeof(SNAPSHOT_IN_PROGRESS)) == 0);
6677 
6678 	struct spdk_blob *blob = (struct spdk_blob *)arg;
6679 	*value = &blob->id;
6680 	*value_len = sizeof(blob->id);
6681 }
6682 
6683 static void
6684 bs_snapshot_origblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
6685 {
6686 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
6687 	struct spdk_blob_opts opts;
6688 	struct spdk_blob_xattr_opts internal_xattrs;
6689 	char *xattrs_names[] = { SNAPSHOT_IN_PROGRESS };
6690 
6691 	if (bserrno != 0) {
6692 		bs_clone_snapshot_cleanup_finish(ctx, bserrno);
6693 		return;
6694 	}
6695 
6696 	ctx->original.blob = _blob;
6697 
6698 	if (_blob->data_ro || _blob->md_ro) {
6699 		SPDK_DEBUGLOG(blob, "Cannot create snapshot from read only blob with id 0x%"
6700 			      PRIx64 "\n", _blob->id);
6701 		ctx->bserrno = -EINVAL;
6702 		spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx);
6703 		return;
6704 	}
6705 
6706 	if (_blob->locked_operation_in_progress) {
6707 		SPDK_DEBUGLOG(blob, "Cannot create snapshot - another operation in progress\n");
6708 		ctx->bserrno = -EBUSY;
6709 		spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx);
6710 		return;
6711 	}
6712 
6713 	_blob->locked_operation_in_progress = true;
6714 
6715 	spdk_blob_opts_init(&opts, sizeof(opts));
6716 	blob_xattrs_init(&internal_xattrs);
6717 
6718 	/* Set the new blob's size to match the original blob's,
6719 	 * but do not allocate clusters */
6720 	opts.thin_provision = true;
6721 	opts.num_clusters = spdk_blob_get_num_clusters(_blob);
6722 	opts.use_extent_table = _blob->use_extent_table;
6723 
6724 	/* If there are any xattrs specified for snapshot, set them now */
6725 	if (ctx->xattrs) {
6726 		memcpy(&opts.xattrs, ctx->xattrs, sizeof(*ctx->xattrs));
6727 	}
6728 	/* Set internal xattr SNAPSHOT_IN_PROGRESS */
6729 	internal_xattrs.count = 1;
6730 	internal_xattrs.ctx = _blob;
6731 	internal_xattrs.names = xattrs_names;
6732 	internal_xattrs.get_value = bs_xattr_snapshot;
6733 
6734 	bs_create_blob(_blob->bs, &opts, &internal_xattrs,
6735 		       bs_snapshot_newblob_create_cpl, ctx);
6736 }
6737 
6738 void
6739 spdk_bs_create_snapshot(struct spdk_blob_store *bs, spdk_blob_id blobid,
6740 			const struct spdk_blob_xattr_opts *snapshot_xattrs,
6741 			spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
6742 {
6743 	struct spdk_clone_snapshot_ctx *ctx = calloc(1, sizeof(*ctx));
6744 
6745 	if (!ctx) {
6746 		cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOMEM);
6747 		return;
6748 	}
6749 	ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOBID;
6750 	ctx->cpl.u.blobid.cb_fn = cb_fn;
6751 	ctx->cpl.u.blobid.cb_arg = cb_arg;
6752 	ctx->cpl.u.blobid.blobid = SPDK_BLOBID_INVALID;
6753 	ctx->bserrno = 0;
6754 	ctx->frozen = false;
6755 	ctx->original.id = blobid;
6756 	ctx->xattrs = snapshot_xattrs;
6757 
6758 	spdk_bs_open_blob(bs, ctx->original.id, bs_snapshot_origblob_open_cpl, ctx);
6759 }
6760 /* END spdk_bs_create_snapshot */
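
/*
 * A minimal usage sketch (hypothetical 'snap_done' callback). The source
 * blob must not be read-only and must have no other locked operation in
 * progress; on success the callback receives the new snapshot's id:
 *
 *	static void
 *	snap_done(void *cb_arg, spdk_blob_id snapshot_id, int bserrno)
 *	{
 *		if (bserrno == 0) {
 *			SPDK_NOTICELOG("snapshot 0x%" PRIx64 " created\n", snapshot_id);
 *		}
 *	}
 *
 *	spdk_bs_create_snapshot(bs, blobid, NULL, snap_done, NULL);
 */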
6761 
6762 /* START spdk_bs_create_clone */
6763 
6764 static void
6765 bs_xattr_clone(void *arg, const char *name,
6766 	       const void **value, size_t *value_len)
6767 {
6768 	assert(strncmp(name, BLOB_SNAPSHOT, sizeof(BLOB_SNAPSHOT)) == 0);
6769 
6770 	struct spdk_blob *blob = (struct spdk_blob *)arg;
6771 	*value = &blob->id;
6772 	*value_len = sizeof(blob->id);
6773 }
6774 
6775 static void
6776 bs_clone_newblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
6777 {
6778 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
6779 	struct spdk_blob *clone = _blob;
6780 
6781 	ctx->new.blob = clone;
6782 	bs_blob_list_add(clone);
6783 
6784 	spdk_blob_close(clone, bs_clone_snapshot_origblob_cleanup, ctx);
6785 }
6786 
6787 static void
6788 bs_clone_newblob_create_cpl(void *cb_arg, spdk_blob_id blobid, int bserrno)
6789 {
6790 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
6791 
6792 	ctx->cpl.u.blobid.blobid = blobid;
6793 	spdk_bs_open_blob(ctx->original.blob->bs, blobid, bs_clone_newblob_open_cpl, ctx);
6794 }
6795 
6796 static void
6797 bs_clone_origblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
6798 {
6799 	struct spdk_clone_snapshot_ctx	*ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
6800 	struct spdk_blob_opts		opts;
6801 	struct spdk_blob_xattr_opts internal_xattrs;
6802 	char *xattr_names[] = { BLOB_SNAPSHOT };
6803 
6804 	if (bserrno != 0) {
6805 		bs_clone_snapshot_cleanup_finish(ctx, bserrno);
6806 		return;
6807 	}
6808 
6809 	ctx->original.blob = _blob;
6810 	ctx->original.md_ro = _blob->md_ro;
6811 
6812 	if (!_blob->data_ro || !_blob->md_ro) {
6813 		SPDK_DEBUGLOG(blob, "Cannot create clone - source blob must be read-only\n");
6814 		ctx->bserrno = -EINVAL;
6815 		spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx);
6816 		return;
6817 	}
6818 
6819 	if (_blob->locked_operation_in_progress) {
6820 		SPDK_DEBUGLOG(blob, "Cannot create clone - another operation in progress\n");
6821 		ctx->bserrno = -EBUSY;
6822 		spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx);
6823 		return;
6824 	}
6825 
6826 	_blob->locked_operation_in_progress = true;
6827 
6828 	spdk_blob_opts_init(&opts, sizeof(opts));
6829 	blob_xattrs_init(&internal_xattrs);
6830 
6831 	opts.thin_provision = true;
6832 	opts.num_clusters = spdk_blob_get_num_clusters(_blob);
6833 	opts.use_extent_table = _blob->use_extent_table;
6834 	if (ctx->xattrs) {
6835 		memcpy(&opts.xattrs, ctx->xattrs, sizeof(*ctx->xattrs));
6836 	}
6837 
6838 	/* Set internal xattr BLOB_SNAPSHOT */
6839 	internal_xattrs.count = 1;
6840 	internal_xattrs.ctx = _blob;
6841 	internal_xattrs.names = xattr_names;
6842 	internal_xattrs.get_value = bs_xattr_clone;
6843 
6844 	bs_create_blob(_blob->bs, &opts, &internal_xattrs,
6845 		       bs_clone_newblob_create_cpl, ctx);
6846 }
6847 
6848 void
6849 spdk_bs_create_clone(struct spdk_blob_store *bs, spdk_blob_id blobid,
6850 		     const struct spdk_blob_xattr_opts *clone_xattrs,
6851 		     spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
6852 {
6853 	struct spdk_clone_snapshot_ctx	*ctx = calloc(1, sizeof(*ctx));
6854 
6855 	if (!ctx) {
6856 		cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOMEM);
6857 		return;
6858 	}
6859 
6860 	ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOBID;
6861 	ctx->cpl.u.blobid.cb_fn = cb_fn;
6862 	ctx->cpl.u.blobid.cb_arg = cb_arg;
6863 	ctx->cpl.u.blobid.blobid = SPDK_BLOBID_INVALID;
6864 	ctx->bserrno = 0;
6865 	ctx->xattrs = clone_xattrs;
6866 	ctx->original.id = blobid;
6867 
6868 	spdk_bs_open_blob(bs, ctx->original.id, bs_clone_origblob_open_cpl, ctx);
6869 }
6870 
6871 /* END spdk_bs_create_clone */
6872 
6873 /* START spdk_bs_inflate_blob */
6874 
6875 static void
6876 bs_inflate_blob_set_parent_cpl(void *cb_arg, struct spdk_blob *_parent, int bserrno)
6877 {
6878 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
6879 	struct spdk_blob *_blob = ctx->original.blob;
6880 
6881 	if (bserrno != 0) {
6882 		bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
6883 		return;
6884 	}
6885 
6886 	/* Temporarily override md_ro flag for MD modification */
6887 	_blob->md_ro = false;
6888 
6889 	bserrno = blob_set_xattr(_blob, BLOB_SNAPSHOT, &_parent->id, sizeof(spdk_blob_id), true);
6890 	if (bserrno != 0) {
6891 		bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
6892 		return;
6893 	}
6894 
6895 	assert(_parent != NULL);
6896 
6897 	bs_blob_list_remove(_blob);
6898 	_blob->parent_id = _parent->id;
6899 
6900 	blob_back_bs_destroy(_blob);
6901 	_blob->back_bs_dev = bs_create_blob_bs_dev(_parent);
6902 	bs_blob_list_add(_blob);
6903 
6904 	spdk_blob_sync_md(_blob, bs_clone_snapshot_origblob_cleanup, ctx);
6905 }
6906 
6907 static void
6908 bs_inflate_blob_done(struct spdk_clone_snapshot_ctx *ctx)
6909 {
6910 	struct spdk_blob *_blob = ctx->original.blob;
6911 	struct spdk_blob *_parent;
6912 
6913 	if (ctx->allocate_all) {
6914 		/* remove thin provisioning */
6915 		bs_blob_list_remove(_blob);
6916 		if (_blob->parent_id == SPDK_BLOBID_EXTERNAL_SNAPSHOT) {
6917 			blob_remove_xattr(_blob, BLOB_EXTERNAL_SNAPSHOT_ID, true);
6918 			_blob->invalid_flags &= ~SPDK_BLOB_EXTERNAL_SNAPSHOT;
6919 		} else {
6920 			blob_remove_xattr(_blob, BLOB_SNAPSHOT, true);
6921 		}
6922 		_blob->invalid_flags &= ~SPDK_BLOB_THIN_PROV;
6923 		blob_back_bs_destroy(_blob);
6924 		_blob->parent_id = SPDK_BLOBID_INVALID;
6925 	} else {
6926 		/* For now, esnap clones always have allocate_all set. */
6927 		assert(!blob_is_esnap_clone(_blob));
6928 
6929 		_parent = ((struct spdk_blob_bs_dev *)(_blob->back_bs_dev))->blob;
6930 		if (_parent->parent_id != SPDK_BLOBID_INVALID) {
6931 			/* We must change the parent of the inflated blob */
6932 			spdk_bs_open_blob(_blob->bs, _parent->parent_id,
6933 					  bs_inflate_blob_set_parent_cpl, ctx);
6934 			return;
6935 		}
6936 
6937 		bs_blob_list_remove(_blob);
6938 		_blob->parent_id = SPDK_BLOBID_INVALID;
6939 		blob_back_bs_destroy(_blob);
6940 		_blob->back_bs_dev = bs_create_zeroes_dev();
6941 	}
6942 
6943 	/* Temporarily override md_ro flag for MD modification */
6944 	_blob->md_ro = false;
6945 	blob_remove_xattr(_blob, BLOB_SNAPSHOT, true);
6946 	_blob->state = SPDK_BLOB_STATE_DIRTY;
6947 
6948 	spdk_blob_sync_md(_blob, bs_clone_snapshot_origblob_cleanup, ctx);
6949 }
6950 
6951 /* Check if cluster needs allocation */
6952 static inline bool
6953 bs_cluster_needs_allocation(struct spdk_blob *blob, uint64_t cluster, bool allocate_all)
6954 {
6955 	struct spdk_blob_bs_dev *b;
6956 
6957 	assert(blob != NULL);
6958 
6959 	if (blob->active.clusters[cluster] != 0) {
6960 		/* Cluster is already allocated */
6961 		return false;
6962 	}
6963 
6964 	if (blob->parent_id == SPDK_BLOBID_INVALID) {
6965 		/* Blob has no parent blob */
6966 		return allocate_all;
6967 	}
6968 
6969 	if (blob->parent_id == SPDK_BLOBID_EXTERNAL_SNAPSHOT) {
6970 		return true;
6971 	}
6972 
6973 	b = (struct spdk_blob_bs_dev *)blob->back_bs_dev;
6974 	return (allocate_all || b->blob->active.clusters[cluster] != 0);
6975 }
6976 
6977 static void
6978 bs_inflate_blob_touch_next(void *cb_arg, int bserrno)
6979 {
6980 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
6981 	struct spdk_blob *_blob = ctx->original.blob;
6982 	struct spdk_bs_cpl cpl;
6983 	spdk_bs_user_op_t *op;
6984 	uint64_t offset;
6985 
6986 	if (bserrno != 0) {
6987 		bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
6988 		return;
6989 	}
6990 
6991 	for (; ctx->cluster < _blob->active.num_clusters; ctx->cluster++) {
6992 		if (bs_cluster_needs_allocation(_blob, ctx->cluster, ctx->allocate_all)) {
6993 			break;
6994 		}
6995 	}
6996 
6997 	if (ctx->cluster < _blob->active.num_clusters) {
6998 		offset = bs_cluster_to_lba(_blob->bs, ctx->cluster);
6999 
7000 		/* We may safely advance the cluster index before copying */
7001 		ctx->cluster++;
7002 
7003 		/* Use a dummy 0B read as a context for cluster copy */
7004 		cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
7005 		cpl.u.blob_basic.cb_fn = bs_inflate_blob_touch_next;
7006 		cpl.u.blob_basic.cb_arg = ctx;
7007 
7008 		op = bs_user_op_alloc(ctx->channel, &cpl, SPDK_BLOB_READ, _blob,
7009 				      NULL, 0, offset, 0);
7010 		if (!op) {
7011 			bs_clone_snapshot_origblob_cleanup(ctx, -ENOMEM);
7012 			return;
7013 		}
7014 
7015 		bs_allocate_and_copy_cluster(_blob, ctx->channel, offset, op);
7016 	} else {
7017 		bs_inflate_blob_done(ctx);
7018 	}
7019 }
7020 
7021 static void
7022 bs_inflate_blob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
7023 {
7024 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
7025 	uint64_t clusters_needed;
7026 	uint64_t i;
7027 
7028 	if (bserrno != 0) {
7029 		bs_clone_snapshot_cleanup_finish(ctx, bserrno);
7030 		return;
7031 	}
7032 
7033 	ctx->original.blob = _blob;
7034 	ctx->original.md_ro = _blob->md_ro;
7035 
7036 	if (_blob->locked_operation_in_progress) {
7037 		SPDK_DEBUGLOG(blob, "Cannot inflate blob - another operation in progress\n");
7038 		ctx->bserrno = -EBUSY;
7039 		spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx);
7040 		return;
7041 	}
7042 
7043 	_blob->locked_operation_in_progress = true;
7044 
7045 	switch (_blob->parent_id) {
7046 	case SPDK_BLOBID_INVALID:
7047 		if (!ctx->allocate_all) {
7048 			/* This blob has no parent, so we cannot decouple it. */
7049 			SPDK_ERRLOG("Cannot decouple parent of blob with no parent.\n");
7050 			bs_clone_snapshot_origblob_cleanup(ctx, -EINVAL);
7051 			return;
7052 		}
7053 		break;
7054 	case SPDK_BLOBID_EXTERNAL_SNAPSHOT:
7055 		/*
7056 		 * It would be better to rely on back_bs_dev->is_zeroes() to determine which
7057 		 * clusters require allocation. Until there is a blobstore consumer that
7058 		 * uses esnaps with an spdk_bs_dev implementing a useful is_zeroes(), it is
7059 		 * not worth the effort.
7060 		 */
7061 		ctx->allocate_all = true;
7062 		break;
7063 	default:
7064 		break;
7065 	}
7066 
7067 	if (spdk_blob_is_thin_provisioned(_blob) == false) {
7068 		/* This is not a thin-provisioned blob. No need to inflate. */
7069 		bs_clone_snapshot_origblob_cleanup(ctx, 0);
7070 		return;
7071 	}
7072 
7073 	/* Do two passes - one to verify that we can obtain enough clusters
7074 	 * and another to actually claim them.
7075 	 */
7076 	clusters_needed = 0;
7077 	for (i = 0; i < _blob->active.num_clusters; i++) {
7078 		if (bs_cluster_needs_allocation(_blob, i, ctx->allocate_all)) {
7079 			clusters_needed++;
7080 		}
7081 	}
7082 
7083 	if (clusters_needed > _blob->bs->num_free_clusters) {
7084 		/* Not enough free clusters. Cannot satisfy the request. */
7085 		bs_clone_snapshot_origblob_cleanup(ctx, -ENOSPC);
7086 		return;
7087 	}
7088 
7089 	ctx->cluster = 0;
7090 	bs_inflate_blob_touch_next(ctx, 0);
7091 }
7092 
7093 static void
7094 bs_inflate_blob(struct spdk_blob_store *bs, struct spdk_io_channel *channel,
7095 		spdk_blob_id blobid, bool allocate_all, spdk_blob_op_complete cb_fn, void *cb_arg)
7096 {
7097 	struct spdk_clone_snapshot_ctx *ctx = calloc(1, sizeof(*ctx));
7098 
7099 	if (!ctx) {
7100 		cb_fn(cb_arg, -ENOMEM);
7101 		return;
7102 	}
7103 	ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
7104 	ctx->cpl.u.bs_basic.cb_fn = cb_fn;
7105 	ctx->cpl.u.bs_basic.cb_arg = cb_arg;
7106 	ctx->bserrno = 0;
7107 	ctx->original.id = blobid;
7108 	ctx->channel = channel;
7109 	ctx->allocate_all = allocate_all;
7110 
7111 	spdk_bs_open_blob(bs, ctx->original.id, bs_inflate_blob_open_cpl, ctx);
7112 }
7113 
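/*
 * A minimal usage sketch (illustrative only): spdk_bs_inflate_blob() allocates
 * and populates every cluster of a thin-provisioned blob, while
 * spdk_bs_blob_decouple_parent() below only allocates the clusters backed by
 * the immediate parent.
 *
 *	static void
 *	inflate_done(void *cb_arg, int bserrno)
 *	{
 *		...
 *	}
 *
 *	spdk_bs_inflate_blob(bs, channel, blobid, inflate_done, NULL);
 */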
7114 void
7115 spdk_bs_inflate_blob(struct spdk_blob_store *bs, struct spdk_io_channel *channel,
7116 		     spdk_blob_id blobid, spdk_blob_op_complete cb_fn, void *cb_arg)
7117 {
7118 	bs_inflate_blob(bs, channel, blobid, true, cb_fn, cb_arg);
7119 }
7120 
7121 void
7122 spdk_bs_blob_decouple_parent(struct spdk_blob_store *bs, struct spdk_io_channel *channel,
7123 			     spdk_blob_id blobid, spdk_blob_op_complete cb_fn, void *cb_arg)
7124 {
7125 	bs_inflate_blob(bs, channel, blobid, false, cb_fn, cb_arg);
7126 }
7127 /* END spdk_bs_inflate_blob */
7128 
7129 /* START spdk_bs_blob_shallow_copy */
7130 
7131 struct shallow_copy_ctx {
7132 	struct spdk_bs_cpl cpl;
7133 	int bserrno;
7134 
7135 	/* Blob source for copy */
7136 	struct spdk_blob_store *bs;
7137 	spdk_blob_id blobid;
7138 	struct spdk_blob *blob;
7139 	struct spdk_io_channel *blob_channel;
7140 
7141 	/* Destination device for copy */
7142 	struct spdk_bs_dev *ext_dev;
7143 	struct spdk_io_channel *ext_channel;
7144 
7145 	/* Current cluster for copy operation */
7146 	uint64_t cluster;
7147 
7148 	/* Buffer for blob reading */
7149 	uint8_t *read_buff;
7150 
7151 	/* Struct for external device writing */
7152 	struct spdk_bs_dev_cb_args ext_args;
7153 
7154 	/* Actual number of copied clusters */
7155 	uint64_t copied_clusters_count;
7156 
7157 	/* Status callback for updates about the ongoing operation */
7158 	spdk_blob_shallow_copy_status status_cb;
7159 
7160 	/* Argument passed to function status_cb */
7161 	void *status_cb_arg;
7162 };
7163 
7164 static void
7165 bs_shallow_copy_cleanup_finish(void *cb_arg, int bserrno)
7166 {
7167 	struct shallow_copy_ctx *ctx = cb_arg;
7168 	struct spdk_bs_cpl *cpl = &ctx->cpl;
7169 
7170 	if (bserrno != 0) {
7171 		SPDK_ERRLOG("blob 0x%" PRIx64 " shallow copy, cleanup error %d\n", ctx->blob->id, bserrno);
7172 		ctx->bserrno = bserrno;
7173 	}
7174 
7175 	ctx->ext_dev->destroy_channel(ctx->ext_dev, ctx->ext_channel);
7176 	spdk_free(ctx->read_buff);
7177 
7178 	cpl->u.blob_basic.cb_fn(cpl->u.blob_basic.cb_arg, ctx->bserrno);
7179 
7180 	free(ctx);
7181 }
7182 
7183 static void
7184 bs_shallow_copy_bdev_write_cpl(struct spdk_io_channel *channel, void *cb_arg, int bserrno)
7185 {
7186 	struct shallow_copy_ctx *ctx = cb_arg;
7187 	struct spdk_blob *_blob = ctx->blob;
7188 
7189 	if (bserrno != 0) {
7190 		SPDK_ERRLOG("blob 0x%" PRIx64 " shallow copy, ext dev write error %d\n", ctx->blob->id, bserrno);
7191 		ctx->bserrno = bserrno;
7192 		_blob->locked_operation_in_progress = false;
7193 		spdk_blob_close(_blob, bs_shallow_copy_cleanup_finish, ctx);
7194 		return;
7195 	}
7196 
7197 	ctx->cluster++;
7198 	if (ctx->status_cb) {
7199 		ctx->copied_clusters_count++;
7200 		ctx->status_cb(ctx->copied_clusters_count, ctx->status_cb_arg);
7201 	}
7202 
7203 	bs_shallow_copy_cluster_find_next(ctx);
7204 }
7205 
7206 static void
7207 bs_shallow_copy_blob_read_cpl(void *cb_arg, int bserrno)
7208 {
7209 	struct shallow_copy_ctx *ctx = cb_arg;
7210 	struct spdk_bs_dev *ext_dev = ctx->ext_dev;
7211 	struct spdk_blob *_blob = ctx->blob;
7212 
7213 	if (bserrno != 0) {
7214 		SPDK_ERRLOG("blob 0x%" PRIx64 " shallow copy, blob read error %d\n", ctx->blob->id, bserrno);
7215 		ctx->bserrno = bserrno;
7216 		_blob->locked_operation_in_progress = false;
7217 		spdk_blob_close(_blob, bs_shallow_copy_cleanup_finish, ctx);
7218 		return;
7219 	}
7220 
7221 	ctx->ext_args.channel = ctx->ext_channel;
7222 	ctx->ext_args.cb_fn = bs_shallow_copy_bdev_write_cpl;
7223 	ctx->ext_args.cb_arg = ctx;
7224 
7225 	ext_dev->write(ext_dev, ctx->ext_channel, ctx->read_buff,
7226 		       bs_cluster_to_lba(_blob->bs, ctx->cluster),
7227 		       bs_dev_byte_to_lba(_blob->bs->dev, _blob->bs->cluster_sz),
7228 		       &ctx->ext_args);
7229 }
7230 
7231 static void
7232 bs_shallow_copy_cluster_find_next(void *cb_arg)
7233 {
7234 	struct shallow_copy_ctx *ctx = cb_arg;
7235 	struct spdk_blob *_blob = ctx->blob;
7236 
7237 	while (ctx->cluster < _blob->active.num_clusters) {
7238 		if (_blob->active.clusters[ctx->cluster] != 0) {
7239 			break;
7240 		}
7241 
7242 		ctx->cluster++;
7243 	}
7244 
7245 	if (ctx->cluster < _blob->active.num_clusters) {
7246 		blob_request_submit_op_single(ctx->blob_channel, _blob, ctx->read_buff,
7247 					      bs_cluster_to_lba(_blob->bs, ctx->cluster),
7248 					      bs_dev_byte_to_lba(_blob->bs->dev, _blob->bs->cluster_sz),
7249 					      bs_shallow_copy_blob_read_cpl, ctx, SPDK_BLOB_READ);
7250 	} else {
7251 		_blob->locked_operation_in_progress = false;
7252 		spdk_blob_close(_blob, bs_shallow_copy_cleanup_finish, ctx);
7253 	}
7254 }
7255 
7256 static void
7257 bs_shallow_copy_blob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
7258 {
7259 	struct shallow_copy_ctx *ctx = cb_arg;
7260 	struct spdk_bs_dev *ext_dev = ctx->ext_dev;
7261 	uint32_t blob_block_size;
7262 	uint64_t blob_total_size;
7263 
7264 	if (bserrno != 0) {
7265 		SPDK_ERRLOG("Shallow copy blob open error %d\n", bserrno);
7266 		ctx->bserrno = bserrno;
7267 		bs_shallow_copy_cleanup_finish(ctx, 0);
7268 		return;
7269 	}
7270 
7271 	if (!spdk_blob_is_read_only(_blob)) {
7272 		SPDK_ERRLOG("blob 0x%" PRIx64 " shallow copy, blob must be read only\n", _blob->id);
7273 		ctx->bserrno = -EPERM;
7274 		spdk_blob_close(_blob, bs_shallow_copy_cleanup_finish, ctx);
7275 		return;
7276 	}
7277 
7278 	blob_block_size = _blob->bs->dev->blocklen;
7279 	blob_total_size = spdk_blob_get_num_clusters(_blob) * spdk_bs_get_cluster_size(_blob->bs);
7280 
7281 	if (blob_total_size > ext_dev->blockcnt * ext_dev->blocklen) {
7282 		SPDK_ERRLOG("blob 0x%" PRIx64 " shallow copy, external device must be at least as large as the blob\n",
7283 			    _blob->id);
7284 		ctx->bserrno = -EINVAL;
7285 		spdk_blob_close(_blob, bs_shallow_copy_cleanup_finish, ctx);
7286 		return;
7287 	}
7288 
7289 	if (blob_block_size % ext_dev->blocklen != 0) {
7290 		SPDK_ERRLOG("blob 0x%" PRIx64 " shallow copy, external device block size is not "
7291 			    "compatible with blobstore block size\n", _blob->id);
7292 		ctx->bserrno = -EINVAL;
7293 		spdk_blob_close(_blob, bs_shallow_copy_cleanup_finish, ctx);
7294 		return;
7295 	}
7296 
7297 	ctx->blob = _blob;
7298 
7299 	if (_blob->locked_operation_in_progress) {
7300 		SPDK_DEBUGLOG(blob, "blob 0x%" PRIx64 " shallow copy - another operation in progress\n", _blob->id);
7301 		ctx->bserrno = -EBUSY;
7302 		spdk_blob_close(_blob, bs_shallow_copy_cleanup_finish, ctx);
7303 		return;
7304 	}
7305 
7306 	_blob->locked_operation_in_progress = true;
7307 
7308 	ctx->cluster = 0;
7309 	bs_shallow_copy_cluster_find_next(ctx);
7310 }
7311 
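/*
 * A minimal usage sketch (illustrative only; dst_dev is a caller-provided
 * struct spdk_bs_dev for the destination). The source blob must be read-only,
 * the destination device must be at least as large as the blob, and only
 * allocated clusters are copied. status_cb reports the running count of
 * copied clusters.
 *
 *	rc = spdk_bs_blob_shallow_copy(bs, channel, blobid, dst_dev,
 *				       copy_status, NULL, copy_done, NULL);
 */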
7312 int
7313 spdk_bs_blob_shallow_copy(struct spdk_blob_store *bs, struct spdk_io_channel *channel,
7314 			  spdk_blob_id blobid, struct spdk_bs_dev *ext_dev,
7315 			  spdk_blob_shallow_copy_status status_cb_fn, void *status_cb_arg,
7316 			  spdk_blob_op_complete cb_fn, void *cb_arg)
7317 {
7318 	struct shallow_copy_ctx *ctx;
7319 	struct spdk_io_channel *ext_channel;
7320 
7321 	ctx = calloc(1, sizeof(*ctx));
7322 	if (!ctx) {
7323 		return -ENOMEM;
7324 	}
7325 
7326 	ctx->bs = bs;
7327 	ctx->blobid = blobid;
7328 	ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
7329 	ctx->cpl.u.bs_basic.cb_fn = cb_fn;
7330 	ctx->cpl.u.bs_basic.cb_arg = cb_arg;
7331 	ctx->bserrno = 0;
7332 	ctx->blob_channel = channel;
7333 	ctx->status_cb = status_cb_fn;
7334 	ctx->status_cb_arg = status_cb_arg;
7335 	ctx->read_buff = spdk_malloc(bs->cluster_sz, bs->dev->blocklen, NULL,
7336 				     SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
7337 	if (!ctx->read_buff) {
7338 		free(ctx);
7339 		return -ENOMEM;
7340 	}
7341 
7342 	ext_channel = ext_dev->create_channel(ext_dev);
7343 	if (!ext_channel) {
7344 		spdk_free(ctx->read_buff);
7345 		free(ctx);
7346 		return -ENOMEM;
7347 	}
7348 	ctx->ext_dev = ext_dev;
7349 	ctx->ext_channel = ext_channel;
7350 
7351 	spdk_bs_open_blob(ctx->bs, ctx->blobid, bs_shallow_copy_blob_open_cpl, ctx);
7352 
7353 	return 0;
7354 }
7355 /* END spdk_bs_blob_shallow_copy */
7356 
7357 /* START spdk_blob_resize */
7358 struct spdk_bs_resize_ctx {
7359 	spdk_blob_op_complete cb_fn;
7360 	void *cb_arg;
7361 	struct spdk_blob *blob;
7362 	uint64_t sz;
7363 	int rc;
7364 };
7365 
7366 static void
7367 bs_resize_unfreeze_cpl(void *cb_arg, int rc)
7368 {
7369 	struct spdk_bs_resize_ctx *ctx = (struct spdk_bs_resize_ctx *)cb_arg;
7370 
7371 	if (rc != 0) {
7372 		SPDK_ERRLOG("Unfreeze failed, rc=%d\n", rc);
7373 	}
7374 
7375 	if (ctx->rc != 0) {
7376 		SPDK_ERRLOG("Resize failed, ctx->rc=%d\n", ctx->rc);
7377 		rc = ctx->rc;
7378 	}
7379 
7380 	ctx->blob->locked_operation_in_progress = false;
7381 
7382 	ctx->cb_fn(ctx->cb_arg, rc);
7383 	free(ctx);
7384 }
7385 
7386 static void
7387 bs_resize_freeze_cpl(void *cb_arg, int rc)
7388 {
7389 	struct spdk_bs_resize_ctx *ctx = (struct spdk_bs_resize_ctx *)cb_arg;
7390 
7391 	if (rc != 0) {
7392 		ctx->blob->locked_operation_in_progress = false;
7393 		ctx->cb_fn(ctx->cb_arg, rc);
7394 		free(ctx);
7395 		return;
7396 	}
7397 
7398 	ctx->rc = blob_resize(ctx->blob, ctx->sz);
7399 
7400 	blob_unfreeze_io(ctx->blob, bs_resize_unfreeze_cpl, ctx);
7401 }
7402 
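/*
 * A minimal usage sketch (illustrative only): sz is expressed in clusters and
 * the call must be made from the metadata thread.
 *
 *	spdk_blob_resize(blob, 10, resize_done, NULL);
 */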
7403 void
7404 spdk_blob_resize(struct spdk_blob *blob, uint64_t sz, spdk_blob_op_complete cb_fn, void *cb_arg)
7405 {
7406 	struct spdk_bs_resize_ctx *ctx;
7407 
7408 	blob_verify_md_op(blob);
7409 
7410 	SPDK_DEBUGLOG(blob, "Resizing blob 0x%" PRIx64 " to %" PRIu64 " clusters\n", blob->id, sz);
7411 
7412 	if (blob->md_ro) {
7413 		cb_fn(cb_arg, -EPERM);
7414 		return;
7415 	}
7416 
7417 	if (sz == blob->active.num_clusters) {
7418 		cb_fn(cb_arg, 0);
7419 		return;
7420 	}
7421 
7422 	if (blob->locked_operation_in_progress) {
7423 		cb_fn(cb_arg, -EBUSY);
7424 		return;
7425 	}
7426 
7427 	ctx = calloc(1, sizeof(*ctx));
7428 	if (!ctx) {
7429 		cb_fn(cb_arg, -ENOMEM);
7430 		return;
7431 	}
7432 
7433 	blob->locked_operation_in_progress = true;
7434 	ctx->cb_fn = cb_fn;
7435 	ctx->cb_arg = cb_arg;
7436 	ctx->blob = blob;
7437 	ctx->sz = sz;
7438 	blob_freeze_io(blob, bs_resize_freeze_cpl, ctx);
7439 }
7440 
7441 /* END spdk_blob_resize */
7442 
7443 
7444 /* START spdk_bs_delete_blob */
7445 
7446 static void
7447 bs_delete_close_cpl(void *cb_arg, int bserrno)
7448 {
7449 	spdk_bs_sequence_t *seq = cb_arg;
7450 
7451 	bs_sequence_finish(seq, bserrno);
7452 }
7453 
7454 static void
7455 bs_delete_persist_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
7456 {
7457 	struct spdk_blob *blob = cb_arg;
7458 
7459 	if (bserrno != 0) {
7460 		/*
7461 		 * We already removed this blob from the blobstore tailq, so
7462 		 *  we need to free it here since this is the last reference
7463 		 *  to it.
7464 		 */
7465 		blob_free(blob);
7466 		bs_delete_close_cpl(seq, bserrno);
7467 		return;
7468 	}
7469 
7470 	/*
7471 	 * This will immediately decrement the ref_count and call
7472 	 *  the completion routine since the metadata state is clean.
7473 	 *  By calling spdk_blob_close, we reduce the number of call
7474 	 *  points into code that touches the blob->open_ref count
7475 	 *  and the blobstore's blob list.
7476 	 */
7477 	spdk_blob_close(blob, bs_delete_close_cpl, seq);
7478 }
7479 
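/*
 * Deleting a snapshot that still has a single clone is a multi-step state
 * machine driven by the callbacks below: open the clone, freeze its I/O,
 * mark the snapshot with SNAPSHOT_PENDING_REMOVAL for power-failure safety,
 * copy the snapshot's cluster map and extent pages into the clone, sync the
 * clone's md, clear the now-shared entries from the snapshot, sync the
 * snapshot's md, then unfreeze and close. Failure paths unwind through the
 * delete_snapshot_cleanup_*() helpers.
 */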
7480 struct delete_snapshot_ctx {
7481 	struct spdk_blob_list *parent_snapshot_entry;
7482 	struct spdk_blob *snapshot;
7483 	struct spdk_blob_md_page *page;
7484 	bool snapshot_md_ro;
7485 	struct spdk_blob *clone;
7486 	bool clone_md_ro;
7487 	spdk_blob_op_with_handle_complete cb_fn;
7488 	void *cb_arg;
7489 	int bserrno;
7490 	uint32_t next_extent_page;
7491 };
7492 
7493 static void
7494 delete_blob_cleanup_finish(void *cb_arg, int bserrno)
7495 {
7496 	struct delete_snapshot_ctx *ctx = cb_arg;
7497 
7498 	if (bserrno != 0) {
7499 		SPDK_ERRLOG("Snapshot cleanup error %d\n", bserrno);
7500 	}
7501 
7502 	assert(ctx != NULL);
7503 
7504 	if (bserrno != 0 && ctx->bserrno == 0) {
7505 		ctx->bserrno = bserrno;
7506 	}
7507 
7508 	ctx->cb_fn(ctx->cb_arg, ctx->snapshot, ctx->bserrno);
7509 	spdk_free(ctx->page);
7510 	free(ctx);
7511 }
7512 
7513 static void
7514 delete_snapshot_cleanup_snapshot(void *cb_arg, int bserrno)
7515 {
7516 	struct delete_snapshot_ctx *ctx = cb_arg;
7517 
7518 	if (bserrno != 0) {
7519 		ctx->bserrno = bserrno;
7520 		SPDK_ERRLOG("Clone cleanup error %d\n", bserrno);
7521 	}
7522 
7523 	if (ctx->bserrno != 0) {
7524 		assert(blob_lookup(ctx->snapshot->bs, ctx->snapshot->id) == NULL);
7525 		RB_INSERT(spdk_blob_tree, &ctx->snapshot->bs->open_blobs, ctx->snapshot);
7526 		spdk_bit_array_set(ctx->snapshot->bs->open_blobids, ctx->snapshot->id);
7527 	}
7528 
7529 	ctx->snapshot->locked_operation_in_progress = false;
7530 	ctx->snapshot->md_ro = ctx->snapshot_md_ro;
7531 
7532 	spdk_blob_close(ctx->snapshot, delete_blob_cleanup_finish, ctx);
7533 }
7534 
7535 static void
7536 delete_snapshot_cleanup_clone(void *cb_arg, int bserrno)
7537 {
7538 	struct delete_snapshot_ctx *ctx = cb_arg;
7539 
7540 	ctx->clone->locked_operation_in_progress = false;
7541 	ctx->clone->md_ro = ctx->clone_md_ro;
7542 
7543 	spdk_blob_close(ctx->clone, delete_snapshot_cleanup_snapshot, ctx);
7544 }
7545 
7546 static void
7547 delete_snapshot_unfreeze_cpl(void *cb_arg, int bserrno)
7548 {
7549 	struct delete_snapshot_ctx *ctx = cb_arg;
7550 
7551 	if (bserrno) {
7552 		ctx->bserrno = bserrno;
7553 		delete_snapshot_cleanup_clone(ctx, 0);
7554 		return;
7555 	}
7556 
7557 	ctx->clone->locked_operation_in_progress = false;
7558 	spdk_blob_close(ctx->clone, delete_blob_cleanup_finish, ctx);
7559 }
7560 
7561 static void
7562 delete_snapshot_sync_snapshot_cpl(void *cb_arg, int bserrno)
7563 {
7564 	struct delete_snapshot_ctx *ctx = cb_arg;
7565 	struct spdk_blob_list *parent_snapshot_entry = NULL;
7566 	struct spdk_blob_list *snapshot_entry = NULL;
7567 	struct spdk_blob_list *clone_entry = NULL;
7568 	struct spdk_blob_list *snapshot_clone_entry = NULL;
7569 
7570 	if (bserrno) {
7571 		SPDK_ERRLOG("Failed to sync MD on blob\n");
7572 		ctx->bserrno = bserrno;
7573 		delete_snapshot_cleanup_clone(ctx, 0);
7574 		return;
7575 	}
7576 
7577 	/* Get snapshot entry for the snapshot we want to remove */
7578 	snapshot_entry = bs_get_snapshot_entry(ctx->snapshot->bs, ctx->snapshot->id);
7579 
7580 	assert(snapshot_entry != NULL);
7581 
7582 	/* Remove clone entry in this snapshot (at this point there can be only one clone) */
7583 	clone_entry = TAILQ_FIRST(&snapshot_entry->clones);
7584 	assert(clone_entry != NULL);
7585 	TAILQ_REMOVE(&snapshot_entry->clones, clone_entry, link);
7586 	snapshot_entry->clone_count--;
7587 	assert(TAILQ_EMPTY(&snapshot_entry->clones));
7588 
7589 	switch (ctx->snapshot->parent_id) {
7590 	case SPDK_BLOBID_INVALID:
7591 	case SPDK_BLOBID_EXTERNAL_SNAPSHOT:
7592 		/* No parent snapshot - just remove clone entry */
7593 		free(clone_entry);
7594 		break;
7595 	default:
7596 		/* This snapshot is at the same time a clone of another snapshot - we need to
7597 		 * update the parent snapshot (remove the current clone entry, add a new one
7598 		 * inherited from the snapshot that is being removed) */
7599 
7600 		/* Get snapshot entry for parent snapshot and clone entry within that snapshot for
7601 		 * snapshot that we are removing */
7602 		blob_get_snapshot_and_clone_entries(ctx->snapshot, &parent_snapshot_entry,
7603 						    &snapshot_clone_entry);
7604 
7605 		/* Switch clone entry in parent snapshot */
7606 		TAILQ_INSERT_TAIL(&parent_snapshot_entry->clones, clone_entry, link);
7607 		TAILQ_REMOVE(&parent_snapshot_entry->clones, snapshot_clone_entry, link);
7608 		free(snapshot_clone_entry);
7609 	}
7610 
7611 	/* Restore md_ro flags */
7612 	ctx->clone->md_ro = ctx->clone_md_ro;
7613 	ctx->snapshot->md_ro = ctx->snapshot_md_ro;
7614 
7615 	blob_unfreeze_io(ctx->clone, delete_snapshot_unfreeze_cpl, ctx);
7616 }
7617 
7618 static void
7619 delete_snapshot_sync_clone_cpl(void *cb_arg, int bserrno)
7620 {
7621 	struct delete_snapshot_ctx *ctx = cb_arg;
7622 	uint64_t i;
7623 
7624 	ctx->snapshot->md_ro = false;
7625 
7626 	if (bserrno) {
7627 		SPDK_ERRLOG("Failed to sync MD on clone\n");
7628 		ctx->bserrno = bserrno;
7629 
7630 		/* Restore snapshot to previous state */
7631 		bserrno = blob_remove_xattr(ctx->snapshot, SNAPSHOT_PENDING_REMOVAL, true);
7632 		if (bserrno != 0) {
7633 			delete_snapshot_cleanup_clone(ctx, bserrno);
7634 			return;
7635 		}
7636 
7637 		spdk_blob_sync_md(ctx->snapshot, delete_snapshot_cleanup_clone, ctx);
7638 		return;
7639 	}
7640 
7641 	/* Clear cluster map entries for snapshot */
7642 	for (i = 0; i < ctx->snapshot->active.num_clusters && i < ctx->clone->active.num_clusters; i++) {
7643 		if (ctx->clone->active.clusters[i] == ctx->snapshot->active.clusters[i]) {
7644 			if (ctx->snapshot->active.clusters[i] != 0) {
7645 				ctx->snapshot->active.num_allocated_clusters--;
7646 			}
7647 			ctx->snapshot->active.clusters[i] = 0;
7648 		}
7649 	}
7650 	for (i = 0; i < ctx->snapshot->active.num_extent_pages &&
7651 	     i < ctx->clone->active.num_extent_pages; i++) {
7652 		if (ctx->clone->active.extent_pages[i] == ctx->snapshot->active.extent_pages[i]) {
7653 			ctx->snapshot->active.extent_pages[i] = 0;
7654 		}
7655 	}
7656 
7657 	blob_set_thin_provision(ctx->snapshot);
7658 	ctx->snapshot->state = SPDK_BLOB_STATE_DIRTY;
7659 
7660 	if (ctx->parent_snapshot_entry != NULL) {
7661 		ctx->snapshot->back_bs_dev = NULL;
7662 	}
7663 
7664 	spdk_blob_sync_md(ctx->snapshot, delete_snapshot_sync_snapshot_cpl, ctx);
7665 }
7666 
7667 static void
7668 delete_snapshot_update_extent_pages_cpl(struct delete_snapshot_ctx *ctx)
7669 {
7670 	int bserrno;
7671 
7672 	/* Delete old backing bs_dev from clone (related to snapshot that will be removed) */
7673 	blob_back_bs_destroy(ctx->clone);
7674 
7675 	/* Set/remove snapshot xattr and switch parent ID and backing bs_dev on clone... */
7676 	if (ctx->snapshot->parent_id == SPDK_BLOBID_EXTERNAL_SNAPSHOT) {
7677 		bserrno = bs_snapshot_copy_xattr(ctx->clone, ctx->snapshot,
7678 						 BLOB_EXTERNAL_SNAPSHOT_ID);
7679 		if (bserrno != 0) {
7680 			ctx->bserrno = bserrno;
7681 
7682 			/* Restore snapshot to previous state */
7683 			bserrno = blob_remove_xattr(ctx->snapshot, SNAPSHOT_PENDING_REMOVAL, true);
7684 			if (bserrno != 0) {
7685 				delete_snapshot_cleanup_clone(ctx, bserrno);
7686 				return;
7687 			}
7688 
7689 			spdk_blob_sync_md(ctx->snapshot, delete_snapshot_cleanup_clone, ctx);
7690 			return;
7691 		}
7692 		ctx->clone->parent_id = SPDK_BLOBID_EXTERNAL_SNAPSHOT;
7693 		ctx->clone->back_bs_dev = ctx->snapshot->back_bs_dev;
7694 		/* Do not delete the external snapshot along with this snapshot */
7695 		ctx->snapshot->back_bs_dev = NULL;
7696 		ctx->clone->invalid_flags |= SPDK_BLOB_EXTERNAL_SNAPSHOT;
7697 	} else if (ctx->parent_snapshot_entry != NULL) {
7698 		/* ...to parent snapshot */
7699 		ctx->clone->parent_id = ctx->parent_snapshot_entry->id;
7700 		ctx->clone->back_bs_dev = ctx->snapshot->back_bs_dev;
7701 		blob_set_xattr(ctx->clone, BLOB_SNAPSHOT, &ctx->parent_snapshot_entry->id,
7702 			       sizeof(spdk_blob_id),
7703 			       true);
7704 	} else {
7705 		/* ...to blobid invalid and zeroes dev */
7706 		ctx->clone->parent_id = SPDK_BLOBID_INVALID;
7707 		ctx->clone->back_bs_dev = bs_create_zeroes_dev();
7708 		blob_remove_xattr(ctx->clone, BLOB_SNAPSHOT, true);
7709 	}
7710 
7711 	spdk_blob_sync_md(ctx->clone, delete_snapshot_sync_clone_cpl, ctx);
7712 }
7713 
7714 static void
7715 delete_snapshot_update_extent_pages(void *cb_arg, int bserrno)
7716 {
7717 	struct delete_snapshot_ctx *ctx = cb_arg;
7718 	uint32_t *extent_page;
7719 	uint64_t i;
7720 
7721 	for (i = ctx->next_extent_page; i < ctx->snapshot->active.num_extent_pages &&
7722 	     i < ctx->clone->active.num_extent_pages; i++) {
7723 		if (ctx->snapshot->active.extent_pages[i] == 0) {
7724 			/* No extent page to use from snapshot */
7725 			continue;
7726 		}
7727 
7728 		extent_page = &ctx->clone->active.extent_pages[i];
7729 		if (*extent_page == 0) {
7730 			/* Copy extent page from snapshot when clone did not have a matching one */
7731 			*extent_page = ctx->snapshot->active.extent_pages[i];
7732 			continue;
7733 		}
7734 
7735 		/* Clone and snapshot both contain partially filled matching extent pages.
7736 		 * Update the clone extent page in place with a cluster map containing a mix of both. */
7737 		ctx->next_extent_page = i + 1;
7738 		memset(ctx->page, 0, SPDK_BS_PAGE_SIZE);
7739 
7740 		blob_write_extent_page(ctx->clone, *extent_page, i * SPDK_EXTENTS_PER_EP, ctx->page,
7741 				       delete_snapshot_update_extent_pages, ctx);
7742 		return;
7743 	}
7744 	delete_snapshot_update_extent_pages_cpl(ctx);
7745 }
7746 
7747 static void
7748 delete_snapshot_sync_snapshot_xattr_cpl(void *cb_arg, int bserrno)
7749 {
7750 	struct delete_snapshot_ctx *ctx = cb_arg;
7751 	uint64_t i;
7752 
7753 	/* Temporarily override md_ro flag for clone for MD modification */
7754 	ctx->clone_md_ro = ctx->clone->md_ro;
7755 	ctx->clone->md_ro = false;
7756 
7757 	if (bserrno) {
7758 		SPDK_ERRLOG("Failed to sync MD with xattr on blob\n");
7759 		ctx->bserrno = bserrno;
7760 		delete_snapshot_cleanup_clone(ctx, 0);
7761 		return;
7762 	}
7763 
7764 	/* Copy snapshot map to clone map (only unallocated clusters in clone) */
7765 	for (i = 0; i < ctx->snapshot->active.num_clusters && i < ctx->clone->active.num_clusters; i++) {
7766 		if (ctx->clone->active.clusters[i] == 0) {
7767 			ctx->clone->active.clusters[i] = ctx->snapshot->active.clusters[i];
7768 			if (ctx->clone->active.clusters[i] != 0) {
7769 				ctx->clone->active.num_allocated_clusters++;
7770 			}
7771 		}
7772 	}
7773 	ctx->next_extent_page = 0;
7774 	delete_snapshot_update_extent_pages(ctx, 0);
7775 }
7776 
7777 static void
7778 delete_snapshot_esnap_channels_destroyed_cb(void *cb_arg, struct spdk_blob *blob, int bserrno)
7779 {
7780 	struct delete_snapshot_ctx *ctx = cb_arg;
7781 
7782 	if (bserrno != 0) {
7783 		SPDK_ERRLOG("blob 0x%" PRIx64 ": failed to destroy esnap channels: %d\n",
7784 			    blob->id, bserrno);
7785 		/* That error should not stop us from syncing metadata. */
7786 	}
7787 
7788 	spdk_blob_sync_md(ctx->snapshot, delete_snapshot_sync_snapshot_xattr_cpl, ctx);
7789 }
7790 
7791 static void
7792 delete_snapshot_freeze_io_cb(void *cb_arg, int bserrno)
7793 {
7794 	struct delete_snapshot_ctx *ctx = cb_arg;
7795 
7796 	if (bserrno) {
7797 		SPDK_ERRLOG("Failed to freeze I/O on clone\n");
7798 		ctx->bserrno = bserrno;
7799 		delete_snapshot_cleanup_clone(ctx, 0);
7800 		return;
7801 	}
7802 
7803 	/* Temporarily override md_ro flag for snapshot for MD modification */
7804 	ctx->snapshot_md_ro = ctx->snapshot->md_ro;
7805 	ctx->snapshot->md_ro = false;
7806 
7807 	/* Mark the blob as pending removal for power-failure safety; store the clone id for recovery */
7808 	ctx->bserrno = blob_set_xattr(ctx->snapshot, SNAPSHOT_PENDING_REMOVAL, &ctx->clone->id,
7809 				      sizeof(spdk_blob_id), true);
7810 	if (ctx->bserrno != 0) {
7811 		delete_snapshot_cleanup_clone(ctx, 0);
7812 		return;
7813 	}
7814 
7815 	if (blob_is_esnap_clone(ctx->snapshot)) {
7816 		blob_esnap_destroy_bs_dev_channels(ctx->snapshot, false,
7817 						   delete_snapshot_esnap_channels_destroyed_cb,
7818 						   ctx);
7819 		return;
7820 	}
7821 
7822 	spdk_blob_sync_md(ctx->snapshot, delete_snapshot_sync_snapshot_xattr_cpl, ctx);
7823 }
7824 
7825 static void
7826 delete_snapshot_open_clone_cb(void *cb_arg, struct spdk_blob *clone, int bserrno)
7827 {
7828 	struct delete_snapshot_ctx *ctx = cb_arg;
7829 
7830 	if (bserrno) {
7831 		SPDK_ERRLOG("Failed to open clone\n");
7832 		ctx->bserrno = bserrno;
7833 		delete_snapshot_cleanup_snapshot(ctx, 0);
7834 		return;
7835 	}
7836 
7837 	ctx->clone = clone;
7838 
7839 	if (clone->locked_operation_in_progress) {
7840 		SPDK_DEBUGLOG(blob, "Cannot remove blob - another operation in progress on its clone\n");
7841 		ctx->bserrno = -EBUSY;
7842 		spdk_blob_close(ctx->clone, delete_snapshot_cleanup_snapshot, ctx);
7843 		return;
7844 	}
7845 
7846 	clone->locked_operation_in_progress = true;
7847 
7848 	blob_freeze_io(clone, delete_snapshot_freeze_io_cb, ctx);
7849 }
7850 
7851 static void
7852 update_clone_on_snapshot_deletion(struct spdk_blob *snapshot, struct delete_snapshot_ctx *ctx)
7853 {
7854 	struct spdk_blob_list *snapshot_entry = NULL;
7855 	struct spdk_blob_list *clone_entry = NULL;
7856 	struct spdk_blob_list *snapshot_clone_entry = NULL;
7857 
7858 	/* Get snapshot entry for the snapshot we want to remove */
7859 	snapshot_entry = bs_get_snapshot_entry(snapshot->bs, snapshot->id);
7860 
7861 	assert(snapshot_entry != NULL);
7862 
7863 	/* Get clone of the snapshot (at this point there can be only one clone) */
7864 	clone_entry = TAILQ_FIRST(&snapshot_entry->clones);
7865 	assert(snapshot_entry->clone_count == 1);
7866 	assert(clone_entry != NULL);
7867 
7868 	/* Get snapshot entry for parent snapshot and clone entry within that snapshot for
7869 	 * snapshot that we are removing */
7870 	blob_get_snapshot_and_clone_entries(snapshot, &ctx->parent_snapshot_entry,
7871 					    &snapshot_clone_entry);
7872 
7873 	spdk_bs_open_blob(snapshot->bs, clone_entry->id, delete_snapshot_open_clone_cb, ctx);
7874 }
7875 
7876 static void
7877 bs_delete_blob_finish(void *cb_arg, struct spdk_blob *blob, int bserrno)
7878 {
7879 	spdk_bs_sequence_t *seq = cb_arg;
7880 	struct spdk_blob_list *snapshot_entry = NULL;
7881 	uint32_t page_num;
7882 
7883 	if (bserrno) {
7884 		SPDK_ERRLOG("Failed to remove blob\n");
7885 		bs_sequence_finish(seq, bserrno);
7886 		return;
7887 	}
7888 
7889 	/* Remove snapshot from the list */
7890 	snapshot_entry = bs_get_snapshot_entry(blob->bs, blob->id);
7891 	if (snapshot_entry != NULL) {
7892 		TAILQ_REMOVE(&blob->bs->snapshots, snapshot_entry, link);
7893 		free(snapshot_entry);
7894 	}
7895 
7896 	page_num = bs_blobid_to_page(blob->id);
7897 	spdk_bit_array_clear(blob->bs->used_blobids, page_num);
7898 	blob->state = SPDK_BLOB_STATE_DIRTY;
7899 	blob->active.num_pages = 0;
7900 	blob_resize(blob, 0);
7901 
7902 	blob_persist(seq, blob, bs_delete_persist_cpl, blob);
7903 }
7904 
7905 static int
7906 bs_is_blob_deletable(struct spdk_blob *blob, bool *update_clone)
7907 {
7908 	struct spdk_blob_list *snapshot_entry = NULL;
7909 	struct spdk_blob_list *clone_entry = NULL;
7910 	struct spdk_blob *clone = NULL;
7911 	bool has_one_clone = false;
7912 
7913 	/* Check if this is a snapshot with clones */
7914 	snapshot_entry = bs_get_snapshot_entry(blob->bs, blob->id);
7915 	if (snapshot_entry != NULL) {
7916 		if (snapshot_entry->clone_count > 1) {
7917 			SPDK_ERRLOG("Cannot remove snapshot with more than one clone\n");
7918 			return -EBUSY;
7919 		} else if (snapshot_entry->clone_count == 1) {
7920 			has_one_clone = true;
7921 		}
7922 	}
7923 
7924 	/* Check if someone has this blob open (besides this delete context):
7925 	 * - open_ref == 1 - only this context has the blob open, so it is ok to remove it
7926 	 * - open_ref <= 2 && has_one_clone == true - the clone is holding the snapshot
7927 	 *	open and that is ok, because we will update it accordingly */
7928 	if (blob->open_ref <= 2 && has_one_clone) {
7929 		clone_entry = TAILQ_FIRST(&snapshot_entry->clones);
7930 		assert(clone_entry != NULL);
7931 		clone = blob_lookup(blob->bs, clone_entry->id);
7932 
7933 		if (blob->open_ref == 2 && clone == NULL) {
7934 			/* Clone is closed and someone else opened this blob */
7935 			SPDK_ERRLOG("Cannot remove snapshot because it is open\n");
7936 			return -EBUSY;
7937 		}
7938 
7939 		*update_clone = true;
7940 		return 0;
7941 	}
7942 
7943 	if (blob->open_ref > 1) {
7944 		SPDK_ERRLOG("Cannot remove snapshot because it is open\n");
7945 		return -EBUSY;
7946 	}
7947 
7948 	assert(has_one_clone == false);
7949 	*update_clone = false;
7950 	return 0;
7951 }
7952 
7953 static void
7954 bs_delete_enomem_close_cpl(void *cb_arg, int bserrno)
7955 {
7956 	spdk_bs_sequence_t *seq = cb_arg;
7957 
7958 	bs_sequence_finish(seq, -ENOMEM);
7959 }
7960 
7961 static void
7962 bs_delete_open_cpl(void *cb_arg, struct spdk_blob *blob, int bserrno)
7963 {
7964 	spdk_bs_sequence_t *seq = cb_arg;
7965 	struct delete_snapshot_ctx *ctx;
7966 	bool update_clone = false;
7967 
7968 	if (bserrno != 0) {
7969 		bs_sequence_finish(seq, bserrno);
7970 		return;
7971 	}
7972 
7973 	blob_verify_md_op(blob);
7974 
7975 	ctx = calloc(1, sizeof(*ctx));
7976 	if (ctx == NULL) {
7977 		spdk_blob_close(blob, bs_delete_enomem_close_cpl, seq);
7978 		return;
7979 	}
7980 
7981 	ctx->snapshot = blob;
7982 	ctx->cb_fn = bs_delete_blob_finish;
7983 	ctx->cb_arg = seq;
7984 
7985 	/* Check if blob can be removed and if it is a snapshot with clone on top of it */
7986 	ctx->bserrno = bs_is_blob_deletable(blob, &update_clone);
7987 	if (ctx->bserrno) {
7988 		spdk_blob_close(blob, delete_blob_cleanup_finish, ctx);
7989 		return;
7990 	}
7991 
7992 	if (blob->locked_operation_in_progress) {
7993 		SPDK_DEBUGLOG(blob, "Cannot remove blob - another operation in progress\n");
7994 		ctx->bserrno = -EBUSY;
7995 		spdk_blob_close(blob, delete_blob_cleanup_finish, ctx);
7996 		return;
7997 	}
7998 
7999 	blob->locked_operation_in_progress = true;
8000 
8001 	/*
8002 	 * Remove the blob from the blob_store list now, to ensure it does not
8003 	 *  get returned after this point by blob_lookup().
8004 	 */
8005 	spdk_bit_array_clear(blob->bs->open_blobids, blob->id);
8006 	RB_REMOVE(spdk_blob_tree, &blob->bs->open_blobs, blob);
8007 
8008 	if (update_clone) {
8009 		ctx->page = spdk_zmalloc(SPDK_BS_PAGE_SIZE, 0, NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
8010 		if (!ctx->page) {
8011 			ctx->bserrno = -ENOMEM;
8012 			spdk_blob_close(blob, delete_blob_cleanup_finish, ctx);
8013 			return;
8014 		}
8015 		/* This blob is a snapshot with active clone - update clone first */
8016 		update_clone_on_snapshot_deletion(blob, ctx);
8017 	} else {
8018 		/* This blob does not have any clones - just remove it */
8019 		bs_blob_list_remove(blob);
8020 		bs_delete_blob_finish(seq, blob, 0);
8021 		free(ctx);
8022 	}
8023 }
8024 
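/*
 * A minimal usage sketch (illustrative only): deletion must be requested from
 * the metadata thread and fails with -EBUSY if the blob is open elsewhere or
 * is a snapshot with more than one clone.
 *
 *	static void
 *	delete_done(void *cb_arg, int bserrno)
 *	{
 *		...
 *	}
 *
 *	spdk_bs_delete_blob(bs, blobid, delete_done, NULL);
 */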
8025 void
8026 spdk_bs_delete_blob(struct spdk_blob_store *bs, spdk_blob_id blobid,
8027 		    spdk_blob_op_complete cb_fn, void *cb_arg)
8028 {
8029 	struct spdk_bs_cpl	cpl;
8030 	spdk_bs_sequence_t	*seq;
8031 
8032 	SPDK_DEBUGLOG(blob, "Deleting blob 0x%" PRIx64 "\n", blobid);
8033 
8034 	assert(spdk_get_thread() == bs->md_thread);
8035 
8036 	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
8037 	cpl.u.blob_basic.cb_fn = cb_fn;
8038 	cpl.u.blob_basic.cb_arg = cb_arg;
8039 
8040 	seq = bs_sequence_start_bs(bs->md_channel, &cpl);
8041 	if (!seq) {
8042 		cb_fn(cb_arg, -ENOMEM);
8043 		return;
8044 	}
8045 
8046 	spdk_bs_open_blob(bs, blobid, bs_delete_open_cpl, seq);
8047 }
8048 
8049 /* END spdk_bs_delete_blob */
8050 
8051 /* START spdk_bs_open_blob */
8052 
8053 static void
8054 bs_open_blob_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
8055 {
8056 	struct spdk_blob *blob = cb_arg;
8057 	struct spdk_blob *existing;
8058 
8059 	if (bserrno != 0) {
8060 		blob_free(blob);
8061 		seq->cpl.u.blob_handle.blob = NULL;
8062 		bs_sequence_finish(seq, bserrno);
8063 		return;
8064 	}
8065 
8066 	existing = blob_lookup(blob->bs, blob->id);
8067 	if (existing) {
8068 		blob_free(blob);
8069 		existing->open_ref++;
8070 		seq->cpl.u.blob_handle.blob = existing;
8071 		bs_sequence_finish(seq, 0);
8072 		return;
8073 	}
8074 
8075 	blob->open_ref++;
8076 
8077 	spdk_bit_array_set(blob->bs->open_blobids, blob->id);
8078 	RB_INSERT(spdk_blob_tree, &blob->bs->open_blobs, blob);
8079 
8080 	bs_sequence_finish(seq, bserrno);
8081 }
8082 
8083 static inline void
8084 blob_open_opts_copy(const struct spdk_blob_open_opts *src, struct spdk_blob_open_opts *dst)
8085 {
8086 #define FIELD_OK(field) \
8087         offsetof(struct spdk_blob_open_opts, field) + sizeof(src->field) <= src->opts_size
8088 
8089 #define SET_FIELD(field) \
8090         if (FIELD_OK(field)) { \
8091                 dst->field = src->field; \
8092         } \
8093 
8094 	SET_FIELD(clear_method);
8095 	SET_FIELD(esnap_ctx);
8096 
8097 	dst->opts_size = src->opts_size;
8098 
8099 	/* You should not remove this statement, but need to update the assert statement
8100 	 * if you add a new field, and also add a corresponding SET_FIELD statement */
8101 	SPDK_STATIC_ASSERT(sizeof(struct spdk_blob_open_opts) == 24, "Incorrect size");
8102 
8103 #undef FIELD_OK
8104 #undef SET_FIELD
8105 }
8106 
8107 static void
8108 bs_open_blob(struct spdk_blob_store *bs,
8109 	     spdk_blob_id blobid,
8110 	     struct spdk_blob_open_opts *opts,
8111 	     spdk_blob_op_with_handle_complete cb_fn,
8112 	     void *cb_arg)
8113 {
8114 	struct spdk_blob		*blob;
8115 	struct spdk_bs_cpl		cpl;
8116 	struct spdk_blob_open_opts	opts_local;
8117 	spdk_bs_sequence_t		*seq;
8118 	uint32_t			page_num;
8119 
8120 	SPDK_DEBUGLOG(blob, "Opening blob 0x%" PRIx64 "\n", blobid);
8121 	assert(spdk_get_thread() == bs->md_thread);
8122 
8123 	page_num = bs_blobid_to_page(blobid);
8124 	if (spdk_bit_array_get(bs->used_blobids, page_num) == false) {
8125 		/* Invalid blobid */
8126 		cb_fn(cb_arg, NULL, -ENOENT);
8127 		return;
8128 	}
8129 
8130 	blob = blob_lookup(bs, blobid);
8131 	if (blob) {
8132 		blob->open_ref++;
8133 		cb_fn(cb_arg, blob, 0);
8134 		return;
8135 	}
8136 
8137 	blob = blob_alloc(bs, blobid);
8138 	if (!blob) {
8139 		cb_fn(cb_arg, NULL, -ENOMEM);
8140 		return;
8141 	}
8142 
8143 	spdk_blob_open_opts_init(&opts_local, sizeof(opts_local));
8144 	if (opts) {
8145 		blob_open_opts_copy(opts, &opts_local);
8146 	}
8147 
8148 	blob->clear_method = opts_local.clear_method;
8149 
8150 	cpl.type = SPDK_BS_CPL_TYPE_BLOB_HANDLE;
8151 	cpl.u.blob_handle.cb_fn = cb_fn;
8152 	cpl.u.blob_handle.cb_arg = cb_arg;
8153 	cpl.u.blob_handle.blob = blob;
8154 	cpl.u.blob_handle.esnap_ctx = opts_local.esnap_ctx;
8155 
8156 	seq = bs_sequence_start_bs(bs->md_channel, &cpl);
8157 	if (!seq) {
8158 		blob_free(blob);
8159 		cb_fn(cb_arg, NULL, -ENOMEM);
8160 		return;
8161 	}
8162 
8163 	blob_load(seq, blob, bs_open_blob_cpl, blob);
8164 }
8165 
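/*
 * A minimal usage sketch (illustrative only; BLOB_CLEAR_WITH_NONE stands in
 * for whichever clear method the caller wants):
 *
 *	struct spdk_blob_open_opts opts;
 *
 *	spdk_blob_open_opts_init(&opts, sizeof(opts));
 *	opts.clear_method = BLOB_CLEAR_WITH_NONE;
 *	spdk_bs_open_blob_ext(bs, blobid, &opts, open_done, NULL);
 */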
8166 void
8167 spdk_bs_open_blob(struct spdk_blob_store *bs, spdk_blob_id blobid,
8168 		  spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
8169 {
8170 	bs_open_blob(bs, blobid, NULL, cb_fn, cb_arg);
8171 }
8172 
8173 void
8174 spdk_bs_open_blob_ext(struct spdk_blob_store *bs, spdk_blob_id blobid,
8175 		      struct spdk_blob_open_opts *opts, spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
8176 {
8177 	bs_open_blob(bs, blobid, opts, cb_fn, cb_arg);
8178 }
8179 
8180 /* END spdk_bs_open_blob */
8181 
8182 /* START spdk_blob_set_read_only */
8183 int
8184 spdk_blob_set_read_only(struct spdk_blob *blob)
8185 {
8186 	blob_verify_md_op(blob);
8187 
8188 	blob->data_ro_flags |= SPDK_BLOB_READ_ONLY;
8189 
8190 	blob->state = SPDK_BLOB_STATE_DIRTY;
8191 	return 0;
8192 }
8193 /* END spdk_blob_set_read_only */
8194 
8195 /* START spdk_blob_sync_md */
8196 
8197 static void
8198 blob_sync_md_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
8199 {
8200 	struct spdk_blob *blob = cb_arg;
8201 
8202 	if (bserrno == 0 && (blob->data_ro_flags & SPDK_BLOB_READ_ONLY)) {
8203 		blob->data_ro = true;
8204 		blob->md_ro = true;
8205 	}
8206 
8207 	bs_sequence_finish(seq, bserrno);
8208 }
8209 
8210 static void
8211 blob_sync_md(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
8212 {
8213 	struct spdk_bs_cpl	cpl;
8214 	spdk_bs_sequence_t	*seq;
8215 
8216 	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
8217 	cpl.u.blob_basic.cb_fn = cb_fn;
8218 	cpl.u.blob_basic.cb_arg = cb_arg;
8219 
8220 	seq = bs_sequence_start_bs(blob->bs->md_channel, &cpl);
8221 	if (!seq) {
8222 		cb_fn(cb_arg, -ENOMEM);
8223 		return;
8224 	}
8225 
8226 	blob_persist(seq, blob, blob_sync_md_cpl, blob);
8227 }
8228 
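/*
 * A minimal usage sketch (illustrative only): persist in-memory metadata
 * changes, e.g. after setting an xattr. If the blob's metadata is read-only
 * the callback completes immediately with 0.
 *
 *	spdk_blob_set_xattr(blob, "name", "value", sizeof("value"));
 *	spdk_blob_sync_md(blob, sync_done, NULL);
 */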
8229 void
8230 spdk_blob_sync_md(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
8231 {
8232 	blob_verify_md_op(blob);
8233 
8234 	SPDK_DEBUGLOG(blob, "Syncing blob 0x%" PRIx64 "\n", blob->id);
8235 
8236 	if (blob->md_ro) {
8237 		assert(blob->state == SPDK_BLOB_STATE_CLEAN);
8238 		cb_fn(cb_arg, 0);
8239 		return;
8240 	}
8241 
8242 	blob_sync_md(blob, cb_fn, cb_arg);
8243 }
8244 
8245 /* END spdk_blob_sync_md */
8246 
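/*
 * Cluster insertion and release must happen on the blobstore's metadata
 * thread. The helpers below wrap the request in this context, forward it to
 * bs->md_thread with spdk_thread_send_msg(), and bounce the completion back
 * to the originating thread (ctx->thread) via blob_op_cluster_msg_cpl().
 */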
8247 struct spdk_blob_cluster_op_ctx {
8248 	struct spdk_thread	*thread;
8249 	struct spdk_blob	*blob;
8250 	uint32_t		cluster_num;	/* cluster index in blob */
8251 	uint32_t		cluster;	/* cluster on disk */
8252 	uint32_t		extent_page;	/* extent page on disk */
8253 	struct spdk_blob_md_page *page; /* preallocated extent page */
8254 	int			rc;
8255 	spdk_blob_op_complete	cb_fn;
8256 	void			*cb_arg;
8257 };
8258 
8259 static void
8260 blob_op_cluster_msg_cpl(void *arg)
8261 {
8262 	struct spdk_blob_cluster_op_ctx *ctx = arg;
8263 
8264 	ctx->cb_fn(ctx->cb_arg, ctx->rc);
8265 	free(ctx);
8266 }
8267 
8268 static void
8269 blob_op_cluster_msg_cb(void *arg, int bserrno)
8270 {
8271 	struct spdk_blob_cluster_op_ctx *ctx = arg;
8272 
8273 	ctx->rc = bserrno;
8274 	spdk_thread_send_msg(ctx->thread, blob_op_cluster_msg_cpl, ctx);
8275 }
8276 
8277 static void
8278 blob_insert_new_ep_cb(void *arg, int bserrno)
8279 {
8280 	struct spdk_blob_cluster_op_ctx *ctx = arg;
8281 	uint32_t *extent_page;
8282 
8283 	extent_page = bs_cluster_to_extent_page(ctx->blob, ctx->cluster_num);
8284 	*extent_page = ctx->extent_page;
8285 	ctx->blob->state = SPDK_BLOB_STATE_DIRTY;
8286 	blob_sync_md(ctx->blob, blob_op_cluster_msg_cb, ctx);
8287 }
8288 
8289 struct spdk_blob_write_extent_page_ctx {
8290 	struct spdk_blob_store		*bs;
8291 
8292 	uint32_t			extent;
8293 	struct spdk_blob_md_page	*page;
8294 };
8295 
8296 static void
8297 blob_free_cluster_msg_cb(void *arg, int bserrno)
8298 {
8299 	struct spdk_blob_cluster_op_ctx *ctx = arg;
8300 
8301 	spdk_spin_lock(&ctx->blob->bs->used_lock);
8302 	bs_release_cluster(ctx->blob->bs, bs_lba_to_cluster(ctx->blob->bs, ctx->cluster));
8303 	spdk_spin_unlock(&ctx->blob->bs->used_lock);
8304 
8305 	ctx->rc = bserrno;
8306 	spdk_thread_send_msg(ctx->thread, blob_op_cluster_msg_cpl, ctx);
8307 }
8308 
8309 static void
8310 blob_free_cluster_update_ep_cb(void *arg, int bserrno)
8311 {
8312 	struct spdk_blob_cluster_op_ctx *ctx = arg;
8313 
8314 	if (bserrno != 0 || ctx->blob->bs->clean == 0) {
8315 		blob_free_cluster_msg_cb(ctx, bserrno);
8316 		return;
8317 	}
8318 
8319 	ctx->blob->state = SPDK_BLOB_STATE_DIRTY;
8320 	blob_sync_md(ctx->blob, blob_free_cluster_msg_cb, ctx);
8321 }
8322 
8323 static void
8324 blob_free_cluster_free_ep_cb(void *arg, int bserrno)
8325 {
8326 	struct spdk_blob_cluster_op_ctx *ctx = arg;
8327 
8328 	spdk_spin_lock(&ctx->blob->bs->used_lock);
8329 	assert(spdk_bit_array_get(ctx->blob->bs->used_md_pages, ctx->extent_page) == true);
8330 	bs_release_md_page(ctx->blob->bs, ctx->extent_page);
8331 	spdk_spin_unlock(&ctx->blob->bs->used_lock);
8332 	ctx->blob->state = SPDK_BLOB_STATE_DIRTY;
8333 	blob_sync_md(ctx->blob, blob_free_cluster_msg_cb, ctx);
8334 }
8335 
8336 static void
8337 blob_persist_extent_page_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
8338 {
8339 	struct spdk_blob_write_extent_page_ctx *ctx = cb_arg;
8340 
8341 	free(ctx);
8342 	bs_sequence_finish(seq, bserrno);
8343 }
8344 
8345 static void
8346 blob_write_extent_page_ready(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
8347 {
8348 	struct spdk_blob_write_extent_page_ctx *ctx = cb_arg;
8349 
8350 	if (bserrno != 0) {
8351 		blob_persist_extent_page_cpl(seq, ctx, bserrno);
8352 		return;
8353 	}
8354 	bs_sequence_write_dev(seq, ctx->page, bs_md_page_to_lba(ctx->bs, ctx->extent),
8355 			      bs_byte_to_lba(ctx->bs, SPDK_BS_PAGE_SIZE),
8356 			      blob_persist_extent_page_cpl, ctx);
8357 }
8358 
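/*
 * Serialize the extent page covering cluster_num into the caller-provided
 * page, stamp its id, sequence number and crc, then persist it at md page
 * index 'extent' after marking the superblock dirty via bs_mark_dirty().
 */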
8359 static void
8360 blob_write_extent_page(struct spdk_blob *blob, uint32_t extent, uint64_t cluster_num,
8361 		       struct spdk_blob_md_page *page, spdk_blob_op_complete cb_fn, void *cb_arg)
8362 {
8363 	struct spdk_blob_write_extent_page_ctx	*ctx;
8364 	spdk_bs_sequence_t			*seq;
8365 	struct spdk_bs_cpl			cpl;
8366 
8367 	ctx = calloc(1, sizeof(*ctx));
8368 	if (!ctx) {
8369 		cb_fn(cb_arg, -ENOMEM);
8370 		return;
8371 	}
8372 	ctx->bs = blob->bs;
8373 	ctx->extent = extent;
8374 	ctx->page = page;
8375 
8376 	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
8377 	cpl.u.blob_basic.cb_fn = cb_fn;
8378 	cpl.u.blob_basic.cb_arg = cb_arg;
8379 
8380 	seq = bs_sequence_start_bs(blob->bs->md_channel, &cpl);
8381 	if (!seq) {
8382 		free(ctx);
8383 		cb_fn(cb_arg, -ENOMEM);
8384 		return;
8385 	}
8386 
8387 	assert(page);
8388 	page->next = SPDK_INVALID_MD_PAGE;
8389 	page->id = blob->id;
8390 	page->sequence_num = 0;
8391 
8392 	blob_serialize_extent_page(blob, cluster_num, page);
8393 
8394 	page->crc = blob_md_page_calc_crc(page);
8395 
8396 	assert(spdk_bit_array_get(blob->bs->used_md_pages, extent) == true);
8397 
8398 	bs_mark_dirty(seq, blob->bs, blob_write_extent_page_ready, ctx);
8399 }
8400 
8401 static void
8402 blob_insert_cluster_msg(void *arg)
8403 {
8404 	struct spdk_blob_cluster_op_ctx *ctx = arg;
8405 	uint32_t *extent_page;
8406 
8407 	ctx->rc = blob_insert_cluster(ctx->blob, ctx->cluster_num, ctx->cluster);
8408 	if (ctx->rc != 0) {
8409 		spdk_thread_send_msg(ctx->thread, blob_op_cluster_msg_cpl, ctx);
8410 		return;
8411 	}
8412 
8413 	if (ctx->blob->use_extent_table == false) {
8414 		/* Extent table is not used; proceed with a sync of md that will only use extents_rle. */
8415 		ctx->blob->state = SPDK_BLOB_STATE_DIRTY;
8416 		blob_sync_md(ctx->blob, blob_op_cluster_msg_cb, ctx);
8417 		return;
8418 	}
8419 
8420 	extent_page = bs_cluster_to_extent_page(ctx->blob, ctx->cluster_num);
8421 	if (*extent_page == 0) {
8422 		/* Extent page requires allocation.
8423 		 * It was already claimed in the used_md_pages map and placed in ctx. */
8424 		assert(ctx->extent_page != 0);
8425 		assert(spdk_bit_array_get(ctx->blob->bs->used_md_pages, ctx->extent_page) == true);
8426 		blob_write_extent_page(ctx->blob, ctx->extent_page, ctx->cluster_num, ctx->page,
8427 				       blob_insert_new_ep_cb, ctx);
8428 	} else {
8429 		/* It is possible for the original thread to have allocated an extent page
8430 		 * for a different cluster covered by the same extent page. In that case
8431 		 * proceed with updating the existing extent page, but release the extra one. */
8432 		if (ctx->extent_page != 0) {
8433 			spdk_spin_lock(&ctx->blob->bs->used_lock);
8434 			assert(spdk_bit_array_get(ctx->blob->bs->used_md_pages, ctx->extent_page) == true);
8435 			bs_release_md_page(ctx->blob->bs, ctx->extent_page);
8436 			spdk_spin_unlock(&ctx->blob->bs->used_lock);
8437 			ctx->extent_page = 0;
8438 		}
8439 		/* Extent page already allocated.
8440 		 * Each cluster allocation then requires just an update of a single extent page. */
8441 		blob_write_extent_page(ctx->blob, *extent_page, ctx->cluster_num, ctx->page,
8442 				       blob_op_cluster_msg_cb, ctx);
8443 	}
8444 }
8445 
8446 static void
8447 blob_insert_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num,
8448 				 uint64_t cluster, uint32_t extent_page, struct spdk_blob_md_page *page,
8449 				 spdk_blob_op_complete cb_fn, void *cb_arg)
8450 {
8451 	struct spdk_blob_cluster_op_ctx *ctx;
8452 
8453 	ctx = calloc(1, sizeof(*ctx));
8454 	if (ctx == NULL) {
8455 		cb_fn(cb_arg, -ENOMEM);
8456 		return;
8457 	}
8458 
8459 	ctx->thread = spdk_get_thread();
8460 	ctx->blob = blob;
8461 	ctx->cluster_num = cluster_num;
8462 	ctx->cluster = cluster;
8463 	ctx->extent_page = extent_page;
8464 	ctx->page = page;
8465 	ctx->cb_fn = cb_fn;
8466 	ctx->cb_arg = cb_arg;
8467 
8468 	spdk_thread_send_msg(blob->bs->md_thread, blob_insert_cluster_msg, ctx);
8469 }
8470 
8471 static void
8472 blob_free_cluster_msg(void *arg)
8473 {
8474 	struct spdk_blob_cluster_op_ctx *ctx = arg;
8475 	uint32_t *extent_page;
8476 	uint32_t start_cluster_idx;
8477 	bool free_extent_page = true;
8478 	size_t i;
8479 
8480 	ctx->cluster = ctx->blob->active.clusters[ctx->cluster_num];
8481 	ctx->blob->active.clusters[ctx->cluster_num] = 0;
8482 	if (ctx->cluster != 0) {
8483 		ctx->blob->active.num_allocated_clusters--;
8484 	}
8485 
8486 	if (ctx->blob->use_extent_table == false) {
8487 		/* Extent table is not used; proceed with a sync of md that will only use extents_rle. */
8488 		spdk_spin_lock(&ctx->blob->bs->used_lock);
8489 		bs_release_cluster(ctx->blob->bs, bs_lba_to_cluster(ctx->blob->bs, ctx->cluster));
8490 		spdk_spin_unlock(&ctx->blob->bs->used_lock);
8491 		ctx->blob->state = SPDK_BLOB_STATE_DIRTY;
8492 		blob_sync_md(ctx->blob, blob_op_cluster_msg_cb, ctx);
8493 		return;
8494 	}
8495 
8496 	extent_page = bs_cluster_to_extent_page(ctx->blob, ctx->cluster_num);
8497 
8498 	/* There shouldn't be parallel release operations on the same cluster */
8499 	assert(*extent_page == ctx->extent_page);
8500 
8501 	start_cluster_idx = (ctx->cluster_num / SPDK_EXTENTS_PER_EP) * SPDK_EXTENTS_PER_EP;
8502 	for (i = 0; i < SPDK_EXTENTS_PER_EP; ++i) {
8503 		if (ctx->blob->active.clusters[start_cluster_idx + i] != 0) {
8504 			free_extent_page = false;
8505 			break;
8506 		}
8507 	}
8508 
8509 	if (free_extent_page) {
8510 		assert(ctx->extent_page != 0);
8511 		assert(spdk_bit_array_get(ctx->blob->bs->used_md_pages, ctx->extent_page) == true);
8512 		ctx->blob->active.extent_pages[bs_cluster_to_extent_table_id(ctx->cluster_num)] = 0;
8513 		blob_write_extent_page(ctx->blob, ctx->extent_page, ctx->cluster_num, ctx->page,
8514 				       blob_free_cluster_free_ep_cb, ctx);
8515 	} else {
8516 		blob_write_extent_page(ctx->blob, *extent_page, ctx->cluster_num, ctx->page,
8517 				       blob_free_cluster_update_ep_cb, ctx);
8518 	}
8519 }
8520 
8521 
8522 static void
8523 blob_free_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num, uint32_t extent_page,
8524 			       struct spdk_blob_md_page *page, spdk_blob_op_complete cb_fn, void *cb_arg)
8525 {
8526 	struct spdk_blob_cluster_op_ctx *ctx;
8527 
8528 	ctx = calloc(1, sizeof(*ctx));
8529 	if (ctx == NULL) {
8530 		cb_fn(cb_arg, -ENOMEM);
8531 		return;
8532 	}
8533 
8534 	ctx->thread = spdk_get_thread();
8535 	ctx->blob = blob;
8536 	ctx->cluster_num = cluster_num;
8537 	ctx->extent_page = extent_page;
8538 	ctx->page = page;
8539 	ctx->cb_fn = cb_fn;
8540 	ctx->cb_arg = cb_arg;
8541 
8542 	spdk_thread_send_msg(blob->bs->md_thread, blob_free_cluster_msg, ctx);
8543 }
8544 
8545 /* START spdk_blob_close */
8546 
8547 static void
8548 blob_close_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
8549 {
8550 	struct spdk_blob *blob = cb_arg;
8551 
8552 	if (bserrno == 0) {
8553 		blob->open_ref--;
8554 		if (blob->open_ref == 0) {
8555 			/*
8556 			 * Blobs with active.num_pages == 0 are deleted blobs.
8557 			 *  These blobs are removed from the blob_store list
8558 			 *  when the deletion process starts, so don't try to
8559 			 *  remove them again.
8560 			 */
8561 			if (blob->active.num_pages > 0) {
8562 				spdk_bit_array_clear(blob->bs->open_blobids, blob->id);
8563 				RB_REMOVE(spdk_blob_tree, &blob->bs->open_blobs, blob);
8564 			}
8565 			blob_free(blob);
8566 		}
8567 	}
8568 
8569 	bs_sequence_finish(seq, bserrno);
8570 }
8571 
8572 static void
8573 blob_close_esnap_done(void *cb_arg, struct spdk_blob *blob, int bserrno)
8574 {
8575 	spdk_bs_sequence_t	*seq = cb_arg;
8576 
8577 	if (bserrno != 0) {
8578 		SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": close failed with error %d\n",
8579 			      blob->id, bserrno);
8580 		bs_sequence_finish(seq, bserrno);
8581 		return;
8582 	}
8583 
8584 	SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": closed, syncing metadata on thread %s\n",
8585 		      blob->id, spdk_thread_get_name(spdk_get_thread()));
8586 
8587 	/* Sync metadata */
8588 	blob_persist(seq, blob, blob_close_cpl, blob);
8589 }
8590 
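/*
 * A minimal usage sketch (illustrative only): closing drops one reference
 * and, on the last reference, persists metadata and frees the in-memory
 * blob. Must be called from the metadata thread.
 *
 *	spdk_blob_close(blob, close_done, NULL);
 */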
8591 void
8592 spdk_blob_close(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
8593 {
8594 	struct spdk_bs_cpl	cpl;
8595 	spdk_bs_sequence_t	*seq;
8596 
8597 	blob_verify_md_op(blob);
8598 
8599 	SPDK_DEBUGLOG(blob, "Closing blob 0x%" PRIx64 "\n", blob->id);
8600 
8601 	if (blob->open_ref == 0) {
8602 		cb_fn(cb_arg, -EBADF);
8603 		return;
8604 	}
8605 
8606 	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
8607 	cpl.u.blob_basic.cb_fn = cb_fn;
8608 	cpl.u.blob_basic.cb_arg = cb_arg;
8609 
8610 	seq = bs_sequence_start_bs(blob->bs->md_channel, &cpl);
8611 	if (!seq) {
8612 		cb_fn(cb_arg, -ENOMEM);
8613 		return;
8614 	}
8615 
8616 	if (blob->open_ref == 1 && blob_is_esnap_clone(blob)) {
8617 		blob_esnap_destroy_bs_dev_channels(blob, false, blob_close_esnap_done, seq);
8618 		return;
8619 	}
8620 
8621 	/* Sync metadata */
8622 	blob_persist(seq, blob, blob_close_cpl, blob);
8623 }
8624 
8625 /* END spdk_blob_close */
8626 
8627 struct spdk_io_channel *spdk_bs_alloc_io_channel(struct spdk_blob_store *bs)
8628 {
8629 	return spdk_get_io_channel(bs);
8630 }
8631 
8632 void
8633 spdk_bs_free_io_channel(struct spdk_io_channel *channel)
8634 {
8635 	blob_esnap_destroy_bs_channel(spdk_io_channel_get_ctx(channel));
8636 	spdk_put_io_channel(channel);
8637 }
8638 
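/*
 * For the spdk_blob_io_* wrappers below, offset and length are expressed in
 * io units (see spdk_bs_get_io_unit_size()).
 */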
8639 void
8640 spdk_blob_io_unmap(struct spdk_blob *blob, struct spdk_io_channel *channel,
8641 		   uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg)
8642 {
8643 	blob_request_submit_op(blob, channel, NULL, offset, length, cb_fn, cb_arg,
8644 			       SPDK_BLOB_UNMAP);
8645 }
8646 
8647 void
8648 spdk_blob_io_write_zeroes(struct spdk_blob *blob, struct spdk_io_channel *channel,
8649 			  uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg)
8650 {
8651 	blob_request_submit_op(blob, channel, NULL, offset, length, cb_fn, cb_arg,
8652 			       SPDK_BLOB_WRITE_ZEROES);
8653 }
8654 
8655 void
8656 spdk_blob_io_write(struct spdk_blob *blob, struct spdk_io_channel *channel,
8657 		   void *payload, uint64_t offset, uint64_t length,
8658 		   spdk_blob_op_complete cb_fn, void *cb_arg)
8659 {
8660 	blob_request_submit_op(blob, channel, payload, offset, length, cb_fn, cb_arg,
8661 			       SPDK_BLOB_WRITE);
8662 }
8663 
8664 void
8665 spdk_blob_io_read(struct spdk_blob *blob, struct spdk_io_channel *channel,
8666 		  void *payload, uint64_t offset, uint64_t length,
8667 		  spdk_blob_op_complete cb_fn, void *cb_arg)
8668 {
8669 	blob_request_submit_op(blob, channel, payload, offset, length, cb_fn, cb_arg,
8670 			       SPDK_BLOB_READ);
8671 }
8672 
8673 void
8674 spdk_blob_io_writev(struct spdk_blob *blob, struct spdk_io_channel *channel,
8675 		    struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
8676 		    spdk_blob_op_complete cb_fn, void *cb_arg)
8677 {
8678 	blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, false, NULL);
8679 }
8680 
8681 void
8682 spdk_blob_io_readv(struct spdk_blob *blob, struct spdk_io_channel *channel,
8683 		   struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
8684 		   spdk_blob_op_complete cb_fn, void *cb_arg)
8685 {
8686 	blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, true, NULL);
8687 }
8688 
8689 void
8690 spdk_blob_io_writev_ext(struct spdk_blob *blob, struct spdk_io_channel *channel,
8691 			struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
8692 			spdk_blob_op_complete cb_fn, void *cb_arg, struct spdk_blob_ext_io_opts *io_opts)
8693 {
8694 	blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, false,
8695 				   io_opts);
8696 }
8697 
8698 void
8699 spdk_blob_io_readv_ext(struct spdk_blob *blob, struct spdk_io_channel *channel,
8700 		       struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
8701 		       spdk_blob_op_complete cb_fn, void *cb_arg, struct spdk_blob_ext_io_opts *io_opts)
8702 {
8703 	blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, true,
8704 				   io_opts);
8705 }
8706 
8707 struct spdk_bs_iter_ctx {
8708 	int64_t page_num;
8709 	struct spdk_blob_store *bs;
8710 
8711 	spdk_blob_op_with_handle_complete cb_fn;
8712 	void *cb_arg;
8713 };
8714 
8715 static void
8716 bs_iter_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
8717 {
8718 	struct spdk_bs_iter_ctx *ctx = cb_arg;
8719 	struct spdk_blob_store *bs = ctx->bs;
8720 	spdk_blob_id id;
8721 
8722 	if (bserrno == 0) {
8723 		ctx->cb_fn(ctx->cb_arg, _blob, bserrno);
8724 		free(ctx);
8725 		return;
8726 	}
8727 
8728 	ctx->page_num++;
8729 	ctx->page_num = spdk_bit_array_find_first_set(bs->used_blobids, ctx->page_num);
8730 	if (ctx->page_num >= spdk_bit_array_capacity(bs->used_blobids)) {
8731 		ctx->cb_fn(ctx->cb_arg, NULL, -ENOENT);
8732 		free(ctx);
8733 		return;
8734 	}
8735 
8736 	id = bs_page_to_blobid(ctx->page_num);
8737 
8738 	spdk_bs_open_blob(bs, id, bs_iter_cpl, ctx);
8739 }
8740 
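/*
 * A minimal iteration sketch (illustrative only): the callback receives
 * -ENOENT once every blob has been visited; each visited blob is handed to
 * spdk_bs_iter_next(), which closes it before opening the next one.
 *
 *	static void
 *	iter_cb(void *cb_arg, struct spdk_blob *blob, int bserrno)
 *	{
 *		struct spdk_blob_store *bs = cb_arg;
 *
 *		if (bserrno != 0) {
 *			return;
 *		}
 *		spdk_bs_iter_next(bs, blob, iter_cb, cb_arg);
 *	}
 *
 *	spdk_bs_iter_first(bs, iter_cb, bs);
 */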
8741 void
8742 spdk_bs_iter_first(struct spdk_blob_store *bs,
8743 		   spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
8744 {
8745 	struct spdk_bs_iter_ctx *ctx;
8746 
8747 	ctx = calloc(1, sizeof(*ctx));
8748 	if (!ctx) {
8749 		cb_fn(cb_arg, NULL, -ENOMEM);
8750 		return;
8751 	}
8752 
8753 	ctx->page_num = -1;
8754 	ctx->bs = bs;
8755 	ctx->cb_fn = cb_fn;
8756 	ctx->cb_arg = cb_arg;
8757 
8758 	bs_iter_cpl(ctx, NULL, -1);
8759 }
8760 
8761 static void
8762 bs_iter_close_cpl(void *cb_arg, int bserrno)
8763 {
8764 	struct spdk_bs_iter_ctx *ctx = cb_arg;
8765 
8766 	bs_iter_cpl(ctx, NULL, -1);
8767 }
8768 
8769 void
8770 spdk_bs_iter_next(struct spdk_blob_store *bs, struct spdk_blob *blob,
8771 		  spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
8772 {
8773 	struct spdk_bs_iter_ctx *ctx;
8774 
8775 	assert(blob != NULL);
8776 
8777 	ctx = calloc(1, sizeof(*ctx));
8778 	if (!ctx) {
8779 		cb_fn(cb_arg, NULL, -ENOMEM);
8780 		return;
8781 	}
8782 
8783 	ctx->page_num = bs_blobid_to_page(blob->id);
8784 	ctx->bs = bs;
8785 	ctx->cb_fn = cb_fn;
8786 	ctx->cb_arg = cb_arg;
8787 
8788 	/* Close the existing blob */
8789 	spdk_blob_close(blob, bs_iter_close_cpl, ctx);
8790 }
8791 
8792 static int
8793 blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value,
8794 	       uint16_t value_len, bool internal)
8795 {
8796 	struct spdk_xattr_tailq *xattrs;
8797 	struct spdk_xattr	*xattr;
8798 	size_t			desc_size;
8799 	void			*tmp;
8800 
8801 	blob_verify_md_op(blob);
8802 
8803 	if (blob->md_ro) {
8804 		return -EPERM;
8805 	}
8806 
8807 	desc_size = sizeof(struct spdk_blob_md_descriptor_xattr) + strlen(name) + value_len;
8808 	if (desc_size > SPDK_BS_MAX_DESC_SIZE) {
8809 	SPDK_DEBUGLOG(blob, "Xattr '%s' of size %zu does not fit into a single page (max %zu)\n", name,
8810 			      desc_size, SPDK_BS_MAX_DESC_SIZE);
8811 		return -ENOMEM;
8812 	}
8813 
8814 	if (internal) {
8815 		xattrs = &blob->xattrs_internal;
8816 		blob->invalid_flags |= SPDK_BLOB_INTERNAL_XATTR;
8817 	} else {
8818 		xattrs = &blob->xattrs;
8819 	}
8820 
8821 	TAILQ_FOREACH(xattr, xattrs, link) {
8822 		if (!strcmp(name, xattr->name)) {
8823 			tmp = malloc(value_len);
8824 			if (!tmp) {
8825 				return -ENOMEM;
8826 			}
8827 
8828 			free(xattr->value);
8829 			xattr->value_len = value_len;
8830 			xattr->value = tmp;
8831 			memcpy(xattr->value, value, value_len);
8832 
8833 			blob->state = SPDK_BLOB_STATE_DIRTY;
8834 
8835 			return 0;
8836 		}
8837 	}
8838 
8839 	xattr = calloc(1, sizeof(*xattr));
8840 	if (!xattr) {
8841 		return -ENOMEM;
8842 	}
8843 
8844 	xattr->name = strdup(name);
8845 	if (!xattr->name) {
8846 		free(xattr);
8847 		return -ENOMEM;
8848 	}
8849 
8850 	xattr->value_len = value_len;
8851 	xattr->value = malloc(value_len);
8852 	if (!xattr->value) {
8853 		free(xattr->name);
8854 		free(xattr);
8855 		return -ENOMEM;
8856 	}
8857 	memcpy(xattr->value, value, value_len);
8858 	TAILQ_INSERT_TAIL(xattrs, xattr, link);
8859 
8860 	blob->state = SPDK_BLOB_STATE_DIRTY;
8861 
8862 	return 0;
8863 }
8864 
8865 int
8866 spdk_blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value,
8867 		    uint16_t value_len)
8868 {
8869 	return blob_set_xattr(blob, name, value, value_len, false);
8870 }
8871 
8872 static int
8873 blob_remove_xattr(struct spdk_blob *blob, const char *name, bool internal)
8874 {
8875 	struct spdk_xattr_tailq *xattrs;
8876 	struct spdk_xattr	*xattr;
8877 
8878 	blob_verify_md_op(blob);
8879 
8880 	if (blob->md_ro) {
8881 		return -EPERM;
8882 	}
8883 	xattrs = internal ? &blob->xattrs_internal : &blob->xattrs;
8884 
8885 	TAILQ_FOREACH(xattr, xattrs, link) {
8886 		if (!strcmp(name, xattr->name)) {
8887 			TAILQ_REMOVE(xattrs, xattr, link);
8888 			free(xattr->value);
8889 			free(xattr->name);
8890 			free(xattr);
8891 
8892 			if (internal && TAILQ_EMPTY(&blob->xattrs_internal)) {
8893 				blob->invalid_flags &= ~SPDK_BLOB_INTERNAL_XATTR;
8894 			}
8895 			blob->state = SPDK_BLOB_STATE_DIRTY;
8896 
8897 			return 0;
8898 		}
8899 	}
8900 
8901 	return -ENOENT;
8902 }
8903 
8904 int
8905 spdk_blob_remove_xattr(struct spdk_blob *blob, const char *name)
8906 {
8907 	return blob_remove_xattr(blob, name, false);
8908 }
8909 
8910 static int
8911 blob_get_xattr_value(struct spdk_blob *blob, const char *name,
8912 		     const void **value, size_t *value_len, bool internal)
8913 {
8914 	struct spdk_xattr	*xattr;
8915 	struct spdk_xattr_tailq *xattrs;
8916 
8917 	xattrs = internal ? &blob->xattrs_internal : &blob->xattrs;
8918 
8919 	TAILQ_FOREACH(xattr, xattrs, link) {
8920 		if (!strcmp(name, xattr->name)) {
8921 			*value = xattr->value;
8922 			*value_len = xattr->value_len;
8923 			return 0;
8924 		}
8925 	}
8926 	return -ENOENT;
8927 }
8928 
8929 int
8930 spdk_blob_get_xattr_value(struct spdk_blob *blob, const char *name,
8931 			  const void **value, size_t *value_len)
8932 {
8933 	blob_verify_md_op(blob);
8934 
8935 	return blob_get_xattr_value(blob, name, value, value_len, false);
8936 }
8937 
8938 struct spdk_xattr_names {
8939 	uint32_t	count;
8940 	const char	*names[];
8941 };
8942 
8943 static int
8944 blob_get_xattr_names(struct spdk_xattr_tailq *xattrs, struct spdk_xattr_names **names)
8945 {
8946 	struct spdk_xattr	*xattr;
8947 	int			count = 0;
8948 
8949 	TAILQ_FOREACH(xattr, xattrs, link) {
8950 		count++;
8951 	}
8952 
8953 	*names = calloc(1, sizeof(struct spdk_xattr_names) + count * sizeof(char *));
8954 	if (*names == NULL) {
8955 		return -ENOMEM;
8956 	}
8957 
8958 	TAILQ_FOREACH(xattr, xattrs, link) {
8959 		(*names)->names[(*names)->count++] = xattr->name;
8960 	}
8961 
8962 	return 0;
8963 }
8964 
8965 int
8966 spdk_blob_get_xattr_names(struct spdk_blob *blob, struct spdk_xattr_names **names)
8967 {
8968 	blob_verify_md_op(blob);
8969 
8970 	return blob_get_xattr_names(&blob->xattrs, names);
8971 }
8972 
8973 uint32_t
8974 spdk_xattr_names_get_count(struct spdk_xattr_names *names)
8975 {
8976 	assert(names != NULL);
8977 
8978 	return names->count;
8979 }
8980 
8981 const char *
8982 spdk_xattr_names_get_name(struct spdk_xattr_names *names, uint32_t index)
8983 {
8984 	if (index >= names->count) {
8985 		return NULL;
8986 	}
8987 
8988 	return names->names[index];
8989 }
8990 
8991 void
8992 spdk_xattr_names_free(struct spdk_xattr_names *names)
8993 {
8994 	free(names);
8995 }
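
/*
 * Usage sketch (not compiled): enumerating xattr names with the accessors
 * above.  The returned strings alias the blob's live xattr list, so free
 * the container (and drop the pointers) before modifying the xattrs.
 */
#if 0
static void
example_list_xattrs(struct spdk_blob *blob)
{
	struct spdk_xattr_names *names = NULL;
	uint32_t i;

	if (spdk_blob_get_xattr_names(blob, &names) != 0) {
		return;
	}

	for (i = 0; i < spdk_xattr_names_get_count(names); i++) {
		printf("xattr: %s\n", spdk_xattr_names_get_name(names, i));
	}

	spdk_xattr_names_free(names);
}
#endif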
8996 
8997 struct spdk_bs_type
8998 spdk_bs_get_bstype(struct spdk_blob_store *bs)
8999 {
9000 	return bs->bstype;
9001 }
9002 
9003 void
9004 spdk_bs_set_bstype(struct spdk_blob_store *bs, struct spdk_bs_type bstype)
9005 {
9006 	memcpy(&bs->bstype, &bstype, sizeof(bstype));
9007 }
9008 
9009 bool
9010 spdk_blob_is_read_only(struct spdk_blob *blob)
9011 {
9012 	assert(blob != NULL);
9013 	return (blob->data_ro || blob->md_ro);
9014 }
9015 
9016 bool
9017 spdk_blob_is_snapshot(struct spdk_blob *blob)
9018 {
9019 	struct spdk_blob_list *snapshot_entry;
9020 
9021 	assert(blob != NULL);
9022 
9023 	snapshot_entry = bs_get_snapshot_entry(blob->bs, blob->id);
9024 	if (snapshot_entry == NULL) {
9025 		return false;
9026 	}
9027 
9028 	return true;
9029 }
9030 
9031 bool
9032 spdk_blob_is_clone(struct spdk_blob *blob)
9033 {
9034 	assert(blob != NULL);
9035 
9036 	if (blob->parent_id != SPDK_BLOBID_INVALID &&
9037 	    blob->parent_id != SPDK_BLOBID_EXTERNAL_SNAPSHOT) {
9038 		assert(spdk_blob_is_thin_provisioned(blob));
9039 		return true;
9040 	}
9041 
9042 	return false;
9043 }
9044 
9045 bool
9046 spdk_blob_is_thin_provisioned(struct spdk_blob *blob)
9047 {
9048 	assert(blob != NULL);
9049 	return !!(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
9050 }
9051 
9052 bool
9053 spdk_blob_is_esnap_clone(const struct spdk_blob *blob)
9054 {
9055 	return blob_is_esnap_clone(blob);
9056 }
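
/*
 * Sketch (not compiled): the predicates above compose into a simple
 * classifier.  Note a blob can be both a snapshot and a clone at once;
 * the ordering below is an arbitrary choice for illustration.
 */
#if 0
static const char *
example_blob_kind(struct spdk_blob *blob)
{
	if (spdk_blob_is_esnap_clone(blob)) {
		return "esnap clone";
	}
	if (spdk_blob_is_snapshot(blob)) {
		return "snapshot";
	}
	if (spdk_blob_is_clone(blob)) {
		return "clone";	/* implies thin provisioned */
	}
	return spdk_blob_is_thin_provisioned(blob) ? "thin blob" : "thick blob";
}
#endif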
9057 
9058 static void
9059 blob_update_clear_method(struct spdk_blob *blob)
9060 {
9061 	enum blob_clear_method stored_cm;
9062 
9063 	assert(blob != NULL);
9064 
9065 	/* If BLOB_CLEAR_WITH_DEFAULT was passed in, use the setting stored
9066 	 * in metadata previously.  If something other than the default was
9067 	 * specified, ignore the stored value and use what was passed in.
9068 	 */
9069 	stored_cm = ((blob->md_ro_flags & SPDK_BLOB_CLEAR_METHOD) >> SPDK_BLOB_CLEAR_METHOD_SHIFT);
9070 
9071 	if (blob->clear_method == BLOB_CLEAR_WITH_DEFAULT) {
9072 		blob->clear_method = stored_cm;
9073 	} else if (blob->clear_method != stored_cm) {
9074 		SPDK_WARNLOG("Using passed in clear method 0x%x instead of stored value of 0x%x\n",
9075 			     blob->clear_method, stored_cm);
9076 	}
9077 }
9078 
9079 spdk_blob_id
9080 spdk_blob_get_parent_snapshot(struct spdk_blob_store *bs, spdk_blob_id blob_id)
9081 {
9082 	struct spdk_blob_list *snapshot_entry = NULL;
9083 	struct spdk_blob_list *clone_entry = NULL;
9084 
9085 	TAILQ_FOREACH(snapshot_entry, &bs->snapshots, link) {
9086 		TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) {
9087 			if (clone_entry->id == blob_id) {
9088 				return snapshot_entry->id;
9089 			}
9090 		}
9091 	}
9092 
9093 	return SPDK_BLOBID_INVALID;
9094 }
9095 
9096 int
9097 spdk_blob_get_clones(struct spdk_blob_store *bs, spdk_blob_id blobid, spdk_blob_id *ids,
9098 		     size_t *count)
9099 {
9100 	struct spdk_blob_list *snapshot_entry, *clone_entry;
9101 	size_t n;
9102 
9103 	snapshot_entry = bs_get_snapshot_entry(bs, blobid);
9104 	if (snapshot_entry == NULL) {
9105 		*count = 0;
9106 		return 0;
9107 	}
9108 
9109 	if (ids == NULL || *count < snapshot_entry->clone_count) {
9110 		*count = snapshot_entry->clone_count;
9111 		return -ENOMEM;
9112 	}
9113 	*count = snapshot_entry->clone_count;
9114 
9115 	n = 0;
9116 	TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) {
9117 		ids[n++] = clone_entry->id;
9118 	}
9119 
9120 	return 0;
9121 }
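
/*
 * Usage sketch (not compiled): the usual two-call pattern for
 * spdk_blob_get_clones().  The first call with ids == NULL reports the
 * required count (returning -ENOMEM); the second fills the array.
 */
#if 0
static void
example_print_clones(struct spdk_blob_store *bs, spdk_blob_id snapshot_id)
{
	spdk_blob_id *ids;
	size_t count = 0, i;

	(void)spdk_blob_get_clones(bs, snapshot_id, NULL, &count);
	if (count == 0) {
		return;	/* not a snapshot, or it has no clones */
	}

	ids = calloc(count, sizeof(*ids));
	if (ids == NULL) {
		return;
	}

	if (spdk_blob_get_clones(bs, snapshot_id, ids, &count) == 0) {
		for (i = 0; i < count; i++) {
			printf("clone: 0x%" PRIx64 "\n", ids[i]);
		}
	}
	free(ids);
}
#endif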
9122 
9123 static void
9124 bs_load_grow_continue(struct spdk_bs_load_ctx *ctx)
9125 {
9126 	int rc;
9127 
9128 	if (ctx->super->size == 0) {
9129 		ctx->super->size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen;
9130 	}
9131 
9132 	if (ctx->super->io_unit_size == 0) {
9133 		ctx->super->io_unit_size = SPDK_BS_PAGE_SIZE;
9134 	}
9135 
9136 	/* Parse the super block */
9137 	ctx->bs->clean = 1;
9138 	ctx->bs->cluster_sz = ctx->super->cluster_size;
9139 	ctx->bs->total_clusters = ctx->super->size / ctx->super->cluster_size;
9140 	ctx->bs->pages_per_cluster = ctx->bs->cluster_sz / SPDK_BS_PAGE_SIZE;
9141 	if (spdk_u32_is_pow2(ctx->bs->pages_per_cluster)) {
9142 		ctx->bs->pages_per_cluster_shift = spdk_u32log2(ctx->bs->pages_per_cluster);
9143 	}
9144 	ctx->bs->io_unit_size = ctx->super->io_unit_size;
9145 	rc = spdk_bit_array_resize(&ctx->used_clusters, ctx->bs->total_clusters);
9146 	if (rc < 0) {
9147 		bs_load_ctx_fail(ctx, -ENOMEM);
9148 		return;
9149 	}
9150 	ctx->bs->md_start = ctx->super->md_start;
9151 	ctx->bs->md_len = ctx->super->md_len;
9152 	rc = spdk_bit_array_resize(&ctx->bs->open_blobids, ctx->bs->md_len);
9153 	if (rc < 0) {
9154 		bs_load_ctx_fail(ctx, -ENOMEM);
9155 		return;
9156 	}
9157 
9158 	ctx->bs->total_data_clusters = ctx->bs->total_clusters - spdk_divide_round_up(
9159 					       ctx->bs->md_start + ctx->bs->md_len, ctx->bs->pages_per_cluster);
9160 	ctx->bs->super_blob = ctx->super->super_blob;
9161 	memcpy(&ctx->bs->bstype, &ctx->super->bstype, sizeof(ctx->super->bstype));
9162 
9163 	if (ctx->super->used_blobid_mask_len == 0 || ctx->super->clean == 0) {
9164 		SPDK_ERRLOG("Cannot grow an unclean blobstore; load it normally to clean it first.\n");
9165 		bs_load_ctx_fail(ctx, -EIO);
9166 		return;
9167 	} else {
9168 		bs_load_read_used_pages(ctx);
9169 	}
9170 }
9171 
9172 static void
9173 bs_load_grow_super_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
9174 {
9175 	struct spdk_bs_load_ctx	*ctx = cb_arg;
9176 
9177 	if (bserrno != 0) {
9178 		bs_load_ctx_fail(ctx, bserrno);
9179 		return;
9180 	}
9181 	bs_load_grow_continue(ctx);
9182 }
9183 
9184 static void
9185 bs_load_grow_used_clusters_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
9186 {
9187 	struct spdk_bs_load_ctx	*ctx = cb_arg;
9188 
9189 	if (bserrno != 0) {
9190 		bs_load_ctx_fail(ctx, bserrno);
9191 		return;
9192 	}
9193 
9194 	spdk_free(ctx->mask);
9195 
9196 	bs_sequence_write_dev(ctx->seq, ctx->super, bs_page_to_lba(ctx->bs, 0),
9197 			      bs_byte_to_lba(ctx->bs, sizeof(*ctx->super)),
9198 			      bs_load_grow_super_write_cpl, ctx);
9199 }
9200 
9201 static void
9202 bs_load_grow_used_clusters_read_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
9203 {
9204 	struct spdk_bs_load_ctx *ctx = cb_arg;
9205 	uint64_t		lba, lba_count;
9206 	uint64_t		dev_size;
9207 	uint64_t		total_clusters;
9208 
9209 	if (bserrno != 0) {
9210 		bs_load_ctx_fail(ctx, bserrno);
9211 		return;
9212 	}
9213 
9214 	/* The type must be correct */
9215 	assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_CLUSTERS);
9216 	/* The length of the mask (in bits) must not be greater than the length of the buffer (converted to bits) */
9217 	assert(ctx->mask->length <= (ctx->super->used_cluster_mask_len * sizeof(
9218 					     struct spdk_blob_md_page) * 8));
9219 	dev_size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen;
9220 	total_clusters = dev_size / ctx->super->cluster_size;
9221 	ctx->mask->length = total_clusters;
9222 
9223 	lba = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start);
9224 	lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len);
9225 	bs_sequence_write_dev(ctx->seq, ctx->mask, lba, lba_count,
9226 			      bs_load_grow_used_clusters_write_cpl, ctx);
9227 }
9228 
9229 static void
9230 bs_load_try_to_grow(struct spdk_bs_load_ctx *ctx)
9231 {
9232 	uint64_t dev_size, total_clusters, used_cluster_mask_len, max_used_cluster_mask;
9233 	uint64_t lba, lba_count, mask_size;
9234 
9235 	dev_size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen;
9236 	total_clusters = dev_size / ctx->super->cluster_size;
9237 	used_cluster_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) +
9238 				spdk_divide_round_up(total_clusters, 8),
9239 				SPDK_BS_PAGE_SIZE);
9240 	max_used_cluster_mask = ctx->super->used_blobid_mask_start - ctx->super->used_cluster_mask_start;
9241 	/* No need to grow, or no space to grow */
9242 	if (ctx->super->size >= dev_size || used_cluster_mask_len > max_used_cluster_mask) {
9243 		SPDK_DEBUGLOG(blob, "No grow\n");
9244 		bs_load_grow_continue(ctx);
9245 		return;
9246 	}
9247 
9248 	SPDK_DEBUGLOG(blob, "Resize blobstore\n");
9249 
9250 	ctx->super->size = dev_size;
9251 	ctx->super->used_cluster_mask_len = used_cluster_mask_len;
9252 	ctx->super->crc = blob_md_page_calc_crc(ctx->super);
9253 
9254 	mask_size = used_cluster_mask_len * SPDK_BS_PAGE_SIZE;
9255 	ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_SOCKET_ID_ANY,
9256 				 SPDK_MALLOC_DMA);
9257 	if (!ctx->mask) {
9258 		bs_load_ctx_fail(ctx, -ENOMEM);
9259 		return;
9260 	}
9261 	lba = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start);
9262 	lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len);
9263 	bs_sequence_read_dev(ctx->seq, ctx->mask, lba, lba_count,
9264 			     bs_load_grow_used_clusters_read_cpl, ctx);
9265 }
9266 
9267 static void
9268 bs_grow_load_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
9269 {
9270 	struct spdk_bs_load_ctx *ctx = cb_arg;
9271 	int rc;
9272 
9273 	rc = bs_super_validate(ctx->super, ctx->bs);
9274 	if (rc != 0) {
9275 		bs_load_ctx_fail(ctx, rc);
9276 		return;
9277 	}
9278 
9279 	bs_load_try_to_grow(ctx);
9280 }
9281 
9282 struct spdk_bs_grow_ctx {
9283 	struct spdk_blob_store		*bs;
9284 	struct spdk_bs_super_block	*super;
9285 
9286 	struct spdk_bit_pool		*new_used_clusters;
9287 	struct spdk_bs_md_mask		*new_used_clusters_mask;
9288 
9289 	spdk_bs_sequence_t		*seq;
9290 };
9291 
9292 static void
9293 bs_grow_live_done(struct spdk_bs_grow_ctx *ctx, int bserrno)
9294 {
9295 	if (bserrno != 0) {
9296 		spdk_bit_pool_free(&ctx->new_used_clusters);
9297 	}
9298 
9299 	bs_sequence_finish(ctx->seq, bserrno);
9300 	free(ctx->new_used_clusters_mask);
9301 	spdk_free(ctx->super);
9302 	free(ctx);
9303 }
9304 
9305 static void
9306 bs_grow_live_super_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
9307 {
9308 	struct spdk_bs_grow_ctx	*ctx = cb_arg;
9309 	struct spdk_blob_store *bs = ctx->bs;
9310 	uint64_t total_clusters;
9311 
9312 	if (bserrno != 0) {
9313 		bs_grow_live_done(ctx, bserrno);
9314 		return;
9315 	}
9316 
9317 	/*
9318 	 * The blobstore is not clean until unload; for now only the super block is up to date.
9319 	 * This is similar to the state right after blobstore init, before bs_write_used_md()
9320 	 * has executed.
9321 	 * When cleanly unloaded, the used md pages will be written out.
9322 	 * In case of unclean shutdown, loading the blobstore will go through the recovery path,
9323 	 * correctly filling out used_clusters at the new size and writing it out.
9324 	 */
9325 	bs->clean = 0;
9326 
9327 	/* Reverting the super->size past this point is complex; avoid any error paths
9328 	 * that would require doing so. */
9329 	spdk_spin_lock(&bs->used_lock);
9330 
9331 	total_clusters = ctx->super->size / ctx->super->cluster_size;
9332 
9333 	assert(total_clusters >= spdk_bit_pool_capacity(bs->used_clusters));
9334 	spdk_bit_pool_store_mask(bs->used_clusters, ctx->new_used_clusters_mask);
9335 
9336 	assert(total_clusters == spdk_bit_pool_capacity(ctx->new_used_clusters));
9337 	spdk_bit_pool_load_mask(ctx->new_used_clusters, ctx->new_used_clusters_mask);
9338 
9339 	spdk_bit_pool_free(&bs->used_clusters);
9340 	bs->used_clusters = ctx->new_used_clusters;
9341 
9342 	bs->total_clusters = total_clusters;
9343 	bs->total_data_clusters = bs->total_clusters - spdk_divide_round_up(
9344 					  bs->md_start + bs->md_len, bs->pages_per_cluster);
9345 
9346 	bs->num_free_clusters = spdk_bit_pool_count_free(bs->used_clusters);
9347 	assert(ctx->bs->num_free_clusters <= ctx->bs->total_clusters);
9348 	spdk_spin_unlock(&bs->used_lock);
9349 
9350 	bs_grow_live_done(ctx, 0);
9351 }
9352 
9353 static void
9354 bs_grow_live_load_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
9355 {
9356 	struct spdk_bs_grow_ctx *ctx = cb_arg;
9357 	uint64_t dev_size, total_clusters, used_cluster_mask_len, max_used_cluster_mask;
9358 	int rc;
9359 
9360 	if (bserrno != 0) {
9361 		bs_grow_live_done(ctx, bserrno);
9362 		return;
9363 	}
9364 
9365 	rc = bs_super_validate(ctx->super, ctx->bs);
9366 	if (rc != 0) {
9367 		bs_grow_live_done(ctx, rc);
9368 		return;
9369 	}
9370 
9371 	dev_size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen;
9372 	total_clusters = dev_size / ctx->super->cluster_size;
9373 	used_cluster_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) +
9374 				spdk_divide_round_up(total_clusters, 8),
9375 				SPDK_BS_PAGE_SIZE);
9376 	max_used_cluster_mask = ctx->super->used_blobid_mask_start - ctx->super->used_cluster_mask_start;
9377 	/* Check dev_size rather than total_clusters: dev_size can change even while total_clusters remains the same. */
9378 	if (dev_size == ctx->super->size) {
9379 		SPDK_DEBUGLOG(blob, "No need to grow blobstore\n");
9380 		bs_grow_live_done(ctx, 0);
9381 		return;
9382 	}
9383 	/*
9384 	 * Blobstore cannot be shrunk, so fail the grow if:
9385 	 * - the new device size is smaller than the size in the super block
9386 	 * - the new total number of clusters is smaller than the used_clusters bit_pool capacity
9387 	 * - there is not enough space in metadata for the used_cluster_mask to be written out
9388 	 */
9389 	if (dev_size < ctx->super->size ||
9390 	    total_clusters < spdk_bit_pool_capacity(ctx->bs->used_clusters) ||
9391 	    used_cluster_mask_len > max_used_cluster_mask) {
9392 		SPDK_DEBUGLOG(blob, "No space to grow blobstore\n");
9393 		bs_grow_live_done(ctx, -ENOSPC);
9394 		return;
9395 	}
9396 
9397 	SPDK_DEBUGLOG(blob, "Resizing blobstore\n");
9398 
9399 	ctx->new_used_clusters_mask = calloc(1, total_clusters);
9400 	if (!ctx->new_used_clusters_mask) {
9401 		bs_grow_live_done(ctx, -ENOMEM);
9402 		return;
9403 	}
9404 	ctx->new_used_clusters = spdk_bit_pool_create(total_clusters);
9405 	if (!ctx->new_used_clusters) {
9406 		bs_grow_live_done(ctx, -ENOMEM);
9407 		return;
9408 	}
9409 
9410 	ctx->super->clean = 0;
9411 	ctx->super->size = dev_size;
9412 	ctx->super->used_cluster_mask_len = used_cluster_mask_len;
9413 	bs_write_super(seq, ctx->bs, ctx->super, bs_grow_live_super_write_cpl, ctx);
9414 }
9415 
9416 void
9417 spdk_bs_grow_live(struct spdk_blob_store *bs,
9418 		  spdk_bs_op_complete cb_fn, void *cb_arg)
9419 {
9420 	struct spdk_bs_cpl	cpl;
9421 	struct spdk_bs_grow_ctx *ctx;
9422 
9423 	assert(spdk_get_thread() == bs->md_thread);
9424 
9425 	SPDK_DEBUGLOG(blob, "Growing blobstore on dev %p\n", bs->dev);
9426 
9427 	cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
9428 	cpl.u.bs_basic.cb_fn = cb_fn;
9429 	cpl.u.bs_basic.cb_arg = cb_arg;
9430 
9431 	ctx = calloc(1, sizeof(struct spdk_bs_grow_ctx));
9432 	if (!ctx) {
9433 		cb_fn(cb_arg, -ENOMEM);
9434 		return;
9435 	}
9436 	ctx->bs = bs;
9437 
9438 	ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL,
9439 				  SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
9440 	if (!ctx->super) {
9441 		free(ctx);
9442 		cb_fn(cb_arg, -ENOMEM);
9443 		return;
9444 	}
9445 
9446 	ctx->seq = bs_sequence_start_bs(bs->md_channel, &cpl);
9447 	if (!ctx->seq) {
9448 		spdk_free(ctx->super);
9449 		free(ctx);
9450 		cb_fn(cb_arg, -ENOMEM);
9451 		return;
9452 	}
9453 
9454 	/* Read the super block */
9455 	bs_sequence_read_dev(ctx->seq, ctx->super, bs_page_to_lba(bs, 0),
9456 			     bs_byte_to_lba(bs, sizeof(*ctx->super)),
9457 			     bs_grow_live_load_super_cpl, ctx);
9458 }
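
/*
 * Usage sketch (not compiled): growing a loaded blobstore after its
 * underlying device was extended.  Must run on the metadata thread;
 * completion with -ENOSPC means the device shrank or the metadata region
 * has no room for the larger used_cluster mask.
 */
#if 0
static void
example_grow_done(void *cb_arg, int bserrno)
{
	struct spdk_blob_store *bs = cb_arg;

	if (bserrno == 0) {
		printf("grown, %" PRIu64 " free clusters\n", spdk_bs_free_cluster_count(bs));
	}
}

/* From the md thread: spdk_bs_grow_live(bs, example_grow_done, bs); */
#endif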
9459 
9460 void
9461 spdk_bs_grow(struct spdk_bs_dev *dev, struct spdk_bs_opts *o,
9462 	     spdk_bs_op_with_handle_complete cb_fn, void *cb_arg)
9463 {
9464 	struct spdk_blob_store	*bs;
9465 	struct spdk_bs_cpl	cpl;
9466 	struct spdk_bs_load_ctx *ctx;
9467 	struct spdk_bs_opts	opts = {};
9468 	int err;
9469 
9470 	SPDK_DEBUGLOG(blob, "Loading blobstore from dev %p\n", dev);
9471 
9472 	if ((SPDK_BS_PAGE_SIZE % dev->blocklen) != 0) {
9473 		SPDK_DEBUGLOG(blob, "unsupported dev block length of %" PRIu32 "\n", dev->blocklen);
9474 		dev->destroy(dev);
9475 		cb_fn(cb_arg, NULL, -EINVAL);
9476 		return;
9477 	}
9478 
9479 	spdk_bs_opts_init(&opts, sizeof(opts));
9480 	if (o) {
9481 		if (bs_opts_copy(o, &opts)) {
9482 			return;
9483 		}
9484 	}
9485 
9486 	if (opts.max_md_ops == 0 || opts.max_channel_ops == 0) {
9487 		dev->destroy(dev);
9488 		cb_fn(cb_arg, NULL, -EINVAL);
9489 		return;
9490 	}
9491 
9492 	err = bs_alloc(dev, &opts, &bs, &ctx);
9493 	if (err) {
9494 		dev->destroy(dev);
9495 		cb_fn(cb_arg, NULL, err);
9496 		return;
9497 	}
9498 
9499 	cpl.type = SPDK_BS_CPL_TYPE_BS_HANDLE;
9500 	cpl.u.bs_handle.cb_fn = cb_fn;
9501 	cpl.u.bs_handle.cb_arg = cb_arg;
9502 	cpl.u.bs_handle.bs = bs;
9503 
9504 	ctx->seq = bs_sequence_start_bs(bs->md_channel, &cpl);
9505 	if (!ctx->seq) {
9506 		spdk_free(ctx->super);
9507 		free(ctx);
9508 		bs_free(bs);
9509 		cb_fn(cb_arg, NULL, -ENOMEM);
9510 		return;
9511 	}
9512 
9513 	/* Read the super block */
9514 	bs_sequence_read_dev(ctx->seq, ctx->super, bs_page_to_lba(bs, 0),
9515 			     bs_byte_to_lba(bs, sizeof(*ctx->super)),
9516 			     bs_grow_load_super_cpl, ctx);
9517 }
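
/*
 * Sketch (not compiled): spdk_bs_grow() is the load-time counterpart of
 * spdk_bs_grow_live().  It loads the blobstore and, when the device has
 * grown and the metadata region has room, rewrites the used_cluster mask
 * and super block before completing with the loaded handle.
 */
#if 0
static void
example_grow_load_done(void *cb_arg, struct spdk_blob_store *bs, int bserrno)
{
	/* On success, 'bs' is loaded and reflects the enlarged device. */
}

/* spdk_bs_grow(dev, NULL, example_grow_load_done, NULL); */
#endif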
9518 
9519 int
9520 spdk_blob_get_esnap_id(struct spdk_blob *blob, const void **id, size_t *len)
9521 {
9522 	if (!blob_is_esnap_clone(blob)) {
9523 		return -EINVAL;
9524 	}
9525 
9526 	return blob_get_xattr_value(blob, BLOB_EXTERNAL_SNAPSHOT_ID, id, len, true);
9527 }
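
/*
 * Usage sketch (not compiled): reading back the external snapshot id that
 * was supplied in spdk_blob_opts.esnap_id at creation time.  The id is an
 * opaque byte string; treating it as printable text here is just for
 * illustration (lvol, for instance, stores a UUID string).
 */
#if 0
static void
example_print_esnap_id(struct spdk_blob *blob)
{
	const void *id;
	size_t len;

	if (spdk_blob_get_esnap_id(blob, &id, &len) == 0) {
		printf("esnap id: %.*s\n", (int)len, (const char *)id);
	}
}
#endif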
9528 
9529 struct spdk_io_channel *
9530 blob_esnap_get_io_channel(struct spdk_io_channel *ch, struct spdk_blob *blob)
9531 {
9532 	struct spdk_bs_channel		*bs_channel = spdk_io_channel_get_ctx(ch);
9533 	struct spdk_bs_dev		*bs_dev = blob->back_bs_dev;
9534 	struct blob_esnap_channel	find = {};
9535 	struct blob_esnap_channel	*esnap_channel, *existing;
9536 
9537 	find.blob_id = blob->id;
9538 	esnap_channel = RB_FIND(blob_esnap_channel_tree, &bs_channel->esnap_channels, &find);
9539 	if (spdk_likely(esnap_channel != NULL)) {
9540 		SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": using cached channel on thread %s\n",
9541 			      blob->id, spdk_thread_get_name(spdk_get_thread()));
9542 		return esnap_channel->channel;
9543 	}
9544 
9545 	SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": allocating channel on thread %s\n",
9546 		      blob->id, spdk_thread_get_name(spdk_get_thread()));
9547 
9548 	esnap_channel = calloc(1, sizeof(*esnap_channel));
9549 	if (esnap_channel == NULL) {
9550 		SPDK_NOTICELOG("blob 0x%" PRIx64 " channel allocation failed: no memory\n",
9551 			       find.blob_id);
9552 		return NULL;
9553 	}
9554 	esnap_channel->channel = bs_dev->create_channel(bs_dev);
9555 	if (esnap_channel->channel == NULL) {
9556 		SPDK_NOTICELOG("blob 0x%" PRIx64 " back channel allocation failed\n", blob->id);
9557 		free(esnap_channel);
9558 		return NULL;
9559 	}
9560 	esnap_channel->blob_id = find.blob_id;
9561 	existing = RB_INSERT(blob_esnap_channel_tree, &bs_channel->esnap_channels, esnap_channel);
9562 	if (spdk_unlikely(existing != NULL)) {
9563 		/*
9564 		 * This should be unreachable: all modifications to this tree happen on this thread.
9565 		 */
9566 		SPDK_ERRLOG("blob 0x%" PRIx64 " lost race to allocate a channel\n", find.blob_id);
9567 		assert(false);
9568 
9569 		bs_dev->destroy_channel(bs_dev, esnap_channel->channel);
9570 		free(esnap_channel);
9571 
9572 		return existing->channel;
9573 	}
9574 
9575 	return esnap_channel->channel;
9576 }
9577 
9578 static int
9579 blob_esnap_channel_compare(struct blob_esnap_channel *c1, struct blob_esnap_channel *c2)
9580 {
9581 	return (c1->blob_id < c2->blob_id ? -1 : c1->blob_id > c2->blob_id);
9582 }
9583 
9584 struct blob_esnap_destroy_ctx {
9585 	spdk_blob_op_with_handle_complete	cb_fn;
9586 	void					*cb_arg;
9587 	struct spdk_blob			*blob;
9588 	struct spdk_bs_dev			*back_bs_dev;
9589 	bool					abort_io;
9590 };
9591 
9592 static void
9593 blob_esnap_destroy_channels_done(struct spdk_io_channel_iter *i, int status)
9594 {
9595 	struct blob_esnap_destroy_ctx	*ctx = spdk_io_channel_iter_get_ctx(i);
9596 	struct spdk_blob		*blob = ctx->blob;
9597 	struct spdk_blob_store		*bs = blob->bs;
9598 
9599 	SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": done destroying channels for this blob\n",
9600 		      blob->id);
9601 
9602 	if (ctx->cb_fn != NULL) {
9603 		ctx->cb_fn(ctx->cb_arg, blob, status);
9604 	}
9605 	free(ctx);
9606 
9607 	bs->esnap_channels_unloading--;
9608 	if (bs->esnap_channels_unloading == 0 && bs->esnap_unload_cb_fn != NULL) {
9609 		spdk_bs_unload(bs, bs->esnap_unload_cb_fn, bs->esnap_unload_cb_arg);
9610 	}
9611 }
9612 
9613 static void
9614 blob_esnap_destroy_one_channel(struct spdk_io_channel_iter *i)
9615 {
9616 	struct blob_esnap_destroy_ctx	*ctx = spdk_io_channel_iter_get_ctx(i);
9617 	struct spdk_blob		*blob = ctx->blob;
9618 	struct spdk_bs_dev		*bs_dev = ctx->back_bs_dev;
9619 	struct spdk_io_channel		*channel = spdk_io_channel_iter_get_channel(i);
9620 	struct spdk_bs_channel		*bs_channel = spdk_io_channel_get_ctx(channel);
9621 	struct blob_esnap_channel	*esnap_channel;
9622 	struct blob_esnap_channel	find = {};
9623 
9624 	assert(spdk_get_thread() == spdk_io_channel_get_thread(channel));
9625 
9626 	find.blob_id = blob->id;
9627 	esnap_channel = RB_FIND(blob_esnap_channel_tree, &bs_channel->esnap_channels, &find);
9628 	if (esnap_channel != NULL) {
9629 		SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": destroying channel on thread %s\n",
9630 			      blob->id, spdk_thread_get_name(spdk_get_thread()));
9631 		RB_REMOVE(blob_esnap_channel_tree, &bs_channel->esnap_channels, esnap_channel);
9632 
9633 		if (ctx->abort_io) {
9634 			spdk_bs_user_op_t *op, *tmp;
9635 
9636 			TAILQ_FOREACH_SAFE(op, &bs_channel->queued_io, link, tmp) {
9637 				if (op->back_channel == esnap_channel->channel) {
9638 					TAILQ_REMOVE(&bs_channel->queued_io, op, link);
9639 					bs_user_op_abort(op, -EIO);
9640 				}
9641 			}
9642 		}
9643 
9644 		bs_dev->destroy_channel(bs_dev, esnap_channel->channel);
9645 		free(esnap_channel);
9646 	}
9647 
9648 	spdk_for_each_channel_continue(i, 0);
9649 }
9650 
9651 /*
9652  * Destroy the channels for a specific blob on each thread with a blobstore channel. This should be
9653  * used when closing an esnap clone blob and after decoupling from the parent.
9654  */
9655 static void
9656 blob_esnap_destroy_bs_dev_channels(struct spdk_blob *blob, bool abort_io,
9657 				   spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
9658 {
9659 	struct blob_esnap_destroy_ctx	*ctx;
9660 
9661 	if (!blob_is_esnap_clone(blob) || blob->back_bs_dev == NULL) {
9662 		if (cb_fn != NULL) {
9663 			cb_fn(cb_arg, blob, 0);
9664 		}
9665 		return;
9666 	}
9667 
9668 	ctx = calloc(1, sizeof(*ctx));
9669 	if (ctx == NULL) {
9670 		if (cb_fn != NULL) {
9671 			cb_fn(cb_arg, blob, -ENOMEM);
9672 		}
9673 		return;
9674 	}
9675 	ctx->cb_fn = cb_fn;
9676 	ctx->cb_arg = cb_arg;
9677 	ctx->blob = blob;
9678 	ctx->back_bs_dev = blob->back_bs_dev;
9679 	ctx->abort_io = abort_io;
9680 
9681 	SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": destroying channels for this blob\n",
9682 		      blob->id);
9683 
9684 	blob->bs->esnap_channels_unloading++;
9685 	spdk_for_each_channel(blob->bs, blob_esnap_destroy_one_channel, ctx,
9686 			      blob_esnap_destroy_channels_done);
9687 }
9688 
9689 /*
9690  * Destroy all bs_dev channels on a specific blobstore channel. This should be used when a
9691  * bs_channel is destroyed.
9692  */
9693 static void
9694 blob_esnap_destroy_bs_channel(struct spdk_bs_channel *ch)
9695 {
9696 	struct blob_esnap_channel *esnap_channel, *esnap_channel_tmp;
9697 
9698 	assert(spdk_get_thread() == spdk_io_channel_get_thread(spdk_io_channel_from_ctx(ch)));
9699 
9700 	SPDK_DEBUGLOG(blob_esnap, "destroying channels on thread %s\n",
9701 		      spdk_thread_get_name(spdk_get_thread()));
9702 	RB_FOREACH_SAFE(esnap_channel, blob_esnap_channel_tree, &ch->esnap_channels,
9703 			esnap_channel_tmp) {
9704 		SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64
9705 			      ": destroying one channel in thread %s\n",
9706 			      esnap_channel->blob_id, spdk_thread_get_name(spdk_get_thread()));
9707 		RB_REMOVE(blob_esnap_channel_tree, &ch->esnap_channels, esnap_channel);
9708 		spdk_put_io_channel(esnap_channel->channel);
9709 		free(esnap_channel);
9710 	}
9711 	SPDK_DEBUGLOG(blob_esnap, "done destroying channels on thread %s\n",
9712 		      spdk_thread_get_name(spdk_get_thread()));
9713 }
9714 
9715 struct set_bs_dev_ctx {
9716 	struct spdk_blob	*blob;
9717 	struct spdk_bs_dev	*back_bs_dev;
9718 	spdk_blob_op_complete	cb_fn;
9719 	void			*cb_arg;
9720 	int			bserrno;
9721 };
9722 
9723 static void
9724 blob_set_back_bs_dev_done(void *_ctx, int bserrno)
9725 {
9726 	struct set_bs_dev_ctx	*ctx = _ctx;
9727 
9728 	if (bserrno != 0) {
9729 		/* Even though the unfreeze failed, the update may have succeeded. */
9730 		SPDK_ERRLOG("blob 0x%" PRIx64 ": unfreeze failed with error %d\n", ctx->blob->id,
9731 			    bserrno);
9732 	}
9733 	ctx->cb_fn(ctx->cb_arg, ctx->bserrno);
9734 	free(ctx);
9735 }
9736 
9737 static void
9738 blob_frozen_set_back_bs_dev(void *_ctx, struct spdk_blob *blob, int bserrno)
9739 {
9740 	struct set_bs_dev_ctx	*ctx = _ctx;
9741 
9742 	if (bserrno != 0) {
9743 		SPDK_ERRLOG("blob 0x%" PRIx64 ": failed to release old back_bs_dev with error %d\n",
9744 			    blob->id, bserrno);
9745 		ctx->bserrno = bserrno;
9746 		blob_unfreeze_io(blob, blob_set_back_bs_dev_done, ctx);
9747 		return;
9748 	}
9749 
9750 	if (blob->back_bs_dev != NULL) {
9751 		blob->back_bs_dev->destroy(blob->back_bs_dev);
9752 	}
9753 
9754 	SPDK_NOTICELOG("blob 0x%" PRIx64 ": hotplugged back_bs_dev\n", blob->id);
9755 	blob->back_bs_dev = ctx->back_bs_dev;
9756 	ctx->bserrno = 0;
9757 
9758 	blob_unfreeze_io(blob, blob_set_back_bs_dev_done, ctx);
9759 }
9760 
9761 static void
9762 blob_frozen_destroy_esnap_channels(void *_ctx, int bserrno)
9763 {
9764 	struct set_bs_dev_ctx	*ctx = _ctx;
9765 	struct spdk_blob	*blob = ctx->blob;
9766 
9767 	if (bserrno != 0) {
9768 		SPDK_ERRLOG("blob 0x%" PRIx64 ": failed to freeze with error %d\n", blob->id,
9769 			    bserrno);
9770 		ctx->cb_fn(ctx->cb_arg, bserrno);
9771 		free(ctx);
9772 		return;
9773 	}
9774 
9775 	/*
9776 	 * This does not prevent future reads from the esnap device because any future IO will
9777 	 * lazily create a new esnap IO channel.
9778 	 */
9779 	blob_esnap_destroy_bs_dev_channels(blob, true, blob_frozen_set_back_bs_dev, ctx);
9780 }
9781 
9782 void
9783 spdk_blob_set_esnap_bs_dev(struct spdk_blob *blob, struct spdk_bs_dev *back_bs_dev,
9784 			   spdk_blob_op_complete cb_fn, void *cb_arg)
9785 {
9786 	struct set_bs_dev_ctx	*ctx;
9787 
9788 	if (!blob_is_esnap_clone(blob)) {
9789 		SPDK_ERRLOG("blob 0x%" PRIx64 ": not an esnap clone\n", blob->id);
9790 		cb_fn(cb_arg, -EINVAL);
9791 		return;
9792 	}
9793 
9794 	ctx = calloc(1, sizeof(*ctx));
9795 	if (ctx == NULL) {
9796 		SPDK_ERRLOG("blob 0x%" PRIx64 ": out of memory while setting back_bs_dev\n",
9797 			    blob->id);
9798 		cb_fn(cb_arg, -ENOMEM);
9799 		return;
9800 	}
9801 	ctx->cb_fn = cb_fn;
9802 	ctx->cb_arg = cb_arg;
9803 	ctx->back_bs_dev = back_bs_dev;
9804 	ctx->blob = blob;
9805 	blob_freeze_io(blob, blob_frozen_destroy_esnap_channels, ctx);
9806 }
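
/*
 * Usage sketch (not compiled): hotplugging a replacement back_bs_dev for
 * an esnap clone, e.g. when the missing external snapshot bdev reappears.
 * IO is frozen, per-thread esnap channels are torn down, any old bs_dev
 * is destroyed, and IO resumes against the new device.
 */
#if 0
static void
example_hotplug_done(void *cb_arg, int bserrno)
{
	/* On success, reads of unallocated clusters are served by the new dev. */
}

/* spdk_blob_set_esnap_bs_dev(blob, new_bs_dev, example_hotplug_done, NULL); */
#endif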
9807 
9808 struct spdk_bs_dev *
9809 spdk_blob_get_esnap_bs_dev(const struct spdk_blob *blob)
9810 {
9811 	if (!blob_is_esnap_clone(blob)) {
9812 		SPDK_ERRLOG("blob 0x%" PRIx64 ": not an esnap clone\n", blob->id);
9813 		return NULL;
9814 	}
9815 
9816 	return blob->back_bs_dev;
9817 }
9818 
9819 bool
9820 spdk_blob_is_degraded(const struct spdk_blob *blob)
9821 {
9822 	if (blob->bs->dev->is_degraded != NULL && blob->bs->dev->is_degraded(blob->bs->dev)) {
9823 		return true;
9824 	}
9825 	if (blob->back_bs_dev == NULL || blob->back_bs_dev->is_degraded == NULL) {
9826 		return false;
9827 	}
9828 
9829 	return blob->back_bs_dev->is_degraded(blob->back_bs_dev);
9830 }
9831 
9832 SPDK_LOG_REGISTER_COMPONENT(blob)
9833 SPDK_LOG_REGISTER_COMPONENT(blob_esnap)
9834