xref: /spdk/lib/blob/blobstore.c (revision 40b11d96241a5b40eeb065071584c4ff1a645b70)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2017 Intel Corporation.
3  *   All rights reserved.
4  *   Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
5  */
6 
7 #include "spdk/stdinc.h"
8 
9 #include "spdk/blob.h"
10 #include "spdk/crc32.h"
11 #include "spdk/env.h"
12 #include "spdk/queue.h"
13 #include "spdk/thread.h"
14 #include "spdk/bit_array.h"
15 #include "spdk/bit_pool.h"
16 #include "spdk/likely.h"
17 #include "spdk/util.h"
18 #include "spdk/string.h"
19 
20 #include "spdk_internal/assert.h"
21 #include "spdk/log.h"
22 
23 #include "blobstore.h"
24 
25 #define BLOB_CRC32C_INITIAL    0xffffffffUL
26 
27 static int bs_register_md_thread(struct spdk_blob_store *bs);
28 static int bs_unregister_md_thread(struct spdk_blob_store *bs);
29 static void blob_close_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno);
30 static void blob_insert_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num,
31 		uint64_t cluster, uint32_t extent, struct spdk_blob_md_page *page,
32 		spdk_blob_op_complete cb_fn, void *cb_arg);
33 static void blob_free_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num,
34 		uint32_t extent_page, struct spdk_blob_md_page *page, spdk_blob_op_complete cb_fn, void *cb_arg);
35 
36 static int blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value,
37 			  uint16_t value_len, bool internal);
38 static int blob_get_xattr_value(struct spdk_blob *blob, const char *name,
39 				const void **value, size_t *value_len, bool internal);
40 static int blob_remove_xattr(struct spdk_blob *blob, const char *name, bool internal);
41 
42 static void blob_write_extent_page(struct spdk_blob *blob, uint32_t extent, uint64_t cluster_num,
43 				   struct spdk_blob_md_page *page, spdk_blob_op_complete cb_fn, void *cb_arg);
44 static void blob_freeze_io(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg);
45 
46 static void bs_shallow_copy_cluster_find_next(void *cb_arg);
47 
48 /*
49  * External snapshots require a channel per thread per esnap bdev.  The tree
50  * is populated lazily as blob IOs are handled by the back_bs_dev. When this
51  * channel is destroyed, all the channels in the tree are destroyed.
52  */
53 
54 struct blob_esnap_channel {
55 	RB_ENTRY(blob_esnap_channel)	node;
56 	spdk_blob_id			blob_id;
57 	struct spdk_io_channel		*channel;
58 };
59 
60 static int blob_esnap_channel_compare(struct blob_esnap_channel *c1, struct blob_esnap_channel *c2);
61 static void blob_esnap_destroy_bs_dev_channels(struct spdk_blob *blob, bool abort_io,
62 		spdk_blob_op_with_handle_complete cb_fn, void *cb_arg);
63 static void blob_esnap_destroy_bs_channel(struct spdk_bs_channel *ch);
64 static void blob_set_back_bs_dev_frozen(void *_ctx, int bserrno);
65 RB_GENERATE_STATIC(blob_esnap_channel_tree, blob_esnap_channel, node, blob_esnap_channel_compare)
66 
67 static inline bool
68 blob_is_esnap_clone(const struct spdk_blob *blob)
69 {
70 	assert(blob != NULL);
71 	return !!(blob->invalid_flags & SPDK_BLOB_EXTERNAL_SNAPSHOT);
72 }
73 
74 static int
75 blob_id_cmp(struct spdk_blob *blob1, struct spdk_blob *blob2)
76 {
77 	assert(blob1 != NULL && blob2 != NULL);
78 	return (blob1->id < blob2->id ? -1 : blob1->id > blob2->id);
79 }
80 
81 RB_GENERATE_STATIC(spdk_blob_tree, spdk_blob, link, blob_id_cmp);
82 
83 static void
84 blob_verify_md_op(struct spdk_blob *blob)
85 {
86 	assert(blob != NULL);
87 	assert(spdk_get_thread() == blob->bs->md_thread);
88 	assert(blob->state != SPDK_BLOB_STATE_LOADING);
89 }
90 
91 static struct spdk_blob_list *
92 bs_get_snapshot_entry(struct spdk_blob_store *bs, spdk_blob_id blobid)
93 {
94 	struct spdk_blob_list *snapshot_entry = NULL;
95 
96 	TAILQ_FOREACH(snapshot_entry, &bs->snapshots, link) {
97 		if (snapshot_entry->id == blobid) {
98 			break;
99 		}
100 	}
101 
102 	return snapshot_entry;
103 }
104 
105 static void
106 bs_claim_md_page(struct spdk_blob_store *bs, uint32_t page)
107 {
108 	assert(spdk_spin_held(&bs->used_lock));
109 	assert(page < spdk_bit_array_capacity(bs->used_md_pages));
110 	assert(spdk_bit_array_get(bs->used_md_pages, page) == false);
111 
112 	spdk_bit_array_set(bs->used_md_pages, page);
113 }
114 
115 static void
116 bs_release_md_page(struct spdk_blob_store *bs, uint32_t page)
117 {
118 	assert(spdk_spin_held(&bs->used_lock));
119 	assert(page < spdk_bit_array_capacity(bs->used_md_pages));
120 	assert(spdk_bit_array_get(bs->used_md_pages, page) == true);
121 
122 	spdk_bit_array_clear(bs->used_md_pages, page);
123 }
124 
125 static uint32_t
126 bs_claim_cluster(struct spdk_blob_store *bs)
127 {
128 	uint32_t cluster_num;
129 
130 	assert(spdk_spin_held(&bs->used_lock));
131 
132 	cluster_num = spdk_bit_pool_allocate_bit(bs->used_clusters);
133 	if (cluster_num == UINT32_MAX) {
134 		return UINT32_MAX;
135 	}
136 
137 	SPDK_DEBUGLOG(blob, "Claiming cluster %u\n", cluster_num);
138 	bs->num_free_clusters--;
139 
140 	return cluster_num;
141 }
142 
143 static void
144 bs_release_cluster(struct spdk_blob_store *bs, uint32_t cluster_num)
145 {
146 	assert(spdk_spin_held(&bs->used_lock));
147 	assert(cluster_num < spdk_bit_pool_capacity(bs->used_clusters));
148 	assert(spdk_bit_pool_is_allocated(bs->used_clusters, cluster_num) == true);
149 	assert(bs->num_free_clusters < bs->total_clusters);
150 
151 	SPDK_DEBUGLOG(blob, "Releasing cluster %u\n", cluster_num);
152 
153 	spdk_bit_pool_free_bit(bs->used_clusters, cluster_num);
154 	bs->num_free_clusters++;
155 }
156 
157 static int
158 blob_insert_cluster(struct spdk_blob *blob, uint32_t cluster_num, uint64_t cluster)
159 {
160 	uint64_t *cluster_lba = &blob->active.clusters[cluster_num];
161 
162 	blob_verify_md_op(blob);
163 
164 	if (*cluster_lba != 0) {
165 		return -EEXIST;
166 	}
167 
168 	*cluster_lba = bs_cluster_to_lba(blob->bs, cluster);
169 	blob->active.num_allocated_clusters++;
170 
171 	return 0;
172 }
173 
174 static int
175 bs_allocate_cluster(struct spdk_blob *blob, uint32_t cluster_num,
176 		    uint64_t *cluster, uint32_t *lowest_free_md_page, bool update_map)
177 {
178 	uint32_t *extent_page = 0;
179 
180 	assert(spdk_spin_held(&blob->bs->used_lock));
181 
182 	*cluster = bs_claim_cluster(blob->bs);
183 	if (*cluster == UINT32_MAX) {
184 		/* No more free clusters. Cannot satisfy the request */
185 		return -ENOSPC;
186 	}
187 
188 	if (blob->use_extent_table) {
189 		extent_page = bs_cluster_to_extent_page(blob, cluster_num);
190 		if (*extent_page == 0) {
191 			/* Extent page shall never occupy md_page so start the search from 1 */
192 			if (*lowest_free_md_page == 0) {
193 				*lowest_free_md_page = 1;
194 			}
195 			/* No extent_page is allocated for the cluster */
196 			*lowest_free_md_page = spdk_bit_array_find_first_clear(blob->bs->used_md_pages,
197 					       *lowest_free_md_page);
198 			if (*lowest_free_md_page == UINT32_MAX) {
199 				/* No more free md pages. Cannot satisfy the request */
200 				bs_release_cluster(blob->bs, *cluster);
201 				return -ENOSPC;
202 			}
203 			bs_claim_md_page(blob->bs, *lowest_free_md_page);
204 		}
205 	}
206 
207 	SPDK_DEBUGLOG(blob, "Claiming cluster %" PRIu64 " for blob 0x%" PRIx64 "\n", *cluster,
208 		      blob->id);
209 
210 	if (update_map) {
211 		blob_insert_cluster(blob, cluster_num, *cluster);
212 		if (blob->use_extent_table && *extent_page == 0) {
213 			*extent_page = *lowest_free_md_page;
214 		}
215 	}
216 
217 	return 0;
218 }
219 
220 static void
221 blob_xattrs_init(struct spdk_blob_xattr_opts *xattrs)
222 {
223 	xattrs->count = 0;
224 	xattrs->names = NULL;
225 	xattrs->ctx = NULL;
226 	xattrs->get_value = NULL;
227 }
228 
229 void
230 spdk_blob_opts_init(struct spdk_blob_opts *opts, size_t opts_size)
231 {
232 	if (!opts) {
233 		SPDK_ERRLOG("opts should not be NULL\n");
234 		return;
235 	}
236 
237 	if (!opts_size) {
238 		SPDK_ERRLOG("opts_size should not be zero value\n");
239 		return;
240 	}
241 
242 	memset(opts, 0, opts_size);
243 	opts->opts_size = opts_size;
244 
245 #define FIELD_OK(field) \
246         offsetof(struct spdk_blob_opts, field) + sizeof(opts->field) <= opts_size
247 
248 #define SET_FIELD(field, value) \
249         if (FIELD_OK(field)) { \
250                 opts->field = value; \
251         } \
252 
253 	SET_FIELD(num_clusters, 0);
254 	SET_FIELD(thin_provision, false);
255 	SET_FIELD(clear_method, BLOB_CLEAR_WITH_DEFAULT);
256 
257 	if (FIELD_OK(xattrs)) {
258 		blob_xattrs_init(&opts->xattrs);
259 	}
260 
261 	SET_FIELD(use_extent_table, true);
262 
263 #undef FIELD_OK
264 #undef SET_FIELD
265 }
266 
267 void
268 spdk_blob_open_opts_init(struct spdk_blob_open_opts *opts, size_t opts_size)
269 {
270 	if (!opts) {
271 		SPDK_ERRLOG("opts should not be NULL\n");
272 		return;
273 	}
274 
275 	if (!opts_size) {
276 		SPDK_ERRLOG("opts_size should not be zero value\n");
277 		return;
278 	}
279 
280 	memset(opts, 0, opts_size);
281 	opts->opts_size = opts_size;
282 
283 #define FIELD_OK(field) \
284         offsetof(struct spdk_blob_open_opts, field) + sizeof(opts->field) <= opts_size
285 
286 #define SET_FIELD(field, value) \
287         if (FIELD_OK(field)) { \
288                 opts->field = value; \
289         } \
290 
291 	SET_FIELD(clear_method, BLOB_CLEAR_WITH_DEFAULT);
292 
293 #undef FIELD_OK
294 #undef SET_FILED
295 }
296 
297 static struct spdk_blob *
298 blob_alloc(struct spdk_blob_store *bs, spdk_blob_id id)
299 {
300 	struct spdk_blob *blob;
301 
302 	blob = calloc(1, sizeof(*blob));
303 	if (!blob) {
304 		return NULL;
305 	}
306 
307 	blob->id = id;
308 	blob->bs = bs;
309 
310 	blob->parent_id = SPDK_BLOBID_INVALID;
311 
312 	blob->state = SPDK_BLOB_STATE_DIRTY;
313 	blob->extent_rle_found = false;
314 	blob->extent_table_found = false;
315 	blob->active.num_pages = 1;
316 	blob->active.pages = calloc(1, sizeof(*blob->active.pages));
317 	if (!blob->active.pages) {
318 		free(blob);
319 		return NULL;
320 	}
321 
322 	blob->active.pages[0] = bs_blobid_to_page(id);
323 
324 	TAILQ_INIT(&blob->xattrs);
325 	TAILQ_INIT(&blob->xattrs_internal);
326 	TAILQ_INIT(&blob->pending_persists);
327 	TAILQ_INIT(&blob->persists_to_complete);
328 
329 	return blob;
330 }
331 
332 static void
333 xattrs_free(struct spdk_xattr_tailq *xattrs)
334 {
335 	struct spdk_xattr	*xattr, *xattr_tmp;
336 
337 	TAILQ_FOREACH_SAFE(xattr, xattrs, link, xattr_tmp) {
338 		TAILQ_REMOVE(xattrs, xattr, link);
339 		free(xattr->name);
340 		free(xattr->value);
341 		free(xattr);
342 	}
343 }
344 
345 static void
346 blob_free(struct spdk_blob *blob)
347 {
348 	assert(blob != NULL);
349 	assert(TAILQ_EMPTY(&blob->pending_persists));
350 	assert(TAILQ_EMPTY(&blob->persists_to_complete));
351 
352 	free(blob->active.extent_pages);
353 	free(blob->clean.extent_pages);
354 	free(blob->active.clusters);
355 	free(blob->clean.clusters);
356 	free(blob->active.pages);
357 	free(blob->clean.pages);
358 
359 	xattrs_free(&blob->xattrs);
360 	xattrs_free(&blob->xattrs_internal);
361 
362 	if (blob->back_bs_dev) {
363 		blob->back_bs_dev->destroy(blob->back_bs_dev);
364 	}
365 
366 	free(blob);
367 }
368 
369 static void
370 blob_back_bs_destroy_esnap_done(void *ctx, struct spdk_blob *blob, int bserrno)
371 {
372 	struct spdk_bs_dev	*bs_dev = ctx;
373 
374 	if (bserrno != 0) {
375 		/*
376 		 * This is probably due to a memory allocation failure when creating the
377 		 * blob_esnap_destroy_ctx before iterating threads.
378 		 */
379 		SPDK_ERRLOG("blob 0x%" PRIx64 ": Unable to destroy bs dev channels: error %d\n",
380 			    blob->id, bserrno);
381 		assert(false);
382 	}
383 
384 	if (bs_dev == NULL) {
385 		/*
386 		 * This check exists to make scanbuild happy.
387 		 *
388 		 * blob->back_bs_dev for an esnap is NULL during the first iteration of blobs while
389 		 * the blobstore is being loaded. It could also be NULL if there was an error
390 		 * opening the esnap device. In each of these cases, no channels could have been
391 		 * created because back_bs_dev->create_channel() would have led to a NULL pointer
392 		 * deref.
393 		 */
394 		assert(false);
395 		return;
396 	}
397 
398 	SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": calling destroy on back_bs_dev\n", blob->id);
399 	bs_dev->destroy(bs_dev);
400 }
401 
402 static void
403 blob_back_bs_destroy(struct spdk_blob *blob)
404 {
405 	SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": preparing to destroy back_bs_dev\n",
406 		      blob->id);
407 
408 	blob_esnap_destroy_bs_dev_channels(blob, false, blob_back_bs_destroy_esnap_done,
409 					   blob->back_bs_dev);
410 	blob->back_bs_dev = NULL;
411 }
412 
413 struct set_bs_dev_ctx {
414 	struct spdk_blob	*blob;
415 	struct spdk_bs_dev	*back_bs_dev;
416 
417 	spdk_blob_op_complete	cb_fn;
418 	void			*cb_arg;
419 	int			bserrno;
420 };
421 
422 static void
423 blob_set_back_bs_dev(struct spdk_blob *blob, struct spdk_bs_dev *back_bs_dev,
424 		     spdk_blob_op_complete cb_fn, void *cb_arg)
425 {
426 	struct set_bs_dev_ctx	*ctx;
427 
428 	ctx = calloc(1, sizeof(*ctx));
429 	if (ctx == NULL) {
430 		SPDK_ERRLOG("blob 0x%" PRIx64 ": out of memory while setting back_bs_dev\n",
431 			    blob->id);
432 		cb_fn(cb_arg, -ENOMEM);
433 		return;
434 	}
435 
436 	ctx->cb_fn = cb_fn;
437 	ctx->cb_arg = cb_arg;
438 	ctx->back_bs_dev = back_bs_dev;
439 	ctx->blob = blob;
440 
441 	blob_freeze_io(blob, blob_set_back_bs_dev_frozen, ctx);
442 }
443 
444 struct freeze_io_ctx {
445 	struct spdk_bs_cpl cpl;
446 	struct spdk_blob *blob;
447 };
448 
449 static void
450 blob_io_sync(struct spdk_io_channel_iter *i)
451 {
452 	spdk_for_each_channel_continue(i, 0);
453 }
454 
455 static void
456 blob_execute_queued_io(struct spdk_io_channel_iter *i)
457 {
458 	struct spdk_io_channel *_ch = spdk_io_channel_iter_get_channel(i);
459 	struct spdk_bs_channel *ch = spdk_io_channel_get_ctx(_ch);
460 	struct freeze_io_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
461 	struct spdk_bs_request_set	*set;
462 	struct spdk_bs_user_op_args	*args;
463 	spdk_bs_user_op_t *op, *tmp;
464 
465 	TAILQ_FOREACH_SAFE(op, &ch->queued_io, link, tmp) {
466 		set = (struct spdk_bs_request_set *)op;
467 		args = &set->u.user_op;
468 
469 		if (args->blob == ctx->blob) {
470 			TAILQ_REMOVE(&ch->queued_io, op, link);
471 			bs_user_op_execute(op);
472 		}
473 	}
474 
475 	spdk_for_each_channel_continue(i, 0);
476 }
477 
478 static void
479 blob_io_cpl(struct spdk_io_channel_iter *i, int status)
480 {
481 	struct freeze_io_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
482 
483 	ctx->cpl.u.blob_basic.cb_fn(ctx->cpl.u.blob_basic.cb_arg, 0);
484 
485 	free(ctx);
486 }
487 
488 static void
489 blob_freeze_io(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
490 {
491 	struct freeze_io_ctx *ctx;
492 
493 	blob_verify_md_op(blob);
494 
495 	ctx = calloc(1, sizeof(*ctx));
496 	if (!ctx) {
497 		cb_fn(cb_arg, -ENOMEM);
498 		return;
499 	}
500 
501 	ctx->cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
502 	ctx->cpl.u.blob_basic.cb_fn = cb_fn;
503 	ctx->cpl.u.blob_basic.cb_arg = cb_arg;
504 	ctx->blob = blob;
505 
506 	/* Freeze I/O on blob */
507 	blob->frozen_refcnt++;
508 
509 	spdk_for_each_channel(blob->bs, blob_io_sync, ctx, blob_io_cpl);
510 }
511 
512 static void
513 blob_unfreeze_io(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
514 {
515 	struct freeze_io_ctx *ctx;
516 
517 	blob_verify_md_op(blob);
518 
519 	ctx = calloc(1, sizeof(*ctx));
520 	if (!ctx) {
521 		cb_fn(cb_arg, -ENOMEM);
522 		return;
523 	}
524 
525 	ctx->cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
526 	ctx->cpl.u.blob_basic.cb_fn = cb_fn;
527 	ctx->cpl.u.blob_basic.cb_arg = cb_arg;
528 	ctx->blob = blob;
529 
530 	assert(blob->frozen_refcnt > 0);
531 
532 	blob->frozen_refcnt--;
533 
534 	spdk_for_each_channel(blob->bs, blob_execute_queued_io, ctx, blob_io_cpl);
535 }
536 
537 static int
538 blob_mark_clean(struct spdk_blob *blob)
539 {
540 	uint32_t *extent_pages = NULL;
541 	uint64_t *clusters = NULL;
542 	uint32_t *pages = NULL;
543 
544 	assert(blob != NULL);
545 
546 	if (blob->active.num_extent_pages) {
547 		assert(blob->active.extent_pages);
548 		extent_pages = calloc(blob->active.num_extent_pages, sizeof(*blob->active.extent_pages));
549 		if (!extent_pages) {
550 			return -ENOMEM;
551 		}
552 		memcpy(extent_pages, blob->active.extent_pages,
553 		       blob->active.num_extent_pages * sizeof(*extent_pages));
554 	}
555 
556 	if (blob->active.num_clusters) {
557 		assert(blob->active.clusters);
558 		clusters = calloc(blob->active.num_clusters, sizeof(*blob->active.clusters));
559 		if (!clusters) {
560 			free(extent_pages);
561 			return -ENOMEM;
562 		}
563 		memcpy(clusters, blob->active.clusters, blob->active.num_clusters * sizeof(*blob->active.clusters));
564 	}
565 
566 	if (blob->active.num_pages) {
567 		assert(blob->active.pages);
568 		pages = calloc(blob->active.num_pages, sizeof(*blob->active.pages));
569 		if (!pages) {
570 			free(extent_pages);
571 			free(clusters);
572 			return -ENOMEM;
573 		}
574 		memcpy(pages, blob->active.pages, blob->active.num_pages * sizeof(*blob->active.pages));
575 	}
576 
577 	free(blob->clean.extent_pages);
578 	free(blob->clean.clusters);
579 	free(blob->clean.pages);
580 
581 	blob->clean.num_extent_pages = blob->active.num_extent_pages;
582 	blob->clean.extent_pages = blob->active.extent_pages;
583 	blob->clean.num_clusters = blob->active.num_clusters;
584 	blob->clean.clusters = blob->active.clusters;
585 	blob->clean.num_allocated_clusters = blob->active.num_allocated_clusters;
586 	blob->clean.num_pages = blob->active.num_pages;
587 	blob->clean.pages = blob->active.pages;
588 
589 	blob->active.extent_pages = extent_pages;
590 	blob->active.clusters = clusters;
591 	blob->active.pages = pages;
592 
593 	/* If the metadata was dirtied again while the metadata was being written to disk,
594 	 *  we do not want to revert the DIRTY state back to CLEAN here.
595 	 */
596 	if (blob->state == SPDK_BLOB_STATE_LOADING) {
597 		blob->state = SPDK_BLOB_STATE_CLEAN;
598 	}
599 
600 	return 0;
601 }
602 
603 static int
604 blob_deserialize_xattr(struct spdk_blob *blob,
605 		       struct spdk_blob_md_descriptor_xattr *desc_xattr, bool internal)
606 {
607 	struct spdk_xattr                       *xattr;
608 
609 	if (desc_xattr->length != sizeof(desc_xattr->name_length) +
610 	    sizeof(desc_xattr->value_length) +
611 	    desc_xattr->name_length + desc_xattr->value_length) {
612 		return -EINVAL;
613 	}
614 
615 	xattr = calloc(1, sizeof(*xattr));
616 	if (xattr == NULL) {
617 		return -ENOMEM;
618 	}
619 
620 	xattr->name = malloc(desc_xattr->name_length + 1);
621 	if (xattr->name == NULL) {
622 		free(xattr);
623 		return -ENOMEM;
624 	}
625 
626 	xattr->value = malloc(desc_xattr->value_length);
627 	if (xattr->value == NULL) {
628 		free(xattr->name);
629 		free(xattr);
630 		return -ENOMEM;
631 	}
632 
633 	memcpy(xattr->name, desc_xattr->name, desc_xattr->name_length);
634 	xattr->name[desc_xattr->name_length] = '\0';
635 	xattr->value_len = desc_xattr->value_length;
636 	memcpy(xattr->value,
637 	       (void *)((uintptr_t)desc_xattr->name + desc_xattr->name_length),
638 	       desc_xattr->value_length);
639 
640 	TAILQ_INSERT_TAIL(internal ? &blob->xattrs_internal : &blob->xattrs, xattr, link);
641 
642 	return 0;
643 }
644 
645 
646 static int
647 blob_parse_page(const struct spdk_blob_md_page *page, struct spdk_blob *blob)
648 {
649 	struct spdk_blob_md_descriptor *desc;
650 	size_t	cur_desc = 0;
651 	void *tmp;
652 
653 	desc = (struct spdk_blob_md_descriptor *)page->descriptors;
654 	while (cur_desc < sizeof(page->descriptors)) {
655 		if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) {
656 			if (desc->length == 0) {
657 				/* If padding and length are 0, this terminates the page */
658 				break;
659 			}
660 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) {
661 			struct spdk_blob_md_descriptor_flags	*desc_flags;
662 
663 			desc_flags = (struct spdk_blob_md_descriptor_flags *)desc;
664 
665 			if (desc_flags->length != sizeof(*desc_flags) - sizeof(*desc)) {
666 				return -EINVAL;
667 			}
668 
669 			if ((desc_flags->invalid_flags | SPDK_BLOB_INVALID_FLAGS_MASK) !=
670 			    SPDK_BLOB_INVALID_FLAGS_MASK) {
671 				return -EINVAL;
672 			}
673 
674 			if ((desc_flags->data_ro_flags | SPDK_BLOB_DATA_RO_FLAGS_MASK) !=
675 			    SPDK_BLOB_DATA_RO_FLAGS_MASK) {
676 				blob->data_ro = true;
677 				blob->md_ro = true;
678 			}
679 
680 			if ((desc_flags->md_ro_flags | SPDK_BLOB_MD_RO_FLAGS_MASK) !=
681 			    SPDK_BLOB_MD_RO_FLAGS_MASK) {
682 				blob->md_ro = true;
683 			}
684 
685 			if ((desc_flags->data_ro_flags & SPDK_BLOB_READ_ONLY)) {
686 				blob->data_ro = true;
687 				blob->md_ro = true;
688 			}
689 
690 			blob->invalid_flags = desc_flags->invalid_flags;
691 			blob->data_ro_flags = desc_flags->data_ro_flags;
692 			blob->md_ro_flags = desc_flags->md_ro_flags;
693 
694 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE) {
695 			struct spdk_blob_md_descriptor_extent_rle	*desc_extent_rle;
696 			unsigned int				i, j;
697 			unsigned int				cluster_count = blob->active.num_clusters;
698 
699 			if (blob->extent_table_found) {
700 				/* Extent Table already present in the md,
701 				 * both descriptors should never be at the same time. */
702 				return -EINVAL;
703 			}
704 			blob->extent_rle_found = true;
705 
706 			desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)desc;
707 
708 			if (desc_extent_rle->length == 0 ||
709 			    (desc_extent_rle->length % sizeof(desc_extent_rle->extents[0]) != 0)) {
710 				return -EINVAL;
711 			}
712 
713 			for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) {
714 				for (j = 0; j < desc_extent_rle->extents[i].length; j++) {
715 					if (desc_extent_rle->extents[i].cluster_idx != 0) {
716 						if (!spdk_bit_pool_is_allocated(blob->bs->used_clusters,
717 										desc_extent_rle->extents[i].cluster_idx + j)) {
718 							return -EINVAL;
719 						}
720 					}
721 					cluster_count++;
722 				}
723 			}
724 
725 			if (cluster_count == 0) {
726 				return -EINVAL;
727 			}
728 			tmp = realloc(blob->active.clusters, cluster_count * sizeof(*blob->active.clusters));
729 			if (tmp == NULL) {
730 				return -ENOMEM;
731 			}
732 			blob->active.clusters = tmp;
733 			blob->active.cluster_array_size = cluster_count;
734 
735 			for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) {
736 				for (j = 0; j < desc_extent_rle->extents[i].length; j++) {
737 					if (desc_extent_rle->extents[i].cluster_idx != 0) {
738 						blob->active.clusters[blob->active.num_clusters++] = bs_cluster_to_lba(blob->bs,
739 								desc_extent_rle->extents[i].cluster_idx + j);
740 						blob->active.num_allocated_clusters++;
741 					} else if (spdk_blob_is_thin_provisioned(blob)) {
742 						blob->active.clusters[blob->active.num_clusters++] = 0;
743 					} else {
744 						return -EINVAL;
745 					}
746 				}
747 			}
748 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE) {
749 			struct spdk_blob_md_descriptor_extent_table *desc_extent_table;
750 			uint32_t num_extent_pages = blob->active.num_extent_pages;
751 			uint32_t i, j;
752 			size_t extent_pages_length;
753 
754 			desc_extent_table = (struct spdk_blob_md_descriptor_extent_table *)desc;
755 			extent_pages_length = desc_extent_table->length - sizeof(desc_extent_table->num_clusters);
756 
757 			if (blob->extent_rle_found) {
758 				/* This means that Extent RLE is present in MD,
759 				 * both should never be at the same time. */
760 				return -EINVAL;
761 			} else if (blob->extent_table_found &&
762 				   desc_extent_table->num_clusters != blob->remaining_clusters_in_et) {
763 				/* Number of clusters in this ET does not match number
764 				 * from previously read EXTENT_TABLE. */
765 				return -EINVAL;
766 			}
767 
768 			if (desc_extent_table->length == 0 ||
769 			    (extent_pages_length % sizeof(desc_extent_table->extent_page[0]) != 0)) {
770 				return -EINVAL;
771 			}
772 
773 			blob->extent_table_found = true;
774 
775 			for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) {
776 				num_extent_pages += desc_extent_table->extent_page[i].num_pages;
777 			}
778 
779 			if (num_extent_pages > 0) {
780 				tmp = realloc(blob->active.extent_pages, num_extent_pages * sizeof(uint32_t));
781 				if (tmp == NULL) {
782 					return -ENOMEM;
783 				}
784 				blob->active.extent_pages = tmp;
785 			}
786 			blob->active.extent_pages_array_size = num_extent_pages;
787 
788 			blob->remaining_clusters_in_et = desc_extent_table->num_clusters;
789 
790 			/* Extent table entries contain md page numbers for extent pages.
791 			 * Zeroes represent unallocated extent pages, those are run-length-encoded.
792 			 */
793 			for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) {
794 				if (desc_extent_table->extent_page[i].page_idx != 0) {
795 					assert(desc_extent_table->extent_page[i].num_pages == 1);
796 					blob->active.extent_pages[blob->active.num_extent_pages++] =
797 						desc_extent_table->extent_page[i].page_idx;
798 				} else if (spdk_blob_is_thin_provisioned(blob)) {
799 					for (j = 0; j < desc_extent_table->extent_page[i].num_pages; j++) {
800 						blob->active.extent_pages[blob->active.num_extent_pages++] = 0;
801 					}
802 				} else {
803 					return -EINVAL;
804 				}
805 			}
806 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) {
807 			struct spdk_blob_md_descriptor_extent_page	*desc_extent;
808 			unsigned int					i;
809 			unsigned int					cluster_count = 0;
810 			size_t						cluster_idx_length;
811 
812 			if (blob->extent_rle_found) {
813 				/* This means that Extent RLE is present in MD,
814 				 * both should never be at the same time. */
815 				return -EINVAL;
816 			}
817 
818 			desc_extent = (struct spdk_blob_md_descriptor_extent_page *)desc;
819 			cluster_idx_length = desc_extent->length - sizeof(desc_extent->start_cluster_idx);
820 
821 			if (desc_extent->length <= sizeof(desc_extent->start_cluster_idx) ||
822 			    (cluster_idx_length % sizeof(desc_extent->cluster_idx[0]) != 0)) {
823 				return -EINVAL;
824 			}
825 
826 			for (i = 0; i < cluster_idx_length / sizeof(desc_extent->cluster_idx[0]); i++) {
827 				if (desc_extent->cluster_idx[i] != 0) {
828 					if (!spdk_bit_pool_is_allocated(blob->bs->used_clusters, desc_extent->cluster_idx[i])) {
829 						return -EINVAL;
830 					}
831 				}
832 				cluster_count++;
833 			}
834 
835 			if (cluster_count == 0) {
836 				return -EINVAL;
837 			}
838 
839 			/* When reading extent pages sequentially starting cluster idx should match
840 			 * current size of a blob.
841 			 * If changed to batch reading, this check shall be removed. */
842 			if (desc_extent->start_cluster_idx != blob->active.num_clusters) {
843 				return -EINVAL;
844 			}
845 
846 			tmp = realloc(blob->active.clusters,
847 				      (cluster_count + blob->active.num_clusters) * sizeof(*blob->active.clusters));
848 			if (tmp == NULL) {
849 				return -ENOMEM;
850 			}
851 			blob->active.clusters = tmp;
852 			blob->active.cluster_array_size = (cluster_count + blob->active.num_clusters);
853 
854 			for (i = 0; i < cluster_idx_length / sizeof(desc_extent->cluster_idx[0]); i++) {
855 				if (desc_extent->cluster_idx[i] != 0) {
856 					blob->active.clusters[blob->active.num_clusters++] = bs_cluster_to_lba(blob->bs,
857 							desc_extent->cluster_idx[i]);
858 					blob->active.num_allocated_clusters++;
859 				} else if (spdk_blob_is_thin_provisioned(blob)) {
860 					blob->active.clusters[blob->active.num_clusters++] = 0;
861 				} else {
862 					return -EINVAL;
863 				}
864 			}
865 			assert(desc_extent->start_cluster_idx + cluster_count == blob->active.num_clusters);
866 			assert(blob->remaining_clusters_in_et >= cluster_count);
867 			blob->remaining_clusters_in_et -= cluster_count;
868 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) {
869 			int rc;
870 
871 			rc = blob_deserialize_xattr(blob,
872 						    (struct spdk_blob_md_descriptor_xattr *) desc, false);
873 			if (rc != 0) {
874 				return rc;
875 			}
876 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) {
877 			int rc;
878 
879 			rc = blob_deserialize_xattr(blob,
880 						    (struct spdk_blob_md_descriptor_xattr *) desc, true);
881 			if (rc != 0) {
882 				return rc;
883 			}
884 		} else {
885 			/* Unrecognized descriptor type.  Do not fail - just continue to the
886 			 *  next descriptor.  If this descriptor is associated with some feature
887 			 *  defined in a newer version of blobstore, that version of blobstore
888 			 *  should create and set an associated feature flag to specify if this
889 			 *  blob can be loaded or not.
890 			 */
891 		}
892 
893 		/* Advance to the next descriptor */
894 		cur_desc += sizeof(*desc) + desc->length;
895 		if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) {
896 			break;
897 		}
898 		desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc);
899 	}
900 
901 	return 0;
902 }
903 
904 static bool bs_load_cur_extent_page_valid(struct spdk_blob_md_page *page);
905 
906 static int
907 blob_parse_extent_page(struct spdk_blob_md_page *extent_page, struct spdk_blob *blob)
908 {
909 	assert(blob != NULL);
910 	assert(blob->state == SPDK_BLOB_STATE_LOADING);
911 
912 	if (bs_load_cur_extent_page_valid(extent_page) == false) {
913 		return -ENOENT;
914 	}
915 
916 	return blob_parse_page(extent_page, blob);
917 }
918 
919 static int
920 blob_parse(const struct spdk_blob_md_page *pages, uint32_t page_count,
921 	   struct spdk_blob *blob)
922 {
923 	const struct spdk_blob_md_page *page;
924 	uint32_t i;
925 	int rc;
926 	void *tmp;
927 
928 	assert(page_count > 0);
929 	assert(pages[0].sequence_num == 0);
930 	assert(blob != NULL);
931 	assert(blob->state == SPDK_BLOB_STATE_LOADING);
932 	assert(blob->active.clusters == NULL);
933 
934 	/* The blobid provided doesn't match what's in the MD, this can
935 	 * happen for example if a bogus blobid is passed in through open.
936 	 */
937 	if (blob->id != pages[0].id) {
938 		SPDK_ERRLOG("Blobid (0x%" PRIx64 ") doesn't match what's in metadata "
939 			    "(0x%" PRIx64 ")\n", blob->id, pages[0].id);
940 		return -ENOENT;
941 	}
942 
943 	tmp = realloc(blob->active.pages, page_count * sizeof(*blob->active.pages));
944 	if (!tmp) {
945 		return -ENOMEM;
946 	}
947 	blob->active.pages = tmp;
948 
949 	blob->active.pages[0] = pages[0].id;
950 
951 	for (i = 1; i < page_count; i++) {
952 		assert(spdk_bit_array_get(blob->bs->used_md_pages, pages[i - 1].next));
953 		blob->active.pages[i] = pages[i - 1].next;
954 	}
955 	blob->active.num_pages = page_count;
956 
957 	for (i = 0; i < page_count; i++) {
958 		page = &pages[i];
959 
960 		assert(page->id == blob->id);
961 		assert(page->sequence_num == i);
962 
963 		rc = blob_parse_page(page, blob);
964 		if (rc != 0) {
965 			return rc;
966 		}
967 	}
968 
969 	return 0;
970 }
971 
972 static int
973 blob_serialize_add_page(const struct spdk_blob *blob,
974 			struct spdk_blob_md_page **pages,
975 			uint32_t *page_count,
976 			struct spdk_blob_md_page **last_page)
977 {
978 	struct spdk_blob_md_page *page, *tmp_pages;
979 
980 	assert(pages != NULL);
981 	assert(page_count != NULL);
982 
983 	*last_page = NULL;
984 	if (*page_count == 0) {
985 		assert(*pages == NULL);
986 		*pages = spdk_malloc(SPDK_BS_PAGE_SIZE, 0,
987 				     NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
988 		if (*pages == NULL) {
989 			return -ENOMEM;
990 		}
991 		*page_count = 1;
992 	} else {
993 		assert(*pages != NULL);
994 		tmp_pages = spdk_realloc(*pages, SPDK_BS_PAGE_SIZE * (*page_count + 1), 0);
995 		if (tmp_pages == NULL) {
996 			return -ENOMEM;
997 		}
998 		(*page_count)++;
999 		*pages = tmp_pages;
1000 	}
1001 
1002 	page = &(*pages)[*page_count - 1];
1003 	memset(page, 0, sizeof(*page));
1004 	page->id = blob->id;
1005 	page->sequence_num = *page_count - 1;
1006 	page->next = SPDK_INVALID_MD_PAGE;
1007 	*last_page = page;
1008 
1009 	return 0;
1010 }
1011 
1012 /* Transform the in-memory representation 'xattr' into an on-disk xattr descriptor.
1013  * Update required_sz on both success and failure.
1014  *
1015  */
1016 static int
1017 blob_serialize_xattr(const struct spdk_xattr *xattr,
1018 		     uint8_t *buf, size_t buf_sz,
1019 		     size_t *required_sz, bool internal)
1020 {
1021 	struct spdk_blob_md_descriptor_xattr	*desc;
1022 
1023 	*required_sz = sizeof(struct spdk_blob_md_descriptor_xattr) +
1024 		       strlen(xattr->name) +
1025 		       xattr->value_len;
1026 
1027 	if (buf_sz < *required_sz) {
1028 		return -1;
1029 	}
1030 
1031 	desc = (struct spdk_blob_md_descriptor_xattr *)buf;
1032 
1033 	desc->type = internal ? SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL : SPDK_MD_DESCRIPTOR_TYPE_XATTR;
1034 	desc->length = sizeof(desc->name_length) +
1035 		       sizeof(desc->value_length) +
1036 		       strlen(xattr->name) +
1037 		       xattr->value_len;
1038 	desc->name_length = strlen(xattr->name);
1039 	desc->value_length = xattr->value_len;
1040 
1041 	memcpy(desc->name, xattr->name, desc->name_length);
1042 	memcpy((void *)((uintptr_t)desc->name + desc->name_length),
1043 	       xattr->value,
1044 	       desc->value_length);
1045 
1046 	return 0;
1047 }
1048 
1049 static void
1050 blob_serialize_extent_table_entry(const struct spdk_blob *blob,
1051 				  uint64_t start_ep, uint64_t *next_ep,
1052 				  uint8_t **buf, size_t *remaining_sz)
1053 {
1054 	struct spdk_blob_md_descriptor_extent_table *desc;
1055 	size_t cur_sz;
1056 	uint64_t i, et_idx;
1057 	uint32_t extent_page, ep_len;
1058 
1059 	/* The buffer must have room for at least num_clusters entry */
1060 	cur_sz = sizeof(struct spdk_blob_md_descriptor) + sizeof(desc->num_clusters);
1061 	if (*remaining_sz < cur_sz) {
1062 		*next_ep = start_ep;
1063 		return;
1064 	}
1065 
1066 	desc = (struct spdk_blob_md_descriptor_extent_table *)*buf;
1067 	desc->type = SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE;
1068 
1069 	desc->num_clusters = blob->active.num_clusters;
1070 
1071 	ep_len = 1;
1072 	et_idx = 0;
1073 	for (i = start_ep; i < blob->active.num_extent_pages; i++) {
1074 		if (*remaining_sz < cur_sz  + sizeof(desc->extent_page[0])) {
1075 			/* If we ran out of buffer space, return */
1076 			break;
1077 		}
1078 
1079 		extent_page = blob->active.extent_pages[i];
1080 		/* Verify that next extent_page is unallocated */
1081 		if (extent_page == 0 &&
1082 		    (i + 1 < blob->active.num_extent_pages && blob->active.extent_pages[i + 1] == 0)) {
1083 			ep_len++;
1084 			continue;
1085 		}
1086 		desc->extent_page[et_idx].page_idx = extent_page;
1087 		desc->extent_page[et_idx].num_pages = ep_len;
1088 		et_idx++;
1089 
1090 		ep_len = 1;
1091 		cur_sz += sizeof(desc->extent_page[et_idx]);
1092 	}
1093 	*next_ep = i;
1094 
1095 	desc->length = sizeof(desc->num_clusters) + sizeof(desc->extent_page[0]) * et_idx;
1096 	*remaining_sz -= sizeof(struct spdk_blob_md_descriptor) + desc->length;
1097 	*buf += sizeof(struct spdk_blob_md_descriptor) + desc->length;
1098 }
1099 
1100 static int
1101 blob_serialize_extent_table(const struct spdk_blob *blob,
1102 			    struct spdk_blob_md_page **pages,
1103 			    struct spdk_blob_md_page *cur_page,
1104 			    uint32_t *page_count, uint8_t **buf,
1105 			    size_t *remaining_sz)
1106 {
1107 	uint64_t				last_extent_page;
1108 	int					rc;
1109 
1110 	last_extent_page = 0;
1111 	/* At least single extent table entry has to be always persisted.
1112 	 * Such case occurs with num_extent_pages == 0. */
1113 	while (last_extent_page <= blob->active.num_extent_pages) {
1114 		blob_serialize_extent_table_entry(blob, last_extent_page, &last_extent_page, buf,
1115 						  remaining_sz);
1116 
1117 		if (last_extent_page == blob->active.num_extent_pages) {
1118 			break;
1119 		}
1120 
1121 		rc = blob_serialize_add_page(blob, pages, page_count, &cur_page);
1122 		if (rc < 0) {
1123 			return rc;
1124 		}
1125 
1126 		*buf = (uint8_t *)cur_page->descriptors;
1127 		*remaining_sz = sizeof(cur_page->descriptors);
1128 	}
1129 
1130 	return 0;
1131 }
1132 
1133 static void
1134 blob_serialize_extent_rle(const struct spdk_blob *blob,
1135 			  uint64_t start_cluster, uint64_t *next_cluster,
1136 			  uint8_t **buf, size_t *buf_sz)
1137 {
1138 	struct spdk_blob_md_descriptor_extent_rle *desc_extent_rle;
1139 	size_t cur_sz;
1140 	uint64_t i, extent_idx;
1141 	uint64_t lba, lba_per_cluster, lba_count;
1142 
1143 	/* The buffer must have room for at least one extent */
1144 	cur_sz = sizeof(struct spdk_blob_md_descriptor) + sizeof(desc_extent_rle->extents[0]);
1145 	if (*buf_sz < cur_sz) {
1146 		*next_cluster = start_cluster;
1147 		return;
1148 	}
1149 
1150 	desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)*buf;
1151 	desc_extent_rle->type = SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE;
1152 
1153 	lba_per_cluster = bs_cluster_to_lba(blob->bs, 1);
1154 	/* Assert for scan-build false positive */
1155 	assert(lba_per_cluster > 0);
1156 
1157 	lba = blob->active.clusters[start_cluster];
1158 	lba_count = lba_per_cluster;
1159 	extent_idx = 0;
1160 	for (i = start_cluster + 1; i < blob->active.num_clusters; i++) {
1161 		if ((lba + lba_count) == blob->active.clusters[i] && lba != 0) {
1162 			/* Run-length encode sequential non-zero LBA */
1163 			lba_count += lba_per_cluster;
1164 			continue;
1165 		} else if (lba == 0 && blob->active.clusters[i] == 0) {
1166 			/* Run-length encode unallocated clusters */
1167 			lba_count += lba_per_cluster;
1168 			continue;
1169 		}
1170 		desc_extent_rle->extents[extent_idx].cluster_idx = lba / lba_per_cluster;
1171 		desc_extent_rle->extents[extent_idx].length = lba_count / lba_per_cluster;
1172 		extent_idx++;
1173 
1174 		cur_sz += sizeof(desc_extent_rle->extents[extent_idx]);
1175 
1176 		if (*buf_sz < cur_sz) {
1177 			/* If we ran out of buffer space, return */
1178 			*next_cluster = i;
1179 			break;
1180 		}
1181 
1182 		lba = blob->active.clusters[i];
1183 		lba_count = lba_per_cluster;
1184 	}
1185 
1186 	if (*buf_sz >= cur_sz) {
1187 		desc_extent_rle->extents[extent_idx].cluster_idx = lba / lba_per_cluster;
1188 		desc_extent_rle->extents[extent_idx].length = lba_count / lba_per_cluster;
1189 		extent_idx++;
1190 
1191 		*next_cluster = blob->active.num_clusters;
1192 	}
1193 
1194 	desc_extent_rle->length = sizeof(desc_extent_rle->extents[0]) * extent_idx;
1195 	*buf_sz -= sizeof(struct spdk_blob_md_descriptor) + desc_extent_rle->length;
1196 	*buf += sizeof(struct spdk_blob_md_descriptor) + desc_extent_rle->length;
1197 }
1198 
1199 static int
1200 blob_serialize_extents_rle(const struct spdk_blob *blob,
1201 			   struct spdk_blob_md_page **pages,
1202 			   struct spdk_blob_md_page *cur_page,
1203 			   uint32_t *page_count, uint8_t **buf,
1204 			   size_t *remaining_sz)
1205 {
1206 	uint64_t				last_cluster;
1207 	int					rc;
1208 
1209 	last_cluster = 0;
1210 	while (last_cluster < blob->active.num_clusters) {
1211 		blob_serialize_extent_rle(blob, last_cluster, &last_cluster, buf, remaining_sz);
1212 
1213 		if (last_cluster == blob->active.num_clusters) {
1214 			break;
1215 		}
1216 
1217 		rc = blob_serialize_add_page(blob, pages, page_count, &cur_page);
1218 		if (rc < 0) {
1219 			return rc;
1220 		}
1221 
1222 		*buf = (uint8_t *)cur_page->descriptors;
1223 		*remaining_sz = sizeof(cur_page->descriptors);
1224 	}
1225 
1226 	return 0;
1227 }
1228 
1229 static void
1230 blob_serialize_extent_page(const struct spdk_blob *blob,
1231 			   uint64_t cluster, struct spdk_blob_md_page *page)
1232 {
1233 	struct spdk_blob_md_descriptor_extent_page *desc_extent;
1234 	uint64_t i, extent_idx;
1235 	uint64_t lba, lba_per_cluster;
1236 	uint64_t start_cluster_idx = (cluster / SPDK_EXTENTS_PER_EP) * SPDK_EXTENTS_PER_EP;
1237 
1238 	desc_extent = (struct spdk_blob_md_descriptor_extent_page *) page->descriptors;
1239 	desc_extent->type = SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE;
1240 
1241 	lba_per_cluster = bs_cluster_to_lba(blob->bs, 1);
1242 
1243 	desc_extent->start_cluster_idx = start_cluster_idx;
1244 	extent_idx = 0;
1245 	for (i = start_cluster_idx; i < blob->active.num_clusters; i++) {
1246 		lba = blob->active.clusters[i];
1247 		desc_extent->cluster_idx[extent_idx++] = lba / lba_per_cluster;
1248 		if (extent_idx >= SPDK_EXTENTS_PER_EP) {
1249 			break;
1250 		}
1251 	}
1252 	desc_extent->length = sizeof(desc_extent->start_cluster_idx) +
1253 			      sizeof(desc_extent->cluster_idx[0]) * extent_idx;
1254 }
1255 
1256 static void
1257 blob_serialize_flags(const struct spdk_blob *blob,
1258 		     uint8_t *buf, size_t *buf_sz)
1259 {
1260 	struct spdk_blob_md_descriptor_flags *desc;
1261 
1262 	/*
1263 	 * Flags get serialized first, so we should always have room for the flags
1264 	 *  descriptor.
1265 	 */
1266 	assert(*buf_sz >= sizeof(*desc));
1267 
1268 	desc = (struct spdk_blob_md_descriptor_flags *)buf;
1269 	desc->type = SPDK_MD_DESCRIPTOR_TYPE_FLAGS;
1270 	desc->length = sizeof(*desc) - sizeof(struct spdk_blob_md_descriptor);
1271 	desc->invalid_flags = blob->invalid_flags;
1272 	desc->data_ro_flags = blob->data_ro_flags;
1273 	desc->md_ro_flags = blob->md_ro_flags;
1274 
1275 	*buf_sz -= sizeof(*desc);
1276 }
1277 
1278 static int
1279 blob_serialize_xattrs(const struct spdk_blob *blob,
1280 		      const struct spdk_xattr_tailq *xattrs, bool internal,
1281 		      struct spdk_blob_md_page **pages,
1282 		      struct spdk_blob_md_page *cur_page,
1283 		      uint32_t *page_count, uint8_t **buf,
1284 		      size_t *remaining_sz)
1285 {
1286 	const struct spdk_xattr	*xattr;
1287 	int	rc;
1288 
1289 	TAILQ_FOREACH(xattr, xattrs, link) {
1290 		size_t required_sz = 0;
1291 
1292 		rc = blob_serialize_xattr(xattr,
1293 					  *buf, *remaining_sz,
1294 					  &required_sz, internal);
1295 		if (rc < 0) {
1296 			/* Need to add a new page to the chain */
1297 			rc = blob_serialize_add_page(blob, pages, page_count,
1298 						     &cur_page);
1299 			if (rc < 0) {
1300 				spdk_free(*pages);
1301 				*pages = NULL;
1302 				*page_count = 0;
1303 				return rc;
1304 			}
1305 
1306 			*buf = (uint8_t *)cur_page->descriptors;
1307 			*remaining_sz = sizeof(cur_page->descriptors);
1308 
1309 			/* Try again */
1310 			required_sz = 0;
1311 			rc = blob_serialize_xattr(xattr,
1312 						  *buf, *remaining_sz,
1313 						  &required_sz, internal);
1314 
1315 			if (rc < 0) {
1316 				spdk_free(*pages);
1317 				*pages = NULL;
1318 				*page_count = 0;
1319 				return rc;
1320 			}
1321 		}
1322 
1323 		*remaining_sz -= required_sz;
1324 		*buf += required_sz;
1325 	}
1326 
1327 	return 0;
1328 }
1329 
1330 static int
1331 blob_serialize(const struct spdk_blob *blob, struct spdk_blob_md_page **pages,
1332 	       uint32_t *page_count)
1333 {
1334 	struct spdk_blob_md_page		*cur_page;
1335 	int					rc;
1336 	uint8_t					*buf;
1337 	size_t					remaining_sz;
1338 
1339 	assert(pages != NULL);
1340 	assert(page_count != NULL);
1341 	assert(blob != NULL);
1342 	assert(blob->state == SPDK_BLOB_STATE_DIRTY);
1343 
1344 	*pages = NULL;
1345 	*page_count = 0;
1346 
1347 	/* A blob always has at least 1 page, even if it has no descriptors */
1348 	rc = blob_serialize_add_page(blob, pages, page_count, &cur_page);
1349 	if (rc < 0) {
1350 		return rc;
1351 	}
1352 
1353 	buf = (uint8_t *)cur_page->descriptors;
1354 	remaining_sz = sizeof(cur_page->descriptors);
1355 
1356 	/* Serialize flags */
1357 	blob_serialize_flags(blob, buf, &remaining_sz);
1358 	buf += sizeof(struct spdk_blob_md_descriptor_flags);
1359 
1360 	/* Serialize xattrs */
1361 	rc = blob_serialize_xattrs(blob, &blob->xattrs, false,
1362 				   pages, cur_page, page_count, &buf, &remaining_sz);
1363 	if (rc < 0) {
1364 		return rc;
1365 	}
1366 
1367 	/* Serialize internal xattrs */
1368 	rc = blob_serialize_xattrs(blob, &blob->xattrs_internal, true,
1369 				   pages, cur_page, page_count, &buf, &remaining_sz);
1370 	if (rc < 0) {
1371 		return rc;
1372 	}
1373 
1374 	if (blob->use_extent_table) {
1375 		/* Serialize extent table */
1376 		rc = blob_serialize_extent_table(blob, pages, cur_page, page_count, &buf, &remaining_sz);
1377 	} else {
1378 		/* Serialize extents */
1379 		rc = blob_serialize_extents_rle(blob, pages, cur_page, page_count, &buf, &remaining_sz);
1380 	}
1381 
1382 	return rc;
1383 }
1384 
1385 struct spdk_blob_load_ctx {
1386 	struct spdk_blob		*blob;
1387 
1388 	struct spdk_blob_md_page	*pages;
1389 	uint32_t			num_pages;
1390 	uint32_t			next_extent_page;
1391 	spdk_bs_sequence_t	        *seq;
1392 
1393 	spdk_bs_sequence_cpl		cb_fn;
1394 	void				*cb_arg;
1395 };
1396 
1397 static uint32_t
1398 blob_md_page_calc_crc(void *page)
1399 {
1400 	uint32_t		crc;
1401 
1402 	crc = BLOB_CRC32C_INITIAL;
1403 	crc = spdk_crc32c_update(page, SPDK_BS_PAGE_SIZE - 4, crc);
1404 	crc ^= BLOB_CRC32C_INITIAL;
1405 
1406 	return crc;
1407 
1408 }
1409 
1410 static void
1411 blob_load_final(struct spdk_blob_load_ctx *ctx, int bserrno)
1412 {
1413 	struct spdk_blob		*blob = ctx->blob;
1414 
1415 	if (bserrno == 0) {
1416 		blob_mark_clean(blob);
1417 	}
1418 
1419 	ctx->cb_fn(ctx->seq, ctx->cb_arg, bserrno);
1420 
1421 	/* Free the memory */
1422 	spdk_free(ctx->pages);
1423 	free(ctx);
1424 }
1425 
1426 static void
1427 blob_load_snapshot_cpl(void *cb_arg, struct spdk_blob *snapshot, int bserrno)
1428 {
1429 	struct spdk_blob_load_ctx	*ctx = cb_arg;
1430 	struct spdk_blob		*blob = ctx->blob;
1431 
1432 	if (bserrno == 0) {
1433 		blob->back_bs_dev = bs_create_blob_bs_dev(snapshot);
1434 		if (blob->back_bs_dev == NULL) {
1435 			bserrno = -ENOMEM;
1436 		}
1437 	}
1438 	if (bserrno != 0) {
1439 		SPDK_ERRLOG("Snapshot fail\n");
1440 	}
1441 
1442 	blob_load_final(ctx, bserrno);
1443 }
1444 
1445 static void blob_update_clear_method(struct spdk_blob *blob);
1446 
1447 static int
1448 blob_load_esnap(struct spdk_blob *blob, void *blob_ctx)
1449 {
1450 	struct spdk_blob_store *bs = blob->bs;
1451 	struct spdk_bs_dev *bs_dev = NULL;
1452 	const void *esnap_id = NULL;
1453 	size_t id_len = 0;
1454 	int rc;
1455 
1456 	if (bs->esnap_bs_dev_create == NULL) {
1457 		SPDK_NOTICELOG("blob 0x%" PRIx64 " is an esnap clone but the blobstore was opened "
1458 			       "without support for esnap clones\n", blob->id);
1459 		return -ENOTSUP;
1460 	}
1461 	assert(blob->back_bs_dev == NULL);
1462 
1463 	rc = blob_get_xattr_value(blob, BLOB_EXTERNAL_SNAPSHOT_ID, &esnap_id, &id_len, true);
1464 	if (rc != 0) {
1465 		SPDK_ERRLOG("blob 0x%" PRIx64 " is an esnap clone but has no esnap ID\n", blob->id);
1466 		return -EINVAL;
1467 	}
1468 	assert(id_len > 0 && id_len < UINT32_MAX);
1469 
1470 	SPDK_INFOLOG(blob, "Creating external snapshot device\n");
1471 
1472 	rc = bs->esnap_bs_dev_create(bs->esnap_ctx, blob_ctx, blob, esnap_id, (uint32_t)id_len,
1473 				     &bs_dev);
1474 	if (rc != 0) {
1475 		SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": failed to load back_bs_dev "
1476 			      "with error %d\n", blob->id, rc);
1477 		return rc;
1478 	}
1479 
1480 	/*
1481 	 * Note: bs_dev might be NULL if the consumer chose to not open the external snapshot.
1482 	 * This especially might happen during spdk_bs_load() iteration.
1483 	 */
1484 	if (bs_dev != NULL) {
1485 		SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": loaded back_bs_dev\n", blob->id);
1486 		if ((bs->io_unit_size % bs_dev->blocklen) != 0) {
1487 			SPDK_NOTICELOG("blob 0x%" PRIx64 " external snapshot device block size %u "
1488 				       "is not compatible with blobstore block size %u\n",
1489 				       blob->id, bs_dev->blocklen, bs->io_unit_size);
1490 			bs_dev->destroy(bs_dev);
1491 			return -EINVAL;
1492 		}
1493 	}
1494 
1495 	blob->back_bs_dev = bs_dev;
1496 	blob->parent_id = SPDK_BLOBID_EXTERNAL_SNAPSHOT;
1497 
1498 	return 0;
1499 }
1500 
1501 static void
1502 blob_load_backing_dev(spdk_bs_sequence_t *seq, void *cb_arg)
1503 {
1504 	struct spdk_blob_load_ctx	*ctx = cb_arg;
1505 	struct spdk_blob		*blob = ctx->blob;
1506 	const void			*value;
1507 	size_t				len;
1508 	int				rc;
1509 
1510 	if (blob_is_esnap_clone(blob)) {
1511 		rc = blob_load_esnap(blob, seq->cpl.u.blob_handle.esnap_ctx);
1512 		blob_load_final(ctx, rc);
1513 		return;
1514 	}
1515 
1516 	if (spdk_blob_is_thin_provisioned(blob)) {
1517 		rc = blob_get_xattr_value(blob, BLOB_SNAPSHOT, &value, &len, true);
1518 		if (rc == 0) {
1519 			if (len != sizeof(spdk_blob_id)) {
1520 				blob_load_final(ctx, -EINVAL);
1521 				return;
1522 			}
1523 			/* open snapshot blob and continue in the callback function */
1524 			blob->parent_id = *(spdk_blob_id *)value;
1525 			spdk_bs_open_blob(blob->bs, blob->parent_id,
1526 					  blob_load_snapshot_cpl, ctx);
1527 			return;
1528 		} else {
1529 			/* add zeroes_dev for thin provisioned blob */
1530 			blob->back_bs_dev = bs_create_zeroes_dev();
1531 		}
1532 	} else {
1533 		/* standard blob */
1534 		blob->back_bs_dev = NULL;
1535 	}
1536 	blob_load_final(ctx, 0);
1537 }
1538 
1539 static void
1540 blob_load_cpl_extents_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
1541 {
1542 	struct spdk_blob_load_ctx	*ctx = cb_arg;
1543 	struct spdk_blob		*blob = ctx->blob;
1544 	struct spdk_blob_md_page	*page;
1545 	uint64_t			i;
1546 	uint32_t			crc;
1547 	uint64_t			lba;
1548 	void				*tmp;
1549 	uint64_t			sz;
1550 
1551 	if (bserrno) {
1552 		SPDK_ERRLOG("Extent page read failed: %d\n", bserrno);
1553 		blob_load_final(ctx, bserrno);
1554 		return;
1555 	}
1556 
1557 	if (ctx->pages == NULL) {
1558 		/* First iteration of this function, allocate buffer for single EXTENT_PAGE */
1559 		ctx->pages = spdk_zmalloc(SPDK_BS_PAGE_SIZE, 0,
1560 					  NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
1561 		if (!ctx->pages) {
1562 			blob_load_final(ctx, -ENOMEM);
1563 			return;
1564 		}
1565 		ctx->num_pages = 1;
1566 		ctx->next_extent_page = 0;
1567 	} else {
1568 		page = &ctx->pages[0];
1569 		crc = blob_md_page_calc_crc(page);
1570 		if (crc != page->crc) {
1571 			blob_load_final(ctx, -EINVAL);
1572 			return;
1573 		}
1574 
1575 		if (page->next != SPDK_INVALID_MD_PAGE) {
1576 			blob_load_final(ctx, -EINVAL);
1577 			return;
1578 		}
1579 
1580 		bserrno = blob_parse_extent_page(page, blob);
1581 		if (bserrno) {
1582 			blob_load_final(ctx, bserrno);
1583 			return;
1584 		}
1585 	}
1586 
1587 	for (i = ctx->next_extent_page; i < blob->active.num_extent_pages; i++) {
1588 		if (blob->active.extent_pages[i] != 0) {
1589 			/* Extent page was allocated, read and parse it. */
1590 			lba = bs_md_page_to_lba(blob->bs, blob->active.extent_pages[i]);
1591 			ctx->next_extent_page = i + 1;
1592 
1593 			bs_sequence_read_dev(seq, &ctx->pages[0], lba,
1594 					     bs_byte_to_lba(blob->bs, SPDK_BS_PAGE_SIZE),
1595 					     blob_load_cpl_extents_cpl, ctx);
1596 			return;
1597 		} else {
1598 			/* Thin provisioned blobs can point to unallocated extent pages.
1599 			 * In this case blob size should be increased by up to the amount left in remaining_clusters_in_et. */
1600 
1601 			sz = spdk_min(blob->remaining_clusters_in_et, SPDK_EXTENTS_PER_EP);
1602 			blob->active.num_clusters += sz;
1603 			blob->remaining_clusters_in_et -= sz;
1604 
1605 			assert(spdk_blob_is_thin_provisioned(blob));
1606 			assert(i + 1 < blob->active.num_extent_pages || blob->remaining_clusters_in_et == 0);
1607 
1608 			tmp = realloc(blob->active.clusters, blob->active.num_clusters * sizeof(*blob->active.clusters));
1609 			if (tmp == NULL) {
1610 				blob_load_final(ctx, -ENOMEM);
1611 				return;
1612 			}
1613 			memset(tmp + sizeof(*blob->active.clusters) * blob->active.cluster_array_size, 0,
1614 			       sizeof(*blob->active.clusters) * (blob->active.num_clusters - blob->active.cluster_array_size));
1615 			blob->active.clusters = tmp;
1616 			blob->active.cluster_array_size = blob->active.num_clusters;
1617 		}
1618 	}
1619 
1620 	blob_load_backing_dev(seq, ctx);
1621 }
1622 
1623 static void
1624 blob_load_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
1625 {
1626 	struct spdk_blob_load_ctx	*ctx = cb_arg;
1627 	struct spdk_blob		*blob = ctx->blob;
1628 	struct spdk_blob_md_page	*page;
1629 	int				rc;
1630 	uint32_t			crc;
1631 	uint32_t			current_page;
1632 
1633 	if (ctx->num_pages == 1) {
1634 		current_page = bs_blobid_to_page(blob->id);
1635 	} else {
1636 		assert(ctx->num_pages != 0);
1637 		page = &ctx->pages[ctx->num_pages - 2];
1638 		current_page = page->next;
1639 	}
1640 
1641 	if (bserrno) {
1642 		SPDK_ERRLOG("Metadata page %d read failed for blobid 0x%" PRIx64 ": %d\n",
1643 			    current_page, blob->id, bserrno);
1644 		blob_load_final(ctx, bserrno);
1645 		return;
1646 	}
1647 
1648 	page = &ctx->pages[ctx->num_pages - 1];
1649 	crc = blob_md_page_calc_crc(page);
1650 	if (crc != page->crc) {
1651 		SPDK_ERRLOG("Metadata page %d crc mismatch for blobid 0x%" PRIx64 "\n",
1652 			    current_page, blob->id);
1653 		blob_load_final(ctx, -EINVAL);
1654 		return;
1655 	}
1656 
1657 	if (page->next != SPDK_INVALID_MD_PAGE) {
1658 		struct spdk_blob_md_page *tmp_pages;
1659 		uint32_t next_page = page->next;
1660 		uint64_t next_lba = bs_md_page_to_lba(blob->bs, next_page);
1661 
1662 		/* Read the next page */
1663 		tmp_pages = spdk_realloc(ctx->pages, (sizeof(*page) * (ctx->num_pages + 1)), 0);
1664 		if (tmp_pages == NULL) {
1665 			blob_load_final(ctx, -ENOMEM);
1666 			return;
1667 		}
1668 		ctx->num_pages++;
1669 		ctx->pages = tmp_pages;
1670 
1671 		bs_sequence_read_dev(seq, &ctx->pages[ctx->num_pages - 1],
1672 				     next_lba,
1673 				     bs_byte_to_lba(blob->bs, sizeof(*page)),
1674 				     blob_load_cpl, ctx);
1675 		return;
1676 	}
1677 
1678 	/* Parse the pages */
1679 	rc = blob_parse(ctx->pages, ctx->num_pages, blob);
1680 	if (rc) {
1681 		blob_load_final(ctx, rc);
1682 		return;
1683 	}
1684 
1685 	if (blob->extent_table_found == true) {
1686 		/* If EXTENT_TABLE was found, that means support for it should be enabled. */
1687 		assert(blob->extent_rle_found == false);
1688 		blob->use_extent_table = true;
1689 	} else {
1690 		/* If EXTENT_RLE or no extent_* descriptor was found disable support
1691 		 * for extent table. No extent_* descriptors means that blob has length of 0
1692 		 * and no extent_rle descriptors were persisted for it.
1693 		 * EXTENT_TABLE if used, is always present in metadata regardless of length. */
1694 		blob->use_extent_table = false;
1695 	}
1696 
1697 	/* Check the clear_method stored in metadata vs what may have been passed
1698 	 * via spdk_bs_open_blob_ext() and update accordingly.
1699 	 */
1700 	blob_update_clear_method(blob);
1701 
1702 	spdk_free(ctx->pages);
1703 	ctx->pages = NULL;
1704 
1705 	if (blob->extent_table_found) {
1706 		blob_load_cpl_extents_cpl(seq, ctx, 0);
1707 	} else {
1708 		blob_load_backing_dev(seq, ctx);
1709 	}
1710 }
1711 
1712 /* Load a blob from disk given a blobid */
1713 static void
1714 blob_load(spdk_bs_sequence_t *seq, struct spdk_blob *blob,
1715 	  spdk_bs_sequence_cpl cb_fn, void *cb_arg)
1716 {
1717 	struct spdk_blob_load_ctx *ctx;
1718 	struct spdk_blob_store *bs;
1719 	uint32_t page_num;
1720 	uint64_t lba;
1721 
1722 	blob_verify_md_op(blob);
1723 
1724 	bs = blob->bs;
1725 
1726 	ctx = calloc(1, sizeof(*ctx));
1727 	if (!ctx) {
1728 		cb_fn(seq, cb_arg, -ENOMEM);
1729 		return;
1730 	}
1731 
1732 	ctx->blob = blob;
1733 	ctx->pages = spdk_realloc(ctx->pages, SPDK_BS_PAGE_SIZE, 0);
1734 	if (!ctx->pages) {
1735 		free(ctx);
1736 		cb_fn(seq, cb_arg, -ENOMEM);
1737 		return;
1738 	}
1739 	ctx->num_pages = 1;
1740 	ctx->cb_fn = cb_fn;
1741 	ctx->cb_arg = cb_arg;
1742 	ctx->seq = seq;
1743 
1744 	page_num = bs_blobid_to_page(blob->id);
1745 	lba = bs_md_page_to_lba(blob->bs, page_num);
1746 
1747 	blob->state = SPDK_BLOB_STATE_LOADING;
1748 
1749 	bs_sequence_read_dev(seq, &ctx->pages[0], lba,
1750 			     bs_byte_to_lba(bs, SPDK_BS_PAGE_SIZE),
1751 			     blob_load_cpl, ctx);
1752 }
1753 
1754 struct spdk_blob_persist_ctx {
1755 	struct spdk_blob		*blob;
1756 
1757 	struct spdk_blob_md_page	*pages;
1758 	uint32_t			next_extent_page;
1759 	struct spdk_blob_md_page	*extent_page;
1760 
1761 	spdk_bs_sequence_t		*seq;
1762 	spdk_bs_sequence_cpl		cb_fn;
1763 	void				*cb_arg;
1764 	TAILQ_ENTRY(spdk_blob_persist_ctx) link;
1765 };
1766 
1767 static void
1768 bs_batch_clear_dev(struct spdk_blob *blob, spdk_bs_batch_t *batch, uint64_t lba,
1769 		   uint64_t lba_count)
1770 {
1771 	switch (blob->clear_method) {
1772 	case BLOB_CLEAR_WITH_DEFAULT:
1773 	case BLOB_CLEAR_WITH_UNMAP:
1774 		bs_batch_unmap_dev(batch, lba, lba_count);
1775 		break;
1776 	case BLOB_CLEAR_WITH_WRITE_ZEROES:
1777 		bs_batch_write_zeroes_dev(batch, lba, lba_count);
1778 		break;
1779 	case BLOB_CLEAR_WITH_NONE:
1780 	default:
1781 		break;
1782 	}
1783 }
1784 
1785 static int
1786 bs_super_validate(struct spdk_bs_super_block *super, struct spdk_blob_store *bs)
1787 {
1788 	uint32_t	crc;
1789 	static const char zeros[SPDK_BLOBSTORE_TYPE_LENGTH];
1790 
1791 	if (super->version > SPDK_BS_VERSION ||
1792 	    super->version < SPDK_BS_INITIAL_VERSION) {
1793 		return -EILSEQ;
1794 	}
1795 
1796 	if (memcmp(super->signature, SPDK_BS_SUPER_BLOCK_SIG,
1797 		   sizeof(super->signature)) != 0) {
1798 		return -EILSEQ;
1799 	}
1800 
1801 	crc = blob_md_page_calc_crc(super);
1802 	if (crc != super->crc) {
1803 		return -EILSEQ;
1804 	}
1805 
1806 	if (memcmp(&bs->bstype, &super->bstype, SPDK_BLOBSTORE_TYPE_LENGTH) == 0) {
1807 		SPDK_DEBUGLOG(blob, "Bstype matched - loading blobstore\n");
1808 	} else if (memcmp(&bs->bstype, zeros, SPDK_BLOBSTORE_TYPE_LENGTH) == 0) {
1809 		SPDK_DEBUGLOG(blob, "Bstype wildcard used - loading blobstore regardless bstype\n");
1810 	} else {
1811 		SPDK_DEBUGLOG(blob, "Unexpected bstype\n");
1812 		SPDK_LOGDUMP(blob, "Expected:", bs->bstype.bstype, SPDK_BLOBSTORE_TYPE_LENGTH);
1813 		SPDK_LOGDUMP(blob, "Found:", super->bstype.bstype, SPDK_BLOBSTORE_TYPE_LENGTH);
1814 		return -ENXIO;
1815 	}
1816 
1817 	if (super->size > bs->dev->blockcnt * bs->dev->blocklen) {
1818 		SPDK_NOTICELOG("Size mismatch, dev size: %" PRIu64 ", blobstore size: %" PRIu64 "\n",
1819 			       bs->dev->blockcnt * bs->dev->blocklen, super->size);
1820 		return -EILSEQ;
1821 	}
1822 
1823 	return 0;
1824 }
1825 
1826 static void bs_mark_dirty(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs,
1827 			  spdk_bs_sequence_cpl cb_fn, void *cb_arg);
1828 
1829 static void
1830 blob_persist_complete_cb(void *arg)
1831 {
1832 	struct spdk_blob_persist_ctx *ctx = arg;
1833 
1834 	/* Call user callback */
1835 	ctx->cb_fn(ctx->seq, ctx->cb_arg, 0);
1836 
1837 	/* Free the memory */
1838 	spdk_free(ctx->pages);
1839 	free(ctx);
1840 }
1841 
1842 static void blob_persist_start(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno);
1843 
1844 static void
1845 blob_persist_complete(spdk_bs_sequence_t *seq, struct spdk_blob_persist_ctx *ctx, int bserrno)
1846 {
1847 	struct spdk_blob_persist_ctx	*next_persist, *tmp;
1848 	struct spdk_blob		*blob = ctx->blob;
1849 
1850 	if (bserrno == 0) {
1851 		blob_mark_clean(blob);
1852 	}
1853 
1854 	assert(ctx == TAILQ_FIRST(&blob->persists_to_complete));
1855 
1856 	/* Complete all persists that were pending when the current persist started */
1857 	TAILQ_FOREACH_SAFE(next_persist, &blob->persists_to_complete, link, tmp) {
1858 		TAILQ_REMOVE(&blob->persists_to_complete, next_persist, link);
1859 		spdk_thread_send_msg(spdk_get_thread(), blob_persist_complete_cb, next_persist);
1860 	}
1861 
1862 	if (TAILQ_EMPTY(&blob->pending_persists)) {
1863 		return;
1864 	}
1865 
1866 	/* Queue up all pending persists for completion and start blob persist with first one */
1867 	TAILQ_SWAP(&blob->persists_to_complete, &blob->pending_persists, spdk_blob_persist_ctx, link);
1868 	next_persist = TAILQ_FIRST(&blob->persists_to_complete);
1869 
1870 	blob->state = SPDK_BLOB_STATE_DIRTY;
1871 	bs_mark_dirty(seq, blob->bs, blob_persist_start, next_persist);
1872 }
1873 
1874 static void
1875 blob_persist_clear_extents_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
1876 {
1877 	struct spdk_blob_persist_ctx	*ctx = cb_arg;
1878 	struct spdk_blob		*blob = ctx->blob;
1879 	struct spdk_blob_store		*bs = blob->bs;
1880 	size_t				i;
1881 
1882 	if (bserrno != 0) {
1883 		blob_persist_complete(seq, ctx, bserrno);
1884 		return;
1885 	}
1886 
1887 	spdk_spin_lock(&bs->used_lock);
1888 
1889 	/* Release all extent_pages that were truncated */
1890 	for (i = blob->active.num_extent_pages; i < blob->active.extent_pages_array_size; i++) {
1891 		/* Nothing to release if it was not allocated */
1892 		if (blob->active.extent_pages[i] != 0) {
1893 			bs_release_md_page(bs, blob->active.extent_pages[i]);
1894 		}
1895 	}
1896 
1897 	spdk_spin_unlock(&bs->used_lock);
1898 
1899 	if (blob->active.num_extent_pages == 0) {
1900 		free(blob->active.extent_pages);
1901 		blob->active.extent_pages = NULL;
1902 		blob->active.extent_pages_array_size = 0;
1903 	} else if (blob->active.num_extent_pages != blob->active.extent_pages_array_size) {
1904 #ifndef __clang_analyzer__
1905 		void *tmp;
1906 
1907 		/* scan-build really can't figure reallocs, workaround it */
1908 		tmp = realloc(blob->active.extent_pages, sizeof(uint32_t) * blob->active.num_extent_pages);
1909 		assert(tmp != NULL);
1910 		blob->active.extent_pages = tmp;
1911 #endif
1912 		blob->active.extent_pages_array_size = blob->active.num_extent_pages;
1913 	}
1914 
1915 	blob_persist_complete(seq, ctx, bserrno);
1916 }
1917 
1918 static void
1919 blob_persist_clear_extents(spdk_bs_sequence_t *seq, struct spdk_blob_persist_ctx *ctx)
1920 {
1921 	struct spdk_blob		*blob = ctx->blob;
1922 	struct spdk_blob_store		*bs = blob->bs;
1923 	size_t				i;
1924 	uint64_t                        lba;
1925 	uint64_t                        lba_count;
1926 	spdk_bs_batch_t                 *batch;
1927 
1928 	batch = bs_sequence_to_batch(seq, blob_persist_clear_extents_cpl, ctx);
1929 	lba_count = bs_byte_to_lba(bs, SPDK_BS_PAGE_SIZE);
1930 
1931 	/* Clear all extent_pages that were truncated */
1932 	for (i = blob->active.num_extent_pages; i < blob->active.extent_pages_array_size; i++) {
1933 		/* Nothing to clear if it was not allocated */
1934 		if (blob->active.extent_pages[i] != 0) {
1935 			lba = bs_md_page_to_lba(bs, blob->active.extent_pages[i]);
1936 			bs_batch_write_zeroes_dev(batch, lba, lba_count);
1937 		}
1938 	}
1939 
1940 	bs_batch_close(batch);
1941 }
1942 
1943 static void
1944 blob_persist_clear_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
1945 {
1946 	struct spdk_blob_persist_ctx	*ctx = cb_arg;
1947 	struct spdk_blob		*blob = ctx->blob;
1948 	struct spdk_blob_store		*bs = blob->bs;
1949 	size_t				i;
1950 
1951 	if (bserrno != 0) {
1952 		blob_persist_complete(seq, ctx, bserrno);
1953 		return;
1954 	}
1955 
1956 	spdk_spin_lock(&bs->used_lock);
1957 	/* Release all clusters that were truncated */
1958 	for (i = blob->active.num_clusters; i < blob->active.cluster_array_size; i++) {
1959 		uint32_t cluster_num = bs_lba_to_cluster(bs, blob->active.clusters[i]);
1960 
1961 		/* Nothing to release if it was not allocated */
1962 		if (blob->active.clusters[i] != 0) {
1963 			bs_release_cluster(bs, cluster_num);
1964 		}
1965 	}
1966 	spdk_spin_unlock(&bs->used_lock);
1967 
1968 	if (blob->active.num_clusters == 0) {
1969 		free(blob->active.clusters);
1970 		blob->active.clusters = NULL;
1971 		blob->active.cluster_array_size = 0;
1972 	} else if (blob->active.num_clusters != blob->active.cluster_array_size) {
1973 #ifndef __clang_analyzer__
1974 		void *tmp;
1975 
1976 		/* scan-build really can't figure reallocs, workaround it */
1977 		tmp = realloc(blob->active.clusters, sizeof(*blob->active.clusters) * blob->active.num_clusters);
1978 		assert(tmp != NULL);
1979 		blob->active.clusters = tmp;
1980 
1981 #endif
1982 		blob->active.cluster_array_size = blob->active.num_clusters;
1983 	}
1984 
1985 	/* Move on to clearing extent pages */
1986 	blob_persist_clear_extents(seq, ctx);
1987 }
1988 
1989 static void
1990 blob_persist_clear_clusters(spdk_bs_sequence_t *seq, struct spdk_blob_persist_ctx *ctx)
1991 {
1992 	struct spdk_blob		*blob = ctx->blob;
1993 	struct spdk_blob_store		*bs = blob->bs;
1994 	spdk_bs_batch_t			*batch;
1995 	size_t				i;
1996 	uint64_t			lba;
1997 	uint64_t			lba_count;
1998 
1999 	/* Clusters don't move around in blobs. The list shrinks or grows
2000 	 * at the end, but no changes ever occur in the middle of the list.
2001 	 */
2002 
2003 	batch = bs_sequence_to_batch(seq, blob_persist_clear_clusters_cpl, ctx);
2004 
2005 	/* Clear all clusters that were truncated */
2006 	lba = 0;
2007 	lba_count = 0;
2008 	for (i = blob->active.num_clusters; i < blob->active.cluster_array_size; i++) {
2009 		uint64_t next_lba = blob->active.clusters[i];
2010 		uint64_t next_lba_count = bs_cluster_to_lba(bs, 1);
2011 
2012 		if (next_lba > 0 && (lba + lba_count) == next_lba) {
2013 			/* This cluster is contiguous with the previous one. */
2014 			lba_count += next_lba_count;
2015 			continue;
2016 		} else if (next_lba == 0) {
2017 			continue;
2018 		}
2019 
2020 		/* This cluster is not contiguous with the previous one. */
2021 
2022 		/* If a run of LBAs previously existing, clear them now */
2023 		if (lba_count > 0) {
2024 			bs_batch_clear_dev(ctx->blob, batch, lba, lba_count);
2025 		}
2026 
2027 		/* Start building the next batch */
2028 		lba = next_lba;
2029 		if (next_lba > 0) {
2030 			lba_count = next_lba_count;
2031 		} else {
2032 			lba_count = 0;
2033 		}
2034 	}
2035 
2036 	/* If we ended with a contiguous set of LBAs, clear them now */
2037 	if (lba_count > 0) {
2038 		bs_batch_clear_dev(ctx->blob, batch, lba, lba_count);
2039 	}
2040 
2041 	bs_batch_close(batch);
2042 }
2043 
2044 static void
2045 blob_persist_zero_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2046 {
2047 	struct spdk_blob_persist_ctx	*ctx = cb_arg;
2048 	struct spdk_blob		*blob = ctx->blob;
2049 	struct spdk_blob_store		*bs = blob->bs;
2050 	size_t				i;
2051 
2052 	if (bserrno != 0) {
2053 		blob_persist_complete(seq, ctx, bserrno);
2054 		return;
2055 	}
2056 
2057 	spdk_spin_lock(&bs->used_lock);
2058 
2059 	/* This loop starts at 1 because the first page is special and handled
2060 	 * below. The pages (except the first) are never written in place,
2061 	 * so any pages in the clean list must be zeroed.
2062 	 */
2063 	for (i = 1; i < blob->clean.num_pages; i++) {
2064 		bs_release_md_page(bs, blob->clean.pages[i]);
2065 	}
2066 
2067 	if (blob->active.num_pages == 0) {
2068 		uint32_t page_num;
2069 
2070 		page_num = bs_blobid_to_page(blob->id);
2071 		bs_release_md_page(bs, page_num);
2072 	}
2073 
2074 	spdk_spin_unlock(&bs->used_lock);
2075 
2076 	/* Move on to clearing clusters */
2077 	blob_persist_clear_clusters(seq, ctx);
2078 }
2079 
2080 static void
2081 blob_persist_zero_pages(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2082 {
2083 	struct spdk_blob_persist_ctx	*ctx = cb_arg;
2084 	struct spdk_blob		*blob = ctx->blob;
2085 	struct spdk_blob_store		*bs = blob->bs;
2086 	uint64_t			lba;
2087 	uint64_t			lba_count;
2088 	spdk_bs_batch_t			*batch;
2089 	size_t				i;
2090 
2091 	if (bserrno != 0) {
2092 		blob_persist_complete(seq, ctx, bserrno);
2093 		return;
2094 	}
2095 
2096 	batch = bs_sequence_to_batch(seq, blob_persist_zero_pages_cpl, ctx);
2097 
2098 	lba_count = bs_byte_to_lba(bs, SPDK_BS_PAGE_SIZE);
2099 
2100 	/* This loop starts at 1 because the first page is special and handled
2101 	 * below. The pages (except the first) are never written in place,
2102 	 * so any pages in the clean list must be zeroed.
2103 	 */
2104 	for (i = 1; i < blob->clean.num_pages; i++) {
2105 		lba = bs_md_page_to_lba(bs, blob->clean.pages[i]);
2106 
2107 		bs_batch_write_zeroes_dev(batch, lba, lba_count);
2108 	}
2109 
2110 	/* The first page will only be zeroed if this is a delete. */
2111 	if (blob->active.num_pages == 0) {
2112 		uint32_t page_num;
2113 
2114 		/* The first page in the metadata goes where the blobid indicates */
2115 		page_num = bs_blobid_to_page(blob->id);
2116 		lba = bs_md_page_to_lba(bs, page_num);
2117 
2118 		bs_batch_write_zeroes_dev(batch, lba, lba_count);
2119 	}
2120 
2121 	bs_batch_close(batch);
2122 }
2123 
2124 static void
2125 blob_persist_write_page_root(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2126 {
2127 	struct spdk_blob_persist_ctx	*ctx = cb_arg;
2128 	struct spdk_blob		*blob = ctx->blob;
2129 	struct spdk_blob_store		*bs = blob->bs;
2130 	uint64_t			lba;
2131 	uint32_t			lba_count;
2132 	struct spdk_blob_md_page	*page;
2133 
2134 	if (bserrno != 0) {
2135 		blob_persist_complete(seq, ctx, bserrno);
2136 		return;
2137 	}
2138 
2139 	if (blob->active.num_pages == 0) {
2140 		/* Move on to the next step */
2141 		blob_persist_zero_pages(seq, ctx, 0);
2142 		return;
2143 	}
2144 
2145 	lba_count = bs_byte_to_lba(bs, sizeof(*page));
2146 
2147 	page = &ctx->pages[0];
2148 	/* The first page in the metadata goes where the blobid indicates */
2149 	lba = bs_md_page_to_lba(bs, bs_blobid_to_page(blob->id));
2150 
2151 	bs_sequence_write_dev(seq, page, lba, lba_count,
2152 			      blob_persist_zero_pages, ctx);
2153 }
2154 
2155 static void
2156 blob_persist_write_page_chain(spdk_bs_sequence_t *seq, struct spdk_blob_persist_ctx *ctx)
2157 {
2158 	struct spdk_blob		*blob = ctx->blob;
2159 	struct spdk_blob_store		*bs = blob->bs;
2160 	uint64_t			lba;
2161 	uint32_t			lba_count;
2162 	struct spdk_blob_md_page	*page;
2163 	spdk_bs_batch_t			*batch;
2164 	size_t				i;
2165 
2166 	/* Clusters don't move around in blobs. The list shrinks or grows
2167 	 * at the end, but no changes ever occur in the middle of the list.
2168 	 */
2169 
2170 	lba_count = bs_byte_to_lba(bs, sizeof(*page));
2171 
2172 	batch = bs_sequence_to_batch(seq, blob_persist_write_page_root, ctx);
2173 
2174 	/* This starts at 1. The root page is not written until
2175 	 * all of the others are finished
2176 	 */
2177 	for (i = 1; i < blob->active.num_pages; i++) {
2178 		page = &ctx->pages[i];
2179 		assert(page->sequence_num == i);
2180 
2181 		lba = bs_md_page_to_lba(bs, blob->active.pages[i]);
2182 
2183 		bs_batch_write_dev(batch, page, lba, lba_count);
2184 	}
2185 
2186 	bs_batch_close(batch);
2187 }
2188 
2189 static int
2190 blob_resize(struct spdk_blob *blob, uint64_t sz)
2191 {
2192 	uint64_t	i;
2193 	uint64_t	*tmp;
2194 	uint64_t	cluster;
2195 	uint32_t	lfmd; /*  lowest free md page */
2196 	uint64_t	num_clusters;
2197 	uint32_t	*ep_tmp;
2198 	uint64_t	new_num_ep = 0, current_num_ep = 0;
2199 	struct spdk_blob_store *bs;
2200 	int		rc;
2201 
2202 	bs = blob->bs;
2203 
2204 	blob_verify_md_op(blob);
2205 
2206 	if (blob->active.num_clusters == sz) {
2207 		return 0;
2208 	}
2209 
2210 	if (blob->active.num_clusters < blob->active.cluster_array_size) {
2211 		/* If this blob was resized to be larger, then smaller, then
2212 		 * larger without syncing, then the cluster array already
2213 		 * contains spare assigned clusters we can use.
2214 		 */
2215 		num_clusters = spdk_min(blob->active.cluster_array_size,
2216 					sz);
2217 	} else {
2218 		num_clusters = blob->active.num_clusters;
2219 	}
2220 
2221 	if (blob->use_extent_table) {
2222 		/* Round up since every cluster beyond current Extent Table size,
2223 		 * requires new extent page. */
2224 		new_num_ep = spdk_divide_round_up(sz, SPDK_EXTENTS_PER_EP);
2225 		current_num_ep = spdk_divide_round_up(num_clusters, SPDK_EXTENTS_PER_EP);
2226 	}
2227 
2228 	assert(!spdk_spin_held(&bs->used_lock));
2229 
2230 	/* Check first that we have enough clusters and md pages before we start claiming them.
2231 	 * bs->used_lock is held to ensure that clusters we think are free are still free when we go
2232 	 * to claim them later in this function.
2233 	 */
2234 	if (sz > num_clusters && spdk_blob_is_thin_provisioned(blob) == false) {
2235 		spdk_spin_lock(&bs->used_lock);
2236 		if ((sz - num_clusters) > bs->num_free_clusters) {
2237 			rc = -ENOSPC;
2238 			goto out;
2239 		}
2240 		lfmd = 0;
2241 		for (i = current_num_ep; i < new_num_ep ; i++) {
2242 			lfmd = spdk_bit_array_find_first_clear(blob->bs->used_md_pages, lfmd);
2243 			if (lfmd == UINT32_MAX) {
2244 				/* No more free md pages. Cannot satisfy the request */
2245 				rc = -ENOSPC;
2246 				goto out;
2247 			}
2248 		}
2249 	}
2250 
2251 	if (sz > num_clusters) {
2252 		/* Expand the cluster array if necessary.
2253 		 * We only shrink the array when persisting.
2254 		 */
2255 		tmp = realloc(blob->active.clusters, sizeof(*blob->active.clusters) * sz);
2256 		if (sz > 0 && tmp == NULL) {
2257 			rc = -ENOMEM;
2258 			goto out;
2259 		}
2260 		memset(tmp + blob->active.cluster_array_size, 0,
2261 		       sizeof(*blob->active.clusters) * (sz - blob->active.cluster_array_size));
2262 		blob->active.clusters = tmp;
2263 		blob->active.cluster_array_size = sz;
2264 
2265 		/* Expand the extents table, only if enough clusters were added */
2266 		if (new_num_ep > current_num_ep && blob->use_extent_table) {
2267 			ep_tmp = realloc(blob->active.extent_pages, sizeof(*blob->active.extent_pages) * new_num_ep);
2268 			if (new_num_ep > 0 && ep_tmp == NULL) {
2269 				rc = -ENOMEM;
2270 				goto out;
2271 			}
2272 			memset(ep_tmp + blob->active.extent_pages_array_size, 0,
2273 			       sizeof(*blob->active.extent_pages) * (new_num_ep - blob->active.extent_pages_array_size));
2274 			blob->active.extent_pages = ep_tmp;
2275 			blob->active.extent_pages_array_size = new_num_ep;
2276 		}
2277 	}
2278 
2279 	blob->state = SPDK_BLOB_STATE_DIRTY;
2280 
2281 	if (spdk_blob_is_thin_provisioned(blob) == false) {
2282 		cluster = 0;
2283 		lfmd = 0;
2284 		for (i = num_clusters; i < sz; i++) {
2285 			bs_allocate_cluster(blob, i, &cluster, &lfmd, true);
2286 			/* Do not increment lfmd here.  lfmd will get updated
2287 			 * to the md_page allocated (if any) when a new extent
2288 			 * page is needed.  Just pass that value again,
2289 			 * bs_allocate_cluster will just start at that index
2290 			 * to find the next free md_page when needed.
2291 			 */
2292 		}
2293 	}
2294 
2295 	/* If we are shrinking the blob, we must adjust num_allocated_clusters */
2296 	for (i = sz; i < num_clusters; i++) {
2297 		if (blob->active.clusters[i] != 0) {
2298 			blob->active.num_allocated_clusters--;
2299 		}
2300 	}
2301 
2302 	blob->active.num_clusters = sz;
2303 	blob->active.num_extent_pages = new_num_ep;
2304 
2305 	rc = 0;
2306 out:
2307 	if (spdk_spin_held(&bs->used_lock)) {
2308 		spdk_spin_unlock(&bs->used_lock);
2309 	}
2310 
2311 	return rc;
2312 }
2313 
2314 static void
2315 blob_persist_generate_new_md(struct spdk_blob_persist_ctx *ctx)
2316 {
2317 	spdk_bs_sequence_t *seq = ctx->seq;
2318 	struct spdk_blob *blob = ctx->blob;
2319 	struct spdk_blob_store *bs = blob->bs;
2320 	uint64_t i;
2321 	uint32_t page_num;
2322 	void *tmp;
2323 	int rc;
2324 
2325 	/* Generate the new metadata */
2326 	rc = blob_serialize(blob, &ctx->pages, &blob->active.num_pages);
2327 	if (rc < 0) {
2328 		blob_persist_complete(seq, ctx, rc);
2329 		return;
2330 	}
2331 
2332 	assert(blob->active.num_pages >= 1);
2333 
2334 	/* Resize the cache of page indices */
2335 	tmp = realloc(blob->active.pages, blob->active.num_pages * sizeof(*blob->active.pages));
2336 	if (!tmp) {
2337 		blob_persist_complete(seq, ctx, -ENOMEM);
2338 		return;
2339 	}
2340 	blob->active.pages = tmp;
2341 
2342 	/* Assign this metadata to pages. This requires two passes - one to verify that there are
2343 	 * enough pages and a second to actually claim them. The used_lock is held across
2344 	 * both passes to ensure things don't change in the middle.
2345 	 */
2346 	spdk_spin_lock(&bs->used_lock);
2347 	page_num = 0;
2348 	/* Note that this loop starts at one. The first page location is fixed by the blobid. */
2349 	for (i = 1; i < blob->active.num_pages; i++) {
2350 		page_num = spdk_bit_array_find_first_clear(bs->used_md_pages, page_num);
2351 		if (page_num == UINT32_MAX) {
2352 			spdk_spin_unlock(&bs->used_lock);
2353 			blob_persist_complete(seq, ctx, -ENOMEM);
2354 			return;
2355 		}
2356 		page_num++;
2357 	}
2358 
2359 	page_num = 0;
2360 	blob->active.pages[0] = bs_blobid_to_page(blob->id);
2361 	for (i = 1; i < blob->active.num_pages; i++) {
2362 		page_num = spdk_bit_array_find_first_clear(bs->used_md_pages, page_num);
2363 		ctx->pages[i - 1].next = page_num;
2364 		/* Now that previous metadata page is complete, calculate the crc for it. */
2365 		ctx->pages[i - 1].crc = blob_md_page_calc_crc(&ctx->pages[i - 1]);
2366 		blob->active.pages[i] = page_num;
2367 		bs_claim_md_page(bs, page_num);
2368 		SPDK_DEBUGLOG(blob, "Claiming page %u for blob 0x%" PRIx64 "\n", page_num,
2369 			      blob->id);
2370 		page_num++;
2371 	}
2372 	spdk_spin_unlock(&bs->used_lock);
2373 	ctx->pages[i - 1].crc = blob_md_page_calc_crc(&ctx->pages[i - 1]);
2374 	/* Start writing the metadata from last page to first */
2375 	blob->state = SPDK_BLOB_STATE_CLEAN;
2376 	blob_persist_write_page_chain(seq, ctx);
2377 }
2378 
2379 static void
2380 blob_persist_write_extent_pages(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2381 {
2382 	struct spdk_blob_persist_ctx	*ctx = cb_arg;
2383 	struct spdk_blob		*blob = ctx->blob;
2384 	size_t				i;
2385 	uint32_t			extent_page_id;
2386 	uint32_t                        page_count = 0;
2387 	int				rc;
2388 
2389 	if (ctx->extent_page != NULL) {
2390 		spdk_free(ctx->extent_page);
2391 		ctx->extent_page = NULL;
2392 	}
2393 
2394 	if (bserrno != 0) {
2395 		blob_persist_complete(seq, ctx, bserrno);
2396 		return;
2397 	}
2398 
2399 	/* Only write out Extent Pages when blob was resized. */
2400 	for (i = ctx->next_extent_page; i < blob->active.extent_pages_array_size; i++) {
2401 		extent_page_id = blob->active.extent_pages[i];
2402 		if (extent_page_id == 0) {
2403 			/* No Extent Page to persist */
2404 			assert(spdk_blob_is_thin_provisioned(blob));
2405 			continue;
2406 		}
2407 		assert(spdk_bit_array_get(blob->bs->used_md_pages, extent_page_id));
2408 		ctx->next_extent_page = i + 1;
2409 		rc = blob_serialize_add_page(ctx->blob, &ctx->extent_page, &page_count, &ctx->extent_page);
2410 		if (rc < 0) {
2411 			blob_persist_complete(seq, ctx, rc);
2412 			return;
2413 		}
2414 
2415 		blob->state = SPDK_BLOB_STATE_DIRTY;
2416 		blob_serialize_extent_page(blob, i * SPDK_EXTENTS_PER_EP, ctx->extent_page);
2417 
2418 		ctx->extent_page->crc = blob_md_page_calc_crc(ctx->extent_page);
2419 
2420 		bs_sequence_write_dev(seq, ctx->extent_page, bs_md_page_to_lba(blob->bs, extent_page_id),
2421 				      bs_byte_to_lba(blob->bs, SPDK_BS_PAGE_SIZE),
2422 				      blob_persist_write_extent_pages, ctx);
2423 		return;
2424 	}
2425 
2426 	blob_persist_generate_new_md(ctx);
2427 }
2428 
2429 static void
2430 blob_persist_start(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2431 {
2432 	struct spdk_blob_persist_ctx *ctx = cb_arg;
2433 	struct spdk_blob *blob = ctx->blob;
2434 
2435 	if (bserrno != 0) {
2436 		blob_persist_complete(seq, ctx, bserrno);
2437 		return;
2438 	}
2439 
2440 	if (blob->active.num_pages == 0) {
2441 		/* This is the signal that the blob should be deleted.
2442 		 * Immediately jump to the clean up routine. */
2443 		assert(blob->clean.num_pages > 0);
2444 		blob->state = SPDK_BLOB_STATE_CLEAN;
2445 		blob_persist_zero_pages(seq, ctx, 0);
2446 		return;
2447 
2448 	}
2449 
2450 	if (blob->clean.num_clusters < blob->active.num_clusters) {
2451 		/* Blob was resized up */
2452 		assert(blob->clean.num_extent_pages <= blob->active.num_extent_pages);
2453 		ctx->next_extent_page = spdk_max(1, blob->clean.num_extent_pages) - 1;
2454 	} else if (blob->active.num_clusters < blob->active.cluster_array_size) {
2455 		/* Blob was resized down */
2456 		assert(blob->clean.num_extent_pages >= blob->active.num_extent_pages);
2457 		ctx->next_extent_page = spdk_max(1, blob->active.num_extent_pages) - 1;
2458 	} else {
2459 		/* No change in size occurred */
2460 		blob_persist_generate_new_md(ctx);
2461 		return;
2462 	}
2463 
2464 	blob_persist_write_extent_pages(seq, ctx, 0);
2465 }
2466 
2467 struct spdk_bs_mark_dirty {
2468 	struct spdk_blob_store		*bs;
2469 	struct spdk_bs_super_block	*super;
2470 	spdk_bs_sequence_cpl		cb_fn;
2471 	void				*cb_arg;
2472 };
2473 
2474 static void
2475 bs_mark_dirty_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2476 {
2477 	struct spdk_bs_mark_dirty *ctx = cb_arg;
2478 
2479 	if (bserrno == 0) {
2480 		ctx->bs->clean = 0;
2481 	}
2482 
2483 	ctx->cb_fn(seq, ctx->cb_arg, bserrno);
2484 
2485 	spdk_free(ctx->super);
2486 	free(ctx);
2487 }
2488 
2489 static void bs_write_super(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs,
2490 			   struct spdk_bs_super_block *super, spdk_bs_sequence_cpl cb_fn, void *cb_arg);
2491 
2492 
2493 static void
2494 bs_mark_dirty_write(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2495 {
2496 	struct spdk_bs_mark_dirty *ctx = cb_arg;
2497 	int rc;
2498 
2499 	if (bserrno != 0) {
2500 		bs_mark_dirty_write_cpl(seq, ctx, bserrno);
2501 		return;
2502 	}
2503 
2504 	rc = bs_super_validate(ctx->super, ctx->bs);
2505 	if (rc != 0) {
2506 		bs_mark_dirty_write_cpl(seq, ctx, rc);
2507 		return;
2508 	}
2509 
2510 	ctx->super->clean = 0;
2511 	if (ctx->super->size == 0) {
2512 		ctx->super->size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen;
2513 	}
2514 
2515 	bs_write_super(seq, ctx->bs, ctx->super, bs_mark_dirty_write_cpl, ctx);
2516 }
2517 
2518 static void
2519 bs_mark_dirty(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs,
2520 	      spdk_bs_sequence_cpl cb_fn, void *cb_arg)
2521 {
2522 	struct spdk_bs_mark_dirty *ctx;
2523 
2524 	/* Blobstore is already marked dirty */
2525 	if (bs->clean == 0) {
2526 		cb_fn(seq, cb_arg, 0);
2527 		return;
2528 	}
2529 
2530 	ctx = calloc(1, sizeof(*ctx));
2531 	if (!ctx) {
2532 		cb_fn(seq, cb_arg, -ENOMEM);
2533 		return;
2534 	}
2535 	ctx->bs = bs;
2536 	ctx->cb_fn = cb_fn;
2537 	ctx->cb_arg = cb_arg;
2538 
2539 	ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL,
2540 				  SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
2541 	if (!ctx->super) {
2542 		free(ctx);
2543 		cb_fn(seq, cb_arg, -ENOMEM);
2544 		return;
2545 	}
2546 
2547 	bs_sequence_read_dev(seq, ctx->super, bs_page_to_lba(bs, 0),
2548 			     bs_byte_to_lba(bs, sizeof(*ctx->super)),
2549 			     bs_mark_dirty_write, ctx);
2550 }
2551 
2552 /* Write a blob to disk */
2553 static void
2554 blob_persist(spdk_bs_sequence_t *seq, struct spdk_blob *blob,
2555 	     spdk_bs_sequence_cpl cb_fn, void *cb_arg)
2556 {
2557 	struct spdk_blob_persist_ctx *ctx;
2558 
2559 	blob_verify_md_op(blob);
2560 
2561 	if (blob->state == SPDK_BLOB_STATE_CLEAN && TAILQ_EMPTY(&blob->persists_to_complete)) {
2562 		cb_fn(seq, cb_arg, 0);
2563 		return;
2564 	}
2565 
2566 	ctx = calloc(1, sizeof(*ctx));
2567 	if (!ctx) {
2568 		cb_fn(seq, cb_arg, -ENOMEM);
2569 		return;
2570 	}
2571 	ctx->blob = blob;
2572 	ctx->seq = seq;
2573 	ctx->cb_fn = cb_fn;
2574 	ctx->cb_arg = cb_arg;
2575 
2576 	/* Multiple blob persists can affect one another, via blob->state or
2577 	 * blob mutable data changes. To prevent it, queue up the persists. */
2578 	if (!TAILQ_EMPTY(&blob->persists_to_complete)) {
2579 		TAILQ_INSERT_TAIL(&blob->pending_persists, ctx, link);
2580 		return;
2581 	}
2582 	TAILQ_INSERT_HEAD(&blob->persists_to_complete, ctx, link);
2583 
2584 	bs_mark_dirty(seq, blob->bs, blob_persist_start, ctx);
2585 }
2586 
2587 struct spdk_blob_copy_cluster_ctx {
2588 	struct spdk_blob *blob;
2589 	uint8_t *buf;
2590 	uint64_t page;
2591 	uint64_t new_cluster;
2592 	uint32_t new_extent_page;
2593 	spdk_bs_sequence_t *seq;
2594 	struct spdk_blob_md_page *new_cluster_page;
2595 };
2596 
2597 struct spdk_blob_free_cluster_ctx {
2598 	struct spdk_blob *blob;
2599 	uint64_t page;
2600 	struct spdk_blob_md_page *md_page;
2601 	uint64_t cluster_num;
2602 	uint32_t extent_page;
2603 	spdk_bs_sequence_t *seq;
2604 };
2605 
2606 static void
2607 blob_allocate_and_copy_cluster_cpl(void *cb_arg, int bserrno)
2608 {
2609 	struct spdk_blob_copy_cluster_ctx *ctx = cb_arg;
2610 	struct spdk_bs_request_set *set = (struct spdk_bs_request_set *)ctx->seq;
2611 	TAILQ_HEAD(, spdk_bs_request_set) requests;
2612 	spdk_bs_user_op_t *op;
2613 
2614 	TAILQ_INIT(&requests);
2615 	TAILQ_SWAP(&set->channel->need_cluster_alloc, &requests, spdk_bs_request_set, link);
2616 
2617 	while (!TAILQ_EMPTY(&requests)) {
2618 		op = TAILQ_FIRST(&requests);
2619 		TAILQ_REMOVE(&requests, op, link);
2620 		if (bserrno == 0) {
2621 			bs_user_op_execute(op);
2622 		} else {
2623 			bs_user_op_abort(op, bserrno);
2624 		}
2625 	}
2626 
2627 	spdk_free(ctx->buf);
2628 	free(ctx);
2629 }
2630 
2631 static void
2632 blob_free_cluster_cpl(void *cb_arg, int bserrno)
2633 {
2634 	struct spdk_blob_free_cluster_ctx *ctx = cb_arg;
2635 	spdk_bs_sequence_t *seq = ctx->seq;
2636 
2637 	bs_sequence_finish(seq, bserrno);
2638 
2639 	free(ctx);
2640 }
2641 
2642 static void
2643 blob_insert_cluster_revert(struct spdk_blob_copy_cluster_ctx *ctx)
2644 {
2645 	spdk_spin_lock(&ctx->blob->bs->used_lock);
2646 	bs_release_cluster(ctx->blob->bs, ctx->new_cluster);
2647 	if (ctx->new_extent_page != 0) {
2648 		bs_release_md_page(ctx->blob->bs, ctx->new_extent_page);
2649 	}
2650 	spdk_spin_unlock(&ctx->blob->bs->used_lock);
2651 }
2652 
2653 static void
2654 blob_insert_cluster_clear_cpl(void *cb_arg, int bserrno)
2655 {
2656 	struct spdk_blob_copy_cluster_ctx *ctx = cb_arg;
2657 
2658 	if (bserrno) {
2659 		SPDK_WARNLOG("Failed to clear cluster: %d\n", bserrno);
2660 	}
2661 
2662 	blob_insert_cluster_revert(ctx);
2663 	bs_sequence_finish(ctx->seq, bserrno);
2664 }
2665 
2666 static void
2667 blob_insert_cluster_clear(struct spdk_blob_copy_cluster_ctx *ctx)
2668 {
2669 	struct spdk_bs_cpl cpl;
2670 	spdk_bs_batch_t *batch;
2671 	struct spdk_io_channel *ch = spdk_io_channel_from_ctx(ctx->seq->channel);
2672 
2673 	/*
2674 	 * We allocated a cluster and we copied data to it. But now, we realized that we don't need
2675 	 * this cluster and we want to release it. We must ensure that we clear the data on this
2676 	 * cluster.
2677 	 * The cluster may later be re-allocated by a thick-provisioned blob for example. When
2678 	 * reading from this thick-provisioned blob before writing data, we should read zeroes.
2679 	 */
2680 
2681 	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
2682 	cpl.u.blob_basic.cb_fn = blob_insert_cluster_clear_cpl;
2683 	cpl.u.blob_basic.cb_arg = ctx;
2684 
2685 	batch = bs_batch_open(ch, &cpl, ctx->blob);
2686 	if (!batch) {
2687 		blob_insert_cluster_clear_cpl(ctx, -ENOMEM);
2688 		return;
2689 	}
2690 
2691 	bs_batch_clear_dev(ctx->blob, batch, bs_cluster_to_lba(ctx->blob->bs, ctx->new_cluster),
2692 			   bs_cluster_to_lba(ctx->blob->bs, 1));
2693 	bs_batch_close(batch);
2694 }
2695 
2696 static void
2697 blob_insert_cluster_cpl(void *cb_arg, int bserrno)
2698 {
2699 	struct spdk_blob_copy_cluster_ctx *ctx = cb_arg;
2700 
2701 	if (bserrno) {
2702 		if (bserrno == -EEXIST) {
2703 			/* The metadata insert failed because another thread
2704 			 * allocated the cluster first. Clear and free our cluster
2705 			 * but continue without error. */
2706 			blob_insert_cluster_clear(ctx);
2707 			return;
2708 		}
2709 
2710 		blob_insert_cluster_revert(ctx);
2711 	}
2712 
2713 	bs_sequence_finish(ctx->seq, bserrno);
2714 }
2715 
2716 static void
2717 blob_write_copy_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2718 {
2719 	struct spdk_blob_copy_cluster_ctx *ctx = cb_arg;
2720 	uint32_t cluster_number;
2721 
2722 	if (bserrno) {
2723 		/* The write failed, so jump to the final completion handler */
2724 		bs_sequence_finish(seq, bserrno);
2725 		return;
2726 	}
2727 
2728 	cluster_number = bs_page_to_cluster(ctx->blob->bs, ctx->page);
2729 
2730 	blob_insert_cluster_on_md_thread(ctx->blob, cluster_number, ctx->new_cluster,
2731 					 ctx->new_extent_page, ctx->new_cluster_page, blob_insert_cluster_cpl, ctx);
2732 }
2733 
2734 static void
2735 blob_write_copy(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2736 {
2737 	struct spdk_blob_copy_cluster_ctx *ctx = cb_arg;
2738 
2739 	if (bserrno != 0) {
2740 		/* The read failed, so jump to the final completion handler */
2741 		bs_sequence_finish(seq, bserrno);
2742 		return;
2743 	}
2744 
2745 	/* Write whole cluster */
2746 	bs_sequence_write_dev(seq, ctx->buf,
2747 			      bs_cluster_to_lba(ctx->blob->bs, ctx->new_cluster),
2748 			      bs_cluster_to_lba(ctx->blob->bs, 1),
2749 			      blob_write_copy_cpl, ctx);
2750 }
2751 
2752 static bool
2753 blob_can_copy(struct spdk_blob *blob, uint32_t cluster_start_page, uint64_t *base_lba)
2754 {
2755 	uint64_t lba = bs_dev_page_to_lba(blob->back_bs_dev, cluster_start_page);
2756 
2757 	return (!blob_is_esnap_clone(blob) && blob->bs->dev->copy != NULL) &&
2758 	       blob->back_bs_dev->translate_lba(blob->back_bs_dev, lba, base_lba);
2759 }
2760 
2761 static void
2762 blob_copy(struct spdk_blob_copy_cluster_ctx *ctx, spdk_bs_user_op_t *op, uint64_t src_lba)
2763 {
2764 	struct spdk_blob *blob = ctx->blob;
2765 	uint64_t lba_count = bs_dev_byte_to_lba(blob->back_bs_dev, blob->bs->cluster_sz);
2766 
2767 	bs_sequence_copy_dev(ctx->seq,
2768 			     bs_cluster_to_lba(blob->bs, ctx->new_cluster),
2769 			     src_lba,
2770 			     lba_count,
2771 			     blob_write_copy_cpl, ctx);
2772 }
2773 
2774 static void
2775 bs_allocate_and_copy_cluster(struct spdk_blob *blob,
2776 			     struct spdk_io_channel *_ch,
2777 			     uint64_t io_unit, spdk_bs_user_op_t *op)
2778 {
2779 	struct spdk_bs_cpl cpl;
2780 	struct spdk_bs_channel *ch;
2781 	struct spdk_blob_copy_cluster_ctx *ctx;
2782 	uint32_t cluster_start_page;
2783 	uint32_t cluster_number;
2784 	bool is_zeroes;
2785 	bool can_copy;
2786 	bool is_valid_range;
2787 	uint64_t copy_src_lba;
2788 	int rc;
2789 
2790 	ch = spdk_io_channel_get_ctx(_ch);
2791 
2792 	if (!TAILQ_EMPTY(&ch->need_cluster_alloc)) {
2793 		/* There are already operations pending. Queue this user op
2794 		 * and return because it will be re-executed when the outstanding
2795 		 * cluster allocation completes. */
2796 		TAILQ_INSERT_TAIL(&ch->need_cluster_alloc, op, link);
2797 		return;
2798 	}
2799 
2800 	/* Round the io_unit offset down to the first page in the cluster */
2801 	cluster_start_page = bs_io_unit_to_cluster_start(blob, io_unit);
2802 
2803 	/* Calculate which index in the metadata cluster array the corresponding
2804 	 * cluster is supposed to be at. */
2805 	cluster_number = bs_io_unit_to_cluster_number(blob, io_unit);
2806 
2807 	ctx = calloc(1, sizeof(*ctx));
2808 	if (!ctx) {
2809 		bs_user_op_abort(op, -ENOMEM);
2810 		return;
2811 	}
2812 
2813 	assert(blob->bs->cluster_sz % blob->back_bs_dev->blocklen == 0);
2814 
2815 	ctx->blob = blob;
2816 	ctx->page = cluster_start_page;
2817 	ctx->new_cluster_page = ch->new_cluster_page;
2818 	memset(ctx->new_cluster_page, 0, SPDK_BS_PAGE_SIZE);
2819 
2820 	/* Check if the cluster that we intend to do CoW for is valid for
2821 	 * the backing dev. For zeroes backing dev, it'll be always valid.
2822 	 * For other backing dev e.g. a snapshot, it could be invalid if
2823 	 * the blob has been resized after snapshot was taken. */
2824 	is_valid_range = blob->back_bs_dev->is_range_valid(blob->back_bs_dev,
2825 			 bs_dev_page_to_lba(blob->back_bs_dev, cluster_start_page),
2826 			 bs_dev_byte_to_lba(blob->back_bs_dev, blob->bs->cluster_sz));
2827 
2828 	can_copy = is_valid_range && blob_can_copy(blob, cluster_start_page, &copy_src_lba);
2829 
2830 	is_zeroes = is_valid_range && blob->back_bs_dev->is_zeroes(blob->back_bs_dev,
2831 			bs_dev_page_to_lba(blob->back_bs_dev, cluster_start_page),
2832 			bs_dev_byte_to_lba(blob->back_bs_dev, blob->bs->cluster_sz));
2833 	if (blob->parent_id != SPDK_BLOBID_INVALID && !is_zeroes && !can_copy) {
2834 		ctx->buf = spdk_malloc(blob->bs->cluster_sz, blob->back_bs_dev->blocklen,
2835 				       NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
2836 		if (!ctx->buf) {
2837 			SPDK_ERRLOG("DMA allocation for cluster of size = %" PRIu32 " failed.\n",
2838 				    blob->bs->cluster_sz);
2839 			free(ctx);
2840 			bs_user_op_abort(op, -ENOMEM);
2841 			return;
2842 		}
2843 	}
2844 
2845 	spdk_spin_lock(&blob->bs->used_lock);
2846 	rc = bs_allocate_cluster(blob, cluster_number, &ctx->new_cluster, &ctx->new_extent_page,
2847 				 false);
2848 	spdk_spin_unlock(&blob->bs->used_lock);
2849 	if (rc != 0) {
2850 		spdk_free(ctx->buf);
2851 		free(ctx);
2852 		bs_user_op_abort(op, rc);
2853 		return;
2854 	}
2855 
2856 	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
2857 	cpl.u.blob_basic.cb_fn = blob_allocate_and_copy_cluster_cpl;
2858 	cpl.u.blob_basic.cb_arg = ctx;
2859 
2860 	ctx->seq = bs_sequence_start_blob(_ch, &cpl, blob);
2861 	if (!ctx->seq) {
2862 		spdk_spin_lock(&blob->bs->used_lock);
2863 		bs_release_cluster(blob->bs, ctx->new_cluster);
2864 		spdk_spin_unlock(&blob->bs->used_lock);
2865 		spdk_free(ctx->buf);
2866 		free(ctx);
2867 		bs_user_op_abort(op, -ENOMEM);
2868 		return;
2869 	}
2870 
2871 	/* Queue the user op to block other incoming operations */
2872 	TAILQ_INSERT_TAIL(&ch->need_cluster_alloc, op, link);
2873 
2874 	if (blob->parent_id != SPDK_BLOBID_INVALID && !is_zeroes) {
2875 		if (can_copy) {
2876 			blob_copy(ctx, op, copy_src_lba);
2877 		} else {
2878 			/* Read cluster from backing device */
2879 			bs_sequence_read_bs_dev(ctx->seq, blob->back_bs_dev, ctx->buf,
2880 						bs_dev_page_to_lba(blob->back_bs_dev, cluster_start_page),
2881 						bs_dev_byte_to_lba(blob->back_bs_dev, blob->bs->cluster_sz),
2882 						blob_write_copy, ctx);
2883 		}
2884 
2885 	} else {
2886 		blob_insert_cluster_on_md_thread(ctx->blob, cluster_number, ctx->new_cluster,
2887 						 ctx->new_extent_page, ctx->new_cluster_page, blob_insert_cluster_cpl, ctx);
2888 	}
2889 }
2890 
2891 static inline bool
2892 blob_calculate_lba_and_lba_count(struct spdk_blob *blob, uint64_t io_unit, uint64_t length,
2893 				 uint64_t *lba,	uint64_t *lba_count)
2894 {
2895 	*lba_count = length;
2896 
2897 	if (!bs_io_unit_is_allocated(blob, io_unit)) {
2898 		assert(blob->back_bs_dev != NULL);
2899 		*lba = bs_io_unit_to_back_dev_lba(blob, io_unit);
2900 		*lba_count = bs_io_unit_to_back_dev_lba(blob, *lba_count);
2901 		return false;
2902 	} else {
2903 		*lba = bs_blob_io_unit_to_lba(blob, io_unit);
2904 		return true;
2905 	}
2906 }
2907 
2908 struct op_split_ctx {
2909 	struct spdk_blob *blob;
2910 	struct spdk_io_channel *channel;
2911 	uint64_t io_unit_offset;
2912 	uint64_t io_units_remaining;
2913 	void *curr_payload;
2914 	enum spdk_blob_op_type op_type;
2915 	spdk_bs_sequence_t *seq;
2916 	bool in_submit_ctx;
2917 	bool completed_in_submit_ctx;
2918 	bool done;
2919 };
2920 
2921 static void
2922 blob_request_submit_op_split_next(void *cb_arg, int bserrno)
2923 {
2924 	struct op_split_ctx	*ctx = cb_arg;
2925 	struct spdk_blob	*blob = ctx->blob;
2926 	struct spdk_io_channel	*ch = ctx->channel;
2927 	enum spdk_blob_op_type	op_type = ctx->op_type;
2928 	uint8_t			*buf;
2929 	uint64_t		offset;
2930 	uint64_t		length;
2931 	uint64_t		op_length;
2932 
2933 	if (bserrno != 0 || ctx->io_units_remaining == 0) {
2934 		bs_sequence_finish(ctx->seq, bserrno);
2935 		if (ctx->in_submit_ctx) {
2936 			/* Defer freeing of the ctx object, since it will be
2937 			 * accessed when this unwinds back to the submisison
2938 			 * context.
2939 			 */
2940 			ctx->done = true;
2941 		} else {
2942 			free(ctx);
2943 		}
2944 		return;
2945 	}
2946 
2947 	if (ctx->in_submit_ctx) {
2948 		/* If this split operation completed in the context
2949 		 * of its submission, mark the flag and return immediately
2950 		 * to avoid recursion.
2951 		 */
2952 		ctx->completed_in_submit_ctx = true;
2953 		return;
2954 	}
2955 
2956 	while (true) {
2957 		ctx->completed_in_submit_ctx = false;
2958 
2959 		offset = ctx->io_unit_offset;
2960 		length = ctx->io_units_remaining;
2961 		buf = ctx->curr_payload;
2962 		op_length = spdk_min(length, bs_num_io_units_to_cluster_boundary(blob,
2963 				     offset));
2964 
2965 		/* Update length and payload for next operation */
2966 		ctx->io_units_remaining -= op_length;
2967 		ctx->io_unit_offset += op_length;
2968 		if (op_type == SPDK_BLOB_WRITE || op_type == SPDK_BLOB_READ) {
2969 			ctx->curr_payload += op_length * blob->bs->io_unit_size;
2970 		}
2971 
2972 		assert(!ctx->in_submit_ctx);
2973 		ctx->in_submit_ctx = true;
2974 
2975 		switch (op_type) {
2976 		case SPDK_BLOB_READ:
2977 			spdk_blob_io_read(blob, ch, buf, offset, op_length,
2978 					  blob_request_submit_op_split_next, ctx);
2979 			break;
2980 		case SPDK_BLOB_WRITE:
2981 			spdk_blob_io_write(blob, ch, buf, offset, op_length,
2982 					   blob_request_submit_op_split_next, ctx);
2983 			break;
2984 		case SPDK_BLOB_UNMAP:
2985 			spdk_blob_io_unmap(blob, ch, offset, op_length,
2986 					   blob_request_submit_op_split_next, ctx);
2987 			break;
2988 		case SPDK_BLOB_WRITE_ZEROES:
2989 			spdk_blob_io_write_zeroes(blob, ch, offset, op_length,
2990 						  blob_request_submit_op_split_next, ctx);
2991 			break;
2992 		case SPDK_BLOB_READV:
2993 		case SPDK_BLOB_WRITEV:
2994 			SPDK_ERRLOG("readv/write not valid\n");
2995 			bs_sequence_finish(ctx->seq, -EINVAL);
2996 			free(ctx);
2997 			return;
2998 		}
2999 
3000 #ifndef __clang_analyzer__
3001 		/* scan-build reports a false positive around accessing the ctx here. It
3002 		 * forms a path that recursively calls this function, but then says
3003 		 * "assuming ctx->in_submit_ctx is false", when that isn't possible.
3004 		 * This path does free(ctx), returns to here, and reports a use-after-free
3005 		 * bug.  Wrapping this bit of code so that scan-build doesn't see it
3006 		 * works around the scan-build bug.
3007 		 */
3008 		assert(ctx->in_submit_ctx);
3009 		ctx->in_submit_ctx = false;
3010 
3011 		/* If the operation completed immediately, loop back and submit the
3012 		 * next operation.  Otherwise we can return and the next split
3013 		 * operation will get submitted when this current operation is
3014 		 * later completed asynchronously.
3015 		 */
3016 		if (ctx->completed_in_submit_ctx) {
3017 			continue;
3018 		} else if (ctx->done) {
3019 			free(ctx);
3020 		}
3021 #endif
3022 		break;
3023 	}
3024 }
3025 
3026 static void
3027 blob_request_submit_op_split(struct spdk_io_channel *ch, struct spdk_blob *blob,
3028 			     void *payload, uint64_t offset, uint64_t length,
3029 			     spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type)
3030 {
3031 	struct op_split_ctx *ctx;
3032 	spdk_bs_sequence_t *seq;
3033 	struct spdk_bs_cpl cpl;
3034 
3035 	assert(blob != NULL);
3036 
3037 	ctx = calloc(1, sizeof(struct op_split_ctx));
3038 	if (ctx == NULL) {
3039 		cb_fn(cb_arg, -ENOMEM);
3040 		return;
3041 	}
3042 
3043 	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
3044 	cpl.u.blob_basic.cb_fn = cb_fn;
3045 	cpl.u.blob_basic.cb_arg = cb_arg;
3046 
3047 	seq = bs_sequence_start_blob(ch, &cpl, blob);
3048 	if (!seq) {
3049 		free(ctx);
3050 		cb_fn(cb_arg, -ENOMEM);
3051 		return;
3052 	}
3053 
3054 	ctx->blob = blob;
3055 	ctx->channel = ch;
3056 	ctx->curr_payload = payload;
3057 	ctx->io_unit_offset = offset;
3058 	ctx->io_units_remaining = length;
3059 	ctx->op_type = op_type;
3060 	ctx->seq = seq;
3061 
3062 	blob_request_submit_op_split_next(ctx, 0);
3063 }
3064 
3065 static void
3066 spdk_free_cluster_unmap_complete(void *cb_arg, int bserrno)
3067 {
3068 	struct spdk_blob_free_cluster_ctx *ctx = cb_arg;
3069 
3070 	if (bserrno) {
3071 		bs_sequence_finish(ctx->seq, bserrno);
3072 		free(ctx);
3073 		return;
3074 	}
3075 
3076 	blob_free_cluster_on_md_thread(ctx->blob, ctx->cluster_num,
3077 				       ctx->extent_page, ctx->md_page, blob_free_cluster_cpl, ctx);
3078 }
3079 
3080 static void
3081 blob_request_submit_op_single(struct spdk_io_channel *_ch, struct spdk_blob *blob,
3082 			      void *payload, uint64_t offset, uint64_t length,
3083 			      spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type)
3084 {
3085 	struct spdk_bs_cpl cpl;
3086 	uint64_t lba;
3087 	uint64_t lba_count;
3088 	bool is_allocated;
3089 
3090 	assert(blob != NULL);
3091 
3092 	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
3093 	cpl.u.blob_basic.cb_fn = cb_fn;
3094 	cpl.u.blob_basic.cb_arg = cb_arg;
3095 
3096 	if (blob->frozen_refcnt) {
3097 		/* This blob I/O is frozen */
3098 		spdk_bs_user_op_t *op;
3099 		struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(_ch);
3100 
3101 		op = bs_user_op_alloc(_ch, &cpl, op_type, blob, payload, 0, offset, length);
3102 		if (!op) {
3103 			cb_fn(cb_arg, -ENOMEM);
3104 			return;
3105 		}
3106 
3107 		TAILQ_INSERT_TAIL(&bs_channel->queued_io, op, link);
3108 
3109 		return;
3110 	}
3111 
3112 	is_allocated = blob_calculate_lba_and_lba_count(blob, offset, length, &lba, &lba_count);
3113 
3114 	switch (op_type) {
3115 	case SPDK_BLOB_READ: {
3116 		spdk_bs_batch_t *batch;
3117 
3118 		batch = bs_batch_open(_ch, &cpl, blob);
3119 		if (!batch) {
3120 			cb_fn(cb_arg, -ENOMEM);
3121 			return;
3122 		}
3123 
3124 		if (is_allocated) {
3125 			/* Read from the blob */
3126 			bs_batch_read_dev(batch, payload, lba, lba_count);
3127 		} else {
3128 			/* Read from the backing block device */
3129 			bs_batch_read_bs_dev(batch, blob->back_bs_dev, payload, lba, lba_count);
3130 		}
3131 
3132 		bs_batch_close(batch);
3133 		break;
3134 	}
3135 	case SPDK_BLOB_WRITE:
3136 	case SPDK_BLOB_WRITE_ZEROES: {
3137 		if (is_allocated) {
3138 			/* Write to the blob */
3139 			spdk_bs_batch_t *batch;
3140 
3141 			if (lba_count == 0) {
3142 				cb_fn(cb_arg, 0);
3143 				return;
3144 			}
3145 
3146 			batch = bs_batch_open(_ch, &cpl, blob);
3147 			if (!batch) {
3148 				cb_fn(cb_arg, -ENOMEM);
3149 				return;
3150 			}
3151 
3152 			if (op_type == SPDK_BLOB_WRITE) {
3153 				bs_batch_write_dev(batch, payload, lba, lba_count);
3154 			} else {
3155 				bs_batch_write_zeroes_dev(batch, lba, lba_count);
3156 			}
3157 
3158 			bs_batch_close(batch);
3159 		} else {
3160 			/* Queue this operation and allocate the cluster */
3161 			spdk_bs_user_op_t *op;
3162 
3163 			op = bs_user_op_alloc(_ch, &cpl, op_type, blob, payload, 0, offset, length);
3164 			if (!op) {
3165 				cb_fn(cb_arg, -ENOMEM);
3166 				return;
3167 			}
3168 
3169 			bs_allocate_and_copy_cluster(blob, _ch, offset, op);
3170 		}
3171 		break;
3172 	}
3173 	case SPDK_BLOB_UNMAP: {
3174 		struct spdk_blob_free_cluster_ctx *ctx = NULL;
3175 		spdk_bs_batch_t *batch;
3176 
3177 		/* if aligned with cluster release cluster */
3178 		if (spdk_blob_is_thin_provisioned(blob) && is_allocated &&
3179 		    bs_io_units_per_cluster(blob) == length) {
3180 			struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(_ch);
3181 			uint32_t cluster_start_page;
3182 			uint32_t cluster_number;
3183 
3184 			assert(offset % bs_io_units_per_cluster(blob) == 0);
3185 
3186 			/* Round the io_unit offset down to the first page in the cluster */
3187 			cluster_start_page = bs_io_unit_to_cluster_start(blob, offset);
3188 
3189 			/* Calculate which index in the metadata cluster array the corresponding
3190 			 * cluster is supposed to be at. */
3191 			cluster_number = bs_io_unit_to_cluster_number(blob, offset);
3192 
3193 			ctx = calloc(1, sizeof(*ctx));
3194 			if (!ctx) {
3195 				cb_fn(cb_arg, -ENOMEM);
3196 				return;
3197 			}
3198 			/* When freeing a cluster the flow should be (in order):
3199 			 * 1. Unmap the underlying area (so if the cluster is reclaimed in the future, it won't leak
3200 			 * old data)
3201 			 * 2. Once the unmap completes (to avoid any races with incoming writes that may claim the
3202 			 * cluster), update and sync metadata freeing the cluster
3203 			 * 3. Once metadata update is done, complete the user unmap request
3204 			 */
3205 			ctx->blob = blob;
3206 			ctx->page = cluster_start_page;
3207 			ctx->cluster_num = cluster_number;
3208 			ctx->md_page = bs_channel->new_cluster_page;
3209 			ctx->seq = bs_sequence_start_bs(_ch, &cpl);
3210 			if (!ctx->seq) {
3211 				free(ctx);
3212 				cb_fn(cb_arg, -ENOMEM);
3213 				return;
3214 			}
3215 
3216 			if (blob->use_extent_table) {
3217 				ctx->extent_page = *bs_cluster_to_extent_page(blob, cluster_number);
3218 			}
3219 
3220 			cpl.u.blob_basic.cb_fn = spdk_free_cluster_unmap_complete;
3221 			cpl.u.blob_basic.cb_arg = ctx;
3222 		}
3223 
3224 		batch = bs_batch_open(_ch, &cpl, blob);
3225 		if (!batch) {
3226 			free(ctx);
3227 			cb_fn(cb_arg, -ENOMEM);
3228 			return;
3229 		}
3230 
3231 		if (is_allocated) {
3232 			bs_batch_unmap_dev(batch, lba, lba_count);
3233 		}
3234 
3235 		bs_batch_close(batch);
3236 		break;
3237 	}
3238 	case SPDK_BLOB_READV:
3239 	case SPDK_BLOB_WRITEV:
3240 		SPDK_ERRLOG("readv/write not valid\n");
3241 		cb_fn(cb_arg, -EINVAL);
3242 		break;
3243 	}
3244 }
3245 
3246 static void
3247 blob_request_submit_op(struct spdk_blob *blob, struct spdk_io_channel *_channel,
3248 		       void *payload, uint64_t offset, uint64_t length,
3249 		       spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type)
3250 {
3251 	assert(blob != NULL);
3252 
3253 	if (blob->data_ro && op_type != SPDK_BLOB_READ) {
3254 		cb_fn(cb_arg, -EPERM);
3255 		return;
3256 	}
3257 
3258 	if (length == 0) {
3259 		cb_fn(cb_arg, 0);
3260 		return;
3261 	}
3262 
3263 	if (offset + length > bs_cluster_to_lba(blob->bs, blob->active.num_clusters)) {
3264 		cb_fn(cb_arg, -EINVAL);
3265 		return;
3266 	}
3267 	if (length <= bs_num_io_units_to_cluster_boundary(blob, offset)) {
3268 		blob_request_submit_op_single(_channel, blob, payload, offset, length,
3269 					      cb_fn, cb_arg, op_type);
3270 	} else {
3271 		blob_request_submit_op_split(_channel, blob, payload, offset, length,
3272 					     cb_fn, cb_arg, op_type);
3273 	}
3274 }
3275 
3276 struct rw_iov_ctx {
3277 	struct spdk_blob *blob;
3278 	struct spdk_io_channel *channel;
3279 	spdk_blob_op_complete cb_fn;
3280 	void *cb_arg;
3281 	bool read;
3282 	int iovcnt;
3283 	struct iovec *orig_iov;
3284 	uint64_t io_unit_offset;
3285 	uint64_t io_units_remaining;
3286 	uint64_t io_units_done;
3287 	struct spdk_blob_ext_io_opts *ext_io_opts;
3288 	struct iovec iov[0];
3289 };
3290 
3291 static void
3292 rw_iov_done(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
3293 {
3294 	assert(cb_arg == NULL);
3295 	bs_sequence_finish(seq, bserrno);
3296 }
3297 
3298 static void
3299 rw_iov_split_next(void *cb_arg, int bserrno)
3300 {
3301 	struct rw_iov_ctx *ctx = cb_arg;
3302 	struct spdk_blob *blob = ctx->blob;
3303 	struct iovec *iov, *orig_iov;
3304 	int iovcnt;
3305 	size_t orig_iovoff;
3306 	uint64_t io_units_count, io_units_to_boundary, io_unit_offset;
3307 	uint64_t byte_count;
3308 
3309 	if (bserrno != 0 || ctx->io_units_remaining == 0) {
3310 		ctx->cb_fn(ctx->cb_arg, bserrno);
3311 		free(ctx);
3312 		return;
3313 	}
3314 
3315 	io_unit_offset = ctx->io_unit_offset;
3316 	io_units_to_boundary = bs_num_io_units_to_cluster_boundary(blob, io_unit_offset);
3317 	io_units_count = spdk_min(ctx->io_units_remaining, io_units_to_boundary);
3318 	/*
3319 	 * Get index and offset into the original iov array for our current position in the I/O sequence.
3320 	 *  byte_count will keep track of how many bytes remaining until orig_iov and orig_iovoff will
3321 	 *  point to the current position in the I/O sequence.
3322 	 */
3323 	byte_count = ctx->io_units_done * blob->bs->io_unit_size;
3324 	orig_iov = &ctx->orig_iov[0];
3325 	orig_iovoff = 0;
3326 	while (byte_count > 0) {
3327 		if (byte_count >= orig_iov->iov_len) {
3328 			byte_count -= orig_iov->iov_len;
3329 			orig_iov++;
3330 		} else {
3331 			orig_iovoff = byte_count;
3332 			byte_count = 0;
3333 		}
3334 	}
3335 
3336 	/*
3337 	 * Build an iov array for the next I/O in the sequence.  byte_count will keep track of how many
3338 	 *  bytes of this next I/O remain to be accounted for in the new iov array.
3339 	 */
3340 	byte_count = io_units_count * blob->bs->io_unit_size;
3341 	iov = &ctx->iov[0];
3342 	iovcnt = 0;
3343 	while (byte_count > 0) {
3344 		assert(iovcnt < ctx->iovcnt);
3345 		iov->iov_len = spdk_min(byte_count, orig_iov->iov_len - orig_iovoff);
3346 		iov->iov_base = orig_iov->iov_base + orig_iovoff;
3347 		byte_count -= iov->iov_len;
3348 		orig_iovoff = 0;
3349 		orig_iov++;
3350 		iov++;
3351 		iovcnt++;
3352 	}
3353 
3354 	ctx->io_unit_offset += io_units_count;
3355 	ctx->io_units_remaining -= io_units_count;
3356 	ctx->io_units_done += io_units_count;
3357 	iov = &ctx->iov[0];
3358 
3359 	if (ctx->read) {
3360 		spdk_blob_io_readv_ext(ctx->blob, ctx->channel, iov, iovcnt, io_unit_offset,
3361 				       io_units_count, rw_iov_split_next, ctx, ctx->ext_io_opts);
3362 	} else {
3363 		spdk_blob_io_writev_ext(ctx->blob, ctx->channel, iov, iovcnt, io_unit_offset,
3364 					io_units_count, rw_iov_split_next, ctx, ctx->ext_io_opts);
3365 	}
3366 }
3367 
3368 static void
3369 blob_request_submit_rw_iov(struct spdk_blob *blob, struct spdk_io_channel *_channel,
3370 			   struct iovec *iov, int iovcnt,
3371 			   uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg, bool read,
3372 			   struct spdk_blob_ext_io_opts *ext_io_opts)
3373 {
3374 	struct spdk_bs_cpl	cpl;
3375 
3376 	assert(blob != NULL);
3377 
3378 	if (!read && blob->data_ro) {
3379 		cb_fn(cb_arg, -EPERM);
3380 		return;
3381 	}
3382 
3383 	if (length == 0) {
3384 		cb_fn(cb_arg, 0);
3385 		return;
3386 	}
3387 
3388 	if (offset + length > bs_cluster_to_lba(blob->bs, blob->active.num_clusters)) {
3389 		cb_fn(cb_arg, -EINVAL);
3390 		return;
3391 	}
3392 
3393 	/*
3394 	 * For now, we implement readv/writev using a sequence (instead of a batch) to account for having
3395 	 *  to split a request that spans a cluster boundary.  For I/O that do not span a cluster boundary,
3396 	 *  there will be no noticeable difference compared to using a batch.  For I/O that do span a cluster
3397 	 *  boundary, the target LBAs (after blob offset to LBA translation) may not be contiguous, so we need
3398 	 *  to allocate a separate iov array and split the I/O such that none of the resulting
3399 	 *  smaller I/O cross a cluster boundary.  These smaller I/O will be issued in sequence (not in parallel)
3400 	 *  but since this case happens very infrequently, any performance impact will be negligible.
3401 	 *
3402 	 * This could be optimized in the future to allocate a big enough iov array to account for all of the iovs
3403 	 *  for all of the smaller I/Os, pre-build all of the iov arrays for the smaller I/Os, then issue them
3404 	 *  in a batch.  That would also require creating an intermediate spdk_bs_cpl that would get called
3405 	 *  when the batch was completed, to allow for freeing the memory for the iov arrays.
3406 	 */
3407 	if (spdk_likely(length <= bs_num_io_units_to_cluster_boundary(blob, offset))) {
3408 		uint64_t lba_count;
3409 		uint64_t lba;
3410 		bool is_allocated;
3411 
3412 		cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
3413 		cpl.u.blob_basic.cb_fn = cb_fn;
3414 		cpl.u.blob_basic.cb_arg = cb_arg;
3415 
3416 		if (blob->frozen_refcnt) {
3417 			/* This blob I/O is frozen */
3418 			enum spdk_blob_op_type op_type;
3419 			spdk_bs_user_op_t *op;
3420 			struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(_channel);
3421 
3422 			op_type = read ? SPDK_BLOB_READV : SPDK_BLOB_WRITEV;
3423 			op = bs_user_op_alloc(_channel, &cpl, op_type, blob, iov, iovcnt, offset, length);
3424 			if (!op) {
3425 				cb_fn(cb_arg, -ENOMEM);
3426 				return;
3427 			}
3428 
3429 			TAILQ_INSERT_TAIL(&bs_channel->queued_io, op, link);
3430 
3431 			return;
3432 		}
3433 
3434 		is_allocated = blob_calculate_lba_and_lba_count(blob, offset, length, &lba, &lba_count);
3435 
3436 		if (read) {
3437 			spdk_bs_sequence_t *seq;
3438 
3439 			seq = bs_sequence_start_blob(_channel, &cpl, blob);
3440 			if (!seq) {
3441 				cb_fn(cb_arg, -ENOMEM);
3442 				return;
3443 			}
3444 
3445 			seq->ext_io_opts = ext_io_opts;
3446 
3447 			if (is_allocated) {
3448 				bs_sequence_readv_dev(seq, iov, iovcnt, lba, lba_count, rw_iov_done, NULL);
3449 			} else {
3450 				bs_sequence_readv_bs_dev(seq, blob->back_bs_dev, iov, iovcnt, lba, lba_count,
3451 							 rw_iov_done, NULL);
3452 			}
3453 		} else {
3454 			if (is_allocated) {
3455 				spdk_bs_sequence_t *seq;
3456 
3457 				seq = bs_sequence_start_blob(_channel, &cpl, blob);
3458 				if (!seq) {
3459 					cb_fn(cb_arg, -ENOMEM);
3460 					return;
3461 				}
3462 
3463 				seq->ext_io_opts = ext_io_opts;
3464 
3465 				bs_sequence_writev_dev(seq, iov, iovcnt, lba, lba_count, rw_iov_done, NULL);
3466 			} else {
3467 				/* Queue this operation and allocate the cluster */
3468 				spdk_bs_user_op_t *op;
3469 
3470 				op = bs_user_op_alloc(_channel, &cpl, SPDK_BLOB_WRITEV, blob, iov, iovcnt, offset,
3471 						      length);
3472 				if (!op) {
3473 					cb_fn(cb_arg, -ENOMEM);
3474 					return;
3475 				}
3476 
3477 				op->ext_io_opts = ext_io_opts;
3478 
3479 				bs_allocate_and_copy_cluster(blob, _channel, offset, op);
3480 			}
3481 		}
3482 	} else {
3483 		struct rw_iov_ctx *ctx;
3484 
3485 		ctx = calloc(1, sizeof(struct rw_iov_ctx) + iovcnt * sizeof(struct iovec));
3486 		if (ctx == NULL) {
3487 			cb_fn(cb_arg, -ENOMEM);
3488 			return;
3489 		}
3490 
3491 		ctx->blob = blob;
3492 		ctx->channel = _channel;
3493 		ctx->cb_fn = cb_fn;
3494 		ctx->cb_arg = cb_arg;
3495 		ctx->read = read;
3496 		ctx->orig_iov = iov;
3497 		ctx->iovcnt = iovcnt;
3498 		ctx->io_unit_offset = offset;
3499 		ctx->io_units_remaining = length;
3500 		ctx->io_units_done = 0;
3501 		ctx->ext_io_opts = ext_io_opts;
3502 
3503 		rw_iov_split_next(ctx, 0);
3504 	}
3505 }
3506 
3507 static struct spdk_blob *
3508 blob_lookup(struct spdk_blob_store *bs, spdk_blob_id blobid)
3509 {
3510 	struct spdk_blob find;
3511 
3512 	if (spdk_bit_array_get(bs->open_blobids, blobid) == 0) {
3513 		return NULL;
3514 	}
3515 
3516 	find.id = blobid;
3517 	return RB_FIND(spdk_blob_tree, &bs->open_blobs, &find);
3518 }
3519 
3520 static void
3521 blob_get_snapshot_and_clone_entries(struct spdk_blob *blob,
3522 				    struct spdk_blob_list **snapshot_entry, struct spdk_blob_list **clone_entry)
3523 {
3524 	assert(blob != NULL);
3525 	*snapshot_entry = NULL;
3526 	*clone_entry = NULL;
3527 
3528 	if (blob->parent_id == SPDK_BLOBID_INVALID) {
3529 		return;
3530 	}
3531 
3532 	TAILQ_FOREACH(*snapshot_entry, &blob->bs->snapshots, link) {
3533 		if ((*snapshot_entry)->id == blob->parent_id) {
3534 			break;
3535 		}
3536 	}
3537 
3538 	if (*snapshot_entry != NULL) {
3539 		TAILQ_FOREACH(*clone_entry, &(*snapshot_entry)->clones, link) {
3540 			if ((*clone_entry)->id == blob->id) {
3541 				break;
3542 			}
3543 		}
3544 
3545 		assert(*clone_entry != NULL);
3546 	}
3547 }
3548 
3549 static int
3550 bs_channel_create(void *io_device, void *ctx_buf)
3551 {
3552 	struct spdk_blob_store		*bs = io_device;
3553 	struct spdk_bs_channel		*channel = ctx_buf;
3554 	struct spdk_bs_dev		*dev;
3555 	uint32_t			max_ops = bs->max_channel_ops;
3556 	uint32_t			i;
3557 
3558 	dev = bs->dev;
3559 
3560 	channel->req_mem = calloc(max_ops, sizeof(struct spdk_bs_request_set));
3561 	if (!channel->req_mem) {
3562 		return -1;
3563 	}
3564 
3565 	TAILQ_INIT(&channel->reqs);
3566 
3567 	for (i = 0; i < max_ops; i++) {
3568 		TAILQ_INSERT_TAIL(&channel->reqs, &channel->req_mem[i], link);
3569 	}
3570 
3571 	channel->bs = bs;
3572 	channel->dev = dev;
3573 	channel->dev_channel = dev->create_channel(dev);
3574 
3575 	if (!channel->dev_channel) {
3576 		SPDK_ERRLOG("Failed to create device channel.\n");
3577 		free(channel->req_mem);
3578 		return -1;
3579 	}
3580 
3581 	channel->new_cluster_page = spdk_zmalloc(SPDK_BS_PAGE_SIZE, 0, NULL, SPDK_ENV_SOCKET_ID_ANY,
3582 				    SPDK_MALLOC_DMA);
3583 	if (!channel->new_cluster_page) {
3584 		SPDK_ERRLOG("Failed to allocate new cluster page\n");
3585 		free(channel->req_mem);
3586 		channel->dev->destroy_channel(channel->dev, channel->dev_channel);
3587 		return -1;
3588 	}
3589 
3590 	TAILQ_INIT(&channel->need_cluster_alloc);
3591 	TAILQ_INIT(&channel->queued_io);
3592 	RB_INIT(&channel->esnap_channels);
3593 
3594 	return 0;
3595 }
3596 
3597 static void
3598 bs_channel_destroy(void *io_device, void *ctx_buf)
3599 {
3600 	struct spdk_bs_channel *channel = ctx_buf;
3601 	spdk_bs_user_op_t *op;
3602 
3603 	while (!TAILQ_EMPTY(&channel->need_cluster_alloc)) {
3604 		op = TAILQ_FIRST(&channel->need_cluster_alloc);
3605 		TAILQ_REMOVE(&channel->need_cluster_alloc, op, link);
3606 		bs_user_op_abort(op, -EIO);
3607 	}
3608 
3609 	while (!TAILQ_EMPTY(&channel->queued_io)) {
3610 		op = TAILQ_FIRST(&channel->queued_io);
3611 		TAILQ_REMOVE(&channel->queued_io, op, link);
3612 		bs_user_op_abort(op, -EIO);
3613 	}
3614 
3615 	blob_esnap_destroy_bs_channel(channel);
3616 
3617 	free(channel->req_mem);
3618 	spdk_free(channel->new_cluster_page);
3619 	channel->dev->destroy_channel(channel->dev, channel->dev_channel);
3620 }
3621 
3622 static void
3623 bs_dev_destroy(void *io_device)
3624 {
3625 	struct spdk_blob_store *bs = io_device;
3626 	struct spdk_blob	*blob, *blob_tmp;
3627 
3628 	bs->dev->destroy(bs->dev);
3629 
3630 	RB_FOREACH_SAFE(blob, spdk_blob_tree, &bs->open_blobs, blob_tmp) {
3631 		RB_REMOVE(spdk_blob_tree, &bs->open_blobs, blob);
3632 		spdk_bit_array_clear(bs->open_blobids, blob->id);
3633 		blob_free(blob);
3634 	}
3635 
3636 	spdk_spin_destroy(&bs->used_lock);
3637 
3638 	spdk_bit_array_free(&bs->open_blobids);
3639 	spdk_bit_array_free(&bs->used_blobids);
3640 	spdk_bit_array_free(&bs->used_md_pages);
3641 	spdk_bit_pool_free(&bs->used_clusters);
3642 	/*
3643 	 * If this function is called for any reason except a successful unload,
3644 	 * the unload_cpl type will be NONE and this will be a nop.
3645 	 */
3646 	bs_call_cpl(&bs->unload_cpl, bs->unload_err);
3647 
3648 	free(bs);
3649 }
3650 
3651 static int
3652 bs_blob_list_add(struct spdk_blob *blob)
3653 {
3654 	spdk_blob_id snapshot_id;
3655 	struct spdk_blob_list *snapshot_entry = NULL;
3656 	struct spdk_blob_list *clone_entry = NULL;
3657 
3658 	assert(blob != NULL);
3659 
3660 	snapshot_id = blob->parent_id;
3661 	if (snapshot_id == SPDK_BLOBID_INVALID ||
3662 	    snapshot_id == SPDK_BLOBID_EXTERNAL_SNAPSHOT) {
3663 		return 0;
3664 	}
3665 
3666 	snapshot_entry = bs_get_snapshot_entry(blob->bs, snapshot_id);
3667 	if (snapshot_entry == NULL) {
3668 		/* Snapshot not found */
3669 		snapshot_entry = calloc(1, sizeof(struct spdk_blob_list));
3670 		if (snapshot_entry == NULL) {
3671 			return -ENOMEM;
3672 		}
3673 		snapshot_entry->id = snapshot_id;
3674 		TAILQ_INIT(&snapshot_entry->clones);
3675 		TAILQ_INSERT_TAIL(&blob->bs->snapshots, snapshot_entry, link);
3676 	} else {
3677 		TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) {
3678 			if (clone_entry->id == blob->id) {
3679 				break;
3680 			}
3681 		}
3682 	}
3683 
3684 	if (clone_entry == NULL) {
3685 		/* Clone not found */
3686 		clone_entry = calloc(1, sizeof(struct spdk_blob_list));
3687 		if (clone_entry == NULL) {
3688 			return -ENOMEM;
3689 		}
3690 		clone_entry->id = blob->id;
3691 		TAILQ_INIT(&clone_entry->clones);
3692 		TAILQ_INSERT_TAIL(&snapshot_entry->clones, clone_entry, link);
3693 		snapshot_entry->clone_count++;
3694 	}
3695 
3696 	return 0;
3697 }
3698 
3699 static void
3700 bs_blob_list_remove(struct spdk_blob *blob)
3701 {
3702 	struct spdk_blob_list *snapshot_entry = NULL;
3703 	struct spdk_blob_list *clone_entry = NULL;
3704 
3705 	blob_get_snapshot_and_clone_entries(blob, &snapshot_entry, &clone_entry);
3706 
3707 	if (snapshot_entry == NULL) {
3708 		return;
3709 	}
3710 
3711 	blob->parent_id = SPDK_BLOBID_INVALID;
3712 	TAILQ_REMOVE(&snapshot_entry->clones, clone_entry, link);
3713 	free(clone_entry);
3714 
3715 	snapshot_entry->clone_count--;
3716 }
3717 
3718 static int
3719 bs_blob_list_free(struct spdk_blob_store *bs)
3720 {
3721 	struct spdk_blob_list *snapshot_entry;
3722 	struct spdk_blob_list *snapshot_entry_tmp;
3723 	struct spdk_blob_list *clone_entry;
3724 	struct spdk_blob_list *clone_entry_tmp;
3725 
3726 	TAILQ_FOREACH_SAFE(snapshot_entry, &bs->snapshots, link, snapshot_entry_tmp) {
3727 		TAILQ_FOREACH_SAFE(clone_entry, &snapshot_entry->clones, link, clone_entry_tmp) {
3728 			TAILQ_REMOVE(&snapshot_entry->clones, clone_entry, link);
3729 			free(clone_entry);
3730 		}
3731 		TAILQ_REMOVE(&bs->snapshots, snapshot_entry, link);
3732 		free(snapshot_entry);
3733 	}
3734 
3735 	return 0;
3736 }
3737 
3738 static void
3739 bs_free(struct spdk_blob_store *bs)
3740 {
3741 	bs_blob_list_free(bs);
3742 
3743 	bs_unregister_md_thread(bs);
3744 	spdk_io_device_unregister(bs, bs_dev_destroy);
3745 }
3746 
3747 void
3748 spdk_bs_opts_init(struct spdk_bs_opts *opts, size_t opts_size)
3749 {
3750 
3751 	if (!opts) {
3752 		SPDK_ERRLOG("opts should not be NULL\n");
3753 		return;
3754 	}
3755 
3756 	if (!opts_size) {
3757 		SPDK_ERRLOG("opts_size should not be zero value\n");
3758 		return;
3759 	}
3760 
3761 	memset(opts, 0, opts_size);
3762 	opts->opts_size = opts_size;
3763 
3764 #define FIELD_OK(field) \
3765 	offsetof(struct spdk_bs_opts, field) + sizeof(opts->field) <= opts_size
3766 
3767 #define SET_FIELD(field, value) \
3768 	if (FIELD_OK(field)) { \
3769 		opts->field = value; \
3770 	} \
3771 
3772 	SET_FIELD(cluster_sz, SPDK_BLOB_OPTS_CLUSTER_SZ);
3773 	SET_FIELD(num_md_pages, SPDK_BLOB_OPTS_NUM_MD_PAGES);
3774 	SET_FIELD(max_md_ops, SPDK_BLOB_OPTS_NUM_MD_PAGES);
3775 	SET_FIELD(max_channel_ops, SPDK_BLOB_OPTS_DEFAULT_CHANNEL_OPS);
3776 	SET_FIELD(clear_method,  BS_CLEAR_WITH_UNMAP);
3777 
3778 	if (FIELD_OK(bstype)) {
3779 		memset(&opts->bstype, 0, sizeof(opts->bstype));
3780 	}
3781 
3782 	SET_FIELD(iter_cb_fn, NULL);
3783 	SET_FIELD(iter_cb_arg, NULL);
3784 	SET_FIELD(force_recover, false);
3785 	SET_FIELD(esnap_bs_dev_create, NULL);
3786 	SET_FIELD(esnap_ctx, NULL);
3787 
3788 #undef FIELD_OK
3789 #undef SET_FIELD
3790 }
3791 
3792 static int
3793 bs_opts_verify(struct spdk_bs_opts *opts)
3794 {
3795 	if (opts->cluster_sz == 0 || opts->num_md_pages == 0 || opts->max_md_ops == 0 ||
3796 	    opts->max_channel_ops == 0) {
3797 		SPDK_ERRLOG("Blobstore options cannot be set to 0\n");
3798 		return -1;
3799 	}
3800 
3801 	return 0;
3802 }
3803 
3804 /* START spdk_bs_load */
3805 
3806 /* spdk_bs_load_ctx is used for init, load, unload and dump code paths. */
3807 
3808 struct spdk_bs_load_ctx {
3809 	struct spdk_blob_store		*bs;
3810 	struct spdk_bs_super_block	*super;
3811 
3812 	struct spdk_bs_md_mask		*mask;
3813 	bool				in_page_chain;
3814 	uint32_t			page_index;
3815 	uint32_t			cur_page;
3816 	struct spdk_blob_md_page	*page;
3817 
3818 	uint64_t			num_extent_pages;
3819 	uint32_t			*extent_page_num;
3820 	struct spdk_blob_md_page	*extent_pages;
3821 	struct spdk_bit_array		*used_clusters;
3822 
3823 	spdk_bs_sequence_t			*seq;
3824 	spdk_blob_op_with_handle_complete	iter_cb_fn;
3825 	void					*iter_cb_arg;
3826 	struct spdk_blob			*blob;
3827 	spdk_blob_id				blobid;
3828 
3829 	bool					force_recover;
3830 
3831 	/* These fields are used in the spdk_bs_dump path. */
3832 	bool					dumping;
3833 	FILE					*fp;
3834 	spdk_bs_dump_print_xattr		print_xattr_fn;
3835 	char					xattr_name[4096];
3836 };
3837 
3838 static int
3839 bs_alloc(struct spdk_bs_dev *dev, struct spdk_bs_opts *opts, struct spdk_blob_store **_bs,
3840 	 struct spdk_bs_load_ctx **_ctx)
3841 {
3842 	struct spdk_blob_store	*bs;
3843 	struct spdk_bs_load_ctx	*ctx;
3844 	uint64_t dev_size;
3845 	int rc;
3846 
3847 	dev_size = dev->blocklen * dev->blockcnt;
3848 	if (dev_size < opts->cluster_sz) {
3849 		/* Device size cannot be smaller than cluster size of blobstore */
3850 		SPDK_INFOLOG(blob, "Device size %" PRIu64 " is smaller than cluster size %" PRIu32 "\n",
3851 			     dev_size, opts->cluster_sz);
3852 		return -ENOSPC;
3853 	}
3854 	if (opts->cluster_sz < SPDK_BS_PAGE_SIZE) {
3855 		/* Cluster size cannot be smaller than page size */
3856 		SPDK_ERRLOG("Cluster size %" PRIu32 " is smaller than page size %d\n",
3857 			    opts->cluster_sz, SPDK_BS_PAGE_SIZE);
3858 		return -EINVAL;
3859 	}
3860 	bs = calloc(1, sizeof(struct spdk_blob_store));
3861 	if (!bs) {
3862 		return -ENOMEM;
3863 	}
3864 
3865 	ctx = calloc(1, sizeof(struct spdk_bs_load_ctx));
3866 	if (!ctx) {
3867 		free(bs);
3868 		return -ENOMEM;
3869 	}
3870 
3871 	ctx->bs = bs;
3872 	ctx->iter_cb_fn = opts->iter_cb_fn;
3873 	ctx->iter_cb_arg = opts->iter_cb_arg;
3874 	ctx->force_recover = opts->force_recover;
3875 
3876 	ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL,
3877 				  SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
3878 	if (!ctx->super) {
3879 		free(ctx);
3880 		free(bs);
3881 		return -ENOMEM;
3882 	}
3883 
3884 	RB_INIT(&bs->open_blobs);
3885 	TAILQ_INIT(&bs->snapshots);
3886 	bs->dev = dev;
3887 	bs->md_thread = spdk_get_thread();
3888 	assert(bs->md_thread != NULL);
3889 
3890 	/*
3891 	 * Do not use bs_lba_to_cluster() here since blockcnt may not be an
3892 	 *  even multiple of the cluster size.
3893 	 */
3894 	bs->cluster_sz = opts->cluster_sz;
3895 	bs->total_clusters = dev->blockcnt / (bs->cluster_sz / dev->blocklen);
3896 	ctx->used_clusters = spdk_bit_array_create(bs->total_clusters);
3897 	if (!ctx->used_clusters) {
3898 		spdk_free(ctx->super);
3899 		free(ctx);
3900 		free(bs);
3901 		return -ENOMEM;
3902 	}
3903 
3904 	bs->pages_per_cluster = bs->cluster_sz / SPDK_BS_PAGE_SIZE;
3905 	if (spdk_u32_is_pow2(bs->pages_per_cluster)) {
3906 		bs->pages_per_cluster_shift = spdk_u32log2(bs->pages_per_cluster);
3907 	}
3908 	bs->num_free_clusters = bs->total_clusters;
3909 	bs->io_unit_size = dev->blocklen;
3910 
3911 	bs->max_channel_ops = opts->max_channel_ops;
3912 	bs->super_blob = SPDK_BLOBID_INVALID;
3913 	memcpy(&bs->bstype, &opts->bstype, sizeof(opts->bstype));
3914 	bs->esnap_bs_dev_create = opts->esnap_bs_dev_create;
3915 	bs->esnap_ctx = opts->esnap_ctx;
3916 
3917 	/* The metadata is assumed to be at least 1 page */
3918 	bs->used_md_pages = spdk_bit_array_create(1);
3919 	bs->used_blobids = spdk_bit_array_create(0);
3920 	bs->open_blobids = spdk_bit_array_create(0);
3921 
3922 	spdk_spin_init(&bs->used_lock);
3923 
3924 	spdk_io_device_register(bs, bs_channel_create, bs_channel_destroy,
3925 				sizeof(struct spdk_bs_channel), "blobstore");
3926 	rc = bs_register_md_thread(bs);
3927 	if (rc == -1) {
3928 		spdk_io_device_unregister(bs, NULL);
3929 		spdk_spin_destroy(&bs->used_lock);
3930 		spdk_bit_array_free(&bs->open_blobids);
3931 		spdk_bit_array_free(&bs->used_blobids);
3932 		spdk_bit_array_free(&bs->used_md_pages);
3933 		spdk_bit_array_free(&ctx->used_clusters);
3934 		spdk_free(ctx->super);
3935 		free(ctx);
3936 		free(bs);
3937 		/* FIXME: this is a lie but don't know how to get a proper error code here */
3938 		return -ENOMEM;
3939 	}
3940 
3941 	*_ctx = ctx;
3942 	*_bs = bs;
3943 	return 0;
3944 }
3945 
3946 static void
3947 bs_load_ctx_fail(struct spdk_bs_load_ctx *ctx, int bserrno)
3948 {
3949 	assert(bserrno != 0);
3950 
3951 	spdk_free(ctx->super);
3952 	bs_sequence_finish(ctx->seq, bserrno);
3953 	bs_free(ctx->bs);
3954 	spdk_bit_array_free(&ctx->used_clusters);
3955 	free(ctx);
3956 }
3957 
3958 static void
3959 bs_write_super(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs,
3960 	       struct spdk_bs_super_block *super, spdk_bs_sequence_cpl cb_fn, void *cb_arg)
3961 {
3962 	/* Update the values in the super block */
3963 	super->super_blob = bs->super_blob;
3964 	memcpy(&super->bstype, &bs->bstype, sizeof(bs->bstype));
3965 	super->crc = blob_md_page_calc_crc(super);
3966 	bs_sequence_write_dev(seq, super, bs_page_to_lba(bs, 0),
3967 			      bs_byte_to_lba(bs, sizeof(*super)),
3968 			      cb_fn, cb_arg);
3969 }
3970 
3971 static void
3972 bs_write_used_clusters(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn)
3973 {
3974 	struct spdk_bs_load_ctx	*ctx = arg;
3975 	uint64_t	mask_size, lba, lba_count;
3976 
3977 	/* Write out the used clusters mask */
3978 	mask_size = ctx->super->used_cluster_mask_len * SPDK_BS_PAGE_SIZE;
3979 	ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL,
3980 				 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
3981 	if (!ctx->mask) {
3982 		bs_load_ctx_fail(ctx, -ENOMEM);
3983 		return;
3984 	}
3985 
3986 	ctx->mask->type = SPDK_MD_MASK_TYPE_USED_CLUSTERS;
3987 	ctx->mask->length = ctx->bs->total_clusters;
3988 	/* We could get here through the normal unload path, or through dirty
3989 	 * shutdown recovery.  For the normal unload path, we use the mask from
3990 	 * the bit pool.  For dirty shutdown recovery, we don't have a bit pool yet -
3991 	 * only the bit array from the load ctx.
3992 	 */
3993 	if (ctx->bs->used_clusters) {
3994 		assert(ctx->mask->length == spdk_bit_pool_capacity(ctx->bs->used_clusters));
3995 		spdk_bit_pool_store_mask(ctx->bs->used_clusters, ctx->mask->mask);
3996 	} else {
3997 		assert(ctx->mask->length == spdk_bit_array_capacity(ctx->used_clusters));
3998 		spdk_bit_array_store_mask(ctx->used_clusters, ctx->mask->mask);
3999 	}
4000 	lba = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start);
4001 	lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len);
4002 	bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg);
4003 }
4004 
4005 static void
4006 bs_write_used_md(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn)
4007 {
4008 	struct spdk_bs_load_ctx	*ctx = arg;
4009 	uint64_t	mask_size, lba, lba_count;
4010 
4011 	mask_size = ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE;
4012 	ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL,
4013 				 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
4014 	if (!ctx->mask) {
4015 		bs_load_ctx_fail(ctx, -ENOMEM);
4016 		return;
4017 	}
4018 
4019 	ctx->mask->type = SPDK_MD_MASK_TYPE_USED_PAGES;
4020 	ctx->mask->length = ctx->super->md_len;
4021 	assert(ctx->mask->length == spdk_bit_array_capacity(ctx->bs->used_md_pages));
4022 
4023 	spdk_bit_array_store_mask(ctx->bs->used_md_pages, ctx->mask->mask);
4024 	lba = bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_start);
4025 	lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_len);
4026 	bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg);
4027 }
4028 
4029 static void
4030 bs_write_used_blobids(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn)
4031 {
4032 	struct spdk_bs_load_ctx	*ctx = arg;
4033 	uint64_t	mask_size, lba, lba_count;
4034 
4035 	if (ctx->super->used_blobid_mask_len == 0) {
4036 		/*
4037 		 * This is a pre-v3 on-disk format where the blobid mask does not get
4038 		 *  written to disk.
4039 		 */
4040 		cb_fn(seq, arg, 0);
4041 		return;
4042 	}
4043 
4044 	mask_size = ctx->super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE;
4045 	ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_SOCKET_ID_ANY,
4046 				 SPDK_MALLOC_DMA);
4047 	if (!ctx->mask) {
4048 		bs_load_ctx_fail(ctx, -ENOMEM);
4049 		return;
4050 	}
4051 
4052 	ctx->mask->type = SPDK_MD_MASK_TYPE_USED_BLOBIDS;
4053 	ctx->mask->length = ctx->super->md_len;
4054 	assert(ctx->mask->length == spdk_bit_array_capacity(ctx->bs->used_blobids));
4055 
4056 	spdk_bit_array_store_mask(ctx->bs->used_blobids, ctx->mask->mask);
4057 	lba = bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_start);
4058 	lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_len);
4059 	bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg);
4060 }
4061 
4062 static void
4063 blob_set_thin_provision(struct spdk_blob *blob)
4064 {
4065 	blob_verify_md_op(blob);
4066 	blob->invalid_flags |= SPDK_BLOB_THIN_PROV;
4067 	blob->state = SPDK_BLOB_STATE_DIRTY;
4068 }
4069 
4070 static void
4071 blob_set_clear_method(struct spdk_blob *blob, enum blob_clear_method clear_method)
4072 {
4073 	blob_verify_md_op(blob);
4074 	blob->clear_method = clear_method;
4075 	blob->md_ro_flags |= (clear_method << SPDK_BLOB_CLEAR_METHOD_SHIFT);
4076 	blob->state = SPDK_BLOB_STATE_DIRTY;
4077 }
4078 
4079 static void bs_load_iter(void *arg, struct spdk_blob *blob, int bserrno);
4080 
4081 static void
4082 bs_delete_corrupted_blob_cpl(void *cb_arg, int bserrno)
4083 {
4084 	struct spdk_bs_load_ctx *ctx = cb_arg;
4085 	spdk_blob_id id;
4086 	int64_t page_num;
4087 
4088 	/* Iterate to next blob (we can't use spdk_bs_iter_next function as our
4089 	 * last blob has been removed */
4090 	page_num = bs_blobid_to_page(ctx->blobid);
4091 	page_num++;
4092 	page_num = spdk_bit_array_find_first_set(ctx->bs->used_blobids, page_num);
4093 	if (page_num >= spdk_bit_array_capacity(ctx->bs->used_blobids)) {
4094 		bs_load_iter(ctx, NULL, -ENOENT);
4095 		return;
4096 	}
4097 
4098 	id = bs_page_to_blobid(page_num);
4099 
4100 	spdk_bs_open_blob(ctx->bs, id, bs_load_iter, ctx);
4101 }
4102 
4103 static void
4104 bs_delete_corrupted_close_cb(void *cb_arg, int bserrno)
4105 {
4106 	struct spdk_bs_load_ctx *ctx = cb_arg;
4107 
4108 	if (bserrno != 0) {
4109 		SPDK_ERRLOG("Failed to close corrupted blob\n");
4110 		spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx);
4111 		return;
4112 	}
4113 
4114 	spdk_bs_delete_blob(ctx->bs, ctx->blobid, bs_delete_corrupted_blob_cpl, ctx);
4115 }
4116 
4117 static void
4118 bs_delete_corrupted_blob(void *cb_arg, int bserrno)
4119 {
4120 	struct spdk_bs_load_ctx *ctx = cb_arg;
4121 	uint64_t i;
4122 
4123 	if (bserrno != 0) {
4124 		SPDK_ERRLOG("Failed to close clone of a corrupted blob\n");
4125 		spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx);
4126 		return;
4127 	}
4128 
4129 	/* Snapshot and clone have the same copy of cluster map and extent pages
4130 	 * at this point. Let's clear both for snapshot now,
4131 	 * so that it won't be cleared for clone later when we remove snapshot.
4132 	 * Also set thin provision to pass data corruption check */
4133 	for (i = 0; i < ctx->blob->active.num_clusters; i++) {
4134 		ctx->blob->active.clusters[i] = 0;
4135 	}
4136 	for (i = 0; i < ctx->blob->active.num_extent_pages; i++) {
4137 		ctx->blob->active.extent_pages[i] = 0;
4138 	}
4139 
4140 	ctx->blob->active.num_allocated_clusters = 0;
4141 
4142 	ctx->blob->md_ro = false;
4143 
4144 	blob_set_thin_provision(ctx->blob);
4145 
4146 	ctx->blobid = ctx->blob->id;
4147 
4148 	spdk_blob_close(ctx->blob, bs_delete_corrupted_close_cb, ctx);
4149 }
4150 
4151 static void
4152 bs_update_corrupted_blob(void *cb_arg, int bserrno)
4153 {
4154 	struct spdk_bs_load_ctx *ctx = cb_arg;
4155 
4156 	if (bserrno != 0) {
4157 		SPDK_ERRLOG("Failed to close clone of a corrupted blob\n");
4158 		spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx);
4159 		return;
4160 	}
4161 
4162 	ctx->blob->md_ro = false;
4163 	blob_remove_xattr(ctx->blob, SNAPSHOT_PENDING_REMOVAL, true);
4164 	blob_remove_xattr(ctx->blob, SNAPSHOT_IN_PROGRESS, true);
4165 	spdk_blob_set_read_only(ctx->blob);
4166 
4167 	if (ctx->iter_cb_fn) {
4168 		ctx->iter_cb_fn(ctx->iter_cb_arg, ctx->blob, 0);
4169 	}
4170 	bs_blob_list_add(ctx->blob);
4171 
4172 	spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx);
4173 }
4174 
4175 static void
4176 bs_examine_clone(void *cb_arg, struct spdk_blob *blob, int bserrno)
4177 {
4178 	struct spdk_bs_load_ctx *ctx = cb_arg;
4179 
4180 	if (bserrno != 0) {
4181 		SPDK_ERRLOG("Failed to open clone of a corrupted blob\n");
4182 		spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx);
4183 		return;
4184 	}
4185 
4186 	if (blob->parent_id == ctx->blob->id) {
4187 		/* Power failure occurred before updating clone (snapshot delete case)
4188 		 * or after updating clone (creating snapshot case) - keep snapshot */
4189 		spdk_blob_close(blob, bs_update_corrupted_blob, ctx);
4190 	} else {
4191 		/* Power failure occurred after updating clone (snapshot delete case)
4192 		 * or before updating clone (creating snapshot case) - remove snapshot */
4193 		spdk_blob_close(blob, bs_delete_corrupted_blob, ctx);
4194 	}
4195 }
4196 
4197 static void
4198 bs_load_iter(void *arg, struct spdk_blob *blob, int bserrno)
4199 {
4200 	struct spdk_bs_load_ctx *ctx = arg;
4201 	const void *value;
4202 	size_t len;
4203 	int rc = 0;
4204 
4205 	if (bserrno == 0) {
4206 		/* Examine blob if it is corrupted after power failure. Fix
4207 		 * the ones that can be fixed and remove any other corrupted
4208 		 * ones. If it is not corrupted just process it */
4209 		rc = blob_get_xattr_value(blob, SNAPSHOT_PENDING_REMOVAL, &value, &len, true);
4210 		if (rc != 0) {
4211 			rc = blob_get_xattr_value(blob, SNAPSHOT_IN_PROGRESS, &value, &len, true);
4212 			if (rc != 0) {
4213 				/* Not corrupted - process it and continue with iterating through blobs */
4214 				if (ctx->iter_cb_fn) {
4215 					ctx->iter_cb_fn(ctx->iter_cb_arg, blob, 0);
4216 				}
4217 				bs_blob_list_add(blob);
4218 				spdk_bs_iter_next(ctx->bs, blob, bs_load_iter, ctx);
4219 				return;
4220 			}
4221 
4222 		}
4223 
4224 		assert(len == sizeof(spdk_blob_id));
4225 
4226 		ctx->blob = blob;
4227 
4228 		/* Open clone to check if we are able to fix this blob or should we remove it */
4229 		spdk_bs_open_blob(ctx->bs, *(spdk_blob_id *)value, bs_examine_clone, ctx);
4230 		return;
4231 	} else if (bserrno == -ENOENT) {
4232 		bserrno = 0;
4233 	} else {
4234 		/*
4235 		 * This case needs to be looked at further.  Same problem
4236 		 *  exists with applications that rely on explicit blob
4237 		 *  iteration.  We should just skip the blob that failed
4238 		 *  to load and continue on to the next one.
4239 		 */
4240 		SPDK_ERRLOG("Error in iterating blobs\n");
4241 	}
4242 
4243 	ctx->iter_cb_fn = NULL;
4244 
4245 	spdk_free(ctx->super);
4246 	spdk_free(ctx->mask);
4247 	bs_sequence_finish(ctx->seq, bserrno);
4248 	free(ctx);
4249 }
4250 
4251 static void bs_dump_read_md_page(spdk_bs_sequence_t *seq, void *cb_arg);
4252 
4253 static void
4254 bs_load_complete(struct spdk_bs_load_ctx *ctx)
4255 {
4256 	ctx->bs->used_clusters = spdk_bit_pool_create_from_array(ctx->used_clusters);
4257 	if (ctx->dumping) {
4258 		bs_dump_read_md_page(ctx->seq, ctx);
4259 		return;
4260 	}
4261 	spdk_bs_iter_first(ctx->bs, bs_load_iter, ctx);
4262 }
4263 
4264 static void
4265 bs_load_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4266 {
4267 	struct spdk_bs_load_ctx *ctx = cb_arg;
4268 	int rc;
4269 
4270 	/* The type must be correct */
4271 	assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_BLOBIDS);
4272 
4273 	/* The length of the mask (in bits) must not be greater than
4274 	 * the length of the buffer (converted to bits) */
4275 	assert(ctx->mask->length <= (ctx->super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE * 8));
4276 
4277 	/* The length of the mask must be exactly equal to the size
4278 	 * (in pages) of the metadata region */
4279 	assert(ctx->mask->length == ctx->super->md_len);
4280 
4281 	rc = spdk_bit_array_resize(&ctx->bs->used_blobids, ctx->mask->length);
4282 	if (rc < 0) {
4283 		spdk_free(ctx->mask);
4284 		bs_load_ctx_fail(ctx, rc);
4285 		return;
4286 	}
4287 
4288 	spdk_bit_array_load_mask(ctx->bs->used_blobids, ctx->mask->mask);
4289 	bs_load_complete(ctx);
4290 }
4291 
4292 static void
4293 bs_load_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4294 {
4295 	struct spdk_bs_load_ctx *ctx = cb_arg;
4296 	uint64_t		lba, lba_count, mask_size;
4297 	int			rc;
4298 
4299 	if (bserrno != 0) {
4300 		bs_load_ctx_fail(ctx, bserrno);
4301 		return;
4302 	}
4303 
4304 	/* The type must be correct */
4305 	assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_CLUSTERS);
4306 	/* The length of the mask (in bits) must not be greater than the length of the buffer (converted to bits) */
4307 	assert(ctx->mask->length <= (ctx->super->used_cluster_mask_len * sizeof(
4308 					     struct spdk_blob_md_page) * 8));
4309 	/*
4310 	 * The length of the mask must be equal to or larger than the total number of clusters. It may be
4311 	 * larger than the total number of clusters due to a failure spdk_bs_grow.
4312 	 */
4313 	assert(ctx->mask->length >= ctx->bs->total_clusters);
4314 	if (ctx->mask->length > ctx->bs->total_clusters) {
4315 		SPDK_WARNLOG("Shrink the used_custers mask length to total_clusters");
4316 		ctx->mask->length = ctx->bs->total_clusters;
4317 	}
4318 
4319 	rc = spdk_bit_array_resize(&ctx->used_clusters, ctx->mask->length);
4320 	if (rc < 0) {
4321 		spdk_free(ctx->mask);
4322 		bs_load_ctx_fail(ctx, rc);
4323 		return;
4324 	}
4325 
4326 	spdk_bit_array_load_mask(ctx->used_clusters, ctx->mask->mask);
4327 	ctx->bs->num_free_clusters = spdk_bit_array_count_clear(ctx->used_clusters);
4328 	assert(ctx->bs->num_free_clusters <= ctx->bs->total_clusters);
4329 
4330 	spdk_free(ctx->mask);
4331 
4332 	/* Read the used blobids mask */
4333 	mask_size = ctx->super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE;
4334 	ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_SOCKET_ID_ANY,
4335 				 SPDK_MALLOC_DMA);
4336 	if (!ctx->mask) {
4337 		bs_load_ctx_fail(ctx, -ENOMEM);
4338 		return;
4339 	}
4340 	lba = bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_start);
4341 	lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_len);
4342 	bs_sequence_read_dev(seq, ctx->mask, lba, lba_count,
4343 			     bs_load_used_blobids_cpl, ctx);
4344 }
4345 
4346 static void
4347 bs_load_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4348 {
4349 	struct spdk_bs_load_ctx *ctx = cb_arg;
4350 	uint64_t		lba, lba_count, mask_size;
4351 	int			rc;
4352 
4353 	if (bserrno != 0) {
4354 		bs_load_ctx_fail(ctx, bserrno);
4355 		return;
4356 	}
4357 
4358 	/* The type must be correct */
4359 	assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_PAGES);
4360 	/* The length of the mask (in bits) must not be greater than the length of the buffer (converted to bits) */
4361 	assert(ctx->mask->length <= (ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE *
4362 				     8));
4363 	/* The length of the mask must be exactly equal to the size (in pages) of the metadata region */
4364 	if (ctx->mask->length != ctx->super->md_len) {
4365 		SPDK_ERRLOG("mismatched md_len in used_pages mask: "
4366 			    "mask->length=%" PRIu32 " super->md_len=%" PRIu32 "\n",
4367 			    ctx->mask->length, ctx->super->md_len);
4368 		assert(false);
4369 	}
4370 
4371 	rc = spdk_bit_array_resize(&ctx->bs->used_md_pages, ctx->mask->length);
4372 	if (rc < 0) {
4373 		spdk_free(ctx->mask);
4374 		bs_load_ctx_fail(ctx, rc);
4375 		return;
4376 	}
4377 
4378 	spdk_bit_array_load_mask(ctx->bs->used_md_pages, ctx->mask->mask);
4379 	spdk_free(ctx->mask);
4380 
4381 	/* Read the used clusters mask */
4382 	mask_size = ctx->super->used_cluster_mask_len * SPDK_BS_PAGE_SIZE;
4383 	ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_SOCKET_ID_ANY,
4384 				 SPDK_MALLOC_DMA);
4385 	if (!ctx->mask) {
4386 		bs_load_ctx_fail(ctx, -ENOMEM);
4387 		return;
4388 	}
4389 	lba = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start);
4390 	lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len);
4391 	bs_sequence_read_dev(seq, ctx->mask, lba, lba_count,
4392 			     bs_load_used_clusters_cpl, ctx);
4393 }
4394 
4395 static void
4396 bs_load_read_used_pages(struct spdk_bs_load_ctx *ctx)
4397 {
4398 	uint64_t lba, lba_count, mask_size;
4399 
4400 	/* Read the used pages mask */
4401 	mask_size = ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE;
4402 	ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL,
4403 				 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
4404 	if (!ctx->mask) {
4405 		bs_load_ctx_fail(ctx, -ENOMEM);
4406 		return;
4407 	}
4408 
4409 	lba = bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_start);
4410 	lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_len);
4411 	bs_sequence_read_dev(ctx->seq, ctx->mask, lba, lba_count,
4412 			     bs_load_used_pages_cpl, ctx);
4413 }
4414 
4415 static int
4416 bs_load_replay_md_parse_page(struct spdk_bs_load_ctx *ctx, struct spdk_blob_md_page *page)
4417 {
4418 	struct spdk_blob_store *bs = ctx->bs;
4419 	struct spdk_blob_md_descriptor *desc;
4420 	size_t	cur_desc = 0;
4421 
4422 	desc = (struct spdk_blob_md_descriptor *)page->descriptors;
4423 	while (cur_desc < sizeof(page->descriptors)) {
4424 		if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) {
4425 			if (desc->length == 0) {
4426 				/* If padding and length are 0, this terminates the page */
4427 				break;
4428 			}
4429 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE) {
4430 			struct spdk_blob_md_descriptor_extent_rle	*desc_extent_rle;
4431 			unsigned int				i, j;
4432 			unsigned int				cluster_count = 0;
4433 			uint32_t				cluster_idx;
4434 
4435 			desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)desc;
4436 
4437 			for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) {
4438 				for (j = 0; j < desc_extent_rle->extents[i].length; j++) {
4439 					cluster_idx = desc_extent_rle->extents[i].cluster_idx;
4440 					/*
4441 					 * cluster_idx = 0 means an unallocated cluster - don't mark that
4442 					 * in the used cluster map.
4443 					 */
4444 					if (cluster_idx != 0) {
4445 						SPDK_NOTICELOG("Recover: cluster %" PRIu32 "\n", cluster_idx + j);
4446 						spdk_bit_array_set(ctx->used_clusters, cluster_idx + j);
4447 						if (bs->num_free_clusters == 0) {
4448 							return -ENOSPC;
4449 						}
4450 						bs->num_free_clusters--;
4451 					}
4452 					cluster_count++;
4453 				}
4454 			}
4455 			if (cluster_count == 0) {
4456 				return -EINVAL;
4457 			}
4458 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) {
4459 			struct spdk_blob_md_descriptor_extent_page	*desc_extent;
4460 			uint32_t					i;
4461 			uint32_t					cluster_count = 0;
4462 			uint32_t					cluster_idx;
4463 			size_t						cluster_idx_length;
4464 
4465 			desc_extent = (struct spdk_blob_md_descriptor_extent_page *)desc;
4466 			cluster_idx_length = desc_extent->length - sizeof(desc_extent->start_cluster_idx);
4467 
4468 			if (desc_extent->length <= sizeof(desc_extent->start_cluster_idx) ||
4469 			    (cluster_idx_length % sizeof(desc_extent->cluster_idx[0]) != 0)) {
4470 				return -EINVAL;
4471 			}
4472 
4473 			for (i = 0; i < cluster_idx_length / sizeof(desc_extent->cluster_idx[0]); i++) {
4474 				cluster_idx = desc_extent->cluster_idx[i];
4475 				/*
4476 				 * cluster_idx = 0 means an unallocated cluster - don't mark that
4477 				 * in the used cluster map.
4478 				 */
4479 				if (cluster_idx != 0) {
4480 					if (cluster_idx < desc_extent->start_cluster_idx &&
4481 					    cluster_idx >= desc_extent->start_cluster_idx + cluster_count) {
4482 						return -EINVAL;
4483 					}
4484 					spdk_bit_array_set(ctx->used_clusters, cluster_idx);
4485 					if (bs->num_free_clusters == 0) {
4486 						return -ENOSPC;
4487 					}
4488 					bs->num_free_clusters--;
4489 				}
4490 				cluster_count++;
4491 			}
4492 
4493 			if (cluster_count == 0) {
4494 				return -EINVAL;
4495 			}
4496 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) {
4497 			/* Skip this item */
4498 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) {
4499 			/* Skip this item */
4500 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) {
4501 			/* Skip this item */
4502 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE) {
4503 			struct spdk_blob_md_descriptor_extent_table *desc_extent_table;
4504 			uint32_t num_extent_pages = ctx->num_extent_pages;
4505 			uint32_t i;
4506 			size_t extent_pages_length;
4507 			void *tmp;
4508 
4509 			desc_extent_table = (struct spdk_blob_md_descriptor_extent_table *)desc;
4510 			extent_pages_length = desc_extent_table->length - sizeof(desc_extent_table->num_clusters);
4511 
4512 			if (desc_extent_table->length == 0 ||
4513 			    (extent_pages_length % sizeof(desc_extent_table->extent_page[0]) != 0)) {
4514 				return -EINVAL;
4515 			}
4516 
4517 			for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) {
4518 				if (desc_extent_table->extent_page[i].page_idx != 0) {
4519 					if (desc_extent_table->extent_page[i].num_pages != 1) {
4520 						return -EINVAL;
4521 					}
4522 					num_extent_pages += 1;
4523 				}
4524 			}
4525 
4526 			if (num_extent_pages > 0) {
4527 				tmp = realloc(ctx->extent_page_num, num_extent_pages * sizeof(uint32_t));
4528 				if (tmp == NULL) {
4529 					return -ENOMEM;
4530 				}
4531 				ctx->extent_page_num = tmp;
4532 
4533 				/* Extent table entries contain md page numbers for extent pages.
4534 				 * Zeroes represent unallocated extent pages, those are run-length-encoded.
4535 				 */
4536 				for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) {
4537 					if (desc_extent_table->extent_page[i].page_idx != 0) {
4538 						ctx->extent_page_num[ctx->num_extent_pages] = desc_extent_table->extent_page[i].page_idx;
4539 						ctx->num_extent_pages += 1;
4540 					}
4541 				}
4542 			}
4543 		} else {
4544 			/* Error */
4545 			return -EINVAL;
4546 		}
4547 		/* Advance to the next descriptor */
4548 		cur_desc += sizeof(*desc) + desc->length;
4549 		if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) {
4550 			break;
4551 		}
4552 		desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc);
4553 	}
4554 	return 0;
4555 }
4556 
4557 static bool
4558 bs_load_cur_extent_page_valid(struct spdk_blob_md_page *page)
4559 {
4560 	uint32_t crc;
4561 	struct spdk_blob_md_descriptor *desc = (struct spdk_blob_md_descriptor *)page->descriptors;
4562 	size_t desc_len;
4563 
4564 	crc = blob_md_page_calc_crc(page);
4565 	if (crc != page->crc) {
4566 		return false;
4567 	}
4568 
4569 	/* Extent page should always be of sequence num 0. */
4570 	if (page->sequence_num != 0) {
4571 		return false;
4572 	}
4573 
4574 	/* Descriptor type must be EXTENT_PAGE. */
4575 	if (desc->type != SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) {
4576 		return false;
4577 	}
4578 
4579 	/* Descriptor length cannot exceed the page. */
4580 	desc_len = sizeof(*desc) + desc->length;
4581 	if (desc_len > sizeof(page->descriptors)) {
4582 		return false;
4583 	}
4584 
4585 	/* It has to be the only descriptor in the page. */
4586 	if (desc_len + sizeof(*desc) <= sizeof(page->descriptors)) {
4587 		desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + desc_len);
4588 		if (desc->length != 0) {
4589 			return false;
4590 		}
4591 	}
4592 
4593 	return true;
4594 }
4595 
4596 static bool
4597 bs_load_cur_md_page_valid(struct spdk_bs_load_ctx *ctx)
4598 {
4599 	uint32_t crc;
4600 	struct spdk_blob_md_page *page = ctx->page;
4601 
4602 	crc = blob_md_page_calc_crc(page);
4603 	if (crc != page->crc) {
4604 		return false;
4605 	}
4606 
4607 	/* First page of a sequence should match the blobid. */
4608 	if (page->sequence_num == 0 &&
4609 	    bs_page_to_blobid(ctx->cur_page) != page->id) {
4610 		return false;
4611 	}
4612 	assert(bs_load_cur_extent_page_valid(page) == false);
4613 
4614 	return true;
4615 }
4616 
4617 static void bs_load_replay_cur_md_page(struct spdk_bs_load_ctx *ctx);
4618 
4619 static void
4620 bs_load_write_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4621 {
4622 	struct spdk_bs_load_ctx	*ctx = cb_arg;
4623 
4624 	if (bserrno != 0) {
4625 		bs_load_ctx_fail(ctx, bserrno);
4626 		return;
4627 	}
4628 
4629 	bs_load_complete(ctx);
4630 }
4631 
4632 static void
4633 bs_load_write_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4634 {
4635 	struct spdk_bs_load_ctx	*ctx = cb_arg;
4636 
4637 	spdk_free(ctx->mask);
4638 	ctx->mask = NULL;
4639 
4640 	if (bserrno != 0) {
4641 		bs_load_ctx_fail(ctx, bserrno);
4642 		return;
4643 	}
4644 
4645 	bs_write_used_clusters(seq, ctx, bs_load_write_used_clusters_cpl);
4646 }
4647 
4648 static void
4649 bs_load_write_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4650 {
4651 	struct spdk_bs_load_ctx	*ctx = cb_arg;
4652 
4653 	spdk_free(ctx->mask);
4654 	ctx->mask = NULL;
4655 
4656 	if (bserrno != 0) {
4657 		bs_load_ctx_fail(ctx, bserrno);
4658 		return;
4659 	}
4660 
4661 	bs_write_used_blobids(seq, ctx, bs_load_write_used_blobids_cpl);
4662 }
4663 
4664 static void
4665 bs_load_write_used_md(struct spdk_bs_load_ctx *ctx)
4666 {
4667 	bs_write_used_md(ctx->seq, ctx, bs_load_write_used_pages_cpl);
4668 }
4669 
4670 static void
4671 bs_load_replay_md_chain_cpl(struct spdk_bs_load_ctx *ctx)
4672 {
4673 	uint64_t num_md_clusters;
4674 	uint64_t i;
4675 
4676 	ctx->in_page_chain = false;
4677 
4678 	do {
4679 		ctx->page_index++;
4680 	} while (spdk_bit_array_get(ctx->bs->used_md_pages, ctx->page_index) == true);
4681 
4682 	if (ctx->page_index < ctx->super->md_len) {
4683 		ctx->cur_page = ctx->page_index;
4684 		bs_load_replay_cur_md_page(ctx);
4685 	} else {
4686 		/* Claim all of the clusters used by the metadata */
4687 		num_md_clusters = spdk_divide_round_up(
4688 					  ctx->super->md_start + ctx->super->md_len, ctx->bs->pages_per_cluster);
4689 		for (i = 0; i < num_md_clusters; i++) {
4690 			spdk_bit_array_set(ctx->used_clusters, i);
4691 		}
4692 		ctx->bs->num_free_clusters -= num_md_clusters;
4693 		spdk_free(ctx->page);
4694 		bs_load_write_used_md(ctx);
4695 	}
4696 }
4697 
4698 static void
4699 bs_load_replay_extent_page_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4700 {
4701 	struct spdk_bs_load_ctx *ctx = cb_arg;
4702 	uint32_t page_num;
4703 	uint64_t i;
4704 
4705 	if (bserrno != 0) {
4706 		spdk_free(ctx->extent_pages);
4707 		bs_load_ctx_fail(ctx, bserrno);
4708 		return;
4709 	}
4710 
4711 	for (i = 0; i < ctx->num_extent_pages; i++) {
4712 		/* Extent pages are only read when present within in chain md.
4713 		 * Integrity of md is not right if that page was not a valid extent page. */
4714 		if (bs_load_cur_extent_page_valid(&ctx->extent_pages[i]) != true) {
4715 			spdk_free(ctx->extent_pages);
4716 			bs_load_ctx_fail(ctx, -EILSEQ);
4717 			return;
4718 		}
4719 
4720 		page_num = ctx->extent_page_num[i];
4721 		spdk_bit_array_set(ctx->bs->used_md_pages, page_num);
4722 		if (bs_load_replay_md_parse_page(ctx, &ctx->extent_pages[i])) {
4723 			spdk_free(ctx->extent_pages);
4724 			bs_load_ctx_fail(ctx, -EILSEQ);
4725 			return;
4726 		}
4727 	}
4728 
4729 	spdk_free(ctx->extent_pages);
4730 	free(ctx->extent_page_num);
4731 	ctx->extent_page_num = NULL;
4732 	ctx->num_extent_pages = 0;
4733 
4734 	bs_load_replay_md_chain_cpl(ctx);
4735 }
4736 
4737 static void
4738 bs_load_replay_extent_pages(struct spdk_bs_load_ctx *ctx)
4739 {
4740 	spdk_bs_batch_t *batch;
4741 	uint32_t page;
4742 	uint64_t lba;
4743 	uint64_t i;
4744 
4745 	ctx->extent_pages = spdk_zmalloc(SPDK_BS_PAGE_SIZE * ctx->num_extent_pages, 0,
4746 					 NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
4747 	if (!ctx->extent_pages) {
4748 		bs_load_ctx_fail(ctx, -ENOMEM);
4749 		return;
4750 	}
4751 
4752 	batch = bs_sequence_to_batch(ctx->seq, bs_load_replay_extent_page_cpl, ctx);
4753 
4754 	for (i = 0; i < ctx->num_extent_pages; i++) {
4755 		page = ctx->extent_page_num[i];
4756 		assert(page < ctx->super->md_len);
4757 		lba = bs_md_page_to_lba(ctx->bs, page);
4758 		bs_batch_read_dev(batch, &ctx->extent_pages[i], lba,
4759 				  bs_byte_to_lba(ctx->bs, SPDK_BS_PAGE_SIZE));
4760 	}
4761 
4762 	bs_batch_close(batch);
4763 }
4764 
4765 static void
4766 bs_load_replay_md_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4767 {
4768 	struct spdk_bs_load_ctx *ctx = cb_arg;
4769 	uint32_t page_num;
4770 	struct spdk_blob_md_page *page;
4771 
4772 	if (bserrno != 0) {
4773 		bs_load_ctx_fail(ctx, bserrno);
4774 		return;
4775 	}
4776 
4777 	page_num = ctx->cur_page;
4778 	page = ctx->page;
4779 	if (bs_load_cur_md_page_valid(ctx) == true) {
4780 		if (page->sequence_num == 0 || ctx->in_page_chain == true) {
4781 			spdk_spin_lock(&ctx->bs->used_lock);
4782 			bs_claim_md_page(ctx->bs, page_num);
4783 			spdk_spin_unlock(&ctx->bs->used_lock);
4784 			if (page->sequence_num == 0) {
4785 				SPDK_NOTICELOG("Recover: blob 0x%" PRIx32 "\n", page_num);
4786 				spdk_bit_array_set(ctx->bs->used_blobids, page_num);
4787 			}
4788 			if (bs_load_replay_md_parse_page(ctx, page)) {
4789 				bs_load_ctx_fail(ctx, -EILSEQ);
4790 				return;
4791 			}
4792 			if (page->next != SPDK_INVALID_MD_PAGE) {
4793 				ctx->in_page_chain = true;
4794 				ctx->cur_page = page->next;
4795 				bs_load_replay_cur_md_page(ctx);
4796 				return;
4797 			}
4798 			if (ctx->num_extent_pages != 0) {
4799 				bs_load_replay_extent_pages(ctx);
4800 				return;
4801 			}
4802 		}
4803 	}
4804 	bs_load_replay_md_chain_cpl(ctx);
4805 }
4806 
4807 static void
4808 bs_load_replay_cur_md_page(struct spdk_bs_load_ctx *ctx)
4809 {
4810 	uint64_t lba;
4811 
4812 	assert(ctx->cur_page < ctx->super->md_len);
4813 	lba = bs_md_page_to_lba(ctx->bs, ctx->cur_page);
4814 	bs_sequence_read_dev(ctx->seq, ctx->page, lba,
4815 			     bs_byte_to_lba(ctx->bs, SPDK_BS_PAGE_SIZE),
4816 			     bs_load_replay_md_cpl, ctx);
4817 }
4818 
4819 static void
4820 bs_load_replay_md(struct spdk_bs_load_ctx *ctx)
4821 {
4822 	ctx->page_index = 0;
4823 	ctx->cur_page = 0;
4824 	ctx->page = spdk_zmalloc(SPDK_BS_PAGE_SIZE, 0,
4825 				 NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
4826 	if (!ctx->page) {
4827 		bs_load_ctx_fail(ctx, -ENOMEM);
4828 		return;
4829 	}
4830 	bs_load_replay_cur_md_page(ctx);
4831 }
4832 
4833 static void
4834 bs_recover(struct spdk_bs_load_ctx *ctx)
4835 {
4836 	int		rc;
4837 
4838 	SPDK_NOTICELOG("Performing recovery on blobstore\n");
4839 	rc = spdk_bit_array_resize(&ctx->bs->used_md_pages, ctx->super->md_len);
4840 	if (rc < 0) {
4841 		bs_load_ctx_fail(ctx, -ENOMEM);
4842 		return;
4843 	}
4844 
4845 	rc = spdk_bit_array_resize(&ctx->bs->used_blobids, ctx->super->md_len);
4846 	if (rc < 0) {
4847 		bs_load_ctx_fail(ctx, -ENOMEM);
4848 		return;
4849 	}
4850 
4851 	rc = spdk_bit_array_resize(&ctx->used_clusters, ctx->bs->total_clusters);
4852 	if (rc < 0) {
4853 		bs_load_ctx_fail(ctx, -ENOMEM);
4854 		return;
4855 	}
4856 
4857 	rc = spdk_bit_array_resize(&ctx->bs->open_blobids, ctx->super->md_len);
4858 	if (rc < 0) {
4859 		bs_load_ctx_fail(ctx, -ENOMEM);
4860 		return;
4861 	}
4862 
4863 	ctx->bs->num_free_clusters = ctx->bs->total_clusters;
4864 	bs_load_replay_md(ctx);
4865 }
4866 
4867 static int
4868 bs_parse_super(struct spdk_bs_load_ctx *ctx)
4869 {
4870 	int rc;
4871 
4872 	if (ctx->super->size == 0) {
4873 		ctx->super->size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen;
4874 	}
4875 
4876 	if (ctx->super->io_unit_size == 0) {
4877 		ctx->super->io_unit_size = SPDK_BS_PAGE_SIZE;
4878 	}
4879 
4880 	ctx->bs->clean = 1;
4881 	ctx->bs->cluster_sz = ctx->super->cluster_size;
4882 	ctx->bs->total_clusters = ctx->super->size / ctx->super->cluster_size;
4883 	ctx->bs->pages_per_cluster = ctx->bs->cluster_sz / SPDK_BS_PAGE_SIZE;
4884 	if (spdk_u32_is_pow2(ctx->bs->pages_per_cluster)) {
4885 		ctx->bs->pages_per_cluster_shift = spdk_u32log2(ctx->bs->pages_per_cluster);
4886 	}
4887 	ctx->bs->io_unit_size = ctx->super->io_unit_size;
4888 	rc = spdk_bit_array_resize(&ctx->used_clusters, ctx->bs->total_clusters);
4889 	if (rc < 0) {
4890 		return -ENOMEM;
4891 	}
4892 	ctx->bs->md_start = ctx->super->md_start;
4893 	ctx->bs->md_len = ctx->super->md_len;
4894 	rc = spdk_bit_array_resize(&ctx->bs->open_blobids, ctx->bs->md_len);
4895 	if (rc < 0) {
4896 		return -ENOMEM;
4897 	}
4898 
4899 	ctx->bs->total_data_clusters = ctx->bs->total_clusters - spdk_divide_round_up(
4900 					       ctx->bs->md_start + ctx->bs->md_len, ctx->bs->pages_per_cluster);
4901 	ctx->bs->super_blob = ctx->super->super_blob;
4902 	memcpy(&ctx->bs->bstype, &ctx->super->bstype, sizeof(ctx->super->bstype));
4903 
4904 	return 0;
4905 }
4906 
4907 static void
4908 bs_load_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4909 {
4910 	struct spdk_bs_load_ctx *ctx = cb_arg;
4911 	int rc;
4912 
4913 	rc = bs_super_validate(ctx->super, ctx->bs);
4914 	if (rc != 0) {
4915 		bs_load_ctx_fail(ctx, rc);
4916 		return;
4917 	}
4918 
4919 	rc = bs_parse_super(ctx);
4920 	if (rc < 0) {
4921 		bs_load_ctx_fail(ctx, rc);
4922 		return;
4923 	}
4924 
4925 	if (ctx->super->used_blobid_mask_len == 0 || ctx->super->clean == 0 || ctx->force_recover) {
4926 		bs_recover(ctx);
4927 	} else {
4928 		bs_load_read_used_pages(ctx);
4929 	}
4930 }
4931 
4932 static inline int
4933 bs_opts_copy(struct spdk_bs_opts *src, struct spdk_bs_opts *dst)
4934 {
4935 
4936 	if (!src->opts_size) {
4937 		SPDK_ERRLOG("opts_size should not be zero value\n");
4938 		return -1;
4939 	}
4940 
4941 #define FIELD_OK(field) \
4942         offsetof(struct spdk_bs_opts, field) + sizeof(src->field) <= src->opts_size
4943 
4944 #define SET_FIELD(field) \
4945         if (FIELD_OK(field)) { \
4946                 dst->field = src->field; \
4947         } \
4948 
4949 	SET_FIELD(cluster_sz);
4950 	SET_FIELD(num_md_pages);
4951 	SET_FIELD(max_md_ops);
4952 	SET_FIELD(max_channel_ops);
4953 	SET_FIELD(clear_method);
4954 
4955 	if (FIELD_OK(bstype)) {
4956 		memcpy(&dst->bstype, &src->bstype, sizeof(dst->bstype));
4957 	}
4958 	SET_FIELD(iter_cb_fn);
4959 	SET_FIELD(iter_cb_arg);
4960 	SET_FIELD(force_recover);
4961 	SET_FIELD(esnap_bs_dev_create);
4962 	SET_FIELD(esnap_ctx);
4963 
4964 	dst->opts_size = src->opts_size;
4965 
4966 	/* You should not remove this statement, but need to update the assert statement
4967 	 * if you add a new field, and also add a corresponding SET_FIELD statement */
4968 	SPDK_STATIC_ASSERT(sizeof(struct spdk_bs_opts) == 88, "Incorrect size");
4969 
4970 #undef FIELD_OK
4971 #undef SET_FIELD
4972 
4973 	return 0;
4974 }
4975 
4976 void
4977 spdk_bs_load(struct spdk_bs_dev *dev, struct spdk_bs_opts *o,
4978 	     spdk_bs_op_with_handle_complete cb_fn, void *cb_arg)
4979 {
4980 	struct spdk_blob_store	*bs;
4981 	struct spdk_bs_cpl	cpl;
4982 	struct spdk_bs_load_ctx *ctx;
4983 	struct spdk_bs_opts	opts = {};
4984 	int err;
4985 
4986 	SPDK_DEBUGLOG(blob, "Loading blobstore from dev %p\n", dev);
4987 
4988 	if ((SPDK_BS_PAGE_SIZE % dev->blocklen) != 0) {
4989 		SPDK_DEBUGLOG(blob, "unsupported dev block length of %d\n", dev->blocklen);
4990 		dev->destroy(dev);
4991 		cb_fn(cb_arg, NULL, -EINVAL);
4992 		return;
4993 	}
4994 
4995 	spdk_bs_opts_init(&opts, sizeof(opts));
4996 	if (o) {
4997 		if (bs_opts_copy(o, &opts)) {
4998 			return;
4999 		}
5000 	}
5001 
5002 	if (opts.max_md_ops == 0 || opts.max_channel_ops == 0) {
5003 		dev->destroy(dev);
5004 		cb_fn(cb_arg, NULL, -EINVAL);
5005 		return;
5006 	}
5007 
5008 	err = bs_alloc(dev, &opts, &bs, &ctx);
5009 	if (err) {
5010 		dev->destroy(dev);
5011 		cb_fn(cb_arg, NULL, err);
5012 		return;
5013 	}
5014 
5015 	cpl.type = SPDK_BS_CPL_TYPE_BS_HANDLE;
5016 	cpl.u.bs_handle.cb_fn = cb_fn;
5017 	cpl.u.bs_handle.cb_arg = cb_arg;
5018 	cpl.u.bs_handle.bs = bs;
5019 
5020 	ctx->seq = bs_sequence_start_bs(bs->md_channel, &cpl);
5021 	if (!ctx->seq) {
5022 		spdk_free(ctx->super);
5023 		free(ctx);
5024 		bs_free(bs);
5025 		cb_fn(cb_arg, NULL, -ENOMEM);
5026 		return;
5027 	}
5028 
5029 	/* Read the super block */
5030 	bs_sequence_read_dev(ctx->seq, ctx->super, bs_page_to_lba(bs, 0),
5031 			     bs_byte_to_lba(bs, sizeof(*ctx->super)),
5032 			     bs_load_super_cpl, ctx);
5033 }
5034 
5035 /* END spdk_bs_load */
5036 
5037 /* START spdk_bs_dump */
5038 
5039 static void
5040 bs_dump_finish(spdk_bs_sequence_t *seq, struct spdk_bs_load_ctx *ctx, int bserrno)
5041 {
5042 	spdk_free(ctx->super);
5043 
5044 	/*
5045 	 * We need to defer calling bs_call_cpl() until after
5046 	 * dev destruction, so tuck these away for later use.
5047 	 */
5048 	ctx->bs->unload_err = bserrno;
5049 	memcpy(&ctx->bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl));
5050 	seq->cpl.type = SPDK_BS_CPL_TYPE_NONE;
5051 
5052 	bs_sequence_finish(seq, 0);
5053 	bs_free(ctx->bs);
5054 	free(ctx);
5055 }
5056 
5057 static void
5058 bs_dump_print_xattr(struct spdk_bs_load_ctx *ctx, struct spdk_blob_md_descriptor *desc)
5059 {
5060 	struct spdk_blob_md_descriptor_xattr *desc_xattr;
5061 	uint32_t i;
5062 	const char *type;
5063 
5064 	desc_xattr = (struct spdk_blob_md_descriptor_xattr *)desc;
5065 
5066 	if (desc_xattr->length !=
5067 	    sizeof(desc_xattr->name_length) + sizeof(desc_xattr->value_length) +
5068 	    desc_xattr->name_length + desc_xattr->value_length) {
5069 	}
5070 
5071 	memcpy(ctx->xattr_name, desc_xattr->name, desc_xattr->name_length);
5072 	ctx->xattr_name[desc_xattr->name_length] = '\0';
5073 	if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) {
5074 		type = "XATTR";
5075 	} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) {
5076 		type = "XATTR_INTERNAL";
5077 	} else {
5078 		assert(false);
5079 		type = "XATTR_?";
5080 	}
5081 	fprintf(ctx->fp, "%s: name = \"%s\"\n", type, ctx->xattr_name);
5082 	fprintf(ctx->fp, "       value = \"");
5083 	ctx->print_xattr_fn(ctx->fp, ctx->super->bstype.bstype, ctx->xattr_name,
5084 			    (void *)((uintptr_t)desc_xattr->name + desc_xattr->name_length),
5085 			    desc_xattr->value_length);
5086 	fprintf(ctx->fp, "\"\n");
5087 	for (i = 0; i < desc_xattr->value_length; i++) {
5088 		if (i % 16 == 0) {
5089 			fprintf(ctx->fp, "               ");
5090 		}
5091 		fprintf(ctx->fp, "%02" PRIx8 " ", *((uint8_t *)desc_xattr->name + desc_xattr->name_length + i));
5092 		if ((i + 1) % 16 == 0) {
5093 			fprintf(ctx->fp, "\n");
5094 		}
5095 	}
5096 	if (i % 16 != 0) {
5097 		fprintf(ctx->fp, "\n");
5098 	}
5099 }
5100 
5101 struct type_flag_desc {
5102 	uint64_t mask;
5103 	uint64_t val;
5104 	const char *name;
5105 };
5106 
5107 static void
5108 bs_dump_print_type_bits(struct spdk_bs_load_ctx *ctx, uint64_t flags,
5109 			struct type_flag_desc *desc, size_t numflags)
5110 {
5111 	uint64_t covered = 0;
5112 	size_t i;
5113 
5114 	for (i = 0; i < numflags; i++) {
5115 		if ((desc[i].mask & flags) != desc[i].val) {
5116 			continue;
5117 		}
5118 		fprintf(ctx->fp, "\t\t 0x%016" PRIx64 " %s", desc[i].val, desc[i].name);
5119 		if (desc[i].mask != desc[i].val) {
5120 			fprintf(ctx->fp, " (mask 0x%" PRIx64 " value 0x%" PRIx64 ")",
5121 				desc[i].mask, desc[i].val);
5122 		}
5123 		fprintf(ctx->fp, "\n");
5124 		covered |= desc[i].mask;
5125 	}
5126 	if ((flags & ~covered) != 0) {
5127 		fprintf(ctx->fp, "\t\t 0x%016" PRIx64 " Unknown\n", flags & ~covered);
5128 	}
5129 }
5130 
5131 static void
5132 bs_dump_print_type_flags(struct spdk_bs_load_ctx *ctx, struct spdk_blob_md_descriptor *desc)
5133 {
5134 	struct spdk_blob_md_descriptor_flags *type_desc;
5135 #define ADD_FLAG(f) { f, f, #f }
5136 #define ADD_MASK_VAL(m, v) { m, v, #v }
5137 	static struct type_flag_desc invalid[] = {
5138 		ADD_FLAG(SPDK_BLOB_THIN_PROV),
5139 		ADD_FLAG(SPDK_BLOB_INTERNAL_XATTR),
5140 		ADD_FLAG(SPDK_BLOB_EXTENT_TABLE),
5141 	};
5142 	static struct type_flag_desc data_ro[] = {
5143 		ADD_FLAG(SPDK_BLOB_READ_ONLY),
5144 	};
5145 	static struct type_flag_desc md_ro[] = {
5146 		ADD_MASK_VAL(SPDK_BLOB_MD_RO_FLAGS_MASK, BLOB_CLEAR_WITH_DEFAULT),
5147 		ADD_MASK_VAL(SPDK_BLOB_MD_RO_FLAGS_MASK, BLOB_CLEAR_WITH_NONE),
5148 		ADD_MASK_VAL(SPDK_BLOB_MD_RO_FLAGS_MASK, BLOB_CLEAR_WITH_UNMAP),
5149 		ADD_MASK_VAL(SPDK_BLOB_MD_RO_FLAGS_MASK, BLOB_CLEAR_WITH_WRITE_ZEROES),
5150 	};
5151 #undef ADD_FLAG
5152 #undef ADD_MASK_VAL
5153 
5154 	type_desc = (struct spdk_blob_md_descriptor_flags *)desc;
5155 	fprintf(ctx->fp, "Flags:\n");
5156 	fprintf(ctx->fp, "\tinvalid: 0x%016" PRIx64 "\n", type_desc->invalid_flags);
5157 	bs_dump_print_type_bits(ctx, type_desc->invalid_flags, invalid,
5158 				SPDK_COUNTOF(invalid));
5159 	fprintf(ctx->fp, "\tdata_ro: 0x%016" PRIx64 "\n", type_desc->data_ro_flags);
5160 	bs_dump_print_type_bits(ctx, type_desc->data_ro_flags, data_ro,
5161 				SPDK_COUNTOF(data_ro));
5162 	fprintf(ctx->fp, "\t  md_ro: 0x%016" PRIx64 "\n", type_desc->md_ro_flags);
5163 	bs_dump_print_type_bits(ctx, type_desc->md_ro_flags, md_ro,
5164 				SPDK_COUNTOF(md_ro));
5165 }
5166 
5167 static void
5168 bs_dump_print_extent_table(struct spdk_bs_load_ctx *ctx, struct spdk_blob_md_descriptor *desc)
5169 {
5170 	struct spdk_blob_md_descriptor_extent_table *et_desc;
5171 	uint64_t num_extent_pages;
5172 	uint32_t et_idx;
5173 
5174 	et_desc = (struct spdk_blob_md_descriptor_extent_table *)desc;
5175 	num_extent_pages = (et_desc->length - sizeof(et_desc->num_clusters)) /
5176 			   sizeof(et_desc->extent_page[0]);
5177 
5178 	fprintf(ctx->fp, "Extent table:\n");
5179 	for (et_idx = 0; et_idx < num_extent_pages; et_idx++) {
5180 		if (et_desc->extent_page[et_idx].page_idx == 0) {
5181 			/* Zeroes represent unallocated extent pages. */
5182 			continue;
5183 		}
5184 		fprintf(ctx->fp, "\tExtent page: %5" PRIu32 " length %3" PRIu32
5185 			" at LBA %" PRIu64 "\n", et_desc->extent_page[et_idx].page_idx,
5186 			et_desc->extent_page[et_idx].num_pages,
5187 			bs_md_page_to_lba(ctx->bs, et_desc->extent_page[et_idx].page_idx));
5188 	}
5189 }
5190 
5191 static void
5192 bs_dump_print_md_page(struct spdk_bs_load_ctx *ctx)
5193 {
5194 	uint32_t page_idx = ctx->cur_page;
5195 	struct spdk_blob_md_page *page = ctx->page;
5196 	struct spdk_blob_md_descriptor *desc;
5197 	size_t cur_desc = 0;
5198 	uint32_t crc;
5199 
5200 	fprintf(ctx->fp, "=========\n");
5201 	fprintf(ctx->fp, "Metadata Page Index: %" PRIu32 " (0x%" PRIx32 ")\n", page_idx, page_idx);
5202 	fprintf(ctx->fp, "Start LBA: %" PRIu64 "\n", bs_md_page_to_lba(ctx->bs, page_idx));
5203 	fprintf(ctx->fp, "Blob ID: 0x%" PRIx64 "\n", page->id);
5204 	fprintf(ctx->fp, "Sequence: %" PRIu32 "\n", page->sequence_num);
5205 	if (page->next == SPDK_INVALID_MD_PAGE) {
5206 		fprintf(ctx->fp, "Next: None\n");
5207 	} else {
5208 		fprintf(ctx->fp, "Next: %" PRIu32 "\n", page->next);
5209 	}
5210 	fprintf(ctx->fp, "In used bit array%s:", ctx->super->clean ? "" : " (not clean: dubious)");
5211 	if (spdk_bit_array_get(ctx->bs->used_md_pages, page_idx)) {
5212 		fprintf(ctx->fp, " md");
5213 	}
5214 	if (spdk_bit_array_get(ctx->bs->used_blobids, page_idx)) {
5215 		fprintf(ctx->fp, " blob");
5216 	}
5217 	fprintf(ctx->fp, "\n");
5218 
5219 	crc = blob_md_page_calc_crc(page);
5220 	fprintf(ctx->fp, "CRC: 0x%" PRIx32 " (%s)\n", page->crc, crc == page->crc ? "OK" : "Mismatch");
5221 
5222 	desc = (struct spdk_blob_md_descriptor *)page->descriptors;
5223 	while (cur_desc < sizeof(page->descriptors)) {
5224 		if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) {
5225 			if (desc->length == 0) {
5226 				/* If padding and length are 0, this terminates the page */
5227 				break;
5228 			}
5229 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE) {
5230 			struct spdk_blob_md_descriptor_extent_rle	*desc_extent_rle;
5231 			unsigned int				i;
5232 
5233 			desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)desc;
5234 
5235 			for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) {
5236 				if (desc_extent_rle->extents[i].cluster_idx != 0) {
5237 					fprintf(ctx->fp, "Allocated Extent - Start: %" PRIu32,
5238 						desc_extent_rle->extents[i].cluster_idx);
5239 				} else {
5240 					fprintf(ctx->fp, "Unallocated Extent - ");
5241 				}
5242 				fprintf(ctx->fp, " Length: %" PRIu32, desc_extent_rle->extents[i].length);
5243 				fprintf(ctx->fp, "\n");
5244 			}
5245 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) {
5246 			struct spdk_blob_md_descriptor_extent_page	*desc_extent;
5247 			unsigned int					i;
5248 
5249 			desc_extent = (struct spdk_blob_md_descriptor_extent_page *)desc;
5250 
5251 			for (i = 0; i < desc_extent->length / sizeof(desc_extent->cluster_idx[0]); i++) {
5252 				if (desc_extent->cluster_idx[i] != 0) {
5253 					fprintf(ctx->fp, "Allocated Extent - Start: %" PRIu32,
5254 						desc_extent->cluster_idx[i]);
5255 				} else {
5256 					fprintf(ctx->fp, "Unallocated Extent");
5257 				}
5258 				fprintf(ctx->fp, "\n");
5259 			}
5260 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) {
5261 			bs_dump_print_xattr(ctx, desc);
5262 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) {
5263 			bs_dump_print_xattr(ctx, desc);
5264 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) {
5265 			bs_dump_print_type_flags(ctx, desc);
5266 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE) {
5267 			bs_dump_print_extent_table(ctx, desc);
5268 		} else {
5269 			/* Error */
5270 			fprintf(ctx->fp, "Unknown descriptor type %" PRIu8 "\n", desc->type);
5271 		}
5272 		/* Advance to the next descriptor */
5273 		cur_desc += sizeof(*desc) + desc->length;
5274 		if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) {
5275 			break;
5276 		}
5277 		desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc);
5278 	}
5279 }
5280 
5281 static void
5282 bs_dump_read_md_page_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
5283 {
5284 	struct spdk_bs_load_ctx *ctx = cb_arg;
5285 
5286 	if (bserrno != 0) {
5287 		bs_dump_finish(seq, ctx, bserrno);
5288 		return;
5289 	}
5290 
5291 	if (ctx->page->id != 0) {
5292 		bs_dump_print_md_page(ctx);
5293 	}
5294 
5295 	ctx->cur_page++;
5296 
5297 	if (ctx->cur_page < ctx->super->md_len) {
5298 		bs_dump_read_md_page(seq, ctx);
5299 	} else {
5300 		spdk_free(ctx->page);
5301 		bs_dump_finish(seq, ctx, 0);
5302 	}
5303 }
5304 
5305 static void
5306 bs_dump_read_md_page(spdk_bs_sequence_t *seq, void *cb_arg)
5307 {
5308 	struct spdk_bs_load_ctx *ctx = cb_arg;
5309 	uint64_t lba;
5310 
5311 	assert(ctx->cur_page < ctx->super->md_len);
5312 	lba = bs_page_to_lba(ctx->bs, ctx->super->md_start + ctx->cur_page);
5313 	bs_sequence_read_dev(seq, ctx->page, lba,
5314 			     bs_byte_to_lba(ctx->bs, SPDK_BS_PAGE_SIZE),
5315 			     bs_dump_read_md_page_cpl, ctx);
5316 }
5317 
5318 static void
5319 bs_dump_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
5320 {
5321 	struct spdk_bs_load_ctx *ctx = cb_arg;
5322 	int rc;
5323 
5324 	fprintf(ctx->fp, "Signature: \"%.8s\" ", ctx->super->signature);
5325 	if (memcmp(ctx->super->signature, SPDK_BS_SUPER_BLOCK_SIG,
5326 		   sizeof(ctx->super->signature)) != 0) {
5327 		fprintf(ctx->fp, "(Mismatch)\n");
5328 		bs_dump_finish(seq, ctx, bserrno);
5329 		return;
5330 	} else {
5331 		fprintf(ctx->fp, "(OK)\n");
5332 	}
5333 	fprintf(ctx->fp, "Version: %" PRIu32 "\n", ctx->super->version);
5334 	fprintf(ctx->fp, "CRC: 0x%x (%s)\n", ctx->super->crc,
5335 		(ctx->super->crc == blob_md_page_calc_crc(ctx->super)) ? "OK" : "Mismatch");
5336 	fprintf(ctx->fp, "Blobstore Type: %.*s\n", SPDK_BLOBSTORE_TYPE_LENGTH, ctx->super->bstype.bstype);
5337 	fprintf(ctx->fp, "Cluster Size: %" PRIu32 "\n", ctx->super->cluster_size);
5338 	fprintf(ctx->fp, "Super Blob ID: ");
5339 	if (ctx->super->super_blob == SPDK_BLOBID_INVALID) {
5340 		fprintf(ctx->fp, "(None)\n");
5341 	} else {
5342 		fprintf(ctx->fp, "0x%" PRIx64 "\n", ctx->super->super_blob);
5343 	}
5344 	fprintf(ctx->fp, "Clean: %" PRIu32 "\n", ctx->super->clean);
5345 	fprintf(ctx->fp, "Used Metadata Page Mask Start: %" PRIu32 "\n", ctx->super->used_page_mask_start);
5346 	fprintf(ctx->fp, "Used Metadata Page Mask Length: %" PRIu32 "\n", ctx->super->used_page_mask_len);
5347 	fprintf(ctx->fp, "Used Cluster Mask Start: %" PRIu32 "\n", ctx->super->used_cluster_mask_start);
5348 	fprintf(ctx->fp, "Used Cluster Mask Length: %" PRIu32 "\n", ctx->super->used_cluster_mask_len);
5349 	fprintf(ctx->fp, "Used Blob ID Mask Start: %" PRIu32 "\n", ctx->super->used_blobid_mask_start);
5350 	fprintf(ctx->fp, "Used Blob ID Mask Length: %" PRIu32 "\n", ctx->super->used_blobid_mask_len);
5351 	fprintf(ctx->fp, "Metadata Start: %" PRIu32 "\n", ctx->super->md_start);
5352 	fprintf(ctx->fp, "Metadata Length: %" PRIu32 "\n", ctx->super->md_len);
5353 
5354 	ctx->cur_page = 0;
5355 	ctx->page = spdk_zmalloc(SPDK_BS_PAGE_SIZE, 0,
5356 				 NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
5357 	if (!ctx->page) {
5358 		bs_dump_finish(seq, ctx, -ENOMEM);
5359 		return;
5360 	}
5361 
5362 	rc = bs_parse_super(ctx);
5363 	if (rc < 0) {
5364 		bs_load_ctx_fail(ctx, rc);
5365 		return;
5366 	}
5367 
5368 	bs_load_read_used_pages(ctx);
5369 }
5370 
5371 void
5372 spdk_bs_dump(struct spdk_bs_dev *dev, FILE *fp, spdk_bs_dump_print_xattr print_xattr_fn,
5373 	     spdk_bs_op_complete cb_fn, void *cb_arg)
5374 {
5375 	struct spdk_blob_store	*bs;
5376 	struct spdk_bs_cpl	cpl;
5377 	struct spdk_bs_load_ctx *ctx;
5378 	struct spdk_bs_opts	opts = {};
5379 	int err;
5380 
5381 	SPDK_DEBUGLOG(blob, "Dumping blobstore from dev %p\n", dev);
5382 
5383 	spdk_bs_opts_init(&opts, sizeof(opts));
5384 
5385 	err = bs_alloc(dev, &opts, &bs, &ctx);
5386 	if (err) {
5387 		dev->destroy(dev);
5388 		cb_fn(cb_arg, err);
5389 		return;
5390 	}
5391 
5392 	ctx->dumping = true;
5393 	ctx->fp = fp;
5394 	ctx->print_xattr_fn = print_xattr_fn;
5395 
5396 	cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
5397 	cpl.u.bs_basic.cb_fn = cb_fn;
5398 	cpl.u.bs_basic.cb_arg = cb_arg;
5399 
5400 	ctx->seq = bs_sequence_start_bs(bs->md_channel, &cpl);
5401 	if (!ctx->seq) {
5402 		spdk_free(ctx->super);
5403 		free(ctx);
5404 		bs_free(bs);
5405 		cb_fn(cb_arg, -ENOMEM);
5406 		return;
5407 	}
5408 
5409 	/* Read the super block */
5410 	bs_sequence_read_dev(ctx->seq, ctx->super, bs_page_to_lba(bs, 0),
5411 			     bs_byte_to_lba(bs, sizeof(*ctx->super)),
5412 			     bs_dump_super_cpl, ctx);
5413 }
5414 
5415 /* END spdk_bs_dump */
5416 
5417 /* START spdk_bs_init */
5418 
5419 static void
5420 bs_init_persist_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
5421 {
5422 	struct spdk_bs_load_ctx *ctx = cb_arg;
5423 
5424 	ctx->bs->used_clusters = spdk_bit_pool_create_from_array(ctx->used_clusters);
5425 	spdk_free(ctx->super);
5426 	free(ctx);
5427 
5428 	bs_sequence_finish(seq, bserrno);
5429 }
5430 
5431 static void
5432 bs_init_trim_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
5433 {
5434 	struct spdk_bs_load_ctx *ctx = cb_arg;
5435 
5436 	/* Write super block */
5437 	bs_sequence_write_dev(seq, ctx->super, bs_page_to_lba(ctx->bs, 0),
5438 			      bs_byte_to_lba(ctx->bs, sizeof(*ctx->super)),
5439 			      bs_init_persist_super_cpl, ctx);
5440 }
5441 
5442 void
5443 spdk_bs_init(struct spdk_bs_dev *dev, struct spdk_bs_opts *o,
5444 	     spdk_bs_op_with_handle_complete cb_fn, void *cb_arg)
5445 {
5446 	struct spdk_bs_load_ctx *ctx;
5447 	struct spdk_blob_store	*bs;
5448 	struct spdk_bs_cpl	cpl;
5449 	spdk_bs_sequence_t	*seq;
5450 	spdk_bs_batch_t		*batch;
5451 	uint64_t		num_md_lba;
5452 	uint64_t		num_md_pages;
5453 	uint64_t		num_md_clusters;
5454 	uint64_t		max_used_cluster_mask_len;
5455 	uint32_t		i;
5456 	struct spdk_bs_opts	opts = {};
5457 	int			rc;
5458 	uint64_t		lba, lba_count;
5459 
5460 	SPDK_DEBUGLOG(blob, "Initializing blobstore on dev %p\n", dev);
5461 
5462 	if ((SPDK_BS_PAGE_SIZE % dev->blocklen) != 0) {
5463 		SPDK_ERRLOG("unsupported dev block length of %d\n",
5464 			    dev->blocklen);
5465 		dev->destroy(dev);
5466 		cb_fn(cb_arg, NULL, -EINVAL);
5467 		return;
5468 	}
5469 
5470 	spdk_bs_opts_init(&opts, sizeof(opts));
5471 	if (o) {
5472 		if (bs_opts_copy(o, &opts)) {
5473 			return;
5474 		}
5475 	}
5476 
5477 	if (bs_opts_verify(&opts) != 0) {
5478 		dev->destroy(dev);
5479 		cb_fn(cb_arg, NULL, -EINVAL);
5480 		return;
5481 	}
5482 
5483 	rc = bs_alloc(dev, &opts, &bs, &ctx);
5484 	if (rc) {
5485 		dev->destroy(dev);
5486 		cb_fn(cb_arg, NULL, rc);
5487 		return;
5488 	}
5489 
5490 	if (opts.num_md_pages == SPDK_BLOB_OPTS_NUM_MD_PAGES) {
5491 		/* By default, allocate 1 page per cluster.
5492 		 * Technically, this over-allocates metadata
5493 		 * because more metadata will reduce the number
5494 		 * of usable clusters. This can be addressed with
5495 		 * more complex math in the future.
5496 		 */
5497 		bs->md_len = bs->total_clusters;
5498 	} else {
5499 		bs->md_len = opts.num_md_pages;
5500 	}
5501 	rc = spdk_bit_array_resize(&bs->used_md_pages, bs->md_len);
5502 	if (rc < 0) {
5503 		spdk_free(ctx->super);
5504 		free(ctx);
5505 		bs_free(bs);
5506 		cb_fn(cb_arg, NULL, -ENOMEM);
5507 		return;
5508 	}
5509 
5510 	rc = spdk_bit_array_resize(&bs->used_blobids, bs->md_len);
5511 	if (rc < 0) {
5512 		spdk_free(ctx->super);
5513 		free(ctx);
5514 		bs_free(bs);
5515 		cb_fn(cb_arg, NULL, -ENOMEM);
5516 		return;
5517 	}
5518 
5519 	rc = spdk_bit_array_resize(&bs->open_blobids, bs->md_len);
5520 	if (rc < 0) {
5521 		spdk_free(ctx->super);
5522 		free(ctx);
5523 		bs_free(bs);
5524 		cb_fn(cb_arg, NULL, -ENOMEM);
5525 		return;
5526 	}
5527 
5528 	memcpy(ctx->super->signature, SPDK_BS_SUPER_BLOCK_SIG,
5529 	       sizeof(ctx->super->signature));
5530 	ctx->super->version = SPDK_BS_VERSION;
5531 	ctx->super->length = sizeof(*ctx->super);
5532 	ctx->super->super_blob = bs->super_blob;
5533 	ctx->super->clean = 0;
5534 	ctx->super->cluster_size = bs->cluster_sz;
5535 	ctx->super->io_unit_size = bs->io_unit_size;
5536 	memcpy(&ctx->super->bstype, &bs->bstype, sizeof(bs->bstype));
5537 
5538 	/* Calculate how many pages the metadata consumes at the front
5539 	 * of the disk.
5540 	 */
5541 
5542 	/* The super block uses 1 page */
5543 	num_md_pages = 1;
5544 
5545 	/* The used_md_pages mask requires 1 bit per metadata page, rounded
5546 	 * up to the nearest page, plus a header.
5547 	 */
5548 	ctx->super->used_page_mask_start = num_md_pages;
5549 	ctx->super->used_page_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) +
5550 					 spdk_divide_round_up(bs->md_len, 8),
5551 					 SPDK_BS_PAGE_SIZE);
5552 	num_md_pages += ctx->super->used_page_mask_len;
5553 
5554 	/* The used_clusters mask requires 1 bit per cluster, rounded
5555 	 * up to the nearest page, plus a header.
5556 	 */
5557 	ctx->super->used_cluster_mask_start = num_md_pages;
5558 	ctx->super->used_cluster_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) +
5559 					    spdk_divide_round_up(bs->total_clusters, 8),
5560 					    SPDK_BS_PAGE_SIZE);
5561 	/* The blobstore might be extended, then the used_cluster bitmap will need more space.
5562 	 * Here we calculate the max clusters we can support according to the
5563 	 * num_md_pages (bs->md_len).
5564 	 */
5565 	max_used_cluster_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) +
5566 				    spdk_divide_round_up(bs->md_len, 8),
5567 				    SPDK_BS_PAGE_SIZE);
5568 	max_used_cluster_mask_len = spdk_max(max_used_cluster_mask_len,
5569 					     ctx->super->used_cluster_mask_len);
5570 	num_md_pages += max_used_cluster_mask_len;
5571 
5572 	/* The used_blobids mask requires 1 bit per metadata page, rounded
5573 	 * up to the nearest page, plus a header.
5574 	 */
5575 	ctx->super->used_blobid_mask_start = num_md_pages;
5576 	ctx->super->used_blobid_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) +
5577 					   spdk_divide_round_up(bs->md_len, 8),
5578 					   SPDK_BS_PAGE_SIZE);
5579 	num_md_pages += ctx->super->used_blobid_mask_len;
5580 
5581 	/* The metadata region size was chosen above */
5582 	ctx->super->md_start = bs->md_start = num_md_pages;
5583 	ctx->super->md_len = bs->md_len;
5584 	num_md_pages += bs->md_len;
5585 
5586 	num_md_lba = bs_page_to_lba(bs, num_md_pages);
5587 
5588 	ctx->super->size = dev->blockcnt * dev->blocklen;
5589 
5590 	ctx->super->crc = blob_md_page_calc_crc(ctx->super);
5591 
5592 	num_md_clusters = spdk_divide_round_up(num_md_pages, bs->pages_per_cluster);
5593 	if (num_md_clusters > bs->total_clusters) {
5594 		SPDK_ERRLOG("Blobstore metadata cannot use more clusters than is available, "
5595 			    "please decrease number of pages reserved for metadata "
5596 			    "or increase cluster size.\n");
5597 		spdk_free(ctx->super);
5598 		spdk_bit_array_free(&ctx->used_clusters);
5599 		free(ctx);
5600 		bs_free(bs);
5601 		cb_fn(cb_arg, NULL, -ENOMEM);
5602 		return;
5603 	}
5604 	/* Claim all of the clusters used by the metadata */
5605 	for (i = 0; i < num_md_clusters; i++) {
5606 		spdk_bit_array_set(ctx->used_clusters, i);
5607 	}
5608 
5609 	bs->num_free_clusters -= num_md_clusters;
5610 	bs->total_data_clusters = bs->num_free_clusters;
5611 
5612 	cpl.type = SPDK_BS_CPL_TYPE_BS_HANDLE;
5613 	cpl.u.bs_handle.cb_fn = cb_fn;
5614 	cpl.u.bs_handle.cb_arg = cb_arg;
5615 	cpl.u.bs_handle.bs = bs;
5616 
5617 	seq = bs_sequence_start_bs(bs->md_channel, &cpl);
5618 	if (!seq) {
5619 		spdk_free(ctx->super);
5620 		free(ctx);
5621 		bs_free(bs);
5622 		cb_fn(cb_arg, NULL, -ENOMEM);
5623 		return;
5624 	}
5625 
5626 	batch = bs_sequence_to_batch(seq, bs_init_trim_cpl, ctx);
5627 
5628 	/* Clear metadata space */
5629 	bs_batch_write_zeroes_dev(batch, 0, num_md_lba);
5630 
5631 	lba = num_md_lba;
5632 	lba_count = ctx->bs->dev->blockcnt - lba;
5633 	switch (opts.clear_method) {
5634 	case BS_CLEAR_WITH_UNMAP:
5635 		/* Trim data clusters */
5636 		bs_batch_unmap_dev(batch, lba, lba_count);
5637 		break;
5638 	case BS_CLEAR_WITH_WRITE_ZEROES:
5639 		/* Write_zeroes to data clusters */
5640 		bs_batch_write_zeroes_dev(batch, lba, lba_count);
5641 		break;
5642 	case BS_CLEAR_WITH_NONE:
5643 	default:
5644 		break;
5645 	}
5646 
5647 	bs_batch_close(batch);
5648 }
5649 
5650 /* END spdk_bs_init */
5651 
5652 /* START spdk_bs_destroy */
5653 
5654 static void
5655 bs_destroy_trim_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
5656 {
5657 	struct spdk_bs_load_ctx *ctx = cb_arg;
5658 	struct spdk_blob_store *bs = ctx->bs;
5659 
5660 	/*
5661 	 * We need to defer calling bs_call_cpl() until after
5662 	 * dev destruction, so tuck these away for later use.
5663 	 */
5664 	bs->unload_err = bserrno;
5665 	memcpy(&bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl));
5666 	seq->cpl.type = SPDK_BS_CPL_TYPE_NONE;
5667 
5668 	bs_sequence_finish(seq, bserrno);
5669 
5670 	bs_free(bs);
5671 	free(ctx);
5672 }
5673 
5674 void
5675 spdk_bs_destroy(struct spdk_blob_store *bs, spdk_bs_op_complete cb_fn,
5676 		void *cb_arg)
5677 {
5678 	struct spdk_bs_cpl	cpl;
5679 	spdk_bs_sequence_t	*seq;
5680 	struct spdk_bs_load_ctx *ctx;
5681 
5682 	SPDK_DEBUGLOG(blob, "Destroying blobstore\n");
5683 
5684 	if (!RB_EMPTY(&bs->open_blobs)) {
5685 		SPDK_ERRLOG("Blobstore still has open blobs\n");
5686 		cb_fn(cb_arg, -EBUSY);
5687 		return;
5688 	}
5689 
5690 	cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
5691 	cpl.u.bs_basic.cb_fn = cb_fn;
5692 	cpl.u.bs_basic.cb_arg = cb_arg;
5693 
5694 	ctx = calloc(1, sizeof(*ctx));
5695 	if (!ctx) {
5696 		cb_fn(cb_arg, -ENOMEM);
5697 		return;
5698 	}
5699 
5700 	ctx->bs = bs;
5701 
5702 	seq = bs_sequence_start_bs(bs->md_channel, &cpl);
5703 	if (!seq) {
5704 		free(ctx);
5705 		cb_fn(cb_arg, -ENOMEM);
5706 		return;
5707 	}
5708 
5709 	/* Write zeroes to the super block */
5710 	bs_sequence_write_zeroes_dev(seq,
5711 				     bs_page_to_lba(bs, 0),
5712 				     bs_byte_to_lba(bs, sizeof(struct spdk_bs_super_block)),
5713 				     bs_destroy_trim_cpl, ctx);
5714 }
5715 
5716 /* END spdk_bs_destroy */
5717 
5718 /* START spdk_bs_unload */
5719 
5720 static void
5721 bs_unload_finish(struct spdk_bs_load_ctx *ctx, int bserrno)
5722 {
5723 	spdk_bs_sequence_t *seq = ctx->seq;
5724 
5725 	spdk_free(ctx->super);
5726 
5727 	/*
5728 	 * We need to defer calling bs_call_cpl() until after
5729 	 * dev destruction, so tuck these away for later use.
5730 	 */
5731 	ctx->bs->unload_err = bserrno;
5732 	memcpy(&ctx->bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl));
5733 	seq->cpl.type = SPDK_BS_CPL_TYPE_NONE;
5734 
5735 	bs_sequence_finish(seq, bserrno);
5736 
5737 	bs_free(ctx->bs);
5738 	free(ctx);
5739 }
5740 
5741 static void
5742 bs_unload_write_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
5743 {
5744 	struct spdk_bs_load_ctx	*ctx = cb_arg;
5745 
5746 	bs_unload_finish(ctx, bserrno);
5747 }
5748 
5749 static void
5750 bs_unload_write_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
5751 {
5752 	struct spdk_bs_load_ctx	*ctx = cb_arg;
5753 
5754 	spdk_free(ctx->mask);
5755 
5756 	if (bserrno != 0) {
5757 		bs_unload_finish(ctx, bserrno);
5758 		return;
5759 	}
5760 
5761 	ctx->super->clean = 1;
5762 
5763 	bs_write_super(seq, ctx->bs, ctx->super, bs_unload_write_super_cpl, ctx);
5764 }
5765 
5766 static void
5767 bs_unload_write_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
5768 {
5769 	struct spdk_bs_load_ctx	*ctx = cb_arg;
5770 
5771 	spdk_free(ctx->mask);
5772 	ctx->mask = NULL;
5773 
5774 	if (bserrno != 0) {
5775 		bs_unload_finish(ctx, bserrno);
5776 		return;
5777 	}
5778 
5779 	bs_write_used_clusters(seq, ctx, bs_unload_write_used_clusters_cpl);
5780 }
5781 
5782 static void
5783 bs_unload_write_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
5784 {
5785 	struct spdk_bs_load_ctx	*ctx = cb_arg;
5786 
5787 	spdk_free(ctx->mask);
5788 	ctx->mask = NULL;
5789 
5790 	if (bserrno != 0) {
5791 		bs_unload_finish(ctx, bserrno);
5792 		return;
5793 	}
5794 
5795 	bs_write_used_blobids(seq, ctx, bs_unload_write_used_blobids_cpl);
5796 }
5797 
5798 static void
5799 bs_unload_read_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
5800 {
5801 	struct spdk_bs_load_ctx	*ctx = cb_arg;
5802 	int rc;
5803 
5804 	if (bserrno != 0) {
5805 		bs_unload_finish(ctx, bserrno);
5806 		return;
5807 	}
5808 
5809 	rc = bs_super_validate(ctx->super, ctx->bs);
5810 	if (rc != 0) {
5811 		bs_unload_finish(ctx, rc);
5812 		return;
5813 	}
5814 
5815 	bs_write_used_md(seq, cb_arg, bs_unload_write_used_pages_cpl);
5816 }
5817 
5818 void
5819 spdk_bs_unload(struct spdk_blob_store *bs, spdk_bs_op_complete cb_fn, void *cb_arg)
5820 {
5821 	struct spdk_bs_cpl	cpl;
5822 	struct spdk_bs_load_ctx *ctx;
5823 
5824 	SPDK_DEBUGLOG(blob, "Syncing blobstore\n");
5825 
5826 	/*
5827 	 * If external snapshot channels are being destroyed while the blobstore is unloaded, the
5828 	 * unload is deferred until after the channel destruction completes.
5829 	 */
5830 	if (bs->esnap_channels_unloading != 0) {
5831 		if (bs->esnap_unload_cb_fn != NULL) {
5832 			SPDK_ERRLOG("Blobstore unload in progress\n");
5833 			cb_fn(cb_arg, -EBUSY);
5834 			return;
5835 		}
5836 		SPDK_DEBUGLOG(blob_esnap, "Blobstore unload deferred: %" PRIu32
5837 			      " esnap clones are unloading\n", bs->esnap_channels_unloading);
5838 		bs->esnap_unload_cb_fn = cb_fn;
5839 		bs->esnap_unload_cb_arg = cb_arg;
5840 		return;
5841 	}
5842 	if (bs->esnap_unload_cb_fn != NULL) {
5843 		SPDK_DEBUGLOG(blob_esnap, "Blobstore deferred unload progressing\n");
5844 		assert(bs->esnap_unload_cb_fn == cb_fn);
5845 		assert(bs->esnap_unload_cb_arg == cb_arg);
5846 		bs->esnap_unload_cb_fn = NULL;
5847 		bs->esnap_unload_cb_arg = NULL;
5848 	}
5849 
5850 	if (!RB_EMPTY(&bs->open_blobs)) {
5851 		SPDK_ERRLOG("Blobstore still has open blobs\n");
5852 		cb_fn(cb_arg, -EBUSY);
5853 		return;
5854 	}
5855 
5856 	ctx = calloc(1, sizeof(*ctx));
5857 	if (!ctx) {
5858 		cb_fn(cb_arg, -ENOMEM);
5859 		return;
5860 	}
5861 
5862 	ctx->bs = bs;
5863 
5864 	ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL,
5865 				  SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
5866 	if (!ctx->super) {
5867 		free(ctx);
5868 		cb_fn(cb_arg, -ENOMEM);
5869 		return;
5870 	}
5871 
5872 	cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
5873 	cpl.u.bs_basic.cb_fn = cb_fn;
5874 	cpl.u.bs_basic.cb_arg = cb_arg;
5875 
5876 	ctx->seq = bs_sequence_start_bs(bs->md_channel, &cpl);
5877 	if (!ctx->seq) {
5878 		spdk_free(ctx->super);
5879 		free(ctx);
5880 		cb_fn(cb_arg, -ENOMEM);
5881 		return;
5882 	}
5883 
5884 	/* Read super block */
5885 	bs_sequence_read_dev(ctx->seq, ctx->super, bs_page_to_lba(bs, 0),
5886 			     bs_byte_to_lba(bs, sizeof(*ctx->super)),
5887 			     bs_unload_read_super_cpl, ctx);
5888 }
5889 
5890 /* END spdk_bs_unload */
5891 
5892 /* START spdk_bs_set_super */
5893 
5894 struct spdk_bs_set_super_ctx {
5895 	struct spdk_blob_store		*bs;
5896 	struct spdk_bs_super_block	*super;
5897 };
5898 
5899 static void
5900 bs_set_super_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
5901 {
5902 	struct spdk_bs_set_super_ctx	*ctx = cb_arg;
5903 
5904 	if (bserrno != 0) {
5905 		SPDK_ERRLOG("Unable to write to super block of blobstore\n");
5906 	}
5907 
5908 	spdk_free(ctx->super);
5909 
5910 	bs_sequence_finish(seq, bserrno);
5911 
5912 	free(ctx);
5913 }
5914 
5915 static void
5916 bs_set_super_read_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
5917 {
5918 	struct spdk_bs_set_super_ctx	*ctx = cb_arg;
5919 	int rc;
5920 
5921 	if (bserrno != 0) {
5922 		SPDK_ERRLOG("Unable to read super block of blobstore\n");
5923 		spdk_free(ctx->super);
5924 		bs_sequence_finish(seq, bserrno);
5925 		free(ctx);
5926 		return;
5927 	}
5928 
5929 	rc = bs_super_validate(ctx->super, ctx->bs);
5930 	if (rc != 0) {
5931 		SPDK_ERRLOG("Not a valid super block\n");
5932 		spdk_free(ctx->super);
5933 		bs_sequence_finish(seq, rc);
5934 		free(ctx);
5935 		return;
5936 	}
5937 
5938 	bs_write_super(seq, ctx->bs, ctx->super, bs_set_super_write_cpl, ctx);
5939 }
5940 
5941 void
5942 spdk_bs_set_super(struct spdk_blob_store *bs, spdk_blob_id blobid,
5943 		  spdk_bs_op_complete cb_fn, void *cb_arg)
5944 {
5945 	struct spdk_bs_cpl		cpl;
5946 	spdk_bs_sequence_t		*seq;
5947 	struct spdk_bs_set_super_ctx	*ctx;
5948 
5949 	SPDK_DEBUGLOG(blob, "Setting super blob id on blobstore\n");
5950 
5951 	ctx = calloc(1, sizeof(*ctx));
5952 	if (!ctx) {
5953 		cb_fn(cb_arg, -ENOMEM);
5954 		return;
5955 	}
5956 
5957 	ctx->bs = bs;
5958 
5959 	ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL,
5960 				  SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
5961 	if (!ctx->super) {
5962 		free(ctx);
5963 		cb_fn(cb_arg, -ENOMEM);
5964 		return;
5965 	}
5966 
5967 	cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
5968 	cpl.u.bs_basic.cb_fn = cb_fn;
5969 	cpl.u.bs_basic.cb_arg = cb_arg;
5970 
5971 	seq = bs_sequence_start_bs(bs->md_channel, &cpl);
5972 	if (!seq) {
5973 		spdk_free(ctx->super);
5974 		free(ctx);
5975 		cb_fn(cb_arg, -ENOMEM);
5976 		return;
5977 	}
5978 
5979 	bs->super_blob = blobid;
5980 
5981 	/* Read super block */
5982 	bs_sequence_read_dev(seq, ctx->super, bs_page_to_lba(bs, 0),
5983 			     bs_byte_to_lba(bs, sizeof(*ctx->super)),
5984 			     bs_set_super_read_cpl, ctx);
5985 }
5986 
5987 /* END spdk_bs_set_super */
5988 
5989 void
5990 spdk_bs_get_super(struct spdk_blob_store *bs,
5991 		  spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
5992 {
5993 	if (bs->super_blob == SPDK_BLOBID_INVALID) {
5994 		cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOENT);
5995 	} else {
5996 		cb_fn(cb_arg, bs->super_blob, 0);
5997 	}
5998 }
5999 
6000 uint64_t
6001 spdk_bs_get_cluster_size(struct spdk_blob_store *bs)
6002 {
6003 	return bs->cluster_sz;
6004 }
6005 
6006 uint64_t
6007 spdk_bs_get_page_size(struct spdk_blob_store *bs)
6008 {
6009 	return SPDK_BS_PAGE_SIZE;
6010 }
6011 
6012 uint64_t
6013 spdk_bs_get_io_unit_size(struct spdk_blob_store *bs)
6014 {
6015 	return bs->io_unit_size;
6016 }
6017 
6018 uint64_t
6019 spdk_bs_free_cluster_count(struct spdk_blob_store *bs)
6020 {
6021 	return bs->num_free_clusters;
6022 }
6023 
6024 uint64_t
6025 spdk_bs_total_data_cluster_count(struct spdk_blob_store *bs)
6026 {
6027 	return bs->total_data_clusters;
6028 }
6029 
6030 static int
6031 bs_register_md_thread(struct spdk_blob_store *bs)
6032 {
6033 	bs->md_channel = spdk_get_io_channel(bs);
6034 	if (!bs->md_channel) {
6035 		SPDK_ERRLOG("Failed to get IO channel.\n");
6036 		return -1;
6037 	}
6038 
6039 	return 0;
6040 }
6041 
6042 static int
6043 bs_unregister_md_thread(struct spdk_blob_store *bs)
6044 {
6045 	spdk_put_io_channel(bs->md_channel);
6046 
6047 	return 0;
6048 }
6049 
6050 spdk_blob_id
6051 spdk_blob_get_id(struct spdk_blob *blob)
6052 {
6053 	assert(blob != NULL);
6054 
6055 	return blob->id;
6056 }
6057 
6058 uint64_t
6059 spdk_blob_get_num_pages(struct spdk_blob *blob)
6060 {
6061 	assert(blob != NULL);
6062 
6063 	return bs_cluster_to_page(blob->bs, blob->active.num_clusters);
6064 }
6065 
6066 uint64_t
6067 spdk_blob_get_num_io_units(struct spdk_blob *blob)
6068 {
6069 	assert(blob != NULL);
6070 
6071 	return spdk_blob_get_num_pages(blob) * bs_io_unit_per_page(blob->bs);
6072 }
6073 
6074 uint64_t
6075 spdk_blob_get_num_clusters(struct spdk_blob *blob)
6076 {
6077 	assert(blob != NULL);
6078 
6079 	return blob->active.num_clusters;
6080 }
6081 
6082 uint64_t
6083 spdk_blob_get_num_allocated_clusters(struct spdk_blob *blob)
6084 {
6085 	assert(blob != NULL);
6086 
6087 	return blob->active.num_allocated_clusters;
6088 }
6089 
6090 static uint64_t
6091 blob_find_io_unit(struct spdk_blob *blob, uint64_t offset, bool is_allocated)
6092 {
6093 	uint64_t blob_io_unit_num = spdk_blob_get_num_io_units(blob);
6094 
6095 	while (offset < blob_io_unit_num) {
6096 		if (bs_io_unit_is_allocated(blob, offset) == is_allocated) {
6097 			return offset;
6098 		}
6099 
6100 		offset += bs_num_io_units_to_cluster_boundary(blob, offset);
6101 	}
6102 
6103 	return UINT64_MAX;
6104 }
6105 
6106 uint64_t
6107 spdk_blob_get_next_allocated_io_unit(struct spdk_blob *blob, uint64_t offset)
6108 {
6109 	return blob_find_io_unit(blob, offset, true);
6110 }
6111 
6112 uint64_t
6113 spdk_blob_get_next_unallocated_io_unit(struct spdk_blob *blob, uint64_t offset)
6114 {
6115 	return blob_find_io_unit(blob, offset, false);
6116 }
6117 
6118 /* START spdk_bs_create_blob */
6119 
6120 static void
6121 bs_create_blob_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
6122 {
6123 	struct spdk_blob *blob = cb_arg;
6124 	uint32_t page_idx = bs_blobid_to_page(blob->id);
6125 
6126 	if (bserrno != 0) {
6127 		spdk_spin_lock(&blob->bs->used_lock);
6128 		spdk_bit_array_clear(blob->bs->used_blobids, page_idx);
6129 		bs_release_md_page(blob->bs, page_idx);
6130 		spdk_spin_unlock(&blob->bs->used_lock);
6131 	}
6132 
6133 	blob_free(blob);
6134 
6135 	bs_sequence_finish(seq, bserrno);
6136 }
6137 
6138 static int
6139 blob_set_xattrs(struct spdk_blob *blob, const struct spdk_blob_xattr_opts *xattrs,
6140 		bool internal)
6141 {
6142 	uint64_t i;
6143 	size_t value_len = 0;
6144 	int rc;
6145 	const void *value = NULL;
6146 	if (xattrs->count > 0 && xattrs->get_value == NULL) {
6147 		return -EINVAL;
6148 	}
6149 	for (i = 0; i < xattrs->count; i++) {
6150 		xattrs->get_value(xattrs->ctx, xattrs->names[i], &value, &value_len);
6151 		if (value == NULL || value_len == 0) {
6152 			return -EINVAL;
6153 		}
6154 		rc = blob_set_xattr(blob, xattrs->names[i], value, value_len, internal);
6155 		if (rc < 0) {
6156 			return rc;
6157 		}
6158 	}
6159 	return 0;
6160 }
6161 
6162 static void
6163 blob_opts_copy(const struct spdk_blob_opts *src, struct spdk_blob_opts *dst)
6164 {
6165 #define FIELD_OK(field) \
6166         offsetof(struct spdk_blob_opts, field) + sizeof(src->field) <= src->opts_size
6167 
6168 #define SET_FIELD(field) \
6169         if (FIELD_OK(field)) { \
6170                 dst->field = src->field; \
6171         } \
6172 
6173 	SET_FIELD(num_clusters);
6174 	SET_FIELD(thin_provision);
6175 	SET_FIELD(clear_method);
6176 
6177 	if (FIELD_OK(xattrs)) {
6178 		memcpy(&dst->xattrs, &src->xattrs, sizeof(src->xattrs));
6179 	}
6180 
6181 	SET_FIELD(use_extent_table);
6182 	SET_FIELD(esnap_id);
6183 	SET_FIELD(esnap_id_len);
6184 
6185 	dst->opts_size = src->opts_size;
6186 
6187 	/* You should not remove this statement, but need to update the assert statement
6188 	 * if you add a new field, and also add a corresponding SET_FIELD statement */
6189 	SPDK_STATIC_ASSERT(sizeof(struct spdk_blob_opts) == 80, "Incorrect size");
6190 
6191 #undef FIELD_OK
6192 #undef SET_FIELD
6193 }
6194 
6195 static void
6196 bs_create_blob(struct spdk_blob_store *bs,
6197 	       const struct spdk_blob_opts *opts,
6198 	       const struct spdk_blob_xattr_opts *internal_xattrs,
6199 	       spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
6200 {
6201 	struct spdk_blob	*blob;
6202 	uint32_t		page_idx;
6203 	struct spdk_bs_cpl	cpl;
6204 	struct spdk_blob_opts	opts_local;
6205 	struct spdk_blob_xattr_opts internal_xattrs_default;
6206 	spdk_bs_sequence_t	*seq;
6207 	spdk_blob_id		id;
6208 	int rc;
6209 
6210 	assert(spdk_get_thread() == bs->md_thread);
6211 
6212 	spdk_spin_lock(&bs->used_lock);
6213 	page_idx = spdk_bit_array_find_first_clear(bs->used_md_pages, 0);
6214 	if (page_idx == UINT32_MAX) {
6215 		spdk_spin_unlock(&bs->used_lock);
6216 		cb_fn(cb_arg, 0, -ENOMEM);
6217 		return;
6218 	}
6219 	spdk_bit_array_set(bs->used_blobids, page_idx);
6220 	bs_claim_md_page(bs, page_idx);
6221 	spdk_spin_unlock(&bs->used_lock);
6222 
6223 	id = bs_page_to_blobid(page_idx);
6224 
6225 	SPDK_DEBUGLOG(blob, "Creating blob with id 0x%" PRIx64 " at page %u\n", id, page_idx);
6226 
6227 	spdk_blob_opts_init(&opts_local, sizeof(opts_local));
6228 	if (opts) {
6229 		blob_opts_copy(opts, &opts_local);
6230 	}
6231 
6232 	blob = blob_alloc(bs, id);
6233 	if (!blob) {
6234 		rc = -ENOMEM;
6235 		goto error;
6236 	}
6237 
6238 	blob->use_extent_table = opts_local.use_extent_table;
6239 	if (blob->use_extent_table) {
6240 		blob->invalid_flags |= SPDK_BLOB_EXTENT_TABLE;
6241 	}
6242 
6243 	if (!internal_xattrs) {
6244 		blob_xattrs_init(&internal_xattrs_default);
6245 		internal_xattrs = &internal_xattrs_default;
6246 	}
6247 
6248 	rc = blob_set_xattrs(blob, &opts_local.xattrs, false);
6249 	if (rc < 0) {
6250 		goto error;
6251 	}
6252 
6253 	rc = blob_set_xattrs(blob, internal_xattrs, true);
6254 	if (rc < 0) {
6255 		goto error;
6256 	}
6257 
6258 	if (opts_local.thin_provision) {
6259 		blob_set_thin_provision(blob);
6260 	}
6261 
6262 	blob_set_clear_method(blob, opts_local.clear_method);
6263 
6264 	if (opts_local.esnap_id != NULL) {
6265 		if (opts_local.esnap_id_len > UINT16_MAX) {
6266 			SPDK_ERRLOG("esnap id length %" PRIu64 "is too long\n",
6267 				    opts_local.esnap_id_len);
6268 			rc = -EINVAL;
6269 			goto error;
6270 
6271 		}
6272 		blob_set_thin_provision(blob);
6273 		blob->invalid_flags |= SPDK_BLOB_EXTERNAL_SNAPSHOT;
6274 		rc = blob_set_xattr(blob, BLOB_EXTERNAL_SNAPSHOT_ID,
6275 				    opts_local.esnap_id, opts_local.esnap_id_len, true);
6276 		if (rc != 0) {
6277 			goto error;
6278 		}
6279 	}
6280 
6281 	rc = blob_resize(blob, opts_local.num_clusters);
6282 	if (rc < 0) {
6283 		goto error;
6284 	}
6285 	cpl.type = SPDK_BS_CPL_TYPE_BLOBID;
6286 	cpl.u.blobid.cb_fn = cb_fn;
6287 	cpl.u.blobid.cb_arg = cb_arg;
6288 	cpl.u.blobid.blobid = blob->id;
6289 
6290 	seq = bs_sequence_start_bs(bs->md_channel, &cpl);
6291 	if (!seq) {
6292 		rc = -ENOMEM;
6293 		goto error;
6294 	}
6295 
6296 	blob_persist(seq, blob, bs_create_blob_cpl, blob);
6297 	return;
6298 
6299 error:
6300 	SPDK_ERRLOG("Failed to create blob: %s, size in clusters/size: %lu (clusters)\n",
6301 		    spdk_strerror(rc), opts_local.num_clusters);
6302 	if (blob != NULL) {
6303 		blob_free(blob);
6304 	}
6305 	spdk_spin_lock(&bs->used_lock);
6306 	spdk_bit_array_clear(bs->used_blobids, page_idx);
6307 	bs_release_md_page(bs, page_idx);
6308 	spdk_spin_unlock(&bs->used_lock);
6309 	cb_fn(cb_arg, 0, rc);
6310 }
6311 
6312 void
6313 spdk_bs_create_blob(struct spdk_blob_store *bs,
6314 		    spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
6315 {
6316 	bs_create_blob(bs, NULL, NULL, cb_fn, cb_arg);
6317 }
6318 
6319 void
6320 spdk_bs_create_blob_ext(struct spdk_blob_store *bs, const struct spdk_blob_opts *opts,
6321 			spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
6322 {
6323 	bs_create_blob(bs, opts, NULL, cb_fn, cb_arg);
6324 }
6325 
6326 /* END spdk_bs_create_blob */
6327 
6328 /* START blob_cleanup */
6329 
6330 struct spdk_clone_snapshot_ctx {
6331 	struct spdk_bs_cpl      cpl;
6332 	int bserrno;
6333 	bool frozen;
6334 
6335 	struct spdk_io_channel *channel;
6336 
6337 	/* Current cluster for inflate operation */
6338 	uint64_t cluster;
6339 
6340 	/* For inflation force allocation of all unallocated clusters and remove
6341 	 * thin-provisioning. Otherwise only decouple parent and keep clone thin. */
6342 	bool allocate_all;
6343 
6344 	struct {
6345 		spdk_blob_id id;
6346 		struct spdk_blob *blob;
6347 		bool md_ro;
6348 	} original;
6349 	struct {
6350 		spdk_blob_id id;
6351 		struct spdk_blob *blob;
6352 	} new;
6353 
6354 	/* xattrs specified for snapshot/clones only. They have no impact on
6355 	 * the original blobs xattrs. */
6356 	const struct spdk_blob_xattr_opts *xattrs;
6357 };
6358 
6359 static void
6360 bs_clone_snapshot_cleanup_finish(void *cb_arg, int bserrno)
6361 {
6362 	struct spdk_clone_snapshot_ctx *ctx = cb_arg;
6363 	struct spdk_bs_cpl *cpl = &ctx->cpl;
6364 
6365 	if (bserrno != 0) {
6366 		if (ctx->bserrno != 0) {
6367 			SPDK_ERRLOG("Cleanup error %d\n", bserrno);
6368 		} else {
6369 			ctx->bserrno = bserrno;
6370 		}
6371 	}
6372 
6373 	switch (cpl->type) {
6374 	case SPDK_BS_CPL_TYPE_BLOBID:
6375 		cpl->u.blobid.cb_fn(cpl->u.blobid.cb_arg, cpl->u.blobid.blobid, ctx->bserrno);
6376 		break;
6377 	case SPDK_BS_CPL_TYPE_BLOB_BASIC:
6378 		cpl->u.blob_basic.cb_fn(cpl->u.blob_basic.cb_arg, ctx->bserrno);
6379 		break;
6380 	default:
6381 		SPDK_UNREACHABLE();
6382 		break;
6383 	}
6384 
6385 	free(ctx);
6386 }
6387 
6388 static void
6389 bs_snapshot_unfreeze_cpl(void *cb_arg, int bserrno)
6390 {
6391 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
6392 	struct spdk_blob *origblob = ctx->original.blob;
6393 
6394 	if (bserrno != 0) {
6395 		if (ctx->bserrno != 0) {
6396 			SPDK_ERRLOG("Unfreeze error %d\n", bserrno);
6397 		} else {
6398 			ctx->bserrno = bserrno;
6399 		}
6400 	}
6401 
6402 	ctx->original.id = origblob->id;
6403 	origblob->locked_operation_in_progress = false;
6404 
6405 	/* Revert md_ro to original state */
6406 	origblob->md_ro = ctx->original.md_ro;
6407 
6408 	spdk_blob_close(origblob, bs_clone_snapshot_cleanup_finish, ctx);
6409 }
6410 
6411 static void
6412 bs_clone_snapshot_origblob_cleanup(void *cb_arg, int bserrno)
6413 {
6414 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
6415 	struct spdk_blob *origblob = ctx->original.blob;
6416 
6417 	if (bserrno != 0) {
6418 		if (ctx->bserrno != 0) {
6419 			SPDK_ERRLOG("Cleanup error %d\n", bserrno);
6420 		} else {
6421 			ctx->bserrno = bserrno;
6422 		}
6423 	}
6424 
6425 	if (ctx->frozen) {
6426 		/* Unfreeze any outstanding I/O */
6427 		blob_unfreeze_io(origblob, bs_snapshot_unfreeze_cpl, ctx);
6428 	} else {
6429 		bs_snapshot_unfreeze_cpl(ctx, 0);
6430 	}
6431 
6432 }
6433 
6434 static void
6435 bs_clone_snapshot_newblob_cleanup(struct spdk_clone_snapshot_ctx *ctx, int bserrno)
6436 {
6437 	struct spdk_blob *newblob = ctx->new.blob;
6438 
6439 	if (bserrno != 0) {
6440 		if (ctx->bserrno != 0) {
6441 			SPDK_ERRLOG("Cleanup error %d\n", bserrno);
6442 		} else {
6443 			ctx->bserrno = bserrno;
6444 		}
6445 	}
6446 
6447 	ctx->new.id = newblob->id;
6448 	spdk_blob_close(newblob, bs_clone_snapshot_origblob_cleanup, ctx);
6449 }
6450 
6451 /* END blob_cleanup */
6452 
6453 /* START spdk_bs_create_snapshot */
6454 
6455 static void
6456 bs_snapshot_swap_cluster_maps(struct spdk_blob *blob1, struct spdk_blob *blob2)
6457 {
6458 	uint64_t *cluster_temp;
6459 	uint64_t num_allocated_clusters_temp;
6460 	uint32_t *extent_page_temp;
6461 
6462 	cluster_temp = blob1->active.clusters;
6463 	blob1->active.clusters = blob2->active.clusters;
6464 	blob2->active.clusters = cluster_temp;
6465 
6466 	num_allocated_clusters_temp = blob1->active.num_allocated_clusters;
6467 	blob1->active.num_allocated_clusters = blob2->active.num_allocated_clusters;
6468 	blob2->active.num_allocated_clusters = num_allocated_clusters_temp;
6469 
6470 	extent_page_temp = blob1->active.extent_pages;
6471 	blob1->active.extent_pages = blob2->active.extent_pages;
6472 	blob2->active.extent_pages = extent_page_temp;
6473 }
6474 
6475 /* Copies an internal xattr */
6476 static int
6477 bs_snapshot_copy_xattr(struct spdk_blob *toblob, struct spdk_blob *fromblob, const char *name)
6478 {
6479 	const void	*val = NULL;
6480 	size_t		len;
6481 	int		bserrno;
6482 
6483 	bserrno = blob_get_xattr_value(fromblob, name, &val, &len, true);
6484 	if (bserrno != 0) {
6485 		SPDK_ERRLOG("blob 0x%" PRIx64 " missing %s XATTR\n", fromblob->id, name);
6486 		return bserrno;
6487 	}
6488 
6489 	bserrno = blob_set_xattr(toblob, name, val, len, true);
6490 	if (bserrno != 0) {
6491 		SPDK_ERRLOG("could not set %s XATTR on blob 0x%" PRIx64 "\n",
6492 			    name, toblob->id);
6493 		return bserrno;
6494 	}
6495 	return 0;
6496 }
6497 
6498 static void
6499 bs_snapshot_origblob_sync_cpl(void *cb_arg, int bserrno)
6500 {
6501 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
6502 	struct spdk_blob *origblob = ctx->original.blob;
6503 	struct spdk_blob *newblob = ctx->new.blob;
6504 
6505 	if (bserrno != 0) {
6506 		bs_snapshot_swap_cluster_maps(newblob, origblob);
6507 		if (blob_is_esnap_clone(newblob)) {
6508 			bs_snapshot_copy_xattr(origblob, newblob, BLOB_EXTERNAL_SNAPSHOT_ID);
6509 			origblob->invalid_flags |= SPDK_BLOB_EXTERNAL_SNAPSHOT;
6510 		}
6511 		bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
6512 		return;
6513 	}
6514 
6515 	/* Remove metadata descriptor SNAPSHOT_IN_PROGRESS */
6516 	bserrno = blob_remove_xattr(newblob, SNAPSHOT_IN_PROGRESS, true);
6517 	if (bserrno != 0) {
6518 		bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
6519 		return;
6520 	}
6521 
6522 	bs_blob_list_add(ctx->original.blob);
6523 
6524 	spdk_blob_set_read_only(newblob);
6525 
6526 	/* sync snapshot metadata */
6527 	spdk_blob_sync_md(newblob, bs_clone_snapshot_origblob_cleanup, ctx);
6528 }
6529 
6530 static void
6531 bs_snapshot_newblob_sync_cpl(void *cb_arg, int bserrno)
6532 {
6533 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
6534 	struct spdk_blob *origblob = ctx->original.blob;
6535 	struct spdk_blob *newblob = ctx->new.blob;
6536 
6537 	if (bserrno != 0) {
6538 		/* return cluster map back to original */
6539 		bs_snapshot_swap_cluster_maps(newblob, origblob);
6540 
6541 		/* Newblob md sync failed. Valid clusters are only present in origblob.
6542 		 * Since I/O is frozen on origblob, not changes to zeroed out cluster map should have occurred.
6543 		 * Newblob needs to be reverted to thin_provisioned state at creation to properly close. */
6544 		blob_set_thin_provision(newblob);
6545 		assert(spdk_mem_all_zero(newblob->active.clusters,
6546 					 newblob->active.num_clusters * sizeof(*newblob->active.clusters)));
6547 		assert(spdk_mem_all_zero(newblob->active.extent_pages,
6548 					 newblob->active.num_extent_pages * sizeof(*newblob->active.extent_pages)));
6549 
6550 		bs_clone_snapshot_newblob_cleanup(ctx, bserrno);
6551 		return;
6552 	}
6553 
6554 	/* Set internal xattr for snapshot id */
6555 	bserrno = blob_set_xattr(origblob, BLOB_SNAPSHOT, &newblob->id, sizeof(spdk_blob_id), true);
6556 	if (bserrno != 0) {
6557 		/* return cluster map back to original */
6558 		bs_snapshot_swap_cluster_maps(newblob, origblob);
6559 		blob_set_thin_provision(newblob);
6560 		bs_clone_snapshot_newblob_cleanup(ctx, bserrno);
6561 		return;
6562 	}
6563 
6564 	/* Create new back_bs_dev for snapshot */
6565 	origblob->back_bs_dev = bs_create_blob_bs_dev(newblob);
6566 	if (origblob->back_bs_dev == NULL) {
6567 		/* return cluster map back to original */
6568 		bs_snapshot_swap_cluster_maps(newblob, origblob);
6569 		blob_set_thin_provision(newblob);
6570 		bs_clone_snapshot_newblob_cleanup(ctx, -EINVAL);
6571 		return;
6572 	}
6573 
6574 	/* Remove the xattr that references an external snapshot */
6575 	if (blob_is_esnap_clone(origblob)) {
6576 		origblob->invalid_flags &= ~SPDK_BLOB_EXTERNAL_SNAPSHOT;
6577 		bserrno = blob_remove_xattr(origblob, BLOB_EXTERNAL_SNAPSHOT_ID, true);
6578 		if (bserrno != 0) {
6579 			if (bserrno == -ENOENT) {
6580 				SPDK_ERRLOG("blob 0x%" PRIx64 " has no " BLOB_EXTERNAL_SNAPSHOT_ID
6581 					    " xattr to remove\n", origblob->id);
6582 				assert(false);
6583 			} else {
6584 				/* return cluster map back to original */
6585 				bs_snapshot_swap_cluster_maps(newblob, origblob);
6586 				blob_set_thin_provision(newblob);
6587 				bs_clone_snapshot_newblob_cleanup(ctx, bserrno);
6588 				return;
6589 			}
6590 		}
6591 	}
6592 
6593 	bs_blob_list_remove(origblob);
6594 	origblob->parent_id = newblob->id;
6595 	/* set clone blob as thin provisioned */
6596 	blob_set_thin_provision(origblob);
6597 
6598 	bs_blob_list_add(newblob);
6599 
6600 	/* sync clone metadata */
6601 	spdk_blob_sync_md(origblob, bs_snapshot_origblob_sync_cpl, ctx);
6602 }
6603 
6604 static void
6605 bs_snapshot_freeze_cpl(void *cb_arg, int rc)
6606 {
6607 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
6608 	struct spdk_blob *origblob = ctx->original.blob;
6609 	struct spdk_blob *newblob = ctx->new.blob;
6610 	int bserrno;
6611 
6612 	if (rc != 0) {
6613 		bs_clone_snapshot_newblob_cleanup(ctx, rc);
6614 		return;
6615 	}
6616 
6617 	ctx->frozen = true;
6618 
6619 	if (blob_is_esnap_clone(origblob)) {
6620 		/* Clean up any channels associated with the original blob id because future IO will
6621 		 * perform IO using the snapshot blob_id.
6622 		 */
6623 		blob_esnap_destroy_bs_dev_channels(origblob, false, NULL, NULL);
6624 	}
6625 	if (newblob->back_bs_dev) {
6626 		blob_back_bs_destroy(newblob);
6627 	}
6628 	/* set new back_bs_dev for snapshot */
6629 	newblob->back_bs_dev = origblob->back_bs_dev;
6630 	/* Set invalid flags from origblob */
6631 	newblob->invalid_flags = origblob->invalid_flags;
6632 
6633 	/* inherit parent from original blob if set */
6634 	newblob->parent_id = origblob->parent_id;
6635 	switch (origblob->parent_id) {
6636 	case SPDK_BLOBID_EXTERNAL_SNAPSHOT:
6637 		bserrno = bs_snapshot_copy_xattr(newblob, origblob, BLOB_EXTERNAL_SNAPSHOT_ID);
6638 		if (bserrno != 0) {
6639 			bs_clone_snapshot_newblob_cleanup(ctx, bserrno);
6640 			return;
6641 		}
6642 		break;
6643 	case SPDK_BLOBID_INVALID:
6644 		break;
6645 	default:
6646 		/* Set internal xattr for snapshot id */
6647 		bserrno = blob_set_xattr(newblob, BLOB_SNAPSHOT,
6648 					 &origblob->parent_id, sizeof(spdk_blob_id), true);
6649 		if (bserrno != 0) {
6650 			bs_clone_snapshot_newblob_cleanup(ctx, bserrno);
6651 			return;
6652 		}
6653 	}
6654 
6655 	/* swap cluster maps */
6656 	bs_snapshot_swap_cluster_maps(newblob, origblob);
6657 
6658 	/* Set the clear method on the new blob to match the original. */
6659 	blob_set_clear_method(newblob, origblob->clear_method);
6660 
6661 	/* sync snapshot metadata */
6662 	spdk_blob_sync_md(newblob, bs_snapshot_newblob_sync_cpl, ctx);
6663 }
6664 
6665 static void
6666 bs_snapshot_newblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
6667 {
6668 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
6669 	struct spdk_blob *origblob = ctx->original.blob;
6670 	struct spdk_blob *newblob = _blob;
6671 
6672 	if (bserrno != 0) {
6673 		bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
6674 		return;
6675 	}
6676 
6677 	ctx->new.blob = newblob;
6678 	assert(spdk_blob_is_thin_provisioned(newblob));
6679 	assert(spdk_mem_all_zero(newblob->active.clusters,
6680 				 newblob->active.num_clusters * sizeof(*newblob->active.clusters)));
6681 	assert(spdk_mem_all_zero(newblob->active.extent_pages,
6682 				 newblob->active.num_extent_pages * sizeof(*newblob->active.extent_pages)));
6683 
6684 	blob_freeze_io(origblob, bs_snapshot_freeze_cpl, ctx);
6685 }
6686 
6687 static void
6688 bs_snapshot_newblob_create_cpl(void *cb_arg, spdk_blob_id blobid, int bserrno)
6689 {
6690 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
6691 	struct spdk_blob *origblob = ctx->original.blob;
6692 
6693 	if (bserrno != 0) {
6694 		bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
6695 		return;
6696 	}
6697 
6698 	ctx->new.id = blobid;
6699 	ctx->cpl.u.blobid.blobid = blobid;
6700 
6701 	spdk_bs_open_blob(origblob->bs, ctx->new.id, bs_snapshot_newblob_open_cpl, ctx);
6702 }
6703 
6704 
6705 static void
6706 bs_xattr_snapshot(void *arg, const char *name,
6707 		  const void **value, size_t *value_len)
6708 {
6709 	assert(strncmp(name, SNAPSHOT_IN_PROGRESS, sizeof(SNAPSHOT_IN_PROGRESS)) == 0);
6710 
6711 	struct spdk_blob *blob = (struct spdk_blob *)arg;
6712 	*value = &blob->id;
6713 	*value_len = sizeof(blob->id);
6714 }
6715 
6716 static void
6717 bs_snapshot_origblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
6718 {
6719 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
6720 	struct spdk_blob_opts opts;
6721 	struct spdk_blob_xattr_opts internal_xattrs;
6722 	char *xattrs_names[] = { SNAPSHOT_IN_PROGRESS };
6723 
6724 	if (bserrno != 0) {
6725 		bs_clone_snapshot_cleanup_finish(ctx, bserrno);
6726 		return;
6727 	}
6728 
6729 	ctx->original.blob = _blob;
6730 
6731 	if (_blob->data_ro || _blob->md_ro) {
6732 		SPDK_DEBUGLOG(blob, "Cannot create snapshot from read only blob with id 0x%"
6733 			      PRIx64 "\n", _blob->id);
6734 		ctx->bserrno = -EINVAL;
6735 		spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx);
6736 		return;
6737 	}
6738 
6739 	if (_blob->locked_operation_in_progress) {
6740 		SPDK_DEBUGLOG(blob, "Cannot create snapshot - another operation in progress\n");
6741 		ctx->bserrno = -EBUSY;
6742 		spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx);
6743 		return;
6744 	}
6745 
6746 	_blob->locked_operation_in_progress = true;
6747 
6748 	spdk_blob_opts_init(&opts, sizeof(opts));
6749 	blob_xattrs_init(&internal_xattrs);
6750 
6751 	/* Change the size of new blob to the same as in original blob,
6752 	 * but do not allocate clusters */
6753 	opts.thin_provision = true;
6754 	opts.num_clusters = spdk_blob_get_num_clusters(_blob);
6755 	opts.use_extent_table = _blob->use_extent_table;
6756 
6757 	/* If there are any xattrs specified for snapshot, set them now */
6758 	if (ctx->xattrs) {
6759 		memcpy(&opts.xattrs, ctx->xattrs, sizeof(*ctx->xattrs));
6760 	}
6761 	/* Set internal xattr SNAPSHOT_IN_PROGRESS */
6762 	internal_xattrs.count = 1;
6763 	internal_xattrs.ctx = _blob;
6764 	internal_xattrs.names = xattrs_names;
6765 	internal_xattrs.get_value = bs_xattr_snapshot;
6766 
6767 	bs_create_blob(_blob->bs, &opts, &internal_xattrs,
6768 		       bs_snapshot_newblob_create_cpl, ctx);
6769 }
6770 
6771 void
6772 spdk_bs_create_snapshot(struct spdk_blob_store *bs, spdk_blob_id blobid,
6773 			const struct spdk_blob_xattr_opts *snapshot_xattrs,
6774 			spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
6775 {
6776 	struct spdk_clone_snapshot_ctx *ctx = calloc(1, sizeof(*ctx));
6777 
6778 	if (!ctx) {
6779 		cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOMEM);
6780 		return;
6781 	}
6782 	ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOBID;
6783 	ctx->cpl.u.blobid.cb_fn = cb_fn;
6784 	ctx->cpl.u.blobid.cb_arg = cb_arg;
6785 	ctx->cpl.u.blobid.blobid = SPDK_BLOBID_INVALID;
6786 	ctx->bserrno = 0;
6787 	ctx->frozen = false;
6788 	ctx->original.id = blobid;
6789 	ctx->xattrs = snapshot_xattrs;
6790 
6791 	spdk_bs_open_blob(bs, ctx->original.id, bs_snapshot_origblob_open_cpl, ctx);
6792 }
6793 /* END spdk_bs_create_snapshot */
6794 
6795 /* START spdk_bs_create_clone */
6796 
6797 static void
6798 bs_xattr_clone(void *arg, const char *name,
6799 	       const void **value, size_t *value_len)
6800 {
6801 	assert(strncmp(name, BLOB_SNAPSHOT, sizeof(BLOB_SNAPSHOT)) == 0);
6802 
6803 	struct spdk_blob *blob = (struct spdk_blob *)arg;
6804 	*value = &blob->id;
6805 	*value_len = sizeof(blob->id);
6806 }
6807 
6808 static void
6809 bs_clone_newblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
6810 {
6811 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
6812 	struct spdk_blob *clone = _blob;
6813 
6814 	ctx->new.blob = clone;
6815 	bs_blob_list_add(clone);
6816 
6817 	spdk_blob_close(clone, bs_clone_snapshot_origblob_cleanup, ctx);
6818 }
6819 
6820 static void
6821 bs_clone_newblob_create_cpl(void *cb_arg, spdk_blob_id blobid, int bserrno)
6822 {
6823 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
6824 
6825 	ctx->cpl.u.blobid.blobid = blobid;
6826 	spdk_bs_open_blob(ctx->original.blob->bs, blobid, bs_clone_newblob_open_cpl, ctx);
6827 }
6828 
6829 static void
6830 bs_clone_origblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
6831 {
6832 	struct spdk_clone_snapshot_ctx	*ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
6833 	struct spdk_blob_opts		opts;
6834 	struct spdk_blob_xattr_opts internal_xattrs;
6835 	char *xattr_names[] = { BLOB_SNAPSHOT };
6836 
6837 	if (bserrno != 0) {
6838 		bs_clone_snapshot_cleanup_finish(ctx, bserrno);
6839 		return;
6840 	}
6841 
6842 	ctx->original.blob = _blob;
6843 	ctx->original.md_ro = _blob->md_ro;
6844 
6845 	if (!_blob->data_ro || !_blob->md_ro) {
6846 		SPDK_DEBUGLOG(blob, "Clone not from read-only blob\n");
6847 		ctx->bserrno = -EINVAL;
6848 		spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx);
6849 		return;
6850 	}
6851 
6852 	if (_blob->locked_operation_in_progress) {
6853 		SPDK_DEBUGLOG(blob, "Cannot create clone - another operation in progress\n");
6854 		ctx->bserrno = -EBUSY;
6855 		spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx);
6856 		return;
6857 	}
6858 
6859 	_blob->locked_operation_in_progress = true;
6860 
6861 	spdk_blob_opts_init(&opts, sizeof(opts));
6862 	blob_xattrs_init(&internal_xattrs);
6863 
6864 	opts.thin_provision = true;
6865 	opts.num_clusters = spdk_blob_get_num_clusters(_blob);
6866 	opts.use_extent_table = _blob->use_extent_table;
6867 	if (ctx->xattrs) {
6868 		memcpy(&opts.xattrs, ctx->xattrs, sizeof(*ctx->xattrs));
6869 	}
6870 
6871 	/* Set internal xattr BLOB_SNAPSHOT */
6872 	internal_xattrs.count = 1;
6873 	internal_xattrs.ctx = _blob;
6874 	internal_xattrs.names = xattr_names;
6875 	internal_xattrs.get_value = bs_xattr_clone;
6876 
6877 	bs_create_blob(_blob->bs, &opts, &internal_xattrs,
6878 		       bs_clone_newblob_create_cpl, ctx);
6879 }
6880 
6881 void
6882 spdk_bs_create_clone(struct spdk_blob_store *bs, spdk_blob_id blobid,
6883 		     const struct spdk_blob_xattr_opts *clone_xattrs,
6884 		     spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
6885 {
6886 	struct spdk_clone_snapshot_ctx	*ctx = calloc(1, sizeof(*ctx));
6887 
6888 	if (!ctx) {
6889 		cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOMEM);
6890 		return;
6891 	}
6892 
6893 	ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOBID;
6894 	ctx->cpl.u.blobid.cb_fn = cb_fn;
6895 	ctx->cpl.u.blobid.cb_arg = cb_arg;
6896 	ctx->cpl.u.blobid.blobid = SPDK_BLOBID_INVALID;
6897 	ctx->bserrno = 0;
6898 	ctx->xattrs = clone_xattrs;
6899 	ctx->original.id = blobid;
6900 
6901 	spdk_bs_open_blob(bs, ctx->original.id, bs_clone_origblob_open_cpl, ctx);
6902 }
6903 
6904 /* END spdk_bs_create_clone */
6905 
6906 /* START spdk_bs_inflate_blob */
6907 
6908 static void
6909 bs_inflate_blob_set_parent_cpl(void *cb_arg, struct spdk_blob *_parent, int bserrno)
6910 {
6911 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
6912 	struct spdk_blob *_blob = ctx->original.blob;
6913 
6914 	if (bserrno != 0) {
6915 		bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
6916 		return;
6917 	}
6918 
6919 	/* Temporarily override md_ro flag for MD modification */
6920 	_blob->md_ro = false;
6921 
6922 	bserrno = blob_set_xattr(_blob, BLOB_SNAPSHOT, &_parent->id, sizeof(spdk_blob_id), true);
6923 	if (bserrno != 0) {
6924 		bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
6925 		return;
6926 	}
6927 
6928 	assert(_parent != NULL);
6929 
6930 	bs_blob_list_remove(_blob);
6931 	_blob->parent_id = _parent->id;
6932 
6933 	blob_back_bs_destroy(_blob);
6934 	_blob->back_bs_dev = bs_create_blob_bs_dev(_parent);
6935 	bs_blob_list_add(_blob);
6936 
6937 	spdk_blob_sync_md(_blob, bs_clone_snapshot_origblob_cleanup, ctx);
6938 }
6939 
6940 static void
6941 bs_inflate_blob_done(struct spdk_clone_snapshot_ctx *ctx)
6942 {
6943 	struct spdk_blob *_blob = ctx->original.blob;
6944 	struct spdk_blob *_parent;
6945 
6946 	if (ctx->allocate_all) {
6947 		/* remove thin provisioning */
6948 		bs_blob_list_remove(_blob);
6949 		if (_blob->parent_id == SPDK_BLOBID_EXTERNAL_SNAPSHOT) {
6950 			blob_remove_xattr(_blob, BLOB_EXTERNAL_SNAPSHOT_ID, true);
6951 			_blob->invalid_flags &= ~SPDK_BLOB_EXTERNAL_SNAPSHOT;
6952 		} else {
6953 			blob_remove_xattr(_blob, BLOB_SNAPSHOT, true);
6954 		}
6955 		_blob->invalid_flags = _blob->invalid_flags & ~SPDK_BLOB_THIN_PROV;
6956 		blob_back_bs_destroy(_blob);
6957 		_blob->parent_id = SPDK_BLOBID_INVALID;
6958 	} else {
6959 		/* For now, esnap clones always have allocate_all set. */
6960 		assert(!blob_is_esnap_clone(_blob));
6961 
6962 		_parent = ((struct spdk_blob_bs_dev *)(_blob->back_bs_dev))->blob;
6963 		if (_parent->parent_id != SPDK_BLOBID_INVALID) {
6964 			/* We must change the parent of the inflated blob */
6965 			spdk_bs_open_blob(_blob->bs, _parent->parent_id,
6966 					  bs_inflate_blob_set_parent_cpl, ctx);
6967 			return;
6968 		}
6969 
6970 		bs_blob_list_remove(_blob);
6971 		_blob->parent_id = SPDK_BLOBID_INVALID;
6972 		blob_back_bs_destroy(_blob);
6973 		_blob->back_bs_dev = bs_create_zeroes_dev();
6974 	}
6975 
6976 	/* Temporarily override md_ro flag for MD modification */
6977 	_blob->md_ro = false;
6978 	blob_remove_xattr(_blob, BLOB_SNAPSHOT, true);
6979 	_blob->state = SPDK_BLOB_STATE_DIRTY;
6980 
6981 	spdk_blob_sync_md(_blob, bs_clone_snapshot_origblob_cleanup, ctx);
6982 }
6983 
6984 /* Check if cluster needs allocation */
6985 static inline bool
6986 bs_cluster_needs_allocation(struct spdk_blob *blob, uint64_t cluster, bool allocate_all)
6987 {
6988 	struct spdk_blob_bs_dev *b;
6989 
6990 	assert(blob != NULL);
6991 
6992 	if (blob->active.clusters[cluster] != 0) {
6993 		/* Cluster is already allocated */
6994 		return false;
6995 	}
6996 
6997 	if (blob->parent_id == SPDK_BLOBID_INVALID) {
6998 		/* Blob have no parent blob */
6999 		return allocate_all;
7000 	}
7001 
7002 	if (blob->parent_id == SPDK_BLOBID_EXTERNAL_SNAPSHOT) {
7003 		return true;
7004 	}
7005 
7006 	b = (struct spdk_blob_bs_dev *)blob->back_bs_dev;
7007 	return (allocate_all || b->blob->active.clusters[cluster] != 0);
7008 }
7009 
7010 static void
7011 bs_inflate_blob_touch_next(void *cb_arg, int bserrno)
7012 {
7013 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
7014 	struct spdk_blob *_blob = ctx->original.blob;
7015 	struct spdk_bs_cpl cpl;
7016 	spdk_bs_user_op_t *op;
7017 	uint64_t offset;
7018 
7019 	if (bserrno != 0) {
7020 		bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
7021 		return;
7022 	}
7023 
7024 	for (; ctx->cluster < _blob->active.num_clusters; ctx->cluster++) {
7025 		if (bs_cluster_needs_allocation(_blob, ctx->cluster, ctx->allocate_all)) {
7026 			break;
7027 		}
7028 	}
7029 
7030 	if (ctx->cluster < _blob->active.num_clusters) {
7031 		offset = bs_cluster_to_lba(_blob->bs, ctx->cluster);
7032 
7033 		/* We may safely increment a cluster before copying */
7034 		ctx->cluster++;
7035 
7036 		/* Use a dummy 0B read as a context for cluster copy */
7037 		cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
7038 		cpl.u.blob_basic.cb_fn = bs_inflate_blob_touch_next;
7039 		cpl.u.blob_basic.cb_arg = ctx;
7040 
7041 		op = bs_user_op_alloc(ctx->channel, &cpl, SPDK_BLOB_READ, _blob,
7042 				      NULL, 0, offset, 0);
7043 		if (!op) {
7044 			bs_clone_snapshot_origblob_cleanup(ctx, -ENOMEM);
7045 			return;
7046 		}
7047 
7048 		bs_allocate_and_copy_cluster(_blob, ctx->channel, offset, op);
7049 	} else {
7050 		bs_inflate_blob_done(ctx);
7051 	}
7052 }
7053 
7054 static void
7055 bs_inflate_blob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
7056 {
7057 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
7058 	uint64_t clusters_needed;
7059 	uint64_t i;
7060 
7061 	if (bserrno != 0) {
7062 		bs_clone_snapshot_cleanup_finish(ctx, bserrno);
7063 		return;
7064 	}
7065 
7066 	ctx->original.blob = _blob;
7067 	ctx->original.md_ro = _blob->md_ro;
7068 
7069 	if (_blob->locked_operation_in_progress) {
7070 		SPDK_DEBUGLOG(blob, "Cannot inflate blob - another operation in progress\n");
7071 		ctx->bserrno = -EBUSY;
7072 		spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx);
7073 		return;
7074 	}
7075 
7076 	_blob->locked_operation_in_progress = true;
7077 
7078 	switch (_blob->parent_id) {
7079 	case SPDK_BLOBID_INVALID:
7080 		if (!ctx->allocate_all) {
7081 			/* This blob has no parent, so we cannot decouple it. */
7082 			SPDK_ERRLOG("Cannot decouple parent of blob with no parent.\n");
7083 			bs_clone_snapshot_origblob_cleanup(ctx, -EINVAL);
7084 			return;
7085 		}
7086 		break;
7087 	case SPDK_BLOBID_EXTERNAL_SNAPSHOT:
7088 		/*
7089 		 * It would be better to rely on back_bs_dev->is_zeroes(), to determine which
7090 		 * clusters require allocation. Until there is a blobstore consumer that
7091 		 * uses esnaps with an spdk_bs_dev that implements a useful is_zeroes() it is not
7092 		 * worth the effort.
7093 		 */
7094 		ctx->allocate_all = true;
7095 		break;
7096 	default:
7097 		break;
7098 	}
7099 
7100 	if (spdk_blob_is_thin_provisioned(_blob) == false) {
7101 		/* This is not thin provisioned blob. No need to inflate. */
7102 		bs_clone_snapshot_origblob_cleanup(ctx, 0);
7103 		return;
7104 	}
7105 
7106 	/* Do two passes - one to verify that we can obtain enough clusters
7107 	 * and another to actually claim them.
7108 	 */
7109 	clusters_needed = 0;
7110 	for (i = 0; i < _blob->active.num_clusters; i++) {
7111 		if (bs_cluster_needs_allocation(_blob, i, ctx->allocate_all)) {
7112 			clusters_needed++;
7113 		}
7114 	}
7115 
7116 	if (clusters_needed > _blob->bs->num_free_clusters) {
7117 		/* Not enough free clusters. Cannot satisfy the request. */
7118 		bs_clone_snapshot_origblob_cleanup(ctx, -ENOSPC);
7119 		return;
7120 	}
7121 
7122 	ctx->cluster = 0;
7123 	bs_inflate_blob_touch_next(ctx, 0);
7124 }
7125 
7126 static void
7127 bs_inflate_blob(struct spdk_blob_store *bs, struct spdk_io_channel *channel,
7128 		spdk_blob_id blobid, bool allocate_all, spdk_blob_op_complete cb_fn, void *cb_arg)
7129 {
7130 	struct spdk_clone_snapshot_ctx *ctx = calloc(1, sizeof(*ctx));
7131 
7132 	if (!ctx) {
7133 		cb_fn(cb_arg, -ENOMEM);
7134 		return;
7135 	}
7136 	ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
7137 	ctx->cpl.u.bs_basic.cb_fn = cb_fn;
7138 	ctx->cpl.u.bs_basic.cb_arg = cb_arg;
7139 	ctx->bserrno = 0;
7140 	ctx->original.id = blobid;
7141 	ctx->channel = channel;
7142 	ctx->allocate_all = allocate_all;
7143 
7144 	spdk_bs_open_blob(bs, ctx->original.id, bs_inflate_blob_open_cpl, ctx);
7145 }
7146 
7147 void
7148 spdk_bs_inflate_blob(struct spdk_blob_store *bs, struct spdk_io_channel *channel,
7149 		     spdk_blob_id blobid, spdk_blob_op_complete cb_fn, void *cb_arg)
7150 {
7151 	bs_inflate_blob(bs, channel, blobid, true, cb_fn, cb_arg);
7152 }
7153 
7154 void
7155 spdk_bs_blob_decouple_parent(struct spdk_blob_store *bs, struct spdk_io_channel *channel,
7156 			     spdk_blob_id blobid, spdk_blob_op_complete cb_fn, void *cb_arg)
7157 {
7158 	bs_inflate_blob(bs, channel, blobid, false, cb_fn, cb_arg);
7159 }
7160 /* END spdk_bs_inflate_blob */
7161 
7162 /* START spdk_bs_blob_shallow_copy */
7163 
7164 struct shallow_copy_ctx {
7165 	struct spdk_bs_cpl cpl;
7166 	int bserrno;
7167 
7168 	/* Blob source for copy */
7169 	struct spdk_blob_store *bs;
7170 	spdk_blob_id blobid;
7171 	struct spdk_blob *blob;
7172 	struct spdk_io_channel *blob_channel;
7173 
7174 	/* Destination device for copy */
7175 	struct spdk_bs_dev *ext_dev;
7176 	struct spdk_io_channel *ext_channel;
7177 
7178 	/* Current cluster for copy operation */
7179 	uint64_t cluster;
7180 
7181 	/* Buffer for blob reading */
7182 	uint8_t *read_buff;
7183 
7184 	/* Struct for external device writing */
7185 	struct spdk_bs_dev_cb_args ext_args;
7186 
7187 	/* Actual number of copied clusters */
7188 	uint64_t copied_clusters_count;
7189 
7190 	/* Status callback for updates about the ongoing operation */
7191 	spdk_blob_shallow_copy_status status_cb;
7192 
7193 	/* Argument passed to function status_cb */
7194 	void *status_cb_arg;
7195 };
7196 
7197 static void
7198 bs_shallow_copy_cleanup_finish(void *cb_arg, int bserrno)
7199 {
7200 	struct shallow_copy_ctx *ctx = cb_arg;
7201 	struct spdk_bs_cpl *cpl = &ctx->cpl;
7202 
7203 	if (bserrno != 0) {
7204 		SPDK_ERRLOG("blob 0x%" PRIx64 " shallow copy, cleanup error %d\n", ctx->blob->id, bserrno);
7205 		ctx->bserrno = bserrno;
7206 	}
7207 
7208 	ctx->ext_dev->destroy_channel(ctx->ext_dev, ctx->ext_channel);
7209 	spdk_free(ctx->read_buff);
7210 
7211 	cpl->u.blob_basic.cb_fn(cpl->u.blob_basic.cb_arg, ctx->bserrno);
7212 
7213 	free(ctx);
7214 }
7215 
7216 static void
7217 bs_shallow_copy_bdev_write_cpl(struct spdk_io_channel *channel, void *cb_arg, int bserrno)
7218 {
7219 	struct shallow_copy_ctx *ctx = cb_arg;
7220 	struct spdk_blob *_blob = ctx->blob;
7221 
7222 	if (bserrno != 0) {
7223 		SPDK_ERRLOG("blob 0x%" PRIx64 " shallow copy, ext dev write error %d\n", ctx->blob->id, bserrno);
7224 		ctx->bserrno = bserrno;
7225 		_blob->locked_operation_in_progress = false;
7226 		spdk_blob_close(_blob, bs_shallow_copy_cleanup_finish, ctx);
7227 		return;
7228 	}
7229 
7230 	ctx->cluster++;
7231 	if (ctx->status_cb) {
7232 		ctx->copied_clusters_count++;
7233 		ctx->status_cb(ctx->copied_clusters_count, ctx->status_cb_arg);
7234 	}
7235 
7236 	bs_shallow_copy_cluster_find_next(ctx);
7237 }
7238 
7239 static void
7240 bs_shallow_copy_blob_read_cpl(void *cb_arg, int bserrno)
7241 {
7242 	struct shallow_copy_ctx *ctx = cb_arg;
7243 	struct spdk_bs_dev *ext_dev = ctx->ext_dev;
7244 	struct spdk_blob *_blob = ctx->blob;
7245 
7246 	if (bserrno != 0) {
7247 		SPDK_ERRLOG("blob 0x%" PRIx64 " shallow copy, blob read error %d\n", ctx->blob->id, bserrno);
7248 		ctx->bserrno = bserrno;
7249 		_blob->locked_operation_in_progress = false;
7250 		spdk_blob_close(_blob, bs_shallow_copy_cleanup_finish, ctx);
7251 		return;
7252 	}
7253 
7254 	ctx->ext_args.channel = ctx->ext_channel;
7255 	ctx->ext_args.cb_fn = bs_shallow_copy_bdev_write_cpl;
7256 	ctx->ext_args.cb_arg = ctx;
7257 
7258 	ext_dev->write(ext_dev, ctx->ext_channel, ctx->read_buff,
7259 		       bs_cluster_to_lba(_blob->bs, ctx->cluster),
7260 		       bs_dev_byte_to_lba(_blob->bs->dev, _blob->bs->cluster_sz),
7261 		       &ctx->ext_args);
7262 }
7263 
7264 static void
7265 bs_shallow_copy_cluster_find_next(void *cb_arg)
7266 {
7267 	struct shallow_copy_ctx *ctx = cb_arg;
7268 	struct spdk_blob *_blob = ctx->blob;
7269 
7270 	while (ctx->cluster < _blob->active.num_clusters) {
7271 		if (_blob->active.clusters[ctx->cluster] != 0) {
7272 			break;
7273 		}
7274 
7275 		ctx->cluster++;
7276 	}
7277 
7278 	if (ctx->cluster < _blob->active.num_clusters) {
7279 		blob_request_submit_op_single(ctx->blob_channel, _blob, ctx->read_buff,
7280 					      bs_cluster_to_lba(_blob->bs, ctx->cluster),
7281 					      bs_dev_byte_to_lba(_blob->bs->dev, _blob->bs->cluster_sz),
7282 					      bs_shallow_copy_blob_read_cpl, ctx, SPDK_BLOB_READ);
7283 	} else {
7284 		_blob->locked_operation_in_progress = false;
7285 		spdk_blob_close(_blob, bs_shallow_copy_cleanup_finish, ctx);
7286 	}
7287 }
7288 
7289 static void
7290 bs_shallow_copy_blob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
7291 {
7292 	struct shallow_copy_ctx *ctx = cb_arg;
7293 	struct spdk_bs_dev *ext_dev = ctx->ext_dev;
7294 	uint32_t blob_block_size;
7295 	uint64_t blob_total_size;
7296 
7297 	if (bserrno != 0) {
7298 		SPDK_ERRLOG("Shallow copy blob open error %d\n", bserrno);
7299 		ctx->bserrno = bserrno;
7300 		bs_shallow_copy_cleanup_finish(ctx, 0);
7301 		return;
7302 	}
7303 
7304 	if (!spdk_blob_is_read_only(_blob)) {
7305 		SPDK_ERRLOG("blob 0x%" PRIx64 " shallow copy, blob must be read only\n", _blob->id);
7306 		ctx->bserrno = -EPERM;
7307 		spdk_blob_close(_blob, bs_shallow_copy_cleanup_finish, ctx);
7308 		return;
7309 	}
7310 
7311 	blob_block_size = _blob->bs->dev->blocklen;
7312 	blob_total_size = spdk_blob_get_num_clusters(_blob) * spdk_bs_get_cluster_size(_blob->bs);
7313 
7314 	if (blob_total_size > ext_dev->blockcnt * ext_dev->blocklen) {
7315 		SPDK_ERRLOG("blob 0x%" PRIx64 " shallow copy, external device must have at least blob size\n",
7316 			    _blob->id);
7317 		ctx->bserrno = -EINVAL;
7318 		spdk_blob_close(_blob, bs_shallow_copy_cleanup_finish, ctx);
7319 		return;
7320 	}
7321 
7322 	if (blob_block_size % ext_dev->blocklen != 0) {
7323 		SPDK_ERRLOG("blob 0x%" PRIx64 " shallow copy, external device block size is not compatible with \
7324 blobstore block size\n", _blob->id);
7325 		ctx->bserrno = -EINVAL;
7326 		spdk_blob_close(_blob, bs_shallow_copy_cleanup_finish, ctx);
7327 		return;
7328 	}
7329 
7330 	ctx->blob = _blob;
7331 
7332 	if (_blob->locked_operation_in_progress) {
7333 		SPDK_DEBUGLOG(blob, "blob 0x%" PRIx64 " shallow copy - another operation in progress\n", _blob->id);
7334 		ctx->bserrno = -EBUSY;
7335 		spdk_blob_close(_blob, bs_shallow_copy_cleanup_finish, ctx);
7336 		return;
7337 	}
7338 
7339 	_blob->locked_operation_in_progress = true;
7340 
7341 	ctx->cluster = 0;
7342 	bs_shallow_copy_cluster_find_next(ctx);
7343 }
7344 
7345 int
7346 spdk_bs_blob_shallow_copy(struct spdk_blob_store *bs, struct spdk_io_channel *channel,
7347 			  spdk_blob_id blobid, struct spdk_bs_dev *ext_dev,
7348 			  spdk_blob_shallow_copy_status status_cb_fn, void *status_cb_arg,
7349 			  spdk_blob_op_complete cb_fn, void *cb_arg)
7350 {
7351 	struct shallow_copy_ctx *ctx;
7352 	struct spdk_io_channel *ext_channel;
7353 
7354 	ctx = calloc(1, sizeof(*ctx));
7355 	if (!ctx) {
7356 		return -ENOMEM;
7357 	}
7358 
7359 	ctx->bs = bs;
7360 	ctx->blobid = blobid;
7361 	ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
7362 	ctx->cpl.u.bs_basic.cb_fn = cb_fn;
7363 	ctx->cpl.u.bs_basic.cb_arg = cb_arg;
7364 	ctx->bserrno = 0;
7365 	ctx->blob_channel = channel;
7366 	ctx->status_cb = status_cb_fn;
7367 	ctx->status_cb_arg = status_cb_arg;
7368 	ctx->read_buff = spdk_malloc(bs->cluster_sz, bs->dev->blocklen, NULL,
7369 				     SPDK_ENV_LCORE_ID_ANY, SPDK_MALLOC_DMA);
7370 	if (!ctx->read_buff) {
7371 		free(ctx);
7372 		return -ENOMEM;
7373 	}
7374 
7375 	ext_channel = ext_dev->create_channel(ext_dev);
7376 	if (!ext_channel) {
7377 		spdk_free(ctx->read_buff);
7378 		free(ctx);
7379 		return -ENOMEM;
7380 	}
7381 	ctx->ext_dev = ext_dev;
7382 	ctx->ext_channel = ext_channel;
7383 
7384 	spdk_bs_open_blob(ctx->bs, ctx->blobid, bs_shallow_copy_blob_open_cpl, ctx);
7385 
7386 	return 0;
7387 }
7388 /* END spdk_bs_blob_shallow_copy */
7389 
7390 /* START spdk_blob_resize */
7391 struct spdk_bs_resize_ctx {
7392 	spdk_blob_op_complete cb_fn;
7393 	void *cb_arg;
7394 	struct spdk_blob *blob;
7395 	uint64_t sz;
7396 	int rc;
7397 };
7398 
7399 static void
7400 bs_resize_unfreeze_cpl(void *cb_arg, int rc)
7401 {
7402 	struct spdk_bs_resize_ctx *ctx = (struct spdk_bs_resize_ctx *)cb_arg;
7403 
7404 	if (rc != 0) {
7405 		SPDK_ERRLOG("Unfreeze failed, rc=%d\n", rc);
7406 	}
7407 
7408 	if (ctx->rc != 0) {
7409 		SPDK_ERRLOG("Unfreeze failed, ctx->rc=%d\n", ctx->rc);
7410 		rc = ctx->rc;
7411 	}
7412 
7413 	ctx->blob->locked_operation_in_progress = false;
7414 
7415 	ctx->cb_fn(ctx->cb_arg, rc);
7416 	free(ctx);
7417 }
7418 
7419 static void
7420 bs_resize_freeze_cpl(void *cb_arg, int rc)
7421 {
7422 	struct spdk_bs_resize_ctx *ctx = (struct spdk_bs_resize_ctx *)cb_arg;
7423 
7424 	if (rc != 0) {
7425 		ctx->blob->locked_operation_in_progress = false;
7426 		ctx->cb_fn(ctx->cb_arg, rc);
7427 		free(ctx);
7428 		return;
7429 	}
7430 
7431 	ctx->rc = blob_resize(ctx->blob, ctx->sz);
7432 
7433 	blob_unfreeze_io(ctx->blob, bs_resize_unfreeze_cpl, ctx);
7434 }
7435 
7436 void
7437 spdk_blob_resize(struct spdk_blob *blob, uint64_t sz, spdk_blob_op_complete cb_fn, void *cb_arg)
7438 {
7439 	struct spdk_bs_resize_ctx *ctx;
7440 
7441 	blob_verify_md_op(blob);
7442 
7443 	SPDK_DEBUGLOG(blob, "Resizing blob 0x%" PRIx64 " to %" PRIu64 " clusters\n", blob->id, sz);
7444 
7445 	if (blob->md_ro) {
7446 		cb_fn(cb_arg, -EPERM);
7447 		return;
7448 	}
7449 
7450 	if (sz == blob->active.num_clusters) {
7451 		cb_fn(cb_arg, 0);
7452 		return;
7453 	}
7454 
7455 	if (blob->locked_operation_in_progress) {
7456 		cb_fn(cb_arg, -EBUSY);
7457 		return;
7458 	}
7459 
7460 	ctx = calloc(1, sizeof(*ctx));
7461 	if (!ctx) {
7462 		cb_fn(cb_arg, -ENOMEM);
7463 		return;
7464 	}
7465 
7466 	blob->locked_operation_in_progress = true;
7467 	ctx->cb_fn = cb_fn;
7468 	ctx->cb_arg = cb_arg;
7469 	ctx->blob = blob;
7470 	ctx->sz = sz;
7471 	blob_freeze_io(blob, bs_resize_freeze_cpl, ctx);
7472 }
7473 
7474 /* END spdk_blob_resize */
7475 
7476 
7477 /* START spdk_bs_delete_blob */
7478 
7479 static void
7480 bs_delete_close_cpl(void *cb_arg, int bserrno)
7481 {
7482 	spdk_bs_sequence_t *seq = cb_arg;
7483 
7484 	bs_sequence_finish(seq, bserrno);
7485 }
7486 
7487 static void
7488 bs_delete_persist_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
7489 {
7490 	struct spdk_blob *blob = cb_arg;
7491 
7492 	if (bserrno != 0) {
7493 		/*
7494 		 * We already removed this blob from the blobstore tailq, so
7495 		 *  we need to free it here since this is the last reference
7496 		 *  to it.
7497 		 */
7498 		blob_free(blob);
7499 		bs_delete_close_cpl(seq, bserrno);
7500 		return;
7501 	}
7502 
7503 	/*
7504 	 * This will immediately decrement the ref_count and call
7505 	 *  the completion routine since the metadata state is clean.
7506 	 *  By calling spdk_blob_close, we reduce the number of call
7507 	 *  points into code that touches the blob->open_ref count
7508 	 *  and the blobstore's blob list.
7509 	 */
7510 	spdk_blob_close(blob, bs_delete_close_cpl, seq);
7511 }
7512 
7513 struct delete_snapshot_ctx {
7514 	struct spdk_blob_list *parent_snapshot_entry;
7515 	struct spdk_blob *snapshot;
7516 	struct spdk_blob_md_page *page;
7517 	bool snapshot_md_ro;
7518 	struct spdk_blob *clone;
7519 	bool clone_md_ro;
7520 	spdk_blob_op_with_handle_complete cb_fn;
7521 	void *cb_arg;
7522 	int bserrno;
7523 	uint32_t next_extent_page;
7524 };
7525 
7526 static void
7527 delete_blob_cleanup_finish(void *cb_arg, int bserrno)
7528 {
7529 	struct delete_snapshot_ctx *ctx = cb_arg;
7530 
7531 	if (bserrno != 0) {
7532 		SPDK_ERRLOG("Snapshot cleanup error %d\n", bserrno);
7533 	}
7534 
7535 	assert(ctx != NULL);
7536 
7537 	if (bserrno != 0 && ctx->bserrno == 0) {
7538 		ctx->bserrno = bserrno;
7539 	}
7540 
7541 	ctx->cb_fn(ctx->cb_arg, ctx->snapshot, ctx->bserrno);
7542 	spdk_free(ctx->page);
7543 	free(ctx);
7544 }
7545 
7546 static void
7547 delete_snapshot_cleanup_snapshot(void *cb_arg, int bserrno)
7548 {
7549 	struct delete_snapshot_ctx *ctx = cb_arg;
7550 
7551 	if (bserrno != 0) {
7552 		ctx->bserrno = bserrno;
7553 		SPDK_ERRLOG("Clone cleanup error %d\n", bserrno);
7554 	}
7555 
7556 	if (ctx->bserrno != 0) {
7557 		assert(blob_lookup(ctx->snapshot->bs, ctx->snapshot->id) == NULL);
7558 		RB_INSERT(spdk_blob_tree, &ctx->snapshot->bs->open_blobs, ctx->snapshot);
7559 		spdk_bit_array_set(ctx->snapshot->bs->open_blobids, ctx->snapshot->id);
7560 	}
7561 
7562 	ctx->snapshot->locked_operation_in_progress = false;
7563 	ctx->snapshot->md_ro = ctx->snapshot_md_ro;
7564 
7565 	spdk_blob_close(ctx->snapshot, delete_blob_cleanup_finish, ctx);
7566 }
7567 
7568 static void
7569 delete_snapshot_cleanup_clone(void *cb_arg, int bserrno)
7570 {
7571 	struct delete_snapshot_ctx *ctx = cb_arg;
7572 
7573 	ctx->clone->locked_operation_in_progress = false;
7574 	ctx->clone->md_ro = ctx->clone_md_ro;
7575 
7576 	spdk_blob_close(ctx->clone, delete_snapshot_cleanup_snapshot, ctx);
7577 }
7578 
7579 static void
7580 delete_snapshot_unfreeze_cpl(void *cb_arg, int bserrno)
7581 {
7582 	struct delete_snapshot_ctx *ctx = cb_arg;
7583 
7584 	if (bserrno) {
7585 		ctx->bserrno = bserrno;
7586 		delete_snapshot_cleanup_clone(ctx, 0);
7587 		return;
7588 	}
7589 
7590 	ctx->clone->locked_operation_in_progress = false;
7591 	spdk_blob_close(ctx->clone, delete_blob_cleanup_finish, ctx);
7592 }
7593 
7594 static void
7595 delete_snapshot_sync_snapshot_cpl(void *cb_arg, int bserrno)
7596 {
7597 	struct delete_snapshot_ctx *ctx = cb_arg;
7598 	struct spdk_blob_list *parent_snapshot_entry = NULL;
7599 	struct spdk_blob_list *snapshot_entry = NULL;
7600 	struct spdk_blob_list *clone_entry = NULL;
7601 	struct spdk_blob_list *snapshot_clone_entry = NULL;
7602 
7603 	if (bserrno) {
7604 		SPDK_ERRLOG("Failed to sync MD on blob\n");
7605 		ctx->bserrno = bserrno;
7606 		delete_snapshot_cleanup_clone(ctx, 0);
7607 		return;
7608 	}
7609 
7610 	/* Get snapshot entry for the snapshot we want to remove */
7611 	snapshot_entry = bs_get_snapshot_entry(ctx->snapshot->bs, ctx->snapshot->id);
7612 
7613 	assert(snapshot_entry != NULL);
7614 
7615 	/* Remove clone entry in this snapshot (at this point there can be only one clone) */
7616 	clone_entry = TAILQ_FIRST(&snapshot_entry->clones);
7617 	assert(clone_entry != NULL);
7618 	TAILQ_REMOVE(&snapshot_entry->clones, clone_entry, link);
7619 	snapshot_entry->clone_count--;
7620 	assert(TAILQ_EMPTY(&snapshot_entry->clones));
7621 
7622 	switch (ctx->snapshot->parent_id) {
7623 	case SPDK_BLOBID_INVALID:
7624 	case SPDK_BLOBID_EXTERNAL_SNAPSHOT:
7625 		/* No parent snapshot - just remove clone entry */
7626 		free(clone_entry);
7627 		break;
7628 	default:
7629 		/* This snapshot is at the same time a clone of another snapshot - we need to
7630 		 * update parent snapshot (remove current clone, add new one inherited from
7631 		 * the snapshot that is being removed) */
7632 
7633 		/* Get snapshot entry for parent snapshot and clone entry within that snapshot for
7634 		 * snapshot that we are removing */
7635 		blob_get_snapshot_and_clone_entries(ctx->snapshot, &parent_snapshot_entry,
7636 						    &snapshot_clone_entry);
7637 
7638 		/* Switch clone entry in parent snapshot */
7639 		TAILQ_INSERT_TAIL(&parent_snapshot_entry->clones, clone_entry, link);
7640 		TAILQ_REMOVE(&parent_snapshot_entry->clones, snapshot_clone_entry, link);
7641 		free(snapshot_clone_entry);
7642 	}
7643 
7644 	/* Restore md_ro flags */
7645 	ctx->clone->md_ro = ctx->clone_md_ro;
7646 	ctx->snapshot->md_ro = ctx->snapshot_md_ro;
7647 
7648 	blob_unfreeze_io(ctx->clone, delete_snapshot_unfreeze_cpl, ctx);
7649 }
7650 
7651 static void
7652 delete_snapshot_sync_clone_cpl(void *cb_arg, int bserrno)
7653 {
7654 	struct delete_snapshot_ctx *ctx = cb_arg;
7655 	uint64_t i;
7656 
7657 	ctx->snapshot->md_ro = false;
7658 
7659 	if (bserrno) {
7660 		SPDK_ERRLOG("Failed to sync MD on clone\n");
7661 		ctx->bserrno = bserrno;
7662 
7663 		/* Restore snapshot to previous state */
7664 		bserrno = blob_remove_xattr(ctx->snapshot, SNAPSHOT_PENDING_REMOVAL, true);
7665 		if (bserrno != 0) {
7666 			delete_snapshot_cleanup_clone(ctx, bserrno);
7667 			return;
7668 		}
7669 
7670 		spdk_blob_sync_md(ctx->snapshot, delete_snapshot_cleanup_clone, ctx);
7671 		return;
7672 	}
7673 
7674 	/* Clear cluster map entries for snapshot */
7675 	for (i = 0; i < ctx->snapshot->active.num_clusters && i < ctx->clone->active.num_clusters; i++) {
7676 		if (ctx->clone->active.clusters[i] == ctx->snapshot->active.clusters[i]) {
7677 			if (ctx->snapshot->active.clusters[i] != 0) {
7678 				ctx->snapshot->active.num_allocated_clusters--;
7679 			}
7680 			ctx->snapshot->active.clusters[i] = 0;
7681 		}
7682 	}
7683 	for (i = 0; i < ctx->snapshot->active.num_extent_pages &&
7684 	     i < ctx->clone->active.num_extent_pages; i++) {
7685 		if (ctx->clone->active.extent_pages[i] == ctx->snapshot->active.extent_pages[i]) {
7686 			ctx->snapshot->active.extent_pages[i] = 0;
7687 		}
7688 	}
7689 
7690 	blob_set_thin_provision(ctx->snapshot);
7691 	ctx->snapshot->state = SPDK_BLOB_STATE_DIRTY;
7692 
7693 	if (ctx->parent_snapshot_entry != NULL) {
7694 		ctx->snapshot->back_bs_dev = NULL;
7695 	}
7696 
7697 	spdk_blob_sync_md(ctx->snapshot, delete_snapshot_sync_snapshot_cpl, ctx);
7698 }
7699 
7700 static void
7701 delete_snapshot_update_extent_pages_cpl(struct delete_snapshot_ctx *ctx)
7702 {
7703 	int bserrno;
7704 
7705 	/* Delete old backing bs_dev from clone (related to snapshot that will be removed) */
7706 	blob_back_bs_destroy(ctx->clone);
7707 
7708 	/* Set/remove snapshot xattr and switch parent ID and backing bs_dev on clone... */
7709 	if (ctx->snapshot->parent_id == SPDK_BLOBID_EXTERNAL_SNAPSHOT) {
7710 		bserrno = bs_snapshot_copy_xattr(ctx->clone, ctx->snapshot,
7711 						 BLOB_EXTERNAL_SNAPSHOT_ID);
7712 		if (bserrno != 0) {
7713 			ctx->bserrno = bserrno;
7714 
7715 			/* Restore snapshot to previous state */
7716 			bserrno = blob_remove_xattr(ctx->snapshot, SNAPSHOT_PENDING_REMOVAL, true);
7717 			if (bserrno != 0) {
7718 				delete_snapshot_cleanup_clone(ctx, bserrno);
7719 				return;
7720 			}
7721 
7722 			spdk_blob_sync_md(ctx->snapshot, delete_snapshot_cleanup_clone, ctx);
7723 			return;
7724 		}
7725 		ctx->clone->parent_id = SPDK_BLOBID_EXTERNAL_SNAPSHOT;
7726 		ctx->clone->back_bs_dev = ctx->snapshot->back_bs_dev;
7727 		/* Do not delete the external snapshot along with this snapshot */
7728 		ctx->snapshot->back_bs_dev = NULL;
7729 		ctx->clone->invalid_flags |= SPDK_BLOB_EXTERNAL_SNAPSHOT;
7730 	} else if (ctx->parent_snapshot_entry != NULL) {
7731 		/* ...to parent snapshot */
7732 		ctx->clone->parent_id = ctx->parent_snapshot_entry->id;
7733 		ctx->clone->back_bs_dev = ctx->snapshot->back_bs_dev;
7734 		blob_set_xattr(ctx->clone, BLOB_SNAPSHOT, &ctx->parent_snapshot_entry->id,
7735 			       sizeof(spdk_blob_id),
7736 			       true);
7737 	} else {
7738 		/* ...to blobid invalid and zeroes dev */
7739 		ctx->clone->parent_id = SPDK_BLOBID_INVALID;
7740 		ctx->clone->back_bs_dev = bs_create_zeroes_dev();
7741 		blob_remove_xattr(ctx->clone, BLOB_SNAPSHOT, true);
7742 	}
7743 
7744 	spdk_blob_sync_md(ctx->clone, delete_snapshot_sync_clone_cpl, ctx);
7745 }
7746 
7747 static void
7748 delete_snapshot_update_extent_pages(void *cb_arg, int bserrno)
7749 {
7750 	struct delete_snapshot_ctx *ctx = cb_arg;
7751 	uint32_t *extent_page;
7752 	uint64_t i;
7753 
7754 	for (i = ctx->next_extent_page; i < ctx->snapshot->active.num_extent_pages &&
7755 	     i < ctx->clone->active.num_extent_pages; i++) {
7756 		if (ctx->snapshot->active.extent_pages[i] == 0) {
7757 			/* No extent page to use from snapshot */
7758 			continue;
7759 		}
7760 
7761 		extent_page = &ctx->clone->active.extent_pages[i];
7762 		if (*extent_page == 0) {
7763 			/* Copy extent page from snapshot when clone did not have a matching one */
7764 			*extent_page = ctx->snapshot->active.extent_pages[i];
7765 			continue;
7766 		}
7767 
7768 		/* Clone and snapshot both contain partially filled matching extent pages.
7769 		 * Update the clone extent page in place with cluster map containing the mix of both. */
7770 		ctx->next_extent_page = i + 1;
7771 		memset(ctx->page, 0, SPDK_BS_PAGE_SIZE);
7772 
7773 		blob_write_extent_page(ctx->clone, *extent_page, i * SPDK_EXTENTS_PER_EP, ctx->page,
7774 				       delete_snapshot_update_extent_pages, ctx);
7775 		return;
7776 	}
7777 	delete_snapshot_update_extent_pages_cpl(ctx);
7778 }
7779 
7780 static void
7781 delete_snapshot_sync_snapshot_xattr_cpl(void *cb_arg, int bserrno)
7782 {
7783 	struct delete_snapshot_ctx *ctx = cb_arg;
7784 	uint64_t i;
7785 
7786 	/* Temporarily override md_ro flag for clone for MD modification */
7787 	ctx->clone_md_ro = ctx->clone->md_ro;
7788 	ctx->clone->md_ro = false;
7789 
7790 	if (bserrno) {
7791 		SPDK_ERRLOG("Failed to sync MD with xattr on blob\n");
7792 		ctx->bserrno = bserrno;
7793 		delete_snapshot_cleanup_clone(ctx, 0);
7794 		return;
7795 	}
7796 
7797 	/* Copy snapshot map to clone map (only unallocated clusters in clone) */
7798 	for (i = 0; i < ctx->snapshot->active.num_clusters && i < ctx->clone->active.num_clusters; i++) {
7799 		if (ctx->clone->active.clusters[i] == 0) {
7800 			ctx->clone->active.clusters[i] = ctx->snapshot->active.clusters[i];
7801 			if (ctx->clone->active.clusters[i] != 0) {
7802 				ctx->clone->active.num_allocated_clusters++;
7803 			}
7804 		}
7805 	}
7806 	ctx->next_extent_page = 0;
7807 	delete_snapshot_update_extent_pages(ctx, 0);
7808 }
7809 
7810 static void
7811 delete_snapshot_esnap_channels_destroyed_cb(void *cb_arg, struct spdk_blob *blob, int bserrno)
7812 {
7813 	struct delete_snapshot_ctx *ctx = cb_arg;
7814 
7815 	if (bserrno != 0) {
7816 		SPDK_ERRLOG("blob 0x%" PRIx64 ": failed to destroy esnap channels: %d\n",
7817 			    blob->id, bserrno);
7818 		/* That error should not stop us from syncing metadata. */
7819 	}
7820 
7821 	spdk_blob_sync_md(ctx->snapshot, delete_snapshot_sync_snapshot_xattr_cpl, ctx);
7822 }
7823 
7824 static void
7825 delete_snapshot_freeze_io_cb(void *cb_arg, int bserrno)
7826 {
7827 	struct delete_snapshot_ctx *ctx = cb_arg;
7828 
7829 	if (bserrno) {
7830 		SPDK_ERRLOG("Failed to freeze I/O on clone\n");
7831 		ctx->bserrno = bserrno;
7832 		delete_snapshot_cleanup_clone(ctx, 0);
7833 		return;
7834 	}
7835 
7836 	/* Temporarily override md_ro flag for snapshot for MD modification */
7837 	ctx->snapshot_md_ro = ctx->snapshot->md_ro;
7838 	ctx->snapshot->md_ro = false;
7839 
7840 	/* Mark blob as pending for removal for power failure safety, use clone id for recovery */
7841 	ctx->bserrno = blob_set_xattr(ctx->snapshot, SNAPSHOT_PENDING_REMOVAL, &ctx->clone->id,
7842 				      sizeof(spdk_blob_id), true);
7843 	if (ctx->bserrno != 0) {
7844 		delete_snapshot_cleanup_clone(ctx, 0);
7845 		return;
7846 	}
7847 
7848 	if (blob_is_esnap_clone(ctx->snapshot)) {
7849 		blob_esnap_destroy_bs_dev_channels(ctx->snapshot, false,
7850 						   delete_snapshot_esnap_channels_destroyed_cb,
7851 						   ctx);
7852 		return;
7853 	}
7854 
7855 	spdk_blob_sync_md(ctx->snapshot, delete_snapshot_sync_snapshot_xattr_cpl, ctx);
7856 }
7857 
7858 static void
7859 delete_snapshot_open_clone_cb(void *cb_arg, struct spdk_blob *clone, int bserrno)
7860 {
7861 	struct delete_snapshot_ctx *ctx = cb_arg;
7862 
7863 	if (bserrno) {
7864 		SPDK_ERRLOG("Failed to open clone\n");
7865 		ctx->bserrno = bserrno;
7866 		delete_snapshot_cleanup_snapshot(ctx, 0);
7867 		return;
7868 	}
7869 
7870 	ctx->clone = clone;
7871 
7872 	if (clone->locked_operation_in_progress) {
7873 		SPDK_DEBUGLOG(blob, "Cannot remove blob - another operation in progress on its clone\n");
7874 		ctx->bserrno = -EBUSY;
7875 		spdk_blob_close(ctx->clone, delete_snapshot_cleanup_snapshot, ctx);
7876 		return;
7877 	}
7878 
7879 	clone->locked_operation_in_progress = true;
7880 
7881 	blob_freeze_io(clone, delete_snapshot_freeze_io_cb, ctx);
7882 }
7883 
7884 static void
7885 update_clone_on_snapshot_deletion(struct spdk_blob *snapshot, struct delete_snapshot_ctx *ctx)
7886 {
7887 	struct spdk_blob_list *snapshot_entry = NULL;
7888 	struct spdk_blob_list *clone_entry = NULL;
7889 	struct spdk_blob_list *snapshot_clone_entry = NULL;
7890 
7891 	/* Get snapshot entry for the snapshot we want to remove */
7892 	snapshot_entry = bs_get_snapshot_entry(snapshot->bs, snapshot->id);
7893 
7894 	assert(snapshot_entry != NULL);
7895 
7896 	/* Get clone of the snapshot (at this point there can be only one clone) */
7897 	clone_entry = TAILQ_FIRST(&snapshot_entry->clones);
7898 	assert(snapshot_entry->clone_count == 1);
7899 	assert(clone_entry != NULL);
7900 
7901 	/* Get snapshot entry for parent snapshot and clone entry within that snapshot for
7902 	 * snapshot that we are removing */
7903 	blob_get_snapshot_and_clone_entries(snapshot, &ctx->parent_snapshot_entry,
7904 					    &snapshot_clone_entry);
7905 
7906 	spdk_bs_open_blob(snapshot->bs, clone_entry->id, delete_snapshot_open_clone_cb, ctx);
7907 }
7908 
7909 static void
7910 bs_delete_blob_finish(void *cb_arg, struct spdk_blob *blob, int bserrno)
7911 {
7912 	spdk_bs_sequence_t *seq = cb_arg;
7913 	struct spdk_blob_list *snapshot_entry = NULL;
7914 	uint32_t page_num;
7915 
7916 	if (bserrno) {
7917 		SPDK_ERRLOG("Failed to remove blob\n");
7918 		bs_sequence_finish(seq, bserrno);
7919 		return;
7920 	}
7921 
7922 	/* Remove snapshot from the list */
7923 	snapshot_entry = bs_get_snapshot_entry(blob->bs, blob->id);
7924 	if (snapshot_entry != NULL) {
7925 		TAILQ_REMOVE(&blob->bs->snapshots, snapshot_entry, link);
7926 		free(snapshot_entry);
7927 	}
7928 
7929 	page_num = bs_blobid_to_page(blob->id);
7930 	spdk_bit_array_clear(blob->bs->used_blobids, page_num);
7931 	blob->state = SPDK_BLOB_STATE_DIRTY;
7932 	blob->active.num_pages = 0;
7933 	blob_resize(blob, 0);
7934 
7935 	blob_persist(seq, blob, bs_delete_persist_cpl, blob);
7936 }
7937 
7938 static int
7939 bs_is_blob_deletable(struct spdk_blob *blob, bool *update_clone)
7940 {
7941 	struct spdk_blob_list *snapshot_entry = NULL;
7942 	struct spdk_blob_list *clone_entry = NULL;
7943 	struct spdk_blob *clone = NULL;
7944 	bool has_one_clone = false;
7945 
7946 	/* Check if this is a snapshot with clones */
7947 	snapshot_entry = bs_get_snapshot_entry(blob->bs, blob->id);
7948 	if (snapshot_entry != NULL) {
7949 		if (snapshot_entry->clone_count > 1) {
7950 			SPDK_ERRLOG("Cannot remove snapshot with more than one clone\n");
7951 			return -EBUSY;
7952 		} else if (snapshot_entry->clone_count == 1) {
7953 			has_one_clone = true;
7954 		}
7955 	}
7956 
7957 	/* Check if someone has this blob open (besides this delete context):
7958 	 * - open_ref = 1 - only this context opened blob, so it is ok to remove it
7959 	 * - open_ref <= 2 && has_one_clone = true - clone is holding snapshot
7960 	 *	and that is ok, because we will update it accordingly */
7961 	if (blob->open_ref <= 2 && has_one_clone) {
7962 		clone_entry = TAILQ_FIRST(&snapshot_entry->clones);
7963 		assert(clone_entry != NULL);
7964 		clone = blob_lookup(blob->bs, clone_entry->id);
7965 
7966 		if (blob->open_ref == 2 && clone == NULL) {
7967 			/* Clone is closed and someone else opened this blob */
7968 			SPDK_ERRLOG("Cannot remove snapshot because it is open\n");
7969 			return -EBUSY;
7970 		}
7971 
7972 		*update_clone = true;
7973 		return 0;
7974 	}
7975 
7976 	if (blob->open_ref > 1) {
7977 		SPDK_ERRLOG("Cannot remove snapshot because it is open\n");
7978 		return -EBUSY;
7979 	}
7980 
7981 	assert(has_one_clone == false);
7982 	*update_clone = false;
7983 	return 0;
7984 }
7985 
7986 static void
7987 bs_delete_enomem_close_cpl(void *cb_arg, int bserrno)
7988 {
7989 	spdk_bs_sequence_t *seq = cb_arg;
7990 
7991 	bs_sequence_finish(seq, -ENOMEM);
7992 }
7993 
7994 static void
7995 bs_delete_open_cpl(void *cb_arg, struct spdk_blob *blob, int bserrno)
7996 {
7997 	spdk_bs_sequence_t *seq = cb_arg;
7998 	struct delete_snapshot_ctx *ctx;
7999 	bool update_clone = false;
8000 
8001 	if (bserrno != 0) {
8002 		bs_sequence_finish(seq, bserrno);
8003 		return;
8004 	}
8005 
8006 	blob_verify_md_op(blob);
8007 
8008 	ctx = calloc(1, sizeof(*ctx));
8009 	if (ctx == NULL) {
8010 		spdk_blob_close(blob, bs_delete_enomem_close_cpl, seq);
8011 		return;
8012 	}
8013 
8014 	ctx->snapshot = blob;
8015 	ctx->cb_fn = bs_delete_blob_finish;
8016 	ctx->cb_arg = seq;
8017 
8018 	/* Check if blob can be removed and if it is a snapshot with clone on top of it */
8019 	ctx->bserrno = bs_is_blob_deletable(blob, &update_clone);
8020 	if (ctx->bserrno) {
8021 		spdk_blob_close(blob, delete_blob_cleanup_finish, ctx);
8022 		return;
8023 	}
8024 
8025 	if (blob->locked_operation_in_progress) {
8026 		SPDK_DEBUGLOG(blob, "Cannot remove blob - another operation in progress\n");
8027 		ctx->bserrno = -EBUSY;
8028 		spdk_blob_close(blob, delete_blob_cleanup_finish, ctx);
8029 		return;
8030 	}
8031 
8032 	blob->locked_operation_in_progress = true;
8033 
8034 	/*
8035 	 * Remove the blob from the blob_store list now, to ensure it does not
8036 	 *  get returned after this point by blob_lookup().
8037 	 */
8038 	spdk_bit_array_clear(blob->bs->open_blobids, blob->id);
8039 	RB_REMOVE(spdk_blob_tree, &blob->bs->open_blobs, blob);
8040 
8041 	if (update_clone) {
8042 		ctx->page = spdk_zmalloc(SPDK_BS_PAGE_SIZE, 0, NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
8043 		if (!ctx->page) {
8044 			ctx->bserrno = -ENOMEM;
8045 			spdk_blob_close(blob, delete_blob_cleanup_finish, ctx);
8046 			return;
8047 		}
8048 		/* This blob is a snapshot with active clone - update clone first */
8049 		update_clone_on_snapshot_deletion(blob, ctx);
8050 	} else {
8051 		/* This blob does not have any clones - just remove it */
8052 		bs_blob_list_remove(blob);
8053 		bs_delete_blob_finish(seq, blob, 0);
8054 		free(ctx);
8055 	}
8056 }
8057 
8058 void
8059 spdk_bs_delete_blob(struct spdk_blob_store *bs, spdk_blob_id blobid,
8060 		    spdk_blob_op_complete cb_fn, void *cb_arg)
8061 {
8062 	struct spdk_bs_cpl	cpl;
8063 	spdk_bs_sequence_t	*seq;
8064 
8065 	SPDK_DEBUGLOG(blob, "Deleting blob 0x%" PRIx64 "\n", blobid);
8066 
8067 	assert(spdk_get_thread() == bs->md_thread);
8068 
8069 	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
8070 	cpl.u.blob_basic.cb_fn = cb_fn;
8071 	cpl.u.blob_basic.cb_arg = cb_arg;
8072 
8073 	seq = bs_sequence_start_bs(bs->md_channel, &cpl);
8074 	if (!seq) {
8075 		cb_fn(cb_arg, -ENOMEM);
8076 		return;
8077 	}
8078 
8079 	spdk_bs_open_blob(bs, blobid, bs_delete_open_cpl, seq);
8080 }
8081 
8082 /* END spdk_bs_delete_blob */
8083 
8084 /* START spdk_bs_open_blob */
8085 
8086 static void
8087 bs_open_blob_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
8088 {
8089 	struct spdk_blob *blob = cb_arg;
8090 	struct spdk_blob *existing;
8091 
8092 	if (bserrno != 0) {
8093 		blob_free(blob);
8094 		seq->cpl.u.blob_handle.blob = NULL;
8095 		bs_sequence_finish(seq, bserrno);
8096 		return;
8097 	}
8098 
8099 	existing = blob_lookup(blob->bs, blob->id);
8100 	if (existing) {
8101 		blob_free(blob);
8102 		existing->open_ref++;
8103 		seq->cpl.u.blob_handle.blob = existing;
8104 		bs_sequence_finish(seq, 0);
8105 		return;
8106 	}
8107 
8108 	blob->open_ref++;
8109 
8110 	spdk_bit_array_set(blob->bs->open_blobids, blob->id);
8111 	RB_INSERT(spdk_blob_tree, &blob->bs->open_blobs, blob);
8112 
8113 	bs_sequence_finish(seq, bserrno);
8114 }
8115 
8116 static inline void
8117 blob_open_opts_copy(const struct spdk_blob_open_opts *src, struct spdk_blob_open_opts *dst)
8118 {
8119 #define FIELD_OK(field) \
8120         offsetof(struct spdk_blob_open_opts, field) + sizeof(src->field) <= src->opts_size
8121 
8122 #define SET_FIELD(field) \
8123         if (FIELD_OK(field)) { \
8124                 dst->field = src->field; \
8125         } \
8126 
8127 	SET_FIELD(clear_method);
8128 	SET_FIELD(esnap_ctx);
8129 
8130 	dst->opts_size = src->opts_size;
8131 
8132 	/* You should not remove this statement, but need to update the assert statement
8133 	 * if you add a new field, and also add a corresponding SET_FIELD statement */
8134 	SPDK_STATIC_ASSERT(sizeof(struct spdk_blob_open_opts) == 24, "Incorrect size");
8135 
8136 #undef FIELD_OK
8137 #undef SET_FIELD
8138 }
8139 
8140 static void
8141 bs_open_blob(struct spdk_blob_store *bs,
8142 	     spdk_blob_id blobid,
8143 	     struct spdk_blob_open_opts *opts,
8144 	     spdk_blob_op_with_handle_complete cb_fn,
8145 	     void *cb_arg)
8146 {
8147 	struct spdk_blob		*blob;
8148 	struct spdk_bs_cpl		cpl;
8149 	struct spdk_blob_open_opts	opts_local;
8150 	spdk_bs_sequence_t		*seq;
8151 	uint32_t			page_num;
8152 
8153 	SPDK_DEBUGLOG(blob, "Opening blob 0x%" PRIx64 "\n", blobid);
8154 	assert(spdk_get_thread() == bs->md_thread);
8155 
8156 	page_num = bs_blobid_to_page(blobid);
8157 	if (spdk_bit_array_get(bs->used_blobids, page_num) == false) {
8158 		/* Invalid blobid */
8159 		cb_fn(cb_arg, NULL, -ENOENT);
8160 		return;
8161 	}
8162 
8163 	blob = blob_lookup(bs, blobid);
8164 	if (blob) {
8165 		blob->open_ref++;
8166 		cb_fn(cb_arg, blob, 0);
8167 		return;
8168 	}
8169 
8170 	blob = blob_alloc(bs, blobid);
8171 	if (!blob) {
8172 		cb_fn(cb_arg, NULL, -ENOMEM);
8173 		return;
8174 	}
8175 
8176 	spdk_blob_open_opts_init(&opts_local, sizeof(opts_local));
8177 	if (opts) {
8178 		blob_open_opts_copy(opts, &opts_local);
8179 	}
8180 
8181 	blob->clear_method = opts_local.clear_method;
8182 
8183 	cpl.type = SPDK_BS_CPL_TYPE_BLOB_HANDLE;
8184 	cpl.u.blob_handle.cb_fn = cb_fn;
8185 	cpl.u.blob_handle.cb_arg = cb_arg;
8186 	cpl.u.blob_handle.blob = blob;
8187 	cpl.u.blob_handle.esnap_ctx = opts_local.esnap_ctx;
8188 
8189 	seq = bs_sequence_start_bs(bs->md_channel, &cpl);
8190 	if (!seq) {
8191 		blob_free(blob);
8192 		cb_fn(cb_arg, NULL, -ENOMEM);
8193 		return;
8194 	}
8195 
8196 	blob_load(seq, blob, bs_open_blob_cpl, blob);
8197 }
8198 
8199 void
8200 spdk_bs_open_blob(struct spdk_blob_store *bs, spdk_blob_id blobid,
8201 		  spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
8202 {
8203 	bs_open_blob(bs, blobid, NULL, cb_fn, cb_arg);
8204 }
8205 
8206 void
8207 spdk_bs_open_blob_ext(struct spdk_blob_store *bs, spdk_blob_id blobid,
8208 		      struct spdk_blob_open_opts *opts, spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
8209 {
8210 	bs_open_blob(bs, blobid, opts, cb_fn, cb_arg);
8211 }
8212 
8213 /* END spdk_bs_open_blob */
8214 
8215 /* START spdk_blob_set_read_only */
8216 int
8217 spdk_blob_set_read_only(struct spdk_blob *blob)
8218 {
8219 	blob_verify_md_op(blob);
8220 
8221 	blob->data_ro_flags |= SPDK_BLOB_READ_ONLY;
8222 
8223 	blob->state = SPDK_BLOB_STATE_DIRTY;
8224 	return 0;
8225 }
8226 /* END spdk_blob_set_read_only */
8227 
8228 /* START spdk_blob_sync_md */
8229 
8230 static void
8231 blob_sync_md_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
8232 {
8233 	struct spdk_blob *blob = cb_arg;
8234 
8235 	if (bserrno == 0 && (blob->data_ro_flags & SPDK_BLOB_READ_ONLY)) {
8236 		blob->data_ro = true;
8237 		blob->md_ro = true;
8238 	}
8239 
8240 	bs_sequence_finish(seq, bserrno);
8241 }
8242 
8243 static void
8244 blob_sync_md(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
8245 {
8246 	struct spdk_bs_cpl	cpl;
8247 	spdk_bs_sequence_t	*seq;
8248 
8249 	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
8250 	cpl.u.blob_basic.cb_fn = cb_fn;
8251 	cpl.u.blob_basic.cb_arg = cb_arg;
8252 
8253 	seq = bs_sequence_start_bs(blob->bs->md_channel, &cpl);
8254 	if (!seq) {
8255 		cb_fn(cb_arg, -ENOMEM);
8256 		return;
8257 	}
8258 
8259 	blob_persist(seq, blob, blob_sync_md_cpl, blob);
8260 }
8261 
8262 void
8263 spdk_blob_sync_md(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
8264 {
8265 	blob_verify_md_op(blob);
8266 
8267 	SPDK_DEBUGLOG(blob, "Syncing blob 0x%" PRIx64 "\n", blob->id);
8268 
8269 	if (blob->md_ro) {
8270 		assert(blob->state == SPDK_BLOB_STATE_CLEAN);
8271 		cb_fn(cb_arg, 0);
8272 		return;
8273 	}
8274 
8275 	blob_sync_md(blob, cb_fn, cb_arg);
8276 }
8277 
8278 /* END spdk_blob_sync_md */
8279 
8280 struct spdk_blob_cluster_op_ctx {
8281 	struct spdk_thread	*thread;
8282 	struct spdk_blob	*blob;
8283 	uint32_t		cluster_num;	/* cluster index in blob */
8284 	uint32_t		cluster;	/* cluster on disk */
8285 	uint32_t		extent_page;	/* extent page on disk */
8286 	struct spdk_blob_md_page *page; /* preallocated extent page */
8287 	int			rc;
8288 	spdk_blob_op_complete	cb_fn;
8289 	void			*cb_arg;
8290 };
8291 
8292 static void
8293 blob_op_cluster_msg_cpl(void *arg)
8294 {
8295 	struct spdk_blob_cluster_op_ctx *ctx = arg;
8296 
8297 	ctx->cb_fn(ctx->cb_arg, ctx->rc);
8298 	free(ctx);
8299 }
8300 
8301 static void
8302 blob_op_cluster_msg_cb(void *arg, int bserrno)
8303 {
8304 	struct spdk_blob_cluster_op_ctx *ctx = arg;
8305 
8306 	ctx->rc = bserrno;
8307 	spdk_thread_send_msg(ctx->thread, blob_op_cluster_msg_cpl, ctx);
8308 }
8309 
8310 static void
8311 blob_insert_new_ep_cb(void *arg, int bserrno)
8312 {
8313 	struct spdk_blob_cluster_op_ctx *ctx = arg;
8314 	uint32_t *extent_page;
8315 
8316 	extent_page = bs_cluster_to_extent_page(ctx->blob, ctx->cluster_num);
8317 	*extent_page = ctx->extent_page;
8318 	ctx->blob->state = SPDK_BLOB_STATE_DIRTY;
8319 	blob_sync_md(ctx->blob, blob_op_cluster_msg_cb, ctx);
8320 }
8321 
8322 struct spdk_blob_write_extent_page_ctx {
8323 	struct spdk_blob_store		*bs;
8324 
8325 	uint32_t			extent;
8326 	struct spdk_blob_md_page	*page;
8327 };
8328 
8329 static void
8330 blob_free_cluster_msg_cb(void *arg, int bserrno)
8331 {
8332 	struct spdk_blob_cluster_op_ctx *ctx = arg;
8333 
8334 	spdk_spin_lock(&ctx->blob->bs->used_lock);
8335 	bs_release_cluster(ctx->blob->bs, bs_lba_to_cluster(ctx->blob->bs, ctx->cluster));
8336 	spdk_spin_unlock(&ctx->blob->bs->used_lock);
8337 
8338 	ctx->rc = bserrno;
8339 	spdk_thread_send_msg(ctx->thread, blob_op_cluster_msg_cpl, ctx);
8340 }
8341 
8342 static void
8343 blob_free_cluster_update_ep_cb(void *arg, int bserrno)
8344 {
8345 	struct spdk_blob_cluster_op_ctx *ctx = arg;
8346 
8347 	if (bserrno != 0 || ctx->blob->bs->clean == 0) {
8348 		blob_free_cluster_msg_cb(ctx, bserrno);
8349 		return;
8350 	}
8351 
8352 	ctx->blob->state = SPDK_BLOB_STATE_DIRTY;
8353 	blob_sync_md(ctx->blob, blob_free_cluster_msg_cb, ctx);
8354 }
8355 
8356 static void
8357 blob_free_cluster_free_ep_cb(void *arg, int bserrno)
8358 {
8359 	struct spdk_blob_cluster_op_ctx *ctx = arg;
8360 
8361 	spdk_spin_lock(&ctx->blob->bs->used_lock);
8362 	assert(spdk_bit_array_get(ctx->blob->bs->used_md_pages, ctx->extent_page) == true);
8363 	bs_release_md_page(ctx->blob->bs, ctx->extent_page);
8364 	spdk_spin_unlock(&ctx->blob->bs->used_lock);
8365 	ctx->blob->state = SPDK_BLOB_STATE_DIRTY;
8366 	blob_sync_md(ctx->blob, blob_free_cluster_msg_cb, ctx);
8367 }
8368 
8369 static void
8370 blob_persist_extent_page_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
8371 {
8372 	struct spdk_blob_write_extent_page_ctx *ctx = cb_arg;
8373 
8374 	free(ctx);
8375 	bs_sequence_finish(seq, bserrno);
8376 }
8377 
8378 static void
8379 blob_write_extent_page_ready(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
8380 {
8381 	struct spdk_blob_write_extent_page_ctx *ctx = cb_arg;
8382 
8383 	if (bserrno != 0) {
8384 		blob_persist_extent_page_cpl(seq, ctx, bserrno);
8385 		return;
8386 	}
8387 	bs_sequence_write_dev(seq, ctx->page, bs_md_page_to_lba(ctx->bs, ctx->extent),
8388 			      bs_byte_to_lba(ctx->bs, SPDK_BS_PAGE_SIZE),
8389 			      blob_persist_extent_page_cpl, ctx);
8390 }
8391 
8392 static void
8393 blob_write_extent_page(struct spdk_blob *blob, uint32_t extent, uint64_t cluster_num,
8394 		       struct spdk_blob_md_page *page, spdk_blob_op_complete cb_fn, void *cb_arg)
8395 {
8396 	struct spdk_blob_write_extent_page_ctx	*ctx;
8397 	spdk_bs_sequence_t			*seq;
8398 	struct spdk_bs_cpl			cpl;
8399 
8400 	ctx = calloc(1, sizeof(*ctx));
8401 	if (!ctx) {
8402 		cb_fn(cb_arg, -ENOMEM);
8403 		return;
8404 	}
8405 	ctx->bs = blob->bs;
8406 	ctx->extent = extent;
8407 	ctx->page = page;
8408 
8409 	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
8410 	cpl.u.blob_basic.cb_fn = cb_fn;
8411 	cpl.u.blob_basic.cb_arg = cb_arg;
8412 
8413 	seq = bs_sequence_start_bs(blob->bs->md_channel, &cpl);
8414 	if (!seq) {
8415 		free(ctx);
8416 		cb_fn(cb_arg, -ENOMEM);
8417 		return;
8418 	}
8419 
8420 	assert(page);
8421 	page->next = SPDK_INVALID_MD_PAGE;
8422 	page->id = blob->id;
8423 	page->sequence_num = 0;
8424 
8425 	blob_serialize_extent_page(blob, cluster_num, page);
8426 
8427 	page->crc = blob_md_page_calc_crc(page);
8428 
8429 	assert(spdk_bit_array_get(blob->bs->used_md_pages, extent) == true);
8430 
8431 	bs_mark_dirty(seq, blob->bs, blob_write_extent_page_ready, ctx);
8432 }
8433 
8434 static void
8435 blob_insert_cluster_msg(void *arg)
8436 {
8437 	struct spdk_blob_cluster_op_ctx *ctx = arg;
8438 	uint32_t *extent_page;
8439 
8440 	ctx->rc = blob_insert_cluster(ctx->blob, ctx->cluster_num, ctx->cluster);
8441 	if (ctx->rc != 0) {
8442 		spdk_thread_send_msg(ctx->thread, blob_op_cluster_msg_cpl, ctx);
8443 		return;
8444 	}
8445 
8446 	if (ctx->blob->use_extent_table == false) {
8447 		/* Extent table is not used, proceed with sync of md that will only use extents_rle. */
8448 		ctx->blob->state = SPDK_BLOB_STATE_DIRTY;
8449 		blob_sync_md(ctx->blob, blob_op_cluster_msg_cb, ctx);
8450 		return;
8451 	}
8452 
8453 	extent_page = bs_cluster_to_extent_page(ctx->blob, ctx->cluster_num);
8454 	if (*extent_page == 0) {
8455 		/* Extent page requires allocation.
8456 		 * It was already claimed in the used_md_pages map and placed in ctx. */
8457 		assert(ctx->extent_page != 0);
8458 		assert(spdk_bit_array_get(ctx->blob->bs->used_md_pages, ctx->extent_page) == true);
8459 		blob_write_extent_page(ctx->blob, ctx->extent_page, ctx->cluster_num, ctx->page,
8460 				       blob_insert_new_ep_cb, ctx);
8461 	} else {
8462 		/* It is possible for original thread to allocate extent page for
8463 		 * different cluster in the same extent page. In such case proceed with
8464 		 * updating the existing extent page, but release the additional one. */
8465 		if (ctx->extent_page != 0) {
8466 			spdk_spin_lock(&ctx->blob->bs->used_lock);
8467 			assert(spdk_bit_array_get(ctx->blob->bs->used_md_pages, ctx->extent_page) == true);
8468 			bs_release_md_page(ctx->blob->bs, ctx->extent_page);
8469 			spdk_spin_unlock(&ctx->blob->bs->used_lock);
8470 			ctx->extent_page = 0;
8471 		}
8472 		/* Extent page already allocated.
8473 		 * Every cluster allocation, requires just an update of single extent page. */
8474 		blob_write_extent_page(ctx->blob, *extent_page, ctx->cluster_num, ctx->page,
8475 				       blob_op_cluster_msg_cb, ctx);
8476 	}
8477 }
8478 
8479 static void
8480 blob_insert_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num,
8481 				 uint64_t cluster, uint32_t extent_page, struct spdk_blob_md_page *page,
8482 				 spdk_blob_op_complete cb_fn, void *cb_arg)
8483 {
8484 	struct spdk_blob_cluster_op_ctx *ctx;
8485 
8486 	ctx = calloc(1, sizeof(*ctx));
8487 	if (ctx == NULL) {
8488 		cb_fn(cb_arg, -ENOMEM);
8489 		return;
8490 	}
8491 
8492 	ctx->thread = spdk_get_thread();
8493 	ctx->blob = blob;
8494 	ctx->cluster_num = cluster_num;
8495 	ctx->cluster = cluster;
8496 	ctx->extent_page = extent_page;
8497 	ctx->page = page;
8498 	ctx->cb_fn = cb_fn;
8499 	ctx->cb_arg = cb_arg;
8500 
8501 	spdk_thread_send_msg(blob->bs->md_thread, blob_insert_cluster_msg, ctx);
8502 }
8503 
8504 static void
8505 blob_free_cluster_msg(void *arg)
8506 {
8507 	struct spdk_blob_cluster_op_ctx *ctx = arg;
8508 	uint32_t *extent_page;
8509 	uint32_t start_cluster_idx;
8510 	bool free_extent_page = true;
8511 	size_t i;
8512 
8513 	ctx->cluster = ctx->blob->active.clusters[ctx->cluster_num];
8514 
8515 	/* There were concurrent unmaps to the same cluster, only release the cluster on the first one */
8516 	if (ctx->cluster == 0) {
8517 		blob_op_cluster_msg_cb(ctx, 0);
8518 		return;
8519 	}
8520 
8521 	ctx->blob->active.clusters[ctx->cluster_num] = 0;
8522 	if (ctx->cluster != 0) {
8523 		ctx->blob->active.num_allocated_clusters--;
8524 	}
8525 
8526 	if (ctx->blob->use_extent_table == false) {
8527 		/* Extent table is not used, proceed with sync of md that will only use extents_rle. */
8528 		spdk_spin_lock(&ctx->blob->bs->used_lock);
8529 		bs_release_cluster(ctx->blob->bs, bs_lba_to_cluster(ctx->blob->bs, ctx->cluster));
8530 		spdk_spin_unlock(&ctx->blob->bs->used_lock);
8531 		ctx->blob->state = SPDK_BLOB_STATE_DIRTY;
8532 		blob_sync_md(ctx->blob, blob_op_cluster_msg_cb, ctx);
8533 		return;
8534 	}
8535 
8536 	extent_page = bs_cluster_to_extent_page(ctx->blob, ctx->cluster_num);
8537 
8538 	/* There shouldn't be parallel release operations on same cluster */
8539 	assert(*extent_page == ctx->extent_page);
8540 
8541 	start_cluster_idx = (ctx->cluster_num / SPDK_EXTENTS_PER_EP) * SPDK_EXTENTS_PER_EP;
8542 	for (i = 0; i < SPDK_EXTENTS_PER_EP; ++i) {
8543 		if (ctx->blob->active.clusters[start_cluster_idx + i] != 0) {
8544 			free_extent_page = false;
8545 			break;
8546 		}
8547 	}
8548 
8549 	if (free_extent_page) {
8550 		assert(ctx->extent_page != 0);
8551 		assert(spdk_bit_array_get(ctx->blob->bs->used_md_pages, ctx->extent_page) == true);
8552 		ctx->blob->active.extent_pages[bs_cluster_to_extent_table_id(ctx->cluster_num)] = 0;
8553 		blob_write_extent_page(ctx->blob, ctx->extent_page, ctx->cluster_num, ctx->page,
8554 				       blob_free_cluster_free_ep_cb, ctx);
8555 	} else {
8556 		blob_write_extent_page(ctx->blob, *extent_page, ctx->cluster_num, ctx->page,
8557 				       blob_free_cluster_update_ep_cb, ctx);
8558 	}
8559 }
8560 
8561 
8562 static void
8563 blob_free_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num, uint32_t extent_page,
8564 			       struct spdk_blob_md_page *page, spdk_blob_op_complete cb_fn, void *cb_arg)
8565 {
8566 	struct spdk_blob_cluster_op_ctx *ctx;
8567 
8568 	ctx = calloc(1, sizeof(*ctx));
8569 	if (ctx == NULL) {
8570 		cb_fn(cb_arg, -ENOMEM);
8571 		return;
8572 	}
8573 
8574 	ctx->thread = spdk_get_thread();
8575 	ctx->blob = blob;
8576 	ctx->cluster_num = cluster_num;
8577 	ctx->extent_page = extent_page;
8578 	ctx->page = page;
8579 	ctx->cb_fn = cb_fn;
8580 	ctx->cb_arg = cb_arg;
8581 
8582 	spdk_thread_send_msg(blob->bs->md_thread, blob_free_cluster_msg, ctx);
8583 }
8584 
8585 /* START spdk_blob_close */
8586 
8587 static void
8588 blob_close_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
8589 {
8590 	struct spdk_blob *blob = cb_arg;
8591 
8592 	if (bserrno == 0) {
8593 		blob->open_ref--;
8594 		if (blob->open_ref == 0) {
8595 			/*
8596 			 * Blobs with active.num_pages == 0 are deleted blobs.
8597 			 *  these blobs are removed from the blob_store list
8598 			 *  when the deletion process starts - so don't try to
8599 			 *  remove them again.
8600 			 */
8601 			if (blob->active.num_pages > 0) {
8602 				spdk_bit_array_clear(blob->bs->open_blobids, blob->id);
8603 				RB_REMOVE(spdk_blob_tree, &blob->bs->open_blobs, blob);
8604 			}
8605 			blob_free(blob);
8606 		}
8607 	}
8608 
8609 	bs_sequence_finish(seq, bserrno);
8610 }
8611 
8612 static void
8613 blob_close_esnap_done(void *cb_arg, struct spdk_blob *blob, int bserrno)
8614 {
8615 	spdk_bs_sequence_t	*seq = cb_arg;
8616 
8617 	if (bserrno != 0) {
8618 		SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": close failed with error %d\n",
8619 			      blob->id, bserrno);
8620 		bs_sequence_finish(seq, bserrno);
8621 		return;
8622 	}
8623 
8624 	SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": closed, syncing metadata on thread %s\n",
8625 		      blob->id, spdk_thread_get_name(spdk_get_thread()));
8626 
8627 	/* Sync metadata */
8628 	blob_persist(seq, blob, blob_close_cpl, blob);
8629 }
8630 
8631 void
8632 spdk_blob_close(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
8633 {
8634 	struct spdk_bs_cpl	cpl;
8635 	spdk_bs_sequence_t	*seq;
8636 
8637 	blob_verify_md_op(blob);
8638 
8639 	SPDK_DEBUGLOG(blob, "Closing blob 0x%" PRIx64 "\n", blob->id);
8640 
8641 	if (blob->open_ref == 0) {
8642 		cb_fn(cb_arg, -EBADF);
8643 		return;
8644 	}
8645 
8646 	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
8647 	cpl.u.blob_basic.cb_fn = cb_fn;
8648 	cpl.u.blob_basic.cb_arg = cb_arg;
8649 
8650 	seq = bs_sequence_start_bs(blob->bs->md_channel, &cpl);
8651 	if (!seq) {
8652 		cb_fn(cb_arg, -ENOMEM);
8653 		return;
8654 	}
8655 
8656 	if (blob->open_ref == 1 && blob_is_esnap_clone(blob)) {
8657 		blob_esnap_destroy_bs_dev_channels(blob, false, blob_close_esnap_done, seq);
8658 		return;
8659 	}
8660 
8661 	/* Sync metadata */
8662 	blob_persist(seq, blob, blob_close_cpl, blob);
8663 }
8664 
8665 /* END spdk_blob_close */
8666 
8667 struct spdk_io_channel *spdk_bs_alloc_io_channel(struct spdk_blob_store *bs)
8668 {
8669 	return spdk_get_io_channel(bs);
8670 }
8671 
8672 void
8673 spdk_bs_free_io_channel(struct spdk_io_channel *channel)
8674 {
8675 	blob_esnap_destroy_bs_channel(spdk_io_channel_get_ctx(channel));
8676 	spdk_put_io_channel(channel);
8677 }
8678 
8679 void
8680 spdk_blob_io_unmap(struct spdk_blob *blob, struct spdk_io_channel *channel,
8681 		   uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg)
8682 {
8683 	blob_request_submit_op(blob, channel, NULL, offset, length, cb_fn, cb_arg,
8684 			       SPDK_BLOB_UNMAP);
8685 }
8686 
8687 void
8688 spdk_blob_io_write_zeroes(struct spdk_blob *blob, struct spdk_io_channel *channel,
8689 			  uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg)
8690 {
8691 	blob_request_submit_op(blob, channel, NULL, offset, length, cb_fn, cb_arg,
8692 			       SPDK_BLOB_WRITE_ZEROES);
8693 }
8694 
8695 void
8696 spdk_blob_io_write(struct spdk_blob *blob, struct spdk_io_channel *channel,
8697 		   void *payload, uint64_t offset, uint64_t length,
8698 		   spdk_blob_op_complete cb_fn, void *cb_arg)
8699 {
8700 	blob_request_submit_op(blob, channel, payload, offset, length, cb_fn, cb_arg,
8701 			       SPDK_BLOB_WRITE);
8702 }
8703 
8704 void
8705 spdk_blob_io_read(struct spdk_blob *blob, struct spdk_io_channel *channel,
8706 		  void *payload, uint64_t offset, uint64_t length,
8707 		  spdk_blob_op_complete cb_fn, void *cb_arg)
8708 {
8709 	blob_request_submit_op(blob, channel, payload, offset, length, cb_fn, cb_arg,
8710 			       SPDK_BLOB_READ);
8711 }
8712 
8713 void
8714 spdk_blob_io_writev(struct spdk_blob *blob, struct spdk_io_channel *channel,
8715 		    struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
8716 		    spdk_blob_op_complete cb_fn, void *cb_arg)
8717 {
8718 	blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, false, NULL);
8719 }
8720 
8721 void
8722 spdk_blob_io_readv(struct spdk_blob *blob, struct spdk_io_channel *channel,
8723 		   struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
8724 		   spdk_blob_op_complete cb_fn, void *cb_arg)
8725 {
8726 	blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, true, NULL);
8727 }
8728 
8729 void
8730 spdk_blob_io_writev_ext(struct spdk_blob *blob, struct spdk_io_channel *channel,
8731 			struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
8732 			spdk_blob_op_complete cb_fn, void *cb_arg, struct spdk_blob_ext_io_opts *io_opts)
8733 {
8734 	blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, false,
8735 				   io_opts);
8736 }
8737 
8738 void
8739 spdk_blob_io_readv_ext(struct spdk_blob *blob, struct spdk_io_channel *channel,
8740 		       struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
8741 		       spdk_blob_op_complete cb_fn, void *cb_arg, struct spdk_blob_ext_io_opts *io_opts)
8742 {
8743 	blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, true,
8744 				   io_opts);
8745 }
8746 
8747 struct spdk_bs_iter_ctx {
8748 	int64_t page_num;
8749 	struct spdk_blob_store *bs;
8750 
8751 	spdk_blob_op_with_handle_complete cb_fn;
8752 	void *cb_arg;
8753 };
8754 
8755 static void
8756 bs_iter_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
8757 {
8758 	struct spdk_bs_iter_ctx *ctx = cb_arg;
8759 	struct spdk_blob_store *bs = ctx->bs;
8760 	spdk_blob_id id;
8761 
8762 	if (bserrno == 0) {
8763 		ctx->cb_fn(ctx->cb_arg, _blob, bserrno);
8764 		free(ctx);
8765 		return;
8766 	}
8767 
8768 	ctx->page_num++;
8769 	ctx->page_num = spdk_bit_array_find_first_set(bs->used_blobids, ctx->page_num);
8770 	if (ctx->page_num >= spdk_bit_array_capacity(bs->used_blobids)) {
8771 		ctx->cb_fn(ctx->cb_arg, NULL, -ENOENT);
8772 		free(ctx);
8773 		return;
8774 	}
8775 
8776 	id = bs_page_to_blobid(ctx->page_num);
8777 
8778 	spdk_bs_open_blob(bs, id, bs_iter_cpl, ctx);
8779 }
8780 
8781 void
8782 spdk_bs_iter_first(struct spdk_blob_store *bs,
8783 		   spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
8784 {
8785 	struct spdk_bs_iter_ctx *ctx;
8786 
8787 	ctx = calloc(1, sizeof(*ctx));
8788 	if (!ctx) {
8789 		cb_fn(cb_arg, NULL, -ENOMEM);
8790 		return;
8791 	}
8792 
8793 	ctx->page_num = -1;
8794 	ctx->bs = bs;
8795 	ctx->cb_fn = cb_fn;
8796 	ctx->cb_arg = cb_arg;
8797 
8798 	bs_iter_cpl(ctx, NULL, -1);
8799 }
8800 
8801 static void
8802 bs_iter_close_cpl(void *cb_arg, int bserrno)
8803 {
8804 	struct spdk_bs_iter_ctx *ctx = cb_arg;
8805 
8806 	bs_iter_cpl(ctx, NULL, -1);
8807 }
8808 
8809 void
8810 spdk_bs_iter_next(struct spdk_blob_store *bs, struct spdk_blob *blob,
8811 		  spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
8812 {
8813 	struct spdk_bs_iter_ctx *ctx;
8814 
8815 	assert(blob != NULL);
8816 
8817 	ctx = calloc(1, sizeof(*ctx));
8818 	if (!ctx) {
8819 		cb_fn(cb_arg, NULL, -ENOMEM);
8820 		return;
8821 	}
8822 
8823 	ctx->page_num = bs_blobid_to_page(blob->id);
8824 	ctx->bs = bs;
8825 	ctx->cb_fn = cb_fn;
8826 	ctx->cb_arg = cb_arg;
8827 
8828 	/* Close the existing blob */
8829 	spdk_blob_close(blob, bs_iter_close_cpl, ctx);
8830 }
8831 
8832 static int
8833 blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value,
8834 	       uint16_t value_len, bool internal)
8835 {
8836 	struct spdk_xattr_tailq *xattrs;
8837 	struct spdk_xattr	*xattr;
8838 	size_t			desc_size;
8839 	void			*tmp;
8840 
8841 	blob_verify_md_op(blob);
8842 
8843 	if (blob->md_ro) {
8844 		return -EPERM;
8845 	}
8846 
8847 	desc_size = sizeof(struct spdk_blob_md_descriptor_xattr) + strlen(name) + value_len;
8848 	if (desc_size > SPDK_BS_MAX_DESC_SIZE) {
8849 		SPDK_DEBUGLOG(blob, "Xattr '%s' of size %zu does not fix into single page %zu\n", name,
8850 			      desc_size, SPDK_BS_MAX_DESC_SIZE);
8851 		return -ENOMEM;
8852 	}
8853 
8854 	if (internal) {
8855 		xattrs = &blob->xattrs_internal;
8856 		blob->invalid_flags |= SPDK_BLOB_INTERNAL_XATTR;
8857 	} else {
8858 		xattrs = &blob->xattrs;
8859 	}
8860 
8861 	TAILQ_FOREACH(xattr, xattrs, link) {
8862 		if (!strcmp(name, xattr->name)) {
8863 			tmp = malloc(value_len);
8864 			if (!tmp) {
8865 				return -ENOMEM;
8866 			}
8867 
8868 			free(xattr->value);
8869 			xattr->value_len = value_len;
8870 			xattr->value = tmp;
8871 			memcpy(xattr->value, value, value_len);
8872 
8873 			blob->state = SPDK_BLOB_STATE_DIRTY;
8874 
8875 			return 0;
8876 		}
8877 	}
8878 
8879 	xattr = calloc(1, sizeof(*xattr));
8880 	if (!xattr) {
8881 		return -ENOMEM;
8882 	}
8883 
8884 	xattr->name = strdup(name);
8885 	if (!xattr->name) {
8886 		free(xattr);
8887 		return -ENOMEM;
8888 	}
8889 
8890 	xattr->value_len = value_len;
8891 	xattr->value = malloc(value_len);
8892 	if (!xattr->value) {
8893 		free(xattr->name);
8894 		free(xattr);
8895 		return -ENOMEM;
8896 	}
8897 	memcpy(xattr->value, value, value_len);
8898 	TAILQ_INSERT_TAIL(xattrs, xattr, link);
8899 
8900 	blob->state = SPDK_BLOB_STATE_DIRTY;
8901 
8902 	return 0;
8903 }
8904 
8905 int
8906 spdk_blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value,
8907 		    uint16_t value_len)
8908 {
8909 	return blob_set_xattr(blob, name, value, value_len, false);
8910 }
8911 
8912 static int
8913 blob_remove_xattr(struct spdk_blob *blob, const char *name, bool internal)
8914 {
8915 	struct spdk_xattr_tailq *xattrs;
8916 	struct spdk_xattr	*xattr;
8917 
8918 	blob_verify_md_op(blob);
8919 
8920 	if (blob->md_ro) {
8921 		return -EPERM;
8922 	}
8923 	xattrs = internal ? &blob->xattrs_internal : &blob->xattrs;
8924 
8925 	TAILQ_FOREACH(xattr, xattrs, link) {
8926 		if (!strcmp(name, xattr->name)) {
8927 			TAILQ_REMOVE(xattrs, xattr, link);
8928 			free(xattr->value);
8929 			free(xattr->name);
8930 			free(xattr);
8931 
8932 			if (internal && TAILQ_EMPTY(&blob->xattrs_internal)) {
8933 				blob->invalid_flags &= ~SPDK_BLOB_INTERNAL_XATTR;
8934 			}
8935 			blob->state = SPDK_BLOB_STATE_DIRTY;
8936 
8937 			return 0;
8938 		}
8939 	}
8940 
8941 	return -ENOENT;
8942 }
8943 
8944 int
8945 spdk_blob_remove_xattr(struct spdk_blob *blob, const char *name)
8946 {
8947 	return blob_remove_xattr(blob, name, false);
8948 }
8949 
8950 static int
8951 blob_get_xattr_value(struct spdk_blob *blob, const char *name,
8952 		     const void **value, size_t *value_len, bool internal)
8953 {
8954 	struct spdk_xattr	*xattr;
8955 	struct spdk_xattr_tailq *xattrs;
8956 
8957 	xattrs = internal ? &blob->xattrs_internal : &blob->xattrs;
8958 
8959 	TAILQ_FOREACH(xattr, xattrs, link) {
8960 		if (!strcmp(name, xattr->name)) {
8961 			*value = xattr->value;
8962 			*value_len = xattr->value_len;
8963 			return 0;
8964 		}
8965 	}
8966 	return -ENOENT;
8967 }
8968 
8969 int
8970 spdk_blob_get_xattr_value(struct spdk_blob *blob, const char *name,
8971 			  const void **value, size_t *value_len)
8972 {
8973 	blob_verify_md_op(blob);
8974 
8975 	return blob_get_xattr_value(blob, name, value, value_len, false);
8976 }
8977 
8978 struct spdk_xattr_names {
8979 	uint32_t	count;
8980 	const char	*names[0];
8981 };
8982 
8983 static int
8984 blob_get_xattr_names(struct spdk_xattr_tailq *xattrs, struct spdk_xattr_names **names)
8985 {
8986 	struct spdk_xattr	*xattr;
8987 	int			count = 0;
8988 
8989 	TAILQ_FOREACH(xattr, xattrs, link) {
8990 		count++;
8991 	}
8992 
8993 	*names = calloc(1, sizeof(struct spdk_xattr_names) + count * sizeof(char *));
8994 	if (*names == NULL) {
8995 		return -ENOMEM;
8996 	}
8997 
8998 	TAILQ_FOREACH(xattr, xattrs, link) {
8999 		(*names)->names[(*names)->count++] = xattr->name;
9000 	}
9001 
9002 	return 0;
9003 }
9004 
9005 int
9006 spdk_blob_get_xattr_names(struct spdk_blob *blob, struct spdk_xattr_names **names)
9007 {
9008 	blob_verify_md_op(blob);
9009 
9010 	return blob_get_xattr_names(&blob->xattrs, names);
9011 }
9012 
9013 uint32_t
9014 spdk_xattr_names_get_count(struct spdk_xattr_names *names)
9015 {
9016 	assert(names != NULL);
9017 
9018 	return names->count;
9019 }
9020 
9021 const char *
9022 spdk_xattr_names_get_name(struct spdk_xattr_names *names, uint32_t index)
9023 {
9024 	if (index >= names->count) {
9025 		return NULL;
9026 	}
9027 
9028 	return names->names[index];
9029 }
9030 
9031 void
9032 spdk_xattr_names_free(struct spdk_xattr_names *names)
9033 {
9034 	free(names);
9035 }
9036 
9037 struct spdk_bs_type
9038 spdk_bs_get_bstype(struct spdk_blob_store *bs)
9039 {
9040 	return bs->bstype;
9041 }
9042 
9043 void
9044 spdk_bs_set_bstype(struct spdk_blob_store *bs, struct spdk_bs_type bstype)
9045 {
9046 	memcpy(&bs->bstype, &bstype, sizeof(bstype));
9047 }
9048 
9049 bool
9050 spdk_blob_is_read_only(struct spdk_blob *blob)
9051 {
9052 	assert(blob != NULL);
9053 	return (blob->data_ro || blob->md_ro);
9054 }
9055 
9056 bool
9057 spdk_blob_is_snapshot(struct spdk_blob *blob)
9058 {
9059 	struct spdk_blob_list *snapshot_entry;
9060 
9061 	assert(blob != NULL);
9062 
9063 	snapshot_entry = bs_get_snapshot_entry(blob->bs, blob->id);
9064 	if (snapshot_entry == NULL) {
9065 		return false;
9066 	}
9067 
9068 	return true;
9069 }
9070 
9071 bool
9072 spdk_blob_is_clone(struct spdk_blob *blob)
9073 {
9074 	assert(blob != NULL);
9075 
9076 	if (blob->parent_id != SPDK_BLOBID_INVALID &&
9077 	    blob->parent_id != SPDK_BLOBID_EXTERNAL_SNAPSHOT) {
9078 		assert(spdk_blob_is_thin_provisioned(blob));
9079 		return true;
9080 	}
9081 
9082 	return false;
9083 }
9084 
9085 bool
9086 spdk_blob_is_thin_provisioned(struct spdk_blob *blob)
9087 {
9088 	assert(blob != NULL);
9089 	return !!(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
9090 }
9091 
9092 bool
9093 spdk_blob_is_esnap_clone(const struct spdk_blob *blob)
9094 {
9095 	return blob_is_esnap_clone(blob);
9096 }
9097 
9098 static void
9099 blob_update_clear_method(struct spdk_blob *blob)
9100 {
9101 	enum blob_clear_method stored_cm;
9102 
9103 	assert(blob != NULL);
9104 
9105 	/* If BLOB_CLEAR_WITH_DEFAULT was passed in, use the setting stored
9106 	 * in metadata previously.  If something other than the default was
9107 	 * specified, ignore stored value and used what was passed in.
9108 	 */
9109 	stored_cm = ((blob->md_ro_flags & SPDK_BLOB_CLEAR_METHOD) >> SPDK_BLOB_CLEAR_METHOD_SHIFT);
9110 
9111 	if (blob->clear_method == BLOB_CLEAR_WITH_DEFAULT) {
9112 		blob->clear_method = stored_cm;
9113 	} else if (blob->clear_method != stored_cm) {
9114 		SPDK_WARNLOG("Using passed in clear method 0x%x instead of stored value of 0x%x\n",
9115 			     blob->clear_method, stored_cm);
9116 	}
9117 }
9118 
9119 spdk_blob_id
9120 spdk_blob_get_parent_snapshot(struct spdk_blob_store *bs, spdk_blob_id blob_id)
9121 {
9122 	struct spdk_blob_list *snapshot_entry = NULL;
9123 	struct spdk_blob_list *clone_entry = NULL;
9124 
9125 	TAILQ_FOREACH(snapshot_entry, &bs->snapshots, link) {
9126 		TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) {
9127 			if (clone_entry->id == blob_id) {
9128 				return snapshot_entry->id;
9129 			}
9130 		}
9131 	}
9132 
9133 	return SPDK_BLOBID_INVALID;
9134 }
9135 
9136 int
9137 spdk_blob_get_clones(struct spdk_blob_store *bs, spdk_blob_id blobid, spdk_blob_id *ids,
9138 		     size_t *count)
9139 {
9140 	struct spdk_blob_list *snapshot_entry, *clone_entry;
9141 	size_t n;
9142 
9143 	snapshot_entry = bs_get_snapshot_entry(bs, blobid);
9144 	if (snapshot_entry == NULL) {
9145 		*count = 0;
9146 		return 0;
9147 	}
9148 
9149 	if (ids == NULL || *count < snapshot_entry->clone_count) {
9150 		*count = snapshot_entry->clone_count;
9151 		return -ENOMEM;
9152 	}
9153 	*count = snapshot_entry->clone_count;
9154 
9155 	n = 0;
9156 	TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) {
9157 		ids[n++] = clone_entry->id;
9158 	}
9159 
9160 	return 0;
9161 }
9162 
9163 static void
9164 bs_load_grow_continue(struct spdk_bs_load_ctx *ctx)
9165 {
9166 	int rc;
9167 
9168 	if (ctx->super->size == 0) {
9169 		ctx->super->size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen;
9170 	}
9171 
9172 	if (ctx->super->io_unit_size == 0) {
9173 		ctx->super->io_unit_size = SPDK_BS_PAGE_SIZE;
9174 	}
9175 
9176 	/* Parse the super block */
9177 	ctx->bs->clean = 1;
9178 	ctx->bs->cluster_sz = ctx->super->cluster_size;
9179 	ctx->bs->total_clusters = ctx->super->size / ctx->super->cluster_size;
9180 	ctx->bs->pages_per_cluster = ctx->bs->cluster_sz / SPDK_BS_PAGE_SIZE;
9181 	if (spdk_u32_is_pow2(ctx->bs->pages_per_cluster)) {
9182 		ctx->bs->pages_per_cluster_shift = spdk_u32log2(ctx->bs->pages_per_cluster);
9183 	}
9184 	ctx->bs->io_unit_size = ctx->super->io_unit_size;
9185 	rc = spdk_bit_array_resize(&ctx->used_clusters, ctx->bs->total_clusters);
9186 	if (rc < 0) {
9187 		bs_load_ctx_fail(ctx, -ENOMEM);
9188 		return;
9189 	}
9190 	ctx->bs->md_start = ctx->super->md_start;
9191 	ctx->bs->md_len = ctx->super->md_len;
9192 	rc = spdk_bit_array_resize(&ctx->bs->open_blobids, ctx->bs->md_len);
9193 	if (rc < 0) {
9194 		bs_load_ctx_fail(ctx, -ENOMEM);
9195 		return;
9196 	}
9197 
9198 	ctx->bs->total_data_clusters = ctx->bs->total_clusters - spdk_divide_round_up(
9199 					       ctx->bs->md_start + ctx->bs->md_len, ctx->bs->pages_per_cluster);
9200 	ctx->bs->super_blob = ctx->super->super_blob;
9201 	memcpy(&ctx->bs->bstype, &ctx->super->bstype, sizeof(ctx->super->bstype));
9202 
9203 	if (ctx->super->used_blobid_mask_len == 0 || ctx->super->clean == 0) {
9204 		SPDK_ERRLOG("Can not grow an unclean blobstore, please load it normally to clean it.\n");
9205 		bs_load_ctx_fail(ctx, -EIO);
9206 		return;
9207 	} else {
9208 		bs_load_read_used_pages(ctx);
9209 	}
9210 }
9211 
9212 static void
9213 bs_load_grow_super_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
9214 {
9215 	struct spdk_bs_load_ctx	*ctx = cb_arg;
9216 
9217 	if (bserrno != 0) {
9218 		bs_load_ctx_fail(ctx, bserrno);
9219 		return;
9220 	}
9221 	bs_load_grow_continue(ctx);
9222 }
9223 
9224 static void
9225 bs_load_grow_used_clusters_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
9226 {
9227 	struct spdk_bs_load_ctx	*ctx = cb_arg;
9228 
9229 	if (bserrno != 0) {
9230 		bs_load_ctx_fail(ctx, bserrno);
9231 		return;
9232 	}
9233 
9234 	spdk_free(ctx->mask);
9235 
9236 	bs_sequence_write_dev(ctx->seq, ctx->super, bs_page_to_lba(ctx->bs, 0),
9237 			      bs_byte_to_lba(ctx->bs, sizeof(*ctx->super)),
9238 			      bs_load_grow_super_write_cpl, ctx);
9239 }
9240 
9241 static void
9242 bs_load_grow_used_clusters_read_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
9243 {
9244 	struct spdk_bs_load_ctx *ctx = cb_arg;
9245 	uint64_t		lba, lba_count;
9246 	uint64_t		dev_size;
9247 	uint64_t		total_clusters;
9248 
9249 	if (bserrno != 0) {
9250 		bs_load_ctx_fail(ctx, bserrno);
9251 		return;
9252 	}
9253 
9254 	/* The type must be correct */
9255 	assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_CLUSTERS);
9256 	/* The length of the mask (in bits) must not be greater than the length of the buffer (converted to bits) */
9257 	assert(ctx->mask->length <= (ctx->super->used_cluster_mask_len * sizeof(
9258 					     struct spdk_blob_md_page) * 8));
9259 	dev_size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen;
9260 	total_clusters = dev_size / ctx->super->cluster_size;
9261 	ctx->mask->length = total_clusters;
9262 
9263 	lba = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start);
9264 	lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len);
9265 	bs_sequence_write_dev(ctx->seq, ctx->mask, lba, lba_count,
9266 			      bs_load_grow_used_clusters_write_cpl, ctx);
9267 }
9268 
9269 static void
9270 bs_load_try_to_grow(struct spdk_bs_load_ctx *ctx)
9271 {
9272 	uint64_t dev_size, total_clusters, used_cluster_mask_len, max_used_cluster_mask;
9273 	uint64_t lba, lba_count, mask_size;
9274 
9275 	dev_size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen;
9276 	total_clusters = dev_size / ctx->super->cluster_size;
9277 	used_cluster_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) +
9278 				spdk_divide_round_up(total_clusters, 8),
9279 				SPDK_BS_PAGE_SIZE);
9280 	max_used_cluster_mask = ctx->super->used_blobid_mask_start - ctx->super->used_cluster_mask_start;
9281 	/* No necessary to grow or no space to grow */
9282 	if (ctx->super->size >= dev_size || used_cluster_mask_len > max_used_cluster_mask) {
9283 		SPDK_DEBUGLOG(blob, "No grow\n");
9284 		bs_load_grow_continue(ctx);
9285 		return;
9286 	}
9287 
9288 	SPDK_DEBUGLOG(blob, "Resize blobstore\n");
9289 
9290 	ctx->super->size = dev_size;
9291 	ctx->super->used_cluster_mask_len = used_cluster_mask_len;
9292 	ctx->super->crc = blob_md_page_calc_crc(ctx->super);
9293 
9294 	mask_size = used_cluster_mask_len * SPDK_BS_PAGE_SIZE;
9295 	ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_SOCKET_ID_ANY,
9296 				 SPDK_MALLOC_DMA);
9297 	if (!ctx->mask) {
9298 		bs_load_ctx_fail(ctx, -ENOMEM);
9299 		return;
9300 	}
9301 	lba = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start);
9302 	lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len);
9303 	bs_sequence_read_dev(ctx->seq, ctx->mask, lba, lba_count,
9304 			     bs_load_grow_used_clusters_read_cpl, ctx);
9305 }
9306 
9307 static void
9308 bs_grow_load_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
9309 {
9310 	struct spdk_bs_load_ctx *ctx = cb_arg;
9311 	int rc;
9312 
9313 	rc = bs_super_validate(ctx->super, ctx->bs);
9314 	if (rc != 0) {
9315 		bs_load_ctx_fail(ctx, rc);
9316 		return;
9317 	}
9318 
9319 	bs_load_try_to_grow(ctx);
9320 }
9321 
9322 struct spdk_bs_grow_ctx {
9323 	struct spdk_blob_store		*bs;
9324 	struct spdk_bs_super_block	*super;
9325 
9326 	struct spdk_bit_pool		*new_used_clusters;
9327 	struct spdk_bs_md_mask		*new_used_clusters_mask;
9328 
9329 	spdk_bs_sequence_t		*seq;
9330 };
9331 
9332 static void
9333 bs_grow_live_done(struct spdk_bs_grow_ctx *ctx, int bserrno)
9334 {
9335 	if (bserrno != 0) {
9336 		spdk_bit_pool_free(&ctx->new_used_clusters);
9337 	}
9338 
9339 	bs_sequence_finish(ctx->seq, bserrno);
9340 	free(ctx->new_used_clusters_mask);
9341 	spdk_free(ctx->super);
9342 	free(ctx);
9343 }
9344 
9345 static void
9346 bs_grow_live_super_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
9347 {
9348 	struct spdk_bs_grow_ctx	*ctx = cb_arg;
9349 	struct spdk_blob_store *bs = ctx->bs;
9350 	uint64_t total_clusters;
9351 
9352 	if (bserrno != 0) {
9353 		bs_grow_live_done(ctx, bserrno);
9354 		return;
9355 	}
9356 
9357 	/*
9358 	 * Blobstore is not clean until unload, for now only the super block is up to date.
9359 	 * This is similar to state right after blobstore init, when bs_write_used_md() didn't
9360 	 * yet execute.
9361 	 * When cleanly unloaded, the used md pages will be written out.
9362 	 * In case of unclean shutdown, loading blobstore will go through recovery path correctly
9363 	 * filling out the used_clusters with new size and writing it out.
9364 	 */
9365 	bs->clean = 0;
9366 
9367 	/* Reverting the super->size past this point is complex, avoid any error paths
9368 	 * that require to do so. */
9369 	spdk_spin_lock(&bs->used_lock);
9370 
9371 	total_clusters = ctx->super->size / ctx->super->cluster_size;
9372 
9373 	assert(total_clusters >= spdk_bit_pool_capacity(bs->used_clusters));
9374 	spdk_bit_pool_store_mask(bs->used_clusters, ctx->new_used_clusters_mask);
9375 
9376 	assert(total_clusters == spdk_bit_pool_capacity(ctx->new_used_clusters));
9377 	spdk_bit_pool_load_mask(ctx->new_used_clusters, ctx->new_used_clusters_mask);
9378 
9379 	spdk_bit_pool_free(&bs->used_clusters);
9380 	bs->used_clusters = ctx->new_used_clusters;
9381 
9382 	bs->total_clusters = total_clusters;
9383 	bs->total_data_clusters = bs->total_clusters - spdk_divide_round_up(
9384 					  bs->md_start + bs->md_len, bs->pages_per_cluster);
9385 
9386 	bs->num_free_clusters = spdk_bit_pool_count_free(bs->used_clusters);
9387 	assert(ctx->bs->num_free_clusters <= ctx->bs->total_clusters);
9388 	spdk_spin_unlock(&bs->used_lock);
9389 
9390 	bs_grow_live_done(ctx, 0);
9391 }
9392 
9393 static void
9394 bs_grow_live_load_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
9395 {
9396 	struct spdk_bs_grow_ctx *ctx = cb_arg;
9397 	uint64_t dev_size, total_clusters, used_cluster_mask_len, max_used_cluster_mask;
9398 	int rc;
9399 
9400 	if (bserrno != 0) {
9401 		bs_grow_live_done(ctx, bserrno);
9402 		return;
9403 	}
9404 
9405 	rc = bs_super_validate(ctx->super, ctx->bs);
9406 	if (rc != 0) {
9407 		bs_grow_live_done(ctx, rc);
9408 		return;
9409 	}
9410 
9411 	dev_size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen;
9412 	total_clusters = dev_size / ctx->super->cluster_size;
9413 	used_cluster_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) +
9414 				spdk_divide_round_up(total_clusters, 8),
9415 				SPDK_BS_PAGE_SIZE);
9416 	max_used_cluster_mask = ctx->super->used_blobid_mask_start - ctx->super->used_cluster_mask_start;
9417 	/* Only checking dev_size. Since it can change, but total_clusters remain the same. */
9418 	if (dev_size == ctx->super->size) {
9419 		SPDK_DEBUGLOG(blob, "No need to grow blobstore\n");
9420 		bs_grow_live_done(ctx, 0);
9421 		return;
9422 	}
9423 	/*
9424 	 * Blobstore cannot be shrunk, so check before if:
9425 	 * - new size of the device is smaller than size in super_block
9426 	 * - new total number of clusters is smaller than used_clusters bit_pool
9427 	 * - there is enough space in metadata for used_cluster_mask to be written out
9428 	 */
9429 	if (dev_size < ctx->super->size ||
9430 	    total_clusters < spdk_bit_pool_capacity(ctx->bs->used_clusters) ||
9431 	    used_cluster_mask_len > max_used_cluster_mask) {
9432 		SPDK_DEBUGLOG(blob, "No space to grow blobstore\n");
9433 		bs_grow_live_done(ctx, -ENOSPC);
9434 		return;
9435 	}
9436 
9437 	SPDK_DEBUGLOG(blob, "Resizing blobstore\n");
9438 
9439 	ctx->new_used_clusters_mask = calloc(1, total_clusters);
9440 	if (!ctx->new_used_clusters_mask) {
9441 		bs_grow_live_done(ctx, -ENOMEM);
9442 		return;
9443 	}
9444 	ctx->new_used_clusters = spdk_bit_pool_create(total_clusters);
9445 	if (!ctx->new_used_clusters) {
9446 		bs_grow_live_done(ctx, -ENOMEM);
9447 		return;
9448 	}
9449 
9450 	ctx->super->clean = 0;
9451 	ctx->super->size = dev_size;
9452 	ctx->super->used_cluster_mask_len = used_cluster_mask_len;
9453 	bs_write_super(seq, ctx->bs, ctx->super, bs_grow_live_super_write_cpl, ctx);
9454 }
9455 
9456 void
9457 spdk_bs_grow_live(struct spdk_blob_store *bs,
9458 		  spdk_bs_op_complete cb_fn, void *cb_arg)
9459 {
9460 	struct spdk_bs_cpl	cpl;
9461 	struct spdk_bs_grow_ctx *ctx;
9462 
9463 	assert(spdk_get_thread() == bs->md_thread);
9464 
9465 	SPDK_DEBUGLOG(blob, "Growing blobstore on dev %p\n", bs->dev);
9466 
9467 	cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
9468 	cpl.u.bs_basic.cb_fn = cb_fn;
9469 	cpl.u.bs_basic.cb_arg = cb_arg;
9470 
9471 	ctx = calloc(1, sizeof(struct spdk_bs_grow_ctx));
9472 	if (!ctx) {
9473 		cb_fn(cb_arg, -ENOMEM);
9474 		return;
9475 	}
9476 	ctx->bs = bs;
9477 
9478 	ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL,
9479 				  SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
9480 	if (!ctx->super) {
9481 		free(ctx);
9482 		cb_fn(cb_arg, -ENOMEM);
9483 		return;
9484 	}
9485 
9486 	ctx->seq = bs_sequence_start_bs(bs->md_channel, &cpl);
9487 	if (!ctx->seq) {
9488 		spdk_free(ctx->super);
9489 		free(ctx);
9490 		cb_fn(cb_arg, -ENOMEM);
9491 		return;
9492 	}
9493 
9494 	/* Read the super block */
9495 	bs_sequence_read_dev(ctx->seq, ctx->super, bs_page_to_lba(bs, 0),
9496 			     bs_byte_to_lba(bs, sizeof(*ctx->super)),
9497 			     bs_grow_live_load_super_cpl, ctx);
9498 }
9499 
9500 void
9501 spdk_bs_grow(struct spdk_bs_dev *dev, struct spdk_bs_opts *o,
9502 	     spdk_bs_op_with_handle_complete cb_fn, void *cb_arg)
9503 {
9504 	struct spdk_blob_store	*bs;
9505 	struct spdk_bs_cpl	cpl;
9506 	struct spdk_bs_load_ctx *ctx;
9507 	struct spdk_bs_opts	opts = {};
9508 	int err;
9509 
9510 	SPDK_DEBUGLOG(blob, "Loading blobstore from dev %p\n", dev);
9511 
9512 	if ((SPDK_BS_PAGE_SIZE % dev->blocklen) != 0) {
9513 		SPDK_DEBUGLOG(blob, "unsupported dev block length of %d\n", dev->blocklen);
9514 		dev->destroy(dev);
9515 		cb_fn(cb_arg, NULL, -EINVAL);
9516 		return;
9517 	}
9518 
9519 	spdk_bs_opts_init(&opts, sizeof(opts));
9520 	if (o) {
9521 		if (bs_opts_copy(o, &opts)) {
9522 			return;
9523 		}
9524 	}
9525 
9526 	if (opts.max_md_ops == 0 || opts.max_channel_ops == 0) {
9527 		dev->destroy(dev);
9528 		cb_fn(cb_arg, NULL, -EINVAL);
9529 		return;
9530 	}
9531 
9532 	err = bs_alloc(dev, &opts, &bs, &ctx);
9533 	if (err) {
9534 		dev->destroy(dev);
9535 		cb_fn(cb_arg, NULL, err);
9536 		return;
9537 	}
9538 
9539 	cpl.type = SPDK_BS_CPL_TYPE_BS_HANDLE;
9540 	cpl.u.bs_handle.cb_fn = cb_fn;
9541 	cpl.u.bs_handle.cb_arg = cb_arg;
9542 	cpl.u.bs_handle.bs = bs;
9543 
9544 	ctx->seq = bs_sequence_start_bs(bs->md_channel, &cpl);
9545 	if (!ctx->seq) {
9546 		spdk_free(ctx->super);
9547 		free(ctx);
9548 		bs_free(bs);
9549 		cb_fn(cb_arg, NULL, -ENOMEM);
9550 		return;
9551 	}
9552 
9553 	/* Read the super block */
9554 	bs_sequence_read_dev(ctx->seq, ctx->super, bs_page_to_lba(bs, 0),
9555 			     bs_byte_to_lba(bs, sizeof(*ctx->super)),
9556 			     bs_grow_load_super_cpl, ctx);
9557 }
9558 
9559 int
9560 spdk_blob_get_esnap_id(struct spdk_blob *blob, const void **id, size_t *len)
9561 {
9562 	if (!blob_is_esnap_clone(blob)) {
9563 		return -EINVAL;
9564 	}
9565 
9566 	return blob_get_xattr_value(blob, BLOB_EXTERNAL_SNAPSHOT_ID, id, len, true);
9567 }
9568 
9569 struct spdk_io_channel *
9570 blob_esnap_get_io_channel(struct spdk_io_channel *ch, struct spdk_blob *blob)
9571 {
9572 	struct spdk_bs_channel		*bs_channel = spdk_io_channel_get_ctx(ch);
9573 	struct spdk_bs_dev		*bs_dev = blob->back_bs_dev;
9574 	struct blob_esnap_channel	find = {};
9575 	struct blob_esnap_channel	*esnap_channel, *existing;
9576 
9577 	find.blob_id = blob->id;
9578 	esnap_channel = RB_FIND(blob_esnap_channel_tree, &bs_channel->esnap_channels, &find);
9579 	if (spdk_likely(esnap_channel != NULL)) {
9580 		SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": using cached channel on thread %s\n",
9581 			      blob->id, spdk_thread_get_name(spdk_get_thread()));
9582 		return esnap_channel->channel;
9583 	}
9584 
9585 	SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": allocating channel on thread %s\n",
9586 		      blob->id, spdk_thread_get_name(spdk_get_thread()));
9587 
9588 	esnap_channel = calloc(1, sizeof(*esnap_channel));
9589 	if (esnap_channel == NULL) {
9590 		SPDK_NOTICELOG("blob 0x%" PRIx64 " channel allocation failed: no memory\n",
9591 			       find.blob_id);
9592 		return NULL;
9593 	}
9594 	esnap_channel->channel = bs_dev->create_channel(bs_dev);
9595 	if (esnap_channel->channel == NULL) {
9596 		SPDK_NOTICELOG("blob 0x%" PRIx64 " back channel allocation failed\n", blob->id);
9597 		free(esnap_channel);
9598 		return NULL;
9599 	}
9600 	esnap_channel->blob_id = find.blob_id;
9601 	existing = RB_INSERT(blob_esnap_channel_tree, &bs_channel->esnap_channels, esnap_channel);
9602 	if (spdk_unlikely(existing != NULL)) {
9603 		/*
9604 		 * This should be unreachable: all modifications to this tree happen on this thread.
9605 		 */
9606 		SPDK_ERRLOG("blob 0x%" PRIx64 "lost race to allocate a channel\n", find.blob_id);
9607 		assert(false);
9608 
9609 		bs_dev->destroy_channel(bs_dev, esnap_channel->channel);
9610 		free(esnap_channel);
9611 
9612 		return existing->channel;
9613 	}
9614 
9615 	return esnap_channel->channel;
9616 }
9617 
9618 static int
9619 blob_esnap_channel_compare(struct blob_esnap_channel *c1, struct blob_esnap_channel *c2)
9620 {
9621 	return (c1->blob_id < c2->blob_id ? -1 : c1->blob_id > c2->blob_id);
9622 }
9623 
9624 struct blob_esnap_destroy_ctx {
9625 	spdk_blob_op_with_handle_complete	cb_fn;
9626 	void					*cb_arg;
9627 	struct spdk_blob			*blob;
9628 	struct spdk_bs_dev			*back_bs_dev;
9629 	bool					abort_io;
9630 };
9631 
9632 static void
9633 blob_esnap_destroy_channels_done(struct spdk_io_channel_iter *i, int status)
9634 {
9635 	struct blob_esnap_destroy_ctx	*ctx = spdk_io_channel_iter_get_ctx(i);
9636 	struct spdk_blob		*blob = ctx->blob;
9637 	struct spdk_blob_store		*bs = blob->bs;
9638 
9639 	SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": done destroying channels for this blob\n",
9640 		      blob->id);
9641 
9642 	if (ctx->cb_fn != NULL) {
9643 		ctx->cb_fn(ctx->cb_arg, blob, status);
9644 	}
9645 	free(ctx);
9646 
9647 	bs->esnap_channels_unloading--;
9648 	if (bs->esnap_channels_unloading == 0 && bs->esnap_unload_cb_fn != NULL) {
9649 		spdk_bs_unload(bs, bs->esnap_unload_cb_fn, bs->esnap_unload_cb_arg);
9650 	}
9651 }
9652 
9653 static void
9654 blob_esnap_destroy_one_channel(struct spdk_io_channel_iter *i)
9655 {
9656 	struct blob_esnap_destroy_ctx	*ctx = spdk_io_channel_iter_get_ctx(i);
9657 	struct spdk_blob		*blob = ctx->blob;
9658 	struct spdk_bs_dev		*bs_dev = ctx->back_bs_dev;
9659 	struct spdk_io_channel		*channel = spdk_io_channel_iter_get_channel(i);
9660 	struct spdk_bs_channel		*bs_channel = spdk_io_channel_get_ctx(channel);
9661 	struct blob_esnap_channel	*esnap_channel;
9662 	struct blob_esnap_channel	find = {};
9663 
9664 	assert(spdk_get_thread() == spdk_io_channel_get_thread(channel));
9665 
9666 	find.blob_id = blob->id;
9667 	esnap_channel = RB_FIND(blob_esnap_channel_tree, &bs_channel->esnap_channels, &find);
9668 	if (esnap_channel != NULL) {
9669 		SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": destroying channel on thread %s\n",
9670 			      blob->id, spdk_thread_get_name(spdk_get_thread()));
9671 		RB_REMOVE(blob_esnap_channel_tree, &bs_channel->esnap_channels, esnap_channel);
9672 
9673 		if (ctx->abort_io) {
9674 			spdk_bs_user_op_t *op, *tmp;
9675 
9676 			TAILQ_FOREACH_SAFE(op, &bs_channel->queued_io, link, tmp) {
9677 				if (op->back_channel == esnap_channel->channel) {
9678 					TAILQ_REMOVE(&bs_channel->queued_io, op, link);
9679 					bs_user_op_abort(op, -EIO);
9680 				}
9681 			}
9682 		}
9683 
9684 		bs_dev->destroy_channel(bs_dev, esnap_channel->channel);
9685 		free(esnap_channel);
9686 	}
9687 
9688 	spdk_for_each_channel_continue(i, 0);
9689 }
9690 
9691 /*
9692  * Destroy the channels for a specific blob on each thread with a blobstore channel. This should be
9693  * used when closing an esnap clone blob and after decoupling from the parent.
9694  */
9695 static void
9696 blob_esnap_destroy_bs_dev_channels(struct spdk_blob *blob, bool abort_io,
9697 				   spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
9698 {
9699 	struct blob_esnap_destroy_ctx	*ctx;
9700 
9701 	if (!blob_is_esnap_clone(blob) || blob->back_bs_dev == NULL) {
9702 		if (cb_fn != NULL) {
9703 			cb_fn(cb_arg, blob, 0);
9704 		}
9705 		return;
9706 	}
9707 
9708 	ctx = calloc(1, sizeof(*ctx));
9709 	if (ctx == NULL) {
9710 		if (cb_fn != NULL) {
9711 			cb_fn(cb_arg, blob, -ENOMEM);
9712 		}
9713 		return;
9714 	}
9715 	ctx->cb_fn = cb_fn;
9716 	ctx->cb_arg = cb_arg;
9717 	ctx->blob = blob;
9718 	ctx->back_bs_dev = blob->back_bs_dev;
9719 	ctx->abort_io = abort_io;
9720 
9721 	SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": destroying channels for this blob\n",
9722 		      blob->id);
9723 
9724 	blob->bs->esnap_channels_unloading++;
9725 	spdk_for_each_channel(blob->bs, blob_esnap_destroy_one_channel, ctx,
9726 			      blob_esnap_destroy_channels_done);
9727 }
9728 
9729 /*
9730  * Destroy all bs_dev channels on a specific blobstore channel. This should be used when a
9731  * bs_channel is destroyed.
9732  */
9733 static void
9734 blob_esnap_destroy_bs_channel(struct spdk_bs_channel *ch)
9735 {
9736 	struct blob_esnap_channel *esnap_channel, *esnap_channel_tmp;
9737 
9738 	assert(spdk_get_thread() == spdk_io_channel_get_thread(spdk_io_channel_from_ctx(ch)));
9739 
9740 	SPDK_DEBUGLOG(blob_esnap, "destroying channels on thread %s\n",
9741 		      spdk_thread_get_name(spdk_get_thread()));
9742 	RB_FOREACH_SAFE(esnap_channel, blob_esnap_channel_tree, &ch->esnap_channels,
9743 			esnap_channel_tmp) {
9744 		SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64
9745 			      ": destroying one channel in thread %s\n",
9746 			      esnap_channel->blob_id, spdk_thread_get_name(spdk_get_thread()));
9747 		RB_REMOVE(blob_esnap_channel_tree, &ch->esnap_channels, esnap_channel);
9748 		spdk_put_io_channel(esnap_channel->channel);
9749 		free(esnap_channel);
9750 	}
9751 	SPDK_DEBUGLOG(blob_esnap, "done destroying channels on thread %s\n",
9752 		      spdk_thread_get_name(spdk_get_thread()));
9753 }
9754 
9755 static void
9756 blob_set_back_bs_dev_done(void *_ctx, int bserrno)
9757 {
9758 	struct set_bs_dev_ctx	*ctx = _ctx;
9759 
9760 	if (bserrno != 0) {
9761 		/* Even though the unfreeze failed, the update may have succeed. */
9762 		SPDK_ERRLOG("blob 0x%" PRIx64 ": unfreeze failed with error %d\n", ctx->blob->id,
9763 			    bserrno);
9764 	}
9765 	ctx->cb_fn(ctx->cb_arg, ctx->bserrno);
9766 	free(ctx);
9767 }
9768 
9769 static void
9770 blob_frozen_set_back_bs_dev(void *_ctx, struct spdk_blob *blob, int bserrno)
9771 {
9772 	struct set_bs_dev_ctx	*ctx = _ctx;
9773 
9774 	if (bserrno != 0) {
9775 		SPDK_ERRLOG("blob 0x%" PRIx64 ": failed to release old back_bs_dev with error %d\n",
9776 			    blob->id, bserrno);
9777 		ctx->bserrno = bserrno;
9778 		blob_unfreeze_io(blob, blob_set_back_bs_dev_done, ctx);
9779 		return;
9780 	}
9781 
9782 	if (blob->back_bs_dev != NULL) {
9783 		blob->back_bs_dev->destroy(blob->back_bs_dev);
9784 		blob->back_bs_dev = NULL;
9785 	}
9786 
9787 	SPDK_NOTICELOG("blob 0x%" PRIx64 ": hotplugged back_bs_dev\n", blob->id);
9788 	blob->back_bs_dev = ctx->back_bs_dev;
9789 	ctx->bserrno = 0;
9790 
9791 	blob_unfreeze_io(blob, blob_set_back_bs_dev_done, ctx);
9792 }
9793 
9794 static void
9795 blob_set_back_bs_dev_frozen(void *_ctx, int bserrno)
9796 {
9797 	struct set_bs_dev_ctx	*ctx = _ctx;
9798 	struct spdk_blob	*blob = ctx->blob;
9799 
9800 	if (bserrno != 0) {
9801 		SPDK_ERRLOG("blob 0x%" PRIx64 ": failed to freeze with error %d\n", blob->id,
9802 			    bserrno);
9803 		ctx->cb_fn(ctx->cb_arg, bserrno);
9804 		free(ctx);
9805 		return;
9806 	}
9807 
9808 	/*
9809 	 * This does not prevent future reads from the esnap device because any future IO will
9810 	 * lazily create a new esnap IO channel.
9811 	 */
9812 	blob_esnap_destroy_bs_dev_channels(blob, true, blob_frozen_set_back_bs_dev, ctx);
9813 }
9814 
9815 void
9816 spdk_blob_set_esnap_bs_dev(struct spdk_blob *blob, struct spdk_bs_dev *back_bs_dev,
9817 			   spdk_blob_op_complete cb_fn, void *cb_arg)
9818 {
9819 	if (!blob_is_esnap_clone(blob)) {
9820 		SPDK_ERRLOG("blob 0x%" PRIx64 ": not an esnap clone\n", blob->id);
9821 		cb_fn(cb_arg, -EINVAL);
9822 		return;
9823 	}
9824 
9825 	blob_set_back_bs_dev(blob, back_bs_dev, cb_fn, cb_arg);
9826 }
9827 
9828 struct spdk_bs_dev *
9829 spdk_blob_get_esnap_bs_dev(const struct spdk_blob *blob)
9830 {
9831 	if (!blob_is_esnap_clone(blob)) {
9832 		SPDK_ERRLOG("blob 0x%" PRIx64 ": not an esnap clone\n", blob->id);
9833 		return NULL;
9834 	}
9835 
9836 	return blob->back_bs_dev;
9837 }
9838 
9839 bool
9840 spdk_blob_is_degraded(const struct spdk_blob *blob)
9841 {
9842 	if (blob->bs->dev->is_degraded != NULL && blob->bs->dev->is_degraded(blob->bs->dev)) {
9843 		return true;
9844 	}
9845 	if (blob->back_bs_dev == NULL || blob->back_bs_dev->is_degraded == NULL) {
9846 		return false;
9847 	}
9848 
9849 	return blob->back_bs_dev->is_degraded(blob->back_bs_dev);
9850 }
9851 
9852 SPDK_LOG_REGISTER_COMPONENT(blob)
9853 SPDK_LOG_REGISTER_COMPONENT(blob_esnap)
9854