xref: /spdk/lib/blob/blobstore.c (revision fc04b134c3ae995e5bd4184fa8c35dee30f50229)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2017 Intel Corporation.
3  *   All rights reserved.
4  *   Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
5  */
6 
7 #include "spdk/stdinc.h"
8 
9 #include "spdk/blob.h"
10 #include "spdk/crc32.h"
11 #include "spdk/env.h"
12 #include "spdk/queue.h"
13 #include "spdk/thread.h"
14 #include "spdk/bit_array.h"
15 #include "spdk/bit_pool.h"
16 #include "spdk/likely.h"
17 #include "spdk/util.h"
18 #include "spdk/string.h"
19 
20 #include "spdk_internal/assert.h"
21 #include "spdk/log.h"
22 
23 #include "blobstore.h"
24 
25 #define BLOB_CRC32C_INITIAL    0xffffffffUL
26 
27 static int bs_register_md_thread(struct spdk_blob_store *bs);
28 static int bs_unregister_md_thread(struct spdk_blob_store *bs);
29 static void blob_close_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno);
30 static void blob_insert_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num,
31 		uint64_t cluster, uint32_t extent, struct spdk_blob_md_page *page,
32 		spdk_blob_op_complete cb_fn, void *cb_arg);
33 static void blob_free_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num,
34 		uint32_t extent_page, struct spdk_blob_md_page *page, spdk_blob_op_complete cb_fn, void *cb_arg);
35 
36 static int blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value,
37 			  uint16_t value_len, bool internal);
38 static int blob_get_xattr_value(struct spdk_blob *blob, const char *name,
39 				const void **value, size_t *value_len, bool internal);
40 static int blob_remove_xattr(struct spdk_blob *blob, const char *name, bool internal);
41 
42 static void blob_write_extent_page(struct spdk_blob *blob, uint32_t extent, uint64_t cluster_num,
43 				   struct spdk_blob_md_page *page, spdk_blob_op_complete cb_fn, void *cb_arg);
44 
45 /*
46  * External snapshots require a channel per thread per esnap bdev.  The tree
47  * is populated lazily as blob IOs are handled by the back_bs_dev. When the
48  * owning bs channel is destroyed, all the esnap channels in its tree are destroyed.
49  */
50 
51 struct blob_esnap_channel {
52 	RB_ENTRY(blob_esnap_channel)	node;
53 	spdk_blob_id			blob_id;
54 	struct spdk_io_channel		*channel;
55 };
56 
57 static int blob_esnap_channel_compare(struct blob_esnap_channel *c1, struct blob_esnap_channel *c2);
58 static void blob_esnap_destroy_bs_dev_channels(struct spdk_blob *blob, bool abort_io,
59 		spdk_blob_op_with_handle_complete cb_fn, void *cb_arg);
60 static void blob_esnap_destroy_bs_channel(struct spdk_bs_channel *ch);
61 RB_GENERATE_STATIC(blob_esnap_channel_tree, blob_esnap_channel, node, blob_esnap_channel_compare)
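
/*
 * Editor's note: the tree above maps a blob ID to that thread's esnap
 * channel. A minimal lookup sketch, assuming the owning struct
 * spdk_bs_channel embeds the tree head as `esnap_channels` (implied by
 * blob_esnap_destroy_bs_channel() below); the function name is
 * illustrative, not part of this file.
 */
#if 0	/* illustrative sketch only */
static struct spdk_io_channel *
blob_example_esnap_channel_find(struct spdk_bs_channel *ch, spdk_blob_id blob_id)
{
	struct blob_esnap_channel find = { .blob_id = blob_id };
	struct blob_esnap_channel *entry;

	/* RB_FIND locates the node using blob_esnap_channel_compare() */
	entry = RB_FIND(blob_esnap_channel_tree, &ch->esnap_channels, &find);
	return entry == NULL ? NULL : entry->channel;
}
#endif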
62 
63 static inline bool
64 blob_is_esnap_clone(const struct spdk_blob *blob)
65 {
66 	assert(blob != NULL);
67 	return !!(blob->invalid_flags & SPDK_BLOB_EXTERNAL_SNAPSHOT);
68 }
69 
70 static int
71 blob_id_cmp(struct spdk_blob *blob1, struct spdk_blob *blob2)
72 {
73 	assert(blob1 != NULL && blob2 != NULL);
74 	return (blob1->id < blob2->id ? -1 : blob1->id > blob2->id);
75 }
76 
77 RB_GENERATE_STATIC(spdk_blob_tree, spdk_blob, link, blob_id_cmp);
78 
79 static void
80 blob_verify_md_op(struct spdk_blob *blob)
81 {
82 	assert(blob != NULL);
83 	assert(spdk_get_thread() == blob->bs->md_thread);
84 	assert(blob->state != SPDK_BLOB_STATE_LOADING);
85 }
86 
87 static struct spdk_blob_list *
88 bs_get_snapshot_entry(struct spdk_blob_store *bs, spdk_blob_id blobid)
89 {
90 	struct spdk_blob_list *snapshot_entry = NULL;
91 
92 	TAILQ_FOREACH(snapshot_entry, &bs->snapshots, link) {
93 		if (snapshot_entry->id == blobid) {
94 			break;
95 		}
96 	}
97 
98 	return snapshot_entry;
99 }
100 
101 static void
102 bs_claim_md_page(struct spdk_blob_store *bs, uint32_t page)
103 {
104 	assert(spdk_spin_held(&bs->used_lock));
105 	assert(page < spdk_bit_array_capacity(bs->used_md_pages));
106 	assert(spdk_bit_array_get(bs->used_md_pages, page) == false);
107 
108 	spdk_bit_array_set(bs->used_md_pages, page);
109 }
110 
111 static void
112 bs_release_md_page(struct spdk_blob_store *bs, uint32_t page)
113 {
114 	assert(spdk_spin_held(&bs->used_lock));
115 	assert(page < spdk_bit_array_capacity(bs->used_md_pages));
116 	assert(spdk_bit_array_get(bs->used_md_pages, page) == true);
117 
118 	spdk_bit_array_clear(bs->used_md_pages, page);
119 }
120 
121 static uint32_t
122 bs_claim_cluster(struct spdk_blob_store *bs)
123 {
124 	uint32_t cluster_num;
125 
126 	assert(spdk_spin_held(&bs->used_lock));
127 
128 	cluster_num = spdk_bit_pool_allocate_bit(bs->used_clusters);
129 	if (cluster_num == UINT32_MAX) {
130 		return UINT32_MAX;
131 	}
132 
133 	SPDK_DEBUGLOG(blob, "Claiming cluster %u\n", cluster_num);
134 	bs->num_free_clusters--;
135 
136 	return cluster_num;
137 }
138 
139 static void
140 bs_release_cluster(struct spdk_blob_store *bs, uint32_t cluster_num)
141 {
142 	assert(spdk_spin_held(&bs->used_lock));
143 	assert(cluster_num < spdk_bit_pool_capacity(bs->used_clusters));
144 	assert(spdk_bit_pool_is_allocated(bs->used_clusters, cluster_num) == true);
145 	assert(bs->num_free_clusters < bs->total_clusters);
146 
147 	SPDK_DEBUGLOG(blob, "Releasing cluster %u\n", cluster_num);
148 
149 	spdk_bit_pool_free_bit(bs->used_clusters, cluster_num);
150 	bs->num_free_clusters++;
151 }
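
/*
 * Editor's note: both helpers above assert that bs->used_lock is held, so
 * callers bracket them with the spinlock. A hedged sketch (hypothetical
 * function, not part of this file):
 */
#if 0	/* illustrative sketch only */
static void
blob_example_claim_and_release(struct spdk_blob_store *bs)
{
	uint32_t cluster_num;

	spdk_spin_lock(&bs->used_lock);
	cluster_num = bs_claim_cluster(bs);
	if (cluster_num != UINT32_MAX) {
		/* ... on a later failure the claim can be rolled back ... */
		bs_release_cluster(bs, cluster_num);
	}
	spdk_spin_unlock(&bs->used_lock);
}
#endif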
152 
153 static int
154 blob_insert_cluster(struct spdk_blob *blob, uint32_t cluster_num, uint64_t cluster)
155 {
156 	uint64_t *cluster_lba = &blob->active.clusters[cluster_num];
157 
158 	blob_verify_md_op(blob);
159 
160 	if (*cluster_lba != 0) {
161 		return -EEXIST;
162 	}
163 
164 	*cluster_lba = bs_cluster_to_lba(blob->bs, cluster);
165 	return 0;
166 }
167 
168 static int
169 bs_allocate_cluster(struct spdk_blob *blob, uint32_t cluster_num,
170 		    uint64_t *cluster, uint32_t *lowest_free_md_page, bool update_map)
171 {
172 	uint32_t *extent_page = NULL;
173 
174 	assert(spdk_spin_held(&blob->bs->used_lock));
175 
176 	*cluster = bs_claim_cluster(blob->bs);
177 	if (*cluster == UINT32_MAX) {
178 		/* No more free clusters. Cannot satisfy the request */
179 		return -ENOSPC;
180 	}
181 
182 	if (blob->use_extent_table) {
183 		extent_page = bs_cluster_to_extent_page(blob, cluster_num);
184 		if (*extent_page == 0) {
185 			/* An extent page must never occupy md page 0, so start the search from 1 */
186 			if (*lowest_free_md_page == 0) {
187 				*lowest_free_md_page = 1;
188 			}
189 			/* No extent_page is allocated for the cluster */
190 			*lowest_free_md_page = spdk_bit_array_find_first_clear(blob->bs->used_md_pages,
191 					       *lowest_free_md_page);
192 			if (*lowest_free_md_page == UINT32_MAX) {
193 				/* No more free md pages. Cannot satisfy the request */
194 				bs_release_cluster(blob->bs, *cluster);
195 				return -ENOSPC;
196 			}
197 			bs_claim_md_page(blob->bs, *lowest_free_md_page);
198 		}
199 	}
200 
201 	SPDK_DEBUGLOG(blob, "Claiming cluster %" PRIu64 " for blob 0x%" PRIx64 "\n", *cluster,
202 		      blob->id);
203 
204 	if (update_map) {
205 		blob_insert_cluster(blob, cluster_num, *cluster);
206 		if (blob->use_extent_table && *extent_page == 0) {
207 			*extent_page = *lowest_free_md_page;
208 		}
209 	}
210 
211 	return 0;
212 }
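
/*
 * Editor's note: lowest_free_md_page caches the bit-array search position so
 * that a loop allocating many clusters does not rescan used_md_pages from
 * the beginning on every call. A hedged caller sketch (hypothetical name,
 * not part of this file):
 */
#if 0	/* illustrative sketch only */
static int
blob_example_allocate_n_clusters(struct spdk_blob *blob, uint32_t count)
{
	uint32_t lowest_free_md_page = 0;
	uint64_t cluster;
	uint32_t i;
	int rc = 0;

	spdk_spin_lock(&blob->bs->used_lock);
	for (i = 0; i < count; i++) {
		rc = bs_allocate_cluster(blob, i, &cluster, &lowest_free_md_page, true);
		if (rc != 0) {
			break;	/* -ENOSPC: out of clusters or metadata pages */
		}
	}
	spdk_spin_unlock(&blob->bs->used_lock);
	return rc;
}
#endif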
213 
214 static void
215 blob_xattrs_init(struct spdk_blob_xattr_opts *xattrs)
216 {
217 	xattrs->count = 0;
218 	xattrs->names = NULL;
219 	xattrs->ctx = NULL;
220 	xattrs->get_value = NULL;
221 }
222 
223 void
224 spdk_blob_opts_init(struct spdk_blob_opts *opts, size_t opts_size)
225 {
226 	if (!opts) {
227 		SPDK_ERRLOG("opts should not be NULL\n");
228 		return;
229 	}
230 
231 	if (!opts_size) {
232 		SPDK_ERRLOG("opts_size should not be zero value\n");
233 		return;
234 	}
235 
236 	memset(opts, 0, opts_size);
237 	opts->opts_size = opts_size;
238 
239 #define FIELD_OK(field) \
240         offsetof(struct spdk_blob_opts, field) + sizeof(opts->field) <= opts_size
241 
242 #define SET_FIELD(field, value) \
243         if (FIELD_OK(field)) { \
244                 opts->field = value; \
245         } \
246 
247 	SET_FIELD(num_clusters, 0);
248 	SET_FIELD(thin_provision, false);
249 	SET_FIELD(clear_method, BLOB_CLEAR_WITH_DEFAULT);
250 
251 	if (FIELD_OK(xattrs)) {
252 		blob_xattrs_init(&opts->xattrs);
253 	}
254 
255 	SET_FIELD(use_extent_table, true);
256 
257 #undef FIELD_OK
258 #undef SET_FIELD
259 }
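
/*
 * Editor's note: passing sizeof(opts) for opts_size is what makes the
 * FIELD_OK/SET_FIELD scheme work across library versions - only fields that
 * fit within the caller's structure are touched. A typical caller might look
 * like this (hypothetical function; spdk_bs_create_blob_ext() is the public
 * creation API that consumes these options):
 */
#if 0	/* illustrative sketch only */
static void
blob_example_create(struct spdk_blob_store *bs,
		    spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
{
	struct spdk_blob_opts opts;

	spdk_blob_opts_init(&opts, sizeof(opts));
	opts.num_clusters = 16;
	opts.thin_provision = true;

	spdk_bs_create_blob_ext(bs, &opts, cb_fn, cb_arg);
}
#endif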
260 
261 void
262 spdk_blob_open_opts_init(struct spdk_blob_open_opts *opts, size_t opts_size)
263 {
264 	if (!opts) {
265 		SPDK_ERRLOG("opts should not be NULL\n");
266 		return;
267 	}
268 
269 	if (!opts_size) {
270 		SPDK_ERRLOG("opts_size should not be zero value\n");
271 		return;
272 	}
273 
274 	memset(opts, 0, opts_size);
275 	opts->opts_size = opts_size;
276 
277 #define FIELD_OK(field) \
278         offsetof(struct spdk_blob_open_opts, field) + sizeof(opts->field) <= opts_size
279 
280 #define SET_FIELD(field, value) \
281         if (FIELD_OK(field)) { \
282                 opts->field = value; \
283         } \
284 
285 	SET_FIELD(clear_method, BLOB_CLEAR_WITH_DEFAULT);
286 
287 #undef FIELD_OK
288 #undef SET_FIELD
289 }
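
/*
 * Editor's note: the same pattern applies when opening. A hedged sketch
 * (hypothetical function; spdk_bs_open_blob_ext() is the public API that
 * consumes these options):
 */
#if 0	/* illustrative sketch only */
static void
blob_example_open(struct spdk_blob_store *bs, spdk_blob_id blobid,
		  spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
{
	struct spdk_blob_open_opts opts;

	spdk_blob_open_opts_init(&opts, sizeof(opts));
	opts.clear_method = BLOB_CLEAR_WITH_NONE;

	spdk_bs_open_blob_ext(bs, blobid, &opts, cb_fn, cb_arg);
}
#endif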
290 
291 static struct spdk_blob *
292 blob_alloc(struct spdk_blob_store *bs, spdk_blob_id id)
293 {
294 	struct spdk_blob *blob;
295 
296 	blob = calloc(1, sizeof(*blob));
297 	if (!blob) {
298 		return NULL;
299 	}
300 
301 	blob->id = id;
302 	blob->bs = bs;
303 
304 	blob->parent_id = SPDK_BLOBID_INVALID;
305 
306 	blob->state = SPDK_BLOB_STATE_DIRTY;
307 	blob->extent_rle_found = false;
308 	blob->extent_table_found = false;
309 	blob->active.num_pages = 1;
310 	blob->active.pages = calloc(1, sizeof(*blob->active.pages));
311 	if (!blob->active.pages) {
312 		free(blob);
313 		return NULL;
314 	}
315 
316 	blob->active.pages[0] = bs_blobid_to_page(id);
317 
318 	TAILQ_INIT(&blob->xattrs);
319 	TAILQ_INIT(&blob->xattrs_internal);
320 	TAILQ_INIT(&blob->pending_persists);
321 	TAILQ_INIT(&blob->persists_to_complete);
322 
323 	return blob;
324 }
325 
326 static void
327 xattrs_free(struct spdk_xattr_tailq *xattrs)
328 {
329 	struct spdk_xattr	*xattr, *xattr_tmp;
330 
331 	TAILQ_FOREACH_SAFE(xattr, xattrs, link, xattr_tmp) {
332 		TAILQ_REMOVE(xattrs, xattr, link);
333 		free(xattr->name);
334 		free(xattr->value);
335 		free(xattr);
336 	}
337 }
338 
339 static void
340 blob_free(struct spdk_blob *blob)
341 {
342 	assert(blob != NULL);
343 	assert(TAILQ_EMPTY(&blob->pending_persists));
344 	assert(TAILQ_EMPTY(&blob->persists_to_complete));
345 
346 	free(blob->active.extent_pages);
347 	free(blob->clean.extent_pages);
348 	free(blob->active.clusters);
349 	free(blob->clean.clusters);
350 	free(blob->active.pages);
351 	free(blob->clean.pages);
352 
353 	xattrs_free(&blob->xattrs);
354 	xattrs_free(&blob->xattrs_internal);
355 
356 	if (blob->back_bs_dev) {
357 		blob->back_bs_dev->destroy(blob->back_bs_dev);
358 	}
359 
360 	free(blob);
361 }
362 
363 static void
364 blob_back_bs_destroy_esnap_done(void *ctx, struct spdk_blob *blob, int bserrno)
365 {
366 	struct spdk_bs_dev	*bs_dev = ctx;
367 
368 	if (bserrno != 0) {
369 		/*
370 		 * This is probably due to a memory allocation failure when creating the
371 		 * blob_esnap_destroy_ctx before iterating threads.
372 		 */
373 		SPDK_ERRLOG("blob 0x%" PRIx64 ": Unable to destroy bs dev channels: error %d\n",
374 			    blob->id, bserrno);
375 		assert(false);
376 	}
377 
378 	if (bs_dev == NULL) {
379 		/*
380 		 * This check exists to make scanbuild happy.
381 		 *
382 		 * blob->back_bs_dev for an esnap is NULL during the first iteration of blobs while
383 		 * the blobstore is being loaded. It could also be NULL if there was an error
384 		 * opening the esnap device. In each of these cases, no channels could have been
385 		 * created because back_bs_dev->create_channel() would have led to a NULL pointer
386 		 * deref.
387 		 */
388 		assert(false);
389 		return;
390 	}
391 
392 	SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": calling destroy on back_bs_dev\n", blob->id);
393 	bs_dev->destroy(bs_dev);
394 }
395 
396 static void
397 blob_back_bs_destroy(struct spdk_blob *blob)
398 {
399 	SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": preparing to destroy back_bs_dev\n",
400 		      blob->id);
401 
402 	blob_esnap_destroy_bs_dev_channels(blob, false, blob_back_bs_destroy_esnap_done,
403 					   blob->back_bs_dev);
404 	blob->back_bs_dev = NULL;
405 }
406 
407 struct freeze_io_ctx {
408 	struct spdk_bs_cpl cpl;
409 	struct spdk_blob *blob;
410 };
411 
412 static void
413 blob_io_sync(struct spdk_io_channel_iter *i)
414 {
415 	spdk_for_each_channel_continue(i, 0);
416 }
417 
418 static void
419 blob_execute_queued_io(struct spdk_io_channel_iter *i)
420 {
421 	struct spdk_io_channel *_ch = spdk_io_channel_iter_get_channel(i);
422 	struct spdk_bs_channel *ch = spdk_io_channel_get_ctx(_ch);
423 	struct freeze_io_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
424 	struct spdk_bs_request_set	*set;
425 	struct spdk_bs_user_op_args	*args;
426 	spdk_bs_user_op_t *op, *tmp;
427 
428 	TAILQ_FOREACH_SAFE(op, &ch->queued_io, link, tmp) {
429 		set = (struct spdk_bs_request_set *)op;
430 		args = &set->u.user_op;
431 
432 		if (args->blob == ctx->blob) {
433 			TAILQ_REMOVE(&ch->queued_io, op, link);
434 			bs_user_op_execute(op);
435 		}
436 	}
437 
438 	spdk_for_each_channel_continue(i, 0);
439 }
440 
441 static void
442 blob_io_cpl(struct spdk_io_channel_iter *i, int status)
443 {
444 	struct freeze_io_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
445 
446 	ctx->cpl.u.blob_basic.cb_fn(ctx->cpl.u.blob_basic.cb_arg, 0);
447 
448 	free(ctx);
449 }
450 
451 static void
452 blob_freeze_io(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
453 {
454 	struct freeze_io_ctx *ctx;
455 
456 	blob_verify_md_op(blob);
457 
458 	ctx = calloc(1, sizeof(*ctx));
459 	if (!ctx) {
460 		cb_fn(cb_arg, -ENOMEM);
461 		return;
462 	}
463 
464 	ctx->cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
465 	ctx->cpl.u.blob_basic.cb_fn = cb_fn;
466 	ctx->cpl.u.blob_basic.cb_arg = cb_arg;
467 	ctx->blob = blob;
468 
469 	/* Freeze I/O on blob */
470 	blob->frozen_refcnt++;
471 
472 	spdk_for_each_channel(blob->bs, blob_io_sync, ctx, blob_io_cpl);
473 }
474 
475 static void
476 blob_unfreeze_io(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
477 {
478 	struct freeze_io_ctx *ctx;
479 
480 	blob_verify_md_op(blob);
481 
482 	ctx = calloc(1, sizeof(*ctx));
483 	if (!ctx) {
484 		cb_fn(cb_arg, -ENOMEM);
485 		return;
486 	}
487 
488 	ctx->cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
489 	ctx->cpl.u.blob_basic.cb_fn = cb_fn;
490 	ctx->cpl.u.blob_basic.cb_arg = cb_arg;
491 	ctx->blob = blob;
492 
493 	assert(blob->frozen_refcnt > 0);
494 
495 	blob->frozen_refcnt--;
496 
497 	spdk_for_each_channel(blob->bs, blob_execute_queued_io, ctx, blob_io_cpl);
498 }
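
/*
 * Editor's note: frozen_refcnt makes freeze/unfreeze composable - nested
 * freezers each bump the count and I/O only resumes once it drops back to
 * zero. A hedged sketch of the intended bracket around a metadata mutation
 * (callback plumbing elided; names are illustrative):
 */
#if 0	/* illustrative sketch only */
static void
blob_example_mutate_frozen(struct spdk_blob *blob,
			   spdk_blob_op_complete frozen_cb, void *cb_arg)
{
	/* After this completes, new I/O for the blob queues on each channel. */
	blob_freeze_io(blob, frozen_cb, cb_arg);
	/* frozen_cb mutates the metadata, then calls blob_unfreeze_io(),
	 * which replays the queued ops via blob_execute_queued_io(). */
}
#endif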
499 
500 static int
501 blob_mark_clean(struct spdk_blob *blob)
502 {
503 	uint32_t *extent_pages = NULL;
504 	uint64_t *clusters = NULL;
505 	uint32_t *pages = NULL;
506 
507 	assert(blob != NULL);
508 
509 	if (blob->active.num_extent_pages) {
510 		assert(blob->active.extent_pages);
511 		extent_pages = calloc(blob->active.num_extent_pages, sizeof(*blob->active.extent_pages));
512 		if (!extent_pages) {
513 			return -ENOMEM;
514 		}
515 		memcpy(extent_pages, blob->active.extent_pages,
516 		       blob->active.num_extent_pages * sizeof(*extent_pages));
517 	}
518 
519 	if (blob->active.num_clusters) {
520 		assert(blob->active.clusters);
521 		clusters = calloc(blob->active.num_clusters, sizeof(*blob->active.clusters));
522 		if (!clusters) {
523 			free(extent_pages);
524 			return -ENOMEM;
525 		}
526 		memcpy(clusters, blob->active.clusters, blob->active.num_clusters * sizeof(*blob->active.clusters));
527 	}
528 
529 	if (blob->active.num_pages) {
530 		assert(blob->active.pages);
531 		pages = calloc(blob->active.num_pages, sizeof(*blob->active.pages));
532 		if (!pages) {
533 			free(extent_pages);
534 			free(clusters);
535 			return -ENOMEM;
536 		}
537 		memcpy(pages, blob->active.pages, blob->active.num_pages * sizeof(*blob->active.pages));
538 	}
539 
540 	free(blob->clean.extent_pages);
541 	free(blob->clean.clusters);
542 	free(blob->clean.pages);
543 
544 	blob->clean.num_extent_pages = blob->active.num_extent_pages;
545 	blob->clean.extent_pages = blob->active.extent_pages;
546 	blob->clean.num_clusters = blob->active.num_clusters;
547 	blob->clean.clusters = blob->active.clusters;
548 	blob->clean.num_pages = blob->active.num_pages;
549 	blob->clean.pages = blob->active.pages;
550 
551 	blob->active.extent_pages = extent_pages;
552 	blob->active.clusters = clusters;
553 	blob->active.pages = pages;
554 
555 	/* If the metadata was dirtied again while the metadata was being written to disk,
556 	 *  we do not want to revert the DIRTY state back to CLEAN here.
557 	 */
558 	if (blob->state == SPDK_BLOB_STATE_LOADING) {
559 		blob->state = SPDK_BLOB_STATE_CLEAN;
560 	}
561 
562 	return 0;
563 }
564 
565 static int
566 blob_deserialize_xattr(struct spdk_blob *blob,
567 		       struct spdk_blob_md_descriptor_xattr *desc_xattr, bool internal)
568 {
569 	struct spdk_xattr                       *xattr;
570 
571 	if (desc_xattr->length != sizeof(desc_xattr->name_length) +
572 	    sizeof(desc_xattr->value_length) +
573 	    desc_xattr->name_length + desc_xattr->value_length) {
574 		return -EINVAL;
575 	}
576 
577 	xattr = calloc(1, sizeof(*xattr));
578 	if (xattr == NULL) {
579 		return -ENOMEM;
580 	}
581 
582 	xattr->name = malloc(desc_xattr->name_length + 1);
583 	if (xattr->name == NULL) {
584 		free(xattr);
585 		return -ENOMEM;
586 	}
587 
588 	xattr->value = malloc(desc_xattr->value_length);
589 	if (xattr->value == NULL) {
590 		free(xattr->name);
591 		free(xattr);
592 		return -ENOMEM;
593 	}
594 
595 	memcpy(xattr->name, desc_xattr->name, desc_xattr->name_length);
596 	xattr->name[desc_xattr->name_length] = '\0';
597 	xattr->value_len = desc_xattr->value_length;
598 	memcpy(xattr->value,
599 	       (void *)((uintptr_t)desc_xattr->name + desc_xattr->name_length),
600 	       desc_xattr->value_length);
601 
602 	TAILQ_INSERT_TAIL(internal ? &blob->xattrs_internal : &blob->xattrs, xattr, link);
603 
604 	return 0;
605 }
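
/*
 * Editor's note: a worked instance of the length check above, assuming the
 * name_length/value_length fields are uint16_t (as declared in blobstore.h).
 * For an xattr named "uuid" (name_length = 4) with a 16-byte value,
 * desc_xattr->length must equal 2 + 2 + 4 + 16 = 24 bytes; any other value
 * is rejected with -EINVAL.
 */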
606 
608 static int
609 blob_parse_page(const struct spdk_blob_md_page *page, struct spdk_blob *blob)
610 {
611 	struct spdk_blob_md_descriptor *desc;
612 	size_t	cur_desc = 0;
613 	void *tmp;
614 
615 	desc = (struct spdk_blob_md_descriptor *)page->descriptors;
616 	while (cur_desc < sizeof(page->descriptors)) {
617 		if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) {
618 			if (desc->length == 0) {
619 				/* If padding and length are 0, this terminates the page */
620 				break;
621 			}
622 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) {
623 			struct spdk_blob_md_descriptor_flags	*desc_flags;
624 
625 			desc_flags = (struct spdk_blob_md_descriptor_flags *)desc;
626 
627 			if (desc_flags->length != sizeof(*desc_flags) - sizeof(*desc)) {
628 				return -EINVAL;
629 			}
630 
631 			if ((desc_flags->invalid_flags | SPDK_BLOB_INVALID_FLAGS_MASK) !=
632 			    SPDK_BLOB_INVALID_FLAGS_MASK) {
633 				return -EINVAL;
634 			}
635 
636 			if ((desc_flags->data_ro_flags | SPDK_BLOB_DATA_RO_FLAGS_MASK) !=
637 			    SPDK_BLOB_DATA_RO_FLAGS_MASK) {
638 				blob->data_ro = true;
639 				blob->md_ro = true;
640 			}
641 
642 			if ((desc_flags->md_ro_flags | SPDK_BLOB_MD_RO_FLAGS_MASK) !=
643 			    SPDK_BLOB_MD_RO_FLAGS_MASK) {
644 				blob->md_ro = true;
645 			}
646 
647 			if ((desc_flags->data_ro_flags & SPDK_BLOB_READ_ONLY)) {
648 				blob->data_ro = true;
649 				blob->md_ro = true;
650 			}
651 
652 			blob->invalid_flags = desc_flags->invalid_flags;
653 			blob->data_ro_flags = desc_flags->data_ro_flags;
654 			blob->md_ro_flags = desc_flags->md_ro_flags;
655 
656 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE) {
657 			struct spdk_blob_md_descriptor_extent_rle	*desc_extent_rle;
658 			unsigned int				i, j;
659 			unsigned int				cluster_count = blob->active.num_clusters;
660 
661 			if (blob->extent_table_found) {
662 				/* An Extent Table descriptor is already present in the md;
663 				 * the two descriptor types must never coexist. */
664 				return -EINVAL;
665 			}
666 			blob->extent_rle_found = true;
667 
668 			desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)desc;
669 
670 			if (desc_extent_rle->length == 0 ||
671 			    (desc_extent_rle->length % sizeof(desc_extent_rle->extents[0]) != 0)) {
672 				return -EINVAL;
673 			}
674 
675 			for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) {
676 				for (j = 0; j < desc_extent_rle->extents[i].length; j++) {
677 					if (desc_extent_rle->extents[i].cluster_idx != 0) {
678 						if (!spdk_bit_pool_is_allocated(blob->bs->used_clusters,
679 										desc_extent_rle->extents[i].cluster_idx + j)) {
680 							return -EINVAL;
681 						}
682 					}
683 					cluster_count++;
684 				}
685 			}
686 
687 			if (cluster_count == 0) {
688 				return -EINVAL;
689 			}
690 			tmp = realloc(blob->active.clusters, cluster_count * sizeof(*blob->active.clusters));
691 			if (tmp == NULL) {
692 				return -ENOMEM;
693 			}
694 			blob->active.clusters = tmp;
695 			blob->active.cluster_array_size = cluster_count;
696 
697 			for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) {
698 				for (j = 0; j < desc_extent_rle->extents[i].length; j++) {
699 					if (desc_extent_rle->extents[i].cluster_idx != 0) {
700 						blob->active.clusters[blob->active.num_clusters++] = bs_cluster_to_lba(blob->bs,
701 								desc_extent_rle->extents[i].cluster_idx + j);
702 					} else if (spdk_blob_is_thin_provisioned(blob)) {
703 						blob->active.clusters[blob->active.num_clusters++] = 0;
704 					} else {
705 						return -EINVAL;
706 					}
707 				}
708 			}
709 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE) {
710 			struct spdk_blob_md_descriptor_extent_table *desc_extent_table;
711 			uint32_t num_extent_pages = blob->active.num_extent_pages;
712 			uint32_t i, j;
713 			size_t extent_pages_length;
714 
715 			desc_extent_table = (struct spdk_blob_md_descriptor_extent_table *)desc;
716 			extent_pages_length = desc_extent_table->length - sizeof(desc_extent_table->num_clusters);
717 
718 			if (blob->extent_rle_found) {
719 				/* An Extent RLE descriptor is already present in the md;
720 				 * the two descriptor types must never coexist. */
721 				return -EINVAL;
722 			} else if (blob->extent_table_found &&
723 				   desc_extent_table->num_clusters != blob->remaining_clusters_in_et) {
724 				/* Number of clusters in this ET does not match number
725 				 * from previously read EXTENT_TABLE. */
726 				return -EINVAL;
727 			}
728 
729 			if (desc_extent_table->length == 0 ||
730 			    (extent_pages_length % sizeof(desc_extent_table->extent_page[0]) != 0)) {
731 				return -EINVAL;
732 			}
733 
734 			blob->extent_table_found = true;
735 
736 			for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) {
737 				num_extent_pages += desc_extent_table->extent_page[i].num_pages;
738 			}
739 
740 			if (num_extent_pages > 0) {
741 				tmp = realloc(blob->active.extent_pages, num_extent_pages * sizeof(uint32_t));
742 				if (tmp == NULL) {
743 					return -ENOMEM;
744 				}
745 				blob->active.extent_pages = tmp;
746 			}
747 			blob->active.extent_pages_array_size = num_extent_pages;
748 
749 			blob->remaining_clusters_in_et = desc_extent_table->num_clusters;
750 
751 			/* Extent table entries contain md page numbers for extent pages.
752 			 * Zeroes represent unallocated extent pages, those are run-length-encoded.
753 			 */
754 			for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) {
755 				if (desc_extent_table->extent_page[i].page_idx != 0) {
756 					assert(desc_extent_table->extent_page[i].num_pages == 1);
757 					blob->active.extent_pages[blob->active.num_extent_pages++] =
758 						desc_extent_table->extent_page[i].page_idx;
759 				} else if (spdk_blob_is_thin_provisioned(blob)) {
760 					for (j = 0; j < desc_extent_table->extent_page[i].num_pages; j++) {
761 						blob->active.extent_pages[blob->active.num_extent_pages++] = 0;
762 					}
763 				} else {
764 					return -EINVAL;
765 				}
766 			}
767 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) {
768 			struct spdk_blob_md_descriptor_extent_page	*desc_extent;
769 			unsigned int					i;
770 			unsigned int					cluster_count = 0;
771 			size_t						cluster_idx_length;
772 
773 			if (blob->extent_rle_found) {
774 				/* An Extent RLE descriptor is already present in the md;
775 				 * the two descriptor types must never coexist. */
776 				return -EINVAL;
777 			}
778 
779 			desc_extent = (struct spdk_blob_md_descriptor_extent_page *)desc;
780 			cluster_idx_length = desc_extent->length - sizeof(desc_extent->start_cluster_idx);
781 
782 			if (desc_extent->length <= sizeof(desc_extent->start_cluster_idx) ||
783 			    (cluster_idx_length % sizeof(desc_extent->cluster_idx[0]) != 0)) {
784 				return -EINVAL;
785 			}
786 
787 			for (i = 0; i < cluster_idx_length / sizeof(desc_extent->cluster_idx[0]); i++) {
788 				if (desc_extent->cluster_idx[i] != 0) {
789 					if (!spdk_bit_pool_is_allocated(blob->bs->used_clusters, desc_extent->cluster_idx[i])) {
790 						return -EINVAL;
791 					}
792 				}
793 				cluster_count++;
794 			}
795 
796 			if (cluster_count == 0) {
797 				return -EINVAL;
798 			}
799 
800 			/* When reading extent pages sequentially, the starting cluster idx should
801 			 * match the current size of the blob.
802 			 * If this is ever changed to batch reading, this check must be removed. */
803 			if (desc_extent->start_cluster_idx != blob->active.num_clusters) {
804 				return -EINVAL;
805 			}
806 
807 			tmp = realloc(blob->active.clusters,
808 				      (cluster_count + blob->active.num_clusters) * sizeof(*blob->active.clusters));
809 			if (tmp == NULL) {
810 				return -ENOMEM;
811 			}
812 			blob->active.clusters = tmp;
813 			blob->active.cluster_array_size = (cluster_count + blob->active.num_clusters);
814 
815 			for (i = 0; i < cluster_idx_length / sizeof(desc_extent->cluster_idx[0]); i++) {
816 				if (desc_extent->cluster_idx[i] != 0) {
817 					blob->active.clusters[blob->active.num_clusters++] = bs_cluster_to_lba(blob->bs,
818 							desc_extent->cluster_idx[i]);
819 				} else if (spdk_blob_is_thin_provisioned(blob)) {
820 					blob->active.clusters[blob->active.num_clusters++] = 0;
821 				} else {
822 					return -EINVAL;
823 				}
824 			}
825 			assert(desc_extent->start_cluster_idx + cluster_count == blob->active.num_clusters);
826 			assert(blob->remaining_clusters_in_et >= cluster_count);
827 			blob->remaining_clusters_in_et -= cluster_count;
828 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) {
829 			int rc;
830 
831 			rc = blob_deserialize_xattr(blob,
832 						    (struct spdk_blob_md_descriptor_xattr *) desc, false);
833 			if (rc != 0) {
834 				return rc;
835 			}
836 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) {
837 			int rc;
838 
839 			rc = blob_deserialize_xattr(blob,
840 						    (struct spdk_blob_md_descriptor_xattr *) desc, true);
841 			if (rc != 0) {
842 				return rc;
843 			}
844 		} else {
845 			/* Unrecognized descriptor type.  Do not fail - just continue to the
846 			 *  next descriptor.  If this descriptor is associated with some feature
847 			 *  defined in a newer version of blobstore, that version of blobstore
848 			 *  should create and set an associated feature flag to specify if this
849 			 *  blob can be loaded or not.
850 			 */
851 		}
852 
853 		/* Advance to the next descriptor */
854 		cur_desc += sizeof(*desc) + desc->length;
855 		if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) {
856 			break;
857 		}
858 		desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc);
859 	}
860 
861 	return 0;
862 }
863 
864 static bool bs_load_cur_extent_page_valid(struct spdk_blob_md_page *page);
865 
866 static int
867 blob_parse_extent_page(struct spdk_blob_md_page *extent_page, struct spdk_blob *blob)
868 {
869 	assert(blob != NULL);
870 	assert(blob->state == SPDK_BLOB_STATE_LOADING);
871 
872 	if (bs_load_cur_extent_page_valid(extent_page) == false) {
873 		return -ENOENT;
874 	}
875 
876 	return blob_parse_page(extent_page, blob);
877 }
878 
879 static int
880 blob_parse(const struct spdk_blob_md_page *pages, uint32_t page_count,
881 	   struct spdk_blob *blob)
882 {
883 	const struct spdk_blob_md_page *page;
884 	uint32_t i;
885 	int rc;
886 	void *tmp;
887 
888 	assert(page_count > 0);
889 	assert(pages[0].sequence_num == 0);
890 	assert(blob != NULL);
891 	assert(blob->state == SPDK_BLOB_STATE_LOADING);
892 	assert(blob->active.clusters == NULL);
893 
894 	/* The blobid provided may not match what's in the MD; this can
895 	 * happen, for example, if a bogus blobid is passed in through open.
896 	 */
897 	if (blob->id != pages[0].id) {
898 		SPDK_ERRLOG("Blobid (0x%" PRIx64 ") doesn't match what's in metadata "
899 			    "(0x%" PRIx64 ")\n", blob->id, pages[0].id);
900 		return -ENOENT;
901 	}
902 
903 	tmp = realloc(blob->active.pages, page_count * sizeof(*blob->active.pages));
904 	if (!tmp) {
905 		return -ENOMEM;
906 	}
907 	blob->active.pages = tmp;
908 
909 	blob->active.pages[0] = pages[0].id;
910 
911 	for (i = 1; i < page_count; i++) {
912 		assert(spdk_bit_array_get(blob->bs->used_md_pages, pages[i - 1].next));
913 		blob->active.pages[i] = pages[i - 1].next;
914 	}
915 	blob->active.num_pages = page_count;
916 
917 	for (i = 0; i < page_count; i++) {
918 		page = &pages[i];
919 
920 		assert(page->id == blob->id);
921 		assert(page->sequence_num == i);
922 
923 		rc = blob_parse_page(page, blob);
924 		if (rc != 0) {
925 			return rc;
926 		}
927 	}
928 
929 	return 0;
930 }
931 
932 static int
933 blob_serialize_add_page(const struct spdk_blob *blob,
934 			struct spdk_blob_md_page **pages,
935 			uint32_t *page_count,
936 			struct spdk_blob_md_page **last_page)
937 {
938 	struct spdk_blob_md_page *page, *tmp_pages;
939 
940 	assert(pages != NULL);
941 	assert(page_count != NULL);
942 
943 	*last_page = NULL;
944 	if (*page_count == 0) {
945 		assert(*pages == NULL);
946 		*pages = spdk_malloc(SPDK_BS_PAGE_SIZE, 0,
947 				     NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
948 		if (*pages == NULL) {
949 			return -ENOMEM;
950 		}
951 		*page_count = 1;
952 	} else {
953 		assert(*pages != NULL);
954 		tmp_pages = spdk_realloc(*pages, SPDK_BS_PAGE_SIZE * (*page_count + 1), 0);
955 		if (tmp_pages == NULL) {
956 			return -ENOMEM;
957 		}
958 		(*page_count)++;
959 		*pages = tmp_pages;
960 	}
961 
962 	page = &(*pages)[*page_count - 1];
963 	memset(page, 0, sizeof(*page));
964 	page->id = blob->id;
965 	page->sequence_num = *page_count - 1;
966 	page->next = SPDK_INVALID_MD_PAGE;
967 	*last_page = page;
968 
969 	return 0;
970 }
971 
972 /* Transform the in-memory representation 'xattr' into an on-disk xattr descriptor.
973  * Update required_sz on both success and failure.
975  */
976 static int
977 blob_serialize_xattr(const struct spdk_xattr *xattr,
978 		     uint8_t *buf, size_t buf_sz,
979 		     size_t *required_sz, bool internal)
980 {
981 	struct spdk_blob_md_descriptor_xattr	*desc;
982 
983 	*required_sz = sizeof(struct spdk_blob_md_descriptor_xattr) +
984 		       strlen(xattr->name) +
985 		       xattr->value_len;
986 
987 	if (buf_sz < *required_sz) {
988 		return -1;
989 	}
990 
991 	desc = (struct spdk_blob_md_descriptor_xattr *)buf;
992 
993 	desc->type = internal ? SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL : SPDK_MD_DESCRIPTOR_TYPE_XATTR;
994 	desc->length = sizeof(desc->name_length) +
995 		       sizeof(desc->value_length) +
996 		       strlen(xattr->name) +
997 		       xattr->value_len;
998 	desc->name_length = strlen(xattr->name);
999 	desc->value_length = xattr->value_len;
1000 
1001 	memcpy(desc->name, xattr->name, desc->name_length);
1002 	memcpy((void *)((uintptr_t)desc->name + desc->name_length),
1003 	       xattr->value,
1004 	       desc->value_length);
1005 
1006 	return 0;
1007 }
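
/*
 * Editor's note: required_sz is reported even when serialization fails, so
 * the caller can tell a "page is nearly full" failure from an xattr that
 * could never fit. For example, with name "x" (1 byte) and an 8-byte value,
 * required_sz = sizeof(struct spdk_blob_md_descriptor_xattr) + 1 + 8; if
 * that exceeds sizeof(page->descriptors), retrying on a fresh page (as
 * blob_serialize_xattrs() below does) will fail as well.
 */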
1008 
1009 static void
1010 blob_serialize_extent_table_entry(const struct spdk_blob *blob,
1011 				  uint64_t start_ep, uint64_t *next_ep,
1012 				  uint8_t **buf, size_t *remaining_sz)
1013 {
1014 	struct spdk_blob_md_descriptor_extent_table *desc;
1015 	size_t cur_sz;
1016 	uint64_t i, et_idx;
1017 	uint32_t extent_page, ep_len;
1018 
1019 	/* The buffer must have room for at least the num_clusters entry */
1020 	cur_sz = sizeof(struct spdk_blob_md_descriptor) + sizeof(desc->num_clusters);
1021 	if (*remaining_sz < cur_sz) {
1022 		*next_ep = start_ep;
1023 		return;
1024 	}
1025 
1026 	desc = (struct spdk_blob_md_descriptor_extent_table *)*buf;
1027 	desc->type = SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE;
1028 
1029 	desc->num_clusters = blob->active.num_clusters;
1030 
1031 	ep_len = 1;
1032 	et_idx = 0;
1033 	for (i = start_ep; i < blob->active.num_extent_pages; i++) {
1034 		if (*remaining_sz < cur_sz + sizeof(desc->extent_page[0])) {
1035 			/* If we ran out of buffer space, return */
1036 			break;
1037 		}
1038 
1039 		extent_page = blob->active.extent_pages[i];
1040 		/* Extend the run if this and the next extent_page are both unallocated */
1041 		if (extent_page == 0 &&
1042 		    (i + 1 < blob->active.num_extent_pages && blob->active.extent_pages[i + 1] == 0)) {
1043 			ep_len++;
1044 			continue;
1045 		}
1046 		desc->extent_page[et_idx].page_idx = extent_page;
1047 		desc->extent_page[et_idx].num_pages = ep_len;
1048 		et_idx++;
1049 
1050 		ep_len = 1;
1051 		cur_sz += sizeof(desc->extent_page[et_idx]);
1052 	}
1053 	*next_ep = i;
1054 
1055 	desc->length = sizeof(desc->num_clusters) + sizeof(desc->extent_page[0]) * et_idx;
1056 	*remaining_sz -= sizeof(struct spdk_blob_md_descriptor) + desc->length;
1057 	*buf += sizeof(struct spdk_blob_md_descriptor) + desc->length;
1058 }
1059 
1060 static int
1061 blob_serialize_extent_table(const struct spdk_blob *blob,
1062 			    struct spdk_blob_md_page **pages,
1063 			    struct spdk_blob_md_page *cur_page,
1064 			    uint32_t *page_count, uint8_t **buf,
1065 			    size_t *remaining_sz)
1066 {
1067 	uint64_t				last_extent_page;
1068 	int					rc;
1069 
1070 	last_extent_page = 0;
1071 	/* At least one extent table descriptor always has to be persisted.
1072 	 * That is why the loop below runs even when num_extent_pages == 0. */
1073 	while (last_extent_page <= blob->active.num_extent_pages) {
1074 		blob_serialize_extent_table_entry(blob, last_extent_page, &last_extent_page, buf,
1075 						  remaining_sz);
1076 
1077 		if (last_extent_page == blob->active.num_extent_pages) {
1078 			break;
1079 		}
1080 
1081 		rc = blob_serialize_add_page(blob, pages, page_count, &cur_page);
1082 		if (rc < 0) {
1083 			return rc;
1084 		}
1085 
1086 		*buf = (uint8_t *)cur_page->descriptors;
1087 		*remaining_sz = sizeof(cur_page->descriptors);
1088 	}
1089 
1090 	return 0;
1091 }
1092 
1093 static void
1094 blob_serialize_extent_rle(const struct spdk_blob *blob,
1095 			  uint64_t start_cluster, uint64_t *next_cluster,
1096 			  uint8_t **buf, size_t *buf_sz)
1097 {
1098 	struct spdk_blob_md_descriptor_extent_rle *desc_extent_rle;
1099 	size_t cur_sz;
1100 	uint64_t i, extent_idx;
1101 	uint64_t lba, lba_per_cluster, lba_count;
1102 
1103 	/* The buffer must have room for at least one extent */
1104 	cur_sz = sizeof(struct spdk_blob_md_descriptor) + sizeof(desc_extent_rle->extents[0]);
1105 	if (*buf_sz < cur_sz) {
1106 		*next_cluster = start_cluster;
1107 		return;
1108 	}
1109 
1110 	desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)*buf;
1111 	desc_extent_rle->type = SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE;
1112 
1113 	lba_per_cluster = bs_cluster_to_lba(blob->bs, 1);
1114 	/* Assert for scan-build false positive */
1115 	assert(lba_per_cluster > 0);
1116 
1117 	lba = blob->active.clusters[start_cluster];
1118 	lba_count = lba_per_cluster;
1119 	extent_idx = 0;
1120 	for (i = start_cluster + 1; i < blob->active.num_clusters; i++) {
1121 		if ((lba + lba_count) == blob->active.clusters[i] && lba != 0) {
1122 			/* Run-length encode sequential non-zero LBA */
1123 			lba_count += lba_per_cluster;
1124 			continue;
1125 		} else if (lba == 0 && blob->active.clusters[i] == 0) {
1126 			/* Run-length encode unallocated clusters */
1127 			lba_count += lba_per_cluster;
1128 			continue;
1129 		}
1130 		desc_extent_rle->extents[extent_idx].cluster_idx = lba / lba_per_cluster;
1131 		desc_extent_rle->extents[extent_idx].length = lba_count / lba_per_cluster;
1132 		extent_idx++;
1133 
1134 		cur_sz += sizeof(desc_extent_rle->extents[extent_idx]);
1135 
1136 		if (*buf_sz < cur_sz) {
1137 			/* If we ran out of buffer space, return */
1138 			*next_cluster = i;
1139 			break;
1140 		}
1141 
1142 		lba = blob->active.clusters[i];
1143 		lba_count = lba_per_cluster;
1144 	}
1145 
1146 	if (*buf_sz >= cur_sz) {
1147 		desc_extent_rle->extents[extent_idx].cluster_idx = lba / lba_per_cluster;
1148 		desc_extent_rle->extents[extent_idx].length = lba_count / lba_per_cluster;
1149 		extent_idx++;
1150 
1151 		*next_cluster = blob->active.num_clusters;
1152 	}
1153 
1154 	desc_extent_rle->length = sizeof(desc_extent_rle->extents[0]) * extent_idx;
1155 	*buf_sz -= sizeof(struct spdk_blob_md_descriptor) + desc_extent_rle->length;
1156 	*buf += sizeof(struct spdk_blob_md_descriptor) + desc_extent_rle->length;
1157 }
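
/*
 * Editor's note: a worked run-length example for the loop above. With
 * active.clusters holding LBAs { C, C + lba_per_cluster, 0, 0, D } (C and D
 * non-zero and non-adjacent), three extents are emitted:
 * { cluster_idx = C / lba_per_cluster, length = 2 } for the sequential run,
 * { cluster_idx = 0, length = 2 } for the unallocated run, and
 * { cluster_idx = D / lba_per_cluster, length = 1 } from the final flush.
 */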
1158 
1159 static int
1160 blob_serialize_extents_rle(const struct spdk_blob *blob,
1161 			   struct spdk_blob_md_page **pages,
1162 			   struct spdk_blob_md_page *cur_page,
1163 			   uint32_t *page_count, uint8_t **buf,
1164 			   size_t *remaining_sz)
1165 {
1166 	uint64_t				last_cluster;
1167 	int					rc;
1168 
1169 	last_cluster = 0;
1170 	while (last_cluster < blob->active.num_clusters) {
1171 		blob_serialize_extent_rle(blob, last_cluster, &last_cluster, buf, remaining_sz);
1172 
1173 		if (last_cluster == blob->active.num_clusters) {
1174 			break;
1175 		}
1176 
1177 		rc = blob_serialize_add_page(blob, pages, page_count, &cur_page);
1178 		if (rc < 0) {
1179 			return rc;
1180 		}
1181 
1182 		*buf = (uint8_t *)cur_page->descriptors;
1183 		*remaining_sz = sizeof(cur_page->descriptors);
1184 	}
1185 
1186 	return 0;
1187 }
1188 
1189 static void
1190 blob_serialize_extent_page(const struct spdk_blob *blob,
1191 			   uint64_t cluster, struct spdk_blob_md_page *page)
1192 {
1193 	struct spdk_blob_md_descriptor_extent_page *desc_extent;
1194 	uint64_t i, extent_idx;
1195 	uint64_t lba, lba_per_cluster;
1196 	uint64_t start_cluster_idx = (cluster / SPDK_EXTENTS_PER_EP) * SPDK_EXTENTS_PER_EP;
1197 
1198 	desc_extent = (struct spdk_blob_md_descriptor_extent_page *) page->descriptors;
1199 	desc_extent->type = SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE;
1200 
1201 	lba_per_cluster = bs_cluster_to_lba(blob->bs, 1);
1202 
1203 	desc_extent->start_cluster_idx = start_cluster_idx;
1204 	extent_idx = 0;
1205 	for (i = start_cluster_idx; i < blob->active.num_clusters; i++) {
1206 		lba = blob->active.clusters[i];
1207 		desc_extent->cluster_idx[extent_idx++] = lba / lba_per_cluster;
1208 		if (extent_idx >= SPDK_EXTENTS_PER_EP) {
1209 			break;
1210 		}
1211 	}
1212 	desc_extent->length = sizeof(desc_extent->start_cluster_idx) +
1213 			      sizeof(desc_extent->cluster_idx[0]) * extent_idx;
1214 }
1215 
1216 static void
1217 blob_serialize_flags(const struct spdk_blob *blob,
1218 		     uint8_t *buf, size_t *buf_sz)
1219 {
1220 	struct spdk_blob_md_descriptor_flags *desc;
1221 
1222 	/*
1223 	 * Flags get serialized first, so we should always have room for the flags
1224 	 *  descriptor.
1225 	 */
1226 	assert(*buf_sz >= sizeof(*desc));
1227 
1228 	desc = (struct spdk_blob_md_descriptor_flags *)buf;
1229 	desc->type = SPDK_MD_DESCRIPTOR_TYPE_FLAGS;
1230 	desc->length = sizeof(*desc) - sizeof(struct spdk_blob_md_descriptor);
1231 	desc->invalid_flags = blob->invalid_flags;
1232 	desc->data_ro_flags = blob->data_ro_flags;
1233 	desc->md_ro_flags = blob->md_ro_flags;
1234 
1235 	*buf_sz -= sizeof(*desc);
1236 }
1237 
1238 static int
1239 blob_serialize_xattrs(const struct spdk_blob *blob,
1240 		      const struct spdk_xattr_tailq *xattrs, bool internal,
1241 		      struct spdk_blob_md_page **pages,
1242 		      struct spdk_blob_md_page *cur_page,
1243 		      uint32_t *page_count, uint8_t **buf,
1244 		      size_t *remaining_sz)
1245 {
1246 	const struct spdk_xattr	*xattr;
1247 	int	rc;
1248 
1249 	TAILQ_FOREACH(xattr, xattrs, link) {
1250 		size_t required_sz = 0;
1251 
1252 		rc = blob_serialize_xattr(xattr,
1253 					  *buf, *remaining_sz,
1254 					  &required_sz, internal);
1255 		if (rc < 0) {
1256 			/* Need to add a new page to the chain */
1257 			rc = blob_serialize_add_page(blob, pages, page_count,
1258 						     &cur_page);
1259 			if (rc < 0) {
1260 				spdk_free(*pages);
1261 				*pages = NULL;
1262 				*page_count = 0;
1263 				return rc;
1264 			}
1265 
1266 			*buf = (uint8_t *)cur_page->descriptors;
1267 			*remaining_sz = sizeof(cur_page->descriptors);
1268 
1269 			/* Try again */
1270 			required_sz = 0;
1271 			rc = blob_serialize_xattr(xattr,
1272 						  *buf, *remaining_sz,
1273 						  &required_sz, internal);
1274 
1275 			if (rc < 0) {
1276 				spdk_free(*pages);
1277 				*pages = NULL;
1278 				*page_count = 0;
1279 				return rc;
1280 			}
1281 		}
1282 
1283 		*remaining_sz -= required_sz;
1284 		*buf += required_sz;
1285 	}
1286 
1287 	return 0;
1288 }
1289 
1290 static int
1291 blob_serialize(const struct spdk_blob *blob, struct spdk_blob_md_page **pages,
1292 	       uint32_t *page_count)
1293 {
1294 	struct spdk_blob_md_page		*cur_page;
1295 	int					rc;
1296 	uint8_t					*buf;
1297 	size_t					remaining_sz;
1298 
1299 	assert(pages != NULL);
1300 	assert(page_count != NULL);
1301 	assert(blob != NULL);
1302 	assert(blob->state == SPDK_BLOB_STATE_DIRTY);
1303 
1304 	*pages = NULL;
1305 	*page_count = 0;
1306 
1307 	/* A blob always has at least 1 page, even if it has no descriptors */
1308 	rc = blob_serialize_add_page(blob, pages, page_count, &cur_page);
1309 	if (rc < 0) {
1310 		return rc;
1311 	}
1312 
1313 	buf = (uint8_t *)cur_page->descriptors;
1314 	remaining_sz = sizeof(cur_page->descriptors);
1315 
1316 	/* Serialize flags */
1317 	blob_serialize_flags(blob, buf, &remaining_sz);
1318 	buf += sizeof(struct spdk_blob_md_descriptor_flags);
1319 
1320 	/* Serialize xattrs */
1321 	rc = blob_serialize_xattrs(blob, &blob->xattrs, false,
1322 				   pages, cur_page, page_count, &buf, &remaining_sz);
1323 	if (rc < 0) {
1324 		return rc;
1325 	}
1326 
1327 	/* Serialize internal xattrs */
1328 	rc = blob_serialize_xattrs(blob, &blob->xattrs_internal, true,
1329 				   pages, cur_page, page_count, &buf, &remaining_sz);
1330 	if (rc < 0) {
1331 		return rc;
1332 	}
1333 
1334 	if (blob->use_extent_table) {
1335 		/* Serialize extent table */
1336 		rc = blob_serialize_extent_table(blob, pages, cur_page, page_count, &buf, &remaining_sz);
1337 	} else {
1338 		/* Serialize extents */
1339 		rc = blob_serialize_extents_rle(blob, pages, cur_page, page_count, &buf, &remaining_sz);
1340 	}
1341 
1342 	return rc;
1343 }
1344 
1345 struct spdk_blob_load_ctx {
1346 	struct spdk_blob		*blob;
1347 
1348 	struct spdk_blob_md_page	*pages;
1349 	uint32_t			num_pages;
1350 	uint32_t			next_extent_page;
1351 	spdk_bs_sequence_t	        *seq;
1352 
1353 	spdk_bs_sequence_cpl		cb_fn;
1354 	void				*cb_arg;
1355 };
1356 
1357 static uint32_t
1358 blob_md_page_calc_crc(void *page)
1359 {
1360 	uint32_t		crc;
1361 
1362 	crc = BLOB_CRC32C_INITIAL;
1363 	crc = spdk_crc32c_update(page, SPDK_BS_PAGE_SIZE - 4, crc);
1364 	crc ^= BLOB_CRC32C_INITIAL;
1365 
1366 	return crc;
1368 }
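
/*
 * Editor's note: the CRC covers the whole metadata page except its last four
 * bytes, where the expected value is stored. A minimal verification sketch
 * (the same pattern appears in blob_load_cpl() below; the function name is
 * illustrative):
 */
#if 0	/* illustrative sketch only */
static bool
blob_example_md_page_ok(struct spdk_blob_md_page *page)
{
	return blob_md_page_calc_crc(page) == page->crc;
}
#endif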
1369 
1370 static void
1371 blob_load_final(struct spdk_blob_load_ctx *ctx, int bserrno)
1372 {
1373 	struct spdk_blob		*blob = ctx->blob;
1374 
1375 	if (bserrno == 0) {
1376 		blob_mark_clean(blob);
1377 	}
1378 
1379 	ctx->cb_fn(ctx->seq, ctx->cb_arg, bserrno);
1380 
1381 	/* Free the memory */
1382 	spdk_free(ctx->pages);
1383 	free(ctx);
1384 }
1385 
1386 static void
1387 blob_load_snapshot_cpl(void *cb_arg, struct spdk_blob *snapshot, int bserrno)
1388 {
1389 	struct spdk_blob_load_ctx	*ctx = cb_arg;
1390 	struct spdk_blob		*blob = ctx->blob;
1391 
1392 	if (bserrno == 0) {
1393 		blob->back_bs_dev = bs_create_blob_bs_dev(snapshot);
1394 		if (blob->back_bs_dev == NULL) {
1395 			bserrno = -ENOMEM;
1396 		}
1397 	}
1398 	if (bserrno != 0) {
1399 		SPDK_ERRLOG("Failed to load snapshot\n");
1400 	}
1401 
1402 	blob_load_final(ctx, bserrno);
1403 }
1404 
1405 static void blob_update_clear_method(struct spdk_blob *blob);
1406 
1407 static int
1408 blob_load_esnap(struct spdk_blob *blob, void *blob_ctx)
1409 {
1410 	struct spdk_blob_store *bs = blob->bs;
1411 	struct spdk_bs_dev *bs_dev = NULL;
1412 	const void *esnap_id = NULL;
1413 	size_t id_len = 0;
1414 	int rc;
1415 
1416 	if (bs->esnap_bs_dev_create == NULL) {
1417 		SPDK_NOTICELOG("blob 0x%" PRIx64 " is an esnap clone but the blobstore was opened "
1418 			       "without support for esnap clones\n", blob->id);
1419 		return -ENOTSUP;
1420 	}
1421 	assert(blob->back_bs_dev == NULL);
1422 
1423 	rc = blob_get_xattr_value(blob, BLOB_EXTERNAL_SNAPSHOT_ID, &esnap_id, &id_len, true);
1424 	if (rc != 0) {
1425 		SPDK_ERRLOG("blob 0x%" PRIx64 " is an esnap clone but has no esnap ID\n", blob->id);
1426 		return -EINVAL;
1427 	}
1428 	assert(id_len > 0 && id_len < UINT32_MAX);
1429 
1430 	SPDK_INFOLOG(blob, "Creating external snapshot device\n");
1431 
1432 	rc = bs->esnap_bs_dev_create(bs->esnap_ctx, blob_ctx, blob, esnap_id, (uint32_t)id_len,
1433 				     &bs_dev);
1434 	if (rc != 0) {
1435 		SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": failed to load back_bs_dev "
1436 			      "with error %d\n", blob->id, rc);
1437 		return rc;
1438 	}
1439 
1440 	/*
1441 	 * Note: bs_dev might be NULL if the consumer chose to not open the external snapshot.
1442 	 * This can happen, in particular, during spdk_bs_load() iteration.
1443 	 */
1444 	if (bs_dev != NULL) {
1445 		SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": loaded back_bs_dev\n", blob->id);
1446 		if ((bs->io_unit_size % bs_dev->blocklen) != 0) {
1447 			SPDK_NOTICELOG("blob 0x%" PRIx64 " external snapshot device block size %u "
1448 				       "is not compatible with blobstore io unit size %u\n",
1449 				       blob->id, bs_dev->blocklen, bs->io_unit_size);
1450 			bs_dev->destroy(bs_dev);
1451 			return -EINVAL;
1452 		}
1453 	}
1454 
1455 	blob->back_bs_dev = bs_dev;
1456 	blob->parent_id = SPDK_BLOBID_EXTERNAL_SNAPSHOT;
1457 
1458 	return 0;
1459 }
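
/*
 * Editor's note: the create callback is supplied by the blobstore consumer
 * and may legitimately return success with *bs_dev == NULL to defer opening
 * the external snapshot. A hedged sketch, with the signature inferred from
 * the call site above (the name and body are illustrative):
 */
#if 0	/* illustrative sketch only */
static int
blob_example_esnap_dev_create(void *bs_ctx, void *blob_ctx, struct spdk_blob *blob,
			      const void *esnap_id, uint32_t id_len,
			      struct spdk_bs_dev **bs_dev)
{
	/* Interpret esnap_id (e.g. as a bdev name), open that device, and
	 * return a struct spdk_bs_dev wrapping it. Returning 0 with
	 * *bs_dev == NULL defers opening, as the comment below notes. */
	*bs_dev = NULL;
	return 0;
}
#endif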
1460 
1461 static void
1462 blob_load_backing_dev(spdk_bs_sequence_t *seq, void *cb_arg)
1463 {
1464 	struct spdk_blob_load_ctx	*ctx = cb_arg;
1465 	struct spdk_blob		*blob = ctx->blob;
1466 	const void			*value;
1467 	size_t				len;
1468 	int				rc;
1469 
1470 	if (blob_is_esnap_clone(blob)) {
1471 		rc = blob_load_esnap(blob, seq->cpl.u.blob_handle.esnap_ctx);
1472 		blob_load_final(ctx, rc);
1473 		return;
1474 	}
1475 
1476 	if (spdk_blob_is_thin_provisioned(blob)) {
1477 		rc = blob_get_xattr_value(blob, BLOB_SNAPSHOT, &value, &len, true);
1478 		if (rc == 0) {
1479 			if (len != sizeof(spdk_blob_id)) {
1480 				blob_load_final(ctx, -EINVAL);
1481 				return;
1482 			}
1483 			/* open snapshot blob and continue in the callback function */
1484 			blob->parent_id = *(spdk_blob_id *)value;
1485 			spdk_bs_open_blob(blob->bs, blob->parent_id,
1486 					  blob_load_snapshot_cpl, ctx);
1487 			return;
1488 		} else {
1489 			/* add zeroes_dev for thin provisioned blob */
1490 			blob->back_bs_dev = bs_create_zeroes_dev();
1491 		}
1492 	} else {
1493 		/* standard blob */
1494 		blob->back_bs_dev = NULL;
1495 	}
1496 	blob_load_final(ctx, 0);
1497 }
1498 
1499 static void
1500 blob_load_cpl_extents_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
1501 {
1502 	struct spdk_blob_load_ctx	*ctx = cb_arg;
1503 	struct spdk_blob		*blob = ctx->blob;
1504 	struct spdk_blob_md_page	*page;
1505 	uint64_t			i;
1506 	uint32_t			crc;
1507 	uint64_t			lba;
1508 	void				*tmp;
1509 	uint64_t			sz;
1510 
1511 	if (bserrno) {
1512 		SPDK_ERRLOG("Extent page read failed: %d\n", bserrno);
1513 		blob_load_final(ctx, bserrno);
1514 		return;
1515 	}
1516 
1517 	if (ctx->pages == NULL) {
1518 		/* First iteration of this function; allocate a buffer for a single EXTENT_PAGE */
1519 		ctx->pages = spdk_zmalloc(SPDK_BS_PAGE_SIZE, 0,
1520 					  NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
1521 		if (!ctx->pages) {
1522 			blob_load_final(ctx, -ENOMEM);
1523 			return;
1524 		}
1525 		ctx->num_pages = 1;
1526 		ctx->next_extent_page = 0;
1527 	} else {
1528 		page = &ctx->pages[0];
1529 		crc = blob_md_page_calc_crc(page);
1530 		if (crc != page->crc) {
1531 			blob_load_final(ctx, -EINVAL);
1532 			return;
1533 		}
1534 
1535 		if (page->next != SPDK_INVALID_MD_PAGE) {
1536 			blob_load_final(ctx, -EINVAL);
1537 			return;
1538 		}
1539 
1540 		bserrno = blob_parse_extent_page(page, blob);
1541 		if (bserrno) {
1542 			blob_load_final(ctx, bserrno);
1543 			return;
1544 		}
1545 	}
1546 
1547 	for (i = ctx->next_extent_page; i < blob->active.num_extent_pages; i++) {
1548 		if (blob->active.extent_pages[i] != 0) {
1549 			/* Extent page was allocated, read and parse it. */
1550 			lba = bs_md_page_to_lba(blob->bs, blob->active.extent_pages[i]);
1551 			ctx->next_extent_page = i + 1;
1552 
1553 			bs_sequence_read_dev(seq, &ctx->pages[0], lba,
1554 					     bs_byte_to_lba(blob->bs, SPDK_BS_PAGE_SIZE),
1555 					     blob_load_cpl_extents_cpl, ctx);
1556 			return;
1557 		} else {
1558 			/* Thin provisioned blobs can point to unallocated extent pages.
1559 			 * In this case the blob size is increased by up to the amount left in remaining_clusters_in_et. */
1560 
1561 			sz = spdk_min(blob->remaining_clusters_in_et, SPDK_EXTENTS_PER_EP);
1562 			blob->active.num_clusters += sz;
1563 			blob->remaining_clusters_in_et -= sz;
1564 
1565 			assert(spdk_blob_is_thin_provisioned(blob));
1566 			assert(i + 1 < blob->active.num_extent_pages || blob->remaining_clusters_in_et == 0);
1567 
1568 			tmp = realloc(blob->active.clusters, blob->active.num_clusters * sizeof(*blob->active.clusters));
1569 			if (tmp == NULL) {
1570 				blob_load_final(ctx, -ENOMEM);
1571 				return;
1572 			}
1573 			memset(tmp + sizeof(*blob->active.clusters) * blob->active.cluster_array_size, 0,
1574 			       sizeof(*blob->active.clusters) * (blob->active.num_clusters - blob->active.cluster_array_size));
1575 			blob->active.clusters = tmp;
1576 			blob->active.cluster_array_size = blob->active.num_clusters;
1577 		}
1578 	}
1579 
1580 	blob_load_backing_dev(seq, ctx);
1581 }
1582 
1583 static void
1584 blob_load_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
1585 {
1586 	struct spdk_blob_load_ctx	*ctx = cb_arg;
1587 	struct spdk_blob		*blob = ctx->blob;
1588 	struct spdk_blob_md_page	*page;
1589 	int				rc;
1590 	uint32_t			crc;
1591 	uint32_t			current_page;
1592 
1593 	if (ctx->num_pages == 1) {
1594 		current_page = bs_blobid_to_page(blob->id);
1595 	} else {
1596 		assert(ctx->num_pages != 0);
1597 		page = &ctx->pages[ctx->num_pages - 2];
1598 		current_page = page->next;
1599 	}
1600 
1601 	if (bserrno) {
1602 		SPDK_ERRLOG("Metadata page %d read failed for blobid 0x%" PRIx64 ": %d\n",
1603 			    current_page, blob->id, bserrno);
1604 		blob_load_final(ctx, bserrno);
1605 		return;
1606 	}
1607 
1608 	page = &ctx->pages[ctx->num_pages - 1];
1609 	crc = blob_md_page_calc_crc(page);
1610 	if (crc != page->crc) {
1611 		SPDK_ERRLOG("Metadata page %d crc mismatch for blobid 0x%" PRIx64 "\n",
1612 			    current_page, blob->id);
1613 		blob_load_final(ctx, -EINVAL);
1614 		return;
1615 	}
1616 
1617 	if (page->next != SPDK_INVALID_MD_PAGE) {
1618 		struct spdk_blob_md_page *tmp_pages;
1619 		uint32_t next_page = page->next;
1620 		uint64_t next_lba = bs_md_page_to_lba(blob->bs, next_page);
1621 
1622 		/* Read the next page */
1623 		tmp_pages = spdk_realloc(ctx->pages, (sizeof(*page) * (ctx->num_pages + 1)), 0);
1624 		if (tmp_pages == NULL) {
1625 			blob_load_final(ctx, -ENOMEM);
1626 			return;
1627 		}
1628 		ctx->num_pages++;
1629 		ctx->pages = tmp_pages;
1630 
1631 		bs_sequence_read_dev(seq, &ctx->pages[ctx->num_pages - 1],
1632 				     next_lba,
1633 				     bs_byte_to_lba(blob->bs, sizeof(*page)),
1634 				     blob_load_cpl, ctx);
1635 		return;
1636 	}
1637 
1638 	/* Parse the pages */
1639 	rc = blob_parse(ctx->pages, ctx->num_pages, blob);
1640 	if (rc) {
1641 		blob_load_final(ctx, rc);
1642 		return;
1643 	}
1644 
1645 	if (blob->extent_table_found == true) {
1646 		/* If EXTENT_TABLE was found, that means support for it should be enabled. */
1647 		assert(blob->extent_rle_found == false);
1648 		blob->use_extent_table = true;
1649 	} else {
1650 		/* If EXTENT_RLE was found, or no extent_* descriptor at all, disable support
1651 		 * for the extent table. Having no extent_* descriptors means the blob has
1652 		 * a length of 0 and no extent_rle descriptors were persisted for it.
1653 		 * EXTENT_TABLE, if used, is always present in metadata regardless of length. */
1654 		blob->use_extent_table = false;
1655 	}
1656 
1657 	/* Check the clear_method stored in metadata vs what may have been passed
1658 	 * via spdk_bs_open_blob_ext() and update accordingly.
1659 	 */
1660 	blob_update_clear_method(blob);
1661 
1662 	spdk_free(ctx->pages);
1663 	ctx->pages = NULL;
1664 
1665 	if (blob->extent_table_found) {
1666 		blob_load_cpl_extents_cpl(seq, ctx, 0);
1667 	} else {
1668 		blob_load_backing_dev(seq, ctx);
1669 	}
1670 }
1671 
1672 /* Load a blob from disk given a blobid */
1673 static void
1674 blob_load(spdk_bs_sequence_t *seq, struct spdk_blob *blob,
1675 	  spdk_bs_sequence_cpl cb_fn, void *cb_arg)
1676 {
1677 	struct spdk_blob_load_ctx *ctx;
1678 	struct spdk_blob_store *bs;
1679 	uint32_t page_num;
1680 	uint64_t lba;
1681 
1682 	blob_verify_md_op(blob);
1683 
1684 	bs = blob->bs;
1685 
1686 	ctx = calloc(1, sizeof(*ctx));
1687 	if (!ctx) {
1688 		cb_fn(seq, cb_arg, -ENOMEM);
1689 		return;
1690 	}
1691 
1692 	ctx->blob = blob;
1693 	ctx->pages = spdk_realloc(ctx->pages, SPDK_BS_PAGE_SIZE, 0);
1694 	if (!ctx->pages) {
1695 		free(ctx);
1696 		cb_fn(seq, cb_arg, -ENOMEM);
1697 		return;
1698 	}
1699 	ctx->num_pages = 1;
1700 	ctx->cb_fn = cb_fn;
1701 	ctx->cb_arg = cb_arg;
1702 	ctx->seq = seq;
1703 
1704 	page_num = bs_blobid_to_page(blob->id);
1705 	lba = bs_md_page_to_lba(blob->bs, page_num);
1706 
1707 	blob->state = SPDK_BLOB_STATE_LOADING;
1708 
1709 	bs_sequence_read_dev(seq, &ctx->pages[0], lba,
1710 			     bs_byte_to_lba(bs, SPDK_BS_PAGE_SIZE),
1711 			     blob_load_cpl, ctx);
1712 }
1713 
1714 struct spdk_blob_persist_ctx {
1715 	struct spdk_blob		*blob;
1716 
1717 	struct spdk_blob_md_page	*pages;
1718 	uint32_t			next_extent_page;
1719 	struct spdk_blob_md_page	*extent_page;
1720 
1721 	spdk_bs_sequence_t		*seq;
1722 	spdk_bs_sequence_cpl		cb_fn;
1723 	void				*cb_arg;
1724 	TAILQ_ENTRY(spdk_blob_persist_ctx) link;
1725 };
1726 
1727 static void
1728 bs_batch_clear_dev(struct spdk_blob *blob, spdk_bs_batch_t *batch, uint64_t lba,
1729 		   uint64_t lba_count)
1730 {
1731 	switch (blob->clear_method) {
1732 	case BLOB_CLEAR_WITH_DEFAULT:
1733 	case BLOB_CLEAR_WITH_UNMAP:
1734 		bs_batch_unmap_dev(batch, lba, lba_count);
1735 		break;
1736 	case BLOB_CLEAR_WITH_WRITE_ZEROES:
1737 		bs_batch_write_zeroes_dev(batch, lba, lba_count);
1738 		break;
1739 	case BLOB_CLEAR_WITH_NONE:
1740 	default:
1741 		break;
1742 	}
1743 }
1744 
1745 static int
1746 bs_super_validate(struct spdk_bs_super_block *super, struct spdk_blob_store *bs)
1747 {
1748 	uint32_t	crc;
1749 	static const char zeros[SPDK_BLOBSTORE_TYPE_LENGTH];
1750 
1751 	if (super->version > SPDK_BS_VERSION ||
1752 	    super->version < SPDK_BS_INITIAL_VERSION) {
1753 		return -EILSEQ;
1754 	}
1755 
1756 	if (memcmp(super->signature, SPDK_BS_SUPER_BLOCK_SIG,
1757 		   sizeof(super->signature)) != 0) {
1758 		return -EILSEQ;
1759 	}
1760 
1761 	crc = blob_md_page_calc_crc(super);
1762 	if (crc != super->crc) {
1763 		return -EILSEQ;
1764 	}
1765 
1766 	if (memcmp(&bs->bstype, &super->bstype, SPDK_BLOBSTORE_TYPE_LENGTH) == 0) {
1767 		SPDK_DEBUGLOG(blob, "Bstype matched - loading blobstore\n");
1768 	} else if (memcmp(&bs->bstype, zeros, SPDK_BLOBSTORE_TYPE_LENGTH) == 0) {
1769 		SPDK_DEBUGLOG(blob, "Bstype wildcard used - loading blobstore regardless of bstype\n");
1770 	} else {
1771 		SPDK_DEBUGLOG(blob, "Unexpected bstype\n");
1772 		SPDK_LOGDUMP(blob, "Expected:", bs->bstype.bstype, SPDK_BLOBSTORE_TYPE_LENGTH);
1773 		SPDK_LOGDUMP(blob, "Found:", super->bstype.bstype, SPDK_BLOBSTORE_TYPE_LENGTH);
1774 		return -ENXIO;
1775 	}
1776 
1777 	if (super->size > bs->dev->blockcnt * bs->dev->blocklen) {
1778 		SPDK_NOTICELOG("Size mismatch, dev size: %" PRIu64 ", blobstore size: %" PRIu64 "\n",
1779 			       bs->dev->blockcnt * bs->dev->blocklen, super->size);
1780 		return -EILSEQ;
1781 	}
1782 
1783 	return 0;
1784 }
1785 
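/*
 * Note on the CRC check above: blob_md_page_calc_crc() (defined earlier in
 * this file) seeds a crc32c with BLOB_CRC32C_INITIAL and covers the 4 KiB
 * page minus the trailing crc field. A minimal sketch of that convention,
 * assuming the super block page ends with its crc member:
 */
#if 0
crc = spdk_crc32c_update(super, sizeof(*super) - sizeof(super->crc),
			 BLOB_CRC32C_INITIAL);
#endif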
1786 static void bs_mark_dirty(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs,
1787 			  spdk_bs_sequence_cpl cb_fn, void *cb_arg);
1788 
1789 static void
1790 blob_persist_complete_cb(void *arg)
1791 {
1792 	struct spdk_blob_persist_ctx *ctx = arg;
1793 
1794 	/* Call user callback */
1795 	ctx->cb_fn(ctx->seq, ctx->cb_arg, 0);
1796 
1797 	/* Free the memory */
1798 	spdk_free(ctx->pages);
1799 	free(ctx);
1800 }
1801 
1802 static void blob_persist_start(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno);
1803 
1804 static void
1805 blob_persist_complete(spdk_bs_sequence_t *seq, struct spdk_blob_persist_ctx *ctx, int bserrno)
1806 {
1807 	struct spdk_blob_persist_ctx	*next_persist, *tmp;
1808 	struct spdk_blob		*blob = ctx->blob;
1809 
1810 	if (bserrno == 0) {
1811 		blob_mark_clean(blob);
1812 	}
1813 
1814 	assert(ctx == TAILQ_FIRST(&blob->persists_to_complete));
1815 
1816 	/* Complete all persists that were pending when the current persist started */
1817 	TAILQ_FOREACH_SAFE(next_persist, &blob->persists_to_complete, link, tmp) {
1818 		TAILQ_REMOVE(&blob->persists_to_complete, next_persist, link);
1819 		spdk_thread_send_msg(spdk_get_thread(), blob_persist_complete_cb, next_persist);
1820 	}
1821 
1822 	if (TAILQ_EMPTY(&blob->pending_persists)) {
1823 		return;
1824 	}
1825 
1826 	/* Queue up all pending persists for completion and start blob persist with first one */
1827 	TAILQ_SWAP(&blob->persists_to_complete, &blob->pending_persists, spdk_blob_persist_ctx, link);
1828 	next_persist = TAILQ_FIRST(&blob->persists_to_complete);
1829 
1830 	blob->state = SPDK_BLOB_STATE_DIRTY;
1831 	bs_mark_dirty(seq, blob->bs, blob_persist_start, next_persist);
1832 }
1833 
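/*
 * Persist pipelining, in short: persists that arrive while one is in flight
 * sit on blob->pending_persists. When the in-flight persist finishes, the
 * whole pending list is swapped onto blob->persists_to_complete and a single
 * new persist is started for that batch, so every queued ctx completes with
 * the result of one metadata write rather than one write per ctx.
 */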
1834 static void
1835 blob_persist_clear_extents_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
1836 {
1837 	struct spdk_blob_persist_ctx	*ctx = cb_arg;
1838 	struct spdk_blob		*blob = ctx->blob;
1839 	struct spdk_blob_store		*bs = blob->bs;
1840 	size_t				i;
1841 
1842 	if (bserrno != 0) {
1843 		blob_persist_complete(seq, ctx, bserrno);
1844 		return;
1845 	}
1846 
1847 	spdk_spin_lock(&bs->used_lock);
1848 
1849 	/* Release all extent_pages that were truncated */
1850 	for (i = blob->active.num_extent_pages; i < blob->active.extent_pages_array_size; i++) {
1851 		/* Nothing to release if it was not allocated */
1852 		if (blob->active.extent_pages[i] != 0) {
1853 			bs_release_md_page(bs, blob->active.extent_pages[i]);
1854 		}
1855 	}
1856 
1857 	spdk_spin_unlock(&bs->used_lock);
1858 
1859 	if (blob->active.num_extent_pages == 0) {
1860 		free(blob->active.extent_pages);
1861 		blob->active.extent_pages = NULL;
1862 		blob->active.extent_pages_array_size = 0;
1863 	} else if (blob->active.num_extent_pages != blob->active.extent_pages_array_size) {
1864 #ifndef __clang_analyzer__
1865 		void *tmp;
1866 
1867 		/* scan-build really can't figure out reallocs; work around it */
1868 		tmp = realloc(blob->active.extent_pages, sizeof(uint32_t) * blob->active.num_extent_pages);
1869 		assert(tmp != NULL);
1870 		blob->active.extent_pages = tmp;
1871 #endif
1872 		blob->active.extent_pages_array_size = blob->active.num_extent_pages;
1873 	}
1874 
1875 	blob_persist_complete(seq, ctx, bserrno);
1876 }
1877 
1878 static void
1879 blob_persist_clear_extents(spdk_bs_sequence_t *seq, struct spdk_blob_persist_ctx *ctx)
1880 {
1881 	struct spdk_blob		*blob = ctx->blob;
1882 	struct spdk_blob_store		*bs = blob->bs;
1883 	size_t				i;
1884 	uint64_t                        lba;
1885 	uint64_t                        lba_count;
1886 	spdk_bs_batch_t                 *batch;
1887 
1888 	batch = bs_sequence_to_batch(seq, blob_persist_clear_extents_cpl, ctx);
1889 	lba_count = bs_byte_to_lba(bs, SPDK_BS_PAGE_SIZE);
1890 
1891 	/* Clear all extent_pages that were truncated */
1892 	for (i = blob->active.num_extent_pages; i < blob->active.extent_pages_array_size; i++) {
1893 		/* Nothing to clear if it was not allocated */
1894 		if (blob->active.extent_pages[i] != 0) {
1895 			lba = bs_md_page_to_lba(bs, blob->active.extent_pages[i]);
1896 			bs_batch_write_zeroes_dev(batch, lba, lba_count);
1897 		}
1898 	}
1899 
1900 	bs_batch_close(batch);
1901 }
1902 
1903 static void
1904 blob_persist_clear_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
1905 {
1906 	struct spdk_blob_persist_ctx	*ctx = cb_arg;
1907 	struct spdk_blob		*blob = ctx->blob;
1908 	struct spdk_blob_store		*bs = blob->bs;
1909 	size_t				i;
1910 
1911 	if (bserrno != 0) {
1912 		blob_persist_complete(seq, ctx, bserrno);
1913 		return;
1914 	}
1915 
1916 	spdk_spin_lock(&bs->used_lock);
1917 	/* Release all clusters that were truncated */
1918 	for (i = blob->active.num_clusters; i < blob->active.cluster_array_size; i++) {
1919 		uint32_t cluster_num = bs_lba_to_cluster(bs, blob->active.clusters[i]);
1920 
1921 		/* Nothing to release if it was not allocated */
1922 		if (blob->active.clusters[i] != 0) {
1923 			bs_release_cluster(bs, cluster_num);
1924 		}
1925 	}
1926 	spdk_spin_unlock(&bs->used_lock);
1927 
1928 	if (blob->active.num_clusters == 0) {
1929 		free(blob->active.clusters);
1930 		blob->active.clusters = NULL;
1931 		blob->active.cluster_array_size = 0;
1932 	} else if (blob->active.num_clusters != blob->active.cluster_array_size) {
1933 #ifndef __clang_analyzer__
1934 		void *tmp;
1935 
1936 		/* scan-build really can't figure out reallocs; work around it */
1937 		tmp = realloc(blob->active.clusters, sizeof(*blob->active.clusters) * blob->active.num_clusters);
1938 		assert(tmp != NULL);
1939 		blob->active.clusters = tmp;
1940 
1941 #endif
1942 		blob->active.cluster_array_size = blob->active.num_clusters;
1943 	}
1944 
1945 	/* Move on to clearing extent pages */
1946 	blob_persist_clear_extents(seq, ctx);
1947 }
1948 
1949 static void
1950 blob_persist_clear_clusters(spdk_bs_sequence_t *seq, struct spdk_blob_persist_ctx *ctx)
1951 {
1952 	struct spdk_blob		*blob = ctx->blob;
1953 	struct spdk_blob_store		*bs = blob->bs;
1954 	spdk_bs_batch_t			*batch;
1955 	size_t				i;
1956 	uint64_t			lba;
1957 	uint64_t			lba_count;
1958 
1959 	/* Clusters don't move around in blobs. The list shrinks or grows
1960 	 * at the end, but no changes ever occur in the middle of the list.
1961 	 */
1962 
1963 	batch = bs_sequence_to_batch(seq, blob_persist_clear_clusters_cpl, ctx);
1964 
1965 	/* Clear all clusters that were truncated */
1966 	lba = 0;
1967 	lba_count = 0;
1968 	for (i = blob->active.num_clusters; i < blob->active.cluster_array_size; i++) {
1969 		uint64_t next_lba = blob->active.clusters[i];
1970 		uint64_t next_lba_count = bs_cluster_to_lba(bs, 1);
1971 
1972 		if (next_lba > 0 && (lba + lba_count) == next_lba) {
1973 			/* This cluster is contiguous with the previous one. */
1974 			lba_count += next_lba_count;
1975 			continue;
1976 		} else if (next_lba == 0) {
1977 			continue;
1978 		}
1979 
1980 		/* This cluster is not contiguous with the previous one. */
1981 
1982 		/* If a run of LBAs previously existed, clear it now */
1983 		if (lba_count > 0) {
1984 			bs_batch_clear_dev(ctx->blob, batch, lba, lba_count);
1985 		}
1986 
1987 		/* Start building the next batch */
1988 		lba = next_lba;
1989 		if (next_lba > 0) {
1990 			lba_count = next_lba_count;
1991 		} else {
1992 			lba_count = 0;
1993 		}
1994 	}
1995 
1996 	/* If we ended with a contiguous set of LBAs, clear them now */
1997 	if (lba_count > 0) {
1998 		bs_batch_clear_dev(ctx->blob, batch, lba, lba_count);
1999 	}
2000 
2001 	bs_batch_close(batch);
2002 }
2003 
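/*
 * Worked example of the run coalescing above (hypothetical numbers): with
 * 8 LBAs per cluster and truncated cluster LBAs [8, 16, 0, 32, 40], the loop
 * issues two clear operations: (lba 8, count 16) for the first contiguous
 * pair and (lba 32, count 16) for the second, while the unallocated entry
 * (0) is skipped entirely.
 */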
2004 static void
2005 blob_persist_zero_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2006 {
2007 	struct spdk_blob_persist_ctx	*ctx = cb_arg;
2008 	struct spdk_blob		*blob = ctx->blob;
2009 	struct spdk_blob_store		*bs = blob->bs;
2010 	size_t				i;
2011 
2012 	if (bserrno != 0) {
2013 		blob_persist_complete(seq, ctx, bserrno);
2014 		return;
2015 	}
2016 
2017 	spdk_spin_lock(&bs->used_lock);
2018 
2019 	/* This loop starts at 1 because the first page is special and handled
2020 	 * below. The pages (except the first) are never written in place, so
2021 	 * any pages in the clean list must be released here.
2022 	 */
2023 	for (i = 1; i < blob->clean.num_pages; i++) {
2024 		bs_release_md_page(bs, blob->clean.pages[i]);
2025 	}
2026 
2027 	if (blob->active.num_pages == 0) {
2028 		uint32_t page_num;
2029 
2030 		page_num = bs_blobid_to_page(blob->id);
2031 		bs_release_md_page(bs, page_num);
2032 	}
2033 
2034 	spdk_spin_unlock(&bs->used_lock);
2035 
2036 	/* Move on to clearing clusters */
2037 	blob_persist_clear_clusters(seq, ctx);
2038 }
2039 
2040 static void
2041 blob_persist_zero_pages(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2042 {
2043 	struct spdk_blob_persist_ctx	*ctx = cb_arg;
2044 	struct spdk_blob		*blob = ctx->blob;
2045 	struct spdk_blob_store		*bs = blob->bs;
2046 	uint64_t			lba;
2047 	uint64_t			lba_count;
2048 	spdk_bs_batch_t			*batch;
2049 	size_t				i;
2050 
2051 	if (bserrno != 0) {
2052 		blob_persist_complete(seq, ctx, bserrno);
2053 		return;
2054 	}
2055 
2056 	batch = bs_sequence_to_batch(seq, blob_persist_zero_pages_cpl, ctx);
2057 
2058 	lba_count = bs_byte_to_lba(bs, SPDK_BS_PAGE_SIZE);
2059 
2060 	/* This loop starts at 1 because the first page is special and handled
2061 	 * below. The pages (except the first) are never written in place,
2062 	 * so any pages in the clean list must be zeroed.
2063 	 */
2064 	for (i = 1; i < blob->clean.num_pages; i++) {
2065 		lba = bs_md_page_to_lba(bs, blob->clean.pages[i]);
2066 
2067 		bs_batch_write_zeroes_dev(batch, lba, lba_count);
2068 	}
2069 
2070 	/* The first page will only be zeroed if this is a delete. */
2071 	if (blob->active.num_pages == 0) {
2072 		uint32_t page_num;
2073 
2074 		/* The first page in the metadata goes where the blobid indicates */
2075 		page_num = bs_blobid_to_page(blob->id);
2076 		lba = bs_md_page_to_lba(bs, page_num);
2077 
2078 		bs_batch_write_zeroes_dev(batch, lba, lba_count);
2079 	}
2080 
2081 	bs_batch_close(batch);
2082 }
2083 
2084 static void
2085 blob_persist_write_page_root(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2086 {
2087 	struct spdk_blob_persist_ctx	*ctx = cb_arg;
2088 	struct spdk_blob		*blob = ctx->blob;
2089 	struct spdk_blob_store		*bs = blob->bs;
2090 	uint64_t			lba;
2091 	uint32_t			lba_count;
2092 	struct spdk_blob_md_page	*page;
2093 
2094 	if (bserrno != 0) {
2095 		blob_persist_complete(seq, ctx, bserrno);
2096 		return;
2097 	}
2098 
2099 	if (blob->active.num_pages == 0) {
2100 		/* Move on to the next step */
2101 		blob_persist_zero_pages(seq, ctx, 0);
2102 		return;
2103 	}
2104 
2105 	lba_count = bs_byte_to_lba(bs, sizeof(*page));
2106 
2107 	page = &ctx->pages[0];
2108 	/* The first page in the metadata goes where the blobid indicates */
2109 	lba = bs_md_page_to_lba(bs, bs_blobid_to_page(blob->id));
2110 
2111 	bs_sequence_write_dev(seq, page, lba, lba_count,
2112 			      blob_persist_zero_pages, ctx);
2113 }
2114 
2115 static void
2116 blob_persist_write_page_chain(spdk_bs_sequence_t *seq, struct spdk_blob_persist_ctx *ctx)
2117 {
2118 	struct spdk_blob		*blob = ctx->blob;
2119 	struct spdk_blob_store		*bs = blob->bs;
2120 	uint64_t			lba;
2121 	uint32_t			lba_count;
2122 	struct spdk_blob_md_page	*page;
2123 	spdk_bs_batch_t			*batch;
2124 	size_t				i;
2125 
2126 	/* Clusters don't move around in blobs. The list shrinks or grows
2127 	 * at the end, but no changes ever occur in the middle of the list.
2128 	 */
2129 
2130 	lba_count = bs_byte_to_lba(bs, sizeof(*page));
2131 
2132 	batch = bs_sequence_to_batch(seq, blob_persist_write_page_root, ctx);
2133 
2134 	/* This starts at 1. The root page is not written until
2135 	 * all of the others are finished.
2136 	 */
2137 	for (i = 1; i < blob->active.num_pages; i++) {
2138 		page = &ctx->pages[i];
2139 		assert(page->sequence_num == i);
2140 
2141 		lba = bs_md_page_to_lba(bs, blob->active.pages[i]);
2142 
2143 		bs_batch_write_dev(batch, page, lba, lba_count);
2144 	}
2145 
2146 	bs_batch_close(batch);
2147 }
2148 
2149 static int
2150 blob_resize(struct spdk_blob *blob, uint64_t sz)
2151 {
2152 	uint64_t	i;
2153 	uint64_t	*tmp;
2154 	uint64_t	cluster;
2155 	uint32_t	lfmd; /*  lowest free md page */
2156 	uint64_t	num_clusters;
2157 	uint32_t	*ep_tmp;
2158 	uint64_t	new_num_ep = 0, current_num_ep = 0;
2159 	struct spdk_blob_store *bs;
2160 	int		rc;
2161 
2162 	bs = blob->bs;
2163 
2164 	blob_verify_md_op(blob);
2165 
2166 	if (blob->active.num_clusters == sz) {
2167 		return 0;
2168 	}
2169 
2170 	if (blob->active.num_clusters < blob->active.cluster_array_size) {
2171 		/* If this blob was resized to be larger, then smaller, then
2172 		 * larger without syncing, then the cluster array already
2173 		 * contains spare assigned clusters we can use.
2174 		 */
2175 		num_clusters = spdk_min(blob->active.cluster_array_size,
2176 					sz);
2177 	} else {
2178 		num_clusters = blob->active.num_clusters;
2179 	}
2180 
2181 	if (blob->use_extent_table) {
2182 		/* Round up, since every cluster beyond the current extent table
2183 		 * size requires a new extent page. */
2184 		new_num_ep = spdk_divide_round_up(sz, SPDK_EXTENTS_PER_EP);
2185 		current_num_ep = spdk_divide_round_up(num_clusters, SPDK_EXTENTS_PER_EP);
2186 	}
2187 
2188 	assert(!spdk_spin_held(&bs->used_lock));
2189 
2190 	/* Check first that we have enough clusters and md pages before we start claiming them.
2191 	 * bs->used_lock is held to ensure that clusters we think are free are still free when we go
2192 	 * to claim them later in this function.
2193 	 */
2194 	if (sz > num_clusters && spdk_blob_is_thin_provisioned(blob) == false) {
2195 		spdk_spin_lock(&bs->used_lock);
2196 		if ((sz - num_clusters) > bs->num_free_clusters) {
2197 			rc = -ENOSPC;
2198 			goto out;
2199 		}
2200 		lfmd = 0;
2201 		for (i = current_num_ep; i < new_num_ep ; i++) {
2202 			lfmd = spdk_bit_array_find_first_clear(blob->bs->used_md_pages, lfmd);
2203 			if (lfmd == UINT32_MAX) {
2204 				/* No more free md pages. Cannot satisfy the request */
2205 				rc = -ENOSPC;
2206 				goto out;
2207 			}
2208 		}
2209 	}
2210 
2211 	if (sz > num_clusters) {
2212 		/* Expand the cluster array if necessary.
2213 		 * We only shrink the array when persisting.
2214 		 */
2215 		tmp = realloc(blob->active.clusters, sizeof(*blob->active.clusters) * sz);
2216 		if (sz > 0 && tmp == NULL) {
2217 			rc = -ENOMEM;
2218 			goto out;
2219 		}
2220 		memset(tmp + blob->active.cluster_array_size, 0,
2221 		       sizeof(*blob->active.clusters) * (sz - blob->active.cluster_array_size));
2222 		blob->active.clusters = tmp;
2223 		blob->active.cluster_array_size = sz;
2224 
2225 		/* Expand the extents table, only if enough clusters were added */
2226 		if (new_num_ep > current_num_ep && blob->use_extent_table) {
2227 			ep_tmp = realloc(blob->active.extent_pages, sizeof(*blob->active.extent_pages) * new_num_ep);
2228 			if (new_num_ep > 0 && ep_tmp == NULL) {
2229 				rc = -ENOMEM;
2230 				goto out;
2231 			}
2232 			memset(ep_tmp + blob->active.extent_pages_array_size, 0,
2233 			       sizeof(*blob->active.extent_pages) * (new_num_ep - blob->active.extent_pages_array_size));
2234 			blob->active.extent_pages = ep_tmp;
2235 			blob->active.extent_pages_array_size = new_num_ep;
2236 		}
2237 	}
2238 
2239 	blob->state = SPDK_BLOB_STATE_DIRTY;
2240 
2241 	if (spdk_blob_is_thin_provisioned(blob) == false) {
2242 		cluster = 0;
2243 		lfmd = 0;
2244 		for (i = num_clusters; i < sz; i++) {
2245 			bs_allocate_cluster(blob, i, &cluster, &lfmd, true);
2246 			/* Do not increment lfmd here.  lfmd will get updated
2247 			 * to the md_page allocated (if any) when a new extent
2248 			 * page is needed.  Just pass that value again,
2249 			 * bs_allocate_cluster will just start at that index
2250 			 * to find the next free md_page when needed.
2251 			 */
2252 		}
2253 	}
2254 
2255 	blob->active.num_clusters = sz;
2256 	blob->active.num_extent_pages = new_num_ep;
2257 
2258 	rc = 0;
2259 out:
2260 	if (spdk_spin_held(&bs->used_lock)) {
2261 		spdk_spin_unlock(&bs->used_lock);
2262 	}
2263 
2264 	return rc;
2265 }
2266 
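/*
 * Illustrative sketch (not part of blobstore): blob_resize() is reached from
 * the public resize API on the md thread, and the new size is only written
 * out on the next metadata sync. The callback names are hypothetical.
 */
#if 0
spdk_blob_resize(blob, new_num_clusters, example_resize_cb, NULL);
/* ... and later, from example_resize_cb: */
spdk_blob_sync_md(blob, example_sync_cb, NULL);
#endif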
2267 static void
2268 blob_persist_generate_new_md(struct spdk_blob_persist_ctx *ctx)
2269 {
2270 	spdk_bs_sequence_t *seq = ctx->seq;
2271 	struct spdk_blob *blob = ctx->blob;
2272 	struct spdk_blob_store *bs = blob->bs;
2273 	uint64_t i;
2274 	uint32_t page_num;
2275 	void *tmp;
2276 	int rc;
2277 
2278 	/* Generate the new metadata */
2279 	rc = blob_serialize(blob, &ctx->pages, &blob->active.num_pages);
2280 	if (rc < 0) {
2281 		blob_persist_complete(seq, ctx, rc);
2282 		return;
2283 	}
2284 
2285 	assert(blob->active.num_pages >= 1);
2286 
2287 	/* Resize the cache of page indices */
2288 	tmp = realloc(blob->active.pages, blob->active.num_pages * sizeof(*blob->active.pages));
2289 	if (!tmp) {
2290 		blob_persist_complete(seq, ctx, -ENOMEM);
2291 		return;
2292 	}
2293 	blob->active.pages = tmp;
2294 
2295 	/* Assign this metadata to pages. This requires two passes - one to verify that there are
2296 	 * enough pages and a second to actually claim them. The used_lock is held across
2297 	 * both passes to ensure things don't change in the middle.
2298 	 */
2299 	spdk_spin_lock(&bs->used_lock);
2300 	page_num = 0;
2301 	/* Note that this loop starts at one. The first page location is fixed by the blobid. */
2302 	for (i = 1; i < blob->active.num_pages; i++) {
2303 		page_num = spdk_bit_array_find_first_clear(bs->used_md_pages, page_num);
2304 		if (page_num == UINT32_MAX) {
2305 			spdk_spin_unlock(&bs->used_lock);
2306 			blob_persist_complete(seq, ctx, -ENOMEM);
2307 			return;
2308 		}
2309 		page_num++;
2310 	}
2311 
2312 	page_num = 0;
2313 	blob->active.pages[0] = bs_blobid_to_page(blob->id);
2314 	for (i = 1; i < blob->active.num_pages; i++) {
2315 		page_num = spdk_bit_array_find_first_clear(bs->used_md_pages, page_num);
2316 		ctx->pages[i - 1].next = page_num;
2317 		/* Now that previous metadata page is complete, calculate the crc for it. */
2318 		ctx->pages[i - 1].crc = blob_md_page_calc_crc(&ctx->pages[i - 1]);
2319 		blob->active.pages[i] = page_num;
2320 		bs_claim_md_page(bs, page_num);
2321 		SPDK_DEBUGLOG(blob, "Claiming page %u for blob 0x%" PRIx64 "\n", page_num,
2322 			      blob->id);
2323 		page_num++;
2324 	}
2325 	spdk_spin_unlock(&bs->used_lock);
2326 	ctx->pages[i - 1].crc = blob_md_page_calc_crc(&ctx->pages[i - 1]);
2327 	/* Start writing the metadata from last page to first */
2328 	blob->state = SPDK_BLOB_STATE_CLEAN;
2329 	blob_persist_write_page_chain(seq, ctx);
2330 }
2331 
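/*
 * Why two passes above: the first loop only probes used_md_pages to confirm
 * that enough free pages exist, while the second actually claims them and
 * links each page to the next one claimed via its `next` field. Holding
 * bs->used_lock across both passes keeps the probe result valid. CRCs are
 * computed as each page's `next` is finalized, with the last page handled
 * after the loop.
 */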
2332 static void
2333 blob_persist_write_extent_pages(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2334 {
2335 	struct spdk_blob_persist_ctx	*ctx = cb_arg;
2336 	struct spdk_blob		*blob = ctx->blob;
2337 	size_t				i;
2338 	uint32_t			extent_page_id;
2339 	uint32_t                        page_count = 0;
2340 	int				rc;
2341 
2342 	if (ctx->extent_page != NULL) {
2343 		spdk_free(ctx->extent_page);
2344 		ctx->extent_page = NULL;
2345 	}
2346 
2347 	if (bserrno != 0) {
2348 		blob_persist_complete(seq, ctx, bserrno);
2349 		return;
2350 	}
2351 
2352 	/* Only write out Extent Pages when the blob was resized. */
2353 	for (i = ctx->next_extent_page; i < blob->active.extent_pages_array_size; i++) {
2354 		extent_page_id = blob->active.extent_pages[i];
2355 		if (extent_page_id == 0) {
2356 			/* No Extent Page to persist */
2357 			assert(spdk_blob_is_thin_provisioned(blob));
2358 			continue;
2359 		}
2360 		assert(spdk_bit_array_get(blob->bs->used_md_pages, extent_page_id));
2361 		ctx->next_extent_page = i + 1;
2362 		rc = blob_serialize_add_page(ctx->blob, &ctx->extent_page, &page_count, &ctx->extent_page);
2363 		if (rc < 0) {
2364 			blob_persist_complete(seq, ctx, rc);
2365 			return;
2366 		}
2367 
2368 		blob->state = SPDK_BLOB_STATE_DIRTY;
2369 		blob_serialize_extent_page(blob, i * SPDK_EXTENTS_PER_EP, ctx->extent_page);
2370 
2371 		ctx->extent_page->crc = blob_md_page_calc_crc(ctx->extent_page);
2372 
2373 		bs_sequence_write_dev(seq, ctx->extent_page, bs_md_page_to_lba(blob->bs, extent_page_id),
2374 				      bs_byte_to_lba(blob->bs, SPDK_BS_PAGE_SIZE),
2375 				      blob_persist_write_extent_pages, ctx);
2376 		return;
2377 	}
2378 
2379 	blob_persist_generate_new_md(ctx);
2380 }
2381 
2382 static void
2383 blob_persist_start(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2384 {
2385 	struct spdk_blob_persist_ctx *ctx = cb_arg;
2386 	struct spdk_blob *blob = ctx->blob;
2387 
2388 	if (bserrno != 0) {
2389 		blob_persist_complete(seq, ctx, bserrno);
2390 		return;
2391 	}
2392 
2393 	if (blob->active.num_pages == 0) {
2394 		/* This is the signal that the blob should be deleted.
2395 		 * Immediately jump to the cleanup routine. */
2396 		assert(blob->clean.num_pages > 0);
2397 		blob->state = SPDK_BLOB_STATE_CLEAN;
2398 		blob_persist_zero_pages(seq, ctx, 0);
2399 		return;
2400 
2401 	}
2402 
2403 	if (blob->clean.num_clusters < blob->active.num_clusters) {
2404 		/* Blob was resized up */
2405 		assert(blob->clean.num_extent_pages <= blob->active.num_extent_pages);
2406 		ctx->next_extent_page = spdk_max(1, blob->clean.num_extent_pages) - 1;
2407 	} else if (blob->active.num_clusters < blob->active.cluster_array_size) {
2408 		/* Blob was resized down */
2409 		assert(blob->clean.num_extent_pages >= blob->active.num_extent_pages);
2410 		ctx->next_extent_page = spdk_max(1, blob->active.num_extent_pages) - 1;
2411 	} else {
2412 		/* No change in size occurred */
2413 		blob_persist_generate_new_md(ctx);
2414 		return;
2415 	}
2416 
2417 	blob_persist_write_extent_pages(seq, ctx, 0);
2418 }
2419 
2420 struct spdk_bs_mark_dirty {
2421 	struct spdk_blob_store		*bs;
2422 	struct spdk_bs_super_block	*super;
2423 	spdk_bs_sequence_cpl		cb_fn;
2424 	void				*cb_arg;
2425 };
2426 
2427 static void
2428 bs_mark_dirty_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2429 {
2430 	struct spdk_bs_mark_dirty *ctx = cb_arg;
2431 
2432 	if (bserrno == 0) {
2433 		ctx->bs->clean = 0;
2434 	}
2435 
2436 	ctx->cb_fn(seq, ctx->cb_arg, bserrno);
2437 
2438 	spdk_free(ctx->super);
2439 	free(ctx);
2440 }
2441 
2442 static void bs_write_super(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs,
2443 			   struct spdk_bs_super_block *super, spdk_bs_sequence_cpl cb_fn, void *cb_arg);
2444 
2445 
2446 static void
2447 bs_mark_dirty_write(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2448 {
2449 	struct spdk_bs_mark_dirty *ctx = cb_arg;
2450 	int rc;
2451 
2452 	if (bserrno != 0) {
2453 		bs_mark_dirty_write_cpl(seq, ctx, bserrno);
2454 		return;
2455 	}
2456 
2457 	rc = bs_super_validate(ctx->super, ctx->bs);
2458 	if (rc != 0) {
2459 		bs_mark_dirty_write_cpl(seq, ctx, rc);
2460 		return;
2461 	}
2462 
2463 	ctx->super->clean = 0;
2464 	if (ctx->super->size == 0) {
2465 		ctx->super->size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen;
2466 	}
2467 
2468 	bs_write_super(seq, ctx->bs, ctx->super, bs_mark_dirty_write_cpl, ctx);
2469 }
2470 
2471 static void
2472 bs_mark_dirty(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs,
2473 	      spdk_bs_sequence_cpl cb_fn, void *cb_arg)
2474 {
2475 	struct spdk_bs_mark_dirty *ctx;
2476 
2477 	/* Blobstore is already marked dirty */
2478 	if (bs->clean == 0) {
2479 		cb_fn(seq, cb_arg, 0);
2480 		return;
2481 	}
2482 
2483 	ctx = calloc(1, sizeof(*ctx));
2484 	if (!ctx) {
2485 		cb_fn(seq, cb_arg, -ENOMEM);
2486 		return;
2487 	}
2488 	ctx->bs = bs;
2489 	ctx->cb_fn = cb_fn;
2490 	ctx->cb_arg = cb_arg;
2491 
2492 	ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL,
2493 				  SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
2494 	if (!ctx->super) {
2495 		free(ctx);
2496 		cb_fn(seq, cb_arg, -ENOMEM);
2497 		return;
2498 	}
2499 
2500 	bs_sequence_read_dev(seq, ctx->super, bs_page_to_lba(bs, 0),
2501 			     bs_byte_to_lba(bs, sizeof(*ctx->super)),
2502 			     bs_mark_dirty_write, ctx);
2503 }
2504 
2505 /* Write a blob to disk */
2506 static void
2507 blob_persist(spdk_bs_sequence_t *seq, struct spdk_blob *blob,
2508 	     spdk_bs_sequence_cpl cb_fn, void *cb_arg)
2509 {
2510 	struct spdk_blob_persist_ctx *ctx;
2511 
2512 	blob_verify_md_op(blob);
2513 
2514 	if (blob->state == SPDK_BLOB_STATE_CLEAN && TAILQ_EMPTY(&blob->persists_to_complete)) {
2515 		cb_fn(seq, cb_arg, 0);
2516 		return;
2517 	}
2518 
2519 	ctx = calloc(1, sizeof(*ctx));
2520 	if (!ctx) {
2521 		cb_fn(seq, cb_arg, -ENOMEM);
2522 		return;
2523 	}
2524 	ctx->blob = blob;
2525 	ctx->seq = seq;
2526 	ctx->cb_fn = cb_fn;
2527 	ctx->cb_arg = cb_arg;
2528 
2529 	/* Multiple blob persists can affect one another via blob->state or
2530 	 * changes to the blob's mutable data. To prevent this, queue up the persists. */
2531 	if (!TAILQ_EMPTY(&blob->persists_to_complete)) {
2532 		TAILQ_INSERT_TAIL(&blob->pending_persists, ctx, link);
2533 		return;
2534 	}
2535 	TAILQ_INSERT_HEAD(&blob->persists_to_complete, ctx, link);
2536 
2537 	bs_mark_dirty(seq, blob->bs, blob_persist_start, ctx);
2538 }
2539 
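/*
 * Illustrative note: blob_persist() is the common tail of metadata-mutating
 * paths such as spdk_blob_sync_md() and blob deletion (the
 * active.num_pages == 0 case handled in blob_persist_start() above); user
 * code never calls it directly.
 */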
2540 struct spdk_blob_copy_cluster_ctx {
2541 	struct spdk_blob *blob;
2542 	uint8_t *buf;
2543 	uint64_t page;
2544 	uint64_t new_cluster;
2545 	uint32_t new_extent_page;
2546 	spdk_bs_sequence_t *seq;
2547 	struct spdk_blob_md_page *new_cluster_page;
2548 };
2549 
2550 struct spdk_blob_free_cluster_ctx {
2551 	struct spdk_blob *blob;
2552 	uint64_t page;
2553 	struct spdk_blob_md_page *md_page;
2554 	uint64_t cluster_num;
2555 	uint32_t extent_page;
2556 	spdk_bs_sequence_t *seq;
2557 };
2558 
2559 static void
2560 blob_allocate_and_copy_cluster_cpl(void *cb_arg, int bserrno)
2561 {
2562 	struct spdk_blob_copy_cluster_ctx *ctx = cb_arg;
2563 	struct spdk_bs_request_set *set = (struct spdk_bs_request_set *)ctx->seq;
2564 	TAILQ_HEAD(, spdk_bs_request_set) requests;
2565 	spdk_bs_user_op_t *op;
2566 
2567 	TAILQ_INIT(&requests);
2568 	TAILQ_SWAP(&set->channel->need_cluster_alloc, &requests, spdk_bs_request_set, link);
2569 
2570 	while (!TAILQ_EMPTY(&requests)) {
2571 		op = TAILQ_FIRST(&requests);
2572 		TAILQ_REMOVE(&requests, op, link);
2573 		if (bserrno == 0) {
2574 			bs_user_op_execute(op);
2575 		} else {
2576 			bs_user_op_abort(op, bserrno);
2577 		}
2578 	}
2579 
2580 	spdk_free(ctx->buf);
2581 	free(ctx);
2582 }
2583 
2584 static void
2585 blob_free_cluster_cpl(void *cb_arg, int bserrno)
2586 {
2587 	struct spdk_blob_free_cluster_ctx *ctx = cb_arg;
2588 	spdk_bs_sequence_t *seq = ctx->seq;
2589 
2590 	bs_sequence_finish(seq, bserrno);
2591 
2592 	free(ctx);
2593 }
2594 
2595 static void
2596 blob_insert_cluster_revert(struct spdk_blob_copy_cluster_ctx *ctx)
2597 {
2598 	spdk_spin_lock(&ctx->blob->bs->used_lock);
2599 	bs_release_cluster(ctx->blob->bs, ctx->new_cluster);
2600 	if (ctx->new_extent_page != 0) {
2601 		bs_release_md_page(ctx->blob->bs, ctx->new_extent_page);
2602 	}
2603 	spdk_spin_unlock(&ctx->blob->bs->used_lock);
2604 }
2605 
2606 static void
2607 blob_insert_cluster_clear_cpl(void *cb_arg, int bserrno)
2608 {
2609 	struct spdk_blob_copy_cluster_ctx *ctx = cb_arg;
2610 
2611 	if (bserrno) {
2612 		SPDK_WARNLOG("Failed to clear cluster: %d\n", bserrno);
2613 	}
2614 
2615 	blob_insert_cluster_revert(ctx);
2616 	bs_sequence_finish(ctx->seq, bserrno);
2617 }
2618 
2619 static void
2620 blob_insert_cluster_clear(struct spdk_blob_copy_cluster_ctx *ctx)
2621 {
2622 	struct spdk_bs_cpl cpl;
2623 	spdk_bs_batch_t *batch;
2624 	struct spdk_io_channel *ch = spdk_io_channel_from_ctx(ctx->seq->channel);
2625 
2626 	/*
2627 	 * We allocated a cluster and copied data into it, but it turns out we don't
2628 	 * need the cluster after all and want to release it. The data on the cluster
2629 	 * must be cleared first: the cluster may later be re-allocated, for example
2630 	 * by a thick-provisioned blob, and a read from that blob issued before any
2631 	 * write must return zeroes.
2632 	 */
2633 
2634 	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
2635 	cpl.u.blob_basic.cb_fn = blob_insert_cluster_clear_cpl;
2636 	cpl.u.blob_basic.cb_arg = ctx;
2637 
2638 	batch = bs_batch_open(ch, &cpl, ctx->blob);
2639 	if (!batch) {
2640 		blob_insert_cluster_clear_cpl(ctx, -ENOMEM);
2641 		return;
2642 	}
2643 
2644 	bs_batch_clear_dev(ctx->blob, batch, bs_cluster_to_lba(ctx->blob->bs, ctx->new_cluster),
2645 			   bs_cluster_to_lba(ctx->blob->bs, 1));
2646 	bs_batch_close(batch);
2647 }
2648 
2649 static void
2650 blob_insert_cluster_cpl(void *cb_arg, int bserrno)
2651 {
2652 	struct spdk_blob_copy_cluster_ctx *ctx = cb_arg;
2653 
2654 	if (bserrno) {
2655 		if (bserrno == -EEXIST) {
2656 			/* The metadata insert failed because another thread
2657 			 * allocated the cluster first. Clear and free our cluster
2658 			 * but continue without error. */
2659 			blob_insert_cluster_clear(ctx);
2660 			return;
2661 		}
2662 
2663 		blob_insert_cluster_revert(ctx);
2664 	}
2665 
2666 	bs_sequence_finish(ctx->seq, bserrno);
2667 }
2668 
2669 static void
2670 blob_write_copy_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2671 {
2672 	struct spdk_blob_copy_cluster_ctx *ctx = cb_arg;
2673 	uint32_t cluster_number;
2674 
2675 	if (bserrno) {
2676 		/* The write failed, so jump to the final completion handler */
2677 		bs_sequence_finish(seq, bserrno);
2678 		return;
2679 	}
2680 
2681 	cluster_number = bs_page_to_cluster(ctx->blob->bs, ctx->page);
2682 
2683 	blob_insert_cluster_on_md_thread(ctx->blob, cluster_number, ctx->new_cluster,
2684 					 ctx->new_extent_page, ctx->new_cluster_page, blob_insert_cluster_cpl, ctx);
2685 }
2686 
2687 static void
2688 blob_write_copy(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2689 {
2690 	struct spdk_blob_copy_cluster_ctx *ctx = cb_arg;
2691 
2692 	if (bserrno != 0) {
2693 		/* The read failed, so jump to the final completion handler */
2694 		bs_sequence_finish(seq, bserrno);
2695 		return;
2696 	}
2697 
2698 	/* Write whole cluster */
2699 	bs_sequence_write_dev(seq, ctx->buf,
2700 			      bs_cluster_to_lba(ctx->blob->bs, ctx->new_cluster),
2701 			      bs_cluster_to_lba(ctx->blob->bs, 1),
2702 			      blob_write_copy_cpl, ctx);
2703 }
2704 
2705 static bool
2706 blob_can_copy(struct spdk_blob *blob, uint32_t cluster_start_page, uint64_t *base_lba)
2707 {
2708 	uint64_t lba = bs_dev_page_to_lba(blob->back_bs_dev, cluster_start_page);
2709 
2710 	return (!blob_is_esnap_clone(blob) && blob->bs->dev->copy != NULL) &&
2711 	       blob->back_bs_dev->translate_lba(blob->back_bs_dev, lba, base_lba);
2712 }
2713 
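/*
 * blob_can_copy() gates the copy-offload path: a cluster can be copied
 * directly on the device only when the blob is not an esnap clone, the
 * blobstore device implements ->copy, and the backing dev can translate the
 * cluster's start LBA into an address on that same device. Otherwise the
 * cluster takes the read-into-buffer / write path below.
 */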
2714 static void
2715 blob_copy(struct spdk_blob_copy_cluster_ctx *ctx, spdk_bs_user_op_t *op, uint64_t src_lba)
2716 {
2717 	struct spdk_blob *blob = ctx->blob;
2718 	uint64_t lba_count = bs_dev_byte_to_lba(blob->back_bs_dev, blob->bs->cluster_sz);
2719 
2720 	bs_sequence_copy_dev(ctx->seq,
2721 			     bs_cluster_to_lba(blob->bs, ctx->new_cluster),
2722 			     src_lba,
2723 			     lba_count,
2724 			     blob_write_copy_cpl, ctx);
2725 }
2726 
2727 static void
2728 bs_allocate_and_copy_cluster(struct spdk_blob *blob,
2729 			     struct spdk_io_channel *_ch,
2730 			     uint64_t io_unit, spdk_bs_user_op_t *op)
2731 {
2732 	struct spdk_bs_cpl cpl;
2733 	struct spdk_bs_channel *ch;
2734 	struct spdk_blob_copy_cluster_ctx *ctx;
2735 	uint32_t cluster_start_page;
2736 	uint32_t cluster_number;
2737 	bool is_zeroes;
2738 	bool can_copy;
2739 	uint64_t copy_src_lba;
2740 	int rc;
2741 
2742 	ch = spdk_io_channel_get_ctx(_ch);
2743 
2744 	if (!TAILQ_EMPTY(&ch->need_cluster_alloc)) {
2745 		/* There are already operations pending. Queue this user op
2746 		 * and return because it will be re-executed when the outstanding
2747 		 * cluster allocation completes. */
2748 		TAILQ_INSERT_TAIL(&ch->need_cluster_alloc, op, link);
2749 		return;
2750 	}
2751 
2752 	/* Round the io_unit offset down to the first page in the cluster */
2753 	cluster_start_page = bs_io_unit_to_cluster_start(blob, io_unit);
2754 
2755 	/* Calculate which index in the metadata cluster array the corresponding
2756 	 * cluster is supposed to be at. */
2757 	cluster_number = bs_io_unit_to_cluster_number(blob, io_unit);
2758 
2759 	ctx = calloc(1, sizeof(*ctx));
2760 	if (!ctx) {
2761 		bs_user_op_abort(op, -ENOMEM);
2762 		return;
2763 	}
2764 
2765 	assert(blob->bs->cluster_sz % blob->back_bs_dev->blocklen == 0);
2766 
2767 	ctx->blob = blob;
2768 	ctx->page = cluster_start_page;
2769 	ctx->new_cluster_page = ch->new_cluster_page;
2770 	memset(ctx->new_cluster_page, 0, SPDK_BS_PAGE_SIZE);
2771 	can_copy = blob_can_copy(blob, cluster_start_page, &copy_src_lba);
2772 
2773 	is_zeroes = blob->back_bs_dev->is_zeroes(blob->back_bs_dev,
2774 			bs_dev_page_to_lba(blob->back_bs_dev, cluster_start_page),
2775 			bs_dev_byte_to_lba(blob->back_bs_dev, blob->bs->cluster_sz));
2776 	if (blob->parent_id != SPDK_BLOBID_INVALID && !is_zeroes && !can_copy) {
2777 		ctx->buf = spdk_malloc(blob->bs->cluster_sz, blob->back_bs_dev->blocklen,
2778 				       NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
2779 		if (!ctx->buf) {
2780 			SPDK_ERRLOG("DMA allocation for cluster of size = %" PRIu32 " failed.\n",
2781 				    blob->bs->cluster_sz);
2782 			free(ctx);
2783 			bs_user_op_abort(op, -ENOMEM);
2784 			return;
2785 		}
2786 	}
2787 
2788 	spdk_spin_lock(&blob->bs->used_lock);
2789 	rc = bs_allocate_cluster(blob, cluster_number, &ctx->new_cluster, &ctx->new_extent_page,
2790 				 false);
2791 	spdk_spin_unlock(&blob->bs->used_lock);
2792 	if (rc != 0) {
2793 		spdk_free(ctx->buf);
2794 		free(ctx);
2795 		bs_user_op_abort(op, rc);
2796 		return;
2797 	}
2798 
2799 	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
2800 	cpl.u.blob_basic.cb_fn = blob_allocate_and_copy_cluster_cpl;
2801 	cpl.u.blob_basic.cb_arg = ctx;
2802 
2803 	ctx->seq = bs_sequence_start_blob(_ch, &cpl, blob);
2804 	if (!ctx->seq) {
2805 		spdk_spin_lock(&blob->bs->used_lock);
2806 		bs_release_cluster(blob->bs, ctx->new_cluster);
2807 		spdk_spin_unlock(&blob->bs->used_lock);
2808 		spdk_free(ctx->buf);
2809 		free(ctx);
2810 		bs_user_op_abort(op, -ENOMEM);
2811 		return;
2812 	}
2813 
2814 	/* Queue the user op to block other incoming operations */
2815 	TAILQ_INSERT_TAIL(&ch->need_cluster_alloc, op, link);
2816 
2817 	if (blob->parent_id != SPDK_BLOBID_INVALID && !is_zeroes) {
2818 		if (can_copy) {
2819 			blob_copy(ctx, op, copy_src_lba);
2820 		} else {
2821 			/* Read cluster from backing device */
2822 			bs_sequence_read_bs_dev(ctx->seq, blob->back_bs_dev, ctx->buf,
2823 						bs_dev_page_to_lba(blob->back_bs_dev, cluster_start_page),
2824 						bs_dev_byte_to_lba(blob->back_bs_dev, blob->bs->cluster_sz),
2825 						blob_write_copy, ctx);
2826 		}
2827 
2828 	} else {
2829 		blob_insert_cluster_on_md_thread(ctx->blob, cluster_number, ctx->new_cluster,
2830 						 ctx->new_extent_page, ctx->new_cluster_page, blob_insert_cluster_cpl, ctx);
2831 	}
2832 }
2833 
2834 static inline bool
2835 blob_calculate_lba_and_lba_count(struct spdk_blob *blob, uint64_t io_unit, uint64_t length,
2836 				 uint64_t *lba,	uint64_t *lba_count)
2837 {
2838 	*lba_count = length;
2839 
2840 	if (!bs_io_unit_is_allocated(blob, io_unit)) {
2841 		assert(blob->back_bs_dev != NULL);
2842 		*lba = bs_io_unit_to_back_dev_lba(blob, io_unit);
2843 		*lba_count = bs_io_unit_to_back_dev_lba(blob, *lba_count);
2844 		return false;
2845 	} else {
2846 		*lba = bs_blob_io_unit_to_lba(blob, io_unit);
2847 		return true;
2848 	}
2849 }
2850 
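/*
 * In short: for an unallocated io_unit the function above returns false and
 * translates both the offset and the length into back_bs_dev LBAs (the
 * backing device may use a different block size), while for an allocated
 * io_unit it returns true with an LBA on the blobstore device itself.
 * Callers use the return value to route the I/O accordingly.
 */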
2851 struct op_split_ctx {
2852 	struct spdk_blob *blob;
2853 	struct spdk_io_channel *channel;
2854 	uint64_t io_unit_offset;
2855 	uint64_t io_units_remaining;
2856 	void *curr_payload;
2857 	enum spdk_blob_op_type op_type;
2858 	spdk_bs_sequence_t *seq;
2859 	bool in_submit_ctx;
2860 	bool completed_in_submit_ctx;
2861 	bool done;
2862 };
2863 
2864 static void
2865 blob_request_submit_op_split_next(void *cb_arg, int bserrno)
2866 {
2867 	struct op_split_ctx	*ctx = cb_arg;
2868 	struct spdk_blob	*blob = ctx->blob;
2869 	struct spdk_io_channel	*ch = ctx->channel;
2870 	enum spdk_blob_op_type	op_type = ctx->op_type;
2871 	uint8_t			*buf;
2872 	uint64_t		offset;
2873 	uint64_t		length;
2874 	uint64_t		op_length;
2875 
2876 	if (bserrno != 0 || ctx->io_units_remaining == 0) {
2877 		bs_sequence_finish(ctx->seq, bserrno);
2878 		if (ctx->in_submit_ctx) {
2879 			/* Defer freeing of the ctx object, since it will be
2880 			 * accessed when this unwinds back to the submission
2881 			 * context.
2882 			 */
2883 			ctx->done = true;
2884 		} else {
2885 			free(ctx);
2886 		}
2887 		return;
2888 	}
2889 
2890 	if (ctx->in_submit_ctx) {
2891 		/* If this split operation completed in the context
2892 		 * of its submission, mark the flag and return immediately
2893 		 * to avoid recursion.
2894 		 */
2895 		ctx->completed_in_submit_ctx = true;
2896 		return;
2897 	}
2898 
2899 	while (true) {
2900 		ctx->completed_in_submit_ctx = false;
2901 
2902 		offset = ctx->io_unit_offset;
2903 		length = ctx->io_units_remaining;
2904 		buf = ctx->curr_payload;
2905 		op_length = spdk_min(length, bs_num_io_units_to_cluster_boundary(blob,
2906 				     offset));
2907 
2908 		/* Update length and payload for next operation */
2909 		ctx->io_units_remaining -= op_length;
2910 		ctx->io_unit_offset += op_length;
2911 		if (op_type == SPDK_BLOB_WRITE || op_type == SPDK_BLOB_READ) {
2912 			ctx->curr_payload += op_length * blob->bs->io_unit_size;
2913 		}
2914 
2915 		assert(!ctx->in_submit_ctx);
2916 		ctx->in_submit_ctx = true;
2917 
2918 		switch (op_type) {
2919 		case SPDK_BLOB_READ:
2920 			spdk_blob_io_read(blob, ch, buf, offset, op_length,
2921 					  blob_request_submit_op_split_next, ctx);
2922 			break;
2923 		case SPDK_BLOB_WRITE:
2924 			spdk_blob_io_write(blob, ch, buf, offset, op_length,
2925 					   blob_request_submit_op_split_next, ctx);
2926 			break;
2927 		case SPDK_BLOB_UNMAP:
2928 			spdk_blob_io_unmap(blob, ch, offset, op_length,
2929 					   blob_request_submit_op_split_next, ctx);
2930 			break;
2931 		case SPDK_BLOB_WRITE_ZEROES:
2932 			spdk_blob_io_write_zeroes(blob, ch, offset, op_length,
2933 						  blob_request_submit_op_split_next, ctx);
2934 			break;
2935 		case SPDK_BLOB_READV:
2936 		case SPDK_BLOB_WRITEV:
2937 			SPDK_ERRLOG("readv/writev not valid\n");
2938 			bs_sequence_finish(ctx->seq, -EINVAL);
2939 			free(ctx);
2940 			return;
2941 		}
2942 
2943 #ifndef __clang_analyzer__
2944 		/* scan-build reports a false positive around accessing the ctx here. It
2945 		 * forms a path that recursively calls this function, but then says
2946 		 * "assuming ctx->in_submit_ctx is false", when that isn't possible.
2947 		 * This path does free(ctx), returns to here, and reports a use-after-free
2948 		 * bug.  Wrapping this bit of code so that scan-build doesn't see it
2949 		 * works around the scan-build bug.
2950 		 */
2951 		assert(ctx->in_submit_ctx);
2952 		ctx->in_submit_ctx = false;
2953 
2954 		/* If the operation completed immediately, loop back and submit the
2955 		 * next operation.  Otherwise we can return and the next split
2956 		 * operation will get submitted when this current operation is
2957 		 * later completed asynchronously.
2958 		 */
2959 		if (ctx->completed_in_submit_ctx) {
2960 			continue;
2961 		} else if (ctx->done) {
2962 			free(ctx);
2963 		}
2964 #endif
2965 		break;
2966 	}
2967 }
2968 
2969 static void
2970 blob_request_submit_op_split(struct spdk_io_channel *ch, struct spdk_blob *blob,
2971 			     void *payload, uint64_t offset, uint64_t length,
2972 			     spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type)
2973 {
2974 	struct op_split_ctx *ctx;
2975 	spdk_bs_sequence_t *seq;
2976 	struct spdk_bs_cpl cpl;
2977 
2978 	assert(blob != NULL);
2979 
2980 	ctx = calloc(1, sizeof(struct op_split_ctx));
2981 	if (ctx == NULL) {
2982 		cb_fn(cb_arg, -ENOMEM);
2983 		return;
2984 	}
2985 
2986 	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
2987 	cpl.u.blob_basic.cb_fn = cb_fn;
2988 	cpl.u.blob_basic.cb_arg = cb_arg;
2989 
2990 	seq = bs_sequence_start_blob(ch, &cpl, blob);
2991 	if (!seq) {
2992 		free(ctx);
2993 		cb_fn(cb_arg, -ENOMEM);
2994 		return;
2995 	}
2996 
2997 	ctx->blob = blob;
2998 	ctx->channel = ch;
2999 	ctx->curr_payload = payload;
3000 	ctx->io_unit_offset = offset;
3001 	ctx->io_units_remaining = length;
3002 	ctx->op_type = op_type;
3003 	ctx->seq = seq;
3004 
3005 	blob_request_submit_op_split_next(ctx, 0);
3006 }
3007 
3008 static void
3009 spdk_free_cluster_unmap_complete(void *cb_arg, int bserrno)
3010 {
3011 	struct spdk_blob_free_cluster_ctx *ctx = cb_arg;
3012 
3013 	if (bserrno) {
3014 		bs_sequence_finish(ctx->seq, bserrno);
3015 		free(ctx);
3016 		return;
3017 	}
3018 
3019 	blob_free_cluster_on_md_thread(ctx->blob, ctx->cluster_num,
3020 				       ctx->extent_page, ctx->md_page, blob_free_cluster_cpl, ctx);
3021 }
3022 
3023 static void
3024 blob_request_submit_op_single(struct spdk_io_channel *_ch, struct spdk_blob *blob,
3025 			      void *payload, uint64_t offset, uint64_t length,
3026 			      spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type)
3027 {
3028 	struct spdk_bs_cpl cpl;
3029 	uint64_t lba;
3030 	uint64_t lba_count;
3031 	bool is_allocated;
3032 
3033 	assert(blob != NULL);
3034 
3035 	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
3036 	cpl.u.blob_basic.cb_fn = cb_fn;
3037 	cpl.u.blob_basic.cb_arg = cb_arg;
3038 
3039 	if (blob->frozen_refcnt) {
3040 		/* This blob I/O is frozen */
3041 		spdk_bs_user_op_t *op;
3042 		struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(_ch);
3043 
3044 		op = bs_user_op_alloc(_ch, &cpl, op_type, blob, payload, 0, offset, length);
3045 		if (!op) {
3046 			cb_fn(cb_arg, -ENOMEM);
3047 			return;
3048 		}
3049 
3050 		TAILQ_INSERT_TAIL(&bs_channel->queued_io, op, link);
3051 
3052 		return;
3053 	}
3054 
3055 	is_allocated = blob_calculate_lba_and_lba_count(blob, offset, length, &lba, &lba_count);
3056 
3057 	switch (op_type) {
3058 	case SPDK_BLOB_READ: {
3059 		spdk_bs_batch_t *batch;
3060 
3061 		batch = bs_batch_open(_ch, &cpl, blob);
3062 		if (!batch) {
3063 			cb_fn(cb_arg, -ENOMEM);
3064 			return;
3065 		}
3066 
3067 		if (is_allocated) {
3068 			/* Read from the blob */
3069 			bs_batch_read_dev(batch, payload, lba, lba_count);
3070 		} else {
3071 			/* Read from the backing block device */
3072 			bs_batch_read_bs_dev(batch, blob->back_bs_dev, payload, lba, lba_count);
3073 		}
3074 
3075 		bs_batch_close(batch);
3076 		break;
3077 	}
3078 	case SPDK_BLOB_WRITE:
3079 	case SPDK_BLOB_WRITE_ZEROES: {
3080 		if (is_allocated) {
3081 			/* Write to the blob */
3082 			spdk_bs_batch_t *batch;
3083 
3084 			if (lba_count == 0) {
3085 				cb_fn(cb_arg, 0);
3086 				return;
3087 			}
3088 
3089 			batch = bs_batch_open(_ch, &cpl, blob);
3090 			if (!batch) {
3091 				cb_fn(cb_arg, -ENOMEM);
3092 				return;
3093 			}
3094 
3095 			if (op_type == SPDK_BLOB_WRITE) {
3096 				bs_batch_write_dev(batch, payload, lba, lba_count);
3097 			} else {
3098 				bs_batch_write_zeroes_dev(batch, lba, lba_count);
3099 			}
3100 
3101 			bs_batch_close(batch);
3102 		} else {
3103 			/* Queue this operation and allocate the cluster */
3104 			spdk_bs_user_op_t *op;
3105 
3106 			op = bs_user_op_alloc(_ch, &cpl, op_type, blob, payload, 0, offset, length);
3107 			if (!op) {
3108 				cb_fn(cb_arg, -ENOMEM);
3109 				return;
3110 			}
3111 
3112 			bs_allocate_and_copy_cluster(blob, _ch, offset, op);
3113 		}
3114 		break;
3115 	}
3116 	case SPDK_BLOB_UNMAP: {
3117 		struct spdk_blob_free_cluster_ctx *ctx = NULL;
3118 		spdk_bs_batch_t *batch;
3119 
3120 		/* If the unmap is aligned to a whole cluster, release the cluster */
3121 		if (spdk_blob_is_thin_provisioned(blob) && is_allocated &&
3122 		    bs_io_units_per_cluster(blob) == length) {
3123 			struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(_ch);
3124 			uint32_t cluster_start_page;
3125 			uint32_t cluster_number;
3126 
3127 			assert(offset % bs_io_units_per_cluster(blob) == 0);
3128 
3129 			/* Round the io_unit offset down to the first page in the cluster */
3130 			cluster_start_page = bs_io_unit_to_cluster_start(blob, offset);
3131 
3132 			/* Calculate which index in the metadata cluster array the corresponding
3133 			 * cluster is supposed to be at. */
3134 			cluster_number = bs_io_unit_to_cluster_number(blob, offset);
3135 
3136 			ctx = calloc(1, sizeof(*ctx));
3137 			if (!ctx) {
3138 				cb_fn(cb_arg, -ENOMEM);
3139 				return;
3140 			}
3141 			/* When freeing a cluster, the flow should be (in order):
3142 			 * 1. Unmap the underlying area (so that if the cluster is reclaimed in the
3143 			 * future, it won't leak old data)
3144 			 * 2. Once the unmap completes (to avoid any races with incoming writes that
3145 			 * may claim the cluster), update and sync the metadata, freeing the cluster
3146 			 * 3. Once the metadata update is done, complete the user unmap request
3147 			 */
3148 			ctx->blob = blob;
3149 			ctx->page = cluster_start_page;
3150 			ctx->cluster_num = cluster_number;
3151 			ctx->md_page = bs_channel->new_cluster_page;
3152 			ctx->seq = bs_sequence_start_bs(_ch, &cpl);
3153 			if (!ctx->seq) {
3154 				free(ctx);
3155 				cb_fn(cb_arg, -ENOMEM);
3156 				return;
3157 			}
3158 
3159 			if (blob->use_extent_table) {
3160 				ctx->extent_page = *bs_cluster_to_extent_page(blob, cluster_number);
3161 			}
3162 
3163 			cpl.u.blob_basic.cb_fn = spdk_free_cluster_unmap_complete;
3164 			cpl.u.blob_basic.cb_arg = ctx;
3165 		}
3166 
3167 		batch = bs_batch_open(_ch, &cpl, blob);
3168 		if (!batch) {
3169 			free(ctx);
3170 			cb_fn(cb_arg, -ENOMEM);
3171 			return;
3172 		}
3173 
3174 		if (is_allocated) {
3175 			bs_batch_unmap_dev(batch, lba, lba_count);
3176 		}
3177 
3178 		bs_batch_close(batch);
3179 		break;
3180 	}
3181 	case SPDK_BLOB_READV:
3182 	case SPDK_BLOB_WRITEV:
3183 		SPDK_ERRLOG("readv/writev not valid\n");
3184 		cb_fn(cb_arg, -EINVAL);
3185 		break;
3186 	}
3187 }
3188 
3189 static void
3190 blob_request_submit_op(struct spdk_blob *blob, struct spdk_io_channel *_channel,
3191 		       void *payload, uint64_t offset, uint64_t length,
3192 		       spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type)
3193 {
3194 	assert(blob != NULL);
3195 
3196 	if (blob->data_ro && op_type != SPDK_BLOB_READ) {
3197 		cb_fn(cb_arg, -EPERM);
3198 		return;
3199 	}
3200 
3201 	if (length == 0) {
3202 		cb_fn(cb_arg, 0);
3203 		return;
3204 	}
3205 
3206 	if (offset + length > bs_cluster_to_lba(blob->bs, blob->active.num_clusters)) {
3207 		cb_fn(cb_arg, -EINVAL);
3208 		return;
3209 	}
3210 	if (length <= bs_num_io_units_to_cluster_boundary(blob, offset)) {
3211 		blob_request_submit_op_single(_channel, blob, payload, offset, length,
3212 					      cb_fn, cb_arg, op_type);
3213 	} else {
3214 		blob_request_submit_op_split(_channel, blob, payload, offset, length,
3215 					     cb_fn, cb_arg, op_type);
3216 	}
3217 }
3218 
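/*
 * Illustrative sketch (not part of blobstore): the public single-buffer I/O
 * entry points funnel into blob_request_submit_op(). A request that stays
 * within one cluster takes the "single" path; one that crosses a cluster
 * boundary is split. The buffer and callback names are hypothetical.
 */
#if 0
/* offset and length are in io_units */
spdk_blob_io_write(blob, channel, buf, offset, length, example_write_cb, NULL);
spdk_blob_io_read(blob, channel, buf, offset, length, example_read_cb, NULL);
#endif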
3219 struct rw_iov_ctx {
3220 	struct spdk_blob *blob;
3221 	struct spdk_io_channel *channel;
3222 	spdk_blob_op_complete cb_fn;
3223 	void *cb_arg;
3224 	bool read;
3225 	int iovcnt;
3226 	struct iovec *orig_iov;
3227 	uint64_t io_unit_offset;
3228 	uint64_t io_units_remaining;
3229 	uint64_t io_units_done;
3230 	struct spdk_blob_ext_io_opts *ext_io_opts;
3231 	struct iovec iov[0];
3232 };
3233 
3234 static void
3235 rw_iov_done(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
3236 {
3237 	assert(cb_arg == NULL);
3238 	bs_sequence_finish(seq, bserrno);
3239 }
3240 
3241 static void
3242 rw_iov_split_next(void *cb_arg, int bserrno)
3243 {
3244 	struct rw_iov_ctx *ctx = cb_arg;
3245 	struct spdk_blob *blob = ctx->blob;
3246 	struct iovec *iov, *orig_iov;
3247 	int iovcnt;
3248 	size_t orig_iovoff;
3249 	uint64_t io_units_count, io_units_to_boundary, io_unit_offset;
3250 	uint64_t byte_count;
3251 
3252 	if (bserrno != 0 || ctx->io_units_remaining == 0) {
3253 		ctx->cb_fn(ctx->cb_arg, bserrno);
3254 		free(ctx);
3255 		return;
3256 	}
3257 
3258 	io_unit_offset = ctx->io_unit_offset;
3259 	io_units_to_boundary = bs_num_io_units_to_cluster_boundary(blob, io_unit_offset);
3260 	io_units_count = spdk_min(ctx->io_units_remaining, io_units_to_boundary);
3261 	/*
3262 	 * Get the index and offset into the original iov array for our current position
3263 	 *  in the I/O sequence.  byte_count tracks how many bytes remain until orig_iov
3264 	 *  and orig_iovoff point to that position.
3265 	 */
3266 	byte_count = ctx->io_units_done * blob->bs->io_unit_size;
3267 	orig_iov = &ctx->orig_iov[0];
3268 	orig_iovoff = 0;
3269 	while (byte_count > 0) {
3270 		if (byte_count >= orig_iov->iov_len) {
3271 			byte_count -= orig_iov->iov_len;
3272 			orig_iov++;
3273 		} else {
3274 			orig_iovoff = byte_count;
3275 			byte_count = 0;
3276 		}
3277 	}
3278 
3279 	/*
3280 	 * Build an iov array for the next I/O in the sequence.  byte_count will keep track of how many
3281 	 *  bytes of this next I/O remain to be accounted for in the new iov array.
3282 	 */
3283 	byte_count = io_units_count * blob->bs->io_unit_size;
3284 	iov = &ctx->iov[0];
3285 	iovcnt = 0;
3286 	while (byte_count > 0) {
3287 		assert(iovcnt < ctx->iovcnt);
3288 		iov->iov_len = spdk_min(byte_count, orig_iov->iov_len - orig_iovoff);
3289 		iov->iov_base = orig_iov->iov_base + orig_iovoff;
3290 		byte_count -= iov->iov_len;
3291 		orig_iovoff = 0;
3292 		orig_iov++;
3293 		iov++;
3294 		iovcnt++;
3295 	}
3296 
3297 	ctx->io_unit_offset += io_units_count;
3298 	ctx->io_units_remaining -= io_units_count;
3299 	ctx->io_units_done += io_units_count;
3300 	iov = &ctx->iov[0];
3301 
3302 	if (ctx->read) {
3303 		spdk_blob_io_readv_ext(ctx->blob, ctx->channel, iov, iovcnt, io_unit_offset,
3304 				       io_units_count, rw_iov_split_next, ctx, ctx->ext_io_opts);
3305 	} else {
3306 		spdk_blob_io_writev_ext(ctx->blob, ctx->channel, iov, iovcnt, io_unit_offset,
3307 					io_units_count, rw_iov_split_next, ctx, ctx->ext_io_opts);
3308 	}
3309 }
3310 
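/*
 * Worked example of the iov walk above (hypothetical numbers): with an
 * io_unit_size of 512 and orig_iov = [4096 bytes, 4096 bytes], after a first
 * sub-I/O of 10 io_units the next pass starts with byte_count = 5120, which
 * consumes the first iov entry and leaves orig_iovoff = 1024 into the second
 * entry - exactly where the next sub-I/O's iov array must begin.
 */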
3311 static void
3312 blob_request_submit_rw_iov(struct spdk_blob *blob, struct spdk_io_channel *_channel,
3313 			   struct iovec *iov, int iovcnt,
3314 			   uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg, bool read,
3315 			   struct spdk_blob_ext_io_opts *ext_io_opts)
3316 {
3317 	struct spdk_bs_cpl	cpl;
3318 
3319 	assert(blob != NULL);
3320 
3321 	if (!read && blob->data_ro) {
3322 		cb_fn(cb_arg, -EPERM);
3323 		return;
3324 	}
3325 
3326 	if (length == 0) {
3327 		cb_fn(cb_arg, 0);
3328 		return;
3329 	}
3330 
3331 	if (offset + length > bs_cluster_to_lba(blob->bs, blob->active.num_clusters)) {
3332 		cb_fn(cb_arg, -EINVAL);
3333 		return;
3334 	}
3335 
3336 	/*
3337 	 * For now, we implement readv/writev using a sequence (instead of a batch) to account for having
3338 	 *  to split a request that spans a cluster boundary.  For I/Os that do not span a cluster boundary,
3339 	 *  there will be no noticeable difference compared to using a batch.  For I/Os that do span a cluster
3340 	 *  boundary, the target LBAs (after blob offset to LBA translation) may not be contiguous, so we need
3341 	 *  to allocate a separate iov array and split the I/O such that none of the resulting
3342 	 *  smaller I/Os crosses a cluster boundary.  These smaller I/Os will be issued in sequence (not in
3343 	 *  parallel), but since this case happens very infrequently, any performance impact will be negligible.
3344 	 *
3345 	 * This could be optimized in the future to allocate a big enough iov array to account for all of the iovs
3346 	 *  for all of the smaller I/Os, pre-build all of the iov arrays for the smaller I/Os, then issue them
3347 	 *  in a batch.  That would also require creating an intermediate spdk_bs_cpl that would get called
3348 	 *  when the batch was completed, to allow for freeing the memory for the iov arrays.
3349 	 */
3350 	if (spdk_likely(length <= bs_num_io_units_to_cluster_boundary(blob, offset))) {
3351 		uint64_t lba_count;
3352 		uint64_t lba;
3353 		bool is_allocated;
3354 
3355 		cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
3356 		cpl.u.blob_basic.cb_fn = cb_fn;
3357 		cpl.u.blob_basic.cb_arg = cb_arg;
3358 
3359 		if (blob->frozen_refcnt) {
3360 			/* This blob I/O is frozen */
3361 			enum spdk_blob_op_type op_type;
3362 			spdk_bs_user_op_t *op;
3363 			struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(_channel);
3364 
3365 			op_type = read ? SPDK_BLOB_READV : SPDK_BLOB_WRITEV;
3366 			op = bs_user_op_alloc(_channel, &cpl, op_type, blob, iov, iovcnt, offset, length);
3367 			if (!op) {
3368 				cb_fn(cb_arg, -ENOMEM);
3369 				return;
3370 			}
3371 
3372 			TAILQ_INSERT_TAIL(&bs_channel->queued_io, op, link);
3373 
3374 			return;
3375 		}
3376 
3377 		is_allocated = blob_calculate_lba_and_lba_count(blob, offset, length, &lba, &lba_count);
3378 
3379 		if (read) {
3380 			spdk_bs_sequence_t *seq;
3381 
3382 			seq = bs_sequence_start_blob(_channel, &cpl, blob);
3383 			if (!seq) {
3384 				cb_fn(cb_arg, -ENOMEM);
3385 				return;
3386 			}
3387 
3388 			seq->ext_io_opts = ext_io_opts;
3389 
3390 			if (is_allocated) {
3391 				bs_sequence_readv_dev(seq, iov, iovcnt, lba, lba_count, rw_iov_done, NULL);
3392 			} else {
3393 				bs_sequence_readv_bs_dev(seq, blob->back_bs_dev, iov, iovcnt, lba, lba_count,
3394 							 rw_iov_done, NULL);
3395 			}
3396 		} else {
3397 			if (is_allocated) {
3398 				spdk_bs_sequence_t *seq;
3399 
3400 				seq = bs_sequence_start_blob(_channel, &cpl, blob);
3401 				if (!seq) {
3402 					cb_fn(cb_arg, -ENOMEM);
3403 					return;
3404 				}
3405 
3406 				seq->ext_io_opts = ext_io_opts;
3407 
3408 				bs_sequence_writev_dev(seq, iov, iovcnt, lba, lba_count, rw_iov_done, NULL);
3409 			} else {
3410 				/* Queue this operation and allocate the cluster */
3411 				spdk_bs_user_op_t *op;
3412 
3413 				op = bs_user_op_alloc(_channel, &cpl, SPDK_BLOB_WRITEV, blob, iov, iovcnt, offset,
3414 						      length);
3415 				if (!op) {
3416 					cb_fn(cb_arg, -ENOMEM);
3417 					return;
3418 				}
3419 
3420 				op->ext_io_opts = ext_io_opts;
3421 
3422 				bs_allocate_and_copy_cluster(blob, _channel, offset, op);
3423 			}
3424 		}
3425 	} else {
3426 		struct rw_iov_ctx *ctx;
3427 
3428 		ctx = calloc(1, sizeof(struct rw_iov_ctx) + iovcnt * sizeof(struct iovec));
3429 		if (ctx == NULL) {
3430 			cb_fn(cb_arg, -ENOMEM);
3431 			return;
3432 		}
3433 
3434 		ctx->blob = blob;
3435 		ctx->channel = _channel;
3436 		ctx->cb_fn = cb_fn;
3437 		ctx->cb_arg = cb_arg;
3438 		ctx->read = read;
3439 		ctx->orig_iov = iov;
3440 		ctx->iovcnt = iovcnt;
3441 		ctx->io_unit_offset = offset;
3442 		ctx->io_units_remaining = length;
3443 		ctx->io_units_done = 0;
3444 		ctx->ext_io_opts = ext_io_opts;
3445 
3446 		rw_iov_split_next(ctx, 0);
3447 	}
3448 }
3449 
3450 static struct spdk_blob *
3451 blob_lookup(struct spdk_blob_store *bs, spdk_blob_id blobid)
3452 {
3453 	struct spdk_blob find;
3454 
3455 	if (spdk_bit_array_get(bs->open_blobids, blobid) == 0) {
3456 		return NULL;
3457 	}
3458 
3459 	find.id = blobid;
3460 	return RB_FIND(spdk_blob_tree, &bs->open_blobs, &find);
3461 }
3462 
3463 static void
3464 blob_get_snapshot_and_clone_entries(struct spdk_blob *blob,
3465 				    struct spdk_blob_list **snapshot_entry, struct spdk_blob_list **clone_entry)
3466 {
3467 	assert(blob != NULL);
3468 	*snapshot_entry = NULL;
3469 	*clone_entry = NULL;
3470 
3471 	if (blob->parent_id == SPDK_BLOBID_INVALID) {
3472 		return;
3473 	}
3474 
3475 	TAILQ_FOREACH(*snapshot_entry, &blob->bs->snapshots, link) {
3476 		if ((*snapshot_entry)->id == blob->parent_id) {
3477 			break;
3478 		}
3479 	}
3480 
3481 	if (*snapshot_entry != NULL) {
3482 		TAILQ_FOREACH(*clone_entry, &(*snapshot_entry)->clones, link) {
3483 			if ((*clone_entry)->id == blob->id) {
3484 				break;
3485 			}
3486 		}
3487 
3488 		assert(*clone_entry != NULL);
3489 	}
3490 }
3491 
3492 static int
3493 bs_channel_create(void *io_device, void *ctx_buf)
3494 {
3495 	struct spdk_blob_store		*bs = io_device;
3496 	struct spdk_bs_channel		*channel = ctx_buf;
3497 	struct spdk_bs_dev		*dev;
3498 	uint32_t			max_ops = bs->max_channel_ops;
3499 	uint32_t			i;
3500 
3501 	dev = bs->dev;
3502 
3503 	channel->req_mem = calloc(max_ops, sizeof(struct spdk_bs_request_set));
3504 	if (!channel->req_mem) {
3505 		return -1;
3506 	}
3507 
3508 	TAILQ_INIT(&channel->reqs);
3509 
3510 	for (i = 0; i < max_ops; i++) {
3511 		TAILQ_INSERT_TAIL(&channel->reqs, &channel->req_mem[i], link);
3512 	}
3513 
3514 	channel->bs = bs;
3515 	channel->dev = dev;
3516 	channel->dev_channel = dev->create_channel(dev);
3517 
3518 	if (!channel->dev_channel) {
3519 		SPDK_ERRLOG("Failed to create device channel.\n");
3520 		free(channel->req_mem);
3521 		return -1;
3522 	}
3523 
3524 	channel->new_cluster_page = spdk_zmalloc(SPDK_BS_PAGE_SIZE, 0, NULL, SPDK_ENV_SOCKET_ID_ANY,
3525 				    SPDK_MALLOC_DMA);
3526 	if (!channel->new_cluster_page) {
3527 		SPDK_ERRLOG("Failed to allocate new cluster page\n");
3528 		free(channel->req_mem);
3529 		channel->dev->destroy_channel(channel->dev, channel->dev_channel);
3530 		return -1;
3531 	}
3532 
3533 	TAILQ_INIT(&channel->need_cluster_alloc);
3534 	TAILQ_INIT(&channel->queued_io);
3535 	RB_INIT(&channel->esnap_channels);
3536 
3537 	return 0;
3538 }
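
/*
 * Usage sketch (hypothetical caller, not part of this file): the
 * constructor above runs once per thread, when that thread first gets a
 * blobstore channel:
 *
 *	struct spdk_io_channel *ch = spdk_bs_alloc_io_channel(bs);
 *	struct spdk_bs_channel *bs_ch = spdk_io_channel_get_ctx(ch);
 *	...
 *	spdk_bs_free_io_channel(ch);
 */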
3539 
3540 static void
3541 bs_channel_destroy(void *io_device, void *ctx_buf)
3542 {
3543 	struct spdk_bs_channel *channel = ctx_buf;
3544 	spdk_bs_user_op_t *op;
3545 
3546 	while (!TAILQ_EMPTY(&channel->need_cluster_alloc)) {
3547 		op = TAILQ_FIRST(&channel->need_cluster_alloc);
3548 		TAILQ_REMOVE(&channel->need_cluster_alloc, op, link);
3549 		bs_user_op_abort(op, -EIO);
3550 	}
3551 
3552 	while (!TAILQ_EMPTY(&channel->queued_io)) {
3553 		op = TAILQ_FIRST(&channel->queued_io);
3554 		TAILQ_REMOVE(&channel->queued_io, op, link);
3555 		bs_user_op_abort(op, -EIO);
3556 	}
3557 
3558 	blob_esnap_destroy_bs_channel(channel);
3559 
3560 	free(channel->req_mem);
3561 	spdk_free(channel->new_cluster_page);
3562 	channel->dev->destroy_channel(channel->dev, channel->dev_channel);
3563 }
3564 
3565 static void
3566 bs_dev_destroy(void *io_device)
3567 {
3568 	struct spdk_blob_store *bs = io_device;
3569 	struct spdk_blob	*blob, *blob_tmp;
3570 
3571 	bs->dev->destroy(bs->dev);
3572 
3573 	RB_FOREACH_SAFE(blob, spdk_blob_tree, &bs->open_blobs, blob_tmp) {
3574 		RB_REMOVE(spdk_blob_tree, &bs->open_blobs, blob);
3575 		spdk_bit_array_clear(bs->open_blobids, blob->id);
3576 		blob_free(blob);
3577 	}
3578 
3579 	spdk_spin_destroy(&bs->used_lock);
3580 
3581 	spdk_bit_array_free(&bs->open_blobids);
3582 	spdk_bit_array_free(&bs->used_blobids);
3583 	spdk_bit_array_free(&bs->used_md_pages);
3584 	spdk_bit_pool_free(&bs->used_clusters);
3585 	/*
3586 	 * If this function is called for any reason except a successful unload,
3587 	 * the unload_cpl type will be NONE and this will be a nop.
3588 	 */
3589 	bs_call_cpl(&bs->unload_cpl, bs->unload_err);
3590 
3591 	free(bs);
3592 }
3593 
3594 static int
3595 bs_blob_list_add(struct spdk_blob *blob)
3596 {
3597 	spdk_blob_id snapshot_id;
3598 	struct spdk_blob_list *snapshot_entry = NULL;
3599 	struct spdk_blob_list *clone_entry = NULL;
3600 
3601 	assert(blob != NULL);
3602 
3603 	snapshot_id = blob->parent_id;
3604 	if (snapshot_id == SPDK_BLOBID_INVALID ||
3605 	    snapshot_id == SPDK_BLOBID_EXTERNAL_SNAPSHOT) {
3606 		return 0;
3607 	}
3608 
3609 	snapshot_entry = bs_get_snapshot_entry(blob->bs, snapshot_id);
3610 	if (snapshot_entry == NULL) {
3611 		/* Snapshot not found */
3612 		snapshot_entry = calloc(1, sizeof(struct spdk_blob_list));
3613 		if (snapshot_entry == NULL) {
3614 			return -ENOMEM;
3615 		}
3616 		snapshot_entry->id = snapshot_id;
3617 		TAILQ_INIT(&snapshot_entry->clones);
3618 		TAILQ_INSERT_TAIL(&blob->bs->snapshots, snapshot_entry, link);
3619 	} else {
3620 		TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) {
3621 			if (clone_entry->id == blob->id) {
3622 				break;
3623 			}
3624 		}
3625 	}
3626 
3627 	if (clone_entry == NULL) {
3628 		/* Clone not found */
3629 		clone_entry = calloc(1, sizeof(struct spdk_blob_list));
3630 		if (clone_entry == NULL) {
3631 			return -ENOMEM;
3632 		}
3633 		clone_entry->id = blob->id;
3634 		TAILQ_INIT(&clone_entry->clones);
3635 		TAILQ_INSERT_TAIL(&snapshot_entry->clones, clone_entry, link);
3636 		snapshot_entry->clone_count++;
3637 	}
3638 
3639 	return 0;
3640 }
3641 
3642 static void
3643 bs_blob_list_remove(struct spdk_blob *blob)
3644 {
3645 	struct spdk_blob_list *snapshot_entry = NULL;
3646 	struct spdk_blob_list *clone_entry = NULL;
3647 
3648 	blob_get_snapshot_and_clone_entries(blob, &snapshot_entry, &clone_entry);
3649 
3650 	if (snapshot_entry == NULL) {
3651 		return;
3652 	}
3653 
3654 	blob->parent_id = SPDK_BLOBID_INVALID;
3655 	TAILQ_REMOVE(&snapshot_entry->clones, clone_entry, link);
3656 	free(clone_entry);
3657 
3658 	snapshot_entry->clone_count--;
3659 }
3660 
3661 static int
3662 bs_blob_list_free(struct spdk_blob_store *bs)
3663 {
3664 	struct spdk_blob_list *snapshot_entry;
3665 	struct spdk_blob_list *snapshot_entry_tmp;
3666 	struct spdk_blob_list *clone_entry;
3667 	struct spdk_blob_list *clone_entry_tmp;
3668 
3669 	TAILQ_FOREACH_SAFE(snapshot_entry, &bs->snapshots, link, snapshot_entry_tmp) {
3670 		TAILQ_FOREACH_SAFE(clone_entry, &snapshot_entry->clones, link, clone_entry_tmp) {
3671 			TAILQ_REMOVE(&snapshot_entry->clones, clone_entry, link);
3672 			free(clone_entry);
3673 		}
3674 		TAILQ_REMOVE(&bs->snapshots, snapshot_entry, link);
3675 		free(snapshot_entry);
3676 	}
3677 
3678 	return 0;
3679 }
3680 
3681 static void
3682 bs_free(struct spdk_blob_store *bs)
3683 {
3684 	bs_blob_list_free(bs);
3685 
3686 	bs_unregister_md_thread(bs);
3687 	spdk_io_device_unregister(bs, bs_dev_destroy);
3688 }
3689 
3690 void
3691 spdk_bs_opts_init(struct spdk_bs_opts *opts, size_t opts_size)
3692 {
3693 
3694 	if (!opts) {
3695 		SPDK_ERRLOG("opts should not be NULL\n");
3696 		return;
3697 	}
3698 
3699 	if (!opts_size) {
3700 		SPDK_ERRLOG("opts_size should not be zero value\n");
		SPDK_ERRLOG("opts_size should not be zero\n");
3702 	}
3703 
3704 	memset(opts, 0, opts_size);
3705 	opts->opts_size = opts_size;
3706 
3707 #define FIELD_OK(field) \
3708 	offsetof(struct spdk_bs_opts, field) + sizeof(opts->field) <= opts_size
3709 
3710 #define SET_FIELD(field, value) \
3711 	if (FIELD_OK(field)) { \
3712 		opts->field = value; \
3713 	} \
3714 
3715 	SET_FIELD(cluster_sz, SPDK_BLOB_OPTS_CLUSTER_SZ);
3716 	SET_FIELD(num_md_pages, SPDK_BLOB_OPTS_NUM_MD_PAGES);
	SET_FIELD(max_md_ops, SPDK_BLOB_OPTS_MAX_MD_OPS);
	SET_FIELD(max_channel_ops, SPDK_BLOB_OPTS_DEFAULT_CHANNEL_OPS);
	SET_FIELD(clear_method, BS_CLEAR_WITH_UNMAP);
3720 
3721 	if (FIELD_OK(bstype)) {
3722 		memset(&opts->bstype, 0, sizeof(opts->bstype));
3723 	}
3724 
3725 	SET_FIELD(iter_cb_fn, NULL);
3726 	SET_FIELD(iter_cb_arg, NULL);
3727 	SET_FIELD(force_recover, false);
3728 	SET_FIELD(esnap_bs_dev_create, NULL);
3729 	SET_FIELD(esnap_ctx, NULL);
3730 
3731 #undef FIELD_OK
3732 #undef SET_FIELD
3733 }
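
/*
 * Caller-side sketch (illustrative): passing sizeof(opts) as opts_size
 * lets a binary built against an older, smaller struct layout run with a
 * newer library, because SET_FIELD() above never writes past the
 * caller's declared size:
 *
 *	struct spdk_bs_opts opts;
 *
 *	spdk_bs_opts_init(&opts, sizeof(opts));
 *	opts.cluster_sz = 4 * 1024 * 1024;	// override the 1 MiB default
 */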
3734 
3735 static int
3736 bs_opts_verify(struct spdk_bs_opts *opts)
3737 {
3738 	if (opts->cluster_sz == 0 || opts->num_md_pages == 0 || opts->max_md_ops == 0 ||
3739 	    opts->max_channel_ops == 0) {
3740 		SPDK_ERRLOG("Blobstore options cannot be set to 0\n");
3741 		return -1;
3742 	}
3743 
3744 	return 0;
3745 }
3746 
3747 /* START spdk_bs_load */
3748 
3749 /* spdk_bs_load_ctx is used for init, load, unload and dump code paths. */
3750 
3751 struct spdk_bs_load_ctx {
3752 	struct spdk_blob_store		*bs;
3753 	struct spdk_bs_super_block	*super;
3754 
3755 	struct spdk_bs_md_mask		*mask;
3756 	bool				in_page_chain;
3757 	uint32_t			page_index;
3758 	uint32_t			cur_page;
3759 	struct spdk_blob_md_page	*page;
3760 
3761 	uint64_t			num_extent_pages;
3762 	uint32_t			*extent_page_num;
3763 	struct spdk_blob_md_page	*extent_pages;
3764 	struct spdk_bit_array		*used_clusters;
3765 
3766 	spdk_bs_sequence_t			*seq;
3767 	spdk_blob_op_with_handle_complete	iter_cb_fn;
3768 	void					*iter_cb_arg;
3769 	struct spdk_blob			*blob;
3770 	spdk_blob_id				blobid;
3771 
3772 	bool					force_recover;
3773 
3774 	/* These fields are used in the spdk_bs_dump path. */
3775 	bool					dumping;
3776 	FILE					*fp;
3777 	spdk_bs_dump_print_xattr		print_xattr_fn;
3778 	char					xattr_name[4096];
3779 };
3780 
3781 static int
3782 bs_alloc(struct spdk_bs_dev *dev, struct spdk_bs_opts *opts, struct spdk_blob_store **_bs,
3783 	 struct spdk_bs_load_ctx **_ctx)
3784 {
3785 	struct spdk_blob_store	*bs;
3786 	struct spdk_bs_load_ctx	*ctx;
3787 	uint64_t dev_size;
3788 	int rc;
3789 
3790 	dev_size = dev->blocklen * dev->blockcnt;
3791 	if (dev_size < opts->cluster_sz) {
3792 		/* Device size cannot be smaller than cluster size of blobstore */
3793 		SPDK_INFOLOG(blob, "Device size %" PRIu64 " is smaller than cluster size %" PRIu32 "\n",
3794 			     dev_size, opts->cluster_sz);
3795 		return -ENOSPC;
3796 	}
3797 	if (opts->cluster_sz < SPDK_BS_PAGE_SIZE) {
3798 		/* Cluster size cannot be smaller than page size */
3799 		SPDK_ERRLOG("Cluster size %" PRIu32 " is smaller than page size %d\n",
3800 			    opts->cluster_sz, SPDK_BS_PAGE_SIZE);
3801 		return -EINVAL;
3802 	}
3803 	bs = calloc(1, sizeof(struct spdk_blob_store));
3804 	if (!bs) {
3805 		return -ENOMEM;
3806 	}
3807 
3808 	ctx = calloc(1, sizeof(struct spdk_bs_load_ctx));
3809 	if (!ctx) {
3810 		free(bs);
3811 		return -ENOMEM;
3812 	}
3813 
3814 	ctx->bs = bs;
3815 	ctx->iter_cb_fn = opts->iter_cb_fn;
3816 	ctx->iter_cb_arg = opts->iter_cb_arg;
3817 	ctx->force_recover = opts->force_recover;
3818 
3819 	ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL,
3820 				  SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
3821 	if (!ctx->super) {
3822 		free(ctx);
3823 		free(bs);
3824 		return -ENOMEM;
3825 	}
3826 
3827 	RB_INIT(&bs->open_blobs);
3828 	TAILQ_INIT(&bs->snapshots);
3829 	bs->dev = dev;
3830 	bs->md_thread = spdk_get_thread();
3831 	assert(bs->md_thread != NULL);
3832 
3833 	/*
3834 	 * Do not use bs_lba_to_cluster() here since blockcnt may not be an
3835 	 *  even multiple of the cluster size.
3836 	 */
3837 	bs->cluster_sz = opts->cluster_sz;
3838 	bs->total_clusters = dev->blockcnt / (bs->cluster_sz / dev->blocklen);
3839 	ctx->used_clusters = spdk_bit_array_create(bs->total_clusters);
3840 	if (!ctx->used_clusters) {
3841 		spdk_free(ctx->super);
3842 		free(ctx);
3843 		free(bs);
3844 		return -ENOMEM;
3845 	}
3846 
3847 	bs->pages_per_cluster = bs->cluster_sz / SPDK_BS_PAGE_SIZE;
3848 	if (spdk_u32_is_pow2(bs->pages_per_cluster)) {
3849 		bs->pages_per_cluster_shift = spdk_u32log2(bs->pages_per_cluster);
3850 	}
3851 	bs->num_free_clusters = bs->total_clusters;
3852 	bs->io_unit_size = dev->blocklen;
3853 
3854 	bs->max_channel_ops = opts->max_channel_ops;
3855 	bs->super_blob = SPDK_BLOBID_INVALID;
3856 	memcpy(&bs->bstype, &opts->bstype, sizeof(opts->bstype));
3857 	bs->esnap_bs_dev_create = opts->esnap_bs_dev_create;
3858 	bs->esnap_ctx = opts->esnap_ctx;
3859 
3860 	/* The metadata is assumed to be at least 1 page */
3861 	bs->used_md_pages = spdk_bit_array_create(1);
3862 	bs->used_blobids = spdk_bit_array_create(0);
3863 	bs->open_blobids = spdk_bit_array_create(0);
3864 
3865 	spdk_spin_init(&bs->used_lock);
3866 
3867 	spdk_io_device_register(bs, bs_channel_create, bs_channel_destroy,
3868 				sizeof(struct spdk_bs_channel), "blobstore");
3869 	rc = bs_register_md_thread(bs);
3870 	if (rc == -1) {
3871 		spdk_io_device_unregister(bs, NULL);
3872 		spdk_spin_destroy(&bs->used_lock);
3873 		spdk_bit_array_free(&bs->open_blobids);
3874 		spdk_bit_array_free(&bs->used_blobids);
3875 		spdk_bit_array_free(&bs->used_md_pages);
3876 		spdk_bit_array_free(&ctx->used_clusters);
3877 		spdk_free(ctx->super);
3878 		free(ctx);
3879 		free(bs);
3880 		/* FIXME: this is a lie but don't know how to get a proper error code here */
3881 		return -ENOMEM;
3882 	}
3883 
3884 	*_ctx = ctx;
3885 	*_bs = bs;
3886 	return 0;
3887 }
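
/*
 * Worked example of the sizing math in bs_alloc() (illustrative numbers):
 * with dev->blocklen = 512, dev->blockcnt = 2097152 (1 GiB) and the
 * default 1 MiB cluster_sz, each cluster spans 1048576 / 512 = 2048
 * blocks, so total_clusters = 2097152 / 2048 = 1024. pages_per_cluster =
 * 1048576 / 4096 = 256, a power of two, so pages_per_cluster_shift = 8
 * and the shift-based conversion fast path applies.
 */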
3888 
3889 static void
3890 bs_load_ctx_fail(struct spdk_bs_load_ctx *ctx, int bserrno)
3891 {
3892 	assert(bserrno != 0);
3893 
3894 	spdk_free(ctx->super);
3895 	bs_sequence_finish(ctx->seq, bserrno);
3896 	bs_free(ctx->bs);
3897 	spdk_bit_array_free(&ctx->used_clusters);
3898 	free(ctx);
3899 }
3900 
3901 static void
3902 bs_write_super(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs,
3903 	       struct spdk_bs_super_block *super, spdk_bs_sequence_cpl cb_fn, void *cb_arg)
3904 {
3905 	/* Update the values in the super block */
3906 	super->super_blob = bs->super_blob;
3907 	memcpy(&super->bstype, &bs->bstype, sizeof(bs->bstype));
3908 	super->crc = blob_md_page_calc_crc(super);
3909 	bs_sequence_write_dev(seq, super, bs_page_to_lba(bs, 0),
3910 			      bs_byte_to_lba(bs, sizeof(*super)),
3911 			      cb_fn, cb_arg);
3912 }
3913 
3914 static void
3915 bs_write_used_clusters(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn)
3916 {
3917 	struct spdk_bs_load_ctx	*ctx = arg;
3918 	uint64_t	mask_size, lba, lba_count;
3919 
3920 	/* Write out the used clusters mask */
3921 	mask_size = ctx->super->used_cluster_mask_len * SPDK_BS_PAGE_SIZE;
3922 	ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL,
3923 				 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
3924 	if (!ctx->mask) {
3925 		bs_load_ctx_fail(ctx, -ENOMEM);
3926 		return;
3927 	}
3928 
3929 	ctx->mask->type = SPDK_MD_MASK_TYPE_USED_CLUSTERS;
3930 	ctx->mask->length = ctx->bs->total_clusters;
3931 	/* We could get here through the normal unload path, or through dirty
3932 	 * shutdown recovery.  For the normal unload path, we use the mask from
3933 	 * the bit pool.  For dirty shutdown recovery, we don't have a bit pool yet -
3934 	 * only the bit array from the load ctx.
3935 	 */
3936 	if (ctx->bs->used_clusters) {
3937 		assert(ctx->mask->length == spdk_bit_pool_capacity(ctx->bs->used_clusters));
3938 		spdk_bit_pool_store_mask(ctx->bs->used_clusters, ctx->mask->mask);
3939 	} else {
3940 		assert(ctx->mask->length == spdk_bit_array_capacity(ctx->used_clusters));
3941 		spdk_bit_array_store_mask(ctx->used_clusters, ctx->mask->mask);
3942 	}
3943 	lba = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start);
3944 	lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len);
3945 	bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg);
3946 }
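
/*
 * Sketch of the serialized mask region written above, assuming the
 * spdk_bs_md_mask layout declared in blobstore.h:
 *
 *	uint8_t  type;    // e.g. SPDK_MD_MASK_TYPE_USED_CLUSTERS
 *	uint32_t length;  // number of valid bits in mask[]
 *	uint8_t  mask[];  // the bitmap itself, padded out to whole pages
 *
 * The used-md-pages and used-blobid masks below reuse the same shape;
 * only type, length and the target LBA range differ.
 */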
3947 
3948 static void
3949 bs_write_used_md(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn)
3950 {
3951 	struct spdk_bs_load_ctx	*ctx = arg;
3952 	uint64_t	mask_size, lba, lba_count;
3953 
3954 	mask_size = ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE;
3955 	ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL,
3956 				 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
3957 	if (!ctx->mask) {
3958 		bs_load_ctx_fail(ctx, -ENOMEM);
3959 		return;
3960 	}
3961 
3962 	ctx->mask->type = SPDK_MD_MASK_TYPE_USED_PAGES;
3963 	ctx->mask->length = ctx->super->md_len;
3964 	assert(ctx->mask->length == spdk_bit_array_capacity(ctx->bs->used_md_pages));
3965 
3966 	spdk_bit_array_store_mask(ctx->bs->used_md_pages, ctx->mask->mask);
3967 	lba = bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_start);
3968 	lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_len);
3969 	bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg);
3970 }
3971 
3972 static void
3973 bs_write_used_blobids(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn)
3974 {
3975 	struct spdk_bs_load_ctx	*ctx = arg;
3976 	uint64_t	mask_size, lba, lba_count;
3977 
3978 	if (ctx->super->used_blobid_mask_len == 0) {
3979 		/*
3980 		 * This is a pre-v3 on-disk format where the blobid mask does not get
3981 		 *  written to disk.
3982 		 */
3983 		cb_fn(seq, arg, 0);
3984 		return;
3985 	}
3986 
3987 	mask_size = ctx->super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE;
3988 	ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_SOCKET_ID_ANY,
3989 				 SPDK_MALLOC_DMA);
3990 	if (!ctx->mask) {
3991 		bs_load_ctx_fail(ctx, -ENOMEM);
3992 		return;
3993 	}
3994 
3995 	ctx->mask->type = SPDK_MD_MASK_TYPE_USED_BLOBIDS;
3996 	ctx->mask->length = ctx->super->md_len;
3997 	assert(ctx->mask->length == spdk_bit_array_capacity(ctx->bs->used_blobids));
3998 
3999 	spdk_bit_array_store_mask(ctx->bs->used_blobids, ctx->mask->mask);
4000 	lba = bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_start);
4001 	lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_len);
4002 	bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg);
4003 }
4004 
4005 static void
4006 blob_set_thin_provision(struct spdk_blob *blob)
4007 {
4008 	blob_verify_md_op(blob);
4009 	blob->invalid_flags |= SPDK_BLOB_THIN_PROV;
4010 	blob->state = SPDK_BLOB_STATE_DIRTY;
4011 }
4012 
4013 static void
4014 blob_set_clear_method(struct spdk_blob *blob, enum blob_clear_method clear_method)
4015 {
4016 	blob_verify_md_op(blob);
4017 	blob->clear_method = clear_method;
4018 	blob->md_ro_flags |= (clear_method << SPDK_BLOB_CLEAR_METHOD_SHIFT);
4019 	blob->state = SPDK_BLOB_STATE_DIRTY;
4020 }
4021 
4022 static void bs_load_iter(void *arg, struct spdk_blob *blob, int bserrno);
4023 
4024 static void
4025 bs_delete_corrupted_blob_cpl(void *cb_arg, int bserrno)
4026 {
4027 	struct spdk_bs_load_ctx *ctx = cb_arg;
4028 	spdk_blob_id id;
4029 	int64_t page_num;
4030 
	/* Iterate to the next blob (we can't use spdk_bs_iter_next() since our
	 * last blob has been removed) */
4033 	page_num = bs_blobid_to_page(ctx->blobid);
4034 	page_num++;
4035 	page_num = spdk_bit_array_find_first_set(ctx->bs->used_blobids, page_num);
4036 	if (page_num >= spdk_bit_array_capacity(ctx->bs->used_blobids)) {
4037 		bs_load_iter(ctx, NULL, -ENOENT);
4038 		return;
4039 	}
4040 
4041 	id = bs_page_to_blobid(page_num);
4042 
4043 	spdk_bs_open_blob(ctx->bs, id, bs_load_iter, ctx);
4044 }
4045 
4046 static void
4047 bs_delete_corrupted_close_cb(void *cb_arg, int bserrno)
4048 {
4049 	struct spdk_bs_load_ctx *ctx = cb_arg;
4050 
4051 	if (bserrno != 0) {
4052 		SPDK_ERRLOG("Failed to close corrupted blob\n");
4053 		spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx);
4054 		return;
4055 	}
4056 
4057 	spdk_bs_delete_blob(ctx->bs, ctx->blobid, bs_delete_corrupted_blob_cpl, ctx);
4058 }
4059 
4060 static void
4061 bs_delete_corrupted_blob(void *cb_arg, int bserrno)
4062 {
4063 	struct spdk_bs_load_ctx *ctx = cb_arg;
4064 	uint64_t i;
4065 
4066 	if (bserrno != 0) {
4067 		SPDK_ERRLOG("Failed to close clone of a corrupted blob\n");
4068 		spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx);
4069 		return;
4070 	}
4071 
	/* The snapshot and the clone share the same copy of the cluster map
	 * and extent pages at this point. Clear both in the snapshot now, so
	 * that they are not cleared in the clone later when the snapshot is
	 * removed. Also set thin provisioning to pass the data corruption check. */
4076 	for (i = 0; i < ctx->blob->active.num_clusters; i++) {
4077 		ctx->blob->active.clusters[i] = 0;
4078 	}
4079 	for (i = 0; i < ctx->blob->active.num_extent_pages; i++) {
4080 		ctx->blob->active.extent_pages[i] = 0;
4081 	}
4082 
4083 	ctx->blob->md_ro = false;
4084 
4085 	blob_set_thin_provision(ctx->blob);
4086 
4087 	ctx->blobid = ctx->blob->id;
4088 
4089 	spdk_blob_close(ctx->blob, bs_delete_corrupted_close_cb, ctx);
4090 }
4091 
4092 static void
4093 bs_update_corrupted_blob(void *cb_arg, int bserrno)
4094 {
4095 	struct spdk_bs_load_ctx *ctx = cb_arg;
4096 
4097 	if (bserrno != 0) {
4098 		SPDK_ERRLOG("Failed to close clone of a corrupted blob\n");
4099 		spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx);
4100 		return;
4101 	}
4102 
4103 	ctx->blob->md_ro = false;
4104 	blob_remove_xattr(ctx->blob, SNAPSHOT_PENDING_REMOVAL, true);
4105 	blob_remove_xattr(ctx->blob, SNAPSHOT_IN_PROGRESS, true);
4106 	spdk_blob_set_read_only(ctx->blob);
4107 
4108 	if (ctx->iter_cb_fn) {
4109 		ctx->iter_cb_fn(ctx->iter_cb_arg, ctx->blob, 0);
4110 	}
4111 	bs_blob_list_add(ctx->blob);
4112 
4113 	spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx);
4114 }
4115 
4116 static void
4117 bs_examine_clone(void *cb_arg, struct spdk_blob *blob, int bserrno)
4118 {
4119 	struct spdk_bs_load_ctx *ctx = cb_arg;
4120 
4121 	if (bserrno != 0) {
4122 		SPDK_ERRLOG("Failed to open clone of a corrupted blob\n");
4123 		spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx);
4124 		return;
4125 	}
4126 
4127 	if (blob->parent_id == ctx->blob->id) {
4128 		/* Power failure occurred before updating clone (snapshot delete case)
4129 		 * or after updating clone (creating snapshot case) - keep snapshot */
4130 		spdk_blob_close(blob, bs_update_corrupted_blob, ctx);
4131 	} else {
4132 		/* Power failure occurred after updating clone (snapshot delete case)
4133 		 * or before updating clone (creating snapshot case) - remove snapshot */
4134 		spdk_blob_close(blob, bs_delete_corrupted_blob, ctx);
4135 	}
4136 }
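
/*
 * Summary of the branch above (illustrative): after a crash, the clone's
 * parent pointer decides the snapshot's fate.
 *
 *	clone->parent_id == snapshot->id  -> clone still references the
 *	                                     snapshot: keep it, clear its
 *	                                     pending xattrs
 *	clone->parent_id != snapshot->id  -> clone no longer references the
 *	                                     snapshot: delete it
 */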
4137 
4138 static void
4139 bs_load_iter(void *arg, struct spdk_blob *blob, int bserrno)
4140 {
4141 	struct spdk_bs_load_ctx *ctx = arg;
4142 	const void *value;
4143 	size_t len;
4144 	int rc = 0;
4145 
4146 	if (bserrno == 0) {
		/* Examine the blob to see if it was corrupted by a power failure.
		 * Fix the ones that can be fixed and remove any others. If the
		 * blob is not corrupted, just process it. */
4150 		rc = blob_get_xattr_value(blob, SNAPSHOT_PENDING_REMOVAL, &value, &len, true);
4151 		if (rc != 0) {
4152 			rc = blob_get_xattr_value(blob, SNAPSHOT_IN_PROGRESS, &value, &len, true);
4153 			if (rc != 0) {
4154 				/* Not corrupted - process it and continue with iterating through blobs */
4155 				if (ctx->iter_cb_fn) {
4156 					ctx->iter_cb_fn(ctx->iter_cb_arg, blob, 0);
4157 				}
4158 				bs_blob_list_add(blob);
4159 				spdk_bs_iter_next(ctx->bs, blob, bs_load_iter, ctx);
4160 				return;
4161 			}
4162 
4163 		}
4164 
4165 		assert(len == sizeof(spdk_blob_id));
4166 
4167 		ctx->blob = blob;
4168 
4169 		/* Open clone to check if we are able to fix this blob or should we remove it */
4170 		spdk_bs_open_blob(ctx->bs, *(spdk_blob_id *)value, bs_examine_clone, ctx);
4171 		return;
4172 	} else if (bserrno == -ENOENT) {
4173 		bserrno = 0;
4174 	} else {
4175 		/*
4176 		 * This case needs to be looked at further.  Same problem
4177 		 *  exists with applications that rely on explicit blob
4178 		 *  iteration.  We should just skip the blob that failed
4179 		 *  to load and continue on to the next one.
4180 		 */
4181 		SPDK_ERRLOG("Error in iterating blobs\n");
4182 	}
4183 
4184 	ctx->iter_cb_fn = NULL;
4185 
4186 	spdk_free(ctx->super);
4187 	spdk_free(ctx->mask);
4188 	bs_sequence_finish(ctx->seq, bserrno);
4189 	free(ctx);
4190 }
4191 
4192 static void bs_dump_read_md_page(spdk_bs_sequence_t *seq, void *cb_arg);
4193 
4194 static void
4195 bs_load_complete(struct spdk_bs_load_ctx *ctx)
4196 {
4197 	ctx->bs->used_clusters = spdk_bit_pool_create_from_array(ctx->used_clusters);
4198 	if (ctx->dumping) {
4199 		bs_dump_read_md_page(ctx->seq, ctx);
4200 		return;
4201 	}
4202 	spdk_bs_iter_first(ctx->bs, bs_load_iter, ctx);
4203 }
4204 
4205 static void
4206 bs_load_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4207 {
4208 	struct spdk_bs_load_ctx *ctx = cb_arg;
4209 	int rc;
4210 
4211 	/* The type must be correct */
4212 	assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_BLOBIDS);
4213 
4214 	/* The length of the mask (in bits) must not be greater than
4215 	 * the length of the buffer (converted to bits) */
4216 	assert(ctx->mask->length <= (ctx->super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE * 8));
4217 
4218 	/* The length of the mask must be exactly equal to the size
4219 	 * (in pages) of the metadata region */
4220 	assert(ctx->mask->length == ctx->super->md_len);
4221 
4222 	rc = spdk_bit_array_resize(&ctx->bs->used_blobids, ctx->mask->length);
4223 	if (rc < 0) {
4224 		spdk_free(ctx->mask);
4225 		bs_load_ctx_fail(ctx, rc);
4226 		return;
4227 	}
4228 
4229 	spdk_bit_array_load_mask(ctx->bs->used_blobids, ctx->mask->mask);
4230 	bs_load_complete(ctx);
4231 }
4232 
4233 static void
4234 bs_load_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4235 {
4236 	struct spdk_bs_load_ctx *ctx = cb_arg;
4237 	uint64_t		lba, lba_count, mask_size;
4238 	int			rc;
4239 
4240 	if (bserrno != 0) {
4241 		bs_load_ctx_fail(ctx, bserrno);
4242 		return;
4243 	}
4244 
4245 	/* The type must be correct */
4246 	assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_CLUSTERS);
4247 	/* The length of the mask (in bits) must not be greater than the length of the buffer (converted to bits) */
4248 	assert(ctx->mask->length <= (ctx->super->used_cluster_mask_len * sizeof(
4249 					     struct spdk_blob_md_page) * 8));
4250 	/*
4251 	 * The length of the mask must be equal to or larger than the total number of clusters. It may be
	 * larger than the total number of clusters due to a failed spdk_bs_grow.
4253 	 */
4254 	assert(ctx->mask->length >= ctx->bs->total_clusters);
4255 	if (ctx->mask->length > ctx->bs->total_clusters) {
		SPDK_WARNLOG("Shrinking the used_clusters mask length to total_clusters\n");
4257 		ctx->mask->length = ctx->bs->total_clusters;
4258 	}
4259 
4260 	rc = spdk_bit_array_resize(&ctx->used_clusters, ctx->mask->length);
4261 	if (rc < 0) {
4262 		spdk_free(ctx->mask);
4263 		bs_load_ctx_fail(ctx, rc);
4264 		return;
4265 	}
4266 
4267 	spdk_bit_array_load_mask(ctx->used_clusters, ctx->mask->mask);
4268 	ctx->bs->num_free_clusters = spdk_bit_array_count_clear(ctx->used_clusters);
4269 	assert(ctx->bs->num_free_clusters <= ctx->bs->total_clusters);
4270 
4271 	spdk_free(ctx->mask);
4272 
4273 	/* Read the used blobids mask */
4274 	mask_size = ctx->super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE;
4275 	ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_SOCKET_ID_ANY,
4276 				 SPDK_MALLOC_DMA);
4277 	if (!ctx->mask) {
4278 		bs_load_ctx_fail(ctx, -ENOMEM);
4279 		return;
4280 	}
4281 	lba = bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_start);
4282 	lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_len);
4283 	bs_sequence_read_dev(seq, ctx->mask, lba, lba_count,
4284 			     bs_load_used_blobids_cpl, ctx);
4285 }
4286 
4287 static void
4288 bs_load_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4289 {
4290 	struct spdk_bs_load_ctx *ctx = cb_arg;
4291 	uint64_t		lba, lba_count, mask_size;
4292 	int			rc;
4293 
4294 	if (bserrno != 0) {
4295 		bs_load_ctx_fail(ctx, bserrno);
4296 		return;
4297 	}
4298 
4299 	/* The type must be correct */
4300 	assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_PAGES);
4301 	/* The length of the mask (in bits) must not be greater than the length of the buffer (converted to bits) */
4302 	assert(ctx->mask->length <= (ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE *
4303 				     8));
4304 	/* The length of the mask must be exactly equal to the size (in pages) of the metadata region */
4305 	if (ctx->mask->length != ctx->super->md_len) {
4306 		SPDK_ERRLOG("mismatched md_len in used_pages mask: "
4307 			    "mask->length=%" PRIu32 " super->md_len=%" PRIu32 "\n",
4308 			    ctx->mask->length, ctx->super->md_len);
4309 		assert(false);
4310 	}
4311 
4312 	rc = spdk_bit_array_resize(&ctx->bs->used_md_pages, ctx->mask->length);
4313 	if (rc < 0) {
4314 		spdk_free(ctx->mask);
4315 		bs_load_ctx_fail(ctx, rc);
4316 		return;
4317 	}
4318 
4319 	spdk_bit_array_load_mask(ctx->bs->used_md_pages, ctx->mask->mask);
4320 	spdk_free(ctx->mask);
4321 
4322 	/* Read the used clusters mask */
4323 	mask_size = ctx->super->used_cluster_mask_len * SPDK_BS_PAGE_SIZE;
4324 	ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_SOCKET_ID_ANY,
4325 				 SPDK_MALLOC_DMA);
4326 	if (!ctx->mask) {
4327 		bs_load_ctx_fail(ctx, -ENOMEM);
4328 		return;
4329 	}
4330 	lba = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start);
4331 	lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len);
4332 	bs_sequence_read_dev(seq, ctx->mask, lba, lba_count,
4333 			     bs_load_used_clusters_cpl, ctx);
4334 }
4335 
4336 static void
4337 bs_load_read_used_pages(struct spdk_bs_load_ctx *ctx)
4338 {
4339 	uint64_t lba, lba_count, mask_size;
4340 
4341 	/* Read the used pages mask */
4342 	mask_size = ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE;
4343 	ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL,
4344 				 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
4345 	if (!ctx->mask) {
4346 		bs_load_ctx_fail(ctx, -ENOMEM);
4347 		return;
4348 	}
4349 
4350 	lba = bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_start);
4351 	lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_len);
4352 	bs_sequence_read_dev(ctx->seq, ctx->mask, lba, lba_count,
4353 			     bs_load_used_pages_cpl, ctx);
4354 }
4355 
4356 static int
4357 bs_load_replay_md_parse_page(struct spdk_bs_load_ctx *ctx, struct spdk_blob_md_page *page)
4358 {
4359 	struct spdk_blob_store *bs = ctx->bs;
4360 	struct spdk_blob_md_descriptor *desc;
4361 	size_t	cur_desc = 0;
4362 
4363 	desc = (struct spdk_blob_md_descriptor *)page->descriptors;
4364 	while (cur_desc < sizeof(page->descriptors)) {
4365 		if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) {
4366 			if (desc->length == 0) {
4367 				/* If padding and length are 0, this terminates the page */
4368 				break;
4369 			}
4370 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE) {
4371 			struct spdk_blob_md_descriptor_extent_rle	*desc_extent_rle;
4372 			unsigned int				i, j;
4373 			unsigned int				cluster_count = 0;
4374 			uint32_t				cluster_idx;
4375 
4376 			desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)desc;
4377 
4378 			for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) {
4379 				for (j = 0; j < desc_extent_rle->extents[i].length; j++) {
4380 					cluster_idx = desc_extent_rle->extents[i].cluster_idx;
4381 					/*
4382 					 * cluster_idx = 0 means an unallocated cluster - don't mark that
4383 					 * in the used cluster map.
4384 					 */
4385 					if (cluster_idx != 0) {
4386 						SPDK_NOTICELOG("Recover: cluster %" PRIu32 "\n", cluster_idx + j);
4387 						spdk_bit_array_set(ctx->used_clusters, cluster_idx + j);
4388 						if (bs->num_free_clusters == 0) {
4389 							return -ENOSPC;
4390 						}
4391 						bs->num_free_clusters--;
4392 					}
4393 					cluster_count++;
4394 				}
4395 			}
4396 			if (cluster_count == 0) {
4397 				return -EINVAL;
4398 			}
4399 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) {
4400 			struct spdk_blob_md_descriptor_extent_page	*desc_extent;
4401 			uint32_t					i;
4402 			uint32_t					cluster_count = 0;
4403 			uint32_t					cluster_idx;
4404 			size_t						cluster_idx_length;
4405 
4406 			desc_extent = (struct spdk_blob_md_descriptor_extent_page *)desc;
4407 			cluster_idx_length = desc_extent->length - sizeof(desc_extent->start_cluster_idx);
4408 
4409 			if (desc_extent->length <= sizeof(desc_extent->start_cluster_idx) ||
4410 			    (cluster_idx_length % sizeof(desc_extent->cluster_idx[0]) != 0)) {
4411 				return -EINVAL;
4412 			}
4413 
4414 			for (i = 0; i < cluster_idx_length / sizeof(desc_extent->cluster_idx[0]); i++) {
4415 				cluster_idx = desc_extent->cluster_idx[i];
4416 				/*
4417 				 * cluster_idx = 0 means an unallocated cluster - don't mark that
4418 				 * in the used cluster map.
4419 				 */
4420 				if (cluster_idx != 0) {
4421 					if (cluster_idx < desc_extent->start_cluster_idx &&
4422 					    cluster_idx >= desc_extent->start_cluster_idx + cluster_count) {
4423 						return -EINVAL;
4424 					}
4425 					spdk_bit_array_set(ctx->used_clusters, cluster_idx);
4426 					if (bs->num_free_clusters == 0) {
4427 						return -ENOSPC;
4428 					}
4429 					bs->num_free_clusters--;
4430 				}
4431 				cluster_count++;
4432 			}
4433 
4434 			if (cluster_count == 0) {
4435 				return -EINVAL;
4436 			}
4437 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) {
4438 			/* Skip this item */
4439 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) {
4440 			/* Skip this item */
4441 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) {
4442 			/* Skip this item */
4443 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE) {
4444 			struct spdk_blob_md_descriptor_extent_table *desc_extent_table;
4445 			uint32_t num_extent_pages = ctx->num_extent_pages;
4446 			uint32_t i;
4447 			size_t extent_pages_length;
4448 			void *tmp;
4449 
4450 			desc_extent_table = (struct spdk_blob_md_descriptor_extent_table *)desc;
4451 			extent_pages_length = desc_extent_table->length - sizeof(desc_extent_table->num_clusters);
4452 
4453 			if (desc_extent_table->length == 0 ||
4454 			    (extent_pages_length % sizeof(desc_extent_table->extent_page[0]) != 0)) {
4455 				return -EINVAL;
4456 			}
4457 
4458 			for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) {
4459 				if (desc_extent_table->extent_page[i].page_idx != 0) {
4460 					if (desc_extent_table->extent_page[i].num_pages != 1) {
4461 						return -EINVAL;
4462 					}
4463 					num_extent_pages += 1;
4464 				}
4465 			}
4466 
4467 			if (num_extent_pages > 0) {
4468 				tmp = realloc(ctx->extent_page_num, num_extent_pages * sizeof(uint32_t));
4469 				if (tmp == NULL) {
4470 					return -ENOMEM;
4471 				}
4472 				ctx->extent_page_num = tmp;
4473 
4474 				/* Extent table entries contain md page numbers for extent pages.
				 * Zeroes represent unallocated extent pages and are run-length encoded.
4476 				 */
4477 				for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) {
4478 					if (desc_extent_table->extent_page[i].page_idx != 0) {
4479 						ctx->extent_page_num[ctx->num_extent_pages] = desc_extent_table->extent_page[i].page_idx;
4480 						ctx->num_extent_pages += 1;
4481 					}
4482 				}
4483 			}
4484 		} else {
4485 			/* Error */
4486 			return -EINVAL;
4487 		}
4488 		/* Advance to the next descriptor */
4489 		cur_desc += sizeof(*desc) + desc->length;
4490 		if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) {
4491 			break;
4492 		}
4493 		desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc);
4494 	}
4495 	return 0;
4496 }
4497 
4498 static bool
4499 bs_load_cur_extent_page_valid(struct spdk_blob_md_page *page)
4500 {
4501 	uint32_t crc;
4502 	struct spdk_blob_md_descriptor *desc = (struct spdk_blob_md_descriptor *)page->descriptors;
4503 	size_t desc_len;
4504 
4505 	crc = blob_md_page_calc_crc(page);
4506 	if (crc != page->crc) {
4507 		return false;
4508 	}
4509 
4510 	/* Extent page should always be of sequence num 0. */
4511 	if (page->sequence_num != 0) {
4512 		return false;
4513 	}
4514 
4515 	/* Descriptor type must be EXTENT_PAGE. */
4516 	if (desc->type != SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) {
4517 		return false;
4518 	}
4519 
4520 	/* Descriptor length cannot exceed the page. */
4521 	desc_len = sizeof(*desc) + desc->length;
4522 	if (desc_len > sizeof(page->descriptors)) {
4523 		return false;
4524 	}
4525 
4526 	/* It has to be the only descriptor in the page. */
4527 	if (desc_len + sizeof(*desc) <= sizeof(page->descriptors)) {
4528 		desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + desc_len);
4529 		if (desc->length != 0) {
4530 			return false;
4531 		}
4532 	}
4533 
4534 	return true;
4535 }
4536 
4537 static bool
4538 bs_load_cur_md_page_valid(struct spdk_bs_load_ctx *ctx)
4539 {
4540 	uint32_t crc;
4541 	struct spdk_blob_md_page *page = ctx->page;
4542 
4543 	crc = blob_md_page_calc_crc(page);
4544 	if (crc != page->crc) {
4545 		return false;
4546 	}
4547 
4548 	/* First page of a sequence should match the blobid. */
4549 	if (page->sequence_num == 0 &&
4550 	    bs_page_to_blobid(ctx->cur_page) != page->id) {
4551 		return false;
4552 	}
4553 	assert(bs_load_cur_extent_page_valid(page) == false);
4554 
4555 	return true;
4556 }
4557 
4558 static void bs_load_replay_cur_md_page(struct spdk_bs_load_ctx *ctx);
4559 
4560 static void
4561 bs_load_write_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4562 {
4563 	struct spdk_bs_load_ctx	*ctx = cb_arg;
4564 
4565 	if (bserrno != 0) {
4566 		bs_load_ctx_fail(ctx, bserrno);
4567 		return;
4568 	}
4569 
4570 	bs_load_complete(ctx);
4571 }
4572 
4573 static void
4574 bs_load_write_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4575 {
4576 	struct spdk_bs_load_ctx	*ctx = cb_arg;
4577 
4578 	spdk_free(ctx->mask);
4579 	ctx->mask = NULL;
4580 
4581 	if (bserrno != 0) {
4582 		bs_load_ctx_fail(ctx, bserrno);
4583 		return;
4584 	}
4585 
4586 	bs_write_used_clusters(seq, ctx, bs_load_write_used_clusters_cpl);
4587 }
4588 
4589 static void
4590 bs_load_write_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4591 {
4592 	struct spdk_bs_load_ctx	*ctx = cb_arg;
4593 
4594 	spdk_free(ctx->mask);
4595 	ctx->mask = NULL;
4596 
4597 	if (bserrno != 0) {
4598 		bs_load_ctx_fail(ctx, bserrno);
4599 		return;
4600 	}
4601 
4602 	bs_write_used_blobids(seq, ctx, bs_load_write_used_blobids_cpl);
4603 }
4604 
4605 static void
4606 bs_load_write_used_md(struct spdk_bs_load_ctx *ctx)
4607 {
4608 	bs_write_used_md(ctx->seq, ctx, bs_load_write_used_pages_cpl);
4609 }
4610 
4611 static void
4612 bs_load_replay_md_chain_cpl(struct spdk_bs_load_ctx *ctx)
4613 {
4614 	uint64_t num_md_clusters;
4615 	uint64_t i;
4616 
4617 	ctx->in_page_chain = false;
4618 
4619 	do {
4620 		ctx->page_index++;
4621 	} while (spdk_bit_array_get(ctx->bs->used_md_pages, ctx->page_index) == true);
4622 
4623 	if (ctx->page_index < ctx->super->md_len) {
4624 		ctx->cur_page = ctx->page_index;
4625 		bs_load_replay_cur_md_page(ctx);
4626 	} else {
4627 		/* Claim all of the clusters used by the metadata */
4628 		num_md_clusters = spdk_divide_round_up(
4629 					  ctx->super->md_start + ctx->super->md_len, ctx->bs->pages_per_cluster);
4630 		for (i = 0; i < num_md_clusters; i++) {
4631 			spdk_bit_array_set(ctx->used_clusters, i);
4632 		}
4633 		ctx->bs->num_free_clusters -= num_md_clusters;
4634 		spdk_free(ctx->page);
4635 		bs_load_write_used_md(ctx);
4636 	}
4637 }
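
/*
 * Worked example of the metadata-cluster claim above (illustrative
 * numbers): with md_start = 3, md_len = 509 and pages_per_cluster = 256,
 * the metadata region ends at page 512, so divide_round_up(512, 256) = 2
 * clusters are claimed (clusters 0 and 1) and num_free_clusters drops by 2.
 */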
4638 
4639 static void
4640 bs_load_replay_extent_page_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4641 {
4642 	struct spdk_bs_load_ctx *ctx = cb_arg;
4643 	uint32_t page_num;
4644 	uint64_t i;
4645 
4646 	if (bserrno != 0) {
4647 		spdk_free(ctx->extent_pages);
4648 		bs_load_ctx_fail(ctx, bserrno);
4649 		return;
4650 	}
4651 
4652 	for (i = 0; i < ctx->num_extent_pages; i++) {
		/* Extent pages are only read when referenced within the metadata chain.
		 * The metadata integrity is suspect if such a page is not a valid extent page. */
4655 		if (bs_load_cur_extent_page_valid(&ctx->extent_pages[i]) != true) {
4656 			spdk_free(ctx->extent_pages);
4657 			bs_load_ctx_fail(ctx, -EILSEQ);
4658 			return;
4659 		}
4660 
4661 		page_num = ctx->extent_page_num[i];
4662 		spdk_bit_array_set(ctx->bs->used_md_pages, page_num);
4663 		if (bs_load_replay_md_parse_page(ctx, &ctx->extent_pages[i])) {
4664 			spdk_free(ctx->extent_pages);
4665 			bs_load_ctx_fail(ctx, -EILSEQ);
4666 			return;
4667 		}
4668 	}
4669 
4670 	spdk_free(ctx->extent_pages);
4671 	free(ctx->extent_page_num);
4672 	ctx->extent_page_num = NULL;
4673 	ctx->num_extent_pages = 0;
4674 
4675 	bs_load_replay_md_chain_cpl(ctx);
4676 }
4677 
4678 static void
4679 bs_load_replay_extent_pages(struct spdk_bs_load_ctx *ctx)
4680 {
4681 	spdk_bs_batch_t *batch;
4682 	uint32_t page;
4683 	uint64_t lba;
4684 	uint64_t i;
4685 
4686 	ctx->extent_pages = spdk_zmalloc(SPDK_BS_PAGE_SIZE * ctx->num_extent_pages, 0,
4687 					 NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
4688 	if (!ctx->extent_pages) {
4689 		bs_load_ctx_fail(ctx, -ENOMEM);
4690 		return;
4691 	}
4692 
4693 	batch = bs_sequence_to_batch(ctx->seq, bs_load_replay_extent_page_cpl, ctx);
4694 
4695 	for (i = 0; i < ctx->num_extent_pages; i++) {
4696 		page = ctx->extent_page_num[i];
4697 		assert(page < ctx->super->md_len);
4698 		lba = bs_md_page_to_lba(ctx->bs, page);
4699 		bs_batch_read_dev(batch, &ctx->extent_pages[i], lba,
4700 				  bs_byte_to_lba(ctx->bs, SPDK_BS_PAGE_SIZE));
4701 	}
4702 
4703 	bs_batch_close(batch);
4704 }
4705 
4706 static void
4707 bs_load_replay_md_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4708 {
4709 	struct spdk_bs_load_ctx *ctx = cb_arg;
4710 	uint32_t page_num;
4711 	struct spdk_blob_md_page *page;
4712 
4713 	if (bserrno != 0) {
4714 		bs_load_ctx_fail(ctx, bserrno);
4715 		return;
4716 	}
4717 
4718 	page_num = ctx->cur_page;
4719 	page = ctx->page;
4720 	if (bs_load_cur_md_page_valid(ctx) == true) {
4721 		if (page->sequence_num == 0 || ctx->in_page_chain == true) {
4722 			spdk_spin_lock(&ctx->bs->used_lock);
4723 			bs_claim_md_page(ctx->bs, page_num);
4724 			spdk_spin_unlock(&ctx->bs->used_lock);
4725 			if (page->sequence_num == 0) {
4726 				SPDK_NOTICELOG("Recover: blob 0x%" PRIx32 "\n", page_num);
4727 				spdk_bit_array_set(ctx->bs->used_blobids, page_num);
4728 			}
4729 			if (bs_load_replay_md_parse_page(ctx, page)) {
4730 				bs_load_ctx_fail(ctx, -EILSEQ);
4731 				return;
4732 			}
4733 			if (page->next != SPDK_INVALID_MD_PAGE) {
4734 				ctx->in_page_chain = true;
4735 				ctx->cur_page = page->next;
4736 				bs_load_replay_cur_md_page(ctx);
4737 				return;
4738 			}
4739 			if (ctx->num_extent_pages != 0) {
4740 				bs_load_replay_extent_pages(ctx);
4741 				return;
4742 			}
4743 		}
4744 	}
4745 	bs_load_replay_md_chain_cpl(ctx);
4746 }
4747 
4748 static void
4749 bs_load_replay_cur_md_page(struct spdk_bs_load_ctx *ctx)
4750 {
4751 	uint64_t lba;
4752 
4753 	assert(ctx->cur_page < ctx->super->md_len);
4754 	lba = bs_md_page_to_lba(ctx->bs, ctx->cur_page);
4755 	bs_sequence_read_dev(ctx->seq, ctx->page, lba,
4756 			     bs_byte_to_lba(ctx->bs, SPDK_BS_PAGE_SIZE),
4757 			     bs_load_replay_md_cpl, ctx);
4758 }
4759 
4760 static void
4761 bs_load_replay_md(struct spdk_bs_load_ctx *ctx)
4762 {
4763 	ctx->page_index = 0;
4764 	ctx->cur_page = 0;
4765 	ctx->page = spdk_zmalloc(SPDK_BS_PAGE_SIZE, 0,
4766 				 NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
4767 	if (!ctx->page) {
4768 		bs_load_ctx_fail(ctx, -ENOMEM);
4769 		return;
4770 	}
4771 	bs_load_replay_cur_md_page(ctx);
4772 }
4773 
4774 static void
4775 bs_recover(struct spdk_bs_load_ctx *ctx)
4776 {
4777 	int		rc;
4778 
4779 	SPDK_NOTICELOG("Performing recovery on blobstore\n");
4780 	rc = spdk_bit_array_resize(&ctx->bs->used_md_pages, ctx->super->md_len);
4781 	if (rc < 0) {
4782 		bs_load_ctx_fail(ctx, -ENOMEM);
4783 		return;
4784 	}
4785 
4786 	rc = spdk_bit_array_resize(&ctx->bs->used_blobids, ctx->super->md_len);
4787 	if (rc < 0) {
4788 		bs_load_ctx_fail(ctx, -ENOMEM);
4789 		return;
4790 	}
4791 
4792 	rc = spdk_bit_array_resize(&ctx->used_clusters, ctx->bs->total_clusters);
4793 	if (rc < 0) {
4794 		bs_load_ctx_fail(ctx, -ENOMEM);
4795 		return;
4796 	}
4797 
4798 	rc = spdk_bit_array_resize(&ctx->bs->open_blobids, ctx->super->md_len);
4799 	if (rc < 0) {
4800 		bs_load_ctx_fail(ctx, -ENOMEM);
4801 		return;
4802 	}
4803 
4804 	ctx->bs->num_free_clusters = ctx->bs->total_clusters;
4805 	bs_load_replay_md(ctx);
4806 }
4807 
4808 static int
4809 bs_parse_super(struct spdk_bs_load_ctx *ctx)
4810 {
4811 	int rc;
4812 
4813 	if (ctx->super->size == 0) {
4814 		ctx->super->size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen;
4815 	}
4816 
4817 	if (ctx->super->io_unit_size == 0) {
4818 		ctx->super->io_unit_size = SPDK_BS_PAGE_SIZE;
4819 	}
4820 
4821 	ctx->bs->clean = 1;
4822 	ctx->bs->cluster_sz = ctx->super->cluster_size;
4823 	ctx->bs->total_clusters = ctx->super->size / ctx->super->cluster_size;
4824 	ctx->bs->pages_per_cluster = ctx->bs->cluster_sz / SPDK_BS_PAGE_SIZE;
4825 	if (spdk_u32_is_pow2(ctx->bs->pages_per_cluster)) {
4826 		ctx->bs->pages_per_cluster_shift = spdk_u32log2(ctx->bs->pages_per_cluster);
4827 	}
4828 	ctx->bs->io_unit_size = ctx->super->io_unit_size;
4829 	rc = spdk_bit_array_resize(&ctx->used_clusters, ctx->bs->total_clusters);
4830 	if (rc < 0) {
4831 		return -ENOMEM;
4832 	}
4833 	ctx->bs->md_start = ctx->super->md_start;
4834 	ctx->bs->md_len = ctx->super->md_len;
4835 	rc = spdk_bit_array_resize(&ctx->bs->open_blobids, ctx->bs->md_len);
4836 	if (rc < 0) {
4837 		return -ENOMEM;
4838 	}
4839 
4840 	ctx->bs->total_data_clusters = ctx->bs->total_clusters - spdk_divide_round_up(
4841 					       ctx->bs->md_start + ctx->bs->md_len, ctx->bs->pages_per_cluster);
4842 	ctx->bs->super_blob = ctx->super->super_blob;
4843 	memcpy(&ctx->bs->bstype, &ctx->super->bstype, sizeof(ctx->super->bstype));
4844 
4845 	return 0;
4846 }
4847 
4848 static void
4849 bs_load_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4850 {
4851 	struct spdk_bs_load_ctx *ctx = cb_arg;
4852 	int rc;
4853 
4854 	rc = bs_super_validate(ctx->super, ctx->bs);
4855 	if (rc != 0) {
4856 		bs_load_ctx_fail(ctx, rc);
4857 		return;
4858 	}
4859 
4860 	rc = bs_parse_super(ctx);
4861 	if (rc < 0) {
4862 		bs_load_ctx_fail(ctx, rc);
4863 		return;
4864 	}
4865 
4866 	if (ctx->super->used_blobid_mask_len == 0 || ctx->super->clean == 0 || ctx->force_recover) {
4867 		bs_recover(ctx);
4868 	} else {
4869 		bs_load_read_used_pages(ctx);
4870 	}
4871 }
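
/*
 * Summary of the recovery decision above:
 *
 *	used_blobid_mask_len == 0  (pre-v3 format)   -> replay all metadata
 *	super->clean == 0          (dirty shutdown)  -> replay all metadata
 *	ctx->force_recover         (caller request)  -> replay all metadata
 *	otherwise                                    -> load the persisted masks
 */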
4872 
4873 static inline int
4874 bs_opts_copy(struct spdk_bs_opts *src, struct spdk_bs_opts *dst)
4875 {
4876 
4877 	if (!src->opts_size) {
		SPDK_ERRLOG("opts_size should not be zero\n");
4879 		return -1;
4880 	}
4881 
4882 #define FIELD_OK(field) \
4883         offsetof(struct spdk_bs_opts, field) + sizeof(src->field) <= src->opts_size
4884 
4885 #define SET_FIELD(field) \
4886         if (FIELD_OK(field)) { \
4887                 dst->field = src->field; \
4888         } \
4889 
4890 	SET_FIELD(cluster_sz);
4891 	SET_FIELD(num_md_pages);
4892 	SET_FIELD(max_md_ops);
4893 	SET_FIELD(max_channel_ops);
4894 	SET_FIELD(clear_method);
4895 
4896 	if (FIELD_OK(bstype)) {
4897 		memcpy(&dst->bstype, &src->bstype, sizeof(dst->bstype));
4898 	}
4899 	SET_FIELD(iter_cb_fn);
4900 	SET_FIELD(iter_cb_arg);
4901 	SET_FIELD(force_recover);
4902 	SET_FIELD(esnap_bs_dev_create);
4903 	SET_FIELD(esnap_ctx);
4904 
4905 	dst->opts_size = src->opts_size;
4906 
	/* Do not remove this statement. If you add a new field, update the
	 * assert below and add a corresponding SET_FIELD statement above. */
4909 	SPDK_STATIC_ASSERT(sizeof(struct spdk_bs_opts) == 88, "Incorrect size");
4910 
4911 #undef FIELD_OK
4912 #undef SET_FIELD
4913 
4914 	return 0;
4915 }
4916 
4917 void
4918 spdk_bs_load(struct spdk_bs_dev *dev, struct spdk_bs_opts *o,
4919 	     spdk_bs_op_with_handle_complete cb_fn, void *cb_arg)
4920 {
4921 	struct spdk_blob_store	*bs;
4922 	struct spdk_bs_cpl	cpl;
4923 	struct spdk_bs_load_ctx *ctx;
4924 	struct spdk_bs_opts	opts = {};
4925 	int err;
4926 
4927 	SPDK_DEBUGLOG(blob, "Loading blobstore from dev %p\n", dev);
4928 
4929 	if ((SPDK_BS_PAGE_SIZE % dev->blocklen) != 0) {
		SPDK_DEBUGLOG(blob, "unsupported dev block length of %" PRIu32 "\n", dev->blocklen);
4931 		dev->destroy(dev);
4932 		cb_fn(cb_arg, NULL, -EINVAL);
4933 		return;
4934 	}
4935 
4936 	spdk_bs_opts_init(&opts, sizeof(opts));
4937 	if (o) {
4938 		if (bs_opts_copy(o, &opts)) {
			dev->destroy(dev);
			cb_fn(cb_arg, NULL, -EINVAL);
			return;
4940 		}
4941 	}
4942 
4943 	if (opts.max_md_ops == 0 || opts.max_channel_ops == 0) {
4944 		dev->destroy(dev);
4945 		cb_fn(cb_arg, NULL, -EINVAL);
4946 		return;
4947 	}
4948 
4949 	err = bs_alloc(dev, &opts, &bs, &ctx);
4950 	if (err) {
4951 		dev->destroy(dev);
4952 		cb_fn(cb_arg, NULL, err);
4953 		return;
4954 	}
4955 
4956 	cpl.type = SPDK_BS_CPL_TYPE_BS_HANDLE;
4957 	cpl.u.bs_handle.cb_fn = cb_fn;
4958 	cpl.u.bs_handle.cb_arg = cb_arg;
4959 	cpl.u.bs_handle.bs = bs;
4960 
4961 	ctx->seq = bs_sequence_start_bs(bs->md_channel, &cpl);
4962 	if (!ctx->seq) {
4963 		spdk_free(ctx->super);
4964 		free(ctx);
4965 		bs_free(bs);
4966 		cb_fn(cb_arg, NULL, -ENOMEM);
4967 		return;
4968 	}
4969 
4970 	/* Read the super block */
4971 	bs_sequence_read_dev(ctx->seq, ctx->super, bs_page_to_lba(bs, 0),
4972 			     bs_byte_to_lba(bs, sizeof(*ctx->super)),
4973 			     bs_load_super_cpl, ctx);
4974 }
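
/*
 * Minimal caller sketch (hypothetical names, assuming an spdk_thread
 * context and a bs_dev created elsewhere, e.g. with
 * spdk_bdev_create_bs_dev_ext()):
 *
 *	static void
 *	load_done(void *cb_arg, struct spdk_blob_store *bs, int bserrno)
 *	{
 *		if (bserrno != 0) {
 *			SPDK_ERRLOG("blobstore load failed: %d\n", bserrno);
 *			return;
 *		}
 *		// bs is ready; metadata operations must stay on this thread
 *	}
 *
 *	spdk_bs_load(bs_dev, NULL, load_done, NULL);
 */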
4975 
4976 /* END spdk_bs_load */
4977 
4978 /* START spdk_bs_dump */
4979 
4980 static void
4981 bs_dump_finish(spdk_bs_sequence_t *seq, struct spdk_bs_load_ctx *ctx, int bserrno)
4982 {
4983 	spdk_free(ctx->super);
4984 
4985 	/*
4986 	 * We need to defer calling bs_call_cpl() until after
4987 	 * dev destruction, so tuck these away for later use.
4988 	 */
4989 	ctx->bs->unload_err = bserrno;
4990 	memcpy(&ctx->bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl));
4991 	seq->cpl.type = SPDK_BS_CPL_TYPE_NONE;
4992 
4993 	bs_sequence_finish(seq, 0);
4994 	bs_free(ctx->bs);
4995 	free(ctx);
4996 }
4997 
4998 static void
4999 bs_dump_print_xattr(struct spdk_bs_load_ctx *ctx, struct spdk_blob_md_descriptor *desc)
5000 {
5001 	struct spdk_blob_md_descriptor_xattr *desc_xattr;
5002 	uint32_t i;
5003 	const char *type;
5004 
5005 	desc_xattr = (struct spdk_blob_md_descriptor_xattr *)desc;
5006 
	if (desc_xattr->length !=
	    sizeof(desc_xattr->name_length) + sizeof(desc_xattr->value_length) +
	    desc_xattr->name_length + desc_xattr->value_length) {
		fprintf(ctx->fp, "XATTR: length mismatch in descriptor -- skipping\n");
		return;
	}
5011 
5012 	memcpy(ctx->xattr_name, desc_xattr->name, desc_xattr->name_length);
5013 	ctx->xattr_name[desc_xattr->name_length] = '\0';
5014 	if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) {
5015 		type = "XATTR";
5016 	} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) {
5017 		type = "XATTR_INTERNAL";
5018 	} else {
5019 		assert(false);
5020 		type = "XATTR_?";
5021 	}
5022 	fprintf(ctx->fp, "%s: name = \"%s\"\n", type, ctx->xattr_name);
5023 	fprintf(ctx->fp, "       value = \"");
5024 	ctx->print_xattr_fn(ctx->fp, ctx->super->bstype.bstype, ctx->xattr_name,
5025 			    (void *)((uintptr_t)desc_xattr->name + desc_xattr->name_length),
5026 			    desc_xattr->value_length);
5027 	fprintf(ctx->fp, "\"\n");
5028 	for (i = 0; i < desc_xattr->value_length; i++) {
5029 		if (i % 16 == 0) {
5030 			fprintf(ctx->fp, "               ");
5031 		}
5032 		fprintf(ctx->fp, "%02" PRIx8 " ", *((uint8_t *)desc_xattr->name + desc_xattr->name_length + i));
5033 		if ((i + 1) % 16 == 0) {
5034 			fprintf(ctx->fp, "\n");
5035 		}
5036 	}
5037 	if (i % 16 != 0) {
5038 		fprintf(ctx->fp, "\n");
5039 	}
5040 }
5041 
5042 struct type_flag_desc {
5043 	uint64_t mask;
5044 	uint64_t val;
5045 	const char *name;
5046 };
5047 
5048 static void
5049 bs_dump_print_type_bits(struct spdk_bs_load_ctx *ctx, uint64_t flags,
5050 			struct type_flag_desc *desc, size_t numflags)
5051 {
5052 	uint64_t covered = 0;
5053 	size_t i;
5054 
5055 	for (i = 0; i < numflags; i++) {
5056 		if ((desc[i].mask & flags) != desc[i].val) {
5057 			continue;
5058 		}
5059 		fprintf(ctx->fp, "\t\t 0x%016" PRIx64 " %s", desc[i].val, desc[i].name);
5060 		if (desc[i].mask != desc[i].val) {
5061 			fprintf(ctx->fp, " (mask 0x%" PRIx64 " value 0x%" PRIx64 ")",
5062 				desc[i].mask, desc[i].val);
5063 		}
5064 		fprintf(ctx->fp, "\n");
5065 		covered |= desc[i].mask;
5066 	}
5067 	if ((flags & ~covered) != 0) {
5068 		fprintf(ctx->fp, "\t\t 0x%016" PRIx64 " Unknown\n", flags & ~covered);
5069 	}
5070 }
5071 
5072 static void
5073 bs_dump_print_type_flags(struct spdk_bs_load_ctx *ctx, struct spdk_blob_md_descriptor *desc)
5074 {
5075 	struct spdk_blob_md_descriptor_flags *type_desc;
5076 #define ADD_FLAG(f) { f, f, #f }
5077 #define ADD_MASK_VAL(m, v) { m, v, #v }
5078 	static struct type_flag_desc invalid[] = {
5079 		ADD_FLAG(SPDK_BLOB_THIN_PROV),
5080 		ADD_FLAG(SPDK_BLOB_INTERNAL_XATTR),
5081 		ADD_FLAG(SPDK_BLOB_EXTENT_TABLE),
5082 	};
5083 	static struct type_flag_desc data_ro[] = {
5084 		ADD_FLAG(SPDK_BLOB_READ_ONLY),
5085 	};
5086 	static struct type_flag_desc md_ro[] = {
5087 		ADD_MASK_VAL(SPDK_BLOB_MD_RO_FLAGS_MASK, BLOB_CLEAR_WITH_DEFAULT),
5088 		ADD_MASK_VAL(SPDK_BLOB_MD_RO_FLAGS_MASK, BLOB_CLEAR_WITH_NONE),
5089 		ADD_MASK_VAL(SPDK_BLOB_MD_RO_FLAGS_MASK, BLOB_CLEAR_WITH_UNMAP),
5090 		ADD_MASK_VAL(SPDK_BLOB_MD_RO_FLAGS_MASK, BLOB_CLEAR_WITH_WRITE_ZEROES),
5091 	};
5092 #undef ADD_FLAG
5093 #undef ADD_MASK_VAL
5094 
5095 	type_desc = (struct spdk_blob_md_descriptor_flags *)desc;
5096 	fprintf(ctx->fp, "Flags:\n");
5097 	fprintf(ctx->fp, "\tinvalid: 0x%016" PRIx64 "\n", type_desc->invalid_flags);
5098 	bs_dump_print_type_bits(ctx, type_desc->invalid_flags, invalid,
5099 				SPDK_COUNTOF(invalid));
5100 	fprintf(ctx->fp, "\tdata_ro: 0x%016" PRIx64 "\n", type_desc->data_ro_flags);
5101 	bs_dump_print_type_bits(ctx, type_desc->data_ro_flags, data_ro,
5102 				SPDK_COUNTOF(data_ro));
5103 	fprintf(ctx->fp, "\t  md_ro: 0x%016" PRIx64 "\n", type_desc->md_ro_flags);
5104 	bs_dump_print_type_bits(ctx, type_desc->md_ro_flags, md_ro,
5105 				SPDK_COUNTOF(md_ro));
5106 }
5107 
5108 static void
5109 bs_dump_print_extent_table(struct spdk_bs_load_ctx *ctx, struct spdk_blob_md_descriptor *desc)
5110 {
5111 	struct spdk_blob_md_descriptor_extent_table *et_desc;
5112 	uint64_t num_extent_pages;
5113 	uint32_t et_idx;
5114 
5115 	et_desc = (struct spdk_blob_md_descriptor_extent_table *)desc;
5116 	num_extent_pages = (et_desc->length - sizeof(et_desc->num_clusters)) /
5117 			   sizeof(et_desc->extent_page[0]);
5118 
5119 	fprintf(ctx->fp, "Extent table:\n");
5120 	for (et_idx = 0; et_idx < num_extent_pages; et_idx++) {
5121 		if (et_desc->extent_page[et_idx].page_idx == 0) {
5122 			/* Zeroes represent unallocated extent pages. */
5123 			continue;
5124 		}
5125 		fprintf(ctx->fp, "\tExtent page: %5" PRIu32 " length %3" PRIu32
5126 			" at LBA %" PRIu64 "\n", et_desc->extent_page[et_idx].page_idx,
5127 			et_desc->extent_page[et_idx].num_pages,
5128 			bs_md_page_to_lba(ctx->bs, et_desc->extent_page[et_idx].page_idx));
5129 	}
5130 }
5131 
5132 static void
5133 bs_dump_print_md_page(struct spdk_bs_load_ctx *ctx)
5134 {
5135 	uint32_t page_idx = ctx->cur_page;
5136 	struct spdk_blob_md_page *page = ctx->page;
5137 	struct spdk_blob_md_descriptor *desc;
5138 	size_t cur_desc = 0;
5139 	uint32_t crc;
5140 
5141 	fprintf(ctx->fp, "=========\n");
5142 	fprintf(ctx->fp, "Metadata Page Index: %" PRIu32 " (0x%" PRIx32 ")\n", page_idx, page_idx);
5143 	fprintf(ctx->fp, "Start LBA: %" PRIu64 "\n", bs_md_page_to_lba(ctx->bs, page_idx));
5144 	fprintf(ctx->fp, "Blob ID: 0x%" PRIx64 "\n", page->id);
5145 	fprintf(ctx->fp, "Sequence: %" PRIu32 "\n", page->sequence_num);
5146 	if (page->next == SPDK_INVALID_MD_PAGE) {
5147 		fprintf(ctx->fp, "Next: None\n");
5148 	} else {
5149 		fprintf(ctx->fp, "Next: %" PRIu32 "\n", page->next);
5150 	}
5151 	fprintf(ctx->fp, "In used bit array%s:", ctx->super->clean ? "" : " (not clean: dubious)");
5152 	if (spdk_bit_array_get(ctx->bs->used_md_pages, page_idx)) {
5153 		fprintf(ctx->fp, " md");
5154 	}
5155 	if (spdk_bit_array_get(ctx->bs->used_blobids, page_idx)) {
5156 		fprintf(ctx->fp, " blob");
5157 	}
5158 	fprintf(ctx->fp, "\n");
5159 
5160 	crc = blob_md_page_calc_crc(page);
5161 	fprintf(ctx->fp, "CRC: 0x%" PRIx32 " (%s)\n", page->crc, crc == page->crc ? "OK" : "Mismatch");
5162 
5163 	desc = (struct spdk_blob_md_descriptor *)page->descriptors;
5164 	while (cur_desc < sizeof(page->descriptors)) {
5165 		if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) {
5166 			if (desc->length == 0) {
5167 				/* If padding and length are 0, this terminates the page */
5168 				break;
5169 			}
5170 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE) {
5171 			struct spdk_blob_md_descriptor_extent_rle	*desc_extent_rle;
5172 			unsigned int				i;
5173 
5174 			desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)desc;
5175 
5176 			for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) {
5177 				if (desc_extent_rle->extents[i].cluster_idx != 0) {
5178 					fprintf(ctx->fp, "Allocated Extent - Start: %" PRIu32,
5179 						desc_extent_rle->extents[i].cluster_idx);
5180 				} else {
5181 					fprintf(ctx->fp, "Unallocated Extent - ");
5182 				}
5183 				fprintf(ctx->fp, " Length: %" PRIu32, desc_extent_rle->extents[i].length);
5184 				fprintf(ctx->fp, "\n");
5185 			}
5186 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) {
5187 			struct spdk_blob_md_descriptor_extent_page	*desc_extent;
5188 			unsigned int					i;
5189 
5190 			desc_extent = (struct spdk_blob_md_descriptor_extent_page *)desc;
5191 
5192 			for (i = 0; i < desc_extent->length / sizeof(desc_extent->cluster_idx[0]); i++) {
5193 				if (desc_extent->cluster_idx[i] != 0) {
5194 					fprintf(ctx->fp, "Allocated Extent - Start: %" PRIu32,
5195 						desc_extent->cluster_idx[i]);
5196 				} else {
5197 					fprintf(ctx->fp, "Unallocated Extent");
5198 				}
5199 				fprintf(ctx->fp, "\n");
5200 			}
5201 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) {
5202 			bs_dump_print_xattr(ctx, desc);
5203 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) {
5204 			bs_dump_print_xattr(ctx, desc);
5205 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) {
5206 			bs_dump_print_type_flags(ctx, desc);
5207 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE) {
5208 			bs_dump_print_extent_table(ctx, desc);
5209 		} else {
5210 			/* Error */
5211 			fprintf(ctx->fp, "Unknown descriptor type %" PRIu8 "\n", desc->type);
5212 		}
5213 		/* Advance to the next descriptor */
5214 		cur_desc += sizeof(*desc) + desc->length;
5215 		if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) {
5216 			break;
5217 		}
5218 		desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc);
5219 	}
5220 }
5221 
5222 static void
5223 bs_dump_read_md_page_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
5224 {
5225 	struct spdk_bs_load_ctx *ctx = cb_arg;
5226 
5227 	if (bserrno != 0) {
5228 		bs_dump_finish(seq, ctx, bserrno);
5229 		return;
5230 	}
5231 
5232 	if (ctx->page->id != 0) {
5233 		bs_dump_print_md_page(ctx);
5234 	}
5235 
5236 	ctx->cur_page++;
5237 
5238 	if (ctx->cur_page < ctx->super->md_len) {
5239 		bs_dump_read_md_page(seq, ctx);
5240 	} else {
5241 		spdk_free(ctx->page);
5242 		bs_dump_finish(seq, ctx, 0);
5243 	}
5244 }
5245 
5246 static void
5247 bs_dump_read_md_page(spdk_bs_sequence_t *seq, void *cb_arg)
5248 {
5249 	struct spdk_bs_load_ctx *ctx = cb_arg;
5250 	uint64_t lba;
5251 
5252 	assert(ctx->cur_page < ctx->super->md_len);
5253 	lba = bs_page_to_lba(ctx->bs, ctx->super->md_start + ctx->cur_page);
5254 	bs_sequence_read_dev(seq, ctx->page, lba,
5255 			     bs_byte_to_lba(ctx->bs, SPDK_BS_PAGE_SIZE),
5256 			     bs_dump_read_md_page_cpl, ctx);
5257 }
5258 
5259 static void
5260 bs_dump_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
5261 {
5262 	struct spdk_bs_load_ctx *ctx = cb_arg;
5263 	int rc;
5264 
	if (bserrno != 0) {
		bs_dump_finish(seq, ctx, bserrno);
		return;
	}

	fprintf(ctx->fp, "Signature: \"%.8s\" ", ctx->super->signature);
	if (memcmp(ctx->super->signature, SPDK_BS_SUPER_BLOCK_SIG,
		   sizeof(ctx->super->signature)) != 0) {
		fprintf(ctx->fp, "(Mismatch)\n");
		/* The read itself succeeded, so report the bad signature explicitly. */
		bs_dump_finish(seq, ctx, -EINVAL);
		return;
	}
	fprintf(ctx->fp, "(OK)\n");
5274 	fprintf(ctx->fp, "Version: %" PRIu32 "\n", ctx->super->version);
5275 	fprintf(ctx->fp, "CRC: 0x%x (%s)\n", ctx->super->crc,
5276 		(ctx->super->crc == blob_md_page_calc_crc(ctx->super)) ? "OK" : "Mismatch");
5277 	fprintf(ctx->fp, "Blobstore Type: %.*s\n", SPDK_BLOBSTORE_TYPE_LENGTH, ctx->super->bstype.bstype);
5278 	fprintf(ctx->fp, "Cluster Size: %" PRIu32 "\n", ctx->super->cluster_size);
5279 	fprintf(ctx->fp, "Super Blob ID: ");
5280 	if (ctx->super->super_blob == SPDK_BLOBID_INVALID) {
5281 		fprintf(ctx->fp, "(None)\n");
5282 	} else {
5283 		fprintf(ctx->fp, "0x%" PRIx64 "\n", ctx->super->super_blob);
5284 	}
5285 	fprintf(ctx->fp, "Clean: %" PRIu32 "\n", ctx->super->clean);
5286 	fprintf(ctx->fp, "Used Metadata Page Mask Start: %" PRIu32 "\n", ctx->super->used_page_mask_start);
5287 	fprintf(ctx->fp, "Used Metadata Page Mask Length: %" PRIu32 "\n", ctx->super->used_page_mask_len);
5288 	fprintf(ctx->fp, "Used Cluster Mask Start: %" PRIu32 "\n", ctx->super->used_cluster_mask_start);
5289 	fprintf(ctx->fp, "Used Cluster Mask Length: %" PRIu32 "\n", ctx->super->used_cluster_mask_len);
5290 	fprintf(ctx->fp, "Used Blob ID Mask Start: %" PRIu32 "\n", ctx->super->used_blobid_mask_start);
5291 	fprintf(ctx->fp, "Used Blob ID Mask Length: %" PRIu32 "\n", ctx->super->used_blobid_mask_len);
5292 	fprintf(ctx->fp, "Metadata Start: %" PRIu32 "\n", ctx->super->md_start);
5293 	fprintf(ctx->fp, "Metadata Length: %" PRIu32 "\n", ctx->super->md_len);
5294 
5295 	ctx->cur_page = 0;
5296 	ctx->page = spdk_zmalloc(SPDK_BS_PAGE_SIZE, 0,
5297 				 NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
5298 	if (!ctx->page) {
5299 		bs_dump_finish(seq, ctx, -ENOMEM);
5300 		return;
5301 	}
5302 
5303 	rc = bs_parse_super(ctx);
5304 	if (rc < 0) {
5305 		bs_load_ctx_fail(ctx, rc);
5306 		return;
5307 	}
5308 
5309 	bs_load_read_used_pages(ctx);
5310 }
5311 
5312 void
5313 spdk_bs_dump(struct spdk_bs_dev *dev, FILE *fp, spdk_bs_dump_print_xattr print_xattr_fn,
5314 	     spdk_bs_op_complete cb_fn, void *cb_arg)
5315 {
5316 	struct spdk_blob_store	*bs;
5317 	struct spdk_bs_cpl	cpl;
5318 	struct spdk_bs_load_ctx *ctx;
5319 	struct spdk_bs_opts	opts = {};
5320 	int err;
5321 
5322 	SPDK_DEBUGLOG(blob, "Dumping blobstore from dev %p\n", dev);
5323 
5324 	spdk_bs_opts_init(&opts, sizeof(opts));
5325 
5326 	err = bs_alloc(dev, &opts, &bs, &ctx);
5327 	if (err) {
5328 		dev->destroy(dev);
5329 		cb_fn(cb_arg, err);
5330 		return;
5331 	}
5332 
5333 	ctx->dumping = true;
5334 	ctx->fp = fp;
5335 	ctx->print_xattr_fn = print_xattr_fn;
5336 
5337 	cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
5338 	cpl.u.bs_basic.cb_fn = cb_fn;
5339 	cpl.u.bs_basic.cb_arg = cb_arg;
5340 
5341 	ctx->seq = bs_sequence_start_bs(bs->md_channel, &cpl);
5342 	if (!ctx->seq) {
5343 		spdk_free(ctx->super);
5344 		free(ctx);
5345 		bs_free(bs);
5346 		cb_fn(cb_arg, -ENOMEM);
5347 		return;
5348 	}
5349 
5350 	/* Read the super block */
5351 	bs_sequence_read_dev(ctx->seq, ctx->super, bs_page_to_lba(bs, 0),
5352 			     bs_byte_to_lba(bs, sizeof(*ctx->super)),
5353 			     bs_dump_super_cpl, ctx);
5354 }
5355 
5356 /* END spdk_bs_dump */
5357 
5358 /* START spdk_bs_init */
5359 
5360 static void
5361 bs_init_persist_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
5362 {
5363 	struct spdk_bs_load_ctx *ctx = cb_arg;
5364 
5365 	ctx->bs->used_clusters = spdk_bit_pool_create_from_array(ctx->used_clusters);
5366 	spdk_free(ctx->super);
5367 	free(ctx);
5368 
5369 	bs_sequence_finish(seq, bserrno);
5370 }
5371 
5372 static void
5373 bs_init_trim_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
5374 {
5375 	struct spdk_bs_load_ctx *ctx = cb_arg;
5376 
5377 	/* Write super block */
5378 	bs_sequence_write_dev(seq, ctx->super, bs_page_to_lba(ctx->bs, 0),
5379 			      bs_byte_to_lba(ctx->bs, sizeof(*ctx->super)),
5380 			      bs_init_persist_super_cpl, ctx);
5381 }
5382 
5383 void
5384 spdk_bs_init(struct spdk_bs_dev *dev, struct spdk_bs_opts *o,
5385 	     spdk_bs_op_with_handle_complete cb_fn, void *cb_arg)
5386 {
5387 	struct spdk_bs_load_ctx *ctx;
5388 	struct spdk_blob_store	*bs;
5389 	struct spdk_bs_cpl	cpl;
5390 	spdk_bs_sequence_t	*seq;
5391 	spdk_bs_batch_t		*batch;
5392 	uint64_t		num_md_lba;
5393 	uint64_t		num_md_pages;
5394 	uint64_t		num_md_clusters;
5395 	uint64_t		max_used_cluster_mask_len;
5396 	uint32_t		i;
5397 	struct spdk_bs_opts	opts = {};
5398 	int			rc;
5399 	uint64_t		lba, lba_count;
5400 
5401 	SPDK_DEBUGLOG(blob, "Initializing blobstore on dev %p\n", dev);
5402 
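	/* All blobstore metadata I/O is issued in SPDK_BS_PAGE_SIZE units, so
	 * the page size must be a whole multiple of the device block size.
	 */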
5403 	if ((SPDK_BS_PAGE_SIZE % dev->blocklen) != 0) {
		SPDK_ERRLOG("unsupported dev block length of %" PRIu32 "\n",
			    dev->blocklen);
5406 		dev->destroy(dev);
5407 		cb_fn(cb_arg, NULL, -EINVAL);
5408 		return;
5409 	}
5410 
5411 	spdk_bs_opts_init(&opts, sizeof(opts));
	if (o) {
		if (bs_opts_copy(o, &opts)) {
			dev->destroy(dev);
			cb_fn(cb_arg, NULL, -EINVAL);
			return;
		}
	}
5417 
5418 	if (bs_opts_verify(&opts) != 0) {
5419 		dev->destroy(dev);
5420 		cb_fn(cb_arg, NULL, -EINVAL);
5421 		return;
5422 	}
5423 
5424 	rc = bs_alloc(dev, &opts, &bs, &ctx);
5425 	if (rc) {
5426 		dev->destroy(dev);
5427 		cb_fn(cb_arg, NULL, rc);
5428 		return;
5429 	}
5430 
5431 	if (opts.num_md_pages == SPDK_BLOB_OPTS_NUM_MD_PAGES) {
5432 		/* By default, allocate 1 page per cluster.
5433 		 * Technically, this over-allocates metadata
5434 		 * because more metadata will reduce the number
5435 		 * of usable clusters. This can be addressed with
5436 		 * more complex math in the future.
5437 		 */
5438 		bs->md_len = bs->total_clusters;
5439 	} else {
5440 		bs->md_len = opts.num_md_pages;
5441 	}
5442 	rc = spdk_bit_array_resize(&bs->used_md_pages, bs->md_len);
5443 	if (rc < 0) {
5444 		spdk_free(ctx->super);
5445 		free(ctx);
5446 		bs_free(bs);
5447 		cb_fn(cb_arg, NULL, -ENOMEM);
5448 		return;
5449 	}
5450 
5451 	rc = spdk_bit_array_resize(&bs->used_blobids, bs->md_len);
5452 	if (rc < 0) {
5453 		spdk_free(ctx->super);
5454 		free(ctx);
5455 		bs_free(bs);
5456 		cb_fn(cb_arg, NULL, -ENOMEM);
5457 		return;
5458 	}
5459 
5460 	rc = spdk_bit_array_resize(&bs->open_blobids, bs->md_len);
5461 	if (rc < 0) {
5462 		spdk_free(ctx->super);
5463 		free(ctx);
5464 		bs_free(bs);
5465 		cb_fn(cb_arg, NULL, -ENOMEM);
5466 		return;
5467 	}
5468 
5469 	memcpy(ctx->super->signature, SPDK_BS_SUPER_BLOCK_SIG,
5470 	       sizeof(ctx->super->signature));
5471 	ctx->super->version = SPDK_BS_VERSION;
5472 	ctx->super->length = sizeof(*ctx->super);
5473 	ctx->super->super_blob = bs->super_blob;
5474 	ctx->super->clean = 0;
5475 	ctx->super->cluster_size = bs->cluster_sz;
5476 	ctx->super->io_unit_size = bs->io_unit_size;
5477 	memcpy(&ctx->super->bstype, &bs->bstype, sizeof(bs->bstype));
5478 
5479 	/* Calculate how many pages the metadata consumes at the front
5480 	 * of the disk.
5481 	 */
5482 
5483 	/* The super block uses 1 page */
5484 	num_md_pages = 1;
5485 
5486 	/* The used_md_pages mask requires 1 bit per metadata page, rounded
5487 	 * up to the nearest page, plus a header.
5488 	 */
5489 	ctx->super->used_page_mask_start = num_md_pages;
5490 	ctx->super->used_page_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) +
5491 					 spdk_divide_round_up(bs->md_len, 8),
5492 					 SPDK_BS_PAGE_SIZE);
5493 	num_md_pages += ctx->super->used_page_mask_len;
5494 
5495 	/* The used_clusters mask requires 1 bit per cluster, rounded
5496 	 * up to the nearest page, plus a header.
5497 	 */
5498 	ctx->super->used_cluster_mask_start = num_md_pages;
5499 	ctx->super->used_cluster_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) +
5500 					    spdk_divide_round_up(bs->total_clusters, 8),
5501 					    SPDK_BS_PAGE_SIZE);
	/* If the blobstore is later grown, the used_cluster bitmap will need
	 * more space. Reserve enough mask pages now for the maximum number of
	 * clusters that num_md_pages (bs->md_len) can ever describe.
	 */
5506 	max_used_cluster_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) +
5507 				    spdk_divide_round_up(bs->md_len, 8),
5508 				    SPDK_BS_PAGE_SIZE);
5509 	max_used_cluster_mask_len = spdk_max(max_used_cluster_mask_len,
5510 					     ctx->super->used_cluster_mask_len);
5511 	num_md_pages += max_used_cluster_mask_len;
5512 
5513 	/* The used_blobids mask requires 1 bit per metadata page, rounded
5514 	 * up to the nearest page, plus a header.
5515 	 */
5516 	ctx->super->used_blobid_mask_start = num_md_pages;
5517 	ctx->super->used_blobid_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) +
5518 					   spdk_divide_round_up(bs->md_len, 8),
5519 					   SPDK_BS_PAGE_SIZE);
5520 	num_md_pages += ctx->super->used_blobid_mask_len;
5521 
5522 	/* The metadata region size was chosen above */
5523 	ctx->super->md_start = bs->md_start = num_md_pages;
5524 	ctx->super->md_len = bs->md_len;
5525 	num_md_pages += bs->md_len;
5526 
5527 	num_md_lba = bs_page_to_lba(bs, num_md_pages);
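	/*
	 * Illustrative layout (assuming 4 KiB pages, 1 MiB clusters and a
	 * 1 GiB device, i.e. 1024 clusters with the default md_len of 1024):
	 * page 0 holds the super block; pages 1-3 hold the three masks, since
	 * each needs ceil(1024 / 8) = 128 bytes plus a header and thus fits in
	 * one page; the metadata region then spans pages 4-1027. That is 1028
	 * pages in total, so ceil(1028 / 256) = 5 clusters are claimed below.
	 */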
5528 
5529 	ctx->super->size = dev->blockcnt * dev->blocklen;
5530 
5531 	ctx->super->crc = blob_md_page_calc_crc(ctx->super);
5532 
5533 	num_md_clusters = spdk_divide_round_up(num_md_pages, bs->pages_per_cluster);
5534 	if (num_md_clusters > bs->total_clusters) {
		SPDK_ERRLOG("Blobstore metadata cannot use more clusters than are available, "
			    "please decrease the number of pages reserved for metadata "
			    "or increase the cluster size.\n");
5538 		spdk_free(ctx->super);
5539 		spdk_bit_array_free(&ctx->used_clusters);
5540 		free(ctx);
5541 		bs_free(bs);
5542 		cb_fn(cb_arg, NULL, -ENOMEM);
5543 		return;
5544 	}
5545 	/* Claim all of the clusters used by the metadata */
5546 	for (i = 0; i < num_md_clusters; i++) {
5547 		spdk_bit_array_set(ctx->used_clusters, i);
5548 	}
5549 
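	/* The clusters backing the metadata region are permanently reserved,
	 * so they are excluded from both the free count and the data-cluster
	 * total reported to users. */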
5550 	bs->num_free_clusters -= num_md_clusters;
5551 	bs->total_data_clusters = bs->num_free_clusters;
5552 
5553 	cpl.type = SPDK_BS_CPL_TYPE_BS_HANDLE;
5554 	cpl.u.bs_handle.cb_fn = cb_fn;
5555 	cpl.u.bs_handle.cb_arg = cb_arg;
5556 	cpl.u.bs_handle.bs = bs;
5557 
5558 	seq = bs_sequence_start_bs(bs->md_channel, &cpl);
5559 	if (!seq) {
5560 		spdk_free(ctx->super);
5561 		free(ctx);
5562 		bs_free(bs);
5563 		cb_fn(cb_arg, NULL, -ENOMEM);
5564 		return;
5565 	}
5566 
5567 	batch = bs_sequence_to_batch(seq, bs_init_trim_cpl, ctx);
5568 
5569 	/* Clear metadata space */
5570 	bs_batch_write_zeroes_dev(batch, 0, num_md_lba);
5571 
5572 	lba = num_md_lba;
5573 	lba_count = ctx->bs->dev->blockcnt - lba;
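	/*
	 * Clearing the data region is a cost/safety tradeoff: unmap is usually
	 * fastest, but a device is not required to return zeroes for unmapped
	 * blocks; write_zeroes guarantees zeroed data at the cost of more I/O;
	 * and none skips the work entirely, leaving any previous contents on
	 * the device.
	 */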
5574 	switch (opts.clear_method) {
5575 	case BS_CLEAR_WITH_UNMAP:
5576 		/* Trim data clusters */
5577 		bs_batch_unmap_dev(batch, lba, lba_count);
5578 		break;
5579 	case BS_CLEAR_WITH_WRITE_ZEROES:
5580 		/* Write_zeroes to data clusters */
5581 		bs_batch_write_zeroes_dev(batch, lba, lba_count);
5582 		break;
5583 	case BS_CLEAR_WITH_NONE:
5584 	default:
5585 		break;
5586 	}
5587 
5588 	bs_batch_close(batch);
5589 }
5590 
5591 /* END spdk_bs_init */
5592 
5593 /* START spdk_bs_destroy */
5594 
5595 static void
5596 bs_destroy_trim_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
5597 {
5598 	struct spdk_bs_load_ctx *ctx = cb_arg;
5599 	struct spdk_blob_store *bs = ctx->bs;
5600 
5601 	/*
5602 	 * We need to defer calling bs_call_cpl() until after
5603 	 * dev destruction, so tuck these away for later use.
5604 	 */
5605 	bs->unload_err = bserrno;
5606 	memcpy(&bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl));
5607 	seq->cpl.type = SPDK_BS_CPL_TYPE_NONE;
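	/*
	 * With the completion type set to NONE, bs_sequence_finish() below will
	 * not invoke the user callback; the copy stashed in bs->unload_cpl is
	 * invoked once the io_device and dev teardown triggered by bs_free()
	 * has completed.
	 */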
5608 
5609 	bs_sequence_finish(seq, bserrno);
5610 
5611 	bs_free(bs);
5612 	free(ctx);
5613 }
5614 
5615 void
5616 spdk_bs_destroy(struct spdk_blob_store *bs, spdk_bs_op_complete cb_fn,
5617 		void *cb_arg)
5618 {
5619 	struct spdk_bs_cpl	cpl;
5620 	spdk_bs_sequence_t	*seq;
5621 	struct spdk_bs_load_ctx *ctx;
5622 
5623 	SPDK_DEBUGLOG(blob, "Destroying blobstore\n");
5624 
5625 	if (!RB_EMPTY(&bs->open_blobs)) {
5626 		SPDK_ERRLOG("Blobstore still has open blobs\n");
5627 		cb_fn(cb_arg, -EBUSY);
5628 		return;
5629 	}
5630 
5631 	cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
5632 	cpl.u.bs_basic.cb_fn = cb_fn;
5633 	cpl.u.bs_basic.cb_arg = cb_arg;
5634 
5635 	ctx = calloc(1, sizeof(*ctx));
5636 	if (!ctx) {
5637 		cb_fn(cb_arg, -ENOMEM);
5638 		return;
5639 	}
5640 
5641 	ctx->bs = bs;
5642 
5643 	seq = bs_sequence_start_bs(bs->md_channel, &cpl);
5644 	if (!seq) {
5645 		free(ctx);
5646 		cb_fn(cb_arg, -ENOMEM);
5647 		return;
5648 	}
5649 
5650 	/* Write zeroes to the super block */
5651 	bs_sequence_write_zeroes_dev(seq,
5652 				     bs_page_to_lba(bs, 0),
5653 				     bs_byte_to_lba(bs, sizeof(struct spdk_bs_super_block)),
5654 				     bs_destroy_trim_cpl, ctx);
5655 }
5656 
5657 /* END spdk_bs_destroy */
5658 
5659 /* START spdk_bs_unload */
5660 
5661 static void
5662 bs_unload_finish(struct spdk_bs_load_ctx *ctx, int bserrno)
5663 {
5664 	spdk_bs_sequence_t *seq = ctx->seq;
5665 
5666 	spdk_free(ctx->super);
5667 
5668 	/*
5669 	 * We need to defer calling bs_call_cpl() until after
5670 	 * dev destruction, so tuck these away for later use.
5671 	 */
5672 	ctx->bs->unload_err = bserrno;
5673 	memcpy(&ctx->bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl));
5674 	seq->cpl.type = SPDK_BS_CPL_TYPE_NONE;
5675 
5676 	bs_sequence_finish(seq, bserrno);
5677 
5678 	bs_free(ctx->bs);
5679 	free(ctx);
5680 }
5681 
5682 static void
5683 bs_unload_write_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
5684 {
5685 	struct spdk_bs_load_ctx	*ctx = cb_arg;
5686 
5687 	bs_unload_finish(ctx, bserrno);
5688 }
5689 
5690 static void
5691 bs_unload_write_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
5692 {
5693 	struct spdk_bs_load_ctx	*ctx = cb_arg;
5694 
5695 	spdk_free(ctx->mask);
5696 
5697 	if (bserrno != 0) {
5698 		bs_unload_finish(ctx, bserrno);
5699 		return;
5700 	}
5701 
5702 	ctx->super->clean = 1;
5703 
5704 	bs_write_super(seq, ctx->bs, ctx->super, bs_unload_write_super_cpl, ctx);
5705 }
5706 
5707 static void
5708 bs_unload_write_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
5709 {
5710 	struct spdk_bs_load_ctx	*ctx = cb_arg;
5711 
5712 	spdk_free(ctx->mask);
5713 	ctx->mask = NULL;
5714 
5715 	if (bserrno != 0) {
5716 		bs_unload_finish(ctx, bserrno);
5717 		return;
5718 	}
5719 
5720 	bs_write_used_clusters(seq, ctx, bs_unload_write_used_clusters_cpl);
5721 }
5722 
5723 static void
5724 bs_unload_write_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
5725 {
5726 	struct spdk_bs_load_ctx	*ctx = cb_arg;
5727 
5728 	spdk_free(ctx->mask);
5729 	ctx->mask = NULL;
5730 
5731 	if (bserrno != 0) {
5732 		bs_unload_finish(ctx, bserrno);
5733 		return;
5734 	}
5735 
5736 	bs_write_used_blobids(seq, ctx, bs_unload_write_used_blobids_cpl);
5737 }
5738 
5739 static void
5740 bs_unload_read_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
5741 {
5742 	struct spdk_bs_load_ctx	*ctx = cb_arg;
5743 	int rc;
5744 
5745 	if (bserrno != 0) {
5746 		bs_unload_finish(ctx, bserrno);
5747 		return;
5748 	}
5749 
5750 	rc = bs_super_validate(ctx->super, ctx->bs);
5751 	if (rc != 0) {
5752 		bs_unload_finish(ctx, rc);
5753 		return;
5754 	}
5755 
5756 	bs_write_used_md(seq, cb_arg, bs_unload_write_used_pages_cpl);
5757 }
5758 
5759 void
5760 spdk_bs_unload(struct spdk_blob_store *bs, spdk_bs_op_complete cb_fn, void *cb_arg)
5761 {
5762 	struct spdk_bs_cpl	cpl;
5763 	struct spdk_bs_load_ctx *ctx;
5764 
5765 	SPDK_DEBUGLOG(blob, "Syncing blobstore\n");
5766 
	/*
	 * If external snapshot channels are still being destroyed while the blobstore is
	 * being unloaded, defer the unload until the channel destruction completes.
	 */
5771 	if (bs->esnap_channels_unloading != 0) {
5772 		if (bs->esnap_unload_cb_fn != NULL) {
5773 			SPDK_ERRLOG("Blobstore unload in progress\n");
5774 			cb_fn(cb_arg, -EBUSY);
5775 			return;
5776 		}
5777 		SPDK_DEBUGLOG(blob_esnap, "Blobstore unload deferred: %" PRIu32
5778 			      " esnap clones are unloading\n", bs->esnap_channels_unloading);
5779 		bs->esnap_unload_cb_fn = cb_fn;
5780 		bs->esnap_unload_cb_arg = cb_arg;
5781 		return;
5782 	}
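	/*
	 * A recorded callback means this call is the deferred unload resuming
	 * after the last esnap channel was destroyed; clear the saved state and
	 * fall through to the normal unload path.
	 */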
5783 	if (bs->esnap_unload_cb_fn != NULL) {
5784 		SPDK_DEBUGLOG(blob_esnap, "Blobstore deferred unload progressing\n");
5785 		assert(bs->esnap_unload_cb_fn == cb_fn);
5786 		assert(bs->esnap_unload_cb_arg == cb_arg);
5787 		bs->esnap_unload_cb_fn = NULL;
5788 		bs->esnap_unload_cb_arg = NULL;
5789 	}
5790 
5791 	if (!RB_EMPTY(&bs->open_blobs)) {
5792 		SPDK_ERRLOG("Blobstore still has open blobs\n");
5793 		cb_fn(cb_arg, -EBUSY);
5794 		return;
5795 	}
5796 
5797 	ctx = calloc(1, sizeof(*ctx));
5798 	if (!ctx) {
5799 		cb_fn(cb_arg, -ENOMEM);
5800 		return;
5801 	}
5802 
5803 	ctx->bs = bs;
5804 
5805 	ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL,
5806 				  SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
5807 	if (!ctx->super) {
5808 		free(ctx);
5809 		cb_fn(cb_arg, -ENOMEM);
5810 		return;
5811 	}
5812 
5813 	cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
5814 	cpl.u.bs_basic.cb_fn = cb_fn;
5815 	cpl.u.bs_basic.cb_arg = cb_arg;
5816 
5817 	ctx->seq = bs_sequence_start_bs(bs->md_channel, &cpl);
5818 	if (!ctx->seq) {
5819 		spdk_free(ctx->super);
5820 		free(ctx);
5821 		cb_fn(cb_arg, -ENOMEM);
5822 		return;
5823 	}
5824 
5825 	/* Read super block */
5826 	bs_sequence_read_dev(ctx->seq, ctx->super, bs_page_to_lba(bs, 0),
5827 			     bs_byte_to_lba(bs, sizeof(*ctx->super)),
5828 			     bs_unload_read_super_cpl, ctx);
5829 }
5830 
5831 /* END spdk_bs_unload */
5832 
5833 /* START spdk_bs_set_super */
5834 
5835 struct spdk_bs_set_super_ctx {
5836 	struct spdk_blob_store		*bs;
5837 	struct spdk_bs_super_block	*super;
5838 };
5839 
5840 static void
5841 bs_set_super_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
5842 {
5843 	struct spdk_bs_set_super_ctx	*ctx = cb_arg;
5844 
5845 	if (bserrno != 0) {
5846 		SPDK_ERRLOG("Unable to write to super block of blobstore\n");
5847 	}
5848 
5849 	spdk_free(ctx->super);
5850 
5851 	bs_sequence_finish(seq, bserrno);
5852 
5853 	free(ctx);
5854 }
5855 
5856 static void
5857 bs_set_super_read_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
5858 {
5859 	struct spdk_bs_set_super_ctx	*ctx = cb_arg;
5860 	int rc;
5861 
5862 	if (bserrno != 0) {
5863 		SPDK_ERRLOG("Unable to read super block of blobstore\n");
5864 		spdk_free(ctx->super);
5865 		bs_sequence_finish(seq, bserrno);
5866 		free(ctx);
5867 		return;
5868 	}
5869 
5870 	rc = bs_super_validate(ctx->super, ctx->bs);
5871 	if (rc != 0) {
5872 		SPDK_ERRLOG("Not a valid super block\n");
5873 		spdk_free(ctx->super);
5874 		bs_sequence_finish(seq, rc);
5875 		free(ctx);
5876 		return;
5877 	}
5878 
5879 	bs_write_super(seq, ctx->bs, ctx->super, bs_set_super_write_cpl, ctx);
5880 }
5881 
5882 void
5883 spdk_bs_set_super(struct spdk_blob_store *bs, spdk_blob_id blobid,
5884 		  spdk_bs_op_complete cb_fn, void *cb_arg)
5885 {
5886 	struct spdk_bs_cpl		cpl;
5887 	spdk_bs_sequence_t		*seq;
5888 	struct spdk_bs_set_super_ctx	*ctx;
5889 
5890 	SPDK_DEBUGLOG(blob, "Setting super blob id on blobstore\n");
5891 
5892 	ctx = calloc(1, sizeof(*ctx));
5893 	if (!ctx) {
5894 		cb_fn(cb_arg, -ENOMEM);
5895 		return;
5896 	}
5897 
5898 	ctx->bs = bs;
5899 
5900 	ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL,
5901 				  SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
5902 	if (!ctx->super) {
5903 		free(ctx);
5904 		cb_fn(cb_arg, -ENOMEM);
5905 		return;
5906 	}
5907 
5908 	cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
5909 	cpl.u.bs_basic.cb_fn = cb_fn;
5910 	cpl.u.bs_basic.cb_arg = cb_arg;
5911 
5912 	seq = bs_sequence_start_bs(bs->md_channel, &cpl);
5913 	if (!seq) {
5914 		spdk_free(ctx->super);
5915 		free(ctx);
5916 		cb_fn(cb_arg, -ENOMEM);
5917 		return;
5918 	}
5919 
5920 	bs->super_blob = blobid;
5921 
5922 	/* Read super block */
5923 	bs_sequence_read_dev(seq, ctx->super, bs_page_to_lba(bs, 0),
5924 			     bs_byte_to_lba(bs, sizeof(*ctx->super)),
5925 			     bs_set_super_read_cpl, ctx);
5926 }
5927 
5928 /* END spdk_bs_set_super */
5929 
5930 void
5931 spdk_bs_get_super(struct spdk_blob_store *bs,
5932 		  spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
5933 {
5934 	if (bs->super_blob == SPDK_BLOBID_INVALID) {
5935 		cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOENT);
5936 	} else {
5937 		cb_fn(cb_arg, bs->super_blob, 0);
5938 	}
5939 }
5940 
5941 uint64_t
5942 spdk_bs_get_cluster_size(struct spdk_blob_store *bs)
5943 {
5944 	return bs->cluster_sz;
5945 }
5946 
5947 uint64_t
5948 spdk_bs_get_page_size(struct spdk_blob_store *bs)
5949 {
5950 	return SPDK_BS_PAGE_SIZE;
5951 }
5952 
5953 uint64_t
5954 spdk_bs_get_io_unit_size(struct spdk_blob_store *bs)
5955 {
5956 	return bs->io_unit_size;
5957 }
5958 
5959 uint64_t
5960 spdk_bs_free_cluster_count(struct spdk_blob_store *bs)
5961 {
5962 	return bs->num_free_clusters;
5963 }
5964 
5965 uint64_t
5966 spdk_bs_total_data_cluster_count(struct spdk_blob_store *bs)
5967 {
5968 	return bs->total_data_clusters;
5969 }
5970 
5971 static int
5972 bs_register_md_thread(struct spdk_blob_store *bs)
5973 {
5974 	bs->md_channel = spdk_get_io_channel(bs);
5975 	if (!bs->md_channel) {
5976 		SPDK_ERRLOG("Failed to get IO channel.\n");
5977 		return -1;
5978 	}
5979 
5980 	return 0;
5981 }
5982 
5983 static int
5984 bs_unregister_md_thread(struct spdk_blob_store *bs)
5985 {
5986 	spdk_put_io_channel(bs->md_channel);
5987 
5988 	return 0;
5989 }
5990 
5991 spdk_blob_id
5992 spdk_blob_get_id(struct spdk_blob *blob)
5993 {
5994 	assert(blob != NULL);
5995 
5996 	return blob->id;
5997 }
5998 
5999 uint64_t
6000 spdk_blob_get_num_pages(struct spdk_blob *blob)
6001 {
6002 	assert(blob != NULL);
6003 
6004 	return bs_cluster_to_page(blob->bs, blob->active.num_clusters);
6005 }
6006 
6007 uint64_t
6008 spdk_blob_get_num_io_units(struct spdk_blob *blob)
6009 {
6010 	assert(blob != NULL);
6011 
6012 	return spdk_blob_get_num_pages(blob) * bs_io_unit_per_page(blob->bs);
6013 }
6014 
6015 uint64_t
6016 spdk_blob_get_num_clusters(struct spdk_blob *blob)
6017 {
6018 	assert(blob != NULL);
6019 
6020 	return blob->active.num_clusters;
6021 }
6022 
6023 static uint64_t
6024 blob_find_io_unit(struct spdk_blob *blob, uint64_t offset, bool is_allocated)
6025 {
6026 	uint64_t blob_io_unit_num = spdk_blob_get_num_io_units(blob);
6027 
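	/* Allocation is tracked per cluster, so it suffices to test one io_unit
	 * and then jump ahead to the next cluster boundary. */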
6028 	while (offset < blob_io_unit_num) {
6029 		if (bs_io_unit_is_allocated(blob, offset) == is_allocated) {
6030 			return offset;
6031 		}
6032 
6033 		offset += bs_num_io_units_to_cluster_boundary(blob, offset);
6034 	}
6035 
6036 	return UINT64_MAX;
6037 }
6038 
6039 uint64_t
6040 spdk_blob_get_next_allocated_io_unit(struct spdk_blob *blob, uint64_t offset)
6041 {
6042 	return blob_find_io_unit(blob, offset, true);
6043 }
6044 
6045 uint64_t
6046 spdk_blob_get_next_unallocated_io_unit(struct spdk_blob *blob, uint64_t offset)
6047 {
6048 	return blob_find_io_unit(blob, offset, false);
6049 }
6050 
6051 /* START spdk_bs_create_blob */
6052 
6053 static void
6054 bs_create_blob_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
6055 {
6056 	struct spdk_blob *blob = cb_arg;
6057 	uint32_t page_idx = bs_blobid_to_page(blob->id);
6058 
6059 	if (bserrno != 0) {
6060 		spdk_spin_lock(&blob->bs->used_lock);
6061 		spdk_bit_array_clear(blob->bs->used_blobids, page_idx);
6062 		bs_release_md_page(blob->bs, page_idx);
6063 		spdk_spin_unlock(&blob->bs->used_lock);
6064 	}
6065 
6066 	blob_free(blob);
6067 
6068 	bs_sequence_finish(seq, bserrno);
6069 }
6070 
6071 static int
6072 blob_set_xattrs(struct spdk_blob *blob, const struct spdk_blob_xattr_opts *xattrs,
6073 		bool internal)
6074 {
6075 	uint64_t i;
6076 	size_t value_len = 0;
6077 	int rc;
	const void *value = NULL;

	if (xattrs->count > 0 && xattrs->get_value == NULL) {
		return -EINVAL;
	}

	for (i = 0; i < xattrs->count; i++) {
6083 		xattrs->get_value(xattrs->ctx, xattrs->names[i], &value, &value_len);
6084 		if (value == NULL || value_len == 0) {
6085 			return -EINVAL;
6086 		}
6087 		rc = blob_set_xattr(blob, xattrs->names[i], value, value_len, internal);
6088 		if (rc < 0) {
6089 			return rc;
6090 		}
6091 	}
6092 	return 0;
6093 }
6094 
6095 static void
6096 blob_opts_copy(const struct spdk_blob_opts *src, struct spdk_blob_opts *dst)
6097 {
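	/*
	 * Copy only the fields that fall within the caller-declared opts_size.
	 * This preserves compatibility with callers compiled against an older,
	 * smaller struct spdk_blob_opts: fields beyond their opts_size keep the
	 * defaults set by spdk_blob_opts_init().
	 */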
6098 #define FIELD_OK(field) \
6099         offsetof(struct spdk_blob_opts, field) + sizeof(src->field) <= src->opts_size
6100 
6101 #define SET_FIELD(field) \
6102         if (FIELD_OK(field)) { \
6103                 dst->field = src->field; \
6104         } \
6105 
6106 	SET_FIELD(num_clusters);
6107 	SET_FIELD(thin_provision);
6108 	SET_FIELD(clear_method);
6109 
6110 	if (FIELD_OK(xattrs)) {
6111 		memcpy(&dst->xattrs, &src->xattrs, sizeof(src->xattrs));
6112 	}
6113 
6114 	SET_FIELD(use_extent_table);
6115 	SET_FIELD(esnap_id);
6116 	SET_FIELD(esnap_id_len);
6117 
6118 	dst->opts_size = src->opts_size;
6119 
6120 	/* You should not remove this statement, but need to update the assert statement
6121 	 * if you add a new field, and also add a corresponding SET_FIELD statement */
6122 	SPDK_STATIC_ASSERT(sizeof(struct spdk_blob_opts) == 80, "Incorrect size");
6123 
6124 #undef FIELD_OK
6125 #undef SET_FIELD
6126 }
6127 
6128 static void
6129 bs_create_blob(struct spdk_blob_store *bs,
6130 	       const struct spdk_blob_opts *opts,
6131 	       const struct spdk_blob_xattr_opts *internal_xattrs,
6132 	       spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
6133 {
6134 	struct spdk_blob	*blob;
6135 	uint32_t		page_idx;
6136 	struct spdk_bs_cpl	cpl;
6137 	struct spdk_blob_opts	opts_local;
6138 	struct spdk_blob_xattr_opts internal_xattrs_default;
6139 	spdk_bs_sequence_t	*seq;
6140 	spdk_blob_id		id;
6141 	int rc;
6142 
6143 	assert(spdk_get_thread() == bs->md_thread);
6144 
6145 	spdk_spin_lock(&bs->used_lock);
6146 	page_idx = spdk_bit_array_find_first_clear(bs->used_md_pages, 0);
6147 	if (page_idx == UINT32_MAX) {
6148 		spdk_spin_unlock(&bs->used_lock);
6149 		cb_fn(cb_arg, 0, -ENOMEM);
6150 		return;
6151 	}
6152 	spdk_bit_array_set(bs->used_blobids, page_idx);
6153 	bs_claim_md_page(bs, page_idx);
6154 	spdk_spin_unlock(&bs->used_lock);
6155 
6156 	id = bs_page_to_blobid(page_idx);
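	/* The blob id encodes the index of the blob's first metadata page;
	 * bs_page_to_blobid() tags the 32-bit page index with a marker bit in
	 * the upper half of the id. */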
6157 
6158 	SPDK_DEBUGLOG(blob, "Creating blob with id 0x%" PRIx64 " at page %u\n", id, page_idx);
6159 
6160 	spdk_blob_opts_init(&opts_local, sizeof(opts_local));
6161 	if (opts) {
6162 		blob_opts_copy(opts, &opts_local);
6163 	}
6164 
6165 	blob = blob_alloc(bs, id);
6166 	if (!blob) {
6167 		rc = -ENOMEM;
6168 		goto error;
6169 	}
6170 
6171 	blob->use_extent_table = opts_local.use_extent_table;
6172 	if (blob->use_extent_table) {
6173 		blob->invalid_flags |= SPDK_BLOB_EXTENT_TABLE;
6174 	}
6175 
6176 	if (!internal_xattrs) {
6177 		blob_xattrs_init(&internal_xattrs_default);
6178 		internal_xattrs = &internal_xattrs_default;
6179 	}
6180 
6181 	rc = blob_set_xattrs(blob, &opts_local.xattrs, false);
6182 	if (rc < 0) {
6183 		goto error;
6184 	}
6185 
6186 	rc = blob_set_xattrs(blob, internal_xattrs, true);
6187 	if (rc < 0) {
6188 		goto error;
6189 	}
6190 
6191 	if (opts_local.thin_provision) {
6192 		blob_set_thin_provision(blob);
6193 	}
6194 
6195 	blob_set_clear_method(blob, opts_local.clear_method);
6196 
6197 	if (opts_local.esnap_id != NULL) {
		if (opts_local.esnap_id_len > UINT16_MAX) {
			SPDK_ERRLOG("esnap id length %" PRIu64 " is too long\n",
				    opts_local.esnap_id_len);
			rc = -EINVAL;
			goto error;
		}
6205 		blob_set_thin_provision(blob);
6206 		blob->invalid_flags |= SPDK_BLOB_EXTERNAL_SNAPSHOT;
6207 		rc = blob_set_xattr(blob, BLOB_EXTERNAL_SNAPSHOT_ID,
6208 				    opts_local.esnap_id, opts_local.esnap_id_len, true);
6209 		if (rc != 0) {
6210 			goto error;
6211 		}
6212 	}
6213 
6214 	rc = blob_resize(blob, opts_local.num_clusters);
6215 	if (rc < 0) {
6216 		goto error;
6217 	}
6218 	cpl.type = SPDK_BS_CPL_TYPE_BLOBID;
6219 	cpl.u.blobid.cb_fn = cb_fn;
6220 	cpl.u.blobid.cb_arg = cb_arg;
6221 	cpl.u.blobid.blobid = blob->id;
6222 
6223 	seq = bs_sequence_start_bs(bs->md_channel, &cpl);
6224 	if (!seq) {
6225 		rc = -ENOMEM;
6226 		goto error;
6227 	}
6228 
6229 	blob_persist(seq, blob, bs_create_blob_cpl, blob);
6230 	return;
6231 
6232 error:
	SPDK_ERRLOG("Failed to create blob: %s, size in clusters: %" PRIu64 "\n",
		    spdk_strerror(rc), opts_local.num_clusters);
6235 	if (blob != NULL) {
6236 		blob_free(blob);
6237 	}
6238 	spdk_spin_lock(&bs->used_lock);
6239 	spdk_bit_array_clear(bs->used_blobids, page_idx);
6240 	bs_release_md_page(bs, page_idx);
6241 	spdk_spin_unlock(&bs->used_lock);
6242 	cb_fn(cb_arg, 0, rc);
6243 }
6244 
6245 void
6246 spdk_bs_create_blob(struct spdk_blob_store *bs,
6247 		    spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
6248 {
6249 	bs_create_blob(bs, NULL, NULL, cb_fn, cb_arg);
6250 }
6251 
6252 void
6253 spdk_bs_create_blob_ext(struct spdk_blob_store *bs, const struct spdk_blob_opts *opts,
6254 			spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
6255 {
6256 	bs_create_blob(bs, opts, NULL, cb_fn, cb_arg);
6257 }
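
/*
 * Usage sketch (illustrative, not part of this file; the callback name is
 * hypothetical): create a thin-provisioned blob of 10 clusters.
 *
 *	static void
 *	create_done(void *cb_arg, spdk_blob_id blobid, int bserrno)
 *	{
 *		if (bserrno == 0) {
 *			SPDK_NOTICELOG("created blob 0x%" PRIx64 "\n", blobid);
 *		}
 *	}
 *
 *	struct spdk_blob_opts opts;
 *
 *	spdk_blob_opts_init(&opts, sizeof(opts));
 *	opts.num_clusters = 10;
 *	opts.thin_provision = true;
 *	spdk_bs_create_blob_ext(bs, &opts, create_done, NULL);
 */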
6258 
6259 /* END spdk_bs_create_blob */
6260 
6261 /* START blob_cleanup */
6262 
6263 struct spdk_clone_snapshot_ctx {
6264 	struct spdk_bs_cpl      cpl;
6265 	int bserrno;
6266 	bool frozen;
6267 
6268 	struct spdk_io_channel *channel;
6269 
6270 	/* Current cluster for inflate operation */
6271 	uint64_t cluster;
6272 
	/* For inflation, force allocation of all unallocated clusters and remove
	 * thin provisioning. Otherwise, only decouple the parent and keep the clone thin. */
6275 	bool allocate_all;
6276 
6277 	struct {
6278 		spdk_blob_id id;
6279 		struct spdk_blob *blob;
6280 		bool md_ro;
6281 	} original;
6282 	struct {
6283 		spdk_blob_id id;
6284 		struct spdk_blob *blob;
6285 	} new;
6286 
	/* xattrs specified for the snapshot/clone only. They have no impact on
	 * the original blob's xattrs. */
6289 	const struct spdk_blob_xattr_opts *xattrs;
6290 };
6291 
6292 static void
6293 bs_clone_snapshot_cleanup_finish(void *cb_arg, int bserrno)
6294 {
6295 	struct spdk_clone_snapshot_ctx *ctx = cb_arg;
6296 	struct spdk_bs_cpl *cpl = &ctx->cpl;
6297 
6298 	if (bserrno != 0) {
6299 		if (ctx->bserrno != 0) {
6300 			SPDK_ERRLOG("Cleanup error %d\n", bserrno);
6301 		} else {
6302 			ctx->bserrno = bserrno;
6303 		}
6304 	}
6305 
6306 	switch (cpl->type) {
6307 	case SPDK_BS_CPL_TYPE_BLOBID:
6308 		cpl->u.blobid.cb_fn(cpl->u.blobid.cb_arg, cpl->u.blobid.blobid, ctx->bserrno);
6309 		break;
6310 	case SPDK_BS_CPL_TYPE_BLOB_BASIC:
6311 		cpl->u.blob_basic.cb_fn(cpl->u.blob_basic.cb_arg, ctx->bserrno);
6312 		break;
6313 	default:
6314 		SPDK_UNREACHABLE();
6315 		break;
6316 	}
6317 
6318 	free(ctx);
6319 }
6320 
6321 static void
6322 bs_snapshot_unfreeze_cpl(void *cb_arg, int bserrno)
6323 {
6324 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
6325 	struct spdk_blob *origblob = ctx->original.blob;
6326 
6327 	if (bserrno != 0) {
6328 		if (ctx->bserrno != 0) {
6329 			SPDK_ERRLOG("Unfreeze error %d\n", bserrno);
6330 		} else {
6331 			ctx->bserrno = bserrno;
6332 		}
6333 	}
6334 
6335 	ctx->original.id = origblob->id;
6336 	origblob->locked_operation_in_progress = false;
6337 
6338 	/* Revert md_ro to original state */
6339 	origblob->md_ro = ctx->original.md_ro;
6340 
6341 	spdk_blob_close(origblob, bs_clone_snapshot_cleanup_finish, ctx);
6342 }
6343 
6344 static void
6345 bs_clone_snapshot_origblob_cleanup(void *cb_arg, int bserrno)
6346 {
6347 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
6348 	struct spdk_blob *origblob = ctx->original.blob;
6349 
6350 	if (bserrno != 0) {
6351 		if (ctx->bserrno != 0) {
6352 			SPDK_ERRLOG("Cleanup error %d\n", bserrno);
6353 		} else {
6354 			ctx->bserrno = bserrno;
6355 		}
6356 	}
6357 
6358 	if (ctx->frozen) {
6359 		/* Unfreeze any outstanding I/O */
6360 		blob_unfreeze_io(origblob, bs_snapshot_unfreeze_cpl, ctx);
6361 	} else {
6362 		bs_snapshot_unfreeze_cpl(ctx, 0);
	}
}
6366 
6367 static void
6368 bs_clone_snapshot_newblob_cleanup(struct spdk_clone_snapshot_ctx *ctx, int bserrno)
6369 {
6370 	struct spdk_blob *newblob = ctx->new.blob;
6371 
6372 	if (bserrno != 0) {
6373 		if (ctx->bserrno != 0) {
6374 			SPDK_ERRLOG("Cleanup error %d\n", bserrno);
6375 		} else {
6376 			ctx->bserrno = bserrno;
6377 		}
6378 	}
6379 
6380 	ctx->new.id = newblob->id;
6381 	spdk_blob_close(newblob, bs_clone_snapshot_origblob_cleanup, ctx);
6382 }
6383 
6384 /* END blob_cleanup */
6385 
6386 /* START spdk_bs_create_snapshot */
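/*
 * Snapshot creation walks a chain of completions:
 *   1. Open the original blob.
 *   2. Create the snapshot as a thin blob of the same size with an internal
 *      SNAPSHOT_IN_PROGRESS xattr, then open it.
 *   3. Freeze I/O on the original blob.
 *   4. Hand the original's cluster map and back_bs_dev to the snapshot and
 *      sync the snapshot's metadata.
 *   5. Re-point the original at the snapshot (BLOB_SNAPSHOT xattr, thin
 *      provisioning) and sync its metadata.
 *   6. Drop SNAPSHOT_IN_PROGRESS, mark the snapshot read-only and sync it.
 *   7. Unfreeze I/O and close both blobs.
 * Any failure along the way unwinds through the cleanup helpers above.
 */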
6387 
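/*
 * Exchange the active cluster and extent-page arrays of two blobs. During
 * snapshot creation this transfers ownership of the allocated clusters to
 * the snapshot while leaving the original as an unallocated thin clone; it
 * is also used to put the maps back on error paths.
 */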
6388 static void
6389 bs_snapshot_swap_cluster_maps(struct spdk_blob *blob1, struct spdk_blob *blob2)
6390 {
6391 	uint64_t *cluster_temp;
6392 	uint32_t *extent_page_temp;
6393 
6394 	cluster_temp = blob1->active.clusters;
6395 	blob1->active.clusters = blob2->active.clusters;
6396 	blob2->active.clusters = cluster_temp;
6397 
6398 	extent_page_temp = blob1->active.extent_pages;
6399 	blob1->active.extent_pages = blob2->active.extent_pages;
6400 	blob2->active.extent_pages = extent_page_temp;
6401 }
6402 
6403 /* Copies an internal xattr */
6404 static int
6405 bs_snapshot_copy_xattr(struct spdk_blob *toblob, struct spdk_blob *fromblob, const char *name)
6406 {
6407 	const void	*val = NULL;
6408 	size_t		len;
6409 	int		bserrno;
6410 
6411 	bserrno = blob_get_xattr_value(fromblob, name, &val, &len, true);
6412 	if (bserrno != 0) {
6413 		SPDK_ERRLOG("blob 0x%" PRIx64 " missing %s XATTR\n", fromblob->id, name);
6414 		return bserrno;
6415 	}
6416 
6417 	bserrno = blob_set_xattr(toblob, name, val, len, true);
6418 	if (bserrno != 0) {
6419 		SPDK_ERRLOG("could not set %s XATTR on blob 0x%" PRIx64 "\n",
6420 			    name, toblob->id);
6421 		return bserrno;
6422 	}
6423 	return 0;
6424 }
6425 
6426 static void
6427 bs_snapshot_origblob_sync_cpl(void *cb_arg, int bserrno)
6428 {
6429 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
6430 	struct spdk_blob *origblob = ctx->original.blob;
6431 	struct spdk_blob *newblob = ctx->new.blob;
6432 
6433 	if (bserrno != 0) {
6434 		bs_snapshot_swap_cluster_maps(newblob, origblob);
6435 		if (blob_is_esnap_clone(newblob)) {
6436 			bs_snapshot_copy_xattr(origblob, newblob, BLOB_EXTERNAL_SNAPSHOT_ID);
6437 			origblob->invalid_flags |= SPDK_BLOB_EXTERNAL_SNAPSHOT;
6438 		}
6439 		bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
6440 		return;
6441 	}
6442 
6443 	/* Remove metadata descriptor SNAPSHOT_IN_PROGRESS */
6444 	bserrno = blob_remove_xattr(newblob, SNAPSHOT_IN_PROGRESS, true);
6445 	if (bserrno != 0) {
6446 		bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
6447 		return;
6448 	}
6449 
6450 	bs_blob_list_add(ctx->original.blob);
6451 
6452 	spdk_blob_set_read_only(newblob);
6453 
6454 	/* sync snapshot metadata */
6455 	spdk_blob_sync_md(newblob, bs_clone_snapshot_origblob_cleanup, ctx);
6456 }
6457 
6458 static void
6459 bs_snapshot_newblob_sync_cpl(void *cb_arg, int bserrno)
6460 {
6461 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
6462 	struct spdk_blob *origblob = ctx->original.blob;
6463 	struct spdk_blob *newblob = ctx->new.blob;
6464 
6465 	if (bserrno != 0) {
6466 		/* return cluster map back to original */
6467 		bs_snapshot_swap_cluster_maps(newblob, origblob);
6468 
		/* Newblob md sync failed. Valid clusters are only present in origblob.
		 * Since I/O is frozen on origblob, no changes to the zeroed-out cluster map should have occurred.
		 * Newblob needs to be reverted to the thin-provisioned state it had at creation to close properly. */
6472 		blob_set_thin_provision(newblob);
6473 		assert(spdk_mem_all_zero(newblob->active.clusters,
6474 					 newblob->active.num_clusters * sizeof(*newblob->active.clusters)));
6475 		assert(spdk_mem_all_zero(newblob->active.extent_pages,
6476 					 newblob->active.num_extent_pages * sizeof(*newblob->active.extent_pages)));
6477 
6478 		bs_clone_snapshot_newblob_cleanup(ctx, bserrno);
6479 		return;
6480 	}
6481 
6482 	/* Set internal xattr for snapshot id */
6483 	bserrno = blob_set_xattr(origblob, BLOB_SNAPSHOT, &newblob->id, sizeof(spdk_blob_id), true);
6484 	if (bserrno != 0) {
6485 		/* return cluster map back to original */
6486 		bs_snapshot_swap_cluster_maps(newblob, origblob);
6487 		blob_set_thin_provision(newblob);
6488 		bs_clone_snapshot_newblob_cleanup(ctx, bserrno);
6489 		return;
6490 	}
6491 
6492 	/* Create new back_bs_dev for snapshot */
6493 	origblob->back_bs_dev = bs_create_blob_bs_dev(newblob);
6494 	if (origblob->back_bs_dev == NULL) {
6495 		/* return cluster map back to original */
6496 		bs_snapshot_swap_cluster_maps(newblob, origblob);
6497 		blob_set_thin_provision(newblob);
6498 		bs_clone_snapshot_newblob_cleanup(ctx, -EINVAL);
6499 		return;
6500 	}
6501 
6502 	/* Remove the xattr that references an external snapshot */
6503 	if (blob_is_esnap_clone(origblob)) {
6504 		origblob->invalid_flags &= ~SPDK_BLOB_EXTERNAL_SNAPSHOT;
6505 		bserrno = blob_remove_xattr(origblob, BLOB_EXTERNAL_SNAPSHOT_ID, true);
6506 		if (bserrno != 0) {
6507 			if (bserrno == -ENOENT) {
6508 				SPDK_ERRLOG("blob 0x%" PRIx64 " has no " BLOB_EXTERNAL_SNAPSHOT_ID
6509 					    " xattr to remove\n", origblob->id);
6510 				assert(false);
6511 			} else {
6512 				/* return cluster map back to original */
6513 				bs_snapshot_swap_cluster_maps(newblob, origblob);
6514 				blob_set_thin_provision(newblob);
6515 				bs_clone_snapshot_newblob_cleanup(ctx, bserrno);
6516 				return;
6517 			}
6518 		}
6519 	}
6520 
6521 	bs_blob_list_remove(origblob);
6522 	origblob->parent_id = newblob->id;
6523 	/* set clone blob as thin provisioned */
6524 	blob_set_thin_provision(origblob);
6525 
6526 	bs_blob_list_add(newblob);
6527 
6528 	/* sync clone metadata */
6529 	spdk_blob_sync_md(origblob, bs_snapshot_origblob_sync_cpl, ctx);
6530 }
6531 
6532 static void
6533 bs_snapshot_freeze_cpl(void *cb_arg, int rc)
6534 {
6535 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
6536 	struct spdk_blob *origblob = ctx->original.blob;
6537 	struct spdk_blob *newblob = ctx->new.blob;
6538 	int bserrno;
6539 
6540 	if (rc != 0) {
6541 		bs_clone_snapshot_newblob_cleanup(ctx, rc);
6542 		return;
6543 	}
6544 
6545 	ctx->frozen = true;
6546 
6547 	if (blob_is_esnap_clone(origblob)) {
6548 		/* Clean up any channels associated with the original blob id because future IO will
6549 		 * perform IO using the snapshot blob_id.
6550 		 */
6551 		blob_esnap_destroy_bs_dev_channels(origblob, false, NULL, NULL);
6552 	}
6553 	if (newblob->back_bs_dev) {
6554 		blob_back_bs_destroy(newblob);
6555 	}
6556 	/* set new back_bs_dev for snapshot */
6557 	newblob->back_bs_dev = origblob->back_bs_dev;
6558 	/* Set invalid flags from origblob */
6559 	newblob->invalid_flags = origblob->invalid_flags;
6560 
6561 	/* inherit parent from original blob if set */
6562 	newblob->parent_id = origblob->parent_id;
6563 	switch (origblob->parent_id) {
6564 	case SPDK_BLOBID_EXTERNAL_SNAPSHOT:
6565 		bserrno = bs_snapshot_copy_xattr(newblob, origblob, BLOB_EXTERNAL_SNAPSHOT_ID);
6566 		if (bserrno != 0) {
6567 			bs_clone_snapshot_newblob_cleanup(ctx, bserrno);
6568 			return;
6569 		}
6570 		break;
6571 	case SPDK_BLOBID_INVALID:
6572 		break;
6573 	default:
6574 		/* Set internal xattr for snapshot id */
6575 		bserrno = blob_set_xattr(newblob, BLOB_SNAPSHOT,
6576 					 &origblob->parent_id, sizeof(spdk_blob_id), true);
6577 		if (bserrno != 0) {
6578 			bs_clone_snapshot_newblob_cleanup(ctx, bserrno);
6579 			return;
6580 		}
6581 	}
6582 
6583 	/* swap cluster maps */
6584 	bs_snapshot_swap_cluster_maps(newblob, origblob);
6585 
6586 	/* Set the clear method on the new blob to match the original. */
6587 	blob_set_clear_method(newblob, origblob->clear_method);
6588 
6589 	/* sync snapshot metadata */
6590 	spdk_blob_sync_md(newblob, bs_snapshot_newblob_sync_cpl, ctx);
6591 }
6592 
6593 static void
6594 bs_snapshot_newblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
6595 {
6596 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
6597 	struct spdk_blob *origblob = ctx->original.blob;
6598 	struct spdk_blob *newblob = _blob;
6599 
6600 	if (bserrno != 0) {
6601 		bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
6602 		return;
6603 	}
6604 
6605 	ctx->new.blob = newblob;
6606 	assert(spdk_blob_is_thin_provisioned(newblob));
6607 	assert(spdk_mem_all_zero(newblob->active.clusters,
6608 				 newblob->active.num_clusters * sizeof(*newblob->active.clusters)));
6609 	assert(spdk_mem_all_zero(newblob->active.extent_pages,
6610 				 newblob->active.num_extent_pages * sizeof(*newblob->active.extent_pages)));
6611 
6612 	blob_freeze_io(origblob, bs_snapshot_freeze_cpl, ctx);
6613 }
6614 
6615 static void
6616 bs_snapshot_newblob_create_cpl(void *cb_arg, spdk_blob_id blobid, int bserrno)
6617 {
6618 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
6619 	struct spdk_blob *origblob = ctx->original.blob;
6620 
6621 	if (bserrno != 0) {
6622 		bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
6623 		return;
6624 	}
6625 
6626 	ctx->new.id = blobid;
6627 	ctx->cpl.u.blobid.blobid = blobid;
6628 
6629 	spdk_bs_open_blob(origblob->bs, ctx->new.id, bs_snapshot_newblob_open_cpl, ctx);
6630 }
6631 
6633 static void
6634 bs_xattr_snapshot(void *arg, const char *name,
6635 		  const void **value, size_t *value_len)
6636 {
	struct spdk_blob *blob = (struct spdk_blob *)arg;

	assert(strncmp(name, SNAPSHOT_IN_PROGRESS, sizeof(SNAPSHOT_IN_PROGRESS)) == 0);

	*value = &blob->id;
	*value_len = sizeof(blob->id);
6642 }
6643 
6644 static void
6645 bs_snapshot_origblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
6646 {
6647 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
6648 	struct spdk_blob_opts opts;
6649 	struct spdk_blob_xattr_opts internal_xattrs;
6650 	char *xattrs_names[] = { SNAPSHOT_IN_PROGRESS };
6651 
6652 	if (bserrno != 0) {
6653 		bs_clone_snapshot_cleanup_finish(ctx, bserrno);
6654 		return;
6655 	}
6656 
6657 	ctx->original.blob = _blob;
6658 
6659 	if (_blob->data_ro || _blob->md_ro) {
		SPDK_DEBUGLOG(blob, "Cannot create snapshot from read-only blob with id 0x%"
			      PRIx64 "\n", _blob->id);
6662 		ctx->bserrno = -EINVAL;
6663 		spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx);
6664 		return;
6665 	}
6666 
6667 	if (_blob->locked_operation_in_progress) {
6668 		SPDK_DEBUGLOG(blob, "Cannot create snapshot - another operation in progress\n");
6669 		ctx->bserrno = -EBUSY;
6670 		spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx);
6671 		return;
6672 	}
6673 
6674 	_blob->locked_operation_in_progress = true;
6675 
6676 	spdk_blob_opts_init(&opts, sizeof(opts));
6677 	blob_xattrs_init(&internal_xattrs);
6678 
6679 	/* Change the size of new blob to the same as in original blob,
6680 	 * but do not allocate clusters */
6681 	opts.thin_provision = true;
6682 	opts.num_clusters = spdk_blob_get_num_clusters(_blob);
6683 	opts.use_extent_table = _blob->use_extent_table;
6684 
6685 	/* If there are any xattrs specified for snapshot, set them now */
6686 	if (ctx->xattrs) {
6687 		memcpy(&opts.xattrs, ctx->xattrs, sizeof(*ctx->xattrs));
6688 	}
6689 	/* Set internal xattr SNAPSHOT_IN_PROGRESS */
6690 	internal_xattrs.count = 1;
6691 	internal_xattrs.ctx = _blob;
6692 	internal_xattrs.names = xattrs_names;
6693 	internal_xattrs.get_value = bs_xattr_snapshot;
6694 
6695 	bs_create_blob(_blob->bs, &opts, &internal_xattrs,
6696 		       bs_snapshot_newblob_create_cpl, ctx);
6697 }
6698 
6699 void
6700 spdk_bs_create_snapshot(struct spdk_blob_store *bs, spdk_blob_id blobid,
6701 			const struct spdk_blob_xattr_opts *snapshot_xattrs,
6702 			spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
6703 {
6704 	struct spdk_clone_snapshot_ctx *ctx = calloc(1, sizeof(*ctx));
6705 
6706 	if (!ctx) {
6707 		cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOMEM);
6708 		return;
6709 	}
6710 	ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOBID;
6711 	ctx->cpl.u.blobid.cb_fn = cb_fn;
6712 	ctx->cpl.u.blobid.cb_arg = cb_arg;
6713 	ctx->cpl.u.blobid.blobid = SPDK_BLOBID_INVALID;
6714 	ctx->bserrno = 0;
6715 	ctx->frozen = false;
6716 	ctx->original.id = blobid;
6717 	ctx->xattrs = snapshot_xattrs;
6718 
6719 	spdk_bs_open_blob(bs, ctx->original.id, bs_snapshot_origblob_open_cpl, ctx);
6720 }
6721 /* END spdk_bs_create_snapshot */
6722 
6723 /* START spdk_bs_create_clone */
6724 
6725 static void
6726 bs_xattr_clone(void *arg, const char *name,
6727 	       const void **value, size_t *value_len)
6728 {
	struct spdk_blob *blob = (struct spdk_blob *)arg;

	assert(strncmp(name, BLOB_SNAPSHOT, sizeof(BLOB_SNAPSHOT)) == 0);

	*value = &blob->id;
	*value_len = sizeof(blob->id);
6734 }
6735 
6736 static void
6737 bs_clone_newblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
6738 {
6739 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
6740 	struct spdk_blob *clone = _blob;
6741 
6742 	ctx->new.blob = clone;
6743 	bs_blob_list_add(clone);
6744 
6745 	spdk_blob_close(clone, bs_clone_snapshot_origblob_cleanup, ctx);
6746 }
6747 
6748 static void
6749 bs_clone_newblob_create_cpl(void *cb_arg, spdk_blob_id blobid, int bserrno)
6750 {
6751 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
6752 
6753 	ctx->cpl.u.blobid.blobid = blobid;
6754 	spdk_bs_open_blob(ctx->original.blob->bs, blobid, bs_clone_newblob_open_cpl, ctx);
6755 }
6756 
6757 static void
6758 bs_clone_origblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
6759 {
6760 	struct spdk_clone_snapshot_ctx	*ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
6761 	struct spdk_blob_opts		opts;
6762 	struct spdk_blob_xattr_opts internal_xattrs;
6763 	char *xattr_names[] = { BLOB_SNAPSHOT };
6764 
6765 	if (bserrno != 0) {
6766 		bs_clone_snapshot_cleanup_finish(ctx, bserrno);
6767 		return;
6768 	}
6769 
6770 	ctx->original.blob = _blob;
6771 	ctx->original.md_ro = _blob->md_ro;
6772 
6773 	if (!_blob->data_ro || !_blob->md_ro) {
		SPDK_DEBUGLOG(blob, "Cannot create a clone of a blob that is not read-only\n");
6775 		ctx->bserrno = -EINVAL;
6776 		spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx);
6777 		return;
6778 	}
6779 
6780 	if (_blob->locked_operation_in_progress) {
6781 		SPDK_DEBUGLOG(blob, "Cannot create clone - another operation in progress\n");
6782 		ctx->bserrno = -EBUSY;
6783 		spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx);
6784 		return;
6785 	}
6786 
6787 	_blob->locked_operation_in_progress = true;
6788 
6789 	spdk_blob_opts_init(&opts, sizeof(opts));
6790 	blob_xattrs_init(&internal_xattrs);
6791 
6792 	opts.thin_provision = true;
6793 	opts.num_clusters = spdk_blob_get_num_clusters(_blob);
6794 	opts.use_extent_table = _blob->use_extent_table;
6795 	if (ctx->xattrs) {
6796 		memcpy(&opts.xattrs, ctx->xattrs, sizeof(*ctx->xattrs));
6797 	}
6798 
6799 	/* Set internal xattr BLOB_SNAPSHOT */
6800 	internal_xattrs.count = 1;
6801 	internal_xattrs.ctx = _blob;
6802 	internal_xattrs.names = xattr_names;
6803 	internal_xattrs.get_value = bs_xattr_clone;
6804 
6805 	bs_create_blob(_blob->bs, &opts, &internal_xattrs,
6806 		       bs_clone_newblob_create_cpl, ctx);
6807 }
6808 
6809 void
6810 spdk_bs_create_clone(struct spdk_blob_store *bs, spdk_blob_id blobid,
6811 		     const struct spdk_blob_xattr_opts *clone_xattrs,
6812 		     spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
6813 {
6814 	struct spdk_clone_snapshot_ctx	*ctx = calloc(1, sizeof(*ctx));
6815 
6816 	if (!ctx) {
6817 		cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOMEM);
6818 		return;
6819 	}
6820 
6821 	ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOBID;
6822 	ctx->cpl.u.blobid.cb_fn = cb_fn;
6823 	ctx->cpl.u.blobid.cb_arg = cb_arg;
6824 	ctx->cpl.u.blobid.blobid = SPDK_BLOBID_INVALID;
6825 	ctx->bserrno = 0;
6826 	ctx->xattrs = clone_xattrs;
6827 	ctx->original.id = blobid;
6828 
6829 	spdk_bs_open_blob(bs, ctx->original.id, bs_clone_origblob_open_cpl, ctx);
6830 }
6831 
6832 /* END spdk_bs_create_clone */
6833 
6834 /* START spdk_bs_inflate_blob */
6835 
6836 static void
6837 bs_inflate_blob_set_parent_cpl(void *cb_arg, struct spdk_blob *_parent, int bserrno)
6838 {
6839 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
6840 	struct spdk_blob *_blob = ctx->original.blob;
6841 
6842 	if (bserrno != 0) {
6843 		bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
6844 		return;
6845 	}
6846 
6847 	/* Temporarily override md_ro flag for MD modification */
6848 	_blob->md_ro = false;
6849 
6850 	bserrno = blob_set_xattr(_blob, BLOB_SNAPSHOT, &_parent->id, sizeof(spdk_blob_id), true);
6851 	if (bserrno != 0) {
6852 		bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
6853 		return;
6854 	}
6855 
6856 	assert(_parent != NULL);
6857 
6858 	bs_blob_list_remove(_blob);
6859 	_blob->parent_id = _parent->id;
6860 
6861 	blob_back_bs_destroy(_blob);
6862 	_blob->back_bs_dev = bs_create_blob_bs_dev(_parent);
6863 	bs_blob_list_add(_blob);
6864 
6865 	spdk_blob_sync_md(_blob, bs_clone_snapshot_origblob_cleanup, ctx);
6866 }
6867 
6868 static void
6869 bs_inflate_blob_done(struct spdk_clone_snapshot_ctx *ctx)
6870 {
6871 	struct spdk_blob *_blob = ctx->original.blob;
6872 	struct spdk_blob *_parent;
6873 
6874 	if (ctx->allocate_all) {
6875 		/* remove thin provisioning */
6876 		bs_blob_list_remove(_blob);
6877 		if (_blob->parent_id == SPDK_BLOBID_EXTERNAL_SNAPSHOT) {
6878 			blob_remove_xattr(_blob, BLOB_EXTERNAL_SNAPSHOT_ID, true);
6879 			_blob->invalid_flags &= ~SPDK_BLOB_EXTERNAL_SNAPSHOT;
6880 		} else {
6881 			blob_remove_xattr(_blob, BLOB_SNAPSHOT, true);
6882 		}
		_blob->invalid_flags &= ~SPDK_BLOB_THIN_PROV;
6884 		blob_back_bs_destroy(_blob);
6885 		_blob->parent_id = SPDK_BLOBID_INVALID;
6886 	} else {
6887 		/* For now, esnap clones always have allocate_all set. */
6888 		assert(!blob_is_esnap_clone(_blob));
6889 
6890 		_parent = ((struct spdk_blob_bs_dev *)(_blob->back_bs_dev))->blob;
6891 		if (_parent->parent_id != SPDK_BLOBID_INVALID) {
6892 			/* We must change the parent of the inflated blob */
6893 			spdk_bs_open_blob(_blob->bs, _parent->parent_id,
6894 					  bs_inflate_blob_set_parent_cpl, ctx);
6895 			return;
6896 		}
6897 
6898 		bs_blob_list_remove(_blob);
6899 		_blob->parent_id = SPDK_BLOBID_INVALID;
6900 		blob_back_bs_destroy(_blob);
6901 		_blob->back_bs_dev = bs_create_zeroes_dev();
6902 	}
6903 
6904 	/* Temporarily override md_ro flag for MD modification */
6905 	_blob->md_ro = false;
6906 	blob_remove_xattr(_blob, BLOB_SNAPSHOT, true);
6907 	_blob->state = SPDK_BLOB_STATE_DIRTY;
6908 
6909 	spdk_blob_sync_md(_blob, bs_clone_snapshot_origblob_cleanup, ctx);
6910 }
6911 
6912 /* Check if cluster needs allocation */
6913 static inline bool
6914 bs_cluster_needs_allocation(struct spdk_blob *blob, uint64_t cluster, bool allocate_all)
6915 {
6916 	struct spdk_blob_bs_dev *b;
6917 
6918 	assert(blob != NULL);
6919 
6920 	if (blob->active.clusters[cluster] != 0) {
6921 		/* Cluster is already allocated */
6922 		return false;
6923 	}
6924 
6925 	if (blob->parent_id == SPDK_BLOBID_INVALID) {
		/* Blob has no parent blob */
6927 		return allocate_all;
6928 	}
6929 
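	/* The allocation map of an external snapshot is opaque to the
	 * blobstore, so conservatively copy every unallocated cluster. */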
6930 	if (blob->parent_id == SPDK_BLOBID_EXTERNAL_SNAPSHOT) {
6931 		return true;
6932 	}
6933 
6934 	b = (struct spdk_blob_bs_dev *)blob->back_bs_dev;
6935 	return (allocate_all || b->blob->active.clusters[cluster] != 0);
6936 }
6937 
6938 static void
6939 bs_inflate_blob_touch_next(void *cb_arg, int bserrno)
6940 {
6941 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
6942 	struct spdk_blob *_blob = ctx->original.blob;
6943 	struct spdk_bs_cpl cpl;
6944 	spdk_bs_user_op_t *op;
6945 	uint64_t offset;
6946 
6947 	if (bserrno != 0) {
6948 		bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
6949 		return;
6950 	}
6951 
6952 	for (; ctx->cluster < _blob->active.num_clusters; ctx->cluster++) {
6953 		if (bs_cluster_needs_allocation(_blob, ctx->cluster, ctx->allocate_all)) {
6954 			break;
6955 		}
6956 	}
6957 
6958 	if (ctx->cluster < _blob->active.num_clusters) {
6959 		offset = bs_cluster_to_lba(_blob->bs, ctx->cluster);
6960 
6961 		/* We may safely increment a cluster before copying */
6962 		ctx->cluster++;
6963 
6964 		/* Use a dummy 0B read as a context for cluster copy */
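		/* The zero-length op only carries the completion: once
		 * bs_allocate_and_copy_cluster() has allocated the cluster and
		 * copied its contents from the backing device, completing the
		 * op re-enters bs_inflate_blob_touch_next() for the next one. */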
6965 		cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
6966 		cpl.u.blob_basic.cb_fn = bs_inflate_blob_touch_next;
6967 		cpl.u.blob_basic.cb_arg = ctx;
6968 
6969 		op = bs_user_op_alloc(ctx->channel, &cpl, SPDK_BLOB_READ, _blob,
6970 				      NULL, 0, offset, 0);
6971 		if (!op) {
6972 			bs_clone_snapshot_origblob_cleanup(ctx, -ENOMEM);
6973 			return;
6974 		}
6975 
6976 		bs_allocate_and_copy_cluster(_blob, ctx->channel, offset, op);
6977 	} else {
6978 		bs_inflate_blob_done(ctx);
6979 	}
6980 }
6981 
6982 static void
6983 bs_inflate_blob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
6984 {
6985 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
6986 	uint64_t clusters_needed;
6987 	uint64_t i;
6988 
6989 	if (bserrno != 0) {
6990 		bs_clone_snapshot_cleanup_finish(ctx, bserrno);
6991 		return;
6992 	}
6993 
6994 	ctx->original.blob = _blob;
6995 	ctx->original.md_ro = _blob->md_ro;
6996 
6997 	if (_blob->locked_operation_in_progress) {
6998 		SPDK_DEBUGLOG(blob, "Cannot inflate blob - another operation in progress\n");
6999 		ctx->bserrno = -EBUSY;
7000 		spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx);
7001 		return;
7002 	}
7003 
7004 	_blob->locked_operation_in_progress = true;
7005 
7006 	switch (_blob->parent_id) {
7007 	case SPDK_BLOBID_INVALID:
7008 		if (!ctx->allocate_all) {
7009 			/* This blob has no parent, so we cannot decouple it. */
7010 			SPDK_ERRLOG("Cannot decouple parent of blob with no parent.\n");
7011 			bs_clone_snapshot_origblob_cleanup(ctx, -EINVAL);
7012 			return;
7013 		}
7014 		break;
7015 	case SPDK_BLOBID_EXTERNAL_SNAPSHOT:
7016 		/*
7017 		 * It would be better to rely on back_bs_dev->is_zeroes(), to determine which
7018 		 * clusters require allocation. Until there is a blobstore consumer that
7019 		 * uses esnaps with an spdk_bs_dev that implements a useful is_zeroes() it is not
7020 		 * worth the effort.
7021 		 */
7022 		ctx->allocate_all = true;
7023 		break;
7024 	default:
7025 		break;
7026 	}
7027 
7028 	if (spdk_blob_is_thin_provisioned(_blob) == false) {
		/* This is not a thin-provisioned blob. No need to inflate. */
7030 		bs_clone_snapshot_origblob_cleanup(ctx, 0);
7031 		return;
7032 	}
7033 
7034 	/* Do two passes - one to verify that we can obtain enough clusters
7035 	 * and another to actually claim them.
7036 	 */
7037 	clusters_needed = 0;
7038 	for (i = 0; i < _blob->active.num_clusters; i++) {
7039 		if (bs_cluster_needs_allocation(_blob, i, ctx->allocate_all)) {
7040 			clusters_needed++;
7041 		}
7042 	}
7043 
7044 	if (clusters_needed > _blob->bs->num_free_clusters) {
7045 		/* Not enough free clusters. Cannot satisfy the request. */
7046 		bs_clone_snapshot_origblob_cleanup(ctx, -ENOSPC);
7047 		return;
7048 	}
7049 
7050 	ctx->cluster = 0;
7051 	bs_inflate_blob_touch_next(ctx, 0);
7052 }
7053 
7054 static void
7055 bs_inflate_blob(struct spdk_blob_store *bs, struct spdk_io_channel *channel,
7056 		spdk_blob_id blobid, bool allocate_all, spdk_blob_op_complete cb_fn, void *cb_arg)
7057 {
7058 	struct spdk_clone_snapshot_ctx *ctx = calloc(1, sizeof(*ctx));
7059 
7060 	if (!ctx) {
7061 		cb_fn(cb_arg, -ENOMEM);
7062 		return;
7063 	}
7064 	ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
7065 	ctx->cpl.u.bs_basic.cb_fn = cb_fn;
7066 	ctx->cpl.u.bs_basic.cb_arg = cb_arg;
7067 	ctx->bserrno = 0;
7068 	ctx->original.id = blobid;
7069 	ctx->channel = channel;
7070 	ctx->allocate_all = allocate_all;
7071 
7072 	spdk_bs_open_blob(bs, ctx->original.id, bs_inflate_blob_open_cpl, ctx);
7073 }
7074 
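/*
 * Public entry points. spdk_bs_inflate_blob() allocates every unallocated
 * cluster and removes the dependency on the parent entirely, while
 * spdk_bs_blob_decouple_parent() copies only the clusters backed by the
 * immediate parent. A minimal usage sketch (the callback name is
 * hypothetical):
 *
 *	static void
 *	inflate_done(void *cb_arg, int bserrno)
 *	{
 *		// bserrno == 0: the blob no longer depends on its parent
 *	}
 *
 *	spdk_bs_inflate_blob(bs, channel, blobid, inflate_done, NULL);
 */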
7075 void
7076 spdk_bs_inflate_blob(struct spdk_blob_store *bs, struct spdk_io_channel *channel,
7077 		     spdk_blob_id blobid, spdk_blob_op_complete cb_fn, void *cb_arg)
7078 {
7079 	bs_inflate_blob(bs, channel, blobid, true, cb_fn, cb_arg);
7080 }
7081 
7082 void
7083 spdk_bs_blob_decouple_parent(struct spdk_blob_store *bs, struct spdk_io_channel *channel,
7084 			     spdk_blob_id blobid, spdk_blob_op_complete cb_fn, void *cb_arg)
7085 {
7086 	bs_inflate_blob(bs, channel, blobid, false, cb_fn, cb_arg);
7087 }
7088 /* END spdk_bs_inflate_blob */
7089 
7090 /* START spdk_blob_resize */
7091 struct spdk_bs_resize_ctx {
7092 	spdk_blob_op_complete cb_fn;
7093 	void *cb_arg;
7094 	struct spdk_blob *blob;
7095 	uint64_t sz;
7096 	int rc;
7097 };
7098 
7099 static void
7100 bs_resize_unfreeze_cpl(void *cb_arg, int rc)
7101 {
7102 	struct spdk_bs_resize_ctx *ctx = (struct spdk_bs_resize_ctx *)cb_arg;
7103 
7104 	if (rc != 0) {
7105 		SPDK_ERRLOG("Unfreeze failed, rc=%d\n", rc);
7106 	}
7107 
7108 	if (ctx->rc != 0) {
		SPDK_ERRLOG("Resize failed, ctx->rc=%d\n", ctx->rc);
7110 		rc = ctx->rc;
7111 	}
7112 
7113 	ctx->blob->locked_operation_in_progress = false;
7114 
7115 	ctx->cb_fn(ctx->cb_arg, rc);
7116 	free(ctx);
7117 }
7118 
7119 static void
7120 bs_resize_freeze_cpl(void *cb_arg, int rc)
7121 {
7122 	struct spdk_bs_resize_ctx *ctx = (struct spdk_bs_resize_ctx *)cb_arg;
7123 
7124 	if (rc != 0) {
7125 		ctx->blob->locked_operation_in_progress = false;
7126 		ctx->cb_fn(ctx->cb_arg, rc);
7127 		free(ctx);
7128 		return;
7129 	}
7130 
7131 	ctx->rc = blob_resize(ctx->blob, ctx->sz);
7132 
7133 	blob_unfreeze_io(ctx->blob, bs_resize_unfreeze_cpl, ctx);
7134 }
7135 
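/*
 * Resize runs with I/O frozen on the blob: blob_freeze_io() ->
 * bs_resize_freeze_cpl() -> blob_resize() -> blob_unfreeze_io(), with any
 * resize error reported through the unfreeze completion. Minimal usage
 * sketch (the callback name is hypothetical):
 *
 *	static void
 *	resize_done(void *cb_arg, int bserrno)
 *	{
 *		// -EPERM: md is read-only, -EBUSY: another locked operation
 *	}
 *
 *	spdk_blob_resize(blob, new_num_clusters, resize_done, NULL);
 */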
7136 void
7137 spdk_blob_resize(struct spdk_blob *blob, uint64_t sz, spdk_blob_op_complete cb_fn, void *cb_arg)
7138 {
7139 	struct spdk_bs_resize_ctx *ctx;
7140 
7141 	blob_verify_md_op(blob);
7142 
7143 	SPDK_DEBUGLOG(blob, "Resizing blob 0x%" PRIx64 " to %" PRIu64 " clusters\n", blob->id, sz);
7144 
7145 	if (blob->md_ro) {
7146 		cb_fn(cb_arg, -EPERM);
7147 		return;
7148 	}
7149 
7150 	if (sz == blob->active.num_clusters) {
7151 		cb_fn(cb_arg, 0);
7152 		return;
7153 	}
7154 
7155 	if (blob->locked_operation_in_progress) {
7156 		cb_fn(cb_arg, -EBUSY);
7157 		return;
7158 	}
7159 
7160 	ctx = calloc(1, sizeof(*ctx));
7161 	if (!ctx) {
7162 		cb_fn(cb_arg, -ENOMEM);
7163 		return;
7164 	}
7165 
7166 	blob->locked_operation_in_progress = true;
7167 	ctx->cb_fn = cb_fn;
7168 	ctx->cb_arg = cb_arg;
7169 	ctx->blob = blob;
7170 	ctx->sz = sz;
7171 	blob_freeze_io(blob, bs_resize_freeze_cpl, ctx);
7172 }
7173 
7174 /* END spdk_blob_resize */
7177 /* START spdk_bs_delete_blob */
7178 
7179 static void
7180 bs_delete_close_cpl(void *cb_arg, int bserrno)
7181 {
7182 	spdk_bs_sequence_t *seq = cb_arg;
7183 
7184 	bs_sequence_finish(seq, bserrno);
7185 }
7186 
7187 static void
7188 bs_delete_persist_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
7189 {
7190 	struct spdk_blob *blob = cb_arg;
7191 
7192 	if (bserrno != 0) {
7193 		/*
		 * We already removed this blob from the blobstore's open blob tree, so
7195 		 *  we need to free it here since this is the last reference
7196 		 *  to it.
7197 		 */
7198 		blob_free(blob);
7199 		bs_delete_close_cpl(seq, bserrno);
7200 		return;
7201 	}
7202 
7203 	/*
7204 	 * This will immediately decrement the ref_count and call
7205 	 *  the completion routine since the metadata state is clean.
7206 	 *  By calling spdk_blob_close, we reduce the number of call
7207 	 *  points into code that touches the blob->open_ref count
7208 	 *  and the blobstore's blob list.
7209 	 */
7210 	spdk_blob_close(blob, bs_delete_close_cpl, seq);
7211 }
7212 
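/*
 * Deleting a snapshot that has exactly one clone proceeds roughly as follows:
 * open the clone and freeze its I/O, mark the snapshot with the
 * SNAPSHOT_PENDING_REMOVAL xattr and sync its md, copy the snapshot's cluster
 * map and extent pages into the clone and re-parent the clone, sync the clone
 * md, clear the now-shared entries from the snapshot and sync it again, then
 * unfreeze the clone's I/O and close both blobs. Each step has a cleanup path
 * that restores the md_ro flags and, on failure, re-inserts the snapshot into
 * the open-blob tree.
 */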
7213 struct delete_snapshot_ctx {
7214 	struct spdk_blob_list *parent_snapshot_entry;
7215 	struct spdk_blob *snapshot;
7216 	struct spdk_blob_md_page *page;
7217 	bool snapshot_md_ro;
7218 	struct spdk_blob *clone;
7219 	bool clone_md_ro;
7220 	spdk_blob_op_with_handle_complete cb_fn;
7221 	void *cb_arg;
7222 	int bserrno;
7223 	uint32_t next_extent_page;
7224 };
7225 
7226 static void
7227 delete_blob_cleanup_finish(void *cb_arg, int bserrno)
7228 {
7229 	struct delete_snapshot_ctx *ctx = cb_arg;
7230 
7231 	if (bserrno != 0) {
7232 		SPDK_ERRLOG("Snapshot cleanup error %d\n", bserrno);
7233 	}
7234 
7235 	assert(ctx != NULL);
7236 
7237 	if (bserrno != 0 && ctx->bserrno == 0) {
7238 		ctx->bserrno = bserrno;
7239 	}
7240 
7241 	ctx->cb_fn(ctx->cb_arg, ctx->snapshot, ctx->bserrno);
7242 	spdk_free(ctx->page);
7243 	free(ctx);
7244 }
7245 
7246 static void
7247 delete_snapshot_cleanup_snapshot(void *cb_arg, int bserrno)
7248 {
7249 	struct delete_snapshot_ctx *ctx = cb_arg;
7250 
7251 	if (bserrno != 0) {
7252 		ctx->bserrno = bserrno;
7253 		SPDK_ERRLOG("Clone cleanup error %d\n", bserrno);
7254 	}
7255 
7256 	if (ctx->bserrno != 0) {
7257 		assert(blob_lookup(ctx->snapshot->bs, ctx->snapshot->id) == NULL);
7258 		RB_INSERT(spdk_blob_tree, &ctx->snapshot->bs->open_blobs, ctx->snapshot);
7259 		spdk_bit_array_set(ctx->snapshot->bs->open_blobids, ctx->snapshot->id);
7260 	}
7261 
7262 	ctx->snapshot->locked_operation_in_progress = false;
7263 	ctx->snapshot->md_ro = ctx->snapshot_md_ro;
7264 
7265 	spdk_blob_close(ctx->snapshot, delete_blob_cleanup_finish, ctx);
7266 }
7267 
7268 static void
7269 delete_snapshot_cleanup_clone(void *cb_arg, int bserrno)
7270 {
7271 	struct delete_snapshot_ctx *ctx = cb_arg;
7272 
7273 	ctx->clone->locked_operation_in_progress = false;
7274 	ctx->clone->md_ro = ctx->clone_md_ro;
7275 
7276 	spdk_blob_close(ctx->clone, delete_snapshot_cleanup_snapshot, ctx);
7277 }
7278 
7279 static void
7280 delete_snapshot_unfreeze_cpl(void *cb_arg, int bserrno)
7281 {
7282 	struct delete_snapshot_ctx *ctx = cb_arg;
7283 
7284 	if (bserrno) {
7285 		ctx->bserrno = bserrno;
7286 		delete_snapshot_cleanup_clone(ctx, 0);
7287 		return;
7288 	}
7289 
7290 	ctx->clone->locked_operation_in_progress = false;
7291 	spdk_blob_close(ctx->clone, delete_blob_cleanup_finish, ctx);
7292 }
7293 
7294 static void
7295 delete_snapshot_sync_snapshot_cpl(void *cb_arg, int bserrno)
7296 {
7297 	struct delete_snapshot_ctx *ctx = cb_arg;
7298 	struct spdk_blob_list *parent_snapshot_entry = NULL;
7299 	struct spdk_blob_list *snapshot_entry = NULL;
7300 	struct spdk_blob_list *clone_entry = NULL;
7301 	struct spdk_blob_list *snapshot_clone_entry = NULL;
7302 
7303 	if (bserrno) {
7304 		SPDK_ERRLOG("Failed to sync MD on blob\n");
7305 		ctx->bserrno = bserrno;
7306 		delete_snapshot_cleanup_clone(ctx, 0);
7307 		return;
7308 	}
7309 
7310 	/* Get snapshot entry for the snapshot we want to remove */
7311 	snapshot_entry = bs_get_snapshot_entry(ctx->snapshot->bs, ctx->snapshot->id);
7312 
7313 	assert(snapshot_entry != NULL);
7314 
7315 	/* Remove clone entry in this snapshot (at this point there can be only one clone) */
7316 	clone_entry = TAILQ_FIRST(&snapshot_entry->clones);
7317 	assert(clone_entry != NULL);
7318 	TAILQ_REMOVE(&snapshot_entry->clones, clone_entry, link);
7319 	snapshot_entry->clone_count--;
7320 	assert(TAILQ_EMPTY(&snapshot_entry->clones));
7321 
7322 	switch (ctx->snapshot->parent_id) {
7323 	case SPDK_BLOBID_INVALID:
7324 	case SPDK_BLOBID_EXTERNAL_SNAPSHOT:
7325 		/* No parent snapshot - just remove clone entry */
7326 		free(clone_entry);
7327 		break;
7328 	default:
		/* This snapshot is at the same time a clone of another snapshot - we need to
		 * update the parent snapshot (remove the current clone entry and add a new one
		 * inherited from the snapshot that is being removed) */
7332 
7333 		/* Get snapshot entry for parent snapshot and clone entry within that snapshot for
7334 		 * snapshot that we are removing */
7335 		blob_get_snapshot_and_clone_entries(ctx->snapshot, &parent_snapshot_entry,
7336 						    &snapshot_clone_entry);
7337 
7338 		/* Switch clone entry in parent snapshot */
7339 		TAILQ_INSERT_TAIL(&parent_snapshot_entry->clones, clone_entry, link);
7340 		TAILQ_REMOVE(&parent_snapshot_entry->clones, snapshot_clone_entry, link);
7341 		free(snapshot_clone_entry);
7342 	}
7343 
7344 	/* Restore md_ro flags */
7345 	ctx->clone->md_ro = ctx->clone_md_ro;
7346 	ctx->snapshot->md_ro = ctx->snapshot_md_ro;
7347 
7348 	blob_unfreeze_io(ctx->clone, delete_snapshot_unfreeze_cpl, ctx);
7349 }
7350 
7351 static void
7352 delete_snapshot_sync_clone_cpl(void *cb_arg, int bserrno)
7353 {
7354 	struct delete_snapshot_ctx *ctx = cb_arg;
7355 	uint64_t i;
7356 
7357 	ctx->snapshot->md_ro = false;
7358 
7359 	if (bserrno) {
7360 		SPDK_ERRLOG("Failed to sync MD on clone\n");
7361 		ctx->bserrno = bserrno;
7362 
7363 		/* Restore snapshot to previous state */
7364 		bserrno = blob_remove_xattr(ctx->snapshot, SNAPSHOT_PENDING_REMOVAL, true);
7365 		if (bserrno != 0) {
7366 			delete_snapshot_cleanup_clone(ctx, bserrno);
7367 			return;
7368 		}
7369 
7370 		spdk_blob_sync_md(ctx->snapshot, delete_snapshot_cleanup_clone, ctx);
7371 		return;
7372 	}
7373 
7374 	/* Clear cluster map entries for snapshot */
7375 	for (i = 0; i < ctx->snapshot->active.num_clusters && i < ctx->clone->active.num_clusters; i++) {
7376 		if (ctx->clone->active.clusters[i] == ctx->snapshot->active.clusters[i]) {
7377 			ctx->snapshot->active.clusters[i] = 0;
7378 		}
7379 	}
7380 	for (i = 0; i < ctx->snapshot->active.num_extent_pages &&
7381 	     i < ctx->clone->active.num_extent_pages; i++) {
7382 		if (ctx->clone->active.extent_pages[i] == ctx->snapshot->active.extent_pages[i]) {
7383 			ctx->snapshot->active.extent_pages[i] = 0;
7384 		}
7385 	}
7386 
7387 	blob_set_thin_provision(ctx->snapshot);
7388 	ctx->snapshot->state = SPDK_BLOB_STATE_DIRTY;
7389 
7390 	if (ctx->parent_snapshot_entry != NULL) {
7391 		ctx->snapshot->back_bs_dev = NULL;
7392 	}
7393 
7394 	spdk_blob_sync_md(ctx->snapshot, delete_snapshot_sync_snapshot_cpl, ctx);
7395 }
7396 
7397 static void
7398 delete_snapshot_update_extent_pages_cpl(struct delete_snapshot_ctx *ctx)
7399 {
7400 	int bserrno;
7401 
7402 	/* Delete old backing bs_dev from clone (related to snapshot that will be removed) */
7403 	blob_back_bs_destroy(ctx->clone);
7404 
7405 	/* Set/remove snapshot xattr and switch parent ID and backing bs_dev on clone... */
7406 	if (ctx->snapshot->parent_id == SPDK_BLOBID_EXTERNAL_SNAPSHOT) {
7407 		bserrno = bs_snapshot_copy_xattr(ctx->clone, ctx->snapshot,
7408 						 BLOB_EXTERNAL_SNAPSHOT_ID);
7409 		if (bserrno != 0) {
7410 			ctx->bserrno = bserrno;
7411 
7412 			/* Restore snapshot to previous state */
7413 			bserrno = blob_remove_xattr(ctx->snapshot, SNAPSHOT_PENDING_REMOVAL, true);
7414 			if (bserrno != 0) {
7415 				delete_snapshot_cleanup_clone(ctx, bserrno);
7416 				return;
7417 			}
7418 
7419 			spdk_blob_sync_md(ctx->snapshot, delete_snapshot_cleanup_clone, ctx);
7420 			return;
7421 		}
7422 		ctx->clone->parent_id = SPDK_BLOBID_EXTERNAL_SNAPSHOT;
7423 		ctx->clone->back_bs_dev = ctx->snapshot->back_bs_dev;
7424 		/* Do not delete the external snapshot along with this snapshot */
7425 		ctx->snapshot->back_bs_dev = NULL;
7426 		ctx->clone->invalid_flags |= SPDK_BLOB_EXTERNAL_SNAPSHOT;
7427 	} else if (ctx->parent_snapshot_entry != NULL) {
7428 		/* ...to parent snapshot */
7429 		ctx->clone->parent_id = ctx->parent_snapshot_entry->id;
7430 		ctx->clone->back_bs_dev = ctx->snapshot->back_bs_dev;
7431 		blob_set_xattr(ctx->clone, BLOB_SNAPSHOT, &ctx->parent_snapshot_entry->id,
7432 			       sizeof(spdk_blob_id),
7433 			       true);
7434 	} else {
7435 		/* ...to blobid invalid and zeroes dev */
7436 		ctx->clone->parent_id = SPDK_BLOBID_INVALID;
7437 		ctx->clone->back_bs_dev = bs_create_zeroes_dev();
7438 		blob_remove_xattr(ctx->clone, BLOB_SNAPSHOT, true);
7439 	}
7440 
7441 	spdk_blob_sync_md(ctx->clone, delete_snapshot_sync_clone_cpl, ctx);
7442 }
7443 
7444 static void
7445 delete_snapshot_update_extent_pages(void *cb_arg, int bserrno)
7446 {
7447 	struct delete_snapshot_ctx *ctx = cb_arg;
7448 	uint32_t *extent_page;
7449 	uint64_t i;
7450 
7451 	for (i = ctx->next_extent_page; i < ctx->snapshot->active.num_extent_pages &&
7452 	     i < ctx->clone->active.num_extent_pages; i++) {
7453 		if (ctx->snapshot->active.extent_pages[i] == 0) {
7454 			/* No extent page to use from snapshot */
7455 			continue;
7456 		}
7457 
7458 		extent_page = &ctx->clone->active.extent_pages[i];
7459 		if (*extent_page == 0) {
7460 			/* Copy extent page from snapshot when clone did not have a matching one */
7461 			*extent_page = ctx->snapshot->active.extent_pages[i];
7462 			continue;
7463 		}
7464 
		/* Clone and snapshot both contain partially filled matching extent pages.
		 * Update the clone extent page in place with a cluster map containing a mix of both. */
7467 		ctx->next_extent_page = i + 1;
7468 		memset(ctx->page, 0, SPDK_BS_PAGE_SIZE);
7469 
7470 		blob_write_extent_page(ctx->clone, *extent_page, i * SPDK_EXTENTS_PER_EP, ctx->page,
7471 				       delete_snapshot_update_extent_pages, ctx);
7472 		return;
7473 	}
7474 	delete_snapshot_update_extent_pages_cpl(ctx);
7475 }
7476 
7477 static void
7478 delete_snapshot_sync_snapshot_xattr_cpl(void *cb_arg, int bserrno)
7479 {
7480 	struct delete_snapshot_ctx *ctx = cb_arg;
7481 	uint64_t i;
7482 
7483 	/* Temporarily override md_ro flag for clone for MD modification */
7484 	ctx->clone_md_ro = ctx->clone->md_ro;
7485 	ctx->clone->md_ro = false;
7486 
7487 	if (bserrno) {
7488 		SPDK_ERRLOG("Failed to sync MD with xattr on blob\n");
7489 		ctx->bserrno = bserrno;
7490 		delete_snapshot_cleanup_clone(ctx, 0);
7491 		return;
7492 	}
7493 
7494 	/* Copy snapshot map to clone map (only unallocated clusters in clone) */
7495 	for (i = 0; i < ctx->snapshot->active.num_clusters && i < ctx->clone->active.num_clusters; i++) {
7496 		if (ctx->clone->active.clusters[i] == 0) {
7497 			ctx->clone->active.clusters[i] = ctx->snapshot->active.clusters[i];
7498 		}
7499 	}
7500 	ctx->next_extent_page = 0;
7501 	delete_snapshot_update_extent_pages(ctx, 0);
7502 }
7503 
7504 static void
7505 delete_snapshot_esnap_channels_destroyed_cb(void *cb_arg, struct spdk_blob *blob, int bserrno)
7506 {
7507 	struct delete_snapshot_ctx *ctx = cb_arg;
7508 
7509 	if (bserrno != 0) {
7510 		SPDK_ERRLOG("blob 0x%" PRIx64 ": failed to destroy esnap channels: %d\n",
7511 			    blob->id, bserrno);
7512 		/* That error should not stop us from syncing metadata. */
7513 	}
7514 
7515 	spdk_blob_sync_md(ctx->snapshot, delete_snapshot_sync_snapshot_xattr_cpl, ctx);
7516 }
7517 
7518 static void
7519 delete_snapshot_freeze_io_cb(void *cb_arg, int bserrno)
7520 {
7521 	struct delete_snapshot_ctx *ctx = cb_arg;
7522 
7523 	if (bserrno) {
7524 		SPDK_ERRLOG("Failed to freeze I/O on clone\n");
7525 		ctx->bserrno = bserrno;
7526 		delete_snapshot_cleanup_clone(ctx, 0);
7527 		return;
7528 	}
7529 
7530 	/* Temporarily override md_ro flag for snapshot for MD modification */
7531 	ctx->snapshot_md_ro = ctx->snapshot->md_ro;
7532 	ctx->snapshot->md_ro = false;
7533 
	/* Mark the blob as pending removal for power-failure safety; store the clone id for recovery */
7535 	ctx->bserrno = blob_set_xattr(ctx->snapshot, SNAPSHOT_PENDING_REMOVAL, &ctx->clone->id,
7536 				      sizeof(spdk_blob_id), true);
7537 	if (ctx->bserrno != 0) {
7538 		delete_snapshot_cleanup_clone(ctx, 0);
7539 		return;
7540 	}
7541 
7542 	if (blob_is_esnap_clone(ctx->snapshot)) {
7543 		blob_esnap_destroy_bs_dev_channels(ctx->snapshot, false,
7544 						   delete_snapshot_esnap_channels_destroyed_cb,
7545 						   ctx);
7546 		return;
7547 	}
7548 
7549 	spdk_blob_sync_md(ctx->snapshot, delete_snapshot_sync_snapshot_xattr_cpl, ctx);
7550 }
7551 
7552 static void
7553 delete_snapshot_open_clone_cb(void *cb_arg, struct spdk_blob *clone, int bserrno)
7554 {
7555 	struct delete_snapshot_ctx *ctx = cb_arg;
7556 
7557 	if (bserrno) {
7558 		SPDK_ERRLOG("Failed to open clone\n");
7559 		ctx->bserrno = bserrno;
7560 		delete_snapshot_cleanup_snapshot(ctx, 0);
7561 		return;
7562 	}
7563 
7564 	ctx->clone = clone;
7565 
7566 	if (clone->locked_operation_in_progress) {
7567 		SPDK_DEBUGLOG(blob, "Cannot remove blob - another operation in progress on its clone\n");
7568 		ctx->bserrno = -EBUSY;
7569 		spdk_blob_close(ctx->clone, delete_snapshot_cleanup_snapshot, ctx);
7570 		return;
7571 	}
7572 
7573 	clone->locked_operation_in_progress = true;
7574 
7575 	blob_freeze_io(clone, delete_snapshot_freeze_io_cb, ctx);
7576 }
7577 
7578 static void
7579 update_clone_on_snapshot_deletion(struct spdk_blob *snapshot, struct delete_snapshot_ctx *ctx)
7580 {
7581 	struct spdk_blob_list *snapshot_entry = NULL;
7582 	struct spdk_blob_list *clone_entry = NULL;
7583 	struct spdk_blob_list *snapshot_clone_entry = NULL;
7584 
7585 	/* Get snapshot entry for the snapshot we want to remove */
7586 	snapshot_entry = bs_get_snapshot_entry(snapshot->bs, snapshot->id);
7587 
7588 	assert(snapshot_entry != NULL);
7589 
7590 	/* Get clone of the snapshot (at this point there can be only one clone) */
7591 	clone_entry = TAILQ_FIRST(&snapshot_entry->clones);
7592 	assert(snapshot_entry->clone_count == 1);
7593 	assert(clone_entry != NULL);
7594 
7595 	/* Get snapshot entry for parent snapshot and clone entry within that snapshot for
7596 	 * snapshot that we are removing */
7597 	blob_get_snapshot_and_clone_entries(snapshot, &ctx->parent_snapshot_entry,
7598 					    &snapshot_clone_entry);
7599 
7600 	spdk_bs_open_blob(snapshot->bs, clone_entry->id, delete_snapshot_open_clone_cb, ctx);
7601 }
7602 
7603 static void
7604 bs_delete_blob_finish(void *cb_arg, struct spdk_blob *blob, int bserrno)
7605 {
7606 	spdk_bs_sequence_t *seq = cb_arg;
7607 	struct spdk_blob_list *snapshot_entry = NULL;
7608 	uint32_t page_num;
7609 
7610 	if (bserrno) {
7611 		SPDK_ERRLOG("Failed to remove blob\n");
7612 		bs_sequence_finish(seq, bserrno);
7613 		return;
7614 	}
7615 
7616 	/* Remove snapshot from the list */
7617 	snapshot_entry = bs_get_snapshot_entry(blob->bs, blob->id);
7618 	if (snapshot_entry != NULL) {
7619 		TAILQ_REMOVE(&blob->bs->snapshots, snapshot_entry, link);
7620 		free(snapshot_entry);
7621 	}
7622 
7623 	page_num = bs_blobid_to_page(blob->id);
7624 	spdk_bit_array_clear(blob->bs->used_blobids, page_num);
7625 	blob->state = SPDK_BLOB_STATE_DIRTY;
7626 	blob->active.num_pages = 0;
7627 	blob_resize(blob, 0);
7628 
7629 	blob_persist(seq, blob, bs_delete_persist_cpl, blob);
7630 }
7631 
7632 static int
7633 bs_is_blob_deletable(struct spdk_blob *blob, bool *update_clone)
7634 {
7635 	struct spdk_blob_list *snapshot_entry = NULL;
7636 	struct spdk_blob_list *clone_entry = NULL;
7637 	struct spdk_blob *clone = NULL;
7638 	bool has_one_clone = false;
7639 
7640 	/* Check if this is a snapshot with clones */
7641 	snapshot_entry = bs_get_snapshot_entry(blob->bs, blob->id);
7642 	if (snapshot_entry != NULL) {
7643 		if (snapshot_entry->clone_count > 1) {
7644 			SPDK_ERRLOG("Cannot remove snapshot with more than one clone\n");
7645 			return -EBUSY;
7646 		} else if (snapshot_entry->clone_count == 1) {
7647 			has_one_clone = true;
7648 		}
7649 	}
7650 
	/* Check if someone has this blob open (besides this delete context):
	 * - open_ref == 1 - only this context has the blob open, so it is ok to remove it
	 * - open_ref <= 2 && has_one_clone == true - the clone is holding the snapshot open,
	 *	and that is ok, because we will update it accordingly */
7655 	if (blob->open_ref <= 2 && has_one_clone) {
7656 		clone_entry = TAILQ_FIRST(&snapshot_entry->clones);
7657 		assert(clone_entry != NULL);
7658 		clone = blob_lookup(blob->bs, clone_entry->id);
7659 
7660 		if (blob->open_ref == 2 && clone == NULL) {
7661 			/* Clone is closed and someone else opened this blob */
7662 			SPDK_ERRLOG("Cannot remove snapshot because it is open\n");
7663 			return -EBUSY;
7664 		}
7665 
7666 		*update_clone = true;
7667 		return 0;
7668 	}
7669 
7670 	if (blob->open_ref > 1) {
7671 		SPDK_ERRLOG("Cannot remove snapshot because it is open\n");
7672 		return -EBUSY;
7673 	}
7674 
7675 	assert(has_one_clone == false);
7676 	*update_clone = false;
7677 	return 0;
7678 }
7679 
7680 static void
7681 bs_delete_enomem_close_cpl(void *cb_arg, int bserrno)
7682 {
7683 	spdk_bs_sequence_t *seq = cb_arg;
7684 
7685 	bs_sequence_finish(seq, -ENOMEM);
7686 }
7687 
7688 static void
7689 bs_delete_open_cpl(void *cb_arg, struct spdk_blob *blob, int bserrno)
7690 {
7691 	spdk_bs_sequence_t *seq = cb_arg;
7692 	struct delete_snapshot_ctx *ctx;
7693 	bool update_clone = false;
7694 
7695 	if (bserrno != 0) {
7696 		bs_sequence_finish(seq, bserrno);
7697 		return;
7698 	}
7699 
7700 	blob_verify_md_op(blob);
7701 
7702 	ctx = calloc(1, sizeof(*ctx));
7703 	if (ctx == NULL) {
7704 		spdk_blob_close(blob, bs_delete_enomem_close_cpl, seq);
7705 		return;
7706 	}
7707 
7708 	ctx->snapshot = blob;
7709 	ctx->cb_fn = bs_delete_blob_finish;
7710 	ctx->cb_arg = seq;
7711 
7712 	/* Check if blob can be removed and if it is a snapshot with clone on top of it */
7713 	ctx->bserrno = bs_is_blob_deletable(blob, &update_clone);
7714 	if (ctx->bserrno) {
7715 		spdk_blob_close(blob, delete_blob_cleanup_finish, ctx);
7716 		return;
7717 	}
7718 
7719 	if (blob->locked_operation_in_progress) {
7720 		SPDK_DEBUGLOG(blob, "Cannot remove blob - another operation in progress\n");
7721 		ctx->bserrno = -EBUSY;
7722 		spdk_blob_close(blob, delete_blob_cleanup_finish, ctx);
7723 		return;
7724 	}
7725 
7726 	blob->locked_operation_in_progress = true;
7727 
7728 	/*
7729 	 * Remove the blob from the blob_store list now, to ensure it does not
7730 	 *  get returned after this point by blob_lookup().
7731 	 */
7732 	spdk_bit_array_clear(blob->bs->open_blobids, blob->id);
7733 	RB_REMOVE(spdk_blob_tree, &blob->bs->open_blobs, blob);
7734 
7735 	if (update_clone) {
7736 		ctx->page = spdk_zmalloc(SPDK_BS_PAGE_SIZE, 0, NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
7737 		if (!ctx->page) {
7738 			ctx->bserrno = -ENOMEM;
7739 			spdk_blob_close(blob, delete_blob_cleanup_finish, ctx);
7740 			return;
7741 		}
7742 		/* This blob is a snapshot with active clone - update clone first */
7743 		update_clone_on_snapshot_deletion(blob, ctx);
7744 	} else {
7745 		/* This blob does not have any clones - just remove it */
7746 		bs_blob_list_remove(blob);
7747 		bs_delete_blob_finish(seq, blob, 0);
7748 		free(ctx);
7749 	}
7750 }
7751 
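/*
 * Usage sketch (the callback name is hypothetical). Must be called from the
 * metadata thread. Deleting a snapshot with more than one clone fails with
 * -EBUSY; with exactly one clone, the clone is updated first (see above).
 *
 *	static void
 *	delete_done(void *cb_arg, int bserrno)
 *	{
 *		// bserrno == 0: the blob and its metadata are gone
 *	}
 *
 *	spdk_bs_delete_blob(bs, blobid, delete_done, NULL);
 */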
7752 void
7753 spdk_bs_delete_blob(struct spdk_blob_store *bs, spdk_blob_id blobid,
7754 		    spdk_blob_op_complete cb_fn, void *cb_arg)
7755 {
7756 	struct spdk_bs_cpl	cpl;
7757 	spdk_bs_sequence_t	*seq;
7758 
7759 	SPDK_DEBUGLOG(blob, "Deleting blob 0x%" PRIx64 "\n", blobid);
7760 
7761 	assert(spdk_get_thread() == bs->md_thread);
7762 
7763 	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
7764 	cpl.u.blob_basic.cb_fn = cb_fn;
7765 	cpl.u.blob_basic.cb_arg = cb_arg;
7766 
7767 	seq = bs_sequence_start_bs(bs->md_channel, &cpl);
7768 	if (!seq) {
7769 		cb_fn(cb_arg, -ENOMEM);
7770 		return;
7771 	}
7772 
7773 	spdk_bs_open_blob(bs, blobid, bs_delete_open_cpl, seq);
7774 }
7775 
7776 /* END spdk_bs_delete_blob */
7777 
7778 /* START spdk_bs_open_blob */
7779 
7780 static void
7781 bs_open_blob_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
7782 {
7783 	struct spdk_blob *blob = cb_arg;
7784 	struct spdk_blob *existing;
7785 
7786 	if (bserrno != 0) {
7787 		blob_free(blob);
7788 		seq->cpl.u.blob_handle.blob = NULL;
7789 		bs_sequence_finish(seq, bserrno);
7790 		return;
7791 	}
7792 
7793 	existing = blob_lookup(blob->bs, blob->id);
7794 	if (existing) {
7795 		blob_free(blob);
7796 		existing->open_ref++;
7797 		seq->cpl.u.blob_handle.blob = existing;
7798 		bs_sequence_finish(seq, 0);
7799 		return;
7800 	}
7801 
7802 	blob->open_ref++;
7803 
7804 	spdk_bit_array_set(blob->bs->open_blobids, blob->id);
7805 	RB_INSERT(spdk_blob_tree, &blob->bs->open_blobs, blob);
7806 
7807 	bs_sequence_finish(seq, bserrno);
7808 }
7809 
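/*
 * Opts structures are copied field by field, gated on the caller-provided
 * opts_size, so binaries built against an older, smaller spdk_blob_open_opts
 * keep working: fields beyond the caller's opts_size keep the defaults set by
 * spdk_blob_open_opts_init().
 */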
7810 static inline void
7811 blob_open_opts_copy(const struct spdk_blob_open_opts *src, struct spdk_blob_open_opts *dst)
7812 {
7813 #define FIELD_OK(field) \
7814         offsetof(struct spdk_blob_open_opts, field) + sizeof(src->field) <= src->opts_size
7815 
7816 #define SET_FIELD(field) \
7817         if (FIELD_OK(field)) { \
7818                 dst->field = src->field; \
7819         } \
7820 
7821 	SET_FIELD(clear_method);
7822 	SET_FIELD(esnap_ctx);
7823 
7824 	dst->opts_size = src->opts_size;
7825 
	/* Do not remove this statement. If you add a new field, update the assert
	 * below and add a corresponding SET_FIELD statement. */
7828 	SPDK_STATIC_ASSERT(sizeof(struct spdk_blob_open_opts) == 24, "Incorrect size");
7829 
7830 #undef FIELD_OK
7831 #undef SET_FIELD
7832 }
7833 
7834 static void
7835 bs_open_blob(struct spdk_blob_store *bs,
7836 	     spdk_blob_id blobid,
7837 	     struct spdk_blob_open_opts *opts,
7838 	     spdk_blob_op_with_handle_complete cb_fn,
7839 	     void *cb_arg)
7840 {
7841 	struct spdk_blob		*blob;
7842 	struct spdk_bs_cpl		cpl;
7843 	struct spdk_blob_open_opts	opts_local;
7844 	spdk_bs_sequence_t		*seq;
7845 	uint32_t			page_num;
7846 
7847 	SPDK_DEBUGLOG(blob, "Opening blob 0x%" PRIx64 "\n", blobid);
7848 	assert(spdk_get_thread() == bs->md_thread);
7849 
7850 	page_num = bs_blobid_to_page(blobid);
7851 	if (spdk_bit_array_get(bs->used_blobids, page_num) == false) {
7852 		/* Invalid blobid */
7853 		cb_fn(cb_arg, NULL, -ENOENT);
7854 		return;
7855 	}
7856 
7857 	blob = blob_lookup(bs, blobid);
7858 	if (blob) {
7859 		blob->open_ref++;
7860 		cb_fn(cb_arg, blob, 0);
7861 		return;
7862 	}
7863 
7864 	blob = blob_alloc(bs, blobid);
7865 	if (!blob) {
7866 		cb_fn(cb_arg, NULL, -ENOMEM);
7867 		return;
7868 	}
7869 
7870 	spdk_blob_open_opts_init(&opts_local, sizeof(opts_local));
7871 	if (opts) {
7872 		blob_open_opts_copy(opts, &opts_local);
7873 	}
7874 
7875 	blob->clear_method = opts_local.clear_method;
7876 
7877 	cpl.type = SPDK_BS_CPL_TYPE_BLOB_HANDLE;
7878 	cpl.u.blob_handle.cb_fn = cb_fn;
7879 	cpl.u.blob_handle.cb_arg = cb_arg;
7880 	cpl.u.blob_handle.blob = blob;
7881 	cpl.u.blob_handle.esnap_ctx = opts_local.esnap_ctx;
7882 
7883 	seq = bs_sequence_start_bs(bs->md_channel, &cpl);
7884 	if (!seq) {
7885 		blob_free(blob);
7886 		cb_fn(cb_arg, NULL, -ENOMEM);
7887 		return;
7888 	}
7889 
7890 	blob_load(seq, blob, bs_open_blob_cpl, blob);
7891 }
7892 
7893 void
7894 spdk_bs_open_blob(struct spdk_blob_store *bs, spdk_blob_id blobid,
7895 		  spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
7896 {
7897 	bs_open_blob(bs, blobid, NULL, cb_fn, cb_arg);
7898 }
7899 
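/*
 * Usage sketch for the extended open (the callback name is hypothetical):
 *
 *	static void
 *	open_done(void *cb_arg, struct spdk_blob *blob, int bserrno)
 *	{
 *		// blob is valid only when bserrno == 0
 *	}
 *
 *	struct spdk_blob_open_opts opts;
 *
 *	spdk_blob_open_opts_init(&opts, sizeof(opts));
 *	opts.clear_method = BLOB_CLEAR_WITH_UNMAP;
 *	spdk_bs_open_blob_ext(bs, blobid, &opts, open_done, NULL);
 */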
7900 void
7901 spdk_bs_open_blob_ext(struct spdk_blob_store *bs, spdk_blob_id blobid,
7902 		      struct spdk_blob_open_opts *opts, spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
7903 {
7904 	bs_open_blob(bs, blobid, opts, cb_fn, cb_arg);
7905 }
7906 
7907 /* END spdk_bs_open_blob */
7908 
7909 /* START spdk_blob_set_read_only */
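/*
 * Note: this only sets the flag and marks the metadata dirty. The blob
 * actually becomes read-only (data_ro and md_ro set) once the metadata is
 * persisted via spdk_blob_sync_md(); see blob_sync_md_cpl() below.
 */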
7910 int
7911 spdk_blob_set_read_only(struct spdk_blob *blob)
7912 {
7913 	blob_verify_md_op(blob);
7914 
7915 	blob->data_ro_flags |= SPDK_BLOB_READ_ONLY;
7916 
7917 	blob->state = SPDK_BLOB_STATE_DIRTY;
7918 	return 0;
7919 }
7920 /* END spdk_blob_set_read_only */
7921 
7922 /* START spdk_blob_sync_md */
7923 
7924 static void
7925 blob_sync_md_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
7926 {
7927 	struct spdk_blob *blob = cb_arg;
7928 
7929 	if (bserrno == 0 && (blob->data_ro_flags & SPDK_BLOB_READ_ONLY)) {
7930 		blob->data_ro = true;
7931 		blob->md_ro = true;
7932 	}
7933 
7934 	bs_sequence_finish(seq, bserrno);
7935 }
7936 
7937 static void
7938 blob_sync_md(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
7939 {
7940 	struct spdk_bs_cpl	cpl;
7941 	spdk_bs_sequence_t	*seq;
7942 
7943 	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
7944 	cpl.u.blob_basic.cb_fn = cb_fn;
7945 	cpl.u.blob_basic.cb_arg = cb_arg;
7946 
7947 	seq = bs_sequence_start_bs(blob->bs->md_channel, &cpl);
7948 	if (!seq) {
7949 		cb_fn(cb_arg, -ENOMEM);
7950 		return;
7951 	}
7952 
7953 	blob_persist(seq, blob, blob_sync_md_cpl, blob);
7954 }
7955 
7956 void
7957 spdk_blob_sync_md(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
7958 {
7959 	blob_verify_md_op(blob);
7960 
7961 	SPDK_DEBUGLOG(blob, "Syncing blob 0x%" PRIx64 "\n", blob->id);
7962 
7963 	if (blob->md_ro) {
7964 		assert(blob->state == SPDK_BLOB_STATE_CLEAN);
7965 		cb_fn(cb_arg, 0);
7966 		return;
7967 	}
7968 
7969 	blob_sync_md(blob, cb_fn, cb_arg);
7970 }
7971 
7972 /* END spdk_blob_sync_md */
7973 
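/*
 * Cluster insert/free operations may originate on any thread, but all blob
 * metadata updates must happen on the blobstore metadata thread. The context
 * below records the originating thread; the operation is forwarded to the md
 * thread with spdk_thread_send_msg() and the completion is messaged back to
 * the originating thread (see blob_op_cluster_msg_cb()/_cpl()).
 */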
7974 struct spdk_blob_cluster_op_ctx {
7975 	struct spdk_thread	*thread;
7976 	struct spdk_blob	*blob;
7977 	uint32_t		cluster_num;	/* cluster index in blob */
7978 	uint32_t		cluster;	/* cluster on disk */
7979 	uint32_t		extent_page;	/* extent page on disk */
7980 	struct spdk_blob_md_page *page; /* preallocated extent page */
7981 	int			rc;
7982 	spdk_blob_op_complete	cb_fn;
7983 	void			*cb_arg;
7984 };
7985 
7986 static void
7987 blob_op_cluster_msg_cpl(void *arg)
7988 {
7989 	struct spdk_blob_cluster_op_ctx *ctx = arg;
7990 
7991 	ctx->cb_fn(ctx->cb_arg, ctx->rc);
7992 	free(ctx);
7993 }
7994 
7995 static void
7996 blob_op_cluster_msg_cb(void *arg, int bserrno)
7997 {
7998 	struct spdk_blob_cluster_op_ctx *ctx = arg;
7999 
8000 	ctx->rc = bserrno;
8001 	spdk_thread_send_msg(ctx->thread, blob_op_cluster_msg_cpl, ctx);
8002 }
8003 
8004 static void
8005 blob_insert_new_ep_cb(void *arg, int bserrno)
8006 {
8007 	struct spdk_blob_cluster_op_ctx *ctx = arg;
8008 	uint32_t *extent_page;
8009 
8010 	extent_page = bs_cluster_to_extent_page(ctx->blob, ctx->cluster_num);
8011 	*extent_page = ctx->extent_page;
8012 	ctx->blob->state = SPDK_BLOB_STATE_DIRTY;
8013 	blob_sync_md(ctx->blob, blob_op_cluster_msg_cb, ctx);
8014 }
8015 
8016 struct spdk_blob_write_extent_page_ctx {
8017 	struct spdk_blob_store		*bs;
8018 
8019 	uint32_t			extent;
8020 	struct spdk_blob_md_page	*page;
8021 };
8022 
8023 static void
8024 blob_free_cluster_msg_cb(void *arg, int bserrno)
8025 {
8026 	struct spdk_blob_cluster_op_ctx *ctx = arg;
8027 
8028 	spdk_spin_lock(&ctx->blob->bs->used_lock);
8029 	bs_release_cluster(ctx->blob->bs, bs_lba_to_cluster(ctx->blob->bs, ctx->cluster));
8030 	spdk_spin_unlock(&ctx->blob->bs->used_lock);
8031 
8032 	ctx->rc = bserrno;
8033 	spdk_thread_send_msg(ctx->thread, blob_op_cluster_msg_cpl, ctx);
8034 }
8035 
8036 static void
8037 blob_free_cluster_update_ep_cb(void *arg, int bserrno)
8038 {
8039 	struct spdk_blob_cluster_op_ctx *ctx = arg;
8040 
8041 	if (bserrno != 0 || ctx->blob->bs->clean == 0) {
8042 		blob_free_cluster_msg_cb(ctx, bserrno);
8043 		return;
8044 	}
8045 
8046 	ctx->blob->state = SPDK_BLOB_STATE_DIRTY;
8047 	blob_sync_md(ctx->blob, blob_free_cluster_msg_cb, ctx);
8048 }
8049 
8050 static void
8051 blob_free_cluster_free_ep_cb(void *arg, int bserrno)
8052 {
8053 	struct spdk_blob_cluster_op_ctx *ctx = arg;
8054 
8055 	spdk_spin_lock(&ctx->blob->bs->used_lock);
8056 	assert(spdk_bit_array_get(ctx->blob->bs->used_md_pages, ctx->extent_page) == true);
8057 	bs_release_md_page(ctx->blob->bs, ctx->extent_page);
8058 	spdk_spin_unlock(&ctx->blob->bs->used_lock);
8059 	ctx->blob->state = SPDK_BLOB_STATE_DIRTY;
8060 	blob_sync_md(ctx->blob, blob_free_cluster_msg_cb, ctx);
8061 }
8062 
8063 static void
8064 blob_persist_extent_page_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
8065 {
8066 	struct spdk_blob_write_extent_page_ctx *ctx = cb_arg;
8067 
8068 	free(ctx);
8069 	bs_sequence_finish(seq, bserrno);
8070 }
8071 
8072 static void
8073 blob_write_extent_page_ready(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
8074 {
8075 	struct spdk_blob_write_extent_page_ctx *ctx = cb_arg;
8076 
8077 	if (bserrno != 0) {
8078 		blob_persist_extent_page_cpl(seq, ctx, bserrno);
8079 		return;
8080 	}
8081 	bs_sequence_write_dev(seq, ctx->page, bs_md_page_to_lba(ctx->bs, ctx->extent),
8082 			      bs_byte_to_lba(ctx->bs, SPDK_BS_PAGE_SIZE),
8083 			      blob_persist_extent_page_cpl, ctx);
8084 }
8085 
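/*
 * Serialize and persist a single extent page: fill in the md page header,
 * serialize the cluster map for this extent, compute the CRC, mark the
 * blobstore dirty (bs_mark_dirty()), then write the page to its md LBA.
 */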
8086 static void
8087 blob_write_extent_page(struct spdk_blob *blob, uint32_t extent, uint64_t cluster_num,
8088 		       struct spdk_blob_md_page *page, spdk_blob_op_complete cb_fn, void *cb_arg)
8089 {
8090 	struct spdk_blob_write_extent_page_ctx	*ctx;
8091 	spdk_bs_sequence_t			*seq;
8092 	struct spdk_bs_cpl			cpl;
8093 
8094 	ctx = calloc(1, sizeof(*ctx));
8095 	if (!ctx) {
8096 		cb_fn(cb_arg, -ENOMEM);
8097 		return;
8098 	}
8099 	ctx->bs = blob->bs;
8100 	ctx->extent = extent;
8101 	ctx->page = page;
8102 
8103 	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
8104 	cpl.u.blob_basic.cb_fn = cb_fn;
8105 	cpl.u.blob_basic.cb_arg = cb_arg;
8106 
8107 	seq = bs_sequence_start_bs(blob->bs->md_channel, &cpl);
8108 	if (!seq) {
8109 		free(ctx);
8110 		cb_fn(cb_arg, -ENOMEM);
8111 		return;
8112 	}
8113 
8114 	assert(page);
8115 	page->next = SPDK_INVALID_MD_PAGE;
8116 	page->id = blob->id;
8117 	page->sequence_num = 0;
8118 
8119 	blob_serialize_extent_page(blob, cluster_num, page);
8120 
8121 	page->crc = blob_md_page_calc_crc(page);
8122 
8123 	assert(spdk_bit_array_get(blob->bs->used_md_pages, extent) == true);
8124 
8125 	bs_mark_dirty(seq, blob->bs, blob_write_extent_page_ready, ctx);
8126 }
8127 
8128 static void
8129 blob_insert_cluster_msg(void *arg)
8130 {
8131 	struct spdk_blob_cluster_op_ctx *ctx = arg;
8132 	uint32_t *extent_page;
8133 
8134 	ctx->rc = blob_insert_cluster(ctx->blob, ctx->cluster_num, ctx->cluster);
8135 	if (ctx->rc != 0) {
8136 		spdk_thread_send_msg(ctx->thread, blob_op_cluster_msg_cpl, ctx);
8137 		return;
8138 	}
8139 
8140 	if (ctx->blob->use_extent_table == false) {
8141 		/* Extent table is not used, proceed with sync of md that will only use extents_rle. */
8142 		ctx->blob->state = SPDK_BLOB_STATE_DIRTY;
8143 		blob_sync_md(ctx->blob, blob_op_cluster_msg_cb, ctx);
8144 		return;
8145 	}
8146 
8147 	extent_page = bs_cluster_to_extent_page(ctx->blob, ctx->cluster_num);
8148 	if (*extent_page == 0) {
8149 		/* Extent page requires allocation.
8150 		 * It was already claimed in the used_md_pages map and placed in ctx. */
8151 		assert(ctx->extent_page != 0);
8152 		assert(spdk_bit_array_get(ctx->blob->bs->used_md_pages, ctx->extent_page) == true);
8153 		blob_write_extent_page(ctx->blob, ctx->extent_page, ctx->cluster_num, ctx->page,
8154 				       blob_insert_new_ep_cb, ctx);
8155 	} else {
		/* It is possible for the original thread to have allocated an extent page for a
		 * different cluster in the same extent page. In that case, proceed with
		 * updating the existing extent page, but release the additional one. */
8159 		if (ctx->extent_page != 0) {
8160 			spdk_spin_lock(&ctx->blob->bs->used_lock);
8161 			assert(spdk_bit_array_get(ctx->blob->bs->used_md_pages, ctx->extent_page) == true);
8162 			bs_release_md_page(ctx->blob->bs, ctx->extent_page);
8163 			spdk_spin_unlock(&ctx->blob->bs->used_lock);
8164 			ctx->extent_page = 0;
8165 		}
		/* Extent page already allocated.
		 * Every cluster allocation requires just an update of a single extent page. */
8168 		blob_write_extent_page(ctx->blob, *extent_page, ctx->cluster_num, ctx->page,
8169 				       blob_op_cluster_msg_cb, ctx);
8170 	}
8171 }
8172 
8173 static void
8174 blob_insert_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num,
8175 				 uint64_t cluster, uint32_t extent_page, struct spdk_blob_md_page *page,
8176 				 spdk_blob_op_complete cb_fn, void *cb_arg)
8177 {
8178 	struct spdk_blob_cluster_op_ctx *ctx;
8179 
8180 	ctx = calloc(1, sizeof(*ctx));
8181 	if (ctx == NULL) {
8182 		cb_fn(cb_arg, -ENOMEM);
8183 		return;
8184 	}
8185 
8186 	ctx->thread = spdk_get_thread();
8187 	ctx->blob = blob;
8188 	ctx->cluster_num = cluster_num;
8189 	ctx->cluster = cluster;
8190 	ctx->extent_page = extent_page;
8191 	ctx->page = page;
8192 	ctx->cb_fn = cb_fn;
8193 	ctx->cb_arg = cb_arg;
8194 
8195 	spdk_thread_send_msg(blob->bs->md_thread, blob_insert_cluster_msg, ctx);
8196 }
8197 
8198 static void
8199 blob_free_cluster_msg(void *arg)
8200 {
8201 	struct spdk_blob_cluster_op_ctx *ctx = arg;
8202 	uint32_t *extent_page;
8203 	uint32_t start_cluster_idx;
8204 	bool free_extent_page = true;
8205 	size_t i;
8206 
8207 	ctx->cluster = ctx->blob->active.clusters[ctx->cluster_num];
8208 	ctx->blob->active.clusters[ctx->cluster_num] = 0;
8209 
8210 	if (ctx->blob->use_extent_table == false) {
8211 		/* Extent table is not used, proceed with sync of md that will only use extents_rle. */
8212 		spdk_spin_lock(&ctx->blob->bs->used_lock);
8213 		bs_release_cluster(ctx->blob->bs, bs_lba_to_cluster(ctx->blob->bs, ctx->cluster));
8214 		spdk_spin_unlock(&ctx->blob->bs->used_lock);
8215 		ctx->blob->state = SPDK_BLOB_STATE_DIRTY;
8216 		blob_sync_md(ctx->blob, blob_op_cluster_msg_cb, ctx);
8217 		return;
8218 	}
8219 
8220 	extent_page = bs_cluster_to_extent_page(ctx->blob, ctx->cluster_num);
8221 
	/* There shouldn't be parallel release operations on the same cluster */
8223 	assert(*extent_page == ctx->extent_page);
8224 
8225 	start_cluster_idx = (ctx->cluster_num / SPDK_EXTENTS_PER_EP) * SPDK_EXTENTS_PER_EP;
8226 	for (i = 0; i < SPDK_EXTENTS_PER_EP; ++i) {
8227 		if (ctx->blob->active.clusters[start_cluster_idx + i] != 0) {
8228 			free_extent_page = false;
8229 			break;
8230 		}
8231 	}
8232 
8233 	if (free_extent_page) {
8234 		assert(ctx->extent_page != 0);
8235 		assert(spdk_bit_array_get(ctx->blob->bs->used_md_pages, ctx->extent_page) == true);
8236 		ctx->blob->active.extent_pages[bs_cluster_to_extent_table_id(ctx->cluster_num)] = 0;
8237 		blob_write_extent_page(ctx->blob, ctx->extent_page, ctx->cluster_num, ctx->page,
8238 				       blob_free_cluster_free_ep_cb, ctx);
8239 	} else {
8240 		blob_write_extent_page(ctx->blob, *extent_page, ctx->cluster_num, ctx->page,
8241 				       blob_free_cluster_update_ep_cb, ctx);
8242 	}
8243 }
8246 static void
8247 blob_free_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num, uint32_t extent_page,
8248 			       struct spdk_blob_md_page *page, spdk_blob_op_complete cb_fn, void *cb_arg)
8249 {
8250 	struct spdk_blob_cluster_op_ctx *ctx;
8251 
8252 	ctx = calloc(1, sizeof(*ctx));
8253 	if (ctx == NULL) {
8254 		cb_fn(cb_arg, -ENOMEM);
8255 		return;
8256 	}
8257 
8258 	ctx->thread = spdk_get_thread();
8259 	ctx->blob = blob;
8260 	ctx->cluster_num = cluster_num;
8261 	ctx->extent_page = extent_page;
8262 	ctx->page = page;
8263 	ctx->cb_fn = cb_fn;
8264 	ctx->cb_arg = cb_arg;
8265 
8266 	spdk_thread_send_msg(blob->bs->md_thread, blob_free_cluster_msg, ctx);
8267 }
8268 
8269 /* START spdk_blob_close */
8270 
8271 static void
8272 blob_close_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
8273 {
8274 	struct spdk_blob *blob = cb_arg;
8275 
8276 	if (bserrno == 0) {
8277 		blob->open_ref--;
8278 		if (blob->open_ref == 0) {
8279 			/*
8280 			 * Blobs with active.num_pages == 0 are deleted blobs.
			 *  These blobs are removed from the blob_store list
			 *  when the deletion process starts, so don't try to
8283 			 *  remove them again.
8284 			 */
8285 			if (blob->active.num_pages > 0) {
8286 				spdk_bit_array_clear(blob->bs->open_blobids, blob->id);
8287 				RB_REMOVE(spdk_blob_tree, &blob->bs->open_blobs, blob);
8288 			}
8289 			blob_free(blob);
8290 		}
8291 	}
8292 
8293 	bs_sequence_finish(seq, bserrno);
8294 }
8295 
8296 static void
8297 blob_close_esnap_done(void *cb_arg, struct spdk_blob *blob, int bserrno)
8298 {
8299 	spdk_bs_sequence_t	*seq = cb_arg;
8300 
8301 	if (bserrno != 0) {
8302 		SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": close failed with error %d\n",
8303 			      blob->id, bserrno);
8304 		bs_sequence_finish(seq, bserrno);
8305 		return;
8306 	}
8307 
8308 	SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": closed, syncing metadata on thread %s\n",
8309 		      blob->id, spdk_thread_get_name(spdk_get_thread()));
8310 
8311 	/* Sync metadata */
8312 	blob_persist(seq, blob, blob_close_cpl, blob);
8313 }
8314 
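/*
 * Usage sketch (the callback name is hypothetical). Closing persists any
 * dirty metadata first; when the last reference to an esnap clone is closed,
 * the per-thread esnap channels are torn down before the final sync.
 *
 *	static void
 *	close_done(void *cb_arg, int bserrno)
 *	{
 *		// the blob handle must not be used after a successful close
 *	}
 *
 *	spdk_blob_close(blob, close_done, NULL);
 */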
8315 void
8316 spdk_blob_close(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
8317 {
8318 	struct spdk_bs_cpl	cpl;
8319 	spdk_bs_sequence_t	*seq;
8320 
8321 	blob_verify_md_op(blob);
8322 
8323 	SPDK_DEBUGLOG(blob, "Closing blob 0x%" PRIx64 "\n", blob->id);
8324 
8325 	if (blob->open_ref == 0) {
8326 		cb_fn(cb_arg, -EBADF);
8327 		return;
8328 	}
8329 
8330 	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
8331 	cpl.u.blob_basic.cb_fn = cb_fn;
8332 	cpl.u.blob_basic.cb_arg = cb_arg;
8333 
8334 	seq = bs_sequence_start_bs(blob->bs->md_channel, &cpl);
8335 	if (!seq) {
8336 		cb_fn(cb_arg, -ENOMEM);
8337 		return;
8338 	}
8339 
8340 	if (blob->open_ref == 1 && blob_is_esnap_clone(blob)) {
8341 		blob_esnap_destroy_bs_dev_channels(blob, false, blob_close_esnap_done, seq);
8342 		return;
8343 	}
8344 
8345 	/* Sync metadata */
8346 	blob_persist(seq, blob, blob_close_cpl, blob);
8347 }
8348 
8349 /* END spdk_blob_close */
8350 
struct spdk_io_channel *
spdk_bs_alloc_io_channel(struct spdk_blob_store *bs)
8352 {
8353 	return spdk_get_io_channel(bs);
8354 }
8355 
8356 void
8357 spdk_bs_free_io_channel(struct spdk_io_channel *channel)
8358 {
8359 	blob_esnap_destroy_bs_channel(spdk_io_channel_get_ctx(channel));
8360 	spdk_put_io_channel(channel);
8361 }
8362 
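/*
 * The I/O helpers below take offset and length in io units (see
 * spdk_bs_get_io_unit_size()). A minimal read sketch, with hypothetical
 * callback and buffer names:
 *
 *	static void
 *	read_done(void *cb_arg, int bserrno)
 *	{
 *		// buf holds one io unit of blob data when bserrno == 0
 *	}
 *
 *	spdk_blob_io_read(blob, channel, buf, 0, 1, read_done, NULL);
 */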
8363 void
8364 spdk_blob_io_unmap(struct spdk_blob *blob, struct spdk_io_channel *channel,
8365 		   uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg)
8366 {
8367 	blob_request_submit_op(blob, channel, NULL, offset, length, cb_fn, cb_arg,
8368 			       SPDK_BLOB_UNMAP);
8369 }
8370 
8371 void
8372 spdk_blob_io_write_zeroes(struct spdk_blob *blob, struct spdk_io_channel *channel,
8373 			  uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg)
8374 {
8375 	blob_request_submit_op(blob, channel, NULL, offset, length, cb_fn, cb_arg,
8376 			       SPDK_BLOB_WRITE_ZEROES);
8377 }
8378 
8379 void
8380 spdk_blob_io_write(struct spdk_blob *blob, struct spdk_io_channel *channel,
8381 		   void *payload, uint64_t offset, uint64_t length,
8382 		   spdk_blob_op_complete cb_fn, void *cb_arg)
8383 {
8384 	blob_request_submit_op(blob, channel, payload, offset, length, cb_fn, cb_arg,
8385 			       SPDK_BLOB_WRITE);
8386 }
8387 
8388 void
8389 spdk_blob_io_read(struct spdk_blob *blob, struct spdk_io_channel *channel,
8390 		  void *payload, uint64_t offset, uint64_t length,
8391 		  spdk_blob_op_complete cb_fn, void *cb_arg)
8392 {
8393 	blob_request_submit_op(blob, channel, payload, offset, length, cb_fn, cb_arg,
8394 			       SPDK_BLOB_READ);
8395 }
8396 
8397 void
8398 spdk_blob_io_writev(struct spdk_blob *blob, struct spdk_io_channel *channel,
8399 		    struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
8400 		    spdk_blob_op_complete cb_fn, void *cb_arg)
8401 {
8402 	blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, false, NULL);
8403 }
8404 
8405 void
8406 spdk_blob_io_readv(struct spdk_blob *blob, struct spdk_io_channel *channel,
8407 		   struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
8408 		   spdk_blob_op_complete cb_fn, void *cb_arg)
8409 {
8410 	blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, true, NULL);
8411 }
8412 
8413 void
8414 spdk_blob_io_writev_ext(struct spdk_blob *blob, struct spdk_io_channel *channel,
8415 			struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
8416 			spdk_blob_op_complete cb_fn, void *cb_arg, struct spdk_blob_ext_io_opts *io_opts)
8417 {
8418 	blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, false,
8419 				   io_opts);
8420 }
8421 
8422 void
8423 spdk_blob_io_readv_ext(struct spdk_blob *blob, struct spdk_io_channel *channel,
8424 		       struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
8425 		       spdk_blob_op_complete cb_fn, void *cb_arg, struct spdk_blob_ext_io_opts *io_opts)
8426 {
8427 	blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, true,
8428 				   io_opts);
8429 }
8430 
8431 struct spdk_bs_iter_ctx {
8432 	int64_t page_num;
8433 	struct spdk_blob_store *bs;
8434 
8435 	spdk_blob_op_with_handle_complete cb_fn;
8436 	void *cb_arg;
8437 };
8438 
8439 static void
8440 bs_iter_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
8441 {
8442 	struct spdk_bs_iter_ctx *ctx = cb_arg;
8443 	struct spdk_blob_store *bs = ctx->bs;
8444 	spdk_blob_id id;
8445 
8446 	if (bserrno == 0) {
8447 		ctx->cb_fn(ctx->cb_arg, _blob, bserrno);
8448 		free(ctx);
8449 		return;
8450 	}
8451 
8452 	ctx->page_num++;
8453 	ctx->page_num = spdk_bit_array_find_first_set(bs->used_blobids, ctx->page_num);
8454 	if (ctx->page_num >= spdk_bit_array_capacity(bs->used_blobids)) {
8455 		ctx->cb_fn(ctx->cb_arg, NULL, -ENOENT);
8456 		free(ctx);
8457 		return;
8458 	}
8459 
8460 	id = bs_page_to_blobid(ctx->page_num);
8461 
8462 	spdk_bs_open_blob(bs, id, bs_iter_cpl, ctx);
8463 }
8464 
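/*
 * Typical iteration pattern (the callback name is hypothetical). Each blob
 * handed to the callback stays open until it is passed back to
 * spdk_bs_iter_next(), which closes it before opening the next one; the
 * iteration ends with -ENOENT.
 *
 *	static void
 *	iter_cb(void *cb_arg, struct spdk_blob *blob, int bserrno)
 *	{
 *		struct spdk_blob_store *bs = cb_arg;
 *
 *		if (bserrno != 0) {
 *			return; // -ENOENT: no more blobs
 *		}
 *		// ... inspect blob ...
 *		spdk_bs_iter_next(bs, blob, iter_cb, bs);
 *	}
 *
 *	spdk_bs_iter_first(bs, iter_cb, bs);
 */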
8465 void
8466 spdk_bs_iter_first(struct spdk_blob_store *bs,
8467 		   spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
8468 {
8469 	struct spdk_bs_iter_ctx *ctx;
8470 
8471 	ctx = calloc(1, sizeof(*ctx));
8472 	if (!ctx) {
8473 		cb_fn(cb_arg, NULL, -ENOMEM);
8474 		return;
8475 	}
8476 
8477 	ctx->page_num = -1;
8478 	ctx->bs = bs;
8479 	ctx->cb_fn = cb_fn;
8480 	ctx->cb_arg = cb_arg;
8481 
8482 	bs_iter_cpl(ctx, NULL, -1);
8483 }
8484 
8485 static void
8486 bs_iter_close_cpl(void *cb_arg, int bserrno)
8487 {
8488 	struct spdk_bs_iter_ctx *ctx = cb_arg;
8489 
8490 	bs_iter_cpl(ctx, NULL, -1);
8491 }
8492 
8493 void
8494 spdk_bs_iter_next(struct spdk_blob_store *bs, struct spdk_blob *blob,
8495 		  spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
8496 {
8497 	struct spdk_bs_iter_ctx *ctx;
8498 
8499 	assert(blob != NULL);
8500 
8501 	ctx = calloc(1, sizeof(*ctx));
8502 	if (!ctx) {
8503 		cb_fn(cb_arg, NULL, -ENOMEM);
8504 		return;
8505 	}
8506 
8507 	ctx->page_num = bs_blobid_to_page(blob->id);
8508 	ctx->bs = bs;
8509 	ctx->cb_fn = cb_fn;
8510 	ctx->cb_arg = cb_arg;
8511 
8512 	/* Close the existing blob */
8513 	spdk_blob_close(blob, bs_iter_close_cpl, ctx);
8514 }
8515 
8516 static int
8517 blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value,
8518 	       uint16_t value_len, bool internal)
8519 {
8520 	struct spdk_xattr_tailq *xattrs;
8521 	struct spdk_xattr	*xattr;
8522 	size_t			desc_size;
8523 	void			*tmp;
8524 
8525 	blob_verify_md_op(blob);
8526 
8527 	if (blob->md_ro) {
8528 		return -EPERM;
8529 	}
8530 
8531 	desc_size = sizeof(struct spdk_blob_md_descriptor_xattr) + strlen(name) + value_len;
8532 	if (desc_size > SPDK_BS_MAX_DESC_SIZE) {
		SPDK_DEBUGLOG(blob, "Xattr '%s' of size %zu does not fit into a single page %zu\n", name,
8534 			      desc_size, SPDK_BS_MAX_DESC_SIZE);
8535 		return -ENOMEM;
8536 	}
8537 
8538 	if (internal) {
8539 		xattrs = &blob->xattrs_internal;
8540 		blob->invalid_flags |= SPDK_BLOB_INTERNAL_XATTR;
8541 	} else {
8542 		xattrs = &blob->xattrs;
8543 	}
8544 
8545 	TAILQ_FOREACH(xattr, xattrs, link) {
8546 		if (!strcmp(name, xattr->name)) {
8547 			tmp = malloc(value_len);
8548 			if (!tmp) {
8549 				return -ENOMEM;
8550 			}
8551 
8552 			free(xattr->value);
8553 			xattr->value_len = value_len;
8554 			xattr->value = tmp;
8555 			memcpy(xattr->value, value, value_len);
8556 
8557 			blob->state = SPDK_BLOB_STATE_DIRTY;
8558 
8559 			return 0;
8560 		}
8561 	}
8562 
8563 	xattr = calloc(1, sizeof(*xattr));
8564 	if (!xattr) {
8565 		return -ENOMEM;
8566 	}
8567 
8568 	xattr->name = strdup(name);
8569 	if (!xattr->name) {
8570 		free(xattr);
8571 		return -ENOMEM;
8572 	}
8573 
8574 	xattr->value_len = value_len;
8575 	xattr->value = malloc(value_len);
8576 	if (!xattr->value) {
8577 		free(xattr->name);
8578 		free(xattr);
8579 		return -ENOMEM;
8580 	}
8581 	memcpy(xattr->value, value, value_len);
8582 	TAILQ_INSERT_TAIL(xattrs, xattr, link);
8583 
8584 	blob->state = SPDK_BLOB_STATE_DIRTY;
8585 
8586 	return 0;
8587 }
8588 
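/*
 * Usage sketch for the public xattr API (the name and value are made up).
 * Values are copied on set; the pointer returned by get stays owned by the
 * blob. Like other md operations this must run on the md thread, fails with
 * -EPERM when blob->md_ro is set, and needs spdk_blob_sync_md() to persist.
 *
 *	const char *val = "hello";
 *	const void *out;
 *	size_t out_len;
 *
 *	spdk_blob_set_xattr(blob, "name", val, strlen(val) + 1);
 *	spdk_blob_get_xattr_value(blob, "name", &out, &out_len);
 */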
8589 int
8590 spdk_blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value,
8591 		    uint16_t value_len)
8592 {
8593 	return blob_set_xattr(blob, name, value, value_len, false);
8594 }
8595 
8596 static int
8597 blob_remove_xattr(struct spdk_blob *blob, const char *name, bool internal)
8598 {
8599 	struct spdk_xattr_tailq *xattrs;
8600 	struct spdk_xattr	*xattr;
8601 
8602 	blob_verify_md_op(blob);
8603 
8604 	if (blob->md_ro) {
8605 		return -EPERM;
8606 	}
8607 	xattrs = internal ? &blob->xattrs_internal : &blob->xattrs;
8608 
8609 	TAILQ_FOREACH(xattr, xattrs, link) {
8610 		if (!strcmp(name, xattr->name)) {
8611 			TAILQ_REMOVE(xattrs, xattr, link);
8612 			free(xattr->value);
8613 			free(xattr->name);
8614 			free(xattr);
8615 
8616 			if (internal && TAILQ_EMPTY(&blob->xattrs_internal)) {
8617 				blob->invalid_flags &= ~SPDK_BLOB_INTERNAL_XATTR;
8618 			}
8619 			blob->state = SPDK_BLOB_STATE_DIRTY;
8620 
8621 			return 0;
8622 		}
8623 	}
8624 
8625 	return -ENOENT;
8626 }
8627 
8628 int
8629 spdk_blob_remove_xattr(struct spdk_blob *blob, const char *name)
8630 {
8631 	return blob_remove_xattr(blob, name, false);
8632 }
8633 
8634 static int
8635 blob_get_xattr_value(struct spdk_blob *blob, const char *name,
8636 		     const void **value, size_t *value_len, bool internal)
8637 {
8638 	struct spdk_xattr	*xattr;
8639 	struct spdk_xattr_tailq *xattrs;
8640 
8641 	xattrs = internal ? &blob->xattrs_internal : &blob->xattrs;
8642 
8643 	TAILQ_FOREACH(xattr, xattrs, link) {
8644 		if (!strcmp(name, xattr->name)) {
8645 			*value = xattr->value;
8646 			*value_len = xattr->value_len;
8647 			return 0;
8648 		}
8649 	}
8650 	return -ENOENT;
8651 }
8652 
8653 int
8654 spdk_blob_get_xattr_value(struct spdk_blob *blob, const char *name,
8655 			  const void **value, size_t *value_len)
8656 {
8657 	blob_verify_md_op(blob);
8658 
8659 	return blob_get_xattr_value(blob, name, value, value_len, false);
8660 }
8661 
8662 struct spdk_xattr_names {
8663 	uint32_t	count;
8664 	const char	*names[0];
8665 };
8666 
8667 static int
8668 blob_get_xattr_names(struct spdk_xattr_tailq *xattrs, struct spdk_xattr_names **names)
8669 {
8670 	struct spdk_xattr	*xattr;
8671 	int			count = 0;
8672 
8673 	TAILQ_FOREACH(xattr, xattrs, link) {
8674 		count++;
8675 	}
8676 
8677 	*names = calloc(1, sizeof(struct spdk_xattr_names) + count * sizeof(char *));
8678 	if (*names == NULL) {
8679 		return -ENOMEM;
8680 	}
8681 
8682 	TAILQ_FOREACH(xattr, xattrs, link) {
8683 		(*names)->names[(*names)->count++] = xattr->name;
8684 	}
8685 
8686 	return 0;
8687 }
8688 
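/*
 * Enumeration sketch: the returned names point into the blob's xattr list,
 * so free the container with spdk_xattr_names_free() when done and do not
 * cache the strings past xattr removal.
 *
 *	struct spdk_xattr_names *names;
 *	uint32_t i;
 *
 *	if (spdk_blob_get_xattr_names(blob, &names) == 0) {
 *		for (i = 0; i < spdk_xattr_names_get_count(names); i++) {
 *			printf("%s\n", spdk_xattr_names_get_name(names, i));
 *		}
 *		spdk_xattr_names_free(names);
 *	}
 */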
8689 int
8690 spdk_blob_get_xattr_names(struct spdk_blob *blob, struct spdk_xattr_names **names)
8691 {
8692 	blob_verify_md_op(blob);
8693 
8694 	return blob_get_xattr_names(&blob->xattrs, names);
8695 }
8696 
8697 uint32_t
8698 spdk_xattr_names_get_count(struct spdk_xattr_names *names)
8699 {
8700 	assert(names != NULL);
8701 
8702 	return names->count;
8703 }
8704 
8705 const char *
8706 spdk_xattr_names_get_name(struct spdk_xattr_names *names, uint32_t index)
8707 {
8708 	if (index >= names->count) {
8709 		return NULL;
8710 	}
8711 
8712 	return names->names[index];
8713 }
8714 
8715 void
8716 spdk_xattr_names_free(struct spdk_xattr_names *names)
8717 {
8718 	free(names);
8719 }
8720 
8721 struct spdk_bs_type
8722 spdk_bs_get_bstype(struct spdk_blob_store *bs)
8723 {
8724 	return bs->bstype;
8725 }
8726 
8727 void
8728 spdk_bs_set_bstype(struct spdk_blob_store *bs, struct spdk_bs_type bstype)
8729 {
8730 	memcpy(&bs->bstype, &bstype, sizeof(bstype));
8731 }
8732 
8733 bool
8734 spdk_blob_is_read_only(struct spdk_blob *blob)
8735 {
8736 	assert(blob != NULL);
8737 	return (blob->data_ro || blob->md_ro);
8738 }
8739 
8740 bool
8741 spdk_blob_is_snapshot(struct spdk_blob *blob)
8742 {
8743 	struct spdk_blob_list *snapshot_entry;
8744 
8745 	assert(blob != NULL);
8746 
8747 	snapshot_entry = bs_get_snapshot_entry(blob->bs, blob->id);
8748 	if (snapshot_entry == NULL) {
8749 		return false;
8750 	}
8751 
8752 	return true;
8753 }
8754 
8755 bool
8756 spdk_blob_is_clone(struct spdk_blob *blob)
8757 {
8758 	assert(blob != NULL);
8759 
8760 	if (blob->parent_id != SPDK_BLOBID_INVALID &&
8761 	    blob->parent_id != SPDK_BLOBID_EXTERNAL_SNAPSHOT) {
8762 		assert(spdk_blob_is_thin_provisioned(blob));
8763 		return true;
8764 	}
8765 
8766 	return false;
8767 }
8768 
8769 bool
8770 spdk_blob_is_thin_provisioned(struct spdk_blob *blob)
8771 {
8772 	assert(blob != NULL);
8773 	return !!(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
8774 }
8775 
8776 bool
8777 spdk_blob_is_esnap_clone(const struct spdk_blob *blob)
8778 {
8779 	return blob_is_esnap_clone(blob);
8780 }
8781 
8782 static void
8783 blob_update_clear_method(struct spdk_blob *blob)
8784 {
8785 	enum blob_clear_method stored_cm;
8786 
8787 	assert(blob != NULL);
8788 
8789 	/* If BLOB_CLEAR_WITH_DEFAULT was passed in, use the setting stored
8790 	 * in metadata previously.  If something other than the default was
	 * specified, ignore the stored value and use what was passed in.
8792 	 */
8793 	stored_cm = ((blob->md_ro_flags & SPDK_BLOB_CLEAR_METHOD) >> SPDK_BLOB_CLEAR_METHOD_SHIFT);
8794 
8795 	if (blob->clear_method == BLOB_CLEAR_WITH_DEFAULT) {
8796 		blob->clear_method = stored_cm;
8797 	} else if (blob->clear_method != stored_cm) {
8798 		SPDK_WARNLOG("Using passed in clear method 0x%x instead of stored value of 0x%x\n",
8799 			     blob->clear_method, stored_cm);
8800 	}
8801 }
8802 
8803 spdk_blob_id
8804 spdk_blob_get_parent_snapshot(struct spdk_blob_store *bs, spdk_blob_id blob_id)
8805 {
8806 	struct spdk_blob_list *snapshot_entry = NULL;
8807 	struct spdk_blob_list *clone_entry = NULL;
8808 
8809 	TAILQ_FOREACH(snapshot_entry, &bs->snapshots, link) {
8810 		TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) {
8811 			if (clone_entry->id == blob_id) {
8812 				return snapshot_entry->id;
8813 			}
8814 		}
8815 	}
8816 
8817 	return SPDK_BLOBID_INVALID;
8818 }
8819 
8820 int
8821 spdk_blob_get_clones(struct spdk_blob_store *bs, spdk_blob_id blobid, spdk_blob_id *ids,
8822 		     size_t *count)
8823 {
8824 	struct spdk_blob_list *snapshot_entry, *clone_entry;
8825 	size_t n;
8826 
8827 	snapshot_entry = bs_get_snapshot_entry(bs, blobid);
8828 	if (snapshot_entry == NULL) {
8829 		*count = 0;
8830 		return 0;
8831 	}
8832 
8833 	if (ids == NULL || *count < snapshot_entry->clone_count) {
8834 		*count = snapshot_entry->clone_count;
8835 		return -ENOMEM;
8836 	}
8837 	*count = snapshot_entry->clone_count;
8838 
8839 	n = 0;
8840 	TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) {
8841 		ids[n++] = clone_entry->id;
8842 	}
8843 
8844 	return 0;
8845 }
8846 
8847 static void
8848 bs_load_grow_continue(struct spdk_bs_load_ctx *ctx)
8849 {
8850 	int rc;
8851 
8852 	if (ctx->super->size == 0) {
8853 		ctx->super->size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen;
8854 	}
8855 
8856 	if (ctx->super->io_unit_size == 0) {
8857 		ctx->super->io_unit_size = SPDK_BS_PAGE_SIZE;
8858 	}
8859 
8860 	/* Parse the super block */
8861 	ctx->bs->clean = 1;
8862 	ctx->bs->cluster_sz = ctx->super->cluster_size;
8863 	ctx->bs->total_clusters = ctx->super->size / ctx->super->cluster_size;
8864 	ctx->bs->pages_per_cluster = ctx->bs->cluster_sz / SPDK_BS_PAGE_SIZE;
8865 	if (spdk_u32_is_pow2(ctx->bs->pages_per_cluster)) {
8866 		ctx->bs->pages_per_cluster_shift = spdk_u32log2(ctx->bs->pages_per_cluster);
8867 	}
8868 	ctx->bs->io_unit_size = ctx->super->io_unit_size;
8869 	rc = spdk_bit_array_resize(&ctx->used_clusters, ctx->bs->total_clusters);
8870 	if (rc < 0) {
8871 		bs_load_ctx_fail(ctx, -ENOMEM);
8872 		return;
8873 	}
8874 	ctx->bs->md_start = ctx->super->md_start;
8875 	ctx->bs->md_len = ctx->super->md_len;
8876 	rc = spdk_bit_array_resize(&ctx->bs->open_blobids, ctx->bs->md_len);
8877 	if (rc < 0) {
8878 		bs_load_ctx_fail(ctx, -ENOMEM);
8879 		return;
8880 	}
8881 
8882 	ctx->bs->total_data_clusters = ctx->bs->total_clusters - spdk_divide_round_up(
8883 					       ctx->bs->md_start + ctx->bs->md_len, ctx->bs->pages_per_cluster);
8884 	ctx->bs->super_blob = ctx->super->super_blob;
8885 	memcpy(&ctx->bs->bstype, &ctx->super->bstype, sizeof(ctx->super->bstype));
8886 
8887 	if (ctx->super->used_blobid_mask_len == 0 || ctx->super->clean == 0) {
8888 		SPDK_ERRLOG("Cannot grow an unclean blobstore; load it normally to clean it.\n");
8889 		bs_load_ctx_fail(ctx, -EIO);
8890 		return;
8891 	}
8892 
8893 	bs_load_read_used_pages(ctx);
8894 }
8895 
8896 static void
8897 bs_load_grow_super_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
8898 {
8899 	struct spdk_bs_load_ctx	*ctx = cb_arg;
8900 
8901 	if (bserrno != 0) {
8902 		bs_load_ctx_fail(ctx, bserrno);
8903 		return;
8904 	}
8905 	bs_load_grow_continue(ctx);
8906 }
8907 
8908 static void
8909 bs_load_grow_used_clusters_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
8910 {
8911 	struct spdk_bs_load_ctx	*ctx = cb_arg;
8912 
8913 	if (bserrno != 0) {
8914 		bs_load_ctx_fail(ctx, bserrno);
8915 		return;
8916 	}
8917 
8918 	spdk_free(ctx->mask);
8919 
8920 	bs_sequence_write_dev(ctx->seq, ctx->super, bs_page_to_lba(ctx->bs, 0),
8921 			      bs_byte_to_lba(ctx->bs, sizeof(*ctx->super)),
8922 			      bs_load_grow_super_write_cpl, ctx);
8923 }
8924 
8925 static void
8926 bs_load_grow_used_clusters_read_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
8927 {
8928 	struct spdk_bs_load_ctx *ctx = cb_arg;
8929 	uint64_t		lba, lba_count;
8930 	uint64_t		dev_size;
8931 	uint64_t		total_clusters;
8932 
8933 	if (bserrno != 0) {
8934 		bs_load_ctx_fail(ctx, bserrno);
8935 		return;
8936 	}
8937 
8938 	/* The type must be correct */
8939 	assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_CLUSTERS);
8940 	/* The length of the mask (in bits) must not be greater than the length of the buffer (converted to bits) */
8941 	assert(ctx->mask->length <= (ctx->super->used_cluster_mask_len * sizeof(
8942 					     struct spdk_blob_md_page) * 8));
8943 	dev_size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen;
8944 	total_clusters = dev_size / ctx->super->cluster_size;
8945 	ctx->mask->length = total_clusters;
8946 
8947 	lba = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start);
8948 	lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len);
8949 	bs_sequence_write_dev(ctx->seq, ctx->mask, lba, lba_count,
8950 			      bs_load_grow_used_clusters_write_cpl, ctx);
8951 }
8952 
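/*
 * Decide whether the blobstore can be grown to fill the underlying device
 * during load.  The new used_cluster_mask must still fit in the metadata
 * region between used_cluster_mask_start and used_blobid_mask_start; its
 * length in pages is:
 *
 *	divide_round_up(sizeof(struct spdk_bs_md_mask) +
 *			divide_round_up(total_clusters, 8), SPDK_BS_PAGE_SIZE)
 *
 * If growth is possible, the super block is updated and the (extended)
 * used-cluster mask is rewritten; otherwise loading continues unchanged.
 */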
8953 static void
8954 bs_load_try_to_grow(struct spdk_bs_load_ctx *ctx)
8955 {
8956 	uint64_t dev_size, total_clusters, used_cluster_mask_len, max_used_cluster_mask;
8957 	uint64_t lba, lba_count, mask_size;
8958 
8959 	dev_size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen;
8960 	total_clusters = dev_size / ctx->super->cluster_size;
8961 	used_cluster_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) +
8962 				spdk_divide_round_up(total_clusters, 8),
8963 				SPDK_BS_PAGE_SIZE);
8964 	max_used_cluster_mask = ctx->super->used_blobid_mask_start - ctx->super->used_cluster_mask_start;
8965 	/* No need to grow, or no space to grow */
8966 	if (ctx->super->size >= dev_size || used_cluster_mask_len > max_used_cluster_mask) {
8967 		SPDK_DEBUGLOG(blob, "No grow\n");
8968 		bs_load_grow_continue(ctx);
8969 		return;
8970 	}
8971 
8972 	SPDK_DEBUGLOG(blob, "Resize blobstore\n");
8973 
8974 	ctx->super->size = dev_size;
8975 	ctx->super->used_cluster_mask_len = used_cluster_mask_len;
8976 	ctx->super->crc = blob_md_page_calc_crc(ctx->super);
8977 
8978 	mask_size = used_cluster_mask_len * SPDK_BS_PAGE_SIZE;
8979 	ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_SOCKET_ID_ANY,
8980 				 SPDK_MALLOC_DMA);
8981 	if (!ctx->mask) {
8982 		bs_load_ctx_fail(ctx, -ENOMEM);
8983 		return;
8984 	}
8985 	lba = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start);
8986 	lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len);
8987 	bs_sequence_read_dev(ctx->seq, ctx->mask, lba, lba_count,
8988 			     bs_load_grow_used_clusters_read_cpl, ctx);
8989 }
8990 
8991 static void
8992 bs_grow_load_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
8993 {
8994 	struct spdk_bs_load_ctx *ctx = cb_arg;
8995 	int rc;
8996 
8997 	rc = bs_super_validate(ctx->super, ctx->bs);
8998 	if (rc != 0) {
8999 		bs_load_ctx_fail(ctx, rc);
9000 		return;
9001 	}
9002 
9003 	bs_load_try_to_grow(ctx);
9004 }
9005 
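/*
 * Context for growing a loaded (live) blobstore.  The flow is:
 * read super -> validate -> check new geometry -> write updated super ->
 * swap in a larger used_clusters bit pool under bs->used_lock.
 */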
9006 struct spdk_bs_grow_ctx {
9007 	struct spdk_blob_store		*bs;
9008 	struct spdk_bs_super_block	*super;
9009 
9010 	struct spdk_bit_pool		*new_used_clusters;
9011 	struct spdk_bs_md_mask		*new_used_clusters_mask;
9012 
9013 	spdk_bs_sequence_t		*seq;
9014 };
9015 
9016 static void
9017 bs_grow_live_done(struct spdk_bs_grow_ctx *ctx, int bserrno)
9018 {
9019 	if (bserrno != 0) {
9020 		spdk_bit_pool_free(&ctx->new_used_clusters);
9021 	}
9022 
9023 	bs_sequence_finish(ctx->seq, bserrno);
9024 	free(ctx->new_used_clusters_mask);
9025 	spdk_free(ctx->super);
9026 	free(ctx);
9027 }
9028 
9029 static void
9030 bs_grow_live_super_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
9031 {
9032 	struct spdk_bs_grow_ctx	*ctx = cb_arg;
9033 	struct spdk_blob_store *bs = ctx->bs;
9034 	uint64_t total_clusters;
9035 
9036 	if (bserrno != 0) {
9037 		bs_grow_live_done(ctx, bserrno);
9038 		return;
9039 	}
9040 
9041 	/*
9042 	 * The blobstore is not clean until it is unloaded; for now only the super block is
9043 	 * up to date.  This is similar to the state right after blobstore init, before
9044 	 * bs_write_used_md() has executed.
9045 	 * On a clean unload, the used md pages will be written out.
9046 	 * On an unclean shutdown, loading the blobstore will take the recovery path,
9047 	 * correctly filling out used_clusters at the new size and writing it out.
9048 	 */
9049 	bs->clean = 0;
9050 
9051 	/* Reverting super->size past this point is complex, so avoid any error
9052 	 * paths that would require doing so. */
9053 	spdk_spin_lock(&bs->used_lock);
9054 
9055 	total_clusters = ctx->super->size / ctx->super->cluster_size;
9056 
9057 	assert(total_clusters >= spdk_bit_pool_capacity(bs->used_clusters));
9058 	spdk_bit_pool_store_mask(bs->used_clusters, ctx->new_used_clusters_mask);
9059 
9060 	assert(total_clusters == spdk_bit_pool_capacity(ctx->new_used_clusters));
9061 	spdk_bit_pool_load_mask(ctx->new_used_clusters, ctx->new_used_clusters_mask);
9062 
9063 	spdk_bit_pool_free(&bs->used_clusters);
9064 	bs->used_clusters = ctx->new_used_clusters;
9065 
9066 	bs->total_clusters = total_clusters;
9067 	bs->total_data_clusters = bs->total_clusters - spdk_divide_round_up(
9068 					  bs->md_start + bs->md_len, bs->pages_per_cluster);
9069 
9070 	bs->num_free_clusters = spdk_bit_pool_count_free(bs->used_clusters);
9071 	assert(bs->num_free_clusters <= bs->total_clusters);
9072 	spdk_spin_unlock(&bs->used_lock);
9073 
9074 	bs_grow_live_done(ctx, 0);
9075 }
9076 
9077 static void
9078 bs_grow_live_load_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
9079 {
9080 	struct spdk_bs_grow_ctx *ctx = cb_arg;
9081 	uint64_t dev_size, total_clusters, used_cluster_mask_len, max_used_cluster_mask;
9082 	int rc;
9083 
9084 	if (bserrno != 0) {
9085 		bs_grow_live_done(ctx, bserrno);
9086 		return;
9087 	}
9088 
9089 	rc = bs_super_validate(ctx->super, ctx->bs);
9090 	if (rc != 0) {
9091 		bs_grow_live_done(ctx, rc);
9092 		return;
9093 	}
9094 
9095 	dev_size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen;
9096 	total_clusters = dev_size / ctx->super->cluster_size;
9097 	used_cluster_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) +
9098 				spdk_divide_round_up(total_clusters, 8),
9099 				SPDK_BS_PAGE_SIZE);
9100 	max_used_cluster_mask = ctx->super->used_blobid_mask_start - ctx->super->used_cluster_mask_start;
9101 	/* Only check dev_size here, since it can change while total_clusters stays the same. */
9102 	if (dev_size == ctx->super->size) {
9103 		SPDK_DEBUGLOG(blob, "No need to grow blobstore\n");
9104 		bs_grow_live_done(ctx, 0);
9105 		return;
9106 	}
9107 	/*
9108 	 * The blobstore cannot be shrunk, so fail if:
9109 	 * - the new size of the device is smaller than the size in the super block
9110 	 * - the new total number of clusters is smaller than the used_clusters bit_pool
9111 	 * - there is not enough metadata space for the used_cluster_mask to be written out
9112 	 */
9113 	if (dev_size < ctx->super->size ||
9114 	    total_clusters < spdk_bit_pool_capacity(ctx->bs->used_clusters) ||
9115 	    used_cluster_mask_len > max_used_cluster_mask) {
9116 		SPDK_DEBUGLOG(blob, "No space to grow blobstore\n");
9117 		bs_grow_live_done(ctx, -ENOSPC);
9118 		return;
9119 	}
9120 
9121 	SPDK_DEBUGLOG(blob, "Resizing blobstore\n");
9122 
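	/* Note: one byte is allocated per cluster here, which comfortably covers
	 * the one bit per cluster that the bit-pool mask actually requires. */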
9123 	ctx->new_used_clusters_mask = calloc(1, total_clusters);
9124 	if (!ctx->new_used_clusters_mask) {
9125 		bs_grow_live_done(ctx, -ENOMEM);
9126 		return;
9127 	}
9128 	ctx->new_used_clusters = spdk_bit_pool_create(total_clusters);
9129 	if (!ctx->new_used_clusters) {
9130 		bs_grow_live_done(ctx, -ENOMEM);
9131 		return;
9132 	}
9133 
9134 	ctx->super->clean = 0;
9135 	ctx->super->size = dev_size;
9136 	ctx->super->used_cluster_mask_len = used_cluster_mask_len;
9137 	bs_write_super(seq, ctx->bs, ctx->super, bs_grow_live_super_write_cpl, ctx);
9138 }
9139 
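/*
 * Grow an already-loaded blobstore to fill its device, without reloading it.
 * Must be called from the blobstore's metadata thread.
 *
 * Illustrative usage sketch (the callback below is hypothetical):
 *
 *	static void
 *	my_grow_done(void *cb_arg, int bserrno)
 *	{
 *		if (bserrno == 0) {
 *			... new clusters are now available for allocation ...
 *		}
 *	}
 *
 *	spdk_bs_grow_live(bs, my_grow_done, my_ctx);
 */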
9140 void
9141 spdk_bs_grow_live(struct spdk_blob_store *bs,
9142 		  spdk_bs_op_complete cb_fn, void *cb_arg)
9143 {
9144 	struct spdk_bs_cpl	cpl;
9145 	struct spdk_bs_grow_ctx *ctx;
9146 
9147 	assert(spdk_get_thread() == bs->md_thread);
9148 
9149 	SPDK_DEBUGLOG(blob, "Growing blobstore on dev %p\n", bs->dev);
9150 
9151 	cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
9152 	cpl.u.bs_basic.cb_fn = cb_fn;
9153 	cpl.u.bs_basic.cb_arg = cb_arg;
9154 
9155 	ctx = calloc(1, sizeof(struct spdk_bs_grow_ctx));
9156 	if (!ctx) {
9157 		cb_fn(cb_arg, -ENOMEM);
9158 		return;
9159 	}
9160 	ctx->bs = bs;
9161 
9162 	ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL,
9163 				  SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
9164 	if (!ctx->super) {
9165 		free(ctx);
9166 		cb_fn(cb_arg, -ENOMEM);
9167 		return;
9168 	}
9169 
9170 	ctx->seq = bs_sequence_start_bs(bs->md_channel, &cpl);
9171 	if (!ctx->seq) {
9172 		spdk_free(ctx->super);
9173 		free(ctx);
9174 		cb_fn(cb_arg, -ENOMEM);
9175 		return;
9176 	}
9177 
9178 	/* Read the super block */
9179 	bs_sequence_read_dev(ctx->seq, ctx->super, bs_page_to_lba(bs, 0),
9180 			     bs_byte_to_lba(bs, sizeof(*ctx->super)),
9181 			     bs_grow_live_load_super_cpl, ctx);
9182 }
9183 
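/*
 * Load a blobstore from a device, growing it to fill the device first when
 * possible.  Unlike spdk_bs_grow_live(), this operates on a blobstore that is
 * not currently loaded and hands back a new handle on completion.
 *
 * Illustrative usage sketch (the callback below is hypothetical):
 *
 *	static void
 *	my_grow_cb(void *cb_arg, struct spdk_blob_store *bs, int bserrno)
 *	{
 *		if (bserrno == 0) {
 *			... use bs ...
 *		}
 *	}
 *
 *	spdk_bs_grow(dev, NULL, my_grow_cb, my_ctx);
 */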
9184 void
9185 spdk_bs_grow(struct spdk_bs_dev *dev, struct spdk_bs_opts *o,
9186 	     spdk_bs_op_with_handle_complete cb_fn, void *cb_arg)
9187 {
9188 	struct spdk_blob_store	*bs;
9189 	struct spdk_bs_cpl	cpl;
9190 	struct spdk_bs_load_ctx *ctx;
9191 	struct spdk_bs_opts	opts = {};
9192 	int err;
9193 
9194 	SPDK_DEBUGLOG(blob, "Loading and growing blobstore from dev %p\n", dev);
9195 
9196 	if ((SPDK_BS_PAGE_SIZE % dev->blocklen) != 0) {
9197 		SPDK_DEBUGLOG(blob, "unsupported dev block length of %" PRIu32 "\n", dev->blocklen);
9198 		dev->destroy(dev);
9199 		cb_fn(cb_arg, NULL, -EINVAL);
9200 		return;
9201 	}
9202 
9203 	spdk_bs_opts_init(&opts, sizeof(opts));
9204 	if (o) {
9205 		if (bs_opts_copy(o, &opts)) {
9206 			dev->destroy(dev);
9207 			cb_fn(cb_arg, NULL, -EINVAL);
9208 			return;
9209 		}
9210 	}
9209 
9210 	if (opts.max_md_ops == 0 || opts.max_channel_ops == 0) {
9211 		dev->destroy(dev);
9212 		cb_fn(cb_arg, NULL, -EINVAL);
9213 		return;
9214 	}
9215 
9216 	err = bs_alloc(dev, &opts, &bs, &ctx);
9217 	if (err) {
9218 		dev->destroy(dev);
9219 		cb_fn(cb_arg, NULL, err);
9220 		return;
9221 	}
9222 
9223 	cpl.type = SPDK_BS_CPL_TYPE_BS_HANDLE;
9224 	cpl.u.bs_handle.cb_fn = cb_fn;
9225 	cpl.u.bs_handle.cb_arg = cb_arg;
9226 	cpl.u.bs_handle.bs = bs;
9227 
9228 	ctx->seq = bs_sequence_start_bs(bs->md_channel, &cpl);
9229 	if (!ctx->seq) {
9230 		spdk_free(ctx->super);
9231 		free(ctx);
9232 		bs_free(bs);
9233 		cb_fn(cb_arg, NULL, -ENOMEM);
9234 		return;
9235 	}
9236 
9237 	/* Read the super block */
9238 	bs_sequence_read_dev(ctx->seq, ctx->super, bs_page_to_lba(bs, 0),
9239 			     bs_byte_to_lba(bs, sizeof(*ctx->super)),
9240 			     bs_grow_load_super_cpl, ctx);
9241 }
9242 
9243 int
9244 spdk_blob_get_esnap_id(struct spdk_blob *blob, const void **id, size_t *len)
9245 {
9246 	if (!blob_is_esnap_clone(blob)) {
9247 		return -EINVAL;
9248 	}
9249 
9250 	return blob_get_xattr_value(blob, BLOB_EXTERNAL_SNAPSHOT_ID, id, len, true);
9251 }
9252 
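/*
 * Look up (or lazily create) the per-thread IO channel used to reach this
 * esnap clone's external snapshot device.  Channels are cached in the
 * bs_channel's RB tree, keyed by blob ID, so each thread pays the creation
 * cost at most once per blob.  Returns NULL on allocation failure.
 */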
9253 struct spdk_io_channel *
9254 blob_esnap_get_io_channel(struct spdk_io_channel *ch, struct spdk_blob *blob)
9255 {
9256 	struct spdk_bs_channel		*bs_channel = spdk_io_channel_get_ctx(ch);
9257 	struct spdk_bs_dev		*bs_dev = blob->back_bs_dev;
9258 	struct blob_esnap_channel	find = {};
9259 	struct blob_esnap_channel	*esnap_channel, *existing;
9260 
9261 	find.blob_id = blob->id;
9262 	esnap_channel = RB_FIND(blob_esnap_channel_tree, &bs_channel->esnap_channels, &find);
9263 	if (spdk_likely(esnap_channel != NULL)) {
9264 		SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": using cached channel on thread %s\n",
9265 			      blob->id, spdk_thread_get_name(spdk_get_thread()));
9266 		return esnap_channel->channel;
9267 	}
9268 
9269 	SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": allocating channel on thread %s\n",
9270 		      blob->id, spdk_thread_get_name(spdk_get_thread()));
9271 
9272 	esnap_channel = calloc(1, sizeof(*esnap_channel));
9273 	if (esnap_channel == NULL) {
9274 		SPDK_NOTICELOG("blob 0x%" PRIx64 " channel allocation failed: no memory\n",
9275 			       find.blob_id);
9276 		return NULL;
9277 	}
9278 	esnap_channel->channel = bs_dev->create_channel(bs_dev);
9279 	if (esnap_channel->channel == NULL) {
9280 		SPDK_NOTICELOG("blob 0x%" PRIx64 " back channel allocation failed\n", blob->id);
9281 		free(esnap_channel);
9282 		return NULL;
9283 	}
9284 	esnap_channel->blob_id = find.blob_id;
9285 	existing = RB_INSERT(blob_esnap_channel_tree, &bs_channel->esnap_channels, esnap_channel);
9286 	if (spdk_unlikely(existing != NULL)) {
9287 		/*
9288 		 * This should be unreachable: all modifications to this tree happen on this thread.
9289 		 */
9290 		SPDK_ERRLOG("blob 0x%" PRIx64 " lost race to allocate a channel\n", find.blob_id);
9291 		assert(false);
9292 
9293 		bs_dev->destroy_channel(bs_dev, esnap_channel->channel);
9294 		free(esnap_channel);
9295 
9296 		return existing->channel;
9297 	}
9298 
9299 	return esnap_channel->channel;
9300 }
9301 
9302 static int
9303 blob_esnap_channel_compare(struct blob_esnap_channel *c1, struct blob_esnap_channel *c2)
9304 {
9305 	return (c1->blob_id < c2->blob_id ? -1 : c1->blob_id > c2->blob_id);
9306 }
9307 
9308 struct blob_esnap_destroy_ctx {
9309 	spdk_blob_op_with_handle_complete	cb_fn;
9310 	void					*cb_arg;
9311 	struct spdk_blob			*blob;
9312 	struct spdk_bs_dev			*back_bs_dev;
9313 	bool					abort_io;
9314 };
9315 
9316 static void
9317 blob_esnap_destroy_channels_done(struct spdk_io_channel_iter *i, int status)
9318 {
9319 	struct blob_esnap_destroy_ctx	*ctx = spdk_io_channel_iter_get_ctx(i);
9320 	struct spdk_blob		*blob = ctx->blob;
9321 	struct spdk_blob_store		*bs = blob->bs;
9322 
9323 	SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": done destroying channels for this blob\n",
9324 		      blob->id);
9325 
9326 	if (ctx->cb_fn != NULL) {
9327 		ctx->cb_fn(ctx->cb_arg, blob, status);
9328 	}
9329 	free(ctx);
9330 
9331 	bs->esnap_channels_unloading--;
9332 	if (bs->esnap_channels_unloading == 0 && bs->esnap_unload_cb_fn != NULL) {
9333 		spdk_bs_unload(bs, bs->esnap_unload_cb_fn, bs->esnap_unload_cb_arg);
9334 	}
9335 }
9336 
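/*
 * Per-channel step of blob_esnap_destroy_bs_dev_channels(): runs on each
 * thread that owns a bs_channel, removes this blob's cached esnap channel (if
 * any), optionally aborts queued IO that targets it, and then destroys it.
 */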
9337 static void
9338 blob_esnap_destroy_one_channel(struct spdk_io_channel_iter *i)
9339 {
9340 	struct blob_esnap_destroy_ctx	*ctx = spdk_io_channel_iter_get_ctx(i);
9341 	struct spdk_blob		*blob = ctx->blob;
9342 	struct spdk_bs_dev		*bs_dev = ctx->back_bs_dev;
9343 	struct spdk_io_channel		*channel = spdk_io_channel_iter_get_channel(i);
9344 	struct spdk_bs_channel		*bs_channel = spdk_io_channel_get_ctx(channel);
9345 	struct blob_esnap_channel	*esnap_channel;
9346 	struct blob_esnap_channel	find = {};
9347 
9348 	assert(spdk_get_thread() == spdk_io_channel_get_thread(channel));
9349 
9350 	find.blob_id = blob->id;
9351 	esnap_channel = RB_FIND(blob_esnap_channel_tree, &bs_channel->esnap_channels, &find);
9352 	if (esnap_channel != NULL) {
9353 		SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": destroying channel on thread %s\n",
9354 			      blob->id, spdk_thread_get_name(spdk_get_thread()));
9355 		RB_REMOVE(blob_esnap_channel_tree, &bs_channel->esnap_channels, esnap_channel);
9356 
9357 		if (ctx->abort_io) {
9358 			spdk_bs_user_op_t *op, *tmp;
9359 
9360 			TAILQ_FOREACH_SAFE(op, &bs_channel->queued_io, link, tmp) {
9361 				if (op->back_channel == esnap_channel->channel) {
9362 					TAILQ_REMOVE(&bs_channel->queued_io, op, link);
9363 					bs_user_op_abort(op, -EIO);
9364 				}
9365 			}
9366 		}
9367 
9368 		bs_dev->destroy_channel(bs_dev, esnap_channel->channel);
9369 		free(esnap_channel);
9370 	}
9371 
9372 	spdk_for_each_channel_continue(i, 0);
9373 }
9374 
9375 /*
9376  * Destroy the channels for a specific blob on each thread with a blobstore channel. This should be
9377  * used when closing an esnap clone blob and after decoupling from the parent.
9378  */
9379 static void
9380 blob_esnap_destroy_bs_dev_channels(struct spdk_blob *blob, bool abort_io,
9381 				   spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
9382 {
9383 	struct blob_esnap_destroy_ctx	*ctx;
9384 
9385 	if (!blob_is_esnap_clone(blob) || blob->back_bs_dev == NULL) {
9386 		if (cb_fn != NULL) {
9387 			cb_fn(cb_arg, blob, 0);
9388 		}
9389 		return;
9390 	}
9391 
9392 	ctx = calloc(1, sizeof(*ctx));
9393 	if (ctx == NULL) {
9394 		if (cb_fn != NULL) {
9395 			cb_fn(cb_arg, blob, -ENOMEM);
9396 		}
9397 		return;
9398 	}
9399 	ctx->cb_fn = cb_fn;
9400 	ctx->cb_arg = cb_arg;
9401 	ctx->blob = blob;
9402 	ctx->back_bs_dev = blob->back_bs_dev;
9403 	ctx->abort_io = abort_io;
9404 
9405 	SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": destroying channels for this blob\n",
9406 		      blob->id);
9407 
9408 	blob->bs->esnap_channels_unloading++;
9409 	spdk_for_each_channel(blob->bs, blob_esnap_destroy_one_channel, ctx,
9410 			      blob_esnap_destroy_channels_done);
9411 }
9412 
9413 /*
9414  * Destroy all bs_dev channels on a specific blobstore channel. This should be used when a
9415  * bs_channel is destroyed.
9416  */
9417 static void
9418 blob_esnap_destroy_bs_channel(struct spdk_bs_channel *ch)
9419 {
9420 	struct blob_esnap_channel *esnap_channel, *esnap_channel_tmp;
9421 
9422 	assert(spdk_get_thread() == spdk_io_channel_get_thread(spdk_io_channel_from_ctx(ch)));
9423 
9424 	SPDK_DEBUGLOG(blob_esnap, "destroying channels on thread %s\n",
9425 		      spdk_thread_get_name(spdk_get_thread()));
9426 	RB_FOREACH_SAFE(esnap_channel, blob_esnap_channel_tree, &ch->esnap_channels,
9427 			esnap_channel_tmp) {
9428 		SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64
9429 			      ": destroying one channel in thread %s\n",
9430 			      esnap_channel->blob_id, spdk_thread_get_name(spdk_get_thread()));
9431 		RB_REMOVE(blob_esnap_channel_tree, &ch->esnap_channels, esnap_channel);
9432 		spdk_put_io_channel(esnap_channel->channel);
9433 		free(esnap_channel);
9434 	}
9435 	SPDK_DEBUGLOG(blob_esnap, "done destroying channels on thread %s\n",
9436 		      spdk_thread_get_name(spdk_get_thread()));
9437 }
9438 
9439 struct set_bs_dev_ctx {
9440 	struct spdk_blob	*blob;
9441 	struct spdk_bs_dev	*back_bs_dev;
9442 	spdk_blob_op_complete	cb_fn;
9443 	void			*cb_arg;
9444 	int			bserrno;
9445 };
9446 
9447 static void
9448 blob_set_back_bs_dev_done(void *_ctx, int bserrno)
9449 {
9450 	struct set_bs_dev_ctx	*ctx = _ctx;
9451 
9452 	if (bserrno != 0) {
9453 		/* Even though the unfreeze failed, the update may have succeeded. */
9454 		SPDK_ERRLOG("blob 0x%" PRIx64 ": unfreeze failed with error %d\n", ctx->blob->id,
9455 			    bserrno);
9456 	}
9457 	ctx->cb_fn(ctx->cb_arg, ctx->bserrno);
9458 	free(ctx);
9459 }
9460 
9461 static void
9462 blob_frozen_set_back_bs_dev(void *_ctx, struct spdk_blob *blob, int bserrno)
9463 {
9464 	struct set_bs_dev_ctx	*ctx = _ctx;
9465 
9466 	if (bserrno != 0) {
9467 		SPDK_ERRLOG("blob 0x%" PRIx64 ": failed to release old back_bs_dev with error %d\n",
9468 			    blob->id, bserrno);
9469 		ctx->bserrno = bserrno;
9470 		blob_unfreeze_io(blob, blob_set_back_bs_dev_done, ctx);
9471 		return;
9472 	}
9473 
9474 	if (blob->back_bs_dev != NULL) {
9475 		blob->back_bs_dev->destroy(blob->back_bs_dev);
9476 	}
9477 
9478 	SPDK_NOTICELOG("blob 0x%" PRIx64 ": hotplugged back_bs_dev\n", blob->id);
9479 	blob->back_bs_dev = ctx->back_bs_dev;
9480 	ctx->bserrno = 0;
9481 
9482 	blob_unfreeze_io(blob, blob_set_back_bs_dev_done, ctx);
9483 }
9484 
9485 static void
9486 blob_frozen_destroy_esnap_channels(void *_ctx, int bserrno)
9487 {
9488 	struct set_bs_dev_ctx	*ctx = _ctx;
9489 	struct spdk_blob	*blob = ctx->blob;
9490 
9491 	if (bserrno != 0) {
9492 		SPDK_ERRLOG("blob 0x%" PRIx64 ": failed to freeze with error %d\n", blob->id,
9493 			    bserrno);
9494 		ctx->cb_fn(ctx->cb_arg, bserrno);
9495 		free(ctx);
9496 		return;
9497 	}
9498 
9499 	/*
9500 	 * This does not prevent future reads from the esnap device because any future IO will
9501 	 * lazily create a new esnap IO channel.
9502 	 */
9503 	blob_esnap_destroy_bs_dev_channels(blob, true, blob_frozen_set_back_bs_dev, ctx);
9504 }
9505 
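/*
 * Hotplug a new external snapshot device under an esnap clone.  IO to the
 * blob is frozen, the cached esnap channels are torn down (aborting queued
 * IO), the old back_bs_dev is destroyed and replaced, and IO is unfrozen.
 *
 * Illustrative usage sketch (the callback below is hypothetical):
 *
 *	static void
 *	my_set_dev_done(void *cb_arg, int bserrno)
 *	{
 *		... back_bs_dev swap completed (or failed with bserrno) ...
 *	}
 *
 *	spdk_blob_set_esnap_bs_dev(blob, new_bs_dev, my_set_dev_done, my_ctx);
 */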
9506 void
9507 spdk_blob_set_esnap_bs_dev(struct spdk_blob *blob, struct spdk_bs_dev *back_bs_dev,
9508 			   spdk_blob_op_complete cb_fn, void *cb_arg)
9509 {
9510 	struct set_bs_dev_ctx	*ctx;
9511 
9512 	if (!blob_is_esnap_clone(blob)) {
9513 		SPDK_ERRLOG("blob 0x%" PRIx64 ": not an esnap clone\n", blob->id);
9514 		cb_fn(cb_arg, -EINVAL);
9515 		return;
9516 	}
9517 
9518 	ctx = calloc(1, sizeof(*ctx));
9519 	if (ctx == NULL) {
9520 		SPDK_ERRLOG("blob 0x%" PRIx64 ": out of memory while setting back_bs_dev\n",
9521 			    blob->id);
9522 		cb_fn(cb_arg, -ENOMEM);
9523 		return;
9524 	}
9525 	ctx->cb_fn = cb_fn;
9526 	ctx->cb_arg = cb_arg;
9527 	ctx->back_bs_dev = back_bs_dev;
9528 	ctx->blob = blob;
9529 	blob_freeze_io(blob, blob_frozen_destroy_esnap_channels, ctx);
9530 }
9531 
9532 struct spdk_bs_dev *
9533 spdk_blob_get_esnap_bs_dev(const struct spdk_blob *blob)
9534 {
9535 	if (!blob_is_esnap_clone(blob)) {
9536 		SPDK_ERRLOG("blob 0x%" PRIx64 ": not an esnap clone\n", blob->id);
9537 		return NULL;
9538 	}
9539 
9540 	return blob->back_bs_dev;
9541 }
9542 
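/*
 * A blob is degraded if the blobstore's base device reports itself degraded,
 * or if the blob's back_bs_dev (e.g. a missing external snapshot) does.
 */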
9543 bool
9544 spdk_blob_is_degraded(const struct spdk_blob *blob)
9545 {
9546 	if (blob->bs->dev->is_degraded != NULL && blob->bs->dev->is_degraded(blob->bs->dev)) {
9547 		return true;
9548 	}
9549 	if (blob->back_bs_dev == NULL || blob->back_bs_dev->is_degraded == NULL) {
9550 		return false;
9551 	}
9552 
9553 	return blob->back_bs_dev->is_degraded(blob->back_bs_dev);
9554 }
9555 
9556 SPDK_LOG_REGISTER_COMPONENT(blob)
9557 SPDK_LOG_REGISTER_COMPONENT(blob_esnap)
9558