/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2017 Intel Corporation.
 *   All rights reserved.
 *   Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 */

#include "spdk/stdinc.h"

#include "spdk/blob.h"
#include "spdk/crc32.h"
#include "spdk/env.h"
#include "spdk/queue.h"
#include "spdk/thread.h"
#include "spdk/bit_array.h"
#include "spdk/bit_pool.h"
#include "spdk/likely.h"
#include "spdk/util.h"
#include "spdk/string.h"

#include "spdk_internal/assert.h"
#include "spdk/log.h"

#include "blobstore.h"

#define BLOB_CRC32C_INITIAL    0xffffffffUL

static int bs_register_md_thread(struct spdk_blob_store *bs);
static int bs_unregister_md_thread(struct spdk_blob_store *bs);
static void blob_close_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno);
static void blob_insert_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num,
		uint64_t cluster, uint32_t extent, struct spdk_blob_md_page *page,
		spdk_blob_op_complete cb_fn, void *cb_arg);
static void blob_free_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num,
		uint32_t extent_page, struct spdk_blob_md_page *page, spdk_blob_op_complete cb_fn, void *cb_arg);

static int blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value,
			  uint16_t value_len, bool internal);
static int blob_get_xattr_value(struct spdk_blob *blob, const char *name,
				const void **value, size_t *value_len, bool internal);
static int blob_remove_xattr(struct spdk_blob *blob, const char *name, bool internal);

static void blob_write_extent_page(struct spdk_blob *blob, uint32_t extent, uint64_t cluster_num,
				   struct spdk_blob_md_page *page, spdk_blob_op_complete cb_fn, void *cb_arg);

/*
 * External snapshots require a channel per thread per esnap bdev.  The tree
 * is populated lazily as blob IOs are handled by the back_bs_dev. When this
 * channel is destroyed, all the channels in the tree are destroyed.
 */

struct blob_esnap_channel {
	RB_ENTRY(blob_esnap_channel)	node;
	spdk_blob_id			blob_id;
	struct spdk_io_channel		*channel;
};

static int blob_esnap_channel_compare(struct blob_esnap_channel *c1, struct blob_esnap_channel *c2);
static void blob_esnap_destroy_bs_dev_channels(struct spdk_blob *blob, bool abort_io,
		spdk_blob_op_with_handle_complete cb_fn, void *cb_arg);
static void blob_esnap_destroy_bs_channel(struct spdk_bs_channel *ch);
RB_GENERATE_STATIC(blob_esnap_channel_tree, blob_esnap_channel, node, blob_esnap_channel_compare)

static inline bool
blob_is_esnap_clone(const struct spdk_blob *blob)
{
	assert(blob != NULL);
	return !!(blob->invalid_flags & SPDK_BLOB_EXTERNAL_SNAPSHOT);
}

static int
blob_id_cmp(struct spdk_blob *blob1, struct spdk_blob *blob2)
{
	assert(blob1 != NULL && blob2 != NULL);
	return (blob1->id < blob2->id ? -1 : blob1->id > blob2->id);
}

RB_GENERATE_STATIC(spdk_blob_tree, spdk_blob, link, blob_id_cmp);

static void
blob_verify_md_op(struct spdk_blob *blob)
{
	assert(blob != NULL);
	assert(spdk_get_thread() == blob->bs->md_thread);
	assert(blob->state != SPDK_BLOB_STATE_LOADING);
}

static struct spdk_blob_list *
bs_get_snapshot_entry(struct spdk_blob_store *bs, spdk_blob_id blobid)
{
	struct spdk_blob_list *snapshot_entry = NULL;

	TAILQ_FOREACH(snapshot_entry, &bs->snapshots, link) {
		if (snapshot_entry->id == blobid) {
			break;
		}
	}

	return snapshot_entry;
}

static void
bs_claim_md_page(struct spdk_blob_store *bs, uint32_t page)
{
	assert(spdk_spin_held(&bs->used_lock));
	assert(page < spdk_bit_array_capacity(bs->used_md_pages));
	assert(spdk_bit_array_get(bs->used_md_pages, page) == false);

	spdk_bit_array_set(bs->used_md_pages, page);
}

static void
bs_release_md_page(struct spdk_blob_store *bs, uint32_t page)
{
	assert(spdk_spin_held(&bs->used_lock));
	assert(page < spdk_bit_array_capacity(bs->used_md_pages));
	assert(spdk_bit_array_get(bs->used_md_pages, page) == true);

	spdk_bit_array_clear(bs->used_md_pages, page);
}

static uint32_t
bs_claim_cluster(struct spdk_blob_store *bs)
{
	uint32_t cluster_num;

	assert(spdk_spin_held(&bs->used_lock));

	cluster_num = spdk_bit_pool_allocate_bit(bs->used_clusters);
	if (cluster_num == UINT32_MAX) {
		return UINT32_MAX;
	}

	SPDK_DEBUGLOG(blob, "Claiming cluster %u\n", cluster_num);
	bs->num_free_clusters--;

	return cluster_num;
}

static void
bs_release_cluster(struct spdk_blob_store *bs, uint32_t cluster_num)
{
	assert(spdk_spin_held(&bs->used_lock));
	assert(cluster_num < spdk_bit_pool_capacity(bs->used_clusters));
	assert(spdk_bit_pool_is_allocated(bs->used_clusters, cluster_num) == true);
	assert(bs->num_free_clusters < bs->total_clusters);

	SPDK_DEBUGLOG(blob, "Releasing cluster %u\n", cluster_num);

	spdk_bit_pool_free_bit(bs->used_clusters, cluster_num);
	bs->num_free_clusters++;
}

static int
blob_insert_cluster(struct spdk_blob *blob, uint32_t cluster_num, uint64_t cluster)
{
	uint64_t *cluster_lba = &blob->active.clusters[cluster_num];

	blob_verify_md_op(blob);

	if (*cluster_lba != 0) {
		return -EEXIST;
	}

	*cluster_lba = bs_cluster_to_lba(blob->bs, cluster);
	return 0;
}

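/*
 * Claim a cluster for the given blob and, when the blob uses an extent table,
 * claim a metadata page for the corresponding extent page if one is not yet
 * allocated. On -ENOSPC the already claimed cluster is released again, so the
 * allocation is all-or-nothing. Must be called with bs->used_lock held.
 */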
static int
bs_allocate_cluster(struct spdk_blob *blob, uint32_t cluster_num,
		    uint64_t *cluster, uint32_t *lowest_free_md_page, bool update_map)
{
	uint32_t *extent_page = NULL;

	assert(spdk_spin_held(&blob->bs->used_lock));

	*cluster = bs_claim_cluster(blob->bs);
	if (*cluster == UINT32_MAX) {
		/* No more free clusters. Cannot satisfy the request. */
		return -ENOSPC;
	}

	if (blob->use_extent_table) {
		extent_page = bs_cluster_to_extent_page(blob, cluster_num);
		if (*extent_page == 0) {
			/* An extent page must never occupy md page 0, so start the search from 1. */
			if (*lowest_free_md_page == 0) {
				*lowest_free_md_page = 1;
			}
			/* No extent_page is allocated for the cluster yet. */
			*lowest_free_md_page = spdk_bit_array_find_first_clear(blob->bs->used_md_pages,
					       *lowest_free_md_page);
			if (*lowest_free_md_page == UINT32_MAX) {
				/* No more free md pages. Cannot satisfy the request. */
				bs_release_cluster(blob->bs, *cluster);
				return -ENOSPC;
			}
			bs_claim_md_page(blob->bs, *lowest_free_md_page);
		}
	}

	SPDK_DEBUGLOG(blob, "Claiming cluster %" PRIu64 " for blob 0x%" PRIx64 "\n", *cluster,
		      blob->id);

	if (update_map) {
		blob_insert_cluster(blob, cluster_num, *cluster);
		if (blob->use_extent_table && *extent_page == 0) {
			*extent_page = *lowest_free_md_page;
		}
	}

	return 0;
}

static void
blob_xattrs_init(struct spdk_blob_xattr_opts *xattrs)
{
	xattrs->count = 0;
	xattrs->names = NULL;
	xattrs->ctx = NULL;
	xattrs->get_value = NULL;
}

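/*
 * A minimal usage sketch for creating a blob with these options (illustrative
 * only; "bs", "create_cb" and "cb_arg" are assumed to exist in the caller's
 * code):
 *
 *	struct spdk_blob_opts opts;
 *
 *	spdk_blob_opts_init(&opts, sizeof(opts));
 *	opts.num_clusters = 8;
 *	opts.thin_provision = true;
 *	spdk_bs_create_blob_ext(bs, &opts, create_cb, cb_arg);
 *
 * Passing sizeof(opts) lets the library set defaults only for the fields that
 * exist in the caller's version of the structure.
 */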
void
spdk_blob_opts_init(struct spdk_blob_opts *opts, size_t opts_size)
{
	if (!opts) {
		SPDK_ERRLOG("opts should not be NULL\n");
		return;
	}

	if (!opts_size) {
		SPDK_ERRLOG("opts_size should not be zero\n");
		return;
	}

	memset(opts, 0, opts_size);
	opts->opts_size = opts_size;

#define FIELD_OK(field) \
        offsetof(struct spdk_blob_opts, field) + sizeof(opts->field) <= opts_size

#define SET_FIELD(field, value) \
        if (FIELD_OK(field)) { \
                opts->field = value; \
        } \

	SET_FIELD(num_clusters, 0);
	SET_FIELD(thin_provision, false);
	SET_FIELD(clear_method, BLOB_CLEAR_WITH_DEFAULT);

	if (FIELD_OK(xattrs)) {
		blob_xattrs_init(&opts->xattrs);
	}

	SET_FIELD(use_extent_table, true);

#undef FIELD_OK
#undef SET_FIELD
}

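/*
 * A minimal usage sketch for opening a blob with these options (illustrative
 * only; "bs", "blobid", "open_cb" and "cb_arg" are assumed to exist in the
 * caller's code):
 *
 *	struct spdk_blob_open_opts opts;
 *
 *	spdk_blob_open_opts_init(&opts, sizeof(opts));
 *	opts.clear_method = BLOB_CLEAR_WITH_NONE;
 *	spdk_bs_open_blob_ext(bs, blobid, &opts, open_cb, cb_arg);
 */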
void
spdk_blob_open_opts_init(struct spdk_blob_open_opts *opts, size_t opts_size)
{
	if (!opts) {
		SPDK_ERRLOG("opts should not be NULL\n");
		return;
	}

	if (!opts_size) {
		SPDK_ERRLOG("opts_size should not be zero\n");
		return;
	}

	memset(opts, 0, opts_size);
	opts->opts_size = opts_size;

#define FIELD_OK(field) \
        offsetof(struct spdk_blob_open_opts, field) + sizeof(opts->field) <= opts_size

#define SET_FIELD(field, value) \
        if (FIELD_OK(field)) { \
                opts->field = value; \
        } \

	SET_FIELD(clear_method, BLOB_CLEAR_WITH_DEFAULT);

#undef FIELD_OK
#undef SET_FIELD
}

static struct spdk_blob *
blob_alloc(struct spdk_blob_store *bs, spdk_blob_id id)
{
	struct spdk_blob *blob;

	blob = calloc(1, sizeof(*blob));
	if (!blob) {
		return NULL;
	}

	blob->id = id;
	blob->bs = bs;

	blob->parent_id = SPDK_BLOBID_INVALID;

	blob->state = SPDK_BLOB_STATE_DIRTY;
	blob->extent_rle_found = false;
	blob->extent_table_found = false;
	blob->active.num_pages = 1;
	blob->active.pages = calloc(1, sizeof(*blob->active.pages));
	if (!blob->active.pages) {
		free(blob);
		return NULL;
	}

	blob->active.pages[0] = bs_blobid_to_page(id);

	TAILQ_INIT(&blob->xattrs);
	TAILQ_INIT(&blob->xattrs_internal);
	TAILQ_INIT(&blob->pending_persists);
	TAILQ_INIT(&blob->persists_to_complete);

	return blob;
}

static void
xattrs_free(struct spdk_xattr_tailq *xattrs)
{
	struct spdk_xattr	*xattr, *xattr_tmp;

	TAILQ_FOREACH_SAFE(xattr, xattrs, link, xattr_tmp) {
		TAILQ_REMOVE(xattrs, xattr, link);
		free(xattr->name);
		free(xattr->value);
		free(xattr);
	}
}

static void
blob_free(struct spdk_blob *blob)
{
	assert(blob != NULL);
	assert(TAILQ_EMPTY(&blob->pending_persists));
	assert(TAILQ_EMPTY(&blob->persists_to_complete));

	free(blob->active.extent_pages);
	free(blob->clean.extent_pages);
	free(blob->active.clusters);
	free(blob->clean.clusters);
	free(blob->active.pages);
	free(blob->clean.pages);

	xattrs_free(&blob->xattrs);
	xattrs_free(&blob->xattrs_internal);

	if (blob->back_bs_dev) {
		blob->back_bs_dev->destroy(blob->back_bs_dev);
	}

	free(blob);
}

static void
blob_back_bs_destroy_esnap_done(void *ctx, struct spdk_blob *blob, int bserrno)
{
	struct spdk_bs_dev	*bs_dev = ctx;

	if (bserrno != 0) {
		/*
		 * This is probably due to a memory allocation failure when creating the
		 * blob_esnap_destroy_ctx before iterating threads.
		 */
		SPDK_ERRLOG("blob 0x%" PRIx64 ": Unable to destroy bs dev channels: error %d\n",
			    blob->id, bserrno);
		assert(false);
	}

	if (bs_dev == NULL) {
		/*
		 * This check exists to make scanbuild happy.
		 *
		 * blob->back_bs_dev for an esnap is NULL during the first iteration of blobs while
		 * the blobstore is being loaded. It could also be NULL if there was an error
		 * opening the esnap device. In each of these cases, no channels could have been
		 * created because back_bs_dev->create_channel() would have led to a NULL pointer
		 * deref.
		 */
		assert(false);
		return;
	}

	SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": calling destroy on back_bs_dev\n", blob->id);
	bs_dev->destroy(bs_dev);
}

static void
blob_back_bs_destroy(struct spdk_blob *blob)
{
	SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": preparing to destroy back_bs_dev\n",
		      blob->id);

	blob_esnap_destroy_bs_dev_channels(blob, false, blob_back_bs_destroy_esnap_done,
					   blob->back_bs_dev);
	blob->back_bs_dev = NULL;
}

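/*
 * I/O freezing: blob_freeze_io() bumps blob->frozen_refcnt and then runs an
 * empty message on every channel, which guarantees that all I/O submitted
 * before the freeze has been observed. While frozen, new I/O for the blob is
 * parked on the per-channel queued_io list; blob_unfreeze_io() drops the
 * refcount and re-executes the queued operations on each channel.
 */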
struct freeze_io_ctx {
	struct spdk_bs_cpl cpl;
	struct spdk_blob *blob;
};

static void
blob_io_sync(struct spdk_io_channel_iter *i)
{
	spdk_for_each_channel_continue(i, 0);
}

static void
blob_execute_queued_io(struct spdk_io_channel_iter *i)
{
	struct spdk_io_channel *_ch = spdk_io_channel_iter_get_channel(i);
	struct spdk_bs_channel *ch = spdk_io_channel_get_ctx(_ch);
	struct freeze_io_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
	struct spdk_bs_request_set	*set;
	struct spdk_bs_user_op_args	*args;
	spdk_bs_user_op_t *op, *tmp;

	TAILQ_FOREACH_SAFE(op, &ch->queued_io, link, tmp) {
		set = (struct spdk_bs_request_set *)op;
		args = &set->u.user_op;

		if (args->blob == ctx->blob) {
			TAILQ_REMOVE(&ch->queued_io, op, link);
			bs_user_op_execute(op);
		}
	}

	spdk_for_each_channel_continue(i, 0);
}

static void
blob_io_cpl(struct spdk_io_channel_iter *i, int status)
{
	struct freeze_io_ctx *ctx = spdk_io_channel_iter_get_ctx(i);

	ctx->cpl.u.blob_basic.cb_fn(ctx->cpl.u.blob_basic.cb_arg, 0);

	free(ctx);
}

static void
blob_freeze_io(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
{
	struct freeze_io_ctx *ctx;

	blob_verify_md_op(blob);

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	ctx->cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
	ctx->cpl.u.blob_basic.cb_fn = cb_fn;
	ctx->cpl.u.blob_basic.cb_arg = cb_arg;
	ctx->blob = blob;

	/* Freeze I/O on blob */
	blob->frozen_refcnt++;

	spdk_for_each_channel(blob->bs, blob_io_sync, ctx, blob_io_cpl);
}

static void
blob_unfreeze_io(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
{
	struct freeze_io_ctx *ctx;

	blob_verify_md_op(blob);

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	ctx->cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
	ctx->cpl.u.blob_basic.cb_fn = cb_fn;
	ctx->cpl.u.blob_basic.cb_arg = cb_arg;
	ctx->blob = blob;

	assert(blob->frozen_refcnt > 0);

	blob->frozen_refcnt--;

	spdk_for_each_channel(blob->bs, blob_execute_queued_io, ctx, blob_io_cpl);
}

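/*
 * Blob metadata is kept in two copies: "active" holds the state being
 * modified, while "clean" mirrors what was last written to disk. Marking a
 * blob clean moves the active extent page, cluster, and page arrays into the
 * clean copy and leaves freshly allocated duplicates in the active copy.
 */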
static int
blob_mark_clean(struct spdk_blob *blob)
{
	uint32_t *extent_pages = NULL;
	uint64_t *clusters = NULL;
	uint32_t *pages = NULL;

	assert(blob != NULL);

	if (blob->active.num_extent_pages) {
		assert(blob->active.extent_pages);
		extent_pages = calloc(blob->active.num_extent_pages, sizeof(*blob->active.extent_pages));
		if (!extent_pages) {
			return -ENOMEM;
		}
		memcpy(extent_pages, blob->active.extent_pages,
		       blob->active.num_extent_pages * sizeof(*extent_pages));
	}

	if (blob->active.num_clusters) {
		assert(blob->active.clusters);
		clusters = calloc(blob->active.num_clusters, sizeof(*blob->active.clusters));
		if (!clusters) {
			free(extent_pages);
			return -ENOMEM;
		}
		memcpy(clusters, blob->active.clusters, blob->active.num_clusters * sizeof(*blob->active.clusters));
	}

	if (blob->active.num_pages) {
		assert(blob->active.pages);
		pages = calloc(blob->active.num_pages, sizeof(*blob->active.pages));
		if (!pages) {
			free(extent_pages);
			free(clusters);
			return -ENOMEM;
		}
		memcpy(pages, blob->active.pages, blob->active.num_pages * sizeof(*blob->active.pages));
	}

	free(blob->clean.extent_pages);
	free(blob->clean.clusters);
	free(blob->clean.pages);

	blob->clean.num_extent_pages = blob->active.num_extent_pages;
	blob->clean.extent_pages = blob->active.extent_pages;
	blob->clean.num_clusters = blob->active.num_clusters;
	blob->clean.clusters = blob->active.clusters;
	blob->clean.num_pages = blob->active.num_pages;
	blob->clean.pages = blob->active.pages;

	blob->active.extent_pages = extent_pages;
	blob->active.clusters = clusters;
	blob->active.pages = pages;

	/* If the metadata was dirtied again while the metadata was being written to disk,
	 *  we do not want to revert the DIRTY state back to CLEAN here.
	 */
	if (blob->state == SPDK_BLOB_STATE_LOADING) {
		blob->state = SPDK_BLOB_STATE_CLEAN;
	}

	return 0;
}

static int
blob_deserialize_xattr(struct spdk_blob *blob,
		       struct spdk_blob_md_descriptor_xattr *desc_xattr, bool internal)
{
	struct spdk_xattr                       *xattr;

	if (desc_xattr->length != sizeof(desc_xattr->name_length) +
	    sizeof(desc_xattr->value_length) +
	    desc_xattr->name_length + desc_xattr->value_length) {
		return -EINVAL;
	}

	xattr = calloc(1, sizeof(*xattr));
	if (xattr == NULL) {
		return -ENOMEM;
	}

	xattr->name = malloc(desc_xattr->name_length + 1);
	if (xattr->name == NULL) {
		free(xattr);
		return -ENOMEM;
	}

	xattr->value = malloc(desc_xattr->value_length);
	if (xattr->value == NULL) {
		free(xattr->name);
		free(xattr);
		return -ENOMEM;
	}

	memcpy(xattr->name, desc_xattr->name, desc_xattr->name_length);
	xattr->name[desc_xattr->name_length] = '\0';
	xattr->value_len = desc_xattr->value_length;
	memcpy(xattr->value,
	       (void *)((uintptr_t)desc_xattr->name + desc_xattr->name_length),
	       desc_xattr->value_length);

	TAILQ_INSERT_TAIL(internal ? &blob->xattrs_internal : &blob->xattrs, xattr, link);

	return 0;
}

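/*
 * Metadata pages hold a packed sequence of variable-length descriptors, each
 * starting with a type and length header. blob_parse_page() walks that
 * sequence until it hits a zero-length PADDING descriptor or the end of the
 * page, dispatching on the descriptor type.
 */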
static int
blob_parse_page(const struct spdk_blob_md_page *page, struct spdk_blob *blob)
{
	struct spdk_blob_md_descriptor *desc;
	size_t	cur_desc = 0;
	void *tmp;

	desc = (struct spdk_blob_md_descriptor *)page->descriptors;
	while (cur_desc < sizeof(page->descriptors)) {
		if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) {
			if (desc->length == 0) {
				/* If padding and length are 0, this terminates the page */
				break;
			}
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) {
			struct spdk_blob_md_descriptor_flags	*desc_flags;

			desc_flags = (struct spdk_blob_md_descriptor_flags *)desc;

			if (desc_flags->length != sizeof(*desc_flags) - sizeof(*desc)) {
				return -EINVAL;
			}

			if ((desc_flags->invalid_flags | SPDK_BLOB_INVALID_FLAGS_MASK) !=
			    SPDK_BLOB_INVALID_FLAGS_MASK) {
				return -EINVAL;
			}

			if ((desc_flags->data_ro_flags | SPDK_BLOB_DATA_RO_FLAGS_MASK) !=
			    SPDK_BLOB_DATA_RO_FLAGS_MASK) {
				blob->data_ro = true;
				blob->md_ro = true;
			}

			if ((desc_flags->md_ro_flags | SPDK_BLOB_MD_RO_FLAGS_MASK) !=
			    SPDK_BLOB_MD_RO_FLAGS_MASK) {
				blob->md_ro = true;
			}

			if ((desc_flags->data_ro_flags & SPDK_BLOB_READ_ONLY)) {
				blob->data_ro = true;
				blob->md_ro = true;
			}

			blob->invalid_flags = desc_flags->invalid_flags;
			blob->data_ro_flags = desc_flags->data_ro_flags;
			blob->md_ro_flags = desc_flags->md_ro_flags;

		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE) {
			struct spdk_blob_md_descriptor_extent_rle	*desc_extent_rle;
			unsigned int				i, j;
			unsigned int				cluster_count = blob->active.num_clusters;

			if (blob->extent_table_found) {
				/* An Extent Table descriptor is already present in the md;
				 * both descriptors must never be present at the same time. */
				return -EINVAL;
			}
			blob->extent_rle_found = true;

			desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)desc;

			if (desc_extent_rle->length == 0 ||
			    (desc_extent_rle->length % sizeof(desc_extent_rle->extents[0]) != 0)) {
				return -EINVAL;
			}

			for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) {
				for (j = 0; j < desc_extent_rle->extents[i].length; j++) {
					if (desc_extent_rle->extents[i].cluster_idx != 0) {
						if (!spdk_bit_pool_is_allocated(blob->bs->used_clusters,
										desc_extent_rle->extents[i].cluster_idx + j)) {
							return -EINVAL;
						}
					}
					cluster_count++;
				}
			}

			if (cluster_count == 0) {
				return -EINVAL;
			}
			tmp = realloc(blob->active.clusters, cluster_count * sizeof(*blob->active.clusters));
			if (tmp == NULL) {
				return -ENOMEM;
			}
			blob->active.clusters = tmp;
			blob->active.cluster_array_size = cluster_count;

			for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) {
				for (j = 0; j < desc_extent_rle->extents[i].length; j++) {
					if (desc_extent_rle->extents[i].cluster_idx != 0) {
						blob->active.clusters[blob->active.num_clusters++] = bs_cluster_to_lba(blob->bs,
								desc_extent_rle->extents[i].cluster_idx + j);
					} else if (spdk_blob_is_thin_provisioned(blob)) {
						blob->active.clusters[blob->active.num_clusters++] = 0;
					} else {
						return -EINVAL;
					}
				}
			}
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE) {
			struct spdk_blob_md_descriptor_extent_table *desc_extent_table;
			uint32_t num_extent_pages = blob->active.num_extent_pages;
			uint32_t i, j;
			size_t extent_pages_length;

			desc_extent_table = (struct spdk_blob_md_descriptor_extent_table *)desc;
			extent_pages_length = desc_extent_table->length - sizeof(desc_extent_table->num_clusters);

			if (blob->extent_rle_found) {
				/* An Extent RLE descriptor is already present in the md;
				 * both descriptors must never be present at the same time. */
				return -EINVAL;
			} else if (blob->extent_table_found &&
				   desc_extent_table->num_clusters != blob->remaining_clusters_in_et) {
				/* Number of clusters in this ET does not match number
				 * from previously read EXTENT_TABLE. */
				return -EINVAL;
			}

			if (desc_extent_table->length == 0 ||
			    (extent_pages_length % sizeof(desc_extent_table->extent_page[0]) != 0)) {
				return -EINVAL;
			}

			blob->extent_table_found = true;

			for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) {
				num_extent_pages += desc_extent_table->extent_page[i].num_pages;
			}

			if (num_extent_pages > 0) {
				tmp = realloc(blob->active.extent_pages, num_extent_pages * sizeof(uint32_t));
				if (tmp == NULL) {
					return -ENOMEM;
				}
				blob->active.extent_pages = tmp;
			}
			blob->active.extent_pages_array_size = num_extent_pages;

			blob->remaining_clusters_in_et = desc_extent_table->num_clusters;

			/* Extent table entries contain md page numbers for extent pages.
			 * Zeroes represent unallocated extent pages; those are run-length-encoded.
			 */
			for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) {
				if (desc_extent_table->extent_page[i].page_idx != 0) {
					assert(desc_extent_table->extent_page[i].num_pages == 1);
					blob->active.extent_pages[blob->active.num_extent_pages++] =
						desc_extent_table->extent_page[i].page_idx;
				} else if (spdk_blob_is_thin_provisioned(blob)) {
					for (j = 0; j < desc_extent_table->extent_page[i].num_pages; j++) {
						blob->active.extent_pages[blob->active.num_extent_pages++] = 0;
					}
				} else {
					return -EINVAL;
				}
			}
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) {
			struct spdk_blob_md_descriptor_extent_page	*desc_extent;
			unsigned int					i;
			unsigned int					cluster_count = 0;
			size_t						cluster_idx_length;

			if (blob->extent_rle_found) {
				/* An Extent RLE descriptor is already present in the md;
				 * both descriptors must never be present at the same time. */
				return -EINVAL;
			}

			desc_extent = (struct spdk_blob_md_descriptor_extent_page *)desc;
			cluster_idx_length = desc_extent->length - sizeof(desc_extent->start_cluster_idx);

			if (desc_extent->length <= sizeof(desc_extent->start_cluster_idx) ||
			    (cluster_idx_length % sizeof(desc_extent->cluster_idx[0]) != 0)) {
				return -EINVAL;
			}

			for (i = 0; i < cluster_idx_length / sizeof(desc_extent->cluster_idx[0]); i++) {
				if (desc_extent->cluster_idx[i] != 0) {
					if (!spdk_bit_pool_is_allocated(blob->bs->used_clusters, desc_extent->cluster_idx[i])) {
						return -EINVAL;
					}
				}
				cluster_count++;
			}

			if (cluster_count == 0) {
				return -EINVAL;
			}

			/* When reading extent pages sequentially, the starting cluster idx should match
			 * the current size of the blob.
			 * If this is changed to batch reading, this check shall be removed. */
			if (desc_extent->start_cluster_idx != blob->active.num_clusters) {
				return -EINVAL;
			}

			tmp = realloc(blob->active.clusters,
				      (cluster_count + blob->active.num_clusters) * sizeof(*blob->active.clusters));
			if (tmp == NULL) {
				return -ENOMEM;
			}
			blob->active.clusters = tmp;
			blob->active.cluster_array_size = (cluster_count + blob->active.num_clusters);

			for (i = 0; i < cluster_idx_length / sizeof(desc_extent->cluster_idx[0]); i++) {
				if (desc_extent->cluster_idx[i] != 0) {
					blob->active.clusters[blob->active.num_clusters++] = bs_cluster_to_lba(blob->bs,
							desc_extent->cluster_idx[i]);
				} else if (spdk_blob_is_thin_provisioned(blob)) {
					blob->active.clusters[blob->active.num_clusters++] = 0;
				} else {
					return -EINVAL;
				}
			}
			assert(desc_extent->start_cluster_idx + cluster_count == blob->active.num_clusters);
			assert(blob->remaining_clusters_in_et >= cluster_count);
			blob->remaining_clusters_in_et -= cluster_count;
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) {
			int rc;

			rc = blob_deserialize_xattr(blob,
						    (struct spdk_blob_md_descriptor_xattr *) desc, false);
			if (rc != 0) {
				return rc;
			}
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) {
			int rc;

			rc = blob_deserialize_xattr(blob,
						    (struct spdk_blob_md_descriptor_xattr *) desc, true);
			if (rc != 0) {
				return rc;
			}
		} else {
			/* Unrecognized descriptor type.  Do not fail - just continue to the
			 *  next descriptor.  If this descriptor is associated with some feature
			 *  defined in a newer version of blobstore, that version of blobstore
			 *  should create and set an associated feature flag to specify if this
			 *  blob can be loaded or not.
			 */
		}

		/* Advance to the next descriptor */
		cur_desc += sizeof(*desc) + desc->length;
		if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) {
			break;
		}
		desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc);
	}

	return 0;
}

static bool bs_load_cur_extent_page_valid(struct spdk_blob_md_page *page);

static int
blob_parse_extent_page(struct spdk_blob_md_page *extent_page, struct spdk_blob *blob)
{
	assert(blob != NULL);
	assert(blob->state == SPDK_BLOB_STATE_LOADING);

	if (bs_load_cur_extent_page_valid(extent_page) == false) {
		return -ENOENT;
	}

	return blob_parse_page(extent_page, blob);
}

static int
blob_parse(const struct spdk_blob_md_page *pages, uint32_t page_count,
	   struct spdk_blob *blob)
{
	const struct spdk_blob_md_page *page;
	uint32_t i;
	int rc;
	void *tmp;

	assert(page_count > 0);
	assert(pages[0].sequence_num == 0);
	assert(blob != NULL);
	assert(blob->state == SPDK_BLOB_STATE_LOADING);
	assert(blob->active.clusters == NULL);

	/* The blobid provided doesn't match what's in the MD; this can
	 * happen, for example, if a bogus blobid is passed in through open.
	 */
	if (blob->id != pages[0].id) {
		SPDK_ERRLOG("Blobid (0x%" PRIx64 ") doesn't match what's in metadata "
			    "(0x%" PRIx64 ")\n", blob->id, pages[0].id);
		return -ENOENT;
	}

	tmp = realloc(blob->active.pages, page_count * sizeof(*blob->active.pages));
	if (!tmp) {
		return -ENOMEM;
	}
	blob->active.pages = tmp;

	blob->active.pages[0] = pages[0].id;

	for (i = 1; i < page_count; i++) {
		assert(spdk_bit_array_get(blob->bs->used_md_pages, pages[i - 1].next));
		blob->active.pages[i] = pages[i - 1].next;
	}
	blob->active.num_pages = page_count;

	for (i = 0; i < page_count; i++) {
		page = &pages[i];

		assert(page->id == blob->id);
		assert(page->sequence_num == i);

		rc = blob_parse_page(page, blob);
		if (rc != 0) {
			return rc;
		}
	}

	return 0;
}

static int
blob_serialize_add_page(const struct spdk_blob *blob,
			struct spdk_blob_md_page **pages,
			uint32_t *page_count,
			struct spdk_blob_md_page **last_page)
{
	struct spdk_blob_md_page *page, *tmp_pages;

	assert(pages != NULL);
	assert(page_count != NULL);

	*last_page = NULL;
	if (*page_count == 0) {
		assert(*pages == NULL);
		*pages = spdk_malloc(SPDK_BS_PAGE_SIZE, 0,
				     NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
		if (*pages == NULL) {
			return -ENOMEM;
		}
		*page_count = 1;
	} else {
		assert(*pages != NULL);
		tmp_pages = spdk_realloc(*pages, SPDK_BS_PAGE_SIZE * (*page_count + 1), 0);
		if (tmp_pages == NULL) {
			return -ENOMEM;
		}
		(*page_count)++;
		*pages = tmp_pages;
	}

	page = &(*pages)[*page_count - 1];
	memset(page, 0, sizeof(*page));
	page->id = blob->id;
	page->sequence_num = *page_count - 1;
	page->next = SPDK_INVALID_MD_PAGE;
	*last_page = page;

	return 0;
}

/* Transform the in-memory representation 'xattr' into an on-disk xattr descriptor.
 * Update required_sz on both success and failure.
 */
static int
blob_serialize_xattr(const struct spdk_xattr *xattr,
		     uint8_t *buf, size_t buf_sz,
		     size_t *required_sz, bool internal)
{
	struct spdk_blob_md_descriptor_xattr	*desc;

	*required_sz = sizeof(struct spdk_blob_md_descriptor_xattr) +
		       strlen(xattr->name) +
		       xattr->value_len;

	if (buf_sz < *required_sz) {
		return -1;
	}

	desc = (struct spdk_blob_md_descriptor_xattr *)buf;

	desc->type = internal ? SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL : SPDK_MD_DESCRIPTOR_TYPE_XATTR;
	desc->length = sizeof(desc->name_length) +
		       sizeof(desc->value_length) +
		       strlen(xattr->name) +
		       xattr->value_len;
	desc->name_length = strlen(xattr->name);
	desc->value_length = xattr->value_len;

	memcpy(desc->name, xattr->name, desc->name_length);
	memcpy((void *)((uintptr_t)desc->name + desc->name_length),
	       xattr->value,
	       desc->value_length);

	return 0;
}

static void
blob_serialize_extent_table_entry(const struct spdk_blob *blob,
				  uint64_t start_ep, uint64_t *next_ep,
				  uint8_t **buf, size_t *remaining_sz)
{
	struct spdk_blob_md_descriptor_extent_table *desc;
	size_t cur_sz;
	uint64_t i, et_idx;
	uint32_t extent_page, ep_len;

	/* The buffer must have room for at least the num_clusters entry */
	cur_sz = sizeof(struct spdk_blob_md_descriptor) + sizeof(desc->num_clusters);
	if (*remaining_sz < cur_sz) {
		*next_ep = start_ep;
		return;
	}

	desc = (struct spdk_blob_md_descriptor_extent_table *)*buf;
	desc->type = SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE;

	desc->num_clusters = blob->active.num_clusters;

	ep_len = 1;
	et_idx = 0;
	for (i = start_ep; i < blob->active.num_extent_pages; i++) {
		if (*remaining_sz < cur_sz + sizeof(desc->extent_page[0])) {
			/* If we ran out of buffer space, stop */
			break;
		}

		extent_page = blob->active.extent_pages[i];
		/* Verify that next extent_page is unallocated */
		if (extent_page == 0 &&
		    (i + 1 < blob->active.num_extent_pages && blob->active.extent_pages[i + 1] == 0)) {
			ep_len++;
			continue;
		}
		desc->extent_page[et_idx].page_idx = extent_page;
		desc->extent_page[et_idx].num_pages = ep_len;
		et_idx++;

		ep_len = 1;
		cur_sz += sizeof(desc->extent_page[et_idx]);
	}
	*next_ep = i;

	desc->length = sizeof(desc->num_clusters) + sizeof(desc->extent_page[0]) * et_idx;
	*remaining_sz -= sizeof(struct spdk_blob_md_descriptor) + desc->length;
	*buf += sizeof(struct spdk_blob_md_descriptor) + desc->length;
}

static int
blob_serialize_extent_table(const struct spdk_blob *blob,
			    struct spdk_blob_md_page **pages,
			    struct spdk_blob_md_page *cur_page,
			    uint32_t *page_count, uint8_t **buf,
			    size_t *remaining_sz)
{
	uint64_t				last_extent_page;
	int					rc;

	last_extent_page = 0;
	/* At least one extent table entry always has to be persisted.
	 * Such a case occurs when num_extent_pages == 0. */
	while (last_extent_page <= blob->active.num_extent_pages) {
		blob_serialize_extent_table_entry(blob, last_extent_page, &last_extent_page, buf,
						  remaining_sz);

		if (last_extent_page == blob->active.num_extent_pages) {
			break;
		}

		rc = blob_serialize_add_page(blob, pages, page_count, &cur_page);
		if (rc < 0) {
			return rc;
		}

		*buf = (uint8_t *)cur_page->descriptors;
		*remaining_sz = sizeof(cur_page->descriptors);
	}

	return 0;
}

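/*
 * EXTENT_RLE descriptors are run-length encoded: a run of physically
 * contiguous allocated clusters, or a run of unallocated (zero LBA) clusters,
 * collapses into a single (cluster_idx, length) pair.
 */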
static void
blob_serialize_extent_rle(const struct spdk_blob *blob,
			  uint64_t start_cluster, uint64_t *next_cluster,
			  uint8_t **buf, size_t *buf_sz)
{
	struct spdk_blob_md_descriptor_extent_rle *desc_extent_rle;
	size_t cur_sz;
	uint64_t i, extent_idx;
	uint64_t lba, lba_per_cluster, lba_count;

	/* The buffer must have room for at least one extent */
	cur_sz = sizeof(struct spdk_blob_md_descriptor) + sizeof(desc_extent_rle->extents[0]);
	if (*buf_sz < cur_sz) {
		*next_cluster = start_cluster;
		return;
	}

	desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)*buf;
	desc_extent_rle->type = SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE;

	lba_per_cluster = bs_cluster_to_lba(blob->bs, 1);
	/* Assert for scan-build false positive */
	assert(lba_per_cluster > 0);

	lba = blob->active.clusters[start_cluster];
	lba_count = lba_per_cluster;
	extent_idx = 0;
	for (i = start_cluster + 1; i < blob->active.num_clusters; i++) {
		if ((lba + lba_count) == blob->active.clusters[i] && lba != 0) {
			/* Run-length encode sequential non-zero LBA */
			lba_count += lba_per_cluster;
			continue;
		} else if (lba == 0 && blob->active.clusters[i] == 0) {
			/* Run-length encode unallocated clusters */
			lba_count += lba_per_cluster;
			continue;
		}
		desc_extent_rle->extents[extent_idx].cluster_idx = lba / lba_per_cluster;
		desc_extent_rle->extents[extent_idx].length = lba_count / lba_per_cluster;
		extent_idx++;

		cur_sz += sizeof(desc_extent_rle->extents[extent_idx]);

		if (*buf_sz < cur_sz) {
			/* If we ran out of buffer space, stop */
			*next_cluster = i;
			break;
		}

		lba = blob->active.clusters[i];
		lba_count = lba_per_cluster;
	}

	if (*buf_sz >= cur_sz) {
		desc_extent_rle->extents[extent_idx].cluster_idx = lba / lba_per_cluster;
		desc_extent_rle->extents[extent_idx].length = lba_count / lba_per_cluster;
		extent_idx++;

		*next_cluster = blob->active.num_clusters;
	}

	desc_extent_rle->length = sizeof(desc_extent_rle->extents[0]) * extent_idx;
	*buf_sz -= sizeof(struct spdk_blob_md_descriptor) + desc_extent_rle->length;
	*buf += sizeof(struct spdk_blob_md_descriptor) + desc_extent_rle->length;
}

static int
blob_serialize_extents_rle(const struct spdk_blob *blob,
			   struct spdk_blob_md_page **pages,
			   struct spdk_blob_md_page *cur_page,
			   uint32_t *page_count, uint8_t **buf,
			   size_t *remaining_sz)
{
	uint64_t				last_cluster;
	int					rc;

	last_cluster = 0;
	while (last_cluster < blob->active.num_clusters) {
		blob_serialize_extent_rle(blob, last_cluster, &last_cluster, buf, remaining_sz);

		if (last_cluster == blob->active.num_clusters) {
			break;
		}

		rc = blob_serialize_add_page(blob, pages, page_count, &cur_page);
		if (rc < 0) {
			return rc;
		}

		*buf = (uint8_t *)cur_page->descriptors;
		*remaining_sz = sizeof(cur_page->descriptors);
	}

	return 0;
}

static void
blob_serialize_extent_page(const struct spdk_blob *blob,
			   uint64_t cluster, struct spdk_blob_md_page *page)
{
	struct spdk_blob_md_descriptor_extent_page *desc_extent;
	uint64_t i, extent_idx;
	uint64_t lba, lba_per_cluster;
	uint64_t start_cluster_idx = (cluster / SPDK_EXTENTS_PER_EP) * SPDK_EXTENTS_PER_EP;

	desc_extent = (struct spdk_blob_md_descriptor_extent_page *) page->descriptors;
	desc_extent->type = SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE;

	lba_per_cluster = bs_cluster_to_lba(blob->bs, 1);

	desc_extent->start_cluster_idx = start_cluster_idx;
	extent_idx = 0;
	for (i = start_cluster_idx; i < blob->active.num_clusters; i++) {
		lba = blob->active.clusters[i];
		desc_extent->cluster_idx[extent_idx++] = lba / lba_per_cluster;
		if (extent_idx >= SPDK_EXTENTS_PER_EP) {
			break;
		}
	}
	desc_extent->length = sizeof(desc_extent->start_cluster_idx) +
			      sizeof(desc_extent->cluster_idx[0]) * extent_idx;
}

static void
blob_serialize_flags(const struct spdk_blob *blob,
		     uint8_t *buf, size_t *buf_sz)
{
	struct spdk_blob_md_descriptor_flags *desc;

	/*
	 * Flags get serialized first, so we should always have room for the flags
	 *  descriptor.
	 */
	assert(*buf_sz >= sizeof(*desc));

	desc = (struct spdk_blob_md_descriptor_flags *)buf;
	desc->type = SPDK_MD_DESCRIPTOR_TYPE_FLAGS;
	desc->length = sizeof(*desc) - sizeof(struct spdk_blob_md_descriptor);
	desc->invalid_flags = blob->invalid_flags;
	desc->data_ro_flags = blob->data_ro_flags;
	desc->md_ro_flags = blob->md_ro_flags;

	*buf_sz -= sizeof(*desc);
}

static int
blob_serialize_xattrs(const struct spdk_blob *blob,
		      const struct spdk_xattr_tailq *xattrs, bool internal,
		      struct spdk_blob_md_page **pages,
		      struct spdk_blob_md_page *cur_page,
		      uint32_t *page_count, uint8_t **buf,
		      size_t *remaining_sz)
{
	const struct spdk_xattr	*xattr;
	int	rc;

	TAILQ_FOREACH(xattr, xattrs, link) {
		size_t required_sz = 0;

		rc = blob_serialize_xattr(xattr,
					  *buf, *remaining_sz,
					  &required_sz, internal);
		if (rc < 0) {
			/* Need to add a new page to the chain */
			rc = blob_serialize_add_page(blob, pages, page_count,
						     &cur_page);
			if (rc < 0) {
				spdk_free(*pages);
				*pages = NULL;
				*page_count = 0;
				return rc;
			}

			*buf = (uint8_t *)cur_page->descriptors;
			*remaining_sz = sizeof(cur_page->descriptors);

			/* Try again */
			required_sz = 0;
			rc = blob_serialize_xattr(xattr,
						  *buf, *remaining_sz,
						  &required_sz, internal);

			if (rc < 0) {
				spdk_free(*pages);
				*pages = NULL;
				*page_count = 0;
				return rc;
			}
		}

		*remaining_sz -= required_sz;
		*buf += required_sz;
	}

	return 0;
}

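/*
 * Serialization writes descriptors in a fixed order: flags first, then user
 * xattrs, internal xattrs, and finally either an extent table or extent RLE
 * descriptors, growing the page chain whenever the current page fills up.
 */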
static int
blob_serialize(const struct spdk_blob *blob, struct spdk_blob_md_page **pages,
	       uint32_t *page_count)
{
	struct spdk_blob_md_page		*cur_page;
	int					rc;
	uint8_t					*buf;
	size_t					remaining_sz;

	assert(pages != NULL);
	assert(page_count != NULL);
	assert(blob != NULL);
	assert(blob->state == SPDK_BLOB_STATE_DIRTY);

	*pages = NULL;
	*page_count = 0;

	/* A blob always has at least 1 page, even if it has no descriptors */
	rc = blob_serialize_add_page(blob, pages, page_count, &cur_page);
	if (rc < 0) {
		return rc;
	}

	buf = (uint8_t *)cur_page->descriptors;
	remaining_sz = sizeof(cur_page->descriptors);

	/* Serialize flags */
	blob_serialize_flags(blob, buf, &remaining_sz);
	buf += sizeof(struct spdk_blob_md_descriptor_flags);

	/* Serialize xattrs */
	rc = blob_serialize_xattrs(blob, &blob->xattrs, false,
				   pages, cur_page, page_count, &buf, &remaining_sz);
	if (rc < 0) {
		return rc;
	}

	/* Serialize internal xattrs */
	rc = blob_serialize_xattrs(blob, &blob->xattrs_internal, true,
				   pages, cur_page, page_count, &buf, &remaining_sz);
	if (rc < 0) {
		return rc;
	}

	if (blob->use_extent_table) {
		/* Serialize extent table */
		rc = blob_serialize_extent_table(blob, pages, cur_page, page_count, &buf, &remaining_sz);
	} else {
		/* Serialize extents */
		rc = blob_serialize_extents_rle(blob, pages, cur_page, page_count, &buf, &remaining_sz);
	}

	return rc;
}

struct spdk_blob_load_ctx {
	struct spdk_blob		*blob;

	struct spdk_blob_md_page	*pages;
	uint32_t			num_pages;
	uint32_t			next_extent_page;
	spdk_bs_sequence_t	        *seq;

	spdk_bs_sequence_cpl		cb_fn;
	void				*cb_arg;
};

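/*
 * The CRC32C of a metadata page covers everything except the trailing 4-byte
 * crc field itself, seeded with BLOB_CRC32C_INITIAL and finalized by XORing
 * with the same constant.
 */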
static uint32_t
blob_md_page_calc_crc(void *page)
{
	uint32_t		crc;

	crc = BLOB_CRC32C_INITIAL;
	crc = spdk_crc32c_update(page, SPDK_BS_PAGE_SIZE - 4, crc);
	crc ^= BLOB_CRC32C_INITIAL;

	return crc;
}

static void
blob_load_final(struct spdk_blob_load_ctx *ctx, int bserrno)
{
	struct spdk_blob		*blob = ctx->blob;

	if (bserrno == 0) {
		blob_mark_clean(blob);
	}

	ctx->cb_fn(ctx->seq, ctx->cb_arg, bserrno);

	/* Free the memory */
	spdk_free(ctx->pages);
	free(ctx);
}

static void
blob_load_snapshot_cpl(void *cb_arg, struct spdk_blob *snapshot, int bserrno)
{
	struct spdk_blob_load_ctx	*ctx = cb_arg;
	struct spdk_blob		*blob = ctx->blob;

	if (bserrno == 0) {
		blob->back_bs_dev = bs_create_blob_bs_dev(snapshot);
		if (blob->back_bs_dev == NULL) {
			bserrno = -ENOMEM;
		}
	}
	if (bserrno != 0) {
		SPDK_ERRLOG("Failed to load snapshot\n");
	}

	blob_load_final(ctx, bserrno);
}

static void blob_update_clear_method(struct spdk_blob *blob);

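/*
 * Esnap clones delegate reads of unallocated clusters to an external snapshot
 * device. The device is created through the bs->esnap_bs_dev_create callback
 * supplied when the blobstore was opened; the callback may legitimately
 * return a NULL bs_dev when the consumer chooses not to open the external
 * snapshot.
 */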
static int
blob_load_esnap(struct spdk_blob *blob, void *blob_ctx)
{
	struct spdk_blob_store *bs = blob->bs;
	struct spdk_bs_dev *bs_dev = NULL;
	const void *esnap_id = NULL;
	size_t id_len = 0;
	int rc;

	if (bs->esnap_bs_dev_create == NULL) {
		SPDK_NOTICELOG("blob 0x%" PRIx64 " is an esnap clone but the blobstore was opened "
			       "without support for esnap clones\n", blob->id);
		return -ENOTSUP;
	}
	assert(blob->back_bs_dev == NULL);

	rc = blob_get_xattr_value(blob, BLOB_EXTERNAL_SNAPSHOT_ID, &esnap_id, &id_len, true);
	if (rc != 0) {
		SPDK_ERRLOG("blob 0x%" PRIx64 " is an esnap clone but has no esnap ID\n", blob->id);
		return -EINVAL;
	}
	assert(id_len > 0 && id_len < UINT32_MAX);

	SPDK_INFOLOG(blob, "Creating external snapshot device\n");

	rc = bs->esnap_bs_dev_create(bs->esnap_ctx, blob_ctx, blob, esnap_id, (uint32_t)id_len,
				     &bs_dev);
	if (rc != 0) {
		SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": failed to load back_bs_dev "
			      "with error %d\n", blob->id, rc);
		return rc;
	}

	/*
	 * Note: bs_dev might be NULL if the consumer chose to not open the external snapshot.
	 * This especially might happen during spdk_bs_load() iteration.
	 */
	if (bs_dev != NULL) {
		SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": loaded back_bs_dev\n", blob->id);
		if ((bs->io_unit_size % bs_dev->blocklen) != 0) {
			SPDK_NOTICELOG("blob 0x%" PRIx64 " external snapshot device block size %u "
				       "is not compatible with blobstore block size %u\n",
				       blob->id, bs_dev->blocklen, bs->io_unit_size);
			bs_dev->destroy(bs_dev);
			return -EINVAL;
		}
	}

	blob->back_bs_dev = bs_dev;
	blob->parent_id = SPDK_BLOBID_EXTERNAL_SNAPSHOT;

	return 0;
}

static void
blob_load_backing_dev(spdk_bs_sequence_t *seq, void *cb_arg)
{
	struct spdk_blob_load_ctx	*ctx = cb_arg;
	struct spdk_blob		*blob = ctx->blob;
	const void			*value;
	size_t				len;
	int				rc;

	if (blob_is_esnap_clone(blob)) {
		rc = blob_load_esnap(blob, seq->cpl.u.blob_handle.esnap_ctx);
		blob_load_final(ctx, rc);
		return;
	}

	if (spdk_blob_is_thin_provisioned(blob)) {
		rc = blob_get_xattr_value(blob, BLOB_SNAPSHOT, &value, &len, true);
		if (rc == 0) {
			if (len != sizeof(spdk_blob_id)) {
				blob_load_final(ctx, -EINVAL);
				return;
			}
			/* open snapshot blob and continue in the callback function */
			blob->parent_id = *(spdk_blob_id *)value;
			spdk_bs_open_blob(blob->bs, blob->parent_id,
					  blob_load_snapshot_cpl, ctx);
			return;
		} else {
			/* add zeroes_dev for thin provisioned blob */
			blob->back_bs_dev = bs_create_zeroes_dev();
		}
	} else {
		/* standard blob */
		blob->back_bs_dev = NULL;
	}
	blob_load_final(ctx, 0);
}

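/*
 * Extent pages are read one at a time into a single reusable page buffer,
 * with this completion re-armed for each read. Unallocated extent pages are
 * only legal for thin provisioned blobs and simply grow the cluster array
 * with zeroes.
 */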
static void
blob_load_cpl_extents_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_load_ctx	*ctx = cb_arg;
	struct spdk_blob		*blob = ctx->blob;
	struct spdk_blob_md_page	*page;
	uint64_t			i;
	uint32_t			crc;
	uint64_t			lba;
	void				*tmp;
	uint64_t			sz;

	if (bserrno) {
		SPDK_ERRLOG("Extent page read failed: %d\n", bserrno);
		blob_load_final(ctx, bserrno);
		return;
	}

	if (ctx->pages == NULL) {
		/* On the first iteration of this function, allocate a buffer for a single EXTENT_PAGE */
		ctx->pages = spdk_zmalloc(SPDK_BS_PAGE_SIZE, 0,
					  NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
		if (!ctx->pages) {
			blob_load_final(ctx, -ENOMEM);
			return;
		}
		ctx->num_pages = 1;
		ctx->next_extent_page = 0;
	} else {
		page = &ctx->pages[0];
		crc = blob_md_page_calc_crc(page);
		if (crc != page->crc) {
			blob_load_final(ctx, -EINVAL);
			return;
		}

		if (page->next != SPDK_INVALID_MD_PAGE) {
			blob_load_final(ctx, -EINVAL);
			return;
		}

		bserrno = blob_parse_extent_page(page, blob);
		if (bserrno) {
			blob_load_final(ctx, bserrno);
			return;
		}
	}

	for (i = ctx->next_extent_page; i < blob->active.num_extent_pages; i++) {
		if (blob->active.extent_pages[i] != 0) {
			/* Extent page was allocated, read and parse it. */
			lba = bs_md_page_to_lba(blob->bs, blob->active.extent_pages[i]);
			ctx->next_extent_page = i + 1;

			bs_sequence_read_dev(seq, &ctx->pages[0], lba,
					     bs_byte_to_lba(blob->bs, SPDK_BS_PAGE_SIZE),
					     blob_load_cpl_extents_cpl, ctx);
			return;
		} else {
			/* Thin provisioned blobs can point to unallocated extent pages.
			 * In this case the blob size is increased by up to the amount left in
			 * remaining_clusters_in_et. */

			sz = spdk_min(blob->remaining_clusters_in_et, SPDK_EXTENTS_PER_EP);
			blob->active.num_clusters += sz;
			blob->remaining_clusters_in_et -= sz;

			assert(spdk_blob_is_thin_provisioned(blob));
			assert(i + 1 < blob->active.num_extent_pages || blob->remaining_clusters_in_et == 0);

			tmp = realloc(blob->active.clusters, blob->active.num_clusters * sizeof(*blob->active.clusters));
			if (tmp == NULL) {
				blob_load_final(ctx, -ENOMEM);
				return;
			}
			memset(tmp + sizeof(*blob->active.clusters) * blob->active.cluster_array_size, 0,
			       sizeof(*blob->active.clusters) * (blob->active.num_clusters - blob->active.cluster_array_size));
			blob->active.clusters = tmp;
			blob->active.cluster_array_size = blob->active.num_clusters;
		}
	}

	blob_load_backing_dev(seq, ctx);
}

static void
blob_load_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_load_ctx	*ctx = cb_arg;
	struct spdk_blob		*blob = ctx->blob;
	struct spdk_blob_md_page	*page;
	int				rc;
	uint32_t			crc;
	uint32_t			current_page;

	if (ctx->num_pages == 1) {
		current_page = bs_blobid_to_page(blob->id);
	} else {
		assert(ctx->num_pages != 0);
		page = &ctx->pages[ctx->num_pages - 2];
		current_page = page->next;
	}

	if (bserrno) {
		SPDK_ERRLOG("Metadata page %d read failed for blobid 0x%" PRIx64 ": %d\n",
			    current_page, blob->id, bserrno);
		blob_load_final(ctx, bserrno);
		return;
	}

	page = &ctx->pages[ctx->num_pages - 1];
	crc = blob_md_page_calc_crc(page);
	if (crc != page->crc) {
		SPDK_ERRLOG("Metadata page %d crc mismatch for blobid 0x%" PRIx64 "\n",
			    current_page, blob->id);
		blob_load_final(ctx, -EINVAL);
		return;
	}

	if (page->next != SPDK_INVALID_MD_PAGE) {
		struct spdk_blob_md_page *tmp_pages;
		uint32_t next_page = page->next;
		uint64_t next_lba = bs_md_page_to_lba(blob->bs, next_page);

		/* Read the next page */
		tmp_pages = spdk_realloc(ctx->pages, (sizeof(*page) * (ctx->num_pages + 1)), 0);
		if (tmp_pages == NULL) {
			blob_load_final(ctx, -ENOMEM);
			return;
		}
		ctx->num_pages++;
		ctx->pages = tmp_pages;

		bs_sequence_read_dev(seq, &ctx->pages[ctx->num_pages - 1],
				     next_lba,
				     bs_byte_to_lba(blob->bs, sizeof(*page)),
				     blob_load_cpl, ctx);
		return;
	}

	/* Parse the pages */
	rc = blob_parse(ctx->pages, ctx->num_pages, blob);
	if (rc) {
		blob_load_final(ctx, rc);
		return;
	}

	if (blob->extent_table_found == true) {
		/* If EXTENT_TABLE was found, that means support for it should be enabled. */
		assert(blob->extent_rle_found == false);
		blob->use_extent_table = true;
	} else {
		/* If EXTENT_RLE or no extent_* descriptor was found, disable support for
		 * the extent table. Having no extent_* descriptors means that the blob has
		 * a length of 0 and no extent_rle descriptors were persisted for it.
		 * EXTENT_TABLE, if used, is always present in the metadata regardless of length. */
		blob->use_extent_table = false;
	}

	/* Check the clear_method stored in metadata vs what may have been passed
	 * via spdk_bs_open_blob_ext() and update accordingly.
	 */
	blob_update_clear_method(blob);

	spdk_free(ctx->pages);
	ctx->pages = NULL;

	if (blob->extent_table_found) {
		blob_load_cpl_extents_cpl(seq, ctx, 0);
	} else {
		blob_load_backing_dev(seq, ctx);
	}
}

/* Load a blob from disk given a blobid */
static void
blob_load(spdk_bs_sequence_t *seq, struct spdk_blob *blob,
	  spdk_bs_sequence_cpl cb_fn, void *cb_arg)
{
	struct spdk_blob_load_ctx *ctx;
	struct spdk_blob_store *bs;
	uint32_t page_num;
	uint64_t lba;

	blob_verify_md_op(blob);

	bs = blob->bs;

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(seq, cb_arg, -ENOMEM);
		return;
	}

	ctx->blob = blob;
	ctx->pages = spdk_realloc(ctx->pages, SPDK_BS_PAGE_SIZE, 0);
	if (!ctx->pages) {
		free(ctx);
		cb_fn(seq, cb_arg, -ENOMEM);
		return;
	}
	ctx->num_pages = 1;
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;
	ctx->seq = seq;

	page_num = bs_blobid_to_page(blob->id);
	lba = bs_md_page_to_lba(blob->bs, page_num);

	blob->state = SPDK_BLOB_STATE_LOADING;

	bs_sequence_read_dev(seq, &ctx->pages[0], lba,
			     bs_byte_to_lba(bs, SPDK_BS_PAGE_SIZE),
			     blob_load_cpl, ctx);
}

struct spdk_blob_persist_ctx {
	struct spdk_blob		*blob;

	struct spdk_blob_md_page	*pages;
	uint32_t			next_extent_page;
	struct spdk_blob_md_page	*extent_page;

	spdk_bs_sequence_t		*seq;
	spdk_bs_sequence_cpl		cb_fn;
	void				*cb_arg;
	TAILQ_ENTRY(spdk_blob_persist_ctx) link;
};

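/*
 * Dispatch a clear request according to the blob's clear_method: unmap,
 * write-zeroes, or nothing at all.
 */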
static void
bs_batch_clear_dev(struct spdk_blob_persist_ctx *ctx, spdk_bs_batch_t *batch, uint64_t lba,
		   uint64_t lba_count)
{
	switch (ctx->blob->clear_method) {
	case BLOB_CLEAR_WITH_DEFAULT:
	case BLOB_CLEAR_WITH_UNMAP:
		bs_batch_unmap_dev(batch, lba, lba_count);
		break;
	case BLOB_CLEAR_WITH_WRITE_ZEROES:
		bs_batch_write_zeroes_dev(batch, lba, lba_count);
		break;
	case BLOB_CLEAR_WITH_NONE:
	default:
		break;
	}
}

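/*
 * Validate a super block before trusting it: the version must fall in the
 * supported range, the signature and CRC must match, the bstype must equal
 * the expected type (or the expected type must be all zeroes, acting as a
 * wildcard), and the recorded size must fit on the underlying device.
 */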
static int
bs_super_validate(struct spdk_bs_super_block *super, struct spdk_blob_store *bs)
{
	uint32_t	crc;
	static const char zeros[SPDK_BLOBSTORE_TYPE_LENGTH];

	if (super->version > SPDK_BS_VERSION ||
	    super->version < SPDK_BS_INITIAL_VERSION) {
		return -EILSEQ;
	}

	if (memcmp(super->signature, SPDK_BS_SUPER_BLOCK_SIG,
		   sizeof(super->signature)) != 0) {
		return -EILSEQ;
	}

	crc = blob_md_page_calc_crc(super);
	if (crc != super->crc) {
		return -EILSEQ;
	}

	if (memcmp(&bs->bstype, &super->bstype, SPDK_BLOBSTORE_TYPE_LENGTH) == 0) {
		SPDK_DEBUGLOG(blob, "Bstype matched - loading blobstore\n");
	} else if (memcmp(&bs->bstype, zeros, SPDK_BLOBSTORE_TYPE_LENGTH) == 0) {
		SPDK_DEBUGLOG(blob, "Bstype wildcard used - loading blobstore regardless of bstype\n");
	} else {
		SPDK_DEBUGLOG(blob, "Unexpected bstype\n");
		SPDK_LOGDUMP(blob, "Expected:", bs->bstype.bstype, SPDK_BLOBSTORE_TYPE_LENGTH);
		SPDK_LOGDUMP(blob, "Found:", super->bstype.bstype, SPDK_BLOBSTORE_TYPE_LENGTH);
		return -ENXIO;
	}

	if (super->size > bs->dev->blockcnt * bs->dev->blocklen) {
		SPDK_NOTICELOG("Size mismatch, dev size: %" PRIu64 ", blobstore size: %" PRIu64 "\n",
			       bs->dev->blockcnt * bs->dev->blocklen, super->size);
		return -EILSEQ;
	}

	return 0;
}

static void bs_mark_dirty(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs,
			  spdk_bs_sequence_cpl cb_fn, void *cb_arg);

static void
blob_persist_complete_cb(void *arg)
{
	struct spdk_blob_persist_ctx *ctx = arg;

	/* Call user callback */
	ctx->cb_fn(ctx->seq, ctx->cb_arg, 0);

	/* Free the memory */
	spdk_free(ctx->pages);
	free(ctx);
}

static void blob_persist_start(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno);

1804 static void
1805 blob_persist_complete(spdk_bs_sequence_t *seq, struct spdk_blob_persist_ctx *ctx, int bserrno)
1806 {
1807 	struct spdk_blob_persist_ctx	*next_persist, *tmp;
1808 	struct spdk_blob		*blob = ctx->blob;
1809 
1810 	if (bserrno == 0) {
1811 		blob_mark_clean(blob);
1812 	}
1813 
1814 	assert(ctx == TAILQ_FIRST(&blob->persists_to_complete));
1815 
1816 	/* Complete all persists that were pending when the current persist started */
1817 	TAILQ_FOREACH_SAFE(next_persist, &blob->persists_to_complete, link, tmp) {
1818 		TAILQ_REMOVE(&blob->persists_to_complete, next_persist, link);
1819 		spdk_thread_send_msg(spdk_get_thread(), blob_persist_complete_cb, next_persist);
1820 	}
1821 
1822 	if (TAILQ_EMPTY(&blob->pending_persists)) {
1823 		return;
1824 	}
1825 
1826 	/* Queue up all pending persists for completion and start blob persist with first one */
1827 	TAILQ_SWAP(&blob->persists_to_complete, &blob->pending_persists, spdk_blob_persist_ctx, link);
1828 	next_persist = TAILQ_FIRST(&blob->persists_to_complete);
1829 
1830 	blob->state = SPDK_BLOB_STATE_DIRTY;
1831 	bs_mark_dirty(seq, blob->bs, blob_persist_start, next_persist);
1832 }
1833 
1834 static void
1835 blob_persist_clear_extents_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
1836 {
1837 	struct spdk_blob_persist_ctx	*ctx = cb_arg;
1838 	struct spdk_blob		*blob = ctx->blob;
1839 	struct spdk_blob_store		*bs = blob->bs;
1840 	size_t				i;
1841 
1842 	if (bserrno != 0) {
1843 		blob_persist_complete(seq, ctx, bserrno);
1844 		return;
1845 	}
1846 
1847 	spdk_spin_lock(&bs->used_lock);
1848 
1849 	/* Release all extent_pages that were truncated */
1850 	for (i = blob->active.num_extent_pages; i < blob->active.extent_pages_array_size; i++) {
1851 		/* Nothing to release if it was not allocated */
1852 		if (blob->active.extent_pages[i] != 0) {
1853 			bs_release_md_page(bs, blob->active.extent_pages[i]);
1854 		}
1855 	}
1856 
1857 	spdk_spin_unlock(&bs->used_lock);
1858 
1859 	if (blob->active.num_extent_pages == 0) {
1860 		free(blob->active.extent_pages);
1861 		blob->active.extent_pages = NULL;
1862 		blob->active.extent_pages_array_size = 0;
1863 	} else if (blob->active.num_extent_pages != blob->active.extent_pages_array_size) {
1864 #ifndef __clang_analyzer__
1865 		void *tmp;
1866 
1867 		/* scan-build really can't figure out reallocs; work around it */
1868 		tmp = realloc(blob->active.extent_pages, sizeof(uint32_t) * blob->active.num_extent_pages);
1869 		assert(tmp != NULL);
1870 		blob->active.extent_pages = tmp;
1871 #endif
1872 		blob->active.extent_pages_array_size = blob->active.num_extent_pages;
1873 	}
1874 
1875 	blob_persist_complete(seq, ctx, bserrno);
1876 }
1877 
1878 static void
1879 blob_persist_clear_extents(spdk_bs_sequence_t *seq, struct spdk_blob_persist_ctx *ctx)
1880 {
1881 	struct spdk_blob		*blob = ctx->blob;
1882 	struct spdk_blob_store		*bs = blob->bs;
1883 	size_t				i;
1884 	uint64_t                        lba;
1885 	uint64_t                        lba_count;
1886 	spdk_bs_batch_t                 *batch;
1887 
1888 	batch = bs_sequence_to_batch(seq, blob_persist_clear_extents_cpl, ctx);
1889 	lba_count = bs_byte_to_lba(bs, SPDK_BS_PAGE_SIZE);
1890 
1891 	/* Clear all extent_pages that were truncated */
1892 	for (i = blob->active.num_extent_pages; i < blob->active.extent_pages_array_size; i++) {
1893 		/* Nothing to clear if it was not allocated */
1894 		if (blob->active.extent_pages[i] != 0) {
1895 			lba = bs_md_page_to_lba(bs, blob->active.extent_pages[i]);
1896 			bs_batch_write_zeroes_dev(batch, lba, lba_count);
1897 		}
1898 	}
1899 
1900 	bs_batch_close(batch);
1901 }
1902 
1903 static void
1904 blob_persist_clear_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
1905 {
1906 	struct spdk_blob_persist_ctx	*ctx = cb_arg;
1907 	struct spdk_blob		*blob = ctx->blob;
1908 	struct spdk_blob_store		*bs = blob->bs;
1909 	size_t				i;
1910 
1911 	if (bserrno != 0) {
1912 		blob_persist_complete(seq, ctx, bserrno);
1913 		return;
1914 	}
1915 
1916 	spdk_spin_lock(&bs->used_lock);
1917 	/* Release all clusters that were truncated */
1918 	for (i = blob->active.num_clusters; i < blob->active.cluster_array_size; i++) {
1919 		uint32_t cluster_num = bs_lba_to_cluster(bs, blob->active.clusters[i]);
1920 
1921 		/* Nothing to release if it was not allocated */
1922 		if (blob->active.clusters[i] != 0) {
1923 			bs_release_cluster(bs, cluster_num);
1924 		}
1925 	}
1926 	spdk_spin_unlock(&bs->used_lock);
1927 
1928 	if (blob->active.num_clusters == 0) {
1929 		free(blob->active.clusters);
1930 		blob->active.clusters = NULL;
1931 		blob->active.cluster_array_size = 0;
1932 	} else if (blob->active.num_clusters != blob->active.cluster_array_size) {
1933 #ifndef __clang_analyzer__
1934 		void *tmp;
1935 
1936 		/* scan-build really can't figure out reallocs; work around it */
1937 		tmp = realloc(blob->active.clusters, sizeof(*blob->active.clusters) * blob->active.num_clusters);
1938 		assert(tmp != NULL);
1939 		blob->active.clusters = tmp;
1940 
1941 #endif
1942 		blob->active.cluster_array_size = blob->active.num_clusters;
1943 	}
1944 
1945 	/* Move on to clearing extent pages */
1946 	blob_persist_clear_extents(seq, ctx);
1947 }
1948 
1949 static void
1950 blob_persist_clear_clusters(spdk_bs_sequence_t *seq, struct spdk_blob_persist_ctx *ctx)
1951 {
1952 	struct spdk_blob		*blob = ctx->blob;
1953 	struct spdk_blob_store		*bs = blob->bs;
1954 	spdk_bs_batch_t			*batch;
1955 	size_t				i;
1956 	uint64_t			lba;
1957 	uint64_t			lba_count;
1958 
1959 	/* Clusters don't move around in blobs. The list shrinks or grows
1960 	 * at the end, but no changes ever occur in the middle of the list.
1961 	 */
1962 
1963 	batch = bs_sequence_to_batch(seq, blob_persist_clear_clusters_cpl, ctx);
1964 
1965 	/* Clear all clusters that were truncated */
1966 	lba = 0;
1967 	lba_count = 0;
1968 	for (i = blob->active.num_clusters; i < blob->active.cluster_array_size; i++) {
1969 		uint64_t next_lba = blob->active.clusters[i];
1970 		uint64_t next_lba_count = bs_cluster_to_lba(bs, 1);
1971 
1972 		if (next_lba > 0 && (lba + lba_count) == next_lba) {
1973 			/* This cluster is contiguous with the previous one. */
1974 			lba_count += next_lba_count;
1975 			continue;
1976 		} else if (next_lba == 0) {
1977 			continue;
1978 		}
1979 
1980 		/* This cluster is not contiguous with the previous one. */
1981 
1982 		/* If a run of LBAs previously existed, clear it now */
1983 		if (lba_count > 0) {
1984 			bs_batch_clear_dev(ctx, batch, lba, lba_count);
1985 		}
1986 
1987 		/* Start building the next batch */
1988 		lba = next_lba;
1989 		if (next_lba > 0) {
1990 			lba_count = next_lba_count;
1991 		} else {
1992 			lba_count = 0;
1993 		}
1994 	}
1995 
1996 	/* If we ended with a contiguous set of LBAs, clear them now */
1997 	if (lba_count > 0) {
1998 		bs_batch_clear_dev(ctx, batch, lba, lba_count);
1999 	}
2000 
2001 	bs_batch_close(batch);
2002 }
2003 
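/*
 * Worked example of the coalescing loop above (illustrative numbers): with
 * one cluster == 256 LBAs, a truncated tail of { 1024, 1280, 0, 4096 } in
 * blob->active.clusters is cleared with two device operations, not three:
 *
 *	clusters[0] = 1024  -> start a run: lba = 1024, lba_count = 256
 *	clusters[1] = 1280  -> contiguous (1024 + 256): lba_count = 512
 *	clusters[2] = 0     -> unallocated, skipped
 *	clusters[3] = 4096  -> not contiguous: clear (1024, 512), restart run
 *	end of array        -> clear (4096, 256)
 */
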
2004 static void
2005 blob_persist_zero_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2006 {
2007 	struct spdk_blob_persist_ctx	*ctx = cb_arg;
2008 	struct spdk_blob		*blob = ctx->blob;
2009 	struct spdk_blob_store		*bs = blob->bs;
2010 	size_t				i;
2011 
2012 	if (bserrno != 0) {
2013 		blob_persist_complete(seq, ctx, bserrno);
2014 		return;
2015 	}
2016 
2017 	spdk_spin_lock(&bs->used_lock);
2018 
2019 	/* This loop starts at 1 because the first page is special and handled
2020 	 * below. The pages (except the first) are never written in place,
2021 	 * so any pages in the clean list can simply be released.
2022 	 */
2023 	for (i = 1; i < blob->clean.num_pages; i++) {
2024 		bs_release_md_page(bs, blob->clean.pages[i]);
2025 	}
2026 
2027 	if (blob->active.num_pages == 0) {
2028 		uint32_t page_num;
2029 
2030 		page_num = bs_blobid_to_page(blob->id);
2031 		bs_release_md_page(bs, page_num);
2032 	}
2033 
2034 	spdk_spin_unlock(&bs->used_lock);
2035 
2036 	/* Move on to clearing clusters */
2037 	blob_persist_clear_clusters(seq, ctx);
2038 }
2039 
2040 static void
2041 blob_persist_zero_pages(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2042 {
2043 	struct spdk_blob_persist_ctx	*ctx = cb_arg;
2044 	struct spdk_blob		*blob = ctx->blob;
2045 	struct spdk_blob_store		*bs = blob->bs;
2046 	uint64_t			lba;
2047 	uint64_t			lba_count;
2048 	spdk_bs_batch_t			*batch;
2049 	size_t				i;
2050 
2051 	if (bserrno != 0) {
2052 		blob_persist_complete(seq, ctx, bserrno);
2053 		return;
2054 	}
2055 
2056 	batch = bs_sequence_to_batch(seq, blob_persist_zero_pages_cpl, ctx);
2057 
2058 	lba_count = bs_byte_to_lba(bs, SPDK_BS_PAGE_SIZE);
2059 
2060 	/* This loop starts at 1 because the first page is special and handled
2061 	 * below. The pages (except the first) are never written in place,
2062 	 * so any pages in the clean list must be zeroed.
2063 	 */
2064 	for (i = 1; i < blob->clean.num_pages; i++) {
2065 		lba = bs_md_page_to_lba(bs, blob->clean.pages[i]);
2066 
2067 		bs_batch_write_zeroes_dev(batch, lba, lba_count);
2068 	}
2069 
2070 	/* The first page will only be zeroed if this is a delete. */
2071 	if (blob->active.num_pages == 0) {
2072 		uint32_t page_num;
2073 
2074 		/* The first page in the metadata goes where the blobid indicates */
2075 		page_num = bs_blobid_to_page(blob->id);
2076 		lba = bs_md_page_to_lba(bs, page_num);
2077 
2078 		bs_batch_write_zeroes_dev(batch, lba, lba_count);
2079 	}
2080 
2081 	bs_batch_close(batch);
2082 }
2083 
2084 static void
2085 blob_persist_write_page_root(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2086 {
2087 	struct spdk_blob_persist_ctx	*ctx = cb_arg;
2088 	struct spdk_blob		*blob = ctx->blob;
2089 	struct spdk_blob_store		*bs = blob->bs;
2090 	uint64_t			lba;
2091 	uint32_t			lba_count;
2092 	struct spdk_blob_md_page	*page;
2093 
2094 	if (bserrno != 0) {
2095 		blob_persist_complete(seq, ctx, bserrno);
2096 		return;
2097 	}
2098 
2099 	if (blob->active.num_pages == 0) {
2100 		/* Move on to the next step */
2101 		blob_persist_zero_pages(seq, ctx, 0);
2102 		return;
2103 	}
2104 
2105 	lba_count = bs_byte_to_lba(bs, sizeof(*page));
2106 
2107 	page = &ctx->pages[0];
2108 	/* The first page in the metadata goes where the blobid indicates */
2109 	lba = bs_md_page_to_lba(bs, bs_blobid_to_page(blob->id));
2110 
2111 	bs_sequence_write_dev(seq, page, lba, lba_count,
2112 			      blob_persist_zero_pages, ctx);
2113 }
2114 
2115 static void
2116 blob_persist_write_page_chain(spdk_bs_sequence_t *seq, struct spdk_blob_persist_ctx *ctx)
2117 {
2118 	struct spdk_blob		*blob = ctx->blob;
2119 	struct spdk_blob_store		*bs = blob->bs;
2120 	uint64_t			lba;
2121 	uint32_t			lba_count;
2122 	struct spdk_blob_md_page	*page;
2123 	spdk_bs_batch_t			*batch;
2124 	size_t				i;
2125 
2126 	/* Metadata pages (except the root) are never written in place. A fresh set
2127 	 * of md pages is claimed for each persist, so they can all be written in one batch.
2128 	 */
2129 
2130 	lba_count = bs_byte_to_lba(bs, sizeof(*page));
2131 
2132 	batch = bs_sequence_to_batch(seq, blob_persist_write_page_root, ctx);
2133 
2134 	/* This starts at 1. The root page is not written until
2135 	 * all of the others are finished
2136 	 */
2137 	for (i = 1; i < blob->active.num_pages; i++) {
2138 		page = &ctx->pages[i];
2139 		assert(page->sequence_num == i);
2140 
2141 		lba = bs_md_page_to_lba(bs, blob->active.pages[i]);
2142 
2143 		bs_batch_write_dev(batch, page, lba, lba_count);
2144 	}
2145 
2146 	bs_batch_close(batch);
2147 }
2148 
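/*
 * Example of the resulting on-disk chain for three metadata pages (page
 * locations are illustrative):
 *
 *	root at bs_blobid_to_page(id) --next--> pages[1] --next--> pages[2]
 *
 * pages[1] and pages[2] are written by the batch above; the root is written
 * only afterwards, in blob_persist_write_page_root(). If power is lost before
 * the root write lands, the old root still references the old, not yet
 * zeroed pages, so the blob loads from its previous metadata.
 */
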
2149 static int
2150 blob_resize(struct spdk_blob *blob, uint64_t sz)
2151 {
2152 	uint64_t	i;
2153 	uint64_t	*tmp;
2154 	uint64_t	cluster;
2155 	uint32_t	lfmd; /*  lowest free md page */
2156 	uint64_t	num_clusters;
2157 	uint32_t	*ep_tmp;
2158 	uint64_t	new_num_ep = 0, current_num_ep = 0;
2159 	struct spdk_blob_store *bs;
2160 	int		rc;
2161 
2162 	bs = blob->bs;
2163 
2164 	blob_verify_md_op(blob);
2165 
2166 	if (blob->active.num_clusters == sz) {
2167 		return 0;
2168 	}
2169 
2170 	if (blob->active.num_clusters < blob->active.cluster_array_size) {
2171 		/* If this blob was resized to be larger, then smaller, then
2172 		 * larger without syncing, then the cluster array already
2173 		 * contains spare assigned clusters we can use.
2174 		 */
2175 		num_clusters = spdk_min(blob->active.cluster_array_size,
2176 					sz);
2177 	} else {
2178 		num_clusters = blob->active.num_clusters;
2179 	}
2180 
2181 	if (blob->use_extent_table) {
2182 		/* Round up, since every cluster beyond the current extent table size requires
2183 		 * a new extent page (e.g., if SPDK_EXTENTS_PER_EP were 512, 1000 clusters would need 2 extent pages). */
2184 		new_num_ep = spdk_divide_round_up(sz, SPDK_EXTENTS_PER_EP);
2185 		current_num_ep = spdk_divide_round_up(num_clusters, SPDK_EXTENTS_PER_EP);
2186 	}
2187 
2188 	assert(!spdk_spin_held(&bs->used_lock));
2189 
2190 	/* Check first that we have enough clusters and md pages before we start claiming them.
2191 	 * bs->used_lock is held to ensure that clusters we think are free are still free when we go
2192 	 * to claim them later in this function.
2193 	 */
2194 	if (sz > num_clusters && spdk_blob_is_thin_provisioned(blob) == false) {
2195 		spdk_spin_lock(&bs->used_lock);
2196 		if ((sz - num_clusters) > bs->num_free_clusters) {
2197 			rc = -ENOSPC;
2198 			goto out;
2199 		}
2200 		lfmd = 0;
2201 		for (i = current_num_ep; i < new_num_ep ; i++) {
2202 			lfmd = spdk_bit_array_find_first_clear(blob->bs->used_md_pages, lfmd);
2203 			if (lfmd == UINT32_MAX) {
2204 				/* No more free md pages. Cannot satisfy the request */
2205 				rc = -ENOSPC;
2206 				goto out;
2207 			}
2208 		}
2209 	}
2210 
2211 	if (sz > num_clusters) {
2212 		/* Expand the cluster array if necessary.
2213 		 * We only shrink the array when persisting.
2214 		 */
2215 		tmp = realloc(blob->active.clusters, sizeof(*blob->active.clusters) * sz);
2216 		if (sz > 0 && tmp == NULL) {
2217 			rc = -ENOMEM;
2218 			goto out;
2219 		}
2220 		memset(tmp + blob->active.cluster_array_size, 0,
2221 		       sizeof(*blob->active.clusters) * (sz - blob->active.cluster_array_size));
2222 		blob->active.clusters = tmp;
2223 		blob->active.cluster_array_size = sz;
2224 
2225 		/* Expand the extents table, only if enough clusters were added */
2226 		if (new_num_ep > current_num_ep && blob->use_extent_table) {
2227 			ep_tmp = realloc(blob->active.extent_pages, sizeof(*blob->active.extent_pages) * new_num_ep);
2228 			if (new_num_ep > 0 && ep_tmp == NULL) {
2229 				rc = -ENOMEM;
2230 				goto out;
2231 			}
2232 			memset(ep_tmp + blob->active.extent_pages_array_size, 0,
2233 			       sizeof(*blob->active.extent_pages) * (new_num_ep - blob->active.extent_pages_array_size));
2234 			blob->active.extent_pages = ep_tmp;
2235 			blob->active.extent_pages_array_size = new_num_ep;
2236 		}
2237 	}
2238 
2239 	blob->state = SPDK_BLOB_STATE_DIRTY;
2240 
2241 	if (spdk_blob_is_thin_provisioned(blob) == false) {
2242 		cluster = 0;
2243 		lfmd = 0;
2244 		for (i = num_clusters; i < sz; i++) {
2245 			bs_allocate_cluster(blob, i, &cluster, &lfmd, true);
2246 			/* Do not increment lfmd here.  lfmd will get updated
2247 			 * to the md_page allocated (if any) when a new extent
2248 			 * page is needed.  Just pass that value in again;
2249 			 * bs_allocate_cluster will start at that index to
2250 			 * find the next free md_page when needed.
2251 			 */
2252 		}
2253 	}
2254 
2255 	blob->active.num_clusters = sz;
2256 	blob->active.num_extent_pages = new_num_ep;
2257 
2258 	rc = 0;
2259 out:
2260 	if (spdk_spin_held(&bs->used_lock)) {
2261 		spdk_spin_unlock(&bs->used_lock);
2262 	}
2263 
2264 	return rc;
2265 }
2266 
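/*
 * Example: blob_resize() sits underneath the public resize API, which must be
 * called on the md thread; the new size is not durable until the metadata is
 * synced. A minimal sketch (callback names are hypothetical):
 *
 *	static void
 *	sync_done(void *cb_arg, int bserrno)
 *	{
 *		// The new size is on disk once bserrno == 0.
 *	}
 *
 *	static void
 *	resize_done(void *cb_arg, int bserrno)
 *	{
 *		struct spdk_blob *blob = cb_arg;
 *
 *		if (bserrno == 0) {
 *			spdk_blob_sync_md(blob, sync_done, NULL);
 *		}
 *	}
 *
 *	spdk_blob_resize(blob, new_sz_in_clusters, resize_done, blob);
 */
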
2267 static void
2268 blob_persist_generate_new_md(struct spdk_blob_persist_ctx *ctx)
2269 {
2270 	spdk_bs_sequence_t *seq = ctx->seq;
2271 	struct spdk_blob *blob = ctx->blob;
2272 	struct spdk_blob_store *bs = blob->bs;
2273 	uint64_t i;
2274 	uint32_t page_num;
2275 	void *tmp;
2276 	int rc;
2277 
2278 	/* Generate the new metadata */
2279 	rc = blob_serialize(blob, &ctx->pages, &blob->active.num_pages);
2280 	if (rc < 0) {
2281 		blob_persist_complete(seq, ctx, rc);
2282 		return;
2283 	}
2284 
2285 	assert(blob->active.num_pages >= 1);
2286 
2287 	/* Resize the cache of page indices */
2288 	tmp = realloc(blob->active.pages, blob->active.num_pages * sizeof(*blob->active.pages));
2289 	if (!tmp) {
2290 		blob_persist_complete(seq, ctx, -ENOMEM);
2291 		return;
2292 	}
2293 	blob->active.pages = tmp;
2294 
2295 	/* Assign this metadata to pages. This requires two passes - one to verify that there are
2296 	 * enough pages and a second to actually claim them. The used_lock is held across
2297 	 * both passes to ensure things don't change in the middle.
2298 	 */
2299 	spdk_spin_lock(&bs->used_lock);
2300 	page_num = 0;
2301 	/* Note that this loop starts at one. The first page location is fixed by the blobid. */
2302 	for (i = 1; i < blob->active.num_pages; i++) {
2303 		page_num = spdk_bit_array_find_first_clear(bs->used_md_pages, page_num);
2304 		if (page_num == UINT32_MAX) {
2305 			spdk_spin_unlock(&bs->used_lock);
2306 			blob_persist_complete(seq, ctx, -ENOMEM);
2307 			return;
2308 		}
2309 		page_num++;
2310 	}
2311 
2312 	page_num = 0;
2313 	blob->active.pages[0] = bs_blobid_to_page(blob->id);
2314 	for (i = 1; i < blob->active.num_pages; i++) {
2315 		page_num = spdk_bit_array_find_first_clear(bs->used_md_pages, page_num);
2316 		ctx->pages[i - 1].next = page_num;
2317 		/* Now that previous metadata page is complete, calculate the crc for it. */
2318 		ctx->pages[i - 1].crc = blob_md_page_calc_crc(&ctx->pages[i - 1]);
2319 		blob->active.pages[i] = page_num;
2320 		bs_claim_md_page(bs, page_num);
2321 		SPDK_DEBUGLOG(blob, "Claiming page %u for blob 0x%" PRIx64 "\n", page_num,
2322 			      blob->id);
2323 		page_num++;
2324 	}
2325 	spdk_spin_unlock(&bs->used_lock);
2326 	ctx->pages[i - 1].crc = blob_md_page_calc_crc(&ctx->pages[i - 1]);
2327 	/* Start writing the metadata from last page to first */
2328 	blob->state = SPDK_BLOB_STATE_CLEAN;
2329 	blob_persist_write_page_chain(seq, ctx);
2330 }
2331 
2332 static void
2333 blob_persist_write_extent_pages(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2334 {
2335 	struct spdk_blob_persist_ctx	*ctx = cb_arg;
2336 	struct spdk_blob		*blob = ctx->blob;
2337 	size_t				i;
2338 	uint32_t			extent_page_id;
2339 	uint32_t                        page_count = 0;
2340 	int				rc;
2341 
2342 	if (ctx->extent_page != NULL) {
2343 		spdk_free(ctx->extent_page);
2344 		ctx->extent_page = NULL;
2345 	}
2346 
2347 	if (bserrno != 0) {
2348 		blob_persist_complete(seq, ctx, bserrno);
2349 		return;
2350 	}
2351 
2352 	/* Only write out extent pages when the blob was resized. */
2353 	for (i = ctx->next_extent_page; i < blob->active.extent_pages_array_size; i++) {
2354 		extent_page_id = blob->active.extent_pages[i];
2355 		if (extent_page_id == 0) {
2356 			/* No Extent Page to persist */
2357 			assert(spdk_blob_is_thin_provisioned(blob));
2358 			continue;
2359 		}
2360 		assert(spdk_bit_array_get(blob->bs->used_md_pages, extent_page_id));
2361 		ctx->next_extent_page = i + 1;
2362 		rc = blob_serialize_add_page(ctx->blob, &ctx->extent_page, &page_count, &ctx->extent_page);
2363 		if (rc < 0) {
2364 			blob_persist_complete(seq, ctx, rc);
2365 			return;
2366 		}
2367 
2368 		blob->state = SPDK_BLOB_STATE_DIRTY;
2369 		blob_serialize_extent_page(blob, i * SPDK_EXTENTS_PER_EP, ctx->extent_page);
2370 
2371 		ctx->extent_page->crc = blob_md_page_calc_crc(ctx->extent_page);
2372 
2373 		bs_sequence_write_dev(seq, ctx->extent_page, bs_md_page_to_lba(blob->bs, extent_page_id),
2374 				      bs_byte_to_lba(blob->bs, SPDK_BS_PAGE_SIZE),
2375 				      blob_persist_write_extent_pages, ctx);
2376 		return;
2377 	}
2378 
2379 	blob_persist_generate_new_md(ctx);
2380 }
2381 
2382 static void
2383 blob_persist_start(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2384 {
2385 	struct spdk_blob_persist_ctx *ctx = cb_arg;
2386 	struct spdk_blob *blob = ctx->blob;
2387 
2388 	if (bserrno != 0) {
2389 		blob_persist_complete(seq, ctx, bserrno);
2390 		return;
2391 	}
2392 
2393 	if (blob->active.num_pages == 0) {
2394 		/* This is the signal that the blob should be deleted.
2395 		 * Immediately jump to the clean up routine. */
2396 		assert(blob->clean.num_pages > 0);
2397 		blob->state = SPDK_BLOB_STATE_CLEAN;
2398 		blob_persist_zero_pages(seq, ctx, 0);
2399 		return;
2400 
2401 	}
2402 
2403 	if (blob->clean.num_clusters < blob->active.num_clusters) {
2404 		/* Blob was resized up */
2405 		assert(blob->clean.num_extent_pages <= blob->active.num_extent_pages);
2406 		ctx->next_extent_page = spdk_max(1, blob->clean.num_extent_pages) - 1;
2407 	} else if (blob->active.num_clusters < blob->active.cluster_array_size) {
2408 		/* Blob was resized down */
2409 		assert(blob->clean.num_extent_pages >= blob->active.num_extent_pages);
2410 		ctx->next_extent_page = spdk_max(1, blob->active.num_extent_pages) - 1;
2411 	} else {
2412 		/* No change in size occurred */
2413 		blob_persist_generate_new_md(ctx);
2414 		return;
2415 	}
2416 
2417 	blob_persist_write_extent_pages(seq, ctx, 0);
2418 }
2419 
2420 struct spdk_bs_mark_dirty {
2421 	struct spdk_blob_store		*bs;
2422 	struct spdk_bs_super_block	*super;
2423 	spdk_bs_sequence_cpl		cb_fn;
2424 	void				*cb_arg;
2425 };
2426 
2427 static void
2428 bs_mark_dirty_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2429 {
2430 	struct spdk_bs_mark_dirty *ctx = cb_arg;
2431 
2432 	if (bserrno == 0) {
2433 		ctx->bs->clean = 0;
2434 	}
2435 
2436 	ctx->cb_fn(seq, ctx->cb_arg, bserrno);
2437 
2438 	spdk_free(ctx->super);
2439 	free(ctx);
2440 }
2441 
2442 static void bs_write_super(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs,
2443 			   struct spdk_bs_super_block *super, spdk_bs_sequence_cpl cb_fn, void *cb_arg);
2444 
2445 
2446 static void
2447 bs_mark_dirty_write(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2448 {
2449 	struct spdk_bs_mark_dirty *ctx = cb_arg;
2450 	int rc;
2451 
2452 	if (bserrno != 0) {
2453 		bs_mark_dirty_write_cpl(seq, ctx, bserrno);
2454 		return;
2455 	}
2456 
2457 	rc = bs_super_validate(ctx->super, ctx->bs);
2458 	if (rc != 0) {
2459 		bs_mark_dirty_write_cpl(seq, ctx, rc);
2460 		return;
2461 	}
2462 
2463 	ctx->super->clean = 0;
2464 	if (ctx->super->size == 0) {
2465 		ctx->super->size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen;
2466 	}
2467 
2468 	bs_write_super(seq, ctx->bs, ctx->super, bs_mark_dirty_write_cpl, ctx);
2469 }
2470 
2471 static void
2472 bs_mark_dirty(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs,
2473 	      spdk_bs_sequence_cpl cb_fn, void *cb_arg)
2474 {
2475 	struct spdk_bs_mark_dirty *ctx;
2476 
2477 	/* Blobstore is already marked dirty */
2478 	if (bs->clean == 0) {
2479 		cb_fn(seq, cb_arg, 0);
2480 		return;
2481 	}
2482 
2483 	ctx = calloc(1, sizeof(*ctx));
2484 	if (!ctx) {
2485 		cb_fn(seq, cb_arg, -ENOMEM);
2486 		return;
2487 	}
2488 	ctx->bs = bs;
2489 	ctx->cb_fn = cb_fn;
2490 	ctx->cb_arg = cb_arg;
2491 
2492 	ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL,
2493 				  SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
2494 	if (!ctx->super) {
2495 		free(ctx);
2496 		cb_fn(seq, cb_arg, -ENOMEM);
2497 		return;
2498 	}
2499 
2500 	bs_sequence_read_dev(seq, ctx->super, bs_page_to_lba(bs, 0),
2501 			     bs_byte_to_lba(bs, sizeof(*ctx->super)),
2502 			     bs_mark_dirty_write, ctx);
2503 }
2504 
2505 /* Write a blob to disk */
2506 static void
2507 blob_persist(spdk_bs_sequence_t *seq, struct spdk_blob *blob,
2508 	     spdk_bs_sequence_cpl cb_fn, void *cb_arg)
2509 {
2510 	struct spdk_blob_persist_ctx *ctx;
2511 
2512 	blob_verify_md_op(blob);
2513 
2514 	if (blob->state == SPDK_BLOB_STATE_CLEAN && TAILQ_EMPTY(&blob->persists_to_complete)) {
2515 		cb_fn(seq, cb_arg, 0);
2516 		return;
2517 	}
2518 
2519 	ctx = calloc(1, sizeof(*ctx));
2520 	if (!ctx) {
2521 		cb_fn(seq, cb_arg, -ENOMEM);
2522 		return;
2523 	}
2524 	ctx->blob = blob;
2525 	ctx->seq = seq;
2526 	ctx->cb_fn = cb_fn;
2527 	ctx->cb_arg = cb_arg;
2528 
2529 	/* Multiple blob persists can affect one another, via blob->state or
2530 	 * blob mutable data changes. To prevent this, queue up the persists. */
2531 	if (!TAILQ_EMPTY(&blob->persists_to_complete)) {
2532 		TAILQ_INSERT_TAIL(&blob->pending_persists, ctx, link);
2533 		return;
2534 	}
2535 	TAILQ_INSERT_HEAD(&blob->persists_to_complete, ctx, link);
2536 
2537 	bs_mark_dirty(seq, blob->bs, blob_persist_start, ctx);
2538 }
2539 
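/*
 * Example sequence for the queueing above (illustrative): with persist A in
 * flight when B and C are submitted,
 *
 *	A arrives   -> persists_to_complete = { A }, metadata write starts
 *	B, C arrive -> pending_persists = { B, C }
 *	A finishes  -> A completes; the lists swap and B's persist starts
 *	B finishes  -> B and C both complete, since the metadata written for
 *	               B already reflected C's in-memory changes
 */
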
2540 struct spdk_blob_copy_cluster_ctx {
2541 	struct spdk_blob *blob;
2542 	uint8_t *buf;
2543 	uint64_t page;
2544 	uint64_t new_cluster;
2545 	uint32_t new_extent_page;
2546 	spdk_bs_sequence_t *seq;
2547 	struct spdk_blob_md_page *new_cluster_page;
2548 };
2549 
2550 struct spdk_blob_free_cluster_ctx {
2551 	struct spdk_blob *blob;
2552 	uint64_t page;
2553 	struct spdk_blob_md_page *md_page;
2554 	uint64_t cluster_num;
2555 	uint32_t extent_page;
2556 	spdk_bs_sequence_t *seq;
2557 };
2558 
2559 static void
2560 blob_allocate_and_copy_cluster_cpl(void *cb_arg, int bserrno)
2561 {
2562 	struct spdk_blob_copy_cluster_ctx *ctx = cb_arg;
2563 	struct spdk_bs_request_set *set = (struct spdk_bs_request_set *)ctx->seq;
2564 	TAILQ_HEAD(, spdk_bs_request_set) requests;
2565 	spdk_bs_user_op_t *op;
2566 
2567 	TAILQ_INIT(&requests);
2568 	TAILQ_SWAP(&set->channel->need_cluster_alloc, &requests, spdk_bs_request_set, link);
2569 
2570 	while (!TAILQ_EMPTY(&requests)) {
2571 		op = TAILQ_FIRST(&requests);
2572 		TAILQ_REMOVE(&requests, op, link);
2573 		if (bserrno == 0) {
2574 			bs_user_op_execute(op);
2575 		} else {
2576 			bs_user_op_abort(op, bserrno);
2577 		}
2578 	}
2579 
2580 	spdk_free(ctx->buf);
2581 	free(ctx);
2582 }
2583 
2584 static void
2585 blob_free_cluster_cpl(void *cb_arg, int bserrno)
2586 {
2587 	struct spdk_blob_free_cluster_ctx *ctx = cb_arg;
2588 	spdk_bs_sequence_t *seq = ctx->seq;
2589 
2590 	bs_sequence_finish(seq, bserrno);
2591 
2592 	free(ctx);
2593 }
2594 
2595 static void
2596 blob_insert_cluster_cpl(void *cb_arg, int bserrno)
2597 {
2598 	struct spdk_blob_copy_cluster_ctx *ctx = cb_arg;
2599 
2600 	if (bserrno) {
2601 		if (bserrno == -EEXIST) {
2602 			/* The metadata insert failed because another thread
2603 			 * allocated the cluster first. Free our cluster
2604 			 * but continue without error. */
2605 			bserrno = 0;
2606 		}
2607 		spdk_spin_lock(&ctx->blob->bs->used_lock);
2608 		bs_release_cluster(ctx->blob->bs, ctx->new_cluster);
2609 		if (ctx->new_extent_page != 0) {
2610 			bs_release_md_page(ctx->blob->bs, ctx->new_extent_page);
2611 		}
2612 		spdk_spin_unlock(&ctx->blob->bs->used_lock);
2613 	}
2614 
2615 	bs_sequence_finish(ctx->seq, bserrno);
2616 }
2617 
2618 static void
2619 blob_write_copy_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2620 {
2621 	struct spdk_blob_copy_cluster_ctx *ctx = cb_arg;
2622 	uint32_t cluster_number;
2623 
2624 	if (bserrno) {
2625 		/* The write failed, so jump to the final completion handler */
2626 		bs_sequence_finish(seq, bserrno);
2627 		return;
2628 	}
2629 
2630 	cluster_number = bs_page_to_cluster(ctx->blob->bs, ctx->page);
2631 
2632 	blob_insert_cluster_on_md_thread(ctx->blob, cluster_number, ctx->new_cluster,
2633 					 ctx->new_extent_page, ctx->new_cluster_page, blob_insert_cluster_cpl, ctx);
2634 }
2635 
2636 static void
2637 blob_write_copy(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2638 {
2639 	struct spdk_blob_copy_cluster_ctx *ctx = cb_arg;
2640 
2641 	if (bserrno != 0) {
2642 		/* The read failed, so jump to the final completion handler */
2643 		bs_sequence_finish(seq, bserrno);
2644 		return;
2645 	}
2646 
2647 	/* Write whole cluster */
2648 	bs_sequence_write_dev(seq, ctx->buf,
2649 			      bs_cluster_to_lba(ctx->blob->bs, ctx->new_cluster),
2650 			      bs_cluster_to_lba(ctx->blob->bs, 1),
2651 			      blob_write_copy_cpl, ctx);
2652 }
2653 
2654 static bool
2655 blob_can_copy(struct spdk_blob *blob, uint32_t cluster_start_page, uint64_t *base_lba)
2656 {
2657 	uint64_t lba = bs_dev_page_to_lba(blob->back_bs_dev, cluster_start_page);
2658 
2659 	return (!blob_is_esnap_clone(blob) && blob->bs->dev->copy != NULL) &&
2660 	       blob->back_bs_dev->translate_lba(blob->back_bs_dev, lba, base_lba);
2661 }
2662 
2663 static void
2664 blob_copy(struct spdk_blob_copy_cluster_ctx *ctx, spdk_bs_user_op_t *op, uint64_t src_lba)
2665 {
2666 	struct spdk_blob *blob = ctx->blob;
2667 	uint64_t lba_count = bs_dev_byte_to_lba(blob->back_bs_dev, blob->bs->cluster_sz);
2668 
2669 	bs_sequence_copy_dev(ctx->seq,
2670 			     bs_cluster_to_lba(blob->bs, ctx->new_cluster),
2671 			     src_lba,
2672 			     lba_count,
2673 			     blob_write_copy_cpl, ctx);
2674 }
2675 
2676 static void
2677 bs_allocate_and_copy_cluster(struct spdk_blob *blob,
2678 			     struct spdk_io_channel *_ch,
2679 			     uint64_t io_unit, spdk_bs_user_op_t *op)
2680 {
2681 	struct spdk_bs_cpl cpl;
2682 	struct spdk_bs_channel *ch;
2683 	struct spdk_blob_copy_cluster_ctx *ctx;
2684 	uint32_t cluster_start_page;
2685 	uint32_t cluster_number;
2686 	bool is_zeroes;
2687 	bool can_copy;
2688 	uint64_t copy_src_lba;
2689 	int rc;
2690 
2691 	ch = spdk_io_channel_get_ctx(_ch);
2692 
2693 	if (!TAILQ_EMPTY(&ch->need_cluster_alloc)) {
2694 		/* There are already operations pending. Queue this user op
2695 		 * and return because it will be re-executed when the outstanding
2696 		 * cluster allocation completes. */
2697 		TAILQ_INSERT_TAIL(&ch->need_cluster_alloc, op, link);
2698 		return;
2699 	}
2700 
2701 	/* Round the io_unit offset down to the first page in the cluster */
2702 	cluster_start_page = bs_io_unit_to_cluster_start(blob, io_unit);
2703 
2704 	/* Calculate which index in the metadata cluster array the corresponding
2705 	 * cluster is supposed to be at. */
2706 	cluster_number = bs_io_unit_to_cluster_number(blob, io_unit);
2707 
2708 	ctx = calloc(1, sizeof(*ctx));
2709 	if (!ctx) {
2710 		bs_user_op_abort(op, -ENOMEM);
2711 		return;
2712 	}
2713 
2714 	assert(blob->bs->cluster_sz % blob->back_bs_dev->blocklen == 0);
2715 
2716 	ctx->blob = blob;
2717 	ctx->page = cluster_start_page;
2718 	ctx->new_cluster_page = ch->new_cluster_page;
2719 	memset(ctx->new_cluster_page, 0, SPDK_BS_PAGE_SIZE);
2720 	can_copy = blob_can_copy(blob, cluster_start_page, &copy_src_lba);
2721 
2722 	is_zeroes = blob->back_bs_dev->is_zeroes(blob->back_bs_dev,
2723 			bs_dev_page_to_lba(blob->back_bs_dev, cluster_start_page),
2724 			bs_dev_byte_to_lba(blob->back_bs_dev, blob->bs->cluster_sz));
2725 	if (blob->parent_id != SPDK_BLOBID_INVALID && !is_zeroes && !can_copy) {
2726 		ctx->buf = spdk_malloc(blob->bs->cluster_sz, blob->back_bs_dev->blocklen,
2727 				       NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
2728 		if (!ctx->buf) {
2729 			SPDK_ERRLOG("DMA allocation for cluster of size = %" PRIu32 " failed.\n",
2730 				    blob->bs->cluster_sz);
2731 			free(ctx);
2732 			bs_user_op_abort(op, -ENOMEM);
2733 			return;
2734 		}
2735 	}
2736 
2737 	spdk_spin_lock(&blob->bs->used_lock);
2738 	rc = bs_allocate_cluster(blob, cluster_number, &ctx->new_cluster, &ctx->new_extent_page,
2739 				 false);
2740 	spdk_spin_unlock(&blob->bs->used_lock);
2741 	if (rc != 0) {
2742 		spdk_free(ctx->buf);
2743 		free(ctx);
2744 		bs_user_op_abort(op, rc);
2745 		return;
2746 	}
2747 
2748 	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
2749 	cpl.u.blob_basic.cb_fn = blob_allocate_and_copy_cluster_cpl;
2750 	cpl.u.blob_basic.cb_arg = ctx;
2751 
2752 	ctx->seq = bs_sequence_start_blob(_ch, &cpl, blob);
2753 	if (!ctx->seq) {
2754 		spdk_spin_lock(&blob->bs->used_lock);
2755 		bs_release_cluster(blob->bs, ctx->new_cluster);
2756 		spdk_spin_unlock(&blob->bs->used_lock);
2757 		spdk_free(ctx->buf);
2758 		free(ctx);
2759 		bs_user_op_abort(op, -ENOMEM);
2760 		return;
2761 	}
2762 
2763 	/* Queue the user op to block other incoming operations */
2764 	TAILQ_INSERT_TAIL(&ch->need_cluster_alloc, op, link);
2765 
2766 	if (blob->parent_id != SPDK_BLOBID_INVALID && !is_zeroes) {
2767 		if (can_copy) {
2768 			blob_copy(ctx, op, copy_src_lba);
2769 		} else {
2770 			/* Read cluster from backing device */
2771 			bs_sequence_read_bs_dev(ctx->seq, blob->back_bs_dev, ctx->buf,
2772 						bs_dev_page_to_lba(blob->back_bs_dev, cluster_start_page),
2773 						bs_dev_byte_to_lba(blob->back_bs_dev, blob->bs->cluster_sz),
2774 						blob_write_copy, ctx);
2775 		}
2776 
2777 	} else {
2778 		blob_insert_cluster_on_md_thread(ctx->blob, cluster_number, ctx->new_cluster,
2779 						 ctx->new_extent_page, ctx->new_cluster_page, blob_insert_cluster_cpl, ctx);
2780 	}
2781 }
2782 
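/*
 * Summary of the copy-on-write paths chosen above for a not-yet-allocated
 * cluster:
 *
 *	no parent, or parent reads as zeroes -> no data movement; just insert
 *	                                        the new cluster into the md
 *	device supports copy offload         -> blob_copy(): copy on the device
 *	                                        directly from the parent's LBAs
 *	otherwise                            -> read the parent cluster into
 *	                                        ctx->buf, then write it out via
 *	                                        blob_write_copy()
 */
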
2783 static inline bool
2784 blob_calculate_lba_and_lba_count(struct spdk_blob *blob, uint64_t io_unit, uint64_t length,
2785 				 uint64_t *lba,	uint64_t *lba_count)
2786 {
2787 	*lba_count = length;
2788 
2789 	if (!bs_io_unit_is_allocated(blob, io_unit)) {
2790 		assert(blob->back_bs_dev != NULL);
2791 		*lba = bs_io_unit_to_back_dev_lba(blob, io_unit);
2792 		*lba_count = bs_io_unit_to_back_dev_lba(blob, *lba_count);
2793 		return false;
2794 	} else {
2795 		*lba = bs_blob_io_unit_to_lba(blob, io_unit);
2796 		return true;
2797 	}
2798 }
2799 
2800 struct op_split_ctx {
2801 	struct spdk_blob *blob;
2802 	struct spdk_io_channel *channel;
2803 	uint64_t io_unit_offset;
2804 	uint64_t io_units_remaining;
2805 	void *curr_payload;
2806 	enum spdk_blob_op_type op_type;
2807 	spdk_bs_sequence_t *seq;
2808 	bool in_submit_ctx;
2809 	bool completed_in_submit_ctx;
2810 	bool done;
2811 };
2812 
2813 static void
2814 blob_request_submit_op_split_next(void *cb_arg, int bserrno)
2815 {
2816 	struct op_split_ctx	*ctx = cb_arg;
2817 	struct spdk_blob	*blob = ctx->blob;
2818 	struct spdk_io_channel	*ch = ctx->channel;
2819 	enum spdk_blob_op_type	op_type = ctx->op_type;
2820 	uint8_t			*buf;
2821 	uint64_t		offset;
2822 	uint64_t		length;
2823 	uint64_t		op_length;
2824 
2825 	if (bserrno != 0 || ctx->io_units_remaining == 0) {
2826 		bs_sequence_finish(ctx->seq, bserrno);
2827 		if (ctx->in_submit_ctx) {
2828 			/* Defer freeing of the ctx object, since it will be
2829 			 * accessed when this unwinds back to the submission
2830 			 * context.
2831 			 */
2832 			ctx->done = true;
2833 		} else {
2834 			free(ctx);
2835 		}
2836 		return;
2837 	}
2838 
2839 	if (ctx->in_submit_ctx) {
2840 		/* If this split operation completed in the context
2841 		 * of its submission, mark the flag and return immediately
2842 		 * to avoid recursion.
2843 		 */
2844 		ctx->completed_in_submit_ctx = true;
2845 		return;
2846 	}
2847 
2848 	while (true) {
2849 		ctx->completed_in_submit_ctx = false;
2850 
2851 		offset = ctx->io_unit_offset;
2852 		length = ctx->io_units_remaining;
2853 		buf = ctx->curr_payload;
2854 		op_length = spdk_min(length, bs_num_io_units_to_cluster_boundary(blob,
2855 				     offset));
2856 
2857 		/* Update length and payload for next operation */
2858 		ctx->io_units_remaining -= op_length;
2859 		ctx->io_unit_offset += op_length;
2860 		if (op_type == SPDK_BLOB_WRITE || op_type == SPDK_BLOB_READ) {
2861 			ctx->curr_payload += op_length * blob->bs->io_unit_size;
2862 		}
2863 
2864 		assert(!ctx->in_submit_ctx);
2865 		ctx->in_submit_ctx = true;
2866 
2867 		switch (op_type) {
2868 		case SPDK_BLOB_READ:
2869 			spdk_blob_io_read(blob, ch, buf, offset, op_length,
2870 					  blob_request_submit_op_split_next, ctx);
2871 			break;
2872 		case SPDK_BLOB_WRITE:
2873 			spdk_blob_io_write(blob, ch, buf, offset, op_length,
2874 					   blob_request_submit_op_split_next, ctx);
2875 			break;
2876 		case SPDK_BLOB_UNMAP:
2877 			spdk_blob_io_unmap(blob, ch, offset, op_length,
2878 					   blob_request_submit_op_split_next, ctx);
2879 			break;
2880 		case SPDK_BLOB_WRITE_ZEROES:
2881 			spdk_blob_io_write_zeroes(blob, ch, offset, op_length,
2882 						  blob_request_submit_op_split_next, ctx);
2883 			break;
2884 		case SPDK_BLOB_READV:
2885 		case SPDK_BLOB_WRITEV:
2886 			SPDK_ERRLOG("readv/write not valid\n");
2887 			bs_sequence_finish(ctx->seq, -EINVAL);
2888 			free(ctx);
2889 			return;
2890 		}
2891 
2892 #ifndef __clang_analyzer__
2893 		/* scan-build reports a false positive around accessing the ctx here. It
2894 		 * forms a path that recursively calls this function, but then says
2895 		 * "assuming ctx->in_submit_ctx is false", when that isn't possible.
2896 		 * This path does free(ctx), returns to here, and reports a use-after-free
2897 		 * bug.  Wrapping this bit of code so that scan-build doesn't see it
2898 		 * works around the scan-build bug.
2899 		 */
2900 		assert(ctx->in_submit_ctx);
2901 		ctx->in_submit_ctx = false;
2902 
2903 		/* If the operation completed immediately, loop back and submit the
2904 		 * next operation.  Otherwise we can return and the next split
2905 		 * operation will get submitted when this current operation is
2906 		 * later completed asynchronously.
2907 		 */
2908 		if (ctx->completed_in_submit_ctx) {
2909 			continue;
2910 		} else if (ctx->done) {
2911 			free(ctx);
2912 		}
2913 #endif
2914 		break;
2915 	}
2916 }
2917 
2918 static void
2919 blob_request_submit_op_split(struct spdk_io_channel *ch, struct spdk_blob *blob,
2920 			     void *payload, uint64_t offset, uint64_t length,
2921 			     spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type)
2922 {
2923 	struct op_split_ctx *ctx;
2924 	spdk_bs_sequence_t *seq;
2925 	struct spdk_bs_cpl cpl;
2926 
2927 	assert(blob != NULL);
2928 
2929 	ctx = calloc(1, sizeof(struct op_split_ctx));
2930 	if (ctx == NULL) {
2931 		cb_fn(cb_arg, -ENOMEM);
2932 		return;
2933 	}
2934 
2935 	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
2936 	cpl.u.blob_basic.cb_fn = cb_fn;
2937 	cpl.u.blob_basic.cb_arg = cb_arg;
2938 
2939 	seq = bs_sequence_start_blob(ch, &cpl, blob);
2940 	if (!seq) {
2941 		free(ctx);
2942 		cb_fn(cb_arg, -ENOMEM);
2943 		return;
2944 	}
2945 
2946 	ctx->blob = blob;
2947 	ctx->channel = ch;
2948 	ctx->curr_payload = payload;
2949 	ctx->io_unit_offset = offset;
2950 	ctx->io_units_remaining = length;
2951 	ctx->op_type = op_type;
2952 	ctx->seq = seq;
2953 
2954 	blob_request_submit_op_split_next(ctx, 0);
2955 }
2956 
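/*
 * Worked example for the split path (illustrative sizes): with a 1 MiB
 * cluster and 4 KiB io_units there are 256 io_units per cluster, so a write
 * of 100 io_units at offset 200 crosses one cluster boundary and is issued
 * as two sequential operations:
 *
 *	op 1: offset 200, length 56   (up to the boundary at 256)
 *	op 2: offset 256, length 44   (the remainder)
 */
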
2957 static void
2958 spdk_free_cluster_unmap_complete(void *cb_arg, int bserrno)
2959 {
2960 	struct spdk_blob_free_cluster_ctx *ctx = cb_arg;
2961 
2962 	if (bserrno) {
2963 		bs_sequence_finish(ctx->seq, bserrno);
2964 		free(ctx);
2965 		return;
2966 	}
2967 
2968 	blob_free_cluster_on_md_thread(ctx->blob, ctx->cluster_num,
2969 				       ctx->extent_page, ctx->md_page, blob_free_cluster_cpl, ctx);
2970 }
2971 
2972 static void
2973 blob_request_submit_op_single(struct spdk_io_channel *_ch, struct spdk_blob *blob,
2974 			      void *payload, uint64_t offset, uint64_t length,
2975 			      spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type)
2976 {
2977 	struct spdk_bs_cpl cpl;
2978 	uint64_t lba;
2979 	uint64_t lba_count;
2980 	bool is_allocated;
2981 
2982 	assert(blob != NULL);
2983 
2984 	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
2985 	cpl.u.blob_basic.cb_fn = cb_fn;
2986 	cpl.u.blob_basic.cb_arg = cb_arg;
2987 
2988 	if (blob->frozen_refcnt) {
2989 		/* This blob I/O is frozen */
2990 		spdk_bs_user_op_t *op;
2991 		struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(_ch);
2992 
2993 		op = bs_user_op_alloc(_ch, &cpl, op_type, blob, payload, 0, offset, length);
2994 		if (!op) {
2995 			cb_fn(cb_arg, -ENOMEM);
2996 			return;
2997 		}
2998 
2999 		TAILQ_INSERT_TAIL(&bs_channel->queued_io, op, link);
3000 
3001 		return;
3002 	}
3003 
3004 	is_allocated = blob_calculate_lba_and_lba_count(blob, offset, length, &lba, &lba_count);
3005 
3006 	switch (op_type) {
3007 	case SPDK_BLOB_READ: {
3008 		spdk_bs_batch_t *batch;
3009 
3010 		batch = bs_batch_open(_ch, &cpl, blob);
3011 		if (!batch) {
3012 			cb_fn(cb_arg, -ENOMEM);
3013 			return;
3014 		}
3015 
3016 		if (is_allocated) {
3017 			/* Read from the blob */
3018 			bs_batch_read_dev(batch, payload, lba, lba_count);
3019 		} else {
3020 			/* Read from the backing block device */
3021 			bs_batch_read_bs_dev(batch, blob->back_bs_dev, payload, lba, lba_count);
3022 		}
3023 
3024 		bs_batch_close(batch);
3025 		break;
3026 	}
3027 	case SPDK_BLOB_WRITE:
3028 	case SPDK_BLOB_WRITE_ZEROES: {
3029 		if (is_allocated) {
3030 			/* Write to the blob */
3031 			spdk_bs_batch_t *batch;
3032 
3033 			if (lba_count == 0) {
3034 				cb_fn(cb_arg, 0);
3035 				return;
3036 			}
3037 
3038 			batch = bs_batch_open(_ch, &cpl, blob);
3039 			if (!batch) {
3040 				cb_fn(cb_arg, -ENOMEM);
3041 				return;
3042 			}
3043 
3044 			if (op_type == SPDK_BLOB_WRITE) {
3045 				bs_batch_write_dev(batch, payload, lba, lba_count);
3046 			} else {
3047 				bs_batch_write_zeroes_dev(batch, lba, lba_count);
3048 			}
3049 
3050 			bs_batch_close(batch);
3051 		} else {
3052 			/* Queue this operation and allocate the cluster */
3053 			spdk_bs_user_op_t *op;
3054 
3055 			op = bs_user_op_alloc(_ch, &cpl, op_type, blob, payload, 0, offset, length);
3056 			if (!op) {
3057 				cb_fn(cb_arg, -ENOMEM);
3058 				return;
3059 			}
3060 
3061 			bs_allocate_and_copy_cluster(blob, _ch, offset, op);
3062 		}
3063 		break;
3064 	}
3065 	case SPDK_BLOB_UNMAP: {
3066 		struct spdk_blob_free_cluster_ctx *ctx = NULL;
3067 		spdk_bs_batch_t *batch;
3068 
3069 		/* If the unmap is aligned to a whole cluster, release the cluster */
3070 		if (spdk_blob_is_thin_provisioned(blob) && is_allocated &&
3071 		    bs_io_units_per_cluster(blob) == length) {
3072 			struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(_ch);
3073 			uint32_t cluster_start_page;
3074 			uint32_t cluster_number;
3075 
3076 			assert(offset % bs_io_units_per_cluster(blob) == 0);
3077 
3078 			/* Round the io_unit offset down to the first page in the cluster */
3079 			cluster_start_page = bs_io_unit_to_cluster_start(blob, offset);
3080 
3081 			/* Calculate which index in the metadata cluster array the corresponding
3082 			 * cluster is supposed to be at. */
3083 			cluster_number = bs_io_unit_to_cluster_number(blob, offset);
3084 
3085 			ctx = calloc(1, sizeof(*ctx));
3086 			if (!ctx) {
3087 				cb_fn(cb_arg, -ENOMEM);
3088 				return;
3089 			}
3090 			/* When freeing a cluster the flow should be (in order):
3091 			 * 1. Unmap the underlying area (so if the cluster is reclaimed in the future, it won't leak
3092 			 * old data)
3093 			 * 2. Once the unmap completes (to avoid any races with incoming writes that may claim the
3094 			 * cluster), update and sync metadata freeing the cluster
3095 			 * 3. Once metadata update is done, complete the user unmap request
3096 			 */
3097 			ctx->blob = blob;
3098 			ctx->page = cluster_start_page;
3099 			ctx->cluster_num = cluster_number;
3100 			ctx->md_page = bs_channel->new_cluster_page;
3101 			ctx->seq = bs_sequence_start_bs(_ch, &cpl);
3102 			if (!ctx->seq) {
3103 				free(ctx);
3104 				cb_fn(cb_arg, -ENOMEM);
3105 				return;
3106 			}
3107 
3108 			if (blob->use_extent_table) {
3109 				ctx->extent_page = *bs_cluster_to_extent_page(blob, cluster_number);
3110 			}
3111 
3112 			cpl.u.blob_basic.cb_fn = spdk_free_cluster_unmap_complete;
3113 			cpl.u.blob_basic.cb_arg = ctx;
3114 		}
3115 
3116 		batch = bs_batch_open(_ch, &cpl, blob);
3117 		if (!batch) {
3118 			free(ctx);
3119 			cb_fn(cb_arg, -ENOMEM);
3120 			return;
3121 		}
3122 
3123 		if (is_allocated) {
3124 			bs_batch_unmap_dev(batch, lba, lba_count);
3125 		}
3126 
3127 		bs_batch_close(batch);
3128 		break;
3129 	}
3130 	case SPDK_BLOB_READV:
3131 	case SPDK_BLOB_WRITEV:
3132 		SPDK_ERRLOG("readv/write not valid\n");
3133 		cb_fn(cb_arg, -EINVAL);
3134 		break;
3135 	}
3136 }
3137 
3138 static void
3139 blob_request_submit_op(struct spdk_blob *blob, struct spdk_io_channel *_channel,
3140 		       void *payload, uint64_t offset, uint64_t length,
3141 		       spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type)
3142 {
3143 	assert(blob != NULL);
3144 
3145 	if (blob->data_ro && op_type != SPDK_BLOB_READ) {
3146 		cb_fn(cb_arg, -EPERM);
3147 		return;
3148 	}
3149 
3150 	if (length == 0) {
3151 		cb_fn(cb_arg, 0);
3152 		return;
3153 	}
3154 
3155 	if (offset + length > bs_cluster_to_lba(blob->bs, blob->active.num_clusters)) {
3156 		cb_fn(cb_arg, -EINVAL);
3157 		return;
3158 	}
3159 	if (length <= bs_num_io_units_to_cluster_boundary(blob, offset)) {
3160 		blob_request_submit_op_single(_channel, blob, payload, offset, length,
3161 					      cb_fn, cb_arg, op_type);
3162 	} else {
3163 		blob_request_submit_op_split(_channel, blob, payload, offset, length,
3164 					     cb_fn, cb_arg, op_type);
3165 	}
3166 }
3167 
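/*
 * Example: the dispatch above sits underneath the public I/O calls. A minimal
 * write sketch (error handling elided; the callback name is hypothetical):
 *
 *	static void
 *	write_done(void *cb_arg, int bserrno)
 *	{
 *		// bserrno is -EPERM for a read-only blob, -EINVAL past the end.
 *	}
 *
 *	uint64_t io_unit_sz = spdk_bs_get_io_unit_size(bs);
 *	uint8_t *buf = spdk_malloc(io_unit_sz, io_unit_sz, NULL,
 *				   SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
 *
 *	// Write one io_unit at offset 0.
 *	spdk_blob_io_write(blob, channel, buf, 0, 1, write_done, NULL);
 */
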
3168 struct rw_iov_ctx {
3169 	struct spdk_blob *blob;
3170 	struct spdk_io_channel *channel;
3171 	spdk_blob_op_complete cb_fn;
3172 	void *cb_arg;
3173 	bool read;
3174 	int iovcnt;
3175 	struct iovec *orig_iov;
3176 	uint64_t io_unit_offset;
3177 	uint64_t io_units_remaining;
3178 	uint64_t io_units_done;
3179 	struct spdk_blob_ext_io_opts *ext_io_opts;
3180 	struct iovec iov[0];
3181 };
3182 
3183 static void
3184 rw_iov_done(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
3185 {
3186 	assert(cb_arg == NULL);
3187 	bs_sequence_finish(seq, bserrno);
3188 }
3189 
3190 static void
3191 rw_iov_split_next(void *cb_arg, int bserrno)
3192 {
3193 	struct rw_iov_ctx *ctx = cb_arg;
3194 	struct spdk_blob *blob = ctx->blob;
3195 	struct iovec *iov, *orig_iov;
3196 	int iovcnt;
3197 	size_t orig_iovoff;
3198 	uint64_t io_units_count, io_units_to_boundary, io_unit_offset;
3199 	uint64_t byte_count;
3200 
3201 	if (bserrno != 0 || ctx->io_units_remaining == 0) {
3202 		ctx->cb_fn(ctx->cb_arg, bserrno);
3203 		free(ctx);
3204 		return;
3205 	}
3206 
3207 	io_unit_offset = ctx->io_unit_offset;
3208 	io_units_to_boundary = bs_num_io_units_to_cluster_boundary(blob, io_unit_offset);
3209 	io_units_count = spdk_min(ctx->io_units_remaining, io_units_to_boundary);
3210 	/*
3211 	 * Get index and offset into the original iov array for our current position in the I/O sequence.
3212 	 *  byte_count keeps track of how many bytes remain until orig_iov and orig_iovoff
3213 	 *  point to the current position in the I/O sequence.
3214 	 */
3215 	byte_count = ctx->io_units_done * blob->bs->io_unit_size;
3216 	orig_iov = &ctx->orig_iov[0];
3217 	orig_iovoff = 0;
3218 	while (byte_count > 0) {
3219 		if (byte_count >= orig_iov->iov_len) {
3220 			byte_count -= orig_iov->iov_len;
3221 			orig_iov++;
3222 		} else {
3223 			orig_iovoff = byte_count;
3224 			byte_count = 0;
3225 		}
3226 	}
3227 
3228 	/*
3229 	 * Build an iov array for the next I/O in the sequence.  byte_count will keep track of how many
3230 	 *  bytes of this next I/O remain to be accounted for in the new iov array.
3231 	 */
3232 	byte_count = io_units_count * blob->bs->io_unit_size;
3233 	iov = &ctx->iov[0];
3234 	iovcnt = 0;
3235 	while (byte_count > 0) {
3236 		assert(iovcnt < ctx->iovcnt);
3237 		iov->iov_len = spdk_min(byte_count, orig_iov->iov_len - orig_iovoff);
3238 		iov->iov_base = orig_iov->iov_base + orig_iovoff;
3239 		byte_count -= iov->iov_len;
3240 		orig_iovoff = 0;
3241 		orig_iov++;
3242 		iov++;
3243 		iovcnt++;
3244 	}
3245 
3246 	ctx->io_unit_offset += io_units_count;
3247 	ctx->io_units_remaining -= io_units_count;
3248 	ctx->io_units_done += io_units_count;
3249 	iov = &ctx->iov[0];
3250 
3251 	if (ctx->read) {
3252 		spdk_blob_io_readv_ext(ctx->blob, ctx->channel, iov, iovcnt, io_unit_offset,
3253 				       io_units_count, rw_iov_split_next, ctx, ctx->ext_io_opts);
3254 	} else {
3255 		spdk_blob_io_writev_ext(ctx->blob, ctx->channel, iov, iovcnt, io_unit_offset,
3256 					io_units_count, rw_iov_split_next, ctx, ctx->ext_io_opts);
3257 	}
3258 }
3259 
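/*
 * Worked example for the iov rebuild above (illustrative sizes): io_unit is
 * 4 KiB, orig_iov = { 8 KiB, 4 KiB, 8 KiB } and io_units_done = 1, so the
 * walk lands 4 KiB into the first buffer. The next I/O of 3 io_units (12 KiB)
 * is then built as:
 *
 *	iov[0] = { orig_iov[0].iov_base + 4K, 4K }   (rest of buffer 0)
 *	iov[1] = { orig_iov[1].iov_base,      4K }   (all of buffer 1)
 *	iov[2] = { orig_iov[2].iov_base,      4K }   (start of buffer 2)
 */
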
3260 static void
3261 blob_request_submit_rw_iov(struct spdk_blob *blob, struct spdk_io_channel *_channel,
3262 			   struct iovec *iov, int iovcnt,
3263 			   uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg, bool read,
3264 			   struct spdk_blob_ext_io_opts *ext_io_opts)
3265 {
3266 	struct spdk_bs_cpl	cpl;
3267 
3268 	assert(blob != NULL);
3269 
3270 	if (!read && blob->data_ro) {
3271 		cb_fn(cb_arg, -EPERM);
3272 		return;
3273 	}
3274 
3275 	if (length == 0) {
3276 		cb_fn(cb_arg, 0);
3277 		return;
3278 	}
3279 
3280 	if (offset + length > bs_cluster_to_lba(blob->bs, blob->active.num_clusters)) {
3281 		cb_fn(cb_arg, -EINVAL);
3282 		return;
3283 	}
3284 
3285 	/*
3286 	 * For now, we implement readv/writev using a sequence (instead of a batch) to account for having
3287 	 *  to split a request that spans a cluster boundary.  For I/O that do not span a cluster boundary,
3288 	 *  there will be no noticeable difference compared to using a batch.  For I/O that do span a cluster
3289 	 *  boundary, the target LBAs (after blob offset to LBA translation) may not be contiguous, so we need
3290 	 *  to allocate a separate iov array and split the I/O such that none of the resulting
3291 	 *  smaller I/O cross a cluster boundary.  These smaller I/O will be issued in sequence (not in parallel)
3292 	 *  but since this case happens very infrequently, any performance impact will be negligible.
3293 	 *
3294 	 * This could be optimized in the future to allocate a big enough iov array to account for all of the iovs
3295 	 *  for all of the smaller I/Os, pre-build all of the iov arrays for the smaller I/Os, then issue them
3296 	 *  in a batch.  That would also require creating an intermediate spdk_bs_cpl that would get called
3297 	 *  when the batch was completed, to allow for freeing the memory for the iov arrays.
3298 	 */
3299 	if (spdk_likely(length <= bs_num_io_units_to_cluster_boundary(blob, offset))) {
3300 		uint64_t lba_count;
3301 		uint64_t lba;
3302 		bool is_allocated;
3303 
3304 		cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
3305 		cpl.u.blob_basic.cb_fn = cb_fn;
3306 		cpl.u.blob_basic.cb_arg = cb_arg;
3307 
3308 		if (blob->frozen_refcnt) {
3309 			/* This blob I/O is frozen */
3310 			enum spdk_blob_op_type op_type;
3311 			spdk_bs_user_op_t *op;
3312 			struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(_channel);
3313 
3314 			op_type = read ? SPDK_BLOB_READV : SPDK_BLOB_WRITEV;
3315 			op = bs_user_op_alloc(_channel, &cpl, op_type, blob, iov, iovcnt, offset, length);
3316 			if (!op) {
3317 				cb_fn(cb_arg, -ENOMEM);
3318 				return;
3319 			}
3320 
3321 			TAILQ_INSERT_TAIL(&bs_channel->queued_io, op, link);
3322 
3323 			return;
3324 		}
3325 
3326 		is_allocated = blob_calculate_lba_and_lba_count(blob, offset, length, &lba, &lba_count);
3327 
3328 		if (read) {
3329 			spdk_bs_sequence_t *seq;
3330 
3331 			seq = bs_sequence_start_blob(_channel, &cpl, blob);
3332 			if (!seq) {
3333 				cb_fn(cb_arg, -ENOMEM);
3334 				return;
3335 			}
3336 
3337 			seq->ext_io_opts = ext_io_opts;
3338 
3339 			if (is_allocated) {
3340 				bs_sequence_readv_dev(seq, iov, iovcnt, lba, lba_count, rw_iov_done, NULL);
3341 			} else {
3342 				bs_sequence_readv_bs_dev(seq, blob->back_bs_dev, iov, iovcnt, lba, lba_count,
3343 							 rw_iov_done, NULL);
3344 			}
3345 		} else {
3346 			if (is_allocated) {
3347 				spdk_bs_sequence_t *seq;
3348 
3349 				seq = bs_sequence_start_blob(_channel, &cpl, blob);
3350 				if (!seq) {
3351 					cb_fn(cb_arg, -ENOMEM);
3352 					return;
3353 				}
3354 
3355 				seq->ext_io_opts = ext_io_opts;
3356 
3357 				bs_sequence_writev_dev(seq, iov, iovcnt, lba, lba_count, rw_iov_done, NULL);
3358 			} else {
3359 				/* Queue this operation and allocate the cluster */
3360 				spdk_bs_user_op_t *op;
3361 
3362 				op = bs_user_op_alloc(_channel, &cpl, SPDK_BLOB_WRITEV, blob, iov, iovcnt, offset,
3363 						      length);
3364 				if (!op) {
3365 					cb_fn(cb_arg, -ENOMEM);
3366 					return;
3367 				}
3368 
3369 				op->ext_io_opts = ext_io_opts;
3370 
3371 				bs_allocate_and_copy_cluster(blob, _channel, offset, op);
3372 			}
3373 		}
3374 	} else {
3375 		struct rw_iov_ctx *ctx;
3376 
3377 		ctx = calloc(1, sizeof(struct rw_iov_ctx) + iovcnt * sizeof(struct iovec));
3378 		if (ctx == NULL) {
3379 			cb_fn(cb_arg, -ENOMEM);
3380 			return;
3381 		}
3382 
3383 		ctx->blob = blob;
3384 		ctx->channel = _channel;
3385 		ctx->cb_fn = cb_fn;
3386 		ctx->cb_arg = cb_arg;
3387 		ctx->read = read;
3388 		ctx->orig_iov = iov;
3389 		ctx->iovcnt = iovcnt;
3390 		ctx->io_unit_offset = offset;
3391 		ctx->io_units_remaining = length;
3392 		ctx->io_units_done = 0;
3393 		ctx->ext_io_opts = ext_io_opts;
3394 
3395 		rw_iov_split_next(ctx, 0);
3396 	}
3397 }
3398 
3399 static struct spdk_blob *
3400 blob_lookup(struct spdk_blob_store *bs, spdk_blob_id blobid)
3401 {
3402 	struct spdk_blob find;
3403 
3404 	if (spdk_bit_array_get(bs->open_blobids, blobid) == 0) {
3405 		return NULL;
3406 	}
3407 
3408 	find.id = blobid;
3409 	return RB_FIND(spdk_blob_tree, &bs->open_blobs, &find);
3410 }
3411 
3412 static void
3413 blob_get_snapshot_and_clone_entries(struct spdk_blob *blob,
3414 				    struct spdk_blob_list **snapshot_entry, struct spdk_blob_list **clone_entry)
3415 {
3416 	assert(blob != NULL);
3417 	*snapshot_entry = NULL;
3418 	*clone_entry = NULL;
3419 
3420 	if (blob->parent_id == SPDK_BLOBID_INVALID) {
3421 		return;
3422 	}
3423 
3424 	TAILQ_FOREACH(*snapshot_entry, &blob->bs->snapshots, link) {
3425 		if ((*snapshot_entry)->id == blob->parent_id) {
3426 			break;
3427 		}
3428 	}
3429 
3430 	if (*snapshot_entry != NULL) {
3431 		TAILQ_FOREACH(*clone_entry, &(*snapshot_entry)->clones, link) {
3432 			if ((*clone_entry)->id == blob->id) {
3433 				break;
3434 			}
3435 		}
3436 
3437 		assert(*clone_entry != NULL);
3438 	}
3439 }
3440 
3441 static int
3442 bs_channel_create(void *io_device, void *ctx_buf)
3443 {
3444 	struct spdk_blob_store		*bs = io_device;
3445 	struct spdk_bs_channel		*channel = ctx_buf;
3446 	struct spdk_bs_dev		*dev;
3447 	uint32_t			max_ops = bs->max_channel_ops;
3448 	uint32_t			i;
3449 
3450 	dev = bs->dev;
3451 
3452 	channel->req_mem = calloc(max_ops, sizeof(struct spdk_bs_request_set));
3453 	if (!channel->req_mem) {
3454 		return -1;
3455 	}
3456 
3457 	TAILQ_INIT(&channel->reqs);
3458 
3459 	for (i = 0; i < max_ops; i++) {
3460 		TAILQ_INSERT_TAIL(&channel->reqs, &channel->req_mem[i], link);
3461 	}
3462 
3463 	channel->bs = bs;
3464 	channel->dev = dev;
3465 	channel->dev_channel = dev->create_channel(dev);
3466 
3467 	if (!channel->dev_channel) {
3468 		SPDK_ERRLOG("Failed to create device channel.\n");
3469 		free(channel->req_mem);
3470 		return -1;
3471 	}
3472 
3473 	channel->new_cluster_page = spdk_zmalloc(SPDK_BS_PAGE_SIZE, 0, NULL, SPDK_ENV_SOCKET_ID_ANY,
3474 				    SPDK_MALLOC_DMA);
3475 	if (!channel->new_cluster_page) {
3476 		SPDK_ERRLOG("Failed to allocate new cluster page\n");
3477 		free(channel->req_mem);
3478 		channel->dev->destroy_channel(channel->dev, channel->dev_channel);
3479 		return -1;
3480 	}
3481 
3482 	TAILQ_INIT(&channel->need_cluster_alloc);
3483 	TAILQ_INIT(&channel->queued_io);
3484 	RB_INIT(&channel->esnap_channels);
3485 
3486 	return 0;
3487 }
3488 
3489 static void
3490 bs_channel_destroy(void *io_device, void *ctx_buf)
3491 {
3492 	struct spdk_bs_channel *channel = ctx_buf;
3493 	spdk_bs_user_op_t *op;
3494 
3495 	while (!TAILQ_EMPTY(&channel->need_cluster_alloc)) {
3496 		op = TAILQ_FIRST(&channel->need_cluster_alloc);
3497 		TAILQ_REMOVE(&channel->need_cluster_alloc, op, link);
3498 		bs_user_op_abort(op, -EIO);
3499 	}
3500 
3501 	while (!TAILQ_EMPTY(&channel->queued_io)) {
3502 		op = TAILQ_FIRST(&channel->queued_io);
3503 		TAILQ_REMOVE(&channel->queued_io, op, link);
3504 		bs_user_op_abort(op, -EIO);
3505 	}
3506 
3507 	blob_esnap_destroy_bs_channel(channel);
3508 
3509 	free(channel->req_mem);
3510 	spdk_free(channel->new_cluster_page);
3511 	channel->dev->destroy_channel(channel->dev, channel->dev_channel);
3512 }
3513 
3514 static void
3515 bs_dev_destroy(void *io_device)
3516 {
3517 	struct spdk_blob_store *bs = io_device;
3518 	struct spdk_blob	*blob, *blob_tmp;
3519 
3520 	bs->dev->destroy(bs->dev);
3521 
3522 	RB_FOREACH_SAFE(blob, spdk_blob_tree, &bs->open_blobs, blob_tmp) {
3523 		RB_REMOVE(spdk_blob_tree, &bs->open_blobs, blob);
3524 		spdk_bit_array_clear(bs->open_blobids, blob->id);
3525 		blob_free(blob);
3526 	}
3527 
3528 	spdk_spin_destroy(&bs->used_lock);
3529 
3530 	spdk_bit_array_free(&bs->open_blobids);
3531 	spdk_bit_array_free(&bs->used_blobids);
3532 	spdk_bit_array_free(&bs->used_md_pages);
3533 	spdk_bit_pool_free(&bs->used_clusters);
3534 	/*
3535 	 * If this function is called for any reason except a successful unload,
3536 	 * the unload_cpl type will be NONE and this will be a nop.
3537 	 */
3538 	bs_call_cpl(&bs->unload_cpl, bs->unload_err);
3539 
3540 	free(bs);
3541 }
3542 
3543 static int
3544 bs_blob_list_add(struct spdk_blob *blob)
3545 {
3546 	spdk_blob_id snapshot_id;
3547 	struct spdk_blob_list *snapshot_entry = NULL;
3548 	struct spdk_blob_list *clone_entry = NULL;
3549 
3550 	assert(blob != NULL);
3551 
3552 	snapshot_id = blob->parent_id;
3553 	if (snapshot_id == SPDK_BLOBID_INVALID ||
3554 	    snapshot_id == SPDK_BLOBID_EXTERNAL_SNAPSHOT) {
3555 		return 0;
3556 	}
3557 
3558 	snapshot_entry = bs_get_snapshot_entry(blob->bs, snapshot_id);
3559 	if (snapshot_entry == NULL) {
3560 		/* Snapshot not found */
3561 		snapshot_entry = calloc(1, sizeof(struct spdk_blob_list));
3562 		if (snapshot_entry == NULL) {
3563 			return -ENOMEM;
3564 		}
3565 		snapshot_entry->id = snapshot_id;
3566 		TAILQ_INIT(&snapshot_entry->clones);
3567 		TAILQ_INSERT_TAIL(&blob->bs->snapshots, snapshot_entry, link);
3568 	} else {
3569 		TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) {
3570 			if (clone_entry->id == blob->id) {
3571 				break;
3572 			}
3573 		}
3574 	}
3575 
3576 	if (clone_entry == NULL) {
3577 		/* Clone not found */
3578 		clone_entry = calloc(1, sizeof(struct spdk_blob_list));
3579 		if (clone_entry == NULL) {
3580 			return -ENOMEM;
3581 		}
3582 		clone_entry->id = blob->id;
3583 		TAILQ_INIT(&clone_entry->clones);
3584 		TAILQ_INSERT_TAIL(&snapshot_entry->clones, clone_entry, link);
3585 		snapshot_entry->clone_count++;
3586 	}
3587 
3588 	return 0;
3589 }
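
/*
 * Editor's sketch of the structure built above: bs->snapshots holds one
 * spdk_blob_list entry per snapshot id, and each entry's ->clones list holds
 * the blobs whose parent_id refers to that snapshot:
 *
 *	snapshots: [snap A] -> clones: [blob X] -> [blob Y]
 *	           [snap B] -> clones: [blob Z]
 */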
3590 
3591 static void
3592 bs_blob_list_remove(struct spdk_blob *blob)
3593 {
3594 	struct spdk_blob_list *snapshot_entry = NULL;
3595 	struct spdk_blob_list *clone_entry = NULL;
3596 
3597 	blob_get_snapshot_and_clone_entries(blob, &snapshot_entry, &clone_entry);
3598 
3599 	if (snapshot_entry == NULL) {
3600 		return;
3601 	}
3602 
3603 	blob->parent_id = SPDK_BLOBID_INVALID;
3604 	TAILQ_REMOVE(&snapshot_entry->clones, clone_entry, link);
3605 	free(clone_entry);
3606 
3607 	snapshot_entry->clone_count--;
3608 }
3609 
3610 static int
3611 bs_blob_list_free(struct spdk_blob_store *bs)
3612 {
3613 	struct spdk_blob_list *snapshot_entry;
3614 	struct spdk_blob_list *snapshot_entry_tmp;
3615 	struct spdk_blob_list *clone_entry;
3616 	struct spdk_blob_list *clone_entry_tmp;
3617 
3618 	TAILQ_FOREACH_SAFE(snapshot_entry, &bs->snapshots, link, snapshot_entry_tmp) {
3619 		TAILQ_FOREACH_SAFE(clone_entry, &snapshot_entry->clones, link, clone_entry_tmp) {
3620 			TAILQ_REMOVE(&snapshot_entry->clones, clone_entry, link);
3621 			free(clone_entry);
3622 		}
3623 		TAILQ_REMOVE(&bs->snapshots, snapshot_entry, link);
3624 		free(snapshot_entry);
3625 	}
3626 
3627 	return 0;
3628 }
3629 
3630 static void
3631 bs_free(struct spdk_blob_store *bs)
3632 {
3633 	bs_blob_list_free(bs);
3634 
3635 	bs_unregister_md_thread(bs);
3636 	spdk_io_device_unregister(bs, bs_dev_destroy);
3637 }
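
/*
 * Editor's note: spdk_io_device_unregister() defers the bs_dev_destroy()
 * callback until every channel created by bs_channel_create() has been
 * released, so the blobstore and its dev outlive any in-flight channel use.
 */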
3638 
3639 void
3640 spdk_bs_opts_init(struct spdk_bs_opts *opts, size_t opts_size)
3641 {
3642 
3643 	if (!opts) {
3644 		SPDK_ERRLOG("opts should not be NULL\n");
3645 		return;
3646 	}
3647 
3648 	if (!opts_size) {
3649 		SPDK_ERRLOG("opts_size should not be zero\n");
3650 		return;
3651 	}
3652 
3653 	memset(opts, 0, opts_size);
3654 	opts->opts_size = opts_size;
3655 
3656 #define FIELD_OK(field) \
3657 	offsetof(struct spdk_bs_opts, field) + sizeof(opts->field) <= opts_size
3658 
3659 #define SET_FIELD(field, value) \
3660 	if (FIELD_OK(field)) { \
3661 		opts->field = value; \
3662 	} \
3663 
3664 	SET_FIELD(cluster_sz, SPDK_BLOB_OPTS_CLUSTER_SZ);
3665 	SET_FIELD(num_md_pages, SPDK_BLOB_OPTS_NUM_MD_PAGES);
3666 	SET_FIELD(max_md_ops, SPDK_BLOB_OPTS_NUM_MD_PAGES);
3667 	SET_FIELD(max_channel_ops, SPDK_BLOB_OPTS_DEFAULT_CHANNEL_OPS);
3668 	SET_FIELD(clear_method,  BS_CLEAR_WITH_UNMAP);
3669 
3670 	if (FIELD_OK(bstype)) {
3671 		memset(&opts->bstype, 0, sizeof(opts->bstype));
3672 	}
3673 
3674 	SET_FIELD(iter_cb_fn, NULL);
3675 	SET_FIELD(iter_cb_arg, NULL);
3676 	SET_FIELD(force_recover, false);
3677 	SET_FIELD(esnap_bs_dev_create, NULL);
3678 	SET_FIELD(esnap_ctx, NULL);
3679 
3680 #undef FIELD_OK
3681 #undef SET_FIELD
3682 }
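
/*
 * Illustrative usage sketch (editor's addition; load_cb/load_cb_arg are
 * hypothetical): callers size-initialize the options so that fields added in
 * newer versions still get sane defaults when an older caller passes a
 * smaller opts_size:
 *
 *	struct spdk_bs_opts opts;
 *
 *	spdk_bs_opts_init(&opts, sizeof(opts));
 *	opts.cluster_sz = 4 * 1024 * 1024;	// override the default
 *	spdk_bs_load(dev, &opts, load_cb, load_cb_arg);
 */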
3683 
3684 static int
3685 bs_opts_verify(struct spdk_bs_opts *opts)
3686 {
3687 	if (opts->cluster_sz == 0 || opts->num_md_pages == 0 || opts->max_md_ops == 0 ||
3688 	    opts->max_channel_ops == 0) {
3689 		SPDK_ERRLOG("Blobstore options cannot be set to 0\n");
3690 		return -1;
3691 	}
3692 
3693 	return 0;
3694 }
3695 
3696 /* START spdk_bs_load */
3697 
3698 /* spdk_bs_load_ctx is used for init, load, unload and dump code paths. */
3699 
3700 struct spdk_bs_load_ctx {
3701 	struct spdk_blob_store		*bs;
3702 	struct spdk_bs_super_block	*super;
3703 
3704 	struct spdk_bs_md_mask		*mask;
3705 	bool				in_page_chain;
3706 	uint32_t			page_index;
3707 	uint32_t			cur_page;
3708 	struct spdk_blob_md_page	*page;
3709 
3710 	uint64_t			num_extent_pages;
3711 	uint32_t			*extent_page_num;
3712 	struct spdk_blob_md_page	*extent_pages;
3713 	struct spdk_bit_array		*used_clusters;
3714 
3715 	spdk_bs_sequence_t			*seq;
3716 	spdk_blob_op_with_handle_complete	iter_cb_fn;
3717 	void					*iter_cb_arg;
3718 	struct spdk_blob			*blob;
3719 	spdk_blob_id				blobid;
3720 
3721 	bool					force_recover;
3722 
3723 	/* These fields are used in the spdk_bs_dump path. */
3724 	bool					dumping;
3725 	FILE					*fp;
3726 	spdk_bs_dump_print_xattr		print_xattr_fn;
3727 	char					xattr_name[4096];
3728 };
3729 
3730 static int
3731 bs_alloc(struct spdk_bs_dev *dev, struct spdk_bs_opts *opts, struct spdk_blob_store **_bs,
3732 	 struct spdk_bs_load_ctx **_ctx)
3733 {
3734 	struct spdk_blob_store	*bs;
3735 	struct spdk_bs_load_ctx	*ctx;
3736 	uint64_t dev_size;
3737 	int rc;
3738 
3739 	dev_size = dev->blocklen * dev->blockcnt;
3740 	if (dev_size < opts->cluster_sz) {
3741 		/* Device size cannot be smaller than cluster size of blobstore */
3742 		SPDK_INFOLOG(blob, "Device size %" PRIu64 " is smaller than cluster size %" PRIu32 "\n",
3743 			     dev_size, opts->cluster_sz);
3744 		return -ENOSPC;
3745 	}
3746 	if (opts->cluster_sz < SPDK_BS_PAGE_SIZE) {
3747 		/* Cluster size cannot be smaller than page size */
3748 		SPDK_ERRLOG("Cluster size %" PRIu32 " is smaller than page size %d\n",
3749 			    opts->cluster_sz, SPDK_BS_PAGE_SIZE);
3750 		return -EINVAL;
3751 	}
3752 	bs = calloc(1, sizeof(struct spdk_blob_store));
3753 	if (!bs) {
3754 		return -ENOMEM;
3755 	}
3756 
3757 	ctx = calloc(1, sizeof(struct spdk_bs_load_ctx));
3758 	if (!ctx) {
3759 		free(bs);
3760 		return -ENOMEM;
3761 	}
3762 
3763 	ctx->bs = bs;
3764 	ctx->iter_cb_fn = opts->iter_cb_fn;
3765 	ctx->iter_cb_arg = opts->iter_cb_arg;
3766 	ctx->force_recover = opts->force_recover;
3767 
3768 	ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL,
3769 				  SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
3770 	if (!ctx->super) {
3771 		free(ctx);
3772 		free(bs);
3773 		return -ENOMEM;
3774 	}
3775 
3776 	RB_INIT(&bs->open_blobs);
3777 	TAILQ_INIT(&bs->snapshots);
3778 	bs->dev = dev;
3779 	bs->md_thread = spdk_get_thread();
3780 	assert(bs->md_thread != NULL);
3781 
3782 	/*
3783 	 * Do not use bs_lba_to_cluster() here since blockcnt may not be an
3784 	 *  even multiple of the cluster size.
3785 	 */
3786 	bs->cluster_sz = opts->cluster_sz;
3787 	bs->total_clusters = dev->blockcnt / (bs->cluster_sz / dev->blocklen);
3788 	ctx->used_clusters = spdk_bit_array_create(bs->total_clusters);
3789 	if (!ctx->used_clusters) {
3790 		spdk_free(ctx->super);
3791 		free(ctx);
3792 		free(bs);
3793 		return -ENOMEM;
3794 	}
3795 
3796 	bs->pages_per_cluster = bs->cluster_sz / SPDK_BS_PAGE_SIZE;
3797 	if (spdk_u32_is_pow2(bs->pages_per_cluster)) {
3798 		bs->pages_per_cluster_shift = spdk_u32log2(bs->pages_per_cluster);
3799 	}
3800 	bs->num_free_clusters = bs->total_clusters;
3801 	bs->io_unit_size = dev->blocklen;
3802 
3803 	bs->max_channel_ops = opts->max_channel_ops;
3804 	bs->super_blob = SPDK_BLOBID_INVALID;
3805 	memcpy(&bs->bstype, &opts->bstype, sizeof(opts->bstype));
3806 	bs->esnap_bs_dev_create = opts->esnap_bs_dev_create;
3807 	bs->esnap_ctx = opts->esnap_ctx;
3808 
3809 	/* The metadata is assumed to be at least 1 page */
3810 	bs->used_md_pages = spdk_bit_array_create(1);
3811 	bs->used_blobids = spdk_bit_array_create(0);
3812 	bs->open_blobids = spdk_bit_array_create(0);
3813 
3814 	spdk_spin_init(&bs->used_lock);
3815 
3816 	spdk_io_device_register(bs, bs_channel_create, bs_channel_destroy,
3817 				sizeof(struct spdk_bs_channel), "blobstore");
3818 	rc = bs_register_md_thread(bs);
3819 	if (rc == -1) {
3820 		spdk_io_device_unregister(bs, NULL);
3821 		spdk_spin_destroy(&bs->used_lock);
3822 		spdk_bit_array_free(&bs->open_blobids);
3823 		spdk_bit_array_free(&bs->used_blobids);
3824 		spdk_bit_array_free(&bs->used_md_pages);
3825 		spdk_bit_array_free(&ctx->used_clusters);
3826 		spdk_free(ctx->super);
3827 		free(ctx);
3828 		free(bs);
3829 		/* FIXME: this is a lie but don't know how to get a proper error code here */
3830 		return -ENOMEM;
3831 	}
3832 
3833 	*_ctx = ctx;
3834 	*_bs = bs;
3835 	return 0;
3836 }
3837 
3838 static void
3839 bs_load_ctx_fail(struct spdk_bs_load_ctx *ctx, int bserrno)
3840 {
3841 	assert(bserrno != 0);
3842 
3843 	spdk_free(ctx->super);
3844 	bs_sequence_finish(ctx->seq, bserrno);
3845 	bs_free(ctx->bs);
3846 	spdk_bit_array_free(&ctx->used_clusters);
3847 	free(ctx);
3848 }
3849 
3850 static void
3851 bs_write_super(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs,
3852 	       struct spdk_bs_super_block *super, spdk_bs_sequence_cpl cb_fn, void *cb_arg)
3853 {
3854 	/* Update the values in the super block */
3855 	super->super_blob = bs->super_blob;
3856 	memcpy(&super->bstype, &bs->bstype, sizeof(bs->bstype));
3857 	super->crc = blob_md_page_calc_crc(super);
3858 	bs_sequence_write_dev(seq, super, bs_page_to_lba(bs, 0),
3859 			      bs_byte_to_lba(bs, sizeof(*super)),
3860 			      cb_fn, cb_arg);
3861 }
3862 
3863 static void
3864 bs_write_used_clusters(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn)
3865 {
3866 	struct spdk_bs_load_ctx	*ctx = arg;
3867 	uint64_t	mask_size, lba, lba_count;
3868 
3869 	/* Write out the used clusters mask */
3870 	mask_size = ctx->super->used_cluster_mask_len * SPDK_BS_PAGE_SIZE;
3871 	ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL,
3872 				 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
3873 	if (!ctx->mask) {
3874 		bs_load_ctx_fail(ctx, -ENOMEM);
3875 		return;
3876 	}
3877 
3878 	ctx->mask->type = SPDK_MD_MASK_TYPE_USED_CLUSTERS;
3879 	ctx->mask->length = ctx->bs->total_clusters;
3880 	/* We could get here through the normal unload path, or through dirty
3881 	 * shutdown recovery.  For the normal unload path, we use the mask from
3882 	 * the bit pool.  For dirty shutdown recovery, we don't have a bit pool yet -
3883 	 * only the bit array from the load ctx.
3884 	 */
3885 	if (ctx->bs->used_clusters) {
3886 		assert(ctx->mask->length == spdk_bit_pool_capacity(ctx->bs->used_clusters));
3887 		spdk_bit_pool_store_mask(ctx->bs->used_clusters, ctx->mask->mask);
3888 	} else {
3889 		assert(ctx->mask->length == spdk_bit_array_capacity(ctx->used_clusters));
3890 		spdk_bit_array_store_mask(ctx->used_clusters, ctx->mask->mask);
3891 	}
3892 	lba = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start);
3893 	lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len);
3894 	bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg);
3895 }
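
/*
 * Worked example (editor's sketch, assumed geometry): a 1 GiB blobstore with
 * 1 MiB clusters has 1024 clusters, so the used-cluster mask needs 1024 bits
 * (128 bytes) plus the mask header and fits in a single 4 KiB metadata page,
 * i.e. used_cluster_mask_len == 1.
 */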
3896 
3897 static void
3898 bs_write_used_md(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn)
3899 {
3900 	struct spdk_bs_load_ctx	*ctx = arg;
3901 	uint64_t	mask_size, lba, lba_count;
3902 
3903 	mask_size = ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE;
3904 	ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL,
3905 				 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
3906 	if (!ctx->mask) {
3907 		bs_load_ctx_fail(ctx, -ENOMEM);
3908 		return;
3909 	}
3910 
3911 	ctx->mask->type = SPDK_MD_MASK_TYPE_USED_PAGES;
3912 	ctx->mask->length = ctx->super->md_len;
3913 	assert(ctx->mask->length == spdk_bit_array_capacity(ctx->bs->used_md_pages));
3914 
3915 	spdk_bit_array_store_mask(ctx->bs->used_md_pages, ctx->mask->mask);
3916 	lba = bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_start);
3917 	lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_len);
3918 	bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg);
3919 }
3920 
3921 static void
3922 bs_write_used_blobids(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn)
3923 {
3924 	struct spdk_bs_load_ctx	*ctx = arg;
3925 	uint64_t	mask_size, lba, lba_count;
3926 
3927 	if (ctx->super->used_blobid_mask_len == 0) {
3928 		/*
3929 		 * This is a pre-v3 on-disk format where the blobid mask does not get
3930 		 *  written to disk.
3931 		 */
3932 		cb_fn(seq, arg, 0);
3933 		return;
3934 	}
3935 
3936 	mask_size = ctx->super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE;
3937 	ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_SOCKET_ID_ANY,
3938 				 SPDK_MALLOC_DMA);
3939 	if (!ctx->mask) {
3940 		bs_load_ctx_fail(ctx, -ENOMEM);
3941 		return;
3942 	}
3943 
3944 	ctx->mask->type = SPDK_MD_MASK_TYPE_USED_BLOBIDS;
3945 	ctx->mask->length = ctx->super->md_len;
3946 	assert(ctx->mask->length == spdk_bit_array_capacity(ctx->bs->used_blobids));
3947 
3948 	spdk_bit_array_store_mask(ctx->bs->used_blobids, ctx->mask->mask);
3949 	lba = bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_start);
3950 	lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_len);
3951 	bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg);
3952 }
3953 
3954 static void
3955 blob_set_thin_provision(struct spdk_blob *blob)
3956 {
3957 	blob_verify_md_op(blob);
3958 	blob->invalid_flags |= SPDK_BLOB_THIN_PROV;
3959 	blob->state = SPDK_BLOB_STATE_DIRTY;
3960 }
3961 
3962 static void
3963 blob_set_clear_method(struct spdk_blob *blob, enum blob_clear_method clear_method)
3964 {
3965 	blob_verify_md_op(blob);
3966 	blob->clear_method = clear_method;
3967 	blob->md_ro_flags |= (clear_method << SPDK_BLOB_CLEAR_METHOD_SHIFT);
3968 	blob->state = SPDK_BLOB_STATE_DIRTY;
3969 }
3970 
3971 static void bs_load_iter(void *arg, struct spdk_blob *blob, int bserrno);
3972 
3973 static void
3974 bs_delete_corrupted_blob_cpl(void *cb_arg, int bserrno)
3975 {
3976 	struct spdk_bs_load_ctx *ctx = cb_arg;
3977 	spdk_blob_id id;
3978 	int64_t page_num;
3979 
3980 	/* Iterate to the next blob (we can't use spdk_bs_iter_next() since our
3981 	 * last blob has been removed). */
3982 	page_num = bs_blobid_to_page(ctx->blobid);
3983 	page_num++;
3984 	page_num = spdk_bit_array_find_first_set(ctx->bs->used_blobids, page_num);
3985 	if (page_num >= spdk_bit_array_capacity(ctx->bs->used_blobids)) {
3986 		bs_load_iter(ctx, NULL, -ENOENT);
3987 		return;
3988 	}
3989 
3990 	id = bs_page_to_blobid(page_num);
3991 
3992 	spdk_bs_open_blob(ctx->bs, id, bs_load_iter, ctx);
3993 }
3994 
3995 static void
3996 bs_delete_corrupted_close_cb(void *cb_arg, int bserrno)
3997 {
3998 	struct spdk_bs_load_ctx *ctx = cb_arg;
3999 
4000 	if (bserrno != 0) {
4001 		SPDK_ERRLOG("Failed to close corrupted blob\n");
4002 		spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx);
4003 		return;
4004 	}
4005 
4006 	spdk_bs_delete_blob(ctx->bs, ctx->blobid, bs_delete_corrupted_blob_cpl, ctx);
4007 }
4008 
4009 static void
4010 bs_delete_corrupted_blob(void *cb_arg, int bserrno)
4011 {
4012 	struct spdk_bs_load_ctx *ctx = cb_arg;
4013 	uint64_t i;
4014 
4015 	if (bserrno != 0) {
4016 		SPDK_ERRLOG("Failed to close clone of a corrupted blob\n");
4017 		spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx);
4018 		return;
4019 	}
4020 
4021 	/* The snapshot and the clone share the same copy of the cluster map and
4022 	 * extent pages at this point. Clear both for the snapshot now, so that
4023 	 * they won't be cleared for the clone later when we remove the snapshot.
4024 	 * Also set thin provisioning to pass the data corruption check. */
4025 	for (i = 0; i < ctx->blob->active.num_clusters; i++) {
4026 		ctx->blob->active.clusters[i] = 0;
4027 	}
4028 	for (i = 0; i < ctx->blob->active.num_extent_pages; i++) {
4029 		ctx->blob->active.extent_pages[i] = 0;
4030 	}
4031 
4032 	ctx->blob->md_ro = false;
4033 
4034 	blob_set_thin_provision(ctx->blob);
4035 
4036 	ctx->blobid = ctx->blob->id;
4037 
4038 	spdk_blob_close(ctx->blob, bs_delete_corrupted_close_cb, ctx);
4039 }
4040 
4041 static void
4042 bs_update_corrupted_blob(void *cb_arg, int bserrno)
4043 {
4044 	struct spdk_bs_load_ctx *ctx = cb_arg;
4045 
4046 	if (bserrno != 0) {
4047 		SPDK_ERRLOG("Failed to close clone of a corrupted blob\n");
4048 		spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx);
4049 		return;
4050 	}
4051 
4052 	ctx->blob->md_ro = false;
4053 	blob_remove_xattr(ctx->blob, SNAPSHOT_PENDING_REMOVAL, true);
4054 	blob_remove_xattr(ctx->blob, SNAPSHOT_IN_PROGRESS, true);
4055 	spdk_blob_set_read_only(ctx->blob);
4056 
4057 	if (ctx->iter_cb_fn) {
4058 		ctx->iter_cb_fn(ctx->iter_cb_arg, ctx->blob, 0);
4059 	}
4060 	bs_blob_list_add(ctx->blob);
4061 
4062 	spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx);
4063 }
4064 
4065 static void
4066 bs_examine_clone(void *cb_arg, struct spdk_blob *blob, int bserrno)
4067 {
4068 	struct spdk_bs_load_ctx *ctx = cb_arg;
4069 
4070 	if (bserrno != 0) {
4071 		SPDK_ERRLOG("Failed to open clone of a corrupted blob\n");
4072 		spdk_bs_iter_next(ctx->bs, ctx->blob, bs_load_iter, ctx);
4073 		return;
4074 	}
4075 
4076 	if (blob->parent_id == ctx->blob->id) {
4077 		/* Power failure occurred before updating clone (snapshot delete case)
4078 		 * or after updating clone (creating snapshot case) - keep snapshot */
4079 		spdk_blob_close(blob, bs_update_corrupted_blob, ctx);
4080 	} else {
4081 		/* Power failure occurred after updating clone (snapshot delete case)
4082 		 * or before updating clone (creating snapshot case) - remove snapshot */
4083 		spdk_blob_close(blob, bs_delete_corrupted_blob, ctx);
4084 	}
4085 }
4086 
4087 static void
4088 bs_load_iter(void *arg, struct spdk_blob *blob, int bserrno)
4089 {
4090 	struct spdk_bs_load_ctx *ctx = arg;
4091 	const void *value;
4092 	size_t len;
4093 	int rc = 0;
4094 
4095 	if (bserrno == 0) {
4096 		/* Examine the blob to see if it was corrupted by a power failure.
4097 		 * Fix the ones that can be fixed and remove any other corrupted
4098 		 * ones. If it is not corrupted, just process it. */
4099 		rc = blob_get_xattr_value(blob, SNAPSHOT_PENDING_REMOVAL, &value, &len, true);
4100 		if (rc != 0) {
4101 			rc = blob_get_xattr_value(blob, SNAPSHOT_IN_PROGRESS, &value, &len, true);
4102 			if (rc != 0) {
4103 				/* Not corrupted - process it and continue with iterating through blobs */
4104 				if (ctx->iter_cb_fn) {
4105 					ctx->iter_cb_fn(ctx->iter_cb_arg, blob, 0);
4106 				}
4107 				bs_blob_list_add(blob);
4108 				spdk_bs_iter_next(ctx->bs, blob, bs_load_iter, ctx);
4109 				return;
4110 			}
4111 
4112 		}
4113 
4114 		assert(len == sizeof(spdk_blob_id));
4115 
4116 		ctx->blob = blob;
4117 
4118 		/* Open the clone to check whether we can fix this blob or should remove it. */
4119 		spdk_bs_open_blob(ctx->bs, *(spdk_blob_id *)value, bs_examine_clone, ctx);
4120 		return;
4121 	} else if (bserrno == -ENOENT) {
4122 		bserrno = 0;
4123 	} else {
4124 		/*
4125 		 * This case needs to be looked at further.  The same problem
4126 		 *  exists with applications that rely on explicit blob
4127 		 *  iteration.  We should just skip the blob that failed
4128 		 *  to load and continue on to the next one.
4129 		 */
4130 		SPDK_ERRLOG("Error in iterating blobs\n");
4131 	}
4132 
4133 	ctx->iter_cb_fn = NULL;
4134 
4135 	spdk_free(ctx->super);
4136 	spdk_free(ctx->mask);
4137 	bs_sequence_finish(ctx->seq, bserrno);
4138 	free(ctx);
4139 }
4140 
4141 static void bs_dump_read_md_page(spdk_bs_sequence_t *seq, void *cb_arg);
4142 
4143 static void
4144 bs_load_complete(struct spdk_bs_load_ctx *ctx)
4145 {
4146 	ctx->bs->used_clusters = spdk_bit_pool_create_from_array(ctx->used_clusters);
4147 	if (ctx->dumping) {
4148 		bs_dump_read_md_page(ctx->seq, ctx);
4149 		return;
4150 	}
4151 	spdk_bs_iter_first(ctx->bs, bs_load_iter, ctx);
4152 }
4153 
4154 static void
4155 bs_load_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4156 {
4157 	struct spdk_bs_load_ctx *ctx = cb_arg;
4158 	int rc;
4159 
4160 	/* The type must be correct */
4161 	assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_BLOBIDS);
4162 
4163 	/* The length of the mask (in bits) must not be greater than
4164 	 * the length of the buffer (converted to bits) */
4165 	assert(ctx->mask->length <= (ctx->super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE * 8));
4166 
4167 	/* The length of the mask must be exactly equal to the size
4168 	 * (in pages) of the metadata region */
4169 	assert(ctx->mask->length == ctx->super->md_len);
4170 
4171 	rc = spdk_bit_array_resize(&ctx->bs->used_blobids, ctx->mask->length);
4172 	if (rc < 0) {
4173 		spdk_free(ctx->mask);
4174 		bs_load_ctx_fail(ctx, rc);
4175 		return;
4176 	}
4177 
4178 	spdk_bit_array_load_mask(ctx->bs->used_blobids, ctx->mask->mask);
4179 	bs_load_complete(ctx);
4180 }
4181 
4182 static void
4183 bs_load_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4184 {
4185 	struct spdk_bs_load_ctx *ctx = cb_arg;
4186 	uint64_t		lba, lba_count, mask_size;
4187 	int			rc;
4188 
4189 	if (bserrno != 0) {
4190 		bs_load_ctx_fail(ctx, bserrno);
4191 		return;
4192 	}
4193 
4194 	/* The type must be correct */
4195 	assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_CLUSTERS);
4196 	/* The length of the mask (in bits) must not be greater than the length of the buffer (converted to bits) */
4197 	assert(ctx->mask->length <= (ctx->super->used_cluster_mask_len * sizeof(
4198 					     struct spdk_blob_md_page) * 8));
4199 	/*
4200 	 * The length of the mask must be equal to or larger than the total number of clusters. It may be
4201 	 * larger than the total number of clusters due to a failed spdk_bs_grow.
4202 	 */
4203 	assert(ctx->mask->length >= ctx->bs->total_clusters);
4204 	if (ctx->mask->length > ctx->bs->total_clusters) {
4205 		SPDK_WARNLOG("Shrinking the used_clusters mask length to total_clusters\n");
4206 		ctx->mask->length = ctx->bs->total_clusters;
4207 	}
4208 
4209 	rc = spdk_bit_array_resize(&ctx->used_clusters, ctx->mask->length);
4210 	if (rc < 0) {
4211 		spdk_free(ctx->mask);
4212 		bs_load_ctx_fail(ctx, rc);
4213 		return;
4214 	}
4215 
4216 	spdk_bit_array_load_mask(ctx->used_clusters, ctx->mask->mask);
4217 	ctx->bs->num_free_clusters = spdk_bit_array_count_clear(ctx->used_clusters);
4218 	assert(ctx->bs->num_free_clusters <= ctx->bs->total_clusters);
4219 
4220 	spdk_free(ctx->mask);
4221 
4222 	/* Read the used blobids mask */
4223 	mask_size = ctx->super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE;
4224 	ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_SOCKET_ID_ANY,
4225 				 SPDK_MALLOC_DMA);
4226 	if (!ctx->mask) {
4227 		bs_load_ctx_fail(ctx, -ENOMEM);
4228 		return;
4229 	}
4230 	lba = bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_start);
4231 	lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_len);
4232 	bs_sequence_read_dev(seq, ctx->mask, lba, lba_count,
4233 			     bs_load_used_blobids_cpl, ctx);
4234 }
4235 
4236 static void
4237 bs_load_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4238 {
4239 	struct spdk_bs_load_ctx *ctx = cb_arg;
4240 	uint64_t		lba, lba_count, mask_size;
4241 	int			rc;
4242 
4243 	if (bserrno != 0) {
4244 		bs_load_ctx_fail(ctx, bserrno);
4245 		return;
4246 	}
4247 
4248 	/* The type must be correct */
4249 	assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_PAGES);
4250 	/* The length of the mask (in bits) must not be greater than the length of the buffer (converted to bits) */
4251 	assert(ctx->mask->length <= (ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE *
4252 				     8));
4253 	/* The length of the mask must be exactly equal to the size (in pages) of the metadata region */
4254 	if (ctx->mask->length != ctx->super->md_len) {
4255 		SPDK_ERRLOG("mismatched md_len in used_pages mask: "
4256 			    "mask->length=%" PRIu32 " super->md_len=%" PRIu32 "\n",
4257 			    ctx->mask->length, ctx->super->md_len);
4258 		assert(false);
4259 	}
4260 
4261 	rc = spdk_bit_array_resize(&ctx->bs->used_md_pages, ctx->mask->length);
4262 	if (rc < 0) {
4263 		spdk_free(ctx->mask);
4264 		bs_load_ctx_fail(ctx, rc);
4265 		return;
4266 	}
4267 
4268 	spdk_bit_array_load_mask(ctx->bs->used_md_pages, ctx->mask->mask);
4269 	spdk_free(ctx->mask);
4270 
4271 	/* Read the used clusters mask */
4272 	mask_size = ctx->super->used_cluster_mask_len * SPDK_BS_PAGE_SIZE;
4273 	ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_SOCKET_ID_ANY,
4274 				 SPDK_MALLOC_DMA);
4275 	if (!ctx->mask) {
4276 		bs_load_ctx_fail(ctx, -ENOMEM);
4277 		return;
4278 	}
4279 	lba = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start);
4280 	lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len);
4281 	bs_sequence_read_dev(seq, ctx->mask, lba, lba_count,
4282 			     bs_load_used_clusters_cpl, ctx);
4283 }
4284 
4285 static void
4286 bs_load_read_used_pages(struct spdk_bs_load_ctx *ctx)
4287 {
4288 	uint64_t lba, lba_count, mask_size;
4289 
4290 	/* Read the used pages mask */
4291 	mask_size = ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE;
4292 	ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL,
4293 				 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
4294 	if (!ctx->mask) {
4295 		bs_load_ctx_fail(ctx, -ENOMEM);
4296 		return;
4297 	}
4298 
4299 	lba = bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_start);
4300 	lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_len);
4301 	bs_sequence_read_dev(ctx->seq, ctx->mask, lba, lba_count,
4302 			     bs_load_used_pages_cpl, ctx);
4303 }
4304 
4305 static int
4306 bs_load_replay_md_parse_page(struct spdk_bs_load_ctx *ctx, struct spdk_blob_md_page *page)
4307 {
4308 	struct spdk_blob_store *bs = ctx->bs;
4309 	struct spdk_blob_md_descriptor *desc;
4310 	size_t	cur_desc = 0;
4311 
4312 	desc = (struct spdk_blob_md_descriptor *)page->descriptors;
4313 	while (cur_desc < sizeof(page->descriptors)) {
4314 		if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) {
4315 			if (desc->length == 0) {
4316 				/* If padding and length are 0, this terminates the page */
4317 				break;
4318 			}
4319 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE) {
4320 			struct spdk_blob_md_descriptor_extent_rle	*desc_extent_rle;
4321 			unsigned int				i, j;
4322 			unsigned int				cluster_count = 0;
4323 			uint32_t				cluster_idx;
4324 
4325 			desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)desc;
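			/*
			 * Editor's sketch of the RLE layout: an entry
			 * { cluster_idx = 5, length = 3 } stands for physical
			 * clusters 5, 6 and 7, so the loop below marks
			 * cluster_idx + j for each member of the run.
			 */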
4326 
4327 			for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) {
4328 				for (j = 0; j < desc_extent_rle->extents[i].length; j++) {
4329 					cluster_idx = desc_extent_rle->extents[i].cluster_idx;
4330 					/*
4331 					 * cluster_idx = 0 means an unallocated cluster - don't mark that
4332 					 * in the used cluster map.
4333 					 */
4334 					if (cluster_idx != 0) {
4335 						SPDK_NOTICELOG("Recover: cluster %" PRIu32 "\n", cluster_idx + j);
4336 						spdk_bit_array_set(ctx->used_clusters, cluster_idx + j);
4337 						if (bs->num_free_clusters == 0) {
4338 							return -ENOSPC;
4339 						}
4340 						bs->num_free_clusters--;
4341 					}
4342 					cluster_count++;
4343 				}
4344 			}
4345 			if (cluster_count == 0) {
4346 				return -EINVAL;
4347 			}
4348 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) {
4349 			struct spdk_blob_md_descriptor_extent_page	*desc_extent;
4350 			uint32_t					i;
4351 			uint32_t					cluster_count = 0;
4352 			uint32_t					cluster_idx;
4353 			size_t						cluster_idx_length;
4354 
4355 			desc_extent = (struct spdk_blob_md_descriptor_extent_page *)desc;
4356 			cluster_idx_length = desc_extent->length - sizeof(desc_extent->start_cluster_idx);
4357 
4358 			if (desc_extent->length <= sizeof(desc_extent->start_cluster_idx) ||
4359 			    (cluster_idx_length % sizeof(desc_extent->cluster_idx[0]) != 0)) {
4360 				return -EINVAL;
4361 			}
4362 
4363 			for (i = 0; i < cluster_idx_length / sizeof(desc_extent->cluster_idx[0]); i++) {
4364 				cluster_idx = desc_extent->cluster_idx[i];
4365 				/*
4366 				 * cluster_idx = 0 means an unallocated cluster - don't mark that
4367 				 * in the used cluster map.
4368 				 */
4369 				if (cluster_idx != 0) {
4370 					if (cluster_idx < desc_extent->start_cluster_idx &&
4371 					    cluster_idx >= desc_extent->start_cluster_idx + cluster_count) {
4372 						return -EINVAL;
4373 					}
4374 					spdk_bit_array_set(ctx->used_clusters, cluster_idx);
4375 					if (bs->num_free_clusters == 0) {
4376 						return -ENOSPC;
4377 					}
4378 					bs->num_free_clusters--;
4379 				}
4380 				cluster_count++;
4381 			}
4382 
4383 			if (cluster_count == 0) {
4384 				return -EINVAL;
4385 			}
4386 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) {
4387 			/* Skip this item */
4388 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) {
4389 			/* Skip this item */
4390 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) {
4391 			/* Skip this item */
4392 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE) {
4393 			struct spdk_blob_md_descriptor_extent_table *desc_extent_table;
4394 			uint32_t num_extent_pages = ctx->num_extent_pages;
4395 			uint32_t i;
4396 			size_t extent_pages_length;
4397 			void *tmp;
4398 
4399 			desc_extent_table = (struct spdk_blob_md_descriptor_extent_table *)desc;
4400 			extent_pages_length = desc_extent_table->length - sizeof(desc_extent_table->num_clusters);
4401 
4402 			if (desc_extent_table->length == 0 ||
4403 			    (extent_pages_length % sizeof(desc_extent_table->extent_page[0]) != 0)) {
4404 				return -EINVAL;
4405 			}
4406 
4407 			for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) {
4408 				if (desc_extent_table->extent_page[i].page_idx != 0) {
4409 					if (desc_extent_table->extent_page[i].num_pages != 1) {
4410 						return -EINVAL;
4411 					}
4412 					num_extent_pages += 1;
4413 				}
4414 			}
4415 
4416 			if (num_extent_pages > 0) {
4417 				tmp = realloc(ctx->extent_page_num, num_extent_pages * sizeof(uint32_t));
4418 				if (tmp == NULL) {
4419 					return -ENOMEM;
4420 				}
4421 				ctx->extent_page_num = tmp;
4422 
4423 				/* Extent table entries contain md page numbers for extent pages.
4424 				 * Zeroes represent unallocated extent pages; those are run-length-encoded.
4425 				 */
4426 				for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) {
4427 					if (desc_extent_table->extent_page[i].page_idx != 0) {
4428 						ctx->extent_page_num[ctx->num_extent_pages] = desc_extent_table->extent_page[i].page_idx;
4429 						ctx->num_extent_pages += 1;
4430 					}
4431 				}
4432 			}
4433 		} else {
4434 			/* Error */
4435 			return -EINVAL;
4436 		}
4437 		/* Advance to the next descriptor */
4438 		cur_desc += sizeof(*desc) + desc->length;
4439 		if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) {
4440 			break;
4441 		}
4442 		desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc);
4443 	}
4444 	return 0;
4445 }
4446 
4447 static bool
4448 bs_load_cur_extent_page_valid(struct spdk_blob_md_page *page)
4449 {
4450 	uint32_t crc;
4451 	struct spdk_blob_md_descriptor *desc = (struct spdk_blob_md_descriptor *)page->descriptors;
4452 	size_t desc_len;
4453 
4454 	crc = blob_md_page_calc_crc(page);
4455 	if (crc != page->crc) {
4456 		return false;
4457 	}
4458 
4459 	/* Extent page should always have a sequence number of 0. */
4460 	if (page->sequence_num != 0) {
4461 		return false;
4462 	}
4463 
4464 	/* Descriptor type must be EXTENT_PAGE. */
4465 	if (desc->type != SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) {
4466 		return false;
4467 	}
4468 
4469 	/* Descriptor length cannot exceed the page. */
4470 	desc_len = sizeof(*desc) + desc->length;
4471 	if (desc_len > sizeof(page->descriptors)) {
4472 		return false;
4473 	}
4474 
4475 	/* It has to be the only descriptor in the page. */
4476 	if (desc_len + sizeof(*desc) <= sizeof(page->descriptors)) {
4477 		desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + desc_len);
4478 		if (desc->length != 0) {
4479 			return false;
4480 		}
4481 	}
4482 
4483 	return true;
4484 }
4485 
4486 static bool
4487 bs_load_cur_md_page_valid(struct spdk_bs_load_ctx *ctx)
4488 {
4489 	uint32_t crc;
4490 	struct spdk_blob_md_page *page = ctx->page;
4491 
4492 	crc = blob_md_page_calc_crc(page);
4493 	if (crc != page->crc) {
4494 		return false;
4495 	}
4496 
4497 	/* First page of a sequence should match the blobid. */
4498 	if (page->sequence_num == 0 &&
4499 	    bs_page_to_blobid(ctx->cur_page) != page->id) {
4500 		return false;
4501 	}
4502 	assert(bs_load_cur_extent_page_valid(page) == false);
4503 
4504 	return true;
4505 }
4506 
4507 static void bs_load_replay_cur_md_page(struct spdk_bs_load_ctx *ctx);
4508 
4509 static void
4510 bs_load_write_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4511 {
4512 	struct spdk_bs_load_ctx	*ctx = cb_arg;
4513 
4514 	if (bserrno != 0) {
4515 		bs_load_ctx_fail(ctx, bserrno);
4516 		return;
4517 	}
4518 
4519 	bs_load_complete(ctx);
4520 }
4521 
4522 static void
4523 bs_load_write_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4524 {
4525 	struct spdk_bs_load_ctx	*ctx = cb_arg;
4526 
4527 	spdk_free(ctx->mask);
4528 	ctx->mask = NULL;
4529 
4530 	if (bserrno != 0) {
4531 		bs_load_ctx_fail(ctx, bserrno);
4532 		return;
4533 	}
4534 
4535 	bs_write_used_clusters(seq, ctx, bs_load_write_used_clusters_cpl);
4536 }
4537 
4538 static void
4539 bs_load_write_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4540 {
4541 	struct spdk_bs_load_ctx	*ctx = cb_arg;
4542 
4543 	spdk_free(ctx->mask);
4544 	ctx->mask = NULL;
4545 
4546 	if (bserrno != 0) {
4547 		bs_load_ctx_fail(ctx, bserrno);
4548 		return;
4549 	}
4550 
4551 	bs_write_used_blobids(seq, ctx, bs_load_write_used_blobids_cpl);
4552 }
4553 
4554 static void
4555 bs_load_write_used_md(struct spdk_bs_load_ctx *ctx)
4556 {
4557 	bs_write_used_md(ctx->seq, ctx, bs_load_write_used_pages_cpl);
4558 }
4559 
4560 static void
4561 bs_load_replay_md_chain_cpl(struct spdk_bs_load_ctx *ctx)
4562 {
4563 	uint64_t num_md_clusters;
4564 	uint64_t i;
4565 
4566 	ctx->in_page_chain = false;
4567 
4568 	do {
4569 		ctx->page_index++;
4570 	} while (spdk_bit_array_get(ctx->bs->used_md_pages, ctx->page_index) == true);
4571 
4572 	if (ctx->page_index < ctx->super->md_len) {
4573 		ctx->cur_page = ctx->page_index;
4574 		bs_load_replay_cur_md_page(ctx);
4575 	} else {
4576 		/* Claim all of the clusters used by the metadata */
4577 		num_md_clusters = spdk_divide_round_up(
4578 					  ctx->super->md_start + ctx->super->md_len, ctx->bs->pages_per_cluster);
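		/*
		 * Worked example (editor's sketch, assumed geometry): with
		 * md_start = 1, md_len = 512 and pages_per_cluster = 256,
		 * ceil(513 / 256) = 3, so clusters 0 through 2 are claimed
		 * for metadata below.
		 */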
4579 		for (i = 0; i < num_md_clusters; i++) {
4580 			spdk_bit_array_set(ctx->used_clusters, i);
4581 		}
4582 		ctx->bs->num_free_clusters -= num_md_clusters;
4583 		spdk_free(ctx->page);
4584 		bs_load_write_used_md(ctx);
4585 	}
4586 }
4587 
4588 static void
4589 bs_load_replay_extent_page_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4590 {
4591 	struct spdk_bs_load_ctx *ctx = cb_arg;
4592 	uint32_t page_num;
4593 	uint64_t i;
4594 
4595 	if (bserrno != 0) {
4596 		spdk_free(ctx->extent_pages);
4597 		bs_load_ctx_fail(ctx, bserrno);
4598 		return;
4599 	}
4600 
4601 	for (i = 0; i < ctx->num_extent_pages; i++) {
4602 		/* Extent pages are only read when present within the md chain.
4603 		 * The md integrity is broken if that page was not a valid extent page. */
4604 		if (bs_load_cur_extent_page_valid(&ctx->extent_pages[i]) != true) {
4605 			spdk_free(ctx->extent_pages);
4606 			bs_load_ctx_fail(ctx, -EILSEQ);
4607 			return;
4608 		}
4609 
4610 		page_num = ctx->extent_page_num[i];
4611 		spdk_bit_array_set(ctx->bs->used_md_pages, page_num);
4612 		if (bs_load_replay_md_parse_page(ctx, &ctx->extent_pages[i])) {
4613 			spdk_free(ctx->extent_pages);
4614 			bs_load_ctx_fail(ctx, -EILSEQ);
4615 			return;
4616 		}
4617 	}
4618 
4619 	spdk_free(ctx->extent_pages);
4620 	free(ctx->extent_page_num);
4621 	ctx->extent_page_num = NULL;
4622 	ctx->num_extent_pages = 0;
4623 
4624 	bs_load_replay_md_chain_cpl(ctx);
4625 }
4626 
4627 static void
4628 bs_load_replay_extent_pages(struct spdk_bs_load_ctx *ctx)
4629 {
4630 	spdk_bs_batch_t *batch;
4631 	uint32_t page;
4632 	uint64_t lba;
4633 	uint64_t i;
4634 
4635 	ctx->extent_pages = spdk_zmalloc(SPDK_BS_PAGE_SIZE * ctx->num_extent_pages, 0,
4636 					 NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
4637 	if (!ctx->extent_pages) {
4638 		bs_load_ctx_fail(ctx, -ENOMEM);
4639 		return;
4640 	}
4641 
4642 	batch = bs_sequence_to_batch(ctx->seq, bs_load_replay_extent_page_cpl, ctx);
4643 
4644 	for (i = 0; i < ctx->num_extent_pages; i++) {
4645 		page = ctx->extent_page_num[i];
4646 		assert(page < ctx->super->md_len);
4647 		lba = bs_md_page_to_lba(ctx->bs, page);
4648 		bs_batch_read_dev(batch, &ctx->extent_pages[i], lba,
4649 				  bs_byte_to_lba(ctx->bs, SPDK_BS_PAGE_SIZE));
4650 	}
4651 
4652 	bs_batch_close(batch);
4653 }
4654 
4655 static void
4656 bs_load_replay_md_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4657 {
4658 	struct spdk_bs_load_ctx *ctx = cb_arg;
4659 	uint32_t page_num;
4660 	struct spdk_blob_md_page *page;
4661 
4662 	if (bserrno != 0) {
4663 		bs_load_ctx_fail(ctx, bserrno);
4664 		return;
4665 	}
4666 
4667 	page_num = ctx->cur_page;
4668 	page = ctx->page;
4669 	if (bs_load_cur_md_page_valid(ctx) == true) {
4670 		if (page->sequence_num == 0 || ctx->in_page_chain == true) {
4671 			spdk_spin_lock(&ctx->bs->used_lock);
4672 			bs_claim_md_page(ctx->bs, page_num);
4673 			spdk_spin_unlock(&ctx->bs->used_lock);
4674 			if (page->sequence_num == 0) {
4675 				SPDK_NOTICELOG("Recover: blob 0x%" PRIx32 "\n", page_num);
4676 				spdk_bit_array_set(ctx->bs->used_blobids, page_num);
4677 			}
4678 			if (bs_load_replay_md_parse_page(ctx, page)) {
4679 				bs_load_ctx_fail(ctx, -EILSEQ);
4680 				return;
4681 			}
4682 			if (page->next != SPDK_INVALID_MD_PAGE) {
4683 				ctx->in_page_chain = true;
4684 				ctx->cur_page = page->next;
4685 				bs_load_replay_cur_md_page(ctx);
4686 				return;
4687 			}
4688 			if (ctx->num_extent_pages != 0) {
4689 				bs_load_replay_extent_pages(ctx);
4690 				return;
4691 			}
4692 		}
4693 	}
4694 	bs_load_replay_md_chain_cpl(ctx);
4695 }
4696 
4697 static void
4698 bs_load_replay_cur_md_page(struct spdk_bs_load_ctx *ctx)
4699 {
4700 	uint64_t lba;
4701 
4702 	assert(ctx->cur_page < ctx->super->md_len);
4703 	lba = bs_md_page_to_lba(ctx->bs, ctx->cur_page);
4704 	bs_sequence_read_dev(ctx->seq, ctx->page, lba,
4705 			     bs_byte_to_lba(ctx->bs, SPDK_BS_PAGE_SIZE),
4706 			     bs_load_replay_md_cpl, ctx);
4707 }
4708 
4709 static void
4710 bs_load_replay_md(struct spdk_bs_load_ctx *ctx)
4711 {
4712 	ctx->page_index = 0;
4713 	ctx->cur_page = 0;
4714 	ctx->page = spdk_zmalloc(SPDK_BS_PAGE_SIZE, 0,
4715 				 NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
4716 	if (!ctx->page) {
4717 		bs_load_ctx_fail(ctx, -ENOMEM);
4718 		return;
4719 	}
4720 	bs_load_replay_cur_md_page(ctx);
4721 }
4722 
4723 static void
4724 bs_recover(struct spdk_bs_load_ctx *ctx)
4725 {
4726 	int		rc;
4727 
4728 	SPDK_NOTICELOG("Performing recovery on blobstore\n");
4729 	rc = spdk_bit_array_resize(&ctx->bs->used_md_pages, ctx->super->md_len);
4730 	if (rc < 0) {
4731 		bs_load_ctx_fail(ctx, -ENOMEM);
4732 		return;
4733 	}
4734 
4735 	rc = spdk_bit_array_resize(&ctx->bs->used_blobids, ctx->super->md_len);
4736 	if (rc < 0) {
4737 		bs_load_ctx_fail(ctx, -ENOMEM);
4738 		return;
4739 	}
4740 
4741 	rc = spdk_bit_array_resize(&ctx->used_clusters, ctx->bs->total_clusters);
4742 	if (rc < 0) {
4743 		bs_load_ctx_fail(ctx, -ENOMEM);
4744 		return;
4745 	}
4746 
4747 	rc = spdk_bit_array_resize(&ctx->bs->open_blobids, ctx->super->md_len);
4748 	if (rc < 0) {
4749 		bs_load_ctx_fail(ctx, -ENOMEM);
4750 		return;
4751 	}
4752 
4753 	ctx->bs->num_free_clusters = ctx->bs->total_clusters;
4754 	bs_load_replay_md(ctx);
4755 }
4756 
4757 static int
4758 bs_parse_super(struct spdk_bs_load_ctx *ctx)
4759 {
4760 	int rc;
4761 
4762 	if (ctx->super->size == 0) {
4763 		ctx->super->size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen;
4764 	}
4765 
4766 	if (ctx->super->io_unit_size == 0) {
4767 		ctx->super->io_unit_size = SPDK_BS_PAGE_SIZE;
4768 	}
4769 
4770 	ctx->bs->clean = 1;
4771 	ctx->bs->cluster_sz = ctx->super->cluster_size;
4772 	ctx->bs->total_clusters = ctx->super->size / ctx->super->cluster_size;
4773 	ctx->bs->pages_per_cluster = ctx->bs->cluster_sz / SPDK_BS_PAGE_SIZE;
4774 	if (spdk_u32_is_pow2(ctx->bs->pages_per_cluster)) {
4775 		ctx->bs->pages_per_cluster_shift = spdk_u32log2(ctx->bs->pages_per_cluster);
4776 	}
4777 	ctx->bs->io_unit_size = ctx->super->io_unit_size;
4778 	rc = spdk_bit_array_resize(&ctx->used_clusters, ctx->bs->total_clusters);
4779 	if (rc < 0) {
4780 		return -ENOMEM;
4781 	}
4782 	ctx->bs->md_start = ctx->super->md_start;
4783 	ctx->bs->md_len = ctx->super->md_len;
4784 	rc = spdk_bit_array_resize(&ctx->bs->open_blobids, ctx->bs->md_len);
4785 	if (rc < 0) {
4786 		return -ENOMEM;
4787 	}
4788 
4789 	ctx->bs->total_data_clusters = ctx->bs->total_clusters - spdk_divide_round_up(
4790 					       ctx->bs->md_start + ctx->bs->md_len, ctx->bs->pages_per_cluster);
4791 	ctx->bs->super_blob = ctx->super->super_blob;
4792 	memcpy(&ctx->bs->bstype, &ctx->super->bstype, sizeof(ctx->super->bstype));
4793 
4794 	return 0;
4795 }
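
/*
 * Worked example (editor's sketch, same assumed geometry as above): a 1 GiB
 * store with 1 MiB clusters and a 3-cluster metadata region ends up with
 * total_data_clusters = 1024 - 3 = 1021.
 */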
4796 
4797 static void
4798 bs_load_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4799 {
4800 	struct spdk_bs_load_ctx *ctx = cb_arg;
4801 	int rc;
4802 
4803 	rc = bs_super_validate(ctx->super, ctx->bs);
4804 	if (rc != 0) {
4805 		bs_load_ctx_fail(ctx, rc);
4806 		return;
4807 	}
4808 
4809 	rc = bs_parse_super(ctx);
4810 	if (rc < 0) {
4811 		bs_load_ctx_fail(ctx, rc);
4812 		return;
4813 	}
4814 
4815 	if (ctx->super->used_blobid_mask_len == 0 || ctx->super->clean == 0 || ctx->force_recover) {
4816 		bs_recover(ctx);
4817 	} else {
4818 		bs_load_read_used_pages(ctx);
4819 	}
4820 }
4821 
4822 static inline int
4823 bs_opts_copy(struct spdk_bs_opts *src, struct spdk_bs_opts *dst)
4824 {
4825 
4826 	if (!src->opts_size) {
4827 		SPDK_ERRLOG("opts_size should not be zero\n");
4828 		return -1;
4829 	}
4830 
4831 #define FIELD_OK(field) \
4832 	offsetof(struct spdk_bs_opts, field) + sizeof(src->field) <= src->opts_size
4833 
4834 #define SET_FIELD(field) \
4835 	if (FIELD_OK(field)) { \
4836 		dst->field = src->field; \
4837 	} \
4838 
4839 	SET_FIELD(cluster_sz);
4840 	SET_FIELD(num_md_pages);
4841 	SET_FIELD(max_md_ops);
4842 	SET_FIELD(max_channel_ops);
4843 	SET_FIELD(clear_method);
4844 
4845 	if (FIELD_OK(bstype)) {
4846 		memcpy(&dst->bstype, &src->bstype, sizeof(dst->bstype));
4847 	}
4848 	SET_FIELD(iter_cb_fn);
4849 	SET_FIELD(iter_cb_arg);
4850 	SET_FIELD(force_recover);
4851 	SET_FIELD(esnap_bs_dev_create);
4852 	SET_FIELD(esnap_ctx);
4853 
4854 	dst->opts_size = src->opts_size;
4855 
4856 	/* Do not remove this statement. If you add a new field, update the assert
4857 	 * statement and add a corresponding SET_FIELD statement. */
4858 	SPDK_STATIC_ASSERT(sizeof(struct spdk_bs_opts) == 88, "Incorrect size");
4859 
4860 #undef FIELD_OK
4861 #undef SET_FIELD
4862 
4863 	return 0;
4864 }
4865 
4866 void
4867 spdk_bs_load(struct spdk_bs_dev *dev, struct spdk_bs_opts *o,
4868 	     spdk_bs_op_with_handle_complete cb_fn, void *cb_arg)
4869 {
4870 	struct spdk_blob_store	*bs;
4871 	struct spdk_bs_cpl	cpl;
4872 	struct spdk_bs_load_ctx *ctx;
4873 	struct spdk_bs_opts	opts = {};
4874 	int err;
4875 
4876 	SPDK_DEBUGLOG(blob, "Loading blobstore from dev %p\n", dev);
4877 
4878 	if ((SPDK_BS_PAGE_SIZE % dev->blocklen) != 0) {
4879 		SPDK_DEBUGLOG(blob, "unsupported dev block length of %" PRIu32 "\n", dev->blocklen);
4880 		dev->destroy(dev);
4881 		cb_fn(cb_arg, NULL, -EINVAL);
4882 		return;
4883 	}
4884 
4885 	spdk_bs_opts_init(&opts, sizeof(opts));
4886 	if (o) {
4887 		if (bs_opts_copy(o, &opts)) {
			/* Match the other early-error paths: release the dev
			 * and report the failure to the caller. */
			dev->destroy(dev);
			cb_fn(cb_arg, NULL, -EINVAL);
4888 			return;
4889 		}
4890 	}
4891 
4892 	if (opts.max_md_ops == 0 || opts.max_channel_ops == 0) {
4893 		dev->destroy(dev);
4894 		cb_fn(cb_arg, NULL, -EINVAL);
4895 		return;
4896 	}
4897 
4898 	err = bs_alloc(dev, &opts, &bs, &ctx);
4899 	if (err) {
4900 		dev->destroy(dev);
4901 		cb_fn(cb_arg, NULL, err);
4902 		return;
4903 	}
4904 
4905 	cpl.type = SPDK_BS_CPL_TYPE_BS_HANDLE;
4906 	cpl.u.bs_handle.cb_fn = cb_fn;
4907 	cpl.u.bs_handle.cb_arg = cb_arg;
4908 	cpl.u.bs_handle.bs = bs;
4909 
4910 	ctx->seq = bs_sequence_start_bs(bs->md_channel, &cpl);
4911 	if (!ctx->seq) {
4912 		spdk_free(ctx->super);
4913 		free(ctx);
4914 		bs_free(bs);
4915 		cb_fn(cb_arg, NULL, -ENOMEM);
4916 		return;
4917 	}
4918 
4919 	/* Read the super block */
4920 	bs_sequence_read_dev(ctx->seq, ctx->super, bs_page_to_lba(bs, 0),
4921 			     bs_byte_to_lba(bs, sizeof(*ctx->super)),
4922 			     bs_load_super_cpl, ctx);
4923 }
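
/*
 * Illustrative usage sketch (editor's addition; callback names are
 * hypothetical): loading an existing blobstore, with NULL opts selecting the
 * defaults from spdk_bs_opts_init():
 *
 *	static void
 *	load_done(void *cb_arg, struct spdk_blob_store *bs, int bserrno)
 *	{
 *		if (bserrno == 0) {
 *			// bs is ready for spdk_bs_open_blob() etc.
 *		}
 *	}
 *
 *	spdk_bs_load(bs_dev, NULL, load_done, NULL);
 */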
4924 
4925 /* END spdk_bs_load */
4926 
4927 /* START spdk_bs_dump */
4928 
4929 static void
4930 bs_dump_finish(spdk_bs_sequence_t *seq, struct spdk_bs_load_ctx *ctx, int bserrno)
4931 {
4932 	spdk_free(ctx->super);
4933 
4934 	/*
4935 	 * We need to defer calling bs_call_cpl() until after
4936 	 * dev destruction, so tuck these away for later use.
4937 	 */
4938 	ctx->bs->unload_err = bserrno;
4939 	memcpy(&ctx->bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl));
4940 	seq->cpl.type = SPDK_BS_CPL_TYPE_NONE;
4941 
4942 	bs_sequence_finish(seq, 0);
4943 	bs_free(ctx->bs);
4944 	free(ctx);
4945 }
4946 
4947 static void
4948 bs_dump_print_xattr(struct spdk_bs_load_ctx *ctx, struct spdk_blob_md_descriptor *desc)
4949 {
4950 	struct spdk_blob_md_descriptor_xattr *desc_xattr;
4951 	uint32_t i;
4952 	const char *type;
4953 
4954 	desc_xattr = (struct spdk_blob_md_descriptor_xattr *)desc;
4955 
4956 	if (desc_xattr->length !=
4957 	    sizeof(desc_xattr->name_length) + sizeof(desc_xattr->value_length) +
4958 	    desc_xattr->name_length + desc_xattr->value_length) {
		/* Malformed xattr descriptor: note it and stop before reading
		 * past the descriptor's declared bounds. */
		fprintf(ctx->fp, "XATTR: invalid descriptor length\n");
		return;
4959 	}
4960 
4961 	memcpy(ctx->xattr_name, desc_xattr->name, desc_xattr->name_length);
4962 	ctx->xattr_name[desc_xattr->name_length] = '\0';
4963 	if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) {
4964 		type = "XATTR";
4965 	} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) {
4966 		type = "XATTR_INTERNAL";
4967 	} else {
4968 		assert(false);
4969 		type = "XATTR_?";
4970 	}
4971 	fprintf(ctx->fp, "%s: name = \"%s\"\n", type, ctx->xattr_name);
4972 	fprintf(ctx->fp, "       value = \"");
4973 	ctx->print_xattr_fn(ctx->fp, ctx->super->bstype.bstype, ctx->xattr_name,
4974 			    (void *)((uintptr_t)desc_xattr->name + desc_xattr->name_length),
4975 			    desc_xattr->value_length);
4976 	fprintf(ctx->fp, "\"\n");
4977 	for (i = 0; i < desc_xattr->value_length; i++) {
4978 		if (i % 16 == 0) {
4979 			fprintf(ctx->fp, "               ");
4980 		}
4981 		fprintf(ctx->fp, "%02" PRIx8 " ", *((uint8_t *)desc_xattr->name + desc_xattr->name_length + i));
4982 		if ((i + 1) % 16 == 0) {
4983 			fprintf(ctx->fp, "\n");
4984 		}
4985 	}
4986 	if (i % 16 != 0) {
4987 		fprintf(ctx->fp, "\n");
4988 	}
4989 }
4990 
4991 struct type_flag_desc {
4992 	uint64_t mask;
4993 	uint64_t val;
4994 	const char *name;
4995 };
4996 
4997 static void
4998 bs_dump_print_type_bits(struct spdk_bs_load_ctx *ctx, uint64_t flags,
4999 			struct type_flag_desc *desc, size_t numflags)
5000 {
5001 	uint64_t covered = 0;
5002 	size_t i;
5003 
5004 	for (i = 0; i < numflags; i++) {
5005 		if ((desc[i].mask & flags) != desc[i].val) {
5006 			continue;
5007 		}
5008 		fprintf(ctx->fp, "\t\t 0x%016" PRIx64 " %s", desc[i].val, desc[i].name);
5009 		if (desc[i].mask != desc[i].val) {
5010 			fprintf(ctx->fp, " (mask 0x%" PRIx64 " value 0x%" PRIx64 ")",
5011 				desc[i].mask, desc[i].val);
5012 		}
5013 		fprintf(ctx->fp, "\n");
5014 		covered |= desc[i].mask;
5015 	}
5016 	if ((flags & ~covered) != 0) {
5017 		fprintf(ctx->fp, "\t\t 0x%016" PRIx64 " Unknown\n", flags & ~covered);
5018 	}
5019 }
5020 
5021 static void
5022 bs_dump_print_type_flags(struct spdk_bs_load_ctx *ctx, struct spdk_blob_md_descriptor *desc)
5023 {
5024 	struct spdk_blob_md_descriptor_flags *type_desc;
5025 #define ADD_FLAG(f) { f, f, #f }
5026 #define ADD_MASK_VAL(m, v) { m, v, #v }
5027 	static struct type_flag_desc invalid[] = {
5028 		ADD_FLAG(SPDK_BLOB_THIN_PROV),
5029 		ADD_FLAG(SPDK_BLOB_INTERNAL_XATTR),
5030 		ADD_FLAG(SPDK_BLOB_EXTENT_TABLE),
5031 	};
5032 	static struct type_flag_desc data_ro[] = {
5033 		ADD_FLAG(SPDK_BLOB_READ_ONLY),
5034 	};
5035 	static struct type_flag_desc md_ro[] = {
5036 		ADD_MASK_VAL(SPDK_BLOB_MD_RO_FLAGS_MASK, BLOB_CLEAR_WITH_DEFAULT),
5037 		ADD_MASK_VAL(SPDK_BLOB_MD_RO_FLAGS_MASK, BLOB_CLEAR_WITH_NONE),
5038 		ADD_MASK_VAL(SPDK_BLOB_MD_RO_FLAGS_MASK, BLOB_CLEAR_WITH_UNMAP),
5039 		ADD_MASK_VAL(SPDK_BLOB_MD_RO_FLAGS_MASK, BLOB_CLEAR_WITH_WRITE_ZEROES),
5040 	};
5041 #undef ADD_FLAG
5042 #undef ADD_MASK_VAL
5043 
5044 	type_desc = (struct spdk_blob_md_descriptor_flags *)desc;
5045 	fprintf(ctx->fp, "Flags:\n");
5046 	fprintf(ctx->fp, "\tinvalid: 0x%016" PRIx64 "\n", type_desc->invalid_flags);
5047 	bs_dump_print_type_bits(ctx, type_desc->invalid_flags, invalid,
5048 				SPDK_COUNTOF(invalid));
5049 	fprintf(ctx->fp, "\tdata_ro: 0x%016" PRIx64 "\n", type_desc->data_ro_flags);
5050 	bs_dump_print_type_bits(ctx, type_desc->data_ro_flags, data_ro,
5051 				SPDK_COUNTOF(data_ro));
5052 	fprintf(ctx->fp, "\t  md_ro: 0x%016" PRIx64 "\n", type_desc->md_ro_flags);
5053 	bs_dump_print_type_bits(ctx, type_desc->md_ro_flags, md_ro,
5054 				SPDK_COUNTOF(md_ro));
5055 }
5056 
5057 static void
5058 bs_dump_print_extent_table(struct spdk_bs_load_ctx *ctx, struct spdk_blob_md_descriptor *desc)
5059 {
5060 	struct spdk_blob_md_descriptor_extent_table *et_desc;
5061 	uint64_t num_extent_pages;
5062 	uint32_t et_idx;
5063 
5064 	et_desc = (struct spdk_blob_md_descriptor_extent_table *)desc;
5065 	num_extent_pages = (et_desc->length - sizeof(et_desc->num_clusters)) /
5066 			   sizeof(et_desc->extent_page[0]);
5067 
5068 	fprintf(ctx->fp, "Extent table:\n");
5069 	for (et_idx = 0; et_idx < num_extent_pages; et_idx++) {
5070 		if (et_desc->extent_page[et_idx].page_idx == 0) {
5071 			/* Zeroes represent unallocated extent pages. */
5072 			continue;
5073 		}
5074 		fprintf(ctx->fp, "\tExtent page: %5" PRIu32 " length %3" PRIu32
5075 			" at LBA %" PRIu64 "\n", et_desc->extent_page[et_idx].page_idx,
5076 			et_desc->extent_page[et_idx].num_pages,
5077 			bs_md_page_to_lba(ctx->bs, et_desc->extent_page[et_idx].page_idx));
5078 	}
5079 }
5080 
5081 static void
5082 bs_dump_print_md_page(struct spdk_bs_load_ctx *ctx)
5083 {
5084 	uint32_t page_idx = ctx->cur_page;
5085 	struct spdk_blob_md_page *page = ctx->page;
5086 	struct spdk_blob_md_descriptor *desc;
5087 	size_t cur_desc = 0;
5088 	uint32_t crc;
5089 
5090 	fprintf(ctx->fp, "=========\n");
5091 	fprintf(ctx->fp, "Metadata Page Index: %" PRIu32 " (0x%" PRIx32 ")\n", page_idx, page_idx);
5092 	fprintf(ctx->fp, "Start LBA: %" PRIu64 "\n", bs_md_page_to_lba(ctx->bs, page_idx));
5093 	fprintf(ctx->fp, "Blob ID: 0x%" PRIx64 "\n", page->id);
5094 	fprintf(ctx->fp, "Sequence: %" PRIu32 "\n", page->sequence_num);
5095 	if (page->next == SPDK_INVALID_MD_PAGE) {
5096 		fprintf(ctx->fp, "Next: None\n");
5097 	} else {
5098 		fprintf(ctx->fp, "Next: %" PRIu32 "\n", page->next);
5099 	}
5100 	fprintf(ctx->fp, "In used bit array%s:", ctx->super->clean ? "" : " (not clean: dubious)");
5101 	if (spdk_bit_array_get(ctx->bs->used_md_pages, page_idx)) {
5102 		fprintf(ctx->fp, " md");
5103 	}
5104 	if (spdk_bit_array_get(ctx->bs->used_blobids, page_idx)) {
5105 		fprintf(ctx->fp, " blob");
5106 	}
5107 	fprintf(ctx->fp, "\n");
5108 
5109 	crc = blob_md_page_calc_crc(page);
5110 	fprintf(ctx->fp, "CRC: 0x%" PRIx32 " (%s)\n", page->crc, crc == page->crc ? "OK" : "Mismatch");
5111 
5112 	desc = (struct spdk_blob_md_descriptor *)page->descriptors;
5113 	while (cur_desc < sizeof(page->descriptors)) {
5114 		if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) {
5115 			if (desc->length == 0) {
5116 				/* If padding and length are 0, this terminates the page */
5117 				break;
5118 			}
5119 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE) {
5120 			struct spdk_blob_md_descriptor_extent_rle	*desc_extent_rle;
5121 			unsigned int				i;
5122 
5123 			desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)desc;
5124 
5125 			for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) {
5126 				if (desc_extent_rle->extents[i].cluster_idx != 0) {
5127 					fprintf(ctx->fp, "Allocated Extent - Start: %" PRIu32,
5128 						desc_extent_rle->extents[i].cluster_idx);
5129 				} else {
5130 					fprintf(ctx->fp, "Unallocated Extent - ");
5131 				}
5132 				fprintf(ctx->fp, " Length: %" PRIu32, desc_extent_rle->extents[i].length);
5133 				fprintf(ctx->fp, "\n");
5134 			}
5135 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) {
5136 			struct spdk_blob_md_descriptor_extent_page	*desc_extent;
5137 			unsigned int					i;
5138 
5139 			desc_extent = (struct spdk_blob_md_descriptor_extent_page *)desc;
5140 
5141 			for (i = 0; i < desc_extent->length / sizeof(desc_extent->cluster_idx[0]); i++) {
5142 				if (desc_extent->cluster_idx[i] != 0) {
5143 					fprintf(ctx->fp, "Allocated Extent - Start: %" PRIu32,
5144 						desc_extent->cluster_idx[i]);
5145 				} else {
5146 					fprintf(ctx->fp, "Unallocated Extent");
5147 				}
5148 				fprintf(ctx->fp, "\n");
5149 			}
5150 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) {
5151 			bs_dump_print_xattr(ctx, desc);
5152 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) {
5153 			bs_dump_print_xattr(ctx, desc);
5154 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) {
5155 			bs_dump_print_type_flags(ctx, desc);
5156 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE) {
5157 			bs_dump_print_extent_table(ctx, desc);
5158 		} else {
5159 			/* Error */
5160 			fprintf(ctx->fp, "Unknown descriptor type %" PRIu8 "\n", desc->type);
5161 		}
5162 		/* Advance to the next descriptor */
5163 		cur_desc += sizeof(*desc) + desc->length;
5164 		if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) {
5165 			break;
5166 		}
5167 		desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc);
5168 	}
5169 }
5170 
5171 static void
5172 bs_dump_read_md_page_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
5173 {
5174 	struct spdk_bs_load_ctx *ctx = cb_arg;
5175 
5176 	if (bserrno != 0) {
5177 		bs_dump_finish(seq, ctx, bserrno);
5178 		return;
5179 	}
5180 
5181 	if (ctx->page->id != 0) {
5182 		bs_dump_print_md_page(ctx);
5183 	}
5184 
5185 	ctx->cur_page++;
5186 
5187 	if (ctx->cur_page < ctx->super->md_len) {
5188 		bs_dump_read_md_page(seq, ctx);
5189 	} else {
5190 		spdk_free(ctx->page);
5191 		bs_dump_finish(seq, ctx, 0);
5192 	}
5193 }
5194 
5195 static void
5196 bs_dump_read_md_page(spdk_bs_sequence_t *seq, void *cb_arg)
5197 {
5198 	struct spdk_bs_load_ctx *ctx = cb_arg;
5199 	uint64_t lba;
5200 
5201 	assert(ctx->cur_page < ctx->super->md_len);
5202 	lba = bs_page_to_lba(ctx->bs, ctx->super->md_start + ctx->cur_page);
5203 	bs_sequence_read_dev(seq, ctx->page, lba,
5204 			     bs_byte_to_lba(ctx->bs, SPDK_BS_PAGE_SIZE),
5205 			     bs_dump_read_md_page_cpl, ctx);
5206 }
5207 
5208 static void
5209 bs_dump_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
5210 {
5211 	struct spdk_bs_load_ctx *ctx = cb_arg;
5212 	int rc;
5213 
5214 	fprintf(ctx->fp, "Signature: \"%.8s\" ", ctx->super->signature);
5215 	if (memcmp(ctx->super->signature, SPDK_BS_SUPER_BLOCK_SIG,
5216 		   sizeof(ctx->super->signature)) != 0) {
5217 		fprintf(ctx->fp, "(Mismatch)\n");
5218 		bs_dump_finish(seq, ctx, bserrno);
5219 		return;
5220 	} else {
5221 		fprintf(ctx->fp, "(OK)\n");
5222 	}
5223 	fprintf(ctx->fp, "Version: %" PRIu32 "\n", ctx->super->version);
5224 	fprintf(ctx->fp, "CRC: 0x%x (%s)\n", ctx->super->crc,
5225 		(ctx->super->crc == blob_md_page_calc_crc(ctx->super)) ? "OK" : "Mismatch");
5226 	fprintf(ctx->fp, "Blobstore Type: %.*s\n", SPDK_BLOBSTORE_TYPE_LENGTH, ctx->super->bstype.bstype);
5227 	fprintf(ctx->fp, "Cluster Size: %" PRIu32 "\n", ctx->super->cluster_size);
5228 	fprintf(ctx->fp, "Super Blob ID: ");
5229 	if (ctx->super->super_blob == SPDK_BLOBID_INVALID) {
5230 		fprintf(ctx->fp, "(None)\n");
5231 	} else {
5232 		fprintf(ctx->fp, "0x%" PRIx64 "\n", ctx->super->super_blob);
5233 	}
5234 	fprintf(ctx->fp, "Clean: %" PRIu32 "\n", ctx->super->clean);
5235 	fprintf(ctx->fp, "Used Metadata Page Mask Start: %" PRIu32 "\n", ctx->super->used_page_mask_start);
5236 	fprintf(ctx->fp, "Used Metadata Page Mask Length: %" PRIu32 "\n", ctx->super->used_page_mask_len);
5237 	fprintf(ctx->fp, "Used Cluster Mask Start: %" PRIu32 "\n", ctx->super->used_cluster_mask_start);
5238 	fprintf(ctx->fp, "Used Cluster Mask Length: %" PRIu32 "\n", ctx->super->used_cluster_mask_len);
5239 	fprintf(ctx->fp, "Used Blob ID Mask Start: %" PRIu32 "\n", ctx->super->used_blobid_mask_start);
5240 	fprintf(ctx->fp, "Used Blob ID Mask Length: %" PRIu32 "\n", ctx->super->used_blobid_mask_len);
5241 	fprintf(ctx->fp, "Metadata Start: %" PRIu32 "\n", ctx->super->md_start);
5242 	fprintf(ctx->fp, "Metadata Length: %" PRIu32 "\n", ctx->super->md_len);
5243 
5244 	ctx->cur_page = 0;
5245 	ctx->page = spdk_zmalloc(SPDK_BS_PAGE_SIZE, 0,
5246 				 NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
5247 	if (!ctx->page) {
5248 		bs_dump_finish(seq, ctx, -ENOMEM);
5249 		return;
5250 	}
5251 
5252 	rc = bs_parse_super(ctx);
5253 	if (rc < 0) {
5254 		bs_load_ctx_fail(ctx, rc);
5255 		return;
5256 	}
5257 
5258 	bs_load_read_used_pages(ctx);
5259 }
5260 
5261 void
5262 spdk_bs_dump(struct spdk_bs_dev *dev, FILE *fp, spdk_bs_dump_print_xattr print_xattr_fn,
5263 	     spdk_bs_op_complete cb_fn, void *cb_arg)
5264 {
5265 	struct spdk_blob_store	*bs;
5266 	struct spdk_bs_cpl	cpl;
5267 	struct spdk_bs_load_ctx *ctx;
5268 	struct spdk_bs_opts	opts = {};
5269 	int err;
5270 
5271 	SPDK_DEBUGLOG(blob, "Dumping blobstore from dev %p\n", dev);
5272 
5273 	spdk_bs_opts_init(&opts, sizeof(opts));
5274 
5275 	err = bs_alloc(dev, &opts, &bs, &ctx);
5276 	if (err) {
5277 		dev->destroy(dev);
5278 		cb_fn(cb_arg, err);
5279 		return;
5280 	}
5281 
5282 	ctx->dumping = true;
5283 	ctx->fp = fp;
5284 	ctx->print_xattr_fn = print_xattr_fn;
5285 
5286 	cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
5287 	cpl.u.bs_basic.cb_fn = cb_fn;
5288 	cpl.u.bs_basic.cb_arg = cb_arg;
5289 
5290 	ctx->seq = bs_sequence_start_bs(bs->md_channel, &cpl);
5291 	if (!ctx->seq) {
5292 		spdk_free(ctx->super);
5293 		free(ctx);
5294 		bs_free(bs);
5295 		cb_fn(cb_arg, -ENOMEM);
5296 		return;
5297 	}
5298 
5299 	/* Read the super block */
5300 	bs_sequence_read_dev(ctx->seq, ctx->super, bs_page_to_lba(bs, 0),
5301 			     bs_byte_to_lba(bs, sizeof(*ctx->super)),
5302 			     bs_dump_super_cpl, ctx);
5303 }
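
/*
 * Usage sketch (names are illustrative; the bs_dev would typically come
 * from a helper such as spdk_bdev_create_bs_dev_ext()):
 *
 *	spdk_bs_dump(bs_dev, stdout, my_print_xattr, dump_done, NULL);
 *
 * The print_xattr_fn callback gives the caller a chance to pretty-print
 * application-specific xattr values as each metadata page is dumped.
 */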
5304 
5305 /* END spdk_bs_dump */
5306 
5307 /* START spdk_bs_init */
5308 
5309 static void
5310 bs_init_persist_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
5311 {
5312 	struct spdk_bs_load_ctx *ctx = cb_arg;
5313 
5314 	ctx->bs->used_clusters = spdk_bit_pool_create_from_array(ctx->used_clusters);
5315 	spdk_free(ctx->super);
5316 	free(ctx);
5317 
5318 	bs_sequence_finish(seq, bserrno);
5319 }
5320 
5321 static void
5322 bs_init_trim_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
5323 {
5324 	struct spdk_bs_load_ctx *ctx = cb_arg;
5325 
5326 	/* Write super block */
5327 	bs_sequence_write_dev(seq, ctx->super, bs_page_to_lba(ctx->bs, 0),
5328 			      bs_byte_to_lba(ctx->bs, sizeof(*ctx->super)),
5329 			      bs_init_persist_super_cpl, ctx);
5330 }
5331 
5332 void
5333 spdk_bs_init(struct spdk_bs_dev *dev, struct spdk_bs_opts *o,
5334 	     spdk_bs_op_with_handle_complete cb_fn, void *cb_arg)
5335 {
5336 	struct spdk_bs_load_ctx *ctx;
5337 	struct spdk_blob_store	*bs;
5338 	struct spdk_bs_cpl	cpl;
5339 	spdk_bs_sequence_t	*seq;
5340 	spdk_bs_batch_t		*batch;
5341 	uint64_t		num_md_lba;
5342 	uint64_t		num_md_pages;
5343 	uint64_t		num_md_clusters;
5344 	uint64_t		max_used_cluster_mask_len;
5345 	uint32_t		i;
5346 	struct spdk_bs_opts	opts = {};
5347 	int			rc;
5348 	uint64_t		lba, lba_count;
5349 
5350 	SPDK_DEBUGLOG(blob, "Initializing blobstore on dev %p\n", dev);
5351 
5352 	if ((SPDK_BS_PAGE_SIZE % dev->blocklen) != 0) {
5353 		SPDK_ERRLOG("unsupported dev block length of %" PRIu32 "\n",
5354 			    dev->blocklen);
5355 		dev->destroy(dev);
5356 		cb_fn(cb_arg, NULL, -EINVAL);
5357 		return;
5358 	}
5359 
5360 	spdk_bs_opts_init(&opts, sizeof(opts));
5361 	if (o) {
5362 		if (bs_opts_copy(o, &opts)) {
			dev->destroy(dev);
			cb_fn(cb_arg, NULL, -EINVAL);
5363 			return;
5364 		}
5365 	}
5366 
5367 	if (bs_opts_verify(&opts) != 0) {
5368 		dev->destroy(dev);
5369 		cb_fn(cb_arg, NULL, -EINVAL);
5370 		return;
5371 	}
5372 
5373 	rc = bs_alloc(dev, &opts, &bs, &ctx);
5374 	if (rc) {
5375 		dev->destroy(dev);
5376 		cb_fn(cb_arg, NULL, rc);
5377 		return;
5378 	}
5379 
5380 	if (opts.num_md_pages == SPDK_BLOB_OPTS_NUM_MD_PAGES) {
5381 		/* By default, allocate 1 page per cluster.
5382 		 * Technically, this over-allocates metadata
5383 		 * because more metadata will reduce the number
5384 		 * of usable clusters. This can be addressed with
5385 		 * more complex math in the future.
5386 		 */
5387 		bs->md_len = bs->total_clusters;
5388 	} else {
5389 		bs->md_len = opts.num_md_pages;
5390 	}
5391 	rc = spdk_bit_array_resize(&bs->used_md_pages, bs->md_len);
5392 	if (rc < 0) {
5393 		spdk_free(ctx->super);
5394 		free(ctx);
5395 		bs_free(bs);
5396 		cb_fn(cb_arg, NULL, -ENOMEM);
5397 		return;
5398 	}
5399 
5400 	rc = spdk_bit_array_resize(&bs->used_blobids, bs->md_len);
5401 	if (rc < 0) {
5402 		spdk_free(ctx->super);
5403 		free(ctx);
5404 		bs_free(bs);
5405 		cb_fn(cb_arg, NULL, -ENOMEM);
5406 		return;
5407 	}
5408 
5409 	rc = spdk_bit_array_resize(&bs->open_blobids, bs->md_len);
5410 	if (rc < 0) {
5411 		spdk_free(ctx->super);
5412 		free(ctx);
5413 		bs_free(bs);
5414 		cb_fn(cb_arg, NULL, -ENOMEM);
5415 		return;
5416 	}
5417 
5418 	memcpy(ctx->super->signature, SPDK_BS_SUPER_BLOCK_SIG,
5419 	       sizeof(ctx->super->signature));
5420 	ctx->super->version = SPDK_BS_VERSION;
5421 	ctx->super->length = sizeof(*ctx->super);
5422 	ctx->super->super_blob = bs->super_blob;
5423 	ctx->super->clean = 0;
5424 	ctx->super->cluster_size = bs->cluster_sz;
5425 	ctx->super->io_unit_size = bs->io_unit_size;
5426 	memcpy(&ctx->super->bstype, &bs->bstype, sizeof(bs->bstype));
5427 
5428 	/* Calculate how many pages the metadata consumes at the front
5429 	 * of the disk.
5430 	 */
5431 
5432 	/* The super block uses 1 page */
5433 	num_md_pages = 1;
5434 
5435 	/* The used_md_pages mask requires 1 bit per metadata page, rounded
5436 	 * up to the nearest page, plus a header.
5437 	 */
5438 	ctx->super->used_page_mask_start = num_md_pages;
5439 	ctx->super->used_page_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) +
5440 					 spdk_divide_round_up(bs->md_len, 8),
5441 					 SPDK_BS_PAGE_SIZE);
5442 	num_md_pages += ctx->super->used_page_mask_len;
5443 
5444 	/* The used_clusters mask requires 1 bit per cluster, rounded
5445 	 * up to the nearest page, plus a header.
5446 	 */
5447 	ctx->super->used_cluster_mask_start = num_md_pages;
5448 	ctx->super->used_cluster_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) +
5449 					    spdk_divide_round_up(bs->total_clusters, 8),
5450 					    SPDK_BS_PAGE_SIZE);
5451 	/* The blobstore might be extended later, and then the used_cluster bitmap
5452 	 * will need more space. Calculate the maximum number of clusters we could
5453 	 * ever support from num_md_pages (bs->md_len) and reserve mask space for it.
5454 	 */
5455 	max_used_cluster_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) +
5456 				    spdk_divide_round_up(bs->md_len, 8),
5457 				    SPDK_BS_PAGE_SIZE);
5458 	max_used_cluster_mask_len = spdk_max(max_used_cluster_mask_len,
5459 					     ctx->super->used_cluster_mask_len);
5460 	num_md_pages += max_used_cluster_mask_len;
5461 
5462 	/* The used_blobids mask requires 1 bit per metadata page, rounded
5463 	 * up to the nearest page, plus a header.
5464 	 */
5465 	ctx->super->used_blobid_mask_start = num_md_pages;
5466 	ctx->super->used_blobid_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) +
5467 					   spdk_divide_round_up(bs->md_len, 8),
5468 					   SPDK_BS_PAGE_SIZE);
5469 	num_md_pages += ctx->super->used_blobid_mask_len;
5470 
5471 	/* The metadata region size was chosen above */
5472 	ctx->super->md_start = bs->md_start = num_md_pages;
5473 	ctx->super->md_len = bs->md_len;
5474 	num_md_pages += bs->md_len;
5475 
5476 	num_md_lba = bs_page_to_lba(bs, num_md_pages);
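
	/*
	 * Worked example (illustrative numbers, assuming a 1 GiB device with
	 * 4 KiB metadata pages and a 1 MiB cluster size): total_clusters is
	 * 1024, so md_len defaults to 1024 pages. Each of the three masks
	 * needs a small header plus 1024 / 8 = 128 bytes of bitmap, which
	 * rounds up to one page apiece. The resulting layout is: page 0
	 * super block, page 1 used_page mask, page 2 used_cluster mask,
	 * page 3 used_blobid mask, pages 4..1027 metadata region, for
	 * num_md_pages = 1028.
	 */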
5477 
5478 	ctx->super->size = dev->blockcnt * dev->blocklen;
5479 
5480 	ctx->super->crc = blob_md_page_calc_crc(ctx->super);
5481 
5482 	num_md_clusters = spdk_divide_round_up(num_md_pages, bs->pages_per_cluster);
5483 	if (num_md_clusters > bs->total_clusters) {
5484 		SPDK_ERRLOG("Blobstore metadata cannot use more clusters than are available; "
5485 			    "please decrease the number of pages reserved for metadata "
5486 			    "or increase the cluster size.\n");
5487 		spdk_free(ctx->super);
5488 		spdk_bit_array_free(&ctx->used_clusters);
5489 		free(ctx);
5490 		bs_free(bs);
5491 		cb_fn(cb_arg, NULL, -ENOMEM);
5492 		return;
5493 	}
5494 	/* Claim all of the clusters used by the metadata */
5495 	for (i = 0; i < num_md_clusters; i++) {
5496 		spdk_bit_array_set(ctx->used_clusters, i);
5497 	}
5498 
5499 	bs->num_free_clusters -= num_md_clusters;
5500 	bs->total_data_clusters = bs->num_free_clusters;
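
	/*
	 * Continuing the example above: with 1 MiB clusters there are 256
	 * pages per cluster, so the 1028 metadata pages claim
	 * ceil(1028 / 256) = 5 clusters, leaving 1019 data clusters.
	 */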
5501 
5502 	cpl.type = SPDK_BS_CPL_TYPE_BS_HANDLE;
5503 	cpl.u.bs_handle.cb_fn = cb_fn;
5504 	cpl.u.bs_handle.cb_arg = cb_arg;
5505 	cpl.u.bs_handle.bs = bs;
5506 
5507 	seq = bs_sequence_start_bs(bs->md_channel, &cpl);
5508 	if (!seq) {
5509 		spdk_free(ctx->super);
5510 		free(ctx);
5511 		bs_free(bs);
5512 		cb_fn(cb_arg, NULL, -ENOMEM);
5513 		return;
5514 	}
5515 
5516 	batch = bs_sequence_to_batch(seq, bs_init_trim_cpl, ctx);
5517 
5518 	/* Clear metadata space */
5519 	bs_batch_write_zeroes_dev(batch, 0, num_md_lba);
5520 
5521 	lba = num_md_lba;
5522 	lba_count = ctx->bs->dev->blockcnt - lba;
5523 	switch (opts.clear_method) {
5524 	case BS_CLEAR_WITH_UNMAP:
5525 		/* Trim data clusters */
5526 		bs_batch_unmap_dev(batch, lba, lba_count);
5527 		break;
5528 	case BS_CLEAR_WITH_WRITE_ZEROES:
5529 		/* Write_zeroes to data clusters */
5530 		bs_batch_write_zeroes_dev(batch, lba, lba_count);
5531 		break;
5532 	case BS_CLEAR_WITH_NONE:
5533 	default:
5534 		break;
5535 	}
5536 
5537 	bs_batch_close(batch);
5538 }
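
/*
 * Usage sketch (callback and variable names are illustrative):
 *
 *	static void
 *	init_done(void *cb_arg, struct spdk_blob_store *bs, int bserrno)
 *	{
 *		...
 *	}
 *
 *	struct spdk_bs_opts opts;
 *
 *	spdk_bs_opts_init(&opts, sizeof(opts));
 *	spdk_bs_init(bs_dev, &opts, init_done, NULL);
 */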
5539 
5540 /* END spdk_bs_init */
5541 
5542 /* START spdk_bs_destroy */
5543 
5544 static void
5545 bs_destroy_trim_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
5546 {
5547 	struct spdk_bs_load_ctx *ctx = cb_arg;
5548 	struct spdk_blob_store *bs = ctx->bs;
5549 
5550 	/*
5551 	 * We need to defer calling bs_call_cpl() until after
5552 	 * dev destruction, so tuck these away for later use.
5553 	 */
5554 	bs->unload_err = bserrno;
5555 	memcpy(&bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl));
5556 	seq->cpl.type = SPDK_BS_CPL_TYPE_NONE;
5557 
5558 	bs_sequence_finish(seq, bserrno);
5559 
5560 	bs_free(bs);
5561 	free(ctx);
5562 }
5563 
5564 void
5565 spdk_bs_destroy(struct spdk_blob_store *bs, spdk_bs_op_complete cb_fn,
5566 		void *cb_arg)
5567 {
5568 	struct spdk_bs_cpl	cpl;
5569 	spdk_bs_sequence_t	*seq;
5570 	struct spdk_bs_load_ctx *ctx;
5571 
5572 	SPDK_DEBUGLOG(blob, "Destroying blobstore\n");
5573 
5574 	if (!RB_EMPTY(&bs->open_blobs)) {
5575 		SPDK_ERRLOG("Blobstore still has open blobs\n");
5576 		cb_fn(cb_arg, -EBUSY);
5577 		return;
5578 	}
5579 
5580 	cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
5581 	cpl.u.bs_basic.cb_fn = cb_fn;
5582 	cpl.u.bs_basic.cb_arg = cb_arg;
5583 
5584 	ctx = calloc(1, sizeof(*ctx));
5585 	if (!ctx) {
5586 		cb_fn(cb_arg, -ENOMEM);
5587 		return;
5588 	}
5589 
5590 	ctx->bs = bs;
5591 
5592 	seq = bs_sequence_start_bs(bs->md_channel, &cpl);
5593 	if (!seq) {
5594 		free(ctx);
5595 		cb_fn(cb_arg, -ENOMEM);
5596 		return;
5597 	}
5598 
5599 	/* Write zeroes to the super block */
5600 	bs_sequence_write_zeroes_dev(seq,
5601 				     bs_page_to_lba(bs, 0),
5602 				     bs_byte_to_lba(bs, sizeof(struct spdk_bs_super_block)),
5603 				     bs_destroy_trim_cpl, ctx);
5604 }
5605 
5606 /* END spdk_bs_destroy */
5607 
5608 /* START spdk_bs_unload */
5609 
5610 static void
5611 bs_unload_finish(struct spdk_bs_load_ctx *ctx, int bserrno)
5612 {
5613 	spdk_bs_sequence_t *seq = ctx->seq;
5614 
5615 	spdk_free(ctx->super);
5616 
5617 	/*
5618 	 * We need to defer calling bs_call_cpl() until after
5619 	 * dev destruction, so tuck these away for later use.
5620 	 */
5621 	ctx->bs->unload_err = bserrno;
5622 	memcpy(&ctx->bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl));
5623 	seq->cpl.type = SPDK_BS_CPL_TYPE_NONE;
5624 
5625 	bs_sequence_finish(seq, bserrno);
5626 
5627 	bs_free(ctx->bs);
5628 	free(ctx);
5629 }
5630 
5631 static void
5632 bs_unload_write_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
5633 {
5634 	struct spdk_bs_load_ctx	*ctx = cb_arg;
5635 
5636 	bs_unload_finish(ctx, bserrno);
5637 }
5638 
5639 static void
5640 bs_unload_write_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
5641 {
5642 	struct spdk_bs_load_ctx	*ctx = cb_arg;
5643 
5644 	spdk_free(ctx->mask);
5645 
5646 	if (bserrno != 0) {
5647 		bs_unload_finish(ctx, bserrno);
5648 		return;
5649 	}
5650 
5651 	ctx->super->clean = 1;
5652 
5653 	bs_write_super(seq, ctx->bs, ctx->super, bs_unload_write_super_cpl, ctx);
5654 }
5655 
5656 static void
5657 bs_unload_write_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
5658 {
5659 	struct spdk_bs_load_ctx	*ctx = cb_arg;
5660 
5661 	spdk_free(ctx->mask);
5662 	ctx->mask = NULL;
5663 
5664 	if (bserrno != 0) {
5665 		bs_unload_finish(ctx, bserrno);
5666 		return;
5667 	}
5668 
5669 	bs_write_used_clusters(seq, ctx, bs_unload_write_used_clusters_cpl);
5670 }
5671 
5672 static void
5673 bs_unload_write_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
5674 {
5675 	struct spdk_bs_load_ctx	*ctx = cb_arg;
5676 
5677 	spdk_free(ctx->mask);
5678 	ctx->mask = NULL;
5679 
5680 	if (bserrno != 0) {
5681 		bs_unload_finish(ctx, bserrno);
5682 		return;
5683 	}
5684 
5685 	bs_write_used_blobids(seq, ctx, bs_unload_write_used_blobids_cpl);
5686 }
5687 
5688 static void
5689 bs_unload_read_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
5690 {
5691 	struct spdk_bs_load_ctx	*ctx = cb_arg;
5692 	int rc;
5693 
5694 	if (bserrno != 0) {
5695 		bs_unload_finish(ctx, bserrno);
5696 		return;
5697 	}
5698 
5699 	rc = bs_super_validate(ctx->super, ctx->bs);
5700 	if (rc != 0) {
5701 		bs_unload_finish(ctx, rc);
5702 		return;
5703 	}
5704 
5705 	bs_write_used_md(seq, cb_arg, bs_unload_write_used_pages_cpl);
5706 }
5707 
5708 void
5709 spdk_bs_unload(struct spdk_blob_store *bs, spdk_bs_op_complete cb_fn, void *cb_arg)
5710 {
5711 	struct spdk_bs_cpl	cpl;
5712 	struct spdk_bs_load_ctx *ctx;
5713 
5714 	SPDK_DEBUGLOG(blob, "Syncing blobstore\n");
5715 
5716 	/*
5717 	 * If external snapshot channels are being destroyed while the blobstore is being
5718 	 * unloaded, the unload is deferred until after the channel destruction completes.
5719 	 */
5720 	if (bs->esnap_channels_unloading != 0) {
5721 		if (bs->esnap_unload_cb_fn != NULL) {
5722 			SPDK_ERRLOG("Blobstore unload in progress\n");
5723 			cb_fn(cb_arg, -EBUSY);
5724 			return;
5725 		}
5726 		SPDK_DEBUGLOG(blob_esnap, "Blobstore unload deferred: %" PRIu32
5727 			      " esnap clones are unloading\n", bs->esnap_channels_unloading);
5728 		bs->esnap_unload_cb_fn = cb_fn;
5729 		bs->esnap_unload_cb_arg = cb_arg;
5730 		return;
5731 	}
5732 	if (bs->esnap_unload_cb_fn != NULL) {
5733 		SPDK_DEBUGLOG(blob_esnap, "Blobstore deferred unload progressing\n");
5734 		assert(bs->esnap_unload_cb_fn == cb_fn);
5735 		assert(bs->esnap_unload_cb_arg == cb_arg);
5736 		bs->esnap_unload_cb_fn = NULL;
5737 		bs->esnap_unload_cb_arg = NULL;
5738 	}
5739 
5740 	if (!RB_EMPTY(&bs->open_blobs)) {
5741 		SPDK_ERRLOG("Blobstore still has open blobs\n");
5742 		cb_fn(cb_arg, -EBUSY);
5743 		return;
5744 	}
5745 
5746 	ctx = calloc(1, sizeof(*ctx));
5747 	if (!ctx) {
5748 		cb_fn(cb_arg, -ENOMEM);
5749 		return;
5750 	}
5751 
5752 	ctx->bs = bs;
5753 
5754 	ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL,
5755 				  SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
5756 	if (!ctx->super) {
5757 		free(ctx);
5758 		cb_fn(cb_arg, -ENOMEM);
5759 		return;
5760 	}
5761 
5762 	cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
5763 	cpl.u.bs_basic.cb_fn = cb_fn;
5764 	cpl.u.bs_basic.cb_arg = cb_arg;
5765 
5766 	ctx->seq = bs_sequence_start_bs(bs->md_channel, &cpl);
5767 	if (!ctx->seq) {
5768 		spdk_free(ctx->super);
5769 		free(ctx);
5770 		cb_fn(cb_arg, -ENOMEM);
5771 		return;
5772 	}
5773 
5774 	/* Read super block */
5775 	bs_sequence_read_dev(ctx->seq, ctx->super, bs_page_to_lba(bs, 0),
5776 			     bs_byte_to_lba(bs, sizeof(*ctx->super)),
5777 			     bs_unload_read_super_cpl, ctx);
5778 }
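
/*
 * The unload completion chain above runs: read super -> write used_md
 * pages mask -> write used_blobids mask -> write used_clusters mask ->
 * set clean = 1 and rewrite the super block. Marking the super block
 * clean is what lets a subsequent load skip recovery.
 */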
5779 
5780 /* END spdk_bs_unload */
5781 
5782 /* START spdk_bs_set_super */
5783 
5784 struct spdk_bs_set_super_ctx {
5785 	struct spdk_blob_store		*bs;
5786 	struct spdk_bs_super_block	*super;
5787 };
5788 
5789 static void
5790 bs_set_super_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
5791 {
5792 	struct spdk_bs_set_super_ctx	*ctx = cb_arg;
5793 
5794 	if (bserrno != 0) {
5795 		SPDK_ERRLOG("Unable to write to super block of blobstore\n");
5796 	}
5797 
5798 	spdk_free(ctx->super);
5799 
5800 	bs_sequence_finish(seq, bserrno);
5801 
5802 	free(ctx);
5803 }
5804 
5805 static void
5806 bs_set_super_read_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
5807 {
5808 	struct spdk_bs_set_super_ctx	*ctx = cb_arg;
5809 	int rc;
5810 
5811 	if (bserrno != 0) {
5812 		SPDK_ERRLOG("Unable to read super block of blobstore\n");
5813 		spdk_free(ctx->super);
5814 		bs_sequence_finish(seq, bserrno);
5815 		free(ctx);
5816 		return;
5817 	}
5818 
5819 	rc = bs_super_validate(ctx->super, ctx->bs);
5820 	if (rc != 0) {
5821 		SPDK_ERRLOG("Not a valid super block\n");
5822 		spdk_free(ctx->super);
5823 		bs_sequence_finish(seq, rc);
5824 		free(ctx);
5825 		return;
5826 	}
5827 
5828 	bs_write_super(seq, ctx->bs, ctx->super, bs_set_super_write_cpl, ctx);
5829 }
5830 
5831 void
5832 spdk_bs_set_super(struct spdk_blob_store *bs, spdk_blob_id blobid,
5833 		  spdk_bs_op_complete cb_fn, void *cb_arg)
5834 {
5835 	struct spdk_bs_cpl		cpl;
5836 	spdk_bs_sequence_t		*seq;
5837 	struct spdk_bs_set_super_ctx	*ctx;
5838 
5839 	SPDK_DEBUGLOG(blob, "Setting super blob id on blobstore\n");
5840 
5841 	ctx = calloc(1, sizeof(*ctx));
5842 	if (!ctx) {
5843 		cb_fn(cb_arg, -ENOMEM);
5844 		return;
5845 	}
5846 
5847 	ctx->bs = bs;
5848 
5849 	ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL,
5850 				  SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
5851 	if (!ctx->super) {
5852 		free(ctx);
5853 		cb_fn(cb_arg, -ENOMEM);
5854 		return;
5855 	}
5856 
5857 	cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
5858 	cpl.u.bs_basic.cb_fn = cb_fn;
5859 	cpl.u.bs_basic.cb_arg = cb_arg;
5860 
5861 	seq = bs_sequence_start_bs(bs->md_channel, &cpl);
5862 	if (!seq) {
5863 		spdk_free(ctx->super);
5864 		free(ctx);
5865 		cb_fn(cb_arg, -ENOMEM);
5866 		return;
5867 	}
5868 
5869 	bs->super_blob = blobid;
5870 
5871 	/* Read super block */
5872 	bs_sequence_read_dev(seq, ctx->super, bs_page_to_lba(bs, 0),
5873 			     bs_byte_to_lba(bs, sizeof(*ctx->super)),
5874 			     bs_set_super_read_cpl, ctx);
5875 }
5876 
5877 /* END spdk_bs_set_super */
5878 
5879 void
5880 spdk_bs_get_super(struct spdk_blob_store *bs,
5881 		  spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
5882 {
5883 	if (bs->super_blob == SPDK_BLOBID_INVALID) {
5884 		cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOENT);
5885 	} else {
5886 		cb_fn(cb_arg, bs->super_blob, 0);
5887 	}
5888 }
5889 
5890 uint64_t
5891 spdk_bs_get_cluster_size(struct spdk_blob_store *bs)
5892 {
5893 	return bs->cluster_sz;
5894 }
5895 
5896 uint64_t
5897 spdk_bs_get_page_size(struct spdk_blob_store *bs)
5898 {
5899 	return SPDK_BS_PAGE_SIZE;
5900 }
5901 
5902 uint64_t
5903 spdk_bs_get_io_unit_size(struct spdk_blob_store *bs)
5904 {
5905 	return bs->io_unit_size;
5906 }
5907 
5908 uint64_t
5909 spdk_bs_free_cluster_count(struct spdk_blob_store *bs)
5910 {
5911 	return bs->num_free_clusters;
5912 }
5913 
5914 uint64_t
5915 spdk_bs_total_data_cluster_count(struct spdk_blob_store *bs)
5916 {
5917 	return bs->total_data_clusters;
5918 }
5919 
5920 static int
5921 bs_register_md_thread(struct spdk_blob_store *bs)
5922 {
5923 	bs->md_channel = spdk_get_io_channel(bs);
5924 	if (!bs->md_channel) {
5925 		SPDK_ERRLOG("Failed to get IO channel.\n");
5926 		return -1;
5927 	}
5928 
5929 	return 0;
5930 }
5931 
5932 static int
5933 bs_unregister_md_thread(struct spdk_blob_store *bs)
5934 {
5935 	spdk_put_io_channel(bs->md_channel);
5936 
5937 	return 0;
5938 }
5939 
5940 spdk_blob_id
5941 spdk_blob_get_id(struct spdk_blob *blob)
5942 {
5943 	assert(blob != NULL);
5944 
5945 	return blob->id;
5946 }
5947 
5948 uint64_t
5949 spdk_blob_get_num_pages(struct spdk_blob *blob)
5950 {
5951 	assert(blob != NULL);
5952 
5953 	return bs_cluster_to_page(blob->bs, blob->active.num_clusters);
5954 }
5955 
5956 uint64_t
5957 spdk_blob_get_num_io_units(struct spdk_blob *blob)
5958 {
5959 	assert(blob != NULL);
5960 
5961 	return spdk_blob_get_num_pages(blob) * bs_io_unit_per_page(blob->bs);
5962 }
5963 
5964 uint64_t
5965 spdk_blob_get_num_clusters(struct spdk_blob *blob)
5966 {
5967 	assert(blob != NULL);
5968 
5969 	return blob->active.num_clusters;
5970 }
5971 
5972 static uint64_t
5973 blob_find_io_unit(struct spdk_blob *blob, uint64_t offset, bool is_allocated)
5974 {
5975 	uint64_t blob_io_unit_num = spdk_blob_get_num_io_units(blob);
5976 
5977 	while (offset < blob_io_unit_num) {
5978 		if (bs_io_unit_is_allocated(blob, offset) == is_allocated) {
5979 			return offset;
5980 		}
5981 
5982 		offset += bs_num_io_units_to_cluster_boundary(blob, offset);
5983 	}
5984 
5985 	return UINT64_MAX;
5986 }
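
/*
 * Note that the scan above advances in whole-cluster steps
 * (bs_num_io_units_to_cluster_boundary): allocation state changes only at
 * cluster boundaries, so every io_unit within a cluster shares the state
 * of that cluster's first io_unit.
 */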
5987 
5988 uint64_t
5989 spdk_blob_get_next_allocated_io_unit(struct spdk_blob *blob, uint64_t offset)
5990 {
5991 	return blob_find_io_unit(blob, offset, true);
5992 }
5993 
5994 uint64_t
5995 spdk_blob_get_next_unallocated_io_unit(struct spdk_blob *blob, uint64_t offset)
5996 {
5997 	return blob_find_io_unit(blob, offset, false);
5998 }
5999 
6000 /* START spdk_bs_create_blob */
6001 
6002 static void
6003 bs_create_blob_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
6004 {
6005 	struct spdk_blob *blob = cb_arg;
6006 	uint32_t page_idx = bs_blobid_to_page(blob->id);
6007 
6008 	if (bserrno != 0) {
6009 		spdk_spin_lock(&blob->bs->used_lock);
6010 		spdk_bit_array_clear(blob->bs->used_blobids, page_idx);
6011 		bs_release_md_page(blob->bs, page_idx);
6012 		spdk_spin_unlock(&blob->bs->used_lock);
6013 	}
6014 
6015 	blob_free(blob);
6016 
6017 	bs_sequence_finish(seq, bserrno);
6018 }
6019 
6020 static int
6021 blob_set_xattrs(struct spdk_blob *blob, const struct spdk_blob_xattr_opts *xattrs,
6022 		bool internal)
6023 {
6024 	uint64_t i;
6025 	size_t value_len = 0;
6026 	int rc;
6027 	const void *value = NULL;

6028 	if (xattrs->count > 0 && xattrs->get_value == NULL) {
6029 		return -EINVAL;
6030 	}

6031 	for (i = 0; i < xattrs->count; i++) {
6032 		xattrs->get_value(xattrs->ctx, xattrs->names[i], &value, &value_len);
6033 		if (value == NULL || value_len == 0) {
6034 			return -EINVAL;
6035 		}
6036 		rc = blob_set_xattr(blob, xattrs->names[i], value, value_len, internal);
6037 		if (rc < 0) {
6038 			return rc;
6039 		}
6040 	}
6041 	return 0;
6042 }
6043 
6044 static void
6045 blob_opts_copy(const struct spdk_blob_opts *src, struct spdk_blob_opts *dst)
6046 {
6047 #define FIELD_OK(field) \
6048         offsetof(struct spdk_blob_opts, field) + sizeof(src->field) <= src->opts_size
6049 
6050 #define SET_FIELD(field) \
6051         if (FIELD_OK(field)) { \
6052                 dst->field = src->field; \
6053         } \
6054 
6055 	SET_FIELD(num_clusters);
6056 	SET_FIELD(thin_provision);
6057 	SET_FIELD(clear_method);
6058 
6059 	if (FIELD_OK(xattrs)) {
6060 		memcpy(&dst->xattrs, &src->xattrs, sizeof(src->xattrs));
6061 	}
6062 
6063 	SET_FIELD(use_extent_table);
6064 	SET_FIELD(esnap_id);
6065 	SET_FIELD(esnap_id_len);
6066 
6067 	dst->opts_size = src->opts_size;
6068 
6069 	/* Do not remove this statement. When a new field is added, update the size in
6070 	 * this assert and add a corresponding SET_FIELD statement above. */
6071 	SPDK_STATIC_ASSERT(sizeof(struct spdk_blob_opts) == 80, "Incorrect size");
6072 
6073 #undef FIELD_OK
6074 #undef SET_FIELD
6075 }
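
/*
 * The FIELD_OK/SET_FIELD pattern above keeps blob creation compatible with
 * callers compiled against older, smaller versions of struct
 * spdk_blob_opts: only fields that fit within the caller-supplied
 * opts_size are copied. A typical caller (sketch, with illustrative
 * callback names) looks like:
 *
 *	struct spdk_blob_opts opts;
 *
 *	spdk_blob_opts_init(&opts, sizeof(opts));
 *	opts.num_clusters = 10;
 *	opts.thin_provision = true;
 *	spdk_bs_create_blob_ext(bs, &opts, blob_create_done, NULL);
 */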
6076 
6077 static void
6078 bs_create_blob(struct spdk_blob_store *bs,
6079 	       const struct spdk_blob_opts *opts,
6080 	       const struct spdk_blob_xattr_opts *internal_xattrs,
6081 	       spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
6082 {
6083 	struct spdk_blob	*blob;
6084 	uint32_t		page_idx;
6085 	struct spdk_bs_cpl	cpl;
6086 	struct spdk_blob_opts	opts_local;
6087 	struct spdk_blob_xattr_opts internal_xattrs_default;
6088 	spdk_bs_sequence_t	*seq;
6089 	spdk_blob_id		id;
6090 	int rc;
6091 
6092 	assert(spdk_get_thread() == bs->md_thread);
6093 
6094 	spdk_spin_lock(&bs->used_lock);
6095 	page_idx = spdk_bit_array_find_first_clear(bs->used_md_pages, 0);
6096 	if (page_idx == UINT32_MAX) {
6097 		spdk_spin_unlock(&bs->used_lock);
6098 		cb_fn(cb_arg, 0, -ENOMEM);
6099 		return;
6100 	}
6101 	spdk_bit_array_set(bs->used_blobids, page_idx);
6102 	bs_claim_md_page(bs, page_idx);
6103 	spdk_spin_unlock(&bs->used_lock);
6104 
6105 	id = bs_page_to_blobid(page_idx);
6106 
6107 	SPDK_DEBUGLOG(blob, "Creating blob with id 0x%" PRIx64 " at page %u\n", id, page_idx);
6108 
6109 	spdk_blob_opts_init(&opts_local, sizeof(opts_local));
6110 	if (opts) {
6111 		blob_opts_copy(opts, &opts_local);
6112 	}
6113 
6114 	blob = blob_alloc(bs, id);
6115 	if (!blob) {
6116 		rc = -ENOMEM;
6117 		goto error;
6118 	}
6119 
6120 	blob->use_extent_table = opts_local.use_extent_table;
6121 	if (blob->use_extent_table) {
6122 		blob->invalid_flags |= SPDK_BLOB_EXTENT_TABLE;
6123 	}
6124 
6125 	if (!internal_xattrs) {
6126 		blob_xattrs_init(&internal_xattrs_default);
6127 		internal_xattrs = &internal_xattrs_default;
6128 	}
6129 
6130 	rc = blob_set_xattrs(blob, &opts_local.xattrs, false);
6131 	if (rc < 0) {
6132 		goto error;
6133 	}
6134 
6135 	rc = blob_set_xattrs(blob, internal_xattrs, true);
6136 	if (rc < 0) {
6137 		goto error;
6138 	}
6139 
6140 	if (opts_local.thin_provision) {
6141 		blob_set_thin_provision(blob);
6142 	}
6143 
6144 	blob_set_clear_method(blob, opts_local.clear_method);
6145 
6146 	if (opts_local.esnap_id != NULL) {
6147 		if (opts_local.esnap_id_len > UINT16_MAX) {
6148 			SPDK_ERRLOG("esnap id length %" PRIu64 " is too long\n",
6149 				    opts_local.esnap_id_len);
6150 			rc = -EINVAL;
6151 			goto error;
6153 		}
6154 		blob_set_thin_provision(blob);
6155 		blob->invalid_flags |= SPDK_BLOB_EXTERNAL_SNAPSHOT;
6156 		rc = blob_set_xattr(blob, BLOB_EXTERNAL_SNAPSHOT_ID,
6157 				    opts_local.esnap_id, opts_local.esnap_id_len, true);
6158 		if (rc != 0) {
6159 			goto error;
6160 		}
6161 	}
6162 
6163 	rc = blob_resize(blob, opts_local.num_clusters);
6164 	if (rc < 0) {
6165 		goto error;
6166 	}
6167 	cpl.type = SPDK_BS_CPL_TYPE_BLOBID;
6168 	cpl.u.blobid.cb_fn = cb_fn;
6169 	cpl.u.blobid.cb_arg = cb_arg;
6170 	cpl.u.blobid.blobid = blob->id;
6171 
6172 	seq = bs_sequence_start_bs(bs->md_channel, &cpl);
6173 	if (!seq) {
6174 		rc = -ENOMEM;
6175 		goto error;
6176 	}
6177 
6178 	blob_persist(seq, blob, bs_create_blob_cpl, blob);
6179 	return;
6180 
6181 error:
6182 	SPDK_ERRLOG("Failed to create blob: %s, size %" PRIu64 " clusters\n",
6183 		    spdk_strerror(rc), opts_local.num_clusters);
6184 	if (blob != NULL) {
6185 		blob_free(blob);
6186 	}
6187 	spdk_spin_lock(&bs->used_lock);
6188 	spdk_bit_array_clear(bs->used_blobids, page_idx);
6189 	bs_release_md_page(bs, page_idx);
6190 	spdk_spin_unlock(&bs->used_lock);
6191 	cb_fn(cb_arg, 0, rc);
6192 }
6193 
6194 void
6195 spdk_bs_create_blob(struct spdk_blob_store *bs,
6196 		    spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
6197 {
6198 	bs_create_blob(bs, NULL, NULL, cb_fn, cb_arg);
6199 }
6200 
6201 void
6202 spdk_bs_create_blob_ext(struct spdk_blob_store *bs, const struct spdk_blob_opts *opts,
6203 			spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
6204 {
6205 	bs_create_blob(bs, opts, NULL, cb_fn, cb_arg);
6206 }
6207 
6208 /* END spdk_bs_create_blob */
6209 
6210 /* START blob_cleanup */
6211 
6212 struct spdk_clone_snapshot_ctx {
6213 	struct spdk_bs_cpl      cpl;
6214 	int bserrno;
6215 	bool frozen;
6216 
6217 	struct spdk_io_channel *channel;
6218 
6219 	/* Current cluster for inflate operation */
6220 	uint64_t cluster;
6221 
6222 	/* For inflation, force allocation of all unallocated clusters and remove
6223 	 * thin provisioning. Otherwise only decouple the parent and keep the clone thin. */
6224 	bool allocate_all;
6225 
6226 	struct {
6227 		spdk_blob_id id;
6228 		struct spdk_blob *blob;
6229 		bool md_ro;
6230 	} original;
6231 	struct {
6232 		spdk_blob_id id;
6233 		struct spdk_blob *blob;
6234 	} new;
6235 
6236 	/* xattrs specified for snapshot/clones only. They have no impact on
6237 	 * the original blob's xattrs. */
6238 	const struct spdk_blob_xattr_opts *xattrs;
6239 };
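
/*
 * spdk_clone_snapshot_ctx is shared by the snapshot, clone and inflate
 * state machines below. Each operation opens the original blob, performs
 * its blob creation and/or cluster work, and then unwinds through the
 * cleanup helpers that follow, which close the blobs involved and finally
 * complete ctx->cpl back to the caller.
 */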
6240 
6241 static void
6242 bs_clone_snapshot_cleanup_finish(void *cb_arg, int bserrno)
6243 {
6244 	struct spdk_clone_snapshot_ctx *ctx = cb_arg;
6245 	struct spdk_bs_cpl *cpl = &ctx->cpl;
6246 
6247 	if (bserrno != 0) {
6248 		if (ctx->bserrno != 0) {
6249 			SPDK_ERRLOG("Cleanup error %d\n", bserrno);
6250 		} else {
6251 			ctx->bserrno = bserrno;
6252 		}
6253 	}
6254 
6255 	switch (cpl->type) {
6256 	case SPDK_BS_CPL_TYPE_BLOBID:
6257 		cpl->u.blobid.cb_fn(cpl->u.blobid.cb_arg, cpl->u.blobid.blobid, ctx->bserrno);
6258 		break;
6259 	case SPDK_BS_CPL_TYPE_BLOB_BASIC:
6260 		cpl->u.blob_basic.cb_fn(cpl->u.blob_basic.cb_arg, ctx->bserrno);
6261 		break;
6262 	default:
6263 		SPDK_UNREACHABLE();
6264 		break;
6265 	}
6266 
6267 	free(ctx);
6268 }
6269 
6270 static void
6271 bs_snapshot_unfreeze_cpl(void *cb_arg, int bserrno)
6272 {
6273 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
6274 	struct spdk_blob *origblob = ctx->original.blob;
6275 
6276 	if (bserrno != 0) {
6277 		if (ctx->bserrno != 0) {
6278 			SPDK_ERRLOG("Unfreeze error %d\n", bserrno);
6279 		} else {
6280 			ctx->bserrno = bserrno;
6281 		}
6282 	}
6283 
6284 	ctx->original.id = origblob->id;
6285 	origblob->locked_operation_in_progress = false;
6286 
6287 	/* Revert md_ro to original state */
6288 	origblob->md_ro = ctx->original.md_ro;
6289 
6290 	spdk_blob_close(origblob, bs_clone_snapshot_cleanup_finish, ctx);
6291 }
6292 
6293 static void
6294 bs_clone_snapshot_origblob_cleanup(void *cb_arg, int bserrno)
6295 {
6296 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
6297 	struct spdk_blob *origblob = ctx->original.blob;
6298 
6299 	if (bserrno != 0) {
6300 		if (ctx->bserrno != 0) {
6301 			SPDK_ERRLOG("Cleanup error %d\n", bserrno);
6302 		} else {
6303 			ctx->bserrno = bserrno;
6304 		}
6305 	}
6306 
6307 	if (ctx->frozen) {
6308 		/* Unfreeze any outstanding I/O */
6309 		blob_unfreeze_io(origblob, bs_snapshot_unfreeze_cpl, ctx);
6310 	} else {
6311 		bs_snapshot_unfreeze_cpl(ctx, 0);
6312 	}
6314 }
6315 
6316 static void
6317 bs_clone_snapshot_newblob_cleanup(struct spdk_clone_snapshot_ctx *ctx, int bserrno)
6318 {
6319 	struct spdk_blob *newblob = ctx->new.blob;
6320 
6321 	if (bserrno != 0) {
6322 		if (ctx->bserrno != 0) {
6323 			SPDK_ERRLOG("Cleanup error %d\n", bserrno);
6324 		} else {
6325 			ctx->bserrno = bserrno;
6326 		}
6327 	}
6328 
6329 	ctx->new.id = newblob->id;
6330 	spdk_blob_close(newblob, bs_clone_snapshot_origblob_cleanup, ctx);
6331 }
6332 
6333 /* END blob_cleanup */
6334 
6335 /* START spdk_bs_create_snapshot */
6336 
6337 static void
6338 bs_snapshot_swap_cluster_maps(struct spdk_blob *blob1, struct spdk_blob *blob2)
6339 {
6340 	uint64_t *cluster_temp;
6341 	uint32_t *extent_page_temp;
6342 
6343 	cluster_temp = blob1->active.clusters;
6344 	blob1->active.clusters = blob2->active.clusters;
6345 	blob2->active.clusters = cluster_temp;
6346 
6347 	extent_page_temp = blob1->active.extent_pages;
6348 	blob1->active.extent_pages = blob2->active.extent_pages;
6349 	blob2->active.extent_pages = extent_page_temp;
6350 }
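
/*
 * This swap is the heart of snapshot creation: the new blob (the snapshot)
 * takes ownership of the allocated clusters and extent pages, while the
 * original blob is left with an empty map and becomes a thin-provisioned
 * clone of the snapshot. Error paths below simply swap again to restore
 * the original ownership.
 */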
6351 
6352 /* Copies an internal xattr */
6353 static int
6354 bs_snapshot_copy_xattr(struct spdk_blob *toblob, struct spdk_blob *fromblob, const char *name)
6355 {
6356 	const void	*val = NULL;
6357 	size_t		len;
6358 	int		bserrno;
6359 
6360 	bserrno = blob_get_xattr_value(fromblob, name, &val, &len, true);
6361 	if (bserrno != 0) {
6362 		SPDK_ERRLOG("blob 0x%" PRIx64 " missing %s XATTR\n", fromblob->id, name);
6363 		return bserrno;
6364 	}
6365 
6366 	bserrno = blob_set_xattr(toblob, name, val, len, true);
6367 	if (bserrno != 0) {
6368 		SPDK_ERRLOG("could not set %s XATTR on blob 0x%" PRIx64 "\n",
6369 			    name, toblob->id);
6370 		return bserrno;
6371 	}
6372 	return 0;
6373 }
6374 
6375 static void
6376 bs_snapshot_origblob_sync_cpl(void *cb_arg, int bserrno)
6377 {
6378 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
6379 	struct spdk_blob *origblob = ctx->original.blob;
6380 	struct spdk_blob *newblob = ctx->new.blob;
6381 
6382 	if (bserrno != 0) {
6383 		bs_snapshot_swap_cluster_maps(newblob, origblob);
6384 		if (blob_is_esnap_clone(newblob)) {
6385 			bs_snapshot_copy_xattr(origblob, newblob, BLOB_EXTERNAL_SNAPSHOT_ID);
6386 			origblob->invalid_flags |= SPDK_BLOB_EXTERNAL_SNAPSHOT;
6387 		}
6388 		bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
6389 		return;
6390 	}
6391 
6392 	/* Remove metadata descriptor SNAPSHOT_IN_PROGRESS */
6393 	bserrno = blob_remove_xattr(newblob, SNAPSHOT_IN_PROGRESS, true);
6394 	if (bserrno != 0) {
6395 		bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
6396 		return;
6397 	}
6398 
6399 	bs_blob_list_add(ctx->original.blob);
6400 
6401 	spdk_blob_set_read_only(newblob);
6402 
6403 	/* sync snapshot metadata */
6404 	spdk_blob_sync_md(newblob, bs_clone_snapshot_origblob_cleanup, ctx);
6405 }
6406 
6407 static void
6408 bs_snapshot_newblob_sync_cpl(void *cb_arg, int bserrno)
6409 {
6410 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
6411 	struct spdk_blob *origblob = ctx->original.blob;
6412 	struct spdk_blob *newblob = ctx->new.blob;
6413 
6414 	if (bserrno != 0) {
6415 		/* return cluster map back to original */
6416 		bs_snapshot_swap_cluster_maps(newblob, origblob);
6417 
6418 		/* Newblob md sync failed. Valid clusters are only present in origblob.
6419 		 * Since I/O is frozen on origblob, no changes to the zeroed-out cluster map should have occurred.
6420 		 * Newblob needs to be reverted to the thin_provisioned state it had at creation to properly close. */
6421 		blob_set_thin_provision(newblob);
6422 		assert(spdk_mem_all_zero(newblob->active.clusters,
6423 					 newblob->active.num_clusters * sizeof(*newblob->active.clusters)));
6424 		assert(spdk_mem_all_zero(newblob->active.extent_pages,
6425 					 newblob->active.num_extent_pages * sizeof(*newblob->active.extent_pages)));
6426 
6427 		bs_clone_snapshot_newblob_cleanup(ctx, bserrno);
6428 		return;
6429 	}
6430 
6431 	/* Set internal xattr for snapshot id */
6432 	bserrno = blob_set_xattr(origblob, BLOB_SNAPSHOT, &newblob->id, sizeof(spdk_blob_id), true);
6433 	if (bserrno != 0) {
6434 		/* return cluster map back to original */
6435 		bs_snapshot_swap_cluster_maps(newblob, origblob);
6436 		blob_set_thin_provision(newblob);
6437 		bs_clone_snapshot_newblob_cleanup(ctx, bserrno);
6438 		return;
6439 	}
6440 
6441 	/* Create new back_bs_dev for snapshot */
6442 	origblob->back_bs_dev = bs_create_blob_bs_dev(newblob);
6443 	if (origblob->back_bs_dev == NULL) {
6444 		/* return cluster map back to original */
6445 		bs_snapshot_swap_cluster_maps(newblob, origblob);
6446 		blob_set_thin_provision(newblob);
6447 		bs_clone_snapshot_newblob_cleanup(ctx, -EINVAL);
6448 		return;
6449 	}
6450 
6451 	/* Remove the xattr that references an external snapshot */
6452 	if (blob_is_esnap_clone(origblob)) {
6453 		origblob->invalid_flags &= ~SPDK_BLOB_EXTERNAL_SNAPSHOT;
6454 		bserrno = blob_remove_xattr(origblob, BLOB_EXTERNAL_SNAPSHOT_ID, true);
6455 		if (bserrno != 0) {
6456 			if (bserrno == -ENOENT) {
6457 				SPDK_ERRLOG("blob 0x%" PRIx64 " has no " BLOB_EXTERNAL_SNAPSHOT_ID
6458 					    " xattr to remove\n", origblob->id);
6459 				assert(false);
6460 			} else {
6461 				/* return cluster map back to original */
6462 				bs_snapshot_swap_cluster_maps(newblob, origblob);
6463 				blob_set_thin_provision(newblob);
6464 				bs_clone_snapshot_newblob_cleanup(ctx, bserrno);
6465 				return;
6466 			}
6467 		}
6468 	}
6469 
6470 	bs_blob_list_remove(origblob);
6471 	origblob->parent_id = newblob->id;
6472 	/* set clone blob as thin provisioned */
6473 	blob_set_thin_provision(origblob);
6474 
6475 	bs_blob_list_add(newblob);
6476 
6477 	/* sync clone metadata */
6478 	spdk_blob_sync_md(origblob, bs_snapshot_origblob_sync_cpl, ctx);
6479 }
6480 
6481 static void
6482 bs_snapshot_freeze_cpl(void *cb_arg, int rc)
6483 {
6484 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
6485 	struct spdk_blob *origblob = ctx->original.blob;
6486 	struct spdk_blob *newblob = ctx->new.blob;
6487 	int bserrno;
6488 
6489 	if (rc != 0) {
6490 		bs_clone_snapshot_newblob_cleanup(ctx, rc);
6491 		return;
6492 	}
6493 
6494 	ctx->frozen = true;
6495 
6496 	if (blob_is_esnap_clone(origblob)) {
6497 		/* Clean up any channels associated with the original blob id, because future
6498 		 * I/O will be performed using the snapshot's blob_id.
6499 		 */
6500 		blob_esnap_destroy_bs_dev_channels(origblob, false, NULL, NULL);
6501 	}
6502 	if (newblob->back_bs_dev) {
6503 		blob_back_bs_destroy(newblob);
6504 	}
6505 	/* set new back_bs_dev for snapshot */
6506 	newblob->back_bs_dev = origblob->back_bs_dev;
6507 	/* Set invalid flags from origblob */
6508 	newblob->invalid_flags = origblob->invalid_flags;
6509 
6510 	/* inherit parent from original blob if set */
6511 	newblob->parent_id = origblob->parent_id;
6512 	switch (origblob->parent_id) {
6513 	case SPDK_BLOBID_EXTERNAL_SNAPSHOT:
6514 		bserrno = bs_snapshot_copy_xattr(newblob, origblob, BLOB_EXTERNAL_SNAPSHOT_ID);
6515 		if (bserrno != 0) {
6516 			bs_clone_snapshot_newblob_cleanup(ctx, bserrno);
6517 			return;
6518 		}
6519 		break;
6520 	case SPDK_BLOBID_INVALID:
6521 		break;
6522 	default:
6523 		/* Set internal xattr for snapshot id */
6524 		bserrno = blob_set_xattr(newblob, BLOB_SNAPSHOT,
6525 					 &origblob->parent_id, sizeof(spdk_blob_id), true);
6526 		if (bserrno != 0) {
6527 			bs_clone_snapshot_newblob_cleanup(ctx, bserrno);
6528 			return;
6529 		}
6530 	}
6531 
6532 	/* swap cluster maps */
6533 	bs_snapshot_swap_cluster_maps(newblob, origblob);
6534 
6535 	/* Set the clear method on the new blob to match the original. */
6536 	blob_set_clear_method(newblob, origblob->clear_method);
6537 
6538 	/* sync snapshot metadata */
6539 	spdk_blob_sync_md(newblob, bs_snapshot_newblob_sync_cpl, ctx);
6540 }
6541 
6542 static void
6543 bs_snapshot_newblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
6544 {
6545 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
6546 	struct spdk_blob *origblob = ctx->original.blob;
6547 	struct spdk_blob *newblob = _blob;
6548 
6549 	if (bserrno != 0) {
6550 		bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
6551 		return;
6552 	}
6553 
6554 	ctx->new.blob = newblob;
6555 	assert(spdk_blob_is_thin_provisioned(newblob));
6556 	assert(spdk_mem_all_zero(newblob->active.clusters,
6557 				 newblob->active.num_clusters * sizeof(*newblob->active.clusters)));
6558 	assert(spdk_mem_all_zero(newblob->active.extent_pages,
6559 				 newblob->active.num_extent_pages * sizeof(*newblob->active.extent_pages)));
6560 
6561 	blob_freeze_io(origblob, bs_snapshot_freeze_cpl, ctx);
6562 }
6563 
6564 static void
6565 bs_snapshot_newblob_create_cpl(void *cb_arg, spdk_blob_id blobid, int bserrno)
6566 {
6567 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
6568 	struct spdk_blob *origblob = ctx->original.blob;
6569 
6570 	if (bserrno != 0) {
6571 		bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
6572 		return;
6573 	}
6574 
6575 	ctx->new.id = blobid;
6576 	ctx->cpl.u.blobid.blobid = blobid;
6577 
6578 	spdk_bs_open_blob(origblob->bs, ctx->new.id, bs_snapshot_newblob_open_cpl, ctx);
6579 }
6580 
6582 static void
6583 bs_xattr_snapshot(void *arg, const char *name,
6584 		  const void **value, size_t *value_len)
6585 {
6586 	struct spdk_blob *blob = (struct spdk_blob *)arg;
6587 
6588 	assert(strncmp(name, SNAPSHOT_IN_PROGRESS, sizeof(SNAPSHOT_IN_PROGRESS)) == 0);
6589 	*value = &blob->id;
6590 	*value_len = sizeof(blob->id);
6591 }
6592 
6593 static void
6594 bs_snapshot_origblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
6595 {
6596 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
6597 	struct spdk_blob_opts opts;
6598 	struct spdk_blob_xattr_opts internal_xattrs;
6599 	char *xattrs_names[] = { SNAPSHOT_IN_PROGRESS };
6600 
6601 	if (bserrno != 0) {
6602 		bs_clone_snapshot_cleanup_finish(ctx, bserrno);
6603 		return;
6604 	}
6605 
6606 	ctx->original.blob = _blob;
6607 
6608 	if (_blob->data_ro || _blob->md_ro) {
6609 		SPDK_DEBUGLOG(blob, "Cannot create snapshot from read-only blob with id 0x%"
6610 			      PRIx64 "\n", _blob->id);
6611 		ctx->bserrno = -EINVAL;
6612 		spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx);
6613 		return;
6614 	}
6615 
6616 	if (_blob->locked_operation_in_progress) {
6617 		SPDK_DEBUGLOG(blob, "Cannot create snapshot - another operation in progress\n");
6618 		ctx->bserrno = -EBUSY;
6619 		spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx);
6620 		return;
6621 	}
6622 
6623 	_blob->locked_operation_in_progress = true;
6624 
6625 	spdk_blob_opts_init(&opts, sizeof(opts));
6626 	blob_xattrs_init(&internal_xattrs);
6627 
6628 	/* Make the new blob the same size as the original blob,
6629 	 * but do not allocate any clusters */
6630 	opts.thin_provision = true;
6631 	opts.num_clusters = spdk_blob_get_num_clusters(_blob);
6632 	opts.use_extent_table = _blob->use_extent_table;
6633 
6634 	/* If there are any xattrs specified for snapshot, set them now */
6635 	if (ctx->xattrs) {
6636 		memcpy(&opts.xattrs, ctx->xattrs, sizeof(*ctx->xattrs));
6637 	}
6638 	/* Set internal xattr SNAPSHOT_IN_PROGRESS */
6639 	internal_xattrs.count = 1;
6640 	internal_xattrs.ctx = _blob;
6641 	internal_xattrs.names = xattrs_names;
6642 	internal_xattrs.get_value = bs_xattr_snapshot;
6643 
6644 	bs_create_blob(_blob->bs, &opts, &internal_xattrs,
6645 		       bs_snapshot_newblob_create_cpl, ctx);
6646 }
6647 
6648 void
6649 spdk_bs_create_snapshot(struct spdk_blob_store *bs, spdk_blob_id blobid,
6650 			const struct spdk_blob_xattr_opts *snapshot_xattrs,
6651 			spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
6652 {
6653 	struct spdk_clone_snapshot_ctx *ctx = calloc(1, sizeof(*ctx));
6654 
6655 	if (!ctx) {
6656 		cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOMEM);
6657 		return;
6658 	}
6659 	ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOBID;
6660 	ctx->cpl.u.blobid.cb_fn = cb_fn;
6661 	ctx->cpl.u.blobid.cb_arg = cb_arg;
6662 	ctx->cpl.u.blobid.blobid = SPDK_BLOBID_INVALID;
6663 	ctx->bserrno = 0;
6664 	ctx->frozen = false;
6665 	ctx->original.id = blobid;
6666 	ctx->xattrs = snapshot_xattrs;
6667 
6668 	spdk_bs_open_blob(bs, ctx->original.id, bs_snapshot_origblob_open_cpl, ctx);
6669 }
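
/*
 * Usage sketch (callback names are illustrative):
 *
 *	static void
 *	snapshot_done(void *cb_arg, spdk_blob_id snapshot_id, int bserrno)
 *	{
 *		...
 *	}
 *
 *	spdk_bs_create_snapshot(bs, blob_id, NULL, snapshot_done, NULL);
 *
 * On success the original blob becomes a thin-provisioned clone of the new
 * read-only snapshot.
 */
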
6670 /* END spdk_bs_create_snapshot */
6671 
6672 /* START spdk_bs_create_clone */
6673 
6674 static void
6675 bs_xattr_clone(void *arg, const char *name,
6676 	       const void **value, size_t *value_len)
6677 {
6678 	struct spdk_blob *blob = (struct spdk_blob *)arg;
6679 
6680 	assert(strncmp(name, BLOB_SNAPSHOT, sizeof(BLOB_SNAPSHOT)) == 0);
6681 	*value = &blob->id;
6682 	*value_len = sizeof(blob->id);
6683 }
6684 
6685 static void
6686 bs_clone_newblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
6687 {
6688 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
6689 	struct spdk_blob *clone = _blob;
6690 
6691 
	if (bserrno != 0) {
		bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
		return;
	}

6692 	ctx->new.blob = clone;
6693 
6694 	spdk_blob_close(clone, bs_clone_snapshot_origblob_cleanup, ctx);
6695 }
6696 
6697 static void
6698 bs_clone_newblob_create_cpl(void *cb_arg, spdk_blob_id blobid, int bserrno)
6699 {
6700 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
6701 
	if (bserrno != 0) {
		bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
		return;
	}

6702 	ctx->cpl.u.blobid.blobid = blobid;
6703 	spdk_bs_open_blob(ctx->original.blob->bs, blobid, bs_clone_newblob_open_cpl, ctx);
6704 }
6705 
6706 static void
6707 bs_clone_origblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
6708 {
6709 	struct spdk_clone_snapshot_ctx	*ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
6710 	struct spdk_blob_opts		opts;
6711 	struct spdk_blob_xattr_opts internal_xattrs;
6712 	char *xattr_names[] = { BLOB_SNAPSHOT };
6713 
6714 	if (bserrno != 0) {
6715 		bs_clone_snapshot_cleanup_finish(ctx, bserrno);
6716 		return;
6717 	}
6718 
6719 	ctx->original.blob = _blob;
6720 	ctx->original.md_ro = _blob->md_ro;
6721 
6722 	if (!_blob->data_ro || !_blob->md_ro) {
6723 		SPDK_DEBUGLOG(blob, "Cannot create clone from a blob that is not read-only\n");
6724 		ctx->bserrno = -EINVAL;
6725 		spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx);
6726 		return;
6727 	}
6728 
6729 	if (_blob->locked_operation_in_progress) {
6730 		SPDK_DEBUGLOG(blob, "Cannot create clone - another operation in progress\n");
6731 		ctx->bserrno = -EBUSY;
6732 		spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx);
6733 		return;
6734 	}
6735 
6736 	_blob->locked_operation_in_progress = true;
6737 
6738 	spdk_blob_opts_init(&opts, sizeof(opts));
6739 	blob_xattrs_init(&internal_xattrs);
6740 
6741 	opts.thin_provision = true;
6742 	opts.num_clusters = spdk_blob_get_num_clusters(_blob);
6743 	opts.use_extent_table = _blob->use_extent_table;
6744 	if (ctx->xattrs) {
6745 		memcpy(&opts.xattrs, ctx->xattrs, sizeof(*ctx->xattrs));
6746 	}
6747 
6748 	/* Set internal xattr BLOB_SNAPSHOT */
6749 	internal_xattrs.count = 1;
6750 	internal_xattrs.ctx = _blob;
6751 	internal_xattrs.names = xattr_names;
6752 	internal_xattrs.get_value = bs_xattr_clone;
6753 
6754 	bs_create_blob(_blob->bs, &opts, &internal_xattrs,
6755 		       bs_clone_newblob_create_cpl, ctx);
6756 }
6757 
6758 void
6759 spdk_bs_create_clone(struct spdk_blob_store *bs, spdk_blob_id blobid,
6760 		     const struct spdk_blob_xattr_opts *clone_xattrs,
6761 		     spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
6762 {
6763 	struct spdk_clone_snapshot_ctx	*ctx = calloc(1, sizeof(*ctx));
6764 
6765 	if (!ctx) {
6766 		cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOMEM);
6767 		return;
6768 	}
6769 
6770 	ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOBID;
6771 	ctx->cpl.u.blobid.cb_fn = cb_fn;
6772 	ctx->cpl.u.blobid.cb_arg = cb_arg;
6773 	ctx->cpl.u.blobid.blobid = SPDK_BLOBID_INVALID;
6774 	ctx->bserrno = 0;
6775 	ctx->xattrs = clone_xattrs;
6776 	ctx->original.id = blobid;
6777 
6778 	spdk_bs_open_blob(bs, ctx->original.id, bs_clone_origblob_open_cpl, ctx);
6779 }
6780 
6781 /* END spdk_bs_create_clone */
6782 
6783 /* START spdk_bs_inflate_blob */
6784 
6785 static void
6786 bs_inflate_blob_set_parent_cpl(void *cb_arg, struct spdk_blob *_parent, int bserrno)
6787 {
6788 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
6789 	struct spdk_blob *_blob = ctx->original.blob;
6790 
6791 	if (bserrno != 0) {
6792 		bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
6793 		return;
6794 	}
6795 
6796 	/* Temporarily override md_ro flag for MD modification */
6797 	_blob->md_ro = false;
6798 
6799 	bserrno = blob_set_xattr(_blob, BLOB_SNAPSHOT, &_parent->id, sizeof(spdk_blob_id), true);
6800 	if (bserrno != 0) {
6801 		bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
6802 		return;
6803 	}
6804 
6805 	assert(_parent != NULL);
6806 
6807 	bs_blob_list_remove(_blob);
6808 	_blob->parent_id = _parent->id;
6809 
6810 	blob_back_bs_destroy(_blob);
6811 	_blob->back_bs_dev = bs_create_blob_bs_dev(_parent);
6812 	bs_blob_list_add(_blob);
6813 
6814 	spdk_blob_sync_md(_blob, bs_clone_snapshot_origblob_cleanup, ctx);
6815 }
6816 
6817 static void
6818 bs_inflate_blob_done(struct spdk_clone_snapshot_ctx *ctx)
6819 {
6820 	struct spdk_blob *_blob = ctx->original.blob;
6821 	struct spdk_blob *_parent;
6822 
6823 	if (ctx->allocate_all) {
6824 		/* remove thin provisioning */
6825 		bs_blob_list_remove(_blob);
6826 		if (_blob->parent_id == SPDK_BLOBID_EXTERNAL_SNAPSHOT) {
6827 			blob_remove_xattr(_blob, BLOB_EXTERNAL_SNAPSHOT_ID, true);
6828 			_blob->invalid_flags &= ~SPDK_BLOB_EXTERNAL_SNAPSHOT;
6829 		} else {
6830 			blob_remove_xattr(_blob, BLOB_SNAPSHOT, true);
6831 		}
6832 		_blob->invalid_flags = _blob->invalid_flags & ~SPDK_BLOB_THIN_PROV;
6833 		blob_back_bs_destroy(_blob);
6834 		_blob->parent_id = SPDK_BLOBID_INVALID;
6835 	} else {
6836 		/* For now, esnap clones always have allocate_all set. */
6837 		assert(!blob_is_esnap_clone(_blob));
6838 
6839 		_parent = ((struct spdk_blob_bs_dev *)(_blob->back_bs_dev))->blob;
6840 		if (_parent->parent_id != SPDK_BLOBID_INVALID) {
6841 			/* We must change the parent of the inflated blob */
6842 			spdk_bs_open_blob(_blob->bs, _parent->parent_id,
6843 					  bs_inflate_blob_set_parent_cpl, ctx);
6844 			return;
6845 		}
6846 
6847 		bs_blob_list_remove(_blob);
6848 		_blob->parent_id = SPDK_BLOBID_INVALID;
6849 		blob_back_bs_destroy(_blob);
6850 		_blob->back_bs_dev = bs_create_zeroes_dev();
6851 	}
6852 
6853 	/* Temporarily override md_ro flag for MD modification */
6854 	_blob->md_ro = false;
6855 	blob_remove_xattr(_blob, BLOB_SNAPSHOT, true);
6856 	_blob->state = SPDK_BLOB_STATE_DIRTY;
6857 
6858 	spdk_blob_sync_md(_blob, bs_clone_snapshot_origblob_cleanup, ctx);
6859 }
6860 
6861 /* Check if cluster needs allocation */
6862 static inline bool
6863 bs_cluster_needs_allocation(struct spdk_blob *blob, uint64_t cluster, bool allocate_all)
6864 {
6865 	struct spdk_blob_bs_dev *b;
6866 
6867 	assert(blob != NULL);
6868 
6869 	if (blob->active.clusters[cluster] != 0) {
6870 		/* Cluster is already allocated */
6871 		return false;
6872 	}
6873 
6874 	if (blob->parent_id == SPDK_BLOBID_INVALID) {
6875 		/* The blob has no parent */
6876 		return allocate_all;
6877 	}
6878 
6879 	if (blob->parent_id == SPDK_BLOBID_EXTERNAL_SNAPSHOT) {
6880 		return true;
6881 	}
6882 
6883 	b = (struct spdk_blob_bs_dev *)blob->back_bs_dev;
6884 	return (allocate_all || b->blob->active.clusters[cluster] != 0);
6885 }
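
/*
 * In summary: an already-allocated cluster never needs work; a blob with
 * no parent allocates only when inflating (allocate_all); an esnap clone's
 * clusters are always copied because the external device cannot be
 * inspected cheaply; otherwise a cluster is copied when inflating or when
 * the immediate parent actually holds data for it.
 */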
6886 
6887 static void
6888 bs_inflate_blob_touch_next(void *cb_arg, int bserrno)
6889 {
6890 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
6891 	struct spdk_blob *_blob = ctx->original.blob;
6892 	struct spdk_bs_cpl cpl;
6893 	spdk_bs_user_op_t *op;
6894 	uint64_t offset;
6895 
6896 	if (bserrno != 0) {
6897 		bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
6898 		return;
6899 	}
6900 
6901 	for (; ctx->cluster < _blob->active.num_clusters; ctx->cluster++) {
6902 		if (bs_cluster_needs_allocation(_blob, ctx->cluster, ctx->allocate_all)) {
6903 			break;
6904 		}
6905 	}
6906 
6907 	if (ctx->cluster < _blob->active.num_clusters) {
6908 		offset = bs_cluster_to_lba(_blob->bs, ctx->cluster);
6909 
6910 		/* It is safe to advance the cluster index before the copy starts */
6911 		ctx->cluster++;
6912 
6913 		/* Use a dummy 0B read as a context for cluster copy */
6914 		cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
6915 		cpl.u.blob_basic.cb_fn = bs_inflate_blob_touch_next;
6916 		cpl.u.blob_basic.cb_arg = ctx;
6917 
6918 		op = bs_user_op_alloc(ctx->channel, &cpl, SPDK_BLOB_READ, _blob,
6919 				      NULL, 0, offset, 0);
6920 		if (!op) {
6921 			bs_clone_snapshot_origblob_cleanup(ctx, -ENOMEM);
6922 			return;
6923 		}
6924 
6925 		bs_allocate_and_copy_cluster(_blob, ctx->channel, offset, op);
6926 	} else {
6927 		bs_inflate_blob_done(ctx);
6928 	}
6929 }
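
/*
 * Each pass above issues a zero-length read whose only purpose is to act
 * as the user operation driving bs_allocate_and_copy_cluster(); its
 * completion re-enters bs_inflate_blob_touch_next(), so clusters are
 * copied one at a time until the scan runs off the end of the blob.
 */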
6930 
6931 static void
6932 bs_inflate_blob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
6933 {
6934 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
6935 	uint64_t clusters_needed;
6936 	uint64_t i;
6937 
6938 	if (bserrno != 0) {
6939 		bs_clone_snapshot_cleanup_finish(ctx, bserrno);
6940 		return;
6941 	}
6942 
6943 	ctx->original.blob = _blob;
6944 	ctx->original.md_ro = _blob->md_ro;
6945 
6946 	if (_blob->locked_operation_in_progress) {
6947 		SPDK_DEBUGLOG(blob, "Cannot inflate blob - another operation in progress\n");
6948 		ctx->bserrno = -EBUSY;
6949 		spdk_blob_close(_blob, bs_clone_snapshot_cleanup_finish, ctx);
6950 		return;
6951 	}
6952 
6953 	_blob->locked_operation_in_progress = true;
6954 
6955 	switch (_blob->parent_id) {
6956 	case SPDK_BLOBID_INVALID:
6957 		if (!ctx->allocate_all) {
6958 			/* This blob has no parent, so we cannot decouple it. */
6959 			SPDK_ERRLOG("Cannot decouple parent of blob with no parent.\n");
6960 			bs_clone_snapshot_origblob_cleanup(ctx, -EINVAL);
6961 			return;
6962 		}
6963 		break;
6964 	case SPDK_BLOBID_EXTERNAL_SNAPSHOT:
6965 		/*
6966 		 * It would be better to rely on back_bs_dev->is_zeroes() to determine which
6967 		 * clusters require allocation. Until there is a blobstore consumer that
6968 		 * uses esnaps with an spdk_bs_dev that implements a useful is_zeroes(), it is
6969 		 * not worth the effort.
6970 		 */
6971 		ctx->allocate_all = true;
6972 		break;
6973 	default:
6974 		break;
6975 	}
6976 
6977 	if (spdk_blob_is_thin_provisioned(_blob) == false) {
6978 		/* This is not a thin-provisioned blob, so there is nothing to inflate. */
6979 		bs_clone_snapshot_origblob_cleanup(ctx, 0);
6980 		return;
6981 	}
6982 
6983 	/* Do two passes - one to verify that we can obtain enough clusters
6984 	 * and another to actually claim them.
6985 	 */
6986 	clusters_needed = 0;
6987 	for (i = 0; i < _blob->active.num_clusters; i++) {
6988 		if (bs_cluster_needs_allocation(_blob, i, ctx->allocate_all)) {
6989 			clusters_needed++;
6990 		}
6991 	}
6992 
6993 	if (clusters_needed > _blob->bs->num_free_clusters) {
6994 		/* Not enough free clusters. Cannot satisfy the request. */
6995 		bs_clone_snapshot_origblob_cleanup(ctx, -ENOSPC);
6996 		return;
6997 	}
6998 
6999 	ctx->cluster = 0;
7000 	bs_inflate_blob_touch_next(ctx, 0);
7001 }
7002 
7003 static void
7004 bs_inflate_blob(struct spdk_blob_store *bs, struct spdk_io_channel *channel,
7005 		spdk_blob_id blobid, bool allocate_all, spdk_blob_op_complete cb_fn, void *cb_arg)
7006 {
7007 	struct spdk_clone_snapshot_ctx *ctx = calloc(1, sizeof(*ctx));
7008 
7009 	if (!ctx) {
7010 		cb_fn(cb_arg, -ENOMEM);
7011 		return;
7012 	}
7013 	ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
7014 	ctx->cpl.u.bs_basic.cb_fn = cb_fn;
7015 	ctx->cpl.u.bs_basic.cb_arg = cb_arg;
7016 	ctx->bserrno = 0;
7017 	ctx->original.id = blobid;
7018 	ctx->channel = channel;
7019 	ctx->allocate_all = allocate_all;
7020 
7021 	spdk_bs_open_blob(bs, ctx->original.id, bs_inflate_blob_open_cpl, ctx);
7022 }
7023 
7024 void
7025 spdk_bs_inflate_blob(struct spdk_blob_store *bs, struct spdk_io_channel *channel,
7026 		     spdk_blob_id blobid, spdk_blob_op_complete cb_fn, void *cb_arg)
7027 {
7028 	bs_inflate_blob(bs, channel, blobid, true, cb_fn, cb_arg);
7029 }
7030 
7031 void
7032 spdk_bs_blob_decouple_parent(struct spdk_blob_store *bs, struct spdk_io_channel *channel,
7033 			     spdk_blob_id blobid, spdk_blob_op_complete cb_fn, void *cb_arg)
7034 {
7035 	bs_inflate_blob(bs, channel, blobid, false, cb_fn, cb_arg);
7036 }
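
/*
 * The two entry points above differ only in allocate_all:
 * spdk_bs_inflate_blob() copies every cluster and drops thin provisioning
 * entirely, while spdk_bs_blob_decouple_parent() copies only the clusters
 * backed by the immediate parent and re-parents the blob one level up,
 * keeping it thin.
 */
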
7037 /* END spdk_bs_inflate_blob */
7038 
7039 /* START spdk_blob_resize */
7040 struct spdk_bs_resize_ctx {
7041 	spdk_blob_op_complete cb_fn;
7042 	void *cb_arg;
7043 	struct spdk_blob *blob;
7044 	uint64_t sz;
7045 	int rc;
7046 };
7047 
7048 static void
7049 bs_resize_unfreeze_cpl(void *cb_arg, int rc)
7050 {
7051 	struct spdk_bs_resize_ctx *ctx = (struct spdk_bs_resize_ctx *)cb_arg;
7052 
7053 	if (rc != 0) {
7054 		SPDK_ERRLOG("Unfreeze failed, rc=%d\n", rc);
7055 	}
7056 
7057 	if (ctx->rc != 0) {
7058 		SPDK_ERRLOG("Resize failed, ctx->rc=%d\n", ctx->rc);
7059 		rc = ctx->rc;
7060 	}
7061 
7062 	ctx->blob->locked_operation_in_progress = false;
7063 
7064 	ctx->cb_fn(ctx->cb_arg, rc);
7065 	free(ctx);
7066 }
7067 
7068 static void
7069 bs_resize_freeze_cpl(void *cb_arg, int rc)
7070 {
7071 	struct spdk_bs_resize_ctx *ctx = (struct spdk_bs_resize_ctx *)cb_arg;
7072 
7073 	if (rc != 0) {
7074 		ctx->blob->locked_operation_in_progress = false;
7075 		ctx->cb_fn(ctx->cb_arg, rc);
7076 		free(ctx);
7077 		return;
7078 	}
7079 
7080 	ctx->rc = blob_resize(ctx->blob, ctx->sz);
7081 
7082 	blob_unfreeze_io(ctx->blob, bs_resize_unfreeze_cpl, ctx);
7083 }
7084 
7085 void
7086 spdk_blob_resize(struct spdk_blob *blob, uint64_t sz, spdk_blob_op_complete cb_fn, void *cb_arg)
7087 {
7088 	struct spdk_bs_resize_ctx *ctx;
7089 
7090 	blob_verify_md_op(blob);
7091 
7092 	SPDK_DEBUGLOG(blob, "Resizing blob 0x%" PRIx64 " to %" PRIu64 " clusters\n", blob->id, sz);
7093 
7094 	if (blob->md_ro) {
7095 		cb_fn(cb_arg, -EPERM);
7096 		return;
7097 	}
7098 
7099 	if (sz == blob->active.num_clusters) {
7100 		cb_fn(cb_arg, 0);
7101 		return;
7102 	}
7103 
7104 	if (blob->locked_operation_in_progress) {
7105 		cb_fn(cb_arg, -EBUSY);
7106 		return;
7107 	}
7108 
7109 	ctx = calloc(1, sizeof(*ctx));
7110 	if (!ctx) {
7111 		cb_fn(cb_arg, -ENOMEM);
7112 		return;
7113 	}
7114 
7115 	blob->locked_operation_in_progress = true;
7116 	ctx->cb_fn = cb_fn;
7117 	ctx->cb_arg = cb_arg;
7118 	ctx->blob = blob;
7119 	ctx->sz = sz;
7120 	blob_freeze_io(blob, bs_resize_freeze_cpl, ctx);
7121 }
7122 
7123 /* END spdk_blob_resize */
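
/*
 * Usage sketch (illustrative only): spdk_blob_resize() freezes I/O, updates
 * the in-memory cluster map, and unfreezes; the new size is persisted on the
 * next metadata sync. Callback names are hypothetical.
 *
 *	static void
 *	resize_done(void *cb_arg, int bserrno)
 *	{
 *		struct spdk_blob *blob = cb_arg;
 *
 *		if (bserrno == 0) {
 *			spdk_blob_sync_md(blob, sync_done, blob);
 *		}
 *	}
 *
 *	spdk_blob_resize(blob, new_cluster_count, resize_done, blob);
 */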
7124 
7125 
7126 /* START spdk_bs_delete_blob */
7127 
7128 static void
7129 bs_delete_close_cpl(void *cb_arg, int bserrno)
7130 {
7131 	spdk_bs_sequence_t *seq = cb_arg;
7132 
7133 	bs_sequence_finish(seq, bserrno);
7134 }
7135 
7136 static void
7137 bs_delete_persist_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
7138 {
7139 	struct spdk_blob *blob = cb_arg;
7140 
7141 	if (bserrno != 0) {
7142 		/*
7143 		 * We already removed this blob from the blobstore's open blob tree, so
7144 		 *  we need to free it here since this is the last reference
7145 		 *  to it.
7146 		 */
7147 		blob_free(blob);
7148 		bs_delete_close_cpl(seq, bserrno);
7149 		return;
7150 	}
7151 
7152 	/*
7153 	 * This will immediately decrement the ref_count and call
7154 	 *  the completion routine since the metadata state is clean.
7155 	 *  By calling spdk_blob_close, we reduce the number of call
7156 	 *  points into code that touches the blob->open_ref count
7157 	 *  and the blobstore's blob list.
7158 	 */
7159 	spdk_blob_close(blob, bs_delete_close_cpl, seq);
7160 }
7161 
7162 struct delete_snapshot_ctx {
7163 	struct spdk_blob_list *parent_snapshot_entry;
7164 	struct spdk_blob *snapshot;
7165 	struct spdk_blob_md_page *page;
7166 	bool snapshot_md_ro;
7167 	struct spdk_blob *clone;
7168 	bool clone_md_ro;
7169 	spdk_blob_op_with_handle_complete cb_fn;
7170 	void *cb_arg;
7171 	int bserrno;
7172 	uint32_t next_extent_page;
7173 };
7174 
7175 static void
7176 delete_blob_cleanup_finish(void *cb_arg, int bserrno)
7177 {
7178 	struct delete_snapshot_ctx *ctx = cb_arg;
7179 
7180 	if (bserrno != 0) {
7181 		SPDK_ERRLOG("Snapshot cleanup error %d\n", bserrno);
7182 	}
7183 
7184 	assert(ctx != NULL);
7185 
7186 	if (bserrno != 0 && ctx->bserrno == 0) {
7187 		ctx->bserrno = bserrno;
7188 	}
7189 
7190 	ctx->cb_fn(ctx->cb_arg, ctx->snapshot, ctx->bserrno);
7191 	spdk_free(ctx->page);
7192 	free(ctx);
7193 }
7194 
7195 static void
7196 delete_snapshot_cleanup_snapshot(void *cb_arg, int bserrno)
7197 {
7198 	struct delete_snapshot_ctx *ctx = cb_arg;
7199 
7200 	if (bserrno != 0) {
7201 		ctx->bserrno = bserrno;
7202 		SPDK_ERRLOG("Clone cleanup error %d\n", bserrno);
7203 	}
7204 
7205 	if (ctx->bserrno != 0) {
7206 		assert(blob_lookup(ctx->snapshot->bs, ctx->snapshot->id) == NULL);
7207 		RB_INSERT(spdk_blob_tree, &ctx->snapshot->bs->open_blobs, ctx->snapshot);
7208 		spdk_bit_array_set(ctx->snapshot->bs->open_blobids, ctx->snapshot->id);
7209 	}
7210 
7211 	ctx->snapshot->locked_operation_in_progress = false;
7212 	ctx->snapshot->md_ro = ctx->snapshot_md_ro;
7213 
7214 	spdk_blob_close(ctx->snapshot, delete_blob_cleanup_finish, ctx);
7215 }
7216 
7217 static void
7218 delete_snapshot_cleanup_clone(void *cb_arg, int bserrno)
7219 {
7220 	struct delete_snapshot_ctx *ctx = cb_arg;
7221 
7222 	ctx->clone->locked_operation_in_progress = false;
7223 	ctx->clone->md_ro = ctx->clone_md_ro;
7224 
7225 	spdk_blob_close(ctx->clone, delete_snapshot_cleanup_snapshot, ctx);
7226 }
7227 
7228 static void
7229 delete_snapshot_unfreeze_cpl(void *cb_arg, int bserrno)
7230 {
7231 	struct delete_snapshot_ctx *ctx = cb_arg;
7232 
7233 	if (bserrno) {
7234 		ctx->bserrno = bserrno;
7235 		delete_snapshot_cleanup_clone(ctx, 0);
7236 		return;
7237 	}
7238 
7239 	ctx->clone->locked_operation_in_progress = false;
7240 	spdk_blob_close(ctx->clone, delete_blob_cleanup_finish, ctx);
7241 }
7242 
7243 static void
7244 delete_snapshot_sync_snapshot_cpl(void *cb_arg, int bserrno)
7245 {
7246 	struct delete_snapshot_ctx *ctx = cb_arg;
7247 	struct spdk_blob_list *parent_snapshot_entry = NULL;
7248 	struct spdk_blob_list *snapshot_entry = NULL;
7249 	struct spdk_blob_list *clone_entry = NULL;
7250 	struct spdk_blob_list *snapshot_clone_entry = NULL;
7251 
7252 	if (bserrno) {
7253 		SPDK_ERRLOG("Failed to sync MD on blob\n");
7254 		ctx->bserrno = bserrno;
7255 		delete_snapshot_cleanup_clone(ctx, 0);
7256 		return;
7257 	}
7258 
7259 	/* Get snapshot entry for the snapshot we want to remove */
7260 	snapshot_entry = bs_get_snapshot_entry(ctx->snapshot->bs, ctx->snapshot->id);
7261 
7262 	assert(snapshot_entry != NULL);
7263 
7264 	/* Remove clone entry in this snapshot (at this point there can be only one clone) */
7265 	clone_entry = TAILQ_FIRST(&snapshot_entry->clones);
7266 	assert(clone_entry != NULL);
7267 	TAILQ_REMOVE(&snapshot_entry->clones, clone_entry, link);
7268 	snapshot_entry->clone_count--;
7269 	assert(TAILQ_EMPTY(&snapshot_entry->clones));
7270 
7271 	switch (ctx->snapshot->parent_id) {
7272 	case SPDK_BLOBID_INVALID:
7273 	case SPDK_BLOBID_EXTERNAL_SNAPSHOT:
7274 		/* No parent snapshot - just remove clone entry */
7275 		free(clone_entry);
7276 		break;
7277 	default:
7278 		/* This snapshot is at the same time a clone of another snapshot - we need to
7279 		 * update the parent snapshot (remove the current clone entry, add a new one
7280 		 * inherited from the snapshot that is being removed) */
7281 
7282 		/* Get the snapshot entry for the parent snapshot, and the clone entry within
7283 		 * that parent for the snapshot that we are removing */
7284 		blob_get_snapshot_and_clone_entries(ctx->snapshot, &parent_snapshot_entry,
7285 						    &snapshot_clone_entry);
7286 
7287 		/* Switch clone entry in parent snapshot */
7288 		TAILQ_INSERT_TAIL(&parent_snapshot_entry->clones, clone_entry, link);
7289 		TAILQ_REMOVE(&parent_snapshot_entry->clones, snapshot_clone_entry, link);
7290 		free(snapshot_clone_entry);
7291 	}
7292 
7293 	/* Restore md_ro flags */
7294 	ctx->clone->md_ro = ctx->clone_md_ro;
7295 	ctx->snapshot->md_ro = ctx->snapshot_md_ro;
7296 
7297 	blob_unfreeze_io(ctx->clone, delete_snapshot_unfreeze_cpl, ctx);
7298 }
7299 
7300 static void
7301 delete_snapshot_sync_clone_cpl(void *cb_arg, int bserrno)
7302 {
7303 	struct delete_snapshot_ctx *ctx = cb_arg;
7304 	uint64_t i;
7305 
7306 	ctx->snapshot->md_ro = false;
7307 
7308 	if (bserrno) {
7309 		SPDK_ERRLOG("Failed to sync MD on clone\n");
7310 		ctx->bserrno = bserrno;
7311 
7312 		/* Restore snapshot to previous state */
7313 		bserrno = blob_remove_xattr(ctx->snapshot, SNAPSHOT_PENDING_REMOVAL, true);
7314 		if (bserrno != 0) {
7315 			delete_snapshot_cleanup_clone(ctx, bserrno);
7316 			return;
7317 		}
7318 
7319 		spdk_blob_sync_md(ctx->snapshot, delete_snapshot_cleanup_clone, ctx);
7320 		return;
7321 	}
7322 
7323 	/* Clear cluster map entries for snapshot */
7324 	for (i = 0; i < ctx->snapshot->active.num_clusters && i < ctx->clone->active.num_clusters; i++) {
7325 		if (ctx->clone->active.clusters[i] == ctx->snapshot->active.clusters[i]) {
7326 			ctx->snapshot->active.clusters[i] = 0;
7327 		}
7328 	}
7329 	for (i = 0; i < ctx->snapshot->active.num_extent_pages &&
7330 	     i < ctx->clone->active.num_extent_pages; i++) {
7331 		if (ctx->clone->active.extent_pages[i] == ctx->snapshot->active.extent_pages[i]) {
7332 			ctx->snapshot->active.extent_pages[i] = 0;
7333 		}
7334 	}
7335 
7336 	blob_set_thin_provision(ctx->snapshot);
7337 	ctx->snapshot->state = SPDK_BLOB_STATE_DIRTY;
7338 
7339 	if (ctx->parent_snapshot_entry != NULL) {
7340 		ctx->snapshot->back_bs_dev = NULL;
7341 	}
7342 
7343 	spdk_blob_sync_md(ctx->snapshot, delete_snapshot_sync_snapshot_cpl, ctx);
7344 }
7345 
7346 static void
7347 delete_snapshot_update_extent_pages_cpl(struct delete_snapshot_ctx *ctx)
7348 {
7349 	int bserrno;
7350 
7351 	/* Delete old backing bs_dev from clone (related to snapshot that will be removed) */
7352 	blob_back_bs_destroy(ctx->clone);
7353 
7354 	/* Set/remove snapshot xattr and switch parent ID and backing bs_dev on clone... */
7355 	if (ctx->snapshot->parent_id == SPDK_BLOBID_EXTERNAL_SNAPSHOT) {
7356 		bserrno = bs_snapshot_copy_xattr(ctx->clone, ctx->snapshot,
7357 						 BLOB_EXTERNAL_SNAPSHOT_ID);
7358 		if (bserrno != 0) {
7359 			ctx->bserrno = bserrno;
7360 
7361 			/* Restore snapshot to previous state */
7362 			bserrno = blob_remove_xattr(ctx->snapshot, SNAPSHOT_PENDING_REMOVAL, true);
7363 			if (bserrno != 0) {
7364 				delete_snapshot_cleanup_clone(ctx, bserrno);
7365 				return;
7366 			}
7367 
7368 			spdk_blob_sync_md(ctx->snapshot, delete_snapshot_cleanup_clone, ctx);
7369 			return;
7370 		}
7371 		ctx->clone->parent_id = SPDK_BLOBID_EXTERNAL_SNAPSHOT;
7372 		ctx->clone->back_bs_dev = ctx->snapshot->back_bs_dev;
7373 		/* Do not delete the external snapshot along with this snapshot */
7374 		ctx->snapshot->back_bs_dev = NULL;
7375 		ctx->clone->invalid_flags |= SPDK_BLOB_EXTERNAL_SNAPSHOT;
7376 	} else if (ctx->parent_snapshot_entry != NULL) {
7377 		/* ...to parent snapshot */
7378 		ctx->clone->parent_id = ctx->parent_snapshot_entry->id;
7379 		ctx->clone->back_bs_dev = ctx->snapshot->back_bs_dev;
7380 		blob_set_xattr(ctx->clone, BLOB_SNAPSHOT, &ctx->parent_snapshot_entry->id,
7381 			       sizeof(spdk_blob_id),
7382 			       true);
7383 	} else {
7384 		/* ...to blobid invalid and zeroes dev */
7385 		ctx->clone->parent_id = SPDK_BLOBID_INVALID;
7386 		ctx->clone->back_bs_dev = bs_create_zeroes_dev();
7387 		blob_remove_xattr(ctx->clone, BLOB_SNAPSHOT, true);
7388 	}
7389 
7390 	spdk_blob_sync_md(ctx->clone, delete_snapshot_sync_clone_cpl, ctx);
7391 }
7392 
7393 static void
7394 delete_snapshot_update_extent_pages(void *cb_arg, int bserrno)
7395 {
7396 	struct delete_snapshot_ctx *ctx = cb_arg;
7397 	uint32_t *extent_page;
7398 	uint64_t i;
7399 
7400 	for (i = ctx->next_extent_page; i < ctx->snapshot->active.num_extent_pages &&
7401 	     i < ctx->clone->active.num_extent_pages; i++) {
7402 		if (ctx->snapshot->active.extent_pages[i] == 0) {
7403 			/* No extent page to use from snapshot */
7404 			continue;
7405 		}
7406 
7407 		extent_page = &ctx->clone->active.extent_pages[i];
7408 		if (*extent_page == 0) {
7409 			/* Copy extent page from snapshot when clone did not have a matching one */
7410 			*extent_page = ctx->snapshot->active.extent_pages[i];
7411 			continue;
7412 		}
7413 
7414 		/* Clone and snapshot both contain partially filled matching extent pages.
7415 		 * Update the clone extent page in place with a cluster map containing the mix of both. */
7416 		ctx->next_extent_page = i + 1;
7417 		memset(ctx->page, 0, SPDK_BS_PAGE_SIZE);
7418 
7419 		blob_write_extent_page(ctx->clone, *extent_page, i * SPDK_EXTENTS_PER_EP, ctx->page,
7420 				       delete_snapshot_update_extent_pages, ctx);
7421 		return;
7422 	}
7423 	delete_snapshot_update_extent_pages_cpl(ctx);
7424 }
7425 
7426 static void
7427 delete_snapshot_sync_snapshot_xattr_cpl(void *cb_arg, int bserrno)
7428 {
7429 	struct delete_snapshot_ctx *ctx = cb_arg;
7430 	uint64_t i;
7431 
7432 	/* Temporarily override md_ro flag for clone for MD modification */
7433 	ctx->clone_md_ro = ctx->clone->md_ro;
7434 	ctx->clone->md_ro = false;
7435 
7436 	if (bserrno) {
7437 		SPDK_ERRLOG("Failed to sync MD with xattr on blob\n");
7438 		ctx->bserrno = bserrno;
7439 		delete_snapshot_cleanup_clone(ctx, 0);
7440 		return;
7441 	}
7442 
7443 	/* Copy snapshot map to clone map (only unallocated clusters in clone) */
7444 	for (i = 0; i < ctx->snapshot->active.num_clusters && i < ctx->clone->active.num_clusters; i++) {
7445 		if (ctx->clone->active.clusters[i] == 0) {
7446 			ctx->clone->active.clusters[i] = ctx->snapshot->active.clusters[i];
7447 		}
7448 	}
7449 	ctx->next_extent_page = 0;
7450 	delete_snapshot_update_extent_pages(ctx, 0);
7451 }
7452 
7453 static void
7454 delete_snapshot_esnap_channels_destroyed_cb(void *cb_arg, struct spdk_blob *blob, int bserrno)
7455 {
7456 	struct delete_snapshot_ctx *ctx = cb_arg;
7457 
7458 	if (bserrno != 0) {
7459 		SPDK_ERRLOG("blob 0x%" PRIx64 ": failed to destroy esnap channels: %d\n",
7460 			    blob->id, bserrno);
7461 		/* That error should not stop us from syncing metadata. */
7462 	}
7463 
7464 	spdk_blob_sync_md(ctx->snapshot, delete_snapshot_sync_snapshot_xattr_cpl, ctx);
7465 }
7466 
7467 static void
7468 delete_snapshot_freeze_io_cb(void *cb_arg, int bserrno)
7469 {
7470 	struct delete_snapshot_ctx *ctx = cb_arg;
7471 
7472 	if (bserrno) {
7473 		SPDK_ERRLOG("Failed to freeze I/O on clone\n");
7474 		ctx->bserrno = bserrno;
7475 		delete_snapshot_cleanup_clone(ctx, 0);
7476 		return;
7477 	}
7478 
7479 	/* Temporarily override md_ro flag for snapshot for MD modification */
7480 	ctx->snapshot_md_ro = ctx->snapshot->md_ro;
7481 	ctx->snapshot->md_ro = false;
7482 
7483 	/* Mark the blob as pending removal for power-failure safety; use the clone id for recovery */
7484 	ctx->bserrno = blob_set_xattr(ctx->snapshot, SNAPSHOT_PENDING_REMOVAL, &ctx->clone->id,
7485 				      sizeof(spdk_blob_id), true);
7486 	if (ctx->bserrno != 0) {
7487 		delete_snapshot_cleanup_clone(ctx, 0);
7488 		return;
7489 	}
7490 
7491 	if (blob_is_esnap_clone(ctx->snapshot)) {
7492 		blob_esnap_destroy_bs_dev_channels(ctx->snapshot, false,
7493 						   delete_snapshot_esnap_channels_destroyed_cb,
7494 						   ctx);
7495 		return;
7496 	}
7497 
7498 	spdk_blob_sync_md(ctx->snapshot, delete_snapshot_sync_snapshot_xattr_cpl, ctx);
7499 }
7500 
7501 static void
7502 delete_snapshot_open_clone_cb(void *cb_arg, struct spdk_blob *clone, int bserrno)
7503 {
7504 	struct delete_snapshot_ctx *ctx = cb_arg;
7505 
7506 	if (bserrno) {
7507 		SPDK_ERRLOG("Failed to open clone\n");
7508 		ctx->bserrno = bserrno;
7509 		delete_snapshot_cleanup_snapshot(ctx, 0);
7510 		return;
7511 	}
7512 
7513 	ctx->clone = clone;
7514 
7515 	if (clone->locked_operation_in_progress) {
7516 		SPDK_DEBUGLOG(blob, "Cannot remove blob - another operation in progress on its clone\n");
7517 		ctx->bserrno = -EBUSY;
7518 		spdk_blob_close(ctx->clone, delete_snapshot_cleanup_snapshot, ctx);
7519 		return;
7520 	}
7521 
7522 	clone->locked_operation_in_progress = true;
7523 
7524 	blob_freeze_io(clone, delete_snapshot_freeze_io_cb, ctx);
7525 }
7526 
7527 static void
7528 update_clone_on_snapshot_deletion(struct spdk_blob *snapshot, struct delete_snapshot_ctx *ctx)
7529 {
7530 	struct spdk_blob_list *snapshot_entry = NULL;
7531 	struct spdk_blob_list *clone_entry = NULL;
7532 	struct spdk_blob_list *snapshot_clone_entry = NULL;
7533 
7534 	/* Get snapshot entry for the snapshot we want to remove */
7535 	snapshot_entry = bs_get_snapshot_entry(snapshot->bs, snapshot->id);
7536 
7537 	assert(snapshot_entry != NULL);
7538 
7539 	/* Get clone of the snapshot (at this point there can be only one clone) */
7540 	clone_entry = TAILQ_FIRST(&snapshot_entry->clones);
7541 	assert(snapshot_entry->clone_count == 1);
7542 	assert(clone_entry != NULL);
7543 
7544 	/* Get the snapshot entry for the parent snapshot, and the clone entry within
7545 	 * that parent for the snapshot that we are removing */
7546 	blob_get_snapshot_and_clone_entries(snapshot, &ctx->parent_snapshot_entry,
7547 					    &snapshot_clone_entry);
7548 
7549 	spdk_bs_open_blob(snapshot->bs, clone_entry->id, delete_snapshot_open_clone_cb, ctx);
7550 }
7551 
7552 static void
7553 bs_delete_blob_finish(void *cb_arg, struct spdk_blob *blob, int bserrno)
7554 {
7555 	spdk_bs_sequence_t *seq = cb_arg;
7556 	struct spdk_blob_list *snapshot_entry = NULL;
7557 	uint32_t page_num;
7558 
7559 	if (bserrno) {
7560 		SPDK_ERRLOG("Failed to remove blob\n");
7561 		bs_sequence_finish(seq, bserrno);
7562 		return;
7563 	}
7564 
7565 	/* Remove snapshot from the list */
7566 	snapshot_entry = bs_get_snapshot_entry(blob->bs, blob->id);
7567 	if (snapshot_entry != NULL) {
7568 		TAILQ_REMOVE(&blob->bs->snapshots, snapshot_entry, link);
7569 		free(snapshot_entry);
7570 	}
7571 
7572 	page_num = bs_blobid_to_page(blob->id);
7573 	spdk_bit_array_clear(blob->bs->used_blobids, page_num);
7574 	blob->state = SPDK_BLOB_STATE_DIRTY;
7575 	blob->active.num_pages = 0;
7576 	blob_resize(blob, 0);
7577 
7578 	blob_persist(seq, blob, bs_delete_persist_cpl, blob);
7579 }
7580 
7581 static int
7582 bs_is_blob_deletable(struct spdk_blob *blob, bool *update_clone)
7583 {
7584 	struct spdk_blob_list *snapshot_entry = NULL;
7585 	struct spdk_blob_list *clone_entry = NULL;
7586 	struct spdk_blob *clone = NULL;
7587 	bool has_one_clone = false;
7588 
7589 	/* Check if this is a snapshot with clones */
7590 	snapshot_entry = bs_get_snapshot_entry(blob->bs, blob->id);
7591 	if (snapshot_entry != NULL) {
7592 		if (snapshot_entry->clone_count > 1) {
7593 			SPDK_ERRLOG("Cannot remove snapshot with more than one clone\n");
7594 			return -EBUSY;
7595 		} else if (snapshot_entry->clone_count == 1) {
7596 			has_one_clone = true;
7597 		}
7598 	}
7599 
7600 	/* Check if someone has this blob open (besides this delete context):
7601 	 * - open_ref == 1 - only this context opened the blob, so it is ok to remove it
7602 	 * - open_ref <= 2 && has_one_clone == true - the clone is holding the snapshot
7603 	 *	open, and that is ok, because we will update it accordingly */
7604 	if (blob->open_ref <= 2 && has_one_clone) {
7605 		clone_entry = TAILQ_FIRST(&snapshot_entry->clones);
7606 		assert(clone_entry != NULL);
7607 		clone = blob_lookup(blob->bs, clone_entry->id);
7608 
7609 		if (blob->open_ref == 2 && clone == NULL) {
7610 			/* Clone is closed and someone else opened this blob */
7611 			SPDK_ERRLOG("Cannot remove snapshot because it is open\n");
7612 			return -EBUSY;
7613 		}
7614 
7615 		*update_clone = true;
7616 		return 0;
7617 	}
7618 
7619 	if (blob->open_ref > 1) {
7620 		SPDK_ERRLOG("Cannot remove snapshot because it is open\n");
7621 		return -EBUSY;
7622 	}
7623 
7624 	assert(has_one_clone == false);
7625 	*update_clone = false;
7626 	return 0;
7627 }
7628 
7629 static void
7630 bs_delete_enomem_close_cpl(void *cb_arg, int bserrno)
7631 {
7632 	spdk_bs_sequence_t *seq = cb_arg;
7633 
7634 	bs_sequence_finish(seq, -ENOMEM);
7635 }
7636 
7637 static void
7638 bs_delete_open_cpl(void *cb_arg, struct spdk_blob *blob, int bserrno)
7639 {
7640 	spdk_bs_sequence_t *seq = cb_arg;
7641 	struct delete_snapshot_ctx *ctx;
7642 	bool update_clone = false;
7643 
7644 	if (bserrno != 0) {
7645 		bs_sequence_finish(seq, bserrno);
7646 		return;
7647 	}
7648 
7649 	blob_verify_md_op(blob);
7650 
7651 	ctx = calloc(1, sizeof(*ctx));
7652 	if (ctx == NULL) {
7653 		spdk_blob_close(blob, bs_delete_enomem_close_cpl, seq);
7654 		return;
7655 	}
7656 
7657 	ctx->snapshot = blob;
7658 	ctx->cb_fn = bs_delete_blob_finish;
7659 	ctx->cb_arg = seq;
7660 
7661 	/* Check if blob can be removed and if it is a snapshot with clone on top of it */
7662 	ctx->bserrno = bs_is_blob_deletable(blob, &update_clone);
7663 	if (ctx->bserrno) {
7664 		spdk_blob_close(blob, delete_blob_cleanup_finish, ctx);
7665 		return;
7666 	}
7667 
7668 	if (blob->locked_operation_in_progress) {
7669 		SPDK_DEBUGLOG(blob, "Cannot remove blob - another operation in progress\n");
7670 		ctx->bserrno = -EBUSY;
7671 		spdk_blob_close(blob, delete_blob_cleanup_finish, ctx);
7672 		return;
7673 	}
7674 
7675 	blob->locked_operation_in_progress = true;
7676 
7677 	/*
7678 	 * Remove the blob from the blob_store list now, to ensure it does not
7679 	 *  get returned after this point by blob_lookup().
7680 	 */
7681 	spdk_bit_array_clear(blob->bs->open_blobids, blob->id);
7682 	RB_REMOVE(spdk_blob_tree, &blob->bs->open_blobs, blob);
7683 
7684 	if (update_clone) {
7685 		ctx->page = spdk_zmalloc(SPDK_BS_PAGE_SIZE, 0, NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
7686 		if (!ctx->page) {
7687 			ctx->bserrno = -ENOMEM;
7688 			spdk_blob_close(blob, delete_blob_cleanup_finish, ctx);
7689 			return;
7690 		}
7691 		/* This blob is a snapshot with an active clone - update the clone first */
7692 		update_clone_on_snapshot_deletion(blob, ctx);
7693 	} else {
7694 		/* This blob does not have any clones - just remove it */
7695 		bs_blob_list_remove(blob);
7696 		bs_delete_blob_finish(seq, blob, 0);
7697 		free(ctx);
7698 	}
7699 }
7700 
7701 void
7702 spdk_bs_delete_blob(struct spdk_blob_store *bs, spdk_blob_id blobid,
7703 		    spdk_blob_op_complete cb_fn, void *cb_arg)
7704 {
7705 	struct spdk_bs_cpl	cpl;
7706 	spdk_bs_sequence_t	*seq;
7707 
7708 	SPDK_DEBUGLOG(blob, "Deleting blob 0x%" PRIx64 "\n", blobid);
7709 
7710 	assert(spdk_get_thread() == bs->md_thread);
7711 
7712 	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
7713 	cpl.u.blob_basic.cb_fn = cb_fn;
7714 	cpl.u.blob_basic.cb_arg = cb_arg;
7715 
7716 	seq = bs_sequence_start_bs(bs->md_channel, &cpl);
7717 	if (!seq) {
7718 		cb_fn(cb_arg, -ENOMEM);
7719 		return;
7720 	}
7721 
7722 	spdk_bs_open_blob(bs, blobid, bs_delete_open_cpl, seq);
7723 }
7724 
7725 /* END spdk_bs_delete_blob */
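
/*
 * Usage sketch (illustrative only): deletion must be issued from the
 * blobstore metadata thread (see the assert above) and fails with -EBUSY
 * if the blob is open elsewhere; a snapshot with exactly one clone is the
 * special case handled by update_clone_on_snapshot_deletion(). The callback
 * name is hypothetical.
 *
 *	static void
 *	delete_done(void *cb_arg, int bserrno)
 *	{
 *		if (bserrno != 0) {
 *			SPDK_ERRLOG("delete failed: %d\n", bserrno);
 *		}
 *	}
 *
 *	spdk_bs_delete_blob(bs, blobid, delete_done, NULL);
 */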
7726 
7727 /* START spdk_bs_open_blob */
7728 
7729 static void
7730 bs_open_blob_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
7731 {
7732 	struct spdk_blob *blob = cb_arg;
7733 	struct spdk_blob *existing;
7734 
7735 	if (bserrno != 0) {
7736 		blob_free(blob);
7737 		seq->cpl.u.blob_handle.blob = NULL;
7738 		bs_sequence_finish(seq, bserrno);
7739 		return;
7740 	}
7741 
7742 	existing = blob_lookup(blob->bs, blob->id);
7743 	if (existing) {
7744 		blob_free(blob);
7745 		existing->open_ref++;
7746 		seq->cpl.u.blob_handle.blob = existing;
7747 		bs_sequence_finish(seq, 0);
7748 		return;
7749 	}
7750 
7751 	blob->open_ref++;
7752 
7753 	spdk_bit_array_set(blob->bs->open_blobids, blob->id);
7754 	RB_INSERT(spdk_blob_tree, &blob->bs->open_blobs, blob);
7755 
7756 	bs_sequence_finish(seq, bserrno);
7757 }
7758 
7759 static inline void
7760 blob_open_opts_copy(const struct spdk_blob_open_opts *src, struct spdk_blob_open_opts *dst)
7761 {
7762 #define FIELD_OK(field) \
7763         offsetof(struct spdk_blob_open_opts, field) + sizeof(src->field) <= src->opts_size
7764 
7765 #define SET_FIELD(field) \
7766         if (FIELD_OK(field)) { \
7767                 dst->field = src->field; \
7768         } \
7769 
7770 	SET_FIELD(clear_method);
7771 	SET_FIELD(esnap_ctx);
7772 
7773 	dst->opts_size = src->opts_size;
7774 
7775 	/* Do not remove this statement. When adding a new field, update the assert
7776 	 * below and add a corresponding SET_FIELD statement. */
7777 	SPDK_STATIC_ASSERT(sizeof(struct spdk_blob_open_opts) == 24, "Incorrect size");
7778 
7779 #undef FIELD_OK
7780 #undef SET_FIELD
7781 }
7782 
7783 static void
7784 bs_open_blob(struct spdk_blob_store *bs,
7785 	     spdk_blob_id blobid,
7786 	     struct spdk_blob_open_opts *opts,
7787 	     spdk_blob_op_with_handle_complete cb_fn,
7788 	     void *cb_arg)
7789 {
7790 	struct spdk_blob		*blob;
7791 	struct spdk_bs_cpl		cpl;
7792 	struct spdk_blob_open_opts	opts_local;
7793 	spdk_bs_sequence_t		*seq;
7794 	uint32_t			page_num;
7795 
7796 	SPDK_DEBUGLOG(blob, "Opening blob 0x%" PRIx64 "\n", blobid);
7797 	assert(spdk_get_thread() == bs->md_thread);
7798 
7799 	page_num = bs_blobid_to_page(blobid);
7800 	if (spdk_bit_array_get(bs->used_blobids, page_num) == false) {
7801 		/* Invalid blobid */
7802 		cb_fn(cb_arg, NULL, -ENOENT);
7803 		return;
7804 	}
7805 
7806 	blob = blob_lookup(bs, blobid);
7807 	if (blob) {
7808 		blob->open_ref++;
7809 		cb_fn(cb_arg, blob, 0);
7810 		return;
7811 	}
7812 
7813 	blob = blob_alloc(bs, blobid);
7814 	if (!blob) {
7815 		cb_fn(cb_arg, NULL, -ENOMEM);
7816 		return;
7817 	}
7818 
7819 	spdk_blob_open_opts_init(&opts_local, sizeof(opts_local));
7820 	if (opts) {
7821 		blob_open_opts_copy(opts, &opts_local);
7822 	}
7823 
7824 	blob->clear_method = opts_local.clear_method;
7825 
7826 	cpl.type = SPDK_BS_CPL_TYPE_BLOB_HANDLE;
7827 	cpl.u.blob_handle.cb_fn = cb_fn;
7828 	cpl.u.blob_handle.cb_arg = cb_arg;
7829 	cpl.u.blob_handle.blob = blob;
7830 	cpl.u.blob_handle.esnap_ctx = opts_local.esnap_ctx;
7831 
7832 	seq = bs_sequence_start_bs(bs->md_channel, &cpl);
7833 	if (!seq) {
7834 		blob_free(blob);
7835 		cb_fn(cb_arg, NULL, -ENOMEM);
7836 		return;
7837 	}
7838 
7839 	blob_load(seq, blob, bs_open_blob_cpl, blob);
7840 }
7841 
7842 void
7843 spdk_bs_open_blob(struct spdk_blob_store *bs, spdk_blob_id blobid,
7844 		  spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
7845 {
7846 	bs_open_blob(bs, blobid, NULL, cb_fn, cb_arg);
7847 }
7848 
7849 void
7850 spdk_bs_open_blob_ext(struct spdk_blob_store *bs, spdk_blob_id blobid,
7851 		      struct spdk_blob_open_opts *opts, spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
7852 {
7853 	bs_open_blob(bs, blobid, opts, cb_fn, cb_arg);
7854 }
7855 
7856 /* END spdk_bs_open_blob */
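
/*
 * Usage sketch (illustrative only): opts_size-based versioning allows an
 * application built against an older struct layout to pass a smaller
 * opts_size; blob_open_opts_copy() above copies only the fields that fit.
 * The callback name is hypothetical.
 *
 *	struct spdk_blob_open_opts opts;
 *
 *	spdk_blob_open_opts_init(&opts, sizeof(opts));
 *	opts.clear_method = BLOB_CLEAR_WITH_UNMAP;
 *	spdk_bs_open_blob_ext(bs, blobid, &opts, open_done, NULL);
 */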
7857 
7858 /* START spdk_blob_set_read_only */
7859 int
7860 spdk_blob_set_read_only(struct spdk_blob *blob)
7861 {
7862 	blob_verify_md_op(blob);
7863 
7864 	blob->data_ro_flags |= SPDK_BLOB_READ_ONLY;
7865 
7866 	blob->state = SPDK_BLOB_STATE_DIRTY;
7867 	return 0;
7868 }
7869 /* END spdk_blob_set_read_only */
7870 
7871 /* START spdk_blob_sync_md */
7872 
7873 static void
7874 blob_sync_md_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
7875 {
7876 	struct spdk_blob *blob = cb_arg;
7877 
7878 	if (bserrno == 0 && (blob->data_ro_flags & SPDK_BLOB_READ_ONLY)) {
7879 		blob->data_ro = true;
7880 		blob->md_ro = true;
7881 	}
7882 
7883 	bs_sequence_finish(seq, bserrno);
7884 }
7885 
7886 static void
7887 blob_sync_md(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
7888 {
7889 	struct spdk_bs_cpl	cpl;
7890 	spdk_bs_sequence_t	*seq;
7891 
7892 	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
7893 	cpl.u.blob_basic.cb_fn = cb_fn;
7894 	cpl.u.blob_basic.cb_arg = cb_arg;
7895 
7896 	seq = bs_sequence_start_bs(blob->bs->md_channel, &cpl);
7897 	if (!seq) {
7898 		cb_fn(cb_arg, -ENOMEM);
7899 		return;
7900 	}
7901 
7902 	blob_persist(seq, blob, blob_sync_md_cpl, blob);
7903 }
7904 
7905 void
7906 spdk_blob_sync_md(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
7907 {
7908 	blob_verify_md_op(blob);
7909 
7910 	SPDK_DEBUGLOG(blob, "Syncing blob 0x%" PRIx64 "\n", blob->id);
7911 
7912 	if (blob->md_ro) {
7913 		assert(blob->state == SPDK_BLOB_STATE_CLEAN);
7914 		cb_fn(cb_arg, 0);
7915 		return;
7916 	}
7917 
7918 	blob_sync_md(blob, cb_fn, cb_arg);
7919 }
7920 
7921 /* END spdk_blob_sync_md */
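
/*
 * Usage sketch (illustrative only): spdk_blob_set_read_only() only marks the
 * in-memory metadata dirty; data_ro/md_ro take effect once the metadata is
 * persisted (see blob_sync_md_cpl() above). The callback name is
 * hypothetical.
 *
 *	if (spdk_blob_set_read_only(blob) == 0) {
 *		spdk_blob_sync_md(blob, sync_done, NULL);
 *	}
 */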
7922 
7923 struct spdk_blob_cluster_op_ctx {
7924 	struct spdk_thread	*thread;
7925 	struct spdk_blob	*blob;
7926 	uint32_t		cluster_num;	/* cluster index in blob */
7927 	uint32_t		cluster;	/* cluster on disk */
7928 	uint32_t		extent_page;	/* extent page on disk */
7929 	struct spdk_blob_md_page *page; /* preallocated extent page */
7930 	int			rc;
7931 	spdk_blob_op_complete	cb_fn;
7932 	void			*cb_arg;
7933 };
7934 
7935 static void
7936 blob_op_cluster_msg_cpl(void *arg)
7937 {
7938 	struct spdk_blob_cluster_op_ctx *ctx = arg;
7939 
7940 	ctx->cb_fn(ctx->cb_arg, ctx->rc);
7941 	free(ctx);
7942 }
7943 
7944 static void
7945 blob_op_cluster_msg_cb(void *arg, int bserrno)
7946 {
7947 	struct spdk_blob_cluster_op_ctx *ctx = arg;
7948 
7949 	ctx->rc = bserrno;
7950 	spdk_thread_send_msg(ctx->thread, blob_op_cluster_msg_cpl, ctx);
7951 }
7952 
7953 static void
7954 blob_insert_new_ep_cb(void *arg, int bserrno)
7955 {
7956 	struct spdk_blob_cluster_op_ctx *ctx = arg;
7957 	uint32_t *extent_page;
7958 
7959 	extent_page = bs_cluster_to_extent_page(ctx->blob, ctx->cluster_num);
7960 	*extent_page = ctx->extent_page;
7961 	ctx->blob->state = SPDK_BLOB_STATE_DIRTY;
7962 	blob_sync_md(ctx->blob, blob_op_cluster_msg_cb, ctx);
7963 }
7964 
7965 struct spdk_blob_write_extent_page_ctx {
7966 	struct spdk_blob_store		*bs;
7967 
7968 	uint32_t			extent;
7969 	struct spdk_blob_md_page	*page;
7970 };
7971 
7972 static void
7973 blob_free_cluster_msg_cb(void *arg, int bserrno)
7974 {
7975 	struct spdk_blob_cluster_op_ctx *ctx = arg;
7976 
7977 	spdk_spin_lock(&ctx->blob->bs->used_lock);
7978 	bs_release_cluster(ctx->blob->bs, bs_lba_to_cluster(ctx->blob->bs, ctx->cluster));
7979 	spdk_spin_unlock(&ctx->blob->bs->used_lock);
7980 
7981 	ctx->rc = bserrno;
7982 	spdk_thread_send_msg(ctx->thread, blob_op_cluster_msg_cpl, ctx);
7983 }
7984 
7985 static void
7986 blob_free_cluster_update_ep_cb(void *arg, int bserrno)
7987 {
7988 	struct spdk_blob_cluster_op_ctx *ctx = arg;
7989 
7990 	if (bserrno != 0 || ctx->blob->bs->clean == 0) {
7991 		blob_free_cluster_msg_cb(ctx, bserrno);
7992 		return;
7993 	}
7994 
7995 	ctx->blob->state = SPDK_BLOB_STATE_DIRTY;
7996 	blob_sync_md(ctx->blob, blob_free_cluster_msg_cb, ctx);
7997 }
7998 
7999 static void
8000 blob_free_cluster_free_ep_cb(void *arg, int bserrno)
8001 {
8002 	struct spdk_blob_cluster_op_ctx *ctx = arg;
8003 
8004 	spdk_spin_lock(&ctx->blob->bs->used_lock);
8005 	assert(spdk_bit_array_get(ctx->blob->bs->used_md_pages, ctx->extent_page) == true);
8006 	bs_release_md_page(ctx->blob->bs, ctx->extent_page);
8007 	spdk_spin_unlock(&ctx->blob->bs->used_lock);
8008 	ctx->blob->state = SPDK_BLOB_STATE_DIRTY;
8009 	blob_sync_md(ctx->blob, blob_free_cluster_msg_cb, ctx);
8010 }
8011 
8012 static void
8013 blob_persist_extent_page_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
8014 {
8015 	struct spdk_blob_write_extent_page_ctx *ctx = cb_arg;
8016 
8017 	free(ctx);
8018 	bs_sequence_finish(seq, bserrno);
8019 }
8020 
8021 static void
8022 blob_write_extent_page_ready(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
8023 {
8024 	struct spdk_blob_write_extent_page_ctx *ctx = cb_arg;
8025 
8026 	if (bserrno != 0) {
8027 		blob_persist_extent_page_cpl(seq, ctx, bserrno);
8028 		return;
8029 	}
8030 	bs_sequence_write_dev(seq, ctx->page, bs_md_page_to_lba(ctx->bs, ctx->extent),
8031 			      bs_byte_to_lba(ctx->bs, SPDK_BS_PAGE_SIZE),
8032 			      blob_persist_extent_page_cpl, ctx);
8033 }
8034 
8035 static void
8036 blob_write_extent_page(struct spdk_blob *blob, uint32_t extent, uint64_t cluster_num,
8037 		       struct spdk_blob_md_page *page, spdk_blob_op_complete cb_fn, void *cb_arg)
8038 {
8039 	struct spdk_blob_write_extent_page_ctx	*ctx;
8040 	spdk_bs_sequence_t			*seq;
8041 	struct spdk_bs_cpl			cpl;
8042 
8043 	ctx = calloc(1, sizeof(*ctx));
8044 	if (!ctx) {
8045 		cb_fn(cb_arg, -ENOMEM);
8046 		return;
8047 	}
8048 	ctx->bs = blob->bs;
8049 	ctx->extent = extent;
8050 	ctx->page = page;
8051 
8052 	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
8053 	cpl.u.blob_basic.cb_fn = cb_fn;
8054 	cpl.u.blob_basic.cb_arg = cb_arg;
8055 
8056 	seq = bs_sequence_start_bs(blob->bs->md_channel, &cpl);
8057 	if (!seq) {
8058 		free(ctx);
8059 		cb_fn(cb_arg, -ENOMEM);
8060 		return;
8061 	}
8062 
8063 	assert(page);
8064 	page->next = SPDK_INVALID_MD_PAGE;
8065 	page->id = blob->id;
8066 	page->sequence_num = 0;
8067 
8068 	blob_serialize_extent_page(blob, cluster_num, page);
8069 
8070 	page->crc = blob_md_page_calc_crc(page);
8071 
8072 	assert(spdk_bit_array_get(blob->bs->used_md_pages, extent) == true);
8073 
8074 	bs_mark_dirty(seq, blob->bs, blob_write_extent_page_ready, ctx);
8075 }
8076 
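/*
 * Runs on the blobstore metadata thread (see
 * blob_insert_cluster_on_md_thread() below); the result is bounced back to
 * the originating thread via blob_op_cluster_msg_cb() and
 * blob_op_cluster_msg_cpl().
 */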
8077 static void
8078 blob_insert_cluster_msg(void *arg)
8079 {
8080 	struct spdk_blob_cluster_op_ctx *ctx = arg;
8081 	uint32_t *extent_page;
8082 
8083 	ctx->rc = blob_insert_cluster(ctx->blob, ctx->cluster_num, ctx->cluster);
8084 	if (ctx->rc != 0) {
8085 		spdk_thread_send_msg(ctx->thread, blob_op_cluster_msg_cpl, ctx);
8086 		return;
8087 	}
8088 
8089 	if (ctx->blob->use_extent_table == false) {
8090 		/* Extent table is not used, proceed with sync of md that will only use extents_rle. */
8091 		ctx->blob->state = SPDK_BLOB_STATE_DIRTY;
8092 		blob_sync_md(ctx->blob, blob_op_cluster_msg_cb, ctx);
8093 		return;
8094 	}
8095 
8096 	extent_page = bs_cluster_to_extent_page(ctx->blob, ctx->cluster_num);
8097 	if (*extent_page == 0) {
8098 		/* Extent page requires allocation.
8099 		 * It was already claimed in the used_md_pages map and placed in ctx. */
8100 		assert(ctx->extent_page != 0);
8101 		assert(spdk_bit_array_get(ctx->blob->bs->used_md_pages, ctx->extent_page) == true);
8102 		blob_write_extent_page(ctx->blob, ctx->extent_page, ctx->cluster_num, ctx->page,
8103 				       blob_insert_new_ep_cb, ctx);
8104 	} else {
8105 		/* It is possible for the original thread to have allocated an extent page
8106 		 * for a different cluster in the same extent page. In such a case, proceed
8107 		 * with updating the existing extent page, but release the additional one. */
8108 		if (ctx->extent_page != 0) {
8109 			spdk_spin_lock(&ctx->blob->bs->used_lock);
8110 			assert(spdk_bit_array_get(ctx->blob->bs->used_md_pages, ctx->extent_page) == true);
8111 			bs_release_md_page(ctx->blob->bs, ctx->extent_page);
8112 			spdk_spin_unlock(&ctx->blob->bs->used_lock);
8113 			ctx->extent_page = 0;
8114 		}
8115 		/* Extent page already allocated.
8116 		 * Every cluster allocation requires just an update of a single extent page. */
8117 		blob_write_extent_page(ctx->blob, *extent_page, ctx->cluster_num, ctx->page,
8118 				       blob_op_cluster_msg_cb, ctx);
8119 	}
8120 }
8121 
8122 static void
8123 blob_insert_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num,
8124 				 uint64_t cluster, uint32_t extent_page, struct spdk_blob_md_page *page,
8125 				 spdk_blob_op_complete cb_fn, void *cb_arg)
8126 {
8127 	struct spdk_blob_cluster_op_ctx *ctx;
8128 
8129 	ctx = calloc(1, sizeof(*ctx));
8130 	if (ctx == NULL) {
8131 		cb_fn(cb_arg, -ENOMEM);
8132 		return;
8133 	}
8134 
8135 	ctx->thread = spdk_get_thread();
8136 	ctx->blob = blob;
8137 	ctx->cluster_num = cluster_num;
8138 	ctx->cluster = cluster;
8139 	ctx->extent_page = extent_page;
8140 	ctx->page = page;
8141 	ctx->cb_fn = cb_fn;
8142 	ctx->cb_arg = cb_arg;
8143 
8144 	spdk_thread_send_msg(blob->bs->md_thread, blob_insert_cluster_msg, ctx);
8145 }
8146 
8147 static void
8148 blob_free_cluster_msg(void *arg)
8149 {
8150 	struct spdk_blob_cluster_op_ctx *ctx = arg;
8151 	uint32_t *extent_page;
8152 	uint32_t start_cluster_idx;
8153 	bool free_extent_page = true;
8154 	size_t i;
8155 
8156 	ctx->cluster = ctx->blob->active.clusters[ctx->cluster_num];
8157 	ctx->blob->active.clusters[ctx->cluster_num] = 0;
8158 
8159 	if (ctx->blob->use_extent_table == false) {
8160 		/* Extent table is not used, proceed with sync of md that will only use extents_rle. */
8161 		spdk_spin_lock(&ctx->blob->bs->used_lock);
8162 		bs_release_cluster(ctx->blob->bs, bs_lba_to_cluster(ctx->blob->bs, ctx->cluster));
8163 		spdk_spin_unlock(&ctx->blob->bs->used_lock);
8164 		ctx->blob->state = SPDK_BLOB_STATE_DIRTY;
8165 		blob_sync_md(ctx->blob, blob_op_cluster_msg_cb, ctx);
8166 		return;
8167 	}
8168 
8169 	extent_page = bs_cluster_to_extent_page(ctx->blob, ctx->cluster_num);
8170 
8171 	/* There shouldn't be parallel release operations on the same cluster */
8172 	assert(*extent_page == ctx->extent_page);
8173 
8174 	start_cluster_idx = (ctx->cluster_num / SPDK_EXTENTS_PER_EP) * SPDK_EXTENTS_PER_EP;
8175 	for (i = 0; i < SPDK_EXTENTS_PER_EP; ++i) {
8176 		if (ctx->blob->active.clusters[start_cluster_idx + i] != 0) {
8177 			free_extent_page = false;
8178 			break;
8179 		}
8180 	}
8181 
8182 	if (free_extent_page) {
8183 		assert(ctx->extent_page != 0);
8184 		assert(spdk_bit_array_get(ctx->blob->bs->used_md_pages, ctx->extent_page) == true);
8185 		ctx->blob->active.extent_pages[bs_cluster_to_extent_table_id(ctx->cluster_num)] = 0;
8186 		blob_write_extent_page(ctx->blob, ctx->extent_page, ctx->cluster_num, ctx->page,
8187 				       blob_free_cluster_free_ep_cb, ctx);
8188 	} else {
8189 		blob_write_extent_page(ctx->blob, *extent_page, ctx->cluster_num, ctx->page,
8190 				       blob_free_cluster_update_ep_cb, ctx);
8191 	}
8192 }
8193 
8195 static void
8196 blob_free_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num, uint32_t extent_page,
8197 			       struct spdk_blob_md_page *page, spdk_blob_op_complete cb_fn, void *cb_arg)
8198 {
8199 	struct spdk_blob_cluster_op_ctx *ctx;
8200 
8201 	ctx = calloc(1, sizeof(*ctx));
8202 	if (ctx == NULL) {
8203 		cb_fn(cb_arg, -ENOMEM);
8204 		return;
8205 	}
8206 
8207 	ctx->thread = spdk_get_thread();
8208 	ctx->blob = blob;
8209 	ctx->cluster_num = cluster_num;
8210 	ctx->extent_page = extent_page;
8211 	ctx->page = page;
8212 	ctx->cb_fn = cb_fn;
8213 	ctx->cb_arg = cb_arg;
8214 
8215 	spdk_thread_send_msg(blob->bs->md_thread, blob_free_cluster_msg, ctx);
8216 }
8217 
8218 /* START spdk_blob_close */
8219 
8220 static void
8221 blob_close_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
8222 {
8223 	struct spdk_blob *blob = cb_arg;
8224 
8225 	if (bserrno == 0) {
8226 		blob->open_ref--;
8227 		if (blob->open_ref == 0) {
8228 			/*
8229 			 * Blobs with active.num_pages == 0 are deleted blobs.
8230 			 *  These blobs are removed from the blob_store list
8231 			 *  when the deletion process starts - so don't try to
8232 			 *  remove them again.
8233 			 */
8234 			if (blob->active.num_pages > 0) {
8235 				spdk_bit_array_clear(blob->bs->open_blobids, blob->id);
8236 				RB_REMOVE(spdk_blob_tree, &blob->bs->open_blobs, blob);
8237 			}
8238 			blob_free(blob);
8239 		}
8240 	}
8241 
8242 	bs_sequence_finish(seq, bserrno);
8243 }
8244 
8245 static void
8246 blob_close_esnap_done(void *cb_arg, struct spdk_blob *blob, int bserrno)
8247 {
8248 	spdk_bs_sequence_t	*seq = cb_arg;
8249 
8250 	if (bserrno != 0) {
8251 		SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": close failed with error %d\n",
8252 			      blob->id, bserrno);
8253 		bs_sequence_finish(seq, bserrno);
8254 		return;
8255 	}
8256 
8257 	SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": closed, syncing metadata on thread %s\n",
8258 		      blob->id, spdk_thread_get_name(spdk_get_thread()));
8259 
8260 	/* Sync metadata */
8261 	blob_persist(seq, blob, blob_close_cpl, blob);
8262 }
8263 
8264 void
8265 spdk_blob_close(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
8266 {
8267 	struct spdk_bs_cpl	cpl;
8268 	spdk_bs_sequence_t	*seq;
8269 
8270 	blob_verify_md_op(blob);
8271 
8272 	SPDK_DEBUGLOG(blob, "Closing blob 0x%" PRIx64 "\n", blob->id);
8273 
8274 	if (blob->open_ref == 0) {
8275 		cb_fn(cb_arg, -EBADF);
8276 		return;
8277 	}
8278 
8279 	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
8280 	cpl.u.blob_basic.cb_fn = cb_fn;
8281 	cpl.u.blob_basic.cb_arg = cb_arg;
8282 
8283 	seq = bs_sequence_start_bs(blob->bs->md_channel, &cpl);
8284 	if (!seq) {
8285 		cb_fn(cb_arg, -ENOMEM);
8286 		return;
8287 	}
8288 
8289 	if (blob->open_ref == 1 && blob_is_esnap_clone(blob)) {
8290 		blob_esnap_destroy_bs_dev_channels(blob, false, blob_close_esnap_done, seq);
8291 		return;
8292 	}
8293 
8294 	/* Sync metadata */
8295 	blob_persist(seq, blob, blob_close_cpl, blob);
8296 }
8297 
8298 /* END spdk_blob_close */
8299 
8300 struct spdk_io_channel *spdk_bs_alloc_io_channel(struct spdk_blob_store *bs)
8301 {
8302 	return spdk_get_io_channel(bs);
8303 }
8304 
8305 void
8306 spdk_bs_free_io_channel(struct spdk_io_channel *channel)
8307 {
8308 	blob_esnap_destroy_bs_channel(spdk_io_channel_get_ctx(channel));
8309 	spdk_put_io_channel(channel);
8310 }
8311 
8312 void
8313 spdk_blob_io_unmap(struct spdk_blob *blob, struct spdk_io_channel *channel,
8314 		   uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg)
8315 {
8316 	blob_request_submit_op(blob, channel, NULL, offset, length, cb_fn, cb_arg,
8317 			       SPDK_BLOB_UNMAP);
8318 }
8319 
8320 void
8321 spdk_blob_io_write_zeroes(struct spdk_blob *blob, struct spdk_io_channel *channel,
8322 			  uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg)
8323 {
8324 	blob_request_submit_op(blob, channel, NULL, offset, length, cb_fn, cb_arg,
8325 			       SPDK_BLOB_WRITE_ZEROES);
8326 }
8327 
8328 void
8329 spdk_blob_io_write(struct spdk_blob *blob, struct spdk_io_channel *channel,
8330 		   void *payload, uint64_t offset, uint64_t length,
8331 		   spdk_blob_op_complete cb_fn, void *cb_arg)
8332 {
8333 	blob_request_submit_op(blob, channel, payload, offset, length, cb_fn, cb_arg,
8334 			       SPDK_BLOB_WRITE);
8335 }
8336 
8337 void
8338 spdk_blob_io_read(struct spdk_blob *blob, struct spdk_io_channel *channel,
8339 		  void *payload, uint64_t offset, uint64_t length,
8340 		  spdk_blob_op_complete cb_fn, void *cb_arg)
8341 {
8342 	blob_request_submit_op(blob, channel, payload, offset, length, cb_fn, cb_arg,
8343 			       SPDK_BLOB_READ);
8344 }
8345 
8346 void
8347 spdk_blob_io_writev(struct spdk_blob *blob, struct spdk_io_channel *channel,
8348 		    struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
8349 		    spdk_blob_op_complete cb_fn, void *cb_arg)
8350 {
8351 	blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, false, NULL);
8352 }
8353 
8354 void
8355 spdk_blob_io_readv(struct spdk_blob *blob, struct spdk_io_channel *channel,
8356 		   struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
8357 		   spdk_blob_op_complete cb_fn, void *cb_arg)
8358 {
8359 	blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, true, NULL);
8360 }
8361 
8362 void
8363 spdk_blob_io_writev_ext(struct spdk_blob *blob, struct spdk_io_channel *channel,
8364 			struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
8365 			spdk_blob_op_complete cb_fn, void *cb_arg, struct spdk_blob_ext_io_opts *io_opts)
8366 {
8367 	blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, false,
8368 				   io_opts);
8369 }
8370 
8371 void
8372 spdk_blob_io_readv_ext(struct spdk_blob *blob, struct spdk_io_channel *channel,
8373 		       struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
8374 		       spdk_blob_op_complete cb_fn, void *cb_arg, struct spdk_blob_ext_io_opts *io_opts)
8375 {
8376 	blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, true,
8377 				   io_opts);
8378 }
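
/*
 * Usage sketch (illustrative only): blob I/O requires a per-thread channel,
 * and offset/length are expressed in io_units. Buffer and callback names are
 * hypothetical.
 *
 *	struct spdk_io_channel *ch = spdk_bs_alloc_io_channel(bs);
 *	uint64_t io_unit_size = spdk_bs_get_io_unit_size(bs);
 *	void *payload = spdk_malloc(4 * io_unit_size, io_unit_size, NULL,
 *				    SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
 *
 *	// Write 4 io_units at offset 0; read them back from write_done().
 *	spdk_blob_io_write(blob, ch, payload, 0, 4, write_done, payload);
 */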
8379 
8380 struct spdk_bs_iter_ctx {
8381 	int64_t page_num;
8382 	struct spdk_blob_store *bs;
8383 
8384 	spdk_blob_op_with_handle_complete cb_fn;
8385 	void *cb_arg;
8386 };
8387 
8388 static void
8389 bs_iter_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
8390 {
8391 	struct spdk_bs_iter_ctx *ctx = cb_arg;
8392 	struct spdk_blob_store *bs = ctx->bs;
8393 	spdk_blob_id id;
8394 
8395 	if (bserrno == 0) {
8396 		ctx->cb_fn(ctx->cb_arg, _blob, bserrno);
8397 		free(ctx);
8398 		return;
8399 	}
8400 
8401 	ctx->page_num++;
8402 	ctx->page_num = spdk_bit_array_find_first_set(bs->used_blobids, ctx->page_num);
8403 	if (ctx->page_num >= spdk_bit_array_capacity(bs->used_blobids)) {
8404 		ctx->cb_fn(ctx->cb_arg, NULL, -ENOENT);
8405 		free(ctx);
8406 		return;
8407 	}
8408 
8409 	id = bs_page_to_blobid(ctx->page_num);
8410 
8411 	spdk_bs_open_blob(bs, id, bs_iter_cpl, ctx);
8412 }
8413 
8414 void
8415 spdk_bs_iter_first(struct spdk_blob_store *bs,
8416 		   spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
8417 {
8418 	struct spdk_bs_iter_ctx *ctx;
8419 
8420 	ctx = calloc(1, sizeof(*ctx));
8421 	if (!ctx) {
8422 		cb_fn(cb_arg, NULL, -ENOMEM);
8423 		return;
8424 	}
8425 
8426 	ctx->page_num = -1;
8427 	ctx->bs = bs;
8428 	ctx->cb_fn = cb_fn;
8429 	ctx->cb_arg = cb_arg;
8430 
8431 	bs_iter_cpl(ctx, NULL, -1);
8432 }
8433 
8434 static void
8435 bs_iter_close_cpl(void *cb_arg, int bserrno)
8436 {
8437 	struct spdk_bs_iter_ctx *ctx = cb_arg;
8438 
8439 	bs_iter_cpl(ctx, NULL, -1);
8440 }
8441 
8442 void
8443 spdk_bs_iter_next(struct spdk_blob_store *bs, struct spdk_blob *blob,
8444 		  spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
8445 {
8446 	struct spdk_bs_iter_ctx *ctx;
8447 
8448 	assert(blob != NULL);
8449 
8450 	ctx = calloc(1, sizeof(*ctx));
8451 	if (!ctx) {
8452 		cb_fn(cb_arg, NULL, -ENOMEM);
8453 		return;
8454 	}
8455 
8456 	ctx->page_num = bs_blobid_to_page(blob->id);
8457 	ctx->bs = bs;
8458 	ctx->cb_fn = cb_fn;
8459 	ctx->cb_arg = cb_arg;
8460 
8461 	/* Close the existing blob */
8462 	spdk_blob_close(blob, bs_iter_close_cpl, ctx);
8463 }
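
/*
 * Usage sketch (illustrative only): walk every blob in the blobstore.
 * spdk_bs_iter_next() closes the current blob before opening the next one,
 * and the iteration completes with -ENOENT.
 *
 *	static void
 *	iter_cb(void *cb_arg, struct spdk_blob *blob, int bserrno)
 *	{
 *		struct spdk_blob_store *bs = cb_arg;
 *
 *		if (bserrno != 0) {
 *			return;	// -ENOENT means the walk is complete
 *		}
 *		printf("blob 0x%" PRIx64 "\n", spdk_blob_get_id(blob));
 *		spdk_bs_iter_next(bs, blob, iter_cb, bs);
 *	}
 *
 *	spdk_bs_iter_first(bs, iter_cb, bs);
 */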
8464 
8465 static int
8466 blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value,
8467 	       uint16_t value_len, bool internal)
8468 {
8469 	struct spdk_xattr_tailq *xattrs;
8470 	struct spdk_xattr	*xattr;
8471 	size_t			desc_size;
8472 	void			*tmp;
8473 
8474 	blob_verify_md_op(blob);
8475 
8476 	if (blob->md_ro) {
8477 		return -EPERM;
8478 	}
8479 
8480 	desc_size = sizeof(struct spdk_blob_md_descriptor_xattr) + strlen(name) + value_len;
8481 	if (desc_size > SPDK_BS_MAX_DESC_SIZE) {
8482 		SPDK_DEBUGLOG(blob, "Xattr '%s' of size %zu does not fit into a single page (max %zu)\n", name,
8483 			      desc_size, SPDK_BS_MAX_DESC_SIZE);
8484 		return -ENOMEM;
8485 	}
8486 
8487 	if (internal) {
8488 		xattrs = &blob->xattrs_internal;
8489 		blob->invalid_flags |= SPDK_BLOB_INTERNAL_XATTR;
8490 	} else {
8491 		xattrs = &blob->xattrs;
8492 	}
8493 
8494 	TAILQ_FOREACH(xattr, xattrs, link) {
8495 		if (!strcmp(name, xattr->name)) {
8496 			tmp = malloc(value_len);
8497 			if (!tmp) {
8498 				return -ENOMEM;
8499 			}
8500 
8501 			free(xattr->value);
8502 			xattr->value_len = value_len;
8503 			xattr->value = tmp;
8504 			memcpy(xattr->value, value, value_len);
8505 
8506 			blob->state = SPDK_BLOB_STATE_DIRTY;
8507 
8508 			return 0;
8509 		}
8510 	}
8511 
8512 	xattr = calloc(1, sizeof(*xattr));
8513 	if (!xattr) {
8514 		return -ENOMEM;
8515 	}
8516 
8517 	xattr->name = strdup(name);
8518 	if (!xattr->name) {
8519 		free(xattr);
8520 		return -ENOMEM;
8521 	}
8522 
8523 	xattr->value_len = value_len;
8524 	xattr->value = malloc(value_len);
8525 	if (!xattr->value) {
8526 		free(xattr->name);
8527 		free(xattr);
8528 		return -ENOMEM;
8529 	}
8530 	memcpy(xattr->value, value, value_len);
8531 	TAILQ_INSERT_TAIL(xattrs, xattr, link);
8532 
8533 	blob->state = SPDK_BLOB_STATE_DIRTY;
8534 
8535 	return 0;
8536 }
8537 
8538 int
8539 spdk_blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value,
8540 		    uint16_t value_len)
8541 {
8542 	return blob_set_xattr(blob, name, value, value_len, false);
8543 }
8544 
8545 static int
8546 blob_remove_xattr(struct spdk_blob *blob, const char *name, bool internal)
8547 {
8548 	struct spdk_xattr_tailq *xattrs;
8549 	struct spdk_xattr	*xattr;
8550 
8551 	blob_verify_md_op(blob);
8552 
8553 	if (blob->md_ro) {
8554 		return -EPERM;
8555 	}
8556 	xattrs = internal ? &blob->xattrs_internal : &blob->xattrs;
8557 
8558 	TAILQ_FOREACH(xattr, xattrs, link) {
8559 		if (!strcmp(name, xattr->name)) {
8560 			TAILQ_REMOVE(xattrs, xattr, link);
8561 			free(xattr->value);
8562 			free(xattr->name);
8563 			free(xattr);
8564 
8565 			if (internal && TAILQ_EMPTY(&blob->xattrs_internal)) {
8566 				blob->invalid_flags &= ~SPDK_BLOB_INTERNAL_XATTR;
8567 			}
8568 			blob->state = SPDK_BLOB_STATE_DIRTY;
8569 
8570 			return 0;
8571 		}
8572 	}
8573 
8574 	return -ENOENT;
8575 }
8576 
8577 int
8578 spdk_blob_remove_xattr(struct spdk_blob *blob, const char *name)
8579 {
8580 	return blob_remove_xattr(blob, name, false);
8581 }
8582 
8583 static int
8584 blob_get_xattr_value(struct spdk_blob *blob, const char *name,
8585 		     const void **value, size_t *value_len, bool internal)
8586 {
8587 	struct spdk_xattr	*xattr;
8588 	struct spdk_xattr_tailq *xattrs;
8589 
8590 	xattrs = internal ? &blob->xattrs_internal : &blob->xattrs;
8591 
8592 	TAILQ_FOREACH(xattr, xattrs, link) {
8593 		if (!strcmp(name, xattr->name)) {
8594 			*value = xattr->value;
8595 			*value_len = xattr->value_len;
8596 			return 0;
8597 		}
8598 	}
8599 	return -ENOENT;
8600 }
8601 
8602 int
8603 spdk_blob_get_xattr_value(struct spdk_blob *blob, const char *name,
8604 			  const void **value, size_t *value_len)
8605 {
8606 	blob_verify_md_op(blob);
8607 
8608 	return blob_get_xattr_value(blob, name, value, value_len, false);
8609 }
8610 
8611 struct spdk_xattr_names {
8612 	uint32_t	count;
8613 	const char	*names[0];
8614 };
8615 
8616 static int
8617 blob_get_xattr_names(struct spdk_xattr_tailq *xattrs, struct spdk_xattr_names **names)
8618 {
8619 	struct spdk_xattr	*xattr;
8620 	int			count = 0;
8621 
8622 	TAILQ_FOREACH(xattr, xattrs, link) {
8623 		count++;
8624 	}
8625 
8626 	*names = calloc(1, sizeof(struct spdk_xattr_names) + count * sizeof(char *));
8627 	if (*names == NULL) {
8628 		return -ENOMEM;
8629 	}
8630 
8631 	TAILQ_FOREACH(xattr, xattrs, link) {
8632 		(*names)->names[(*names)->count++] = xattr->name;
8633 	}
8634 
8635 	return 0;
8636 }
8637 
8638 int
8639 spdk_blob_get_xattr_names(struct spdk_blob *blob, struct spdk_xattr_names **names)
8640 {
8641 	blob_verify_md_op(blob);
8642 
8643 	return blob_get_xattr_names(&blob->xattrs, names);
8644 }
8645 
8646 uint32_t
8647 spdk_xattr_names_get_count(struct spdk_xattr_names *names)
8648 {
8649 	assert(names != NULL);
8650 
8651 	return names->count;
8652 }
8653 
8654 const char *
8655 spdk_xattr_names_get_name(struct spdk_xattr_names *names, uint32_t index)
8656 {
8657 	if (index >= names->count) {
8658 		return NULL;
8659 	}
8660 
8661 	return names->names[index];
8662 }
8663 
8664 void
8665 spdk_xattr_names_free(struct spdk_xattr_names *names)
8666 {
8667 	free(names);
8668 }
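
/*
 * Usage sketch (illustrative only): set, read back, and enumerate user
 * (non-internal) xattrs. The pointer returned by
 * spdk_blob_get_xattr_value() references blob-owned memory and must not be
 * freed by the caller.
 *
 *	const void *value;
 *	size_t value_len;
 *	struct spdk_xattr_names *names;
 *	uint32_t i;
 *
 *	spdk_blob_set_xattr(blob, "name", "lvol0", sizeof("lvol0"));
 *	spdk_blob_get_xattr_value(blob, "name", &value, &value_len);
 *
 *	spdk_blob_get_xattr_names(blob, &names);
 *	for (i = 0; i < spdk_xattr_names_get_count(names); i++) {
 *		printf("%s\n", spdk_xattr_names_get_name(names, i));
 *	}
 *	spdk_xattr_names_free(names);
 */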
8669 
8670 struct spdk_bs_type
8671 spdk_bs_get_bstype(struct spdk_blob_store *bs)
8672 {
8673 	return bs->bstype;
8674 }
8675 
8676 void
8677 spdk_bs_set_bstype(struct spdk_blob_store *bs, struct spdk_bs_type bstype)
8678 {
8679 	memcpy(&bs->bstype, &bstype, sizeof(bstype));
8680 }
8681 
8682 bool
8683 spdk_blob_is_read_only(struct spdk_blob *blob)
8684 {
8685 	assert(blob != NULL);
8686 	return (blob->data_ro || blob->md_ro);
8687 }
8688 
8689 bool
8690 spdk_blob_is_snapshot(struct spdk_blob *blob)
8691 {
8692 	struct spdk_blob_list *snapshot_entry;
8693 
8694 	assert(blob != NULL);
8695 
8696 	snapshot_entry = bs_get_snapshot_entry(blob->bs, blob->id);
8697 	if (snapshot_entry == NULL) {
8698 		return false;
8699 	}
8700 
8701 	return true;
8702 }
8703 
8704 bool
8705 spdk_blob_is_clone(struct spdk_blob *blob)
8706 {
8707 	assert(blob != NULL);
8708 
8709 	if (blob->parent_id != SPDK_BLOBID_INVALID &&
8710 	    blob->parent_id != SPDK_BLOBID_EXTERNAL_SNAPSHOT) {
8711 		assert(spdk_blob_is_thin_provisioned(blob));
8712 		return true;
8713 	}
8714 
8715 	return false;
8716 }
8717 
8718 bool
8719 spdk_blob_is_thin_provisioned(struct spdk_blob *blob)
8720 {
8721 	assert(blob != NULL);
8722 	return !!(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
8723 }
8724 
8725 bool
8726 spdk_blob_is_esnap_clone(const struct spdk_blob *blob)
8727 {
8728 	return blob_is_esnap_clone(blob);
8729 }
8730 
8731 static void
8732 blob_update_clear_method(struct spdk_blob *blob)
8733 {
8734 	enum blob_clear_method stored_cm;
8735 
8736 	assert(blob != NULL);
8737 
8738 	/* If BLOB_CLEAR_WITH_DEFAULT was passed in, use the setting stored
8739 	 * in metadata previously.  If something other than the default was
8740 	 * specified, ignore the stored value and use what was passed in.
8741 	 */
8742 	stored_cm = ((blob->md_ro_flags & SPDK_BLOB_CLEAR_METHOD) >> SPDK_BLOB_CLEAR_METHOD_SHIFT);
8743 
8744 	if (blob->clear_method == BLOB_CLEAR_WITH_DEFAULT) {
8745 		blob->clear_method = stored_cm;
8746 	} else if (blob->clear_method != stored_cm) {
8747 		SPDK_WARNLOG("Using passed in clear method 0x%x instead of stored value of 0x%x\n",
8748 			     blob->clear_method, stored_cm);
8749 	}
8750 }
8751 
8752 spdk_blob_id
8753 spdk_blob_get_parent_snapshot(struct spdk_blob_store *bs, spdk_blob_id blob_id)
8754 {
8755 	struct spdk_blob_list *snapshot_entry = NULL;
8756 	struct spdk_blob_list *clone_entry = NULL;
8757 
8758 	TAILQ_FOREACH(snapshot_entry, &bs->snapshots, link) {
8759 		TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) {
8760 			if (clone_entry->id == blob_id) {
8761 				return snapshot_entry->id;
8762 			}
8763 		}
8764 	}
8765 
8766 	return SPDK_BLOBID_INVALID;
8767 }
8768 
8769 int
8770 spdk_blob_get_clones(struct spdk_blob_store *bs, spdk_blob_id blobid, spdk_blob_id *ids,
8771 		     size_t *count)
8772 {
8773 	struct spdk_blob_list *snapshot_entry, *clone_entry;
8774 	size_t n;
8775 
8776 	snapshot_entry = bs_get_snapshot_entry(bs, blobid);
8777 	if (snapshot_entry == NULL) {
8778 		*count = 0;
8779 		return 0;
8780 	}
8781 
8782 	if (ids == NULL || *count < snapshot_entry->clone_count) {
8783 		*count = snapshot_entry->clone_count;
8784 		return -ENOMEM;
8785 	}
8786 	*count = snapshot_entry->clone_count;
8787 
8788 	n = 0;
8789 	TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) {
8790 		ids[n++] = clone_entry->id;
8791 	}
8792 
8793 	return 0;
8794 }
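
/*
 * Usage sketch (illustrative only): the usual two-call pattern - pass
 * ids == NULL (or a too-small count) to learn the clone count, then call
 * again with a buffer of that size.
 *
 *	size_t count = 0;
 *	spdk_blob_id *ids;
 *
 *	spdk_blob_get_clones(bs, snapshotid, NULL, &count);
 *	ids = calloc(count, sizeof(*ids));
 *	if (ids != NULL && spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0) {
 *		// count entries of ids[] are valid here
 *	}
 *	free(ids);
 */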
8795 
8796 static void
8797 bs_load_grow_continue(struct spdk_bs_load_ctx *ctx)
8798 {
8799 	int rc;
8800 
8801 	if (ctx->super->size == 0) {
8802 		ctx->super->size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen;
8803 	}
8804 
8805 	if (ctx->super->io_unit_size == 0) {
8806 		ctx->super->io_unit_size = SPDK_BS_PAGE_SIZE;
8807 	}
8808 
8809 	/* Parse the super block */
8810 	ctx->bs->clean = 1;
8811 	ctx->bs->cluster_sz = ctx->super->cluster_size;
8812 	ctx->bs->total_clusters = ctx->super->size / ctx->super->cluster_size;
8813 	ctx->bs->pages_per_cluster = ctx->bs->cluster_sz / SPDK_BS_PAGE_SIZE;
8814 	if (spdk_u32_is_pow2(ctx->bs->pages_per_cluster)) {
8815 		ctx->bs->pages_per_cluster_shift = spdk_u32log2(ctx->bs->pages_per_cluster);
8816 	}
8817 	ctx->bs->io_unit_size = ctx->super->io_unit_size;
8818 	rc = spdk_bit_array_resize(&ctx->used_clusters, ctx->bs->total_clusters);
8819 	if (rc < 0) {
8820 		bs_load_ctx_fail(ctx, -ENOMEM);
8821 		return;
8822 	}
8823 	ctx->bs->md_start = ctx->super->md_start;
8824 	ctx->bs->md_len = ctx->super->md_len;
8825 	rc = spdk_bit_array_resize(&ctx->bs->open_blobids, ctx->bs->md_len);
8826 	if (rc < 0) {
8827 		bs_load_ctx_fail(ctx, -ENOMEM);
8828 		return;
8829 	}
8830 
8831 	ctx->bs->total_data_clusters = ctx->bs->total_clusters - spdk_divide_round_up(
8832 					       ctx->bs->md_start + ctx->bs->md_len, ctx->bs->pages_per_cluster);
8833 	ctx->bs->super_blob = ctx->super->super_blob;
8834 	memcpy(&ctx->bs->bstype, &ctx->super->bstype, sizeof(ctx->super->bstype));
8835 
8836 	if (ctx->super->used_blobid_mask_len == 0 || ctx->super->clean == 0) {
8837 		SPDK_ERRLOG("Cannot grow an unclean blobstore; load it normally to clean it.\n");
8838 		bs_load_ctx_fail(ctx, -EIO);
8839 		return;
8840 	} else {
8841 		bs_load_read_used_pages(ctx);
8842 	}
8843 }
8844 
8845 static void
8846 bs_load_grow_super_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
8847 {
8848 	struct spdk_bs_load_ctx	*ctx = cb_arg;
8849 
8850 	if (bserrno != 0) {
8851 		bs_load_ctx_fail(ctx, bserrno);
8852 		return;
8853 	}
8854 	bs_load_grow_continue(ctx);
8855 }
8856 
8857 static void
8858 bs_load_grow_used_clusters_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
8859 {
8860 	struct spdk_bs_load_ctx	*ctx = cb_arg;
8861 
8862 	if (bserrno != 0) {
8863 		bs_load_ctx_fail(ctx, bserrno);
8864 		return;
8865 	}
8866 
8867 	spdk_free(ctx->mask);
8868 
8869 	bs_sequence_write_dev(ctx->seq, ctx->super, bs_page_to_lba(ctx->bs, 0),
8870 			      bs_byte_to_lba(ctx->bs, sizeof(*ctx->super)),
8871 			      bs_load_grow_super_write_cpl, ctx);
8872 }
8873 
8874 static void
8875 bs_load_grow_used_clusters_read_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
8876 {
8877 	struct spdk_bs_load_ctx *ctx = cb_arg;
8878 	uint64_t		lba, lba_count;
8879 	uint64_t		dev_size;
8880 	uint64_t		total_clusters;
8881 
8882 	if (bserrno != 0) {
8883 		bs_load_ctx_fail(ctx, bserrno);
8884 		return;
8885 	}
8886 
8887 	/* The type must be correct */
8888 	assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_CLUSTERS);
8889 	/* The length of the mask (in bits) must not be greater than the length of the buffer (converted to bits) */
8890 	assert(ctx->mask->length <= (ctx->super->used_cluster_mask_len * sizeof(
8891 					     struct spdk_blob_md_page) * 8));
8892 	dev_size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen;
8893 	total_clusters = dev_size / ctx->super->cluster_size;
8894 	ctx->mask->length = total_clusters;
8895 
8896 	lba = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start);
8897 	lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len);
8898 	bs_sequence_write_dev(ctx->seq, ctx->mask, lba, lba_count,
8899 			      bs_load_grow_used_clusters_write_cpl, ctx);
8900 }
8901 
8902 static void
8903 bs_load_try_to_grow(struct spdk_bs_load_ctx *ctx)
8904 {
8905 	uint64_t dev_size, total_clusters, used_cluster_mask_len, max_used_cluster_mask;
8906 	uint64_t lba, lba_count, mask_size;
8907 
8908 	dev_size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen;
8909 	total_clusters = dev_size / ctx->super->cluster_size;
8910 	used_cluster_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) +
8911 				spdk_divide_round_up(total_clusters, 8),
8912 				SPDK_BS_PAGE_SIZE);
8913 	max_used_cluster_mask = ctx->super->used_blobid_mask_start - ctx->super->used_cluster_mask_start;
8914 	/* No need to grow, or no space to grow */
8915 	if (ctx->super->size >= dev_size || used_cluster_mask_len > max_used_cluster_mask) {
8916 		SPDK_DEBUGLOG(blob, "No grow\n");
8917 		bs_load_grow_continue(ctx);
8918 		return;
8919 	}
8920 
8921 	SPDK_DEBUGLOG(blob, "Resize blobstore\n");
8922 
8923 	ctx->super->size = dev_size;
8924 	ctx->super->used_cluster_mask_len = used_cluster_mask_len;
8925 	ctx->super->crc = blob_md_page_calc_crc(ctx->super);
8926 
8927 	mask_size = used_cluster_mask_len * SPDK_BS_PAGE_SIZE;
8928 	ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_SOCKET_ID_ANY,
8929 				 SPDK_MALLOC_DMA);
8930 	if (!ctx->mask) {
8931 		bs_load_ctx_fail(ctx, -ENOMEM);
8932 		return;
8933 	}
8934 	lba = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start);
8935 	lba_count = bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len);
8936 	bs_sequence_read_dev(ctx->seq, ctx->mask, lba, lba_count,
8937 			     bs_load_grow_used_clusters_read_cpl, ctx);
8938 }
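
/*
 * Worked example for used_cluster_mask_len (illustrative numbers only): with
 * total_clusters == 16384 the bitmask body needs 16384 / 8 == 2048 bytes;
 * adding the small spdk_bs_md_mask header and rounding up to SPDK_BS_PAGE_SIZE
 * (4 KiB) gives a single page. Growing only proceeds when that fits in the gap
 * between used_cluster_mask_start and used_blobid_mask_start.
 */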
8939 
8940 static void
8941 bs_grow_load_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
8942 {
8943 	struct spdk_bs_load_ctx *ctx = cb_arg;
8944 	int rc;
8945 
8946 	rc = bs_super_validate(ctx->super, ctx->bs);
8947 	if (rc != 0) {
8948 		bs_load_ctx_fail(ctx, rc);
8949 		return;
8950 	}
8951 
8952 	bs_load_try_to_grow(ctx);
8953 }
8954 
8955 struct spdk_bs_grow_ctx {
8956 	struct spdk_blob_store		*bs;
8957 	struct spdk_bs_super_block	*super;
8958 
8959 	struct spdk_bit_pool		*new_used_clusters;
8960 	struct spdk_bs_md_mask		*new_used_clusters_mask;
8961 
8962 	spdk_bs_sequence_t		*seq;
8963 };
8964 
8965 static void
8966 bs_grow_live_done(struct spdk_bs_grow_ctx *ctx, int bserrno)
8967 {
8968 	if (bserrno != 0) {
8969 		spdk_bit_pool_free(&ctx->new_used_clusters);
8970 	}
8971 
8972 	bs_sequence_finish(ctx->seq, bserrno);
8973 	free(ctx->new_used_clusters_mask);
8974 	spdk_free(ctx->super);
8975 	free(ctx);
8976 }
8977 
8978 static void
8979 bs_grow_live_super_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
8980 {
8981 	struct spdk_bs_grow_ctx	*ctx = cb_arg;
8982 	struct spdk_blob_store *bs = ctx->bs;
8983 	uint64_t total_clusters;
8984 
8985 	if (bserrno != 0) {
8986 		bs_grow_live_done(ctx, bserrno);
8987 		return;
8988 	}
8989 
8990 	/*
8991 	 * The blobstore is not clean again until it is unloaded; for now only the
8992 	 * super block is up to date. This is similar to the state right after
8993 	 * blobstore init, before bs_write_used_md() has executed.
8994 	 * On a clean unload, the used md pages will be written out.
8995 	 * After an unclean shutdown, loading the blobstore will take the recovery
8996 	 * path, correctly filling out used_clusters at the new size and writing it out.
8997 	 */
8998 	bs->clean = 0;
8999 
9000 	/* Reverting super->size past this point is complex; avoid any error paths
9001 	 * that would require doing so. */
9002 	spdk_spin_lock(&bs->used_lock);
9003 
9004 	total_clusters = ctx->super->size / ctx->super->cluster_size;
9005 
9006 	assert(total_clusters >= spdk_bit_pool_capacity(bs->used_clusters));
9007 	spdk_bit_pool_store_mask(bs->used_clusters, ctx->new_used_clusters_mask);
9008 
9009 	assert(total_clusters == spdk_bit_pool_capacity(ctx->new_used_clusters));
9010 	spdk_bit_pool_load_mask(ctx->new_used_clusters, ctx->new_used_clusters_mask);
9011 
9012 	spdk_bit_pool_free(&bs->used_clusters);
9013 	bs->used_clusters = ctx->new_used_clusters;
9014 
9015 	bs->total_clusters = total_clusters;
9016 	bs->total_data_clusters = bs->total_clusters - spdk_divide_round_up(
9017 					  bs->md_start + bs->md_len, bs->pages_per_cluster);
9018 
9019 	bs->num_free_clusters = spdk_bit_pool_count_free(bs->used_clusters);
9020 	assert(bs->num_free_clusters <= bs->total_clusters);
9021 	spdk_spin_unlock(&bs->used_lock);
9022 
9023 	bs_grow_live_done(ctx, 0);
9024 }
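
/*
 * A minimal sketch of the bit-pool swap performed above, using only the
 * spdk/bit_pool.h API ("old_pool", "new_capacity" and "mask_buf" are
 * hypothetical names). The mask buffer must be zero-filled and large enough
 * for the new capacity, which is why new_used_clusters_mask is calloc'd at
 * the grown size:
 *
 *	struct spdk_bit_pool *new_pool = spdk_bit_pool_create(new_capacity);
 *
 *	spdk_bit_pool_store_mask(old_pool, mask_buf);	// snapshot current allocations
 *	spdk_bit_pool_load_mask(new_pool, mask_buf);	// replay them into the larger pool
 *	spdk_bit_pool_free(&old_pool);			// the new pool takes over
 */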
9025 
9026 static void
9027 bs_grow_live_load_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
9028 {
9029 	struct spdk_bs_grow_ctx *ctx = cb_arg;
9030 	uint64_t dev_size, total_clusters, used_cluster_mask_len, max_used_cluster_mask;
9031 	int rc;
9032 
9033 	if (bserrno != 0) {
9034 		bs_grow_live_done(ctx, bserrno);
9035 		return;
9036 	}
9037 
9038 	rc = bs_super_validate(ctx->super, ctx->bs);
9039 	if (rc != 0) {
9040 		bs_grow_live_done(ctx, rc);
9041 		return;
9042 	}
9043 
9044 	dev_size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen;
9045 	total_clusters = dev_size / ctx->super->cluster_size;
9046 	used_cluster_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) +
9047 				spdk_divide_round_up(total_clusters, 8),
9048 				SPDK_BS_PAGE_SIZE);
9049 	max_used_cluster_mask = ctx->super->used_blobid_mask_start - ctx->super->used_cluster_mask_start;
9050 	/* Only dev_size is checked here: it is what can change, while total_clusters changes only with it. */
9051 	if (dev_size == ctx->super->size) {
9052 		SPDK_DEBUGLOG(blob, "No need to grow blobstore\n");
9053 		bs_grow_live_done(ctx, 0);
9054 		return;
9055 	}
9056 	/*
9057 	 * Blobstore cannot be shrunk, so refuse to grow if:
9058 	 * - the new device size is smaller than the size recorded in the super block
9059 	 * - the new total number of clusters is smaller than the used_clusters bit_pool capacity
9060 	 * - there is not enough space in metadata for the used_cluster_mask to be written out
9061 	 */
9062 	if (dev_size < ctx->super->size ||
9063 	    total_clusters < spdk_bit_pool_capacity(ctx->bs->used_clusters) ||
9064 	    used_cluster_mask_len > max_used_cluster_mask) {
9065 		SPDK_DEBUGLOG(blob, "No space to grow blobstore\n");
9066 		bs_grow_live_done(ctx, -ENOSPC);
9067 		return;
9068 	}
9069 
9070 	SPDK_DEBUGLOG(blob, "Resizing blobstore\n");
9071 
9072 	ctx->new_used_clusters_mask = calloc(1, total_clusters);
9073 	if (!ctx->new_used_clusters_mask) {
9074 		bs_grow_live_done(ctx, -ENOMEM);
9075 		return;
9076 	}
9077 	ctx->new_used_clusters = spdk_bit_pool_create(total_clusters);
9078 	if (!ctx->new_used_clusters) {
9079 		bs_grow_live_done(ctx, -ENOMEM);
9080 		return;
9081 	}
9082 
9083 	ctx->super->clean = 0;
9084 	ctx->super->size = dev_size;
9085 	ctx->super->used_cluster_mask_len = used_cluster_mask_len;
9086 	bs_write_super(seq, ctx->bs, ctx->super, bs_grow_live_super_write_cpl, ctx);
9087 }
9088 
9089 void
9090 spdk_bs_grow_live(struct spdk_blob_store *bs,
9091 		  spdk_bs_op_complete cb_fn, void *cb_arg)
9092 {
9093 	struct spdk_bs_cpl	cpl;
9094 	struct spdk_bs_grow_ctx *ctx;
9095 
9096 	assert(spdk_get_thread() == bs->md_thread);
9097 
9098 	SPDK_DEBUGLOG(blob, "Growing blobstore on dev %p\n", bs->dev);
9099 
9100 	cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
9101 	cpl.u.bs_basic.cb_fn = cb_fn;
9102 	cpl.u.bs_basic.cb_arg = cb_arg;
9103 
9104 	ctx = calloc(1, sizeof(struct spdk_bs_grow_ctx));
9105 	if (!ctx) {
9106 		cb_fn(cb_arg, -ENOMEM);
9107 		return;
9108 	}
9109 	ctx->bs = bs;
9110 
9111 	ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL,
9112 				  SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
9113 	if (!ctx->super) {
9114 		free(ctx);
9115 		cb_fn(cb_arg, -ENOMEM);
9116 		return;
9117 	}
9118 
9119 	ctx->seq = bs_sequence_start_bs(bs->md_channel, &cpl);
9120 	if (!ctx->seq) {
9121 		spdk_free(ctx->super);
9122 		free(ctx);
9123 		cb_fn(cb_arg, -ENOMEM);
9124 		return;
9125 	}
9126 
9127 	/* Read the super block */
9128 	bs_sequence_read_dev(ctx->seq, ctx->super, bs_page_to_lba(bs, 0),
9129 			     bs_byte_to_lba(bs, sizeof(*ctx->super)),
9130 			     bs_grow_live_load_super_cpl, ctx);
9131 }
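
/*
 * Illustrative usage sketch ("g_bs" and "grow_done" are hypothetical): after
 * the underlying device has been resized, an application running on the
 * metadata thread can pick up the added capacity without reloading:
 *
 *	static void
 *	grow_done(void *cb_arg, int bserrno)
 *	{
 *		if (bserrno == 0) {
 *			printf("free clusters: %" PRIu64 "\n", spdk_bs_free_cluster_count(g_bs));
 *		}
 *	}
 *
 *	spdk_bs_grow_live(g_bs, grow_done, NULL);
 */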
9132 
9133 void
9134 spdk_bs_grow(struct spdk_bs_dev *dev, struct spdk_bs_opts *o,
9135 	     spdk_bs_op_with_handle_complete cb_fn, void *cb_arg)
9136 {
9137 	struct spdk_blob_store	*bs;
9138 	struct spdk_bs_cpl	cpl;
9139 	struct spdk_bs_load_ctx *ctx;
9140 	struct spdk_bs_opts	opts = {};
9141 	int err;
9142 
9143 	SPDK_DEBUGLOG(blob, "Loading blobstore from dev %p\n", dev);
9144 
9145 	if ((SPDK_BS_PAGE_SIZE % dev->blocklen) != 0) {
9146 		SPDK_DEBUGLOG(blob, "unsupported dev block length of %d\n", dev->blocklen);
9147 		dev->destroy(dev);
9148 		cb_fn(cb_arg, NULL, -EINVAL);
9149 		return;
9150 	}
9151 
9152 	spdk_bs_opts_init(&opts, sizeof(opts));
9153 	if (o) {
9154 		if (bs_opts_copy(o, &opts)) {
9155 			return;
9156 		}
9157 	}
9158 
9159 	if (opts.max_md_ops == 0 || opts.max_channel_ops == 0) {
9160 		dev->destroy(dev);
9161 		cb_fn(cb_arg, NULL, -EINVAL);
9162 		return;
9163 	}
9164 
9165 	err = bs_alloc(dev, &opts, &bs, &ctx);
9166 	if (err) {
9167 		dev->destroy(dev);
9168 		cb_fn(cb_arg, NULL, err);
9169 		return;
9170 	}
9171 
9172 	cpl.type = SPDK_BS_CPL_TYPE_BS_HANDLE;
9173 	cpl.u.bs_handle.cb_fn = cb_fn;
9174 	cpl.u.bs_handle.cb_arg = cb_arg;
9175 	cpl.u.bs_handle.bs = bs;
9176 
9177 	ctx->seq = bs_sequence_start_bs(bs->md_channel, &cpl);
9178 	if (!ctx->seq) {
9179 		spdk_free(ctx->super);
9180 		free(ctx);
9181 		bs_free(bs);
9182 		cb_fn(cb_arg, NULL, -ENOMEM);
9183 		return;
9184 	}
9185 
9186 	/* Read the super block */
9187 	bs_sequence_read_dev(ctx->seq, ctx->super, bs_page_to_lba(bs, 0),
9188 			     bs_byte_to_lba(bs, sizeof(*ctx->super)),
9189 			     bs_grow_load_super_cpl, ctx);
9190 }
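
/*
 * Illustrative sketch: spdk_bs_grow() is the load-time counterpart of
 * spdk_bs_grow_live(), used in place of spdk_bs_load() when the device may
 * have grown since the last unload ("bs_dev" and "grow_load_done" are
 * hypothetical):
 *
 *	static void
 *	grow_load_done(void *cb_arg, struct spdk_blob_store *bs, int bserrno)
 *	{
 *		if (bserrno == 0) {
 *			g_bs = bs;	// loaded, and grown if there was room
 *		}
 *	}
 *
 *	struct spdk_bs_opts opts;
 *
 *	spdk_bs_opts_init(&opts, sizeof(opts));
 *	spdk_bs_grow(bs_dev, &opts, grow_load_done, NULL);
 */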
9191 
9192 int
9193 spdk_blob_get_esnap_id(struct spdk_blob *blob, const void **id, size_t *len)
9194 {
9195 	if (!blob_is_esnap_clone(blob)) {
9196 		return -EINVAL;
9197 	}
9198 
9199 	return blob_get_xattr_value(blob, BLOB_EXTERNAL_SNAPSHOT_ID, id, len, true);
9200 }
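
/*
 * Illustrative sketch: the id returned above is the opaque value recorded when
 * the esnap clone was created. It points at blob-owned xattr memory, so the
 * caller must not free it:
 *
 *	const void *id;
 *	size_t len;
 *
 *	if (spdk_blob_get_esnap_id(blob, &id, &len) == 0) {
 *		printf("esnap id is %zu bytes\n", len);
 *	}
 */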
9201 
9202 struct spdk_io_channel *
9203 blob_esnap_get_io_channel(struct spdk_io_channel *ch, struct spdk_blob *blob)
9204 {
9205 	struct spdk_bs_channel		*bs_channel = spdk_io_channel_get_ctx(ch);
9206 	struct spdk_bs_dev		*bs_dev = blob->back_bs_dev;
9207 	struct blob_esnap_channel	find = {};
9208 	struct blob_esnap_channel	*esnap_channel, *existing;
9209 
9210 	find.blob_id = blob->id;
9211 	esnap_channel = RB_FIND(blob_esnap_channel_tree, &bs_channel->esnap_channels, &find);
9212 	if (spdk_likely(esnap_channel != NULL)) {
9213 		SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": using cached channel on thread %s\n",
9214 			      blob->id, spdk_thread_get_name(spdk_get_thread()));
9215 		return esnap_channel->channel;
9216 	}
9217 
9218 	SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": allocating channel on thread %s\n",
9219 		      blob->id, spdk_thread_get_name(spdk_get_thread()));
9220 
9221 	esnap_channel = calloc(1, sizeof(*esnap_channel));
9222 	if (esnap_channel == NULL) {
9223 		SPDK_NOTICELOG("blob 0x%" PRIx64 " channel allocation failed: no memory\n",
9224 			       find.blob_id);
9225 		return NULL;
9226 	}
9227 	esnap_channel->channel = bs_dev->create_channel(bs_dev);
9228 	if (esnap_channel->channel == NULL) {
9229 		SPDK_NOTICELOG("blob 0x%" PRIx64 " back channel allocation failed\n", blob->id);
9230 		free(esnap_channel);
9231 		return NULL;
9232 	}
9233 	esnap_channel->blob_id = find.blob_id;
9234 	existing = RB_INSERT(blob_esnap_channel_tree, &bs_channel->esnap_channels, esnap_channel);
9235 	if (spdk_unlikely(existing != NULL)) {
9236 		/*
9237 		 * This should be unreachable: all modifications to this tree happen on this thread.
9238 		 */
9239 		SPDK_ERRLOG("blob 0x%" PRIx64 " lost race to allocate a channel\n", find.blob_id);
9240 		assert(false);
9241 
9242 		bs_dev->destroy_channel(bs_dev, esnap_channel->channel);
9243 		free(esnap_channel);
9244 
9245 		return existing->channel;
9246 	}
9247 
9248 	return esnap_channel->channel;
9249 }
9250 
9251 static int
9252 blob_esnap_channel_compare(struct blob_esnap_channel *c1, struct blob_esnap_channel *c2)
9253 {
9254 	return (c1->blob_id < c2->blob_id ? -1 : c1->blob_id > c2->blob_id);
9255 }
9256 
9257 struct blob_esnap_destroy_ctx {
9258 	spdk_blob_op_with_handle_complete	cb_fn;
9259 	void					*cb_arg;
9260 	struct spdk_blob			*blob;
9261 	struct spdk_bs_dev			*back_bs_dev;
9262 	bool					abort_io;
9263 };
9264 
9265 static void
9266 blob_esnap_destroy_channels_done(struct spdk_io_channel_iter *i, int status)
9267 {
9268 	struct blob_esnap_destroy_ctx	*ctx = spdk_io_channel_iter_get_ctx(i);
9269 	struct spdk_blob		*blob = ctx->blob;
9270 	struct spdk_blob_store		*bs = blob->bs;
9271 
9272 	SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": done destroying channels for this blob\n",
9273 		      blob->id);
9274 
9275 	if (ctx->cb_fn != NULL) {
9276 		ctx->cb_fn(ctx->cb_arg, blob, status);
9277 	}
9278 	free(ctx);
9279 
9280 	bs->esnap_channels_unloading--;
9281 	if (bs->esnap_channels_unloading == 0 && bs->esnap_unload_cb_fn != NULL) {
9282 		spdk_bs_unload(bs, bs->esnap_unload_cb_fn, bs->esnap_unload_cb_arg);
9283 	}
9284 }
9285 
9286 static void
9287 blob_esnap_destroy_one_channel(struct spdk_io_channel_iter *i)
9288 {
9289 	struct blob_esnap_destroy_ctx	*ctx = spdk_io_channel_iter_get_ctx(i);
9290 	struct spdk_blob		*blob = ctx->blob;
9291 	struct spdk_bs_dev		*bs_dev = ctx->back_bs_dev;
9292 	struct spdk_io_channel		*channel = spdk_io_channel_iter_get_channel(i);
9293 	struct spdk_bs_channel		*bs_channel = spdk_io_channel_get_ctx(channel);
9294 	struct blob_esnap_channel	*esnap_channel;
9295 	struct blob_esnap_channel	find = {};
9296 
9297 	assert(spdk_get_thread() == spdk_io_channel_get_thread(channel));
9298 
9299 	find.blob_id = blob->id;
9300 	esnap_channel = RB_FIND(blob_esnap_channel_tree, &bs_channel->esnap_channels, &find);
9301 	if (esnap_channel != NULL) {
9302 		SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": destroying channel on thread %s\n",
9303 			      blob->id, spdk_thread_get_name(spdk_get_thread()));
9304 		RB_REMOVE(blob_esnap_channel_tree, &bs_channel->esnap_channels, esnap_channel);
9305 
9306 		if (ctx->abort_io) {
9307 			spdk_bs_user_op_t *op, *tmp;
9308 
9309 			TAILQ_FOREACH_SAFE(op, &bs_channel->queued_io, link, tmp) {
9310 				if (op->back_channel == esnap_channel->channel) {
9311 					TAILQ_REMOVE(&bs_channel->queued_io, op, link);
9312 					bs_user_op_abort(op, -EIO);
9313 				}
9314 			}
9315 		}
9316 
9317 		bs_dev->destroy_channel(bs_dev, esnap_channel->channel);
9318 		free(esnap_channel);
9319 	}
9320 
9321 	spdk_for_each_channel_continue(i, 0);
9322 }
9323 
9324 /*
9325  * Destroy the channels for a specific blob on each thread with a blobstore channel. This should be
9326  * used when closing an esnap clone blob and after decoupling from the parent.
9327  */
9328 static void
9329 blob_esnap_destroy_bs_dev_channels(struct spdk_blob *blob, bool abort_io,
9330 				   spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
9331 {
9332 	struct blob_esnap_destroy_ctx	*ctx;
9333 
9334 	if (!blob_is_esnap_clone(blob) || blob->back_bs_dev == NULL) {
9335 		if (cb_fn != NULL) {
9336 			cb_fn(cb_arg, blob, 0);
9337 		}
9338 		return;
9339 	}
9340 
9341 	ctx = calloc(1, sizeof(*ctx));
9342 	if (ctx == NULL) {
9343 		if (cb_fn != NULL) {
9344 			cb_fn(cb_arg, blob, -ENOMEM);
9345 		}
9346 		return;
9347 	}
9348 	ctx->cb_fn = cb_fn;
9349 	ctx->cb_arg = cb_arg;
9350 	ctx->blob = blob;
9351 	ctx->back_bs_dev = blob->back_bs_dev;
9352 	ctx->abort_io = abort_io;
9353 
9354 	SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64 ": destroying channels for this blob\n",
9355 		      blob->id);
9356 
9357 	blob->bs->esnap_channels_unloading++;
9358 	spdk_for_each_channel(blob->bs, blob_esnap_destroy_one_channel, ctx,
9359 			      blob_esnap_destroy_channels_done);
9360 }
9361 
9362 /*
9363  * Destroy all bs_dev channels on a specific blobstore channel. This should be used when a
9364  * bs_channel is destroyed.
9365  */
9366 static void
9367 blob_esnap_destroy_bs_channel(struct spdk_bs_channel *ch)
9368 {
9369 	struct blob_esnap_channel *esnap_channel, *esnap_channel_tmp;
9370 
9371 	assert(spdk_get_thread() == spdk_io_channel_get_thread(spdk_io_channel_from_ctx(ch)));
9372 
9373 	SPDK_DEBUGLOG(blob_esnap, "destroying channels on thread %s\n",
9374 		      spdk_thread_get_name(spdk_get_thread()));
9375 	RB_FOREACH_SAFE(esnap_channel, blob_esnap_channel_tree, &ch->esnap_channels,
9376 			esnap_channel_tmp) {
9377 		SPDK_DEBUGLOG(blob_esnap, "blob 0x%" PRIx64
9378 			      ": destroying one channel in thread %s\n",
9379 			      esnap_channel->blob_id, spdk_thread_get_name(spdk_get_thread()));
9380 		RB_REMOVE(blob_esnap_channel_tree, &ch->esnap_channels, esnap_channel);
9381 		spdk_put_io_channel(esnap_channel->channel);
9382 		free(esnap_channel);
9383 	}
9384 	SPDK_DEBUGLOG(blob_esnap, "done destroying channels on thread %s\n",
9385 		      spdk_thread_get_name(spdk_get_thread()));
9386 }
9387 
9388 struct set_bs_dev_ctx {
9389 	struct spdk_blob	*blob;
9390 	struct spdk_bs_dev	*back_bs_dev;
9391 	spdk_blob_op_complete	cb_fn;
9392 	void			*cb_arg;
9393 	int			bserrno;
9394 };
9395 
9396 static void
9397 blob_set_back_bs_dev_done(void *_ctx, int bserrno)
9398 {
9399 	struct set_bs_dev_ctx	*ctx = _ctx;
9400 
9401 	if (bserrno != 0) {
9402 		/* Even though the unfreeze failed, the update may have succeeded. */
9403 		SPDK_ERRLOG("blob 0x%" PRIx64 ": unfreeze failed with error %d\n", ctx->blob->id,
9404 			    bserrno);
9405 	}
9406 	ctx->cb_fn(ctx->cb_arg, ctx->bserrno);
9407 	free(ctx);
9408 }
9409 
9410 static void
9411 blob_frozen_set_back_bs_dev(void *_ctx, struct spdk_blob *blob, int bserrno)
9412 {
9413 	struct set_bs_dev_ctx	*ctx = _ctx;
9414 
9415 	if (bserrno != 0) {
9416 		SPDK_ERRLOG("blob 0x%" PRIx64 ": failed to release old back_bs_dev with error %d\n",
9417 			    blob->id, bserrno);
9418 		ctx->bserrno = bserrno;
9419 		blob_unfreeze_io(blob, blob_set_back_bs_dev_done, ctx);
9420 		return;
9421 	}
9422 
9423 	if (blob->back_bs_dev != NULL) {
9424 		blob->back_bs_dev->destroy(blob->back_bs_dev);
9425 	}
9426 
9427 	SPDK_NOTICELOG("blob 0x%" PRIx64 ": hotplugged back_bs_dev\n", blob->id);
9428 	blob->back_bs_dev = ctx->back_bs_dev;
9429 	ctx->bserrno = 0;
9430 
9431 	blob_unfreeze_io(blob, blob_set_back_bs_dev_done, ctx);
9432 }
9433 
9434 static void
9435 blob_frozen_destroy_esnap_channels(void *_ctx, int bserrno)
9436 {
9437 	struct set_bs_dev_ctx	*ctx = _ctx;
9438 	struct spdk_blob	*blob = ctx->blob;
9439 
9440 	if (bserrno != 0) {
9441 		SPDK_ERRLOG("blob 0x%" PRIx64 ": failed to freeze with error %d\n", blob->id,
9442 			    bserrno);
9443 		ctx->cb_fn(ctx->cb_arg, bserrno);
9444 		free(ctx);
9445 		return;
9446 	}
9447 
9448 	/*
9449 	 * This does not prevent future reads from the esnap device because any future IO will
9450 	 * lazily create a new esnap IO channel.
9451 	 */
9452 	blob_esnap_destroy_bs_dev_channels(blob, true, blob_frozen_set_back_bs_dev, ctx);
9453 }
9454 
9455 void
9456 spdk_blob_set_esnap_bs_dev(struct spdk_blob *blob, struct spdk_bs_dev *back_bs_dev,
9457 			   spdk_blob_op_complete cb_fn, void *cb_arg)
9458 {
9459 	struct set_bs_dev_ctx	*ctx;
9460 
9461 	if (!blob_is_esnap_clone(blob)) {
9462 		SPDK_ERRLOG("blob 0x%" PRIx64 ": not an esnap clone\n", blob->id);
9463 		cb_fn(cb_arg, -EINVAL);
9464 		return;
9465 	}
9466 
9467 	ctx = calloc(1, sizeof(*ctx));
9468 	if (ctx == NULL) {
9469 		SPDK_ERRLOG("blob 0x%" PRIx64 ": out of memory while setting back_bs_dev\n",
9470 			    blob->id);
9471 		cb_fn(cb_arg, -ENOMEM);
9472 		return;
9473 	}
9474 	ctx->cb_fn = cb_fn;
9475 	ctx->cb_arg = cb_arg;
9476 	ctx->back_bs_dev = back_bs_dev;
9477 	ctx->blob = blob;
9478 	blob_freeze_io(blob, blob_frozen_destroy_esnap_channels, ctx);
9479 }
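
/*
 * Illustrative hotplug sketch ("new_dev" and "hotplug_done" are hypothetical):
 * an external snapshot device that reappears after being missing can be
 * reattached without reopening the blob. IO is frozen, stale esnap channels
 * are torn down, and the old back_bs_dev is destroyed before the swap:
 *
 *	static void
 *	hotplug_done(void *cb_arg, int bserrno)
 *	{
 *		SPDK_NOTICELOG("hotplug completed with status %d\n", bserrno);
 *	}
 *
 *	spdk_blob_set_esnap_bs_dev(blob, new_dev, hotplug_done, NULL);
 */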
9480 
9481 struct spdk_bs_dev *
9482 spdk_blob_get_esnap_bs_dev(const struct spdk_blob *blob)
9483 {
9484 	if (!blob_is_esnap_clone(blob)) {
9485 		SPDK_ERRLOG("blob 0x%" PRIx64 ": not an esnap clone\n", blob->id);
9486 		return NULL;
9487 	}
9488 
9489 	return blob->back_bs_dev;
9490 }
9491 
9492 bool
9493 spdk_blob_is_degraded(const struct spdk_blob *blob)
9494 {
9495 	if (blob->bs->dev->is_degraded != NULL && blob->bs->dev->is_degraded(blob->bs->dev)) {
9496 		return true;
9497 	}
9498 	if (blob->back_bs_dev == NULL || blob->back_bs_dev->is_degraded == NULL) {
9499 		return false;
9500 	}
9501 
9502 	return blob->back_bs_dev->is_degraded(blob->back_bs_dev);
9503 }
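
/*
 * Illustrative sketch: a degraded blob is one whose data may be unreachable,
 * e.g. an esnap clone whose external device is missing. A caller might use
 * this to fail fast (the early-return policy here is hypothetical):
 *
 *	if (spdk_blob_is_degraded(blob)) {
 *		return -EIO;
 *	}
 */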
9504 
9505 SPDK_LOG_REGISTER_COMPONENT(blob)
9506 SPDK_LOG_REGISTER_COMPONENT(blob_esnap)
9507