xref: /spdk/lib/blob/blobstore.c (revision 6383602952785b87cf05e47e78f2ac7e482f1df3)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include "spdk/stdinc.h"
35 
36 #include "spdk/blob.h"
37 #include "spdk/crc32.h"
38 #include "spdk/env.h"
39 #include "spdk/queue.h"
40 #include "spdk/thread.h"
41 #include "spdk/bit_array.h"
42 #include "spdk/likely.h"
43 #include "spdk/util.h"
44 #include "spdk/string.h"
45 
46 #include "spdk_internal/assert.h"
47 #include "spdk_internal/log.h"
48 
49 #include "blobstore.h"
50 
51 #define BLOB_CRC32C_INITIAL    0xffffffffUL
52 
53 static int spdk_bs_register_md_thread(struct spdk_blob_store *bs);
54 static int spdk_bs_unregister_md_thread(struct spdk_blob_store *bs);
55 static void _spdk_blob_close_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno);
56 static void _spdk_blob_insert_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num,
57 		uint64_t cluster, uint32_t extent, spdk_blob_op_complete cb_fn, void *cb_arg);
58 
59 static int _spdk_blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value,
60 				uint16_t value_len, bool internal);
61 static int _spdk_blob_get_xattr_value(struct spdk_blob *blob, const char *name,
62 				      const void **value, size_t *value_len, bool internal);
63 static int _spdk_blob_remove_xattr(struct spdk_blob *blob, const char *name, bool internal);
64 
65 static void _spdk_blob_insert_extent(struct spdk_blob *blob, uint32_t extent, uint64_t cluster_num,
66 				     spdk_blob_op_complete cb_fn, void *cb_arg);
67 
68 static void
69 _spdk_blob_verify_md_op(struct spdk_blob *blob)
70 {
71 	assert(blob != NULL);
72 	assert(spdk_get_thread() == blob->bs->md_thread);
73 	assert(blob->state != SPDK_BLOB_STATE_LOADING);
74 }
75 
76 static struct spdk_blob_list *
77 _spdk_bs_get_snapshot_entry(struct spdk_blob_store *bs, spdk_blob_id blobid)
78 {
79 	struct spdk_blob_list *snapshot_entry = NULL;
80 
81 	TAILQ_FOREACH(snapshot_entry, &bs->snapshots, link) {
82 		if (snapshot_entry->id == blobid) {
83 			break;
84 		}
85 	}
86 
87 	return snapshot_entry;
88 }
89 
90 static void
91 _spdk_bs_claim_md_page(struct spdk_blob_store *bs, uint32_t page)
92 {
93 	assert(page < spdk_bit_array_capacity(bs->used_md_pages));
94 	assert(spdk_bit_array_get(bs->used_md_pages, page) == false);
95 
96 	spdk_bit_array_set(bs->used_md_pages, page);
97 }
98 
99 static void
100 _spdk_bs_release_md_page(struct spdk_blob_store *bs, uint32_t page)
101 {
102 	assert(page < spdk_bit_array_capacity(bs->used_md_pages));
103 	assert(spdk_bit_array_get(bs->used_md_pages, page) == true);
104 
105 	spdk_bit_array_clear(bs->used_md_pages, page);
106 }
107 
108 static void
109 _spdk_bs_claim_cluster(struct spdk_blob_store *bs, uint32_t cluster_num)
110 {
111 	assert(cluster_num < spdk_bit_array_capacity(bs->used_clusters));
112 	assert(spdk_bit_array_get(bs->used_clusters, cluster_num) == false);
113 	assert(bs->num_free_clusters > 0);
114 
115 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Claiming cluster %u\n", cluster_num);
116 
117 	spdk_bit_array_set(bs->used_clusters, cluster_num);
118 	bs->num_free_clusters--;
119 }
120 
121 static int
122 _spdk_blob_insert_cluster(struct spdk_blob *blob, uint32_t cluster_num, uint64_t cluster)
123 {
124 	uint64_t *cluster_lba = &blob->active.clusters[cluster_num];
125 
126 	_spdk_blob_verify_md_op(blob);
127 
128 	if (*cluster_lba != 0) {
129 		return -EEXIST;
130 	}
131 
132 	*cluster_lba = _spdk_bs_cluster_to_lba(blob->bs, cluster);
133 	return 0;
134 }
135 
136 static int
137 _spdk_bs_allocate_cluster(struct spdk_blob *blob, uint32_t cluster_num,
138 			  uint64_t *lowest_free_cluster, uint32_t *lowest_free_md_page, bool update_map)
139 {
140 	uint32_t *extent_page = 0;
141 
142 	pthread_mutex_lock(&blob->bs->used_clusters_mutex);
143 	*lowest_free_cluster = spdk_bit_array_find_first_clear(blob->bs->used_clusters,
144 			       *lowest_free_cluster);
145 	if (*lowest_free_cluster == UINT32_MAX) {
146 		/* No more free clusters. Cannot satisfy the request */
147 		pthread_mutex_unlock(&blob->bs->used_clusters_mutex);
148 		return -ENOSPC;
149 	}
150 
151 	if (blob->use_extent_table) {
152 		extent_page = _spdk_bs_cluster_to_extent_page(blob, cluster_num);
153 		if (*extent_page == 0) {
154 			/* No extent_page is allocated for the cluster */
155 			*lowest_free_md_page = spdk_bit_array_find_first_clear(blob->bs->used_md_pages,
156 					       *lowest_free_md_page);
157 			if (*lowest_free_md_page == UINT32_MAX) {
158 				/* No more free md pages. Cannot satisfy the request */
159 				pthread_mutex_unlock(&blob->bs->used_clusters_mutex);
160 				return -ENOSPC;
161 			}
162 			_spdk_bs_claim_md_page(blob->bs, *lowest_free_md_page);
163 		}
164 	}
165 
166 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Claiming cluster %lu for blob %lu\n", *lowest_free_cluster, blob->id);
167 	_spdk_bs_claim_cluster(blob->bs, *lowest_free_cluster);
168 
169 	pthread_mutex_unlock(&blob->bs->used_clusters_mutex);
170 
171 	if (update_map) {
172 		_spdk_blob_insert_cluster(blob, cluster_num, *lowest_free_cluster);
173 		if (blob->use_extent_table && *extent_page == 0) {
174 			*extent_page = *lowest_free_md_page;
175 		}
176 	}
177 
178 	return 0;
179 }
180 
181 static void
182 _spdk_bs_release_cluster(struct spdk_blob_store *bs, uint32_t cluster_num)
183 {
184 	assert(cluster_num < spdk_bit_array_capacity(bs->used_clusters));
185 	assert(spdk_bit_array_get(bs->used_clusters, cluster_num) == true);
186 	assert(bs->num_free_clusters < bs->total_clusters);
187 
188 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Releasing cluster %u\n", cluster_num);
189 
190 	pthread_mutex_lock(&bs->used_clusters_mutex);
191 	spdk_bit_array_clear(bs->used_clusters, cluster_num);
192 	bs->num_free_clusters++;
193 	pthread_mutex_unlock(&bs->used_clusters_mutex);
194 }
195 
196 static void
197 _spdk_blob_xattrs_init(struct spdk_blob_xattr_opts *xattrs)
198 {
199 	xattrs->count = 0;
200 	xattrs->names = NULL;
201 	xattrs->ctx = NULL;
202 	xattrs->get_value = NULL;
203 }
204 
205 void
206 spdk_blob_opts_init(struct spdk_blob_opts *opts)
207 {
208 	opts->num_clusters = 0;
209 	opts->thin_provision = false;
210 	opts->clear_method = BLOB_CLEAR_WITH_DEFAULT;
211 	_spdk_blob_xattrs_init(&opts->xattrs);
212 	opts->use_extent_table = true;
213 }
214 
215 void
216 spdk_blob_open_opts_init(struct spdk_blob_open_opts *opts)
217 {
218 	opts->clear_method = BLOB_CLEAR_WITH_DEFAULT;
219 }
220 
221 static struct spdk_blob *
222 _spdk_blob_alloc(struct spdk_blob_store *bs, spdk_blob_id id)
223 {
224 	struct spdk_blob *blob;
225 
226 	blob = calloc(1, sizeof(*blob));
227 	if (!blob) {
228 		return NULL;
229 	}
230 
231 	blob->id = id;
232 	blob->bs = bs;
233 
234 	blob->parent_id = SPDK_BLOBID_INVALID;
235 
236 	blob->state = SPDK_BLOB_STATE_DIRTY;
237 	blob->extent_rle_found = false;
238 	blob->extent_table_found = false;
239 	blob->active.num_pages = 1;
240 	blob->active.pages = calloc(1, sizeof(*blob->active.pages));
241 	if (!blob->active.pages) {
242 		free(blob);
243 		return NULL;
244 	}
245 
246 	blob->active.pages[0] = _spdk_bs_blobid_to_page(id);
247 
248 	TAILQ_INIT(&blob->xattrs);
249 	TAILQ_INIT(&blob->xattrs_internal);
250 	TAILQ_INIT(&blob->pending_persists);
251 
252 	return blob;
253 }
254 
255 static void
256 _spdk_xattrs_free(struct spdk_xattr_tailq *xattrs)
257 {
258 	struct spdk_xattr	*xattr, *xattr_tmp;
259 
260 	TAILQ_FOREACH_SAFE(xattr, xattrs, link, xattr_tmp) {
261 		TAILQ_REMOVE(xattrs, xattr, link);
262 		free(xattr->name);
263 		free(xattr->value);
264 		free(xattr);
265 	}
266 }
267 
268 static void
269 _spdk_blob_free(struct spdk_blob *blob)
270 {
271 	assert(blob != NULL);
272 	assert(TAILQ_EMPTY(&blob->pending_persists));
273 
274 	free(blob->active.extent_pages);
275 	free(blob->clean.extent_pages);
276 	free(blob->active.clusters);
277 	free(blob->clean.clusters);
278 	free(blob->active.pages);
279 	free(blob->clean.pages);
280 
281 	_spdk_xattrs_free(&blob->xattrs);
282 	_spdk_xattrs_free(&blob->xattrs_internal);
283 
284 	if (blob->back_bs_dev) {
285 		blob->back_bs_dev->destroy(blob->back_bs_dev);
286 	}
287 
288 	free(blob);
289 }
290 
291 struct freeze_io_ctx {
292 	struct spdk_bs_cpl cpl;
293 	struct spdk_blob *blob;
294 };
295 
296 static void
297 _spdk_blob_io_sync(struct spdk_io_channel_iter *i)
298 {
299 	spdk_for_each_channel_continue(i, 0);
300 }
301 
302 static void
303 _spdk_blob_execute_queued_io(struct spdk_io_channel_iter *i)
304 {
305 	struct spdk_io_channel *_ch = spdk_io_channel_iter_get_channel(i);
306 	struct spdk_bs_channel *ch = spdk_io_channel_get_ctx(_ch);
307 	struct freeze_io_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
308 	struct spdk_bs_request_set	*set;
309 	struct spdk_bs_user_op_args	*args;
310 	spdk_bs_user_op_t *op, *tmp;
311 
312 	TAILQ_FOREACH_SAFE(op, &ch->queued_io, link, tmp) {
313 		set = (struct spdk_bs_request_set *)op;
314 		args = &set->u.user_op;
315 
316 		if (args->blob == ctx->blob) {
317 			TAILQ_REMOVE(&ch->queued_io, op, link);
318 			spdk_bs_user_op_execute(op);
319 		}
320 	}
321 
322 	spdk_for_each_channel_continue(i, 0);
323 }
324 
325 static void
326 _spdk_blob_io_cpl(struct spdk_io_channel_iter *i, int status)
327 {
328 	struct freeze_io_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
329 
330 	ctx->cpl.u.blob_basic.cb_fn(ctx->cpl.u.blob_basic.cb_arg, 0);
331 
332 	free(ctx);
333 }
334 
335 static void
336 _spdk_blob_freeze_io(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
337 {
338 	struct freeze_io_ctx *ctx;
339 
340 	ctx = calloc(1, sizeof(*ctx));
341 	if (!ctx) {
342 		cb_fn(cb_arg, -ENOMEM);
343 		return;
344 	}
345 
346 	ctx->cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
347 	ctx->cpl.u.blob_basic.cb_fn = cb_fn;
348 	ctx->cpl.u.blob_basic.cb_arg = cb_arg;
349 	ctx->blob = blob;
350 
351 	/* Freeze I/O on blob */
352 	blob->frozen_refcnt++;
353 
354 	if (blob->frozen_refcnt == 1) {
355 		spdk_for_each_channel(blob->bs, _spdk_blob_io_sync, ctx, _spdk_blob_io_cpl);
356 	} else {
357 		cb_fn(cb_arg, 0);
358 		free(ctx);
359 	}
360 }
361 
362 static void
363 _spdk_blob_unfreeze_io(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
364 {
365 	struct freeze_io_ctx *ctx;
366 
367 	ctx = calloc(1, sizeof(*ctx));
368 	if (!ctx) {
369 		cb_fn(cb_arg, -ENOMEM);
370 		return;
371 	}
372 
373 	ctx->cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
374 	ctx->cpl.u.blob_basic.cb_fn = cb_fn;
375 	ctx->cpl.u.blob_basic.cb_arg = cb_arg;
376 	ctx->blob = blob;
377 
378 	assert(blob->frozen_refcnt > 0);
379 
380 	blob->frozen_refcnt--;
381 
382 	if (blob->frozen_refcnt == 0) {
383 		spdk_for_each_channel(blob->bs, _spdk_blob_execute_queued_io, ctx, _spdk_blob_io_cpl);
384 	} else {
385 		cb_fn(cb_arg, 0);
386 		free(ctx);
387 	}
388 }
389 
390 static int
391 _spdk_blob_mark_clean(struct spdk_blob *blob)
392 {
393 	uint32_t *extent_pages = NULL;
394 	uint64_t *clusters = NULL;
395 	uint32_t *pages = NULL;
396 
397 	assert(blob != NULL);
398 
399 	if (blob->active.num_extent_pages) {
400 		assert(blob->active.extent_pages);
401 		extent_pages = calloc(blob->active.num_extent_pages, sizeof(*blob->active.extent_pages));
402 		if (!extent_pages) {
403 			return -ENOMEM;
404 		}
405 		memcpy(extent_pages, blob->active.extent_pages,
406 		       blob->active.num_extent_pages * sizeof(*extent_pages));
407 	}
408 
409 	if (blob->active.num_clusters) {
410 		assert(blob->active.clusters);
411 		clusters = calloc(blob->active.num_clusters, sizeof(*blob->active.clusters));
412 		if (!clusters) {
413 			free(extent_pages);
414 			return -ENOMEM;
415 		}
416 		memcpy(clusters, blob->active.clusters, blob->active.num_clusters * sizeof(*blob->active.clusters));
417 	}
418 
419 	if (blob->active.num_pages) {
420 		assert(blob->active.pages);
421 		pages = calloc(blob->active.num_pages, sizeof(*blob->active.pages));
422 		if (!pages) {
423 			free(extent_pages);
424 			free(clusters);
425 			return -ENOMEM;
426 		}
427 		memcpy(pages, blob->active.pages, blob->active.num_pages * sizeof(*blob->active.pages));
428 	}
429 
430 	free(blob->clean.extent_pages);
431 	free(blob->clean.clusters);
432 	free(blob->clean.pages);
433 
434 	blob->clean.num_extent_pages = blob->active.num_extent_pages;
435 	blob->clean.extent_pages = blob->active.extent_pages;
436 	blob->clean.num_clusters = blob->active.num_clusters;
437 	blob->clean.clusters = blob->active.clusters;
438 	blob->clean.num_pages = blob->active.num_pages;
439 	blob->clean.pages = blob->active.pages;
440 
441 	blob->active.extent_pages = extent_pages;
442 	blob->active.clusters = clusters;
443 	blob->active.pages = pages;
444 
445 	/* If the metadata was dirtied again while the metadata was being written to disk,
446 	 *  we do not want to revert the DIRTY state back to CLEAN here.
447 	 */
448 	if (blob->state == SPDK_BLOB_STATE_LOADING) {
449 		blob->state = SPDK_BLOB_STATE_CLEAN;
450 	}
451 
452 	return 0;
453 }
454 
455 static int
456 _spdk_blob_deserialize_xattr(struct spdk_blob *blob,
457 			     struct spdk_blob_md_descriptor_xattr *desc_xattr, bool internal)
458 {
459 	struct spdk_xattr                       *xattr;
460 
461 	if (desc_xattr->length != sizeof(desc_xattr->name_length) +
462 	    sizeof(desc_xattr->value_length) +
463 	    desc_xattr->name_length + desc_xattr->value_length) {
464 		return -EINVAL;
465 	}
466 
467 	xattr = calloc(1, sizeof(*xattr));
468 	if (xattr == NULL) {
469 		return -ENOMEM;
470 	}
471 
472 	xattr->name = malloc(desc_xattr->name_length + 1);
473 	if (xattr->name == NULL) {
474 		free(xattr);
475 		return -ENOMEM;
476 	}
477 	memcpy(xattr->name, desc_xattr->name, desc_xattr->name_length);
478 	xattr->name[desc_xattr->name_length] = '\0';
479 
480 	xattr->value = malloc(desc_xattr->value_length);
481 	if (xattr->value == NULL) {
482 		free(xattr->name);
483 		free(xattr);
484 		return -ENOMEM;
485 	}
486 	xattr->value_len = desc_xattr->value_length;
487 	memcpy(xattr->value,
488 	       (void *)((uintptr_t)desc_xattr->name + desc_xattr->name_length),
489 	       desc_xattr->value_length);
490 
491 	TAILQ_INSERT_TAIL(internal ? &blob->xattrs_internal : &blob->xattrs, xattr, link);
492 
493 	return 0;
494 }
495 
496 
497 static int
498 _spdk_blob_parse_page(const struct spdk_blob_md_page *page, struct spdk_blob *blob)
499 {
500 	struct spdk_blob_md_descriptor *desc;
501 	size_t	cur_desc = 0;
502 	void *tmp;
503 
504 	desc = (struct spdk_blob_md_descriptor *)page->descriptors;
505 	while (cur_desc < sizeof(page->descriptors)) {
506 		if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) {
507 			if (desc->length == 0) {
508 				/* If padding and length are 0, this terminates the page */
509 				break;
510 			}
511 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) {
512 			struct spdk_blob_md_descriptor_flags	*desc_flags;
513 
514 			desc_flags = (struct spdk_blob_md_descriptor_flags *)desc;
515 
516 			if (desc_flags->length != sizeof(*desc_flags) - sizeof(*desc)) {
517 				return -EINVAL;
518 			}
519 
520 			if ((desc_flags->invalid_flags | SPDK_BLOB_INVALID_FLAGS_MASK) !=
521 			    SPDK_BLOB_INVALID_FLAGS_MASK) {
522 				return -EINVAL;
523 			}
524 
525 			if ((desc_flags->data_ro_flags | SPDK_BLOB_DATA_RO_FLAGS_MASK) !=
526 			    SPDK_BLOB_DATA_RO_FLAGS_MASK) {
527 				blob->data_ro = true;
528 				blob->md_ro = true;
529 			}
530 
531 			if ((desc_flags->md_ro_flags | SPDK_BLOB_MD_RO_FLAGS_MASK) !=
532 			    SPDK_BLOB_MD_RO_FLAGS_MASK) {
533 				blob->md_ro = true;
534 			}
535 
536 			if ((desc_flags->data_ro_flags & SPDK_BLOB_READ_ONLY)) {
537 				blob->data_ro = true;
538 				blob->md_ro = true;
539 			}
540 
541 			blob->invalid_flags = desc_flags->invalid_flags;
542 			blob->data_ro_flags = desc_flags->data_ro_flags;
543 			blob->md_ro_flags = desc_flags->md_ro_flags;
544 
545 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE) {
546 			struct spdk_blob_md_descriptor_extent_rle	*desc_extent_rle;
547 			unsigned int				i, j;
548 			unsigned int				cluster_count = blob->active.num_clusters;
549 
550 			if (blob->extent_table_found) {
551 				/* Extent Table already present in the md,
552 				 * both descriptors should never be at the same time. */
553 				return -EINVAL;
554 			}
555 			blob->extent_rle_found = true;
556 
557 			desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)desc;
558 
559 			if (desc_extent_rle->length == 0 ||
560 			    (desc_extent_rle->length % sizeof(desc_extent_rle->extents[0]) != 0)) {
561 				return -EINVAL;
562 			}
563 
564 			for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) {
565 				for (j = 0; j < desc_extent_rle->extents[i].length; j++) {
566 					if (desc_extent_rle->extents[i].cluster_idx != 0) {
567 						if (!spdk_bit_array_get(blob->bs->used_clusters,
568 									desc_extent_rle->extents[i].cluster_idx + j)) {
569 							return -EINVAL;
570 						}
571 					}
572 					cluster_count++;
573 				}
574 			}
575 
576 			if (cluster_count == 0) {
577 				return -EINVAL;
578 			}
579 			tmp = realloc(blob->active.clusters, cluster_count * sizeof(*blob->active.clusters));
580 			if (tmp == NULL) {
581 				return -ENOMEM;
582 			}
583 			blob->active.clusters = tmp;
584 			blob->active.cluster_array_size = cluster_count;
585 
586 			for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) {
587 				for (j = 0; j < desc_extent_rle->extents[i].length; j++) {
588 					if (desc_extent_rle->extents[i].cluster_idx != 0) {
589 						blob->active.clusters[blob->active.num_clusters++] = _spdk_bs_cluster_to_lba(blob->bs,
590 								desc_extent_rle->extents[i].cluster_idx + j);
591 					} else if (spdk_blob_is_thin_provisioned(blob)) {
592 						blob->active.clusters[blob->active.num_clusters++] = 0;
593 					} else {
594 						return -EINVAL;
595 					}
596 				}
597 			}
598 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE) {
599 			struct spdk_blob_md_descriptor_extent_table *desc_extent_table;
600 			uint32_t num_extent_pages = blob->active.num_extent_pages;
601 			uint32_t i, j;
602 			size_t extent_pages_length;
603 
604 			desc_extent_table = (struct spdk_blob_md_descriptor_extent_table *)desc;
605 			extent_pages_length = desc_extent_table->length - sizeof(desc_extent_table->num_clusters);
606 
607 			if (blob->extent_rle_found) {
608 				/* This means that Extent RLE is present in MD,
609 				 * both should never be at the same time. */
610 				return -EINVAL;
611 			} else if (blob->extent_table_found &&
612 				   desc_extent_table->num_clusters != blob->remaining_clusters_in_et) {
613 				/* Number of clusters in this ET does not match number
614 				 * from previously read EXTENT_TABLE. */
615 				return -EINVAL;
616 			}
617 
618 			blob->extent_table_found = true;
619 
620 			if (desc_extent_table->length == 0 ||
621 			    (extent_pages_length % sizeof(desc_extent_table->extent_page[0]) != 0)) {
622 				return -EINVAL;
623 			}
624 
625 			for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) {
626 				num_extent_pages += desc_extent_table->extent_page[i].num_pages;
627 			}
628 
629 			tmp = realloc(blob->active.extent_pages, num_extent_pages * sizeof(uint32_t));
630 			if (tmp == NULL) {
631 				return -ENOMEM;
632 			}
633 			blob->active.extent_pages = tmp;
634 			blob->active.extent_pages_array_size = num_extent_pages;
635 
636 			blob->remaining_clusters_in_et = desc_extent_table->num_clusters;
637 
638 			/* Extent table entries contain md page numbers for extent pages.
639 			 * Zeroes represent unallocated extent pages, those are run-length-encoded.
640 			 */
641 			for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) {
642 				if (desc_extent_table->extent_page[i].page_idx != 0) {
643 					assert(desc_extent_table->extent_page[i].num_pages == 1);
644 					blob->active.extent_pages[blob->active.num_extent_pages++] =
645 						desc_extent_table->extent_page[i].page_idx;
646 				} else if (spdk_blob_is_thin_provisioned(blob)) {
647 					for (j = 0; j < desc_extent_table->extent_page[i].num_pages; j++) {
648 						blob->active.extent_pages[blob->active.num_extent_pages++] = 0;
649 					}
650 				} else {
651 					return -EINVAL;
652 				}
653 			}
654 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) {
655 			struct spdk_blob_md_descriptor_extent_page	*desc_extent;
656 			unsigned int					i;
657 			unsigned int					cluster_count = 0;
658 			size_t						cluster_idx_length;
659 
660 			if (blob->extent_rle_found) {
661 				/* This means that Extent RLE is present in MD,
662 				 * both should never be at the same time. */
663 				return -EINVAL;
664 			}
665 
666 			desc_extent = (struct spdk_blob_md_descriptor_extent_page *)desc;
667 			cluster_idx_length = desc_extent->length - sizeof(desc_extent->start_cluster_idx);
668 
669 			if (desc_extent->length <= sizeof(desc_extent->start_cluster_idx) ||
670 			    (cluster_idx_length % sizeof(desc_extent->cluster_idx[0]) != 0)) {
671 				return -EINVAL;
672 			}
673 
674 			for (i = 0; i < cluster_idx_length / sizeof(desc_extent->cluster_idx[0]); i++) {
675 				if (desc_extent->cluster_idx[i] != 0) {
676 					if (!spdk_bit_array_get(blob->bs->used_clusters, desc_extent->cluster_idx[i])) {
677 						return -EINVAL;
678 					}
679 				}
680 				cluster_count++;
681 			}
682 
683 			if (cluster_count == 0) {
684 				return -EINVAL;
685 			}
686 
687 			/* When reading extent pages sequentially starting cluster idx should match
688 			 * current size of a blob.
689 			 * If changed to batch reading, this check shall be removed. */
690 			if (desc_extent->start_cluster_idx != blob->active.num_clusters) {
691 				return -EINVAL;
692 			}
693 
694 			tmp = realloc(blob->active.clusters,
695 				      (cluster_count + blob->active.num_clusters) * sizeof(*blob->active.clusters));
696 			if (tmp == NULL) {
697 				return -ENOMEM;
698 			}
699 			blob->active.clusters = tmp;
700 			blob->active.cluster_array_size = (cluster_count + blob->active.num_clusters);
701 
702 			for (i = 0; i < cluster_idx_length / sizeof(desc_extent->cluster_idx[0]); i++) {
703 				if (desc_extent->cluster_idx[i] != 0) {
704 					blob->active.clusters[blob->active.num_clusters++] = _spdk_bs_cluster_to_lba(blob->bs,
705 							desc_extent->cluster_idx[i]);
706 				} else if (spdk_blob_is_thin_provisioned(blob)) {
707 					blob->active.clusters[blob->active.num_clusters++] = 0;
708 				} else {
709 					return -EINVAL;
710 				}
711 			}
712 			assert(desc_extent->start_cluster_idx + cluster_count == blob->active.num_clusters);
713 			assert(blob->remaining_clusters_in_et >= cluster_count);
714 			blob->remaining_clusters_in_et -= cluster_count;
715 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) {
716 			int rc;
717 
718 			rc = _spdk_blob_deserialize_xattr(blob,
719 							  (struct spdk_blob_md_descriptor_xattr *) desc, false);
720 			if (rc != 0) {
721 				return rc;
722 			}
723 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) {
724 			int rc;
725 
726 			rc = _spdk_blob_deserialize_xattr(blob,
727 							  (struct spdk_blob_md_descriptor_xattr *) desc, true);
728 			if (rc != 0) {
729 				return rc;
730 			}
731 		} else {
732 			/* Unrecognized descriptor type.  Do not fail - just continue to the
733 			 *  next descriptor.  If this descriptor is associated with some feature
734 			 *  defined in a newer version of blobstore, that version of blobstore
735 			 *  should create and set an associated feature flag to specify if this
736 			 *  blob can be loaded or not.
737 			 */
738 		}
739 
740 		/* Advance to the next descriptor */
741 		cur_desc += sizeof(*desc) + desc->length;
742 		if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) {
743 			break;
744 		}
745 		desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc);
746 	}
747 
748 	return 0;
749 }
750 
751 static bool _spdk_bs_load_cur_extent_page_valid(struct spdk_blob_md_page *page);
752 
753 static int
754 _spdk_blob_parse_extent_page(struct spdk_blob_md_page *extent_page, struct spdk_blob *blob)
755 {
756 	assert(blob != NULL);
757 	assert(blob->state == SPDK_BLOB_STATE_LOADING);
758 
759 	if (_spdk_bs_load_cur_extent_page_valid(extent_page) == false) {
760 		return -ENOENT;
761 	}
762 
763 	return _spdk_blob_parse_page(extent_page, blob);
764 }
765 
766 static int
767 _spdk_blob_parse(const struct spdk_blob_md_page *pages, uint32_t page_count,
768 		 struct spdk_blob *blob)
769 {
770 	const struct spdk_blob_md_page *page;
771 	uint32_t i;
772 	int rc;
773 
774 	assert(page_count > 0);
775 	assert(pages[0].sequence_num == 0);
776 	assert(blob != NULL);
777 	assert(blob->state == SPDK_BLOB_STATE_LOADING);
778 	assert(blob->active.clusters == NULL);
779 
780 	/* The blobid provided doesn't match what's in the MD, this can
781 	 * happen for example if a bogus blobid is passed in through open.
782 	 */
783 	if (blob->id != pages[0].id) {
784 		SPDK_ERRLOG("Blobid (%lu) doesn't match what's in metadata (%lu)\n",
785 			    blob->id, pages[0].id);
786 		return -ENOENT;
787 	}
788 
789 	for (i = 0; i < page_count; i++) {
790 		page = &pages[i];
791 
792 		assert(page->id == blob->id);
793 		assert(page->sequence_num == i);
794 
795 		rc = _spdk_blob_parse_page(page, blob);
796 		if (rc != 0) {
797 			return rc;
798 		}
799 	}
800 
801 	return 0;
802 }
803 
804 static int
805 _spdk_blob_serialize_add_page(const struct spdk_blob *blob,
806 			      struct spdk_blob_md_page **pages,
807 			      uint32_t *page_count,
808 			      struct spdk_blob_md_page **last_page)
809 {
810 	struct spdk_blob_md_page *page;
811 
812 	assert(pages != NULL);
813 	assert(page_count != NULL);
814 
815 	if (*page_count == 0) {
816 		assert(*pages == NULL);
817 		*page_count = 1;
818 		*pages = spdk_malloc(SPDK_BS_PAGE_SIZE, SPDK_BS_PAGE_SIZE,
819 				     NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
820 	} else {
821 		assert(*pages != NULL);
822 		(*page_count)++;
823 		*pages = spdk_realloc(*pages,
824 				      SPDK_BS_PAGE_SIZE * (*page_count),
825 				      SPDK_BS_PAGE_SIZE);
826 	}
827 
828 	if (*pages == NULL) {
829 		*page_count = 0;
830 		*last_page = NULL;
831 		return -ENOMEM;
832 	}
833 
834 	page = &(*pages)[*page_count - 1];
835 	memset(page, 0, sizeof(*page));
836 	page->id = blob->id;
837 	page->sequence_num = *page_count - 1;
838 	page->next = SPDK_INVALID_MD_PAGE;
839 	*last_page = page;
840 
841 	return 0;
842 }
843 
844 /* Transform the in-memory representation 'xattr' into an on-disk xattr descriptor.
845  * Update required_sz on both success and failure.
846  *
847  */
848 static int
849 _spdk_blob_serialize_xattr(const struct spdk_xattr *xattr,
850 			   uint8_t *buf, size_t buf_sz,
851 			   size_t *required_sz, bool internal)
852 {
853 	struct spdk_blob_md_descriptor_xattr	*desc;
854 
855 	*required_sz = sizeof(struct spdk_blob_md_descriptor_xattr) +
856 		       strlen(xattr->name) +
857 		       xattr->value_len;
858 
859 	if (buf_sz < *required_sz) {
860 		return -1;
861 	}
862 
863 	desc = (struct spdk_blob_md_descriptor_xattr *)buf;
864 
865 	desc->type = internal ? SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL : SPDK_MD_DESCRIPTOR_TYPE_XATTR;
866 	desc->length = sizeof(desc->name_length) +
867 		       sizeof(desc->value_length) +
868 		       strlen(xattr->name) +
869 		       xattr->value_len;
870 	desc->name_length = strlen(xattr->name);
871 	desc->value_length = xattr->value_len;
872 
873 	memcpy(desc->name, xattr->name, desc->name_length);
874 	memcpy((void *)((uintptr_t)desc->name + desc->name_length),
875 	       xattr->value,
876 	       desc->value_length);
877 
878 	return 0;
879 }
880 
881 static void
882 _spdk_blob_serialize_extent_table_entry(const struct spdk_blob *blob,
883 					uint64_t start_ep, uint64_t *next_ep,
884 					uint8_t **buf, size_t *remaining_sz)
885 {
886 	struct spdk_blob_md_descriptor_extent_table *desc;
887 	size_t cur_sz;
888 	uint64_t i, et_idx;
889 	uint32_t extent_page, ep_len;
890 
891 	/* The buffer must have room for at least num_clusters entry */
892 	cur_sz = sizeof(struct spdk_blob_md_descriptor) + sizeof(desc->num_clusters);
893 	if (*remaining_sz < cur_sz) {
894 		*next_ep = start_ep;
895 		return;
896 	}
897 
898 	desc = (struct spdk_blob_md_descriptor_extent_table *)*buf;
899 	desc->type = SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE;
900 
901 	desc->num_clusters = blob->active.num_clusters;
902 
903 	ep_len = 1;
904 	et_idx = 0;
905 	for (i = start_ep; i < blob->active.num_extent_pages; i++) {
906 		if (*remaining_sz < cur_sz  + sizeof(desc->extent_page[0])) {
907 			/* If we ran out of buffer space, return */
908 			break;
909 		}
910 
911 		extent_page = blob->active.extent_pages[i];
912 		/* Verify that next extent_page is unallocated */
913 		if (extent_page == 0 &&
914 		    (i + 1 < blob->active.num_extent_pages && blob->active.extent_pages[i + 1] == 0)) {
915 			ep_len++;
916 			continue;
917 		}
918 		desc->extent_page[et_idx].page_idx = extent_page;
919 		desc->extent_page[et_idx].num_pages = ep_len;
920 		et_idx++;
921 
922 		ep_len = 1;
923 		cur_sz += sizeof(desc->extent_page[et_idx]);
924 	}
925 	*next_ep = i;
926 
927 	desc->length = sizeof(desc->num_clusters) + sizeof(desc->extent_page[0]) * et_idx;
928 	*remaining_sz -= sizeof(struct spdk_blob_md_descriptor) + desc->length;
929 	*buf += sizeof(struct spdk_blob_md_descriptor) + desc->length;
930 }
931 
932 static int
933 _spdk_blob_serialize_extent_table(const struct spdk_blob *blob,
934 				  struct spdk_blob_md_page **pages,
935 				  struct spdk_blob_md_page *cur_page,
936 				  uint32_t *page_count, uint8_t **buf,
937 				  size_t *remaining_sz)
938 {
939 	uint64_t				last_extent_page;
940 	int					rc;
941 
942 	last_extent_page = 0;
943 	/* At least single extent table entry has to be always persisted.
944 	 * Such case occurs with num_extent_pages == 0. */
945 	while (last_extent_page <= blob->active.num_extent_pages) {
946 		_spdk_blob_serialize_extent_table_entry(blob, last_extent_page, &last_extent_page, buf,
947 							remaining_sz);
948 
949 		if (last_extent_page == blob->active.num_extent_pages) {
950 			break;
951 		}
952 
953 		rc = _spdk_blob_serialize_add_page(blob, pages, page_count, &cur_page);
954 		if (rc < 0) {
955 			return rc;
956 		}
957 
958 		*buf = (uint8_t *)cur_page->descriptors;
959 		*remaining_sz = sizeof(cur_page->descriptors);
960 	}
961 
962 	return 0;
963 }
964 
965 static void
966 _spdk_blob_serialize_extent_rle(const struct spdk_blob *blob,
967 				uint64_t start_cluster, uint64_t *next_cluster,
968 				uint8_t **buf, size_t *buf_sz)
969 {
970 	struct spdk_blob_md_descriptor_extent_rle *desc_extent_rle;
971 	size_t cur_sz;
972 	uint64_t i, extent_idx;
973 	uint64_t lba, lba_per_cluster, lba_count;
974 
975 	/* The buffer must have room for at least one extent */
976 	cur_sz = sizeof(struct spdk_blob_md_descriptor) + sizeof(desc_extent_rle->extents[0]);
977 	if (*buf_sz < cur_sz) {
978 		*next_cluster = start_cluster;
979 		return;
980 	}
981 
982 	desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)*buf;
983 	desc_extent_rle->type = SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE;
984 
985 	lba_per_cluster = _spdk_bs_cluster_to_lba(blob->bs, 1);
986 
987 	lba = blob->active.clusters[start_cluster];
988 	lba_count = lba_per_cluster;
989 	extent_idx = 0;
990 	for (i = start_cluster + 1; i < blob->active.num_clusters; i++) {
991 		if ((lba + lba_count) == blob->active.clusters[i] && lba != 0) {
992 			/* Run-length encode sequential non-zero LBA */
993 			lba_count += lba_per_cluster;
994 			continue;
995 		} else if (lba == 0 && blob->active.clusters[i] == 0) {
996 			/* Run-length encode unallocated clusters */
997 			lba_count += lba_per_cluster;
998 			continue;
999 		}
1000 		desc_extent_rle->extents[extent_idx].cluster_idx = lba / lba_per_cluster;
1001 		desc_extent_rle->extents[extent_idx].length = lba_count / lba_per_cluster;
1002 		extent_idx++;
1003 
1004 		cur_sz += sizeof(desc_extent_rle->extents[extent_idx]);
1005 
1006 		if (*buf_sz < cur_sz) {
1007 			/* If we ran out of buffer space, return */
1008 			*next_cluster = i;
1009 			break;
1010 		}
1011 
1012 		lba = blob->active.clusters[i];
1013 		lba_count = lba_per_cluster;
1014 	}
1015 
1016 	if (*buf_sz >= cur_sz) {
1017 		desc_extent_rle->extents[extent_idx].cluster_idx = lba / lba_per_cluster;
1018 		desc_extent_rle->extents[extent_idx].length = lba_count / lba_per_cluster;
1019 		extent_idx++;
1020 
1021 		*next_cluster = blob->active.num_clusters;
1022 	}
1023 
1024 	desc_extent_rle->length = sizeof(desc_extent_rle->extents[0]) * extent_idx;
1025 	*buf_sz -= sizeof(struct spdk_blob_md_descriptor) + desc_extent_rle->length;
1026 	*buf += sizeof(struct spdk_blob_md_descriptor) + desc_extent_rle->length;
1027 }
1028 
1029 static int
1030 _spdk_blob_serialize_extents_rle(const struct spdk_blob *blob,
1031 				 struct spdk_blob_md_page **pages,
1032 				 struct spdk_blob_md_page *cur_page,
1033 				 uint32_t *page_count, uint8_t **buf,
1034 				 size_t *remaining_sz)
1035 {
1036 	uint64_t				last_cluster;
1037 	int					rc;
1038 
1039 	last_cluster = 0;
1040 	while (last_cluster < blob->active.num_clusters) {
1041 		_spdk_blob_serialize_extent_rle(blob, last_cluster, &last_cluster, buf, remaining_sz);
1042 
1043 		if (last_cluster == blob->active.num_clusters) {
1044 			break;
1045 		}
1046 
1047 		rc = _spdk_blob_serialize_add_page(blob, pages, page_count, &cur_page);
1048 		if (rc < 0) {
1049 			return rc;
1050 		}
1051 
1052 		*buf = (uint8_t *)cur_page->descriptors;
1053 		*remaining_sz = sizeof(cur_page->descriptors);
1054 	}
1055 
1056 	return 0;
1057 }
1058 
1059 static void
1060 _spdk_blob_serialize_extent_page(const struct spdk_blob *blob,
1061 				 uint64_t cluster, struct spdk_blob_md_page *page)
1062 {
1063 	struct spdk_blob_md_descriptor_extent_page *desc_extent;
1064 	uint64_t i, extent_idx;
1065 	uint64_t lba, lba_per_cluster;
1066 	uint64_t start_cluster_idx = (cluster / SPDK_EXTENTS_PER_EP) * SPDK_EXTENTS_PER_EP;
1067 
1068 	desc_extent = (struct spdk_blob_md_descriptor_extent_page *) page->descriptors;
1069 	desc_extent->type = SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE;
1070 
1071 	lba_per_cluster = _spdk_bs_cluster_to_lba(blob->bs, 1);
1072 
1073 	desc_extent->start_cluster_idx = start_cluster_idx;
1074 	extent_idx = 0;
1075 	for (i = start_cluster_idx; i < blob->active.num_clusters; i++) {
1076 		lba = blob->active.clusters[i];
1077 		desc_extent->cluster_idx[extent_idx++] = lba / lba_per_cluster;
1078 		if (extent_idx >= SPDK_EXTENTS_PER_EP) {
1079 			break;
1080 		}
1081 	}
1082 	desc_extent->length = sizeof(desc_extent->start_cluster_idx) +
1083 			      sizeof(desc_extent->cluster_idx[0]) * extent_idx;
1084 }
1085 
1086 static void
1087 _spdk_blob_serialize_flags(const struct spdk_blob *blob,
1088 			   uint8_t *buf, size_t *buf_sz)
1089 {
1090 	struct spdk_blob_md_descriptor_flags *desc;
1091 
1092 	/*
1093 	 * Flags get serialized first, so we should always have room for the flags
1094 	 *  descriptor.
1095 	 */
1096 	assert(*buf_sz >= sizeof(*desc));
1097 
1098 	desc = (struct spdk_blob_md_descriptor_flags *)buf;
1099 	desc->type = SPDK_MD_DESCRIPTOR_TYPE_FLAGS;
1100 	desc->length = sizeof(*desc) - sizeof(struct spdk_blob_md_descriptor);
1101 	desc->invalid_flags = blob->invalid_flags;
1102 	desc->data_ro_flags = blob->data_ro_flags;
1103 	desc->md_ro_flags = blob->md_ro_flags;
1104 
1105 	*buf_sz -= sizeof(*desc);
1106 }
1107 
1108 static int
1109 _spdk_blob_serialize_xattrs(const struct spdk_blob *blob,
1110 			    const struct spdk_xattr_tailq *xattrs, bool internal,
1111 			    struct spdk_blob_md_page **pages,
1112 			    struct spdk_blob_md_page *cur_page,
1113 			    uint32_t *page_count, uint8_t **buf,
1114 			    size_t *remaining_sz)
1115 {
1116 	const struct spdk_xattr	*xattr;
1117 	int	rc;
1118 
1119 	TAILQ_FOREACH(xattr, xattrs, link) {
1120 		size_t required_sz = 0;
1121 
1122 		rc = _spdk_blob_serialize_xattr(xattr,
1123 						*buf, *remaining_sz,
1124 						&required_sz, internal);
1125 		if (rc < 0) {
1126 			/* Need to add a new page to the chain */
1127 			rc = _spdk_blob_serialize_add_page(blob, pages, page_count,
1128 							   &cur_page);
1129 			if (rc < 0) {
1130 				spdk_free(*pages);
1131 				*pages = NULL;
1132 				*page_count = 0;
1133 				return rc;
1134 			}
1135 
1136 			*buf = (uint8_t *)cur_page->descriptors;
1137 			*remaining_sz = sizeof(cur_page->descriptors);
1138 
1139 			/* Try again */
1140 			required_sz = 0;
1141 			rc = _spdk_blob_serialize_xattr(xattr,
1142 							*buf, *remaining_sz,
1143 							&required_sz, internal);
1144 
1145 			if (rc < 0) {
1146 				spdk_free(*pages);
1147 				*pages = NULL;
1148 				*page_count = 0;
1149 				return rc;
1150 			}
1151 		}
1152 
1153 		*remaining_sz -= required_sz;
1154 		*buf += required_sz;
1155 	}
1156 
1157 	return 0;
1158 }
1159 
1160 static int
1161 _spdk_blob_serialize(const struct spdk_blob *blob, struct spdk_blob_md_page **pages,
1162 		     uint32_t *page_count)
1163 {
1164 	struct spdk_blob_md_page		*cur_page;
1165 	int					rc;
1166 	uint8_t					*buf;
1167 	size_t					remaining_sz;
1168 
1169 	assert(pages != NULL);
1170 	assert(page_count != NULL);
1171 	assert(blob != NULL);
1172 	assert(blob->state == SPDK_BLOB_STATE_DIRTY);
1173 
1174 	*pages = NULL;
1175 	*page_count = 0;
1176 
1177 	/* A blob always has at least 1 page, even if it has no descriptors */
1178 	rc = _spdk_blob_serialize_add_page(blob, pages, page_count, &cur_page);
1179 	if (rc < 0) {
1180 		return rc;
1181 	}
1182 
1183 	buf = (uint8_t *)cur_page->descriptors;
1184 	remaining_sz = sizeof(cur_page->descriptors);
1185 
1186 	/* Serialize flags */
1187 	_spdk_blob_serialize_flags(blob, buf, &remaining_sz);
1188 	buf += sizeof(struct spdk_blob_md_descriptor_flags);
1189 
1190 	/* Serialize xattrs */
1191 	rc = _spdk_blob_serialize_xattrs(blob, &blob->xattrs, false,
1192 					 pages, cur_page, page_count, &buf, &remaining_sz);
1193 	if (rc < 0) {
1194 		return rc;
1195 	}
1196 
1197 	/* Serialize internal xattrs */
1198 	rc = _spdk_blob_serialize_xattrs(blob, &blob->xattrs_internal, true,
1199 					 pages, cur_page, page_count, &buf, &remaining_sz);
1200 	if (rc < 0) {
1201 		return rc;
1202 	}
1203 
1204 	if (blob->use_extent_table) {
1205 		/* Serialize extent table */
1206 		rc = _spdk_blob_serialize_extent_table(blob, pages, cur_page, page_count, &buf, &remaining_sz);
1207 	} else {
1208 		/* Serialize extents */
1209 		rc = _spdk_blob_serialize_extents_rle(blob, pages, cur_page, page_count, &buf, &remaining_sz);
1210 	}
1211 
1212 	return rc;
1213 }
1214 
1215 struct spdk_blob_load_ctx {
1216 	struct spdk_blob		*blob;
1217 
1218 	struct spdk_blob_md_page	*pages;
1219 	uint32_t			num_pages;
1220 	uint32_t			next_extent_page;
1221 	spdk_bs_sequence_t	        *seq;
1222 
1223 	spdk_bs_sequence_cpl		cb_fn;
1224 	void				*cb_arg;
1225 };
1226 
1227 static uint32_t
1228 _spdk_blob_md_page_calc_crc(void *page)
1229 {
1230 	uint32_t		crc;
1231 
1232 	crc = BLOB_CRC32C_INITIAL;
1233 	crc = spdk_crc32c_update(page, SPDK_BS_PAGE_SIZE - 4, crc);
1234 	crc ^= BLOB_CRC32C_INITIAL;
1235 
1236 	return crc;
1237 
1238 }
1239 
1240 static void
1241 _spdk_blob_load_final(void *cb_arg, int bserrno)
1242 {
1243 	struct spdk_blob_load_ctx	*ctx = cb_arg;
1244 	struct spdk_blob		*blob = ctx->blob;
1245 
1246 	if (bserrno == 0) {
1247 		_spdk_blob_mark_clean(blob);
1248 	}
1249 
1250 	ctx->cb_fn(ctx->seq, ctx->cb_arg, bserrno);
1251 
1252 	/* Free the memory */
1253 	spdk_free(ctx->pages);
1254 	free(ctx);
1255 }
1256 
1257 static void
1258 _spdk_blob_load_snapshot_cpl(void *cb_arg, struct spdk_blob *snapshot, int bserrno)
1259 {
1260 	struct spdk_blob_load_ctx	*ctx = cb_arg;
1261 	struct spdk_blob		*blob = ctx->blob;
1262 
1263 	if (bserrno == 0) {
1264 		blob->back_bs_dev = spdk_bs_create_blob_bs_dev(snapshot);
1265 		if (blob->back_bs_dev == NULL) {
1266 			bserrno = -ENOMEM;
1267 		}
1268 	}
1269 	if (bserrno != 0) {
1270 		SPDK_ERRLOG("Snapshot fail\n");
1271 	}
1272 
1273 	_spdk_blob_load_final(ctx, bserrno);
1274 }
1275 
1276 static void _spdk_blob_update_clear_method(struct spdk_blob *blob);
1277 
1278 static void
1279 _spdk_blob_load_backing_dev(void *cb_arg)
1280 {
1281 	struct spdk_blob_load_ctx	*ctx = cb_arg;
1282 	struct spdk_blob		*blob = ctx->blob;
1283 	const void			*value;
1284 	size_t				len;
1285 	int				rc;
1286 
1287 	if (spdk_blob_is_thin_provisioned(blob)) {
1288 		rc = _spdk_blob_get_xattr_value(blob, BLOB_SNAPSHOT, &value, &len, true);
1289 		if (rc == 0) {
1290 			if (len != sizeof(spdk_blob_id)) {
1291 				_spdk_blob_load_final(ctx, -EINVAL);
1292 				return;
1293 			}
1294 			/* open snapshot blob and continue in the callback function */
1295 			blob->parent_id = *(spdk_blob_id *)value;
1296 			spdk_bs_open_blob(blob->bs, blob->parent_id,
1297 					  _spdk_blob_load_snapshot_cpl, ctx);
1298 			return;
1299 		} else {
1300 			/* add zeroes_dev for thin provisioned blob */
1301 			blob->back_bs_dev = spdk_bs_create_zeroes_dev();
1302 		}
1303 	} else {
1304 		/* standard blob */
1305 		blob->back_bs_dev = NULL;
1306 	}
1307 	_spdk_blob_load_final(ctx, 0);
1308 }
1309 
1310 static void
1311 _spdk_blob_load_cpl_extents_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
1312 {
1313 	struct spdk_blob_load_ctx	*ctx = cb_arg;
1314 	struct spdk_blob		*blob = ctx->blob;
1315 	struct spdk_blob_md_page	*page;
1316 	uint64_t			i;
1317 	uint32_t			crc;
1318 	uint64_t			lba;
1319 	void				*tmp;
1320 	uint64_t			sz;
1321 
1322 	if (bserrno) {
1323 		SPDK_ERRLOG("Extent page read failed: %d\n", bserrno);
1324 		_spdk_blob_load_final(ctx, bserrno);
1325 		return;
1326 	}
1327 
1328 	if (ctx->pages == NULL) {
1329 		/* First iteration of this function, allocate buffer for single EXTENT_PAGE */
1330 		ctx->pages = spdk_zmalloc(SPDK_BS_PAGE_SIZE, SPDK_BS_PAGE_SIZE, NULL, SPDK_ENV_SOCKET_ID_ANY,
1331 					  SPDK_MALLOC_DMA);
1332 		if (!ctx->pages) {
1333 			_spdk_blob_load_final(ctx, -ENOMEM);
1334 			return;
1335 		}
1336 		ctx->num_pages = 1;
1337 		ctx->next_extent_page = 0;
1338 	} else {
1339 		page = &ctx->pages[0];
1340 		crc = _spdk_blob_md_page_calc_crc(page);
1341 		if (crc != page->crc) {
1342 			_spdk_blob_load_final(ctx, -EINVAL);
1343 			return;
1344 		}
1345 
1346 		if (page->next != SPDK_INVALID_MD_PAGE) {
1347 			_spdk_blob_load_final(ctx, -EINVAL);
1348 			return;
1349 		}
1350 
1351 		bserrno = _spdk_blob_parse_extent_page(page, blob);
1352 		if (bserrno) {
1353 			_spdk_blob_load_final(ctx, bserrno);
1354 			return;
1355 		}
1356 	}
1357 
1358 	for (i = ctx->next_extent_page; i < blob->active.num_extent_pages; i++) {
1359 		if (blob->active.extent_pages[i] != 0) {
1360 			/* Extent page was allocated, read and parse it. */
1361 			lba = _spdk_bs_md_page_to_lba(blob->bs, blob->active.extent_pages[i]);
1362 			ctx->next_extent_page = i + 1;
1363 
1364 			spdk_bs_sequence_read_dev(seq, &ctx->pages[0], lba,
1365 						  _spdk_bs_byte_to_lba(blob->bs, SPDK_BS_PAGE_SIZE),
1366 						  _spdk_blob_load_cpl_extents_cpl, ctx);
1367 			return;
1368 		} else {
1369 			/* Thin provisioned blobs can point to unallocated extent pages.
1370 			 * In this case blob size should be increased by up to the amount left in remaining_clusters_in_et. */
1371 
1372 			sz = spdk_min(blob->remaining_clusters_in_et, SPDK_EXTENTS_PER_EP);
1373 			blob->active.num_clusters += sz;
1374 			blob->remaining_clusters_in_et -= sz;
1375 
1376 			assert(spdk_blob_is_thin_provisioned(blob));
1377 			assert(i + 1 < blob->active.num_extent_pages || blob->remaining_clusters_in_et == 0);
1378 
1379 			tmp = realloc(blob->active.clusters, blob->active.num_clusters * sizeof(*blob->active.clusters));
1380 			if (tmp == NULL) {
1381 				_spdk_blob_load_final(ctx, -ENOMEM);
1382 				return;
1383 			}
1384 			memset(tmp + blob->active.cluster_array_size, 0,
1385 			       sizeof(*blob->active.clusters) * (blob->active.num_clusters - blob->active.cluster_array_size));
1386 			blob->active.clusters = tmp;
1387 			blob->active.cluster_array_size = blob->active.num_clusters;
1388 		}
1389 	}
1390 
1391 	_spdk_blob_load_backing_dev(ctx);
1392 }
1393 
1394 static void
1395 _spdk_blob_load_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
1396 {
1397 	struct spdk_blob_load_ctx	*ctx = cb_arg;
1398 	struct spdk_blob		*blob = ctx->blob;
1399 	struct spdk_blob_md_page	*page;
1400 	int				rc;
1401 	uint32_t			crc;
1402 
1403 	if (bserrno) {
1404 		SPDK_ERRLOG("Metadata page read failed: %d\n", bserrno);
1405 		_spdk_blob_load_final(ctx, bserrno);
1406 		return;
1407 	}
1408 
1409 	page = &ctx->pages[ctx->num_pages - 1];
1410 	crc = _spdk_blob_md_page_calc_crc(page);
1411 	if (crc != page->crc) {
1412 		SPDK_ERRLOG("Metadata page %d crc mismatch\n", ctx->num_pages);
1413 		_spdk_blob_load_final(ctx, -EINVAL);
1414 		return;
1415 	}
1416 
1417 	if (page->next != SPDK_INVALID_MD_PAGE) {
1418 		uint32_t next_page = page->next;
1419 		uint64_t next_lba = _spdk_bs_md_page_to_lba(blob->bs, next_page);
1420 
1421 		/* Read the next page */
1422 		ctx->num_pages++;
1423 		ctx->pages = spdk_realloc(ctx->pages, (sizeof(*page) * ctx->num_pages),
1424 					  sizeof(*page));
1425 		if (ctx->pages == NULL) {
1426 			_spdk_blob_load_final(ctx, -ENOMEM);
1427 			return;
1428 		}
1429 
1430 		spdk_bs_sequence_read_dev(seq, &ctx->pages[ctx->num_pages - 1],
1431 					  next_lba,
1432 					  _spdk_bs_byte_to_lba(blob->bs, sizeof(*page)),
1433 					  _spdk_blob_load_cpl, ctx);
1434 		return;
1435 	}
1436 
1437 	/* Parse the pages */
1438 	rc = _spdk_blob_parse(ctx->pages, ctx->num_pages, blob);
1439 	if (rc) {
1440 		_spdk_blob_load_final(ctx, rc);
1441 		return;
1442 	}
1443 
1444 	if (blob->extent_table_found == true) {
1445 		/* If EXTENT_TABLE was found, that means support for it should be enabled. */
1446 		assert(blob->extent_rle_found == false);
1447 		blob->use_extent_table = true;
1448 	} else {
1449 		/* If EXTENT_RLE or no extent_* descriptor was found disable support
1450 		 * for extent table. No extent_* descriptors means that blob has length of 0
1451 		 * and no extent_rle descriptors were persisted for it.
1452 		 * EXTENT_TABLE if used, is always present in metadata regardless of length. */
1453 		blob->use_extent_table = false;
1454 	}
1455 
1456 	/* Check the clear_method stored in metadata vs what may have been passed
1457 	 * via spdk_bs_open_blob_ext() and update accordingly.
1458 	 */
1459 	_spdk_blob_update_clear_method(blob);
1460 
1461 	spdk_free(ctx->pages);
1462 	ctx->pages = NULL;
1463 
1464 	if (blob->extent_table_found) {
1465 		_spdk_blob_load_cpl_extents_cpl(seq, ctx, 0);
1466 	} else {
1467 		_spdk_blob_load_backing_dev(ctx);
1468 	}
1469 }
1470 
1471 /* Load a blob from disk given a blobid */
1472 static void
1473 _spdk_blob_load(spdk_bs_sequence_t *seq, struct spdk_blob *blob,
1474 		spdk_bs_sequence_cpl cb_fn, void *cb_arg)
1475 {
1476 	struct spdk_blob_load_ctx *ctx;
1477 	struct spdk_blob_store *bs;
1478 	uint32_t page_num;
1479 	uint64_t lba;
1480 
1481 	_spdk_blob_verify_md_op(blob);
1482 
1483 	bs = blob->bs;
1484 
1485 	ctx = calloc(1, sizeof(*ctx));
1486 	if (!ctx) {
1487 		cb_fn(seq, cb_arg, -ENOMEM);
1488 		return;
1489 	}
1490 
1491 	ctx->blob = blob;
1492 	ctx->pages = spdk_realloc(ctx->pages, SPDK_BS_PAGE_SIZE, SPDK_BS_PAGE_SIZE);
1493 	if (!ctx->pages) {
1494 		free(ctx);
1495 		cb_fn(seq, cb_arg, -ENOMEM);
1496 		return;
1497 	}
1498 	ctx->num_pages = 1;
1499 	ctx->cb_fn = cb_fn;
1500 	ctx->cb_arg = cb_arg;
1501 	ctx->seq = seq;
1502 
1503 	page_num = _spdk_bs_blobid_to_page(blob->id);
1504 	lba = _spdk_bs_md_page_to_lba(blob->bs, page_num);
1505 
1506 	blob->state = SPDK_BLOB_STATE_LOADING;
1507 
1508 	spdk_bs_sequence_read_dev(seq, &ctx->pages[0], lba,
1509 				  _spdk_bs_byte_to_lba(bs, SPDK_BS_PAGE_SIZE),
1510 				  _spdk_blob_load_cpl, ctx);
1511 }
1512 
1513 struct spdk_blob_persist_ctx {
1514 	struct spdk_blob		*blob;
1515 
1516 	struct spdk_bs_super_block	*super;
1517 
1518 	struct spdk_blob_md_page	*pages;
1519 	uint32_t			next_extent_page;
1520 	struct spdk_blob_md_page	*extent_page;
1521 
1522 	spdk_bs_sequence_t		*seq;
1523 	spdk_bs_sequence_cpl		cb_fn;
1524 	void				*cb_arg;
1525 	TAILQ_ENTRY(spdk_blob_persist_ctx) link;
1526 };
1527 
1528 static void
1529 spdk_bs_batch_clear_dev(struct spdk_blob_persist_ctx *ctx, spdk_bs_batch_t *batch, uint64_t lba,
1530 			uint32_t lba_count)
1531 {
1532 	switch (ctx->blob->clear_method) {
1533 	case BLOB_CLEAR_WITH_DEFAULT:
1534 	case BLOB_CLEAR_WITH_UNMAP:
1535 		spdk_bs_batch_unmap_dev(batch, lba, lba_count);
1536 		break;
1537 	case BLOB_CLEAR_WITH_WRITE_ZEROES:
1538 		spdk_bs_batch_write_zeroes_dev(batch, lba, lba_count);
1539 		break;
1540 	case BLOB_CLEAR_WITH_NONE:
1541 	default:
1542 		break;
1543 	}
1544 }
1545 
1546 static void _spdk_blob_persist_check_dirty(struct spdk_blob_persist_ctx *ctx);
1547 
1548 static void
1549 _spdk_blob_persist_complete(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
1550 {
1551 	struct spdk_blob_persist_ctx	*ctx = cb_arg;
1552 	struct spdk_blob_persist_ctx	*next_persist;
1553 	struct spdk_blob		*blob = ctx->blob;
1554 
1555 	if (bserrno == 0) {
1556 		_spdk_blob_mark_clean(blob);
1557 	}
1558 
1559 	assert(ctx == TAILQ_FIRST(&blob->pending_persists));
1560 	TAILQ_REMOVE(&blob->pending_persists, ctx, link);
1561 
1562 	next_persist = TAILQ_FIRST(&blob->pending_persists);
1563 
1564 	/* Call user callback */
1565 	ctx->cb_fn(seq, ctx->cb_arg, bserrno);
1566 
1567 	/* Free the memory */
1568 	spdk_free(ctx->pages);
1569 	free(ctx);
1570 
1571 	if (next_persist != NULL) {
1572 		_spdk_blob_persist_check_dirty(next_persist);
1573 	}
1574 }
1575 
1576 static void
1577 _spdk_blob_persist_clear_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
1578 {
1579 	struct spdk_blob_persist_ctx	*ctx = cb_arg;
1580 	struct spdk_blob		*blob = ctx->blob;
1581 	struct spdk_blob_store		*bs = blob->bs;
1582 	size_t				i;
1583 
1584 	/* Release all clusters that were truncated */
1585 	for (i = blob->active.num_clusters; i < blob->active.cluster_array_size; i++) {
1586 		uint32_t cluster_num = _spdk_bs_lba_to_cluster(bs, blob->active.clusters[i]);
1587 
1588 		/* Nothing to release if it was not allocated */
1589 		if (blob->active.clusters[i] != 0) {
1590 			_spdk_bs_release_cluster(bs, cluster_num);
1591 		}
1592 	}
1593 
1594 	if (blob->active.num_clusters == 0) {
1595 		free(blob->active.clusters);
1596 		blob->active.clusters = NULL;
1597 		blob->active.cluster_array_size = 0;
1598 	} else if (blob->active.num_clusters != blob->active.cluster_array_size) {
1599 #ifndef __clang_analyzer__
1600 		void *tmp;
1601 
1602 		/* scan-build really can't figure reallocs, workaround it */
1603 		tmp = realloc(blob->active.clusters, sizeof(*blob->active.clusters) * blob->active.num_clusters);
1604 		assert(tmp != NULL);
1605 		blob->active.clusters = tmp;
1606 
1607 		tmp = realloc(blob->active.extent_pages, sizeof(uint32_t) * blob->active.num_extent_pages);
1608 		assert(tmp != NULL);
1609 		blob->active.extent_pages = tmp;
1610 #endif
1611 		blob->active.extent_pages_array_size = blob->active.num_extent_pages;
1612 		blob->active.cluster_array_size = blob->active.num_clusters;
1613 	}
1614 
1615 	/* TODO: Add path to persist clear extent pages. */
1616 	_spdk_blob_persist_complete(seq, ctx, bserrno);
1617 }
1618 
1619 static void
1620 _spdk_blob_persist_clear_clusters(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
1621 {
1622 	struct spdk_blob_persist_ctx	*ctx = cb_arg;
1623 	struct spdk_blob		*blob = ctx->blob;
1624 	struct spdk_blob_store		*bs = blob->bs;
1625 	spdk_bs_batch_t			*batch;
1626 	size_t				i;
1627 	uint64_t			lba;
1628 	uint32_t			lba_count;
1629 
1630 	/* Clusters don't move around in blobs. The list shrinks or grows
1631 	 * at the end, but no changes ever occur in the middle of the list.
1632 	 */
1633 
1634 	batch = spdk_bs_sequence_to_batch(seq, _spdk_blob_persist_clear_clusters_cpl, ctx);
1635 
1636 	/* Clear all clusters that were truncated */
1637 	lba = 0;
1638 	lba_count = 0;
1639 	for (i = blob->active.num_clusters; i < blob->active.cluster_array_size; i++) {
1640 		uint64_t next_lba = blob->active.clusters[i];
1641 		uint32_t next_lba_count = _spdk_bs_cluster_to_lba(bs, 1);
1642 
1643 		if (next_lba > 0 && (lba + lba_count) == next_lba) {
1644 			/* This cluster is contiguous with the previous one. */
1645 			lba_count += next_lba_count;
1646 			continue;
1647 		}
1648 
1649 		/* This cluster is not contiguous with the previous one. */
1650 
1651 		/* If a run of LBAs previously existing, clear them now */
1652 		if (lba_count > 0) {
1653 			spdk_bs_batch_clear_dev(ctx, batch, lba, lba_count);
1654 		}
1655 
1656 		/* Start building the next batch */
1657 		lba = next_lba;
1658 		if (next_lba > 0) {
1659 			lba_count = next_lba_count;
1660 		} else {
1661 			lba_count = 0;
1662 		}
1663 	}
1664 
1665 	/* If we ended with a contiguous set of LBAs, clear them now */
1666 	if (lba_count > 0) {
1667 		spdk_bs_batch_clear_dev(ctx, batch, lba, lba_count);
1668 	}
1669 
1670 	spdk_bs_batch_close(batch);
1671 }
1672 
1673 static void
1674 _spdk_blob_persist_zero_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
1675 {
1676 	struct spdk_blob_persist_ctx	*ctx = cb_arg;
1677 	struct spdk_blob		*blob = ctx->blob;
1678 	struct spdk_blob_store		*bs = blob->bs;
1679 	size_t				i;
1680 
1681 	/* This loop starts at 1 because the first page is special and handled
1682 	 * below. The pages (except the first) are never written in place,
1683 	 * so any pages in the clean list must be zeroed.
1684 	 */
1685 	for (i = 1; i < blob->clean.num_pages; i++) {
1686 		_spdk_bs_release_md_page(bs, blob->clean.pages[i]);
1687 	}
1688 
1689 	if (blob->active.num_pages == 0) {
1690 		uint32_t page_num;
1691 
1692 		page_num = _spdk_bs_blobid_to_page(blob->id);
1693 		_spdk_bs_release_md_page(bs, page_num);
1694 	}
1695 
1696 	/* Move on to clearing clusters */
1697 	_spdk_blob_persist_clear_clusters(seq, ctx, 0);
1698 }
1699 
1700 static void
1701 _spdk_blob_persist_zero_pages(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
1702 {
1703 	struct spdk_blob_persist_ctx	*ctx = cb_arg;
1704 	struct spdk_blob		*blob = ctx->blob;
1705 	struct spdk_blob_store		*bs = blob->bs;
1706 	uint64_t			lba;
1707 	uint32_t			lba_count;
1708 	spdk_bs_batch_t			*batch;
1709 	size_t				i;
1710 
1711 	batch = spdk_bs_sequence_to_batch(seq, _spdk_blob_persist_zero_pages_cpl, ctx);
1712 
1713 	lba_count = _spdk_bs_byte_to_lba(bs, SPDK_BS_PAGE_SIZE);
1714 
1715 	/* This loop starts at 1 because the first page is special and handled
1716 	 * below. The pages (except the first) are never written in place,
1717 	 * so any pages in the clean list must be zeroed.
1718 	 */
1719 	for (i = 1; i < blob->clean.num_pages; i++) {
1720 		lba = _spdk_bs_md_page_to_lba(bs, blob->clean.pages[i]);
1721 
1722 		spdk_bs_batch_write_zeroes_dev(batch, lba, lba_count);
1723 	}
1724 
1725 	/* The first page will only be zeroed if this is a delete. */
1726 	if (blob->active.num_pages == 0) {
1727 		uint32_t page_num;
1728 
1729 		/* The first page in the metadata goes where the blobid indicates */
1730 		page_num = _spdk_bs_blobid_to_page(blob->id);
1731 		lba = _spdk_bs_md_page_to_lba(bs, page_num);
1732 
1733 		spdk_bs_batch_write_zeroes_dev(batch, lba, lba_count);
1734 	}
1735 
1736 	spdk_bs_batch_close(batch);
1737 }
1738 
1739 static void
1740 _spdk_blob_persist_write_page_root(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
1741 {
1742 	struct spdk_blob_persist_ctx	*ctx = cb_arg;
1743 	struct spdk_blob		*blob = ctx->blob;
1744 	struct spdk_blob_store		*bs = blob->bs;
1745 	uint64_t			lba;
1746 	uint32_t			lba_count;
1747 	struct spdk_blob_md_page	*page;
1748 
1749 	if (blob->active.num_pages == 0) {
1750 		/* Move on to the next step */
1751 		_spdk_blob_persist_zero_pages(seq, ctx, 0);
1752 		return;
1753 	}
1754 
1755 	lba_count = _spdk_bs_byte_to_lba(bs, sizeof(*page));
1756 
1757 	page = &ctx->pages[0];
1758 	/* The first page in the metadata goes where the blobid indicates */
1759 	lba = _spdk_bs_md_page_to_lba(bs, _spdk_bs_blobid_to_page(blob->id));
1760 
1761 	spdk_bs_sequence_write_dev(seq, page, lba, lba_count,
1762 				   _spdk_blob_persist_zero_pages, ctx);
1763 }
1764 
1765 static void
1766 _spdk_blob_persist_write_page_chain(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
1767 {
1768 	struct spdk_blob_persist_ctx	*ctx = cb_arg;
1769 	struct spdk_blob		*blob = ctx->blob;
1770 	struct spdk_blob_store		*bs = blob->bs;
1771 	uint64_t			lba;
1772 	uint32_t			lba_count;
1773 	struct spdk_blob_md_page	*page;
1774 	spdk_bs_batch_t			*batch;
1775 	size_t				i;
1776 
1777 	/* Clusters don't move around in blobs. The list shrinks or grows
1778 	 * at the end, but no changes ever occur in the middle of the list.
1779 	 */
1780 
1781 	lba_count = _spdk_bs_byte_to_lba(bs, sizeof(*page));
1782 
1783 	batch = spdk_bs_sequence_to_batch(seq, _spdk_blob_persist_write_page_root, ctx);
1784 
1785 	/* This starts at 1. The root page is not written until
1786 	 * all of the others are finished
1787 	 */
1788 	for (i = 1; i < blob->active.num_pages; i++) {
1789 		page = &ctx->pages[i];
1790 		assert(page->sequence_num == i);
1791 
1792 		lba = _spdk_bs_md_page_to_lba(bs, blob->active.pages[i]);
1793 
1794 		spdk_bs_batch_write_dev(batch, page, lba, lba_count);
1795 	}
1796 
1797 	spdk_bs_batch_close(batch);
1798 }
1799 
1800 static int
1801 _spdk_blob_resize(struct spdk_blob *blob, uint64_t sz)
1802 {
1803 	uint64_t	i;
1804 	uint64_t	*tmp;
1805 	uint64_t	lfc; /* lowest free cluster */
1806 	uint32_t	lfmd; /*  lowest free md page */
1807 	uint64_t	num_clusters;
1808 	uint32_t	*ep_tmp;
1809 	uint64_t	new_num_ep = 0, current_num_ep = 0;
1810 	struct spdk_blob_store *bs;
1811 
1812 	bs = blob->bs;
1813 
1814 	_spdk_blob_verify_md_op(blob);
1815 
1816 	if (blob->active.num_clusters == sz) {
1817 		return 0;
1818 	}
1819 
1820 	if (blob->active.num_clusters < blob->active.cluster_array_size) {
1821 		/* If this blob was resized to be larger, then smaller, then
1822 		 * larger without syncing, then the cluster array already
1823 		 * contains spare assigned clusters we can use.
1824 		 */
1825 		num_clusters = spdk_min(blob->active.cluster_array_size,
1826 					sz);
1827 	} else {
1828 		num_clusters = blob->active.num_clusters;
1829 	}
1830 
1831 	if (blob->use_extent_table) {
1832 		/* Round up since every cluster beyond current Extent Table size,
1833 		 * requires new extent page. */
1834 		new_num_ep = spdk_divide_round_up(sz, SPDK_EXTENTS_PER_EP);
1835 		current_num_ep = spdk_divide_round_up(num_clusters, SPDK_EXTENTS_PER_EP);
1836 	}
1837 
1838 	/* Do two passes - one to verify that we can obtain enough clusters
1839 	 * and md pages, another to actually claim them.
1840 	 */
1841 
1842 	if (spdk_blob_is_thin_provisioned(blob) == false) {
1843 		lfc = 0;
1844 		for (i = num_clusters; i < sz; i++) {
1845 			lfc = spdk_bit_array_find_first_clear(bs->used_clusters, lfc);
1846 			if (lfc == UINT32_MAX) {
1847 				/* No more free clusters. Cannot satisfy the request */
1848 				return -ENOSPC;
1849 			}
1850 			lfc++;
1851 		}
1852 		lfmd = 0;
1853 		for (i = current_num_ep; i < new_num_ep ; i++) {
1854 			lfmd = spdk_bit_array_find_first_clear(blob->bs->used_md_pages, lfmd);
1855 			if (lfmd == UINT32_MAX) {
1856 				/* No more free md pages. Cannot satisfy the request */
1857 				return -ENOSPC;
1858 			}
1859 		}
1860 	}
1861 
1862 	if (sz > num_clusters) {
1863 		/* Expand the cluster array if necessary.
1864 		 * We only shrink the array when persisting.
1865 		 */
1866 		tmp = realloc(blob->active.clusters, sizeof(*blob->active.clusters) * sz);
1867 		if (sz > 0 && tmp == NULL) {
1868 			return -ENOMEM;
1869 		}
1870 		memset(tmp + blob->active.cluster_array_size, 0,
1871 		       sizeof(*blob->active.clusters) * (sz - blob->active.cluster_array_size));
1872 		blob->active.clusters = tmp;
1873 		blob->active.cluster_array_size = sz;
1874 
1875 		/* Expand the extents table, only if enough clusters were added */
1876 		if (new_num_ep > current_num_ep && blob->use_extent_table) {
1877 			ep_tmp = realloc(blob->active.extent_pages, sizeof(*blob->active.extent_pages) * new_num_ep);
1878 			if (new_num_ep > 0 && ep_tmp == NULL) {
1879 				return -ENOMEM;
1880 			}
1881 			memset(ep_tmp + blob->active.extent_pages_array_size, 0,
1882 			       sizeof(*blob->active.extent_pages) * (new_num_ep - blob->active.extent_pages_array_size));
1883 			blob->active.extent_pages = ep_tmp;
1884 			blob->active.extent_pages_array_size = new_num_ep;
1885 		}
1886 	}
1887 
1888 	blob->state = SPDK_BLOB_STATE_DIRTY;
1889 
1890 	if (spdk_blob_is_thin_provisioned(blob) == false) {
1891 		lfc = 0;
1892 		lfmd = 0;
1893 		for (i = num_clusters; i < sz; i++) {
1894 			_spdk_bs_allocate_cluster(blob, i, &lfc, &lfmd, true);
1895 			lfc++;
1896 			lfmd++;
1897 		}
1898 	}
1899 
1900 	blob->active.num_clusters = sz;
1901 	blob->active.num_extent_pages = new_num_ep;
1902 
1903 	return 0;
1904 }
1905 
1906 static void
1907 _spdk_blob_persist_generate_new_md(struct spdk_blob_persist_ctx *ctx)
1908 {
1909 	spdk_bs_sequence_t *seq = ctx->seq;
1910 	struct spdk_blob *blob = ctx->blob;
1911 	struct spdk_blob_store *bs = blob->bs;
1912 	uint64_t i;
1913 	uint32_t page_num;
1914 	void *tmp;
1915 	int rc;
1916 
1917 	/* Generate the new metadata */
1918 	rc = _spdk_blob_serialize(blob, &ctx->pages, &blob->active.num_pages);
1919 	if (rc < 0) {
1920 		_spdk_blob_persist_complete(seq, ctx, rc);
1921 		return;
1922 	}
1923 
1924 	assert(blob->active.num_pages >= 1);
1925 
1926 	/* Resize the cache of page indices */
1927 	tmp = realloc(blob->active.pages, blob->active.num_pages * sizeof(*blob->active.pages));
1928 	if (!tmp) {
1929 		_spdk_blob_persist_complete(seq, ctx, -ENOMEM);
1930 		return;
1931 	}
1932 	blob->active.pages = tmp;
1933 
1934 	/* Assign this metadata to pages. This requires two passes -
1935 	 * one to verify that there are enough pages and a second
1936 	 * to actually claim them. */
1937 	page_num = 0;
1938 	/* Note that this loop starts at one. The first page location is fixed by the blobid. */
1939 	for (i = 1; i < blob->active.num_pages; i++) {
1940 		page_num = spdk_bit_array_find_first_clear(bs->used_md_pages, page_num);
1941 		if (page_num == UINT32_MAX) {
1942 			_spdk_blob_persist_complete(seq, ctx, -ENOMEM);
1943 			return;
1944 		}
1945 		page_num++;
1946 	}
1947 
1948 	page_num = 0;
1949 	blob->active.pages[0] = _spdk_bs_blobid_to_page(blob->id);
1950 	for (i = 1; i < blob->active.num_pages; i++) {
1951 		page_num = spdk_bit_array_find_first_clear(bs->used_md_pages, page_num);
1952 		ctx->pages[i - 1].next = page_num;
1953 		/* Now that previous metadata page is complete, calculate the crc for it. */
1954 		ctx->pages[i - 1].crc = _spdk_blob_md_page_calc_crc(&ctx->pages[i - 1]);
1955 		blob->active.pages[i] = page_num;
1956 		_spdk_bs_claim_md_page(bs, page_num);
1957 		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Claiming page %u for blob %lu\n", page_num, blob->id);
1958 		page_num++;
1959 	}
1960 	ctx->pages[i - 1].crc = _spdk_blob_md_page_calc_crc(&ctx->pages[i - 1]);
1961 	/* Start writing the metadata from last page to first */
1962 	blob->state = SPDK_BLOB_STATE_CLEAN;
1963 	_spdk_blob_persist_write_page_chain(seq, ctx, 0);
1964 }
1965 
1966 static void _spdk_blob_persist_write_extent_pages(spdk_bs_sequence_t *seq, void *cb_arg,
1967 		int bserrno);
1968 
1969 static void
1970 _spdk_blob_persist_write_extent_page(uint32_t extent, uint64_t cluster_num,
1971 				     struct spdk_blob_persist_ctx *ctx)
1972 {
1973 	spdk_bs_sequence_t		*seq = ctx->seq;
1974 	uint32_t                        page_count = 0;
1975 	struct spdk_blob		*blob = ctx->blob;
1976 	int				rc;
1977 
1978 	rc = _spdk_blob_serialize_add_page(blob, &ctx->extent_page, &page_count, &ctx->extent_page);
1979 	if (rc < 0) {
1980 		assert(false);
1981 		return;
1982 	}
1983 
1984 	_spdk_blob_serialize_extent_page(blob, cluster_num, ctx->extent_page);
1985 
1986 	ctx->extent_page->crc = _spdk_blob_md_page_calc_crc(ctx->extent_page);
1987 
1988 	spdk_bs_sequence_write_dev(seq, ctx->extent_page, _spdk_bs_md_page_to_lba(blob->bs, extent),
1989 				   _spdk_bs_byte_to_lba(blob->bs, SPDK_BS_PAGE_SIZE),
1990 				   _spdk_blob_persist_write_extent_pages, ctx);
1991 }
1992 
1993 static void
1994 _spdk_blob_persist_write_extent_pages(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
1995 {
1996 	struct spdk_blob_persist_ctx	*ctx = cb_arg;
1997 	struct spdk_blob		*blob = ctx->blob;
1998 	size_t				i;
1999 	uint32_t			extent_page_id;
2000 
2001 	if (ctx->extent_page != NULL) {
2002 		spdk_free(ctx->extent_page);
2003 		ctx->extent_page = NULL;
2004 	}
2005 
2006 	/* Only write out changed extent pages */
2007 	for (i = ctx->next_extent_page; i < blob->active.num_extent_pages; i++) {
2008 		extent_page_id = blob->active.extent_pages[i];
2009 		if (extent_page_id == 0) {
2010 			/* No Extent Page to persist */
2011 			assert(spdk_blob_is_thin_provisioned(blob));
2012 			continue;
2013 		}
2014 		/* Writing out new extent page for the first time. Either active extent pages is larger
2015 		 * than clean extent pages or there was no extent page assigned due to thin provisioning. */
2016 		if (i >= blob->clean.extent_pages_array_size || blob->clean.extent_pages[i] == 0) {
2017 			blob->state = SPDK_BLOB_STATE_DIRTY;
2018 			assert(spdk_bit_array_get(blob->bs->used_md_pages, extent_page_id));
2019 			ctx->next_extent_page = i + 1;
2020 			_spdk_blob_persist_write_extent_page(extent_page_id, i * SPDK_EXTENTS_PER_EP, ctx);
2021 			return;
2022 		}
2023 		assert(blob->clean.extent_pages[i] != 0);
2024 	}
2025 
2026 	_spdk_blob_persist_generate_new_md(ctx);
2027 }
2028 
2029 static void
2030 _spdk_blob_persist_start(struct spdk_blob_persist_ctx *ctx)
2031 {
2032 	spdk_bs_sequence_t *seq = ctx->seq;
2033 	struct spdk_blob *blob = ctx->blob;
2034 
2035 	if (blob->active.num_pages == 0) {
2036 		/* This is the signal that the blob should be deleted.
2037 		 * Immediately jump to the clean up routine. */
2038 		assert(blob->clean.num_pages > 0);
2039 		blob->state = SPDK_BLOB_STATE_CLEAN;
2040 		_spdk_blob_persist_zero_pages(seq, ctx, 0);
2041 		return;
2042 
2043 	}
2044 
2045 	_spdk_blob_persist_write_extent_pages(seq, ctx, 0);
2046 }
2047 
2048 static void
2049 _spdk_blob_persist_dirty_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2050 {
2051 	struct spdk_blob_persist_ctx *ctx = cb_arg;
2052 
2053 	ctx->blob->bs->clean = 0;
2054 
2055 	spdk_free(ctx->super);
2056 
2057 	_spdk_blob_persist_start(ctx);
2058 }
2059 
2060 static void
2061 _spdk_bs_write_super(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs,
2062 		     struct spdk_bs_super_block *super, spdk_bs_sequence_cpl cb_fn, void *cb_arg);
2063 
2064 
2065 static void
2066 _spdk_blob_persist_dirty(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2067 {
2068 	struct spdk_blob_persist_ctx *ctx = cb_arg;
2069 
2070 	ctx->super->clean = 0;
2071 	if (ctx->super->size == 0) {
2072 		ctx->super->size = ctx->blob->bs->dev->blockcnt * ctx->blob->bs->dev->blocklen;
2073 	}
2074 
2075 	_spdk_bs_write_super(seq, ctx->blob->bs, ctx->super, _spdk_blob_persist_dirty_cpl, ctx);
2076 }
2077 
2078 static void
2079 _spdk_blob_persist_check_dirty(struct spdk_blob_persist_ctx *ctx)
2080 {
2081 	if (ctx->blob->bs->clean) {
2082 		ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL,
2083 					  SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
2084 		if (!ctx->super) {
2085 			ctx->cb_fn(ctx->seq, ctx->cb_arg, -ENOMEM);
2086 			free(ctx);
2087 			return;
2088 		}
2089 
2090 		spdk_bs_sequence_read_dev(ctx->seq, ctx->super, _spdk_bs_page_to_lba(ctx->blob->bs, 0),
2091 					  _spdk_bs_byte_to_lba(ctx->blob->bs, sizeof(*ctx->super)),
2092 					  _spdk_blob_persist_dirty, ctx);
2093 	} else {
2094 		_spdk_blob_persist_start(ctx);
2095 	}
2096 }
2097 
2098 /* Write a blob to disk */
2099 static void
2100 _spdk_blob_persist(spdk_bs_sequence_t *seq, struct spdk_blob *blob,
2101 		   spdk_bs_sequence_cpl cb_fn, void *cb_arg)
2102 {
2103 	struct spdk_blob_persist_ctx *ctx;
2104 
2105 	_spdk_blob_verify_md_op(blob);
2106 
2107 	if (blob->state == SPDK_BLOB_STATE_CLEAN && TAILQ_EMPTY(&blob->pending_persists)) {
2108 		cb_fn(seq, cb_arg, 0);
2109 		return;
2110 	}
2111 
2112 	ctx = calloc(1, sizeof(*ctx));
2113 	if (!ctx) {
2114 		cb_fn(seq, cb_arg, -ENOMEM);
2115 		return;
2116 	}
2117 	ctx->blob = blob;
2118 	ctx->seq = seq;
2119 	ctx->cb_fn = cb_fn;
2120 	ctx->cb_arg = cb_arg;
2121 	ctx->next_extent_page = 0;
2122 
2123 	/* Multiple blob persists can affect one another, via blob->state or
2124 	 * blob mutable data changes. To prevent it, queue up the persists. */
2125 	if (!TAILQ_EMPTY(&blob->pending_persists)) {
2126 		TAILQ_INSERT_TAIL(&blob->pending_persists, ctx, link);
2127 		return;
2128 	}
2129 	TAILQ_INSERT_HEAD(&blob->pending_persists, ctx, link);
2130 
2131 	_spdk_blob_persist_check_dirty(ctx);
2132 }
2133 
2134 struct spdk_blob_copy_cluster_ctx {
2135 	struct spdk_blob *blob;
2136 	uint8_t *buf;
2137 	uint64_t page;
2138 	uint64_t new_cluster;
2139 	uint32_t new_extent_page;
2140 	spdk_bs_sequence_t *seq;
2141 };
2142 
2143 static void
2144 _spdk_blob_allocate_and_copy_cluster_cpl(void *cb_arg, int bserrno)
2145 {
2146 	struct spdk_blob_copy_cluster_ctx *ctx = cb_arg;
2147 	struct spdk_bs_request_set *set = (struct spdk_bs_request_set *)ctx->seq;
2148 	TAILQ_HEAD(, spdk_bs_request_set) requests;
2149 	spdk_bs_user_op_t *op;
2150 
2151 	TAILQ_INIT(&requests);
2152 	TAILQ_SWAP(&set->channel->need_cluster_alloc, &requests, spdk_bs_request_set, link);
2153 
2154 	while (!TAILQ_EMPTY(&requests)) {
2155 		op = TAILQ_FIRST(&requests);
2156 		TAILQ_REMOVE(&requests, op, link);
2157 		if (bserrno == 0) {
2158 			spdk_bs_user_op_execute(op);
2159 		} else {
2160 			spdk_bs_user_op_abort(op);
2161 		}
2162 	}
2163 
2164 	spdk_free(ctx->buf);
2165 	free(ctx);
2166 }
2167 
2168 static void
2169 _spdk_blob_insert_cluster_cpl(void *cb_arg, int bserrno)
2170 {
2171 	struct spdk_blob_copy_cluster_ctx *ctx = cb_arg;
2172 
2173 	if (bserrno) {
2174 		if (bserrno == -EEXIST) {
2175 			/* The metadata insert failed because another thread
2176 			 * allocated the cluster first. Free our cluster
2177 			 * but continue without error. */
2178 			bserrno = 0;
2179 		}
2180 		_spdk_bs_release_cluster(ctx->blob->bs, ctx->new_cluster);
2181 		if (ctx->new_extent_page != 0) {
2182 			_spdk_bs_release_md_page(ctx->blob->bs, ctx->new_extent_page);
2183 		}
2184 	}
2185 
2186 	spdk_bs_sequence_finish(ctx->seq, bserrno);
2187 }
2188 
2189 static void
2190 _spdk_blob_write_copy_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2191 {
2192 	struct spdk_blob_copy_cluster_ctx *ctx = cb_arg;
2193 	uint32_t cluster_number;
2194 
2195 	if (bserrno) {
2196 		/* The write failed, so jump to the final completion handler */
2197 		spdk_bs_sequence_finish(seq, bserrno);
2198 		return;
2199 	}
2200 
2201 	cluster_number = _spdk_bs_page_to_cluster(ctx->blob->bs, ctx->page);
2202 
2203 	_spdk_blob_insert_cluster_on_md_thread(ctx->blob, cluster_number, ctx->new_cluster,
2204 					       ctx->new_extent_page, _spdk_blob_insert_cluster_cpl, ctx);
2205 }
2206 
2207 static void
2208 _spdk_blob_write_copy(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2209 {
2210 	struct spdk_blob_copy_cluster_ctx *ctx = cb_arg;
2211 
2212 	if (bserrno != 0) {
2213 		/* The read failed, so jump to the final completion handler */
2214 		spdk_bs_sequence_finish(seq, bserrno);
2215 		return;
2216 	}
2217 
2218 	/* Write whole cluster */
2219 	spdk_bs_sequence_write_dev(seq, ctx->buf,
2220 				   _spdk_bs_cluster_to_lba(ctx->blob->bs, ctx->new_cluster),
2221 				   _spdk_bs_cluster_to_lba(ctx->blob->bs, 1),
2222 				   _spdk_blob_write_copy_cpl, ctx);
2223 }
2224 
2225 static void
2226 _spdk_bs_allocate_and_copy_cluster(struct spdk_blob *blob,
2227 				   struct spdk_io_channel *_ch,
2228 				   uint64_t io_unit, spdk_bs_user_op_t *op)
2229 {
2230 	struct spdk_bs_cpl cpl;
2231 	struct spdk_bs_channel *ch;
2232 	struct spdk_blob_copy_cluster_ctx *ctx;
2233 	uint32_t cluster_start_page;
2234 	uint32_t cluster_number;
2235 	int rc;
2236 
2237 	ch = spdk_io_channel_get_ctx(_ch);
2238 
2239 	if (!TAILQ_EMPTY(&ch->need_cluster_alloc)) {
2240 		/* There are already operations pending. Queue this user op
2241 		 * and return because it will be re-executed when the outstanding
2242 		 * cluster allocation completes. */
2243 		TAILQ_INSERT_TAIL(&ch->need_cluster_alloc, op, link);
2244 		return;
2245 	}
2246 
2247 	/* Round the io_unit offset down to the first page in the cluster */
2248 	cluster_start_page = _spdk_bs_io_unit_to_cluster_start(blob, io_unit);
2249 
2250 	/* Calculate which index in the metadata cluster array the corresponding
2251 	 * cluster is supposed to be at. */
2252 	cluster_number = _spdk_bs_io_unit_to_cluster_number(blob, io_unit);
2253 
2254 	ctx = calloc(1, sizeof(*ctx));
2255 	if (!ctx) {
2256 		spdk_bs_user_op_abort(op);
2257 		return;
2258 	}
2259 
2260 	assert(blob->bs->cluster_sz % blob->back_bs_dev->blocklen == 0);
2261 
2262 	ctx->blob = blob;
2263 	ctx->page = cluster_start_page;
2264 
2265 	if (blob->parent_id != SPDK_BLOBID_INVALID) {
2266 		ctx->buf = spdk_malloc(blob->bs->cluster_sz, blob->back_bs_dev->blocklen,
2267 				       NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
2268 		if (!ctx->buf) {
2269 			SPDK_ERRLOG("DMA allocation for cluster of size = %" PRIu32 " failed.\n",
2270 				    blob->bs->cluster_sz);
2271 			free(ctx);
2272 			spdk_bs_user_op_abort(op);
2273 			return;
2274 		}
2275 	}
2276 
2277 	rc = _spdk_bs_allocate_cluster(blob, cluster_number, &ctx->new_cluster, &ctx->new_extent_page,
2278 				       false);
2279 	if (rc != 0) {
2280 		spdk_free(ctx->buf);
2281 		free(ctx);
2282 		spdk_bs_user_op_abort(op);
2283 		return;
2284 	}
2285 
2286 	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
2287 	cpl.u.blob_basic.cb_fn = _spdk_blob_allocate_and_copy_cluster_cpl;
2288 	cpl.u.blob_basic.cb_arg = ctx;
2289 
2290 	ctx->seq = spdk_bs_sequence_start(_ch, &cpl);
2291 	if (!ctx->seq) {
2292 		_spdk_bs_release_cluster(blob->bs, ctx->new_cluster);
2293 		spdk_free(ctx->buf);
2294 		free(ctx);
2295 		spdk_bs_user_op_abort(op);
2296 		return;
2297 	}
2298 
2299 	/* Queue the user op to block other incoming operations */
2300 	TAILQ_INSERT_TAIL(&ch->need_cluster_alloc, op, link);
2301 
2302 	if (blob->parent_id != SPDK_BLOBID_INVALID) {
2303 		/* Read cluster from backing device */
2304 		spdk_bs_sequence_read_bs_dev(ctx->seq, blob->back_bs_dev, ctx->buf,
2305 					     _spdk_bs_dev_page_to_lba(blob->back_bs_dev, cluster_start_page),
2306 					     _spdk_bs_dev_byte_to_lba(blob->back_bs_dev, blob->bs->cluster_sz),
2307 					     _spdk_blob_write_copy, ctx);
2308 	} else {
2309 		_spdk_blob_insert_cluster_on_md_thread(ctx->blob, cluster_number, ctx->new_cluster,
2310 						       ctx->new_extent_page, _spdk_blob_insert_cluster_cpl, ctx);
2311 	}
2312 }
2313 
2314 static void
2315 _spdk_blob_calculate_lba_and_lba_count(struct spdk_blob *blob, uint64_t io_unit, uint64_t length,
2316 				       uint64_t *lba,	uint32_t *lba_count)
2317 {
2318 	*lba_count = length;
2319 
2320 	if (!_spdk_bs_io_unit_is_allocated(blob, io_unit)) {
2321 		assert(blob->back_bs_dev != NULL);
2322 		*lba = _spdk_bs_io_unit_to_back_dev_lba(blob, io_unit);
2323 		*lba_count = _spdk_bs_io_unit_to_back_dev_lba(blob, *lba_count);
2324 	} else {
2325 		*lba = _spdk_bs_blob_io_unit_to_lba(blob, io_unit);
2326 	}
2327 }
2328 
2329 struct op_split_ctx {
2330 	struct spdk_blob *blob;
2331 	struct spdk_io_channel *channel;
2332 	uint64_t io_unit_offset;
2333 	uint64_t io_units_remaining;
2334 	void *curr_payload;
2335 	enum spdk_blob_op_type op_type;
2336 	spdk_bs_sequence_t *seq;
2337 };
2338 
2339 static void
2340 _spdk_blob_request_submit_op_split_next(void *cb_arg, int bserrno)
2341 {
2342 	struct op_split_ctx	*ctx = cb_arg;
2343 	struct spdk_blob	*blob = ctx->blob;
2344 	struct spdk_io_channel	*ch = ctx->channel;
2345 	enum spdk_blob_op_type	op_type = ctx->op_type;
2346 	uint8_t			*buf = ctx->curr_payload;
2347 	uint64_t		offset = ctx->io_unit_offset;
2348 	uint64_t		length = ctx->io_units_remaining;
2349 	uint64_t		op_length;
2350 
2351 	if (bserrno != 0 || ctx->io_units_remaining == 0) {
2352 		spdk_bs_sequence_finish(ctx->seq, bserrno);
2353 		free(ctx);
2354 		return;
2355 	}
2356 
2357 	op_length = spdk_min(length, _spdk_bs_num_io_units_to_cluster_boundary(blob,
2358 			     offset));
2359 
2360 	/* Update length and payload for next operation */
2361 	ctx->io_units_remaining -= op_length;
2362 	ctx->io_unit_offset += op_length;
2363 	if (op_type == SPDK_BLOB_WRITE || op_type == SPDK_BLOB_READ) {
2364 		ctx->curr_payload += op_length * blob->bs->io_unit_size;
2365 	}
2366 
2367 	switch (op_type) {
2368 	case SPDK_BLOB_READ:
2369 		spdk_blob_io_read(blob, ch, buf, offset, op_length,
2370 				  _spdk_blob_request_submit_op_split_next, ctx);
2371 		break;
2372 	case SPDK_BLOB_WRITE:
2373 		spdk_blob_io_write(blob, ch, buf, offset, op_length,
2374 				   _spdk_blob_request_submit_op_split_next, ctx);
2375 		break;
2376 	case SPDK_BLOB_UNMAP:
2377 		spdk_blob_io_unmap(blob, ch, offset, op_length,
2378 				   _spdk_blob_request_submit_op_split_next, ctx);
2379 		break;
2380 	case SPDK_BLOB_WRITE_ZEROES:
2381 		spdk_blob_io_write_zeroes(blob, ch, offset, op_length,
2382 					  _spdk_blob_request_submit_op_split_next, ctx);
2383 		break;
2384 	case SPDK_BLOB_READV:
2385 	case SPDK_BLOB_WRITEV:
2386 		SPDK_ERRLOG("readv/write not valid\n");
2387 		spdk_bs_sequence_finish(ctx->seq, -EINVAL);
2388 		free(ctx);
2389 		break;
2390 	}
2391 }
2392 
2393 static void
2394 _spdk_blob_request_submit_op_split(struct spdk_io_channel *ch, struct spdk_blob *blob,
2395 				   void *payload, uint64_t offset, uint64_t length,
2396 				   spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type)
2397 {
2398 	struct op_split_ctx *ctx;
2399 	spdk_bs_sequence_t *seq;
2400 	struct spdk_bs_cpl cpl;
2401 
2402 	assert(blob != NULL);
2403 
2404 	ctx = calloc(1, sizeof(struct op_split_ctx));
2405 	if (ctx == NULL) {
2406 		cb_fn(cb_arg, -ENOMEM);
2407 		return;
2408 	}
2409 
2410 	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
2411 	cpl.u.blob_basic.cb_fn = cb_fn;
2412 	cpl.u.blob_basic.cb_arg = cb_arg;
2413 
2414 	seq = spdk_bs_sequence_start(ch, &cpl);
2415 	if (!seq) {
2416 		free(ctx);
2417 		cb_fn(cb_arg, -ENOMEM);
2418 		return;
2419 	}
2420 
2421 	ctx->blob = blob;
2422 	ctx->channel = ch;
2423 	ctx->curr_payload = payload;
2424 	ctx->io_unit_offset = offset;
2425 	ctx->io_units_remaining = length;
2426 	ctx->op_type = op_type;
2427 	ctx->seq = seq;
2428 
2429 	_spdk_blob_request_submit_op_split_next(ctx, 0);
2430 }
2431 
2432 static void
2433 _spdk_blob_request_submit_op_single(struct spdk_io_channel *_ch, struct spdk_blob *blob,
2434 				    void *payload, uint64_t offset, uint64_t length,
2435 				    spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type)
2436 {
2437 	struct spdk_bs_cpl cpl;
2438 	uint64_t lba;
2439 	uint32_t lba_count;
2440 
2441 	assert(blob != NULL);
2442 
2443 	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
2444 	cpl.u.blob_basic.cb_fn = cb_fn;
2445 	cpl.u.blob_basic.cb_arg = cb_arg;
2446 
2447 	_spdk_blob_calculate_lba_and_lba_count(blob, offset, length, &lba, &lba_count);
2448 
2449 	if (blob->frozen_refcnt) {
2450 		/* This blob I/O is frozen */
2451 		spdk_bs_user_op_t *op;
2452 		struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(_ch);
2453 
2454 		op = spdk_bs_user_op_alloc(_ch, &cpl, op_type, blob, payload, 0, offset, length);
2455 		if (!op) {
2456 			cb_fn(cb_arg, -ENOMEM);
2457 			return;
2458 		}
2459 
2460 		TAILQ_INSERT_TAIL(&bs_channel->queued_io, op, link);
2461 
2462 		return;
2463 	}
2464 
2465 	switch (op_type) {
2466 	case SPDK_BLOB_READ: {
2467 		spdk_bs_batch_t *batch;
2468 
2469 		batch = spdk_bs_batch_open(_ch, &cpl);
2470 		if (!batch) {
2471 			cb_fn(cb_arg, -ENOMEM);
2472 			return;
2473 		}
2474 
2475 		if (_spdk_bs_io_unit_is_allocated(blob, offset)) {
2476 			/* Read from the blob */
2477 			spdk_bs_batch_read_dev(batch, payload, lba, lba_count);
2478 		} else {
2479 			/* Read from the backing block device */
2480 			spdk_bs_batch_read_bs_dev(batch, blob->back_bs_dev, payload, lba, lba_count);
2481 		}
2482 
2483 		spdk_bs_batch_close(batch);
2484 		break;
2485 	}
2486 	case SPDK_BLOB_WRITE:
2487 	case SPDK_BLOB_WRITE_ZEROES: {
2488 		if (_spdk_bs_io_unit_is_allocated(blob, offset)) {
2489 			/* Write to the blob */
2490 			spdk_bs_batch_t *batch;
2491 
2492 			if (lba_count == 0) {
2493 				cb_fn(cb_arg, 0);
2494 				return;
2495 			}
2496 
2497 			batch = spdk_bs_batch_open(_ch, &cpl);
2498 			if (!batch) {
2499 				cb_fn(cb_arg, -ENOMEM);
2500 				return;
2501 			}
2502 
2503 			if (op_type == SPDK_BLOB_WRITE) {
2504 				spdk_bs_batch_write_dev(batch, payload, lba, lba_count);
2505 			} else {
2506 				spdk_bs_batch_write_zeroes_dev(batch, lba, lba_count);
2507 			}
2508 
2509 			spdk_bs_batch_close(batch);
2510 		} else {
2511 			/* Queue this operation and allocate the cluster */
2512 			spdk_bs_user_op_t *op;
2513 
2514 			op = spdk_bs_user_op_alloc(_ch, &cpl, op_type, blob, payload, 0, offset, length);
2515 			if (!op) {
2516 				cb_fn(cb_arg, -ENOMEM);
2517 				return;
2518 			}
2519 
2520 			_spdk_bs_allocate_and_copy_cluster(blob, _ch, offset, op);
2521 		}
2522 		break;
2523 	}
2524 	case SPDK_BLOB_UNMAP: {
2525 		spdk_bs_batch_t *batch;
2526 
2527 		batch = spdk_bs_batch_open(_ch, &cpl);
2528 		if (!batch) {
2529 			cb_fn(cb_arg, -ENOMEM);
2530 			return;
2531 		}
2532 
2533 		if (_spdk_bs_io_unit_is_allocated(blob, offset)) {
2534 			spdk_bs_batch_unmap_dev(batch, lba, lba_count);
2535 		}
2536 
2537 		spdk_bs_batch_close(batch);
2538 		break;
2539 	}
2540 	case SPDK_BLOB_READV:
2541 	case SPDK_BLOB_WRITEV:
2542 		SPDK_ERRLOG("readv/write not valid\n");
2543 		cb_fn(cb_arg, -EINVAL);
2544 		break;
2545 	}
2546 }
2547 
2548 static void
2549 _spdk_blob_request_submit_op(struct spdk_blob *blob, struct spdk_io_channel *_channel,
2550 			     void *payload, uint64_t offset, uint64_t length,
2551 			     spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type)
2552 {
2553 	assert(blob != NULL);
2554 
2555 	if (blob->data_ro && op_type != SPDK_BLOB_READ) {
2556 		cb_fn(cb_arg, -EPERM);
2557 		return;
2558 	}
2559 
2560 	if (offset + length > _spdk_bs_cluster_to_lba(blob->bs, blob->active.num_clusters)) {
2561 		cb_fn(cb_arg, -EINVAL);
2562 		return;
2563 	}
2564 	if (length <= _spdk_bs_num_io_units_to_cluster_boundary(blob, offset)) {
2565 		_spdk_blob_request_submit_op_single(_channel, blob, payload, offset, length,
2566 						    cb_fn, cb_arg, op_type);
2567 	} else {
2568 		_spdk_blob_request_submit_op_split(_channel, blob, payload, offset, length,
2569 						   cb_fn, cb_arg, op_type);
2570 	}
2571 }
2572 
2573 struct rw_iov_ctx {
2574 	struct spdk_blob *blob;
2575 	struct spdk_io_channel *channel;
2576 	spdk_blob_op_complete cb_fn;
2577 	void *cb_arg;
2578 	bool read;
2579 	int iovcnt;
2580 	struct iovec *orig_iov;
2581 	uint64_t io_unit_offset;
2582 	uint64_t io_units_remaining;
2583 	uint64_t io_units_done;
2584 	struct iovec iov[0];
2585 };
2586 
2587 static void
2588 _spdk_rw_iov_done(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2589 {
2590 	assert(cb_arg == NULL);
2591 	spdk_bs_sequence_finish(seq, bserrno);
2592 }
2593 
2594 static void
2595 _spdk_rw_iov_split_next(void *cb_arg, int bserrno)
2596 {
2597 	struct rw_iov_ctx *ctx = cb_arg;
2598 	struct spdk_blob *blob = ctx->blob;
2599 	struct iovec *iov, *orig_iov;
2600 	int iovcnt;
2601 	size_t orig_iovoff;
2602 	uint64_t io_units_count, io_units_to_boundary, io_unit_offset;
2603 	uint64_t byte_count;
2604 
2605 	if (bserrno != 0 || ctx->io_units_remaining == 0) {
2606 		ctx->cb_fn(ctx->cb_arg, bserrno);
2607 		free(ctx);
2608 		return;
2609 	}
2610 
2611 	io_unit_offset = ctx->io_unit_offset;
2612 	io_units_to_boundary = _spdk_bs_num_io_units_to_cluster_boundary(blob, io_unit_offset);
2613 	io_units_count = spdk_min(ctx->io_units_remaining, io_units_to_boundary);
2614 	/*
2615 	 * Get index and offset into the original iov array for our current position in the I/O sequence.
2616 	 *  byte_count will keep track of how many bytes remaining until orig_iov and orig_iovoff will
2617 	 *  point to the current position in the I/O sequence.
2618 	 */
2619 	byte_count = ctx->io_units_done * blob->bs->io_unit_size;
2620 	orig_iov = &ctx->orig_iov[0];
2621 	orig_iovoff = 0;
2622 	while (byte_count > 0) {
2623 		if (byte_count >= orig_iov->iov_len) {
2624 			byte_count -= orig_iov->iov_len;
2625 			orig_iov++;
2626 		} else {
2627 			orig_iovoff = byte_count;
2628 			byte_count = 0;
2629 		}
2630 	}
2631 
2632 	/*
2633 	 * Build an iov array for the next I/O in the sequence.  byte_count will keep track of how many
2634 	 *  bytes of this next I/O remain to be accounted for in the new iov array.
2635 	 */
2636 	byte_count = io_units_count * blob->bs->io_unit_size;
2637 	iov = &ctx->iov[0];
2638 	iovcnt = 0;
2639 	while (byte_count > 0) {
2640 		assert(iovcnt < ctx->iovcnt);
2641 		iov->iov_len = spdk_min(byte_count, orig_iov->iov_len - orig_iovoff);
2642 		iov->iov_base = orig_iov->iov_base + orig_iovoff;
2643 		byte_count -= iov->iov_len;
2644 		orig_iovoff = 0;
2645 		orig_iov++;
2646 		iov++;
2647 		iovcnt++;
2648 	}
2649 
2650 	ctx->io_unit_offset += io_units_count;
2651 	ctx->io_units_remaining -= io_units_count;
2652 	ctx->io_units_done += io_units_count;
2653 	iov = &ctx->iov[0];
2654 
2655 	if (ctx->read) {
2656 		spdk_blob_io_readv(ctx->blob, ctx->channel, iov, iovcnt, io_unit_offset,
2657 				   io_units_count, _spdk_rw_iov_split_next, ctx);
2658 	} else {
2659 		spdk_blob_io_writev(ctx->blob, ctx->channel, iov, iovcnt, io_unit_offset,
2660 				    io_units_count, _spdk_rw_iov_split_next, ctx);
2661 	}
2662 }
2663 
2664 static void
2665 _spdk_blob_request_submit_rw_iov(struct spdk_blob *blob, struct spdk_io_channel *_channel,
2666 				 struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
2667 				 spdk_blob_op_complete cb_fn, void *cb_arg, bool read)
2668 {
2669 	struct spdk_bs_cpl	cpl;
2670 
2671 	assert(blob != NULL);
2672 
2673 	if (!read && blob->data_ro) {
2674 		cb_fn(cb_arg, -EPERM);
2675 		return;
2676 	}
2677 
2678 	if (length == 0) {
2679 		cb_fn(cb_arg, 0);
2680 		return;
2681 	}
2682 
2683 	if (offset + length > _spdk_bs_cluster_to_lba(blob->bs, blob->active.num_clusters)) {
2684 		cb_fn(cb_arg, -EINVAL);
2685 		return;
2686 	}
2687 
2688 	/*
2689 	 * For now, we implement readv/writev using a sequence (instead of a batch) to account for having
2690 	 *  to split a request that spans a cluster boundary.  For I/O that do not span a cluster boundary,
2691 	 *  there will be no noticeable difference compared to using a batch.  For I/O that do span a cluster
2692 	 *  boundary, the target LBAs (after blob offset to LBA translation) may not be contiguous, so we need
2693 	 *  to allocate a separate iov array and split the I/O such that none of the resulting
2694 	 *  smaller I/O cross a cluster boundary.  These smaller I/O will be issued in sequence (not in parallel)
2695 	 *  but since this case happens very infrequently, any performance impact will be negligible.
2696 	 *
2697 	 * This could be optimized in the future to allocate a big enough iov array to account for all of the iovs
2698 	 *  for all of the smaller I/Os, pre-build all of the iov arrays for the smaller I/Os, then issue them
2699 	 *  in a batch.  That would also require creating an intermediate spdk_bs_cpl that would get called
2700 	 *  when the batch was completed, to allow for freeing the memory for the iov arrays.
2701 	 */
2702 	if (spdk_likely(length <= _spdk_bs_num_io_units_to_cluster_boundary(blob, offset))) {
2703 		uint32_t lba_count;
2704 		uint64_t lba;
2705 
2706 		cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
2707 		cpl.u.blob_basic.cb_fn = cb_fn;
2708 		cpl.u.blob_basic.cb_arg = cb_arg;
2709 
2710 		if (blob->frozen_refcnt) {
2711 			/* This blob I/O is frozen */
2712 			enum spdk_blob_op_type op_type;
2713 			spdk_bs_user_op_t *op;
2714 			struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(_channel);
2715 
2716 			op_type = read ? SPDK_BLOB_READV : SPDK_BLOB_WRITEV;
2717 			op = spdk_bs_user_op_alloc(_channel, &cpl, op_type, blob, iov, iovcnt, offset, length);
2718 			if (!op) {
2719 				cb_fn(cb_arg, -ENOMEM);
2720 				return;
2721 			}
2722 
2723 			TAILQ_INSERT_TAIL(&bs_channel->queued_io, op, link);
2724 
2725 			return;
2726 		}
2727 
2728 		_spdk_blob_calculate_lba_and_lba_count(blob, offset, length, &lba, &lba_count);
2729 
2730 		if (read) {
2731 			spdk_bs_sequence_t *seq;
2732 
2733 			seq = spdk_bs_sequence_start(_channel, &cpl);
2734 			if (!seq) {
2735 				cb_fn(cb_arg, -ENOMEM);
2736 				return;
2737 			}
2738 
2739 			if (_spdk_bs_io_unit_is_allocated(blob, offset)) {
2740 				spdk_bs_sequence_readv_dev(seq, iov, iovcnt, lba, lba_count, _spdk_rw_iov_done, NULL);
2741 			} else {
2742 				spdk_bs_sequence_readv_bs_dev(seq, blob->back_bs_dev, iov, iovcnt, lba, lba_count,
2743 							      _spdk_rw_iov_done, NULL);
2744 			}
2745 		} else {
2746 			if (_spdk_bs_io_unit_is_allocated(blob, offset)) {
2747 				spdk_bs_sequence_t *seq;
2748 
2749 				seq = spdk_bs_sequence_start(_channel, &cpl);
2750 				if (!seq) {
2751 					cb_fn(cb_arg, -ENOMEM);
2752 					return;
2753 				}
2754 
2755 				spdk_bs_sequence_writev_dev(seq, iov, iovcnt, lba, lba_count, _spdk_rw_iov_done, NULL);
2756 			} else {
2757 				/* Queue this operation and allocate the cluster */
2758 				spdk_bs_user_op_t *op;
2759 
2760 				op = spdk_bs_user_op_alloc(_channel, &cpl, SPDK_BLOB_WRITEV, blob, iov, iovcnt, offset,
2761 							   length);
2762 				if (!op) {
2763 					cb_fn(cb_arg, -ENOMEM);
2764 					return;
2765 				}
2766 
2767 				_spdk_bs_allocate_and_copy_cluster(blob, _channel, offset, op);
2768 			}
2769 		}
2770 	} else {
2771 		struct rw_iov_ctx *ctx;
2772 
2773 		ctx = calloc(1, sizeof(struct rw_iov_ctx) + iovcnt * sizeof(struct iovec));
2774 		if (ctx == NULL) {
2775 			cb_fn(cb_arg, -ENOMEM);
2776 			return;
2777 		}
2778 
2779 		ctx->blob = blob;
2780 		ctx->channel = _channel;
2781 		ctx->cb_fn = cb_fn;
2782 		ctx->cb_arg = cb_arg;
2783 		ctx->read = read;
2784 		ctx->orig_iov = iov;
2785 		ctx->iovcnt = iovcnt;
2786 		ctx->io_unit_offset = offset;
2787 		ctx->io_units_remaining = length;
2788 		ctx->io_units_done = 0;
2789 
2790 		_spdk_rw_iov_split_next(ctx, 0);
2791 	}
2792 }
2793 
2794 static struct spdk_blob *
2795 _spdk_blob_lookup(struct spdk_blob_store *bs, spdk_blob_id blobid)
2796 {
2797 	struct spdk_blob *blob;
2798 
2799 	TAILQ_FOREACH(blob, &bs->blobs, link) {
2800 		if (blob->id == blobid) {
2801 			return blob;
2802 		}
2803 	}
2804 
2805 	return NULL;
2806 }
2807 
2808 static void
2809 _spdk_blob_get_snapshot_and_clone_entries(struct spdk_blob *blob,
2810 		struct spdk_blob_list **snapshot_entry, struct spdk_blob_list **clone_entry)
2811 {
2812 	assert(blob != NULL);
2813 	*snapshot_entry = NULL;
2814 	*clone_entry = NULL;
2815 
2816 	if (blob->parent_id == SPDK_BLOBID_INVALID) {
2817 		return;
2818 	}
2819 
2820 	TAILQ_FOREACH(*snapshot_entry, &blob->bs->snapshots, link) {
2821 		if ((*snapshot_entry)->id == blob->parent_id) {
2822 			break;
2823 		}
2824 	}
2825 
2826 	if (*snapshot_entry != NULL) {
2827 		TAILQ_FOREACH(*clone_entry, &(*snapshot_entry)->clones, link) {
2828 			if ((*clone_entry)->id == blob->id) {
2829 				break;
2830 			}
2831 		}
2832 
2833 		assert(clone_entry != NULL);
2834 	}
2835 }
2836 
2837 static int
2838 _spdk_bs_channel_create(void *io_device, void *ctx_buf)
2839 {
2840 	struct spdk_blob_store		*bs = io_device;
2841 	struct spdk_bs_channel		*channel = ctx_buf;
2842 	struct spdk_bs_dev		*dev;
2843 	uint32_t			max_ops = bs->max_channel_ops;
2844 	uint32_t			i;
2845 
2846 	dev = bs->dev;
2847 
2848 	channel->req_mem = calloc(max_ops, sizeof(struct spdk_bs_request_set));
2849 	if (!channel->req_mem) {
2850 		return -1;
2851 	}
2852 
2853 	TAILQ_INIT(&channel->reqs);
2854 
2855 	for (i = 0; i < max_ops; i++) {
2856 		TAILQ_INSERT_TAIL(&channel->reqs, &channel->req_mem[i], link);
2857 	}
2858 
2859 	channel->bs = bs;
2860 	channel->dev = dev;
2861 	channel->dev_channel = dev->create_channel(dev);
2862 
2863 	if (!channel->dev_channel) {
2864 		SPDK_ERRLOG("Failed to create device channel.\n");
2865 		free(channel->req_mem);
2866 		return -1;
2867 	}
2868 
2869 	TAILQ_INIT(&channel->need_cluster_alloc);
2870 	TAILQ_INIT(&channel->queued_io);
2871 
2872 	return 0;
2873 }
2874 
2875 static void
2876 _spdk_bs_channel_destroy(void *io_device, void *ctx_buf)
2877 {
2878 	struct spdk_bs_channel *channel = ctx_buf;
2879 	spdk_bs_user_op_t *op;
2880 
2881 	while (!TAILQ_EMPTY(&channel->need_cluster_alloc)) {
2882 		op = TAILQ_FIRST(&channel->need_cluster_alloc);
2883 		TAILQ_REMOVE(&channel->need_cluster_alloc, op, link);
2884 		spdk_bs_user_op_abort(op);
2885 	}
2886 
2887 	while (!TAILQ_EMPTY(&channel->queued_io)) {
2888 		op = TAILQ_FIRST(&channel->queued_io);
2889 		TAILQ_REMOVE(&channel->queued_io, op, link);
2890 		spdk_bs_user_op_abort(op);
2891 	}
2892 
2893 	free(channel->req_mem);
2894 	channel->dev->destroy_channel(channel->dev, channel->dev_channel);
2895 }
2896 
2897 static void
2898 _spdk_bs_dev_destroy(void *io_device)
2899 {
2900 	struct spdk_blob_store *bs = io_device;
2901 	struct spdk_blob	*blob, *blob_tmp;
2902 
2903 	bs->dev->destroy(bs->dev);
2904 
2905 	TAILQ_FOREACH_SAFE(blob, &bs->blobs, link, blob_tmp) {
2906 		TAILQ_REMOVE(&bs->blobs, blob, link);
2907 		_spdk_blob_free(blob);
2908 	}
2909 
2910 	pthread_mutex_destroy(&bs->used_clusters_mutex);
2911 
2912 	spdk_bit_array_free(&bs->used_blobids);
2913 	spdk_bit_array_free(&bs->used_md_pages);
2914 	spdk_bit_array_free(&bs->used_clusters);
2915 	/*
2916 	 * If this function is called for any reason except a successful unload,
2917 	 * the unload_cpl type will be NONE and this will be a nop.
2918 	 */
2919 	spdk_bs_call_cpl(&bs->unload_cpl, bs->unload_err);
2920 
2921 	free(bs);
2922 }
2923 
2924 static int
2925 _spdk_bs_blob_list_add(struct spdk_blob *blob)
2926 {
2927 	spdk_blob_id snapshot_id;
2928 	struct spdk_blob_list *snapshot_entry = NULL;
2929 	struct spdk_blob_list *clone_entry = NULL;
2930 
2931 	assert(blob != NULL);
2932 
2933 	snapshot_id = blob->parent_id;
2934 	if (snapshot_id == SPDK_BLOBID_INVALID) {
2935 		return 0;
2936 	}
2937 
2938 	snapshot_entry = _spdk_bs_get_snapshot_entry(blob->bs, snapshot_id);
2939 	if (snapshot_entry == NULL) {
2940 		/* Snapshot not found */
2941 		snapshot_entry = calloc(1, sizeof(struct spdk_blob_list));
2942 		if (snapshot_entry == NULL) {
2943 			return -ENOMEM;
2944 		}
2945 		snapshot_entry->id = snapshot_id;
2946 		TAILQ_INIT(&snapshot_entry->clones);
2947 		TAILQ_INSERT_TAIL(&blob->bs->snapshots, snapshot_entry, link);
2948 	} else {
2949 		TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) {
2950 			if (clone_entry->id == blob->id) {
2951 				break;
2952 			}
2953 		}
2954 	}
2955 
2956 	if (clone_entry == NULL) {
2957 		/* Clone not found */
2958 		clone_entry = calloc(1, sizeof(struct spdk_blob_list));
2959 		if (clone_entry == NULL) {
2960 			return -ENOMEM;
2961 		}
2962 		clone_entry->id = blob->id;
2963 		TAILQ_INIT(&clone_entry->clones);
2964 		TAILQ_INSERT_TAIL(&snapshot_entry->clones, clone_entry, link);
2965 		snapshot_entry->clone_count++;
2966 	}
2967 
2968 	return 0;
2969 }
2970 
2971 static void
2972 _spdk_bs_blob_list_remove(struct spdk_blob *blob)
2973 {
2974 	struct spdk_blob_list *snapshot_entry = NULL;
2975 	struct spdk_blob_list *clone_entry = NULL;
2976 
2977 	_spdk_blob_get_snapshot_and_clone_entries(blob, &snapshot_entry, &clone_entry);
2978 
2979 	if (snapshot_entry == NULL) {
2980 		return;
2981 	}
2982 
2983 	blob->parent_id = SPDK_BLOBID_INVALID;
2984 	TAILQ_REMOVE(&snapshot_entry->clones, clone_entry, link);
2985 	free(clone_entry);
2986 
2987 	snapshot_entry->clone_count--;
2988 }
2989 
2990 static int
2991 _spdk_bs_blob_list_free(struct spdk_blob_store *bs)
2992 {
2993 	struct spdk_blob_list *snapshot_entry;
2994 	struct spdk_blob_list *snapshot_entry_tmp;
2995 	struct spdk_blob_list *clone_entry;
2996 	struct spdk_blob_list *clone_entry_tmp;
2997 
2998 	TAILQ_FOREACH_SAFE(snapshot_entry, &bs->snapshots, link, snapshot_entry_tmp) {
2999 		TAILQ_FOREACH_SAFE(clone_entry, &snapshot_entry->clones, link, clone_entry_tmp) {
3000 			TAILQ_REMOVE(&snapshot_entry->clones, clone_entry, link);
3001 			free(clone_entry);
3002 		}
3003 		TAILQ_REMOVE(&bs->snapshots, snapshot_entry, link);
3004 		free(snapshot_entry);
3005 	}
3006 
3007 	return 0;
3008 }
3009 
3010 static void
3011 _spdk_bs_free(struct spdk_blob_store *bs)
3012 {
3013 	_spdk_bs_blob_list_free(bs);
3014 
3015 	spdk_bs_unregister_md_thread(bs);
3016 	spdk_io_device_unregister(bs, _spdk_bs_dev_destroy);
3017 }
3018 
3019 void
3020 spdk_bs_opts_init(struct spdk_bs_opts *opts)
3021 {
3022 	opts->cluster_sz = SPDK_BLOB_OPTS_CLUSTER_SZ;
3023 	opts->num_md_pages = SPDK_BLOB_OPTS_NUM_MD_PAGES;
3024 	opts->max_md_ops = SPDK_BLOB_OPTS_MAX_MD_OPS;
3025 	opts->max_channel_ops = SPDK_BLOB_OPTS_DEFAULT_CHANNEL_OPS;
3026 	opts->clear_method = BS_CLEAR_WITH_UNMAP;
3027 	memset(&opts->bstype, 0, sizeof(opts->bstype));
3028 	opts->iter_cb_fn = NULL;
3029 	opts->iter_cb_arg = NULL;
3030 }
3031 
3032 static int
3033 _spdk_bs_opts_verify(struct spdk_bs_opts *opts)
3034 {
3035 	if (opts->cluster_sz == 0 || opts->num_md_pages == 0 || opts->max_md_ops == 0 ||
3036 	    opts->max_channel_ops == 0) {
3037 		SPDK_ERRLOG("Blobstore options cannot be set to 0\n");
3038 		return -1;
3039 	}
3040 
3041 	return 0;
3042 }
3043 
3044 static int
3045 _spdk_bs_alloc(struct spdk_bs_dev *dev, struct spdk_bs_opts *opts, struct spdk_blob_store **_bs)
3046 {
3047 	struct spdk_blob_store	*bs;
3048 	uint64_t dev_size;
3049 	int rc;
3050 
3051 	dev_size = dev->blocklen * dev->blockcnt;
3052 	if (dev_size < opts->cluster_sz) {
3053 		/* Device size cannot be smaller than cluster size of blobstore */
3054 		SPDK_INFOLOG(SPDK_LOG_BLOB, "Device size %" PRIu64 " is smaller than cluster size %" PRIu32 "\n",
3055 			     dev_size, opts->cluster_sz);
3056 		return -ENOSPC;
3057 	}
3058 	if (opts->cluster_sz < SPDK_BS_PAGE_SIZE) {
3059 		/* Cluster size cannot be smaller than page size */
3060 		SPDK_ERRLOG("Cluster size %" PRIu32 " is smaller than page size %d\n",
3061 			    opts->cluster_sz, SPDK_BS_PAGE_SIZE);
3062 		return -EINVAL;
3063 	}
3064 	bs = calloc(1, sizeof(struct spdk_blob_store));
3065 	if (!bs) {
3066 		return -ENOMEM;
3067 	}
3068 
3069 	TAILQ_INIT(&bs->blobs);
3070 	TAILQ_INIT(&bs->snapshots);
3071 	bs->dev = dev;
3072 	bs->md_thread = spdk_get_thread();
3073 	assert(bs->md_thread != NULL);
3074 
3075 	/*
3076 	 * Do not use _spdk_bs_lba_to_cluster() here since blockcnt may not be an
3077 	 *  even multiple of the cluster size.
3078 	 */
3079 	bs->cluster_sz = opts->cluster_sz;
3080 	bs->total_clusters = dev->blockcnt / (bs->cluster_sz / dev->blocklen);
3081 	bs->pages_per_cluster = bs->cluster_sz / SPDK_BS_PAGE_SIZE;
3082 	bs->num_free_clusters = bs->total_clusters;
3083 	bs->used_clusters = spdk_bit_array_create(bs->total_clusters);
3084 	bs->io_unit_size = dev->blocklen;
3085 	if (bs->used_clusters == NULL) {
3086 		free(bs);
3087 		return -ENOMEM;
3088 	}
3089 
3090 	bs->max_channel_ops = opts->max_channel_ops;
3091 	bs->super_blob = SPDK_BLOBID_INVALID;
3092 	memcpy(&bs->bstype, &opts->bstype, sizeof(opts->bstype));
3093 
3094 	/* The metadata is assumed to be at least 1 page */
3095 	bs->used_md_pages = spdk_bit_array_create(1);
3096 	bs->used_blobids = spdk_bit_array_create(0);
3097 
3098 	pthread_mutex_init(&bs->used_clusters_mutex, NULL);
3099 
3100 	spdk_io_device_register(bs, _spdk_bs_channel_create, _spdk_bs_channel_destroy,
3101 				sizeof(struct spdk_bs_channel), "blobstore");
3102 	rc = spdk_bs_register_md_thread(bs);
3103 	if (rc == -1) {
3104 		spdk_io_device_unregister(bs, NULL);
3105 		pthread_mutex_destroy(&bs->used_clusters_mutex);
3106 		spdk_bit_array_free(&bs->used_blobids);
3107 		spdk_bit_array_free(&bs->used_md_pages);
3108 		spdk_bit_array_free(&bs->used_clusters);
3109 		free(bs);
3110 		/* FIXME: this is a lie but don't know how to get a proper error code here */
3111 		return -ENOMEM;
3112 	}
3113 
3114 	*_bs = bs;
3115 	return 0;
3116 }
3117 
3118 /* START spdk_bs_load, spdk_bs_load_ctx will used for both load and unload. */
3119 
3120 struct spdk_bs_load_ctx {
3121 	struct spdk_blob_store		*bs;
3122 	struct spdk_bs_super_block	*super;
3123 
3124 	struct spdk_bs_md_mask		*mask;
3125 	bool				in_page_chain;
3126 	uint32_t			page_index;
3127 	uint32_t			cur_page;
3128 	struct spdk_blob_md_page	*page;
3129 
3130 	uint64_t			num_extent_pages;
3131 	uint32_t			*extent_pages;
3132 
3133 	spdk_bs_sequence_t			*seq;
3134 	spdk_blob_op_with_handle_complete	iter_cb_fn;
3135 	void					*iter_cb_arg;
3136 	struct spdk_blob			*blob;
3137 	spdk_blob_id				blobid;
3138 };
3139 
3140 static void
3141 _spdk_bs_load_ctx_fail(struct spdk_bs_load_ctx *ctx, int bserrno)
3142 {
3143 	assert(bserrno != 0);
3144 
3145 	spdk_free(ctx->super);
3146 	spdk_bs_sequence_finish(ctx->seq, bserrno);
3147 	_spdk_bs_free(ctx->bs);
3148 	free(ctx);
3149 }
3150 
3151 static void
3152 _spdk_bs_set_mask(struct spdk_bit_array *array, struct spdk_bs_md_mask *mask)
3153 {
3154 	uint32_t i = 0;
3155 
3156 	while (true) {
3157 		i = spdk_bit_array_find_first_set(array, i);
3158 		if (i >= mask->length) {
3159 			break;
3160 		}
3161 		mask->mask[i / 8] |= 1U << (i % 8);
3162 		i++;
3163 	}
3164 }
3165 
3166 static int
3167 _spdk_bs_load_mask(struct spdk_bit_array **array_ptr, struct spdk_bs_md_mask *mask)
3168 {
3169 	struct spdk_bit_array *array;
3170 	uint32_t i;
3171 
3172 	if (spdk_bit_array_resize(array_ptr, mask->length) < 0) {
3173 		return -ENOMEM;
3174 	}
3175 
3176 	array = *array_ptr;
3177 	for (i = 0; i < mask->length; i++) {
3178 		if (mask->mask[i / 8] & (1U << (i % 8))) {
3179 			spdk_bit_array_set(array, i);
3180 		}
3181 	}
3182 
3183 	return 0;
3184 }
3185 
3186 static void
3187 _spdk_bs_write_super(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs,
3188 		     struct spdk_bs_super_block *super, spdk_bs_sequence_cpl cb_fn, void *cb_arg)
3189 {
3190 	/* Update the values in the super block */
3191 	super->super_blob = bs->super_blob;
3192 	memcpy(&super->bstype, &bs->bstype, sizeof(bs->bstype));
3193 	super->crc = _spdk_blob_md_page_calc_crc(super);
3194 	spdk_bs_sequence_write_dev(seq, super, _spdk_bs_page_to_lba(bs, 0),
3195 				   _spdk_bs_byte_to_lba(bs, sizeof(*super)),
3196 				   cb_fn, cb_arg);
3197 }
3198 
3199 static void
3200 _spdk_bs_write_used_clusters(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn)
3201 {
3202 	struct spdk_bs_load_ctx	*ctx = arg;
3203 	uint64_t	mask_size, lba, lba_count;
3204 
3205 	/* Write out the used clusters mask */
3206 	mask_size = ctx->super->used_cluster_mask_len * SPDK_BS_PAGE_SIZE;
3207 	ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL,
3208 				 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
3209 	if (!ctx->mask) {
3210 		_spdk_bs_load_ctx_fail(ctx, -ENOMEM);
3211 		return;
3212 	}
3213 
3214 	ctx->mask->type = SPDK_MD_MASK_TYPE_USED_CLUSTERS;
3215 	ctx->mask->length = ctx->bs->total_clusters;
3216 	assert(ctx->mask->length == spdk_bit_array_capacity(ctx->bs->used_clusters));
3217 
3218 	_spdk_bs_set_mask(ctx->bs->used_clusters, ctx->mask);
3219 	lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start);
3220 	lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len);
3221 	spdk_bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg);
3222 }
3223 
3224 static void
3225 _spdk_bs_write_used_md(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn)
3226 {
3227 	struct spdk_bs_load_ctx	*ctx = arg;
3228 	uint64_t	mask_size, lba, lba_count;
3229 
3230 	mask_size = ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE;
3231 	ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL,
3232 				 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
3233 	if (!ctx->mask) {
3234 		_spdk_bs_load_ctx_fail(ctx, -ENOMEM);
3235 		return;
3236 	}
3237 
3238 	ctx->mask->type = SPDK_MD_MASK_TYPE_USED_PAGES;
3239 	ctx->mask->length = ctx->super->md_len;
3240 	assert(ctx->mask->length == spdk_bit_array_capacity(ctx->bs->used_md_pages));
3241 
3242 	_spdk_bs_set_mask(ctx->bs->used_md_pages, ctx->mask);
3243 	lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_start);
3244 	lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_len);
3245 	spdk_bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg);
3246 }
3247 
3248 static void
3249 _spdk_bs_write_used_blobids(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn)
3250 {
3251 	struct spdk_bs_load_ctx	*ctx = arg;
3252 	uint64_t	mask_size, lba, lba_count;
3253 
3254 	if (ctx->super->used_blobid_mask_len == 0) {
3255 		/*
3256 		 * This is a pre-v3 on-disk format where the blobid mask does not get
3257 		 *  written to disk.
3258 		 */
3259 		cb_fn(seq, arg, 0);
3260 		return;
3261 	}
3262 
3263 	mask_size = ctx->super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE;
3264 	ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_SOCKET_ID_ANY,
3265 				 SPDK_MALLOC_DMA);
3266 	if (!ctx->mask) {
3267 		_spdk_bs_load_ctx_fail(ctx, -ENOMEM);
3268 		return;
3269 	}
3270 
3271 	ctx->mask->type = SPDK_MD_MASK_TYPE_USED_BLOBIDS;
3272 	ctx->mask->length = ctx->super->md_len;
3273 	assert(ctx->mask->length == spdk_bit_array_capacity(ctx->bs->used_blobids));
3274 
3275 	_spdk_bs_set_mask(ctx->bs->used_blobids, ctx->mask);
3276 	lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_start);
3277 	lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_len);
3278 	spdk_bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg);
3279 }
3280 
3281 static void
3282 _spdk_blob_set_thin_provision(struct spdk_blob *blob)
3283 {
3284 	_spdk_blob_verify_md_op(blob);
3285 	blob->invalid_flags |= SPDK_BLOB_THIN_PROV;
3286 	blob->state = SPDK_BLOB_STATE_DIRTY;
3287 }
3288 
3289 static void
3290 _spdk_blob_set_clear_method(struct spdk_blob *blob, enum blob_clear_method clear_method)
3291 {
3292 	_spdk_blob_verify_md_op(blob);
3293 	blob->clear_method = clear_method;
3294 	blob->md_ro_flags |= (clear_method << SPDK_BLOB_CLEAR_METHOD_SHIFT);
3295 	blob->state = SPDK_BLOB_STATE_DIRTY;
3296 }
3297 
3298 static void _spdk_bs_load_iter(void *arg, struct spdk_blob *blob, int bserrno);
3299 
3300 static void
3301 _spdk_bs_delete_corrupted_blob_cpl(void *cb_arg, int bserrno)
3302 {
3303 	struct spdk_bs_load_ctx *ctx = cb_arg;
3304 	spdk_blob_id id;
3305 	int64_t page_num;
3306 
3307 	/* Iterate to next blob (we can't use spdk_bs_iter_next function as our
3308 	 * last blob has been removed */
3309 	page_num = _spdk_bs_blobid_to_page(ctx->blobid);
3310 	page_num++;
3311 	page_num = spdk_bit_array_find_first_set(ctx->bs->used_blobids, page_num);
3312 	if (page_num >= spdk_bit_array_capacity(ctx->bs->used_blobids)) {
3313 		_spdk_bs_load_iter(ctx, NULL, -ENOENT);
3314 		return;
3315 	}
3316 
3317 	id = _spdk_bs_page_to_blobid(page_num);
3318 
3319 	spdk_bs_open_blob(ctx->bs, id, _spdk_bs_load_iter, ctx);
3320 }
3321 
3322 static void
3323 _spdk_bs_delete_corrupted_close_cb(void *cb_arg, int bserrno)
3324 {
3325 	struct spdk_bs_load_ctx *ctx = cb_arg;
3326 
3327 	if (bserrno != 0) {
3328 		SPDK_ERRLOG("Failed to close corrupted blob\n");
3329 		spdk_bs_iter_next(ctx->bs, ctx->blob, _spdk_bs_load_iter, ctx);
3330 		return;
3331 	}
3332 
3333 	spdk_bs_delete_blob(ctx->bs, ctx->blobid, _spdk_bs_delete_corrupted_blob_cpl, ctx);
3334 }
3335 
3336 static void
3337 _spdk_bs_delete_corrupted_blob(void *cb_arg, int bserrno)
3338 {
3339 	struct spdk_bs_load_ctx *ctx = cb_arg;
3340 	uint64_t i;
3341 
3342 	if (bserrno != 0) {
3343 		SPDK_ERRLOG("Failed to close clone of a corrupted blob\n");
3344 		spdk_bs_iter_next(ctx->bs, ctx->blob, _spdk_bs_load_iter, ctx);
3345 		return;
3346 	}
3347 
3348 	/* Snapshot and clone have the same copy of cluster map and extent pages
3349 	 * at this point. Let's clear both for snpashot now,
3350 	 * so that it won't be cleared for clone later when we remove snapshot.
3351 	 * Also set thin provision to pass data corruption check */
3352 	for (i = 0; i < ctx->blob->active.num_clusters; i++) {
3353 		ctx->blob->active.clusters[i] = 0;
3354 	}
3355 	for (i = 0; i < ctx->blob->active.num_extent_pages; i++) {
3356 		ctx->blob->active.extent_pages[i] = 0;
3357 	}
3358 
3359 	ctx->blob->md_ro = false;
3360 
3361 	_spdk_blob_set_thin_provision(ctx->blob);
3362 
3363 	ctx->blobid = ctx->blob->id;
3364 
3365 	spdk_blob_close(ctx->blob, _spdk_bs_delete_corrupted_close_cb, ctx);
3366 }
3367 
3368 static void
3369 _spdk_bs_update_corrupted_blob(void *cb_arg, int bserrno)
3370 {
3371 	struct spdk_bs_load_ctx *ctx = cb_arg;
3372 
3373 	if (bserrno != 0) {
3374 		SPDK_ERRLOG("Failed to close clone of a corrupted blob\n");
3375 		spdk_bs_iter_next(ctx->bs, ctx->blob, _spdk_bs_load_iter, ctx);
3376 		return;
3377 	}
3378 
3379 	ctx->blob->md_ro = false;
3380 	_spdk_blob_remove_xattr(ctx->blob, SNAPSHOT_PENDING_REMOVAL, true);
3381 	_spdk_blob_remove_xattr(ctx->blob, SNAPSHOT_IN_PROGRESS, true);
3382 	spdk_blob_set_read_only(ctx->blob);
3383 
3384 	if (ctx->iter_cb_fn) {
3385 		ctx->iter_cb_fn(ctx->iter_cb_arg, ctx->blob, 0);
3386 	}
3387 	_spdk_bs_blob_list_add(ctx->blob);
3388 
3389 	spdk_bs_iter_next(ctx->bs, ctx->blob, _spdk_bs_load_iter, ctx);
3390 }
3391 
3392 static void
3393 _spdk_bs_examine_clone(void *cb_arg, struct spdk_blob *blob, int bserrno)
3394 {
3395 	struct spdk_bs_load_ctx *ctx = cb_arg;
3396 
3397 	if (bserrno != 0) {
3398 		SPDK_ERRLOG("Failed to open clone of a corrupted blob\n");
3399 		spdk_bs_iter_next(ctx->bs, ctx->blob, _spdk_bs_load_iter, ctx);
3400 		return;
3401 	}
3402 
3403 	if (blob->parent_id == ctx->blob->id) {
3404 		/* Power failure occured before updating clone (snapshot delete case)
3405 		 * or after updating clone (creating snapshot case) - keep snapshot */
3406 		spdk_blob_close(blob, _spdk_bs_update_corrupted_blob, ctx);
3407 	} else {
3408 		/* Power failure occured after updating clone (snapshot delete case)
3409 		 * or before updating clone (creating snapshot case) - remove snapshot */
3410 		spdk_blob_close(blob, _spdk_bs_delete_corrupted_blob, ctx);
3411 	}
3412 }
3413 
3414 static void
3415 _spdk_bs_load_iter(void *arg, struct spdk_blob *blob, int bserrno)
3416 {
3417 	struct spdk_bs_load_ctx *ctx = arg;
3418 	const void *value;
3419 	size_t len;
3420 	int rc = 0;
3421 
3422 	if (bserrno == 0) {
3423 		/* Examine blob if it is corrupted after power failure. Fix
3424 		 * the ones that can be fixed and remove any other corrupted
3425 		 * ones. If it is not corrupted just process it */
3426 		rc = _spdk_blob_get_xattr_value(blob, SNAPSHOT_PENDING_REMOVAL, &value, &len, true);
3427 		if (rc != 0) {
3428 			rc = _spdk_blob_get_xattr_value(blob, SNAPSHOT_IN_PROGRESS, &value, &len, true);
3429 			if (rc != 0) {
3430 				/* Not corrupted - process it and continue with iterating through blobs */
3431 				if (ctx->iter_cb_fn) {
3432 					ctx->iter_cb_fn(ctx->iter_cb_arg, blob, 0);
3433 				}
3434 				_spdk_bs_blob_list_add(blob);
3435 				spdk_bs_iter_next(ctx->bs, blob, _spdk_bs_load_iter, ctx);
3436 				return;
3437 			}
3438 
3439 		}
3440 
3441 		assert(len == sizeof(spdk_blob_id));
3442 
3443 		ctx->blob = blob;
3444 
3445 		/* Open clone to check if we are able to fix this blob or should we remove it */
3446 		spdk_bs_open_blob(ctx->bs, *(spdk_blob_id *)value, _spdk_bs_examine_clone, ctx);
3447 		return;
3448 	} else if (bserrno == -ENOENT) {
3449 		bserrno = 0;
3450 	} else {
3451 		/*
3452 		 * This case needs to be looked at further.  Same problem
3453 		 *  exists with applications that rely on explicit blob
3454 		 *  iteration.  We should just skip the blob that failed
3455 		 *  to load and continue on to the next one.
3456 		 */
3457 		SPDK_ERRLOG("Error in iterating blobs\n");
3458 	}
3459 
3460 	ctx->iter_cb_fn = NULL;
3461 
3462 	spdk_free(ctx->super);
3463 	spdk_free(ctx->mask);
3464 	spdk_bs_sequence_finish(ctx->seq, bserrno);
3465 	free(ctx);
3466 }
3467 
3468 static void
3469 _spdk_bs_load_complete(struct spdk_bs_load_ctx *ctx)
3470 {
3471 	spdk_bs_iter_first(ctx->bs, _spdk_bs_load_iter, ctx);
3472 }
3473 
3474 static void
3475 _spdk_bs_load_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
3476 {
3477 	struct spdk_bs_load_ctx *ctx = cb_arg;
3478 	int rc;
3479 
3480 	/* The type must be correct */
3481 	assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_BLOBIDS);
3482 
3483 	/* The length of the mask (in bits) must not be greater than
3484 	 * the length of the buffer (converted to bits) */
3485 	assert(ctx->mask->length <= (ctx->super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE * 8));
3486 
3487 	/* The length of the mask must be exactly equal to the size
3488 	 * (in pages) of the metadata region */
3489 	assert(ctx->mask->length == ctx->super->md_len);
3490 
3491 	rc = _spdk_bs_load_mask(&ctx->bs->used_blobids, ctx->mask);
3492 	if (rc < 0) {
3493 		spdk_free(ctx->mask);
3494 		_spdk_bs_load_ctx_fail(ctx, rc);
3495 		return;
3496 	}
3497 
3498 	_spdk_bs_load_complete(ctx);
3499 }
3500 
3501 static void
3502 _spdk_bs_load_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
3503 {
3504 	struct spdk_bs_load_ctx *ctx = cb_arg;
3505 	uint64_t		lba, lba_count, mask_size;
3506 	int			rc;
3507 
3508 	if (bserrno != 0) {
3509 		_spdk_bs_load_ctx_fail(ctx, bserrno);
3510 		return;
3511 	}
3512 
3513 	/* The type must be correct */
3514 	assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_CLUSTERS);
3515 	/* The length of the mask (in bits) must not be greater than the length of the buffer (converted to bits) */
3516 	assert(ctx->mask->length <= (ctx->super->used_cluster_mask_len * sizeof(
3517 					     struct spdk_blob_md_page) * 8));
3518 	/* The length of the mask must be exactly equal to the total number of clusters */
3519 	assert(ctx->mask->length == ctx->bs->total_clusters);
3520 
3521 	rc = _spdk_bs_load_mask(&ctx->bs->used_clusters, ctx->mask);
3522 	if (rc < 0) {
3523 		spdk_free(ctx->mask);
3524 		_spdk_bs_load_ctx_fail(ctx, rc);
3525 		return;
3526 	}
3527 
3528 	ctx->bs->num_free_clusters = spdk_bit_array_count_clear(ctx->bs->used_clusters);
3529 	assert(ctx->bs->num_free_clusters <= ctx->bs->total_clusters);
3530 
3531 	spdk_free(ctx->mask);
3532 
3533 	/* Read the used blobids mask */
3534 	mask_size = ctx->super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE;
3535 	ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_SOCKET_ID_ANY,
3536 				 SPDK_MALLOC_DMA);
3537 	if (!ctx->mask) {
3538 		_spdk_bs_load_ctx_fail(ctx, -ENOMEM);
3539 		return;
3540 	}
3541 	lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_start);
3542 	lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_len);
3543 	spdk_bs_sequence_read_dev(seq, ctx->mask, lba, lba_count,
3544 				  _spdk_bs_load_used_blobids_cpl, ctx);
3545 }
3546 
3547 static void
3548 _spdk_bs_load_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
3549 {
3550 	struct spdk_bs_load_ctx *ctx = cb_arg;
3551 	uint64_t		lba, lba_count, mask_size;
3552 	int			rc;
3553 
3554 	if (bserrno != 0) {
3555 		_spdk_bs_load_ctx_fail(ctx, bserrno);
3556 		return;
3557 	}
3558 
3559 	/* The type must be correct */
3560 	assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_PAGES);
3561 	/* The length of the mask (in bits) must not be greater than the length of the buffer (converted to bits) */
3562 	assert(ctx->mask->length <= (ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE *
3563 				     8));
3564 	/* The length of the mask must be exactly equal to the size (in pages) of the metadata region */
3565 	assert(ctx->mask->length == ctx->super->md_len);
3566 
3567 	rc = _spdk_bs_load_mask(&ctx->bs->used_md_pages, ctx->mask);
3568 	if (rc < 0) {
3569 		spdk_free(ctx->mask);
3570 		_spdk_bs_load_ctx_fail(ctx, rc);
3571 		return;
3572 	}
3573 
3574 	spdk_free(ctx->mask);
3575 
3576 	/* Read the used clusters mask */
3577 	mask_size = ctx->super->used_cluster_mask_len * SPDK_BS_PAGE_SIZE;
3578 	ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_SOCKET_ID_ANY,
3579 				 SPDK_MALLOC_DMA);
3580 	if (!ctx->mask) {
3581 		_spdk_bs_load_ctx_fail(ctx, -ENOMEM);
3582 		return;
3583 	}
3584 	lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start);
3585 	lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len);
3586 	spdk_bs_sequence_read_dev(seq, ctx->mask, lba, lba_count,
3587 				  _spdk_bs_load_used_clusters_cpl, ctx);
3588 }
3589 
3590 static void
3591 _spdk_bs_load_read_used_pages(struct spdk_bs_load_ctx *ctx)
3592 {
3593 	uint64_t lba, lba_count, mask_size;
3594 
3595 	/* Read the used pages mask */
3596 	mask_size = ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE;
3597 	ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL,
3598 				 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
3599 	if (!ctx->mask) {
3600 		_spdk_bs_load_ctx_fail(ctx, -ENOMEM);
3601 		return;
3602 	}
3603 
3604 	lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_start);
3605 	lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_len);
3606 	spdk_bs_sequence_read_dev(ctx->seq, ctx->mask, lba, lba_count,
3607 				  _spdk_bs_load_used_pages_cpl, ctx);
3608 }
3609 
3610 static int
3611 _spdk_bs_load_replay_md_parse_page(struct spdk_bs_load_ctx *ctx)
3612 {
3613 	struct spdk_blob_store *bs = ctx->bs;
3614 	struct spdk_blob_md_page *page = ctx->page;
3615 	struct spdk_blob_md_descriptor *desc;
3616 	size_t	cur_desc = 0;
3617 
3618 	desc = (struct spdk_blob_md_descriptor *)page->descriptors;
3619 	while (cur_desc < sizeof(page->descriptors)) {
3620 		if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) {
3621 			if (desc->length == 0) {
3622 				/* If padding and length are 0, this terminates the page */
3623 				break;
3624 			}
3625 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE) {
3626 			struct spdk_blob_md_descriptor_extent_rle	*desc_extent_rle;
3627 			unsigned int				i, j;
3628 			unsigned int				cluster_count = 0;
3629 			uint32_t				cluster_idx;
3630 
3631 			desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)desc;
3632 
3633 			for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) {
3634 				for (j = 0; j < desc_extent_rle->extents[i].length; j++) {
3635 					cluster_idx = desc_extent_rle->extents[i].cluster_idx;
3636 					/*
3637 					 * cluster_idx = 0 means an unallocated cluster - don't mark that
3638 					 * in the used cluster map.
3639 					 */
3640 					if (cluster_idx != 0) {
3641 						spdk_bit_array_set(bs->used_clusters, cluster_idx + j);
3642 						if (bs->num_free_clusters == 0) {
3643 							return -ENOSPC;
3644 						}
3645 						bs->num_free_clusters--;
3646 					}
3647 					cluster_count++;
3648 				}
3649 			}
3650 			if (cluster_count == 0) {
3651 				return -EINVAL;
3652 			}
3653 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) {
3654 			struct spdk_blob_md_descriptor_extent_page	*desc_extent;
3655 			uint32_t					i;
3656 			uint32_t					cluster_count = 0;
3657 			uint32_t					cluster_idx;
3658 			size_t						cluster_idx_length;
3659 
3660 			desc_extent = (struct spdk_blob_md_descriptor_extent_page *)desc;
3661 			cluster_idx_length = desc_extent->length - sizeof(desc_extent->start_cluster_idx);
3662 
3663 			if (desc_extent->length <= sizeof(desc_extent->start_cluster_idx) ||
3664 			    (cluster_idx_length % sizeof(desc_extent->cluster_idx[0]) != 0)) {
3665 				return -EINVAL;
3666 			}
3667 
3668 			for (i = 0; i < cluster_idx_length / sizeof(desc_extent->cluster_idx[0]); i++) {
3669 				cluster_idx = desc_extent->cluster_idx[i];
3670 				/*
3671 				 * cluster_idx = 0 means an unallocated cluster - don't mark that
3672 				 * in the used cluster map.
3673 				 */
3674 				if (cluster_idx != 0) {
3675 					if (cluster_idx < desc_extent->start_cluster_idx &&
3676 					    cluster_idx >= desc_extent->start_cluster_idx + cluster_count) {
3677 						return -EINVAL;
3678 					}
3679 					spdk_bit_array_set(bs->used_clusters, cluster_idx);
3680 					if (bs->num_free_clusters == 0) {
3681 						return -ENOSPC;
3682 					}
3683 					bs->num_free_clusters--;
3684 				}
3685 				cluster_count++;
3686 			}
3687 
3688 			if (cluster_count == 0) {
3689 				return -EINVAL;
3690 			}
3691 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) {
3692 			/* Skip this item */
3693 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) {
3694 			/* Skip this item */
3695 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) {
3696 			/* Skip this item */
3697 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE) {
3698 			struct spdk_blob_md_descriptor_extent_table *desc_extent_table;
3699 			uint32_t num_extent_pages = ctx->num_extent_pages;
3700 			uint32_t i;
3701 			size_t extent_pages_length;
3702 			void *tmp;
3703 
3704 			desc_extent_table = (struct spdk_blob_md_descriptor_extent_table *)desc;
3705 			extent_pages_length = desc_extent_table->length - sizeof(desc_extent_table->num_clusters);
3706 
3707 			if (desc_extent_table->length == 0 ||
3708 			    (extent_pages_length % sizeof(desc_extent_table->extent_page[0]) != 0)) {
3709 				return -EINVAL;
3710 			}
3711 
3712 			for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) {
3713 				if (desc_extent_table->extent_page[i].page_idx != 0) {
3714 					if (desc_extent_table->extent_page[i].num_pages != 1) {
3715 						return -EINVAL;
3716 					}
3717 					num_extent_pages += 1;
3718 				}
3719 			}
3720 
3721 			if (num_extent_pages > 0) {
3722 				tmp = realloc(ctx->extent_pages, num_extent_pages * sizeof(uint32_t));
3723 				if (tmp == NULL) {
3724 					return -ENOMEM;
3725 				}
3726 				ctx->extent_pages = tmp;
3727 
3728 				/* Extent table entries contain md page numbers for extent pages.
3729 				 * Zeroes represent unallocated extent pages, those are run-length-encoded.
3730 				 */
3731 				for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) {
3732 					if (desc_extent_table->extent_page[i].page_idx != 0) {
3733 						ctx->extent_pages[ctx->num_extent_pages] = desc_extent_table->extent_page[i].page_idx;
3734 						ctx->num_extent_pages += 1;
3735 					}
3736 				}
3737 			}
3738 		} else {
3739 			/* Error */
3740 			return -EINVAL;
3741 		}
3742 		/* Advance to the next descriptor */
3743 		cur_desc += sizeof(*desc) + desc->length;
3744 		if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) {
3745 			break;
3746 		}
3747 		desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc);
3748 	}
3749 	return 0;
3750 }
3751 
3752 static bool _spdk_bs_load_cur_extent_page_valid(struct spdk_blob_md_page *page)
3753 {
3754 	uint32_t crc;
3755 	struct spdk_blob_md_descriptor *desc = (struct spdk_blob_md_descriptor *)page->descriptors;
3756 	size_t desc_len;
3757 
3758 	crc = _spdk_blob_md_page_calc_crc(page);
3759 	if (crc != page->crc) {
3760 		return false;
3761 	}
3762 
3763 	/* Extent page should always be of sequence num 0. */
3764 	if (page->sequence_num != 0) {
3765 		return false;
3766 	}
3767 
3768 	/* Descriptor type must be EXTENT_PAGE. */
3769 	if (desc->type != SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) {
3770 		return false;
3771 	}
3772 
3773 	/* Descriptor length cannot exceed the page. */
3774 	desc_len = sizeof(*desc) + desc->length;
3775 	if (desc_len > sizeof(page->descriptors)) {
3776 		return false;
3777 	}
3778 
3779 	/* It has to be the only descriptor in the page. */
3780 	if (desc_len + sizeof(*desc) <= sizeof(page->descriptors)) {
3781 		desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + desc_len);
3782 		if (desc->length != 0) {
3783 			return false;
3784 		}
3785 	}
3786 
3787 	return true;
3788 }
3789 
3790 static bool _spdk_bs_load_cur_md_page_valid(struct spdk_bs_load_ctx *ctx)
3791 {
3792 	uint32_t crc;
3793 
3794 	crc = _spdk_blob_md_page_calc_crc(ctx->page);
3795 	if (crc != ctx->page->crc) {
3796 		return false;
3797 	}
3798 
3799 	/* First page of a sequence should match the blobid. */
3800 	if (ctx->page->sequence_num == 0 &&
3801 	    _spdk_bs_page_to_blobid(ctx->cur_page) != ctx->page->id) {
3802 		return false;
3803 	}
3804 	return true;
3805 }
3806 
3807 static void
3808 _spdk_bs_load_replay_cur_md_page(struct spdk_bs_load_ctx *ctx);
3809 
3810 static void
3811 _spdk_bs_load_write_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
3812 {
3813 	struct spdk_bs_load_ctx	*ctx = cb_arg;
3814 
3815 	if (bserrno != 0) {
3816 		_spdk_bs_load_ctx_fail(ctx, bserrno);
3817 		return;
3818 	}
3819 
3820 	_spdk_bs_load_complete(ctx);
3821 }
3822 
3823 static void
3824 _spdk_bs_load_write_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
3825 {
3826 	struct spdk_bs_load_ctx	*ctx = cb_arg;
3827 
3828 	spdk_free(ctx->mask);
3829 	ctx->mask = NULL;
3830 
3831 	if (bserrno != 0) {
3832 		_spdk_bs_load_ctx_fail(ctx, bserrno);
3833 		return;
3834 	}
3835 
3836 	_spdk_bs_write_used_clusters(seq, ctx, _spdk_bs_load_write_used_clusters_cpl);
3837 }
3838 
3839 static void
3840 _spdk_bs_load_write_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
3841 {
3842 	struct spdk_bs_load_ctx	*ctx = cb_arg;
3843 
3844 	spdk_free(ctx->mask);
3845 	ctx->mask = NULL;
3846 
3847 	if (bserrno != 0) {
3848 		_spdk_bs_load_ctx_fail(ctx, bserrno);
3849 		return;
3850 	}
3851 
3852 	_spdk_bs_write_used_blobids(seq, ctx, _spdk_bs_load_write_used_blobids_cpl);
3853 }
3854 
3855 static void
3856 _spdk_bs_load_write_used_md(struct spdk_bs_load_ctx *ctx)
3857 {
3858 	_spdk_bs_write_used_md(ctx->seq, ctx, _spdk_bs_load_write_used_pages_cpl);
3859 }
3860 
3861 static void
3862 _spdk_bs_load_replay_md_chain_cpl(struct spdk_bs_load_ctx *ctx)
3863 {
3864 	uint64_t num_md_clusters;
3865 	uint64_t i;
3866 
3867 	ctx->in_page_chain = false;
3868 
3869 	do {
3870 		ctx->page_index++;
3871 	} while (spdk_bit_array_get(ctx->bs->used_md_pages, ctx->page_index) == true);
3872 
3873 	if (ctx->page_index < ctx->super->md_len) {
3874 		ctx->cur_page = ctx->page_index;
3875 		_spdk_bs_load_replay_cur_md_page(ctx);
3876 	} else {
3877 		/* Claim all of the clusters used by the metadata */
3878 		num_md_clusters = spdk_divide_round_up(ctx->super->md_len, ctx->bs->pages_per_cluster);
3879 		for (i = 0; i < num_md_clusters; i++) {
3880 			_spdk_bs_claim_cluster(ctx->bs, i);
3881 		}
3882 		spdk_free(ctx->page);
3883 		_spdk_bs_load_write_used_md(ctx);
3884 	}
3885 }
3886 
3887 static void _spdk_bs_load_replay_extent_page(spdk_bs_sequence_t *seq, uint32_t page, void *cb_arg);
3888 
3889 static void
3890 _spdk_bs_load_replay_extent_page_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
3891 {
3892 	struct spdk_bs_load_ctx *ctx = cb_arg;
3893 	uint32_t page_num;
3894 
3895 	if (bserrno != 0) {
3896 		_spdk_bs_load_ctx_fail(ctx, bserrno);
3897 		return;
3898 	}
3899 
3900 	/* Extent pages are only read when present within in chain md.
3901 	 * Integrity of md is not right if that page was not a valid extent page. */
3902 	if (_spdk_bs_load_cur_extent_page_valid(ctx->page) != true) {
3903 		_spdk_bs_load_ctx_fail(ctx, -EILSEQ);
3904 		return;
3905 	}
3906 
3907 	page_num = ctx->extent_pages[ctx->num_extent_pages - 1];
3908 	spdk_bit_array_set(ctx->bs->used_md_pages, page_num);
3909 	if (_spdk_bs_load_replay_md_parse_page(ctx)) {
3910 		_spdk_bs_load_ctx_fail(ctx, -EILSEQ);
3911 		return;
3912 	}
3913 
3914 	ctx->num_extent_pages--;
3915 	if (ctx->num_extent_pages > 0) {
3916 		_spdk_bs_load_replay_extent_page(seq, ctx->extent_pages[ctx->num_extent_pages - 1], ctx);
3917 		return;
3918 	}
3919 
3920 	free(ctx->extent_pages);
3921 	ctx->extent_pages = NULL;
3922 
3923 	_spdk_bs_load_replay_md_chain_cpl(ctx);
3924 }
3925 
3926 static void
3927 _spdk_bs_load_replay_extent_page(spdk_bs_sequence_t *seq, uint32_t page, void *cb_arg)
3928 {
3929 	struct spdk_bs_load_ctx *ctx = cb_arg;
3930 	uint64_t lba;
3931 
3932 	assert(page < ctx->super->md_len);
3933 	lba = _spdk_bs_md_page_to_lba(ctx->bs, page);
3934 	spdk_bs_sequence_read_dev(seq, ctx->page, lba,
3935 				  _spdk_bs_byte_to_lba(ctx->bs, SPDK_BS_PAGE_SIZE),
3936 				  _spdk_bs_load_replay_extent_page_cpl, ctx);
3937 }
3938 
3939 static void
3940 _spdk_bs_load_replay_md_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
3941 {
3942 	struct spdk_bs_load_ctx *ctx = cb_arg;
3943 	uint32_t page_num;
3944 
3945 	if (bserrno != 0) {
3946 		_spdk_bs_load_ctx_fail(ctx, bserrno);
3947 		return;
3948 	}
3949 
3950 	page_num = ctx->cur_page;
3951 	if (_spdk_bs_load_cur_md_page_valid(ctx) == true) {
3952 		if (ctx->page->sequence_num == 0 || ctx->in_page_chain == true) {
3953 			_spdk_bs_claim_md_page(ctx->bs, page_num);
3954 			if (ctx->page->sequence_num == 0) {
3955 				spdk_bit_array_set(ctx->bs->used_blobids, page_num);
3956 			}
3957 			if (_spdk_bs_load_replay_md_parse_page(ctx)) {
3958 				_spdk_bs_load_ctx_fail(ctx, -EILSEQ);
3959 				return;
3960 			}
3961 			if (ctx->page->next != SPDK_INVALID_MD_PAGE) {
3962 				ctx->in_page_chain = true;
3963 				ctx->cur_page = ctx->page->next;
3964 				_spdk_bs_load_replay_cur_md_page(ctx);
3965 				return;
3966 			}
3967 			if (ctx->num_extent_pages != 0) {
3968 				/* Extent pages are read from last to first,
3969 				 * decreasing the num_extent_pages as they are read. */
3970 				_spdk_bs_load_replay_extent_page(seq, ctx->extent_pages[ctx->num_extent_pages - 1], ctx);
3971 				return;
3972 			}
3973 		}
3974 	}
3975 	_spdk_bs_load_replay_md_chain_cpl(ctx);
3976 }
3977 
3978 static void
3979 _spdk_bs_load_replay_cur_md_page(struct spdk_bs_load_ctx *ctx)
3980 {
3981 	uint64_t lba;
3982 
3983 	assert(ctx->cur_page < ctx->super->md_len);
3984 	lba = _spdk_bs_md_page_to_lba(ctx->bs, ctx->cur_page);
3985 	spdk_bs_sequence_read_dev(ctx->seq, ctx->page, lba,
3986 				  _spdk_bs_byte_to_lba(ctx->bs, SPDK_BS_PAGE_SIZE),
3987 				  _spdk_bs_load_replay_md_cpl, ctx);
3988 }
3989 
3990 static void
3991 _spdk_bs_load_replay_md(struct spdk_bs_load_ctx *ctx)
3992 {
3993 	ctx->page_index = 0;
3994 	ctx->cur_page = 0;
3995 	ctx->page = spdk_zmalloc(SPDK_BS_PAGE_SIZE, SPDK_BS_PAGE_SIZE,
3996 				 NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
3997 	if (!ctx->page) {
3998 		_spdk_bs_load_ctx_fail(ctx, -ENOMEM);
3999 		return;
4000 	}
4001 	_spdk_bs_load_replay_cur_md_page(ctx);
4002 }
4003 
4004 static void
4005 _spdk_bs_recover(struct spdk_bs_load_ctx *ctx)
4006 {
4007 	int		rc;
4008 
4009 	rc = spdk_bit_array_resize(&ctx->bs->used_md_pages, ctx->super->md_len);
4010 	if (rc < 0) {
4011 		_spdk_bs_load_ctx_fail(ctx, -ENOMEM);
4012 		return;
4013 	}
4014 
4015 	rc = spdk_bit_array_resize(&ctx->bs->used_blobids, ctx->super->md_len);
4016 	if (rc < 0) {
4017 		_spdk_bs_load_ctx_fail(ctx, -ENOMEM);
4018 		return;
4019 	}
4020 
4021 	rc = spdk_bit_array_resize(&ctx->bs->used_clusters, ctx->bs->total_clusters);
4022 	if (rc < 0) {
4023 		_spdk_bs_load_ctx_fail(ctx, -ENOMEM);
4024 		return;
4025 	}
4026 
4027 	ctx->bs->num_free_clusters = ctx->bs->total_clusters;
4028 	_spdk_bs_load_replay_md(ctx);
4029 }
4030 
4031 static void
4032 _spdk_bs_load_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4033 {
4034 	struct spdk_bs_load_ctx *ctx = cb_arg;
4035 	uint32_t	crc;
4036 	int		rc;
4037 	static const char zeros[SPDK_BLOBSTORE_TYPE_LENGTH];
4038 
4039 	if (ctx->super->version > SPDK_BS_VERSION ||
4040 	    ctx->super->version < SPDK_BS_INITIAL_VERSION) {
4041 		_spdk_bs_load_ctx_fail(ctx, -EILSEQ);
4042 		return;
4043 	}
4044 
4045 	if (memcmp(ctx->super->signature, SPDK_BS_SUPER_BLOCK_SIG,
4046 		   sizeof(ctx->super->signature)) != 0) {
4047 		_spdk_bs_load_ctx_fail(ctx, -EILSEQ);
4048 		return;
4049 	}
4050 
4051 	crc = _spdk_blob_md_page_calc_crc(ctx->super);
4052 	if (crc != ctx->super->crc) {
4053 		_spdk_bs_load_ctx_fail(ctx, -EILSEQ);
4054 		return;
4055 	}
4056 
4057 	if (memcmp(&ctx->bs->bstype, &ctx->super->bstype, SPDK_BLOBSTORE_TYPE_LENGTH) == 0) {
4058 		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Bstype matched - loading blobstore\n");
4059 	} else if (memcmp(&ctx->bs->bstype, zeros, SPDK_BLOBSTORE_TYPE_LENGTH) == 0) {
4060 		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Bstype wildcard used - loading blobstore regardless bstype\n");
4061 	} else {
4062 		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Unexpected bstype\n");
4063 		SPDK_LOGDUMP(SPDK_LOG_BLOB, "Expected:", ctx->bs->bstype.bstype, SPDK_BLOBSTORE_TYPE_LENGTH);
4064 		SPDK_LOGDUMP(SPDK_LOG_BLOB, "Found:", ctx->super->bstype.bstype, SPDK_BLOBSTORE_TYPE_LENGTH);
4065 		_spdk_bs_load_ctx_fail(ctx, -ENXIO);
4066 		return;
4067 	}
4068 
4069 	if (ctx->super->size > ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen) {
4070 		SPDK_NOTICELOG("Size mismatch, dev size: %lu, blobstore size: %lu\n",
4071 			       ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen, ctx->super->size);
4072 		_spdk_bs_load_ctx_fail(ctx, -EILSEQ);
4073 		return;
4074 	}
4075 
4076 	if (ctx->super->size == 0) {
4077 		ctx->super->size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen;
4078 	}
4079 
4080 	if (ctx->super->io_unit_size == 0) {
4081 		ctx->super->io_unit_size = SPDK_BS_PAGE_SIZE;
4082 	}
4083 
4084 	/* Parse the super block */
4085 	ctx->bs->clean = 1;
4086 	ctx->bs->cluster_sz = ctx->super->cluster_size;
4087 	ctx->bs->total_clusters = ctx->super->size / ctx->super->cluster_size;
4088 	ctx->bs->pages_per_cluster = ctx->bs->cluster_sz / SPDK_BS_PAGE_SIZE;
4089 	ctx->bs->io_unit_size = ctx->super->io_unit_size;
4090 	rc = spdk_bit_array_resize(&ctx->bs->used_clusters, ctx->bs->total_clusters);
4091 	if (rc < 0) {
4092 		_spdk_bs_load_ctx_fail(ctx, -ENOMEM);
4093 		return;
4094 	}
4095 	ctx->bs->md_start = ctx->super->md_start;
4096 	ctx->bs->md_len = ctx->super->md_len;
4097 	ctx->bs->total_data_clusters = ctx->bs->total_clusters - spdk_divide_round_up(
4098 					       ctx->bs->md_start + ctx->bs->md_len, ctx->bs->pages_per_cluster);
4099 	ctx->bs->super_blob = ctx->super->super_blob;
4100 	memcpy(&ctx->bs->bstype, &ctx->super->bstype, sizeof(ctx->super->bstype));
4101 
4102 	if (ctx->super->used_blobid_mask_len == 0 || ctx->super->clean == 0) {
4103 		_spdk_bs_recover(ctx);
4104 	} else {
4105 		_spdk_bs_load_read_used_pages(ctx);
4106 	}
4107 }
4108 
4109 void
4110 spdk_bs_load(struct spdk_bs_dev *dev, struct spdk_bs_opts *o,
4111 	     spdk_bs_op_with_handle_complete cb_fn, void *cb_arg)
4112 {
4113 	struct spdk_blob_store	*bs;
4114 	struct spdk_bs_cpl	cpl;
4115 	struct spdk_bs_load_ctx *ctx;
4116 	struct spdk_bs_opts	opts = {};
4117 	int err;
4118 
4119 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Loading blobstore from dev %p\n", dev);
4120 
4121 	if ((SPDK_BS_PAGE_SIZE % dev->blocklen) != 0) {
4122 		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "unsupported dev block length of %d\n", dev->blocklen);
4123 		dev->destroy(dev);
4124 		cb_fn(cb_arg, NULL, -EINVAL);
4125 		return;
4126 	}
4127 
4128 	if (o) {
4129 		opts = *o;
4130 	} else {
4131 		spdk_bs_opts_init(&opts);
4132 	}
4133 
4134 	if (opts.max_md_ops == 0 || opts.max_channel_ops == 0) {
4135 		dev->destroy(dev);
4136 		cb_fn(cb_arg, NULL, -EINVAL);
4137 		return;
4138 	}
4139 
4140 	err = _spdk_bs_alloc(dev, &opts, &bs);
4141 	if (err) {
4142 		dev->destroy(dev);
4143 		cb_fn(cb_arg, NULL, err);
4144 		return;
4145 	}
4146 
4147 	ctx = calloc(1, sizeof(*ctx));
4148 	if (!ctx) {
4149 		_spdk_bs_free(bs);
4150 		cb_fn(cb_arg, NULL, -ENOMEM);
4151 		return;
4152 	}
4153 
4154 	ctx->bs = bs;
4155 	ctx->iter_cb_fn = opts.iter_cb_fn;
4156 	ctx->iter_cb_arg = opts.iter_cb_arg;
4157 
4158 	/* Allocate memory for the super block */
4159 	ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL,
4160 				  SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
4161 	if (!ctx->super) {
4162 		free(ctx);
4163 		_spdk_bs_free(bs);
4164 		cb_fn(cb_arg, NULL, -ENOMEM);
4165 		return;
4166 	}
4167 
4168 	cpl.type = SPDK_BS_CPL_TYPE_BS_HANDLE;
4169 	cpl.u.bs_handle.cb_fn = cb_fn;
4170 	cpl.u.bs_handle.cb_arg = cb_arg;
4171 	cpl.u.bs_handle.bs = bs;
4172 
4173 	ctx->seq = spdk_bs_sequence_start(bs->md_channel, &cpl);
4174 	if (!ctx->seq) {
4175 		spdk_free(ctx->super);
4176 		free(ctx);
4177 		_spdk_bs_free(bs);
4178 		cb_fn(cb_arg, NULL, -ENOMEM);
4179 		return;
4180 	}
4181 
4182 	/* Read the super block */
4183 	spdk_bs_sequence_read_dev(ctx->seq, ctx->super, _spdk_bs_page_to_lba(bs, 0),
4184 				  _spdk_bs_byte_to_lba(bs, sizeof(*ctx->super)),
4185 				  _spdk_bs_load_super_cpl, ctx);
4186 }
4187 
4188 /* END spdk_bs_load */
4189 
4190 /* START spdk_bs_dump */
4191 
4192 struct spdk_bs_dump_ctx {
4193 	struct spdk_blob_store		*bs;
4194 	struct spdk_bs_super_block	*super;
4195 	uint32_t			cur_page;
4196 	struct spdk_blob_md_page	*page;
4197 	spdk_bs_sequence_t		*seq;
4198 	FILE				*fp;
4199 	spdk_bs_dump_print_xattr	print_xattr_fn;
4200 	char				xattr_name[4096];
4201 };
4202 
4203 static void
4204 _spdk_bs_dump_finish(spdk_bs_sequence_t *seq, struct spdk_bs_dump_ctx *ctx, int bserrno)
4205 {
4206 	spdk_free(ctx->super);
4207 
4208 	/*
4209 	 * We need to defer calling spdk_bs_call_cpl() until after
4210 	 * dev destruction, so tuck these away for later use.
4211 	 */
4212 	ctx->bs->unload_err = bserrno;
4213 	memcpy(&ctx->bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl));
4214 	seq->cpl.type = SPDK_BS_CPL_TYPE_NONE;
4215 
4216 	spdk_bs_sequence_finish(seq, 0);
4217 	_spdk_bs_free(ctx->bs);
4218 	free(ctx);
4219 }
4220 
4221 static void _spdk_bs_dump_read_md_page(spdk_bs_sequence_t *seq, void *cb_arg);
4222 
4223 static void
4224 _spdk_bs_dump_print_md_page(struct spdk_bs_dump_ctx *ctx)
4225 {
4226 	uint32_t page_idx = ctx->cur_page;
4227 	struct spdk_blob_md_page *page = ctx->page;
4228 	struct spdk_blob_md_descriptor *desc;
4229 	size_t cur_desc = 0;
4230 	uint32_t crc;
4231 
4232 	fprintf(ctx->fp, "=========\n");
4233 	fprintf(ctx->fp, "Metadata Page Index: %" PRIu32 " (0x%" PRIx32 ")\n", page_idx, page_idx);
4234 	fprintf(ctx->fp, "Blob ID: 0x%" PRIx64 "\n", page->id);
4235 
4236 	crc = _spdk_blob_md_page_calc_crc(page);
4237 	fprintf(ctx->fp, "CRC: 0x%" PRIx32 " (%s)\n", page->crc, crc == page->crc ? "OK" : "Mismatch");
4238 
4239 	desc = (struct spdk_blob_md_descriptor *)page->descriptors;
4240 	while (cur_desc < sizeof(page->descriptors)) {
4241 		if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) {
4242 			if (desc->length == 0) {
4243 				/* If padding and length are 0, this terminates the page */
4244 				break;
4245 			}
4246 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE) {
4247 			struct spdk_blob_md_descriptor_extent_rle	*desc_extent_rle;
4248 			unsigned int				i;
4249 
4250 			desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)desc;
4251 
4252 			for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) {
4253 				if (desc_extent_rle->extents[i].cluster_idx != 0) {
4254 					fprintf(ctx->fp, "Allocated Extent - Start: %" PRIu32,
4255 						desc_extent_rle->extents[i].cluster_idx);
4256 				} else {
4257 					fprintf(ctx->fp, "Unallocated Extent - ");
4258 				}
4259 				fprintf(ctx->fp, " Length: %" PRIu32, desc_extent_rle->extents[i].length);
4260 				fprintf(ctx->fp, "\n");
4261 			}
4262 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) {
4263 			struct spdk_blob_md_descriptor_extent_page	*desc_extent;
4264 			unsigned int					i;
4265 
4266 			desc_extent = (struct spdk_blob_md_descriptor_extent_page *)desc;
4267 
4268 			for (i = 0; i < desc_extent->length / sizeof(desc_extent->cluster_idx[0]); i++) {
4269 				if (desc_extent->cluster_idx[i] != 0) {
4270 					fprintf(ctx->fp, "Allocated Extent - Start: %" PRIu32,
4271 						desc_extent->cluster_idx[i]);
4272 				} else {
4273 					fprintf(ctx->fp, "Unallocated Extent");
4274 				}
4275 				fprintf(ctx->fp, "\n");
4276 			}
4277 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) {
4278 			struct spdk_blob_md_descriptor_xattr *desc_xattr;
4279 			uint32_t i;
4280 
4281 			desc_xattr = (struct spdk_blob_md_descriptor_xattr *)desc;
4282 
4283 			if (desc_xattr->length !=
4284 			    sizeof(desc_xattr->name_length) + sizeof(desc_xattr->value_length) +
4285 			    desc_xattr->name_length + desc_xattr->value_length) {
4286 			}
4287 
4288 			memcpy(ctx->xattr_name, desc_xattr->name, desc_xattr->name_length);
4289 			ctx->xattr_name[desc_xattr->name_length] = '\0';
4290 			fprintf(ctx->fp, "XATTR: name = \"%s\"\n", ctx->xattr_name);
4291 			fprintf(ctx->fp, "       value = \"");
4292 			ctx->print_xattr_fn(ctx->fp, ctx->super->bstype.bstype, ctx->xattr_name,
4293 					    (void *)((uintptr_t)desc_xattr->name + desc_xattr->name_length),
4294 					    desc_xattr->value_length);
4295 			fprintf(ctx->fp, "\"\n");
4296 			for (i = 0; i < desc_xattr->value_length; i++) {
4297 				if (i % 16 == 0) {
4298 					fprintf(ctx->fp, "               ");
4299 				}
4300 				fprintf(ctx->fp, "%02" PRIx8 " ", *((uint8_t *)desc_xattr->name + desc_xattr->name_length + i));
4301 				if ((i + 1) % 16 == 0) {
4302 					fprintf(ctx->fp, "\n");
4303 				}
4304 			}
4305 			if (i % 16 != 0) {
4306 				fprintf(ctx->fp, "\n");
4307 			}
4308 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) {
4309 			/* TODO */
4310 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) {
4311 			/* TODO */
4312 		} else {
4313 			/* Error */
4314 		}
4315 		/* Advance to the next descriptor */
4316 		cur_desc += sizeof(*desc) + desc->length;
4317 		if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) {
4318 			break;
4319 		}
4320 		desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc);
4321 	}
4322 }
4323 
4324 static void
4325 _spdk_bs_dump_read_md_page_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4326 {
4327 	struct spdk_bs_dump_ctx *ctx = cb_arg;
4328 
4329 	if (bserrno != 0) {
4330 		_spdk_bs_dump_finish(seq, ctx, bserrno);
4331 		return;
4332 	}
4333 
4334 	if (ctx->page->id != 0) {
4335 		_spdk_bs_dump_print_md_page(ctx);
4336 	}
4337 
4338 	ctx->cur_page++;
4339 
4340 	if (ctx->cur_page < ctx->super->md_len) {
4341 		_spdk_bs_dump_read_md_page(seq, ctx);
4342 	} else {
4343 		spdk_free(ctx->page);
4344 		_spdk_bs_dump_finish(seq, ctx, 0);
4345 	}
4346 }
4347 
4348 static void
4349 _spdk_bs_dump_read_md_page(spdk_bs_sequence_t *seq, void *cb_arg)
4350 {
4351 	struct spdk_bs_dump_ctx *ctx = cb_arg;
4352 	uint64_t lba;
4353 
4354 	assert(ctx->cur_page < ctx->super->md_len);
4355 	lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->md_start + ctx->cur_page);
4356 	spdk_bs_sequence_read_dev(seq, ctx->page, lba,
4357 				  _spdk_bs_byte_to_lba(ctx->bs, SPDK_BS_PAGE_SIZE),
4358 				  _spdk_bs_dump_read_md_page_cpl, ctx);
4359 }
4360 
4361 static void
4362 _spdk_bs_dump_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4363 {
4364 	struct spdk_bs_dump_ctx *ctx = cb_arg;
4365 
4366 	fprintf(ctx->fp, "Signature: \"%.8s\" ", ctx->super->signature);
4367 	if (memcmp(ctx->super->signature, SPDK_BS_SUPER_BLOCK_SIG,
4368 		   sizeof(ctx->super->signature)) != 0) {
4369 		fprintf(ctx->fp, "(Mismatch)\n");
4370 		_spdk_bs_dump_finish(seq, ctx, bserrno);
4371 		return;
4372 	} else {
4373 		fprintf(ctx->fp, "(OK)\n");
4374 	}
4375 	fprintf(ctx->fp, "Version: %" PRIu32 "\n", ctx->super->version);
4376 	fprintf(ctx->fp, "CRC: 0x%x (%s)\n", ctx->super->crc,
4377 		(ctx->super->crc == _spdk_blob_md_page_calc_crc(ctx->super)) ? "OK" : "Mismatch");
4378 	fprintf(ctx->fp, "Blobstore Type: %.*s\n", SPDK_BLOBSTORE_TYPE_LENGTH, ctx->super->bstype.bstype);
4379 	fprintf(ctx->fp, "Cluster Size: %" PRIu32 "\n", ctx->super->cluster_size);
4380 	fprintf(ctx->fp, "Super Blob ID: ");
4381 	if (ctx->super->super_blob == SPDK_BLOBID_INVALID) {
4382 		fprintf(ctx->fp, "(None)\n");
4383 	} else {
4384 		fprintf(ctx->fp, "%" PRIu64 "\n", ctx->super->super_blob);
4385 	}
4386 	fprintf(ctx->fp, "Clean: %" PRIu32 "\n", ctx->super->clean);
4387 	fprintf(ctx->fp, "Used Metadata Page Mask Start: %" PRIu32 "\n", ctx->super->used_page_mask_start);
4388 	fprintf(ctx->fp, "Used Metadata Page Mask Length: %" PRIu32 "\n", ctx->super->used_page_mask_len);
4389 	fprintf(ctx->fp, "Used Cluster Mask Start: %" PRIu32 "\n", ctx->super->used_cluster_mask_start);
4390 	fprintf(ctx->fp, "Used Cluster Mask Length: %" PRIu32 "\n", ctx->super->used_cluster_mask_len);
4391 	fprintf(ctx->fp, "Used Blob ID Mask Start: %" PRIu32 "\n", ctx->super->used_blobid_mask_start);
4392 	fprintf(ctx->fp, "Used Blob ID Mask Length: %" PRIu32 "\n", ctx->super->used_blobid_mask_len);
4393 	fprintf(ctx->fp, "Metadata Start: %" PRIu32 "\n", ctx->super->md_start);
4394 	fprintf(ctx->fp, "Metadata Length: %" PRIu32 "\n", ctx->super->md_len);
4395 
4396 	ctx->cur_page = 0;
4397 	ctx->page = spdk_zmalloc(SPDK_BS_PAGE_SIZE, SPDK_BS_PAGE_SIZE,
4398 				 NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
4399 	if (!ctx->page) {
4400 		_spdk_bs_dump_finish(seq, ctx, -ENOMEM);
4401 		return;
4402 	}
4403 	_spdk_bs_dump_read_md_page(seq, ctx);
4404 }
4405 
4406 void
4407 spdk_bs_dump(struct spdk_bs_dev *dev, FILE *fp, spdk_bs_dump_print_xattr print_xattr_fn,
4408 	     spdk_bs_op_complete cb_fn, void *cb_arg)
4409 {
4410 	struct spdk_blob_store	*bs;
4411 	struct spdk_bs_cpl	cpl;
4412 	spdk_bs_sequence_t	*seq;
4413 	struct spdk_bs_dump_ctx *ctx;
4414 	struct spdk_bs_opts	opts = {};
4415 	int err;
4416 
4417 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Dumping blobstore from dev %p\n", dev);
4418 
4419 	spdk_bs_opts_init(&opts);
4420 
4421 	err = _spdk_bs_alloc(dev, &opts, &bs);
4422 	if (err) {
4423 		dev->destroy(dev);
4424 		cb_fn(cb_arg, err);
4425 		return;
4426 	}
4427 
4428 	ctx = calloc(1, sizeof(*ctx));
4429 	if (!ctx) {
4430 		_spdk_bs_free(bs);
4431 		cb_fn(cb_arg, -ENOMEM);
4432 		return;
4433 	}
4434 
4435 	ctx->bs = bs;
4436 	ctx->fp = fp;
4437 	ctx->print_xattr_fn = print_xattr_fn;
4438 
4439 	/* Allocate memory for the super block */
4440 	ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL,
4441 				  SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
4442 	if (!ctx->super) {
4443 		free(ctx);
4444 		_spdk_bs_free(bs);
4445 		cb_fn(cb_arg, -ENOMEM);
4446 		return;
4447 	}
4448 
4449 	cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
4450 	cpl.u.bs_basic.cb_fn = cb_fn;
4451 	cpl.u.bs_basic.cb_arg = cb_arg;
4452 
4453 	seq = spdk_bs_sequence_start(bs->md_channel, &cpl);
4454 	if (!seq) {
4455 		spdk_free(ctx->super);
4456 		free(ctx);
4457 		_spdk_bs_free(bs);
4458 		cb_fn(cb_arg, -ENOMEM);
4459 		return;
4460 	}
4461 
4462 	/* Read the super block */
4463 	spdk_bs_sequence_read_dev(seq, ctx->super, _spdk_bs_page_to_lba(bs, 0),
4464 				  _spdk_bs_byte_to_lba(bs, sizeof(*ctx->super)),
4465 				  _spdk_bs_dump_super_cpl, ctx);
4466 }
4467 
4468 /* END spdk_bs_dump */
4469 
4470 /* START spdk_bs_init */
4471 
4472 struct spdk_bs_init_ctx {
4473 	struct spdk_blob_store		*bs;
4474 	struct spdk_bs_super_block	*super;
4475 };
4476 
4477 static void
4478 _spdk_bs_init_persist_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4479 {
4480 	struct spdk_bs_init_ctx *ctx = cb_arg;
4481 
4482 	spdk_free(ctx->super);
4483 	free(ctx);
4484 
4485 	spdk_bs_sequence_finish(seq, bserrno);
4486 }
4487 
4488 static void
4489 _spdk_bs_init_trim_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4490 {
4491 	struct spdk_bs_init_ctx *ctx = cb_arg;
4492 
4493 	/* Write super block */
4494 	spdk_bs_sequence_write_dev(seq, ctx->super, _spdk_bs_page_to_lba(ctx->bs, 0),
4495 				   _spdk_bs_byte_to_lba(ctx->bs, sizeof(*ctx->super)),
4496 				   _spdk_bs_init_persist_super_cpl, ctx);
4497 }
4498 
4499 void
4500 spdk_bs_init(struct spdk_bs_dev *dev, struct spdk_bs_opts *o,
4501 	     spdk_bs_op_with_handle_complete cb_fn, void *cb_arg)
4502 {
4503 	struct spdk_bs_init_ctx *ctx;
4504 	struct spdk_blob_store	*bs;
4505 	struct spdk_bs_cpl	cpl;
4506 	spdk_bs_sequence_t	*seq;
4507 	spdk_bs_batch_t		*batch;
4508 	uint64_t		num_md_lba;
4509 	uint64_t		num_md_pages;
4510 	uint64_t		num_md_clusters;
4511 	uint32_t		i;
4512 	struct spdk_bs_opts	opts = {};
4513 	int			rc;
4514 
4515 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Initializing blobstore on dev %p\n", dev);
4516 
4517 	if ((SPDK_BS_PAGE_SIZE % dev->blocklen) != 0) {
4518 		SPDK_ERRLOG("unsupported dev block length of %d\n",
4519 			    dev->blocklen);
4520 		dev->destroy(dev);
4521 		cb_fn(cb_arg, NULL, -EINVAL);
4522 		return;
4523 	}
4524 
4525 	if (o) {
4526 		opts = *o;
4527 	} else {
4528 		spdk_bs_opts_init(&opts);
4529 	}
4530 
4531 	if (_spdk_bs_opts_verify(&opts) != 0) {
4532 		dev->destroy(dev);
4533 		cb_fn(cb_arg, NULL, -EINVAL);
4534 		return;
4535 	}
4536 
4537 	rc = _spdk_bs_alloc(dev, &opts, &bs);
4538 	if (rc) {
4539 		dev->destroy(dev);
4540 		cb_fn(cb_arg, NULL, rc);
4541 		return;
4542 	}
4543 
4544 	if (opts.num_md_pages == SPDK_BLOB_OPTS_NUM_MD_PAGES) {
4545 		/* By default, allocate 1 page per cluster.
4546 		 * Technically, this over-allocates metadata
4547 		 * because more metadata will reduce the number
4548 		 * of usable clusters. This can be addressed with
4549 		 * more complex math in the future.
4550 		 */
4551 		bs->md_len = bs->total_clusters;
4552 	} else {
4553 		bs->md_len = opts.num_md_pages;
4554 	}
4555 	rc = spdk_bit_array_resize(&bs->used_md_pages, bs->md_len);
4556 	if (rc < 0) {
4557 		_spdk_bs_free(bs);
4558 		cb_fn(cb_arg, NULL, -ENOMEM);
4559 		return;
4560 	}
4561 
4562 	rc = spdk_bit_array_resize(&bs->used_blobids, bs->md_len);
4563 	if (rc < 0) {
4564 		_spdk_bs_free(bs);
4565 		cb_fn(cb_arg, NULL, -ENOMEM);
4566 		return;
4567 	}
4568 
4569 	ctx = calloc(1, sizeof(*ctx));
4570 	if (!ctx) {
4571 		_spdk_bs_free(bs);
4572 		cb_fn(cb_arg, NULL, -ENOMEM);
4573 		return;
4574 	}
4575 
4576 	ctx->bs = bs;
4577 
4578 	/* Allocate memory for the super block */
4579 	ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL,
4580 				  SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
4581 	if (!ctx->super) {
4582 		free(ctx);
4583 		_spdk_bs_free(bs);
4584 		cb_fn(cb_arg, NULL, -ENOMEM);
4585 		return;
4586 	}
4587 	memcpy(ctx->super->signature, SPDK_BS_SUPER_BLOCK_SIG,
4588 	       sizeof(ctx->super->signature));
4589 	ctx->super->version = SPDK_BS_VERSION;
4590 	ctx->super->length = sizeof(*ctx->super);
4591 	ctx->super->super_blob = bs->super_blob;
4592 	ctx->super->clean = 0;
4593 	ctx->super->cluster_size = bs->cluster_sz;
4594 	ctx->super->io_unit_size = bs->io_unit_size;
4595 	memcpy(&ctx->super->bstype, &bs->bstype, sizeof(bs->bstype));
4596 
4597 	/* Calculate how many pages the metadata consumes at the front
4598 	 * of the disk.
4599 	 */
4600 
4601 	/* The super block uses 1 page */
4602 	num_md_pages = 1;
4603 
4604 	/* The used_md_pages mask requires 1 bit per metadata page, rounded
4605 	 * up to the nearest page, plus a header.
4606 	 */
4607 	ctx->super->used_page_mask_start = num_md_pages;
4608 	ctx->super->used_page_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) +
4609 					 spdk_divide_round_up(bs->md_len, 8),
4610 					 SPDK_BS_PAGE_SIZE);
4611 	num_md_pages += ctx->super->used_page_mask_len;
4612 
4613 	/* The used_clusters mask requires 1 bit per cluster, rounded
4614 	 * up to the nearest page, plus a header.
4615 	 */
4616 	ctx->super->used_cluster_mask_start = num_md_pages;
4617 	ctx->super->used_cluster_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) +
4618 					    spdk_divide_round_up(bs->total_clusters, 8),
4619 					    SPDK_BS_PAGE_SIZE);
4620 	num_md_pages += ctx->super->used_cluster_mask_len;
4621 
4622 	/* The used_blobids mask requires 1 bit per metadata page, rounded
4623 	 * up to the nearest page, plus a header.
4624 	 */
4625 	ctx->super->used_blobid_mask_start = num_md_pages;
4626 	ctx->super->used_blobid_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) +
4627 					   spdk_divide_round_up(bs->md_len, 8),
4628 					   SPDK_BS_PAGE_SIZE);
4629 	num_md_pages += ctx->super->used_blobid_mask_len;
4630 
4631 	/* The metadata region size was chosen above */
4632 	ctx->super->md_start = bs->md_start = num_md_pages;
4633 	ctx->super->md_len = bs->md_len;
4634 	num_md_pages += bs->md_len;
4635 
4636 	num_md_lba = _spdk_bs_page_to_lba(bs, num_md_pages);
4637 
4638 	ctx->super->size = dev->blockcnt * dev->blocklen;
4639 
4640 	ctx->super->crc = _spdk_blob_md_page_calc_crc(ctx->super);
4641 
4642 	num_md_clusters = spdk_divide_round_up(num_md_pages, bs->pages_per_cluster);
4643 	if (num_md_clusters > bs->total_clusters) {
4644 		SPDK_ERRLOG("Blobstore metadata cannot use more clusters than is available, "
4645 			    "please decrease number of pages reserved for metadata "
4646 			    "or increase cluster size.\n");
4647 		spdk_free(ctx->super);
4648 		free(ctx);
4649 		_spdk_bs_free(bs);
4650 		cb_fn(cb_arg, NULL, -ENOMEM);
4651 		return;
4652 	}
4653 	/* Claim all of the clusters used by the metadata */
4654 	for (i = 0; i < num_md_clusters; i++) {
4655 		_spdk_bs_claim_cluster(bs, i);
4656 	}
4657 
4658 	bs->total_data_clusters = bs->num_free_clusters;
4659 
4660 	cpl.type = SPDK_BS_CPL_TYPE_BS_HANDLE;
4661 	cpl.u.bs_handle.cb_fn = cb_fn;
4662 	cpl.u.bs_handle.cb_arg = cb_arg;
4663 	cpl.u.bs_handle.bs = bs;
4664 
4665 	seq = spdk_bs_sequence_start(bs->md_channel, &cpl);
4666 	if (!seq) {
4667 		spdk_free(ctx->super);
4668 		free(ctx);
4669 		_spdk_bs_free(bs);
4670 		cb_fn(cb_arg, NULL, -ENOMEM);
4671 		return;
4672 	}
4673 
4674 	batch = spdk_bs_sequence_to_batch(seq, _spdk_bs_init_trim_cpl, ctx);
4675 
4676 	/* Clear metadata space */
4677 	spdk_bs_batch_write_zeroes_dev(batch, 0, num_md_lba);
4678 
4679 	switch (opts.clear_method) {
4680 	case BS_CLEAR_WITH_UNMAP:
4681 		/* Trim data clusters */
4682 		spdk_bs_batch_unmap_dev(batch, num_md_lba, ctx->bs->dev->blockcnt - num_md_lba);
4683 		break;
4684 	case BS_CLEAR_WITH_WRITE_ZEROES:
4685 		/* Write_zeroes to data clusters */
4686 		spdk_bs_batch_write_zeroes_dev(batch, num_md_lba, ctx->bs->dev->blockcnt - num_md_lba);
4687 		break;
4688 	case BS_CLEAR_WITH_NONE:
4689 	default:
4690 		break;
4691 	}
4692 
4693 	spdk_bs_batch_close(batch);
4694 }
4695 
4696 /* END spdk_bs_init */
4697 
4698 /* START spdk_bs_destroy */
4699 
4700 static void
4701 _spdk_bs_destroy_trim_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4702 {
4703 	struct spdk_bs_init_ctx *ctx = cb_arg;
4704 	struct spdk_blob_store *bs = ctx->bs;
4705 
4706 	/*
4707 	 * We need to defer calling spdk_bs_call_cpl() until after
4708 	 * dev destruction, so tuck these away for later use.
4709 	 */
4710 	bs->unload_err = bserrno;
4711 	memcpy(&bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl));
4712 	seq->cpl.type = SPDK_BS_CPL_TYPE_NONE;
4713 
4714 	spdk_bs_sequence_finish(seq, bserrno);
4715 
4716 	_spdk_bs_free(bs);
4717 	free(ctx);
4718 }
4719 
4720 void
4721 spdk_bs_destroy(struct spdk_blob_store *bs, spdk_bs_op_complete cb_fn,
4722 		void *cb_arg)
4723 {
4724 	struct spdk_bs_cpl	cpl;
4725 	spdk_bs_sequence_t	*seq;
4726 	struct spdk_bs_init_ctx *ctx;
4727 
4728 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Destroying blobstore\n");
4729 
4730 	if (!TAILQ_EMPTY(&bs->blobs)) {
4731 		SPDK_ERRLOG("Blobstore still has open blobs\n");
4732 		cb_fn(cb_arg, -EBUSY);
4733 		return;
4734 	}
4735 
4736 	cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
4737 	cpl.u.bs_basic.cb_fn = cb_fn;
4738 	cpl.u.bs_basic.cb_arg = cb_arg;
4739 
4740 	ctx = calloc(1, sizeof(*ctx));
4741 	if (!ctx) {
4742 		cb_fn(cb_arg, -ENOMEM);
4743 		return;
4744 	}
4745 
4746 	ctx->bs = bs;
4747 
4748 	seq = spdk_bs_sequence_start(bs->md_channel, &cpl);
4749 	if (!seq) {
4750 		free(ctx);
4751 		cb_fn(cb_arg, -ENOMEM);
4752 		return;
4753 	}
4754 
4755 	/* Write zeroes to the super block */
4756 	spdk_bs_sequence_write_zeroes_dev(seq,
4757 					  _spdk_bs_page_to_lba(bs, 0),
4758 					  _spdk_bs_byte_to_lba(bs, sizeof(struct spdk_bs_super_block)),
4759 					  _spdk_bs_destroy_trim_cpl, ctx);
4760 }
4761 
4762 /* END spdk_bs_destroy */
4763 
4764 /* START spdk_bs_unload */
4765 
4766 static void
4767 _spdk_bs_unload_finish(struct spdk_bs_load_ctx *ctx, int bserrno)
4768 {
4769 	spdk_bs_sequence_t *seq = ctx->seq;
4770 
4771 	spdk_free(ctx->super);
4772 
4773 	/*
4774 	 * We need to defer calling spdk_bs_call_cpl() until after
4775 	 * dev destruction, so tuck these away for later use.
4776 	 */
4777 	ctx->bs->unload_err = bserrno;
4778 	memcpy(&ctx->bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl));
4779 	seq->cpl.type = SPDK_BS_CPL_TYPE_NONE;
4780 
4781 	spdk_bs_sequence_finish(seq, bserrno);
4782 
4783 	_spdk_bs_free(ctx->bs);
4784 	free(ctx);
4785 }
4786 
4787 static void
4788 _spdk_bs_unload_write_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4789 {
4790 	struct spdk_bs_load_ctx	*ctx = cb_arg;
4791 
4792 	_spdk_bs_unload_finish(ctx, bserrno);
4793 }
4794 
4795 static void
4796 _spdk_bs_unload_write_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4797 {
4798 	struct spdk_bs_load_ctx	*ctx = cb_arg;
4799 
4800 	spdk_free(ctx->mask);
4801 
4802 	if (bserrno != 0) {
4803 		_spdk_bs_unload_finish(ctx, bserrno);
4804 		return;
4805 	}
4806 
4807 	ctx->super->clean = 1;
4808 
4809 	_spdk_bs_write_super(seq, ctx->bs, ctx->super, _spdk_bs_unload_write_super_cpl, ctx);
4810 }
4811 
4812 static void
4813 _spdk_bs_unload_write_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4814 {
4815 	struct spdk_bs_load_ctx	*ctx = cb_arg;
4816 
4817 	spdk_free(ctx->mask);
4818 	ctx->mask = NULL;
4819 
4820 	if (bserrno != 0) {
4821 		_spdk_bs_unload_finish(ctx, bserrno);
4822 		return;
4823 	}
4824 
4825 	_spdk_bs_write_used_clusters(seq, ctx, _spdk_bs_unload_write_used_clusters_cpl);
4826 }
4827 
4828 static void
4829 _spdk_bs_unload_write_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4830 {
4831 	struct spdk_bs_load_ctx	*ctx = cb_arg;
4832 
4833 	spdk_free(ctx->mask);
4834 	ctx->mask = NULL;
4835 
4836 	if (bserrno != 0) {
4837 		_spdk_bs_unload_finish(ctx, bserrno);
4838 		return;
4839 	}
4840 
4841 	_spdk_bs_write_used_blobids(seq, ctx, _spdk_bs_unload_write_used_blobids_cpl);
4842 }
4843 
4844 static void
4845 _spdk_bs_unload_read_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4846 {
4847 	struct spdk_bs_load_ctx	*ctx = cb_arg;
4848 
4849 	if (bserrno != 0) {
4850 		_spdk_bs_unload_finish(ctx, bserrno);
4851 		return;
4852 	}
4853 
4854 	_spdk_bs_write_used_md(seq, cb_arg, _spdk_bs_unload_write_used_pages_cpl);
4855 }
4856 
4857 void
4858 spdk_bs_unload(struct spdk_blob_store *bs, spdk_bs_op_complete cb_fn, void *cb_arg)
4859 {
4860 	struct spdk_bs_cpl	cpl;
4861 	struct spdk_bs_load_ctx *ctx;
4862 
4863 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Syncing blobstore\n");
4864 
4865 	if (!TAILQ_EMPTY(&bs->blobs)) {
4866 		SPDK_ERRLOG("Blobstore still has open blobs\n");
4867 		cb_fn(cb_arg, -EBUSY);
4868 		return;
4869 	}
4870 
4871 	ctx = calloc(1, sizeof(*ctx));
4872 	if (!ctx) {
4873 		cb_fn(cb_arg, -ENOMEM);
4874 		return;
4875 	}
4876 
4877 	ctx->bs = bs;
4878 
4879 	ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL,
4880 				  SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
4881 	if (!ctx->super) {
4882 		free(ctx);
4883 		cb_fn(cb_arg, -ENOMEM);
4884 		return;
4885 	}
4886 
4887 	cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
4888 	cpl.u.bs_basic.cb_fn = cb_fn;
4889 	cpl.u.bs_basic.cb_arg = cb_arg;
4890 
4891 	ctx->seq = spdk_bs_sequence_start(bs->md_channel, &cpl);
4892 	if (!ctx->seq) {
4893 		spdk_free(ctx->super);
4894 		free(ctx);
4895 		cb_fn(cb_arg, -ENOMEM);
4896 		return;
4897 	}
4898 
4899 	/* Read super block */
4900 	spdk_bs_sequence_read_dev(ctx->seq, ctx->super, _spdk_bs_page_to_lba(bs, 0),
4901 				  _spdk_bs_byte_to_lba(bs, sizeof(*ctx->super)),
4902 				  _spdk_bs_unload_read_super_cpl, ctx);
4903 }
4904 
4905 /* END spdk_bs_unload */
4906 
4907 /* START spdk_bs_set_super */
4908 
4909 struct spdk_bs_set_super_ctx {
4910 	struct spdk_blob_store		*bs;
4911 	struct spdk_bs_super_block	*super;
4912 };
4913 
4914 static void
4915 _spdk_bs_set_super_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4916 {
4917 	struct spdk_bs_set_super_ctx	*ctx = cb_arg;
4918 
4919 	if (bserrno != 0) {
4920 		SPDK_ERRLOG("Unable to write to super block of blobstore\n");
4921 	}
4922 
4923 	spdk_free(ctx->super);
4924 
4925 	spdk_bs_sequence_finish(seq, bserrno);
4926 
4927 	free(ctx);
4928 }
4929 
4930 static void
4931 _spdk_bs_set_super_read_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4932 {
4933 	struct spdk_bs_set_super_ctx	*ctx = cb_arg;
4934 
4935 	if (bserrno != 0) {
4936 		SPDK_ERRLOG("Unable to read super block of blobstore\n");
4937 		spdk_free(ctx->super);
4938 		spdk_bs_sequence_finish(seq, bserrno);
4939 		free(ctx);
4940 		return;
4941 	}
4942 
4943 	_spdk_bs_write_super(seq, ctx->bs, ctx->super, _spdk_bs_set_super_write_cpl, ctx);
4944 }
4945 
4946 void
4947 spdk_bs_set_super(struct spdk_blob_store *bs, spdk_blob_id blobid,
4948 		  spdk_bs_op_complete cb_fn, void *cb_arg)
4949 {
4950 	struct spdk_bs_cpl		cpl;
4951 	spdk_bs_sequence_t		*seq;
4952 	struct spdk_bs_set_super_ctx	*ctx;
4953 
4954 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Setting super blob id on blobstore\n");
4955 
4956 	ctx = calloc(1, sizeof(*ctx));
4957 	if (!ctx) {
4958 		cb_fn(cb_arg, -ENOMEM);
4959 		return;
4960 	}
4961 
4962 	ctx->bs = bs;
4963 
4964 	ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL,
4965 				  SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
4966 	if (!ctx->super) {
4967 		free(ctx);
4968 		cb_fn(cb_arg, -ENOMEM);
4969 		return;
4970 	}
4971 
4972 	cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
4973 	cpl.u.bs_basic.cb_fn = cb_fn;
4974 	cpl.u.bs_basic.cb_arg = cb_arg;
4975 
4976 	seq = spdk_bs_sequence_start(bs->md_channel, &cpl);
4977 	if (!seq) {
4978 		spdk_free(ctx->super);
4979 		free(ctx);
4980 		cb_fn(cb_arg, -ENOMEM);
4981 		return;
4982 	}
4983 
4984 	bs->super_blob = blobid;
4985 
4986 	/* Read super block */
4987 	spdk_bs_sequence_read_dev(seq, ctx->super, _spdk_bs_page_to_lba(bs, 0),
4988 				  _spdk_bs_byte_to_lba(bs, sizeof(*ctx->super)),
4989 				  _spdk_bs_set_super_read_cpl, ctx);
4990 }
4991 
4992 /* END spdk_bs_set_super */
4993 
4994 void
4995 spdk_bs_get_super(struct spdk_blob_store *bs,
4996 		  spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
4997 {
4998 	if (bs->super_blob == SPDK_BLOBID_INVALID) {
4999 		cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOENT);
5000 	} else {
5001 		cb_fn(cb_arg, bs->super_blob, 0);
5002 	}
5003 }
5004 
5005 uint64_t
5006 spdk_bs_get_cluster_size(struct spdk_blob_store *bs)
5007 {
5008 	return bs->cluster_sz;
5009 }
5010 
5011 uint64_t
5012 spdk_bs_get_page_size(struct spdk_blob_store *bs)
5013 {
5014 	return SPDK_BS_PAGE_SIZE;
5015 }
5016 
5017 uint64_t
5018 spdk_bs_get_io_unit_size(struct spdk_blob_store *bs)
5019 {
5020 	return bs->io_unit_size;
5021 }
5022 
5023 uint64_t
5024 spdk_bs_free_cluster_count(struct spdk_blob_store *bs)
5025 {
5026 	return bs->num_free_clusters;
5027 }
5028 
5029 uint64_t
5030 spdk_bs_total_data_cluster_count(struct spdk_blob_store *bs)
5031 {
5032 	return bs->total_data_clusters;
5033 }
5034 
5035 static int
5036 spdk_bs_register_md_thread(struct spdk_blob_store *bs)
5037 {
5038 	bs->md_channel = spdk_get_io_channel(bs);
5039 	if (!bs->md_channel) {
5040 		SPDK_ERRLOG("Failed to get IO channel.\n");
5041 		return -1;
5042 	}
5043 
5044 	return 0;
5045 }
5046 
5047 static int
5048 spdk_bs_unregister_md_thread(struct spdk_blob_store *bs)
5049 {
5050 	spdk_put_io_channel(bs->md_channel);
5051 
5052 	return 0;
5053 }
5054 
5055 spdk_blob_id spdk_blob_get_id(struct spdk_blob *blob)
5056 {
5057 	assert(blob != NULL);
5058 
5059 	return blob->id;
5060 }
5061 
5062 uint64_t spdk_blob_get_num_pages(struct spdk_blob *blob)
5063 {
5064 	assert(blob != NULL);
5065 
5066 	return _spdk_bs_cluster_to_page(blob->bs, blob->active.num_clusters);
5067 }
5068 
5069 uint64_t spdk_blob_get_num_io_units(struct spdk_blob *blob)
5070 {
5071 	assert(blob != NULL);
5072 
5073 	return spdk_blob_get_num_pages(blob) * _spdk_bs_io_unit_per_page(blob->bs);
5074 }
5075 
5076 uint64_t spdk_blob_get_num_clusters(struct spdk_blob *blob)
5077 {
5078 	assert(blob != NULL);
5079 
5080 	return blob->active.num_clusters;
5081 }
5082 
5083 /* START spdk_bs_create_blob */
5084 
5085 static void
5086 _spdk_bs_create_blob_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
5087 {
5088 	struct spdk_blob *blob = cb_arg;
5089 
5090 	_spdk_blob_free(blob);
5091 
5092 	spdk_bs_sequence_finish(seq, bserrno);
5093 }
5094 
5095 static int
5096 _spdk_blob_set_xattrs(struct spdk_blob *blob, const struct spdk_blob_xattr_opts *xattrs,
5097 		      bool internal)
5098 {
5099 	uint64_t i;
5100 	size_t value_len = 0;
5101 	int rc;
5102 	const void *value = NULL;
5103 	if (xattrs->count > 0 && xattrs->get_value == NULL) {
5104 		return -EINVAL;
5105 	}
5106 	for (i = 0; i < xattrs->count; i++) {
5107 		xattrs->get_value(xattrs->ctx, xattrs->names[i], &value, &value_len);
5108 		if (value == NULL || value_len == 0) {
5109 			return -EINVAL;
5110 		}
5111 		rc = _spdk_blob_set_xattr(blob, xattrs->names[i], value, value_len, internal);
5112 		if (rc < 0) {
5113 			return rc;
5114 		}
5115 	}
5116 	return 0;
5117 }
5118 
5119 static void
5120 _spdk_bs_create_blob(struct spdk_blob_store *bs,
5121 		     const struct spdk_blob_opts *opts,
5122 		     const struct spdk_blob_xattr_opts *internal_xattrs,
5123 		     spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
5124 {
5125 	struct spdk_blob	*blob;
5126 	uint32_t		page_idx;
5127 	struct spdk_bs_cpl	cpl;
5128 	struct spdk_blob_opts	opts_default;
5129 	struct spdk_blob_xattr_opts internal_xattrs_default;
5130 	spdk_bs_sequence_t	*seq;
5131 	spdk_blob_id		id;
5132 	int rc;
5133 
5134 	assert(spdk_get_thread() == bs->md_thread);
5135 
5136 	page_idx = spdk_bit_array_find_first_clear(bs->used_md_pages, 0);
5137 	if (page_idx == UINT32_MAX) {
5138 		cb_fn(cb_arg, 0, -ENOMEM);
5139 		return;
5140 	}
5141 	spdk_bit_array_set(bs->used_blobids, page_idx);
5142 	_spdk_bs_claim_md_page(bs, page_idx);
5143 
5144 	id = _spdk_bs_page_to_blobid(page_idx);
5145 
5146 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Creating blob with id %lu at page %u\n", id, page_idx);
5147 
5148 	blob = _spdk_blob_alloc(bs, id);
5149 	if (!blob) {
5150 		cb_fn(cb_arg, 0, -ENOMEM);
5151 		return;
5152 	}
5153 
5154 	if (!opts) {
5155 		spdk_blob_opts_init(&opts_default);
5156 		opts = &opts_default;
5157 	}
5158 
5159 	blob->use_extent_table = opts->use_extent_table;
5160 	if (blob->use_extent_table) {
5161 		blob->invalid_flags |= SPDK_BLOB_EXTENT_TABLE;
5162 	}
5163 
5164 	if (!internal_xattrs) {
5165 		_spdk_blob_xattrs_init(&internal_xattrs_default);
5166 		internal_xattrs = &internal_xattrs_default;
5167 	}
5168 
5169 	rc = _spdk_blob_set_xattrs(blob, &opts->xattrs, false);
5170 	if (rc < 0) {
5171 		_spdk_blob_free(blob);
5172 		cb_fn(cb_arg, 0, rc);
5173 		return;
5174 	}
5175 
5176 	rc = _spdk_blob_set_xattrs(blob, internal_xattrs, true);
5177 	if (rc < 0) {
5178 		_spdk_blob_free(blob);
5179 		cb_fn(cb_arg, 0, rc);
5180 		return;
5181 	}
5182 
5183 	if (opts->thin_provision) {
5184 		_spdk_blob_set_thin_provision(blob);
5185 	}
5186 
5187 	_spdk_blob_set_clear_method(blob, opts->clear_method);
5188 
5189 	rc = _spdk_blob_resize(blob, opts->num_clusters);
5190 	if (rc < 0) {
5191 		_spdk_blob_free(blob);
5192 		cb_fn(cb_arg, 0, rc);
5193 		return;
5194 	}
5195 	cpl.type = SPDK_BS_CPL_TYPE_BLOBID;
5196 	cpl.u.blobid.cb_fn = cb_fn;
5197 	cpl.u.blobid.cb_arg = cb_arg;
5198 	cpl.u.blobid.blobid = blob->id;
5199 
5200 	seq = spdk_bs_sequence_start(bs->md_channel, &cpl);
5201 	if (!seq) {
5202 		_spdk_blob_free(blob);
5203 		cb_fn(cb_arg, 0, -ENOMEM);
5204 		return;
5205 	}
5206 
5207 	_spdk_blob_persist(seq, blob, _spdk_bs_create_blob_cpl, blob);
5208 }
5209 
5210 void spdk_bs_create_blob(struct spdk_blob_store *bs,
5211 			 spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
5212 {
5213 	_spdk_bs_create_blob(bs, NULL, NULL, cb_fn, cb_arg);
5214 }
5215 
5216 void spdk_bs_create_blob_ext(struct spdk_blob_store *bs, const struct spdk_blob_opts *opts,
5217 			     spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
5218 {
5219 	_spdk_bs_create_blob(bs, opts, NULL, cb_fn, cb_arg);
5220 }
5221 
5222 /* END spdk_bs_create_blob */
5223 
5224 /* START blob_cleanup */
5225 
5226 struct spdk_clone_snapshot_ctx {
5227 	struct spdk_bs_cpl      cpl;
5228 	int bserrno;
5229 	bool frozen;
5230 
5231 	struct spdk_io_channel *channel;
5232 
5233 	/* Current cluster for inflate operation */
5234 	uint64_t cluster;
5235 
5236 	/* For inflation force allocation of all unallocated clusters and remove
5237 	 * thin-provisioning. Otherwise only decouple parent and keep clone thin. */
5238 	bool allocate_all;
5239 
5240 	struct {
5241 		spdk_blob_id id;
5242 		struct spdk_blob *blob;
5243 	} original;
5244 	struct {
5245 		spdk_blob_id id;
5246 		struct spdk_blob *blob;
5247 	} new;
5248 
5249 	/* xattrs specified for snapshot/clones only. They have no impact on
5250 	 * the original blobs xattrs. */
5251 	const struct spdk_blob_xattr_opts *xattrs;
5252 };
5253 
5254 static void
5255 _spdk_bs_clone_snapshot_cleanup_finish(void *cb_arg, int bserrno)
5256 {
5257 	struct spdk_clone_snapshot_ctx *ctx = cb_arg;
5258 	struct spdk_bs_cpl *cpl = &ctx->cpl;
5259 
5260 	if (bserrno != 0) {
5261 		if (ctx->bserrno != 0) {
5262 			SPDK_ERRLOG("Cleanup error %d\n", bserrno);
5263 		} else {
5264 			ctx->bserrno = bserrno;
5265 		}
5266 	}
5267 
5268 	switch (cpl->type) {
5269 	case SPDK_BS_CPL_TYPE_BLOBID:
5270 		cpl->u.blobid.cb_fn(cpl->u.blobid.cb_arg, cpl->u.blobid.blobid, ctx->bserrno);
5271 		break;
5272 	case SPDK_BS_CPL_TYPE_BLOB_BASIC:
5273 		cpl->u.blob_basic.cb_fn(cpl->u.blob_basic.cb_arg, ctx->bserrno);
5274 		break;
5275 	default:
5276 		SPDK_UNREACHABLE();
5277 		break;
5278 	}
5279 
5280 	free(ctx);
5281 }
5282 
5283 static void
5284 _spdk_bs_snapshot_unfreeze_cpl(void *cb_arg, int bserrno)
5285 {
5286 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
5287 	struct spdk_blob *origblob = ctx->original.blob;
5288 
5289 	if (bserrno != 0) {
5290 		if (ctx->bserrno != 0) {
5291 			SPDK_ERRLOG("Unfreeze error %d\n", bserrno);
5292 		} else {
5293 			ctx->bserrno = bserrno;
5294 		}
5295 	}
5296 
5297 	ctx->original.id = origblob->id;
5298 	origblob->locked_operation_in_progress = false;
5299 
5300 	spdk_blob_close(origblob, _spdk_bs_clone_snapshot_cleanup_finish, ctx);
5301 }
5302 
5303 static void
5304 _spdk_bs_clone_snapshot_origblob_cleanup(void *cb_arg, int bserrno)
5305 {
5306 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
5307 	struct spdk_blob *origblob = ctx->original.blob;
5308 
5309 	if (bserrno != 0) {
5310 		if (ctx->bserrno != 0) {
5311 			SPDK_ERRLOG("Cleanup error %d\n", bserrno);
5312 		} else {
5313 			ctx->bserrno = bserrno;
5314 		}
5315 	}
5316 
5317 	if (ctx->frozen) {
5318 		/* Unfreeze any outstanding I/O */
5319 		_spdk_blob_unfreeze_io(origblob, _spdk_bs_snapshot_unfreeze_cpl, ctx);
5320 	} else {
5321 		_spdk_bs_snapshot_unfreeze_cpl(ctx, 0);
5322 	}
5323 
5324 }
5325 
5326 static void
5327 _spdk_bs_clone_snapshot_newblob_cleanup(void *cb_arg, int bserrno)
5328 {
5329 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
5330 	struct spdk_blob *newblob = ctx->new.blob;
5331 
5332 	if (bserrno != 0) {
5333 		if (ctx->bserrno != 0) {
5334 			SPDK_ERRLOG("Cleanup error %d\n", bserrno);
5335 		} else {
5336 			ctx->bserrno = bserrno;
5337 		}
5338 	}
5339 
5340 	ctx->new.id = newblob->id;
5341 	spdk_blob_close(newblob, _spdk_bs_clone_snapshot_origblob_cleanup, ctx);
5342 }
5343 
5344 /* END blob_cleanup */
5345 
5346 /* START spdk_bs_create_snapshot */
5347 
5348 static void
5349 _spdk_bs_snapshot_swap_cluster_maps(struct spdk_blob *blob1, struct spdk_blob *blob2)
5350 {
5351 	uint64_t *cluster_temp;
5352 	uint32_t *extent_page_temp;
5353 
5354 	cluster_temp = blob1->active.clusters;
5355 	blob1->active.clusters = blob2->active.clusters;
5356 	blob2->active.clusters = cluster_temp;
5357 
5358 	extent_page_temp = blob1->active.extent_pages;
5359 	blob1->active.extent_pages = blob2->active.extent_pages;
5360 	blob2->active.extent_pages = extent_page_temp;
5361 }
5362 
5363 static void
5364 _spdk_bs_snapshot_origblob_sync_cpl(void *cb_arg, int bserrno)
5365 {
5366 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
5367 	struct spdk_blob *origblob = ctx->original.blob;
5368 	struct spdk_blob *newblob = ctx->new.blob;
5369 
5370 	if (bserrno != 0) {
5371 		_spdk_bs_snapshot_swap_cluster_maps(newblob, origblob);
5372 		_spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
5373 		return;
5374 	}
5375 
5376 	/* Remove metadata descriptor SNAPSHOT_IN_PROGRESS */
5377 	bserrno = _spdk_blob_remove_xattr(newblob, SNAPSHOT_IN_PROGRESS, true);
5378 	if (bserrno != 0) {
5379 		_spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
5380 		return;
5381 	}
5382 
5383 	_spdk_bs_blob_list_add(ctx->original.blob);
5384 
5385 	spdk_blob_set_read_only(newblob);
5386 
5387 	/* sync snapshot metadata */
5388 	spdk_blob_sync_md(newblob, _spdk_bs_clone_snapshot_origblob_cleanup, ctx);
5389 }
5390 
5391 static void
5392 _spdk_bs_snapshot_newblob_sync_cpl(void *cb_arg, int bserrno)
5393 {
5394 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
5395 	struct spdk_blob *origblob = ctx->original.blob;
5396 	struct spdk_blob *newblob = ctx->new.blob;
5397 
5398 	if (bserrno != 0) {
5399 		/* return cluster map back to original */
5400 		_spdk_bs_snapshot_swap_cluster_maps(newblob, origblob);
5401 		_spdk_bs_clone_snapshot_newblob_cleanup(ctx, bserrno);
5402 		return;
5403 	}
5404 
5405 	/* Set internal xattr for snapshot id */
5406 	bserrno = _spdk_blob_set_xattr(origblob, BLOB_SNAPSHOT, &newblob->id, sizeof(spdk_blob_id), true);
5407 	if (bserrno != 0) {
5408 		/* return cluster map back to original */
5409 		_spdk_bs_snapshot_swap_cluster_maps(newblob, origblob);
5410 		_spdk_bs_clone_snapshot_newblob_cleanup(ctx, bserrno);
5411 		return;
5412 	}
5413 
5414 	_spdk_bs_blob_list_remove(origblob);
5415 	origblob->parent_id = newblob->id;
5416 
5417 	/* Create new back_bs_dev for snapshot */
5418 	origblob->back_bs_dev = spdk_bs_create_blob_bs_dev(newblob);
5419 	if (origblob->back_bs_dev == NULL) {
5420 		/* return cluster map back to original */
5421 		_spdk_bs_snapshot_swap_cluster_maps(newblob, origblob);
5422 		_spdk_bs_clone_snapshot_newblob_cleanup(ctx, -EINVAL);
5423 		return;
5424 	}
5425 
5426 	/* set clone blob as thin provisioned */
5427 	_spdk_blob_set_thin_provision(origblob);
5428 
5429 	_spdk_bs_blob_list_add(newblob);
5430 
5431 	/* sync clone metadata */
5432 	spdk_blob_sync_md(origblob, _spdk_bs_snapshot_origblob_sync_cpl, ctx);
5433 }
5434 
5435 static void
5436 _spdk_bs_snapshot_freeze_cpl(void *cb_arg, int rc)
5437 {
5438 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
5439 	struct spdk_blob *origblob = ctx->original.blob;
5440 	struct spdk_blob *newblob = ctx->new.blob;
5441 	int bserrno;
5442 
5443 	if (rc != 0) {
5444 		_spdk_bs_clone_snapshot_newblob_cleanup(ctx, rc);
5445 		return;
5446 	}
5447 
5448 	ctx->frozen = true;
5449 
5450 	/* set new back_bs_dev for snapshot */
5451 	newblob->back_bs_dev = origblob->back_bs_dev;
5452 	/* Set invalid flags from origblob */
5453 	newblob->invalid_flags = origblob->invalid_flags;
5454 
5455 	/* inherit parent from original blob if set */
5456 	newblob->parent_id = origblob->parent_id;
5457 	if (origblob->parent_id != SPDK_BLOBID_INVALID) {
5458 		/* Set internal xattr for snapshot id */
5459 		bserrno = _spdk_blob_set_xattr(newblob, BLOB_SNAPSHOT,
5460 					       &origblob->parent_id, sizeof(spdk_blob_id), true);
5461 		if (bserrno != 0) {
5462 			_spdk_bs_clone_snapshot_newblob_cleanup(ctx, bserrno);
5463 			return;
5464 		}
5465 	}
5466 
5467 	/* swap cluster maps */
5468 	_spdk_bs_snapshot_swap_cluster_maps(newblob, origblob);
5469 
5470 	/* Set the clear method on the new blob to match the original. */
5471 	_spdk_blob_set_clear_method(newblob, origblob->clear_method);
5472 
5473 	/* sync snapshot metadata */
5474 	spdk_blob_sync_md(newblob, _spdk_bs_snapshot_newblob_sync_cpl, ctx);
5475 }
5476 
5477 static void
5478 _spdk_bs_snapshot_newblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
5479 {
5480 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
5481 	struct spdk_blob *origblob = ctx->original.blob;
5482 	struct spdk_blob *newblob = _blob;
5483 
5484 	if (bserrno != 0) {
5485 		_spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
5486 		return;
5487 	}
5488 
5489 	ctx->new.blob = newblob;
5490 	assert(spdk_blob_is_thin_provisioned(newblob));
5491 	assert(spdk_mem_all_zero(newblob->active.clusters,
5492 				 newblob->active.num_clusters * sizeof(*newblob->active.clusters)));
5493 	assert(spdk_mem_all_zero(newblob->active.extent_pages,
5494 				 newblob->active.num_extent_pages * sizeof(*newblob->active.extent_pages)));
5495 
5496 	_spdk_blob_freeze_io(origblob, _spdk_bs_snapshot_freeze_cpl, ctx);
5497 }
5498 
5499 static void
5500 _spdk_bs_snapshot_newblob_create_cpl(void *cb_arg, spdk_blob_id blobid, int bserrno)
5501 {
5502 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
5503 	struct spdk_blob *origblob = ctx->original.blob;
5504 
5505 	if (bserrno != 0) {
5506 		_spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
5507 		return;
5508 	}
5509 
5510 	ctx->new.id = blobid;
5511 	ctx->cpl.u.blobid.blobid = blobid;
5512 
5513 	spdk_bs_open_blob(origblob->bs, ctx->new.id, _spdk_bs_snapshot_newblob_open_cpl, ctx);
5514 }
5515 
5516 
5517 static void
5518 _spdk_bs_xattr_snapshot(void *arg, const char *name,
5519 			const void **value, size_t *value_len)
5520 {
5521 	assert(strncmp(name, SNAPSHOT_IN_PROGRESS, sizeof(SNAPSHOT_IN_PROGRESS)) == 0);
5522 
5523 	struct spdk_blob *blob = (struct spdk_blob *)arg;
5524 	*value = &blob->id;
5525 	*value_len = sizeof(blob->id);
5526 }
5527 
5528 static void
5529 _spdk_bs_snapshot_origblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
5530 {
5531 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
5532 	struct spdk_blob_opts opts;
5533 	struct spdk_blob_xattr_opts internal_xattrs;
5534 	char *xattrs_names[] = { SNAPSHOT_IN_PROGRESS };
5535 
5536 	if (bserrno != 0) {
5537 		_spdk_bs_clone_snapshot_cleanup_finish(ctx, bserrno);
5538 		return;
5539 	}
5540 
5541 	ctx->original.blob = _blob;
5542 
5543 	if (_blob->data_ro || _blob->md_ro) {
5544 		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Cannot create snapshot from read only blob with id %lu\n",
5545 			      _blob->id);
5546 		ctx->bserrno = -EINVAL;
5547 		spdk_blob_close(_blob, _spdk_bs_clone_snapshot_cleanup_finish, ctx);
5548 		return;
5549 	}
5550 
5551 	if (_blob->locked_operation_in_progress) {
5552 		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Cannot create snapshot - another operation in progress\n");
5553 		ctx->bserrno = -EBUSY;
5554 		spdk_blob_close(_blob, _spdk_bs_clone_snapshot_cleanup_finish, ctx);
5555 		return;
5556 	}
5557 
5558 	_blob->locked_operation_in_progress = true;
5559 
5560 	spdk_blob_opts_init(&opts);
5561 	_spdk_blob_xattrs_init(&internal_xattrs);
5562 
5563 	/* Change the size of new blob to the same as in original blob,
5564 	 * but do not allocate clusters */
5565 	opts.thin_provision = true;
5566 	opts.num_clusters = spdk_blob_get_num_clusters(_blob);
5567 	opts.use_extent_table = _blob->use_extent_table;
5568 
5569 	/* If there are any xattrs specified for snapshot, set them now */
5570 	if (ctx->xattrs) {
5571 		memcpy(&opts.xattrs, ctx->xattrs, sizeof(*ctx->xattrs));
5572 	}
5573 	/* Set internal xattr SNAPSHOT_IN_PROGRESS */
5574 	internal_xattrs.count = 1;
5575 	internal_xattrs.ctx = _blob;
5576 	internal_xattrs.names = xattrs_names;
5577 	internal_xattrs.get_value = _spdk_bs_xattr_snapshot;
5578 
5579 	_spdk_bs_create_blob(_blob->bs, &opts, &internal_xattrs,
5580 			     _spdk_bs_snapshot_newblob_create_cpl, ctx);
5581 }
5582 
5583 void spdk_bs_create_snapshot(struct spdk_blob_store *bs, spdk_blob_id blobid,
5584 			     const struct spdk_blob_xattr_opts *snapshot_xattrs,
5585 			     spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
5586 {
5587 	struct spdk_clone_snapshot_ctx *ctx = calloc(1, sizeof(*ctx));
5588 
5589 	if (!ctx) {
5590 		cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOMEM);
5591 		return;
5592 	}
5593 	ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOBID;
5594 	ctx->cpl.u.blobid.cb_fn = cb_fn;
5595 	ctx->cpl.u.blobid.cb_arg = cb_arg;
5596 	ctx->cpl.u.blobid.blobid = SPDK_BLOBID_INVALID;
5597 	ctx->bserrno = 0;
5598 	ctx->frozen = false;
5599 	ctx->original.id = blobid;
5600 	ctx->xattrs = snapshot_xattrs;
5601 
5602 	spdk_bs_open_blob(bs, ctx->original.id, _spdk_bs_snapshot_origblob_open_cpl, ctx);
5603 }
5604 /* END spdk_bs_create_snapshot */
5605 
5606 /* START spdk_bs_create_clone */
5607 
5608 static void
5609 _spdk_bs_xattr_clone(void *arg, const char *name,
5610 		     const void **value, size_t *value_len)
5611 {
5612 	assert(strncmp(name, BLOB_SNAPSHOT, sizeof(BLOB_SNAPSHOT)) == 0);
5613 
5614 	struct spdk_blob *blob = (struct spdk_blob *)arg;
5615 	*value = &blob->id;
5616 	*value_len = sizeof(blob->id);
5617 }
5618 
5619 static void
5620 _spdk_bs_clone_newblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
5621 {
5622 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
5623 	struct spdk_blob *clone = _blob;
5624 
5625 	ctx->new.blob = clone;
5626 	_spdk_bs_blob_list_add(clone);
5627 
5628 	spdk_blob_close(clone, _spdk_bs_clone_snapshot_origblob_cleanup, ctx);
5629 }
5630 
5631 static void
5632 _spdk_bs_clone_newblob_create_cpl(void *cb_arg, spdk_blob_id blobid, int bserrno)
5633 {
5634 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
5635 
5636 	ctx->cpl.u.blobid.blobid = blobid;
5637 	spdk_bs_open_blob(ctx->original.blob->bs, blobid, _spdk_bs_clone_newblob_open_cpl, ctx);
5638 }
5639 
5640 static void
5641 _spdk_bs_clone_origblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
5642 {
5643 	struct spdk_clone_snapshot_ctx	*ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
5644 	struct spdk_blob_opts		opts;
5645 	struct spdk_blob_xattr_opts internal_xattrs;
5646 	char *xattr_names[] = { BLOB_SNAPSHOT };
5647 
5648 	if (bserrno != 0) {
5649 		_spdk_bs_clone_snapshot_cleanup_finish(ctx, bserrno);
5650 		return;
5651 	}
5652 
5653 	ctx->original.blob = _blob;
5654 
5655 	if (!_blob->data_ro || !_blob->md_ro) {
5656 		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Clone not from read-only blob\n");
5657 		ctx->bserrno = -EINVAL;
5658 		spdk_blob_close(_blob, _spdk_bs_clone_snapshot_cleanup_finish, ctx);
5659 		return;
5660 	}
5661 
5662 	if (_blob->locked_operation_in_progress) {
5663 		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Cannot create clone - another operation in progress\n");
5664 		ctx->bserrno = -EBUSY;
5665 		spdk_blob_close(_blob, _spdk_bs_clone_snapshot_cleanup_finish, ctx);
5666 		return;
5667 	}
5668 
5669 	_blob->locked_operation_in_progress = true;
5670 
5671 	spdk_blob_opts_init(&opts);
5672 	_spdk_blob_xattrs_init(&internal_xattrs);
5673 
5674 	opts.thin_provision = true;
5675 	opts.num_clusters = spdk_blob_get_num_clusters(_blob);
5676 	opts.use_extent_table = _blob->use_extent_table;
5677 	if (ctx->xattrs) {
5678 		memcpy(&opts.xattrs, ctx->xattrs, sizeof(*ctx->xattrs));
5679 	}
5680 
5681 	/* Set internal xattr BLOB_SNAPSHOT */
5682 	internal_xattrs.count = 1;
5683 	internal_xattrs.ctx = _blob;
5684 	internal_xattrs.names = xattr_names;
5685 	internal_xattrs.get_value = _spdk_bs_xattr_clone;
5686 
5687 	_spdk_bs_create_blob(_blob->bs, &opts, &internal_xattrs,
5688 			     _spdk_bs_clone_newblob_create_cpl, ctx);
5689 }
5690 
5691 void spdk_bs_create_clone(struct spdk_blob_store *bs, spdk_blob_id blobid,
5692 			  const struct spdk_blob_xattr_opts *clone_xattrs,
5693 			  spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
5694 {
5695 	struct spdk_clone_snapshot_ctx	*ctx = calloc(1, sizeof(*ctx));
5696 
5697 	if (!ctx) {
5698 		cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOMEM);
5699 		return;
5700 	}
5701 
5702 	ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOBID;
5703 	ctx->cpl.u.blobid.cb_fn = cb_fn;
5704 	ctx->cpl.u.blobid.cb_arg = cb_arg;
5705 	ctx->cpl.u.blobid.blobid = SPDK_BLOBID_INVALID;
5706 	ctx->bserrno = 0;
5707 	ctx->xattrs = clone_xattrs;
5708 	ctx->original.id = blobid;
5709 
5710 	spdk_bs_open_blob(bs, ctx->original.id, _spdk_bs_clone_origblob_open_cpl, ctx);
5711 }
5712 
5713 /* END spdk_bs_create_clone */
5714 
5715 /* START spdk_bs_inflate_blob */
5716 
5717 static void
5718 _spdk_bs_inflate_blob_set_parent_cpl(void *cb_arg, struct spdk_blob *_parent, int bserrno)
5719 {
5720 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
5721 	struct spdk_blob *_blob = ctx->original.blob;
5722 
5723 	if (bserrno != 0) {
5724 		_spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
5725 		return;
5726 	}
5727 
5728 	assert(_parent != NULL);
5729 
5730 	_spdk_bs_blob_list_remove(_blob);
5731 	_blob->parent_id = _parent->id;
5732 	_spdk_blob_set_xattr(_blob, BLOB_SNAPSHOT, &_blob->parent_id,
5733 			     sizeof(spdk_blob_id), true);
5734 
5735 	_blob->back_bs_dev->destroy(_blob->back_bs_dev);
5736 	_blob->back_bs_dev = spdk_bs_create_blob_bs_dev(_parent);
5737 	_spdk_bs_blob_list_add(_blob);
5738 
5739 	spdk_blob_sync_md(_blob, _spdk_bs_clone_snapshot_origblob_cleanup, ctx);
5740 }
5741 
5742 static void
5743 _spdk_bs_inflate_blob_done(void *cb_arg, int bserrno)
5744 {
5745 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
5746 	struct spdk_blob *_blob = ctx->original.blob;
5747 	struct spdk_blob *_parent;
5748 
5749 	if (bserrno != 0) {
5750 		_spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
5751 		return;
5752 	}
5753 
5754 	if (ctx->allocate_all) {
5755 		/* remove thin provisioning */
5756 		_spdk_bs_blob_list_remove(_blob);
5757 		_spdk_blob_remove_xattr(_blob, BLOB_SNAPSHOT, true);
5758 		_blob->invalid_flags = _blob->invalid_flags & ~SPDK_BLOB_THIN_PROV;
5759 		_blob->back_bs_dev->destroy(_blob->back_bs_dev);
5760 		_blob->back_bs_dev = NULL;
5761 		_blob->parent_id = SPDK_BLOBID_INVALID;
5762 	} else {
5763 		_parent = ((struct spdk_blob_bs_dev *)(_blob->back_bs_dev))->blob;
5764 		if (_parent->parent_id != SPDK_BLOBID_INVALID) {
5765 			/* We must change the parent of the inflated blob */
5766 			spdk_bs_open_blob(_blob->bs, _parent->parent_id,
5767 					  _spdk_bs_inflate_blob_set_parent_cpl, ctx);
5768 			return;
5769 		}
5770 
5771 		_spdk_bs_blob_list_remove(_blob);
5772 		_spdk_blob_remove_xattr(_blob, BLOB_SNAPSHOT, true);
5773 		_blob->parent_id = SPDK_BLOBID_INVALID;
5774 		_blob->back_bs_dev->destroy(_blob->back_bs_dev);
5775 		_blob->back_bs_dev = spdk_bs_create_zeroes_dev();
5776 	}
5777 
5778 	_blob->state = SPDK_BLOB_STATE_DIRTY;
5779 	spdk_blob_sync_md(_blob, _spdk_bs_clone_snapshot_origblob_cleanup, ctx);
5780 }
5781 
5782 /* Check if cluster needs allocation */
5783 static inline bool
5784 _spdk_bs_cluster_needs_allocation(struct spdk_blob *blob, uint64_t cluster, bool allocate_all)
5785 {
5786 	struct spdk_blob_bs_dev *b;
5787 
5788 	assert(blob != NULL);
5789 
5790 	if (blob->active.clusters[cluster] != 0) {
5791 		/* Cluster is already allocated */
5792 		return false;
5793 	}
5794 
5795 	if (blob->parent_id == SPDK_BLOBID_INVALID) {
5796 		/* Blob have no parent blob */
5797 		return allocate_all;
5798 	}
5799 
5800 	b = (struct spdk_blob_bs_dev *)blob->back_bs_dev;
5801 	return (allocate_all || b->blob->active.clusters[cluster] != 0);
5802 }
5803 
5804 static void
5805 _spdk_bs_inflate_blob_touch_next(void *cb_arg, int bserrno)
5806 {
5807 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
5808 	struct spdk_blob *_blob = ctx->original.blob;
5809 	uint64_t offset;
5810 
5811 	if (bserrno != 0) {
5812 		_spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
5813 		return;
5814 	}
5815 
5816 	for (; ctx->cluster < _blob->active.num_clusters; ctx->cluster++) {
5817 		if (_spdk_bs_cluster_needs_allocation(_blob, ctx->cluster, ctx->allocate_all)) {
5818 			break;
5819 		}
5820 	}
5821 
5822 	if (ctx->cluster < _blob->active.num_clusters) {
5823 		offset = _spdk_bs_cluster_to_lba(_blob->bs, ctx->cluster);
5824 
5825 		/* We may safely increment a cluster before write */
5826 		ctx->cluster++;
5827 
5828 		/* Use zero length write to touch a cluster */
5829 		spdk_blob_io_write(_blob, ctx->channel, NULL, offset, 0,
5830 				   _spdk_bs_inflate_blob_touch_next, ctx);
5831 	} else {
5832 		_spdk_bs_inflate_blob_done(cb_arg, bserrno);
5833 	}
5834 }
5835 
5836 static void
5837 _spdk_bs_inflate_blob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
5838 {
5839 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
5840 	uint64_t lfc; /* lowest free cluster */
5841 	uint64_t i;
5842 
5843 	if (bserrno != 0) {
5844 		_spdk_bs_clone_snapshot_cleanup_finish(ctx, bserrno);
5845 		return;
5846 	}
5847 
5848 	ctx->original.blob = _blob;
5849 
5850 	if (_blob->locked_operation_in_progress) {
5851 		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Cannot inflate blob - another operation in progress\n");
5852 		ctx->bserrno = -EBUSY;
5853 		spdk_blob_close(_blob, _spdk_bs_clone_snapshot_cleanup_finish, ctx);
5854 		return;
5855 	}
5856 
5857 	_blob->locked_operation_in_progress = true;
5858 
5859 	if (!ctx->allocate_all && _blob->parent_id == SPDK_BLOBID_INVALID) {
5860 		/* This blob have no parent, so we cannot decouple it. */
5861 		SPDK_ERRLOG("Cannot decouple parent of blob with no parent.\n");
5862 		_spdk_bs_clone_snapshot_origblob_cleanup(ctx, -EINVAL);
5863 		return;
5864 	}
5865 
5866 	if (spdk_blob_is_thin_provisioned(_blob) == false) {
5867 		/* This is not thin provisioned blob. No need to inflate. */
5868 		_spdk_bs_clone_snapshot_origblob_cleanup(ctx, 0);
5869 		return;
5870 	}
5871 
5872 	/* Do two passes - one to verify that we can obtain enough clusters
5873 	 * and another to actually claim them.
5874 	 */
5875 	lfc = 0;
5876 	for (i = 0; i < _blob->active.num_clusters; i++) {
5877 		if (_spdk_bs_cluster_needs_allocation(_blob, i, ctx->allocate_all)) {
5878 			lfc = spdk_bit_array_find_first_clear(_blob->bs->used_clusters, lfc);
5879 			if (lfc == UINT32_MAX) {
5880 				/* No more free clusters. Cannot satisfy the request */
5881 				_spdk_bs_clone_snapshot_origblob_cleanup(ctx, -ENOSPC);
5882 				return;
5883 			}
5884 			lfc++;
5885 		}
5886 	}
5887 
5888 	ctx->cluster = 0;
5889 	_spdk_bs_inflate_blob_touch_next(ctx, 0);
5890 }
5891 
5892 static void
5893 _spdk_bs_inflate_blob(struct spdk_blob_store *bs, struct spdk_io_channel *channel,
5894 		      spdk_blob_id blobid, bool allocate_all, spdk_blob_op_complete cb_fn, void *cb_arg)
5895 {
5896 	struct spdk_clone_snapshot_ctx *ctx = calloc(1, sizeof(*ctx));
5897 
5898 	if (!ctx) {
5899 		cb_fn(cb_arg, -ENOMEM);
5900 		return;
5901 	}
5902 	ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
5903 	ctx->cpl.u.bs_basic.cb_fn = cb_fn;
5904 	ctx->cpl.u.bs_basic.cb_arg = cb_arg;
5905 	ctx->bserrno = 0;
5906 	ctx->original.id = blobid;
5907 	ctx->channel = channel;
5908 	ctx->allocate_all = allocate_all;
5909 
5910 	spdk_bs_open_blob(bs, ctx->original.id, _spdk_bs_inflate_blob_open_cpl, ctx);
5911 }
5912 
5913 void
5914 spdk_bs_inflate_blob(struct spdk_blob_store *bs, struct spdk_io_channel *channel,
5915 		     spdk_blob_id blobid, spdk_blob_op_complete cb_fn, void *cb_arg)
5916 {
5917 	_spdk_bs_inflate_blob(bs, channel, blobid, true, cb_fn, cb_arg);
5918 }
5919 
5920 void
5921 spdk_bs_blob_decouple_parent(struct spdk_blob_store *bs, struct spdk_io_channel *channel,
5922 			     spdk_blob_id blobid, spdk_blob_op_complete cb_fn, void *cb_arg)
5923 {
5924 	_spdk_bs_inflate_blob(bs, channel, blobid, false, cb_fn, cb_arg);
5925 }
5926 /* END spdk_bs_inflate_blob */
5927 
5928 /* START spdk_blob_resize */
5929 struct spdk_bs_resize_ctx {
5930 	spdk_blob_op_complete cb_fn;
5931 	void *cb_arg;
5932 	struct spdk_blob *blob;
5933 	uint64_t sz;
5934 	int rc;
5935 };
5936 
5937 static void
5938 _spdk_bs_resize_unfreeze_cpl(void *cb_arg, int rc)
5939 {
5940 	struct spdk_bs_resize_ctx *ctx = (struct spdk_bs_resize_ctx *)cb_arg;
5941 
5942 	if (rc != 0) {
5943 		SPDK_ERRLOG("Unfreeze failed, rc=%d\n", rc);
5944 	}
5945 
5946 	if (ctx->rc != 0) {
5947 		SPDK_ERRLOG("Unfreeze failed, ctx->rc=%d\n", ctx->rc);
5948 		rc = ctx->rc;
5949 	}
5950 
5951 	ctx->blob->locked_operation_in_progress = false;
5952 
5953 	ctx->cb_fn(ctx->cb_arg, rc);
5954 	free(ctx);
5955 }
5956 
5957 static void
5958 _spdk_bs_resize_freeze_cpl(void *cb_arg, int rc)
5959 {
5960 	struct spdk_bs_resize_ctx *ctx = (struct spdk_bs_resize_ctx *)cb_arg;
5961 
5962 	if (rc != 0) {
5963 		ctx->blob->locked_operation_in_progress = false;
5964 		ctx->cb_fn(ctx->cb_arg, rc);
5965 		free(ctx);
5966 		return;
5967 	}
5968 
5969 	ctx->rc = _spdk_blob_resize(ctx->blob, ctx->sz);
5970 
5971 	_spdk_blob_unfreeze_io(ctx->blob, _spdk_bs_resize_unfreeze_cpl, ctx);
5972 }
5973 
5974 void
5975 spdk_blob_resize(struct spdk_blob *blob, uint64_t sz, spdk_blob_op_complete cb_fn, void *cb_arg)
5976 {
5977 	struct spdk_bs_resize_ctx *ctx;
5978 
5979 	_spdk_blob_verify_md_op(blob);
5980 
5981 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Resizing blob %lu to %lu clusters\n", blob->id, sz);
5982 
5983 	if (blob->md_ro) {
5984 		cb_fn(cb_arg, -EPERM);
5985 		return;
5986 	}
5987 
5988 	if (sz == blob->active.num_clusters) {
5989 		cb_fn(cb_arg, 0);
5990 		return;
5991 	}
5992 
5993 	if (blob->locked_operation_in_progress) {
5994 		cb_fn(cb_arg, -EBUSY);
5995 		return;
5996 	}
5997 
5998 	ctx = calloc(1, sizeof(*ctx));
5999 	if (!ctx) {
6000 		cb_fn(cb_arg, -ENOMEM);
6001 		return;
6002 	}
6003 
6004 	blob->locked_operation_in_progress = true;
6005 	ctx->cb_fn = cb_fn;
6006 	ctx->cb_arg = cb_arg;
6007 	ctx->blob = blob;
6008 	ctx->sz = sz;
6009 	_spdk_blob_freeze_io(blob, _spdk_bs_resize_freeze_cpl, ctx);
6010 }
6011 
6012 /* END spdk_blob_resize */
6013 
6014 
6015 /* START spdk_bs_delete_blob */
6016 
6017 static void
6018 _spdk_bs_delete_close_cpl(void *cb_arg, int bserrno)
6019 {
6020 	spdk_bs_sequence_t *seq = cb_arg;
6021 
6022 	spdk_bs_sequence_finish(seq, bserrno);
6023 }
6024 
6025 static void
6026 _spdk_bs_delete_persist_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
6027 {
6028 	struct spdk_blob *blob = cb_arg;
6029 
6030 	if (bserrno != 0) {
6031 		/*
6032 		 * We already removed this blob from the blobstore tailq, so
6033 		 *  we need to free it here since this is the last reference
6034 		 *  to it.
6035 		 */
6036 		_spdk_blob_free(blob);
6037 		_spdk_bs_delete_close_cpl(seq, bserrno);
6038 		return;
6039 	}
6040 
6041 	/*
6042 	 * This will immediately decrement the ref_count and call
6043 	 *  the completion routine since the metadata state is clean.
6044 	 *  By calling spdk_blob_close, we reduce the number of call
6045 	 *  points into code that touches the blob->open_ref count
6046 	 *  and the blobstore's blob list.
6047 	 */
6048 	spdk_blob_close(blob, _spdk_bs_delete_close_cpl, seq);
6049 }
6050 
6051 struct delete_snapshot_ctx {
6052 	struct spdk_blob_list *parent_snapshot_entry;
6053 	struct spdk_blob *snapshot;
6054 	bool snapshot_md_ro;
6055 	struct spdk_blob *clone;
6056 	bool clone_md_ro;
6057 	spdk_blob_op_with_handle_complete cb_fn;
6058 	void *cb_arg;
6059 	int bserrno;
6060 };
6061 
6062 static void
6063 _spdk_delete_blob_cleanup_finish(void *cb_arg, int bserrno)
6064 {
6065 	struct delete_snapshot_ctx *ctx = cb_arg;
6066 
6067 	if (bserrno != 0) {
6068 		SPDK_ERRLOG("Snapshot cleanup error %d\n", bserrno);
6069 	}
6070 
6071 	assert(ctx != NULL);
6072 
6073 	if (bserrno != 0 && ctx->bserrno == 0) {
6074 		ctx->bserrno = bserrno;
6075 	}
6076 
6077 	ctx->cb_fn(ctx->cb_arg, ctx->snapshot, ctx->bserrno);
6078 	free(ctx);
6079 }
6080 
6081 static void
6082 _spdk_delete_snapshot_cleanup_snapshot(void *cb_arg, int bserrno)
6083 {
6084 	struct delete_snapshot_ctx *ctx = cb_arg;
6085 
6086 	if (bserrno != 0) {
6087 		ctx->bserrno = bserrno;
6088 		SPDK_ERRLOG("Clone cleanup error %d\n", bserrno);
6089 	}
6090 
6091 	/* open_ref == 1 menas that only deletion context has opened this snapshot
6092 	 * open_ref == 2 menas that clone has opened this snapshot as well,
6093 	 * so we have to add it back to the blobs list */
6094 	if (ctx->snapshot->open_ref == 2) {
6095 		TAILQ_INSERT_HEAD(&ctx->snapshot->bs->blobs, ctx->snapshot, link);
6096 	}
6097 
6098 	ctx->snapshot->locked_operation_in_progress = false;
6099 	ctx->snapshot->md_ro = ctx->snapshot_md_ro;
6100 
6101 	spdk_blob_close(ctx->snapshot, _spdk_delete_blob_cleanup_finish, ctx);
6102 }
6103 
6104 static void
6105 _spdk_delete_snapshot_cleanup_clone(void *cb_arg, int bserrno)
6106 {
6107 	struct delete_snapshot_ctx *ctx = cb_arg;
6108 
6109 	ctx->clone->locked_operation_in_progress = false;
6110 	ctx->clone->md_ro = ctx->clone_md_ro;
6111 
6112 	spdk_blob_close(ctx->clone, _spdk_delete_snapshot_cleanup_snapshot, ctx);
6113 }
6114 
6115 static void
6116 _spdk_delete_snapshot_unfreeze_cpl(void *cb_arg, int bserrno)
6117 {
6118 	struct delete_snapshot_ctx *ctx = cb_arg;
6119 
6120 	if (bserrno) {
6121 		ctx->bserrno = bserrno;
6122 		_spdk_delete_snapshot_cleanup_clone(ctx, 0);
6123 		return;
6124 	}
6125 
6126 	ctx->clone->locked_operation_in_progress = false;
6127 	spdk_blob_close(ctx->clone, _spdk_delete_blob_cleanup_finish, ctx);
6128 }
6129 
6130 static void
6131 _spdk_delete_snapshot_sync_snapshot_cpl(void *cb_arg, int bserrno)
6132 {
6133 	struct delete_snapshot_ctx *ctx = cb_arg;
6134 	struct spdk_blob_list *parent_snapshot_entry = NULL;
6135 	struct spdk_blob_list *snapshot_entry = NULL;
6136 	struct spdk_blob_list *clone_entry = NULL;
6137 	struct spdk_blob_list *snapshot_clone_entry = NULL;
6138 
6139 	if (bserrno) {
6140 		SPDK_ERRLOG("Failed to sync MD on blob\n");
6141 		ctx->bserrno = bserrno;
6142 		_spdk_delete_snapshot_cleanup_clone(ctx, 0);
6143 		return;
6144 	}
6145 
6146 	/* Get snapshot entry for the snapshot we want to remove */
6147 	snapshot_entry = _spdk_bs_get_snapshot_entry(ctx->snapshot->bs, ctx->snapshot->id);
6148 
6149 	assert(snapshot_entry != NULL);
6150 
6151 	/* Remove clone entry in this snapshot (at this point there can be only one clone) */
6152 	clone_entry = TAILQ_FIRST(&snapshot_entry->clones);
6153 	assert(clone_entry != NULL);
6154 	TAILQ_REMOVE(&snapshot_entry->clones, clone_entry, link);
6155 	snapshot_entry->clone_count--;
6156 	assert(TAILQ_EMPTY(&snapshot_entry->clones));
6157 
6158 	if (ctx->snapshot->parent_id != SPDK_BLOBID_INVALID) {
6159 		/* This snapshot is at the same time a clone of another snapshot - we need to
6160 		 * update parent snapshot (remove current clone, add new one inherited from
6161 		 * the snapshot that is being removed) */
6162 
6163 		/* Get snapshot entry for parent snapshot and clone entry within that snapshot for
6164 		 * snapshot that we are removing */
6165 		_spdk_blob_get_snapshot_and_clone_entries(ctx->snapshot, &parent_snapshot_entry,
6166 				&snapshot_clone_entry);
6167 
6168 		/* Switch clone entry in parent snapshot */
6169 		TAILQ_INSERT_TAIL(&parent_snapshot_entry->clones, clone_entry, link);
6170 		TAILQ_REMOVE(&parent_snapshot_entry->clones, snapshot_clone_entry, link);
6171 		free(snapshot_clone_entry);
6172 	} else {
6173 		/* No parent snapshot - just remove clone entry */
6174 		free(clone_entry);
6175 	}
6176 
6177 	/* Restore md_ro flags */
6178 	ctx->clone->md_ro = ctx->clone_md_ro;
6179 	ctx->snapshot->md_ro = ctx->snapshot_md_ro;
6180 
6181 	_spdk_blob_unfreeze_io(ctx->clone, _spdk_delete_snapshot_unfreeze_cpl, ctx);
6182 }
6183 
6184 static void
6185 _spdk_delete_snapshot_sync_clone_cpl(void *cb_arg, int bserrno)
6186 {
6187 	struct delete_snapshot_ctx *ctx = cb_arg;
6188 	uint64_t i;
6189 
6190 	ctx->snapshot->md_ro = false;
6191 
6192 	if (bserrno) {
6193 		SPDK_ERRLOG("Failed to sync MD on clone\n");
6194 		ctx->bserrno = bserrno;
6195 
6196 		/* Restore snapshot to previous state */
6197 		bserrno = _spdk_blob_remove_xattr(ctx->snapshot, SNAPSHOT_PENDING_REMOVAL, true);
6198 		if (bserrno != 0) {
6199 			_spdk_delete_snapshot_cleanup_clone(ctx, bserrno);
6200 			return;
6201 		}
6202 
6203 		spdk_blob_sync_md(ctx->snapshot, _spdk_delete_snapshot_cleanup_clone, ctx);
6204 		return;
6205 	}
6206 
6207 	/* Clear cluster map entries for snapshot */
6208 	for (i = 0; i < ctx->snapshot->active.num_clusters && i < ctx->clone->active.num_clusters; i++) {
6209 		if (ctx->clone->active.clusters[i] == ctx->snapshot->active.clusters[i]) {
6210 			ctx->snapshot->active.clusters[i] = 0;
6211 		}
6212 	}
6213 
6214 	ctx->snapshot->state = SPDK_BLOB_STATE_DIRTY;
6215 
6216 	if (ctx->parent_snapshot_entry != NULL) {
6217 		ctx->snapshot->back_bs_dev = NULL;
6218 	}
6219 
6220 	spdk_blob_sync_md(ctx->snapshot, _spdk_delete_snapshot_sync_snapshot_cpl, ctx);
6221 }
6222 
6223 static void
6224 _spdk_delete_snapshot_sync_snapshot_xattr_cpl(void *cb_arg, int bserrno)
6225 {
6226 	struct delete_snapshot_ctx *ctx = cb_arg;
6227 	uint64_t i;
6228 
6229 	/* Temporarily override md_ro flag for clone for MD modification */
6230 	ctx->clone_md_ro = ctx->clone->md_ro;
6231 	ctx->clone->md_ro = false;
6232 
6233 	if (bserrno) {
6234 		SPDK_ERRLOG("Failed to sync MD with xattr on blob\n");
6235 		ctx->bserrno = bserrno;
6236 		_spdk_delete_snapshot_cleanup_clone(ctx, 0);
6237 		return;
6238 	}
6239 
6240 	/* Copy snapshot map to clone map (only unallocated clusters in clone) */
6241 	for (i = 0; i < ctx->snapshot->active.num_clusters && i < ctx->clone->active.num_clusters; i++) {
6242 		if (ctx->clone->active.clusters[i] == 0) {
6243 			ctx->clone->active.clusters[i] = ctx->snapshot->active.clusters[i];
6244 		}
6245 	}
6246 
6247 	/* Delete old backing bs_dev from clone (related to snapshot that will be removed) */
6248 	ctx->clone->back_bs_dev->destroy(ctx->clone->back_bs_dev);
6249 
6250 	/* Set/remove snapshot xattr and switch parent ID and backing bs_dev on clone... */
6251 	if (ctx->parent_snapshot_entry != NULL) {
6252 		/* ...to parent snapshot */
6253 		ctx->clone->parent_id = ctx->parent_snapshot_entry->id;
6254 		ctx->clone->back_bs_dev = ctx->snapshot->back_bs_dev;
6255 		_spdk_blob_set_xattr(ctx->clone, BLOB_SNAPSHOT, &ctx->parent_snapshot_entry->id,
6256 				     sizeof(spdk_blob_id),
6257 				     true);
6258 	} else {
6259 		/* ...to blobid invalid and zeroes dev */
6260 		ctx->clone->parent_id = SPDK_BLOBID_INVALID;
6261 		ctx->clone->back_bs_dev = spdk_bs_create_zeroes_dev();
6262 		_spdk_blob_remove_xattr(ctx->clone, BLOB_SNAPSHOT, true);
6263 	}
6264 
6265 	spdk_blob_sync_md(ctx->clone, _spdk_delete_snapshot_sync_clone_cpl, ctx);
6266 }
6267 
6268 static void
6269 _spdk_delete_snapshot_freeze_io_cb(void *cb_arg, int bserrno)
6270 {
6271 	struct delete_snapshot_ctx *ctx = cb_arg;
6272 
6273 	if (bserrno) {
6274 		SPDK_ERRLOG("Failed to freeze I/O on clone\n");
6275 		ctx->bserrno = bserrno;
6276 		_spdk_delete_snapshot_cleanup_clone(ctx, 0);
6277 		return;
6278 	}
6279 
6280 	/* Temporarily override md_ro flag for snapshot for MD modification */
6281 	ctx->snapshot_md_ro = ctx->snapshot->md_ro;
6282 	ctx->snapshot->md_ro = false;
6283 
6284 	/* Mark blob as pending for removal for power failure safety, use clone id for recovery */
6285 	ctx->bserrno = _spdk_blob_set_xattr(ctx->snapshot, SNAPSHOT_PENDING_REMOVAL, &ctx->clone->id,
6286 					    sizeof(spdk_blob_id), true);
6287 	if (ctx->bserrno != 0) {
6288 		_spdk_delete_snapshot_cleanup_clone(ctx, 0);
6289 		return;
6290 	}
6291 
6292 	spdk_blob_sync_md(ctx->snapshot, _spdk_delete_snapshot_sync_snapshot_xattr_cpl, ctx);
6293 }
6294 
6295 static void
6296 _spdk_delete_snapshot_open_clone_cb(void *cb_arg, struct spdk_blob *clone, int bserrno)
6297 {
6298 	struct delete_snapshot_ctx *ctx = cb_arg;
6299 
6300 	if (bserrno) {
6301 		SPDK_ERRLOG("Failed to open clone\n");
6302 		ctx->bserrno = bserrno;
6303 		_spdk_delete_snapshot_cleanup_snapshot(ctx, 0);
6304 		return;
6305 	}
6306 
6307 	ctx->clone = clone;
6308 
6309 	if (clone->locked_operation_in_progress) {
6310 		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Cannot remove blob - another operation in progress on its clone\n");
6311 		ctx->bserrno = -EBUSY;
6312 		spdk_blob_close(ctx->clone, _spdk_delete_snapshot_cleanup_snapshot, ctx);
6313 		return;
6314 	}
6315 
6316 	clone->locked_operation_in_progress = true;
6317 
6318 	_spdk_blob_freeze_io(clone, _spdk_delete_snapshot_freeze_io_cb, ctx);
6319 }
6320 
6321 static void
6322 _spdk_update_clone_on_snapshot_deletion(struct spdk_blob *snapshot, struct delete_snapshot_ctx *ctx)
6323 {
6324 	struct spdk_blob_list *snapshot_entry = NULL;
6325 	struct spdk_blob_list *clone_entry = NULL;
6326 	struct spdk_blob_list *snapshot_clone_entry = NULL;
6327 
6328 	/* Get snapshot entry for the snapshot we want to remove */
6329 	snapshot_entry = _spdk_bs_get_snapshot_entry(snapshot->bs, snapshot->id);
6330 
6331 	assert(snapshot_entry != NULL);
6332 
6333 	/* Get clone of the snapshot (at this point there can be only one clone) */
6334 	clone_entry = TAILQ_FIRST(&snapshot_entry->clones);
6335 	assert(snapshot_entry->clone_count == 1);
6336 	assert(clone_entry != NULL);
6337 
6338 	/* Get snapshot entry for parent snapshot and clone entry within that snapshot for
6339 	 * snapshot that we are removing */
6340 	_spdk_blob_get_snapshot_and_clone_entries(snapshot, &ctx->parent_snapshot_entry,
6341 			&snapshot_clone_entry);
6342 
6343 	spdk_bs_open_blob(snapshot->bs, clone_entry->id, _spdk_delete_snapshot_open_clone_cb, ctx);
6344 }
6345 
6346 static void
6347 _spdk_bs_delete_blob_finish(void *cb_arg, struct spdk_blob *blob, int bserrno)
6348 {
6349 	spdk_bs_sequence_t *seq = cb_arg;
6350 	struct spdk_blob_list *snapshot_entry = NULL;
6351 	uint32_t page_num;
6352 
6353 	if (bserrno) {
6354 		SPDK_ERRLOG("Failed to remove blob\n");
6355 		spdk_bs_sequence_finish(seq, bserrno);
6356 		return;
6357 	}
6358 
6359 	/* Remove snapshot from the list */
6360 	snapshot_entry = _spdk_bs_get_snapshot_entry(blob->bs, blob->id);
6361 	if (snapshot_entry != NULL) {
6362 		TAILQ_REMOVE(&blob->bs->snapshots, snapshot_entry, link);
6363 		free(snapshot_entry);
6364 	}
6365 
6366 	page_num = _spdk_bs_blobid_to_page(blob->id);
6367 	spdk_bit_array_clear(blob->bs->used_blobids, page_num);
6368 	blob->state = SPDK_BLOB_STATE_DIRTY;
6369 	blob->active.num_pages = 0;
6370 	_spdk_blob_resize(blob, 0);
6371 
6372 	_spdk_blob_persist(seq, blob, _spdk_bs_delete_persist_cpl, blob);
6373 }
6374 
6375 static int
6376 _spdk_bs_is_blob_deletable(struct spdk_blob *blob, bool *update_clone)
6377 {
6378 	struct spdk_blob_list *snapshot_entry = NULL;
6379 	struct spdk_blob_list *clone_entry = NULL;
6380 	struct spdk_blob *clone = NULL;
6381 	bool has_one_clone = false;
6382 
6383 	/* Check if this is a snapshot with clones */
6384 	snapshot_entry = _spdk_bs_get_snapshot_entry(blob->bs, blob->id);
6385 	if (snapshot_entry != NULL) {
6386 		if (snapshot_entry->clone_count > 1) {
6387 			SPDK_ERRLOG("Cannot remove snapshot with more than one clone\n");
6388 			return -EBUSY;
6389 		} else if (snapshot_entry->clone_count == 1) {
6390 			has_one_clone = true;
6391 		}
6392 	}
6393 
6394 	/* Check if someone has this blob open (besides this delete context):
6395 	 * - open_ref = 1 - only this context opened blob, so it is ok to remove it
6396 	 * - open_ref <= 2 && has_one_clone = true - clone is holding snapshot
6397 	 *	and that is ok, because we will update it accordingly */
6398 	if (blob->open_ref <= 2 && has_one_clone) {
6399 		clone_entry = TAILQ_FIRST(&snapshot_entry->clones);
6400 		assert(clone_entry != NULL);
6401 		clone = _spdk_blob_lookup(blob->bs, clone_entry->id);
6402 
6403 		if (blob->open_ref == 2 && clone == NULL) {
6404 			/* Clone is closed and someone else opened this blob */
6405 			SPDK_ERRLOG("Cannot remove snapshot because it is open\n");
6406 			return -EBUSY;
6407 		}
6408 
6409 		*update_clone = true;
6410 		return 0;
6411 	}
6412 
6413 	if (blob->open_ref > 1) {
6414 		SPDK_ERRLOG("Cannot remove snapshot because it is open\n");
6415 		return -EBUSY;
6416 	}
6417 
6418 	assert(has_one_clone == false);
6419 	*update_clone = false;
6420 	return 0;
6421 }
6422 
6423 static void
6424 _spdk_bs_delete_enomem_close_cpl(void *cb_arg, int bserrno)
6425 {
6426 	spdk_bs_sequence_t *seq = cb_arg;
6427 
6428 	spdk_bs_sequence_finish(seq, -ENOMEM);
6429 }
6430 
6431 static void
6432 _spdk_bs_delete_open_cpl(void *cb_arg, struct spdk_blob *blob, int bserrno)
6433 {
6434 	spdk_bs_sequence_t *seq = cb_arg;
6435 	struct delete_snapshot_ctx *ctx;
6436 	bool update_clone = false;
6437 
6438 	if (bserrno != 0) {
6439 		spdk_bs_sequence_finish(seq, bserrno);
6440 		return;
6441 	}
6442 
6443 	_spdk_blob_verify_md_op(blob);
6444 
6445 	ctx = calloc(1, sizeof(*ctx));
6446 	if (ctx == NULL) {
6447 		spdk_blob_close(blob, _spdk_bs_delete_enomem_close_cpl, seq);
6448 		return;
6449 	}
6450 
6451 	ctx->snapshot = blob;
6452 	ctx->cb_fn = _spdk_bs_delete_blob_finish;
6453 	ctx->cb_arg = seq;
6454 
6455 	/* Check if blob can be removed and if it is a snapshot with clone on top of it */
6456 	ctx->bserrno = _spdk_bs_is_blob_deletable(blob, &update_clone);
6457 	if (ctx->bserrno) {
6458 		spdk_blob_close(blob, _spdk_delete_blob_cleanup_finish, ctx);
6459 		return;
6460 	}
6461 
6462 	if (blob->locked_operation_in_progress) {
6463 		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Cannot remove blob - another operation in progress\n");
6464 		ctx->bserrno = -EBUSY;
6465 		spdk_blob_close(blob, _spdk_delete_blob_cleanup_finish, ctx);
6466 		return;
6467 	}
6468 
6469 	blob->locked_operation_in_progress = true;
6470 
6471 	/*
6472 	 * Remove the blob from the blob_store list now, to ensure it does not
6473 	 *  get returned after this point by _spdk_blob_lookup().
6474 	 */
6475 	TAILQ_REMOVE(&blob->bs->blobs, blob, link);
6476 
6477 	if (update_clone) {
6478 		/* This blob is a snapshot with active clone - update clone first */
6479 		_spdk_update_clone_on_snapshot_deletion(blob, ctx);
6480 	} else {
6481 		/* This blob does not have any clones - just remove it */
6482 		_spdk_bs_blob_list_remove(blob);
6483 		_spdk_bs_delete_blob_finish(seq, blob, 0);
6484 		free(ctx);
6485 	}
6486 }
6487 
6488 void
6489 spdk_bs_delete_blob(struct spdk_blob_store *bs, spdk_blob_id blobid,
6490 		    spdk_blob_op_complete cb_fn, void *cb_arg)
6491 {
6492 	struct spdk_bs_cpl	cpl;
6493 	spdk_bs_sequence_t	*seq;
6494 
6495 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Deleting blob %lu\n", blobid);
6496 
6497 	assert(spdk_get_thread() == bs->md_thread);
6498 
6499 	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
6500 	cpl.u.blob_basic.cb_fn = cb_fn;
6501 	cpl.u.blob_basic.cb_arg = cb_arg;
6502 
6503 	seq = spdk_bs_sequence_start(bs->md_channel, &cpl);
6504 	if (!seq) {
6505 		cb_fn(cb_arg, -ENOMEM);
6506 		return;
6507 	}
6508 
6509 	spdk_bs_open_blob(bs, blobid, _spdk_bs_delete_open_cpl, seq);
6510 }
6511 
6512 /* END spdk_bs_delete_blob */
6513 
6514 /* START spdk_bs_open_blob */
6515 
6516 static void
6517 _spdk_bs_open_blob_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
6518 {
6519 	struct spdk_blob *blob = cb_arg;
6520 
6521 	if (bserrno != 0) {
6522 		_spdk_blob_free(blob);
6523 		seq->cpl.u.blob_handle.blob = NULL;
6524 		spdk_bs_sequence_finish(seq, bserrno);
6525 		return;
6526 	}
6527 
6528 	blob->open_ref++;
6529 
6530 	TAILQ_INSERT_HEAD(&blob->bs->blobs, blob, link);
6531 
6532 	spdk_bs_sequence_finish(seq, bserrno);
6533 }
6534 
6535 static void _spdk_bs_open_blob(struct spdk_blob_store *bs, spdk_blob_id blobid,
6536 			       struct spdk_blob_open_opts *opts, spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
6537 {
6538 	struct spdk_blob		*blob;
6539 	struct spdk_bs_cpl		cpl;
6540 	struct spdk_blob_open_opts	opts_default;
6541 	spdk_bs_sequence_t		*seq;
6542 	uint32_t			page_num;
6543 
6544 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Opening blob %lu\n", blobid);
6545 	assert(spdk_get_thread() == bs->md_thread);
6546 
6547 	page_num = _spdk_bs_blobid_to_page(blobid);
6548 	if (spdk_bit_array_get(bs->used_blobids, page_num) == false) {
6549 		/* Invalid blobid */
6550 		cb_fn(cb_arg, NULL, -ENOENT);
6551 		return;
6552 	}
6553 
6554 	blob = _spdk_blob_lookup(bs, blobid);
6555 	if (blob) {
6556 		blob->open_ref++;
6557 		cb_fn(cb_arg, blob, 0);
6558 		return;
6559 	}
6560 
6561 	blob = _spdk_blob_alloc(bs, blobid);
6562 	if (!blob) {
6563 		cb_fn(cb_arg, NULL, -ENOMEM);
6564 		return;
6565 	}
6566 
6567 	if (!opts) {
6568 		spdk_blob_open_opts_init(&opts_default);
6569 		opts = &opts_default;
6570 	}
6571 
6572 	blob->clear_method = opts->clear_method;
6573 
6574 	cpl.type = SPDK_BS_CPL_TYPE_BLOB_HANDLE;
6575 	cpl.u.blob_handle.cb_fn = cb_fn;
6576 	cpl.u.blob_handle.cb_arg = cb_arg;
6577 	cpl.u.blob_handle.blob = blob;
6578 
6579 	seq = spdk_bs_sequence_start(bs->md_channel, &cpl);
6580 	if (!seq) {
6581 		_spdk_blob_free(blob);
6582 		cb_fn(cb_arg, NULL, -ENOMEM);
6583 		return;
6584 	}
6585 
6586 	_spdk_blob_load(seq, blob, _spdk_bs_open_blob_cpl, blob);
6587 }
6588 
6589 void spdk_bs_open_blob(struct spdk_blob_store *bs, spdk_blob_id blobid,
6590 		       spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
6591 {
6592 	_spdk_bs_open_blob(bs, blobid, NULL, cb_fn, cb_arg);
6593 }
6594 
6595 void spdk_bs_open_blob_ext(struct spdk_blob_store *bs, spdk_blob_id blobid,
6596 			   struct spdk_blob_open_opts *opts, spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
6597 {
6598 	_spdk_bs_open_blob(bs, blobid, opts, cb_fn, cb_arg);
6599 }
6600 
6601 /* END spdk_bs_open_blob */
6602 
6603 /* START spdk_blob_set_read_only */
6604 int spdk_blob_set_read_only(struct spdk_blob *blob)
6605 {
6606 	_spdk_blob_verify_md_op(blob);
6607 
6608 	blob->data_ro_flags |= SPDK_BLOB_READ_ONLY;
6609 
6610 	blob->state = SPDK_BLOB_STATE_DIRTY;
6611 	return 0;
6612 }
6613 /* END spdk_blob_set_read_only */
6614 
6615 /* START spdk_blob_sync_md */
6616 
6617 static void
6618 _spdk_blob_sync_md_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
6619 {
6620 	struct spdk_blob *blob = cb_arg;
6621 
6622 	if (bserrno == 0 && (blob->data_ro_flags & SPDK_BLOB_READ_ONLY)) {
6623 		blob->data_ro = true;
6624 		blob->md_ro = true;
6625 	}
6626 
6627 	spdk_bs_sequence_finish(seq, bserrno);
6628 }
6629 
6630 static void
6631 _spdk_blob_sync_md(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
6632 {
6633 	struct spdk_bs_cpl	cpl;
6634 	spdk_bs_sequence_t	*seq;
6635 
6636 	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
6637 	cpl.u.blob_basic.cb_fn = cb_fn;
6638 	cpl.u.blob_basic.cb_arg = cb_arg;
6639 
6640 	seq = spdk_bs_sequence_start(blob->bs->md_channel, &cpl);
6641 	if (!seq) {
6642 		cb_fn(cb_arg, -ENOMEM);
6643 		return;
6644 	}
6645 
6646 	_spdk_blob_persist(seq, blob, _spdk_blob_sync_md_cpl, blob);
6647 }
6648 
6649 void
6650 spdk_blob_sync_md(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
6651 {
6652 	_spdk_blob_verify_md_op(blob);
6653 
6654 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Syncing blob %lu\n", blob->id);
6655 
6656 	if (blob->md_ro) {
6657 		assert(blob->state == SPDK_BLOB_STATE_CLEAN);
6658 		cb_fn(cb_arg, 0);
6659 		return;
6660 	}
6661 
6662 	_spdk_blob_sync_md(blob, cb_fn, cb_arg);
6663 }
6664 
6665 /* END spdk_blob_sync_md */
6666 
6667 struct spdk_blob_insert_cluster_ctx {
6668 	struct spdk_thread	*thread;
6669 	struct spdk_blob	*blob;
6670 	uint32_t		cluster_num;	/* cluster index in blob */
6671 	uint32_t		cluster;	/* cluster on disk */
6672 	uint32_t		extent_page;	/* extent page on disk */
6673 	int			rc;
6674 	spdk_blob_op_complete	cb_fn;
6675 	void			*cb_arg;
6676 };
6677 
6678 static void
6679 _spdk_blob_insert_cluster_msg_cpl(void *arg)
6680 {
6681 	struct spdk_blob_insert_cluster_ctx *ctx = arg;
6682 
6683 	ctx->cb_fn(ctx->cb_arg, ctx->rc);
6684 	free(ctx);
6685 }
6686 
6687 static void
6688 _spdk_blob_insert_cluster_msg_cb(void *arg, int bserrno)
6689 {
6690 	struct spdk_blob_insert_cluster_ctx *ctx = arg;
6691 
6692 	ctx->rc = bserrno;
6693 	spdk_thread_send_msg(ctx->thread, _spdk_blob_insert_cluster_msg_cpl, ctx);
6694 }
6695 
6696 static void
6697 _spdk_blob_persist_extent_page_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
6698 {
6699 	struct spdk_blob_md_page        *page = cb_arg;
6700 
6701 	spdk_bs_sequence_finish(seq, bserrno);
6702 	spdk_free(page);
6703 }
6704 
6705 static void
6706 _spdk_blob_insert_extent(struct spdk_blob *blob, uint32_t extent, uint64_t cluster_num,
6707 			 spdk_blob_op_complete cb_fn, void *cb_arg)
6708 {
6709 	spdk_bs_sequence_t		*seq;
6710 	struct spdk_bs_cpl		cpl;
6711 	struct spdk_blob_md_page	*page = NULL;
6712 	uint32_t			page_count = 0;
6713 	int				rc;
6714 
6715 	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
6716 	cpl.u.blob_basic.cb_fn = cb_fn;
6717 	cpl.u.blob_basic.cb_arg = cb_arg;
6718 
6719 	seq = spdk_bs_sequence_start(blob->bs->md_channel, &cpl);
6720 	if (!seq) {
6721 		cb_fn(cb_arg, -ENOMEM);
6722 		return;
6723 	}
6724 	rc = _spdk_blob_serialize_add_page(blob, &page, &page_count, &page);
6725 	if (rc < 0) {
6726 		spdk_bs_sequence_finish(seq, rc);
6727 		return;
6728 	}
6729 
6730 	_spdk_blob_serialize_extent_page(blob, cluster_num, page);
6731 
6732 	page->crc = _spdk_blob_md_page_calc_crc(page);
6733 
6734 	assert(spdk_bit_array_get(blob->bs->used_md_pages, extent) == true);
6735 
6736 	spdk_bs_sequence_write_dev(seq, page, _spdk_bs_md_page_to_lba(blob->bs, extent),
6737 				   _spdk_bs_byte_to_lba(blob->bs, SPDK_BS_PAGE_SIZE),
6738 				   _spdk_blob_persist_extent_page_cpl, page);
6739 }
6740 
6741 static void
6742 _spdk_blob_insert_cluster_msg(void *arg)
6743 {
6744 	struct spdk_blob_insert_cluster_ctx *ctx = arg;
6745 	uint32_t *extent_page;
6746 
6747 	ctx->rc = _spdk_blob_insert_cluster(ctx->blob, ctx->cluster_num, ctx->cluster);
6748 	if (ctx->rc != 0) {
6749 		spdk_thread_send_msg(ctx->thread, _spdk_blob_insert_cluster_msg_cpl, ctx);
6750 		return;
6751 	}
6752 
6753 	if (ctx->blob->use_extent_table == false) {
6754 		/* Extent table is not used, proceed with sync of md that will only use extents_rle. */
6755 		ctx->blob->state = SPDK_BLOB_STATE_DIRTY;
6756 		_spdk_blob_sync_md(ctx->blob, _spdk_blob_insert_cluster_msg_cb, ctx);
6757 		return;
6758 	}
6759 
6760 	extent_page = _spdk_bs_cluster_to_extent_page(ctx->blob, ctx->cluster_num);
6761 	if (*extent_page == 0) {
6762 		/* Extent page requires allocation.
6763 		 * It was already claimed in the used_md_pages map and placed in ctx.
6764 		 * Blob persist will take care of writing out new extent page on disk. */
6765 		assert(ctx->extent_page != 0);
6766 		assert(spdk_bit_array_get(ctx->blob->bs->used_md_pages, ctx->extent_page) == true);
6767 		*extent_page = ctx->extent_page;
6768 		ctx->blob->state = SPDK_BLOB_STATE_DIRTY;
6769 		_spdk_blob_sync_md(ctx->blob, _spdk_blob_insert_cluster_msg_cb, ctx);
6770 	} else {
6771 		/* It is possible for original thread to allocate extent page for
6772 		 * different cluster in the same extent page. In such case proceed with
6773 		 * updating the existing extent page, but release the additional one. */
6774 		if (ctx->extent_page != 0) {
6775 			assert(spdk_bit_array_get(ctx->blob->bs->used_md_pages, ctx->extent_page) == true);
6776 			_spdk_bs_release_md_page(ctx->blob->bs, ctx->extent_page);
6777 		}
6778 		/* Extent page already allocated.
6779 		 * Every cluster allocation, requires just an update of single extent page. */
6780 		_spdk_blob_insert_extent(ctx->blob, *extent_page, ctx->cluster_num,
6781 					 _spdk_blob_insert_cluster_msg_cb, ctx);
6782 	}
6783 }
6784 
6785 static void
6786 _spdk_blob_insert_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num,
6787 				       uint64_t cluster, uint32_t extent_page, spdk_blob_op_complete cb_fn, void *cb_arg)
6788 {
6789 	struct spdk_blob_insert_cluster_ctx *ctx;
6790 
6791 	ctx = calloc(1, sizeof(*ctx));
6792 	if (ctx == NULL) {
6793 		cb_fn(cb_arg, -ENOMEM);
6794 		return;
6795 	}
6796 
6797 	ctx->thread = spdk_get_thread();
6798 	ctx->blob = blob;
6799 	ctx->cluster_num = cluster_num;
6800 	ctx->cluster = cluster;
6801 	ctx->extent_page = extent_page;
6802 	ctx->cb_fn = cb_fn;
6803 	ctx->cb_arg = cb_arg;
6804 
6805 	spdk_thread_send_msg(blob->bs->md_thread, _spdk_blob_insert_cluster_msg, ctx);
6806 }
6807 
6808 /* START spdk_blob_close */
6809 
6810 static void
6811 _spdk_blob_close_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
6812 {
6813 	struct spdk_blob *blob = cb_arg;
6814 
6815 	if (bserrno == 0) {
6816 		blob->open_ref--;
6817 		if (blob->open_ref == 0) {
6818 			/*
6819 			 * Blobs with active.num_pages == 0 are deleted blobs.
6820 			 *  these blobs are removed from the blob_store list
6821 			 *  when the deletion process starts - so don't try to
6822 			 *  remove them again.
6823 			 */
6824 			if (blob->active.num_pages > 0) {
6825 				TAILQ_REMOVE(&blob->bs->blobs, blob, link);
6826 			}
6827 			_spdk_blob_free(blob);
6828 		}
6829 	}
6830 
6831 	spdk_bs_sequence_finish(seq, bserrno);
6832 }
6833 
6834 void spdk_blob_close(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
6835 {
6836 	struct spdk_bs_cpl	cpl;
6837 	spdk_bs_sequence_t	*seq;
6838 
6839 	_spdk_blob_verify_md_op(blob);
6840 
6841 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Closing blob %lu\n", blob->id);
6842 
6843 	if (blob->open_ref == 0) {
6844 		cb_fn(cb_arg, -EBADF);
6845 		return;
6846 	}
6847 
6848 	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
6849 	cpl.u.blob_basic.cb_fn = cb_fn;
6850 	cpl.u.blob_basic.cb_arg = cb_arg;
6851 
6852 	seq = spdk_bs_sequence_start(blob->bs->md_channel, &cpl);
6853 	if (!seq) {
6854 		cb_fn(cb_arg, -ENOMEM);
6855 		return;
6856 	}
6857 
6858 	/* Sync metadata */
6859 	_spdk_blob_persist(seq, blob, _spdk_blob_close_cpl, blob);
6860 }
6861 
6862 /* END spdk_blob_close */
6863 
6864 struct spdk_io_channel *spdk_bs_alloc_io_channel(struct spdk_blob_store *bs)
6865 {
6866 	return spdk_get_io_channel(bs);
6867 }
6868 
6869 void spdk_bs_free_io_channel(struct spdk_io_channel *channel)
6870 {
6871 	spdk_put_io_channel(channel);
6872 }
6873 
6874 void spdk_blob_io_unmap(struct spdk_blob *blob, struct spdk_io_channel *channel,
6875 			uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg)
6876 {
6877 	_spdk_blob_request_submit_op(blob, channel, NULL, offset, length, cb_fn, cb_arg,
6878 				     SPDK_BLOB_UNMAP);
6879 }
6880 
6881 void spdk_blob_io_write_zeroes(struct spdk_blob *blob, struct spdk_io_channel *channel,
6882 			       uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg)
6883 {
6884 	_spdk_blob_request_submit_op(blob, channel, NULL, offset, length, cb_fn, cb_arg,
6885 				     SPDK_BLOB_WRITE_ZEROES);
6886 }
6887 
6888 void spdk_blob_io_write(struct spdk_blob *blob, struct spdk_io_channel *channel,
6889 			void *payload, uint64_t offset, uint64_t length,
6890 			spdk_blob_op_complete cb_fn, void *cb_arg)
6891 {
6892 	_spdk_blob_request_submit_op(blob, channel, payload, offset, length, cb_fn, cb_arg,
6893 				     SPDK_BLOB_WRITE);
6894 }
6895 
6896 void spdk_blob_io_read(struct spdk_blob *blob, struct spdk_io_channel *channel,
6897 		       void *payload, uint64_t offset, uint64_t length,
6898 		       spdk_blob_op_complete cb_fn, void *cb_arg)
6899 {
6900 	_spdk_blob_request_submit_op(blob, channel, payload, offset, length, cb_fn, cb_arg,
6901 				     SPDK_BLOB_READ);
6902 }
6903 
6904 void spdk_blob_io_writev(struct spdk_blob *blob, struct spdk_io_channel *channel,
6905 			 struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
6906 			 spdk_blob_op_complete cb_fn, void *cb_arg)
6907 {
6908 	_spdk_blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, false);
6909 }
6910 
6911 void spdk_blob_io_readv(struct spdk_blob *blob, struct spdk_io_channel *channel,
6912 			struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
6913 			spdk_blob_op_complete cb_fn, void *cb_arg)
6914 {
6915 	_spdk_blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, true);
6916 }
6917 
6918 struct spdk_bs_iter_ctx {
6919 	int64_t page_num;
6920 	struct spdk_blob_store *bs;
6921 
6922 	spdk_blob_op_with_handle_complete cb_fn;
6923 	void *cb_arg;
6924 };
6925 
6926 static void
6927 _spdk_bs_iter_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
6928 {
6929 	struct spdk_bs_iter_ctx *ctx = cb_arg;
6930 	struct spdk_blob_store *bs = ctx->bs;
6931 	spdk_blob_id id;
6932 
6933 	if (bserrno == 0) {
6934 		ctx->cb_fn(ctx->cb_arg, _blob, bserrno);
6935 		free(ctx);
6936 		return;
6937 	}
6938 
6939 	ctx->page_num++;
6940 	ctx->page_num = spdk_bit_array_find_first_set(bs->used_blobids, ctx->page_num);
6941 	if (ctx->page_num >= spdk_bit_array_capacity(bs->used_blobids)) {
6942 		ctx->cb_fn(ctx->cb_arg, NULL, -ENOENT);
6943 		free(ctx);
6944 		return;
6945 	}
6946 
6947 	id = _spdk_bs_page_to_blobid(ctx->page_num);
6948 
6949 	spdk_bs_open_blob(bs, id, _spdk_bs_iter_cpl, ctx);
6950 }
6951 
6952 void
6953 spdk_bs_iter_first(struct spdk_blob_store *bs,
6954 		   spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
6955 {
6956 	struct spdk_bs_iter_ctx *ctx;
6957 
6958 	ctx = calloc(1, sizeof(*ctx));
6959 	if (!ctx) {
6960 		cb_fn(cb_arg, NULL, -ENOMEM);
6961 		return;
6962 	}
6963 
6964 	ctx->page_num = -1;
6965 	ctx->bs = bs;
6966 	ctx->cb_fn = cb_fn;
6967 	ctx->cb_arg = cb_arg;
6968 
6969 	_spdk_bs_iter_cpl(ctx, NULL, -1);
6970 }
6971 
6972 static void
6973 _spdk_bs_iter_close_cpl(void *cb_arg, int bserrno)
6974 {
6975 	struct spdk_bs_iter_ctx *ctx = cb_arg;
6976 
6977 	_spdk_bs_iter_cpl(ctx, NULL, -1);
6978 }
6979 
6980 void
6981 spdk_bs_iter_next(struct spdk_blob_store *bs, struct spdk_blob *blob,
6982 		  spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
6983 {
6984 	struct spdk_bs_iter_ctx *ctx;
6985 
6986 	assert(blob != NULL);
6987 
6988 	ctx = calloc(1, sizeof(*ctx));
6989 	if (!ctx) {
6990 		cb_fn(cb_arg, NULL, -ENOMEM);
6991 		return;
6992 	}
6993 
6994 	ctx->page_num = _spdk_bs_blobid_to_page(blob->id);
6995 	ctx->bs = bs;
6996 	ctx->cb_fn = cb_fn;
6997 	ctx->cb_arg = cb_arg;
6998 
6999 	/* Close the existing blob */
7000 	spdk_blob_close(blob, _spdk_bs_iter_close_cpl, ctx);
7001 }
7002 
7003 static int
7004 _spdk_blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value,
7005 		     uint16_t value_len, bool internal)
7006 {
7007 	struct spdk_xattr_tailq *xattrs;
7008 	struct spdk_xattr	*xattr;
7009 	size_t			desc_size;
7010 
7011 	_spdk_blob_verify_md_op(blob);
7012 
7013 	if (blob->md_ro) {
7014 		return -EPERM;
7015 	}
7016 
7017 	desc_size = sizeof(struct spdk_blob_md_descriptor_xattr) + strlen(name) + value_len;
7018 	if (desc_size > SPDK_BS_MAX_DESC_SIZE) {
7019 		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Xattr '%s' of size %ld does not fix into single page %ld\n", name,
7020 			      desc_size, SPDK_BS_MAX_DESC_SIZE);
7021 		return -ENOMEM;
7022 	}
7023 
7024 	if (internal) {
7025 		xattrs = &blob->xattrs_internal;
7026 		blob->invalid_flags |= SPDK_BLOB_INTERNAL_XATTR;
7027 	} else {
7028 		xattrs = &blob->xattrs;
7029 	}
7030 
7031 	TAILQ_FOREACH(xattr, xattrs, link) {
7032 		if (!strcmp(name, xattr->name)) {
7033 			free(xattr->value);
7034 			xattr->value_len = value_len;
7035 			xattr->value = malloc(value_len);
7036 			memcpy(xattr->value, value, value_len);
7037 
7038 			blob->state = SPDK_BLOB_STATE_DIRTY;
7039 
7040 			return 0;
7041 		}
7042 	}
7043 
7044 	xattr = calloc(1, sizeof(*xattr));
7045 	if (!xattr) {
7046 		return -ENOMEM;
7047 	}
7048 	xattr->name = strdup(name);
7049 	xattr->value_len = value_len;
7050 	xattr->value = malloc(value_len);
7051 	memcpy(xattr->value, value, value_len);
7052 	TAILQ_INSERT_TAIL(xattrs, xattr, link);
7053 
7054 	blob->state = SPDK_BLOB_STATE_DIRTY;
7055 
7056 	return 0;
7057 }
7058 
7059 int
7060 spdk_blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value,
7061 		    uint16_t value_len)
7062 {
7063 	return _spdk_blob_set_xattr(blob, name, value, value_len, false);
7064 }
7065 
7066 static int
7067 _spdk_blob_remove_xattr(struct spdk_blob *blob, const char *name, bool internal)
7068 {
7069 	struct spdk_xattr_tailq *xattrs;
7070 	struct spdk_xattr	*xattr;
7071 
7072 	_spdk_blob_verify_md_op(blob);
7073 
7074 	if (blob->md_ro) {
7075 		return -EPERM;
7076 	}
7077 	xattrs = internal ? &blob->xattrs_internal : &blob->xattrs;
7078 
7079 	TAILQ_FOREACH(xattr, xattrs, link) {
7080 		if (!strcmp(name, xattr->name)) {
7081 			TAILQ_REMOVE(xattrs, xattr, link);
7082 			free(xattr->value);
7083 			free(xattr->name);
7084 			free(xattr);
7085 
7086 			if (internal && TAILQ_EMPTY(&blob->xattrs_internal)) {
7087 				blob->invalid_flags &= ~SPDK_BLOB_INTERNAL_XATTR;
7088 			}
7089 			blob->state = SPDK_BLOB_STATE_DIRTY;
7090 
7091 			return 0;
7092 		}
7093 	}
7094 
7095 	return -ENOENT;
7096 }
7097 
7098 int
7099 spdk_blob_remove_xattr(struct spdk_blob *blob, const char *name)
7100 {
7101 	return _spdk_blob_remove_xattr(blob, name, false);
7102 }
7103 
7104 static int
7105 _spdk_blob_get_xattr_value(struct spdk_blob *blob, const char *name,
7106 			   const void **value, size_t *value_len, bool internal)
7107 {
7108 	struct spdk_xattr	*xattr;
7109 	struct spdk_xattr_tailq *xattrs;
7110 
7111 	xattrs = internal ? &blob->xattrs_internal : &blob->xattrs;
7112 
7113 	TAILQ_FOREACH(xattr, xattrs, link) {
7114 		if (!strcmp(name, xattr->name)) {
7115 			*value = xattr->value;
7116 			*value_len = xattr->value_len;
7117 			return 0;
7118 		}
7119 	}
7120 	return -ENOENT;
7121 }
7122 
7123 int
7124 spdk_blob_get_xattr_value(struct spdk_blob *blob, const char *name,
7125 			  const void **value, size_t *value_len)
7126 {
7127 	_spdk_blob_verify_md_op(blob);
7128 
7129 	return _spdk_blob_get_xattr_value(blob, name, value, value_len, false);
7130 }
7131 
7132 struct spdk_xattr_names {
7133 	uint32_t	count;
7134 	const char	*names[0];
7135 };
7136 
7137 static int
7138 _spdk_blob_get_xattr_names(struct spdk_xattr_tailq *xattrs, struct spdk_xattr_names **names)
7139 {
7140 	struct spdk_xattr	*xattr;
7141 	int			count = 0;
7142 
7143 	TAILQ_FOREACH(xattr, xattrs, link) {
7144 		count++;
7145 	}
7146 
7147 	*names = calloc(1, sizeof(struct spdk_xattr_names) + count * sizeof(char *));
7148 	if (*names == NULL) {
7149 		return -ENOMEM;
7150 	}
7151 
7152 	TAILQ_FOREACH(xattr, xattrs, link) {
7153 		(*names)->names[(*names)->count++] = xattr->name;
7154 	}
7155 
7156 	return 0;
7157 }
7158 
7159 int
7160 spdk_blob_get_xattr_names(struct spdk_blob *blob, struct spdk_xattr_names **names)
7161 {
7162 	_spdk_blob_verify_md_op(blob);
7163 
7164 	return _spdk_blob_get_xattr_names(&blob->xattrs, names);
7165 }
7166 
7167 uint32_t
7168 spdk_xattr_names_get_count(struct spdk_xattr_names *names)
7169 {
7170 	assert(names != NULL);
7171 
7172 	return names->count;
7173 }
7174 
7175 const char *
7176 spdk_xattr_names_get_name(struct spdk_xattr_names *names, uint32_t index)
7177 {
7178 	if (index >= names->count) {
7179 		return NULL;
7180 	}
7181 
7182 	return names->names[index];
7183 }
7184 
7185 void
7186 spdk_xattr_names_free(struct spdk_xattr_names *names)
7187 {
7188 	free(names);
7189 }
7190 
7191 struct spdk_bs_type
7192 spdk_bs_get_bstype(struct spdk_blob_store *bs)
7193 {
7194 	return bs->bstype;
7195 }
7196 
7197 void
7198 spdk_bs_set_bstype(struct spdk_blob_store *bs, struct spdk_bs_type bstype)
7199 {
7200 	memcpy(&bs->bstype, &bstype, sizeof(bstype));
7201 }
7202 
7203 bool
7204 spdk_blob_is_read_only(struct spdk_blob *blob)
7205 {
7206 	assert(blob != NULL);
7207 	return (blob->data_ro || blob->md_ro);
7208 }
7209 
7210 bool
7211 spdk_blob_is_snapshot(struct spdk_blob *blob)
7212 {
7213 	struct spdk_blob_list *snapshot_entry;
7214 
7215 	assert(blob != NULL);
7216 
7217 	snapshot_entry = _spdk_bs_get_snapshot_entry(blob->bs, blob->id);
7218 	if (snapshot_entry == NULL) {
7219 		return false;
7220 	}
7221 
7222 	return true;
7223 }
7224 
7225 bool
7226 spdk_blob_is_clone(struct spdk_blob *blob)
7227 {
7228 	assert(blob != NULL);
7229 
7230 	if (blob->parent_id != SPDK_BLOBID_INVALID) {
7231 		assert(spdk_blob_is_thin_provisioned(blob));
7232 		return true;
7233 	}
7234 
7235 	return false;
7236 }
7237 
7238 bool
7239 spdk_blob_is_thin_provisioned(struct spdk_blob *blob)
7240 {
7241 	assert(blob != NULL);
7242 	return !!(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
7243 }
7244 
7245 static void
7246 _spdk_blob_update_clear_method(struct spdk_blob *blob)
7247 {
7248 	enum blob_clear_method stored_cm;
7249 
7250 	assert(blob != NULL);
7251 
7252 	/* If BLOB_CLEAR_WITH_DEFAULT was passed in, use the setting stored
7253 	 * in metadata previously.  If something other than the default was
7254 	 * specified, ignore stored value and used what was passed in.
7255 	 */
7256 	stored_cm = ((blob->md_ro_flags & SPDK_BLOB_CLEAR_METHOD) >> SPDK_BLOB_CLEAR_METHOD_SHIFT);
7257 
7258 	if (blob->clear_method == BLOB_CLEAR_WITH_DEFAULT) {
7259 		blob->clear_method = stored_cm;
7260 	} else if (blob->clear_method != stored_cm) {
7261 		SPDK_WARNLOG("Using passed in clear method 0x%x instead of stored value of 0x%x\n",
7262 			     blob->clear_method, stored_cm);
7263 	}
7264 }
7265 
7266 spdk_blob_id
7267 spdk_blob_get_parent_snapshot(struct spdk_blob_store *bs, spdk_blob_id blob_id)
7268 {
7269 	struct spdk_blob_list *snapshot_entry = NULL;
7270 	struct spdk_blob_list *clone_entry = NULL;
7271 
7272 	TAILQ_FOREACH(snapshot_entry, &bs->snapshots, link) {
7273 		TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) {
7274 			if (clone_entry->id == blob_id) {
7275 				return snapshot_entry->id;
7276 			}
7277 		}
7278 	}
7279 
7280 	return SPDK_BLOBID_INVALID;
7281 }
7282 
7283 int
7284 spdk_blob_get_clones(struct spdk_blob_store *bs, spdk_blob_id blobid, spdk_blob_id *ids,
7285 		     size_t *count)
7286 {
7287 	struct spdk_blob_list *snapshot_entry, *clone_entry;
7288 	size_t n;
7289 
7290 	snapshot_entry = _spdk_bs_get_snapshot_entry(bs, blobid);
7291 	if (snapshot_entry == NULL) {
7292 		*count = 0;
7293 		return 0;
7294 	}
7295 
7296 	if (ids == NULL || *count < snapshot_entry->clone_count) {
7297 		*count = snapshot_entry->clone_count;
7298 		return -ENOMEM;
7299 	}
7300 	*count = snapshot_entry->clone_count;
7301 
7302 	n = 0;
7303 	TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) {
7304 		ids[n++] = clone_entry->id;
7305 	}
7306 
7307 	return 0;
7308 }
7309 
7310 SPDK_LOG_REGISTER_COMPONENT("blob", SPDK_LOG_BLOB)
7311