xref: /spdk/lib/blob/blobstore.c (revision c177a3c8416290d6d58ae6d4bdf3ccabe81c085a)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include "spdk/stdinc.h"
35 
36 #include "spdk/blob.h"
37 #include "spdk/crc32.h"
38 #include "spdk/env.h"
39 #include "spdk/queue.h"
40 #include "spdk/thread.h"
41 #include "spdk/bit_array.h"
42 #include "spdk/likely.h"
43 #include "spdk/util.h"
44 #include "spdk/string.h"
45 
46 #include "spdk_internal/assert.h"
47 #include "spdk_internal/log.h"
48 
49 #include "blobstore.h"
50 
51 #define BLOB_CRC32C_INITIAL    0xffffffffUL
52 
53 static int spdk_bs_register_md_thread(struct spdk_blob_store *bs);
54 static int spdk_bs_unregister_md_thread(struct spdk_blob_store *bs);
55 static void _spdk_blob_close_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno);
56 static void _spdk_blob_insert_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num,
57 		uint64_t cluster, uint32_t extent, spdk_blob_op_complete cb_fn, void *cb_arg);
58 
59 static int _spdk_blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value,
60 				uint16_t value_len, bool internal);
61 static int _spdk_blob_get_xattr_value(struct spdk_blob *blob, const char *name,
62 				      const void **value, size_t *value_len, bool internal);
63 static int _spdk_blob_remove_xattr(struct spdk_blob *blob, const char *name, bool internal);
64 
65 static void _spdk_blob_insert_extent(struct spdk_blob *blob, uint32_t extent, uint64_t cluster_num,
66 				     spdk_blob_op_complete cb_fn, void *cb_arg);
67 
68 static void
69 _spdk_blob_verify_md_op(struct spdk_blob *blob)
70 {
71 	assert(blob != NULL);
72 	assert(spdk_get_thread() == blob->bs->md_thread);
73 	assert(blob->state != SPDK_BLOB_STATE_LOADING);
74 }
75 
76 static struct spdk_blob_list *
77 _spdk_bs_get_snapshot_entry(struct spdk_blob_store *bs, spdk_blob_id blobid)
78 {
79 	struct spdk_blob_list *snapshot_entry = NULL;
80 
81 	TAILQ_FOREACH(snapshot_entry, &bs->snapshots, link) {
82 		if (snapshot_entry->id == blobid) {
83 			break;
84 		}
85 	}
86 
87 	return snapshot_entry;
88 }
89 
90 static void
91 _spdk_bs_claim_md_page(struct spdk_blob_store *bs, uint32_t page)
92 {
93 	assert(page < spdk_bit_array_capacity(bs->used_md_pages));
94 	assert(spdk_bit_array_get(bs->used_md_pages, page) == false);
95 
96 	spdk_bit_array_set(bs->used_md_pages, page);
97 }
98 
99 static void
100 _spdk_bs_release_md_page(struct spdk_blob_store *bs, uint32_t page)
101 {
102 	assert(page < spdk_bit_array_capacity(bs->used_md_pages));
103 	assert(spdk_bit_array_get(bs->used_md_pages, page) == true);
104 
105 	spdk_bit_array_clear(bs->used_md_pages, page);
106 }
107 
108 static void
109 _spdk_bs_claim_cluster(struct spdk_blob_store *bs, uint32_t cluster_num)
110 {
111 	assert(cluster_num < spdk_bit_array_capacity(bs->used_clusters));
112 	assert(spdk_bit_array_get(bs->used_clusters, cluster_num) == false);
113 	assert(bs->num_free_clusters > 0);
114 
115 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Claiming cluster %u\n", cluster_num);
116 
117 	spdk_bit_array_set(bs->used_clusters, cluster_num);
118 	bs->num_free_clusters--;
119 }
120 
121 static int
122 _spdk_blob_insert_cluster(struct spdk_blob *blob, uint32_t cluster_num, uint64_t cluster)
123 {
124 	uint64_t *cluster_lba = &blob->active.clusters[cluster_num];
125 
126 	_spdk_blob_verify_md_op(blob);
127 
128 	if (*cluster_lba != 0) {
129 		return -EEXIST;
130 	}
131 
132 	*cluster_lba = _spdk_bs_cluster_to_lba(blob->bs, cluster);
133 	return 0;
134 }
135 
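/*
 * Find and claim the lowest free cluster at or above *lowest_free_cluster and,
 * when the extent table is in use and the covering extent page has not been
 * allocated yet, also claim a free metadata page for it. Both lookups happen
 * under used_clusters_mutex. If update_map is set, the newly claimed cluster
 * (and extent page) is also recorded in the blob's in-memory cluster map.
 */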
136 static int
137 _spdk_bs_allocate_cluster(struct spdk_blob *blob, uint32_t cluster_num,
138 			  uint64_t *lowest_free_cluster, uint32_t *lowest_free_md_page, bool update_map)
139 {
140 	uint32_t *extent_page;
141 
142 	pthread_mutex_lock(&blob->bs->used_clusters_mutex);
143 	*lowest_free_cluster = spdk_bit_array_find_first_clear(blob->bs->used_clusters,
144 			       *lowest_free_cluster);
145 	if (*lowest_free_cluster == UINT32_MAX) {
146 		/* No more free clusters. Cannot satisfy the request */
147 		pthread_mutex_unlock(&blob->bs->used_clusters_mutex);
148 		return -ENOSPC;
149 	}
150 
151 	if (blob->use_extent_table) {
152 		extent_page = _spdk_bs_cluster_to_extent_page(blob, cluster_num);
153 		if (*extent_page == 0) {
154 			/* No extent_page is allocated for the cluster */
155 			*lowest_free_md_page = spdk_bit_array_find_first_clear(blob->bs->used_md_pages,
156 					       *lowest_free_md_page);
157 			if (*lowest_free_md_page == UINT32_MAX) {
158 				/* No more free md pages. Cannot satisfy the request */
159 				pthread_mutex_unlock(&blob->bs->used_clusters_mutex);
160 				return -ENOSPC;
161 			}
162 			_spdk_bs_claim_md_page(blob->bs, *lowest_free_md_page);
163 		}
164 	}
165 
166 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Claiming cluster %lu for blob %lu\n", *lowest_free_cluster, blob->id);
167 	_spdk_bs_claim_cluster(blob->bs, *lowest_free_cluster);
168 
169 	pthread_mutex_unlock(&blob->bs->used_clusters_mutex);
170 
171 	if (update_map) {
172 		_spdk_blob_insert_cluster(blob, cluster_num, *lowest_free_cluster);
173 		if (blob->use_extent_table && *extent_page == 0) {
174 			*extent_page = *lowest_free_md_page;
175 		}
176 	}
177 
178 	return 0;
179 }
180 
181 static void
182 _spdk_bs_release_cluster(struct spdk_blob_store *bs, uint32_t cluster_num)
183 {
184 	assert(cluster_num < spdk_bit_array_capacity(bs->used_clusters));
185 	assert(spdk_bit_array_get(bs->used_clusters, cluster_num) == true);
186 	assert(bs->num_free_clusters < bs->total_clusters);
187 
188 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Releasing cluster %u\n", cluster_num);
189 
190 	pthread_mutex_lock(&bs->used_clusters_mutex);
191 	spdk_bit_array_clear(bs->used_clusters, cluster_num);
192 	bs->num_free_clusters++;
193 	pthread_mutex_unlock(&bs->used_clusters_mutex);
194 }
195 
196 static void
197 _spdk_blob_xattrs_init(struct spdk_blob_xattr_opts *xattrs)
198 {
199 	xattrs->count = 0;
200 	xattrs->names = NULL;
201 	xattrs->ctx = NULL;
202 	xattrs->get_value = NULL;
203 }
204 
205 void
206 spdk_blob_opts_init(struct spdk_blob_opts *opts)
207 {
208 	opts->num_clusters = 0;
209 	opts->thin_provision = false;
210 	opts->clear_method = BLOB_CLEAR_WITH_DEFAULT;
211 	_spdk_blob_xattrs_init(&opts->xattrs);
212 	opts->use_extent_table = false;
213 }
214 
215 void
216 spdk_blob_open_opts_init(struct spdk_blob_open_opts *opts)
217 {
218 	opts->clear_method = BLOB_CLEAR_WITH_DEFAULT;
219 }
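/*
 * A minimal usage sketch for the two option initializers above, assuming the
 * public blobstore API declared in include/spdk/blob.h; the callback names and
 * the option values chosen here are illustrative only:
 *
 *	struct spdk_blob_opts opts;
 *	struct spdk_blob_open_opts open_opts;
 *
 *	spdk_blob_opts_init(&opts);
 *	opts.num_clusters = 8;
 *	opts.thin_provision = true;
 *	spdk_bs_create_blob_ext(bs, &opts, blob_create_cb, cb_arg);
 *
 *	spdk_blob_open_opts_init(&open_opts);
 *	open_opts.clear_method = BLOB_CLEAR_WITH_NONE;
 *	spdk_bs_open_blob_ext(bs, blobid, &open_opts, blob_open_cb, cb_arg);
 */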
220 
221 static struct spdk_blob *
222 _spdk_blob_alloc(struct spdk_blob_store *bs, spdk_blob_id id)
223 {
224 	struct spdk_blob *blob;
225 
226 	blob = calloc(1, sizeof(*blob));
227 	if (!blob) {
228 		return NULL;
229 	}
230 
231 	blob->id = id;
232 	blob->bs = bs;
233 
234 	blob->parent_id = SPDK_BLOBID_INVALID;
235 
236 	blob->state = SPDK_BLOB_STATE_DIRTY;
237 	blob->extent_rle_found = false;
238 	blob->extent_table_found = false;
239 	blob->active.num_pages = 1;
240 	blob->active.pages = calloc(1, sizeof(*blob->active.pages));
241 	if (!blob->active.pages) {
242 		free(blob);
243 		return NULL;
244 	}
245 
246 	blob->active.pages[0] = _spdk_bs_blobid_to_page(id);
247 
248 	TAILQ_INIT(&blob->xattrs);
249 	TAILQ_INIT(&blob->xattrs_internal);
250 
251 	return blob;
252 }
253 
254 static void
255 _spdk_xattrs_free(struct spdk_xattr_tailq *xattrs)
256 {
257 	struct spdk_xattr	*xattr, *xattr_tmp;
258 
259 	TAILQ_FOREACH_SAFE(xattr, xattrs, link, xattr_tmp) {
260 		TAILQ_REMOVE(xattrs, xattr, link);
261 		free(xattr->name);
262 		free(xattr->value);
263 		free(xattr);
264 	}
265 }
266 
267 static void
268 _spdk_blob_free(struct spdk_blob *blob)
269 {
270 	assert(blob != NULL);
271 
272 	free(blob->active.extent_pages);
273 	free(blob->clean.extent_pages);
274 	free(blob->active.clusters);
275 	free(blob->clean.clusters);
276 	free(blob->active.pages);
277 	free(blob->clean.pages);
278 
279 	_spdk_xattrs_free(&blob->xattrs);
280 	_spdk_xattrs_free(&blob->xattrs_internal);
281 
282 	if (blob->back_bs_dev) {
283 		blob->back_bs_dev->destroy(blob->back_bs_dev);
284 	}
285 
286 	free(blob);
287 }
288 
289 struct freeze_io_ctx {
290 	struct spdk_bs_cpl cpl;
291 	struct spdk_blob *blob;
292 };
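/*
 * Freeze/unfreeze protocol used by the helpers below: _spdk_blob_freeze_io()
 * bumps blob->frozen_refcnt; the first freeze walks every blobstore channel
 * with spdk_for_each_channel() as a barrier, so the caller's completion only
 * runs after each channel's thread has processed the message. While the blob
 * is frozen, the I/O submission path parks user ops on the channel's queued_io
 * list. _spdk_blob_unfreeze_io() drops the refcount and, once it reaches zero,
 * replays the queued ops on every channel via _spdk_blob_execute_queued_io().
 */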
293 
294 static void
295 _spdk_blob_io_sync(struct spdk_io_channel_iter *i)
296 {
297 	spdk_for_each_channel_continue(i, 0);
298 }
299 
300 static void
301 _spdk_blob_execute_queued_io(struct spdk_io_channel_iter *i)
302 {
303 	struct spdk_io_channel *_ch = spdk_io_channel_iter_get_channel(i);
304 	struct spdk_bs_channel *ch = spdk_io_channel_get_ctx(_ch);
305 	struct freeze_io_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
306 	struct spdk_bs_request_set	*set;
307 	struct spdk_bs_user_op_args	*args;
308 	spdk_bs_user_op_t *op, *tmp;
309 
310 	TAILQ_FOREACH_SAFE(op, &ch->queued_io, link, tmp) {
311 		set = (struct spdk_bs_request_set *)op;
312 		args = &set->u.user_op;
313 
314 		if (args->blob == ctx->blob) {
315 			TAILQ_REMOVE(&ch->queued_io, op, link);
316 			spdk_bs_user_op_execute(op);
317 		}
318 	}
319 
320 	spdk_for_each_channel_continue(i, 0);
321 }
322 
323 static void
324 _spdk_blob_io_cpl(struct spdk_io_channel_iter *i, int status)
325 {
326 	struct freeze_io_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
327 
328 	ctx->cpl.u.blob_basic.cb_fn(ctx->cpl.u.blob_basic.cb_arg, 0);
329 
330 	free(ctx);
331 }
332 
333 static void
334 _spdk_blob_freeze_io(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
335 {
336 	struct freeze_io_ctx *ctx;
337 
338 	ctx = calloc(1, sizeof(*ctx));
339 	if (!ctx) {
340 		cb_fn(cb_arg, -ENOMEM);
341 		return;
342 	}
343 
344 	ctx->cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
345 	ctx->cpl.u.blob_basic.cb_fn = cb_fn;
346 	ctx->cpl.u.blob_basic.cb_arg = cb_arg;
347 	ctx->blob = blob;
348 
349 	/* Freeze I/O on blob */
350 	blob->frozen_refcnt++;
351 
352 	if (blob->frozen_refcnt == 1) {
353 		spdk_for_each_channel(blob->bs, _spdk_blob_io_sync, ctx, _spdk_blob_io_cpl);
354 	} else {
355 		cb_fn(cb_arg, 0);
356 		free(ctx);
357 	}
358 }
359 
360 static void
361 _spdk_blob_unfreeze_io(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
362 {
363 	struct freeze_io_ctx *ctx;
364 
365 	ctx = calloc(1, sizeof(*ctx));
366 	if (!ctx) {
367 		cb_fn(cb_arg, -ENOMEM);
368 		return;
369 	}
370 
371 	ctx->cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
372 	ctx->cpl.u.blob_basic.cb_fn = cb_fn;
373 	ctx->cpl.u.blob_basic.cb_arg = cb_arg;
374 	ctx->blob = blob;
375 
376 	assert(blob->frozen_refcnt > 0);
377 
378 	blob->frozen_refcnt--;
379 
380 	if (blob->frozen_refcnt == 0) {
381 		spdk_for_each_channel(blob->bs, _spdk_blob_execute_queued_io, ctx, _spdk_blob_io_cpl);
382 	} else {
383 		cb_fn(cb_arg, 0);
384 		free(ctx);
385 	}
386 }
387 
388 static int
389 _spdk_blob_mark_clean(struct spdk_blob *blob)
390 {
391 	uint32_t *extent_pages = NULL;
392 	uint64_t *clusters = NULL;
393 	uint32_t *pages = NULL;
394 
395 	assert(blob != NULL);
396 
397 	if (blob->active.num_extent_pages) {
398 		assert(blob->active.extent_pages);
399 		extent_pages = calloc(blob->active.num_extent_pages, sizeof(*blob->active.extent_pages));
400 		if (!extent_pages) {
401 			return -ENOMEM;
402 		}
403 		memcpy(extent_pages, blob->active.extent_pages,
404 		       blob->active.num_extent_pages * sizeof(*extent_pages));
405 	}
406 
407 	if (blob->active.num_clusters) {
408 		assert(blob->active.clusters);
409 		clusters = calloc(blob->active.num_clusters, sizeof(*blob->active.clusters));
410 		if (!clusters) {
411 			free(extent_pages);
412 			return -ENOMEM;
413 		}
414 		memcpy(clusters, blob->active.clusters, blob->active.num_clusters * sizeof(*blob->active.clusters));
415 	}
416 
417 	if (blob->active.num_pages) {
418 		assert(blob->active.pages);
419 		pages = calloc(blob->active.num_pages, sizeof(*blob->active.pages));
420 		if (!pages) {
421 			free(extent_pages);
422 			free(clusters);
423 			return -ENOMEM;
424 		}
425 		memcpy(pages, blob->active.pages, blob->active.num_pages * sizeof(*blob->active.pages));
426 	}
427 
428 	free(blob->clean.extent_pages);
429 	free(blob->clean.clusters);
430 	free(blob->clean.pages);
431 
432 	blob->clean.num_extent_pages = blob->active.num_extent_pages;
433 	blob->clean.extent_pages = blob->active.extent_pages;
434 	blob->clean.num_clusters = blob->active.num_clusters;
435 	blob->clean.clusters = blob->active.clusters;
436 	blob->clean.num_pages = blob->active.num_pages;
437 	blob->clean.pages = blob->active.pages;
438 
439 	blob->active.extent_pages = extent_pages;
440 	blob->active.clusters = clusters;
441 	blob->active.pages = pages;
442 
443 	/* If the metadata was dirtied again while the metadata was being written to disk,
444 	 *  we do not want to revert the DIRTY state back to CLEAN here.
445 	 */
446 	if (blob->state == SPDK_BLOB_STATE_LOADING) {
447 		blob->state = SPDK_BLOB_STATE_CLEAN;
448 	}
449 
450 	return 0;
451 }
452 
453 static int
454 _spdk_blob_deserialize_xattr(struct spdk_blob *blob,
455 			     struct spdk_blob_md_descriptor_xattr *desc_xattr, bool internal)
456 {
457 	struct spdk_xattr                       *xattr;
458 
459 	if (desc_xattr->length != sizeof(desc_xattr->name_length) +
460 	    sizeof(desc_xattr->value_length) +
461 	    desc_xattr->name_length + desc_xattr->value_length) {
462 		return -EINVAL;
463 	}
464 
465 	xattr = calloc(1, sizeof(*xattr));
466 	if (xattr == NULL) {
467 		return -ENOMEM;
468 	}
469 
470 	xattr->name = malloc(desc_xattr->name_length + 1);
471 	if (xattr->name == NULL) {
472 		free(xattr);
473 		return -ENOMEM;
474 	}
475 	memcpy(xattr->name, desc_xattr->name, desc_xattr->name_length);
476 	xattr->name[desc_xattr->name_length] = '\0';
477 
478 	xattr->value = malloc(desc_xattr->value_length);
479 	if (xattr->value == NULL) {
480 		free(xattr->name);
481 		free(xattr);
482 		return -ENOMEM;
483 	}
484 	xattr->value_len = desc_xattr->value_length;
485 	memcpy(xattr->value,
486 	       (void *)((uintptr_t)desc_xattr->name + desc_xattr->name_length),
487 	       desc_xattr->value_length);
488 
489 	TAILQ_INSERT_TAIL(internal ? &blob->xattrs_internal : &blob->xattrs, xattr, link);
490 
491 	return 0;
492 }
493 
494 
495 static int
496 _spdk_blob_parse_page(const struct spdk_blob_md_page *page, struct spdk_blob *blob)
497 {
498 	struct spdk_blob_md_descriptor *desc;
499 	size_t	cur_desc = 0;
500 	void *tmp;
501 
502 	desc = (struct spdk_blob_md_descriptor *)page->descriptors;
503 	while (cur_desc < sizeof(page->descriptors)) {
504 		if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) {
505 			if (desc->length == 0) {
506 				/* If padding and length are 0, this terminates the page */
507 				break;
508 			}
509 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) {
510 			struct spdk_blob_md_descriptor_flags	*desc_flags;
511 
512 			desc_flags = (struct spdk_blob_md_descriptor_flags *)desc;
513 
514 			if (desc_flags->length != sizeof(*desc_flags) - sizeof(*desc)) {
515 				return -EINVAL;
516 			}
517 
518 			if ((desc_flags->invalid_flags | SPDK_BLOB_INVALID_FLAGS_MASK) !=
519 			    SPDK_BLOB_INVALID_FLAGS_MASK) {
520 				return -EINVAL;
521 			}
522 
523 			if ((desc_flags->data_ro_flags | SPDK_BLOB_DATA_RO_FLAGS_MASK) !=
524 			    SPDK_BLOB_DATA_RO_FLAGS_MASK) {
525 				blob->data_ro = true;
526 				blob->md_ro = true;
527 			}
528 
529 			if ((desc_flags->md_ro_flags | SPDK_BLOB_MD_RO_FLAGS_MASK) !=
530 			    SPDK_BLOB_MD_RO_FLAGS_MASK) {
531 				blob->md_ro = true;
532 			}
533 
534 			if ((desc_flags->data_ro_flags & SPDK_BLOB_READ_ONLY)) {
535 				blob->data_ro = true;
536 				blob->md_ro = true;
537 			}
538 
539 			blob->invalid_flags = desc_flags->invalid_flags;
540 			blob->data_ro_flags = desc_flags->data_ro_flags;
541 			blob->md_ro_flags = desc_flags->md_ro_flags;
542 
543 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE) {
544 			struct spdk_blob_md_descriptor_extent_rle	*desc_extent_rle;
545 			unsigned int				i, j;
546 			unsigned int				cluster_count = blob->active.num_clusters;
547 
548 			if (blob->extent_table_found) {
 549 				/* An Extent Table descriptor is already present in the md;
 550 				 * the two descriptor types must never appear at the same time. */
551 				return -EINVAL;
552 			}
553 			blob->extent_rle_found = true;
554 
555 			desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)desc;
556 
557 			if (desc_extent_rle->length == 0 ||
558 			    (desc_extent_rle->length % sizeof(desc_extent_rle->extents[0]) != 0)) {
559 				return -EINVAL;
560 			}
561 
562 			for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) {
563 				for (j = 0; j < desc_extent_rle->extents[i].length; j++) {
564 					if (desc_extent_rle->extents[i].cluster_idx != 0) {
565 						if (!spdk_bit_array_get(blob->bs->used_clusters,
566 									desc_extent_rle->extents[i].cluster_idx + j)) {
567 							return -EINVAL;
568 						}
569 					}
570 					cluster_count++;
571 				}
572 			}
573 
574 			if (cluster_count == 0) {
575 				return -EINVAL;
576 			}
577 			tmp = realloc(blob->active.clusters, cluster_count * sizeof(*blob->active.clusters));
578 			if (tmp == NULL) {
579 				return -ENOMEM;
580 			}
581 			blob->active.clusters = tmp;
582 			blob->active.cluster_array_size = cluster_count;
583 
584 			for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) {
585 				for (j = 0; j < desc_extent_rle->extents[i].length; j++) {
586 					if (desc_extent_rle->extents[i].cluster_idx != 0) {
587 						blob->active.clusters[blob->active.num_clusters++] = _spdk_bs_cluster_to_lba(blob->bs,
588 								desc_extent_rle->extents[i].cluster_idx + j);
589 					} else if (spdk_blob_is_thin_provisioned(blob)) {
590 						blob->active.clusters[blob->active.num_clusters++] = 0;
591 					} else {
592 						return -EINVAL;
593 					}
594 				}
595 			}
596 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE) {
597 			struct spdk_blob_md_descriptor_extent_table *desc_extent_table;
598 			uint32_t num_extent_pages = blob->active.num_extent_pages;
599 			uint32_t i, j;
600 			size_t extent_pages_length;
601 
602 			desc_extent_table = (struct spdk_blob_md_descriptor_extent_table *)desc;
603 			extent_pages_length = desc_extent_table->length - sizeof(desc_extent_table->num_clusters);
604 
605 			if (blob->extent_rle_found) {
 606 				/* Extent RLE is already present in the MD;
 607 				 * the two descriptor types must never appear at the same time. */
608 				return -EINVAL;
609 			} else if (blob->extent_table_found &&
610 				   desc_extent_table->num_clusters != blob->remaining_clusters_in_et) {
611 				/* Number of clusters in this ET does not match number
612 				 * from previously read EXTENT_TABLE. */
613 				return -EINVAL;
614 			}
615 
616 			blob->extent_table_found = true;
617 
618 			if (desc_extent_table->length == 0 ||
619 			    (extent_pages_length % sizeof(desc_extent_table->extent_page[0]) != 0)) {
620 				return -EINVAL;
621 			}
622 
623 			for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) {
624 				num_extent_pages += desc_extent_table->extent_page[i].num_pages;
625 			}
626 
627 			tmp = realloc(blob->active.extent_pages, num_extent_pages * sizeof(uint32_t));
628 			if (tmp == NULL) {
629 				return -ENOMEM;
630 			}
631 			blob->active.extent_pages = tmp;
632 			blob->active.extent_pages_array_size = num_extent_pages;
633 
634 			blob->remaining_clusters_in_et = desc_extent_table->num_clusters;
635 
636 			/* Extent table entries contain md page numbers for extent pages.
 637 			 * Zeroes represent unallocated extent pages, which are run-length-encoded.
638 			 */
639 			for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) {
640 				if (desc_extent_table->extent_page[i].page_idx != 0) {
641 					assert(desc_extent_table->extent_page[i].num_pages == 1);
642 					blob->active.extent_pages[blob->active.num_extent_pages++] =
643 						desc_extent_table->extent_page[i].page_idx;
644 				} else if (spdk_blob_is_thin_provisioned(blob)) {
645 					for (j = 0; j < desc_extent_table->extent_page[i].num_pages; j++) {
646 						blob->active.extent_pages[blob->active.num_extent_pages++] = 0;
647 					}
648 				} else {
649 					return -EINVAL;
650 				}
651 			}
652 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) {
653 			struct spdk_blob_md_descriptor_extent_page	*desc_extent;
654 			unsigned int					i;
655 			unsigned int					cluster_count = blob->active.num_clusters;
656 			size_t						cluster_idx_length;
657 
658 			if (blob->extent_rle_found) {
 659 				/* Extent RLE is already present in the MD;
 660 				 * the two descriptor types must never appear at the same time. */
661 				return -EINVAL;
662 			}
663 
664 			desc_extent = (struct spdk_blob_md_descriptor_extent_page *)desc;
665 			cluster_idx_length = desc_extent->length - sizeof(desc_extent->start_cluster_idx);
666 
667 			if (desc_extent->length <= sizeof(desc_extent->start_cluster_idx) ||
668 			    (cluster_idx_length % sizeof(desc_extent->cluster_idx[0]) != 0)) {
669 				return -EINVAL;
670 			}
671 
672 			for (i = 0; i < cluster_idx_length / sizeof(desc_extent->cluster_idx[0]); i++) {
673 				if (desc_extent->cluster_idx[i] != 0) {
674 					if (!spdk_bit_array_get(blob->bs->used_clusters, desc_extent->cluster_idx[i])) {
675 						return -EINVAL;
676 					}
677 				}
678 				cluster_count++;
679 			}
680 
681 			if (cluster_count == 0) {
682 				return -EINVAL;
683 			}
684 
 685 			/* When reading extent pages sequentially, the starting cluster idx should match
 686 			 * the current size of the blob.
 687 			 * If this is ever changed to batch reading, this check shall be removed. */
688 			if (desc_extent->start_cluster_idx != blob->active.num_clusters) {
689 				return -EINVAL;
690 			}
691 
692 			tmp = realloc(blob->active.clusters, cluster_count * sizeof(*blob->active.clusters));
693 			if (tmp == NULL) {
694 				return -ENOMEM;
695 			}
696 			blob->active.clusters = tmp;
697 			blob->active.cluster_array_size = cluster_count;
698 
699 			for (i = 0; i < cluster_idx_length / sizeof(desc_extent->cluster_idx[0]); i++) {
700 				if (desc_extent->cluster_idx[i] != 0) {
701 					blob->active.clusters[blob->active.num_clusters++] = _spdk_bs_cluster_to_lba(blob->bs,
702 							desc_extent->cluster_idx[i]);
703 				} else if (spdk_blob_is_thin_provisioned(blob)) {
704 					blob->active.clusters[blob->active.num_clusters++] = 0;
705 				} else {
706 					return -EINVAL;
707 				}
708 			}
709 			assert(desc_extent->start_cluster_idx + cluster_count == blob->active.num_clusters);
710 			assert(blob->remaining_clusters_in_et >= cluster_count);
711 			blob->remaining_clusters_in_et -= cluster_count;
712 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) {
713 			int rc;
714 
715 			rc = _spdk_blob_deserialize_xattr(blob,
716 							  (struct spdk_blob_md_descriptor_xattr *) desc, false);
717 			if (rc != 0) {
718 				return rc;
719 			}
720 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) {
721 			int rc;
722 
723 			rc = _spdk_blob_deserialize_xattr(blob,
724 							  (struct spdk_blob_md_descriptor_xattr *) desc, true);
725 			if (rc != 0) {
726 				return rc;
727 			}
728 		} else {
729 			/* Unrecognized descriptor type.  Do not fail - just continue to the
730 			 *  next descriptor.  If this descriptor is associated with some feature
731 			 *  defined in a newer version of blobstore, that version of blobstore
732 			 *  should create and set an associated feature flag to specify if this
733 			 *  blob can be loaded or not.
734 			 */
735 		}
736 
737 		/* Advance to the next descriptor */
738 		cur_desc += sizeof(*desc) + desc->length;
739 		if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) {
740 			break;
741 		}
742 		desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc);
743 	}
744 
745 	return 0;
746 }
747 
748 static bool _spdk_bs_load_cur_extent_page_valid(struct spdk_blob_md_page *page);
749 
750 static int
751 _spdk_blob_parse_extent_page(struct spdk_blob_md_page *extent_page, struct spdk_blob *blob)
752 {
753 	assert(blob != NULL);
754 	assert(blob->state == SPDK_BLOB_STATE_LOADING);
755 
756 	if (_spdk_bs_load_cur_extent_page_valid(extent_page) == false) {
757 		return -ENOENT;
758 	}
759 
760 	return _spdk_blob_parse_page(extent_page, blob);
761 }
762 
763 static int
764 _spdk_blob_parse(const struct spdk_blob_md_page *pages, uint32_t page_count,
765 		 struct spdk_blob *blob)
766 {
767 	const struct spdk_blob_md_page *page;
768 	uint32_t i;
769 	int rc;
770 
771 	assert(page_count > 0);
772 	assert(pages[0].sequence_num == 0);
773 	assert(blob != NULL);
774 	assert(blob->state == SPDK_BLOB_STATE_LOADING);
775 	assert(blob->active.clusters == NULL);
776 
 777 	/* The blobid provided doesn't match what's in the MD; this can
 778 	 * happen, for example, if a bogus blobid is passed in through open.
779 	 */
780 	if (blob->id != pages[0].id) {
781 		SPDK_ERRLOG("Blobid (%lu) doesn't match what's in metadata (%lu)\n",
782 			    blob->id, pages[0].id);
783 		return -ENOENT;
784 	}
785 
786 	for (i = 0; i < page_count; i++) {
787 		page = &pages[i];
788 
789 		assert(page->id == blob->id);
790 		assert(page->sequence_num == i);
791 
792 		rc = _spdk_blob_parse_page(page, blob);
793 		if (rc != 0) {
794 			return rc;
795 		}
796 	}
797 
798 	return 0;
799 }
800 
801 static int
802 _spdk_blob_serialize_add_page(const struct spdk_blob *blob,
803 			      struct spdk_blob_md_page **pages,
804 			      uint32_t *page_count,
805 			      struct spdk_blob_md_page **last_page)
806 {
807 	struct spdk_blob_md_page *page;
808 
809 	assert(pages != NULL);
810 	assert(page_count != NULL);
811 
812 	if (*page_count == 0) {
813 		assert(*pages == NULL);
814 		*page_count = 1;
815 		*pages = spdk_malloc(SPDK_BS_PAGE_SIZE, SPDK_BS_PAGE_SIZE,
816 				     NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
817 	} else {
818 		assert(*pages != NULL);
819 		(*page_count)++;
820 		*pages = spdk_realloc(*pages,
821 				      SPDK_BS_PAGE_SIZE * (*page_count),
822 				      SPDK_BS_PAGE_SIZE);
823 	}
824 
825 	if (*pages == NULL) {
826 		*page_count = 0;
827 		*last_page = NULL;
828 		return -ENOMEM;
829 	}
830 
831 	page = &(*pages)[*page_count - 1];
832 	memset(page, 0, sizeof(*page));
833 	page->id = blob->id;
834 	page->sequence_num = *page_count - 1;
835 	page->next = SPDK_INVALID_MD_PAGE;
836 	*last_page = page;
837 
838 	return 0;
839 }
840 
841 /* Transform the in-memory representation 'xattr' into an on-disk xattr descriptor.
842  * Update required_sz on both success and failure.
843  *
844  */
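/* On-disk layout produced below: the descriptor header (type, length,
 * name_length, value_length) is followed directly by the name bytes and then
 * the value bytes. As an illustrative example, an xattr named "name" (4 bytes)
 * with an 8-byte value needs
 * required_sz = sizeof(struct spdk_blob_md_descriptor_xattr) + 4 + 8.
 */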
845 static int
846 _spdk_blob_serialize_xattr(const struct spdk_xattr *xattr,
847 			   uint8_t *buf, size_t buf_sz,
848 			   size_t *required_sz, bool internal)
849 {
850 	struct spdk_blob_md_descriptor_xattr	*desc;
851 
852 	*required_sz = sizeof(struct spdk_blob_md_descriptor_xattr) +
853 		       strlen(xattr->name) +
854 		       xattr->value_len;
855 
856 	if (buf_sz < *required_sz) {
857 		return -1;
858 	}
859 
860 	desc = (struct spdk_blob_md_descriptor_xattr *)buf;
861 
862 	desc->type = internal ? SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL : SPDK_MD_DESCRIPTOR_TYPE_XATTR;
863 	desc->length = sizeof(desc->name_length) +
864 		       sizeof(desc->value_length) +
865 		       strlen(xattr->name) +
866 		       xattr->value_len;
867 	desc->name_length = strlen(xattr->name);
868 	desc->value_length = xattr->value_len;
869 
870 	memcpy(desc->name, xattr->name, desc->name_length);
871 	memcpy((void *)((uintptr_t)desc->name + desc->name_length),
872 	       xattr->value,
873 	       desc->value_length);
874 
875 	return 0;
876 }
877 
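/*
 * Worked example for the encoding below (values are illustrative only): with
 * active.extent_pages = { 68, 0, 0, 0, 69 }, only runs of zeroes (unallocated
 * extent pages) are run-length-encoded, so the serialized entries are
 * { page_idx = 68, num_pages = 1 }, { page_idx = 0, num_pages = 3 } and
 * { page_idx = 69, num_pages = 1 }.
 */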
878 static void
879 _spdk_blob_serialize_extent_table_entry(const struct spdk_blob *blob,
880 					uint64_t start_ep, uint64_t *next_ep,
881 					uint8_t **buf, size_t *remaining_sz)
882 {
883 	struct spdk_blob_md_descriptor_extent_table *desc;
884 	size_t cur_sz;
885 	uint64_t i, et_idx;
886 	uint32_t extent_page, ep_len;
887 
888 	/* The buffer must have room for at least one extent page */
889 	cur_sz = sizeof(struct spdk_blob_md_descriptor) + sizeof(desc->num_clusters) + sizeof(
890 			 desc->extent_page[0]);
891 	if (*remaining_sz < cur_sz) {
892 		*next_ep = start_ep;
893 		return;
894 	}
895 
896 	desc = (struct spdk_blob_md_descriptor_extent_table *)*buf;
897 	desc->type = SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE;
898 
899 	desc->num_clusters = blob->active.num_clusters;
900 
901 	extent_page = blob->active.extent_pages[start_ep];
902 	ep_len = 1;
903 	et_idx = 0;
904 	for (i = start_ep + 1; i < blob->active.num_extent_pages; i++) {
905 		/* Extent table entries contain md page offsets for extent pages.
906 		 * Zeroes represent unallocated extent pages, which are run-length-encoded.
907 		 */
908 		if (extent_page == 0 && blob->active.extent_pages[i] == 0) {
909 			ep_len++;
910 			continue;
911 		}
912 		desc->extent_page[et_idx].page_idx = extent_page;
913 		desc->extent_page[et_idx].num_pages = ep_len;
914 		et_idx++;
915 
916 		cur_sz += sizeof(desc->extent_page[et_idx]);
917 
918 		if (*remaining_sz < cur_sz) {
919 			/* If we ran out of buffer space, return */
920 			*next_ep = i;
921 			break;
922 		}
923 		extent_page = blob->active.extent_pages[i];
924 		ep_len = 1;
925 	}
926 
927 	if (*remaining_sz >= cur_sz) {
928 		desc->extent_page[et_idx].page_idx = extent_page;
929 		desc->extent_page[et_idx].num_pages = ep_len;
930 		et_idx++;
931 
932 		*next_ep = blob->active.num_extent_pages;
933 	}
934 
935 	desc->length = sizeof(desc->num_clusters) + sizeof(desc->extent_page[0]) * et_idx;
936 	*remaining_sz -= sizeof(struct spdk_blob_md_descriptor) + desc->length;
937 	*buf += sizeof(struct spdk_blob_md_descriptor) + desc->length;
938 }
939 
940 static int
941 _spdk_blob_serialize_extent_table(const struct spdk_blob *blob,
942 				  struct spdk_blob_md_page **pages,
943 				  struct spdk_blob_md_page *cur_page,
944 				  uint32_t *page_count, uint8_t **buf,
945 				  size_t *remaining_sz)
946 {
947 	uint64_t				last_extent_page;
948 	int					rc;
949 
950 	last_extent_page = 0;
951 	while (last_extent_page < blob->active.num_extent_pages) {
952 		_spdk_blob_serialize_extent_table_entry(blob, last_extent_page, &last_extent_page, buf,
953 							remaining_sz);
954 
955 		if (last_extent_page == blob->active.num_extent_pages) {
956 			break;
957 		}
958 
959 		rc = _spdk_blob_serialize_add_page(blob, pages, page_count, &cur_page);
960 		if (rc < 0) {
961 			return rc;
962 		}
963 
964 		*buf = (uint8_t *)cur_page->descriptors;
965 		*remaining_sz = sizeof(cur_page->descriptors);
966 	}
967 
968 	return 0;
969 }
970 
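/*
 * Worked example for the RLE encoding below (sizes are illustrative only):
 * with a 1MiB cluster and 4KiB device blocks, lba_per_cluster is 256, and
 * active.clusters = { 256, 512, 0, 0, 2560 } serializes to
 * { cluster_idx = 1, length = 2 } (two contiguous allocated clusters),
 * { cluster_idx = 0, length = 2 } (two unallocated, thin-provisioned clusters)
 * and { cluster_idx = 10, length = 1 }.
 */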
971 static void
972 _spdk_blob_serialize_extent_rle(const struct spdk_blob *blob,
973 				uint64_t start_cluster, uint64_t *next_cluster,
974 				uint8_t **buf, size_t *buf_sz)
975 {
976 	struct spdk_blob_md_descriptor_extent_rle *desc_extent_rle;
977 	size_t cur_sz;
978 	uint64_t i, extent_idx;
979 	uint64_t lba, lba_per_cluster, lba_count;
980 
981 	/* The buffer must have room for at least one extent */
982 	cur_sz = sizeof(struct spdk_blob_md_descriptor) + sizeof(desc_extent_rle->extents[0]);
983 	if (*buf_sz < cur_sz) {
984 		*next_cluster = start_cluster;
985 		return;
986 	}
987 
988 	desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)*buf;
989 	desc_extent_rle->type = SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE;
990 
991 	lba_per_cluster = _spdk_bs_cluster_to_lba(blob->bs, 1);
992 
993 	lba = blob->active.clusters[start_cluster];
994 	lba_count = lba_per_cluster;
995 	extent_idx = 0;
996 	for (i = start_cluster + 1; i < blob->active.num_clusters; i++) {
997 		if ((lba + lba_count) == blob->active.clusters[i] && lba != 0) {
998 			/* Run-length encode sequential non-zero LBA */
999 			lba_count += lba_per_cluster;
1000 			continue;
1001 		} else if (lba == 0 && blob->active.clusters[i] == 0) {
1002 			/* Run-length encode unallocated clusters */
1003 			lba_count += lba_per_cluster;
1004 			continue;
1005 		}
1006 		desc_extent_rle->extents[extent_idx].cluster_idx = lba / lba_per_cluster;
1007 		desc_extent_rle->extents[extent_idx].length = lba_count / lba_per_cluster;
1008 		extent_idx++;
1009 
1010 		cur_sz += sizeof(desc_extent_rle->extents[extent_idx]);
1011 
1012 		if (*buf_sz < cur_sz) {
1013 			/* If we ran out of buffer space, return */
1014 			*next_cluster = i;
1015 			break;
1016 		}
1017 
1018 		lba = blob->active.clusters[i];
1019 		lba_count = lba_per_cluster;
1020 	}
1021 
1022 	if (*buf_sz >= cur_sz) {
1023 		desc_extent_rle->extents[extent_idx].cluster_idx = lba / lba_per_cluster;
1024 		desc_extent_rle->extents[extent_idx].length = lba_count / lba_per_cluster;
1025 		extent_idx++;
1026 
1027 		*next_cluster = blob->active.num_clusters;
1028 	}
1029 
1030 	desc_extent_rle->length = sizeof(desc_extent_rle->extents[0]) * extent_idx;
1031 	*buf_sz -= sizeof(struct spdk_blob_md_descriptor) + desc_extent_rle->length;
1032 	*buf += sizeof(struct spdk_blob_md_descriptor) + desc_extent_rle->length;
1033 }
1034 
1035 static int
1036 _spdk_blob_serialize_extents_rle(const struct spdk_blob *blob,
1037 				 struct spdk_blob_md_page **pages,
1038 				 struct spdk_blob_md_page *cur_page,
1039 				 uint32_t *page_count, uint8_t **buf,
1040 				 size_t *remaining_sz)
1041 {
1042 	uint64_t				last_cluster;
1043 	int					rc;
1044 
1045 	last_cluster = 0;
1046 	while (last_cluster < blob->active.num_clusters) {
1047 		_spdk_blob_serialize_extent_rle(blob, last_cluster, &last_cluster, buf, remaining_sz);
1048 
1049 		if (last_cluster == blob->active.num_clusters) {
1050 			break;
1051 		}
1052 
1053 		rc = _spdk_blob_serialize_add_page(blob, pages, page_count, &cur_page);
1054 		if (rc < 0) {
1055 			return rc;
1056 		}
1057 
1058 		*buf = (uint8_t *)cur_page->descriptors;
1059 		*remaining_sz = sizeof(cur_page->descriptors);
1060 	}
1061 
1062 	return 0;
1063 }
1064 
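/*
 * Unlike the RLE descriptor above, an extent page stores one cluster_idx entry
 * per cluster (0 for an unallocated, thin-provisioned cluster) and covers at
 * most SPDK_EXTENTS_PER_EP clusters, starting at a cluster index aligned down
 * to SPDK_EXTENTS_PER_EP.
 */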
1065 static void
1066 _spdk_blob_serialize_extent_page(const struct spdk_blob *blob,
1067 				 uint64_t cluster, struct spdk_blob_md_page *page)
1068 {
1069 	struct spdk_blob_md_descriptor_extent_page *desc_extent;
1070 	uint64_t i, extent_idx;
1071 	uint64_t lba, lba_per_cluster;
1072 	uint64_t start_cluster_idx = (cluster / SPDK_EXTENTS_PER_EP) * SPDK_EXTENTS_PER_EP;
1073 
1074 	desc_extent = (struct spdk_blob_md_descriptor_extent_page *) page->descriptors;
1075 	desc_extent->type = SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE;
1076 
1077 	lba_per_cluster = _spdk_bs_cluster_to_lba(blob->bs, 1);
1078 
1079 	desc_extent->start_cluster_idx = start_cluster_idx;
1080 	extent_idx = 0;
1081 	for (i = start_cluster_idx; i < blob->active.num_clusters; i++) {
1082 		lba = blob->active.clusters[i];
1083 		desc_extent->cluster_idx[extent_idx++] = lba / lba_per_cluster;
1084 		if (extent_idx >= SPDK_EXTENTS_PER_EP) {
1085 			break;
1086 		}
1087 	}
1088 	desc_extent->length = sizeof(desc_extent->start_cluster_idx) +
1089 			      sizeof(desc_extent->cluster_idx[0]) * extent_idx;
1090 }
1091 
1092 static void
1093 _spdk_blob_serialize_flags(const struct spdk_blob *blob,
1094 			   uint8_t *buf, size_t *buf_sz)
1095 {
1096 	struct spdk_blob_md_descriptor_flags *desc;
1097 
1098 	/*
1099 	 * Flags get serialized first, so we should always have room for the flags
1100 	 *  descriptor.
1101 	 */
1102 	assert(*buf_sz >= sizeof(*desc));
1103 
1104 	desc = (struct spdk_blob_md_descriptor_flags *)buf;
1105 	desc->type = SPDK_MD_DESCRIPTOR_TYPE_FLAGS;
1106 	desc->length = sizeof(*desc) - sizeof(struct spdk_blob_md_descriptor);
1107 	desc->invalid_flags = blob->invalid_flags;
1108 	desc->data_ro_flags = blob->data_ro_flags;
1109 	desc->md_ro_flags = blob->md_ro_flags;
1110 
1111 	*buf_sz -= sizeof(*desc);
1112 }
1113 
1114 static int
1115 _spdk_blob_serialize_xattrs(const struct spdk_blob *blob,
1116 			    const struct spdk_xattr_tailq *xattrs, bool internal,
1117 			    struct spdk_blob_md_page **pages,
1118 			    struct spdk_blob_md_page *cur_page,
1119 			    uint32_t *page_count, uint8_t **buf,
1120 			    size_t *remaining_sz)
1121 {
1122 	const struct spdk_xattr	*xattr;
1123 	int	rc;
1124 
1125 	TAILQ_FOREACH(xattr, xattrs, link) {
1126 		size_t required_sz = 0;
1127 
1128 		rc = _spdk_blob_serialize_xattr(xattr,
1129 						*buf, *remaining_sz,
1130 						&required_sz, internal);
1131 		if (rc < 0) {
1132 			/* Need to add a new page to the chain */
1133 			rc = _spdk_blob_serialize_add_page(blob, pages, page_count,
1134 							   &cur_page);
1135 			if (rc < 0) {
1136 				spdk_free(*pages);
1137 				*pages = NULL;
1138 				*page_count = 0;
1139 				return rc;
1140 			}
1141 
1142 			*buf = (uint8_t *)cur_page->descriptors;
1143 			*remaining_sz = sizeof(cur_page->descriptors);
1144 
1145 			/* Try again */
1146 			required_sz = 0;
1147 			rc = _spdk_blob_serialize_xattr(xattr,
1148 							*buf, *remaining_sz,
1149 							&required_sz, internal);
1150 
1151 			if (rc < 0) {
1152 				spdk_free(*pages);
1153 				*pages = NULL;
1154 				*page_count = 0;
1155 				return rc;
1156 			}
1157 		}
1158 
1159 		*remaining_sz -= required_sz;
1160 		*buf += required_sz;
1161 	}
1162 
1163 	return 0;
1164 }
1165 
1166 static int
1167 _spdk_blob_serialize(const struct spdk_blob *blob, struct spdk_blob_md_page **pages,
1168 		     uint32_t *page_count)
1169 {
1170 	struct spdk_blob_md_page		*cur_page;
1171 	int					rc;
1172 	uint8_t					*buf;
1173 	size_t					remaining_sz;
1174 
1175 	assert(pages != NULL);
1176 	assert(page_count != NULL);
1177 	assert(blob != NULL);
1178 	assert(blob->state == SPDK_BLOB_STATE_DIRTY);
1179 
1180 	*pages = NULL;
1181 	*page_count = 0;
1182 
1183 	/* A blob always has at least 1 page, even if it has no descriptors */
1184 	rc = _spdk_blob_serialize_add_page(blob, pages, page_count, &cur_page);
1185 	if (rc < 0) {
1186 		return rc;
1187 	}
1188 
1189 	buf = (uint8_t *)cur_page->descriptors;
1190 	remaining_sz = sizeof(cur_page->descriptors);
1191 
1192 	/* Serialize flags */
1193 	_spdk_blob_serialize_flags(blob, buf, &remaining_sz);
1194 	buf += sizeof(struct spdk_blob_md_descriptor_flags);
1195 
1196 	/* Serialize xattrs */
1197 	rc = _spdk_blob_serialize_xattrs(blob, &blob->xattrs, false,
1198 					 pages, cur_page, page_count, &buf, &remaining_sz);
1199 	if (rc < 0) {
1200 		return rc;
1201 	}
1202 
1203 	/* Serialize internal xattrs */
1204 	rc = _spdk_blob_serialize_xattrs(blob, &blob->xattrs_internal, true,
1205 					 pages, cur_page, page_count, &buf, &remaining_sz);
1206 	if (rc < 0) {
1207 		return rc;
1208 	}
1209 
1210 	if (blob->use_extent_table) {
1211 		/* Serialize extent table */
1212 		rc = _spdk_blob_serialize_extent_table(blob, pages, cur_page, page_count, &buf, &remaining_sz);
1213 	} else {
1214 		/* Serialize extents */
1215 		rc = _spdk_blob_serialize_extents_rle(blob, pages, cur_page, page_count, &buf, &remaining_sz);
1216 	}
1217 
1218 	return rc;
1219 }
1220 
1221 struct spdk_blob_load_ctx {
1222 	struct spdk_blob		*blob;
1223 
1224 	struct spdk_blob_md_page	*pages;
1225 	uint32_t			num_pages;
1226 	uint32_t			next_extent_page;
1227 	spdk_bs_sequence_t	        *seq;
1228 
1229 	spdk_bs_sequence_cpl		cb_fn;
1230 	void				*cb_arg;
1231 };
1232 
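/*
 * The metadata page CRC covers the whole SPDK_BS_PAGE_SIZE page except the
 * 4-byte crc field stored at the end of the page, which is why the update
 * below stops at SPDK_BS_PAGE_SIZE - 4.
 */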
1233 static uint32_t
1234 _spdk_blob_md_page_calc_crc(void *page)
1235 {
1236 	uint32_t		crc;
1237 
1238 	crc = BLOB_CRC32C_INITIAL;
1239 	crc = spdk_crc32c_update(page, SPDK_BS_PAGE_SIZE - 4, crc);
1240 	crc ^= BLOB_CRC32C_INITIAL;
1241 
1242 	return crc;
1243 
1244 }
1245 
1246 static void
1247 _spdk_blob_load_final(void *cb_arg, int bserrno)
1248 {
1249 	struct spdk_blob_load_ctx	*ctx = cb_arg;
1250 	struct spdk_blob		*blob = ctx->blob;
1251 
1252 	if (bserrno == 0) {
1253 		_spdk_blob_mark_clean(blob);
1254 	}
1255 
1256 	ctx->cb_fn(ctx->seq, ctx->cb_arg, bserrno);
1257 
1258 	/* Free the memory */
1259 	spdk_free(ctx->pages);
1260 	free(ctx);
1261 }
1262 
1263 static void
1264 _spdk_blob_load_snapshot_cpl(void *cb_arg, struct spdk_blob *snapshot, int bserrno)
1265 {
1266 	struct spdk_blob_load_ctx	*ctx = cb_arg;
1267 	struct spdk_blob		*blob = ctx->blob;
1268 
1269 	if (bserrno == 0) {
1270 		blob->back_bs_dev = spdk_bs_create_blob_bs_dev(snapshot);
1271 		if (blob->back_bs_dev == NULL) {
1272 			bserrno = -ENOMEM;
1273 		}
1274 	}
1275 	if (bserrno != 0) {
 1276 		SPDK_ERRLOG("Snapshot setup failed\n");
1277 	}
1278 
1279 	_spdk_blob_load_final(ctx, bserrno);
1280 }
1281 
1282 static void _spdk_blob_update_clear_method(struct spdk_blob *blob);
1283 
1284 static void
1285 _spdk_blob_load_backing_dev(void *cb_arg)
1286 {
1287 	struct spdk_blob_load_ctx	*ctx = cb_arg;
1288 	struct spdk_blob		*blob = ctx->blob;
1289 	const void			*value;
1290 	size_t				len;
1291 	int				rc;
1292 
1293 	if (spdk_blob_is_thin_provisioned(blob)) {
1294 		rc = _spdk_blob_get_xattr_value(blob, BLOB_SNAPSHOT, &value, &len, true);
1295 		if (rc == 0) {
1296 			if (len != sizeof(spdk_blob_id)) {
1297 				_spdk_blob_load_final(ctx, -EINVAL);
1298 				return;
1299 			}
1300 			/* open snapshot blob and continue in the callback function */
1301 			blob->parent_id = *(spdk_blob_id *)value;
1302 			spdk_bs_open_blob(blob->bs, blob->parent_id,
1303 					  _spdk_blob_load_snapshot_cpl, ctx);
1304 			return;
1305 		} else {
1306 			/* add zeroes_dev for thin provisioned blob */
1307 			blob->back_bs_dev = spdk_bs_create_zeroes_dev();
1308 		}
1309 	} else {
1310 		/* standard blob */
1311 		blob->back_bs_dev = NULL;
1312 	}
1313 	_spdk_blob_load_final(ctx, 0);
1314 }
1315 
1316 static void
1317 _spdk_blob_load_cpl_extents_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
1318 {
1319 	struct spdk_blob_load_ctx	*ctx = cb_arg;
1320 	struct spdk_blob		*blob = ctx->blob;
1321 	struct spdk_blob_md_page	*page;
1322 	uint64_t			i;
1323 	uint32_t			crc;
1324 	uint64_t			lba;
1325 	void				*tmp;
1326 	uint64_t			sz;
1327 
1328 	if (bserrno) {
1329 		SPDK_ERRLOG("Extent page read failed: %d\n", bserrno);
1330 		_spdk_blob_load_final(ctx, bserrno);
1331 		return;
1332 	}
1333 
1334 	if (ctx->pages == NULL) {
 1335 		/* First iteration of this function; allocate a buffer for a single EXTENT_PAGE */
1336 		ctx->pages = spdk_zmalloc(SPDK_BS_PAGE_SIZE, SPDK_BS_PAGE_SIZE, NULL, SPDK_ENV_SOCKET_ID_ANY,
1337 					  SPDK_MALLOC_DMA);
1338 		if (!ctx->pages) {
1339 			_spdk_blob_load_final(ctx, -ENOMEM);
1340 			return;
1341 		}
1342 		ctx->num_pages = 1;
1343 		ctx->next_extent_page = 0;
1344 	} else {
1345 		page = &ctx->pages[0];
1346 		crc = _spdk_blob_md_page_calc_crc(page);
1347 		if (crc != page->crc) {
1348 			_spdk_blob_load_final(ctx, -EINVAL);
1349 			return;
1350 		}
1351 
1352 		if (page->next != SPDK_INVALID_MD_PAGE) {
1353 			_spdk_blob_load_final(ctx, -EINVAL);
1354 			return;
1355 		}
1356 
1357 		bserrno = _spdk_blob_parse_extent_page(page, blob);
1358 		if (bserrno) {
1359 			_spdk_blob_load_final(ctx, bserrno);
1360 			return;
1361 		}
1362 	}
1363 
1364 	for (i = ctx->next_extent_page; i < blob->active.num_extent_pages; i++) {
1365 		if (blob->active.extent_pages[i] != 0) {
1366 			/* Extent page was allocated, read and parse it. */
1367 			lba = _spdk_bs_md_page_to_lba(blob->bs, blob->active.extent_pages[i]);
1368 			ctx->next_extent_page = i + 1;
1369 
1370 			spdk_bs_sequence_read_dev(seq, &ctx->pages[0], lba,
1371 						  _spdk_bs_byte_to_lba(blob->bs, SPDK_BS_PAGE_SIZE),
1372 						  _spdk_blob_load_cpl_extents_cpl, ctx);
1373 			return;
1374 		} else {
1375 			/* Thin provisioned blobs can point to unallocated extent pages.
1376 			 * In this case blob size should be increased by up to the amount left in remaining_clusters_in_et. */
1377 
1378 			sz = spdk_min(blob->remaining_clusters_in_et, SPDK_EXTENTS_PER_EP);
1379 			blob->active.num_clusters += sz;
1380 			blob->remaining_clusters_in_et -= sz;
1381 
1382 			assert(spdk_blob_is_thin_provisioned(blob));
1383 			assert(i + 1 < blob->active.num_extent_pages || blob->remaining_clusters_in_et == 0);
1384 
1385 			tmp = realloc(blob->active.clusters, blob->active.num_clusters * sizeof(*blob->active.clusters));
1386 			if (tmp == NULL) {
1387 				_spdk_blob_load_final(ctx, -ENOMEM);
1388 				return;
1389 			}
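			/* Zero only the newly grown tail of the cluster array; realloc() leaves
			 * the memory beyond the previous cluster_array_size uninitialized. */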
 1390 			memset(tmp + sizeof(*blob->active.clusters) * blob->active.cluster_array_size, 0,
 1391 			       sizeof(*blob->active.clusters) * (blob->active.num_clusters - blob->active.cluster_array_size));
1392 			blob->active.clusters = tmp;
1393 			blob->active.cluster_array_size = blob->active.num_clusters;
1394 		}
1395 	}
1396 
1397 	_spdk_blob_load_backing_dev(ctx);
1398 }
1399 
1400 static void
1401 _spdk_blob_load_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
1402 {
1403 	struct spdk_blob_load_ctx	*ctx = cb_arg;
1404 	struct spdk_blob		*blob = ctx->blob;
1405 	struct spdk_blob_md_page	*page;
1406 	int				rc;
1407 	uint32_t			crc;
1408 
1409 	if (bserrno) {
1410 		SPDK_ERRLOG("Metadata page read failed: %d\n", bserrno);
1411 		_spdk_blob_load_final(ctx, bserrno);
1412 		return;
1413 	}
1414 
1415 	page = &ctx->pages[ctx->num_pages - 1];
1416 	crc = _spdk_blob_md_page_calc_crc(page);
1417 	if (crc != page->crc) {
1418 		SPDK_ERRLOG("Metadata page %d crc mismatch\n", ctx->num_pages);
1419 		_spdk_blob_load_final(ctx, -EINVAL);
1420 		return;
1421 	}
1422 
1423 	if (page->next != SPDK_INVALID_MD_PAGE) {
1424 		uint32_t next_page = page->next;
1425 		uint64_t next_lba = _spdk_bs_md_page_to_lba(blob->bs, next_page);
1426 
1427 		/* Read the next page */
1428 		ctx->num_pages++;
1429 		ctx->pages = spdk_realloc(ctx->pages, (sizeof(*page) * ctx->num_pages),
1430 					  sizeof(*page));
1431 		if (ctx->pages == NULL) {
1432 			_spdk_blob_load_final(ctx, -ENOMEM);
1433 			return;
1434 		}
1435 
1436 		spdk_bs_sequence_read_dev(seq, &ctx->pages[ctx->num_pages - 1],
1437 					  next_lba,
1438 					  _spdk_bs_byte_to_lba(blob->bs, sizeof(*page)),
1439 					  _spdk_blob_load_cpl, ctx);
1440 		return;
1441 	}
1442 
1443 	/* Parse the pages */
1444 	rc = _spdk_blob_parse(ctx->pages, ctx->num_pages, blob);
1445 	if (rc) {
1446 		_spdk_blob_load_final(ctx, rc);
1447 		return;
1448 	}
1449 
1450 	if (blob->extent_table_found == true) {
1451 		/* If EXTENT_TABLE was found, that means support for it should be enabled. */
1452 		assert(blob->extent_rle_found == false);
1453 		blob->use_extent_table = true;
1454 	} else {
 1455 		/* If EXTENT_RLE was found, or no extent_* descriptor at all, disable support
 1456 		 * for the extent table. Having no extent_* descriptors means that the blob has
 1457 		 * a length of 0 and no extent_rle descriptors were persisted for it.
 1458 		 * An EXTENT_TABLE, if used, is always present in the metadata regardless of length. */
1459 		blob->use_extent_table = false;
1460 	}
1461 
1462 	/* Check the clear_method stored in metadata vs what may have been passed
1463 	 * via spdk_bs_open_blob_ext() and update accordingly.
1464 	 */
1465 	_spdk_blob_update_clear_method(blob);
1466 
1467 	spdk_free(ctx->pages);
1468 	ctx->pages = NULL;
1469 
1470 	if (blob->extent_table_found) {
1471 		_spdk_blob_load_cpl_extents_cpl(seq, ctx, 0);
1472 	} else {
1473 		_spdk_blob_load_backing_dev(ctx);
1474 	}
1475 }
1476 
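/* The sequence below reads the root metadata page for the blobid, follows the
 * page->next chain in _spdk_blob_load_cpl() until SPDK_INVALID_MD_PAGE, parses
 * the descriptors, then (for extent-table blobs) reads each allocated extent
 * page in _spdk_blob_load_cpl_extents_cpl(), and finally sets up the backing
 * device (snapshot, zeroes dev, or none) in _spdk_blob_load_backing_dev(). */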
1477 /* Load a blob from disk given a blobid */
1478 static void
1479 _spdk_blob_load(spdk_bs_sequence_t *seq, struct spdk_blob *blob,
1480 		spdk_bs_sequence_cpl cb_fn, void *cb_arg)
1481 {
1482 	struct spdk_blob_load_ctx *ctx;
1483 	struct spdk_blob_store *bs;
1484 	uint32_t page_num;
1485 	uint64_t lba;
1486 
1487 	_spdk_blob_verify_md_op(blob);
1488 
1489 	bs = blob->bs;
1490 
1491 	ctx = calloc(1, sizeof(*ctx));
1492 	if (!ctx) {
1493 		cb_fn(seq, cb_arg, -ENOMEM);
1494 		return;
1495 	}
1496 
1497 	ctx->blob = blob;
1498 	ctx->pages = spdk_realloc(ctx->pages, SPDK_BS_PAGE_SIZE, SPDK_BS_PAGE_SIZE);
1499 	if (!ctx->pages) {
1500 		free(ctx);
1501 		cb_fn(seq, cb_arg, -ENOMEM);
1502 		return;
1503 	}
1504 	ctx->num_pages = 1;
1505 	ctx->cb_fn = cb_fn;
1506 	ctx->cb_arg = cb_arg;
1507 	ctx->seq = seq;
1508 
1509 	page_num = _spdk_bs_blobid_to_page(blob->id);
1510 	lba = _spdk_bs_md_page_to_lba(blob->bs, page_num);
1511 
1512 	blob->state = SPDK_BLOB_STATE_LOADING;
1513 
1514 	spdk_bs_sequence_read_dev(seq, &ctx->pages[0], lba,
1515 				  _spdk_bs_byte_to_lba(bs, SPDK_BS_PAGE_SIZE),
1516 				  _spdk_blob_load_cpl, ctx);
1517 }
1518 
1519 struct spdk_blob_persist_ctx {
1520 	struct spdk_blob		*blob;
1521 
1522 	struct spdk_bs_super_block	*super;
1523 
1524 	struct spdk_blob_md_page	*pages;
1525 	uint32_t			next_extent_page;
1526 	struct spdk_blob_md_page	*extent_page;
1527 
1528 	spdk_bs_sequence_t		*seq;
1529 	spdk_bs_sequence_cpl		cb_fn;
1530 	void				*cb_arg;
1531 };
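/*
 * Persist pipeline (the steps run through the callbacks below): write every
 * metadata page except the root, then write the root page, then zero any
 * metadata pages left over from the previous (clean) version of the blob, and
 * finally clear and release the clusters that were truncated off the end of
 * the blob before completing back to the caller.
 */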
1532 
1533 static void
1534 spdk_bs_batch_clear_dev(struct spdk_blob_persist_ctx *ctx, spdk_bs_batch_t *batch, uint64_t lba,
1535 			uint32_t lba_count)
1536 {
1537 	switch (ctx->blob->clear_method) {
1538 	case BLOB_CLEAR_WITH_DEFAULT:
1539 	case BLOB_CLEAR_WITH_UNMAP:
1540 		spdk_bs_batch_unmap_dev(batch, lba, lba_count);
1541 		break;
1542 	case BLOB_CLEAR_WITH_WRITE_ZEROES:
1543 		spdk_bs_batch_write_zeroes_dev(batch, lba, lba_count);
1544 		break;
1545 	case BLOB_CLEAR_WITH_NONE:
1546 	default:
1547 		break;
1548 	}
1549 }
1550 
1551 static void
1552 _spdk_blob_persist_complete(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
1553 {
1554 	struct spdk_blob_persist_ctx	*ctx = cb_arg;
1555 	struct spdk_blob		*blob = ctx->blob;
1556 
1557 	if (bserrno == 0) {
1558 		_spdk_blob_mark_clean(blob);
1559 	}
1560 
1561 	/* Call user callback */
1562 	ctx->cb_fn(seq, ctx->cb_arg, bserrno);
1563 
1564 	/* Free the memory */
1565 	spdk_free(ctx->pages);
1566 	free(ctx);
1567 }
1568 
1569 static void
1570 _spdk_blob_persist_clear_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
1571 {
1572 	struct spdk_blob_persist_ctx	*ctx = cb_arg;
1573 	struct spdk_blob		*blob = ctx->blob;
1574 	struct spdk_blob_store		*bs = blob->bs;
1575 	size_t				i;
1576 
1577 	/* Release all clusters that were truncated */
1578 	for (i = blob->active.num_clusters; i < blob->active.cluster_array_size; i++) {
1579 		uint32_t cluster_num = _spdk_bs_lba_to_cluster(bs, blob->active.clusters[i]);
1580 
1581 		/* Nothing to release if it was not allocated */
1582 		if (blob->active.clusters[i] != 0) {
1583 			_spdk_bs_release_cluster(bs, cluster_num);
1584 		}
1585 	}
1586 
1587 	if (blob->active.num_clusters == 0) {
1588 		free(blob->active.clusters);
1589 		blob->active.clusters = NULL;
1590 		blob->active.cluster_array_size = 0;
1591 	} else if (blob->active.num_clusters != blob->active.cluster_array_size) {
1592 #ifndef __clang_analyzer__
1593 		void *tmp;
1594 
 1595 		/* scan-build can't properly reason about reallocs; work around it */
1596 		tmp = realloc(blob->active.clusters, sizeof(*blob->active.clusters) * blob->active.num_clusters);
1597 		assert(tmp != NULL);
1598 		blob->active.clusters = tmp;
1599 
1600 		tmp = realloc(blob->active.extent_pages, sizeof(uint32_t) * blob->active.num_extent_pages);
1601 		assert(tmp != NULL);
1602 		blob->active.extent_pages = tmp;
1603 #endif
1604 		blob->active.extent_pages_array_size = blob->active.num_extent_pages;
1605 		blob->active.cluster_array_size = blob->active.num_clusters;
1606 	}
1607 
1608 	/* TODO: Add path to persist clear extent pages. */
1609 	_spdk_blob_persist_complete(seq, ctx, bserrno);
1610 }
1611 
1612 static void
1613 _spdk_blob_persist_clear_clusters(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
1614 {
1615 	struct spdk_blob_persist_ctx	*ctx = cb_arg;
1616 	struct spdk_blob		*blob = ctx->blob;
1617 	struct spdk_blob_store		*bs = blob->bs;
1618 	spdk_bs_batch_t			*batch;
1619 	size_t				i;
1620 	uint64_t			lba;
1621 	uint32_t			lba_count;
1622 
1623 	/* Clusters don't move around in blobs. The list shrinks or grows
1624 	 * at the end, but no changes ever occur in the middle of the list.
1625 	 */
1626 
1627 	batch = spdk_bs_sequence_to_batch(seq, _spdk_blob_persist_clear_clusters_cpl, ctx);
1628 
1629 	/* Clear all clusters that were truncated */
1630 	lba = 0;
1631 	lba_count = 0;
1632 	for (i = blob->active.num_clusters; i < blob->active.cluster_array_size; i++) {
1633 		uint64_t next_lba = blob->active.clusters[i];
1634 		uint32_t next_lba_count = _spdk_bs_cluster_to_lba(bs, 1);
1635 
1636 		if (next_lba > 0 && (lba + lba_count) == next_lba) {
1637 			/* This cluster is contiguous with the previous one. */
1638 			lba_count += next_lba_count;
1639 			continue;
1640 		}
1641 
1642 		/* This cluster is not contiguous with the previous one. */
1643 
 1644 		/* If a run of LBAs was previously accumulated, clear it now */
1645 		if (lba_count > 0) {
1646 			spdk_bs_batch_clear_dev(ctx, batch, lba, lba_count);
1647 		}
1648 
1649 		/* Start building the next batch */
1650 		lba = next_lba;
1651 		if (next_lba > 0) {
1652 			lba_count = next_lba_count;
1653 		} else {
1654 			lba_count = 0;
1655 		}
1656 	}
1657 
1658 	/* If we ended with a contiguous set of LBAs, clear them now */
1659 	if (lba_count > 0) {
1660 		spdk_bs_batch_clear_dev(ctx, batch, lba, lba_count);
1661 	}
1662 
1663 	spdk_bs_batch_close(batch);
1664 }
1665 
1666 static void
1667 _spdk_blob_persist_zero_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
1668 {
1669 	struct spdk_blob_persist_ctx	*ctx = cb_arg;
1670 	struct spdk_blob		*blob = ctx->blob;
1671 	struct spdk_blob_store		*bs = blob->bs;
1672 	size_t				i;
1673 
1674 	/* This loop starts at 1 because the first page is special and handled
1675 	 * below. The pages (except the first) are never written in place,
1676 	 * so any pages in the clean list must be zeroed.
1677 	 */
1678 	for (i = 1; i < blob->clean.num_pages; i++) {
1679 		_spdk_bs_release_md_page(bs, blob->clean.pages[i]);
1680 	}
1681 
1682 	if (blob->active.num_pages == 0) {
1683 		uint32_t page_num;
1684 
1685 		page_num = _spdk_bs_blobid_to_page(blob->id);
1686 		_spdk_bs_release_md_page(bs, page_num);
1687 	}
1688 
1689 	/* Move on to clearing clusters */
1690 	_spdk_blob_persist_clear_clusters(seq, ctx, 0);
1691 }
1692 
1693 static void
1694 _spdk_blob_persist_zero_pages(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
1695 {
1696 	struct spdk_blob_persist_ctx	*ctx = cb_arg;
1697 	struct spdk_blob		*blob = ctx->blob;
1698 	struct spdk_blob_store		*bs = blob->bs;
1699 	uint64_t			lba;
1700 	uint32_t			lba_count;
1701 	spdk_bs_batch_t			*batch;
1702 	size_t				i;
1703 
1704 	batch = spdk_bs_sequence_to_batch(seq, _spdk_blob_persist_zero_pages_cpl, ctx);
1705 
1706 	lba_count = _spdk_bs_byte_to_lba(bs, SPDK_BS_PAGE_SIZE);
1707 
1708 	/* This loop starts at 1 because the first page is special and handled
1709 	 * below. The pages (except the first) are never written in place,
1710 	 * so any pages in the clean list must be zeroed.
1711 	 */
1712 	for (i = 1; i < blob->clean.num_pages; i++) {
1713 		lba = _spdk_bs_md_page_to_lba(bs, blob->clean.pages[i]);
1714 
1715 		spdk_bs_batch_write_zeroes_dev(batch, lba, lba_count);
1716 	}
1717 
1718 	/* The first page will only be zeroed if this is a delete. */
1719 	if (blob->active.num_pages == 0) {
1720 		uint32_t page_num;
1721 
1722 		/* The first page in the metadata goes where the blobid indicates */
1723 		page_num = _spdk_bs_blobid_to_page(blob->id);
1724 		lba = _spdk_bs_md_page_to_lba(bs, page_num);
1725 
1726 		spdk_bs_batch_write_zeroes_dev(batch, lba, lba_count);
1727 	}
1728 
1729 	spdk_bs_batch_close(batch);
1730 }
1731 
1732 static void
1733 _spdk_blob_persist_write_page_root(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
1734 {
1735 	struct spdk_blob_persist_ctx	*ctx = cb_arg;
1736 	struct spdk_blob		*blob = ctx->blob;
1737 	struct spdk_blob_store		*bs = blob->bs;
1738 	uint64_t			lba;
1739 	uint32_t			lba_count;
1740 	struct spdk_blob_md_page	*page;
1741 
1742 	if (blob->active.num_pages == 0) {
1743 		/* Move on to the next step */
1744 		_spdk_blob_persist_zero_pages(seq, ctx, 0);
1745 		return;
1746 	}
1747 
1748 	lba_count = _spdk_bs_byte_to_lba(bs, sizeof(*page));
1749 
1750 	page = &ctx->pages[0];
1751 	/* The first page in the metadata goes where the blobid indicates */
1752 	lba = _spdk_bs_md_page_to_lba(bs, _spdk_bs_blobid_to_page(blob->id));
1753 
1754 	spdk_bs_sequence_write_dev(seq, page, lba, lba_count,
1755 				   _spdk_blob_persist_zero_pages, ctx);
1756 }
1757 
1758 static void
1759 _spdk_blob_persist_write_page_chain(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
1760 {
1761 	struct spdk_blob_persist_ctx	*ctx = cb_arg;
1762 	struct spdk_blob		*blob = ctx->blob;
1763 	struct spdk_blob_store		*bs = blob->bs;
1764 	uint64_t			lba;
1765 	uint32_t			lba_count;
1766 	struct spdk_blob_md_page	*page;
1767 	spdk_bs_batch_t			*batch;
1768 	size_t				i;
1769 
1770 	/* Clusters don't move around in blobs. The list shrinks or grows
1771 	 * at the end, but no changes ever occur in the middle of the list.
1772 	 */
1773 
1774 	lba_count = _spdk_bs_byte_to_lba(bs, sizeof(*page));
1775 
1776 	batch = spdk_bs_sequence_to_batch(seq, _spdk_blob_persist_write_page_root, ctx);
1777 
1778 	/* This starts at 1. The root page is not written until
1779 	 * all of the others are finished
1780 	 */
1781 	for (i = 1; i < blob->active.num_pages; i++) {
1782 		page = &ctx->pages[i];
1783 		assert(page->sequence_num == i);
1784 
1785 		lba = _spdk_bs_md_page_to_lba(bs, blob->active.pages[i]);
1786 
1787 		spdk_bs_batch_write_dev(batch, page, lba, lba_count);
1788 	}
1789 
1790 	spdk_bs_batch_close(batch);
1791 }
1792 
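/* Grow or shrink the blob's in-memory cluster map to 'sz' clusters. This runs on the
 * metadata thread and works in two passes: first verify that enough free clusters
 * (and, when the extent table is in use, enough free metadata pages) exist, then
 * claim them. Thin-provisioned blobs skip the allocation entirely; their clusters are
 * allocated lazily on first write. The cluster array only grows here - it is shrunk
 * when the blob is persisted. */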
1793 static int
1794 _spdk_blob_resize(struct spdk_blob *blob, uint64_t sz)
1795 {
1796 	uint64_t	i;
1797 	uint64_t	*tmp;
1798 	uint64_t	lfc; /* lowest free cluster */
1799 	uint32_t	lfmd; /*  lowest free md page */
1800 	uint64_t	num_clusters;
1801 	uint32_t	*ep_tmp;
1802 	uint64_t	new_num_ep = 0, current_num_ep = 0;
1803 	struct spdk_blob_store *bs;
1804 
1805 	bs = blob->bs;
1806 
1807 	_spdk_blob_verify_md_op(blob);
1808 
1809 	if (blob->active.num_clusters == sz) {
1810 		return 0;
1811 	}
1812 
1813 	if (blob->active.num_clusters < blob->active.cluster_array_size) {
1814 		/* If this blob was resized to be larger, then smaller, then
1815 		 * larger without syncing, then the cluster array already
1816 		 * contains spare assigned clusters we can use.
1817 		 */
1818 		num_clusters = spdk_min(blob->active.cluster_array_size,
1819 					sz);
1820 	} else {
1821 		num_clusters = blob->active.num_clusters;
1822 	}
1823 
1824 	if (blob->use_extent_table) {
1825 		/* Round up, since every cluster beyond the current Extent Table size
1826 		 * requires a new extent page. */
1827 		new_num_ep = spdk_divide_round_up(sz, SPDK_EXTENTS_PER_EP);
1828 		current_num_ep = spdk_divide_round_up(num_clusters, SPDK_EXTENTS_PER_EP);
1829 	}
1830 
1831 	/* Do two passes - one to verify that we can obtain enough clusters
1832 	 * and md pages, another to actually claim them.
1833 	 */
1834 
1835 	if (spdk_blob_is_thin_provisioned(blob) == false) {
1836 		lfc = 0;
1837 		for (i = num_clusters; i < sz; i++) {
1838 			lfc = spdk_bit_array_find_first_clear(bs->used_clusters, lfc);
1839 			if (lfc == UINT32_MAX) {
1840 				/* No more free clusters. Cannot satisfy the request */
1841 				return -ENOSPC;
1842 			}
1843 			lfc++;
1844 		}
1845 		lfmd = 0;
1846 		for (i = current_num_ep; i < new_num_ep ; i++) {
1847 			lfmd = spdk_bit_array_find_first_clear(blob->bs->used_md_pages, lfmd);
1848 			if (lfmd == UINT32_MAX) {
1849 				/* No more free md pages. Cannot satisfy the request */
1850 				return -ENOSPC;
1851 			}
1852 		}
1853 	}
1854 
1855 	if (sz > num_clusters) {
1856 		/* Expand the cluster array if necessary.
1857 		 * We only shrink the array when persisting.
1858 		 */
1859 		tmp = realloc(blob->active.clusters, sizeof(*blob->active.clusters) * sz);
1860 		if (sz > 0 && tmp == NULL) {
1861 			return -ENOMEM;
1862 		}
1863 		memset(tmp + blob->active.cluster_array_size, 0,
1864 		       sizeof(*blob->active.clusters) * (sz - blob->active.cluster_array_size));
1865 		blob->active.clusters = tmp;
1866 		blob->active.cluster_array_size = sz;
1867 
1868 		/* Expand the extent table, but only if enough clusters were added */
1869 		if (new_num_ep > current_num_ep && blob->use_extent_table) {
1870 			ep_tmp = realloc(blob->active.extent_pages, sizeof(*blob->active.extent_pages) * new_num_ep);
1871 			if (new_num_ep > 0 && ep_tmp == NULL) {
1872 				return -ENOMEM;
1873 			}
1874 			memset(ep_tmp + blob->active.extent_pages_array_size, 0,
1875 			       sizeof(*blob->active.extent_pages) * (new_num_ep - blob->active.extent_pages_array_size));
1876 			blob->active.extent_pages = ep_tmp;
1877 			blob->active.extent_pages_array_size = new_num_ep;
1878 		}
1879 	}
1880 
1881 	blob->state = SPDK_BLOB_STATE_DIRTY;
1882 
1883 	if (spdk_blob_is_thin_provisioned(blob) == false) {
1884 		lfc = 0;
1885 		lfmd = 0;
1886 		for (i = num_clusters; i < sz; i++) {
1887 			_spdk_bs_allocate_cluster(blob, i, &lfc, &lfmd, true);
1888 			lfc++;
1889 			lfmd++;
1890 		}
1891 	}
1892 
1893 	blob->active.num_clusters = sz;
1894 	blob->active.num_extent_pages = new_num_ep;
1895 
1896 	return 0;
1897 }
1898 
1899 static void
1900 _spdk_blob_persist_generate_new_md(struct spdk_blob_persist_ctx *ctx)
1901 {
1902 	spdk_bs_sequence_t *seq = ctx->seq;
1903 	struct spdk_blob *blob = ctx->blob;
1904 	struct spdk_blob_store *bs = blob->bs;
1905 	uint64_t i;
1906 	uint32_t page_num;
1907 	void *tmp;
1908 	int rc;
1909 
1910 	/* Generate the new metadata */
1911 	rc = _spdk_blob_serialize(blob, &ctx->pages, &blob->active.num_pages);
1912 	if (rc < 0) {
1913 		_spdk_blob_persist_complete(seq, ctx, rc);
1914 		return;
1915 	}
1916 
1917 	assert(blob->active.num_pages >= 1);
1918 
1919 	/* Resize the cache of page indices */
1920 	tmp = realloc(blob->active.pages, blob->active.num_pages * sizeof(*blob->active.pages));
1921 	if (!tmp) {
1922 		_spdk_blob_persist_complete(seq, ctx, -ENOMEM);
1923 		return;
1924 	}
1925 	blob->active.pages = tmp;
1926 
1927 	/* Assign this metadata to pages. This requires two passes -
1928 	 * one to verify that there are enough pages and a second
1929 	 * to actually claim them. */
1930 	page_num = 0;
1931 	/* Note that this loop starts at one. The first page location is fixed by the blobid. */
1932 	for (i = 1; i < blob->active.num_pages; i++) {
1933 		page_num = spdk_bit_array_find_first_clear(bs->used_md_pages, page_num);
1934 		if (page_num == UINT32_MAX) {
1935 			_spdk_blob_persist_complete(seq, ctx, -ENOMEM);
1936 			return;
1937 		}
1938 		page_num++;
1939 	}
1940 
1941 	page_num = 0;
1942 	blob->active.pages[0] = _spdk_bs_blobid_to_page(blob->id);
1943 	for (i = 1; i < blob->active.num_pages; i++) {
1944 		page_num = spdk_bit_array_find_first_clear(bs->used_md_pages, page_num);
1945 		ctx->pages[i - 1].next = page_num;
1946 		/* Now that previous metadata page is complete, calculate the crc for it. */
1947 		ctx->pages[i - 1].crc = _spdk_blob_md_page_calc_crc(&ctx->pages[i - 1]);
1948 		blob->active.pages[i] = page_num;
1949 		_spdk_bs_claim_md_page(bs, page_num);
1950 		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Claiming page %u for blob %lu\n", page_num, blob->id);
1951 		page_num++;
1952 	}
1953 	ctx->pages[i - 1].crc = _spdk_blob_md_page_calc_crc(&ctx->pages[i - 1]);
1954 	/* Start writing the metadata from last page to first */
1955 	blob->state = SPDK_BLOB_STATE_CLEAN;
1956 	_spdk_blob_persist_write_page_chain(seq, ctx, 0);
1957 }
1958 
1959 static void _spdk_blob_persist_write_extent_pages(spdk_bs_sequence_t *seq, void *cb_arg,
1960 		int bserrno);
1961 
1962 static void
1963 _spdk_blob_persist_write_extent_page(uint32_t extent, uint64_t cluster_num,
1964 				     struct spdk_blob_persist_ctx *ctx)
1965 {
1966 	spdk_bs_sequence_t		*seq = ctx->seq;
1967 	uint32_t                        page_count = 0;
1968 	struct spdk_blob		*blob = ctx->blob;
1969 	int				rc;
1970 
1971 	rc = _spdk_blob_serialize_add_page(blob, &ctx->extent_page, &page_count, &ctx->extent_page);
1972 	if (rc < 0) {
1973 		assert(false);
1974 		return;
1975 	}
1976 
1977 	_spdk_blob_serialize_extent_page(blob, cluster_num, ctx->extent_page);
1978 
1979 	ctx->extent_page->crc = _spdk_blob_md_page_calc_crc(ctx->extent_page);
1980 
1981 	spdk_bs_sequence_write_dev(seq, ctx->extent_page, _spdk_bs_md_page_to_lba(blob->bs, extent),
1982 				   _spdk_bs_byte_to_lba(blob->bs, SPDK_BS_PAGE_SIZE),
1983 				   _spdk_blob_persist_write_extent_pages, ctx);
1984 }
1985 
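/* Completion callback that walks the blob's extent pages. Each call writes out at
 * most one newly allocated extent page (via _spdk_blob_persist_write_extent_page) and
 * records its position in ctx->next_extent_page, so the walk resumes here after that
 * write completes. Once every new extent page has been written, the new metadata
 * chain is generated. */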
1986 static void
1987 _spdk_blob_persist_write_extent_pages(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
1988 {
1989 	struct spdk_blob_persist_ctx	*ctx = cb_arg;
1990 	struct spdk_blob		*blob = ctx->blob;
1991 	size_t				i;
1992 	uint32_t			extent_page_id;
1993 
1994 	if (ctx->extent_page != NULL) {
1995 		spdk_free(ctx->extent_page);
1996 		ctx->extent_page = NULL;
1997 	}
1998 
1999 	/* Only write out changed extent pages */
2000 	for (i = ctx->next_extent_page; i < blob->active.num_extent_pages; i++) {
2001 		extent_page_id = blob->active.extent_pages[i];
2002 		if (extent_page_id == 0) {
2003 			/* No Extent Page to persist */
2004 			assert(spdk_blob_is_thin_provisioned(blob));
2005 			continue;
2006 		}
2007 		/* Writing out a new extent page for the first time. Either the active extent page array
2008 		 * is larger than the clean one, or no extent page had been assigned yet due to thin provisioning. */
2009 		if (i >= blob->clean.extent_pages_array_size || blob->clean.extent_pages[i] == 0) {
2010 			assert(spdk_bit_array_get(blob->bs->used_md_pages, extent_page_id));
2011 			ctx->next_extent_page = i + 1;
2012 			_spdk_blob_persist_write_extent_page(extent_page_id, i * SPDK_EXTENTS_PER_EP, ctx);
2013 			return;
2014 		}
2015 		assert(blob->clean.extent_pages[i] != 0);
2016 	}
2017 
2018 	_spdk_blob_persist_generate_new_md(ctx);
2019 }
2020 
2021 static void
2022 _spdk_blob_persist_start(struct spdk_blob_persist_ctx *ctx)
2023 {
2024 	spdk_bs_sequence_t *seq = ctx->seq;
2025 	struct spdk_blob *blob = ctx->blob;
2026 
2027 	if (blob->active.num_pages == 0) {
2028 		/* This is the signal that the blob should be deleted.
2029 		 * Immediately jump to the clean up routine. */
2030 		assert(blob->clean.num_pages > 0);
2031 		blob->state = SPDK_BLOB_STATE_CLEAN;
2032 		_spdk_blob_persist_zero_pages(seq, ctx, 0);
2033 		return;
2034 
2035 	}
2036 
2037 	_spdk_blob_persist_write_extent_pages(seq, ctx, 0);
2038 }
2039 
2040 static void
2041 _spdk_blob_persist_dirty_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2042 {
2043 	struct spdk_blob_persist_ctx *ctx = cb_arg;
2044 
2045 	ctx->blob->bs->clean = 0;
2046 
2047 	spdk_free(ctx->super);
2048 
2049 	_spdk_blob_persist_start(ctx);
2050 }
2051 
2052 static void
2053 _spdk_bs_write_super(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs,
2054 		     struct spdk_bs_super_block *super, spdk_bs_sequence_cpl cb_fn, void *cb_arg);
2055 
2056 
2057 static void
2058 _spdk_blob_persist_dirty(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2059 {
2060 	struct spdk_blob_persist_ctx *ctx = cb_arg;
2061 
2062 	ctx->super->clean = 0;
2063 	if (ctx->super->size == 0) {
2064 		ctx->super->size = ctx->blob->bs->dev->blockcnt * ctx->blob->bs->dev->blocklen;
2065 	}
2066 
2067 	_spdk_bs_write_super(seq, ctx->blob->bs, ctx->super, _spdk_blob_persist_dirty_cpl, ctx);
2068 }
2069 
2070 
2071 /* Write a blob to disk */
2072 static void
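/* The persist sequence proceeds roughly as: mark the superblock dirty if the
 * blobstore was clean, write out any new extent pages, serialize and write the new
 * metadata page chain (root page last), zero the metadata pages that are no longer
 * referenced, and finally clear or unmap any clusters released by the change. */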
2073 _spdk_blob_persist(spdk_bs_sequence_t *seq, struct spdk_blob *blob,
2074 		   spdk_bs_sequence_cpl cb_fn, void *cb_arg)
2075 {
2076 	struct spdk_blob_persist_ctx *ctx;
2077 
2078 	_spdk_blob_verify_md_op(blob);
2079 
2080 	if (blob->state == SPDK_BLOB_STATE_CLEAN) {
2081 		cb_fn(seq, cb_arg, 0);
2082 		return;
2083 	}
2084 
2085 	ctx = calloc(1, sizeof(*ctx));
2086 	if (!ctx) {
2087 		cb_fn(seq, cb_arg, -ENOMEM);
2088 		return;
2089 	}
2090 	ctx->blob = blob;
2091 	ctx->seq = seq;
2092 	ctx->cb_fn = cb_fn;
2093 	ctx->cb_arg = cb_arg;
2094 	ctx->next_extent_page = 0;
2095 
2096 	if (blob->bs->clean) {
2097 		ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL,
2098 					  SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
2099 		if (!ctx->super) {
2100 			cb_fn(seq, cb_arg, -ENOMEM);
2101 			free(ctx);
2102 			return;
2103 		}
2104 
2105 		spdk_bs_sequence_read_dev(seq, ctx->super, _spdk_bs_page_to_lba(blob->bs, 0),
2106 					  _spdk_bs_byte_to_lba(blob->bs, sizeof(*ctx->super)),
2107 					  _spdk_blob_persist_dirty, ctx);
2108 	} else {
2109 		_spdk_blob_persist_start(ctx);
2110 	}
2111 }
2112 
2113 struct spdk_blob_copy_cluster_ctx {
2114 	struct spdk_blob *blob;
2115 	uint8_t *buf;
2116 	uint64_t page;
2117 	uint64_t new_cluster;
2118 	uint32_t new_extent_page;
2119 	spdk_bs_sequence_t *seq;
2120 };
2121 
2122 static void
2123 _spdk_blob_allocate_and_copy_cluster_cpl(void *cb_arg, int bserrno)
2124 {
2125 	struct spdk_blob_copy_cluster_ctx *ctx = cb_arg;
2126 	struct spdk_bs_request_set *set = (struct spdk_bs_request_set *)ctx->seq;
2127 	TAILQ_HEAD(, spdk_bs_request_set) requests;
2128 	spdk_bs_user_op_t *op;
2129 
2130 	TAILQ_INIT(&requests);
2131 	TAILQ_SWAP(&set->channel->need_cluster_alloc, &requests, spdk_bs_request_set, link);
2132 
2133 	while (!TAILQ_EMPTY(&requests)) {
2134 		op = TAILQ_FIRST(&requests);
2135 		TAILQ_REMOVE(&requests, op, link);
2136 		if (bserrno == 0) {
2137 			spdk_bs_user_op_execute(op);
2138 		} else {
2139 			spdk_bs_user_op_abort(op);
2140 		}
2141 	}
2142 
2143 	spdk_free(ctx->buf);
2144 	free(ctx);
2145 }
2146 
2147 static void
2148 _spdk_blob_insert_cluster_cpl(void *cb_arg, int bserrno)
2149 {
2150 	struct spdk_blob_copy_cluster_ctx *ctx = cb_arg;
2151 
2152 	if (bserrno) {
2153 		if (bserrno == -EEXIST) {
2154 			/* The metadata insert failed because another thread
2155 			 * allocated the cluster first. Free our cluster
2156 			 * but continue without error. */
2157 			bserrno = 0;
2158 		}
2159 		_spdk_bs_release_cluster(ctx->blob->bs, ctx->new_cluster);
2160 		if (ctx->new_extent_page != 0) {
2161 			_spdk_bs_release_md_page(ctx->blob->bs, ctx->new_extent_page);
2162 		}
2163 	}
2164 
2165 	spdk_bs_sequence_finish(ctx->seq, bserrno);
2166 }
2167 
2168 static void
2169 _spdk_blob_write_copy_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2170 {
2171 	struct spdk_blob_copy_cluster_ctx *ctx = cb_arg;
2172 	uint32_t cluster_number;
2173 
2174 	if (bserrno) {
2175 		/* The write failed, so jump to the final completion handler */
2176 		spdk_bs_sequence_finish(seq, bserrno);
2177 		return;
2178 	}
2179 
2180 	cluster_number = _spdk_bs_page_to_cluster(ctx->blob->bs, ctx->page);
2181 
2182 	_spdk_blob_insert_cluster_on_md_thread(ctx->blob, cluster_number, ctx->new_cluster,
2183 					       ctx->new_extent_page, _spdk_blob_insert_cluster_cpl, ctx);
2184 }
2185 
2186 static void
2187 _spdk_blob_write_copy(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2188 {
2189 	struct spdk_blob_copy_cluster_ctx *ctx = cb_arg;
2190 
2191 	if (bserrno != 0) {
2192 		/* The read failed, so jump to the final completion handler */
2193 		spdk_bs_sequence_finish(seq, bserrno);
2194 		return;
2195 	}
2196 
2197 	/* Write whole cluster */
2198 	spdk_bs_sequence_write_dev(seq, ctx->buf,
2199 				   _spdk_bs_cluster_to_lba(ctx->blob->bs, ctx->new_cluster),
2200 				   _spdk_bs_cluster_to_lba(ctx->blob->bs, 1),
2201 				   _spdk_blob_write_copy_cpl, ctx);
2202 }
2203 
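/* Copy-on-write path for thin-provisioned blobs and clones: allocate a new cluster,
 * and if the blob has a parent (parent_id != SPDK_BLOBID_INVALID) read the whole
 * cluster from the backing device into a DMA buffer and write it to the new location
 * before inserting the cluster into the blob's metadata on the md thread. User ops
 * that arrive while the allocation is outstanding are queued on need_cluster_alloc
 * and re-executed (or aborted) in _spdk_blob_allocate_and_copy_cluster_cpl. */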
2204 static void
2205 _spdk_bs_allocate_and_copy_cluster(struct spdk_blob *blob,
2206 				   struct spdk_io_channel *_ch,
2207 				   uint64_t io_unit, spdk_bs_user_op_t *op)
2208 {
2209 	struct spdk_bs_cpl cpl;
2210 	struct spdk_bs_channel *ch;
2211 	struct spdk_blob_copy_cluster_ctx *ctx;
2212 	uint32_t cluster_start_page;
2213 	uint32_t cluster_number;
2214 	int rc;
2215 
2216 	ch = spdk_io_channel_get_ctx(_ch);
2217 
2218 	if (!TAILQ_EMPTY(&ch->need_cluster_alloc)) {
2219 		/* There are already operations pending. Queue this user op
2220 		 * and return because it will be re-executed when the outstanding
2221 		 * cluster allocation completes. */
2222 		TAILQ_INSERT_TAIL(&ch->need_cluster_alloc, op, link);
2223 		return;
2224 	}
2225 
2226 	/* Round the io_unit offset down to the first page in the cluster */
2227 	cluster_start_page = _spdk_bs_io_unit_to_cluster_start(blob, io_unit);
2228 
2229 	/* Calculate which index in the metadata cluster array the corresponding
2230 	 * cluster is supposed to be at. */
2231 	cluster_number = _spdk_bs_io_unit_to_cluster_number(blob, io_unit);
2232 
2233 	ctx = calloc(1, sizeof(*ctx));
2234 	if (!ctx) {
2235 		spdk_bs_user_op_abort(op);
2236 		return;
2237 	}
2238 
2239 	assert(blob->bs->cluster_sz % blob->back_bs_dev->blocklen == 0);
2240 
2241 	ctx->blob = blob;
2242 	ctx->page = cluster_start_page;
2243 
2244 	if (blob->parent_id != SPDK_BLOBID_INVALID) {
2245 		ctx->buf = spdk_malloc(blob->bs->cluster_sz, blob->back_bs_dev->blocklen,
2246 				       NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
2247 		if (!ctx->buf) {
2248 			SPDK_ERRLOG("DMA allocation for cluster of size = %" PRIu32 " failed.\n",
2249 				    blob->bs->cluster_sz);
2250 			free(ctx);
2251 			spdk_bs_user_op_abort(op);
2252 			return;
2253 		}
2254 	}
2255 
2256 	rc = _spdk_bs_allocate_cluster(blob, cluster_number, &ctx->new_cluster, &ctx->new_extent_page,
2257 				       false);
2258 	if (rc != 0) {
2259 		spdk_free(ctx->buf);
2260 		free(ctx);
2261 		spdk_bs_user_op_abort(op);
2262 		return;
2263 	}
2264 
2265 	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
2266 	cpl.u.blob_basic.cb_fn = _spdk_blob_allocate_and_copy_cluster_cpl;
2267 	cpl.u.blob_basic.cb_arg = ctx;
2268 
2269 	ctx->seq = spdk_bs_sequence_start(_ch, &cpl);
2270 	if (!ctx->seq) {
2271 		_spdk_bs_release_cluster(blob->bs, ctx->new_cluster);
2272 		spdk_free(ctx->buf);
2273 		free(ctx);
2274 		spdk_bs_user_op_abort(op);
2275 		return;
2276 	}
2277 
2278 	/* Queue the user op to block other incoming operations */
2279 	TAILQ_INSERT_TAIL(&ch->need_cluster_alloc, op, link);
2280 
2281 	if (blob->parent_id != SPDK_BLOBID_INVALID) {
2282 		/* Read cluster from backing device */
2283 		spdk_bs_sequence_read_bs_dev(ctx->seq, blob->back_bs_dev, ctx->buf,
2284 					     _spdk_bs_dev_page_to_lba(blob->back_bs_dev, cluster_start_page),
2285 					     _spdk_bs_dev_byte_to_lba(blob->back_bs_dev, blob->bs->cluster_sz),
2286 					     _spdk_blob_write_copy, ctx);
2287 	} else {
2288 		_spdk_blob_insert_cluster_on_md_thread(ctx->blob, cluster_number, ctx->new_cluster,
2289 						       ctx->new_extent_page, _spdk_blob_insert_cluster_cpl, ctx);
2290 	}
2291 }
2292 
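/* Translate a blob-relative io_unit offset and length into an LBA range. If the
 * io_unit falls in an unallocated cluster, the returned LBA refers to the backing
 * device (snapshot or zeroes device); otherwise it refers to the blobstore device. */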
2293 static void
2294 _spdk_blob_calculate_lba_and_lba_count(struct spdk_blob *blob, uint64_t io_unit, uint64_t length,
2295 				       uint64_t *lba,	uint32_t *lba_count)
2296 {
2297 	*lba_count = length;
2298 
2299 	if (!_spdk_bs_io_unit_is_allocated(blob, io_unit)) {
2300 		assert(blob->back_bs_dev != NULL);
2301 		*lba = _spdk_bs_io_unit_to_back_dev_lba(blob, io_unit);
2302 		*lba_count = _spdk_bs_io_unit_to_back_dev_lba(blob, *lba_count);
2303 	} else {
2304 		*lba = _spdk_bs_blob_io_unit_to_lba(blob, io_unit);
2305 	}
2306 }
2307 
2308 struct op_split_ctx {
2309 	struct spdk_blob *blob;
2310 	struct spdk_io_channel *channel;
2311 	uint64_t io_unit_offset;
2312 	uint64_t io_units_remaining;
2313 	void *curr_payload;
2314 	enum spdk_blob_op_type op_type;
2315 	spdk_bs_sequence_t *seq;
2316 };
2317 
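/* Iterative splitter for I/O that crosses cluster boundaries. Each invocation issues
 * at most one cluster-aligned chunk and passes itself as the completion callback, so
 * the remaining io_units are consumed one chunk at a time until none are left or an
 * error occurs. */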
2318 static void
2319 _spdk_blob_request_submit_op_split_next(void *cb_arg, int bserrno)
2320 {
2321 	struct op_split_ctx	*ctx = cb_arg;
2322 	struct spdk_blob	*blob = ctx->blob;
2323 	struct spdk_io_channel	*ch = ctx->channel;
2324 	enum spdk_blob_op_type	op_type = ctx->op_type;
2325 	uint8_t			*buf = ctx->curr_payload;
2326 	uint64_t		offset = ctx->io_unit_offset;
2327 	uint64_t		length = ctx->io_units_remaining;
2328 	uint64_t		op_length;
2329 
2330 	if (bserrno != 0 || ctx->io_units_remaining == 0) {
2331 		spdk_bs_sequence_finish(ctx->seq, bserrno);
2332 		free(ctx);
2333 		return;
2334 	}
2335 
2336 	op_length = spdk_min(length, _spdk_bs_num_io_units_to_cluster_boundary(blob,
2337 			     offset));
2338 
2339 	/* Update length and payload for next operation */
2340 	ctx->io_units_remaining -= op_length;
2341 	ctx->io_unit_offset += op_length;
2342 	if (op_type == SPDK_BLOB_WRITE || op_type == SPDK_BLOB_READ) {
2343 		ctx->curr_payload += op_length * blob->bs->io_unit_size;
2344 	}
2345 
2346 	switch (op_type) {
2347 	case SPDK_BLOB_READ:
2348 		spdk_blob_io_read(blob, ch, buf, offset, op_length,
2349 				  _spdk_blob_request_submit_op_split_next, ctx);
2350 		break;
2351 	case SPDK_BLOB_WRITE:
2352 		spdk_blob_io_write(blob, ch, buf, offset, op_length,
2353 				   _spdk_blob_request_submit_op_split_next, ctx);
2354 		break;
2355 	case SPDK_BLOB_UNMAP:
2356 		spdk_blob_io_unmap(blob, ch, offset, op_length,
2357 				   _spdk_blob_request_submit_op_split_next, ctx);
2358 		break;
2359 	case SPDK_BLOB_WRITE_ZEROES:
2360 		spdk_blob_io_write_zeroes(blob, ch, offset, op_length,
2361 					  _spdk_blob_request_submit_op_split_next, ctx);
2362 		break;
2363 	case SPDK_BLOB_READV:
2364 	case SPDK_BLOB_WRITEV:
2365 		SPDK_ERRLOG("readv/writev not valid\n");
2366 		spdk_bs_sequence_finish(ctx->seq, -EINVAL);
2367 		free(ctx);
2368 		break;
2369 	}
2370 }
2371 
2372 static void
2373 _spdk_blob_request_submit_op_split(struct spdk_io_channel *ch, struct spdk_blob *blob,
2374 				   void *payload, uint64_t offset, uint64_t length,
2375 				   spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type)
2376 {
2377 	struct op_split_ctx *ctx;
2378 	spdk_bs_sequence_t *seq;
2379 	struct spdk_bs_cpl cpl;
2380 
2381 	assert(blob != NULL);
2382 
2383 	ctx = calloc(1, sizeof(struct op_split_ctx));
2384 	if (ctx == NULL) {
2385 		cb_fn(cb_arg, -ENOMEM);
2386 		return;
2387 	}
2388 
2389 	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
2390 	cpl.u.blob_basic.cb_fn = cb_fn;
2391 	cpl.u.blob_basic.cb_arg = cb_arg;
2392 
2393 	seq = spdk_bs_sequence_start(ch, &cpl);
2394 	if (!seq) {
2395 		free(ctx);
2396 		cb_fn(cb_arg, -ENOMEM);
2397 		return;
2398 	}
2399 
2400 	ctx->blob = blob;
2401 	ctx->channel = ch;
2402 	ctx->curr_payload = payload;
2403 	ctx->io_unit_offset = offset;
2404 	ctx->io_units_remaining = length;
2405 	ctx->op_type = op_type;
2406 	ctx->seq = seq;
2407 
2408 	_spdk_blob_request_submit_op_split_next(ctx, 0);
2409 }
2410 
2411 static void
2412 _spdk_blob_request_submit_op_single(struct spdk_io_channel *_ch, struct spdk_blob *blob,
2413 				    void *payload, uint64_t offset, uint64_t length,
2414 				    spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type)
2415 {
2416 	struct spdk_bs_cpl cpl;
2417 	uint64_t lba;
2418 	uint32_t lba_count;
2419 
2420 	assert(blob != NULL);
2421 
2422 	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
2423 	cpl.u.blob_basic.cb_fn = cb_fn;
2424 	cpl.u.blob_basic.cb_arg = cb_arg;
2425 
2426 	_spdk_blob_calculate_lba_and_lba_count(blob, offset, length, &lba, &lba_count);
2427 
2428 	if (blob->frozen_refcnt) {
2429 		/* This blob I/O is frozen */
2430 		spdk_bs_user_op_t *op;
2431 		struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(_ch);
2432 
2433 		op = spdk_bs_user_op_alloc(_ch, &cpl, op_type, blob, payload, 0, offset, length);
2434 		if (!op) {
2435 			cb_fn(cb_arg, -ENOMEM);
2436 			return;
2437 		}
2438 
2439 		TAILQ_INSERT_TAIL(&bs_channel->queued_io, op, link);
2440 
2441 		return;
2442 	}
2443 
2444 	switch (op_type) {
2445 	case SPDK_BLOB_READ: {
2446 		spdk_bs_batch_t *batch;
2447 
2448 		batch = spdk_bs_batch_open(_ch, &cpl);
2449 		if (!batch) {
2450 			cb_fn(cb_arg, -ENOMEM);
2451 			return;
2452 		}
2453 
2454 		if (_spdk_bs_io_unit_is_allocated(blob, offset)) {
2455 			/* Read from the blob */
2456 			spdk_bs_batch_read_dev(batch, payload, lba, lba_count);
2457 		} else {
2458 			/* Read from the backing block device */
2459 			spdk_bs_batch_read_bs_dev(batch, blob->back_bs_dev, payload, lba, lba_count);
2460 		}
2461 
2462 		spdk_bs_batch_close(batch);
2463 		break;
2464 	}
2465 	case SPDK_BLOB_WRITE:
2466 	case SPDK_BLOB_WRITE_ZEROES: {
2467 		if (_spdk_bs_io_unit_is_allocated(blob, offset)) {
2468 			/* Write to the blob */
2469 			spdk_bs_batch_t *batch;
2470 
2471 			if (lba_count == 0) {
2472 				cb_fn(cb_arg, 0);
2473 				return;
2474 			}
2475 
2476 			batch = spdk_bs_batch_open(_ch, &cpl);
2477 			if (!batch) {
2478 				cb_fn(cb_arg, -ENOMEM);
2479 				return;
2480 			}
2481 
2482 			if (op_type == SPDK_BLOB_WRITE) {
2483 				spdk_bs_batch_write_dev(batch, payload, lba, lba_count);
2484 			} else {
2485 				spdk_bs_batch_write_zeroes_dev(batch, lba, lba_count);
2486 			}
2487 
2488 			spdk_bs_batch_close(batch);
2489 		} else {
2490 			/* Queue this operation and allocate the cluster */
2491 			spdk_bs_user_op_t *op;
2492 
2493 			op = spdk_bs_user_op_alloc(_ch, &cpl, op_type, blob, payload, 0, offset, length);
2494 			if (!op) {
2495 				cb_fn(cb_arg, -ENOMEM);
2496 				return;
2497 			}
2498 
2499 			_spdk_bs_allocate_and_copy_cluster(blob, _ch, offset, op);
2500 		}
2501 		break;
2502 	}
2503 	case SPDK_BLOB_UNMAP: {
2504 		spdk_bs_batch_t *batch;
2505 
2506 		batch = spdk_bs_batch_open(_ch, &cpl);
2507 		if (!batch) {
2508 			cb_fn(cb_arg, -ENOMEM);
2509 			return;
2510 		}
2511 
2512 		if (_spdk_bs_io_unit_is_allocated(blob, offset)) {
2513 			spdk_bs_batch_unmap_dev(batch, lba, lba_count);
2514 		}
2515 
2516 		spdk_bs_batch_close(batch);
2517 		break;
2518 	}
2519 	case SPDK_BLOB_READV:
2520 	case SPDK_BLOB_WRITEV:
2521 		SPDK_ERRLOG("readv/writev not valid\n");
2522 		cb_fn(cb_arg, -EINVAL);
2523 		break;
2524 	}
2525 }
2526 
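/* Dispatch a blob I/O: requests contained within a single cluster go down the
 * single-op path, anything that crosses a cluster boundary is split. */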
2527 static void
2528 _spdk_blob_request_submit_op(struct spdk_blob *blob, struct spdk_io_channel *_channel,
2529 			     void *payload, uint64_t offset, uint64_t length,
2530 			     spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type)
2531 {
2532 	assert(blob != NULL);
2533 
2534 	if (blob->data_ro && op_type != SPDK_BLOB_READ) {
2535 		cb_fn(cb_arg, -EPERM);
2536 		return;
2537 	}
2538 
2539 	if (offset + length > _spdk_bs_cluster_to_lba(blob->bs, blob->active.num_clusters)) {
2540 		cb_fn(cb_arg, -EINVAL);
2541 		return;
2542 	}
2543 	if (length <= _spdk_bs_num_io_units_to_cluster_boundary(blob, offset)) {
2544 		_spdk_blob_request_submit_op_single(_channel, blob, payload, offset, length,
2545 						    cb_fn, cb_arg, op_type);
2546 	} else {
2547 		_spdk_blob_request_submit_op_split(_channel, blob, payload, offset, length,
2548 						   cb_fn, cb_arg, op_type);
2549 	}
2550 }
2551 
2552 struct rw_iov_ctx {
2553 	struct spdk_blob *blob;
2554 	struct spdk_io_channel *channel;
2555 	spdk_blob_op_complete cb_fn;
2556 	void *cb_arg;
2557 	bool read;
2558 	int iovcnt;
2559 	struct iovec *orig_iov;
2560 	uint64_t io_unit_offset;
2561 	uint64_t io_units_remaining;
2562 	uint64_t io_units_done;
2563 	struct iovec iov[0];
2564 };
2565 
2566 static void
2567 _spdk_rw_iov_done(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2568 {
2569 	assert(cb_arg == NULL);
2570 	spdk_bs_sequence_finish(seq, bserrno);
2571 }
2572 
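/* readv/writev counterpart of the splitter above: for each cluster-aligned chunk it
 * rebuilds a sub-iov array (ctx->iov) covering exactly that chunk of the original
 * iovs, issues the chunk, and re-enters itself on completion. */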
2573 static void
2574 _spdk_rw_iov_split_next(void *cb_arg, int bserrno)
2575 {
2576 	struct rw_iov_ctx *ctx = cb_arg;
2577 	struct spdk_blob *blob = ctx->blob;
2578 	struct iovec *iov, *orig_iov;
2579 	int iovcnt;
2580 	size_t orig_iovoff;
2581 	uint64_t io_units_count, io_units_to_boundary, io_unit_offset;
2582 	uint64_t byte_count;
2583 
2584 	if (bserrno != 0 || ctx->io_units_remaining == 0) {
2585 		ctx->cb_fn(ctx->cb_arg, bserrno);
2586 		free(ctx);
2587 		return;
2588 	}
2589 
2590 	io_unit_offset = ctx->io_unit_offset;
2591 	io_units_to_boundary = _spdk_bs_num_io_units_to_cluster_boundary(blob, io_unit_offset);
2592 	io_units_count = spdk_min(ctx->io_units_remaining, io_units_to_boundary);
2593 	/*
2594 	 * Get index and offset into the original iov array for our current position in the I/O sequence.
2595 	 *  byte_count keeps track of how many bytes remain until orig_iov and orig_iovoff
2596 	 *  point to the current position in the I/O sequence.
2597 	 */
2598 	byte_count = ctx->io_units_done * blob->bs->io_unit_size;
2599 	orig_iov = &ctx->orig_iov[0];
2600 	orig_iovoff = 0;
2601 	while (byte_count > 0) {
2602 		if (byte_count >= orig_iov->iov_len) {
2603 			byte_count -= orig_iov->iov_len;
2604 			orig_iov++;
2605 		} else {
2606 			orig_iovoff = byte_count;
2607 			byte_count = 0;
2608 		}
2609 	}
2610 
2611 	/*
2612 	 * Build an iov array for the next I/O in the sequence.  byte_count will keep track of how many
2613 	 *  bytes of this next I/O remain to be accounted for in the new iov array.
2614 	 */
2615 	byte_count = io_units_count * blob->bs->io_unit_size;
2616 	iov = &ctx->iov[0];
2617 	iovcnt = 0;
2618 	while (byte_count > 0) {
2619 		assert(iovcnt < ctx->iovcnt);
2620 		iov->iov_len = spdk_min(byte_count, orig_iov->iov_len - orig_iovoff);
2621 		iov->iov_base = orig_iov->iov_base + orig_iovoff;
2622 		byte_count -= iov->iov_len;
2623 		orig_iovoff = 0;
2624 		orig_iov++;
2625 		iov++;
2626 		iovcnt++;
2627 	}
2628 
2629 	ctx->io_unit_offset += io_units_count;
2630 	ctx->io_units_remaining -= io_units_count;
2631 	ctx->io_units_done += io_units_count;
2632 	iov = &ctx->iov[0];
2633 
2634 	if (ctx->read) {
2635 		spdk_blob_io_readv(ctx->blob, ctx->channel, iov, iovcnt, io_unit_offset,
2636 				   io_units_count, _spdk_rw_iov_split_next, ctx);
2637 	} else {
2638 		spdk_blob_io_writev(ctx->blob, ctx->channel, iov, iovcnt, io_unit_offset,
2639 				    io_units_count, _spdk_rw_iov_split_next, ctx);
2640 	}
2641 }
2642 
2643 static void
2644 _spdk_blob_request_submit_rw_iov(struct spdk_blob *blob, struct spdk_io_channel *_channel,
2645 				 struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
2646 				 spdk_blob_op_complete cb_fn, void *cb_arg, bool read)
2647 {
2648 	struct spdk_bs_cpl	cpl;
2649 
2650 	assert(blob != NULL);
2651 
2652 	if (!read && blob->data_ro) {
2653 		cb_fn(cb_arg, -EPERM);
2654 		return;
2655 	}
2656 
2657 	if (length == 0) {
2658 		cb_fn(cb_arg, 0);
2659 		return;
2660 	}
2661 
2662 	if (offset + length > _spdk_bs_cluster_to_lba(blob->bs, blob->active.num_clusters)) {
2663 		cb_fn(cb_arg, -EINVAL);
2664 		return;
2665 	}
2666 
2667 	/*
2668 	 * For now, we implement readv/writev using a sequence (instead of a batch) to account for having
2669 	 *  to split a request that spans a cluster boundary.  For I/O that does not span a cluster boundary,
2670 	 *  there will be no noticeable difference compared to using a batch.  For I/O that does span a cluster
2671 	 *  boundary, the target LBAs (after blob offset to LBA translation) may not be contiguous, so we need
2672 	 *  to allocate a separate iov array and split the I/O such that none of the resulting
2673 	 *  smaller I/Os crosses a cluster boundary.  These smaller I/Os will be issued in sequence (not in parallel),
2674 	 *  but since this case happens very infrequently, any performance impact will be negligible.
2675 	 *
2676 	 * This could be optimized in the future to allocate a big enough iov array to account for all of the iovs
2677 	 *  for all of the smaller I/Os, pre-build all of the iov arrays for the smaller I/Os, then issue them
2678 	 *  in a batch.  That would also require creating an intermediate spdk_bs_cpl that would get called
2679 	 *  when the batch was completed, to allow for freeing the memory for the iov arrays.
2680 	 */
2681 	if (spdk_likely(length <= _spdk_bs_num_io_units_to_cluster_boundary(blob, offset))) {
2682 		uint32_t lba_count;
2683 		uint64_t lba;
2684 
2685 		cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
2686 		cpl.u.blob_basic.cb_fn = cb_fn;
2687 		cpl.u.blob_basic.cb_arg = cb_arg;
2688 
2689 		if (blob->frozen_refcnt) {
2690 			/* This blob I/O is frozen */
2691 			enum spdk_blob_op_type op_type;
2692 			spdk_bs_user_op_t *op;
2693 			struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(_channel);
2694 
2695 			op_type = read ? SPDK_BLOB_READV : SPDK_BLOB_WRITEV;
2696 			op = spdk_bs_user_op_alloc(_channel, &cpl, op_type, blob, iov, iovcnt, offset, length);
2697 			if (!op) {
2698 				cb_fn(cb_arg, -ENOMEM);
2699 				return;
2700 			}
2701 
2702 			TAILQ_INSERT_TAIL(&bs_channel->queued_io, op, link);
2703 
2704 			return;
2705 		}
2706 
2707 		_spdk_blob_calculate_lba_and_lba_count(blob, offset, length, &lba, &lba_count);
2708 
2709 		if (read) {
2710 			spdk_bs_sequence_t *seq;
2711 
2712 			seq = spdk_bs_sequence_start(_channel, &cpl);
2713 			if (!seq) {
2714 				cb_fn(cb_arg, -ENOMEM);
2715 				return;
2716 			}
2717 
2718 			if (_spdk_bs_io_unit_is_allocated(blob, offset)) {
2719 				spdk_bs_sequence_readv_dev(seq, iov, iovcnt, lba, lba_count, _spdk_rw_iov_done, NULL);
2720 			} else {
2721 				spdk_bs_sequence_readv_bs_dev(seq, blob->back_bs_dev, iov, iovcnt, lba, lba_count,
2722 							      _spdk_rw_iov_done, NULL);
2723 			}
2724 		} else {
2725 			if (_spdk_bs_io_unit_is_allocated(blob, offset)) {
2726 				spdk_bs_sequence_t *seq;
2727 
2728 				seq = spdk_bs_sequence_start(_channel, &cpl);
2729 				if (!seq) {
2730 					cb_fn(cb_arg, -ENOMEM);
2731 					return;
2732 				}
2733 
2734 				spdk_bs_sequence_writev_dev(seq, iov, iovcnt, lba, lba_count, _spdk_rw_iov_done, NULL);
2735 			} else {
2736 				/* Queue this operation and allocate the cluster */
2737 				spdk_bs_user_op_t *op;
2738 
2739 				op = spdk_bs_user_op_alloc(_channel, &cpl, SPDK_BLOB_WRITEV, blob, iov, iovcnt, offset,
2740 							   length);
2741 				if (!op) {
2742 					cb_fn(cb_arg, -ENOMEM);
2743 					return;
2744 				}
2745 
2746 				_spdk_bs_allocate_and_copy_cluster(blob, _channel, offset, op);
2747 			}
2748 		}
2749 	} else {
2750 		struct rw_iov_ctx *ctx;
2751 
2752 		ctx = calloc(1, sizeof(struct rw_iov_ctx) + iovcnt * sizeof(struct iovec));
2753 		if (ctx == NULL) {
2754 			cb_fn(cb_arg, -ENOMEM);
2755 			return;
2756 		}
2757 
2758 		ctx->blob = blob;
2759 		ctx->channel = _channel;
2760 		ctx->cb_fn = cb_fn;
2761 		ctx->cb_arg = cb_arg;
2762 		ctx->read = read;
2763 		ctx->orig_iov = iov;
2764 		ctx->iovcnt = iovcnt;
2765 		ctx->io_unit_offset = offset;
2766 		ctx->io_units_remaining = length;
2767 		ctx->io_units_done = 0;
2768 
2769 		_spdk_rw_iov_split_next(ctx, 0);
2770 	}
2771 }
2772 
2773 static struct spdk_blob *
2774 _spdk_blob_lookup(struct spdk_blob_store *bs, spdk_blob_id blobid)
2775 {
2776 	struct spdk_blob *blob;
2777 
2778 	TAILQ_FOREACH(blob, &bs->blobs, link) {
2779 		if (blob->id == blobid) {
2780 			return blob;
2781 		}
2782 	}
2783 
2784 	return NULL;
2785 }
2786 
2787 static void
2788 _spdk_blob_get_snapshot_and_clone_entries(struct spdk_blob *blob,
2789 		struct spdk_blob_list **snapshot_entry, struct spdk_blob_list **clone_entry)
2790 {
2791 	assert(blob != NULL);
2792 	*snapshot_entry = NULL;
2793 	*clone_entry = NULL;
2794 
2795 	if (blob->parent_id == SPDK_BLOBID_INVALID) {
2796 		return;
2797 	}
2798 
2799 	TAILQ_FOREACH(*snapshot_entry, &blob->bs->snapshots, link) {
2800 		if ((*snapshot_entry)->id == blob->parent_id) {
2801 			break;
2802 		}
2803 	}
2804 
2805 	if (*snapshot_entry != NULL) {
2806 		TAILQ_FOREACH(*clone_entry, &(*snapshot_entry)->clones, link) {
2807 			if ((*clone_entry)->id == blob->id) {
2808 				break;
2809 			}
2810 		}
2811 
2812 		assert(*clone_entry != NULL);
2813 	}
2814 }
2815 
2816 static int
2817 _spdk_bs_channel_create(void *io_device, void *ctx_buf)
2818 {
2819 	struct spdk_blob_store		*bs = io_device;
2820 	struct spdk_bs_channel		*channel = ctx_buf;
2821 	struct spdk_bs_dev		*dev;
2822 	uint32_t			max_ops = bs->max_channel_ops;
2823 	uint32_t			i;
2824 
2825 	dev = bs->dev;
2826 
2827 	channel->req_mem = calloc(max_ops, sizeof(struct spdk_bs_request_set));
2828 	if (!channel->req_mem) {
2829 		return -1;
2830 	}
2831 
2832 	TAILQ_INIT(&channel->reqs);
2833 
2834 	for (i = 0; i < max_ops; i++) {
2835 		TAILQ_INSERT_TAIL(&channel->reqs, &channel->req_mem[i], link);
2836 	}
2837 
2838 	channel->bs = bs;
2839 	channel->dev = dev;
2840 	channel->dev_channel = dev->create_channel(dev);
2841 
2842 	if (!channel->dev_channel) {
2843 		SPDK_ERRLOG("Failed to create device channel.\n");
2844 		free(channel->req_mem);
2845 		return -1;
2846 	}
2847 
2848 	TAILQ_INIT(&channel->need_cluster_alloc);
2849 	TAILQ_INIT(&channel->queued_io);
2850 
2851 	return 0;
2852 }
2853 
2854 static void
2855 _spdk_bs_channel_destroy(void *io_device, void *ctx_buf)
2856 {
2857 	struct spdk_bs_channel *channel = ctx_buf;
2858 	spdk_bs_user_op_t *op;
2859 
2860 	while (!TAILQ_EMPTY(&channel->need_cluster_alloc)) {
2861 		op = TAILQ_FIRST(&channel->need_cluster_alloc);
2862 		TAILQ_REMOVE(&channel->need_cluster_alloc, op, link);
2863 		spdk_bs_user_op_abort(op);
2864 	}
2865 
2866 	while (!TAILQ_EMPTY(&channel->queued_io)) {
2867 		op = TAILQ_FIRST(&channel->queued_io);
2868 		TAILQ_REMOVE(&channel->queued_io, op, link);
2869 		spdk_bs_user_op_abort(op);
2870 	}
2871 
2872 	free(channel->req_mem);
2873 	channel->dev->destroy_channel(channel->dev, channel->dev_channel);
2874 }
2875 
2876 static void
2877 _spdk_bs_dev_destroy(void *io_device)
2878 {
2879 	struct spdk_blob_store *bs = io_device;
2880 	struct spdk_blob	*blob, *blob_tmp;
2881 
2882 	bs->dev->destroy(bs->dev);
2883 
2884 	TAILQ_FOREACH_SAFE(blob, &bs->blobs, link, blob_tmp) {
2885 		TAILQ_REMOVE(&bs->blobs, blob, link);
2886 		_spdk_blob_free(blob);
2887 	}
2888 
2889 	pthread_mutex_destroy(&bs->used_clusters_mutex);
2890 
2891 	spdk_bit_array_free(&bs->used_blobids);
2892 	spdk_bit_array_free(&bs->used_md_pages);
2893 	spdk_bit_array_free(&bs->used_clusters);
2894 	/*
2895 	 * If this function is called for any reason except a successful unload,
2896 	 * the unload_cpl type will be NONE and this will be a nop.
2897 	 */
2898 	spdk_bs_call_cpl(&bs->unload_cpl, bs->unload_err);
2899 
2900 	free(bs);
2901 }
2902 
2903 static int
2904 _spdk_bs_blob_list_add(struct spdk_blob *blob)
2905 {
2906 	spdk_blob_id snapshot_id;
2907 	struct spdk_blob_list *snapshot_entry = NULL;
2908 	struct spdk_blob_list *clone_entry = NULL;
2909 
2910 	assert(blob != NULL);
2911 
2912 	snapshot_id = blob->parent_id;
2913 	if (snapshot_id == SPDK_BLOBID_INVALID) {
2914 		return 0;
2915 	}
2916 
2917 	snapshot_entry = _spdk_bs_get_snapshot_entry(blob->bs, snapshot_id);
2918 	if (snapshot_entry == NULL) {
2919 		/* Snapshot not found */
2920 		snapshot_entry = calloc(1, sizeof(struct spdk_blob_list));
2921 		if (snapshot_entry == NULL) {
2922 			return -ENOMEM;
2923 		}
2924 		snapshot_entry->id = snapshot_id;
2925 		TAILQ_INIT(&snapshot_entry->clones);
2926 		TAILQ_INSERT_TAIL(&blob->bs->snapshots, snapshot_entry, link);
2927 	} else {
2928 		TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) {
2929 			if (clone_entry->id == blob->id) {
2930 				break;
2931 			}
2932 		}
2933 	}
2934 
2935 	if (clone_entry == NULL) {
2936 		/* Clone not found */
2937 		clone_entry = calloc(1, sizeof(struct spdk_blob_list));
2938 		if (clone_entry == NULL) {
2939 			return -ENOMEM;
2940 		}
2941 		clone_entry->id = blob->id;
2942 		TAILQ_INIT(&clone_entry->clones);
2943 		TAILQ_INSERT_TAIL(&snapshot_entry->clones, clone_entry, link);
2944 		snapshot_entry->clone_count++;
2945 	}
2946 
2947 	return 0;
2948 }
2949 
2950 static void
2951 _spdk_bs_blob_list_remove(struct spdk_blob *blob)
2952 {
2953 	struct spdk_blob_list *snapshot_entry = NULL;
2954 	struct spdk_blob_list *clone_entry = NULL;
2955 
2956 	_spdk_blob_get_snapshot_and_clone_entries(blob, &snapshot_entry, &clone_entry);
2957 
2958 	if (snapshot_entry == NULL) {
2959 		return;
2960 	}
2961 
2962 	blob->parent_id = SPDK_BLOBID_INVALID;
2963 	TAILQ_REMOVE(&snapshot_entry->clones, clone_entry, link);
2964 	free(clone_entry);
2965 
2966 	snapshot_entry->clone_count--;
2967 }
2968 
2969 static int
2970 _spdk_bs_blob_list_free(struct spdk_blob_store *bs)
2971 {
2972 	struct spdk_blob_list *snapshot_entry;
2973 	struct spdk_blob_list *snapshot_entry_tmp;
2974 	struct spdk_blob_list *clone_entry;
2975 	struct spdk_blob_list *clone_entry_tmp;
2976 
2977 	TAILQ_FOREACH_SAFE(snapshot_entry, &bs->snapshots, link, snapshot_entry_tmp) {
2978 		TAILQ_FOREACH_SAFE(clone_entry, &snapshot_entry->clones, link, clone_entry_tmp) {
2979 			TAILQ_REMOVE(&snapshot_entry->clones, clone_entry, link);
2980 			free(clone_entry);
2981 		}
2982 		TAILQ_REMOVE(&bs->snapshots, snapshot_entry, link);
2983 		free(snapshot_entry);
2984 	}
2985 
2986 	return 0;
2987 }
2988 
2989 static void
2990 _spdk_bs_free(struct spdk_blob_store *bs)
2991 {
2992 	_spdk_bs_blob_list_free(bs);
2993 
2994 	spdk_bs_unregister_md_thread(bs);
2995 	spdk_io_device_unregister(bs, _spdk_bs_dev_destroy);
2996 }
2997 
2998 void
2999 spdk_bs_opts_init(struct spdk_bs_opts *opts)
3000 {
3001 	opts->cluster_sz = SPDK_BLOB_OPTS_CLUSTER_SZ;
3002 	opts->num_md_pages = SPDK_BLOB_OPTS_NUM_MD_PAGES;
3003 	opts->max_md_ops = SPDK_BLOB_OPTS_MAX_MD_OPS;
3004 	opts->max_channel_ops = SPDK_BLOB_OPTS_DEFAULT_CHANNEL_OPS;
3005 	opts->clear_method = BS_CLEAR_WITH_UNMAP;
3006 	memset(&opts->bstype, 0, sizeof(opts->bstype));
3007 	opts->iter_cb_fn = NULL;
3008 	opts->iter_cb_arg = NULL;
3009 }
3010 
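/* A minimal usage sketch (assuming the public spdk_bs_init() entry point defined
 * elsewhere in the blobstore API):
 *
 *	struct spdk_bs_opts opts;
 *
 *	spdk_bs_opts_init(&opts);
 *	opts.cluster_sz = 4 * 1024 * 1024;	// override the default cluster size
 *	spdk_bs_init(dev, &opts, bs_init_done_cb, cb_arg);
 *
 * Always call spdk_bs_opts_init() before overriding individual fields so that any
 * fields added later keep sane defaults. */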
3011 static int
3012 _spdk_bs_opts_verify(struct spdk_bs_opts *opts)
3013 {
3014 	if (opts->cluster_sz == 0 || opts->num_md_pages == 0 || opts->max_md_ops == 0 ||
3015 	    opts->max_channel_ops == 0) {
3016 		SPDK_ERRLOG("Blobstore options cannot be set to 0\n");
3017 		return -1;
3018 	}
3019 
3020 	return 0;
3021 }
3022 
3023 static int
3024 _spdk_bs_alloc(struct spdk_bs_dev *dev, struct spdk_bs_opts *opts, struct spdk_blob_store **_bs)
3025 {
3026 	struct spdk_blob_store	*bs;
3027 	uint64_t dev_size;
3028 	int rc;
3029 
3030 	dev_size = dev->blocklen * dev->blockcnt;
3031 	if (dev_size < opts->cluster_sz) {
3032 		/* Device size cannot be smaller than cluster size of blobstore */
3033 		SPDK_INFOLOG(SPDK_LOG_BLOB, "Device size %" PRIu64 " is smaller than cluster size %" PRIu32 "\n",
3034 			     dev_size, opts->cluster_sz);
3035 		return -ENOSPC;
3036 	}
3037 	if (opts->cluster_sz < SPDK_BS_PAGE_SIZE) {
3038 		/* Cluster size cannot be smaller than page size */
3039 		SPDK_ERRLOG("Cluster size %" PRIu32 " is smaller than page size %d\n",
3040 			    opts->cluster_sz, SPDK_BS_PAGE_SIZE);
3041 		return -EINVAL;
3042 	}
3043 	bs = calloc(1, sizeof(struct spdk_blob_store));
3044 	if (!bs) {
3045 		return -ENOMEM;
3046 	}
3047 
3048 	TAILQ_INIT(&bs->blobs);
3049 	TAILQ_INIT(&bs->snapshots);
3050 	bs->dev = dev;
3051 	bs->md_thread = spdk_get_thread();
3052 	assert(bs->md_thread != NULL);
3053 
3054 	/*
3055 	 * Do not use _spdk_bs_lba_to_cluster() here since blockcnt may not be an
3056 	 *  even multiple of the cluster size.
3057 	 */
3058 	bs->cluster_sz = opts->cluster_sz;
3059 	bs->total_clusters = dev->blockcnt / (bs->cluster_sz / dev->blocklen);
3060 	bs->pages_per_cluster = bs->cluster_sz / SPDK_BS_PAGE_SIZE;
3061 	bs->num_free_clusters = bs->total_clusters;
3062 	bs->used_clusters = spdk_bit_array_create(bs->total_clusters);
3063 	bs->io_unit_size = dev->blocklen;
3064 	if (bs->used_clusters == NULL) {
3065 		free(bs);
3066 		return -ENOMEM;
3067 	}
3068 
3069 	bs->max_channel_ops = opts->max_channel_ops;
3070 	bs->super_blob = SPDK_BLOBID_INVALID;
3071 	memcpy(&bs->bstype, &opts->bstype, sizeof(opts->bstype));
3072 
3073 	/* The metadata is assumed to be at least 1 page */
3074 	bs->used_md_pages = spdk_bit_array_create(1);
3075 	bs->used_blobids = spdk_bit_array_create(0);
3076 
3077 	pthread_mutex_init(&bs->used_clusters_mutex, NULL);
3078 
3079 	spdk_io_device_register(bs, _spdk_bs_channel_create, _spdk_bs_channel_destroy,
3080 				sizeof(struct spdk_bs_channel), "blobstore");
3081 	rc = spdk_bs_register_md_thread(bs);
3082 	if (rc == -1) {
3083 		spdk_io_device_unregister(bs, NULL);
3084 		pthread_mutex_destroy(&bs->used_clusters_mutex);
3085 		spdk_bit_array_free(&bs->used_blobids);
3086 		spdk_bit_array_free(&bs->used_md_pages);
3087 		spdk_bit_array_free(&bs->used_clusters);
3088 		free(bs);
3089 		/* FIXME: this is a lie but don't know how to get a proper error code here */
3090 		return -ENOMEM;
3091 	}
3092 
3093 	*_bs = bs;
3094 	return 0;
3095 }
3096 
3097 /* START spdk_bs_load, spdk_bs_load_ctx will be used for both load and unload. */
3098 
3099 struct spdk_bs_load_ctx {
3100 	struct spdk_blob_store		*bs;
3101 	struct spdk_bs_super_block	*super;
3102 
3103 	struct spdk_bs_md_mask		*mask;
3104 	bool				in_page_chain;
3105 	uint32_t			page_index;
3106 	uint32_t			cur_page;
3107 	struct spdk_blob_md_page	*page;
3108 
3109 	uint64_t			num_extent_pages;
3110 	uint32_t			*extent_pages;
3111 
3112 	spdk_bs_sequence_t			*seq;
3113 	spdk_blob_op_with_handle_complete	iter_cb_fn;
3114 	void					*iter_cb_arg;
3115 	struct spdk_blob			*blob;
3116 	spdk_blob_id				blobid;
3117 };
3118 
3119 static void
3120 _spdk_bs_load_ctx_fail(struct spdk_bs_load_ctx *ctx, int bserrno)
3121 {
3122 	assert(bserrno != 0);
3123 
3124 	spdk_free(ctx->super);
3125 	spdk_bs_sequence_finish(ctx->seq, bserrno);
3126 	_spdk_bs_free(ctx->bs);
3127 	free(ctx);
3128 }
3129 
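/* Serialize an in-memory bit array into the on-disk mask format: bit i of the array
 * is stored as bit (i % 8) of byte (i / 8) in mask->mask. _spdk_bs_load_mask() below
 * performs the inverse transformation when the blobstore is loaded. */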
3130 static void
3131 _spdk_bs_set_mask(struct spdk_bit_array *array, struct spdk_bs_md_mask *mask)
3132 {
3133 	uint32_t i = 0;
3134 
3135 	while (true) {
3136 		i = spdk_bit_array_find_first_set(array, i);
3137 		if (i >= mask->length) {
3138 			break;
3139 		}
3140 		mask->mask[i / 8] |= 1U << (i % 8);
3141 		i++;
3142 	}
3143 }
3144 
3145 static int
3146 _spdk_bs_load_mask(struct spdk_bit_array **array_ptr, struct spdk_bs_md_mask *mask)
3147 {
3148 	struct spdk_bit_array *array;
3149 	uint32_t i;
3150 
3151 	if (spdk_bit_array_resize(array_ptr, mask->length) < 0) {
3152 		return -ENOMEM;
3153 	}
3154 
3155 	array = *array_ptr;
3156 	for (i = 0; i < mask->length; i++) {
3157 		if (mask->mask[i / 8] & (1U << (i % 8))) {
3158 			spdk_bit_array_set(array, i);
3159 		}
3160 	}
3161 
3162 	return 0;
3163 }
3164 
3165 static void
3166 _spdk_bs_write_super(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs,
3167 		     struct spdk_bs_super_block *super, spdk_bs_sequence_cpl cb_fn, void *cb_arg)
3168 {
3169 	/* Update the values in the super block */
3170 	super->super_blob = bs->super_blob;
3171 	memcpy(&super->bstype, &bs->bstype, sizeof(bs->bstype));
3172 	super->crc = _spdk_blob_md_page_calc_crc(super);
3173 	spdk_bs_sequence_write_dev(seq, super, _spdk_bs_page_to_lba(bs, 0),
3174 				   _spdk_bs_byte_to_lba(bs, sizeof(*super)),
3175 				   cb_fn, cb_arg);
3176 }
3177 
3178 static void
3179 _spdk_bs_write_used_clusters(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn)
3180 {
3181 	struct spdk_bs_load_ctx	*ctx = arg;
3182 	uint64_t	mask_size, lba, lba_count;
3183 
3184 	/* Write out the used clusters mask */
3185 	mask_size = ctx->super->used_cluster_mask_len * SPDK_BS_PAGE_SIZE;
3186 	ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL,
3187 				 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
3188 	if (!ctx->mask) {
3189 		_spdk_bs_load_ctx_fail(ctx, -ENOMEM);
3190 		return;
3191 	}
3192 
3193 	ctx->mask->type = SPDK_MD_MASK_TYPE_USED_CLUSTERS;
3194 	ctx->mask->length = ctx->bs->total_clusters;
3195 	assert(ctx->mask->length == spdk_bit_array_capacity(ctx->bs->used_clusters));
3196 
3197 	_spdk_bs_set_mask(ctx->bs->used_clusters, ctx->mask);
3198 	lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start);
3199 	lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len);
3200 	spdk_bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg);
3201 }
3202 
3203 static void
3204 _spdk_bs_write_used_md(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn)
3205 {
3206 	struct spdk_bs_load_ctx	*ctx = arg;
3207 	uint64_t	mask_size, lba, lba_count;
3208 
3209 	mask_size = ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE;
3210 	ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL,
3211 				 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
3212 	if (!ctx->mask) {
3213 		_spdk_bs_load_ctx_fail(ctx, -ENOMEM);
3214 		return;
3215 	}
3216 
3217 	ctx->mask->type = SPDK_MD_MASK_TYPE_USED_PAGES;
3218 	ctx->mask->length = ctx->super->md_len;
3219 	assert(ctx->mask->length == spdk_bit_array_capacity(ctx->bs->used_md_pages));
3220 
3221 	_spdk_bs_set_mask(ctx->bs->used_md_pages, ctx->mask);
3222 	lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_start);
3223 	lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_len);
3224 	spdk_bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg);
3225 }
3226 
3227 static void
3228 _spdk_bs_write_used_blobids(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn)
3229 {
3230 	struct spdk_bs_load_ctx	*ctx = arg;
3231 	uint64_t	mask_size, lba, lba_count;
3232 
3233 	if (ctx->super->used_blobid_mask_len == 0) {
3234 		/*
3235 		 * This is a pre-v3 on-disk format where the blobid mask does not get
3236 		 *  written to disk.
3237 		 */
3238 		cb_fn(seq, arg, 0);
3239 		return;
3240 	}
3241 
3242 	mask_size = ctx->super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE;
3243 	ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_SOCKET_ID_ANY,
3244 				 SPDK_MALLOC_DMA);
3245 	if (!ctx->mask) {
3246 		_spdk_bs_load_ctx_fail(ctx, -ENOMEM);
3247 		return;
3248 	}
3249 
3250 	ctx->mask->type = SPDK_MD_MASK_TYPE_USED_BLOBIDS;
3251 	ctx->mask->length = ctx->super->md_len;
3252 	assert(ctx->mask->length == spdk_bit_array_capacity(ctx->bs->used_blobids));
3253 
3254 	_spdk_bs_set_mask(ctx->bs->used_blobids, ctx->mask);
3255 	lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_start);
3256 	lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_len);
3257 	spdk_bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg);
3258 }
3259 
3260 static void
3261 _spdk_blob_set_thin_provision(struct spdk_blob *blob)
3262 {
3263 	_spdk_blob_verify_md_op(blob);
3264 	blob->invalid_flags |= SPDK_BLOB_THIN_PROV;
3265 	blob->state = SPDK_BLOB_STATE_DIRTY;
3266 }
3267 
3268 static void
3269 _spdk_blob_set_clear_method(struct spdk_blob *blob, enum blob_clear_method clear_method)
3270 {
3271 	_spdk_blob_verify_md_op(blob);
3272 	blob->clear_method = clear_method;
3273 	blob->md_ro_flags |= (clear_method << SPDK_BLOB_CLEAR_METHOD_SHIFT);
3274 	blob->state = SPDK_BLOB_STATE_DIRTY;
3275 }
3276 
3277 static void _spdk_bs_load_iter(void *arg, struct spdk_blob *blob, int bserrno);
3278 
3279 static void
3280 _spdk_bs_delete_corrupted_blob_cpl(void *cb_arg, int bserrno)
3281 {
3282 	struct spdk_bs_load_ctx *ctx = cb_arg;
3283 	spdk_blob_id id;
3284 	int64_t page_num;
3285 
3286 	/* Iterate to next blob (we can't use spdk_bs_iter_next function as our
3287 	 * last blob has been removed) */
3288 	page_num = _spdk_bs_blobid_to_page(ctx->blobid);
3289 	page_num++;
3290 	page_num = spdk_bit_array_find_first_set(ctx->bs->used_blobids, page_num);
3291 	if (page_num >= spdk_bit_array_capacity(ctx->bs->used_blobids)) {
3292 		_spdk_bs_load_iter(ctx, NULL, -ENOENT);
3293 		return;
3294 	}
3295 
3296 	id = _spdk_bs_page_to_blobid(page_num);
3297 
3298 	spdk_bs_open_blob(ctx->bs, id, _spdk_bs_load_iter, ctx);
3299 }
3300 
3301 static void
3302 _spdk_bs_delete_corrupted_close_cb(void *cb_arg, int bserrno)
3303 {
3304 	struct spdk_bs_load_ctx *ctx = cb_arg;
3305 
3306 	if (bserrno != 0) {
3307 		SPDK_ERRLOG("Failed to close corrupted blob\n");
3308 		spdk_bs_iter_next(ctx->bs, ctx->blob, _spdk_bs_load_iter, ctx);
3309 		return;
3310 	}
3311 
3312 	spdk_bs_delete_blob(ctx->bs, ctx->blobid, _spdk_bs_delete_corrupted_blob_cpl, ctx);
3313 }
3314 
3315 static void
3316 _spdk_bs_delete_corrupted_blob(void *cb_arg, int bserrno)
3317 {
3318 	struct spdk_bs_load_ctx *ctx = cb_arg;
3319 	uint64_t i;
3320 
3321 	if (bserrno != 0) {
3322 		SPDK_ERRLOG("Failed to close clone of a corrupted blob\n");
3323 		spdk_bs_iter_next(ctx->bs, ctx->blob, _spdk_bs_load_iter, ctx);
3324 		return;
3325 	}
3326 
3327 	/* Snapshot and clone have the same copy of cluster map and extent pages
3328 	 * at this point. Let's clear both for the snapshot now,
3329 	 * so that they won't be cleared for the clone later when we remove the snapshot.
3330 	 * Also set thin provisioning to pass the data corruption check */
3331 	for (i = 0; i < ctx->blob->active.num_clusters; i++) {
3332 		ctx->blob->active.clusters[i] = 0;
3333 	}
3334 	for (i = 0; i < ctx->blob->active.num_extent_pages; i++) {
3335 		ctx->blob->active.extent_pages[i] = 0;
3336 	}
3337 
3338 	ctx->blob->md_ro = false;
3339 
3340 	_spdk_blob_set_thin_provision(ctx->blob);
3341 
3342 	ctx->blobid = ctx->blob->id;
3343 
3344 	spdk_blob_close(ctx->blob, _spdk_bs_delete_corrupted_close_cb, ctx);
3345 }
3346 
3347 static void
3348 _spdk_bs_update_corrupted_blob(void *cb_arg, int bserrno)
3349 {
3350 	struct spdk_bs_load_ctx *ctx = cb_arg;
3351 
3352 	if (bserrno != 0) {
3353 		SPDK_ERRLOG("Failed to close clone of a corrupted blob\n");
3354 		spdk_bs_iter_next(ctx->bs, ctx->blob, _spdk_bs_load_iter, ctx);
3355 		return;
3356 	}
3357 
3358 	ctx->blob->md_ro = false;
3359 	_spdk_blob_remove_xattr(ctx->blob, SNAPSHOT_PENDING_REMOVAL, true);
3360 	_spdk_blob_remove_xattr(ctx->blob, SNAPSHOT_IN_PROGRESS, true);
3361 	spdk_blob_set_read_only(ctx->blob);
3362 
3363 	if (ctx->iter_cb_fn) {
3364 		ctx->iter_cb_fn(ctx->iter_cb_arg, ctx->blob, 0);
3365 	}
3366 	_spdk_bs_blob_list_add(ctx->blob);
3367 
3368 	spdk_bs_iter_next(ctx->bs, ctx->blob, _spdk_bs_load_iter, ctx);
3369 }
3370 
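/* Decide how to recover a snapshot that still carries a SNAPSHOT_PENDING_REMOVAL or
 * SNAPSHOT_IN_PROGRESS xattr: open its clone and compare parent_id. If the clone
 * still points at this snapshot, roll the operation forward by keeping the snapshot
 * and stripping the in-progress xattrs; otherwise remove the half-deleted snapshot. */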
3371 static void
3372 _spdk_bs_examine_clone(void *cb_arg, struct spdk_blob *blob, int bserrno)
3373 {
3374 	struct spdk_bs_load_ctx *ctx = cb_arg;
3375 
3376 	if (bserrno != 0) {
3377 		SPDK_ERRLOG("Failed to open clone of a corrupted blob\n");
3378 		spdk_bs_iter_next(ctx->bs, ctx->blob, _spdk_bs_load_iter, ctx);
3379 		return;
3380 	}
3381 
3382 	if (blob->parent_id == ctx->blob->id) {
3383 		/* Power failure occurred before updating clone (snapshot delete case)
3384 		 * or after updating clone (creating snapshot case) - keep snapshot */
3385 		spdk_blob_close(blob, _spdk_bs_update_corrupted_blob, ctx);
3386 	} else {
3387 		/* Power failure occurred after updating clone (snapshot delete case)
3388 		 * or before updating clone (creating snapshot case) - remove snapshot */
3389 		spdk_blob_close(blob, _spdk_bs_delete_corrupted_blob, ctx);
3390 	}
3391 }
3392 
3393 static void
3394 _spdk_bs_load_iter(void *arg, struct spdk_blob *blob, int bserrno)
3395 {
3396 	struct spdk_bs_load_ctx *ctx = arg;
3397 	const void *value;
3398 	size_t len;
3399 	int rc = 0;
3400 
3401 	if (bserrno == 0) {
3402 		/* Examine the blob to see if it was corrupted by a power failure. Fix
3403 		 * the ones that can be fixed and remove any others that remain corrupted.
3404 		 * If it is not corrupted, just process it */
3405 		rc = _spdk_blob_get_xattr_value(blob, SNAPSHOT_PENDING_REMOVAL, &value, &len, true);
3406 		if (rc != 0) {
3407 			rc = _spdk_blob_get_xattr_value(blob, SNAPSHOT_IN_PROGRESS, &value, &len, true);
3408 			if (rc != 0) {
3409 				/* Not corrupted - process it and continue with iterating through blobs */
3410 				if (ctx->iter_cb_fn) {
3411 					ctx->iter_cb_fn(ctx->iter_cb_arg, blob, 0);
3412 				}
3413 				_spdk_bs_blob_list_add(blob);
3414 				spdk_bs_iter_next(ctx->bs, blob, _spdk_bs_load_iter, ctx);
3415 				return;
3416 			}
3417 
3418 		}
3419 
3420 		assert(len == sizeof(spdk_blob_id));
3421 
3422 		ctx->blob = blob;
3423 
3424 		/* Open clone to check if we are able to fix this blob or should we remove it */
3425 		spdk_bs_open_blob(ctx->bs, *(spdk_blob_id *)value, _spdk_bs_examine_clone, ctx);
3426 		return;
3427 	} else if (bserrno == -ENOENT) {
3428 		bserrno = 0;
3429 	} else {
3430 		/*
3431 		 * This case needs to be looked at further.  Same problem
3432 		 *  exists with applications that rely on explicit blob
3433 		 *  iteration.  We should just skip the blob that failed
3434 		 *  to load and continue on to the next one.
3435 		 */
3436 		SPDK_ERRLOG("Error in iterating blobs\n");
3437 	}
3438 
3439 	ctx->iter_cb_fn = NULL;
3440 
3441 	spdk_free(ctx->super);
3442 	spdk_free(ctx->mask);
3443 	spdk_bs_sequence_finish(ctx->seq, bserrno);
3444 	free(ctx);
3445 }
3446 
3447 static void
3448 _spdk_bs_load_complete(struct spdk_bs_load_ctx *ctx)
3449 {
3450 	spdk_bs_iter_first(ctx->bs, _spdk_bs_load_iter, ctx);
3451 }
3452 
3453 static void
3454 _spdk_bs_load_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
3455 {
3456 	struct spdk_bs_load_ctx *ctx = cb_arg;
3457 	int rc;
3458 
3459 	/* The type must be correct */
3460 	assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_BLOBIDS);
3461 
3462 	/* The length of the mask (in bits) must not be greater than
3463 	 * the length of the buffer (converted to bits) */
3464 	assert(ctx->mask->length <= (ctx->super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE * 8));
3465 
3466 	/* The length of the mask must be exactly equal to the size
3467 	 * (in pages) of the metadata region */
3468 	assert(ctx->mask->length == ctx->super->md_len);
3469 
3470 	rc = _spdk_bs_load_mask(&ctx->bs->used_blobids, ctx->mask);
3471 	if (rc < 0) {
3472 		spdk_free(ctx->mask);
3473 		_spdk_bs_load_ctx_fail(ctx, rc);
3474 		return;
3475 	}
3476 
3477 	_spdk_bs_load_complete(ctx);
3478 }
3479 
3480 static void
3481 _spdk_bs_load_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
3482 {
3483 	struct spdk_bs_load_ctx *ctx = cb_arg;
3484 	uint64_t		lba, lba_count, mask_size;
3485 	int			rc;
3486 
3487 	if (bserrno != 0) {
3488 		_spdk_bs_load_ctx_fail(ctx, bserrno);
3489 		return;
3490 	}
3491 
3492 	/* The type must be correct */
3493 	assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_CLUSTERS);
3494 	/* The length of the mask (in bits) must not be greater than the length of the buffer (converted to bits) */
3495 	assert(ctx->mask->length <= (ctx->super->used_cluster_mask_len * sizeof(
3496 					     struct spdk_blob_md_page) * 8));
3497 	/* The length of the mask must be exactly equal to the total number of clusters */
3498 	assert(ctx->mask->length == ctx->bs->total_clusters);
3499 
3500 	rc = _spdk_bs_load_mask(&ctx->bs->used_clusters, ctx->mask);
3501 	if (rc < 0) {
3502 		spdk_free(ctx->mask);
3503 		_spdk_bs_load_ctx_fail(ctx, rc);
3504 		return;
3505 	}
3506 
3507 	ctx->bs->num_free_clusters = spdk_bit_array_count_clear(ctx->bs->used_clusters);
3508 	assert(ctx->bs->num_free_clusters <= ctx->bs->total_clusters);
3509 
3510 	spdk_free(ctx->mask);
3511 
3512 	/* Read the used blobids mask */
3513 	mask_size = ctx->super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE;
3514 	ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_SOCKET_ID_ANY,
3515 				 SPDK_MALLOC_DMA);
3516 	if (!ctx->mask) {
3517 		_spdk_bs_load_ctx_fail(ctx, -ENOMEM);
3518 		return;
3519 	}
3520 	lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_start);
3521 	lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_len);
3522 	spdk_bs_sequence_read_dev(seq, ctx->mask, lba, lba_count,
3523 				  _spdk_bs_load_used_blobids_cpl, ctx);
3524 }
3525 
3526 static void
3527 _spdk_bs_load_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
3528 {
3529 	struct spdk_bs_load_ctx *ctx = cb_arg;
3530 	uint64_t		lba, lba_count, mask_size;
3531 	int			rc;
3532 
3533 	if (bserrno != 0) {
3534 		_spdk_bs_load_ctx_fail(ctx, bserrno);
3535 		return;
3536 	}
3537 
3538 	/* The type must be correct */
3539 	assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_PAGES);
3540 	/* The length of the mask (in bits) must not be greater than the length of the buffer (converted to bits) */
3541 	assert(ctx->mask->length <= (ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE *
3542 				     8));
3543 	/* The length of the mask must be exactly equal to the size (in pages) of the metadata region */
3544 	assert(ctx->mask->length == ctx->super->md_len);
3545 
3546 	rc = _spdk_bs_load_mask(&ctx->bs->used_md_pages, ctx->mask);
3547 	if (rc < 0) {
3548 		spdk_free(ctx->mask);
3549 		_spdk_bs_load_ctx_fail(ctx, rc);
3550 		return;
3551 	}
3552 
3553 	spdk_free(ctx->mask);
3554 
3555 	/* Read the used clusters mask */
3556 	mask_size = ctx->super->used_cluster_mask_len * SPDK_BS_PAGE_SIZE;
3557 	ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_SOCKET_ID_ANY,
3558 				 SPDK_MALLOC_DMA);
3559 	if (!ctx->mask) {
3560 		_spdk_bs_load_ctx_fail(ctx, -ENOMEM);
3561 		return;
3562 	}
3563 	lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start);
3564 	lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len);
3565 	spdk_bs_sequence_read_dev(seq, ctx->mask, lba, lba_count,
3566 				  _spdk_bs_load_used_clusters_cpl, ctx);
3567 }
3568 
3569 static void
3570 _spdk_bs_load_read_used_pages(struct spdk_bs_load_ctx *ctx)
3571 {
3572 	uint64_t lba, lba_count, mask_size;
3573 
3574 	/* Read the used pages mask */
3575 	mask_size = ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE;
3576 	ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL,
3577 				 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
3578 	if (!ctx->mask) {
3579 		_spdk_bs_load_ctx_fail(ctx, -ENOMEM);
3580 		return;
3581 	}
3582 
3583 	lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_start);
3584 	lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_len);
3585 	spdk_bs_sequence_read_dev(ctx->seq, ctx->mask, lba, lba_count,
3586 				  _spdk_bs_load_used_pages_cpl, ctx);
3587 }
3588 
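/* Parse a single metadata page during replay. Allocated clusters referenced by
 * extent descriptors are marked in the used_clusters bit array, and extent table
 * entries are collected in ctx->extent_pages so that the extent pages themselves
 * can be replayed afterwards. */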
3589 static int
3590 _spdk_bs_load_replay_md_parse_page(struct spdk_bs_load_ctx *ctx)
3591 {
3592 	struct spdk_blob_store *bs = ctx->bs;
3593 	struct spdk_blob_md_page *page = ctx->page;
3594 	struct spdk_blob_md_descriptor *desc;
3595 	size_t	cur_desc = 0;
3596 
3597 	desc = (struct spdk_blob_md_descriptor *)page->descriptors;
3598 	while (cur_desc < sizeof(page->descriptors)) {
3599 		if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) {
3600 			if (desc->length == 0) {
3601 				/* If padding and length are 0, this terminates the page */
3602 				break;
3603 			}
3604 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE) {
3605 			struct spdk_blob_md_descriptor_extent_rle	*desc_extent_rle;
3606 			unsigned int				i, j;
3607 			unsigned int				cluster_count = 0;
3608 			uint32_t				cluster_idx;
3609 
3610 			desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)desc;
3611 
3612 			for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) {
3613 				for (j = 0; j < desc_extent_rle->extents[i].length; j++) {
3614 					cluster_idx = desc_extent_rle->extents[i].cluster_idx;
3615 					/*
3616 					 * cluster_idx = 0 means an unallocated cluster - don't mark that
3617 					 * in the used cluster map.
3618 					 */
3619 					if (cluster_idx != 0) {
3620 						spdk_bit_array_set(bs->used_clusters, cluster_idx + j);
3621 						if (bs->num_free_clusters == 0) {
3622 							return -ENOSPC;
3623 						}
3624 						bs->num_free_clusters--;
3625 					}
3626 					cluster_count++;
3627 				}
3628 			}
3629 			if (cluster_count == 0) {
3630 				return -EINVAL;
3631 			}
3632 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) {
3633 			struct spdk_blob_md_descriptor_extent_page	*desc_extent;
3634 			uint32_t					i;
3635 			uint32_t					cluster_count = 0;
3636 			uint32_t					cluster_idx;
3637 			size_t						cluster_idx_length;
3638 
3639 			desc_extent = (struct spdk_blob_md_descriptor_extent_page *)desc;
3640 			cluster_idx_length = desc_extent->length - sizeof(desc_extent->start_cluster_idx);
3641 
3642 			if (desc_extent->length <= sizeof(desc_extent->start_cluster_idx) ||
3643 			    (cluster_idx_length % sizeof(desc_extent->cluster_idx[0]) != 0)) {
3644 				return -EINVAL;
3645 			}
3646 
3647 			for (i = 0; i < cluster_idx_length / sizeof(desc_extent->cluster_idx[0]); i++) {
3648 				cluster_idx = desc_extent->cluster_idx[i];
3649 				/*
3650 				 * cluster_idx = 0 means an unallocated cluster - don't mark that
3651 				 * in the used cluster map.
3652 				 */
3653 				if (cluster_idx != 0) {
3654 					if (cluster_idx < desc_extent->start_cluster_idx &&
3655 					    cluster_idx >= desc_extent->start_cluster_idx + cluster_count) {
3656 						return -EINVAL;
3657 					}
3658 					spdk_bit_array_set(bs->used_clusters, cluster_idx);
3659 					if (bs->num_free_clusters == 0) {
3660 						return -ENOSPC;
3661 					}
3662 					bs->num_free_clusters--;
3663 				}
3664 				cluster_count++;
3665 			}
3666 
3667 			if (cluster_count == 0) {
3668 				return -EINVAL;
3669 			}
3670 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) {
3671 			/* Skip this item */
3672 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) {
3673 			/* Skip this item */
3674 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) {
3675 			/* Skip this item */
3676 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE) {
3677 			struct spdk_blob_md_descriptor_extent_table *desc_extent_table;
3678 			uint32_t num_extent_pages = ctx->num_extent_pages;
3679 			uint32_t i;
3680 			size_t extent_pages_length;
3681 			void *tmp;
3682 
3683 			desc_extent_table = (struct spdk_blob_md_descriptor_extent_table *)desc;
3684 			extent_pages_length = desc_extent_table->length - sizeof(desc_extent_table->num_clusters);
3685 
3686 			if (desc_extent_table->length == 0 ||
3687 			    (extent_pages_length % sizeof(desc_extent_table->extent_page[0]) != 0)) {
3688 				return -EINVAL;
3689 			}
3690 
3691 			for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) {
3692 				if (desc_extent_table->extent_page[i].page_idx != 0) {
3693 					if (desc_extent_table->extent_page[i].num_pages != 1) {
3694 						return -EINVAL;
3695 					}
3696 					num_extent_pages += 1;
3697 				}
3698 			}
3699 
3700 			if (num_extent_pages > 0) {
3701 				tmp = realloc(ctx->extent_pages, num_extent_pages * sizeof(uint32_t));
3702 				if (tmp == NULL) {
3703 					return -ENOMEM;
3704 				}
3705 				ctx->extent_pages = tmp;
3706 
3707 				/* Extent table entries contain md page numbers for extent pages.
3708 				 * Zeroes represent unallocated extent pages; those are run-length-encoded.
3709 				 */
3710 				for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) {
3711 					if (desc_extent_table->extent_page[i].page_idx != 0) {
3712 						ctx->extent_pages[ctx->num_extent_pages] = desc_extent_table->extent_page[i].page_idx;
3713 						ctx->num_extent_pages += 1;
3714 					}
3715 				}
3716 			}
3717 		} else {
3718 			/* Error */
3719 			return -EINVAL;
3720 		}
3721 		/* Advance to the next descriptor */
3722 		cur_desc += sizeof(*desc) + desc->length;
3723 		if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) {
3724 			break;
3725 		}
3726 		desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc);
3727 	}
3728 	return 0;
3729 }
3730 
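/* A valid extent page has a matching CRC, sequence number 0 and exactly one
 * EXTENT_PAGE descriptor that fits within the page. */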
3731 static bool _spdk_bs_load_cur_extent_page_valid(struct spdk_blob_md_page *page)
3732 {
3733 	uint32_t crc;
3734 	struct spdk_blob_md_descriptor *desc = (struct spdk_blob_md_descriptor *)page->descriptors;
3735 	size_t desc_len;
3736 
3737 	crc = _spdk_blob_md_page_calc_crc(page);
3738 	if (crc != page->crc) {
3739 		return false;
3740 	}
3741 
3742 	/* Extent page should always be of sequence num 0. */
3743 	if (page->sequence_num != 0) {
3744 		return false;
3745 	}
3746 
3747 	/* Descriptor type must be EXTENT_PAGE. */
3748 	if (desc->type != SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) {
3749 		return false;
3750 	}
3751 
3752 	/* Descriptor length cannot exceed the page. */
3753 	desc_len = sizeof(*desc) + desc->length;
3754 	if (desc_len > sizeof(page->descriptors)) {
3755 		return false;
3756 	}
3757 
3758 	/* It has to be the only descriptor in the page. */
3759 	if (desc_len + sizeof(*desc) <= sizeof(page->descriptors)) {
3760 		desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + desc_len);
3761 		if (desc->length != 0) {
3762 			return false;
3763 		}
3764 	}
3765 
3766 	return true;
3767 }
3768 
3769 static bool _spdk_bs_load_cur_md_page_valid(struct spdk_bs_load_ctx *ctx)
3770 {
3771 	uint32_t crc;
3772 
3773 	crc = _spdk_blob_md_page_calc_crc(ctx->page);
3774 	if (crc != ctx->page->crc) {
3775 		return false;
3776 	}
3777 
3778 	/* First page of a sequence should match the blobid. */
3779 	if (ctx->page->sequence_num == 0 &&
3780 	    _spdk_bs_page_to_blobid(ctx->cur_page) != ctx->page->id) {
3781 		return false;
3782 	}
3783 	return true;
3784 }
3785 
3786 static void
3787 _spdk_bs_load_replay_cur_md_page(struct spdk_bs_load_ctx *ctx);
3788 
3789 static void
3790 _spdk_bs_load_write_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
3791 {
3792 	struct spdk_bs_load_ctx	*ctx = cb_arg;
3793 
3794 	if (bserrno != 0) {
3795 		_spdk_bs_load_ctx_fail(ctx, bserrno);
3796 		return;
3797 	}
3798 
3799 	_spdk_bs_load_complete(ctx);
3800 }
3801 
3802 static void
3803 _spdk_bs_load_write_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
3804 {
3805 	struct spdk_bs_load_ctx	*ctx = cb_arg;
3806 
3807 	spdk_free(ctx->mask);
3808 	ctx->mask = NULL;
3809 
3810 	if (bserrno != 0) {
3811 		_spdk_bs_load_ctx_fail(ctx, bserrno);
3812 		return;
3813 	}
3814 
3815 	_spdk_bs_write_used_clusters(seq, ctx, _spdk_bs_load_write_used_clusters_cpl);
3816 }
3817 
3818 static void
3819 _spdk_bs_load_write_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
3820 {
3821 	struct spdk_bs_load_ctx	*ctx = cb_arg;
3822 
3823 	spdk_free(ctx->mask);
3824 	ctx->mask = NULL;
3825 
3826 	if (bserrno != 0) {
3827 		_spdk_bs_load_ctx_fail(ctx, bserrno);
3828 		return;
3829 	}
3830 
3831 	_spdk_bs_write_used_blobids(seq, ctx, _spdk_bs_load_write_used_blobids_cpl);
3832 }
3833 
3834 static void
3835 _spdk_bs_load_write_used_md(struct spdk_bs_load_ctx *ctx)
3836 {
3837 	_spdk_bs_write_used_md(ctx->seq, ctx, _spdk_bs_load_write_used_pages_cpl);
3838 }
3839 
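/* Called once a chain of metadata pages (and its extent pages) has been fully
 * replayed. Advance to the next unclaimed metadata page, or, when the entire
 * metadata region has been replayed, claim the clusters used by the metadata
 * and persist the rebuilt masks. */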
3840 static void
3841 _spdk_bs_load_replay_md_chain_cpl(struct spdk_bs_load_ctx *ctx)
3842 {
3843 	uint64_t num_md_clusters;
3844 	uint64_t i;
3845 
3846 	ctx->in_page_chain = false;
3847 
3848 	do {
3849 		ctx->page_index++;
3850 	} while (spdk_bit_array_get(ctx->bs->used_md_pages, ctx->page_index) == true);
3851 
3852 	if (ctx->page_index < ctx->super->md_len) {
3853 		ctx->cur_page = ctx->page_index;
3854 		_spdk_bs_load_replay_cur_md_page(ctx);
3855 	} else {
3856 		/* Claim all of the clusters used by the metadata */
3857 		num_md_clusters = spdk_divide_round_up(ctx->super->md_len, ctx->bs->pages_per_cluster);
3858 		for (i = 0; i < num_md_clusters; i++) {
3859 			_spdk_bs_claim_cluster(ctx->bs, i);
3860 		}
3861 		spdk_free(ctx->page);
3862 		_spdk_bs_load_write_used_md(ctx);
3863 	}
3864 }
3865 
3866 static void _spdk_bs_load_replay_extent_page(spdk_bs_sequence_t *seq, uint32_t page, void *cb_arg);
3867 
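/* Completion for reading one extent page during replay. Validate the page, mark
 * its metadata page as used, parse its clusters, then read the next collected
 * extent page (walking ctx->extent_pages from the end) until none remain. */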
3868 static void
3869 _spdk_bs_load_replay_extent_page_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
3870 {
3871 	struct spdk_bs_load_ctx *ctx = cb_arg;
3872 	uint32_t page_num;
3873 
3874 	if (bserrno != 0) {
3875 		_spdk_bs_load_ctx_fail(ctx, bserrno);
3876 		return;
3877 	}
3878 
3879 	/* Extent pages are only read when referenced from within a chain of md pages.
3880 	 * The integrity of the md is compromised if that page was not a valid extent page. */
3881 	if (_spdk_bs_load_cur_extent_page_valid(ctx->page) != true) {
3882 		_spdk_bs_load_ctx_fail(ctx, -EILSEQ);
3883 		return;
3884 	}
3885 
3886 	page_num = ctx->extent_pages[ctx->num_extent_pages - 1];
3887 	spdk_bit_array_set(ctx->bs->used_md_pages, page_num);
3888 	if (_spdk_bs_load_replay_md_parse_page(ctx)) {
3889 		_spdk_bs_load_ctx_fail(ctx, -EILSEQ);
3890 		return;
3891 	}
3892 
3893 	ctx->num_extent_pages--;
3894 	if (ctx->num_extent_pages > 0) {
3895 		_spdk_bs_load_replay_extent_page(seq, ctx->extent_pages[ctx->num_extent_pages - 1], ctx);
3896 		return;
3897 	}
3898 
3899 	free(ctx->extent_pages);
3900 	ctx->extent_pages = NULL;
3901 
3902 	_spdk_bs_load_replay_md_chain_cpl(ctx);
3903 }
3904 
3905 static void
3906 _spdk_bs_load_replay_extent_page(spdk_bs_sequence_t *seq, uint32_t page, void *cb_arg)
3907 {
3908 	struct spdk_bs_load_ctx *ctx = cb_arg;
3909 	uint64_t lba;
3910 
3911 	assert(page < ctx->super->md_len);
3912 	lba = _spdk_bs_md_page_to_lba(ctx->bs, page);
3913 	spdk_bs_sequence_read_dev(seq, ctx->page, lba,
3914 				  _spdk_bs_byte_to_lba(ctx->bs, SPDK_BS_PAGE_SIZE),
3915 				  _spdk_bs_load_replay_extent_page_cpl, ctx);
3916 }
3917 
3918 static void
3919 _spdk_bs_load_replay_md_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
3920 {
3921 	struct spdk_bs_load_ctx *ctx = cb_arg;
3922 	uint32_t page_num;
3923 
3924 	if (bserrno != 0) {
3925 		_spdk_bs_load_ctx_fail(ctx, bserrno);
3926 		return;
3927 	}
3928 
3929 	page_num = ctx->cur_page;
3930 	if (_spdk_bs_load_cur_md_page_valid(ctx) == true) {
3931 		if (ctx->page->sequence_num == 0 || ctx->in_page_chain == true) {
3932 			_spdk_bs_claim_md_page(ctx->bs, page_num);
3933 			if (ctx->page->sequence_num == 0) {
3934 				spdk_bit_array_set(ctx->bs->used_blobids, page_num);
3935 			}
3936 			if (_spdk_bs_load_replay_md_parse_page(ctx)) {
3937 				_spdk_bs_load_ctx_fail(ctx, -EILSEQ);
3938 				return;
3939 			}
3940 			if (ctx->page->next != SPDK_INVALID_MD_PAGE) {
3941 				ctx->in_page_chain = true;
3942 				ctx->cur_page = ctx->page->next;
3943 				_spdk_bs_load_replay_cur_md_page(ctx);
3944 				return;
3945 			}
3946 			if (ctx->num_extent_pages != 0) {
3947 				/* Extent pages are read from last to first,
3948 				 * decrementing num_extent_pages as they are read. */
3949 				_spdk_bs_load_replay_extent_page(seq, ctx->extent_pages[ctx->num_extent_pages - 1], ctx);
3950 				return;
3951 			}
3952 		}
3953 	}
3954 	_spdk_bs_load_replay_md_chain_cpl(ctx);
3955 }
3956 
3957 static void
3958 _spdk_bs_load_replay_cur_md_page(struct spdk_bs_load_ctx *ctx)
3959 {
3960 	uint64_t lba;
3961 
3962 	assert(ctx->cur_page < ctx->super->md_len);
3963 	lba = _spdk_bs_md_page_to_lba(ctx->bs, ctx->cur_page);
3964 	spdk_bs_sequence_read_dev(ctx->seq, ctx->page, lba,
3965 				  _spdk_bs_byte_to_lba(ctx->bs, SPDK_BS_PAGE_SIZE),
3966 				  _spdk_bs_load_replay_md_cpl, ctx);
3967 }
3968 
3969 static void
3970 _spdk_bs_load_replay_md(struct spdk_bs_load_ctx *ctx)
3971 {
3972 	ctx->page_index = 0;
3973 	ctx->cur_page = 0;
3974 	ctx->page = spdk_zmalloc(SPDK_BS_PAGE_SIZE, SPDK_BS_PAGE_SIZE,
3975 				 NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
3976 	if (!ctx->page) {
3977 		_spdk_bs_load_ctx_fail(ctx, -ENOMEM);
3978 		return;
3979 	}
3980 	_spdk_bs_load_replay_cur_md_page(ctx);
3981 }
3982 
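/* Recovery path taken when the blobstore was not shut down cleanly or the
 * on-disk masks are missing: resize the in-memory bit arrays, then rebuild them
 * by replaying the entire metadata region. */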
3983 static void
3984 _spdk_bs_recover(struct spdk_bs_load_ctx *ctx)
3985 {
3986 	int		rc;
3987 
3988 	rc = spdk_bit_array_resize(&ctx->bs->used_md_pages, ctx->super->md_len);
3989 	if (rc < 0) {
3990 		_spdk_bs_load_ctx_fail(ctx, -ENOMEM);
3991 		return;
3992 	}
3993 
3994 	rc = spdk_bit_array_resize(&ctx->bs->used_blobids, ctx->super->md_len);
3995 	if (rc < 0) {
3996 		_spdk_bs_load_ctx_fail(ctx, -ENOMEM);
3997 		return;
3998 	}
3999 
4000 	rc = spdk_bit_array_resize(&ctx->bs->used_clusters, ctx->bs->total_clusters);
4001 	if (rc < 0) {
4002 		_spdk_bs_load_ctx_fail(ctx, -ENOMEM);
4003 		return;
4004 	}
4005 
4006 	ctx->bs->num_free_clusters = ctx->bs->total_clusters;
4007 	_spdk_bs_load_replay_md(ctx);
4008 }
4009 
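/* Completion for the super block read issued by spdk_bs_load(). Validate the
 * super block (version, signature, CRC, bstype and size), populate the
 * blobstore fields from it, then either recover or read the persisted masks
 * depending on whether the previous shutdown was clean. */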
4010 static void
4011 _spdk_bs_load_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4012 {
4013 	struct spdk_bs_load_ctx *ctx = cb_arg;
4014 	uint32_t	crc;
4015 	int		rc;
4016 	static const char zeros[SPDK_BLOBSTORE_TYPE_LENGTH];
4017 
4018 	if (ctx->super->version > SPDK_BS_VERSION ||
4019 	    ctx->super->version < SPDK_BS_INITIAL_VERSION) {
4020 		_spdk_bs_load_ctx_fail(ctx, -EILSEQ);
4021 		return;
4022 	}
4023 
4024 	if (memcmp(ctx->super->signature, SPDK_BS_SUPER_BLOCK_SIG,
4025 		   sizeof(ctx->super->signature)) != 0) {
4026 		_spdk_bs_load_ctx_fail(ctx, -EILSEQ);
4027 		return;
4028 	}
4029 
4030 	crc = _spdk_blob_md_page_calc_crc(ctx->super);
4031 	if (crc != ctx->super->crc) {
4032 		_spdk_bs_load_ctx_fail(ctx, -EILSEQ);
4033 		return;
4034 	}
4035 
4036 	if (memcmp(&ctx->bs->bstype, &ctx->super->bstype, SPDK_BLOBSTORE_TYPE_LENGTH) == 0) {
4037 		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Bstype matched - loading blobstore\n");
4038 	} else if (memcmp(&ctx->bs->bstype, zeros, SPDK_BLOBSTORE_TYPE_LENGTH) == 0) {
4039 		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Bstype wildcard used - loading blobstore regardless of bstype\n");
4040 	} else {
4041 		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Unexpected bstype\n");
4042 		SPDK_LOGDUMP(SPDK_LOG_BLOB, "Expected:", ctx->bs->bstype.bstype, SPDK_BLOBSTORE_TYPE_LENGTH);
4043 		SPDK_LOGDUMP(SPDK_LOG_BLOB, "Found:", ctx->super->bstype.bstype, SPDK_BLOBSTORE_TYPE_LENGTH);
4044 		_spdk_bs_load_ctx_fail(ctx, -ENXIO);
4045 		return;
4046 	}
4047 
4048 	if (ctx->super->size > ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen) {
4049 		SPDK_NOTICELOG("Size mismatch, dev size: %lu, blobstore size: %lu\n",
4050 			       ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen, ctx->super->size);
4051 		_spdk_bs_load_ctx_fail(ctx, -EILSEQ);
4052 		return;
4053 	}
4054 
4055 	if (ctx->super->size == 0) {
4056 		ctx->super->size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen;
4057 	}
4058 
4059 	if (ctx->super->io_unit_size == 0) {
4060 		ctx->super->io_unit_size = SPDK_BS_PAGE_SIZE;
4061 	}
4062 
4063 	/* Parse the super block */
4064 	ctx->bs->clean = 1;
4065 	ctx->bs->cluster_sz = ctx->super->cluster_size;
4066 	ctx->bs->total_clusters = ctx->super->size / ctx->super->cluster_size;
4067 	ctx->bs->pages_per_cluster = ctx->bs->cluster_sz / SPDK_BS_PAGE_SIZE;
4068 	ctx->bs->io_unit_size = ctx->super->io_unit_size;
4069 	rc = spdk_bit_array_resize(&ctx->bs->used_clusters, ctx->bs->total_clusters);
4070 	if (rc < 0) {
4071 		_spdk_bs_load_ctx_fail(ctx, -ENOMEM);
4072 		return;
4073 	}
4074 	ctx->bs->md_start = ctx->super->md_start;
4075 	ctx->bs->md_len = ctx->super->md_len;
4076 	ctx->bs->total_data_clusters = ctx->bs->total_clusters - spdk_divide_round_up(
4077 					       ctx->bs->md_start + ctx->bs->md_len, ctx->bs->pages_per_cluster);
4078 	ctx->bs->super_blob = ctx->super->super_blob;
4079 	memcpy(&ctx->bs->bstype, &ctx->super->bstype, sizeof(ctx->super->bstype));
4080 
4081 	if (ctx->super->used_blobid_mask_len == 0 || ctx->super->clean == 0) {
4082 		_spdk_bs_recover(ctx);
4083 	} else {
4084 		_spdk_bs_load_read_used_pages(ctx);
4085 	}
4086 }
4087 
4088 void
4089 spdk_bs_load(struct spdk_bs_dev *dev, struct spdk_bs_opts *o,
4090 	     spdk_bs_op_with_handle_complete cb_fn, void *cb_arg)
4091 {
4092 	struct spdk_blob_store	*bs;
4093 	struct spdk_bs_cpl	cpl;
4094 	struct spdk_bs_load_ctx *ctx;
4095 	struct spdk_bs_opts	opts = {};
4096 	int err;
4097 
4098 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Loading blobstore from dev %p\n", dev);
4099 
4100 	if ((SPDK_BS_PAGE_SIZE % dev->blocklen) != 0) {
4101 		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "unsupported dev block length of %d\n", dev->blocklen);
4102 		dev->destroy(dev);
4103 		cb_fn(cb_arg, NULL, -EINVAL);
4104 		return;
4105 	}
4106 
4107 	if (o) {
4108 		opts = *o;
4109 	} else {
4110 		spdk_bs_opts_init(&opts);
4111 	}
4112 
4113 	if (opts.max_md_ops == 0 || opts.max_channel_ops == 0) {
4114 		dev->destroy(dev);
4115 		cb_fn(cb_arg, NULL, -EINVAL);
4116 		return;
4117 	}
4118 
4119 	err = _spdk_bs_alloc(dev, &opts, &bs);
4120 	if (err) {
4121 		dev->destroy(dev);
4122 		cb_fn(cb_arg, NULL, err);
4123 		return;
4124 	}
4125 
4126 	ctx = calloc(1, sizeof(*ctx));
4127 	if (!ctx) {
4128 		_spdk_bs_free(bs);
4129 		cb_fn(cb_arg, NULL, -ENOMEM);
4130 		return;
4131 	}
4132 
4133 	ctx->bs = bs;
4134 	ctx->iter_cb_fn = opts.iter_cb_fn;
4135 	ctx->iter_cb_arg = opts.iter_cb_arg;
4136 
4137 	/* Allocate memory for the super block */
4138 	ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL,
4139 				  SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
4140 	if (!ctx->super) {
4141 		free(ctx);
4142 		_spdk_bs_free(bs);
4143 		cb_fn(cb_arg, NULL, -ENOMEM);
4144 		return;
4145 	}
4146 
4147 	cpl.type = SPDK_BS_CPL_TYPE_BS_HANDLE;
4148 	cpl.u.bs_handle.cb_fn = cb_fn;
4149 	cpl.u.bs_handle.cb_arg = cb_arg;
4150 	cpl.u.bs_handle.bs = bs;
4151 
4152 	ctx->seq = spdk_bs_sequence_start(bs->md_channel, &cpl);
4153 	if (!ctx->seq) {
4154 		spdk_free(ctx->super);
4155 		free(ctx);
4156 		_spdk_bs_free(bs);
4157 		cb_fn(cb_arg, NULL, -ENOMEM);
4158 		return;
4159 	}
4160 
4161 	/* Read the super block */
4162 	spdk_bs_sequence_read_dev(ctx->seq, ctx->super, _spdk_bs_page_to_lba(bs, 0),
4163 				  _spdk_bs_byte_to_lba(bs, sizeof(*ctx->super)),
4164 				  _spdk_bs_load_super_cpl, ctx);
4165 }
4166 
4167 /* END spdk_bs_load */
4168 
4169 /* START spdk_bs_dump */
4170 
4171 struct spdk_bs_dump_ctx {
4172 	struct spdk_blob_store		*bs;
4173 	struct spdk_bs_super_block	*super;
4174 	uint32_t			cur_page;
4175 	struct spdk_blob_md_page	*page;
4176 	spdk_bs_sequence_t		*seq;
4177 	FILE				*fp;
4178 	spdk_bs_dump_print_xattr	print_xattr_fn;
4179 	char				xattr_name[4096];
4180 };
4181 
4182 static void
4183 _spdk_bs_dump_finish(spdk_bs_sequence_t *seq, struct spdk_bs_dump_ctx *ctx, int bserrno)
4184 {
4185 	spdk_free(ctx->super);
4186 
4187 	/*
4188 	 * We need to defer calling spdk_bs_call_cpl() until after
4189 	 * dev destruction, so tuck these away for later use.
4190 	 */
4191 	ctx->bs->unload_err = bserrno;
4192 	memcpy(&ctx->bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl));
4193 	seq->cpl.type = SPDK_BS_CPL_TYPE_NONE;
4194 
4195 	spdk_bs_sequence_finish(seq, 0);
4196 	_spdk_bs_free(ctx->bs);
4197 	free(ctx);
4198 }
4199 
4200 static void _spdk_bs_dump_read_md_page(spdk_bs_sequence_t *seq, void *cb_arg);
4201 
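/* Pretty-print a single metadata page to ctx->fp: page index, blob id, CRC
 * status and each descriptor (RLE extents, extent pages and xattrs). */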
4202 static void
4203 _spdk_bs_dump_print_md_page(struct spdk_bs_dump_ctx *ctx)
4204 {
4205 	uint32_t page_idx = ctx->cur_page;
4206 	struct spdk_blob_md_page *page = ctx->page;
4207 	struct spdk_blob_md_descriptor *desc;
4208 	size_t cur_desc = 0;
4209 	uint32_t crc;
4210 
4211 	fprintf(ctx->fp, "=========\n");
4212 	fprintf(ctx->fp, "Metadata Page Index: %" PRIu32 " (0x%" PRIx32 ")\n", page_idx, page_idx);
4213 	fprintf(ctx->fp, "Blob ID: 0x%" PRIx64 "\n", page->id);
4214 
4215 	crc = _spdk_blob_md_page_calc_crc(page);
4216 	fprintf(ctx->fp, "CRC: 0x%" PRIx32 " (%s)\n", page->crc, crc == page->crc ? "OK" : "Mismatch");
4217 
4218 	desc = (struct spdk_blob_md_descriptor *)page->descriptors;
4219 	while (cur_desc < sizeof(page->descriptors)) {
4220 		if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) {
4221 			if (desc->length == 0) {
4222 				/* If padding and length are 0, this terminates the page */
4223 				break;
4224 			}
4225 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE) {
4226 			struct spdk_blob_md_descriptor_extent_rle	*desc_extent_rle;
4227 			unsigned int				i;
4228 
4229 			desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)desc;
4230 
4231 			for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) {
4232 				if (desc_extent_rle->extents[i].cluster_idx != 0) {
4233 					fprintf(ctx->fp, "Allocated Extent - Start: %" PRIu32,
4234 						desc_extent_rle->extents[i].cluster_idx);
4235 				} else {
4236 					fprintf(ctx->fp, "Unallocated Extent - ");
4237 				}
4238 				fprintf(ctx->fp, " Length: %" PRIu32, desc_extent_rle->extents[i].length);
4239 				fprintf(ctx->fp, "\n");
4240 			}
4241 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) {
4242 			struct spdk_blob_md_descriptor_extent_page	*desc_extent;
4243 			unsigned int					i;
4244 
4245 			desc_extent = (struct spdk_blob_md_descriptor_extent_page *)desc;
4246 
4247 			for (i = 0; i < desc_extent->length / sizeof(desc_extent->cluster_idx[0]); i++) {
4248 				if (desc_extent->cluster_idx[i] != 0) {
4249 					fprintf(ctx->fp, "Allocated Extent - Start: %" PRIu32,
4250 						desc_extent->cluster_idx[i]);
4251 				} else {
4252 					fprintf(ctx->fp, "Unallocated Extent");
4253 				}
4254 				fprintf(ctx->fp, "\n");
4255 			}
4256 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) {
4257 			struct spdk_blob_md_descriptor_xattr *desc_xattr;
4258 			uint32_t i;
4259 
4260 			desc_xattr = (struct spdk_blob_md_descriptor_xattr *)desc;
4261 
4262 			if (desc_xattr->length !=
4263 			    sizeof(desc_xattr->name_length) + sizeof(desc_xattr->value_length) +
4264 			    desc_xattr->name_length + desc_xattr->value_length) {
				fprintf(ctx->fp, "XATTR: Invalid descriptor length\n");
4265 			}
4266 
4267 			memcpy(ctx->xattr_name, desc_xattr->name, desc_xattr->name_length);
4268 			ctx->xattr_name[desc_xattr->name_length] = '\0';
4269 			fprintf(ctx->fp, "XATTR: name = \"%s\"\n", ctx->xattr_name);
4270 			fprintf(ctx->fp, "       value = \"");
4271 			ctx->print_xattr_fn(ctx->fp, ctx->super->bstype.bstype, ctx->xattr_name,
4272 					    (void *)((uintptr_t)desc_xattr->name + desc_xattr->name_length),
4273 					    desc_xattr->value_length);
4274 			fprintf(ctx->fp, "\"\n");
4275 			for (i = 0; i < desc_xattr->value_length; i++) {
4276 				if (i % 16 == 0) {
4277 					fprintf(ctx->fp, "               ");
4278 				}
4279 				fprintf(ctx->fp, "%02" PRIx8 " ", *((uint8_t *)desc_xattr->name + desc_xattr->name_length + i));
4280 				if ((i + 1) % 16 == 0) {
4281 					fprintf(ctx->fp, "\n");
4282 				}
4283 			}
4284 			if (i % 16 != 0) {
4285 				fprintf(ctx->fp, "\n");
4286 			}
4287 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) {
4288 			/* TODO */
4289 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) {
4290 			/* TODO */
4291 		} else {
4292 			/* Error */
4293 		}
4294 		/* Advance to the next descriptor */
4295 		cur_desc += sizeof(*desc) + desc->length;
4296 		if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) {
4297 			break;
4298 		}
4299 		desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc);
4300 	}
4301 }
4302 
4303 static void
4304 _spdk_bs_dump_read_md_page_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4305 {
4306 	struct spdk_bs_dump_ctx *ctx = cb_arg;
4307 
4308 	if (bserrno != 0) {
4309 		_spdk_bs_dump_finish(seq, ctx, bserrno);
4310 		return;
4311 	}
4312 
4313 	if (ctx->page->id != 0) {
4314 		_spdk_bs_dump_print_md_page(ctx);
4315 	}
4316 
4317 	ctx->cur_page++;
4318 
4319 	if (ctx->cur_page < ctx->super->md_len) {
4320 		_spdk_bs_dump_read_md_page(seq, ctx);
4321 	} else {
4322 		spdk_free(ctx->page);
4323 		_spdk_bs_dump_finish(seq, ctx, 0);
4324 	}
4325 }
4326 
4327 static void
4328 _spdk_bs_dump_read_md_page(spdk_bs_sequence_t *seq, void *cb_arg)
4329 {
4330 	struct spdk_bs_dump_ctx *ctx = cb_arg;
4331 	uint64_t lba;
4332 
4333 	assert(ctx->cur_page < ctx->super->md_len);
4334 	lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->md_start + ctx->cur_page);
4335 	spdk_bs_sequence_read_dev(seq, ctx->page, lba,
4336 				  _spdk_bs_byte_to_lba(ctx->bs, SPDK_BS_PAGE_SIZE),
4337 				  _spdk_bs_dump_read_md_page_cpl, ctx);
4338 }
4339 
4340 static void
4341 _spdk_bs_dump_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4342 {
4343 	struct spdk_bs_dump_ctx *ctx = cb_arg;
4344 
4345 	fprintf(ctx->fp, "Signature: \"%.8s\" ", ctx->super->signature);
4346 	if (memcmp(ctx->super->signature, SPDK_BS_SUPER_BLOCK_SIG,
4347 		   sizeof(ctx->super->signature)) != 0) {
4348 		fprintf(ctx->fp, "(Mismatch)\n");
4349 		_spdk_bs_dump_finish(seq, ctx, bserrno);
4350 		return;
4351 	} else {
4352 		fprintf(ctx->fp, "(OK)\n");
4353 	}
4354 	fprintf(ctx->fp, "Version: %" PRIu32 "\n", ctx->super->version);
4355 	fprintf(ctx->fp, "CRC: 0x%x (%s)\n", ctx->super->crc,
4356 		(ctx->super->crc == _spdk_blob_md_page_calc_crc(ctx->super)) ? "OK" : "Mismatch");
4357 	fprintf(ctx->fp, "Blobstore Type: %.*s\n", SPDK_BLOBSTORE_TYPE_LENGTH, ctx->super->bstype.bstype);
4358 	fprintf(ctx->fp, "Cluster Size: %" PRIu32 "\n", ctx->super->cluster_size);
4359 	fprintf(ctx->fp, "Super Blob ID: ");
4360 	if (ctx->super->super_blob == SPDK_BLOBID_INVALID) {
4361 		fprintf(ctx->fp, "(None)\n");
4362 	} else {
4363 		fprintf(ctx->fp, "%" PRIu64 "\n", ctx->super->super_blob);
4364 	}
4365 	fprintf(ctx->fp, "Clean: %" PRIu32 "\n", ctx->super->clean);
4366 	fprintf(ctx->fp, "Used Metadata Page Mask Start: %" PRIu32 "\n", ctx->super->used_page_mask_start);
4367 	fprintf(ctx->fp, "Used Metadata Page Mask Length: %" PRIu32 "\n", ctx->super->used_page_mask_len);
4368 	fprintf(ctx->fp, "Used Cluster Mask Start: %" PRIu32 "\n", ctx->super->used_cluster_mask_start);
4369 	fprintf(ctx->fp, "Used Cluster Mask Length: %" PRIu32 "\n", ctx->super->used_cluster_mask_len);
4370 	fprintf(ctx->fp, "Used Blob ID Mask Start: %" PRIu32 "\n", ctx->super->used_blobid_mask_start);
4371 	fprintf(ctx->fp, "Used Blob ID Mask Length: %" PRIu32 "\n", ctx->super->used_blobid_mask_len);
4372 	fprintf(ctx->fp, "Metadata Start: %" PRIu32 "\n", ctx->super->md_start);
4373 	fprintf(ctx->fp, "Metadata Length: %" PRIu32 "\n", ctx->super->md_len);
4374 
4375 	ctx->cur_page = 0;
4376 	ctx->page = spdk_zmalloc(SPDK_BS_PAGE_SIZE, SPDK_BS_PAGE_SIZE,
4377 				 NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
4378 	if (!ctx->page) {
4379 		_spdk_bs_dump_finish(seq, ctx, -ENOMEM);
4380 		return;
4381 	}
4382 	_spdk_bs_dump_read_md_page(seq, ctx);
4383 }
4384 
4385 void
4386 spdk_bs_dump(struct spdk_bs_dev *dev, FILE *fp, spdk_bs_dump_print_xattr print_xattr_fn,
4387 	     spdk_bs_op_complete cb_fn, void *cb_arg)
4388 {
4389 	struct spdk_blob_store	*bs;
4390 	struct spdk_bs_cpl	cpl;
4391 	spdk_bs_sequence_t	*seq;
4392 	struct spdk_bs_dump_ctx *ctx;
4393 	struct spdk_bs_opts	opts = {};
4394 	int err;
4395 
4396 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Dumping blobstore from dev %p\n", dev);
4397 
4398 	spdk_bs_opts_init(&opts);
4399 
4400 	err = _spdk_bs_alloc(dev, &opts, &bs);
4401 	if (err) {
4402 		dev->destroy(dev);
4403 		cb_fn(cb_arg, err);
4404 		return;
4405 	}
4406 
4407 	ctx = calloc(1, sizeof(*ctx));
4408 	if (!ctx) {
4409 		_spdk_bs_free(bs);
4410 		cb_fn(cb_arg, -ENOMEM);
4411 		return;
4412 	}
4413 
4414 	ctx->bs = bs;
4415 	ctx->fp = fp;
4416 	ctx->print_xattr_fn = print_xattr_fn;
4417 
4418 	/* Allocate memory for the super block */
4419 	ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL,
4420 				  SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
4421 	if (!ctx->super) {
4422 		free(ctx);
4423 		_spdk_bs_free(bs);
4424 		cb_fn(cb_arg, -ENOMEM);
4425 		return;
4426 	}
4427 
4428 	cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
4429 	cpl.u.bs_basic.cb_fn = cb_fn;
4430 	cpl.u.bs_basic.cb_arg = cb_arg;
4431 
4432 	seq = spdk_bs_sequence_start(bs->md_channel, &cpl);
4433 	if (!seq) {
4434 		spdk_free(ctx->super);
4435 		free(ctx);
4436 		_spdk_bs_free(bs);
4437 		cb_fn(cb_arg, -ENOMEM);
4438 		return;
4439 	}
4440 
4441 	/* Read the super block */
4442 	spdk_bs_sequence_read_dev(seq, ctx->super, _spdk_bs_page_to_lba(bs, 0),
4443 				  _spdk_bs_byte_to_lba(bs, sizeof(*ctx->super)),
4444 				  _spdk_bs_dump_super_cpl, ctx);
4445 }
4446 
4447 /* END spdk_bs_dump */
4448 
4449 /* START spdk_bs_init */
4450 
4451 struct spdk_bs_init_ctx {
4452 	struct spdk_blob_store		*bs;
4453 	struct spdk_bs_super_block	*super;
4454 };
4455 
4456 static void
4457 _spdk_bs_init_persist_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4458 {
4459 	struct spdk_bs_init_ctx *ctx = cb_arg;
4460 
4461 	spdk_free(ctx->super);
4462 	free(ctx);
4463 
4464 	spdk_bs_sequence_finish(seq, bserrno);
4465 }
4466 
4467 static void
4468 _spdk_bs_init_trim_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4469 {
4470 	struct spdk_bs_init_ctx *ctx = cb_arg;
4471 
4472 	/* Write super block */
4473 	spdk_bs_sequence_write_dev(seq, ctx->super, _spdk_bs_page_to_lba(ctx->bs, 0),
4474 				   _spdk_bs_byte_to_lba(ctx->bs, sizeof(*ctx->super)),
4475 				   _spdk_bs_init_persist_super_cpl, ctx);
4476 }
4477 
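/* Initialize a new blobstore on dev. The on-disk layout written here is the
 * super block page, followed by the used_md_pages, used_clusters and
 * used_blobids masks, the metadata region itself, and finally the data
 * clusters. The metadata space is zeroed and the data clusters are cleared
 * according to opts.clear_method. */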
4478 void
4479 spdk_bs_init(struct spdk_bs_dev *dev, struct spdk_bs_opts *o,
4480 	     spdk_bs_op_with_handle_complete cb_fn, void *cb_arg)
4481 {
4482 	struct spdk_bs_init_ctx *ctx;
4483 	struct spdk_blob_store	*bs;
4484 	struct spdk_bs_cpl	cpl;
4485 	spdk_bs_sequence_t	*seq;
4486 	spdk_bs_batch_t		*batch;
4487 	uint64_t		num_md_lba;
4488 	uint64_t		num_md_pages;
4489 	uint64_t		num_md_clusters;
4490 	uint32_t		i;
4491 	struct spdk_bs_opts	opts = {};
4492 	int			rc;
4493 
4494 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Initializing blobstore on dev %p\n", dev);
4495 
4496 	if ((SPDK_BS_PAGE_SIZE % dev->blocklen) != 0) {
4497 		SPDK_ERRLOG("unsupported dev block length of %d\n",
4498 			    dev->blocklen);
4499 		dev->destroy(dev);
4500 		cb_fn(cb_arg, NULL, -EINVAL);
4501 		return;
4502 	}
4503 
4504 	if (o) {
4505 		opts = *o;
4506 	} else {
4507 		spdk_bs_opts_init(&opts);
4508 	}
4509 
4510 	if (_spdk_bs_opts_verify(&opts) != 0) {
4511 		dev->destroy(dev);
4512 		cb_fn(cb_arg, NULL, -EINVAL);
4513 		return;
4514 	}
4515 
4516 	rc = _spdk_bs_alloc(dev, &opts, &bs);
4517 	if (rc) {
4518 		dev->destroy(dev);
4519 		cb_fn(cb_arg, NULL, rc);
4520 		return;
4521 	}
4522 
4523 	if (opts.num_md_pages == SPDK_BLOB_OPTS_NUM_MD_PAGES) {
4524 		/* By default, allocate 1 page per cluster.
4525 		 * Technically, this over-allocates metadata
4526 		 * because more metadata will reduce the number
4527 		 * of usable clusters. This can be addressed with
4528 		 * more complex math in the future.
4529 		 */
4530 		bs->md_len = bs->total_clusters;
4531 	} else {
4532 		bs->md_len = opts.num_md_pages;
4533 	}
4534 	rc = spdk_bit_array_resize(&bs->used_md_pages, bs->md_len);
4535 	if (rc < 0) {
4536 		_spdk_bs_free(bs);
4537 		cb_fn(cb_arg, NULL, -ENOMEM);
4538 		return;
4539 	}
4540 
4541 	rc = spdk_bit_array_resize(&bs->used_blobids, bs->md_len);
4542 	if (rc < 0) {
4543 		_spdk_bs_free(bs);
4544 		cb_fn(cb_arg, NULL, -ENOMEM);
4545 		return;
4546 	}
4547 
4548 	ctx = calloc(1, sizeof(*ctx));
4549 	if (!ctx) {
4550 		_spdk_bs_free(bs);
4551 		cb_fn(cb_arg, NULL, -ENOMEM);
4552 		return;
4553 	}
4554 
4555 	ctx->bs = bs;
4556 
4557 	/* Allocate memory for the super block */
4558 	ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL,
4559 				  SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
4560 	if (!ctx->super) {
4561 		free(ctx);
4562 		_spdk_bs_free(bs);
4563 		cb_fn(cb_arg, NULL, -ENOMEM);
4564 		return;
4565 	}
4566 	memcpy(ctx->super->signature, SPDK_BS_SUPER_BLOCK_SIG,
4567 	       sizeof(ctx->super->signature));
4568 	ctx->super->version = SPDK_BS_VERSION;
4569 	ctx->super->length = sizeof(*ctx->super);
4570 	ctx->super->super_blob = bs->super_blob;
4571 	ctx->super->clean = 0;
4572 	ctx->super->cluster_size = bs->cluster_sz;
4573 	ctx->super->io_unit_size = bs->io_unit_size;
4574 	memcpy(&ctx->super->bstype, &bs->bstype, sizeof(bs->bstype));
4575 
4576 	/* Calculate how many pages the metadata consumes at the front
4577 	 * of the disk.
4578 	 */
4579 
4580 	/* The super block uses 1 page */
4581 	num_md_pages = 1;
4582 
4583 	/* The used_md_pages mask requires 1 bit per metadata page, rounded
4584 	 * up to the nearest page, plus a header.
4585 	 */
4586 	ctx->super->used_page_mask_start = num_md_pages;
4587 	ctx->super->used_page_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) +
4588 					 spdk_divide_round_up(bs->md_len, 8),
4589 					 SPDK_BS_PAGE_SIZE);
4590 	num_md_pages += ctx->super->used_page_mask_len;
4591 
4592 	/* The used_clusters mask requires 1 bit per cluster, rounded
4593 	 * up to the nearest page, plus a header.
4594 	 */
4595 	ctx->super->used_cluster_mask_start = num_md_pages;
4596 	ctx->super->used_cluster_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) +
4597 					    spdk_divide_round_up(bs->total_clusters, 8),
4598 					    SPDK_BS_PAGE_SIZE);
4599 	num_md_pages += ctx->super->used_cluster_mask_len;
4600 
4601 	/* The used_blobids mask requires 1 bit per metadata page, rounded
4602 	 * up to the nearest page, plus a header.
4603 	 */
4604 	ctx->super->used_blobid_mask_start = num_md_pages;
4605 	ctx->super->used_blobid_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) +
4606 					   spdk_divide_round_up(bs->md_len, 8),
4607 					   SPDK_BS_PAGE_SIZE);
4608 	num_md_pages += ctx->super->used_blobid_mask_len;
4609 
4610 	/* The metadata region size was chosen above */
4611 	ctx->super->md_start = bs->md_start = num_md_pages;
4612 	ctx->super->md_len = bs->md_len;
4613 	num_md_pages += bs->md_len;
4614 
4615 	num_md_lba = _spdk_bs_page_to_lba(bs, num_md_pages);
4616 
4617 	ctx->super->size = dev->blockcnt * dev->blocklen;
4618 
4619 	ctx->super->crc = _spdk_blob_md_page_calc_crc(ctx->super);
4620 
4621 	num_md_clusters = spdk_divide_round_up(num_md_pages, bs->pages_per_cluster);
4622 	if (num_md_clusters > bs->total_clusters) {
4623 		SPDK_ERRLOG("Blobstore metadata cannot use more clusters than is available, "
4624 			    "please decrease number of pages reserved for metadata "
4625 			    "or increase cluster size.\n");
4626 		spdk_free(ctx->super);
4627 		free(ctx);
4628 		_spdk_bs_free(bs);
4629 		cb_fn(cb_arg, NULL, -ENOMEM);
4630 		return;
4631 	}
4632 	/* Claim all of the clusters used by the metadata */
4633 	for (i = 0; i < num_md_clusters; i++) {
4634 		_spdk_bs_claim_cluster(bs, i);
4635 	}
4636 
4637 	bs->total_data_clusters = bs->num_free_clusters;
4638 
4639 	cpl.type = SPDK_BS_CPL_TYPE_BS_HANDLE;
4640 	cpl.u.bs_handle.cb_fn = cb_fn;
4641 	cpl.u.bs_handle.cb_arg = cb_arg;
4642 	cpl.u.bs_handle.bs = bs;
4643 
4644 	seq = spdk_bs_sequence_start(bs->md_channel, &cpl);
4645 	if (!seq) {
4646 		spdk_free(ctx->super);
4647 		free(ctx);
4648 		_spdk_bs_free(bs);
4649 		cb_fn(cb_arg, NULL, -ENOMEM);
4650 		return;
4651 	}
4652 
4653 	batch = spdk_bs_sequence_to_batch(seq, _spdk_bs_init_trim_cpl, ctx);
4654 
4655 	/* Clear metadata space */
4656 	spdk_bs_batch_write_zeroes_dev(batch, 0, num_md_lba);
4657 
4658 	switch (opts.clear_method) {
4659 	case BS_CLEAR_WITH_UNMAP:
4660 		/* Trim data clusters */
4661 		spdk_bs_batch_unmap_dev(batch, num_md_lba, ctx->bs->dev->blockcnt - num_md_lba);
4662 		break;
4663 	case BS_CLEAR_WITH_WRITE_ZEROES:
4664 		/* Write_zeroes to data clusters */
4665 		spdk_bs_batch_write_zeroes_dev(batch, num_md_lba, ctx->bs->dev->blockcnt - num_md_lba);
4666 		break;
4667 	case BS_CLEAR_WITH_NONE:
4668 	default:
4669 		break;
4670 	}
4671 
4672 	spdk_bs_batch_close(batch);
4673 }
4674 
4675 /* END spdk_bs_init */
4676 
4677 /* START spdk_bs_destroy */
4678 
4679 static void
4680 _spdk_bs_destroy_trim_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4681 {
4682 	struct spdk_bs_init_ctx *ctx = cb_arg;
4683 	struct spdk_blob_store *bs = ctx->bs;
4684 
4685 	/*
4686 	 * We need to defer calling spdk_bs_call_cpl() until after
4687 	 * dev destruction, so tuck these away for later use.
4688 	 */
4689 	bs->unload_err = bserrno;
4690 	memcpy(&bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl));
4691 	seq->cpl.type = SPDK_BS_CPL_TYPE_NONE;
4692 
4693 	spdk_bs_sequence_finish(seq, bserrno);
4694 
4695 	_spdk_bs_free(bs);
4696 	free(ctx);
4697 }
4698 
4699 void
4700 spdk_bs_destroy(struct spdk_blob_store *bs, spdk_bs_op_complete cb_fn,
4701 		void *cb_arg)
4702 {
4703 	struct spdk_bs_cpl	cpl;
4704 	spdk_bs_sequence_t	*seq;
4705 	struct spdk_bs_init_ctx *ctx;
4706 
4707 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Destroying blobstore\n");
4708 
4709 	if (!TAILQ_EMPTY(&bs->blobs)) {
4710 		SPDK_ERRLOG("Blobstore still has open blobs\n");
4711 		cb_fn(cb_arg, -EBUSY);
4712 		return;
4713 	}
4714 
4715 	cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
4716 	cpl.u.bs_basic.cb_fn = cb_fn;
4717 	cpl.u.bs_basic.cb_arg = cb_arg;
4718 
4719 	ctx = calloc(1, sizeof(*ctx));
4720 	if (!ctx) {
4721 		cb_fn(cb_arg, -ENOMEM);
4722 		return;
4723 	}
4724 
4725 	ctx->bs = bs;
4726 
4727 	seq = spdk_bs_sequence_start(bs->md_channel, &cpl);
4728 	if (!seq) {
4729 		free(ctx);
4730 		cb_fn(cb_arg, -ENOMEM);
4731 		return;
4732 	}
4733 
4734 	/* Write zeroes to the super block */
4735 	spdk_bs_sequence_write_zeroes_dev(seq,
4736 					  _spdk_bs_page_to_lba(bs, 0),
4737 					  _spdk_bs_byte_to_lba(bs, sizeof(struct spdk_bs_super_block)),
4738 					  _spdk_bs_destroy_trim_cpl, ctx);
4739 }
4740 
4741 /* END spdk_bs_destroy */
4742 
4743 /* START spdk_bs_unload */
4744 
4745 static void
4746 _spdk_bs_unload_finish(struct spdk_bs_load_ctx *ctx, int bserrno)
4747 {
4748 	spdk_bs_sequence_t *seq = ctx->seq;
4749 
4750 	spdk_free(ctx->super);
4751 
4752 	/*
4753 	 * We need to defer calling spdk_bs_call_cpl() until after
4754 	 * dev destruction, so tuck these away for later use.
4755 	 */
4756 	ctx->bs->unload_err = bserrno;
4757 	memcpy(&ctx->bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl));
4758 	seq->cpl.type = SPDK_BS_CPL_TYPE_NONE;
4759 
4760 	spdk_bs_sequence_finish(seq, bserrno);
4761 
4762 	_spdk_bs_free(ctx->bs);
4763 	free(ctx);
4764 }
4765 
4766 static void
4767 _spdk_bs_unload_write_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4768 {
4769 	struct spdk_bs_load_ctx	*ctx = cb_arg;
4770 
4771 	_spdk_bs_unload_finish(ctx, bserrno);
4772 }
4773 
4774 static void
4775 _spdk_bs_unload_write_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4776 {
4777 	struct spdk_bs_load_ctx	*ctx = cb_arg;
4778 
4779 	spdk_free(ctx->mask);
4780 
4781 	if (bserrno != 0) {
4782 		_spdk_bs_unload_finish(ctx, bserrno);
4783 		return;
4784 	}
4785 
4786 	ctx->super->clean = 1;
4787 
4788 	_spdk_bs_write_super(seq, ctx->bs, ctx->super, _spdk_bs_unload_write_super_cpl, ctx);
4789 }
4790 
4791 static void
4792 _spdk_bs_unload_write_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4793 {
4794 	struct spdk_bs_load_ctx	*ctx = cb_arg;
4795 
4796 	spdk_free(ctx->mask);
4797 	ctx->mask = NULL;
4798 
4799 	if (bserrno != 0) {
4800 		_spdk_bs_unload_finish(ctx, bserrno);
4801 		return;
4802 	}
4803 
4804 	_spdk_bs_write_used_clusters(seq, ctx, _spdk_bs_unload_write_used_clusters_cpl);
4805 }
4806 
4807 static void
4808 _spdk_bs_unload_write_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4809 {
4810 	struct spdk_bs_load_ctx	*ctx = cb_arg;
4811 
4812 	spdk_free(ctx->mask);
4813 	ctx->mask = NULL;
4814 
4815 	if (bserrno != 0) {
4816 		_spdk_bs_unload_finish(ctx, bserrno);
4817 		return;
4818 	}
4819 
4820 	_spdk_bs_write_used_blobids(seq, ctx, _spdk_bs_unload_write_used_blobids_cpl);
4821 }
4822 
4823 static void
4824 _spdk_bs_unload_read_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4825 {
4826 	struct spdk_bs_load_ctx	*ctx = cb_arg;
4827 
4828 	if (bserrno != 0) {
4829 		_spdk_bs_unload_finish(ctx, bserrno);
4830 		return;
4831 	}
4832 
4833 	_spdk_bs_write_used_md(seq, cb_arg, _spdk_bs_unload_write_used_pages_cpl);
4834 }
4835 
4836 void
4837 spdk_bs_unload(struct spdk_blob_store *bs, spdk_bs_op_complete cb_fn, void *cb_arg)
4838 {
4839 	struct spdk_bs_cpl	cpl;
4840 	struct spdk_bs_load_ctx *ctx;
4841 
4842 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Syncing blobstore\n");
4843 
4844 	if (!TAILQ_EMPTY(&bs->blobs)) {
4845 		SPDK_ERRLOG("Blobstore still has open blobs\n");
4846 		cb_fn(cb_arg, -EBUSY);
4847 		return;
4848 	}
4849 
4850 	ctx = calloc(1, sizeof(*ctx));
4851 	if (!ctx) {
4852 		cb_fn(cb_arg, -ENOMEM);
4853 		return;
4854 	}
4855 
4856 	ctx->bs = bs;
4857 
4858 	ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL,
4859 				  SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
4860 	if (!ctx->super) {
4861 		free(ctx);
4862 		cb_fn(cb_arg, -ENOMEM);
4863 		return;
4864 	}
4865 
4866 	cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
4867 	cpl.u.bs_basic.cb_fn = cb_fn;
4868 	cpl.u.bs_basic.cb_arg = cb_arg;
4869 
4870 	ctx->seq = spdk_bs_sequence_start(bs->md_channel, &cpl);
4871 	if (!ctx->seq) {
4872 		spdk_free(ctx->super);
4873 		free(ctx);
4874 		cb_fn(cb_arg, -ENOMEM);
4875 		return;
4876 	}
4877 
4878 	/* Read super block */
4879 	spdk_bs_sequence_read_dev(ctx->seq, ctx->super, _spdk_bs_page_to_lba(bs, 0),
4880 				  _spdk_bs_byte_to_lba(bs, sizeof(*ctx->super)),
4881 				  _spdk_bs_unload_read_super_cpl, ctx);
4882 }
4883 
4884 /* END spdk_bs_unload */
4885 
4886 /* START spdk_bs_set_super */
4887 
4888 struct spdk_bs_set_super_ctx {
4889 	struct spdk_blob_store		*bs;
4890 	struct spdk_bs_super_block	*super;
4891 };
4892 
4893 static void
4894 _spdk_bs_set_super_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4895 {
4896 	struct spdk_bs_set_super_ctx	*ctx = cb_arg;
4897 
4898 	if (bserrno != 0) {
4899 		SPDK_ERRLOG("Unable to write to super block of blobstore\n");
4900 	}
4901 
4902 	spdk_free(ctx->super);
4903 
4904 	spdk_bs_sequence_finish(seq, bserrno);
4905 
4906 	free(ctx);
4907 }
4908 
4909 static void
4910 _spdk_bs_set_super_read_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4911 {
4912 	struct spdk_bs_set_super_ctx	*ctx = cb_arg;
4913 
4914 	if (bserrno != 0) {
4915 		SPDK_ERRLOG("Unable to read super block of blobstore\n");
4916 		spdk_free(ctx->super);
4917 		spdk_bs_sequence_finish(seq, bserrno);
4918 		free(ctx);
4919 		return;
4920 	}
4921 
4922 	_spdk_bs_write_super(seq, ctx->bs, ctx->super, _spdk_bs_set_super_write_cpl, ctx);
4923 }
4924 
4925 void
4926 spdk_bs_set_super(struct spdk_blob_store *bs, spdk_blob_id blobid,
4927 		  spdk_bs_op_complete cb_fn, void *cb_arg)
4928 {
4929 	struct spdk_bs_cpl		cpl;
4930 	spdk_bs_sequence_t		*seq;
4931 	struct spdk_bs_set_super_ctx	*ctx;
4932 
4933 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Setting super blob id on blobstore\n");
4934 
4935 	ctx = calloc(1, sizeof(*ctx));
4936 	if (!ctx) {
4937 		cb_fn(cb_arg, -ENOMEM);
4938 		return;
4939 	}
4940 
4941 	ctx->bs = bs;
4942 
4943 	ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL,
4944 				  SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
4945 	if (!ctx->super) {
4946 		free(ctx);
4947 		cb_fn(cb_arg, -ENOMEM);
4948 		return;
4949 	}
4950 
4951 	cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
4952 	cpl.u.bs_basic.cb_fn = cb_fn;
4953 	cpl.u.bs_basic.cb_arg = cb_arg;
4954 
4955 	seq = spdk_bs_sequence_start(bs->md_channel, &cpl);
4956 	if (!seq) {
4957 		spdk_free(ctx->super);
4958 		free(ctx);
4959 		cb_fn(cb_arg, -ENOMEM);
4960 		return;
4961 	}
4962 
4963 	bs->super_blob = blobid;
4964 
4965 	/* Read super block */
4966 	spdk_bs_sequence_read_dev(seq, ctx->super, _spdk_bs_page_to_lba(bs, 0),
4967 				  _spdk_bs_byte_to_lba(bs, sizeof(*ctx->super)),
4968 				  _spdk_bs_set_super_read_cpl, ctx);
4969 }
4970 
4971 /* END spdk_bs_set_super */
4972 
4973 void
4974 spdk_bs_get_super(struct spdk_blob_store *bs,
4975 		  spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
4976 {
4977 	if (bs->super_blob == SPDK_BLOBID_INVALID) {
4978 		cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOENT);
4979 	} else {
4980 		cb_fn(cb_arg, bs->super_blob, 0);
4981 	}
4982 }
4983 
4984 uint64_t
4985 spdk_bs_get_cluster_size(struct spdk_blob_store *bs)
4986 {
4987 	return bs->cluster_sz;
4988 }
4989 
4990 uint64_t
4991 spdk_bs_get_page_size(struct spdk_blob_store *bs)
4992 {
4993 	return SPDK_BS_PAGE_SIZE;
4994 }
4995 
4996 uint64_t
4997 spdk_bs_get_io_unit_size(struct spdk_blob_store *bs)
4998 {
4999 	return bs->io_unit_size;
5000 }
5001 
5002 uint64_t
5003 spdk_bs_free_cluster_count(struct spdk_blob_store *bs)
5004 {
5005 	return bs->num_free_clusters;
5006 }
5007 
5008 uint64_t
5009 spdk_bs_total_data_cluster_count(struct spdk_blob_store *bs)
5010 {
5011 	return bs->total_data_clusters;
5012 }
5013 
5014 static int
5015 spdk_bs_register_md_thread(struct spdk_blob_store *bs)
5016 {
5017 	bs->md_channel = spdk_get_io_channel(bs);
5018 	if (!bs->md_channel) {
5019 		SPDK_ERRLOG("Failed to get IO channel.\n");
5020 		return -1;
5021 	}
5022 
5023 	return 0;
5024 }
5025 
5026 static int
5027 spdk_bs_unregister_md_thread(struct spdk_blob_store *bs)
5028 {
5029 	spdk_put_io_channel(bs->md_channel);
5030 
5031 	return 0;
5032 }
5033 
5034 spdk_blob_id spdk_blob_get_id(struct spdk_blob *blob)
5035 {
5036 	assert(blob != NULL);
5037 
5038 	return blob->id;
5039 }
5040 
5041 uint64_t spdk_blob_get_num_pages(struct spdk_blob *blob)
5042 {
5043 	assert(blob != NULL);
5044 
5045 	return _spdk_bs_cluster_to_page(blob->bs, blob->active.num_clusters);
5046 }
5047 
5048 uint64_t spdk_blob_get_num_io_units(struct spdk_blob *blob)
5049 {
5050 	assert(blob != NULL);
5051 
5052 	return spdk_blob_get_num_pages(blob) * _spdk_bs_io_unit_per_page(blob->bs);
5053 }
5054 
5055 uint64_t spdk_blob_get_num_clusters(struct spdk_blob *blob)
5056 {
5057 	assert(blob != NULL);
5058 
5059 	return blob->active.num_clusters;
5060 }
5061 
5062 /* START spdk_bs_create_blob */
5063 
5064 static void
5065 _spdk_bs_create_blob_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
5066 {
5067 	struct spdk_blob *blob = cb_arg;
5068 
5069 	_spdk_blob_free(blob);
5070 
5071 	spdk_bs_sequence_finish(seq, bserrno);
5072 }
5073 
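/* Apply every xattr described by the xattrs structure to a newly created blob,
 * fetching each value through the caller-provided get_value callback. */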
5074 static int
5075 _spdk_blob_set_xattrs(struct spdk_blob *blob, const struct spdk_blob_xattr_opts *xattrs,
5076 		      bool internal)
5077 {
5078 	uint64_t i;
5079 	size_t value_len = 0;
5080 	int rc;
5081 	const void *value = NULL;
5082 	if (xattrs->count > 0 && xattrs->get_value == NULL) {
5083 		return -EINVAL;
5084 	}
5085 	for (i = 0; i < xattrs->count; i++) {
5086 		xattrs->get_value(xattrs->ctx, xattrs->names[i], &value, &value_len);
5087 		if (value == NULL || value_len == 0) {
5088 			return -EINVAL;
5089 		}
5090 		rc = _spdk_blob_set_xattr(blob, xattrs->names[i], value, value_len, internal);
5091 		if (rc < 0) {
5092 			return rc;
5093 		}
5094 	}
5095 	return 0;
5096 }
5097 
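/* Allocate a free metadata page for the new blob id, build the in-memory blob
 * (xattrs, clear method, thin provisioning, initial size) and persist its
 * metadata. */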
5098 static void
5099 _spdk_bs_create_blob(struct spdk_blob_store *bs,
5100 		     const struct spdk_blob_opts *opts,
5101 		     const struct spdk_blob_xattr_opts *internal_xattrs,
5102 		     spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
5103 {
5104 	struct spdk_blob	*blob;
5105 	uint32_t		page_idx;
5106 	struct spdk_bs_cpl	cpl;
5107 	struct spdk_blob_opts	opts_default;
5108 	struct spdk_blob_xattr_opts internal_xattrs_default;
5109 	spdk_bs_sequence_t	*seq;
5110 	spdk_blob_id		id;
5111 	int rc;
5112 
5113 	assert(spdk_get_thread() == bs->md_thread);
5114 
5115 	page_idx = spdk_bit_array_find_first_clear(bs->used_md_pages, 0);
5116 	if (page_idx == UINT32_MAX) {
5117 		cb_fn(cb_arg, 0, -ENOMEM);
5118 		return;
5119 	}
5120 	spdk_bit_array_set(bs->used_blobids, page_idx);
5121 	_spdk_bs_claim_md_page(bs, page_idx);
5122 
5123 	id = _spdk_bs_page_to_blobid(page_idx);
5124 
5125 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Creating blob with id %lu at page %u\n", id, page_idx);
5126 
5127 	blob = _spdk_blob_alloc(bs, id);
5128 	if (!blob) {
5129 		cb_fn(cb_arg, 0, -ENOMEM);
5130 		return;
5131 	}
5132 
5133 	if (!opts) {
5134 		spdk_blob_opts_init(&opts_default);
5135 		opts = &opts_default;
5136 	}
5137 
5138 	blob->use_extent_table = opts->use_extent_table;
5139 
5140 	if (!internal_xattrs) {
5141 		_spdk_blob_xattrs_init(&internal_xattrs_default);
5142 		internal_xattrs = &internal_xattrs_default;
5143 	}
5144 
5145 	rc = _spdk_blob_set_xattrs(blob, &opts->xattrs, false);
5146 	if (rc < 0) {
5147 		_spdk_blob_free(blob);
5148 		cb_fn(cb_arg, 0, rc);
5149 		return;
5150 	}
5151 
5152 	rc = _spdk_blob_set_xattrs(blob, internal_xattrs, true);
5153 	if (rc < 0) {
5154 		_spdk_blob_free(blob);
5155 		cb_fn(cb_arg, 0, rc);
5156 		return;
5157 	}
5158 
5159 	if (opts->thin_provision) {
5160 		_spdk_blob_set_thin_provision(blob);
5161 	}
5162 
5163 	_spdk_blob_set_clear_method(blob, opts->clear_method);
5164 
5165 	rc = _spdk_blob_resize(blob, opts->num_clusters);
5166 	if (rc < 0) {
5167 		_spdk_blob_free(blob);
5168 		cb_fn(cb_arg, 0, rc);
5169 		return;
5170 	}
5171 	cpl.type = SPDK_BS_CPL_TYPE_BLOBID;
5172 	cpl.u.blobid.cb_fn = cb_fn;
5173 	cpl.u.blobid.cb_arg = cb_arg;
5174 	cpl.u.blobid.blobid = blob->id;
5175 
5176 	seq = spdk_bs_sequence_start(bs->md_channel, &cpl);
5177 	if (!seq) {
5178 		_spdk_blob_free(blob);
5179 		cb_fn(cb_arg, 0, -ENOMEM);
5180 		return;
5181 	}
5182 
5183 	_spdk_blob_persist(seq, blob, _spdk_bs_create_blob_cpl, blob);
5184 }
5185 
5186 void spdk_bs_create_blob(struct spdk_blob_store *bs,
5187 			 spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
5188 {
5189 	_spdk_bs_create_blob(bs, NULL, NULL, cb_fn, cb_arg);
5190 }
5191 
5192 void spdk_bs_create_blob_ext(struct spdk_blob_store *bs, const struct spdk_blob_opts *opts,
5193 			     spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
5194 {
5195 	_spdk_bs_create_blob(bs, opts, NULL, cb_fn, cb_arg);
5196 }
5197 
5198 /* END spdk_bs_create_blob */
5199 
5200 /* START blob_cleanup */
5201 
5202 struct spdk_clone_snapshot_ctx {
5203 	struct spdk_bs_cpl      cpl;
5204 	int bserrno;
5205 	bool frozen;
5206 
5207 	struct spdk_io_channel *channel;
5208 
5209 	/* Current cluster for inflate operation */
5210 	uint64_t cluster;
5211 
5212 	/* For inflation, force allocation of all unallocated clusters and remove
5213 	 * thin-provisioning. Otherwise, only decouple the parent and keep the clone thin. */
5214 	bool allocate_all;
5215 
5216 	struct {
5217 		spdk_blob_id id;
5218 		struct spdk_blob *blob;
5219 	} original;
5220 	struct {
5221 		spdk_blob_id id;
5222 		struct spdk_blob *blob;
5223 	} new;
5224 
5225 	/* xattrs specified for snapshot/clones only. They have no impact on
5226 	 * the original blob's xattrs. */
5227 	const struct spdk_blob_xattr_opts *xattrs;
5228 };
5229 
5230 static void
5231 _spdk_bs_clone_snapshot_cleanup_finish(void *cb_arg, int bserrno)
5232 {
5233 	struct spdk_clone_snapshot_ctx *ctx = cb_arg;
5234 	struct spdk_bs_cpl *cpl = &ctx->cpl;
5235 
5236 	if (bserrno != 0) {
5237 		if (ctx->bserrno != 0) {
5238 			SPDK_ERRLOG("Cleanup error %d\n", bserrno);
5239 		} else {
5240 			ctx->bserrno = bserrno;
5241 		}
5242 	}
5243 
5244 	switch (cpl->type) {
5245 	case SPDK_BS_CPL_TYPE_BLOBID:
5246 		cpl->u.blobid.cb_fn(cpl->u.blobid.cb_arg, cpl->u.blobid.blobid, ctx->bserrno);
5247 		break;
5248 	case SPDK_BS_CPL_TYPE_BLOB_BASIC:
5249 		cpl->u.blob_basic.cb_fn(cpl->u.blob_basic.cb_arg, ctx->bserrno);
5250 		break;
5251 	default:
5252 		SPDK_UNREACHABLE();
5253 		break;
5254 	}
5255 
5256 	free(ctx);
5257 }
5258 
5259 static void
5260 _spdk_bs_snapshot_unfreeze_cpl(void *cb_arg, int bserrno)
5261 {
5262 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
5263 	struct spdk_blob *origblob = ctx->original.blob;
5264 
5265 	if (bserrno != 0) {
5266 		if (ctx->bserrno != 0) {
5267 			SPDK_ERRLOG("Unfreeze error %d\n", bserrno);
5268 		} else {
5269 			ctx->bserrno = bserrno;
5270 		}
5271 	}
5272 
5273 	ctx->original.id = origblob->id;
5274 	origblob->locked_operation_in_progress = false;
5275 
5276 	spdk_blob_close(origblob, _spdk_bs_clone_snapshot_cleanup_finish, ctx);
5277 }
5278 
5279 static void
5280 _spdk_bs_clone_snapshot_origblob_cleanup(void *cb_arg, int bserrno)
5281 {
5282 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
5283 	struct spdk_blob *origblob = ctx->original.blob;
5284 
5285 	if (bserrno != 0) {
5286 		if (ctx->bserrno != 0) {
5287 			SPDK_ERRLOG("Cleanup error %d\n", bserrno);
5288 		} else {
5289 			ctx->bserrno = bserrno;
5290 		}
5291 	}
5292 
5293 	if (ctx->frozen) {
5294 		/* Unfreeze any outstanding I/O */
5295 		_spdk_blob_unfreeze_io(origblob, _spdk_bs_snapshot_unfreeze_cpl, ctx);
5296 	} else {
5297 		_spdk_bs_snapshot_unfreeze_cpl(ctx, 0);
5298 	}
5299 
5300 }
5301 
5302 static void
5303 _spdk_bs_clone_snapshot_newblob_cleanup(void *cb_arg, int bserrno)
5304 {
5305 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
5306 	struct spdk_blob *newblob = ctx->new.blob;
5307 
5308 	if (bserrno != 0) {
5309 		if (ctx->bserrno != 0) {
5310 			SPDK_ERRLOG("Cleanup error %d\n", bserrno);
5311 		} else {
5312 			ctx->bserrno = bserrno;
5313 		}
5314 	}
5315 
5316 	ctx->new.id = newblob->id;
5317 	spdk_blob_close(newblob, _spdk_bs_clone_snapshot_origblob_cleanup, ctx);
5318 }
5319 
5320 /* END blob_cleanup */
5321 
5322 /* START spdk_bs_create_snapshot */
5323 
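/*
 * Snapshot creation flow, as implemented by the callback chain below: the
 * original blob is opened, a new thin-provisioned blob of the same size is
 * created and opened, and I/O to the original is frozen. The two cluster
 * maps are then swapped, so the new blob takes ownership of the allocated
 * clusters and becomes the snapshot, while the original becomes a thin
 * clone whose unallocated reads fall through to the snapshot. Finally both
 * blobs sync their metadata, the snapshot is marked read-only and I/O is
 * unfrozen.
 */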
5324 static void
5325 _spdk_bs_snapshot_swap_cluster_maps(struct spdk_blob *blob1, struct spdk_blob *blob2)
5326 {
5327 	uint64_t *cluster_temp;
5328 	uint32_t *extent_page_temp;
5329 
5330 	cluster_temp = blob1->active.clusters;
5331 	blob1->active.clusters = blob2->active.clusters;
5332 	blob2->active.clusters = cluster_temp;
5333 
5334 	extent_page_temp = blob1->active.extent_pages;
5335 	blob1->active.extent_pages = blob2->active.extent_pages;
5336 	blob2->active.extent_pages = extent_page_temp;
5337 }
5338 
5339 static void
5340 _spdk_bs_snapshot_origblob_sync_cpl(void *cb_arg, int bserrno)
5341 {
5342 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
5343 	struct spdk_blob *origblob = ctx->original.blob;
5344 	struct spdk_blob *newblob = ctx->new.blob;
5345 
5346 	if (bserrno != 0) {
5347 		_spdk_bs_snapshot_swap_cluster_maps(newblob, origblob);
5348 		_spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
5349 		return;
5350 	}
5351 
5352 	/* Remove metadata descriptor SNAPSHOT_IN_PROGRESS */
5353 	bserrno = _spdk_blob_remove_xattr(newblob, SNAPSHOT_IN_PROGRESS, true);
5354 	if (bserrno != 0) {
5355 		_spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
5356 		return;
5357 	}
5358 
5359 	_spdk_bs_blob_list_add(ctx->original.blob);
5360 
5361 	spdk_blob_set_read_only(newblob);
5362 
5363 	/* sync snapshot metadata */
5364 	spdk_blob_sync_md(newblob, _spdk_bs_clone_snapshot_origblob_cleanup, ctx);
5365 }
5366 
5367 static void
5368 _spdk_bs_snapshot_newblob_sync_cpl(void *cb_arg, int bserrno)
5369 {
5370 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
5371 	struct spdk_blob *origblob = ctx->original.blob;
5372 	struct spdk_blob *newblob = ctx->new.blob;
5373 
5374 	if (bserrno != 0) {
5375 		/* return cluster map back to original */
5376 		_spdk_bs_snapshot_swap_cluster_maps(newblob, origblob);
5377 		_spdk_bs_clone_snapshot_newblob_cleanup(ctx, bserrno);
5378 		return;
5379 	}
5380 
5381 	/* Set internal xattr for snapshot id */
5382 	bserrno = _spdk_blob_set_xattr(origblob, BLOB_SNAPSHOT, &newblob->id, sizeof(spdk_blob_id), true);
5383 	if (bserrno != 0) {
5384 		/* return cluster map back to original */
5385 		_spdk_bs_snapshot_swap_cluster_maps(newblob, origblob);
5386 		_spdk_bs_clone_snapshot_newblob_cleanup(ctx, bserrno);
5387 		return;
5388 	}
5389 
5390 	_spdk_bs_blob_list_remove(origblob);
5391 	origblob->parent_id = newblob->id;
5392 
5393 	/* Create new back_bs_dev for snapshot */
5394 	origblob->back_bs_dev = spdk_bs_create_blob_bs_dev(newblob);
5395 	if (origblob->back_bs_dev == NULL) {
5396 		/* return cluster map back to original */
5397 		_spdk_bs_snapshot_swap_cluster_maps(newblob, origblob);
5398 		_spdk_bs_clone_snapshot_newblob_cleanup(ctx, -EINVAL);
5399 		return;
5400 	}
5401 
5402 	/* set clone blob as thin provisioned */
5403 	_spdk_blob_set_thin_provision(origblob);
5404 
5405 	_spdk_bs_blob_list_add(newblob);
5406 
5407 	/* sync clone metadata */
5408 	spdk_blob_sync_md(origblob, _spdk_bs_snapshot_origblob_sync_cpl, ctx);
5409 }
5410 
5411 static void
5412 _spdk_bs_snapshot_freeze_cpl(void *cb_arg, int rc)
5413 {
5414 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
5415 	struct spdk_blob *origblob = ctx->original.blob;
5416 	struct spdk_blob *newblob = ctx->new.blob;
5417 	int bserrno;
5418 
5419 	if (rc != 0) {
5420 		_spdk_bs_clone_snapshot_newblob_cleanup(ctx, rc);
5421 		return;
5422 	}
5423 
5424 	ctx->frozen = true;
5425 
5426 	/* set new back_bs_dev for snapshot */
5427 	newblob->back_bs_dev = origblob->back_bs_dev;
5428 	/* Set invalid flags from origblob */
5429 	newblob->invalid_flags = origblob->invalid_flags;
5430 
5431 	/* inherit parent from original blob if set */
5432 	newblob->parent_id = origblob->parent_id;
5433 	if (origblob->parent_id != SPDK_BLOBID_INVALID) {
5434 		/* Set internal xattr for snapshot id */
5435 		bserrno = _spdk_blob_set_xattr(newblob, BLOB_SNAPSHOT,
5436 					       &origblob->parent_id, sizeof(spdk_blob_id), true);
5437 		if (bserrno != 0) {
5438 			_spdk_bs_clone_snapshot_newblob_cleanup(ctx, bserrno);
5439 			return;
5440 		}
5441 	}
5442 
5443 	/* swap cluster maps */
5444 	_spdk_bs_snapshot_swap_cluster_maps(newblob, origblob);
5445 
5446 	/* Set the clear method on the new blob to match the original. */
5447 	_spdk_blob_set_clear_method(newblob, origblob->clear_method);
5448 
5449 	/* sync snapshot metadata */
5450 	spdk_blob_sync_md(newblob, _spdk_bs_snapshot_newblob_sync_cpl, ctx);
5451 }
5452 
5453 static void
5454 _spdk_bs_snapshot_newblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
5455 {
5456 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
5457 	struct spdk_blob *origblob = ctx->original.blob;
5458 	struct spdk_blob *newblob = _blob;
5459 
5460 	if (bserrno != 0) {
5461 		_spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
5462 		return;
5463 	}
5464 
5465 	ctx->new.blob = newblob;
5466 	assert(spdk_blob_is_thin_provisioned(newblob));
5467 	assert(spdk_mem_all_zero(newblob->active.clusters,
5468 				 newblob->active.num_clusters * sizeof(*newblob->active.clusters)));
5469 	assert(spdk_mem_all_zero(newblob->active.extent_pages,
5470 				 newblob->active.num_extent_pages * sizeof(*newblob->active.extent_pages)));
5471 
5472 	_spdk_blob_freeze_io(origblob, _spdk_bs_snapshot_freeze_cpl, ctx);
5473 }
5474 
5475 static void
5476 _spdk_bs_snapshot_newblob_create_cpl(void *cb_arg, spdk_blob_id blobid, int bserrno)
5477 {
5478 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
5479 	struct spdk_blob *origblob = ctx->original.blob;
5480 
5481 	if (bserrno != 0) {
5482 		_spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
5483 		return;
5484 	}
5485 
5486 	ctx->new.id = blobid;
5487 	ctx->cpl.u.blobid.blobid = blobid;
5488 
5489 	spdk_bs_open_blob(origblob->bs, ctx->new.id, _spdk_bs_snapshot_newblob_open_cpl, ctx);
5490 }
5491 
5492 
5493 static void
5494 _spdk_bs_xattr_snapshot(void *arg, const char *name,
5495 			const void **value, size_t *value_len)
5496 {
5497 	struct spdk_blob *blob = (struct spdk_blob *)arg;
5498 
5499 	assert(strncmp(name, SNAPSHOT_IN_PROGRESS, sizeof(SNAPSHOT_IN_PROGRESS)) == 0);
5500 	*value = &blob->id;
5501 	*value_len = sizeof(blob->id);
5502 }
5503 
5504 static void
5505 _spdk_bs_snapshot_origblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
5506 {
5507 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
5508 	struct spdk_blob_opts opts;
5509 	struct spdk_blob_xattr_opts internal_xattrs;
5510 	char *xattrs_names[] = { SNAPSHOT_IN_PROGRESS };
5511 
5512 	if (bserrno != 0) {
5513 		_spdk_bs_clone_snapshot_cleanup_finish(ctx, bserrno);
5514 		return;
5515 	}
5516 
5517 	ctx->original.blob = _blob;
5518 
5519 	if (_blob->data_ro || _blob->md_ro) {
5520 		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Cannot create snapshot from read-only blob with id %lu\n",
5521 			      _blob->id);
5522 		ctx->bserrno = -EINVAL;
5523 		spdk_blob_close(_blob, _spdk_bs_clone_snapshot_cleanup_finish, ctx);
5524 		return;
5525 	}
5526 
5527 	if (_blob->locked_operation_in_progress) {
5528 		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Cannot create snapshot - another operation in progress\n");
5529 		ctx->bserrno = -EBUSY;
5530 		spdk_blob_close(_blob, _spdk_bs_clone_snapshot_cleanup_finish, ctx);
5531 		return;
5532 	}
5533 
5534 	_blob->locked_operation_in_progress = true;
5535 
5536 	spdk_blob_opts_init(&opts);
5537 	_spdk_blob_xattrs_init(&internal_xattrs);
5538 
5539 	/* Make the new blob the same size as the original blob,
5540 	 * but do not allocate clusters */
5541 	opts.thin_provision = true;
5542 	opts.num_clusters = spdk_blob_get_num_clusters(_blob);
5543 	opts.use_extent_table = _blob->use_extent_table;
5544 
5545 	/* If there are any xattrs specified for snapshot, set them now */
5546 	if (ctx->xattrs) {
5547 		memcpy(&opts.xattrs, ctx->xattrs, sizeof(*ctx->xattrs));
5548 	}
5549 	/* Set internal xattr SNAPSHOT_IN_PROGRESS */
5550 	internal_xattrs.count = 1;
5551 	internal_xattrs.ctx = _blob;
5552 	internal_xattrs.names = xattrs_names;
5553 	internal_xattrs.get_value = _spdk_bs_xattr_snapshot;
5554 
5555 	_spdk_bs_create_blob(_blob->bs, &opts, &internal_xattrs,
5556 			     _spdk_bs_snapshot_newblob_create_cpl, ctx);
5557 }
5558 
5559 void spdk_bs_create_snapshot(struct spdk_blob_store *bs, spdk_blob_id blobid,
5560 			     const struct spdk_blob_xattr_opts *snapshot_xattrs,
5561 			     spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
5562 {
5563 	struct spdk_clone_snapshot_ctx *ctx = calloc(1, sizeof(*ctx));
5564 
5565 	if (!ctx) {
5566 		cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOMEM);
5567 		return;
5568 	}
5569 	ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOBID;
5570 	ctx->cpl.u.blobid.cb_fn = cb_fn;
5571 	ctx->cpl.u.blobid.cb_arg = cb_arg;
5572 	ctx->cpl.u.blobid.blobid = SPDK_BLOBID_INVALID;
5573 	ctx->bserrno = 0;
5574 	ctx->frozen = false;
5575 	ctx->original.id = blobid;
5576 	ctx->xattrs = snapshot_xattrs;
5577 
5578 	spdk_bs_open_blob(bs, ctx->original.id, _spdk_bs_snapshot_origblob_open_cpl, ctx);
5579 }
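
/*
 * Illustrative usage sketch (not from the original source; the callback name
 * is hypothetical): take a snapshot of an existing blob. On success the new
 * snapshot's id is passed to the callback and the original blob becomes a
 * thin-provisioned clone of it.
 *
 *	static void
 *	snapshot_done(void *cb_arg, spdk_blob_id snapshot_id, int bserrno)
 *	{
 *		if (bserrno != 0) {
 *			SPDK_ERRLOG("Snapshot creation failed: %d\n", bserrno);
 *			return;
 *		}
 *	}
 *
 *	spdk_bs_create_snapshot(bs, blobid, NULL, snapshot_done, NULL);
 */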
5580 /* END spdk_bs_create_snapshot */
5581 
5582 /* START spdk_bs_create_clone */
5583 
5584 static void
5585 _spdk_bs_xattr_clone(void *arg, const char *name,
5586 		     const void **value, size_t *value_len)
5587 {
5588 	struct spdk_blob *blob = (struct spdk_blob *)arg;
5589 
5590 	assert(strncmp(name, BLOB_SNAPSHOT, sizeof(BLOB_SNAPSHOT)) == 0);
5591 	*value = &blob->id;
5592 	*value_len = sizeof(blob->id);
5593 }
5594 
5595 static void
5596 _spdk_bs_clone_newblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
5597 {
5598 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
5599 	struct spdk_blob *clone = _blob;

	if (bserrno != 0) {
		_spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
		return;
	}

	ctx->new.blob = clone;
	_spdk_bs_blob_list_add(clone);
5603 
5604 	spdk_blob_close(clone, _spdk_bs_clone_snapshot_origblob_cleanup, ctx);
5605 }
5606 
5607 static void
5608 _spdk_bs_clone_newblob_create_cpl(void *cb_arg, spdk_blob_id blobid, int bserrno)
5609 {
5610 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;

	if (bserrno != 0) {
		_spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
		return;
	}

	ctx->cpl.u.blobid.blobid = blobid;
5613 	spdk_bs_open_blob(ctx->original.blob->bs, blobid, _spdk_bs_clone_newblob_open_cpl, ctx);
5614 }
5615 
5616 static void
5617 _spdk_bs_clone_origblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
5618 {
5619 	struct spdk_clone_snapshot_ctx	*ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
5620 	struct spdk_blob_opts		opts;
5621 	struct spdk_blob_xattr_opts internal_xattrs;
5622 	char *xattr_names[] = { BLOB_SNAPSHOT };
5623 
5624 	if (bserrno != 0) {
5625 		_spdk_bs_clone_snapshot_cleanup_finish(ctx, bserrno);
5626 		return;
5627 	}
5628 
5629 	ctx->original.blob = _blob;
5630 
5631 	if (!_blob->data_ro || !_blob->md_ro) {
5632 		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Cannot create clone from a blob that is not read-only\n");
5633 		ctx->bserrno = -EINVAL;
5634 		spdk_blob_close(_blob, _spdk_bs_clone_snapshot_cleanup_finish, ctx);
5635 		return;
5636 	}
5637 
5638 	if (_blob->locked_operation_in_progress) {
5639 		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Cannot create clone - another operation in progress\n");
5640 		ctx->bserrno = -EBUSY;
5641 		spdk_blob_close(_blob, _spdk_bs_clone_snapshot_cleanup_finish, ctx);
5642 		return;
5643 	}
5644 
5645 	_blob->locked_operation_in_progress = true;
5646 
5647 	spdk_blob_opts_init(&opts);
5648 	_spdk_blob_xattrs_init(&internal_xattrs);
5649 
5650 	opts.thin_provision = true;
5651 	opts.num_clusters = spdk_blob_get_num_clusters(_blob);
5652 	opts.use_extent_table = _blob->use_extent_table;
5653 	if (ctx->xattrs) {
5654 		memcpy(&opts.xattrs, ctx->xattrs, sizeof(*ctx->xattrs));
5655 	}
5656 
5657 	/* Set internal xattr BLOB_SNAPSHOT */
5658 	internal_xattrs.count = 1;
5659 	internal_xattrs.ctx = _blob;
5660 	internal_xattrs.names = xattr_names;
5661 	internal_xattrs.get_value = _spdk_bs_xattr_clone;
5662 
5663 	_spdk_bs_create_blob(_blob->bs, &opts, &internal_xattrs,
5664 			     _spdk_bs_clone_newblob_create_cpl, ctx);
5665 }
5666 
5667 void spdk_bs_create_clone(struct spdk_blob_store *bs, spdk_blob_id blobid,
5668 			  const struct spdk_blob_xattr_opts *clone_xattrs,
5669 			  spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
5670 {
5671 	struct spdk_clone_snapshot_ctx	*ctx = calloc(1, sizeof(*ctx));
5672 
5673 	if (!ctx) {
5674 		cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOMEM);
5675 		return;
5676 	}
5677 
5678 	ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOBID;
5679 	ctx->cpl.u.blobid.cb_fn = cb_fn;
5680 	ctx->cpl.u.blobid.cb_arg = cb_arg;
5681 	ctx->cpl.u.blobid.blobid = SPDK_BLOBID_INVALID;
5682 	ctx->bserrno = 0;
5683 	ctx->xattrs = clone_xattrs;
5684 	ctx->original.id = blobid;
5685 
5686 	spdk_bs_open_blob(bs, ctx->original.id, _spdk_bs_clone_origblob_open_cpl, ctx);
5687 }
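
/*
 * Illustrative usage sketch (not from the original source; names are
 * hypothetical): create a writable clone of a snapshot. The source blob must
 * be read-only (see the data_ro/md_ro check above), so this is typically
 * called with an id returned by spdk_bs_create_snapshot().
 *
 *	spdk_bs_create_clone(bs, snapshot_id, NULL, clone_done, NULL);
 */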
5688 
5689 /* END spdk_bs_create_clone */
5690 
5691 /* START spdk_bs_inflate_blob */
5692 
5693 static void
5694 _spdk_bs_inflate_blob_set_parent_cpl(void *cb_arg, struct spdk_blob *_parent, int bserrno)
5695 {
5696 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
5697 	struct spdk_blob *_blob = ctx->original.blob;
5698 
5699 	if (bserrno != 0) {
5700 		_spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
5701 		return;
5702 	}
5703 
5704 	assert(_parent != NULL);
5705 
5706 	_spdk_bs_blob_list_remove(_blob);
5707 	_blob->parent_id = _parent->id;
5708 	_spdk_blob_set_xattr(_blob, BLOB_SNAPSHOT, &_blob->parent_id,
5709 			     sizeof(spdk_blob_id), true);
5710 
5711 	_blob->back_bs_dev->destroy(_blob->back_bs_dev);
5712 	_blob->back_bs_dev = spdk_bs_create_blob_bs_dev(_parent);
5713 	_spdk_bs_blob_list_add(_blob);
5714 
5715 	spdk_blob_sync_md(_blob, _spdk_bs_clone_snapshot_origblob_cleanup, ctx);
5716 }
5717 
5718 static void
5719 _spdk_bs_inflate_blob_done(void *cb_arg, int bserrno)
5720 {
5721 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
5722 	struct spdk_blob *_blob = ctx->original.blob;
5723 	struct spdk_blob *_parent;
5724 
5725 	if (bserrno != 0) {
5726 		_spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
5727 		return;
5728 	}
5729 
5730 	if (ctx->allocate_all) {
5731 		/* remove thin provisioning */
5732 		_spdk_bs_blob_list_remove(_blob);
5733 		_spdk_blob_remove_xattr(_blob, BLOB_SNAPSHOT, true);
5734 		_blob->invalid_flags = _blob->invalid_flags & ~SPDK_BLOB_THIN_PROV;
5735 		_blob->back_bs_dev->destroy(_blob->back_bs_dev);
5736 		_blob->back_bs_dev = NULL;
5737 		_blob->parent_id = SPDK_BLOBID_INVALID;
5738 	} else {
5739 		_parent = ((struct spdk_blob_bs_dev *)(_blob->back_bs_dev))->blob;
5740 		if (_parent->parent_id != SPDK_BLOBID_INVALID) {
5741 			/* We must change the parent of the inflated blob */
5742 			spdk_bs_open_blob(_blob->bs, _parent->parent_id,
5743 					  _spdk_bs_inflate_blob_set_parent_cpl, ctx);
5744 			return;
5745 		}
5746 
5747 		_spdk_bs_blob_list_remove(_blob);
5748 		_spdk_blob_remove_xattr(_blob, BLOB_SNAPSHOT, true);
5749 		_blob->parent_id = SPDK_BLOBID_INVALID;
5750 		_blob->back_bs_dev->destroy(_blob->back_bs_dev);
5751 		_blob->back_bs_dev = spdk_bs_create_zeroes_dev();
5752 	}
5753 
5754 	_blob->state = SPDK_BLOB_STATE_DIRTY;
5755 	spdk_blob_sync_md(_blob, _spdk_bs_clone_snapshot_origblob_cleanup, ctx);
5756 }
5757 
5758 /* Check if a cluster needs allocation when inflating (allocate_all) or decoupling the parent */
5759 static inline bool
5760 _spdk_bs_cluster_needs_allocation(struct spdk_blob *blob, uint64_t cluster, bool allocate_all)
5761 {
5762 	struct spdk_blob_bs_dev *b;
5763 
5764 	assert(blob != NULL);
5765 
5766 	if (blob->active.clusters[cluster] != 0) {
5767 		/* Cluster is already allocated */
5768 		return false;
5769 	}
5770 
5771 	if (blob->parent_id == SPDK_BLOBID_INVALID) {
5772 		/* Blob has no parent */
5773 		return allocate_all;
5774 	}
5775 
5776 	b = (struct spdk_blob_bs_dev *)blob->back_bs_dev;
5777 	return (allocate_all || b->blob->active.clusters[cluster] != 0);
5778 }
5779 
5780 static void
5781 _spdk_bs_inflate_blob_touch_next(void *cb_arg, int bserrno)
5782 {
5783 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
5784 	struct spdk_blob *_blob = ctx->original.blob;
5785 	uint64_t offset;
5786 
5787 	if (bserrno != 0) {
5788 		_spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
5789 		return;
5790 	}
5791 
5792 	for (; ctx->cluster < _blob->active.num_clusters; ctx->cluster++) {
5793 		if (_spdk_bs_cluster_needs_allocation(_blob, ctx->cluster, ctx->allocate_all)) {
5794 			break;
5795 		}
5796 	}
5797 
5798 	if (ctx->cluster < _blob->active.num_clusters) {
5799 		offset = _spdk_bs_cluster_to_lba(_blob->bs, ctx->cluster);
5800 
5801 		/* It is safe to advance the cluster index before issuing the write */
5802 		ctx->cluster++;
5803 
5804 		/* Use zero length write to touch a cluster */
5805 		spdk_blob_io_write(_blob, ctx->channel, NULL, offset, 0,
5806 				   _spdk_bs_inflate_blob_touch_next, ctx);
5807 	} else {
5808 		_spdk_bs_inflate_blob_done(cb_arg, bserrno);
5809 	}
5810 }
5811 
5812 static void
5813 _spdk_bs_inflate_blob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
5814 {
5815 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
5816 	uint64_t lfc; /* lowest free cluster */
5817 	uint64_t i;
5818 
5819 	if (bserrno != 0) {
5820 		_spdk_bs_clone_snapshot_cleanup_finish(ctx, bserrno);
5821 		return;
5822 	}
5823 
5824 	ctx->original.blob = _blob;
5825 
5826 	if (_blob->locked_operation_in_progress) {
5827 		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Cannot inflate blob - another operation in progress\n");
5828 		ctx->bserrno = -EBUSY;
5829 		spdk_blob_close(_blob, _spdk_bs_clone_snapshot_cleanup_finish, ctx);
5830 		return;
5831 	}
5832 
5833 	_blob->locked_operation_in_progress = true;
5834 
5835 	if (!ctx->allocate_all && _blob->parent_id == SPDK_BLOBID_INVALID) {
5836 		/* This blob has no parent, so we cannot decouple it. */
5837 		SPDK_ERRLOG("Cannot decouple parent of blob with no parent.\n");
5838 		_spdk_bs_clone_snapshot_origblob_cleanup(ctx, -EINVAL);
5839 		return;
5840 	}
5841 
5842 	if (spdk_blob_is_thin_provisioned(_blob) == false) {
5843 		/* This is not a thin-provisioned blob. No need to inflate. */
5844 		_spdk_bs_clone_snapshot_origblob_cleanup(ctx, 0);
5845 		return;
5846 	}
5847 
5848 	/* Do two passes - one to verify that we can obtain enough clusters
5849 	 * and another to actually claim them.
5850 	 */
5851 	lfc = 0;
5852 	for (i = 0; i < _blob->active.num_clusters; i++) {
5853 		if (_spdk_bs_cluster_needs_allocation(_blob, i, ctx->allocate_all)) {
5854 			lfc = spdk_bit_array_find_first_clear(_blob->bs->used_clusters, lfc);
5855 			if (lfc == UINT32_MAX) {
5856 				/* No more free clusters. Cannot satisfy the request */
5857 				_spdk_bs_clone_snapshot_origblob_cleanup(ctx, -ENOSPC);
5858 				return;
5859 			}
5860 			lfc++;
5861 		}
5862 	}
5863 
5864 	ctx->cluster = 0;
5865 	_spdk_bs_inflate_blob_touch_next(ctx, 0);
5866 }
5867 
5868 static void
5869 _spdk_bs_inflate_blob(struct spdk_blob_store *bs, struct spdk_io_channel *channel,
5870 		      spdk_blob_id blobid, bool allocate_all, spdk_blob_op_complete cb_fn, void *cb_arg)
5871 {
5872 	struct spdk_clone_snapshot_ctx *ctx = calloc(1, sizeof(*ctx));
5873 
5874 	if (!ctx) {
5875 		cb_fn(cb_arg, -ENOMEM);
5876 		return;
5877 	}
5878 	ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
5879 	ctx->cpl.u.bs_basic.cb_fn = cb_fn;
5880 	ctx->cpl.u.bs_basic.cb_arg = cb_arg;
5881 	ctx->bserrno = 0;
5882 	ctx->original.id = blobid;
5883 	ctx->channel = channel;
5884 	ctx->allocate_all = allocate_all;
5885 
5886 	spdk_bs_open_blob(bs, ctx->original.id, _spdk_bs_inflate_blob_open_cpl, ctx);
5887 }
5888 
5889 void
5890 spdk_bs_inflate_blob(struct spdk_blob_store *bs, struct spdk_io_channel *channel,
5891 		     spdk_blob_id blobid, spdk_blob_op_complete cb_fn, void *cb_arg)
5892 {
5893 	_spdk_bs_inflate_blob(bs, channel, blobid, true, cb_fn, cb_arg);
5894 }
5895 
5896 void
5897 spdk_bs_blob_decouple_parent(struct spdk_blob_store *bs, struct spdk_io_channel *channel,
5898 			     spdk_blob_id blobid, spdk_blob_op_complete cb_fn, void *cb_arg)
5899 {
5900 	_spdk_bs_inflate_blob(bs, channel, blobid, false, cb_fn, cb_arg);
5901 }
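
/*
 * Illustrative usage sketch (not from the original source; names are
 * hypothetical): both calls take the id of a thin-provisioned blob and an
 * I/O channel. Inflating allocates every cluster and removes thin
 * provisioning entirely; decoupling only copies the clusters owned by the
 * immediate parent and keeps the blob thin, re-parenting it to the
 * grandparent snapshot if one exists.
 *
 *	spdk_bs_inflate_blob(bs, channel, blob_id, inflate_done, NULL);
 *	spdk_bs_blob_decouple_parent(bs, channel, blob_id, decouple_done, NULL);
 */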
5902 /* END spdk_bs_inflate_blob */
5903 
5904 /* START spdk_blob_resize */
5905 struct spdk_bs_resize_ctx {
5906 	spdk_blob_op_complete cb_fn;
5907 	void *cb_arg;
5908 	struct spdk_blob *blob;
5909 	uint64_t sz;
5910 	int rc;
5911 };
5912 
5913 static void
5914 _spdk_bs_resize_unfreeze_cpl(void *cb_arg, int rc)
5915 {
5916 	struct spdk_bs_resize_ctx *ctx = (struct spdk_bs_resize_ctx *)cb_arg;
5917 
5918 	if (rc != 0) {
5919 		SPDK_ERRLOG("Unfreeze failed, rc=%d\n", rc);
5920 	}
5921 
5922 	if (ctx->rc != 0) {
5923 		SPDK_ERRLOG("Blob resize failed, ctx->rc=%d\n", ctx->rc);
5924 		rc = ctx->rc;
5925 	}
5926 
5927 	ctx->blob->locked_operation_in_progress = false;
5928 
5929 	ctx->cb_fn(ctx->cb_arg, rc);
5930 	free(ctx);
5931 }
5932 
5933 static void
5934 _spdk_bs_resize_freeze_cpl(void *cb_arg, int rc)
5935 {
5936 	struct spdk_bs_resize_ctx *ctx = (struct spdk_bs_resize_ctx *)cb_arg;
5937 
5938 	if (rc != 0) {
5939 		ctx->blob->locked_operation_in_progress = false;
5940 		ctx->cb_fn(ctx->cb_arg, rc);
5941 		free(ctx);
5942 		return;
5943 	}
5944 
5945 	ctx->rc = _spdk_blob_resize(ctx->blob, ctx->sz);
5946 
5947 	_spdk_blob_unfreeze_io(ctx->blob, _spdk_bs_resize_unfreeze_cpl, ctx);
5948 }
5949 
5950 void
5951 spdk_blob_resize(struct spdk_blob *blob, uint64_t sz, spdk_blob_op_complete cb_fn, void *cb_arg)
5952 {
5953 	struct spdk_bs_resize_ctx *ctx;
5954 
5955 	_spdk_blob_verify_md_op(blob);
5956 
5957 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Resizing blob %lu to %lu clusters\n", blob->id, sz);
5958 
5959 	if (blob->md_ro) {
5960 		cb_fn(cb_arg, -EPERM);
5961 		return;
5962 	}
5963 
5964 	if (sz == blob->active.num_clusters) {
5965 		cb_fn(cb_arg, 0);
5966 		return;
5967 	}
5968 
5969 	if (blob->locked_operation_in_progress) {
5970 		cb_fn(cb_arg, -EBUSY);
5971 		return;
5972 	}
5973 
5974 	ctx = calloc(1, sizeof(*ctx));
5975 	if (!ctx) {
5976 		cb_fn(cb_arg, -ENOMEM);
5977 		return;
5978 	}
5979 
5980 	blob->locked_operation_in_progress = true;
5981 	ctx->cb_fn = cb_fn;
5982 	ctx->cb_arg = cb_arg;
5983 	ctx->blob = blob;
5984 	ctx->sz = sz;
5985 	_spdk_blob_freeze_io(blob, _spdk_bs_resize_freeze_cpl, ctx);
5986 }
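
/*
 * Illustrative usage sketch (not from the original source; callback names
 * are hypothetical): grow a blob to 16 clusters. The new size only affects
 * in-memory state, so a subsequent spdk_blob_sync_md() is needed to make it
 * durable.
 *
 *	static void
 *	resize_done(void *cb_arg, int bserrno)
 *	{
 *		struct spdk_blob *blob = cb_arg;
 *
 *		if (bserrno == 0) {
 *			spdk_blob_sync_md(blob, sync_done, NULL);
 *		}
 *	}
 *
 *	spdk_blob_resize(blob, 16, resize_done, blob);
 */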
5987 
5988 /* END spdk_blob_resize */
5989 
5990 
5991 /* START spdk_bs_delete_blob */
5992 
5993 static void
5994 _spdk_bs_delete_close_cpl(void *cb_arg, int bserrno)
5995 {
5996 	spdk_bs_sequence_t *seq = cb_arg;
5997 
5998 	spdk_bs_sequence_finish(seq, bserrno);
5999 }
6000 
6001 static void
6002 _spdk_bs_delete_persist_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
6003 {
6004 	struct spdk_blob *blob = cb_arg;
6005 
6006 	if (bserrno != 0) {
6007 		/*
6008 		 * We already removed this blob from the blobstore tailq, so
6009 		 *  we need to free it here since this is the last reference
6010 		 *  to it.
6011 		 */
6012 		_spdk_blob_free(blob);
6013 		_spdk_bs_delete_close_cpl(seq, bserrno);
6014 		return;
6015 	}
6016 
6017 	/*
6018 	 * This will immediately decrement the ref_count and call
6019 	 *  the completion routine since the metadata state is clean.
6020 	 *  By calling spdk_blob_close, we reduce the number of call
6021 	 *  points into code that touches the blob->open_ref count
6022 	 *  and the blobstore's blob list.
6023 	 */
6024 	spdk_blob_close(blob, _spdk_bs_delete_close_cpl, seq);
6025 }
6026 
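/*
 * Snapshot deletion flow, as implemented by the callback chain below: when
 * the blob being deleted is a snapshot with a single clone, I/O to the clone
 * is frozen and the snapshot is marked with the SNAPSHOT_PENDING_REMOVAL
 * xattr for power-failure safety. The snapshot's clusters are handed over to
 * the clone wherever the clone has none of its own, the clone is re-parented
 * to the snapshot's parent (or to a zeroes device), and only after both
 * blobs have synced their metadata is the snapshot itself removed.
 */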
6027 struct delete_snapshot_ctx {
6028 	struct spdk_blob_list *parent_snapshot_entry;
6029 	struct spdk_blob *snapshot;
6030 	bool snapshot_md_ro;
6031 	struct spdk_blob *clone;
6032 	bool clone_md_ro;
6033 	spdk_blob_op_with_handle_complete cb_fn;
6034 	void *cb_arg;
6035 	int bserrno;
6036 };
6037 
6038 static void
6039 _spdk_delete_blob_cleanup_finish(void *cb_arg, int bserrno)
6040 {
6041 	struct delete_snapshot_ctx *ctx = cb_arg;
6042 
6043 	if (bserrno != 0) {
6044 		SPDK_ERRLOG("Snapshot cleanup error %d\n", bserrno);
6045 	}
6046 
6047 	assert(ctx != NULL);
6048 
6049 	if (bserrno != 0 && ctx->bserrno == 0) {
6050 		ctx->bserrno = bserrno;
6051 	}
6052 
6053 	ctx->cb_fn(ctx->cb_arg, ctx->snapshot, ctx->bserrno);
6054 	free(ctx);
6055 }
6056 
6057 static void
6058 _spdk_delete_snapshot_cleanup_snapshot(void *cb_arg, int bserrno)
6059 {
6060 	struct delete_snapshot_ctx *ctx = cb_arg;
6061 
6062 	if (bserrno != 0) {
6063 		ctx->bserrno = bserrno;
6064 		SPDK_ERRLOG("Clone cleanup error %d\n", bserrno);
6065 	}
6066 
6067 	/* open_ref == 1 means that only the deletion context has opened this snapshot.
6068 	 * open_ref == 2 means that the clone has opened this snapshot as well,
6069 	 * so we have to add it back to the blob list. */
6070 	if (ctx->snapshot->open_ref == 2) {
6071 		TAILQ_INSERT_HEAD(&ctx->snapshot->bs->blobs, ctx->snapshot, link);
6072 	}
6073 
6074 	ctx->snapshot->locked_operation_in_progress = false;
6075 	ctx->snapshot->md_ro = ctx->snapshot_md_ro;
6076 
6077 	spdk_blob_close(ctx->snapshot, _spdk_delete_blob_cleanup_finish, ctx);
6078 }
6079 
6080 static void
6081 _spdk_delete_snapshot_cleanup_clone(void *cb_arg, int bserrno)
6082 {
6083 	struct delete_snapshot_ctx *ctx = cb_arg;
6084 
6085 	ctx->clone->locked_operation_in_progress = false;
6086 	ctx->clone->md_ro = ctx->clone_md_ro;
6087 
6088 	spdk_blob_close(ctx->clone, _spdk_delete_snapshot_cleanup_snapshot, ctx);
6089 }
6090 
6091 static void
6092 _spdk_delete_snapshot_unfreeze_cpl(void *cb_arg, int bserrno)
6093 {
6094 	struct delete_snapshot_ctx *ctx = cb_arg;
6095 
6096 	if (bserrno) {
6097 		ctx->bserrno = bserrno;
6098 		_spdk_delete_snapshot_cleanup_clone(ctx, 0);
6099 		return;
6100 	}
6101 
6102 	ctx->clone->locked_operation_in_progress = false;
6103 	spdk_blob_close(ctx->clone, _spdk_delete_blob_cleanup_finish, ctx);
6104 }
6105 
6106 static void
6107 _spdk_delete_snapshot_sync_snapshot_cpl(void *cb_arg, int bserrno)
6108 {
6109 	struct delete_snapshot_ctx *ctx = cb_arg;
6110 	struct spdk_blob_list *parent_snapshot_entry = NULL;
6111 	struct spdk_blob_list *snapshot_entry = NULL;
6112 	struct spdk_blob_list *clone_entry = NULL;
6113 	struct spdk_blob_list *snapshot_clone_entry = NULL;
6114 
6115 	if (bserrno) {
6116 		SPDK_ERRLOG("Failed to sync MD on blob\n");
6117 		ctx->bserrno = bserrno;
6118 		_spdk_delete_snapshot_cleanup_clone(ctx, 0);
6119 		return;
6120 	}
6121 
6122 	/* Get snapshot entry for the snapshot we want to remove */
6123 	snapshot_entry = _spdk_bs_get_snapshot_entry(ctx->snapshot->bs, ctx->snapshot->id);
6124 
6125 	assert(snapshot_entry != NULL);
6126 
6127 	/* Remove clone entry in this snapshot (at this point there can be only one clone) */
6128 	clone_entry = TAILQ_FIRST(&snapshot_entry->clones);
6129 	assert(clone_entry != NULL);
6130 	TAILQ_REMOVE(&snapshot_entry->clones, clone_entry, link);
6131 	snapshot_entry->clone_count--;
6132 	assert(TAILQ_EMPTY(&snapshot_entry->clones));
6133 
6134 	if (ctx->snapshot->parent_id != SPDK_BLOBID_INVALID) {
6135 		/* This snapshot is at the same time a clone of another snapshot - we need to
6136 		 * update parent snapshot (remove current clone, add new one inherited from
6137 		 * the snapshot that is being removed) */
6138 
6139 		/* Get snapshot entry for parent snapshot and clone entry within that snapshot for
6140 		 * snapshot that we are removing */
6141 		_spdk_blob_get_snapshot_and_clone_entries(ctx->snapshot, &parent_snapshot_entry,
6142 				&snapshot_clone_entry);
6143 
6144 		/* Switch clone entry in parent snapshot */
6145 		TAILQ_INSERT_TAIL(&parent_snapshot_entry->clones, clone_entry, link);
6146 		TAILQ_REMOVE(&parent_snapshot_entry->clones, snapshot_clone_entry, link);
6147 		free(snapshot_clone_entry);
6148 	} else {
6149 		/* No parent snapshot - just remove clone entry */
6150 		free(clone_entry);
6151 	}
6152 
6153 	/* Restore md_ro flags */
6154 	ctx->clone->md_ro = ctx->clone_md_ro;
6155 	ctx->snapshot->md_ro = ctx->snapshot_md_ro;
6156 
6157 	_spdk_blob_unfreeze_io(ctx->clone, _spdk_delete_snapshot_unfreeze_cpl, ctx);
6158 }
6159 
6160 static void
6161 _spdk_delete_snapshot_sync_clone_cpl(void *cb_arg, int bserrno)
6162 {
6163 	struct delete_snapshot_ctx *ctx = cb_arg;
6164 	uint64_t i;
6165 
6166 	ctx->snapshot->md_ro = false;
6167 
6168 	if (bserrno) {
6169 		SPDK_ERRLOG("Failed to sync MD on clone\n");
6170 		ctx->bserrno = bserrno;
6171 
6172 		/* Restore snapshot to previous state */
6173 		bserrno = _spdk_blob_remove_xattr(ctx->snapshot, SNAPSHOT_PENDING_REMOVAL, true);
6174 		if (bserrno != 0) {
6175 			_spdk_delete_snapshot_cleanup_clone(ctx, bserrno);
6176 			return;
6177 		}
6178 
6179 		spdk_blob_sync_md(ctx->snapshot, _spdk_delete_snapshot_cleanup_clone, ctx);
6180 		return;
6181 	}
6182 
6183 	/* Clear cluster map entries for snapshot */
6184 	for (i = 0; i < ctx->snapshot->active.num_clusters && i < ctx->clone->active.num_clusters; i++) {
6185 		if (ctx->clone->active.clusters[i] == ctx->snapshot->active.clusters[i]) {
6186 			ctx->snapshot->active.clusters[i] = 0;
6187 		}
6188 	}
6189 
6190 	ctx->snapshot->state = SPDK_BLOB_STATE_DIRTY;
6191 
6192 	if (ctx->parent_snapshot_entry != NULL) {
6193 		ctx->snapshot->back_bs_dev = NULL;
6194 	}
6195 
6196 	spdk_blob_sync_md(ctx->snapshot, _spdk_delete_snapshot_sync_snapshot_cpl, ctx);
6197 }
6198 
6199 static void
6200 _spdk_delete_snapshot_sync_snapshot_xattr_cpl(void *cb_arg, int bserrno)
6201 {
6202 	struct delete_snapshot_ctx *ctx = cb_arg;
6203 	uint64_t i;
6204 
6205 	/* Temporarily override md_ro flag for clone for MD modification */
6206 	ctx->clone_md_ro = ctx->clone->md_ro;
6207 	ctx->clone->md_ro = false;
6208 
6209 	if (bserrno) {
6210 		SPDK_ERRLOG("Failed to sync MD with xattr on blob\n");
6211 		ctx->bserrno = bserrno;
6212 		_spdk_delete_snapshot_cleanup_clone(ctx, 0);
6213 		return;
6214 	}
6215 
6216 	/* Copy snapshot map to clone map (only unallocated clusters in clone) */
6217 	for (i = 0; i < ctx->snapshot->active.num_clusters && i < ctx->clone->active.num_clusters; i++) {
6218 		if (ctx->clone->active.clusters[i] == 0) {
6219 			ctx->clone->active.clusters[i] = ctx->snapshot->active.clusters[i];
6220 		}
6221 	}
6222 
6223 	/* Delete old backing bs_dev from clone (related to snapshot that will be removed) */
6224 	ctx->clone->back_bs_dev->destroy(ctx->clone->back_bs_dev);
6225 
6226 	/* Set/remove snapshot xattr and switch parent ID and backing bs_dev on clone... */
6227 	if (ctx->parent_snapshot_entry != NULL) {
6228 		/* ...to parent snapshot */
6229 		ctx->clone->parent_id = ctx->parent_snapshot_entry->id;
6230 		ctx->clone->back_bs_dev = ctx->snapshot->back_bs_dev;
6231 		_spdk_blob_set_xattr(ctx->clone, BLOB_SNAPSHOT, &ctx->parent_snapshot_entry->id,
6232 				     sizeof(spdk_blob_id),
6233 				     true);
6234 	} else {
6235 		/* ...to blobid invalid and zeroes dev */
6236 		ctx->clone->parent_id = SPDK_BLOBID_INVALID;
6237 		ctx->clone->back_bs_dev = spdk_bs_create_zeroes_dev();
6238 		_spdk_blob_remove_xattr(ctx->clone, BLOB_SNAPSHOT, true);
6239 	}
6240 
6241 	spdk_blob_sync_md(ctx->clone, _spdk_delete_snapshot_sync_clone_cpl, ctx);
6242 }
6243 
6244 static void
6245 _spdk_delete_snapshot_freeze_io_cb(void *cb_arg, int bserrno)
6246 {
6247 	struct delete_snapshot_ctx *ctx = cb_arg;
6248 
6249 	if (bserrno) {
6250 		SPDK_ERRLOG("Failed to freeze I/O on clone\n");
6251 		ctx->bserrno = bserrno;
6252 		_spdk_delete_snapshot_cleanup_clone(ctx, 0);
6253 		return;
6254 	}
6255 
6256 	/* Temporarily override md_ro flag for snapshot for MD modification */
6257 	ctx->snapshot_md_ro = ctx->snapshot->md_ro;
6258 	ctx->snapshot->md_ro = false;
6259 
6260 	/* Mark blob as pending for removal for power failure safety, use clone id for recovery */
6261 	ctx->bserrno = _spdk_blob_set_xattr(ctx->snapshot, SNAPSHOT_PENDING_REMOVAL, &ctx->clone->id,
6262 					    sizeof(spdk_blob_id), true);
6263 	if (ctx->bserrno != 0) {
6264 		_spdk_delete_snapshot_cleanup_clone(ctx, 0);
6265 		return;
6266 	}
6267 
6268 	spdk_blob_sync_md(ctx->snapshot, _spdk_delete_snapshot_sync_snapshot_xattr_cpl, ctx);
6269 }
6270 
6271 static void
6272 _spdk_delete_snapshot_open_clone_cb(void *cb_arg, struct spdk_blob *clone, int bserrno)
6273 {
6274 	struct delete_snapshot_ctx *ctx = cb_arg;
6275 
6276 	if (bserrno) {
6277 		SPDK_ERRLOG("Failed to open clone\n");
6278 		ctx->bserrno = bserrno;
6279 		_spdk_delete_snapshot_cleanup_snapshot(ctx, 0);
6280 		return;
6281 	}
6282 
6283 	ctx->clone = clone;
6284 
6285 	if (clone->locked_operation_in_progress) {
6286 		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Cannot remove blob - another operation in progress on its clone\n");
6287 		ctx->bserrno = -EBUSY;
6288 		spdk_blob_close(ctx->clone, _spdk_delete_snapshot_cleanup_snapshot, ctx);
6289 		return;
6290 	}
6291 
6292 	clone->locked_operation_in_progress = true;
6293 
6294 	_spdk_blob_freeze_io(clone, _spdk_delete_snapshot_freeze_io_cb, ctx);
6295 }
6296 
6297 static void
6298 _spdk_update_clone_on_snapshot_deletion(struct spdk_blob *snapshot, struct delete_snapshot_ctx *ctx)
6299 {
6300 	struct spdk_blob_list *snapshot_entry = NULL;
6301 	struct spdk_blob_list *clone_entry = NULL;
6302 	struct spdk_blob_list *snapshot_clone_entry = NULL;
6303 
6304 	/* Get snapshot entry for the snapshot we want to remove */
6305 	snapshot_entry = _spdk_bs_get_snapshot_entry(snapshot->bs, snapshot->id);
6306 
6307 	assert(snapshot_entry != NULL);
6308 
6309 	/* Get clone of the snapshot (at this point there can be only one clone) */
6310 	clone_entry = TAILQ_FIRST(&snapshot_entry->clones);
6311 	assert(snapshot_entry->clone_count == 1);
6312 	assert(clone_entry != NULL);
6313 
6314 	/* Get snapshot entry for parent snapshot and clone entry within that snapshot for
6315 	 * snapshot that we are removing */
6316 	_spdk_blob_get_snapshot_and_clone_entries(snapshot, &ctx->parent_snapshot_entry,
6317 			&snapshot_clone_entry);
6318 
6319 	spdk_bs_open_blob(snapshot->bs, clone_entry->id, _spdk_delete_snapshot_open_clone_cb, ctx);
6320 }
6321 
6322 static void
6323 _spdk_bs_delete_blob_finish(void *cb_arg, struct spdk_blob *blob, int bserrno)
6324 {
6325 	spdk_bs_sequence_t *seq = cb_arg;
6326 	struct spdk_blob_list *snapshot_entry = NULL;
6327 	uint32_t page_num;
6328 
6329 	if (bserrno) {
6330 		SPDK_ERRLOG("Failed to remove blob\n");
6331 		spdk_bs_sequence_finish(seq, bserrno);
6332 		return;
6333 	}
6334 
6335 	/* Remove snapshot from the list */
6336 	snapshot_entry = _spdk_bs_get_snapshot_entry(blob->bs, blob->id);
6337 	if (snapshot_entry != NULL) {
6338 		TAILQ_REMOVE(&blob->bs->snapshots, snapshot_entry, link);
6339 		free(snapshot_entry);
6340 	}
6341 
6342 	page_num = _spdk_bs_blobid_to_page(blob->id);
6343 	spdk_bit_array_clear(blob->bs->used_blobids, page_num);
6344 	blob->state = SPDK_BLOB_STATE_DIRTY;
6345 	blob->active.num_pages = 0;
6346 	_spdk_blob_resize(blob, 0);
6347 
6348 	_spdk_blob_persist(seq, blob, _spdk_bs_delete_persist_cpl, blob);
6349 }
6350 
6351 static int
6352 _spdk_bs_is_blob_deletable(struct spdk_blob *blob, bool *update_clone)
6353 {
6354 	struct spdk_blob_list *snapshot_entry = NULL;
6355 	struct spdk_blob_list *clone_entry = NULL;
6356 	struct spdk_blob *clone = NULL;
6357 	bool has_one_clone = false;
6358 
6359 	/* Check if this is a snapshot with clones */
6360 	snapshot_entry = _spdk_bs_get_snapshot_entry(blob->bs, blob->id);
6361 	if (snapshot_entry != NULL) {
6362 		if (snapshot_entry->clone_count > 1) {
6363 			SPDK_ERRLOG("Cannot remove snapshot with more than one clone\n");
6364 			return -EBUSY;
6365 		} else if (snapshot_entry->clone_count == 1) {
6366 			has_one_clone = true;
6367 		}
6368 	}
6369 
6370 	/* Check if someone has this blob open (besides this delete context):
6371 	 * - open_ref = 1 - only this context opened blob, so it is ok to remove it
6372 	 * - open_ref <= 2 && has_one_clone = true - clone is holding snapshot
6373 	 *	and that is ok, because we will update it accordingly */
6374 	if (blob->open_ref <= 2 && has_one_clone) {
6375 		clone_entry = TAILQ_FIRST(&snapshot_entry->clones);
6376 		assert(clone_entry != NULL);
6377 		clone = _spdk_blob_lookup(blob->bs, clone_entry->id);
6378 
6379 		if (blob->open_ref == 2 && clone == NULL) {
6380 			/* Clone is closed and someone else opened this blob */
6381 			SPDK_ERRLOG("Cannot remove snapshot because it is open\n");
6382 			return -EBUSY;
6383 		}
6384 
6385 		*update_clone = true;
6386 		return 0;
6387 	}
6388 
6389 	if (blob->open_ref > 1) {
6390 		SPDK_ERRLOG("Cannot remove snapshot because it is open\n");
6391 		return -EBUSY;
6392 	}
6393 
6394 	assert(has_one_clone == false);
6395 	*update_clone = false;
6396 	return 0;
6397 }
6398 
6399 static void
6400 _spdk_bs_delete_enomem_close_cpl(void *cb_arg, int bserrno)
6401 {
6402 	spdk_bs_sequence_t *seq = cb_arg;
6403 
6404 	spdk_bs_sequence_finish(seq, -ENOMEM);
6405 }
6406 
6407 static void
6408 _spdk_bs_delete_open_cpl(void *cb_arg, struct spdk_blob *blob, int bserrno)
6409 {
6410 	spdk_bs_sequence_t *seq = cb_arg;
6411 	struct delete_snapshot_ctx *ctx;
6412 	bool update_clone = false;
6413 
6414 	if (bserrno != 0) {
6415 		spdk_bs_sequence_finish(seq, bserrno);
6416 		return;
6417 	}
6418 
6419 	_spdk_blob_verify_md_op(blob);
6420 
6421 	ctx = calloc(1, sizeof(*ctx));
6422 	if (ctx == NULL) {
6423 		spdk_blob_close(blob, _spdk_bs_delete_enomem_close_cpl, seq);
6424 		return;
6425 	}
6426 
6427 	ctx->snapshot = blob;
6428 	ctx->cb_fn = _spdk_bs_delete_blob_finish;
6429 	ctx->cb_arg = seq;
6430 
6431 	/* Check if blob can be removed and if it is a snapshot with clone on top of it */
6432 	ctx->bserrno = _spdk_bs_is_blob_deletable(blob, &update_clone);
6433 	if (ctx->bserrno) {
6434 		spdk_blob_close(blob, _spdk_delete_blob_cleanup_finish, ctx);
6435 		return;
6436 	}
6437 
6438 	if (blob->locked_operation_in_progress) {
6439 		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Cannot remove blob - another operation in progress\n");
6440 		ctx->bserrno = -EBUSY;
6441 		spdk_blob_close(blob, _spdk_delete_blob_cleanup_finish, ctx);
6442 		return;
6443 	}
6444 
6445 	blob->locked_operation_in_progress = true;
6446 
6447 	/*
6448 	 * Remove the blob from the blob_store list now, to ensure it does not
6449 	 *  get returned after this point by _spdk_blob_lookup().
6450 	 */
6451 	TAILQ_REMOVE(&blob->bs->blobs, blob, link);
6452 
6453 	if (update_clone) {
6454 		/* This blob is a snapshot with active clone - update clone first */
6455 		_spdk_update_clone_on_snapshot_deletion(blob, ctx);
6456 	} else {
6457 		/* This blob does not have any clones - just remove it */
6458 		_spdk_bs_blob_list_remove(blob);
6459 		_spdk_bs_delete_blob_finish(seq, blob, 0);
6460 		free(ctx);
6461 	}
6462 }
6463 
6464 void
6465 spdk_bs_delete_blob(struct spdk_blob_store *bs, spdk_blob_id blobid,
6466 		    spdk_blob_op_complete cb_fn, void *cb_arg)
6467 {
6468 	struct spdk_bs_cpl	cpl;
6469 	spdk_bs_sequence_t	*seq;
6470 
6471 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Deleting blob %lu\n", blobid);
6472 
6473 	assert(spdk_get_thread() == bs->md_thread);
6474 
6475 	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
6476 	cpl.u.blob_basic.cb_fn = cb_fn;
6477 	cpl.u.blob_basic.cb_arg = cb_arg;
6478 
6479 	seq = spdk_bs_sequence_start(bs->md_channel, &cpl);
6480 	if (!seq) {
6481 		cb_fn(cb_arg, -ENOMEM);
6482 		return;
6483 	}
6484 
6485 	spdk_bs_open_blob(bs, blobid, _spdk_bs_delete_open_cpl, seq);
6486 }
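
/*
 * Illustrative usage sketch (not from the original source; the callback name
 * is hypothetical): delete a blob by id. The call fails with -EBUSY if the
 * blob is open elsewhere or is a snapshot with more than one clone.
 *
 *	static void
 *	delete_done(void *cb_arg, int bserrno)
 *	{
 *		if (bserrno != 0) {
 *			SPDK_ERRLOG("Blob deletion failed: %d\n", bserrno);
 *		}
 *	}
 *
 *	spdk_bs_delete_blob(bs, blobid, delete_done, NULL);
 */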
6487 
6488 /* END spdk_bs_delete_blob */
6489 
6490 /* START spdk_bs_open_blob */
6491 
6492 static void
6493 _spdk_bs_open_blob_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
6494 {
6495 	struct spdk_blob *blob = cb_arg;
6496 
6497 	if (bserrno != 0) {
6498 		_spdk_blob_free(blob);
6499 		seq->cpl.u.blob_handle.blob = NULL;
6500 		spdk_bs_sequence_finish(seq, bserrno);
6501 		return;
6502 	}
6503 
6504 	blob->open_ref++;
6505 
6506 	TAILQ_INSERT_HEAD(&blob->bs->blobs, blob, link);
6507 
6508 	spdk_bs_sequence_finish(seq, bserrno);
6509 }
6510 
6511 static void _spdk_bs_open_blob(struct spdk_blob_store *bs, spdk_blob_id blobid,
6512 			       struct spdk_blob_open_opts *opts, spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
6513 {
6514 	struct spdk_blob		*blob;
6515 	struct spdk_bs_cpl		cpl;
6516 	struct spdk_blob_open_opts	opts_default;
6517 	spdk_bs_sequence_t		*seq;
6518 	uint32_t			page_num;
6519 
6520 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Opening blob %lu\n", blobid);
6521 	assert(spdk_get_thread() == bs->md_thread);
6522 
6523 	page_num = _spdk_bs_blobid_to_page(blobid);
6524 	if (spdk_bit_array_get(bs->used_blobids, page_num) == false) {
6525 		/* Invalid blobid */
6526 		cb_fn(cb_arg, NULL, -ENOENT);
6527 		return;
6528 	}
6529 
6530 	blob = _spdk_blob_lookup(bs, blobid);
6531 	if (blob) {
6532 		blob->open_ref++;
6533 		cb_fn(cb_arg, blob, 0);
6534 		return;
6535 	}
6536 
6537 	blob = _spdk_blob_alloc(bs, blobid);
6538 	if (!blob) {
6539 		cb_fn(cb_arg, NULL, -ENOMEM);
6540 		return;
6541 	}
6542 
6543 	if (!opts) {
6544 		spdk_blob_open_opts_init(&opts_default);
6545 		opts = &opts_default;
6546 	}
6547 
6548 	blob->clear_method = opts->clear_method;
6549 
6550 	cpl.type = SPDK_BS_CPL_TYPE_BLOB_HANDLE;
6551 	cpl.u.blob_handle.cb_fn = cb_fn;
6552 	cpl.u.blob_handle.cb_arg = cb_arg;
6553 	cpl.u.blob_handle.blob = blob;
6554 
6555 	seq = spdk_bs_sequence_start(bs->md_channel, &cpl);
6556 	if (!seq) {
6557 		_spdk_blob_free(blob);
6558 		cb_fn(cb_arg, NULL, -ENOMEM);
6559 		return;
6560 	}
6561 
6562 	_spdk_blob_load(seq, blob, _spdk_bs_open_blob_cpl, blob);
6563 }
6564 
6565 void spdk_bs_open_blob(struct spdk_blob_store *bs, spdk_blob_id blobid,
6566 		       spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
6567 {
6568 	_spdk_bs_open_blob(bs, blobid, NULL, cb_fn, cb_arg);
6569 }
6570 
6571 void spdk_bs_open_blob_ext(struct spdk_blob_store *bs, spdk_blob_id blobid,
6572 			   struct spdk_blob_open_opts *opts, spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
6573 {
6574 	_spdk_bs_open_blob(bs, blobid, opts, cb_fn, cb_arg);
6575 }
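
/*
 * Illustrative usage sketch (not from the original source; the callback name
 * is hypothetical and the enum value is assumed from the public blob.h
 * header): open a blob with a non-default clear method.
 *
 *	struct spdk_blob_open_opts opts;
 *
 *	spdk_blob_open_opts_init(&opts);
 *	opts.clear_method = BLOB_CLEAR_WITH_NONE;
 *	spdk_bs_open_blob_ext(bs, blobid, &opts, open_done, NULL);
 */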
6576 
6577 /* END spdk_bs_open_blob */
6578 
6579 /* START spdk_blob_set_read_only */
6580 int spdk_blob_set_read_only(struct spdk_blob *blob)
6581 {
6582 	_spdk_blob_verify_md_op(blob);
6583 
6584 	blob->data_ro_flags |= SPDK_BLOB_READ_ONLY;
6585 
6586 	blob->state = SPDK_BLOB_STATE_DIRTY;
6587 	return 0;
6588 }
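
/*
 * Note that the READ_ONLY flag set above only takes effect once the metadata
 * is persisted; data_ro and md_ro are flipped in _spdk_blob_sync_md_cpl()
 * below. A caller therefore follows this with a sync (callback name
 * hypothetical):
 *
 *	spdk_blob_set_read_only(blob);
 *	spdk_blob_sync_md(blob, sync_done, NULL);
 */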
6589 /* END spdk_blob_set_read_only */
6590 
6591 /* START spdk_blob_sync_md */
6592 
6593 static void
6594 _spdk_blob_sync_md_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
6595 {
6596 	struct spdk_blob *blob = cb_arg;
6597 
6598 	if (bserrno == 0 && (blob->data_ro_flags & SPDK_BLOB_READ_ONLY)) {
6599 		blob->data_ro = true;
6600 		blob->md_ro = true;
6601 	}
6602 
6603 	spdk_bs_sequence_finish(seq, bserrno);
6604 }
6605 
6606 static void
6607 _spdk_blob_sync_md(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
6608 {
6609 	struct spdk_bs_cpl	cpl;
6610 	spdk_bs_sequence_t	*seq;
6611 
6612 	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
6613 	cpl.u.blob_basic.cb_fn = cb_fn;
6614 	cpl.u.blob_basic.cb_arg = cb_arg;
6615 
6616 	seq = spdk_bs_sequence_start(blob->bs->md_channel, &cpl);
6617 	if (!seq) {
6618 		cb_fn(cb_arg, -ENOMEM);
6619 		return;
6620 	}
6621 
6622 	_spdk_blob_persist(seq, blob, _spdk_blob_sync_md_cpl, blob);
6623 }
6624 
6625 void
6626 spdk_blob_sync_md(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
6627 {
6628 	_spdk_blob_verify_md_op(blob);
6629 
6630 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Syncing blob %lu\n", blob->id);
6631 
6632 	if (blob->md_ro) {
6633 		assert(blob->state == SPDK_BLOB_STATE_CLEAN);
6634 		cb_fn(cb_arg, 0);
6635 		return;
6636 	}
6637 
6638 	_spdk_blob_sync_md(blob, cb_fn, cb_arg);
6639 }
6640 
6641 /* END spdk_blob_sync_md */
6642 
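/*
 * Cluster insertion is funneled to the metadata thread: an I/O thread that
 * has allocated a cluster sends a message here via spdk_thread_send_msg(),
 * the metadata thread inserts the cluster into the blob's map and persists
 * the change - either with a full metadata sync (extents_rle) or by
 * rewriting the single extent page that covers the new cluster - and the
 * result is messaged back to the calling thread.
 */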
6643 struct spdk_blob_insert_cluster_ctx {
6644 	struct spdk_thread	*thread;
6645 	struct spdk_blob	*blob;
6646 	uint32_t		cluster_num;	/* cluster index in blob */
6647 	uint32_t		cluster;	/* cluster on disk */
6648 	uint64_t		cluster;	/* cluster on disk */
6649 	int			rc;
6650 	spdk_blob_op_complete	cb_fn;
6651 	void			*cb_arg;
6652 };
6653 
6654 static void
6655 _spdk_blob_insert_cluster_msg_cpl(void *arg)
6656 {
6657 	struct spdk_blob_insert_cluster_ctx *ctx = arg;
6658 
6659 	ctx->cb_fn(ctx->cb_arg, ctx->rc);
6660 	free(ctx);
6661 }
6662 
6663 static void
6664 _spdk_blob_insert_cluster_msg_cb(void *arg, int bserrno)
6665 {
6666 	struct spdk_blob_insert_cluster_ctx *ctx = arg;
6667 
6668 	ctx->rc = bserrno;
6669 	spdk_thread_send_msg(ctx->thread, _spdk_blob_insert_cluster_msg_cpl, ctx);
6670 }
6671 
6672 static void
6673 _spdk_blob_persist_extent_page_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
6674 {
6675 	struct spdk_blob_md_page        *page = cb_arg;
6676 
6677 	spdk_bs_sequence_finish(seq, bserrno);
6678 	spdk_free(page);
6679 }
6680 
6681 static void
6682 _spdk_blob_insert_extent(struct spdk_blob *blob, uint32_t extent, uint64_t cluster_num,
6683 			 spdk_blob_op_complete cb_fn, void *cb_arg)
6684 {
6685 	spdk_bs_sequence_t		*seq;
6686 	struct spdk_bs_cpl		cpl;
6687 	struct spdk_blob_md_page	*page = NULL;
6688 	uint32_t			page_count = 0;
6689 	int				rc;
6690 
6691 	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
6692 	cpl.u.blob_basic.cb_fn = cb_fn;
6693 	cpl.u.blob_basic.cb_arg = cb_arg;
6694 
6695 	seq = spdk_bs_sequence_start(blob->bs->md_channel, &cpl);
6696 	if (!seq) {
6697 		cb_fn(cb_arg, -ENOMEM);
6698 		return;
6699 	}
6700 	rc = _spdk_blob_serialize_add_page(blob, &page, &page_count, &page);
6701 	if (rc < 0) {
6702 		spdk_bs_sequence_finish(seq, rc);
6703 		return;
6704 	}
6705 
6706 	_spdk_blob_serialize_extent_page(blob, cluster_num, page);
6707 
6708 	page->crc = _spdk_blob_md_page_calc_crc(page);
6709 
6710 	assert(spdk_bit_array_get(blob->bs->used_md_pages, extent) == true);
6711 
6712 	spdk_bs_sequence_write_dev(seq, page, _spdk_bs_md_page_to_lba(blob->bs, extent),
6713 				   _spdk_bs_byte_to_lba(blob->bs, SPDK_BS_PAGE_SIZE),
6714 				   _spdk_blob_persist_extent_page_cpl, page);
6715 }
6716 
6717 static void
6718 _spdk_blob_insert_cluster_msg(void *arg)
6719 {
6720 	struct spdk_blob_insert_cluster_ctx *ctx = arg;
6721 	uint32_t *extent_page;
6722 
6723 	ctx->rc = _spdk_blob_insert_cluster(ctx->blob, ctx->cluster_num, ctx->cluster);
6724 	if (ctx->rc != 0) {
6725 		spdk_thread_send_msg(ctx->thread, _spdk_blob_insert_cluster_msg_cpl, ctx);
6726 		return;
6727 	}
6728 
6729 	if (ctx->blob->use_extent_table == false) {
6730 		/* Extent table is not used, proceed with sync of md that will only use extents_rle. */
6731 		ctx->blob->state = SPDK_BLOB_STATE_DIRTY;
6732 		_spdk_blob_sync_md(ctx->blob, _spdk_blob_insert_cluster_msg_cb, ctx);
6733 		return;
6734 	}
6735 
6736 	extent_page = _spdk_bs_cluster_to_extent_page(ctx->blob, ctx->cluster_num);
6737 	if (*extent_page == 0) {
6738 		/* Extent page requires allocation.
6739 		 * It was already claimed in the used_md_pages map and placed in ctx.
6740 		 * Blob persist will take care of writing out new extent page on disk. */
6741 		assert(ctx->extent_page != 0);
6742 		assert(spdk_bit_array_get(ctx->blob->bs->used_md_pages, ctx->extent_page) == true);
6743 		*extent_page = ctx->extent_page;
6744 		ctx->blob->state = SPDK_BLOB_STATE_DIRTY;
6745 		_spdk_blob_sync_md(ctx->blob, _spdk_blob_insert_cluster_msg_cb, ctx);
6746 	} else {
6747 		assert(ctx->extent_page == 0);
6748 		/* Extent page already allocated.
6749 		 * Every cluster allocation requires just an update of a single extent page. */
6750 		_spdk_blob_insert_extent(ctx->blob, ctx->extent_page, ctx->cluster_num,
6751 					 _spdk_blob_insert_cluster_msg_cb, ctx);
6752 	}
6753 }
6754 
6755 static void
6756 _spdk_blob_insert_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num,
6757 				       uint64_t cluster, uint32_t extent_page, spdk_blob_op_complete cb_fn, void *cb_arg)
6758 {
6759 	struct spdk_blob_insert_cluster_ctx *ctx;
6760 
6761 	ctx = calloc(1, sizeof(*ctx));
6762 	if (ctx == NULL) {
6763 		cb_fn(cb_arg, -ENOMEM);
6764 		return;
6765 	}
6766 
6767 	ctx->thread = spdk_get_thread();
6768 	ctx->blob = blob;
6769 	ctx->cluster_num = cluster_num;
6770 	ctx->cluster = cluster;
6771 	ctx->extent_page = extent_page;
6772 	ctx->cb_fn = cb_fn;
6773 	ctx->cb_arg = cb_arg;
6774 
6775 	spdk_thread_send_msg(blob->bs->md_thread, _spdk_blob_insert_cluster_msg, ctx);
6776 }
6777 
6778 /* START spdk_blob_close */
6779 
6780 static void
6781 _spdk_blob_close_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
6782 {
6783 	struct spdk_blob *blob = cb_arg;
6784 
6785 	if (bserrno == 0) {
6786 		blob->open_ref--;
6787 		if (blob->open_ref == 0) {
6788 			/*
6789 			 * Blobs with active.num_pages == 0 are deleted blobs.
6790 			 *  These blobs are removed from the blob_store list
6791 			 *  when the deletion process starts, so don't try to
6792 			 *  remove them again.
6793 			 */
6794 			if (blob->active.num_pages > 0) {
6795 				TAILQ_REMOVE(&blob->bs->blobs, blob, link);
6796 			}
6797 			_spdk_blob_free(blob);
6798 		}
6799 	}
6800 
6801 	spdk_bs_sequence_finish(seq, bserrno);
6802 }
6803 
6804 void spdk_blob_close(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
6805 {
6806 	struct spdk_bs_cpl	cpl;
6807 	spdk_bs_sequence_t	*seq;
6808 
6809 	_spdk_blob_verify_md_op(blob);
6810 
6811 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Closing blob %lu\n", blob->id);
6812 
6813 	if (blob->open_ref == 0) {
6814 		cb_fn(cb_arg, -EBADF);
6815 		return;
6816 	}
6817 
6818 	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
6819 	cpl.u.blob_basic.cb_fn = cb_fn;
6820 	cpl.u.blob_basic.cb_arg = cb_arg;
6821 
6822 	seq = spdk_bs_sequence_start(blob->bs->md_channel, &cpl);
6823 	if (!seq) {
6824 		cb_fn(cb_arg, -ENOMEM);
6825 		return;
6826 	}
6827 
6828 	/* Sync metadata */
6829 	_spdk_blob_persist(seq, blob, _spdk_blob_close_cpl, blob);
6830 }
6831 
6832 /* END spdk_blob_close */
6833 
6834 struct spdk_io_channel *spdk_bs_alloc_io_channel(struct spdk_blob_store *bs)
6835 {
6836 	return spdk_get_io_channel(bs);
6837 }
6838 
6839 void spdk_bs_free_io_channel(struct spdk_io_channel *channel)
6840 {
6841 	spdk_put_io_channel(channel);
6842 }
6843 
6844 void spdk_blob_io_unmap(struct spdk_blob *blob, struct spdk_io_channel *channel,
6845 			uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg)
6846 {
6847 	_spdk_blob_request_submit_op(blob, channel, NULL, offset, length, cb_fn, cb_arg,
6848 				     SPDK_BLOB_UNMAP);
6849 }
6850 
6851 void spdk_blob_io_write_zeroes(struct spdk_blob *blob, struct spdk_io_channel *channel,
6852 			       uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg)
6853 {
6854 	_spdk_blob_request_submit_op(blob, channel, NULL, offset, length, cb_fn, cb_arg,
6855 				     SPDK_BLOB_WRITE_ZEROES);
6856 }
6857 
6858 void spdk_blob_io_write(struct spdk_blob *blob, struct spdk_io_channel *channel,
6859 			void *payload, uint64_t offset, uint64_t length,
6860 			spdk_blob_op_complete cb_fn, void *cb_arg)
6861 {
6862 	_spdk_blob_request_submit_op(blob, channel, payload, offset, length, cb_fn, cb_arg,
6863 				     SPDK_BLOB_WRITE);
6864 }
6865 
6866 void spdk_blob_io_read(struct spdk_blob *blob, struct spdk_io_channel *channel,
6867 		       void *payload, uint64_t offset, uint64_t length,
6868 		       spdk_blob_op_complete cb_fn, void *cb_arg)
6869 {
6870 	_spdk_blob_request_submit_op(blob, channel, payload, offset, length, cb_fn, cb_arg,
6871 				     SPDK_BLOB_READ);
6872 }
6873 
6874 void spdk_blob_io_writev(struct spdk_blob *blob, struct spdk_io_channel *channel,
6875 			 struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
6876 			 spdk_blob_op_complete cb_fn, void *cb_arg)
6877 {
6878 	_spdk_blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, false);
6879 }
6880 
6881 void spdk_blob_io_readv(struct spdk_blob *blob, struct spdk_io_channel *channel,
6882 			struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
6883 			spdk_blob_op_complete cb_fn, void *cb_arg)
6884 {
6885 	_spdk_blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, true);
6886 }
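
/*
 * Illustrative usage sketch (not from the original source; names are
 * hypothetical): blob I/O requires a per-thread channel, which must remain
 * valid until the completion callbacks fire, and in this version of the API
 * the offset and length arguments are expressed in io units rather than
 * bytes.
 *
 *	struct spdk_io_channel *channel = spdk_bs_alloc_io_channel(bs);
 *
 *	spdk_blob_io_write(blob, channel, payload, 0, 1, write_done, NULL);
 *	spdk_blob_io_read(blob, channel, buf, 0, 1, read_done, NULL);
 */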
6887 
6888 struct spdk_bs_iter_ctx {
6889 	int64_t page_num;
6890 	struct spdk_blob_store *bs;
6891 
6892 	spdk_blob_op_with_handle_complete cb_fn;
6893 	void *cb_arg;
6894 };
6895 
6896 static void
6897 _spdk_bs_iter_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
6898 {
6899 	struct spdk_bs_iter_ctx *ctx = cb_arg;
6900 	struct spdk_blob_store *bs = ctx->bs;
6901 	spdk_blob_id id;
6902 
6903 	if (bserrno == 0) {
6904 		ctx->cb_fn(ctx->cb_arg, _blob, bserrno);
6905 		free(ctx);
6906 		return;
6907 	}
6908 
6909 	ctx->page_num++;
6910 	ctx->page_num = spdk_bit_array_find_first_set(bs->used_blobids, ctx->page_num);
6911 	if (ctx->page_num >= spdk_bit_array_capacity(bs->used_blobids)) {
6912 		ctx->cb_fn(ctx->cb_arg, NULL, -ENOENT);
6913 		free(ctx);
6914 		return;
6915 	}
6916 
6917 	id = _spdk_bs_page_to_blobid(ctx->page_num);
6918 
6919 	spdk_bs_open_blob(bs, id, _spdk_bs_iter_cpl, ctx);
6920 }
6921 
6922 void
6923 spdk_bs_iter_first(struct spdk_blob_store *bs,
6924 		   spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
6925 {
6926 	struct spdk_bs_iter_ctx *ctx;
6927 
6928 	ctx = calloc(1, sizeof(*ctx));
6929 	if (!ctx) {
6930 		cb_fn(cb_arg, NULL, -ENOMEM);
6931 		return;
6932 	}
6933 
6934 	ctx->page_num = -1;
6935 	ctx->bs = bs;
6936 	ctx->cb_fn = cb_fn;
6937 	ctx->cb_arg = cb_arg;
6938 
6939 	_spdk_bs_iter_cpl(ctx, NULL, -1);
6940 }
6941 
6942 static void
6943 _spdk_bs_iter_close_cpl(void *cb_arg, int bserrno)
6944 {
6945 	struct spdk_bs_iter_ctx *ctx = cb_arg;
6946 
6947 	_spdk_bs_iter_cpl(ctx, NULL, -1);
6948 }
6949 
6950 void
6951 spdk_bs_iter_next(struct spdk_blob_store *bs, struct spdk_blob *blob,
6952 		  spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
6953 {
6954 	struct spdk_bs_iter_ctx *ctx;
6955 
6956 	assert(blob != NULL);
6957 
6958 	ctx = calloc(1, sizeof(*ctx));
6959 	if (!ctx) {
6960 		cb_fn(cb_arg, NULL, -ENOMEM);
6961 		return;
6962 	}
6963 
6964 	ctx->page_num = _spdk_bs_blobid_to_page(blob->id);
6965 	ctx->bs = bs;
6966 	ctx->cb_fn = cb_fn;
6967 	ctx->cb_arg = cb_arg;
6968 
6969 	/* Close the existing blob */
6970 	spdk_blob_close(blob, _spdk_bs_iter_close_cpl, ctx);
6971 }
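
/*
 * Illustrative usage sketch (not from the original source; names are
 * hypothetical and bs is assumed to be reachable from cb_arg or a global):
 * walk every blob in the store. Each iteration yields an open blob,
 * spdk_bs_iter_next() closes it before opening the next one, and the walk
 * finishes with -ENOENT.
 *
 *	static void
 *	iter_cb(void *cb_arg, struct spdk_blob *blob, int bserrno)
 *	{
 *		if (bserrno != 0) {
 *			return;
 *		}
 *		spdk_bs_iter_next(bs, blob, iter_cb, cb_arg);
 *	}
 *
 *	spdk_bs_iter_first(bs, iter_cb, NULL);
 */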
6972 
6973 static int
6974 _spdk_blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value,
6975 		     uint16_t value_len, bool internal)
6976 {
6977 	struct spdk_xattr_tailq *xattrs;
6978 	struct spdk_xattr	*xattr;
	size_t			desc_size;
	void			*tmp;
6980 
6981 	_spdk_blob_verify_md_op(blob);
6982 
6983 	if (blob->md_ro) {
6984 		return -EPERM;
6985 	}
6986 
6987 	desc_size = sizeof(struct spdk_blob_md_descriptor_xattr) + strlen(name) + value_len;
6988 	if (desc_size > SPDK_BS_MAX_DESC_SIZE) {
6989 		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Xattr '%s' of size %ld does not fit into a single page (max %ld)\n", name,
6990 			      desc_size, SPDK_BS_MAX_DESC_SIZE);
6991 		return -ENOMEM;
6992 	}
6993 
6994 	if (internal) {
6995 		xattrs = &blob->xattrs_internal;
6996 		blob->invalid_flags |= SPDK_BLOB_INTERNAL_XATTR;
6997 	} else {
6998 		xattrs = &blob->xattrs;
6999 	}
7000 
	TAILQ_FOREACH(xattr, xattrs, link) {
		if (!strcmp(name, xattr->name)) {
			/* Allocate the replacement value first, so the old
			 * value is preserved if the allocation fails. */
			tmp = malloc(value_len);
			if (!tmp) {
				return -ENOMEM;
			}

			free(xattr->value);
			xattr->value_len = value_len;
			xattr->value = tmp;
			memcpy(xattr->value, value, value_len);

			blob->state = SPDK_BLOB_STATE_DIRTY;

			return 0;
		}
	}
7013 
7014 	xattr = calloc(1, sizeof(*xattr));
7015 	if (!xattr) {
7016 		return -ENOMEM;
7017 	}
	xattr->name = strdup(name);
	if (!xattr->name) {
		free(xattr);
		return -ENOMEM;
	}
	xattr->value_len = value_len;
	xattr->value = malloc(value_len);
	if (!xattr->value) {
		free(xattr->name);
		free(xattr);
		return -ENOMEM;
	}
	memcpy(xattr->value, value, value_len);
7022 	TAILQ_INSERT_TAIL(xattrs, xattr, link);
7023 
7024 	blob->state = SPDK_BLOB_STATE_DIRTY;
7025 
7026 	return 0;
7027 }
7028 
7029 int
7030 spdk_blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value,
7031 		    uint16_t value_len)
7032 {
7033 	return _spdk_blob_set_xattr(blob, name, value, value_len, false);
7034 }
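
/*
 * Usage sketch: tagging a blob with a public xattr.  This must run on
 * the metadata thread, fails with -EPERM if the blob's metadata is
 * read-only, and only reaches disk on the next metadata sync.  The
 * sync callback name below is hypothetical.
 *
 *	const char *name = "my-volume";
 *
 *	if (spdk_blob_set_xattr(blob, "name", name, strlen(name) + 1) == 0) {
 *		spdk_blob_sync_md(blob, example_sync_done, NULL);
 *	}
 */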
7035 
7036 static int
7037 _spdk_blob_remove_xattr(struct spdk_blob *blob, const char *name, bool internal)
7038 {
7039 	struct spdk_xattr_tailq *xattrs;
7040 	struct spdk_xattr	*xattr;
7041 
7042 	_spdk_blob_verify_md_op(blob);
7043 
7044 	if (blob->md_ro) {
7045 		return -EPERM;
7046 	}
7047 	xattrs = internal ? &blob->xattrs_internal : &blob->xattrs;
7048 
7049 	TAILQ_FOREACH(xattr, xattrs, link) {
7050 		if (!strcmp(name, xattr->name)) {
7051 			TAILQ_REMOVE(xattrs, xattr, link);
7052 			free(xattr->value);
7053 			free(xattr->name);
7054 			free(xattr);
7055 
7056 			if (internal && TAILQ_EMPTY(&blob->xattrs_internal)) {
7057 				blob->invalid_flags &= ~SPDK_BLOB_INTERNAL_XATTR;
7058 			}
7059 			blob->state = SPDK_BLOB_STATE_DIRTY;
7060 
7061 			return 0;
7062 		}
7063 	}
7064 
7065 	return -ENOENT;
7066 }
7067 
7068 int
7069 spdk_blob_remove_xattr(struct spdk_blob *blob, const char *name)
7070 {
7071 	return _spdk_blob_remove_xattr(blob, name, false);
7072 }
7073 
7074 static int
7075 _spdk_blob_get_xattr_value(struct spdk_blob *blob, const char *name,
7076 			   const void **value, size_t *value_len, bool internal)
7077 {
7078 	struct spdk_xattr	*xattr;
7079 	struct spdk_xattr_tailq *xattrs;
7080 
7081 	xattrs = internal ? &blob->xattrs_internal : &blob->xattrs;
7082 
7083 	TAILQ_FOREACH(xattr, xattrs, link) {
7084 		if (!strcmp(name, xattr->name)) {
7085 			*value = xattr->value;
7086 			*value_len = xattr->value_len;
7087 			return 0;
7088 		}
7089 	}
7090 	return -ENOENT;
7091 }
7092 
7093 int
7094 spdk_blob_get_xattr_value(struct spdk_blob *blob, const char *name,
7095 			  const void **value, size_t *value_len)
7096 {
7097 	_spdk_blob_verify_md_op(blob);
7098 
7099 	return _spdk_blob_get_xattr_value(blob, name, value, value_len, false);
7100 }
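
/*
 * Usage sketch: reading an xattr back.  The returned pointer aliases
 * memory owned by the blob rather than a copy, so the caller must not
 * free it, and it becomes stale once the xattr is overwritten or
 * removed.
 *
 *	const void *value;
 *	size_t value_len;
 *
 *	if (spdk_blob_get_xattr_value(blob, "name", &value, &value_len) == 0) {
 *		printf("name = %s\n", (const char *)value);
 *	}
 */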
7101 
7102 struct spdk_xattr_names {
7103 	uint32_t	count;
	const char	*names[];
7105 };
7106 
7107 static int
7108 _spdk_blob_get_xattr_names(struct spdk_xattr_tailq *xattrs, struct spdk_xattr_names **names)
7109 {
7110 	struct spdk_xattr	*xattr;
7111 	int			count = 0;
7112 
7113 	TAILQ_FOREACH(xattr, xattrs, link) {
7114 		count++;
7115 	}
7116 
7117 	*names = calloc(1, sizeof(struct spdk_xattr_names) + count * sizeof(char *));
7118 	if (*names == NULL) {
7119 		return -ENOMEM;
7120 	}
7121 
7122 	TAILQ_FOREACH(xattr, xattrs, link) {
7123 		(*names)->names[(*names)->count++] = xattr->name;
7124 	}
7125 
7126 	return 0;
7127 }
7128 
7129 int
7130 spdk_blob_get_xattr_names(struct spdk_blob *blob, struct spdk_xattr_names **names)
7131 {
7132 	_spdk_blob_verify_md_op(blob);
7133 
7134 	return _spdk_blob_get_xattr_names(&blob->xattrs, names);
7135 }
7136 
7137 uint32_t
7138 spdk_xattr_names_get_count(struct spdk_xattr_names *names)
7139 {
7140 	assert(names != NULL);
7141 
7142 	return names->count;
7143 }
7144 
7145 const char *
7146 spdk_xattr_names_get_name(struct spdk_xattr_names *names, uint32_t index)
7147 {
7148 	if (index >= names->count) {
7149 		return NULL;
7150 	}
7151 
7152 	return names->names[index];
7153 }
7154 
7155 void
7156 spdk_xattr_names_free(struct spdk_xattr_names *names)
7157 {
7158 	free(names);
7159 }
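
/*
 * Usage sketch: enumerating a blob's public xattr names.  The returned
 * container borrows the blob's name strings, so only the container
 * itself is released, via spdk_xattr_names_free().
 *
 *	struct spdk_xattr_names *names;
 *	uint32_t i;
 *
 *	if (spdk_blob_get_xattr_names(blob, &names) == 0) {
 *		for (i = 0; i < spdk_xattr_names_get_count(names); i++) {
 *			printf("xattr: %s\n", spdk_xattr_names_get_name(names, i));
 *		}
 *		spdk_xattr_names_free(names);
 *	}
 */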
7160 
7161 struct spdk_bs_type
7162 spdk_bs_get_bstype(struct spdk_blob_store *bs)
7163 {
7164 	return bs->bstype;
7165 }
7166 
7167 void
7168 spdk_bs_set_bstype(struct spdk_blob_store *bs, struct spdk_bs_type bstype)
7169 {
7170 	memcpy(&bs->bstype, &bstype, sizeof(bstype));
7171 }
7172 
7173 bool
7174 spdk_blob_is_read_only(struct spdk_blob *blob)
7175 {
7176 	assert(blob != NULL);
7177 	return (blob->data_ro || blob->md_ro);
7178 }
7179 
7180 bool
7181 spdk_blob_is_snapshot(struct spdk_blob *blob)
7182 {
7183 	struct spdk_blob_list *snapshot_entry;
7184 
7185 	assert(blob != NULL);
7186 
7187 	snapshot_entry = _spdk_bs_get_snapshot_entry(blob->bs, blob->id);
7188 	if (snapshot_entry == NULL) {
7189 		return false;
7190 	}
7191 
7192 	return true;
7193 }
7194 
7195 bool
7196 spdk_blob_is_clone(struct spdk_blob *blob)
7197 {
7198 	assert(blob != NULL);
7199 
7200 	if (blob->parent_id != SPDK_BLOBID_INVALID) {
7201 		assert(spdk_blob_is_thin_provisioned(blob));
7202 		return true;
7203 	}
7204 
7205 	return false;
7206 }
7207 
7208 bool
7209 spdk_blob_is_thin_provisioned(struct spdk_blob *blob)
7210 {
7211 	assert(blob != NULL);
7212 	return !!(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
7213 }
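
/*
 * Usage sketch: the predicates above compose when classifying a blob.
 * A clone is always thin provisioned (unallocated clusters fault
 * through to its parent), and a blob is a snapshot exactly when the
 * blobstore has at least one clone registered against its id.
 *
 *	if (spdk_blob_is_snapshot(blob)) {
 *		// has dependent clones; its data must be treated as immutable
 *	} else if (spdk_blob_is_clone(blob)) {
 *		// reads of unallocated clusters are served by the parent
 *	}
 */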
7214 
7215 static void
7216 _spdk_blob_update_clear_method(struct spdk_blob *blob)
7217 {
7218 	enum blob_clear_method stored_cm;
7219 
7220 	assert(blob != NULL);
7221 
7222 	/* If BLOB_CLEAR_WITH_DEFAULT was passed in, use the setting stored
7223 	 * in metadata previously.  If something other than the default was
	 * specified, ignore the stored value and use what was passed in.
7225 	 */
7226 	stored_cm = ((blob->md_ro_flags & SPDK_BLOB_CLEAR_METHOD) >> SPDK_BLOB_CLEAR_METHOD_SHIFT);
7227 
7228 	if (blob->clear_method == BLOB_CLEAR_WITH_DEFAULT) {
7229 		blob->clear_method = stored_cm;
7230 	} else if (blob->clear_method != stored_cm) {
7231 		SPDK_WARNLOG("Using passed in clear method 0x%x instead of stored value of 0x%x\n",
7232 			     blob->clear_method, stored_cm);
7233 	}
7234 }
7235 
7236 spdk_blob_id
7237 spdk_blob_get_parent_snapshot(struct spdk_blob_store *bs, spdk_blob_id blob_id)
7238 {
7239 	struct spdk_blob_list *snapshot_entry = NULL;
7240 	struct spdk_blob_list *clone_entry = NULL;
7241 
7242 	TAILQ_FOREACH(snapshot_entry, &bs->snapshots, link) {
7243 		TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) {
7244 			if (clone_entry->id == blob_id) {
7245 				return snapshot_entry->id;
7246 			}
7247 		}
7248 	}
7249 
7250 	return SPDK_BLOBID_INVALID;
7251 }
7252 
7253 int
7254 spdk_blob_get_clones(struct spdk_blob_store *bs, spdk_blob_id blobid, spdk_blob_id *ids,
7255 		     size_t *count)
7256 {
7257 	struct spdk_blob_list *snapshot_entry, *clone_entry;
7258 	size_t n;
7259 
7260 	snapshot_entry = _spdk_bs_get_snapshot_entry(bs, blobid);
7261 	if (snapshot_entry == NULL) {
7262 		*count = 0;
7263 		return 0;
7264 	}
7265 
7266 	if (ids == NULL || *count < snapshot_entry->clone_count) {
7267 		*count = snapshot_entry->clone_count;
7268 		return -ENOMEM;
7269 	}
7270 	*count = snapshot_entry->clone_count;
7271 
7272 	n = 0;
7273 	TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) {
7274 		ids[n++] = clone_entry->id;
7275 	}
7276 
7277 	return 0;
7278 }
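
/*
 * Usage sketch: the two-call sizing pattern for spdk_blob_get_clones().
 * A first call with ids == NULL stores the required element count in
 * *count (returning -ENOMEM when the blob has clones); the second call
 * fills the caller's array.  The snapshot_id variable is hypothetical.
 *
 *	size_t count = 0;
 *	spdk_blob_id *ids;
 *
 *	spdk_blob_get_clones(bs, snapshot_id, NULL, &count);
 *	ids = calloc(count, sizeof(spdk_blob_id));
 *	if (ids != NULL && spdk_blob_get_clones(bs, snapshot_id, ids, &count) == 0) {
 *		// ids[0..count-1] are the clones; for each, the parent is
 *		// spdk_blob_get_parent_snapshot(bs, ids[n]) == snapshot_id
 *	}
 *	free(ids);
 */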
7279 
7280 SPDK_LOG_REGISTER_COMPONENT("blob", SPDK_LOG_BLOB)
7281