/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "spdk/blob.h"
#include "spdk/crc32.h"
#include "spdk/env.h"
#include "spdk/queue.h"
#include "spdk/thread.h"
#include "spdk/bit_array.h"
#include "spdk/likely.h"
#include "spdk/util.h"
#include "spdk/string.h"

#include "spdk_internal/assert.h"
#include "spdk_internal/log.h"

#include "blobstore.h"

#define BLOB_CRC32C_INITIAL    0xffffffffUL

static int spdk_bs_register_md_thread(struct spdk_blob_store *bs);
static int spdk_bs_unregister_md_thread(struct spdk_blob_store *bs);
static void _spdk_blob_close_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno);
static void _spdk_blob_insert_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num,
		uint64_t cluster, uint32_t extent, spdk_blob_op_complete cb_fn, void *cb_arg);

static int _spdk_blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value,
				uint16_t value_len, bool internal);
static int _spdk_blob_get_xattr_value(struct spdk_blob *blob, const char *name,
				      const void **value, size_t *value_len, bool internal);
static int _spdk_blob_remove_xattr(struct spdk_blob *blob, const char *name, bool internal);

static void _spdk_blob_insert_extent(struct spdk_blob *blob, uint32_t extent, uint64_t cluster_num,
				     spdk_blob_op_complete cb_fn, void *cb_arg);

static void
_spdk_blob_verify_md_op(struct spdk_blob *blob)
{
	assert(blob != NULL);
	assert(spdk_get_thread() == blob->bs->md_thread);
	assert(blob->state != SPDK_BLOB_STATE_LOADING);
}

static struct spdk_blob_list *
_spdk_bs_get_snapshot_entry(struct spdk_blob_store *bs, spdk_blob_id blobid)
{
	struct spdk_blob_list *snapshot_entry = NULL;

	TAILQ_FOREACH(snapshot_entry, &bs->snapshots, link) {
		if (snapshot_entry->id == blobid) {
			break;
		}
	}

	return snapshot_entry;
}

static void
_spdk_bs_claim_md_page(struct spdk_blob_store *bs, uint32_t page)
{
	assert(page < spdk_bit_array_capacity(bs->used_md_pages));
	assert(spdk_bit_array_get(bs->used_md_pages, page) == false);

	spdk_bit_array_set(bs->used_md_pages, page);
}

static void
_spdk_bs_release_md_page(struct spdk_blob_store *bs, uint32_t page)
{
	assert(page < spdk_bit_array_capacity(bs->used_md_pages));
	assert(spdk_bit_array_get(bs->used_md_pages, page) == true);

	spdk_bit_array_clear(bs->used_md_pages, page);
}

static void
_spdk_bs_claim_cluster(struct spdk_blob_store *bs, uint32_t cluster_num)
{
	assert(cluster_num < spdk_bit_array_capacity(bs->used_clusters));
	assert(spdk_bit_array_get(bs->used_clusters, cluster_num) == false);
	assert(bs->num_free_clusters > 0);

	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Claiming cluster %u\n", cluster_num);

	spdk_bit_array_set(bs->used_clusters, cluster_num);
	bs->num_free_clusters--;
}

static int
_spdk_blob_insert_cluster(struct spdk_blob *blob, uint32_t cluster_num, uint64_t cluster)
{
	uint64_t *cluster_lba = &blob->active.clusters[cluster_num];

	_spdk_blob_verify_md_op(blob);

	if (*cluster_lba != 0) {
		return -EEXIST;
	}

	*cluster_lba = _spdk_bs_cluster_to_lba(blob->bs, cluster);
	return 0;
}

static int
_spdk_bs_allocate_cluster(struct spdk_blob *blob, uint32_t cluster_num,
			  uint64_t *lowest_free_cluster, uint32_t *lowest_free_md_page, bool update_map)
{
	uint32_t *extent_page;

	pthread_mutex_lock(&blob->bs->used_clusters_mutex);
	*lowest_free_cluster = spdk_bit_array_find_first_clear(blob->bs->used_clusters,
			       *lowest_free_cluster);
	if (*lowest_free_cluster == UINT32_MAX) {
		/* No more free clusters. Cannot satisfy the request */
		pthread_mutex_unlock(&blob->bs->used_clusters_mutex);
		return -ENOSPC;
	}

	if (blob->use_extent_table) {
		extent_page = _spdk_bs_cluster_to_extent_page(blob, cluster_num);
		if (*extent_page == 0) {
			/* No extent_page is allocated for the cluster */
			*lowest_free_md_page = spdk_bit_array_find_first_clear(blob->bs->used_md_pages,
					       *lowest_free_md_page);
			if (*lowest_free_md_page == UINT32_MAX) {
				/* No more free md pages. Cannot satisfy the request */
				pthread_mutex_unlock(&blob->bs->used_clusters_mutex);
				return -ENOSPC;
			}
			_spdk_bs_claim_md_page(blob->bs, *lowest_free_md_page);
		}
	}

	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Claiming cluster %lu for blob %lu\n", *lowest_free_cluster, blob->id);
	_spdk_bs_claim_cluster(blob->bs, *lowest_free_cluster);

	pthread_mutex_unlock(&blob->bs->used_clusters_mutex);

	if (update_map) {
		_spdk_blob_insert_cluster(blob, cluster_num, *lowest_free_cluster);
		if (blob->use_extent_table && *extent_page == 0) {
			*extent_page = *lowest_free_md_page;
		}
	}

	return 0;
}
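
/*
 * Note on the hint parameters above: lowest_free_cluster and
 * lowest_free_md_page are in/out search hints. A caller allocating several
 * clusters in a row can carry the values across calls so each bit-array
 * scan resumes where the previous one left off, e.g. (an illustrative
 * sketch only, not code from this file):
 *
 *	uint64_t lfc = 0;
 *	uint32_t lfmd = 0;
 *	uint32_t i;
 *
 *	for (i = 0; i < n; i++) {
 *		if (_spdk_bs_allocate_cluster(blob, i, &lfc, &lfmd, true) != 0) {
 *			break;
 *		}
 *	}
 */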

static void
_spdk_bs_release_cluster(struct spdk_blob_store *bs, uint32_t cluster_num)
{
	assert(cluster_num < spdk_bit_array_capacity(bs->used_clusters));
	assert(spdk_bit_array_get(bs->used_clusters, cluster_num) == true);
	assert(bs->num_free_clusters < bs->total_clusters);

	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Releasing cluster %u\n", cluster_num);

	pthread_mutex_lock(&bs->used_clusters_mutex);
	spdk_bit_array_clear(bs->used_clusters, cluster_num);
	bs->num_free_clusters++;
	pthread_mutex_unlock(&bs->used_clusters_mutex);
}

static void
_spdk_blob_xattrs_init(struct spdk_blob_xattr_opts *xattrs)
{
	xattrs->count = 0;
	xattrs->names = NULL;
	xattrs->ctx = NULL;
	xattrs->get_value = NULL;
}

void
spdk_blob_opts_init(struct spdk_blob_opts *opts)
{
	opts->num_clusters = 0;
	opts->thin_provision = false;
	opts->clear_method = BLOB_CLEAR_WITH_DEFAULT;
	_spdk_blob_xattrs_init(&opts->xattrs);
	opts->use_extent_table = false;
}
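
/*
 * Usage sketch for the public opts initializer above (illustrative only;
 * bs, create_cb and cb_arg are hypothetical caller-side names):
 *
 *	struct spdk_blob_opts opts;
 *
 *	spdk_blob_opts_init(&opts);
 *	opts.num_clusters = 4;
 *	opts.thin_provision = true;
 *	spdk_bs_create_blob_ext(bs, &opts, create_cb, cb_arg);
 *
 * Initializing first ensures that fields added in later versions get sane
 * defaults even if the caller does not know about them.
 */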

void
spdk_blob_open_opts_init(struct spdk_blob_open_opts *opts)
{
	opts->clear_method = BLOB_CLEAR_WITH_DEFAULT;
}
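
/*
 * The open-time counterpart is consumed by spdk_bs_open_blob_ext(), e.g.
 * (an illustrative sketch; open_cb and cb_arg are hypothetical names):
 *
 *	struct spdk_blob_open_opts opts;
 *
 *	spdk_blob_open_opts_init(&opts);
 *	opts.clear_method = BLOB_CLEAR_WITH_NONE;
 *	spdk_bs_open_blob_ext(bs, blobid, &opts, open_cb, cb_arg);
 */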

static struct spdk_blob *
_spdk_blob_alloc(struct spdk_blob_store *bs, spdk_blob_id id)
{
	struct spdk_blob *blob;

	blob = calloc(1, sizeof(*blob));
	if (!blob) {
		return NULL;
	}

	blob->id = id;
	blob->bs = bs;

	blob->parent_id = SPDK_BLOBID_INVALID;

	blob->state = SPDK_BLOB_STATE_DIRTY;
	blob->extent_rle_found = false;
	blob->extent_table_found = false;
	blob->active.num_pages = 1;
	blob->active.pages = calloc(1, sizeof(*blob->active.pages));
	if (!blob->active.pages) {
		free(blob);
		return NULL;
	}

	blob->active.pages[0] = _spdk_bs_blobid_to_page(id);

	TAILQ_INIT(&blob->xattrs);
	TAILQ_INIT(&blob->xattrs_internal);

	return blob;
}

static void
_spdk_xattrs_free(struct spdk_xattr_tailq *xattrs)
{
	struct spdk_xattr	*xattr, *xattr_tmp;

	TAILQ_FOREACH_SAFE(xattr, xattrs, link, xattr_tmp) {
		TAILQ_REMOVE(xattrs, xattr, link);
		free(xattr->name);
		free(xattr->value);
		free(xattr);
	}
}

static void
_spdk_blob_free(struct spdk_blob *blob)
{
	assert(blob != NULL);

	free(blob->active.extent_pages);
	free(blob->clean.extent_pages);
	free(blob->active.clusters);
	free(blob->clean.clusters);
	free(blob->active.pages);
	free(blob->clean.pages);

	_spdk_xattrs_free(&blob->xattrs);
	_spdk_xattrs_free(&blob->xattrs_internal);

	if (blob->back_bs_dev) {
		blob->back_bs_dev->destroy(blob->back_bs_dev);
	}

	free(blob);
}

struct freeze_io_ctx {
	struct spdk_bs_cpl cpl;
	struct spdk_blob *blob;
};

static void
_spdk_blob_io_sync(struct spdk_io_channel_iter *i)
{
	spdk_for_each_channel_continue(i, 0);
}

static void
_spdk_blob_execute_queued_io(struct spdk_io_channel_iter *i)
{
	struct spdk_io_channel *_ch = spdk_io_channel_iter_get_channel(i);
	struct spdk_bs_channel *ch = spdk_io_channel_get_ctx(_ch);
	struct freeze_io_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
	struct spdk_bs_request_set	*set;
	struct spdk_bs_user_op_args	*args;
	spdk_bs_user_op_t *op, *tmp;

	TAILQ_FOREACH_SAFE(op, &ch->queued_io, link, tmp) {
		set = (struct spdk_bs_request_set *)op;
		args = &set->u.user_op;

		if (args->blob == ctx->blob) {
			TAILQ_REMOVE(&ch->queued_io, op, link);
			spdk_bs_user_op_execute(op);
		}
	}

	spdk_for_each_channel_continue(i, 0);
}

static void
_spdk_blob_io_cpl(struct spdk_io_channel_iter *i, int status)
{
	struct freeze_io_ctx *ctx = spdk_io_channel_iter_get_ctx(i);

	ctx->cpl.u.blob_basic.cb_fn(ctx->cpl.u.blob_basic.cb_arg, 0);

	free(ctx);
}

static void
_spdk_blob_freeze_io(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
{
	struct freeze_io_ctx *ctx;

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	ctx->cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
	ctx->cpl.u.blob_basic.cb_fn = cb_fn;
	ctx->cpl.u.blob_basic.cb_arg = cb_arg;
	ctx->blob = blob;

	/* Freeze I/O on blob */
	blob->frozen_refcnt++;

	if (blob->frozen_refcnt == 1) {
		spdk_for_each_channel(blob->bs, _spdk_blob_io_sync, ctx, _spdk_blob_io_cpl);
	} else {
		cb_fn(cb_arg, 0);
		free(ctx);
	}
}
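
/*
 * Freeze/unfreeze semantics, in brief: frozen_refcnt counts nested freeze
 * requests. The first freeze iterates over every I/O channel so that
 * in-flight I/O for the blob drains and subsequent I/O is parked on
 * ch->queued_io; the matching final unfreeze replays that queue. Calls are
 * expected to pair up, e.g. (illustrative sketch; freeze_done_cb,
 * unfreeze_done_cb and ctx are hypothetical names):
 *
 *	_spdk_blob_freeze_io(blob, freeze_done_cb, ctx);
 *	... mutate blob state safely ...
 *	_spdk_blob_unfreeze_io(blob, unfreeze_done_cb, ctx);
 */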

static void
_spdk_blob_unfreeze_io(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
{
	struct freeze_io_ctx *ctx;

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	ctx->cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
	ctx->cpl.u.blob_basic.cb_fn = cb_fn;
	ctx->cpl.u.blob_basic.cb_arg = cb_arg;
	ctx->blob = blob;

	assert(blob->frozen_refcnt > 0);

	blob->frozen_refcnt--;

	if (blob->frozen_refcnt == 0) {
		spdk_for_each_channel(blob->bs, _spdk_blob_execute_queued_io, ctx, _spdk_blob_io_cpl);
	} else {
		cb_fn(cb_arg, 0);
		free(ctx);
	}
}

static int
_spdk_blob_mark_clean(struct spdk_blob *blob)
{
	uint32_t *extent_pages = NULL;
	uint64_t *clusters = NULL;
	uint32_t *pages = NULL;

	assert(blob != NULL);

	if (blob->active.num_extent_pages) {
		assert(blob->active.extent_pages);
		extent_pages = calloc(blob->active.num_extent_pages, sizeof(*blob->active.extent_pages));
		if (!extent_pages) {
			return -ENOMEM;
		}
		memcpy(extent_pages, blob->active.extent_pages,
		       blob->active.num_extent_pages * sizeof(*extent_pages));
	}

	if (blob->active.num_clusters) {
		assert(blob->active.clusters);
		clusters = calloc(blob->active.num_clusters, sizeof(*blob->active.clusters));
		if (!clusters) {
			free(extent_pages);
			return -ENOMEM;
		}
		memcpy(clusters, blob->active.clusters, blob->active.num_clusters * sizeof(*blob->active.clusters));
	}

	if (blob->active.num_pages) {
		assert(blob->active.pages);
		pages = calloc(blob->active.num_pages, sizeof(*blob->active.pages));
		if (!pages) {
			free(extent_pages);
			free(clusters);
			return -ENOMEM;
		}
		memcpy(pages, blob->active.pages, blob->active.num_pages * sizeof(*blob->active.pages));
	}

	free(blob->clean.extent_pages);
	free(blob->clean.clusters);
	free(blob->clean.pages);

	blob->clean.num_extent_pages = blob->active.num_extent_pages;
	blob->clean.extent_pages = blob->active.extent_pages;
	blob->clean.num_clusters = blob->active.num_clusters;
	blob->clean.clusters = blob->active.clusters;
	blob->clean.num_pages = blob->active.num_pages;
	blob->clean.pages = blob->active.pages;

	blob->active.extent_pages = extent_pages;
	blob->active.clusters = clusters;
	blob->active.pages = pages;

	/* If the metadata was dirtied again while the metadata was being written to disk,
	 *  we do not want to revert the DIRTY state back to CLEAN here.
	 */
	if (blob->state == SPDK_BLOB_STATE_LOADING) {
		blob->state = SPDK_BLOB_STATE_CLEAN;
	}

	return 0;
}

static int
_spdk_blob_deserialize_xattr(struct spdk_blob *blob,
			     struct spdk_blob_md_descriptor_xattr *desc_xattr, bool internal)
{
	struct spdk_xattr                       *xattr;

	if (desc_xattr->length != sizeof(desc_xattr->name_length) +
	    sizeof(desc_xattr->value_length) +
	    desc_xattr->name_length + desc_xattr->value_length) {
		return -EINVAL;
	}

	xattr = calloc(1, sizeof(*xattr));
	if (xattr == NULL) {
		return -ENOMEM;
	}

	xattr->name = malloc(desc_xattr->name_length + 1);
	if (xattr->name == NULL) {
		free(xattr);
		return -ENOMEM;
	}
	memcpy(xattr->name, desc_xattr->name, desc_xattr->name_length);
	xattr->name[desc_xattr->name_length] = '\0';

	xattr->value = malloc(desc_xattr->value_length);
	if (xattr->value == NULL) {
		free(xattr->name);
		free(xattr);
		return -ENOMEM;
	}
	xattr->value_len = desc_xattr->value_length;
	memcpy(xattr->value,
	       (void *)((uintptr_t)desc_xattr->name + desc_xattr->name_length),
	       desc_xattr->value_length);

	TAILQ_INSERT_TAIL(internal ? &blob->xattrs_internal : &blob->xattrs, xattr, link);

	return 0;
}
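
/*
 * On-disk layout assumed by the xattr (de)serialization above: the
 * descriptor payload is name_length and value_length followed by the raw
 * name bytes (not NUL-terminated on disk) and then the value bytes. For
 * example, an xattr "foo" with a 4-byte value occupies
 * sizeof(name_length) + sizeof(value_length) + 3 + 4 payload bytes, which
 * is exactly the length check performed at the top of this function.
 */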

static int
_spdk_blob_parse_page(const struct spdk_blob_md_page *page, struct spdk_blob *blob)
{
	struct spdk_blob_md_descriptor *desc;
	size_t	cur_desc = 0;
	void *tmp;

	desc = (struct spdk_blob_md_descriptor *)page->descriptors;
	while (cur_desc < sizeof(page->descriptors)) {
		if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) {
			if (desc->length == 0) {
				/* If padding and length are 0, this terminates the page */
				break;
			}
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) {
			struct spdk_blob_md_descriptor_flags	*desc_flags;

			desc_flags = (struct spdk_blob_md_descriptor_flags *)desc;

			if (desc_flags->length != sizeof(*desc_flags) - sizeof(*desc)) {
				return -EINVAL;
			}

			if ((desc_flags->invalid_flags | SPDK_BLOB_INVALID_FLAGS_MASK) !=
			    SPDK_BLOB_INVALID_FLAGS_MASK) {
				return -EINVAL;
			}

			if ((desc_flags->data_ro_flags | SPDK_BLOB_DATA_RO_FLAGS_MASK) !=
			    SPDK_BLOB_DATA_RO_FLAGS_MASK) {
				blob->data_ro = true;
				blob->md_ro = true;
			}

			if ((desc_flags->md_ro_flags | SPDK_BLOB_MD_RO_FLAGS_MASK) !=
			    SPDK_BLOB_MD_RO_FLAGS_MASK) {
				blob->md_ro = true;
			}

			if ((desc_flags->data_ro_flags & SPDK_BLOB_READ_ONLY)) {
				blob->data_ro = true;
				blob->md_ro = true;
			}

			blob->invalid_flags = desc_flags->invalid_flags;
			blob->data_ro_flags = desc_flags->data_ro_flags;
			blob->md_ro_flags = desc_flags->md_ro_flags;

		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE) {
			struct spdk_blob_md_descriptor_extent_rle	*desc_extent_rle;
			unsigned int				i, j;
			unsigned int				cluster_count = blob->active.num_clusters;

			if (blob->extent_table_found) {
				/* An Extent Table descriptor is already present in the md;
				 * the two descriptor types must never appear together. */
				return -EINVAL;
			}
			blob->extent_rle_found = true;

			desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)desc;

			if (desc_extent_rle->length == 0 ||
			    (desc_extent_rle->length % sizeof(desc_extent_rle->extents[0]) != 0)) {
				return -EINVAL;
			}

			for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) {
				for (j = 0; j < desc_extent_rle->extents[i].length; j++) {
					if (desc_extent_rle->extents[i].cluster_idx != 0) {
						if (!spdk_bit_array_get(blob->bs->used_clusters,
									desc_extent_rle->extents[i].cluster_idx + j)) {
							return -EINVAL;
						}
					}
					cluster_count++;
				}
			}

			if (cluster_count == 0) {
				return -EINVAL;
			}
			tmp = realloc(blob->active.clusters, cluster_count * sizeof(*blob->active.clusters));
			if (tmp == NULL) {
				return -ENOMEM;
			}
			blob->active.clusters = tmp;
			blob->active.cluster_array_size = cluster_count;

			for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) {
				for (j = 0; j < desc_extent_rle->extents[i].length; j++) {
					if (desc_extent_rle->extents[i].cluster_idx != 0) {
						blob->active.clusters[blob->active.num_clusters++] = _spdk_bs_cluster_to_lba(blob->bs,
								desc_extent_rle->extents[i].cluster_idx + j);
					} else if (spdk_blob_is_thin_provisioned(blob)) {
						blob->active.clusters[blob->active.num_clusters++] = 0;
					} else {
						return -EINVAL;
					}
				}
			}
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE) {
			struct spdk_blob_md_descriptor_extent_table *desc_extent_table;
			uint32_t num_extent_pages = blob->active.num_extent_pages;
			uint32_t i, j;
			size_t extent_pages_length;

			desc_extent_table = (struct spdk_blob_md_descriptor_extent_table *)desc;
			extent_pages_length = desc_extent_table->length - sizeof(desc_extent_table->num_clusters);

			if (blob->extent_rle_found) {
				/* An Extent RLE descriptor is already present in the md;
				 * the two descriptor types must never appear together. */
				return -EINVAL;
			} else if (blob->extent_table_found &&
				   desc_extent_table->num_clusters != blob->remaining_clusters_in_et) {
				/* The number of clusters in this Extent Table does not match
				 * the number from a previously read EXTENT_TABLE descriptor. */
				return -EINVAL;
			}

			blob->extent_table_found = true;

			if (desc_extent_table->length == 0 ||
			    (extent_pages_length % sizeof(desc_extent_table->extent_page[0]) != 0)) {
				return -EINVAL;
			}

			for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) {
				num_extent_pages += desc_extent_table->extent_page[i].num_pages;
			}

			tmp = realloc(blob->active.extent_pages, num_extent_pages * sizeof(uint32_t));
			if (tmp == NULL) {
				return -ENOMEM;
			}
			blob->active.extent_pages = tmp;
			blob->active.extent_pages_array_size = num_extent_pages;

			blob->remaining_clusters_in_et = desc_extent_table->num_clusters;

			/* Extent table entries contain md page numbers for extent pages.
			 * Zeroes represent unallocated extent pages, which are run-length-encoded.
			 */
			for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) {
				if (desc_extent_table->extent_page[i].page_idx != 0) {
					assert(desc_extent_table->extent_page[i].num_pages == 1);
					blob->active.extent_pages[blob->active.num_extent_pages++] =
						desc_extent_table->extent_page[i].page_idx;
				} else if (spdk_blob_is_thin_provisioned(blob)) {
					for (j = 0; j < desc_extent_table->extent_page[i].num_pages; j++) {
						blob->active.extent_pages[blob->active.num_extent_pages++] = 0;
					}
				} else {
					return -EINVAL;
				}
			}
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) {
			struct spdk_blob_md_descriptor_extent_page	*desc_extent;
			unsigned int					i;
			unsigned int					cluster_count = blob->active.num_clusters;
			size_t						cluster_idx_length;

			if (blob->extent_rle_found) {
				/* An Extent RLE descriptor is already present in the md;
				 * the two descriptor types must never appear together. */
				return -EINVAL;
			}

			desc_extent = (struct spdk_blob_md_descriptor_extent_page *)desc;
			cluster_idx_length = desc_extent->length - sizeof(desc_extent->start_cluster_idx);

			if (desc_extent->length <= sizeof(desc_extent->start_cluster_idx) ||
			    (cluster_idx_length % sizeof(desc_extent->cluster_idx[0]) != 0)) {
				return -EINVAL;
			}

			for (i = 0; i < cluster_idx_length / sizeof(desc_extent->cluster_idx[0]); i++) {
				if (desc_extent->cluster_idx[i] != 0) {
					if (!spdk_bit_array_get(blob->bs->used_clusters, desc_extent->cluster_idx[i])) {
						return -EINVAL;
					}
				}
				cluster_count++;
			}

			if (cluster_count == 0) {
				return -EINVAL;
			}

			/* When reading extent pages sequentially, the starting cluster idx
			 * should match the current size of the blob.
			 * If this is ever changed to batch reading, this check shall be removed. */
			if (desc_extent->start_cluster_idx != blob->active.num_clusters) {
				return -EINVAL;
			}

			tmp = realloc(blob->active.clusters, cluster_count * sizeof(*blob->active.clusters));
			if (tmp == NULL) {
				return -ENOMEM;
			}
			blob->active.clusters = tmp;
			blob->active.cluster_array_size = cluster_count;

			for (i = 0; i < cluster_idx_length / sizeof(desc_extent->cluster_idx[0]); i++) {
				if (desc_extent->cluster_idx[i] != 0) {
					blob->active.clusters[blob->active.num_clusters++] = _spdk_bs_cluster_to_lba(blob->bs,
							desc_extent->cluster_idx[i]);
				} else if (spdk_blob_is_thin_provisioned(blob)) {
					blob->active.clusters[blob->active.num_clusters++] = 0;
				} else {
					return -EINVAL;
				}
			}
			assert(desc_extent->start_cluster_idx + cluster_count == blob->active.num_clusters);
			assert(blob->remaining_clusters_in_et >= cluster_count);
			blob->remaining_clusters_in_et -= cluster_count;
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) {
			int rc;

			rc = _spdk_blob_deserialize_xattr(blob,
							  (struct spdk_blob_md_descriptor_xattr *) desc, false);
			if (rc != 0) {
				return rc;
			}
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) {
			int rc;

			rc = _spdk_blob_deserialize_xattr(blob,
							  (struct spdk_blob_md_descriptor_xattr *) desc, true);
			if (rc != 0) {
				return rc;
			}
		} else {
			/* Unrecognized descriptor type.  Do not fail - just continue to the
			 *  next descriptor.  If this descriptor is associated with some feature
			 *  defined in a newer version of blobstore, that version of blobstore
			 *  should create and set an associated feature flag to specify if this
			 *  blob can be loaded or not.
			 */
		}

		/* Advance to the next descriptor */
		cur_desc += sizeof(*desc) + desc->length;
		if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) {
			break;
		}
		desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc);
	}

	return 0;
}

static bool _spdk_bs_load_cur_extent_page_valid(struct spdk_blob_md_page *page);

static int
_spdk_blob_parse_extent_page(struct spdk_blob_md_page *extent_page, struct spdk_blob *blob)
{
	assert(blob != NULL);
	assert(blob->state == SPDK_BLOB_STATE_LOADING);
	assert(blob->active.clusters == NULL);

	if (_spdk_bs_load_cur_extent_page_valid(extent_page) == false) {
		return -ENOENT;
	}

	return _spdk_blob_parse_page(extent_page, blob);
}

static int
_spdk_blob_parse(const struct spdk_blob_md_page *pages, uint32_t page_count,
		 struct spdk_blob *blob)
{
	const struct spdk_blob_md_page *page;
	uint32_t i;
	int rc;

	assert(page_count > 0);
	assert(pages[0].sequence_num == 0);
	assert(blob != NULL);
	assert(blob->state == SPDK_BLOB_STATE_LOADING);
	assert(blob->active.clusters == NULL);

	/* The blobid provided doesn't match what's in the MD; this can
	 * happen, for example, if a bogus blobid is passed in through open.
	 */
	if (blob->id != pages[0].id) {
		SPDK_ERRLOG("Blobid (%lu) doesn't match what's in metadata (%lu)\n",
			    blob->id, pages[0].id);
		return -ENOENT;
	}

	for (i = 0; i < page_count; i++) {
		page = &pages[i];

		assert(page->id == blob->id);
		assert(page->sequence_num == i);

		rc = _spdk_blob_parse_page(page, blob);
		if (rc != 0) {
			return rc;
		}
	}

	return 0;
}

static int
_spdk_blob_serialize_add_page(const struct spdk_blob *blob,
			      struct spdk_blob_md_page **pages,
			      uint32_t *page_count,
			      struct spdk_blob_md_page **last_page)
{
	struct spdk_blob_md_page *page;

	assert(pages != NULL);
	assert(page_count != NULL);

	if (*page_count == 0) {
		assert(*pages == NULL);
		*page_count = 1;
		*pages = spdk_malloc(SPDK_BS_PAGE_SIZE, SPDK_BS_PAGE_SIZE,
				     NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
	} else {
		assert(*pages != NULL);
		(*page_count)++;
		*pages = spdk_realloc(*pages,
				      SPDK_BS_PAGE_SIZE * (*page_count),
				      SPDK_BS_PAGE_SIZE);
	}

	if (*pages == NULL) {
		*page_count = 0;
		*last_page = NULL;
		return -ENOMEM;
	}

	page = &(*pages)[*page_count - 1];
	memset(page, 0, sizeof(*page));
	page->id = blob->id;
	page->sequence_num = *page_count - 1;
	page->next = SPDK_INVALID_MD_PAGE;
	*last_page = page;

	return 0;
}

/* Transform the in-memory representation 'xattr' into an on-disk xattr descriptor.
 * required_sz is updated on both success and failure.
 */
static int
_spdk_blob_serialize_xattr(const struct spdk_xattr *xattr,
			   uint8_t *buf, size_t buf_sz,
			   size_t *required_sz, bool internal)
{
	struct spdk_blob_md_descriptor_xattr	*desc;

	*required_sz = sizeof(struct spdk_blob_md_descriptor_xattr) +
		       strlen(xattr->name) +
		       xattr->value_len;

	if (buf_sz < *required_sz) {
		return -1;
	}

	desc = (struct spdk_blob_md_descriptor_xattr *)buf;

	desc->type = internal ? SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL : SPDK_MD_DESCRIPTOR_TYPE_XATTR;
	desc->length = sizeof(desc->name_length) +
		       sizeof(desc->value_length) +
		       strlen(xattr->name) +
		       xattr->value_len;
	desc->name_length = strlen(xattr->name);
	desc->value_length = xattr->value_len;

	memcpy(desc->name, xattr->name, desc->name_length);
	memcpy((void *)((uintptr_t)desc->name + desc->name_length),
	       xattr->value,
	       desc->value_length);

	return 0;
}

static void
_spdk_blob_serialize_extent_table_entry(const struct spdk_blob *blob,
					uint64_t start_ep, uint64_t *next_ep,
					uint8_t **buf, size_t *remaining_sz)
{
	struct spdk_blob_md_descriptor_extent_table *desc;
	size_t cur_sz;
	uint64_t i, et_idx;
	uint32_t extent_page, ep_len;

	/* The buffer must have room for at least one extent page */
	cur_sz = sizeof(struct spdk_blob_md_descriptor) + sizeof(desc->num_clusters) + sizeof(
			 desc->extent_page[0]);
	if (*remaining_sz < cur_sz) {
		*next_ep = start_ep;
		return;
	}

	desc = (struct spdk_blob_md_descriptor_extent_table *)*buf;
	desc->type = SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE;

	desc->num_clusters = blob->active.num_clusters;

	extent_page = blob->active.extent_pages[start_ep];
	ep_len = 1;
	et_idx = 0;
	for (i = start_ep + 1; i < blob->active.num_extent_pages; i++) {
		/* Extent table entries contain md page offsets for extent pages.
		 * Zeroes represent unallocated extent pages, which are run-length-encoded.
		 */
		if (extent_page == 0 && blob->active.extent_pages[i] == 0) {
			ep_len++;
			continue;
		}
		desc->extent_page[et_idx].page_idx = extent_page;
		desc->extent_page[et_idx].num_pages = ep_len;
		et_idx++;

		cur_sz += sizeof(desc->extent_page[et_idx]);

		if (*remaining_sz < cur_sz) {
			/* If we ran out of buffer space, return */
			*next_ep = i;
			break;
		}
		extent_page = blob->active.extent_pages[i];
		ep_len = 1;
	}

	if (*remaining_sz >= cur_sz) {
		desc->extent_page[et_idx].page_idx = extent_page;
		desc->extent_page[et_idx].num_pages = ep_len;
		et_idx++;

		*next_ep = blob->active.num_extent_pages;
	}

	desc->length = sizeof(desc->num_clusters) + sizeof(desc->extent_page[0]) * et_idx;
	*remaining_sz -= sizeof(struct spdk_blob_md_descriptor) + desc->length;
	*buf += sizeof(struct spdk_blob_md_descriptor) + desc->length;
}
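
/*
 * Worked example of the RLE scheme above (illustrative): an extent page
 * array of { 17, 0, 0, 0, 21 } serializes to three entries:
 * { page_idx = 17, num_pages = 1 }, { page_idx = 0, num_pages = 3 } and
 * { page_idx = 21, num_pages = 1 }. Only runs of zeroes are collapsed;
 * allocated extent pages are always written as single-page entries, which
 * is what the num_pages == 1 assert in _spdk_blob_parse_page relies on.
 */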

static int
_spdk_blob_serialize_extent_table(const struct spdk_blob *blob,
				  struct spdk_blob_md_page **pages,
				  struct spdk_blob_md_page *cur_page,
				  uint32_t *page_count, uint8_t **buf,
				  size_t *remaining_sz)
{
	uint64_t				last_extent_page;
	int					rc;

	last_extent_page = 0;
	while (last_extent_page < blob->active.num_extent_pages) {
		_spdk_blob_serialize_extent_table_entry(blob, last_extent_page, &last_extent_page, buf,
							remaining_sz);

		if (last_extent_page == blob->active.num_extent_pages) {
			break;
		}

		rc = _spdk_blob_serialize_add_page(blob, pages, page_count, &cur_page);
		if (rc < 0) {
			return rc;
		}

		*buf = (uint8_t *)cur_page->descriptors;
		*remaining_sz = sizeof(cur_page->descriptors);
	}

	return 0;
}

static void
_spdk_blob_serialize_extent_rle(const struct spdk_blob *blob,
				uint64_t start_cluster, uint64_t *next_cluster,
				uint8_t **buf, size_t *buf_sz)
{
	struct spdk_blob_md_descriptor_extent_rle *desc_extent_rle;
	size_t cur_sz;
	uint64_t i, extent_idx;
	uint64_t lba, lba_per_cluster, lba_count;

	/* The buffer must have room for at least one extent */
	cur_sz = sizeof(struct spdk_blob_md_descriptor) + sizeof(desc_extent_rle->extents[0]);
	if (*buf_sz < cur_sz) {
		*next_cluster = start_cluster;
		return;
	}

	desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)*buf;
	desc_extent_rle->type = SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE;

	lba_per_cluster = _spdk_bs_cluster_to_lba(blob->bs, 1);

	lba = blob->active.clusters[start_cluster];
	lba_count = lba_per_cluster;
	extent_idx = 0;
	for (i = start_cluster + 1; i < blob->active.num_clusters; i++) {
		if ((lba + lba_count) == blob->active.clusters[i] && lba != 0) {
			/* Run-length encode sequential non-zero LBA */
			lba_count += lba_per_cluster;
			continue;
		} else if (lba == 0 && blob->active.clusters[i] == 0) {
			/* Run-length encode unallocated clusters */
			lba_count += lba_per_cluster;
			continue;
		}
		desc_extent_rle->extents[extent_idx].cluster_idx = lba / lba_per_cluster;
		desc_extent_rle->extents[extent_idx].length = lba_count / lba_per_cluster;
		extent_idx++;

		cur_sz += sizeof(desc_extent_rle->extents[extent_idx]);

		if (*buf_sz < cur_sz) {
			/* If we ran out of buffer space, return */
			*next_cluster = i;
			break;
		}

		lba = blob->active.clusters[i];
		lba_count = lba_per_cluster;
	}

	if (*buf_sz >= cur_sz) {
		desc_extent_rle->extents[extent_idx].cluster_idx = lba / lba_per_cluster;
		desc_extent_rle->extents[extent_idx].length = lba_count / lba_per_cluster;
		extent_idx++;

		*next_cluster = blob->active.num_clusters;
	}

	desc_extent_rle->length = sizeof(desc_extent_rle->extents[0]) * extent_idx;
	*buf_sz -= sizeof(struct spdk_blob_md_descriptor) + desc_extent_rle->length;
	*buf += sizeof(struct spdk_blob_md_descriptor) + desc_extent_rle->length;
}
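
/*
 * Worked example of EXTENT_RLE encoding (illustrative): with one cluster
 * per 8 LBAs (lba_per_cluster == 8), an active.clusters array of
 * { 8, 16, 24, 0, 0, 48 } serializes to three extents:
 * { cluster_idx = 1, length = 3 } for the contiguous run starting at LBA 8,
 * { cluster_idx = 0, length = 2 } for the unallocated run, and
 * { cluster_idx = 6, length = 1 } for the cluster at LBA 48.
 */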

static int
_spdk_blob_serialize_extents_rle(const struct spdk_blob *blob,
				 struct spdk_blob_md_page **pages,
				 struct spdk_blob_md_page *cur_page,
				 uint32_t *page_count, uint8_t **buf,
				 size_t *remaining_sz)
{
	uint64_t				last_cluster;
	int					rc;

	last_cluster = 0;
	while (last_cluster < blob->active.num_clusters) {
		_spdk_blob_serialize_extent_rle(blob, last_cluster, &last_cluster, buf, remaining_sz);

		if (last_cluster == blob->active.num_clusters) {
			break;
		}

		rc = _spdk_blob_serialize_add_page(blob, pages, page_count, &cur_page);
		if (rc < 0) {
			return rc;
		}

		*buf = (uint8_t *)cur_page->descriptors;
		*remaining_sz = sizeof(cur_page->descriptors);
	}

	return 0;
}

static void
_spdk_blob_serialize_extent_page(const struct spdk_blob *blob,
				 uint64_t cluster, struct spdk_blob_md_page *page)
{
	struct spdk_blob_md_descriptor_extent_page *desc_extent;
	uint64_t i, extent_idx;
	uint64_t lba, lba_per_cluster;
	uint64_t start_cluster_idx = (cluster / SPDK_EXTENTS_PER_EP) * SPDK_EXTENTS_PER_EP;

	desc_extent = (struct spdk_blob_md_descriptor_extent_page *) page->descriptors;
	desc_extent->type = SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE;

	lba_per_cluster = _spdk_bs_cluster_to_lba(blob->bs, 1);

	desc_extent->start_cluster_idx = start_cluster_idx;
	extent_idx = 0;
	for (i = start_cluster_idx; i < blob->active.num_clusters; i++) {
		lba = blob->active.clusters[i];
		desc_extent->cluster_idx[extent_idx++] = lba / lba_per_cluster;
		if (extent_idx >= SPDK_EXTENTS_PER_EP) {
			break;
		}
	}
	desc_extent->length = sizeof(desc_extent->start_cluster_idx) +
			      sizeof(desc_extent->cluster_idx[0]) * extent_idx;
}
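
/*
 * Unlike EXTENT_RLE, an EXTENT_PAGE descriptor is a flat (non-RLE) array:
 * each of up to SPDK_EXTENTS_PER_EP slots holds the cluster index of one
 * cluster, with 0 meaning unallocated. The start_cluster_idx field anchors
 * the page; for example, if SPDK_EXTENTS_PER_EP were 512, the extent page
 * covering cluster 1000 would start at cluster index 512 and describe
 * clusters 512 through 1023.
 */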

static void
_spdk_blob_serialize_flags(const struct spdk_blob *blob,
			   uint8_t *buf, size_t *buf_sz)
{
	struct spdk_blob_md_descriptor_flags *desc;

	/*
	 * Flags get serialized first, so we should always have room for the flags
	 *  descriptor.
	 */
	assert(*buf_sz >= sizeof(*desc));

	desc = (struct spdk_blob_md_descriptor_flags *)buf;
	desc->type = SPDK_MD_DESCRIPTOR_TYPE_FLAGS;
	desc->length = sizeof(*desc) - sizeof(struct spdk_blob_md_descriptor);
	desc->invalid_flags = blob->invalid_flags;
	desc->data_ro_flags = blob->data_ro_flags;
	desc->md_ro_flags = blob->md_ro_flags;

	*buf_sz -= sizeof(*desc);
}
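
/*
 * The three flag words give forward compatibility (see the masking logic
 * in _spdk_blob_parse_page): a reader that encounters an unknown bit in
 * invalid_flags must refuse to load the blob, an unknown bit in
 * data_ro_flags forces the blob read-only, and an unknown bit in
 * md_ro_flags forces only the metadata read-only. A check of the form
 *
 *	(flags | KNOWN_MASK) != KNOWN_MASK
 *
 * is true exactly when some bit outside KNOWN_MASK is set (KNOWN_MASK here
 * stands in for the SPDK_BLOB_*_FLAGS_MASK constants).
 */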

static int
_spdk_blob_serialize_xattrs(const struct spdk_blob *blob,
			    const struct spdk_xattr_tailq *xattrs, bool internal,
			    struct spdk_blob_md_page **pages,
			    struct spdk_blob_md_page *cur_page,
			    uint32_t *page_count, uint8_t **buf,
			    size_t *remaining_sz)
{
	const struct spdk_xattr	*xattr;
	int	rc;

	TAILQ_FOREACH(xattr, xattrs, link) {
		size_t required_sz = 0;

		rc = _spdk_blob_serialize_xattr(xattr,
						*buf, *remaining_sz,
						&required_sz, internal);
		if (rc < 0) {
			/* Need to add a new page to the chain */
			rc = _spdk_blob_serialize_add_page(blob, pages, page_count,
							   &cur_page);
			if (rc < 0) {
				spdk_free(*pages);
				*pages = NULL;
				*page_count = 0;
				return rc;
			}

			*buf = (uint8_t *)cur_page->descriptors;
			*remaining_sz = sizeof(cur_page->descriptors);

			/* Try again */
			required_sz = 0;
			rc = _spdk_blob_serialize_xattr(xattr,
							*buf, *remaining_sz,
							&required_sz, internal);

			if (rc < 0) {
				spdk_free(*pages);
				*pages = NULL;
				*page_count = 0;
				return rc;
			}
		}

		*remaining_sz -= required_sz;
		*buf += required_sz;
	}

	return 0;
}

static int
_spdk_blob_serialize(const struct spdk_blob *blob, struct spdk_blob_md_page **pages,
		     uint32_t *page_count)
{
	struct spdk_blob_md_page		*cur_page;
	int					rc;
	uint8_t					*buf;
	size_t					remaining_sz;

	assert(pages != NULL);
	assert(page_count != NULL);
	assert(blob != NULL);
	assert(blob->state == SPDK_BLOB_STATE_DIRTY);

	*pages = NULL;
	*page_count = 0;

	/* A blob always has at least 1 page, even if it has no descriptors */
	rc = _spdk_blob_serialize_add_page(blob, pages, page_count, &cur_page);
	if (rc < 0) {
		return rc;
	}

	buf = (uint8_t *)cur_page->descriptors;
	remaining_sz = sizeof(cur_page->descriptors);

	/* Serialize flags */
	_spdk_blob_serialize_flags(blob, buf, &remaining_sz);
	buf += sizeof(struct spdk_blob_md_descriptor_flags);

	/* Serialize xattrs */
	rc = _spdk_blob_serialize_xattrs(blob, &blob->xattrs, false,
					 pages, cur_page, page_count, &buf, &remaining_sz);
	if (rc < 0) {
		return rc;
	}

	/* Serialize internal xattrs */
	rc = _spdk_blob_serialize_xattrs(blob, &blob->xattrs_internal, true,
					 pages, cur_page, page_count, &buf, &remaining_sz);
	if (rc < 0) {
		return rc;
	}

	if (blob->use_extent_table) {
		/* Serialize extent table */
		rc = _spdk_blob_serialize_extent_table(blob, pages, cur_page, page_count, &buf, &remaining_sz);
	} else {
		/* Serialize extents */
		rc = _spdk_blob_serialize_extents_rle(blob, pages, cur_page, page_count, &buf, &remaining_sz);
	}

	return rc;
}

struct spdk_blob_load_ctx {
	struct spdk_blob		*blob;

	struct spdk_blob_md_page	*pages;
	uint32_t			num_pages;
	uint32_t			next_extent_page;
	spdk_bs_sequence_t		*seq;

	spdk_bs_sequence_cpl		cb_fn;
	void				*cb_arg;
};

static uint32_t
_spdk_blob_md_page_calc_crc(void *page)
{
	uint32_t		crc;

	crc = BLOB_CRC32C_INITIAL;
	crc = spdk_crc32c_update(page, SPDK_BS_PAGE_SIZE - 4, crc);
	crc ^= BLOB_CRC32C_INITIAL;

	return crc;
}
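
/*
 * The CRC32-C checksum covers the whole metadata page except its last
 * 4 bytes, which is where struct spdk_blob_md_page stores the crc field
 * itself. Writers compute the value over the first SPDK_BS_PAGE_SIZE - 4
 * bytes and store it; readers recompute it the same way and compare
 * against page->crc before trusting the page contents.
 */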

static void
_spdk_blob_load_final(void *cb_arg, int bserrno)
{
	struct spdk_blob_load_ctx	*ctx = cb_arg;
	struct spdk_blob		*blob = ctx->blob;

	if (bserrno == 0) {
		_spdk_blob_mark_clean(blob);
	}

	ctx->cb_fn(ctx->seq, ctx->cb_arg, bserrno);

	/* Free the memory */
	spdk_free(ctx->pages);
	free(ctx);
}

static void
_spdk_blob_load_snapshot_cpl(void *cb_arg, struct spdk_blob *snapshot, int bserrno)
{
	struct spdk_blob_load_ctx	*ctx = cb_arg;
	struct spdk_blob		*blob = ctx->blob;

	if (bserrno == 0) {
		blob->back_bs_dev = spdk_bs_create_blob_bs_dev(snapshot);
		if (blob->back_bs_dev == NULL) {
			bserrno = -ENOMEM;
		}
	}
	if (bserrno != 0) {
		SPDK_ERRLOG("Snapshot open failed\n");
	}

	_spdk_blob_load_final(ctx, bserrno);
}

static void _spdk_blob_update_clear_method(struct spdk_blob *blob);

static void
_spdk_blob_load_backing_dev(void *cb_arg)
{
	struct spdk_blob_load_ctx	*ctx = cb_arg;
	struct spdk_blob		*blob = ctx->blob;
	const void			*value;
	size_t				len;
	int				rc;

	if (spdk_blob_is_thin_provisioned(blob)) {
		rc = _spdk_blob_get_xattr_value(blob, BLOB_SNAPSHOT, &value, &len, true);
		if (rc == 0) {
			if (len != sizeof(spdk_blob_id)) {
				_spdk_blob_load_final(ctx, -EINVAL);
				return;
			}
			/* open snapshot blob and continue in the callback function */
			blob->parent_id = *(spdk_blob_id *)value;
			spdk_bs_open_blob(blob->bs, blob->parent_id,
					  _spdk_blob_load_snapshot_cpl, ctx);
			return;
		} else {
			/* add zeroes_dev for thin provisioned blob */
			blob->back_bs_dev = spdk_bs_create_zeroes_dev();
		}
	} else {
		/* standard blob */
		blob->back_bs_dev = NULL;
	}
	_spdk_blob_load_final(ctx, 0);
}

static void
_spdk_blob_load_cpl_extents_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_load_ctx	*ctx = cb_arg;
	struct spdk_blob		*blob = ctx->blob;
	struct spdk_blob_md_page	*page;
	uint64_t			i;
	uint32_t			crc;
	uint64_t			lba;
	void				*tmp;
	uint64_t			sz;

	if (bserrno) {
		SPDK_ERRLOG("Extent page read failed: %d\n", bserrno);
		_spdk_blob_load_final(ctx, bserrno);
		return;
	}

	if (ctx->pages == NULL) {
		/* First iteration of this function, allocate buffer for single EXTENT_PAGE */
		ctx->pages = spdk_zmalloc(SPDK_BS_PAGE_SIZE, SPDK_BS_PAGE_SIZE, NULL, SPDK_ENV_SOCKET_ID_ANY,
					  SPDK_MALLOC_DMA);
		if (!ctx->pages) {
			_spdk_blob_load_final(ctx, -ENOMEM);
			return;
		}
		ctx->num_pages = 1;
		ctx->next_extent_page = 0;
	} else {
		page = &ctx->pages[0];
		crc = _spdk_blob_md_page_calc_crc(page);
		if (crc != page->crc) {
			_spdk_blob_load_final(ctx, -EINVAL);
			return;
		}

		if (page->next != SPDK_INVALID_MD_PAGE) {
			_spdk_blob_load_final(ctx, -EINVAL);
			return;
		}

		bserrno = _spdk_blob_parse_extent_page(page, blob);
		if (bserrno) {
			_spdk_blob_load_final(ctx, bserrno);
			return;
		}
	}

	for (i = ctx->next_extent_page; i < blob->active.num_extent_pages; i++) {
		if (blob->active.extent_pages[i] != 0) {
			/* Extent page was allocated, read and parse it. */
			lba = _spdk_bs_md_page_to_lba(blob->bs, blob->active.extent_pages[i]);
			ctx->next_extent_page = i + 1;

			spdk_bs_sequence_read_dev(seq, &ctx->pages[0], lba,
						  _spdk_bs_byte_to_lba(blob->bs, SPDK_BS_PAGE_SIZE),
						  _spdk_blob_load_cpl_extents_cpl, ctx);
			return;
		} else {
			/* Thin provisioned blobs can point to unallocated extent pages.
			 * In this case the blob size is increased by up to the amount left in remaining_clusters_in_et. */

			sz = spdk_min(blob->remaining_clusters_in_et, SPDK_EXTENTS_PER_EP);
			blob->active.num_clusters += sz;
			blob->remaining_clusters_in_et -= sz;

			assert(spdk_blob_is_thin_provisioned(blob));
			assert(i + 1 < blob->active.num_extent_pages || blob->remaining_clusters_in_et == 0);

			tmp = realloc(blob->active.clusters, blob->active.num_clusters * sizeof(*blob->active.clusters));
			if (tmp == NULL) {
				_spdk_blob_load_final(ctx, -ENOMEM);
				return;
			}
			/* Zero only the newly appended entries. cluster_array_size is an
			 * element count, so scale it to a byte offset explicitly rather
			 * than relying on void pointer arithmetic. */
			memset((uint8_t *)tmp + blob->active.cluster_array_size * sizeof(*blob->active.clusters), 0,
			       (blob->active.num_clusters - blob->active.cluster_array_size) * sizeof(*blob->active.clusters));
			blob->active.clusters = tmp;
			blob->active.cluster_array_size = blob->active.num_clusters;
		}
	}

	_spdk_blob_load_backing_dev(ctx);
}

static void
_spdk_blob_load_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_load_ctx	*ctx = cb_arg;
	struct spdk_blob		*blob = ctx->blob;
	struct spdk_blob_md_page	*page;
	int				rc;
	uint32_t			crc;

	if (bserrno) {
		SPDK_ERRLOG("Metadata page read failed: %d\n", bserrno);
		_spdk_blob_load_final(ctx, bserrno);
		return;
	}

	page = &ctx->pages[ctx->num_pages - 1];
	crc = _spdk_blob_md_page_calc_crc(page);
	if (crc != page->crc) {
		SPDK_ERRLOG("Metadata page %d crc mismatch\n", ctx->num_pages);
		_spdk_blob_load_final(ctx, -EINVAL);
		return;
	}

	if (page->next != SPDK_INVALID_MD_PAGE) {
		uint32_t next_page = page->next;
		uint64_t next_lba = _spdk_bs_md_page_to_lba(blob->bs, next_page);

		/* Read the next page */
		ctx->num_pages++;
		ctx->pages = spdk_realloc(ctx->pages, (sizeof(*page) * ctx->num_pages),
					  sizeof(*page));
		if (ctx->pages == NULL) {
			_spdk_blob_load_final(ctx, -ENOMEM);
			return;
		}

		spdk_bs_sequence_read_dev(seq, &ctx->pages[ctx->num_pages - 1],
					  next_lba,
					  _spdk_bs_byte_to_lba(blob->bs, sizeof(*page)),
					  _spdk_blob_load_cpl, ctx);
		return;
	}

	/* Parse the pages */
	rc = _spdk_blob_parse(ctx->pages, ctx->num_pages, blob);
	if (rc) {
		_spdk_blob_load_final(ctx, rc);
		return;
	}

	if (blob->extent_table_found == true) {
		/* If EXTENT_TABLE was found, that means support for it should be enabled. */
		assert(blob->extent_rle_found == false);
		blob->use_extent_table = true;
	} else {
		/* If EXTENT_RLE or no extent_* descriptor was found, disable support
		 * for the extent table. Having no extent_* descriptors means that the blob
		 * has a length of 0 and no extent_rle descriptors were persisted for it.
		 * EXTENT_TABLE, if used, is always present in the metadata regardless of length. */
		blob->use_extent_table = false;
	}

	/* Check the clear_method stored in metadata vs what may have been passed
	 * via spdk_bs_open_blob_ext() and update accordingly.
	 */
	_spdk_blob_update_clear_method(blob);

	spdk_free(ctx->pages);
	ctx->pages = NULL;

	if (blob->extent_table_found) {
		_spdk_blob_load_cpl_extents_cpl(seq, ctx, 0);
	} else {
		_spdk_blob_load_backing_dev(ctx);
	}
}

/* Load a blob from disk given a blobid */
static void
_spdk_blob_load(spdk_bs_sequence_t *seq, struct spdk_blob *blob,
		spdk_bs_sequence_cpl cb_fn, void *cb_arg)
{
	struct spdk_blob_load_ctx *ctx;
	struct spdk_blob_store *bs;
	uint32_t page_num;
	uint64_t lba;

	_spdk_blob_verify_md_op(blob);

	bs = blob->bs;

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(seq, cb_arg, -ENOMEM);
		return;
	}

	ctx->blob = blob;
	ctx->pages = spdk_realloc(ctx->pages, SPDK_BS_PAGE_SIZE, SPDK_BS_PAGE_SIZE);
	if (!ctx->pages) {
		free(ctx);
		cb_fn(seq, cb_arg, -ENOMEM);
		return;
	}
	ctx->num_pages = 1;
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;
	ctx->seq = seq;

	page_num = _spdk_bs_blobid_to_page(blob->id);
	lba = _spdk_bs_md_page_to_lba(blob->bs, page_num);

	blob->state = SPDK_BLOB_STATE_LOADING;

	spdk_bs_sequence_read_dev(seq, &ctx->pages[0], lba,
				  _spdk_bs_byte_to_lba(bs, SPDK_BS_PAGE_SIZE),
				  _spdk_blob_load_cpl, ctx);
}
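
/*
 * Load pipeline, for orientation: _spdk_blob_load reads the root md page
 * (located from the blobid), _spdk_blob_load_cpl follows the page->next
 * chain one page at a time and then parses all pages. If an extent table
 * is in use, _spdk_blob_load_cpl_extents_cpl reads each allocated extent
 * page in turn; finally _spdk_blob_load_backing_dev wires up the zeroes
 * device or the parent snapshot for thin-provisioned blobs, and
 * _spdk_blob_load_final marks the blob clean and completes the sequence.
 */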

struct spdk_blob_persist_ctx {
	struct spdk_blob		*blob;

	struct spdk_bs_super_block	*super;

	struct spdk_blob_md_page	*pages;
	uint32_t			next_extent_page;
	struct spdk_blob_md_page	*extent_page;

	spdk_bs_sequence_t		*seq;
	spdk_bs_sequence_cpl		cb_fn;
	void				*cb_arg;
};

static void
spdk_bs_batch_clear_dev(struct spdk_blob_persist_ctx *ctx, spdk_bs_batch_t *batch, uint64_t lba,
			uint32_t lba_count)
{
	switch (ctx->blob->clear_method) {
	case BLOB_CLEAR_WITH_DEFAULT:
	case BLOB_CLEAR_WITH_UNMAP:
		spdk_bs_batch_unmap_dev(batch, lba, lba_count);
		break;
	case BLOB_CLEAR_WITH_WRITE_ZEROES:
		spdk_bs_batch_write_zeroes_dev(batch, lba, lba_count);
		break;
	case BLOB_CLEAR_WITH_NONE:
	default:
		break;
	}
}

static void
_spdk_blob_persist_complete(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_persist_ctx	*ctx = cb_arg;
	struct spdk_blob		*blob = ctx->blob;

	if (bserrno == 0) {
		_spdk_blob_mark_clean(blob);
	}

	/* Call user callback */
	ctx->cb_fn(seq, ctx->cb_arg, bserrno);

	/* Free the memory */
	spdk_free(ctx->pages);
	free(ctx);
}

static void
_spdk_blob_persist_clear_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_persist_ctx	*ctx = cb_arg;
	struct spdk_blob		*blob = ctx->blob;
	struct spdk_blob_store		*bs = blob->bs;
	size_t				i;

	/* Release all clusters that were truncated */
	for (i = blob->active.num_clusters; i < blob->active.cluster_array_size; i++) {
		uint32_t cluster_num = _spdk_bs_lba_to_cluster(bs, blob->active.clusters[i]);

		/* Nothing to release if it was not allocated */
		if (blob->active.clusters[i] != 0) {
			_spdk_bs_release_cluster(bs, cluster_num);
		}
	}

	if (blob->active.num_clusters == 0) {
		free(blob->active.clusters);
		blob->active.clusters = NULL;
		blob->active.cluster_array_size = 0;
	} else if (blob->active.num_clusters != blob->active.cluster_array_size) {
#ifndef __clang_analyzer__
		void *tmp;

		/* scan-build really can't figure reallocs, workaround it */
		tmp = realloc(blob->active.clusters, sizeof(*blob->active.clusters) * blob->active.num_clusters);
		assert(tmp != NULL);
		blob->active.clusters = tmp;

		tmp = realloc(blob->active.extent_pages, sizeof(uint32_t) * blob->active.num_extent_pages);
		assert(tmp != NULL);
		blob->active.extent_pages = tmp;
#endif
		blob->active.extent_pages_array_size = blob->active.num_extent_pages;
		blob->active.cluster_array_size = blob->active.num_clusters;
	}

	/* TODO: Add path to persist clear extent pages. */
	_spdk_blob_persist_complete(seq, ctx, bserrno);
}

static void
_spdk_blob_persist_clear_clusters(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_persist_ctx	*ctx = cb_arg;
	struct spdk_blob		*blob = ctx->blob;
	struct spdk_blob_store		*bs = blob->bs;
	spdk_bs_batch_t			*batch;
	size_t				i;
	uint64_t			lba;
	uint32_t			lba_count;

	/* Clusters don't move around in blobs. The list shrinks or grows
	 * at the end, but no changes ever occur in the middle of the list.
	 */

	batch = spdk_bs_sequence_to_batch(seq, _spdk_blob_persist_clear_clusters_cpl, ctx);

	/* Clear all clusters that were truncated */
	lba = 0;
	lba_count = 0;
	for (i = blob->active.num_clusters; i < blob->active.cluster_array_size; i++) {
		uint64_t next_lba = blob->active.clusters[i];
		uint32_t next_lba_count = _spdk_bs_cluster_to_lba(bs, 1);

		if (next_lba > 0 && (lba + lba_count) == next_lba) {
			/* This cluster is contiguous with the previous one. */
			lba_count += next_lba_count;
			continue;
		}

		/* This cluster is not contiguous with the previous one. */

		/* If a run of LBAs was previously built up, clear it now */
		if (lba_count > 0) {
			spdk_bs_batch_clear_dev(ctx, batch, lba, lba_count);
		}

		/* Start building the next batch */
		lba = next_lba;
		if (next_lba > 0) {
			lba_count = next_lba_count;
		} else {
			lba_count = 0;
		}
	}

	/* If we ended with a contiguous set of LBAs, clear them now */
	if (lba_count > 0) {
		spdk_bs_batch_clear_dev(ctx, batch, lba, lba_count);
	}

	spdk_bs_batch_close(batch);
}

static void
_spdk_blob_persist_zero_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_persist_ctx	*ctx = cb_arg;
	struct spdk_blob		*blob = ctx->blob;
	struct spdk_blob_store		*bs = blob->bs;
	size_t				i;

	/* This loop starts at 1 because the first page is special and handled
	 * below. The pages (except the first) are never written in place,
	 * so any pages in the clean list can now be released in the
	 * used_md_pages array.
	 */
1679 	for (i = 1; i < blob->clean.num_pages; i++) {
1680 		_spdk_bs_release_md_page(bs, blob->clean.pages[i]);
1681 	}
1682 
1683 	if (blob->active.num_pages == 0) {
1684 		uint32_t page_num;
1685 
1686 		page_num = _spdk_bs_blobid_to_page(blob->id);
1687 		_spdk_bs_release_md_page(bs, page_num);
1688 	}
1689 
1690 	/* Move on to clearing clusters */
1691 	_spdk_blob_persist_clear_clusters(seq, ctx, 0);
1692 }
1693 
1694 static void
1695 _spdk_blob_persist_zero_pages(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
1696 {
1697 	struct spdk_blob_persist_ctx	*ctx = cb_arg;
1698 	struct spdk_blob		*blob = ctx->blob;
1699 	struct spdk_blob_store		*bs = blob->bs;
1700 	uint64_t			lba;
1701 	uint32_t			lba_count;
1702 	spdk_bs_batch_t			*batch;
1703 	size_t				i;
1704 
1705 	batch = spdk_bs_sequence_to_batch(seq, _spdk_blob_persist_zero_pages_cpl, ctx);
1706 
1707 	lba_count = _spdk_bs_byte_to_lba(bs, SPDK_BS_PAGE_SIZE);
1708 
1709 	/* This loop starts at 1 because the first page is special and handled
1710 	 * below. The pages (except the first) are never written in place,
1711 	 * so any pages in the clean list must be zeroed.
1712 	 */
1713 	for (i = 1; i < blob->clean.num_pages; i++) {
1714 		lba = _spdk_bs_md_page_to_lba(bs, blob->clean.pages[i]);
1715 
1716 		spdk_bs_batch_write_zeroes_dev(batch, lba, lba_count);
1717 	}
1718 
1719 	/* The first page will only be zeroed if this is a delete. */
1720 	if (blob->active.num_pages == 0) {
1721 		uint32_t page_num;
1722 
1723 		/* The first page in the metadata goes where the blobid indicates */
1724 		page_num = _spdk_bs_blobid_to_page(blob->id);
1725 		lba = _spdk_bs_md_page_to_lba(bs, page_num);
1726 
1727 		spdk_bs_batch_write_zeroes_dev(batch, lba, lba_count);
1728 	}
1729 
1730 	spdk_bs_batch_close(batch);
1731 }
1732 
1733 static void
1734 _spdk_blob_persist_write_page_root(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
1735 {
1736 	struct spdk_blob_persist_ctx	*ctx = cb_arg;
1737 	struct spdk_blob		*blob = ctx->blob;
1738 	struct spdk_blob_store		*bs = blob->bs;
1739 	uint64_t			lba;
1740 	uint32_t			lba_count;
1741 	struct spdk_blob_md_page	*page;
1742 
1743 	if (blob->active.num_pages == 0) {
1744 		/* Move on to the next step */
1745 		_spdk_blob_persist_zero_pages(seq, ctx, 0);
1746 		return;
1747 	}
1748 
1749 	lba_count = _spdk_bs_byte_to_lba(bs, sizeof(*page));
1750 
1751 	page = &ctx->pages[0];
1752 	/* The first page in the metadata goes where the blobid indicates */
1753 	lba = _spdk_bs_md_page_to_lba(bs, _spdk_bs_blobid_to_page(blob->id));
1754 
1755 	spdk_bs_sequence_write_dev(seq, page, lba, lba_count,
1756 				   _spdk_blob_persist_zero_pages, ctx);
1757 }
1758 
1759 static void
1760 _spdk_blob_persist_write_page_chain(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
1761 {
1762 	struct spdk_blob_persist_ctx	*ctx = cb_arg;
1763 	struct spdk_blob		*blob = ctx->blob;
1764 	struct spdk_blob_store		*bs = blob->bs;
1765 	uint64_t			lba;
1766 	uint32_t			lba_count;
1767 	struct spdk_blob_md_page	*page;
1768 	spdk_bs_batch_t			*batch;
1769 	size_t				i;
1770 
1771 	/* Clusters don't move around in blobs. The list shrinks or grows
1772 	 * at the end, but no changes ever occur in the middle of the list.
1773 	 */
1774 
1775 	lba_count = _spdk_bs_byte_to_lba(bs, sizeof(*page));
1776 
1777 	batch = spdk_bs_sequence_to_batch(seq, _spdk_blob_persist_write_page_root, ctx);
1778 
1779 	/* This loop starts at 1. The root page is not written until
1780 	 * all of the others have finished.
1781 	 */
1782 	for (i = 1; i < blob->active.num_pages; i++) {
1783 		page = &ctx->pages[i];
1784 		assert(page->sequence_num == i);
1785 
1786 		lba = _spdk_bs_md_page_to_lba(bs, blob->active.pages[i]);
1787 
1788 		spdk_bs_batch_write_dev(batch, page, lba, lba_count);
1789 	}
1790 
1791 	spdk_bs_batch_close(batch);
1792 }
1793 
1794 static int
1795 _spdk_blob_resize(struct spdk_blob *blob, uint64_t sz)
1796 {
1797 	uint64_t	i;
1798 	uint64_t	*tmp;
1799 	uint64_t	lfc; /* lowest free cluster */
1800 	uint32_t	lfmd; /* lowest free md page */
1801 	uint64_t	num_clusters;
1802 	uint32_t	*ep_tmp;
1803 	uint64_t	new_num_ep = 0, current_num_ep = 0;
1804 	struct spdk_blob_store *bs;
1805 
1806 	bs = blob->bs;
1807 
1808 	_spdk_blob_verify_md_op(blob);
1809 
1810 	if (blob->active.num_clusters == sz) {
1811 		return 0;
1812 	}
1813 
1814 	if (blob->active.num_clusters < blob->active.cluster_array_size) {
1815 		/* If this blob was resized to be larger, then smaller, then
1816 		 * larger without syncing, then the cluster array already
1817 		 * contains spare assigned clusters we can use.
1818 		 */
1819 		num_clusters = spdk_min(blob->active.cluster_array_size,
1820 					sz);
1821 	} else {
1822 		num_clusters = blob->active.num_clusters;
1823 	}
1824 
1825 	if (blob->use_extent_table) {
1826 		/* Round up, since every cluster beyond the current Extent Table
1827 		 * size requires a new extent page. */
1828 		new_num_ep = spdk_divide_round_up(sz, SPDK_EXTENTS_PER_EP);
1829 		current_num_ep = spdk_divide_round_up(num_clusters, SPDK_EXTENTS_PER_EP);
1830 	}
1831 
1832 	/* Do two passes - one to verify that we can obtain enough clusters
1833 	 * and md pages, another to actually claim them.
1834 	 */
1835 
1836 	if (spdk_blob_is_thin_provisioned(blob) == false) {
1837 		lfc = 0;
1838 		for (i = num_clusters; i < sz; i++) {
1839 			lfc = spdk_bit_array_find_first_clear(bs->used_clusters, lfc);
1840 			if (lfc == UINT32_MAX) {
1841 				/* No more free clusters. Cannot satisfy the request */
1842 				return -ENOSPC;
1843 			}
1844 			lfc++;
1845 		}
1846 		lfmd = 0;
1847 		for (i = current_num_ep; i < new_num_ep; i++) {
1848 			lfmd = spdk_bit_array_find_first_clear(blob->bs->used_md_pages, lfmd);
1849 			if (lfmd == UINT32_MAX) {
1850 				/* No more free md pages. Cannot satisfy the request */
1851 				return -ENOSPC;
1852 			}
			lfmd++;
1853 		}
1854 	}
1855 
1856 	if (sz > num_clusters) {
1857 		/* Expand the cluster array if necessary.
1858 		 * We only shrink the array when persisting.
1859 		 */
1860 		tmp = realloc(blob->active.clusters, sizeof(*blob->active.clusters) * sz);
1861 		if (sz > 0 && tmp == NULL) {
1862 			return -ENOMEM;
1863 		}
1864 		memset(tmp + blob->active.cluster_array_size, 0,
1865 		       sizeof(*blob->active.clusters) * (sz - blob->active.cluster_array_size));
1866 		blob->active.clusters = tmp;
1867 		blob->active.cluster_array_size = sz;
1868 
1869 		/* Expand the extents table, only if enough clusters were added */
1870 		if (new_num_ep > current_num_ep && blob->use_extent_table) {
1871 			ep_tmp = realloc(blob->active.extent_pages, sizeof(*blob->active.extent_pages) * new_num_ep);
1872 			if (new_num_ep > 0 && ep_tmp == NULL) {
1873 				return -ENOMEM;
1874 			}
1875 			memset(ep_tmp + blob->active.extent_pages_array_size, 0,
1876 			       sizeof(*blob->active.extent_pages) * (new_num_ep - blob->active.extent_pages_array_size));
1877 			blob->active.extent_pages = ep_tmp;
1878 			blob->active.extent_pages_array_size = new_num_ep;
1879 		}
1880 	}
1881 
1882 	blob->state = SPDK_BLOB_STATE_DIRTY;
1883 
1884 	if (spdk_blob_is_thin_provisioned(blob) == false) {
1885 		lfc = 0;
1886 		lfmd = 0;
1887 		for (i = num_clusters; i < sz; i++) {
1888 			_spdk_bs_allocate_cluster(blob, i, &lfc, &lfmd, true);
1889 			lfc++;
1890 			lfmd++;
1891 		}
1892 	}
1893 
1894 	blob->active.num_clusters = sz;
1895 	blob->active.num_extent_pages = new_num_ep;
1896 
1897 	return 0;
1898 }
1899 
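/*
 * Usage sketch for the public wrapper over this helper (my_resize_done and
 * my_ctx are hypothetical caller names; sz is a cluster count, not bytes).
 * An -ENOSPC from either verification pass above surfaces via the callback:
 *
 *	static void
 *	my_resize_done(void *cb_arg, int bserrno)
 *	{
 *		// 0 on success; e.g. -ENOSPC when the free cluster/md page
 *		// verification passes above could not satisfy the request
 *	}
 *
 *	spdk_blob_resize(blob, 128, my_resize_done, my_ctx);
 */
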
1900 static void
1901 _spdk_blob_persist_generate_new_md(struct spdk_blob_persist_ctx *ctx)
1902 {
1903 	spdk_bs_sequence_t *seq = ctx->seq;
1904 	struct spdk_blob *blob = ctx->blob;
1905 	struct spdk_blob_store *bs = blob->bs;
1906 	uint64_t i;
1907 	uint32_t page_num;
1908 	void *tmp;
1909 	int rc;
1910 
1911 	/* Generate the new metadata */
1912 	rc = _spdk_blob_serialize(blob, &ctx->pages, &blob->active.num_pages);
1913 	if (rc < 0) {
1914 		_spdk_blob_persist_complete(seq, ctx, rc);
1915 		return;
1916 	}
1917 
1918 	assert(blob->active.num_pages >= 1);
1919 
1920 	/* Resize the cache of page indices */
1921 	tmp = realloc(blob->active.pages, blob->active.num_pages * sizeof(*blob->active.pages));
1922 	if (!tmp) {
1923 		_spdk_blob_persist_complete(seq, ctx, -ENOMEM);
1924 		return;
1925 	}
1926 	blob->active.pages = tmp;
1927 
1928 	/* Assign this metadata to pages. This requires two passes -
1929 	 * one to verify that there are enough pages and a second
1930 	 * to actually claim them. */
1931 	page_num = 0;
1932 	/* Note that this loop starts at one. The first page location is fixed by the blobid. */
1933 	for (i = 1; i < blob->active.num_pages; i++) {
1934 		page_num = spdk_bit_array_find_first_clear(bs->used_md_pages, page_num);
1935 		if (page_num == UINT32_MAX) {
1936 			_spdk_blob_persist_complete(seq, ctx, -ENOMEM);
1937 			return;
1938 		}
1939 		page_num++;
1940 	}
1941 
1942 	page_num = 0;
1943 	blob->active.pages[0] = _spdk_bs_blobid_to_page(blob->id);
1944 	for (i = 1; i < blob->active.num_pages; i++) {
1945 		page_num = spdk_bit_array_find_first_clear(bs->used_md_pages, page_num);
1946 		ctx->pages[i - 1].next = page_num;
1947 		/* Now that previous metadata page is complete, calculate the crc for it. */
1948 		ctx->pages[i - 1].crc = _spdk_blob_md_page_calc_crc(&ctx->pages[i - 1]);
1949 		blob->active.pages[i] = page_num;
1950 		_spdk_bs_claim_md_page(bs, page_num);
1951 		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Claiming page %u for blob %lu\n", page_num, blob->id);
1952 		page_num++;
1953 	}
1954 	ctx->pages[i - 1].crc = _spdk_blob_md_page_calc_crc(&ctx->pages[i - 1]);
1955 	/* Start writing the metadata from last page to first */
1956 	blob->state = SPDK_BLOB_STATE_CLEAN;
1957 	_spdk_blob_persist_write_page_chain(seq, ctx, 0);
1958 }
1959 
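/*
 * Resulting page chain (illustrative): ctx->pages[0] lands at the page fixed
 * by the blobid, its "next" field names the newly claimed page holding
 * ctx->pages[1], and so on; the final page keeps the invalid "next" marker
 * set during serialization.  Each crc is computed only once a page's "next"
 * is known, and the non-root pages are written first (as a batch in
 * _spdk_blob_persist_write_page_chain) so the root page, written last,
 * publishes the new metadata only when the rest of the chain is durable.
 */
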
1960 static void _spdk_blob_persist_write_extent_pages(spdk_bs_sequence_t *seq, void *cb_arg,
1961 		int bserrno);
1962 
1963 static void
1964 _spdk_blob_persist_write_extent_page(uint32_t extent, uint64_t cluster_num,
1965 				     struct spdk_blob_persist_ctx *ctx)
1966 {
1967 	spdk_bs_sequence_t		*seq = ctx->seq;
1968 	uint32_t                        page_count = 0;
1969 	struct spdk_blob		*blob = ctx->blob;
1970 	int				rc;
1971 
1972 	rc = _spdk_blob_serialize_add_page(blob, &ctx->extent_page, &page_count, &ctx->extent_page);
1973 	if (rc < 0) {
1974 		assert(false);
1975 		return;
1976 	}
1977 
1978 	_spdk_blob_serialize_extent_page(blob, cluster_num, ctx->extent_page);
1979 
1980 	ctx->extent_page->crc = _spdk_blob_md_page_calc_crc(ctx->extent_page);
1981 
1982 	spdk_bs_sequence_write_dev(seq, ctx->extent_page, _spdk_bs_md_page_to_lba(blob->bs, extent),
1983 				   _spdk_bs_byte_to_lba(blob->bs, SPDK_BS_PAGE_SIZE),
1984 				   _spdk_blob_persist_write_extent_pages, ctx);
1985 }
1986 
1987 static void
1988 _spdk_blob_persist_write_extent_pages(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
1989 {
1990 	struct spdk_blob_persist_ctx	*ctx = cb_arg;
1991 	struct spdk_blob		*blob = ctx->blob;
1992 	size_t				i;
1993 	uint32_t			extent_page_id;
1994 
1995 	if (ctx->extent_page != NULL) {
1996 		spdk_free(ctx->extent_page);
1997 		ctx->extent_page = NULL;
1998 	}
1999 
2000 	/* Only write out changed extent pages */
2001 	for (i = ctx->next_extent_page; i < blob->active.num_extent_pages; i++) {
2002 		extent_page_id = blob->active.extent_pages[i];
2003 		if (extent_page_id == 0) {
2004 			/* No Extent Page to persist */
2005 			assert(spdk_blob_is_thin_provisioned(blob));
2006 			continue;
2007 		}
2008 		/* Writing out a new extent page for the first time. Either the active extent page
2009 		 * array is larger than the clean one, or no extent page was assigned due to thin provisioning. */
2010 		if (i >= blob->clean.extent_pages_array_size || blob->clean.extent_pages[i] == 0) {
2011 			assert(spdk_bit_array_get(blob->bs->used_md_pages, extent_page_id));
2012 			ctx->next_extent_page = i + 1;
2013 			_spdk_blob_persist_write_extent_page(extent_page_id, i * SPDK_EXTENTS_PER_EP, ctx);
2014 			return;
2015 		}
2016 		assert(blob->clean.extent_pages[i] != 0);
2017 	}
2018 
2019 	_spdk_blob_persist_generate_new_md(ctx);
2020 }
2021 
2022 static void
2023 _spdk_blob_persist_start(struct spdk_blob_persist_ctx *ctx)
2024 {
2025 	spdk_bs_sequence_t *seq = ctx->seq;
2026 	struct spdk_blob *blob = ctx->blob;
2027 
2028 	if (blob->active.num_pages == 0) {
2029 		/* This is the signal that the blob should be deleted.
2030 		 * Immediately jump to the clean up routine. */
2031 		assert(blob->clean.num_pages > 0);
2032 		blob->state = SPDK_BLOB_STATE_CLEAN;
2033 		_spdk_blob_persist_zero_pages(seq, ctx, 0);
2034 		return;
2035 
2036 	}
2037 
2038 	_spdk_blob_persist_write_extent_pages(seq, ctx, 0);
2039 }
2040 
2041 static void
2042 _spdk_blob_persist_dirty_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2043 {
2044 	struct spdk_blob_persist_ctx *ctx = cb_arg;
2045 
2046 	ctx->blob->bs->clean = 0;
2047 
2048 	spdk_free(ctx->super);
2049 
2050 	_spdk_blob_persist_start(ctx);
2051 }
2052 
2053 static void
2054 _spdk_bs_write_super(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs,
2055 		     struct spdk_bs_super_block *super, spdk_bs_sequence_cpl cb_fn, void *cb_arg);
2056 
2057 
2058 static void
2059 _spdk_blob_persist_dirty(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2060 {
2061 	struct spdk_blob_persist_ctx *ctx = cb_arg;
2062 
2063 	ctx->super->clean = 0;
2064 	if (ctx->super->size == 0) {
2065 		ctx->super->size = ctx->blob->bs->dev->blockcnt * ctx->blob->bs->dev->blocklen;
2066 	}
2067 
2068 	_spdk_bs_write_super(seq, ctx->blob->bs, ctx->super, _spdk_blob_persist_dirty_cpl, ctx);
2069 }
2070 
2071 
2072 /* Write a blob to disk */
2073 static void
2074 _spdk_blob_persist(spdk_bs_sequence_t *seq, struct spdk_blob *blob,
2075 		   spdk_bs_sequence_cpl cb_fn, void *cb_arg)
2076 {
2077 	struct spdk_blob_persist_ctx *ctx;
2078 
2079 	_spdk_blob_verify_md_op(blob);
2080 
2081 	if (blob->state == SPDK_BLOB_STATE_CLEAN) {
2082 		cb_fn(seq, cb_arg, 0);
2083 		return;
2084 	}
2085 
2086 	ctx = calloc(1, sizeof(*ctx));
2087 	if (!ctx) {
2088 		cb_fn(seq, cb_arg, -ENOMEM);
2089 		return;
2090 	}
2091 	ctx->blob = blob;
2092 	ctx->seq = seq;
2093 	ctx->cb_fn = cb_fn;
2094 	ctx->cb_arg = cb_arg;
2095 	ctx->next_extent_page = 0;
2096 
2097 	if (blob->bs->clean) {
2098 		ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL,
2099 					  SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
2100 		if (!ctx->super) {
2101 			cb_fn(seq, cb_arg, -ENOMEM);
2102 			free(ctx);
2103 			return;
2104 		}
2105 
2106 		spdk_bs_sequence_read_dev(seq, ctx->super, _spdk_bs_page_to_lba(blob->bs, 0),
2107 					  _spdk_bs_byte_to_lba(blob->bs, sizeof(*ctx->super)),
2108 					  _spdk_blob_persist_dirty, ctx);
2109 	} else {
2110 		_spdk_blob_persist_start(ctx);
2111 	}
2112 }
2113 
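/*
 * Persist pipeline, as wired together above (for reference):
 *
 *	[bs->clean only: read super, clear the clean flag, rewrite super]
 *	-> _spdk_blob_persist_start
 *	-> write changed extent pages -> serialize + claim new md pages
 *	-> write md pages 1..N (batch) -> write root md page
 *	-> zero released md pages -> clear truncated clusters -> completion
 */
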
2114 struct spdk_blob_copy_cluster_ctx {
2115 	struct spdk_blob *blob;
2116 	uint8_t *buf;
2117 	uint64_t page;
2118 	uint64_t new_cluster;
2119 	uint32_t new_extent_page;
2120 	spdk_bs_sequence_t *seq;
2121 };
2122 
2123 static void
2124 _spdk_blob_allocate_and_copy_cluster_cpl(void *cb_arg, int bserrno)
2125 {
2126 	struct spdk_blob_copy_cluster_ctx *ctx = cb_arg;
2127 	struct spdk_bs_request_set *set = (struct spdk_bs_request_set *)ctx->seq;
2128 	TAILQ_HEAD(, spdk_bs_request_set) requests;
2129 	spdk_bs_user_op_t *op;
2130 
2131 	TAILQ_INIT(&requests);
2132 	TAILQ_SWAP(&set->channel->need_cluster_alloc, &requests, spdk_bs_request_set, link);
2133 
2134 	while (!TAILQ_EMPTY(&requests)) {
2135 		op = TAILQ_FIRST(&requests);
2136 		TAILQ_REMOVE(&requests, op, link);
2137 		if (bserrno == 0) {
2138 			spdk_bs_user_op_execute(op);
2139 		} else {
2140 			spdk_bs_user_op_abort(op);
2141 		}
2142 	}
2143 
2144 	spdk_free(ctx->buf);
2145 	free(ctx);
2146 }
2147 
2148 static void
2149 _spdk_blob_insert_cluster_cpl(void *cb_arg, int bserrno)
2150 {
2151 	struct spdk_blob_copy_cluster_ctx *ctx = cb_arg;
2152 
2153 	if (bserrno) {
2154 		if (bserrno == -EEXIST) {
2155 			/* The metadata insert failed because another thread
2156 			 * allocated the cluster first. Free our cluster
2157 			 * but continue without error. */
2158 			bserrno = 0;
2159 		}
2160 		_spdk_bs_release_cluster(ctx->blob->bs, ctx->new_cluster);
2161 		if (ctx->new_extent_page != 0) {
2162 			_spdk_bs_release_md_page(ctx->blob->bs, ctx->new_extent_page);
2163 		}
2164 	}
2165 
2166 	spdk_bs_sequence_finish(ctx->seq, bserrno);
2167 }
2168 
2169 static void
2170 _spdk_blob_write_copy_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2171 {
2172 	struct spdk_blob_copy_cluster_ctx *ctx = cb_arg;
2173 	uint32_t cluster_number;
2174 
2175 	if (bserrno) {
2176 		/* The write failed, so jump to the final completion handler */
2177 		spdk_bs_sequence_finish(seq, bserrno);
2178 		return;
2179 	}
2180 
2181 	cluster_number = _spdk_bs_page_to_cluster(ctx->blob->bs, ctx->page);
2182 
2183 	_spdk_blob_insert_cluster_on_md_thread(ctx->blob, cluster_number, ctx->new_cluster,
2184 					       ctx->new_extent_page, _spdk_blob_insert_cluster_cpl, ctx);
2185 }
2186 
2187 static void
2188 _spdk_blob_write_copy(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2189 {
2190 	struct spdk_blob_copy_cluster_ctx *ctx = cb_arg;
2191 
2192 	if (bserrno != 0) {
2193 		/* The read failed, so jump to the final completion handler */
2194 		spdk_bs_sequence_finish(seq, bserrno);
2195 		return;
2196 	}
2197 
2198 	/* Write whole cluster */
2199 	spdk_bs_sequence_write_dev(seq, ctx->buf,
2200 				   _spdk_bs_cluster_to_lba(ctx->blob->bs, ctx->new_cluster),
2201 				   _spdk_bs_cluster_to_lba(ctx->blob->bs, 1),
2202 				   _spdk_blob_write_copy_cpl, ctx);
2203 }
2204 
2205 static void
2206 _spdk_bs_allocate_and_copy_cluster(struct spdk_blob *blob,
2207 				   struct spdk_io_channel *_ch,
2208 				   uint64_t io_unit, spdk_bs_user_op_t *op)
2209 {
2210 	struct spdk_bs_cpl cpl;
2211 	struct spdk_bs_channel *ch;
2212 	struct spdk_blob_copy_cluster_ctx *ctx;
2213 	uint32_t cluster_start_page;
2214 	uint32_t cluster_number;
2215 	int rc;
2216 
2217 	ch = spdk_io_channel_get_ctx(_ch);
2218 
2219 	if (!TAILQ_EMPTY(&ch->need_cluster_alloc)) {
2220 		/* There are already operations pending. Queue this user op
2221 		 * and return because it will be re-executed when the outstanding
2222 		 * cluster allocation completes. */
2223 		TAILQ_INSERT_TAIL(&ch->need_cluster_alloc, op, link);
2224 		return;
2225 	}
2226 
2227 	/* Round the io_unit offset down to the first page in the cluster */
2228 	cluster_start_page = _spdk_bs_io_unit_to_cluster_start(blob, io_unit);
2229 
2230 	/* Calculate which index in the metadata cluster array the corresponding
2231 	 * cluster is supposed to be at. */
2232 	cluster_number = _spdk_bs_io_unit_to_cluster_number(blob, io_unit);
2233 
2234 	ctx = calloc(1, sizeof(*ctx));
2235 	if (!ctx) {
2236 		spdk_bs_user_op_abort(op);
2237 		return;
2238 	}
2239 
2240 	assert(blob->bs->cluster_sz % blob->back_bs_dev->blocklen == 0);
2241 
2242 	ctx->blob = blob;
2243 	ctx->page = cluster_start_page;
2244 
2245 	if (blob->parent_id != SPDK_BLOBID_INVALID) {
2246 		ctx->buf = spdk_malloc(blob->bs->cluster_sz, blob->back_bs_dev->blocklen,
2247 				       NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
2248 		if (!ctx->buf) {
2249 			SPDK_ERRLOG("DMA allocation for cluster of size = %" PRIu32 " failed.\n",
2250 				    blob->bs->cluster_sz);
2251 			free(ctx);
2252 			spdk_bs_user_op_abort(op);
2253 			return;
2254 		}
2255 	}
2256 
2257 	rc = _spdk_bs_allocate_cluster(blob, cluster_number, &ctx->new_cluster, &ctx->new_extent_page,
2258 				       false);
2259 	if (rc != 0) {
2260 		spdk_free(ctx->buf);
2261 		free(ctx);
2262 		spdk_bs_user_op_abort(op);
2263 		return;
2264 	}
2265 
2266 	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
2267 	cpl.u.blob_basic.cb_fn = _spdk_blob_allocate_and_copy_cluster_cpl;
2268 	cpl.u.blob_basic.cb_arg = ctx;
2269 
2270 	ctx->seq = spdk_bs_sequence_start(_ch, &cpl);
2271 	if (!ctx->seq) {
2272 		_spdk_bs_release_cluster(blob->bs, ctx->new_cluster);
2273 		spdk_free(ctx->buf);
2274 		free(ctx);
2275 		spdk_bs_user_op_abort(op);
2276 		return;
2277 	}
2278 
2279 	/* Queue the user op to block other incoming operations */
2280 	TAILQ_INSERT_TAIL(&ch->need_cluster_alloc, op, link);
2281 
2282 	if (blob->parent_id != SPDK_BLOBID_INVALID) {
2283 		/* Read cluster from backing device */
2284 		spdk_bs_sequence_read_bs_dev(ctx->seq, blob->back_bs_dev, ctx->buf,
2285 					     _spdk_bs_dev_page_to_lba(blob->back_bs_dev, cluster_start_page),
2286 					     _spdk_bs_dev_byte_to_lba(blob->back_bs_dev, blob->bs->cluster_sz),
2287 					     _spdk_blob_write_copy, ctx);
2288 	} else {
2289 		_spdk_blob_insert_cluster_on_md_thread(ctx->blob, cluster_number, ctx->new_cluster,
2290 						       ctx->new_extent_page, _spdk_blob_insert_cluster_cpl, ctx);
2291 	}
2292 }
2293 
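/*
 * Copy-on-write flow implemented above (summary): a free cluster (and, for
 * extent-table blobs, possibly a new extent page) is claimed, the triggering
 * user op is queued on need_cluster_alloc to hold back overlapping I/O, and,
 * when the blob has a parent, the old cluster contents are read from
 * back_bs_dev into ctx->buf and copied into the new cluster before the
 * cluster map is updated on the metadata thread.  On completion every queued
 * op is re-executed, or aborted on error.
 */
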
2294 static void
2295 _spdk_blob_calculate_lba_and_lba_count(struct spdk_blob *blob, uint64_t io_unit, uint64_t length,
2296 				       uint64_t *lba,	uint32_t *lba_count)
2297 {
2298 	*lba_count = length;
2299 
2300 	if (!_spdk_bs_io_unit_is_allocated(blob, io_unit)) {
2301 		assert(blob->back_bs_dev != NULL);
2302 		*lba = _spdk_bs_io_unit_to_back_dev_lba(blob, io_unit);
2303 		*lba_count = _spdk_bs_io_unit_to_back_dev_lba(blob, *lba_count);
2304 	} else {
2305 		*lba = _spdk_bs_blob_io_unit_to_lba(blob, io_unit);
2306 	}
2307 }
2308 
2309 struct op_split_ctx {
2310 	struct spdk_blob *blob;
2311 	struct spdk_io_channel *channel;
2312 	uint64_t io_unit_offset;
2313 	uint64_t io_units_remaining;
2314 	void *curr_payload;
2315 	enum spdk_blob_op_type op_type;
2316 	spdk_bs_sequence_t *seq;
2317 };
2318 
2319 static void
2320 _spdk_blob_request_submit_op_split_next(void *cb_arg, int bserrno)
2321 {
2322 	struct op_split_ctx	*ctx = cb_arg;
2323 	struct spdk_blob	*blob = ctx->blob;
2324 	struct spdk_io_channel	*ch = ctx->channel;
2325 	enum spdk_blob_op_type	op_type = ctx->op_type;
2326 	uint8_t			*buf = ctx->curr_payload;
2327 	uint64_t		offset = ctx->io_unit_offset;
2328 	uint64_t		length = ctx->io_units_remaining;
2329 	uint64_t		op_length;
2330 
2331 	if (bserrno != 0 || ctx->io_units_remaining == 0) {
2332 		spdk_bs_sequence_finish(ctx->seq, bserrno);
2333 		free(ctx);
2334 		return;
2335 	}
2336 
2337 	op_length = spdk_min(length, _spdk_bs_num_io_units_to_cluster_boundary(blob,
2338 			     offset));
2339 
2340 	/* Update length and payload for next operation */
2341 	ctx->io_units_remaining -= op_length;
2342 	ctx->io_unit_offset += op_length;
2343 	if (op_type == SPDK_BLOB_WRITE || op_type == SPDK_BLOB_READ) {
2344 		ctx->curr_payload += op_length * blob->bs->io_unit_size;
2345 	}
2346 
2347 	switch (op_type) {
2348 	case SPDK_BLOB_READ:
2349 		spdk_blob_io_read(blob, ch, buf, offset, op_length,
2350 				  _spdk_blob_request_submit_op_split_next, ctx);
2351 		break;
2352 	case SPDK_BLOB_WRITE:
2353 		spdk_blob_io_write(blob, ch, buf, offset, op_length,
2354 				   _spdk_blob_request_submit_op_split_next, ctx);
2355 		break;
2356 	case SPDK_BLOB_UNMAP:
2357 		spdk_blob_io_unmap(blob, ch, offset, op_length,
2358 				   _spdk_blob_request_submit_op_split_next, ctx);
2359 		break;
2360 	case SPDK_BLOB_WRITE_ZEROES:
2361 		spdk_blob_io_write_zeroes(blob, ch, offset, op_length,
2362 					  _spdk_blob_request_submit_op_split_next, ctx);
2363 		break;
2364 	case SPDK_BLOB_READV:
2365 	case SPDK_BLOB_WRITEV:
2366 		SPDK_ERRLOG("readv/writev not valid\n");
2367 		spdk_bs_sequence_finish(ctx->seq, -EINVAL);
2368 		free(ctx);
2369 		break;
2370 	}
2371 }
2372 
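/*
 * Split arithmetic example (hypothetical geometry: 32 io units per cluster):
 * an op at io_unit offset 40 with length 50 first issues
 * op_length = spdk_min(50, 64 - 40) = 24 units up to the cluster boundary,
 * then re-enters with offset 64 and 26 units remaining, which fit within the
 * next cluster, so the op completes in two sub-operations.
 */
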
2373 static void
2374 _spdk_blob_request_submit_op_split(struct spdk_io_channel *ch, struct spdk_blob *blob,
2375 				   void *payload, uint64_t offset, uint64_t length,
2376 				   spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type)
2377 {
2378 	struct op_split_ctx *ctx;
2379 	spdk_bs_sequence_t *seq;
2380 	struct spdk_bs_cpl cpl;
2381 
2382 	assert(blob != NULL);
2383 
2384 	ctx = calloc(1, sizeof(struct op_split_ctx));
2385 	if (ctx == NULL) {
2386 		cb_fn(cb_arg, -ENOMEM);
2387 		return;
2388 	}
2389 
2390 	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
2391 	cpl.u.blob_basic.cb_fn = cb_fn;
2392 	cpl.u.blob_basic.cb_arg = cb_arg;
2393 
2394 	seq = spdk_bs_sequence_start(ch, &cpl);
2395 	if (!seq) {
2396 		free(ctx);
2397 		cb_fn(cb_arg, -ENOMEM);
2398 		return;
2399 	}
2400 
2401 	ctx->blob = blob;
2402 	ctx->channel = ch;
2403 	ctx->curr_payload = payload;
2404 	ctx->io_unit_offset = offset;
2405 	ctx->io_units_remaining = length;
2406 	ctx->op_type = op_type;
2407 	ctx->seq = seq;
2408 
2409 	_spdk_blob_request_submit_op_split_next(ctx, 0);
2410 }
2411 
2412 static void
2413 _spdk_blob_request_submit_op_single(struct spdk_io_channel *_ch, struct spdk_blob *blob,
2414 				    void *payload, uint64_t offset, uint64_t length,
2415 				    spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type)
2416 {
2417 	struct spdk_bs_cpl cpl;
2418 	uint64_t lba;
2419 	uint32_t lba_count;
2420 
2421 	assert(blob != NULL);
2422 
2423 	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
2424 	cpl.u.blob_basic.cb_fn = cb_fn;
2425 	cpl.u.blob_basic.cb_arg = cb_arg;
2426 
2427 	_spdk_blob_calculate_lba_and_lba_count(blob, offset, length, &lba, &lba_count);
2428 
2429 	if (blob->frozen_refcnt) {
2430 		/* This blob I/O is frozen */
2431 		spdk_bs_user_op_t *op;
2432 		struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(_ch);
2433 
2434 		op = spdk_bs_user_op_alloc(_ch, &cpl, op_type, blob, payload, 0, offset, length);
2435 		if (!op) {
2436 			cb_fn(cb_arg, -ENOMEM);
2437 			return;
2438 		}
2439 
2440 		TAILQ_INSERT_TAIL(&bs_channel->queued_io, op, link);
2441 
2442 		return;
2443 	}
2444 
2445 	switch (op_type) {
2446 	case SPDK_BLOB_READ: {
2447 		spdk_bs_batch_t *batch;
2448 
2449 		batch = spdk_bs_batch_open(_ch, &cpl);
2450 		if (!batch) {
2451 			cb_fn(cb_arg, -ENOMEM);
2452 			return;
2453 		}
2454 
2455 		if (_spdk_bs_io_unit_is_allocated(blob, offset)) {
2456 			/* Read from the blob */
2457 			spdk_bs_batch_read_dev(batch, payload, lba, lba_count);
2458 		} else {
2459 			/* Read from the backing block device */
2460 			spdk_bs_batch_read_bs_dev(batch, blob->back_bs_dev, payload, lba, lba_count);
2461 		}
2462 
2463 		spdk_bs_batch_close(batch);
2464 		break;
2465 	}
2466 	case SPDK_BLOB_WRITE:
2467 	case SPDK_BLOB_WRITE_ZEROES: {
2468 		if (_spdk_bs_io_unit_is_allocated(blob, offset)) {
2469 			/* Write to the blob */
2470 			spdk_bs_batch_t *batch;
2471 
2472 			if (lba_count == 0) {
2473 				cb_fn(cb_arg, 0);
2474 				return;
2475 			}
2476 
2477 			batch = spdk_bs_batch_open(_ch, &cpl);
2478 			if (!batch) {
2479 				cb_fn(cb_arg, -ENOMEM);
2480 				return;
2481 			}
2482 
2483 			if (op_type == SPDK_BLOB_WRITE) {
2484 				spdk_bs_batch_write_dev(batch, payload, lba, lba_count);
2485 			} else {
2486 				spdk_bs_batch_write_zeroes_dev(batch, lba, lba_count);
2487 			}
2488 
2489 			spdk_bs_batch_close(batch);
2490 		} else {
2491 			/* Queue this operation and allocate the cluster */
2492 			spdk_bs_user_op_t *op;
2493 
2494 			op = spdk_bs_user_op_alloc(_ch, &cpl, op_type, blob, payload, 0, offset, length);
2495 			if (!op) {
2496 				cb_fn(cb_arg, -ENOMEM);
2497 				return;
2498 			}
2499 
2500 			_spdk_bs_allocate_and_copy_cluster(blob, _ch, offset, op);
2501 		}
2502 		break;
2503 	}
2504 	case SPDK_BLOB_UNMAP: {
2505 		spdk_bs_batch_t *batch;
2506 
2507 		batch = spdk_bs_batch_open(_ch, &cpl);
2508 		if (!batch) {
2509 			cb_fn(cb_arg, -ENOMEM);
2510 			return;
2511 		}
2512 
2513 		if (_spdk_bs_io_unit_is_allocated(blob, offset)) {
2514 			spdk_bs_batch_unmap_dev(batch, lba, lba_count);
2515 		}
2516 
2517 		spdk_bs_batch_close(batch);
2518 		break;
2519 	}
2520 	case SPDK_BLOB_READV:
2521 	case SPDK_BLOB_WRITEV:
2522 		SPDK_ERRLOG("readv/writev not valid\n");
2523 		cb_fn(cb_arg, -EINVAL);
2524 		break;
2525 	}
2526 }
2527 
2528 static void
2529 _spdk_blob_request_submit_op(struct spdk_blob *blob, struct spdk_io_channel *_channel,
2530 			     void *payload, uint64_t offset, uint64_t length,
2531 			     spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type)
2532 {
2533 	assert(blob != NULL);
2534 
2535 	if (blob->data_ro && op_type != SPDK_BLOB_READ) {
2536 		cb_fn(cb_arg, -EPERM);
2537 		return;
2538 	}
2539 
2540 	if (offset + length > _spdk_bs_cluster_to_lba(blob->bs, blob->active.num_clusters)) {
2541 		cb_fn(cb_arg, -EINVAL);
2542 		return;
2543 	}
2544 	if (length <= _spdk_bs_num_io_units_to_cluster_boundary(blob, offset)) {
2545 		_spdk_blob_request_submit_op_single(_channel, blob, payload, offset, length,
2546 						    cb_fn, cb_arg, op_type);
2547 	} else {
2548 		_spdk_blob_request_submit_op_split(_channel, blob, payload, offset, length,
2549 						   cb_fn, cb_arg, op_type);
2550 	}
2551 }
2552 
2553 struct rw_iov_ctx {
2554 	struct spdk_blob *blob;
2555 	struct spdk_io_channel *channel;
2556 	spdk_blob_op_complete cb_fn;
2557 	void *cb_arg;
2558 	bool read;
2559 	int iovcnt;
2560 	struct iovec *orig_iov;
2561 	uint64_t io_unit_offset;
2562 	uint64_t io_units_remaining;
2563 	uint64_t io_units_done;
2564 	struct iovec iov[0];
2565 };
2566 
2567 static void
2568 _spdk_rw_iov_done(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2569 {
2570 	assert(cb_arg == NULL);
2571 	spdk_bs_sequence_finish(seq, bserrno);
2572 }
2573 
2574 static void
2575 _spdk_rw_iov_split_next(void *cb_arg, int bserrno)
2576 {
2577 	struct rw_iov_ctx *ctx = cb_arg;
2578 	struct spdk_blob *blob = ctx->blob;
2579 	struct iovec *iov, *orig_iov;
2580 	int iovcnt;
2581 	size_t orig_iovoff;
2582 	uint64_t io_units_count, io_units_to_boundary, io_unit_offset;
2583 	uint64_t byte_count;
2584 
2585 	if (bserrno != 0 || ctx->io_units_remaining == 0) {
2586 		ctx->cb_fn(ctx->cb_arg, bserrno);
2587 		free(ctx);
2588 		return;
2589 	}
2590 
2591 	io_unit_offset = ctx->io_unit_offset;
2592 	io_units_to_boundary = _spdk_bs_num_io_units_to_cluster_boundary(blob, io_unit_offset);
2593 	io_units_count = spdk_min(ctx->io_units_remaining, io_units_to_boundary);
2594 	/*
2595 	 * Get index and offset into the original iov array for our current position in the I/O sequence.
2596 	 *  byte_count keeps track of how many bytes remain until orig_iov and orig_iovoff
2597 	 *  point to the current position in the I/O sequence.
2598 	 */
2599 	byte_count = ctx->io_units_done * blob->bs->io_unit_size;
2600 	orig_iov = &ctx->orig_iov[0];
2601 	orig_iovoff = 0;
2602 	while (byte_count > 0) {
2603 		if (byte_count >= orig_iov->iov_len) {
2604 			byte_count -= orig_iov->iov_len;
2605 			orig_iov++;
2606 		} else {
2607 			orig_iovoff = byte_count;
2608 			byte_count = 0;
2609 		}
2610 	}
2611 
2612 	/*
2613 	 * Build an iov array for the next I/O in the sequence.  byte_count will keep track of how many
2614 	 *  bytes of this next I/O remain to be accounted for in the new iov array.
2615 	 */
2616 	byte_count = io_units_count * blob->bs->io_unit_size;
2617 	iov = &ctx->iov[0];
2618 	iovcnt = 0;
2619 	while (byte_count > 0) {
2620 		assert(iovcnt < ctx->iovcnt);
2621 		iov->iov_len = spdk_min(byte_count, orig_iov->iov_len - orig_iovoff);
2622 		iov->iov_base = orig_iov->iov_base + orig_iovoff;
2623 		byte_count -= iov->iov_len;
2624 		orig_iovoff = 0;
2625 		orig_iov++;
2626 		iov++;
2627 		iovcnt++;
2628 	}
2629 
2630 	ctx->io_unit_offset += io_units_count;
2631 	ctx->io_units_remaining -= io_units_count;
2632 	ctx->io_units_done += io_units_count;
2633 	iov = &ctx->iov[0];
2634 
2635 	if (ctx->read) {
2636 		spdk_blob_io_readv(ctx->blob, ctx->channel, iov, iovcnt, io_unit_offset,
2637 				   io_units_count, _spdk_rw_iov_split_next, ctx);
2638 	} else {
2639 		spdk_blob_io_writev(ctx->blob, ctx->channel, iov, iovcnt, io_unit_offset,
2640 				    io_units_count, _spdk_rw_iov_split_next, ctx);
2641 	}
2642 }
2643 
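/*
 * iov rebuild example (hypothetical: 512-byte io units, three 8 KiB buffers
 * in orig_iov, 12 KiB already done): the skip loop leaves orig_iov at the
 * second buffer with orig_iovoff = 4096, so an 8 KiB sub-I/O is built as two
 * entries, {orig_iov[1].iov_base + 4096, 4096} and {orig_iov[2].iov_base,
 * 4096}, never exceeding the caller's ctx->iovcnt original entries.
 */
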
2644 static void
2645 _spdk_blob_request_submit_rw_iov(struct spdk_blob *blob, struct spdk_io_channel *_channel,
2646 				 struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
2647 				 spdk_blob_op_complete cb_fn, void *cb_arg, bool read)
2648 {
2649 	struct spdk_bs_cpl	cpl;
2650 
2651 	assert(blob != NULL);
2652 
2653 	if (!read && blob->data_ro) {
2654 		cb_fn(cb_arg, -EPERM);
2655 		return;
2656 	}
2657 
2658 	if (length == 0) {
2659 		cb_fn(cb_arg, 0);
2660 		return;
2661 	}
2662 
2663 	if (offset + length > _spdk_bs_cluster_to_lba(blob->bs, blob->active.num_clusters)) {
2664 		cb_fn(cb_arg, -EINVAL);
2665 		return;
2666 	}
2667 
2668 	/*
2669 	 * For now, we implement readv/writev using a sequence (instead of a batch) to account for having
2670 	 *  to split a request that spans a cluster boundary.  For I/Os that do not span a cluster boundary,
2671 	 *  there will be no noticeable difference compared to using a batch.  For I/Os that do span a cluster
2672 	 *  boundary, the target LBAs (after blob offset to LBA translation) may not be contiguous, so we need
2673 	 *  to allocate a separate iov array and split the I/O such that none of the resulting
2674 	 *  smaller I/Os crosses a cluster boundary.  These smaller I/Os will be issued in sequence (not in
2675 	 *  parallel), but since this case happens very infrequently, any performance impact will be negligible.
2676 	 *
2677 	 * This could be optimized in the future to allocate a big enough iov array to account for all of the iovs
2678 	 *  for all of the smaller I/Os, pre-build all of the iov arrays for the smaller I/Os, then issue them
2679 	 *  in a batch.  That would also require creating an intermediate spdk_bs_cpl that would get called
2680 	 *  when the batch was completed, to allow for freeing the memory for the iov arrays.
2681 	 */
2682 	if (spdk_likely(length <= _spdk_bs_num_io_units_to_cluster_boundary(blob, offset))) {
2683 		uint32_t lba_count;
2684 		uint64_t lba;
2685 
2686 		cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
2687 		cpl.u.blob_basic.cb_fn = cb_fn;
2688 		cpl.u.blob_basic.cb_arg = cb_arg;
2689 
2690 		if (blob->frozen_refcnt) {
2691 			/* This blob I/O is frozen */
2692 			enum spdk_blob_op_type op_type;
2693 			spdk_bs_user_op_t *op;
2694 			struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(_channel);
2695 
2696 			op_type = read ? SPDK_BLOB_READV : SPDK_BLOB_WRITEV;
2697 			op = spdk_bs_user_op_alloc(_channel, &cpl, op_type, blob, iov, iovcnt, offset, length);
2698 			if (!op) {
2699 				cb_fn(cb_arg, -ENOMEM);
2700 				return;
2701 			}
2702 
2703 			TAILQ_INSERT_TAIL(&bs_channel->queued_io, op, link);
2704 
2705 			return;
2706 		}
2707 
2708 		_spdk_blob_calculate_lba_and_lba_count(blob, offset, length, &lba, &lba_count);
2709 
2710 		if (read) {
2711 			spdk_bs_sequence_t *seq;
2712 
2713 			seq = spdk_bs_sequence_start(_channel, &cpl);
2714 			if (!seq) {
2715 				cb_fn(cb_arg, -ENOMEM);
2716 				return;
2717 			}
2718 
2719 			if (_spdk_bs_io_unit_is_allocated(blob, offset)) {
2720 				spdk_bs_sequence_readv_dev(seq, iov, iovcnt, lba, lba_count, _spdk_rw_iov_done, NULL);
2721 			} else {
2722 				spdk_bs_sequence_readv_bs_dev(seq, blob->back_bs_dev, iov, iovcnt, lba, lba_count,
2723 							      _spdk_rw_iov_done, NULL);
2724 			}
2725 		} else {
2726 			if (_spdk_bs_io_unit_is_allocated(blob, offset)) {
2727 				spdk_bs_sequence_t *seq;
2728 
2729 				seq = spdk_bs_sequence_start(_channel, &cpl);
2730 				if (!seq) {
2731 					cb_fn(cb_arg, -ENOMEM);
2732 					return;
2733 				}
2734 
2735 				spdk_bs_sequence_writev_dev(seq, iov, iovcnt, lba, lba_count, _spdk_rw_iov_done, NULL);
2736 			} else {
2737 				/* Queue this operation and allocate the cluster */
2738 				spdk_bs_user_op_t *op;
2739 
2740 				op = spdk_bs_user_op_alloc(_channel, &cpl, SPDK_BLOB_WRITEV, blob, iov, iovcnt, offset,
2741 							   length);
2742 				if (!op) {
2743 					cb_fn(cb_arg, -ENOMEM);
2744 					return;
2745 				}
2746 
2747 				_spdk_bs_allocate_and_copy_cluster(blob, _channel, offset, op);
2748 			}
2749 		}
2750 	} else {
2751 		struct rw_iov_ctx *ctx;
2752 
2753 		ctx = calloc(1, sizeof(struct rw_iov_ctx) + iovcnt * sizeof(struct iovec));
2754 		if (ctx == NULL) {
2755 			cb_fn(cb_arg, -ENOMEM);
2756 			return;
2757 		}
2758 
2759 		ctx->blob = blob;
2760 		ctx->channel = _channel;
2761 		ctx->cb_fn = cb_fn;
2762 		ctx->cb_arg = cb_arg;
2763 		ctx->read = read;
2764 		ctx->orig_iov = iov;
2765 		ctx->iovcnt = iovcnt;
2766 		ctx->io_unit_offset = offset;
2767 		ctx->io_units_remaining = length;
2768 		ctx->io_units_done = 0;
2769 
2770 		_spdk_rw_iov_split_next(ctx, 0);
2771 	}
2772 }
2773 
2774 static struct spdk_blob *
2775 _spdk_blob_lookup(struct spdk_blob_store *bs, spdk_blob_id blobid)
2776 {
2777 	struct spdk_blob *blob;
2778 
2779 	TAILQ_FOREACH(blob, &bs->blobs, link) {
2780 		if (blob->id == blobid) {
2781 			return blob;
2782 		}
2783 	}
2784 
2785 	return NULL;
2786 }
2787 
2788 static void
2789 _spdk_blob_get_snapshot_and_clone_entries(struct spdk_blob *blob,
2790 		struct spdk_blob_list **snapshot_entry, struct spdk_blob_list **clone_entry)
2791 {
2792 	assert(blob != NULL);
2793 	*snapshot_entry = NULL;
2794 	*clone_entry = NULL;
2795 
2796 	if (blob->parent_id == SPDK_BLOBID_INVALID) {
2797 		return;
2798 	}
2799 
2800 	TAILQ_FOREACH(*snapshot_entry, &blob->bs->snapshots, link) {
2801 		if ((*snapshot_entry)->id == blob->parent_id) {
2802 			break;
2803 		}
2804 	}
2805 
2806 	if (*snapshot_entry != NULL) {
2807 		TAILQ_FOREACH(*clone_entry, &(*snapshot_entry)->clones, link) {
2808 			if ((*clone_entry)->id == blob->id) {
2809 				break;
2810 			}
2811 		}
2812 
2813 		assert(*clone_entry != NULL);
2814 	}
2815 }
2816 
2817 static int
2818 _spdk_bs_channel_create(void *io_device, void *ctx_buf)
2819 {
2820 	struct spdk_blob_store		*bs = io_device;
2821 	struct spdk_bs_channel		*channel = ctx_buf;
2822 	struct spdk_bs_dev		*dev;
2823 	uint32_t			max_ops = bs->max_channel_ops;
2824 	uint32_t			i;
2825 
2826 	dev = bs->dev;
2827 
2828 	channel->req_mem = calloc(max_ops, sizeof(struct spdk_bs_request_set));
2829 	if (!channel->req_mem) {
2830 		return -1;
2831 	}
2832 
2833 	TAILQ_INIT(&channel->reqs);
2834 
2835 	for (i = 0; i < max_ops; i++) {
2836 		TAILQ_INSERT_TAIL(&channel->reqs, &channel->req_mem[i], link);
2837 	}
2838 
2839 	channel->bs = bs;
2840 	channel->dev = dev;
2841 	channel->dev_channel = dev->create_channel(dev);
2842 
2843 	if (!channel->dev_channel) {
2844 		SPDK_ERRLOG("Failed to create device channel.\n");
2845 		free(channel->req_mem);
2846 		return -1;
2847 	}
2848 
2849 	TAILQ_INIT(&channel->need_cluster_alloc);
2850 	TAILQ_INIT(&channel->queued_io);
2851 
2852 	return 0;
2853 }
2854 
2855 static void
2856 _spdk_bs_channel_destroy(void *io_device, void *ctx_buf)
2857 {
2858 	struct spdk_bs_channel *channel = ctx_buf;
2859 	spdk_bs_user_op_t *op;
2860 
2861 	while (!TAILQ_EMPTY(&channel->need_cluster_alloc)) {
2862 		op = TAILQ_FIRST(&channel->need_cluster_alloc);
2863 		TAILQ_REMOVE(&channel->need_cluster_alloc, op, link);
2864 		spdk_bs_user_op_abort(op);
2865 	}
2866 
2867 	while (!TAILQ_EMPTY(&channel->queued_io)) {
2868 		op = TAILQ_FIRST(&channel->queued_io);
2869 		TAILQ_REMOVE(&channel->queued_io, op, link);
2870 		spdk_bs_user_op_abort(op);
2871 	}
2872 
2873 	free(channel->req_mem);
2874 	channel->dev->destroy_channel(channel->dev, channel->dev_channel);
2875 }
2876 
2877 static void
2878 _spdk_bs_dev_destroy(void *io_device)
2879 {
2880 	struct spdk_blob_store *bs = io_device;
2881 	struct spdk_blob	*blob, *blob_tmp;
2882 
2883 	bs->dev->destroy(bs->dev);
2884 
2885 	TAILQ_FOREACH_SAFE(blob, &bs->blobs, link, blob_tmp) {
2886 		TAILQ_REMOVE(&bs->blobs, blob, link);
2887 		_spdk_blob_free(blob);
2888 	}
2889 
2890 	pthread_mutex_destroy(&bs->used_clusters_mutex);
2891 
2892 	spdk_bit_array_free(&bs->used_blobids);
2893 	spdk_bit_array_free(&bs->used_md_pages);
2894 	spdk_bit_array_free(&bs->used_clusters);
2895 	/*
2896 	 * If this function is called for any reason except a successful unload,
2897 	 * the unload_cpl type will be NONE and this will be a nop.
2898 	 */
2899 	spdk_bs_call_cpl(&bs->unload_cpl, bs->unload_err);
2900 
2901 	free(bs);
2902 }
2903 
2904 static int
2905 _spdk_bs_blob_list_add(struct spdk_blob *blob)
2906 {
2907 	spdk_blob_id snapshot_id;
2908 	struct spdk_blob_list *snapshot_entry = NULL;
2909 	struct spdk_blob_list *clone_entry = NULL;
2910 
2911 	assert(blob != NULL);
2912 
2913 	snapshot_id = blob->parent_id;
2914 	if (snapshot_id == SPDK_BLOBID_INVALID) {
2915 		return 0;
2916 	}
2917 
2918 	snapshot_entry = _spdk_bs_get_snapshot_entry(blob->bs, snapshot_id);
2919 	if (snapshot_entry == NULL) {
2920 		/* Snapshot not found */
2921 		snapshot_entry = calloc(1, sizeof(struct spdk_blob_list));
2922 		if (snapshot_entry == NULL) {
2923 			return -ENOMEM;
2924 		}
2925 		snapshot_entry->id = snapshot_id;
2926 		TAILQ_INIT(&snapshot_entry->clones);
2927 		TAILQ_INSERT_TAIL(&blob->bs->snapshots, snapshot_entry, link);
2928 	} else {
2929 		TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) {
2930 			if (clone_entry->id == blob->id) {
2931 				break;
2932 			}
2933 		}
2934 	}
2935 
2936 	if (clone_entry == NULL) {
2937 		/* Clone not found */
2938 		clone_entry = calloc(1, sizeof(struct spdk_blob_list));
2939 		if (clone_entry == NULL) {
2940 			return -ENOMEM;
2941 		}
2942 		clone_entry->id = blob->id;
2943 		TAILQ_INIT(&clone_entry->clones);
2944 		TAILQ_INSERT_TAIL(&snapshot_entry->clones, clone_entry, link);
2945 		snapshot_entry->clone_count++;
2946 	}
2947 
2948 	return 0;
2949 }
2950 
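/*
 * Resulting bookkeeping (illustrative): bs->snapshots holds one
 * spdk_blob_list entry per snapshot id, and each entry tracks its clones in
 * a sub-list, e.g.
 *
 *	bs->snapshots: [snap A] --------- [snap B]
 *	               clones: a1, a2     clones: b1
 *
 * so _spdk_bs_blob_list_remove() and the clone-enumeration APIs can walk
 * these relationships without touching on-disk metadata.
 */
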
2951 static void
2952 _spdk_bs_blob_list_remove(struct spdk_blob *blob)
2953 {
2954 	struct spdk_blob_list *snapshot_entry = NULL;
2955 	struct spdk_blob_list *clone_entry = NULL;
2956 
2957 	_spdk_blob_get_snapshot_and_clone_entries(blob, &snapshot_entry, &clone_entry);
2958 
2959 	if (snapshot_entry == NULL) {
2960 		return;
2961 	}
2962 
2963 	blob->parent_id = SPDK_BLOBID_INVALID;
2964 	TAILQ_REMOVE(&snapshot_entry->clones, clone_entry, link);
2965 	free(clone_entry);
2966 
2967 	snapshot_entry->clone_count--;
2968 }
2969 
2970 static int
2971 _spdk_bs_blob_list_free(struct spdk_blob_store *bs)
2972 {
2973 	struct spdk_blob_list *snapshot_entry;
2974 	struct spdk_blob_list *snapshot_entry_tmp;
2975 	struct spdk_blob_list *clone_entry;
2976 	struct spdk_blob_list *clone_entry_tmp;
2977 
2978 	TAILQ_FOREACH_SAFE(snapshot_entry, &bs->snapshots, link, snapshot_entry_tmp) {
2979 		TAILQ_FOREACH_SAFE(clone_entry, &snapshot_entry->clones, link, clone_entry_tmp) {
2980 			TAILQ_REMOVE(&snapshot_entry->clones, clone_entry, link);
2981 			free(clone_entry);
2982 		}
2983 		TAILQ_REMOVE(&bs->snapshots, snapshot_entry, link);
2984 		free(snapshot_entry);
2985 	}
2986 
2987 	return 0;
2988 }
2989 
2990 static void
2991 _spdk_bs_free(struct spdk_blob_store *bs)
2992 {
2993 	_spdk_bs_blob_list_free(bs);
2994 
2995 	spdk_bs_unregister_md_thread(bs);
2996 	spdk_io_device_unregister(bs, _spdk_bs_dev_destroy);
2997 }
2998 
2999 void
3000 spdk_bs_opts_init(struct spdk_bs_opts *opts)
3001 {
3002 	opts->cluster_sz = SPDK_BLOB_OPTS_CLUSTER_SZ;
3003 	opts->num_md_pages = SPDK_BLOB_OPTS_NUM_MD_PAGES;
3004 	opts->max_md_ops = SPDK_BLOB_OPTS_MAX_MD_OPS;
3005 	opts->max_channel_ops = SPDK_BLOB_OPTS_DEFAULT_CHANNEL_OPS;
3006 	opts->clear_method = BS_CLEAR_WITH_UNMAP;
3007 	memset(&opts->bstype, 0, sizeof(opts->bstype));
3008 	opts->iter_cb_fn = NULL;
3009 	opts->iter_cb_arg = NULL;
3010 }
3011 
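/*
 * Typical usage (sketch; init_done_cb and init_ctx are hypothetical
 * caller-provided names): initialize the defaults first, then override
 * selected fields before handing the opts to spdk_bs_init() or spdk_bs_load():
 *
 *	struct spdk_bs_opts opts;
 *
 *	spdk_bs_opts_init(&opts);
 *	opts.cluster_sz = 4 * 1024 * 1024;
 *	spdk_bs_init(dev, &opts, init_done_cb, init_ctx);
 */
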
3012 static int
3013 _spdk_bs_opts_verify(struct spdk_bs_opts *opts)
3014 {
3015 	if (opts->cluster_sz == 0 || opts->num_md_pages == 0 || opts->max_md_ops == 0 ||
3016 	    opts->max_channel_ops == 0) {
3017 		SPDK_ERRLOG("Blobstore options cannot be set to 0\n");
3018 		return -1;
3019 	}
3020 
3021 	return 0;
3022 }
3023 
3024 static int
3025 _spdk_bs_alloc(struct spdk_bs_dev *dev, struct spdk_bs_opts *opts, struct spdk_blob_store **_bs)
3026 {
3027 	struct spdk_blob_store	*bs;
3028 	uint64_t dev_size;
3029 	int rc;
3030 
3031 	dev_size = dev->blocklen * dev->blockcnt;
3032 	if (dev_size < opts->cluster_sz) {
3033 		/* Device size cannot be smaller than cluster size of blobstore */
3034 		SPDK_INFOLOG(SPDK_LOG_BLOB, "Device size %" PRIu64 " is smaller than cluster size %" PRIu32 "\n",
3035 			     dev_size, opts->cluster_sz);
3036 		return -ENOSPC;
3037 	}
3038 	if (opts->cluster_sz < SPDK_BS_PAGE_SIZE) {
3039 		/* Cluster size cannot be smaller than page size */
3040 		SPDK_ERRLOG("Cluster size %" PRIu32 " is smaller than page size %d\n",
3041 			    opts->cluster_sz, SPDK_BS_PAGE_SIZE);
3042 		return -EINVAL;
3043 	}
3044 	bs = calloc(1, sizeof(struct spdk_blob_store));
3045 	if (!bs) {
3046 		return -ENOMEM;
3047 	}
3048 
3049 	TAILQ_INIT(&bs->blobs);
3050 	TAILQ_INIT(&bs->snapshots);
3051 	bs->dev = dev;
3052 	bs->md_thread = spdk_get_thread();
3053 	assert(bs->md_thread != NULL);
3054 
3055 	/*
3056 	 * Do not use _spdk_bs_lba_to_cluster() here since blockcnt may not be an
3057 	 *  even multiple of the cluster size.
3058 	 */
3059 	bs->cluster_sz = opts->cluster_sz;
3060 	bs->total_clusters = dev->blockcnt / (bs->cluster_sz / dev->blocklen);
3061 	bs->pages_per_cluster = bs->cluster_sz / SPDK_BS_PAGE_SIZE;
3062 	bs->num_free_clusters = bs->total_clusters;
3063 	bs->used_clusters = spdk_bit_array_create(bs->total_clusters);
3064 	bs->io_unit_size = dev->blocklen;
3065 	if (bs->used_clusters == NULL) {
3066 		free(bs);
3067 		return -ENOMEM;
3068 	}
3069 
3070 	bs->max_channel_ops = opts->max_channel_ops;
3071 	bs->super_blob = SPDK_BLOBID_INVALID;
3072 	memcpy(&bs->bstype, &opts->bstype, sizeof(opts->bstype));
3073 
3074 	/* The metadata is assumed to be at least 1 page */
3075 	bs->used_md_pages = spdk_bit_array_create(1);
3076 	bs->used_blobids = spdk_bit_array_create(0);
3077 
3078 	pthread_mutex_init(&bs->used_clusters_mutex, NULL);
3079 
3080 	spdk_io_device_register(bs, _spdk_bs_channel_create, _spdk_bs_channel_destroy,
3081 				sizeof(struct spdk_bs_channel), "blobstore");
3082 	rc = spdk_bs_register_md_thread(bs);
3083 	if (rc == -1) {
3084 		spdk_io_device_unregister(bs, NULL);
3085 		pthread_mutex_destroy(&bs->used_clusters_mutex);
3086 		spdk_bit_array_free(&bs->used_blobids);
3087 		spdk_bit_array_free(&bs->used_md_pages);
3088 		spdk_bit_array_free(&bs->used_clusters);
3089 		free(bs);
3090 		/* FIXME: this is a lie, but we don't know how to get a proper error code here */
3091 		return -ENOMEM;
3092 	}
3093 
3094 	*_bs = bs;
3095 	return 0;
3096 }
3097 
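/*
 * Cluster accounting example (hypothetical device): with dev->blocklen = 512
 * and cluster_sz = 1 MiB, a cluster spans 2048 blocks, so a device of
 * 2,097,152 blocks yields total_clusters = 2,097,152 / 2048 = 1024; trailing
 * blocks that do not fill a whole cluster are simply ignored, hence the
 * in-function note about not using _spdk_bs_lba_to_cluster().
 */
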
3098 /* START spdk_bs_load. spdk_bs_load_ctx will be used for both load and unload. */
3099 
3100 struct spdk_bs_load_ctx {
3101 	struct spdk_blob_store		*bs;
3102 	struct spdk_bs_super_block	*super;
3103 
3104 	struct spdk_bs_md_mask		*mask;
3105 	bool				in_page_chain;
3106 	uint32_t			page_index;
3107 	uint32_t			cur_page;
3108 	struct spdk_blob_md_page	*page;
3109 
3110 	uint64_t			num_extent_pages;
3111 	uint32_t			*extent_pages;
3112 
3113 	spdk_bs_sequence_t			*seq;
3114 	spdk_blob_op_with_handle_complete	iter_cb_fn;
3115 	void					*iter_cb_arg;
3116 	struct spdk_blob			*blob;
3117 	spdk_blob_id				blobid;
3118 };
3119 
3120 static void
3121 _spdk_bs_load_ctx_fail(struct spdk_bs_load_ctx *ctx, int bserrno)
3122 {
3123 	assert(bserrno != 0);
3124 
3125 	spdk_free(ctx->super);
3126 	spdk_bs_sequence_finish(ctx->seq, bserrno);
3127 	_spdk_bs_free(ctx->bs);
3128 	free(ctx);
3129 }
3130 
3131 static void
3132 _spdk_bs_set_mask(struct spdk_bit_array *array, struct spdk_bs_md_mask *mask)
3133 {
3134 	uint32_t i = 0;
3135 
3136 	while (true) {
3137 		i = spdk_bit_array_find_first_set(array, i);
3138 		if (i >= mask->length) {
3139 			break;
3140 		}
3141 		mask->mask[i / 8] |= 1U << (i % 8);
3142 		i++;
3143 	}
3144 }
3145 
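/*
 * Packing example: a set bit at index 10 lands in mask->mask[10 / 8], i.e.
 * byte 1, as (1U << (10 % 8)) == 0x04; the on-disk mask is a plain
 * byte-indexed bitmap of the in-memory bit array, and _spdk_bs_load_mask()
 * below performs the exact inverse when reading it back.
 */
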
3146 static int
3147 _spdk_bs_load_mask(struct spdk_bit_array **array_ptr, struct spdk_bs_md_mask *mask)
3148 {
3149 	struct spdk_bit_array *array;
3150 	uint32_t i;
3151 
3152 	if (spdk_bit_array_resize(array_ptr, mask->length) < 0) {
3153 		return -ENOMEM;
3154 	}
3155 
3156 	array = *array_ptr;
3157 	for (i = 0; i < mask->length; i++) {
3158 		if (mask->mask[i / 8] & (1U << (i % 8))) {
3159 			spdk_bit_array_set(array, i);
3160 		}
3161 	}
3162 
3163 	return 0;
3164 }
3165 
3166 static void
3167 _spdk_bs_write_super(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs,
3168 		     struct spdk_bs_super_block *super, spdk_bs_sequence_cpl cb_fn, void *cb_arg)
3169 {
3170 	/* Update the values in the super block */
3171 	super->super_blob = bs->super_blob;
3172 	memcpy(&super->bstype, &bs->bstype, sizeof(bs->bstype));
3173 	super->crc = _spdk_blob_md_page_calc_crc(super);
3174 	spdk_bs_sequence_write_dev(seq, super, _spdk_bs_page_to_lba(bs, 0),
3175 				   _spdk_bs_byte_to_lba(bs, sizeof(*super)),
3176 				   cb_fn, cb_arg);
3177 }
3178 
3179 static void
3180 _spdk_bs_write_used_clusters(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn)
3181 {
3182 	struct spdk_bs_load_ctx	*ctx = arg;
3183 	uint64_t	mask_size, lba, lba_count;
3184 
3185 	/* Write out the used clusters mask */
3186 	mask_size = ctx->super->used_cluster_mask_len * SPDK_BS_PAGE_SIZE;
3187 	ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL,
3188 				 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
3189 	if (!ctx->mask) {
3190 		_spdk_bs_load_ctx_fail(ctx, -ENOMEM);
3191 		return;
3192 	}
3193 
3194 	ctx->mask->type = SPDK_MD_MASK_TYPE_USED_CLUSTERS;
3195 	ctx->mask->length = ctx->bs->total_clusters;
3196 	assert(ctx->mask->length == spdk_bit_array_capacity(ctx->bs->used_clusters));
3197 
3198 	_spdk_bs_set_mask(ctx->bs->used_clusters, ctx->mask);
3199 	lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start);
3200 	lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len);
3201 	spdk_bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg);
3202 }
3203 
3204 static void
3205 _spdk_bs_write_used_md(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn)
3206 {
3207 	struct spdk_bs_load_ctx	*ctx = arg;
3208 	uint64_t	mask_size, lba, lba_count;
3209 
3210 	mask_size = ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE;
3211 	ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL,
3212 				 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
3213 	if (!ctx->mask) {
3214 		_spdk_bs_load_ctx_fail(ctx, -ENOMEM);
3215 		return;
3216 	}
3217 
3218 	ctx->mask->type = SPDK_MD_MASK_TYPE_USED_PAGES;
3219 	ctx->mask->length = ctx->super->md_len;
3220 	assert(ctx->mask->length == spdk_bit_array_capacity(ctx->bs->used_md_pages));
3221 
3222 	_spdk_bs_set_mask(ctx->bs->used_md_pages, ctx->mask);
3223 	lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_start);
3224 	lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_len);
3225 	spdk_bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg);
3226 }
3227 
3228 static void
3229 _spdk_bs_write_used_blobids(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn)
3230 {
3231 	struct spdk_bs_load_ctx	*ctx = arg;
3232 	uint64_t	mask_size, lba, lba_count;
3233 
3234 	if (ctx->super->used_blobid_mask_len == 0) {
3235 		/*
3236 		 * This is a pre-v3 on-disk format where the blobid mask does not get
3237 		 *  written to disk.
3238 		 */
3239 		cb_fn(seq, arg, 0);
3240 		return;
3241 	}
3242 
3243 	mask_size = ctx->super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE;
3244 	ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_SOCKET_ID_ANY,
3245 				 SPDK_MALLOC_DMA);
3246 	if (!ctx->mask) {
3247 		_spdk_bs_load_ctx_fail(ctx, -ENOMEM);
3248 		return;
3249 	}
3250 
3251 	ctx->mask->type = SPDK_MD_MASK_TYPE_USED_BLOBIDS;
3252 	ctx->mask->length = ctx->super->md_len;
3253 	assert(ctx->mask->length == spdk_bit_array_capacity(ctx->bs->used_blobids));
3254 
3255 	_spdk_bs_set_mask(ctx->bs->used_blobids, ctx->mask);
3256 	lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_start);
3257 	lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_len);
3258 	spdk_bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg);
3259 }
3260 
3261 static void
3262 _spdk_blob_set_thin_provision(struct spdk_blob *blob)
3263 {
3264 	_spdk_blob_verify_md_op(blob);
3265 	blob->invalid_flags |= SPDK_BLOB_THIN_PROV;
3266 	blob->state = SPDK_BLOB_STATE_DIRTY;
3267 }
3268 
3269 static void
3270 _spdk_blob_set_clear_method(struct spdk_blob *blob, enum blob_clear_method clear_method)
3271 {
3272 	_spdk_blob_verify_md_op(blob);
3273 	blob->clear_method = clear_method;
3274 	blob->md_ro_flags |= (clear_method << SPDK_BLOB_CLEAR_METHOD_SHIFT);
3275 	blob->state = SPDK_BLOB_STATE_DIRTY;
3276 }
3277 
3278 static void _spdk_bs_load_iter(void *arg, struct spdk_blob *blob, int bserrno);
3279 
3280 static void
3281 _spdk_bs_delete_corrupted_blob_cpl(void *cb_arg, int bserrno)
3282 {
3283 	struct spdk_bs_load_ctx *ctx = cb_arg;
3284 	spdk_blob_id id;
3285 	int64_t page_num;
3286 
3287 	/* Iterate to the next blob (we can't use the spdk_bs_iter_next function, as our
3288 	 * last blob has been removed) */
3289 	page_num = _spdk_bs_blobid_to_page(ctx->blobid);
3290 	page_num++;
3291 	page_num = spdk_bit_array_find_first_set(ctx->bs->used_blobids, page_num);
3292 	if (page_num >= spdk_bit_array_capacity(ctx->bs->used_blobids)) {
3293 		_spdk_bs_load_iter(ctx, NULL, -ENOENT);
3294 		return;
3295 	}
3296 
3297 	id = _spdk_bs_page_to_blobid(page_num);
3298 
3299 	spdk_bs_open_blob(ctx->bs, id, _spdk_bs_load_iter, ctx);
3300 }
3301 
3302 static void
3303 _spdk_bs_delete_corrupted_close_cb(void *cb_arg, int bserrno)
3304 {
3305 	struct spdk_bs_load_ctx *ctx = cb_arg;
3306 
3307 	if (bserrno != 0) {
3308 		SPDK_ERRLOG("Failed to close corrupted blob\n");
3309 		spdk_bs_iter_next(ctx->bs, ctx->blob, _spdk_bs_load_iter, ctx);
3310 		return;
3311 	}
3312 
3313 	spdk_bs_delete_blob(ctx->bs, ctx->blobid, _spdk_bs_delete_corrupted_blob_cpl, ctx);
3314 }
3315 
3316 static void
3317 _spdk_bs_delete_corrupted_blob(void *cb_arg, int bserrno)
3318 {
3319 	struct spdk_bs_load_ctx *ctx = cb_arg;
3320 	uint64_t i;
3321 
3322 	if (bserrno != 0) {
3323 		SPDK_ERRLOG("Failed to close clone of a corrupted blob\n");
3324 		spdk_bs_iter_next(ctx->bs, ctx->blob, _spdk_bs_load_iter, ctx);
3325 		return;
3326 	}
3327 
3328 	/* Snapshot and clone have the same copy of cluster map and extent pages
3329 	 * at this point. Let's clear both for the snapshot now,
3330 	 * so that they won't be cleared for the clone later when we remove the snapshot.
3331 	 * Also set thin provisioning to pass the data corruption check. */
3332 	for (i = 0; i < ctx->blob->active.num_clusters; i++) {
3333 		ctx->blob->active.clusters[i] = 0;
3334 	}
3335 	for (i = 0; i < ctx->blob->active.num_extent_pages; i++) {
3336 		ctx->blob->active.extent_pages[i] = 0;
3337 	}
3338 
3339 	ctx->blob->md_ro = false;
3340 
3341 	_spdk_blob_set_thin_provision(ctx->blob);
3342 
3343 	ctx->blobid = ctx->blob->id;
3344 
3345 	spdk_blob_close(ctx->blob, _spdk_bs_delete_corrupted_close_cb, ctx);
3346 }
3347 
3348 static void
3349 _spdk_bs_update_corrupted_blob(void *cb_arg, int bserrno)
3350 {
3351 	struct spdk_bs_load_ctx *ctx = cb_arg;
3352 
3353 	if (bserrno != 0) {
3354 		SPDK_ERRLOG("Failed to close clone of a corrupted blob\n");
3355 		spdk_bs_iter_next(ctx->bs, ctx->blob, _spdk_bs_load_iter, ctx);
3356 		return;
3357 	}
3358 
3359 	ctx->blob->md_ro = false;
3360 	_spdk_blob_remove_xattr(ctx->blob, SNAPSHOT_PENDING_REMOVAL, true);
3361 	_spdk_blob_remove_xattr(ctx->blob, SNAPSHOT_IN_PROGRESS, true);
3362 	spdk_blob_set_read_only(ctx->blob);
3363 
3364 	if (ctx->iter_cb_fn) {
3365 		ctx->iter_cb_fn(ctx->iter_cb_arg, ctx->blob, 0);
3366 	}
3367 	_spdk_bs_blob_list_add(ctx->blob);
3368 
3369 	spdk_bs_iter_next(ctx->bs, ctx->blob, _spdk_bs_load_iter, ctx);
3370 }
3371 
3372 static void
3373 _spdk_bs_examine_clone(void *cb_arg, struct spdk_blob *blob, int bserrno)
3374 {
3375 	struct spdk_bs_load_ctx *ctx = cb_arg;
3376 
3377 	if (bserrno != 0) {
3378 		SPDK_ERRLOG("Failed to open clone of a corrupted blob\n");
3379 		spdk_bs_iter_next(ctx->bs, ctx->blob, _spdk_bs_load_iter, ctx);
3380 		return;
3381 	}
3382 
3383 	if (blob->parent_id == ctx->blob->id) {
3384 		/* Power failure occurred before updating clone (snapshot delete case)
3385 		 * or after updating clone (creating snapshot case) - keep snapshot */
3386 		spdk_blob_close(blob, _spdk_bs_update_corrupted_blob, ctx);
3387 	} else {
3388 		/* Power failure occurred after updating clone (snapshot delete case)
3389 		 * or before updating clone (creating snapshot case) - remove snapshot */
3390 		spdk_blob_close(blob, _spdk_bs_delete_corrupted_blob, ctx);
3391 	}
3392 }
3393 
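/*
 * Recovery decision implemented above (summary): a snapshot left with a
 * SNAPSHOT_PENDING_REMOVAL or SNAPSHOT_IN_PROGRESS xattr is judged by its
 * clone. If the clone still points at the snapshot (blob->parent_id matches),
 * the interrupted operation is rolled forward by keeping the snapshot and
 * dropping the marker xattrs; otherwise the half-created or half-deleted
 * snapshot is cleared and removed.
 */
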
3394 static void
3395 _spdk_bs_load_iter(void *arg, struct spdk_blob *blob, int bserrno)
3396 {
3397 	struct spdk_bs_load_ctx *ctx = arg;
3398 	const void *value;
3399 	size_t len;
3400 	int rc = 0;
3401 
3402 	if (bserrno == 0) {
3403 		/* Examine the blob to see if it was corrupted by a power failure.
3404 		 * Fix the ones that can be fixed and remove any other corrupted
3405 		 * ones. If it is not corrupted, just process it. */
3406 		rc = _spdk_blob_get_xattr_value(blob, SNAPSHOT_PENDING_REMOVAL, &value, &len, true);
3407 		if (rc != 0) {
3408 			rc = _spdk_blob_get_xattr_value(blob, SNAPSHOT_IN_PROGRESS, &value, &len, true);
3409 			if (rc != 0) {
3410 				/* Not corrupted - process it and continue with iterating through blobs */
3411 				if (ctx->iter_cb_fn) {
3412 					ctx->iter_cb_fn(ctx->iter_cb_arg, blob, 0);
3413 				}
3414 				_spdk_bs_blob_list_add(blob);
3415 				spdk_bs_iter_next(ctx->bs, blob, _spdk_bs_load_iter, ctx);
3416 				return;
3417 			}
3418 
3419 		}
3420 
3421 		assert(len == sizeof(spdk_blob_id));
3422 
3423 		ctx->blob = blob;
3424 
3425 		/* Open clone to check if we are able to fix this blob or should we remove it */
3426 		spdk_bs_open_blob(ctx->bs, *(spdk_blob_id *)value, _spdk_bs_examine_clone, ctx);
3427 		return;
3428 	} else if (bserrno == -ENOENT) {
3429 		bserrno = 0;
3430 	} else {
3431 		/*
3432 		 * This case needs to be looked at further.  Same problem
3433 		 *  exists with applications that rely on explicit blob
3434 		 *  iteration.  We should just skip the blob that failed
3435 		 *  to load and continue on to the next one.
3436 		 */
3437 		SPDK_ERRLOG("Error in iterating blobs\n");
3438 	}
3439 
3440 	ctx->iter_cb_fn = NULL;
3441 
3442 	spdk_free(ctx->super);
3443 	spdk_free(ctx->mask);
3444 	spdk_bs_sequence_finish(ctx->seq, bserrno);
3445 	free(ctx);
3446 }
3447 
3448 static void
3449 _spdk_bs_load_complete(struct spdk_bs_load_ctx *ctx)
3450 {
3451 	spdk_bs_iter_first(ctx->bs, _spdk_bs_load_iter, ctx);
3452 }
3453 
3454 static void
3455 _spdk_bs_load_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
3456 {
3457 	struct spdk_bs_load_ctx *ctx = cb_arg;
3458 	int rc;
3459 
3460 	/* The type must be correct */
3461 	assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_BLOBIDS);
3462 
3463 	/* The length of the mask (in bits) must not be greater than
3464 	 * the length of the buffer (converted to bits) */
3465 	assert(ctx->mask->length <= (ctx->super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE * 8));
3466 
3467 	/* The length of the mask must be exactly equal to the size
3468 	 * (in pages) of the metadata region */
3469 	assert(ctx->mask->length == ctx->super->md_len);
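	/*
	 * Worked example (a sketch assuming the default 4 KiB metadata page,
	 * i.e. SPDK_BS_PAGE_SIZE == 0x1000): with md_len == 1024, the mask
	 * needs 1024 bits == 128 bytes, so a single mask page holds it
	 * comfortably: 1024 <= 1 * 4096 * 8.
	 */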
3470 
3471 	rc = _spdk_bs_load_mask(&ctx->bs->used_blobids, ctx->mask);
3472 	if (rc < 0) {
3473 		spdk_free(ctx->mask);
3474 		_spdk_bs_load_ctx_fail(ctx, rc);
3475 		return;
3476 	}
3477 
3478 	_spdk_bs_load_complete(ctx);
3479 }
3480 
3481 static void
3482 _spdk_bs_load_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
3483 {
3484 	struct spdk_bs_load_ctx *ctx = cb_arg;
3485 	uint64_t		lba, lba_count, mask_size;
3486 	int			rc;
3487 
3488 	if (bserrno != 0) {
3489 		_spdk_bs_load_ctx_fail(ctx, bserrno);
3490 		return;
3491 	}
3492 
3493 	/* The type must be correct */
3494 	assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_CLUSTERS);
3495 	/* The length of the mask (in bits) must not be greater than the length of the buffer (converted to bits) */
3496 	assert(ctx->mask->length <= (ctx->super->used_cluster_mask_len * sizeof(
3497 					     struct spdk_blob_md_page) * 8));
3498 	/* The length of the mask must be exactly equal to the total number of clusters */
3499 	assert(ctx->mask->length == ctx->bs->total_clusters);
3500 
3501 	rc = _spdk_bs_load_mask(&ctx->bs->used_clusters, ctx->mask);
3502 	if (rc < 0) {
3503 		spdk_free(ctx->mask);
3504 		_spdk_bs_load_ctx_fail(ctx, rc);
3505 		return;
3506 	}
3507 
3508 	ctx->bs->num_free_clusters = spdk_bit_array_count_clear(ctx->bs->used_clusters);
3509 	assert(ctx->bs->num_free_clusters <= ctx->bs->total_clusters);
3510 
3511 	spdk_free(ctx->mask);
3512 
3513 	/* Read the used blobids mask */
3514 	mask_size = ctx->super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE;
3515 	ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_SOCKET_ID_ANY,
3516 				 SPDK_MALLOC_DMA);
3517 	if (!ctx->mask) {
3518 		_spdk_bs_load_ctx_fail(ctx, -ENOMEM);
3519 		return;
3520 	}
3521 	lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_start);
3522 	lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_len);
3523 	spdk_bs_sequence_read_dev(seq, ctx->mask, lba, lba_count,
3524 				  _spdk_bs_load_used_blobids_cpl, ctx);
3525 }
3526 
3527 static void
3528 _spdk_bs_load_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
3529 {
3530 	struct spdk_bs_load_ctx *ctx = cb_arg;
3531 	uint64_t		lba, lba_count, mask_size;
3532 	int			rc;
3533 
3534 	if (bserrno != 0) {
3535 		_spdk_bs_load_ctx_fail(ctx, bserrno);
3536 		return;
3537 	}
3538 
3539 	/* The type must be correct */
3540 	assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_PAGES);
3541 	/* The length of the mask (in bits) must not be greater than the length of the buffer (converted to bits) */
3542 	assert(ctx->mask->length <= (ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE *
3543 				     8));
3544 	/* The length of the mask must be exactly equal to the size (in pages) of the metadata region */
3545 	assert(ctx->mask->length == ctx->super->md_len);
3546 
3547 	rc = _spdk_bs_load_mask(&ctx->bs->used_md_pages, ctx->mask);
3548 	if (rc < 0) {
3549 		spdk_free(ctx->mask);
3550 		_spdk_bs_load_ctx_fail(ctx, rc);
3551 		return;
3552 	}
3553 
3554 	spdk_free(ctx->mask);
3555 
3556 	/* Read the used clusters mask */
3557 	mask_size = ctx->super->used_cluster_mask_len * SPDK_BS_PAGE_SIZE;
3558 	ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_SOCKET_ID_ANY,
3559 				 SPDK_MALLOC_DMA);
3560 	if (!ctx->mask) {
3561 		_spdk_bs_load_ctx_fail(ctx, -ENOMEM);
3562 		return;
3563 	}
3564 	lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start);
3565 	lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len);
3566 	spdk_bs_sequence_read_dev(seq, ctx->mask, lba, lba_count,
3567 				  _spdk_bs_load_used_clusters_cpl, ctx);
3568 }
3569 
3570 static void
3571 _spdk_bs_load_read_used_pages(struct spdk_bs_load_ctx *ctx)
3572 {
3573 	uint64_t lba, lba_count, mask_size;
3574 
3575 	/* Read the used pages mask */
3576 	mask_size = ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE;
3577 	ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL,
3578 				 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
3579 	if (!ctx->mask) {
3580 		_spdk_bs_load_ctx_fail(ctx, -ENOMEM);
3581 		return;
3582 	}
3583 
3584 	lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_start);
3585 	lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_len);
3586 	spdk_bs_sequence_read_dev(ctx->seq, ctx->mask, lba, lba_count,
3587 				  _spdk_bs_load_used_pages_cpl, ctx);
3588 }
3589 
3590 static int
3591 _spdk_bs_load_replay_md_parse_page(struct spdk_bs_load_ctx *ctx)
3592 {
3593 	struct spdk_blob_store *bs = ctx->bs;
3594 	struct spdk_blob_md_page *page = ctx->page;
3595 	struct spdk_blob_md_descriptor *desc;
3596 	size_t	cur_desc = 0;
3597 
3598 	desc = (struct spdk_blob_md_descriptor *)page->descriptors;
3599 	while (cur_desc < sizeof(page->descriptors)) {
3600 		if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) {
3601 			if (desc->length == 0) {
3602 				/* If padding and length are 0, this terminates the page */
3603 				break;
3604 			}
3605 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE) {
3606 			struct spdk_blob_md_descriptor_extent_rle	*desc_extent_rle;
3607 			unsigned int				i, j;
3608 			unsigned int				cluster_count = 0;
3609 			uint32_t				cluster_idx;
3610 
3611 			desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)desc;
3612 
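			/*
			 * Decoding sketch: each RLE extent is a (cluster_idx, length)
			 * pair. For example, { cluster_idx = 10, length = 3 } marks
			 * physical clusters 10, 11 and 12 as used, while
			 * { cluster_idx = 0, length = 3 } describes three logical
			 * clusters that are present but unallocated.
			 */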
3613 			for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) {
3614 				for (j = 0; j < desc_extent_rle->extents[i].length; j++) {
3615 					cluster_idx = desc_extent_rle->extents[i].cluster_idx;
3616 					/*
3617 					 * cluster_idx = 0 means an unallocated cluster - don't mark that
3618 					 * in the used cluster map.
3619 					 */
3620 					if (cluster_idx != 0) {
3621 						spdk_bit_array_set(bs->used_clusters, cluster_idx + j);
3622 						if (bs->num_free_clusters == 0) {
3623 							return -ENOSPC;
3624 						}
3625 						bs->num_free_clusters--;
3626 					}
3627 					cluster_count++;
3628 				}
3629 			}
3630 			if (cluster_count == 0) {
3631 				return -EINVAL;
3632 			}
3633 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) {
3634 			struct spdk_blob_md_descriptor_extent_page	*desc_extent;
3635 			uint32_t					i;
3636 			uint32_t					cluster_count = 0;
3637 			uint32_t					cluster_idx;
3638 			size_t						cluster_idx_length;
3639 
3640 			desc_extent = (struct spdk_blob_md_descriptor_extent_page *)desc;
3641 			cluster_idx_length = desc_extent->length - sizeof(desc_extent->start_cluster_idx);
3642 
3643 			if (desc_extent->length <= sizeof(desc_extent->start_cluster_idx) ||
3644 			    (cluster_idx_length % sizeof(desc_extent->cluster_idx[0]) != 0)) {
3645 				return -EINVAL;
3646 			}
3647 
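			/*
			 * Decoding sketch: unlike the RLE descriptor above, each
			 * entry here is a single absolute cluster index. For example,
			 * cluster_idx[] = { 10, 0, 12 } marks clusters 10 and 12 as
			 * used and leaves the middle cluster unallocated.
			 */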
3648 			for (i = 0; i < cluster_idx_length / sizeof(desc_extent->cluster_idx[0]); i++) {
3649 				cluster_idx = desc_extent->cluster_idx[i];
3650 				/*
3651 				 * cluster_idx = 0 means an unallocated cluster - don't mark that
3652 				 * in the used cluster map.
3653 				 */
3654 				if (cluster_idx != 0) {
3655 					if (cluster_idx < desc_extent->start_cluster_idx &&
3656 					    cluster_idx >= desc_extent->start_cluster_idx + cluster_count) {
3657 						return -EINVAL;
3658 					}
3659 					spdk_bit_array_set(bs->used_clusters, cluster_idx);
3660 					if (bs->num_free_clusters == 0) {
3661 						return -ENOSPC;
3662 					}
3663 					bs->num_free_clusters--;
3664 				}
3665 				cluster_count++;
3666 			}
3667 
3668 			if (cluster_count == 0) {
3669 				return -EINVAL;
3670 			}
3671 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) {
3672 			/* Skip this item */
3673 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) {
3674 			/* Skip this item */
3675 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) {
3676 			/* Skip this item */
3677 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE) {
3678 			struct spdk_blob_md_descriptor_extent_table *desc_extent_table;
3679 			uint32_t num_extent_pages = ctx->num_extent_pages;
3680 			uint32_t i;
3681 			size_t extent_pages_length;
3682 			void *tmp;
3683 
3684 			desc_extent_table = (struct spdk_blob_md_descriptor_extent_table *)desc;
3685 			extent_pages_length = desc_extent_table->length - sizeof(desc_extent_table->num_clusters);
3686 
3687 			if (desc_extent_table->length == 0 ||
3688 			    (extent_pages_length % sizeof(desc_extent_table->extent_page[0]) != 0)) {
3689 				return -EINVAL;
3690 			}
3691 
3692 			for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) {
3693 				if (desc_extent_table->extent_page[i].page_idx != 0) {
3694 					if (desc_extent_table->extent_page[i].num_pages != 1) {
3695 						return -EINVAL;
3696 					}
3697 					num_extent_pages += 1;
3698 				}
3699 			}
3700 
3701 			if (num_extent_pages > 0) {
3702 				tmp = realloc(ctx->extent_pages, num_extent_pages * sizeof(uint32_t));
3703 				if (tmp == NULL) {
3704 					return -ENOMEM;
3705 				}
3706 				ctx->extent_pages = tmp;
3707 
3708 				/* Extent table entries contain md page numbers for extent pages.
3709 				 * Zeroes represent unallocated extent pages, which are run-length encoded.
3710 				 */
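				/*
				 * For example, entries of { page_idx = 5, num_pages = 1 }
				 * followed by { page_idx = 0, num_pages = 3 } record md
				 * page 5 for later replay and then skip a run of three
				 * unallocated extent pages.
				 */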
3711 				for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) {
3712 					if (desc_extent_table->extent_page[i].page_idx != 0) {
3713 						ctx->extent_pages[ctx->num_extent_pages] = desc_extent_table->extent_page[i].page_idx;
3714 						ctx->num_extent_pages += 1;
3715 					}
3716 				}
3717 			}
3718 		} else {
3719 			/* Error */
3720 			return -EINVAL;
3721 		}
3722 		/* Advance to the next descriptor */
3723 		cur_desc += sizeof(*desc) + desc->length;
3724 		if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) {
3725 			break;
3726 		}
3727 		desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc);
3728 	}
3729 	return 0;
3730 }
3731 
3732 static bool _spdk_bs_load_cur_extent_page_valid(struct spdk_blob_md_page *page)
3733 {
3734 	uint32_t crc;
3735 	struct spdk_blob_md_descriptor *desc = (struct spdk_blob_md_descriptor *)page->descriptors;
3736 	size_t desc_len;
3737 
3738 	crc = _spdk_blob_md_page_calc_crc(page);
3739 	if (crc != page->crc) {
3740 		return false;
3741 	}
3742 
3743 	/* An extent page must always have sequence number 0. */
3744 	if (page->sequence_num != 0) {
3745 		return false;
3746 	}
3747 
3748 	/* Descriptor type must be EXTENT_PAGE. */
3749 	if (desc->type != SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) {
3750 		return false;
3751 	}
3752 
3753 	/* Descriptor length cannot exceed the page. */
3754 	desc_len = sizeof(*desc) + desc->length;
3755 	if (desc_len > sizeof(page->descriptors)) {
3756 		return false;
3757 	}
3758 
3759 	/* It has to be the only descriptor in the page. */
3760 	if (desc_len + sizeof(*desc) <= sizeof(page->descriptors)) {
3761 		desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + desc_len);
3762 		if (desc->length != 0) {
3763 			return false;
3764 		}
3765 	}
3766 
3767 	return true;
3768 }
3769 
3770 static bool _spdk_bs_load_cur_md_page_valid(struct spdk_bs_load_ctx *ctx)
3771 {
3772 	uint32_t crc;
3773 
3774 	crc = _spdk_blob_md_page_calc_crc(ctx->page);
3775 	if (crc != ctx->page->crc) {
3776 		return false;
3777 	}
3778 
3779 	/* First page of a sequence should match the blobid. */
3780 	if (ctx->page->sequence_num == 0 &&
3781 	    _spdk_bs_page_to_blobid(ctx->cur_page) != ctx->page->id) {
3782 		return false;
3783 	}
3784 	return true;
3785 }
3786 
3787 static void
3788 _spdk_bs_load_replay_cur_md_page(struct spdk_bs_load_ctx *ctx);
3789 
3790 static void
3791 _spdk_bs_load_write_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
3792 {
3793 	struct spdk_bs_load_ctx	*ctx = cb_arg;
3794 
3795 	if (bserrno != 0) {
3796 		_spdk_bs_load_ctx_fail(ctx, bserrno);
3797 		return;
3798 	}
3799 
3800 	_spdk_bs_load_complete(ctx);
3801 }
3802 
3803 static void
3804 _spdk_bs_load_write_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
3805 {
3806 	struct spdk_bs_load_ctx	*ctx = cb_arg;
3807 
3808 	spdk_free(ctx->mask);
3809 	ctx->mask = NULL;
3810 
3811 	if (bserrno != 0) {
3812 		_spdk_bs_load_ctx_fail(ctx, bserrno);
3813 		return;
3814 	}
3815 
3816 	_spdk_bs_write_used_clusters(seq, ctx, _spdk_bs_load_write_used_clusters_cpl);
3817 }
3818 
3819 static void
3820 _spdk_bs_load_write_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
3821 {
3822 	struct spdk_bs_load_ctx	*ctx = cb_arg;
3823 
3824 	spdk_free(ctx->mask);
3825 	ctx->mask = NULL;
3826 
3827 	if (bserrno != 0) {
3828 		_spdk_bs_load_ctx_fail(ctx, bserrno);
3829 		return;
3830 	}
3831 
3832 	_spdk_bs_write_used_blobids(seq, ctx, _spdk_bs_load_write_used_blobids_cpl);
3833 }
3834 
3835 static void
3836 _spdk_bs_load_write_used_md(struct spdk_bs_load_ctx *ctx)
3837 {
3838 	_spdk_bs_write_used_md(ctx->seq, ctx, _spdk_bs_load_write_used_pages_cpl);
3839 }
3840 
3841 static void
3842 _spdk_bs_load_replay_md_chain_cpl(struct spdk_bs_load_ctx *ctx)
3843 {
3844 	uint64_t num_md_clusters;
3845 	uint64_t i;
3846 
3847 	ctx->in_page_chain = false;
3848 
3849 	do {
3850 		ctx->page_index++;
3851 	} while (spdk_bit_array_get(ctx->bs->used_md_pages, ctx->page_index) == true);
3852 
3853 	if (ctx->page_index < ctx->super->md_len) {
3854 		ctx->cur_page = ctx->page_index;
3855 		_spdk_bs_load_replay_cur_md_page(ctx);
3856 	} else {
3857 		/* Claim all of the clusters used by the metadata */
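		/*
		 * For example (a sketch assuming 4 KiB md pages and a 1 MiB
		 * cluster, i.e. pages_per_cluster == 256), md_len == 1028
		 * rounds up to 5 clusters, so clusters 0-4 are claimed here.
		 */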
3858 		num_md_clusters = spdk_divide_round_up(ctx->super->md_len, ctx->bs->pages_per_cluster);
3859 		for (i = 0; i < num_md_clusters; i++) {
3860 			_spdk_bs_claim_cluster(ctx->bs, i);
3861 		}
3862 		spdk_free(ctx->page);
3863 		_spdk_bs_load_write_used_md(ctx);
3864 	}
3865 }
3866 
3867 static void _spdk_bs_load_replay_extent_page(spdk_bs_sequence_t *seq, uint32_t page, void *cb_arg);
3868 
3869 static void
3870 _spdk_bs_load_replay_extent_page_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
3871 {
3872 	struct spdk_bs_load_ctx *ctx = cb_arg;
3873 	uint32_t page_num;
3874 
3875 	if (bserrno != 0) {
3876 		_spdk_bs_load_ctx_fail(ctx, bserrno);
3877 		return;
3878 	}
3879 
3880 	/* Extent pages are only read when present within the md page chain.
3881 	 * The md integrity is broken if that page is not a valid extent page. */
3882 	if (_spdk_bs_load_cur_extent_page_valid(ctx->page) != true) {
3883 		_spdk_bs_load_ctx_fail(ctx, -EILSEQ);
3884 		return;
3885 	}
3886 
3887 	page_num = ctx->extent_pages[ctx->num_extent_pages - 1];
3888 	spdk_bit_array_set(ctx->bs->used_md_pages, page_num);
3889 	if (_spdk_bs_load_replay_md_parse_page(ctx)) {
3890 		_spdk_bs_load_ctx_fail(ctx, -EILSEQ);
3891 		return;
3892 	}
3893 
3894 	ctx->num_extent_pages--;
3895 	if (ctx->num_extent_pages > 0) {
3896 		_spdk_bs_load_replay_extent_page(seq, ctx->extent_pages[ctx->num_extent_pages - 1], ctx);
3897 		return;
3898 	}
3899 
3900 	free(ctx->extent_pages);
3901 	ctx->extent_pages = NULL;
3902 
3903 	_spdk_bs_load_replay_md_chain_cpl(ctx);
3904 }
3905 
3906 static void
3907 _spdk_bs_load_replay_extent_page(spdk_bs_sequence_t *seq, uint32_t page, void *cb_arg)
3908 {
3909 	struct spdk_bs_load_ctx *ctx = cb_arg;
3910 	uint64_t lba;
3911 
3912 	assert(page < ctx->super->md_len);
3913 	lba = _spdk_bs_md_page_to_lba(ctx->bs, page);
3914 	spdk_bs_sequence_read_dev(seq, ctx->page, lba,
3915 				  _spdk_bs_byte_to_lba(ctx->bs, SPDK_BS_PAGE_SIZE),
3916 				  _spdk_bs_load_replay_extent_page_cpl, ctx);
3917 }
3918 
3919 static void
3920 _spdk_bs_load_replay_md_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
3921 {
3922 	struct spdk_bs_load_ctx *ctx = cb_arg;
3923 	uint32_t page_num;
3924 
3925 	if (bserrno != 0) {
3926 		_spdk_bs_load_ctx_fail(ctx, bserrno);
3927 		return;
3928 	}
3929 
3930 	page_num = ctx->cur_page;
3931 	if (_spdk_bs_load_cur_md_page_valid(ctx) == true) {
3932 		if (ctx->page->sequence_num == 0 || ctx->in_page_chain == true) {
3933 			_spdk_bs_claim_md_page(ctx->bs, page_num);
3934 			if (ctx->page->sequence_num == 0) {
3935 				spdk_bit_array_set(ctx->bs->used_blobids, page_num);
3936 			}
3937 			if (_spdk_bs_load_replay_md_parse_page(ctx)) {
3938 				_spdk_bs_load_ctx_fail(ctx, -EILSEQ);
3939 				return;
3940 			}
3941 			if (ctx->page->next != SPDK_INVALID_MD_PAGE) {
3942 				ctx->in_page_chain = true;
3943 				ctx->cur_page = ctx->page->next;
3944 				_spdk_bs_load_replay_cur_md_page(ctx);
3945 				return;
3946 			}
3947 			if (ctx->num_extent_pages != 0) {
3948 				/* Extent pages are read from last to first,
3949 				 * decrementing num_extent_pages as each one is read. */
3950 				_spdk_bs_load_replay_extent_page(seq, ctx->extent_pages[ctx->num_extent_pages - 1], ctx);
3951 				return;
3952 			}
3953 		}
3954 	}
3955 	_spdk_bs_load_replay_md_chain_cpl(ctx);
3956 }
3957 
3958 static void
3959 _spdk_bs_load_replay_cur_md_page(struct spdk_bs_load_ctx *ctx)
3960 {
3961 	uint64_t lba;
3962 
3963 	assert(ctx->cur_page < ctx->super->md_len);
3964 	lba = _spdk_bs_md_page_to_lba(ctx->bs, ctx->cur_page);
3965 	spdk_bs_sequence_read_dev(ctx->seq, ctx->page, lba,
3966 				  _spdk_bs_byte_to_lba(ctx->bs, SPDK_BS_PAGE_SIZE),
3967 				  _spdk_bs_load_replay_md_cpl, ctx);
3968 }
3969 
3970 static void
3971 _spdk_bs_load_replay_md(struct spdk_bs_load_ctx *ctx)
3972 {
3973 	ctx->page_index = 0;
3974 	ctx->cur_page = 0;
3975 	ctx->page = spdk_zmalloc(SPDK_BS_PAGE_SIZE, SPDK_BS_PAGE_SIZE,
3976 				 NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
3977 	if (!ctx->page) {
3978 		_spdk_bs_load_ctx_fail(ctx, -ENOMEM);
3979 		return;
3980 	}
3981 	_spdk_bs_load_replay_cur_md_page(ctx);
3982 }
3983 
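/*
 * After a dirty shutdown the persisted masks cannot be trusted, so recovery
 * resizes the in-memory bit arrays and rebuilds them by replaying every
 * metadata page (see _spdk_bs_load_replay_md above).
 */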
3984 static void
3985 _spdk_bs_recover(struct spdk_bs_load_ctx *ctx)
3986 {
3987 	int		rc;
3988 
3989 	rc = spdk_bit_array_resize(&ctx->bs->used_md_pages, ctx->super->md_len);
3990 	if (rc < 0) {
3991 		_spdk_bs_load_ctx_fail(ctx, -ENOMEM);
3992 		return;
3993 	}
3994 
3995 	rc = spdk_bit_array_resize(&ctx->bs->used_blobids, ctx->super->md_len);
3996 	if (rc < 0) {
3997 		_spdk_bs_load_ctx_fail(ctx, -ENOMEM);
3998 		return;
3999 	}
4000 
4001 	rc = spdk_bit_array_resize(&ctx->bs->used_clusters, ctx->bs->total_clusters);
4002 	if (rc < 0) {
4003 		_spdk_bs_load_ctx_fail(ctx, -ENOMEM);
4004 		return;
4005 	}
4006 
4007 	ctx->bs->num_free_clusters = ctx->bs->total_clusters;
4008 	_spdk_bs_load_replay_md(ctx);
4009 }
4010 
4011 static void
4012 _spdk_bs_load_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4013 {
4014 	struct spdk_bs_load_ctx *ctx = cb_arg;
4015 	uint32_t	crc;
4016 	int		rc;
4017 	static const char zeros[SPDK_BLOBSTORE_TYPE_LENGTH];
4018 
4019 	if (ctx->super->version > SPDK_BS_VERSION ||
4020 	    ctx->super->version < SPDK_BS_INITIAL_VERSION) {
4021 		_spdk_bs_load_ctx_fail(ctx, -EILSEQ);
4022 		return;
4023 	}
4024 
4025 	if (memcmp(ctx->super->signature, SPDK_BS_SUPER_BLOCK_SIG,
4026 		   sizeof(ctx->super->signature)) != 0) {
4027 		_spdk_bs_load_ctx_fail(ctx, -EILSEQ);
4028 		return;
4029 	}
4030 
4031 	crc = _spdk_blob_md_page_calc_crc(ctx->super);
4032 	if (crc != ctx->super->crc) {
4033 		_spdk_bs_load_ctx_fail(ctx, -EILSEQ);
4034 		return;
4035 	}
4036 
4037 	if (memcmp(&ctx->bs->bstype, &ctx->super->bstype, SPDK_BLOBSTORE_TYPE_LENGTH) == 0) {
4038 		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Bstype matched - loading blobstore\n");
4039 	} else if (memcmp(&ctx->bs->bstype, zeros, SPDK_BLOBSTORE_TYPE_LENGTH) == 0) {
4040 		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Bstype wildcard used - loading blobstore regardless of bstype\n");
4041 	} else {
4042 		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Unexpected bstype\n");
4043 		SPDK_LOGDUMP(SPDK_LOG_BLOB, "Expected:", ctx->bs->bstype.bstype, SPDK_BLOBSTORE_TYPE_LENGTH);
4044 		SPDK_LOGDUMP(SPDK_LOG_BLOB, "Found:", ctx->super->bstype.bstype, SPDK_BLOBSTORE_TYPE_LENGTH);
4045 		_spdk_bs_load_ctx_fail(ctx, -ENXIO);
4046 		return;
4047 	}
4048 
4049 	if (ctx->super->size > ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen) {
4050 		SPDK_NOTICELOG("Size mismatch, dev size: %lu, blobstore size: %lu\n",
4051 			       ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen, ctx->super->size);
4052 		_spdk_bs_load_ctx_fail(ctx, -EILSEQ);
4053 		return;
4054 	}
4055 
4056 	if (ctx->super->size == 0) {
4057 		ctx->super->size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen;
4058 	}
4059 
4060 	if (ctx->super->io_unit_size == 0) {
4061 		ctx->super->io_unit_size = SPDK_BS_PAGE_SIZE;
4062 	}
4063 
4064 	/* Parse the super block */
4065 	ctx->bs->clean = 1;
4066 	ctx->bs->cluster_sz = ctx->super->cluster_size;
4067 	ctx->bs->total_clusters = ctx->super->size / ctx->super->cluster_size;
4068 	ctx->bs->pages_per_cluster = ctx->bs->cluster_sz / SPDK_BS_PAGE_SIZE;
4069 	ctx->bs->io_unit_size = ctx->super->io_unit_size;
4070 	rc = spdk_bit_array_resize(&ctx->bs->used_clusters, ctx->bs->total_clusters);
4071 	if (rc < 0) {
4072 		_spdk_bs_load_ctx_fail(ctx, -ENOMEM);
4073 		return;
4074 	}
4075 	ctx->bs->md_start = ctx->super->md_start;
4076 	ctx->bs->md_len = ctx->super->md_len;
4077 	ctx->bs->total_data_clusters = ctx->bs->total_clusters - spdk_divide_round_up(
4078 					       ctx->bs->md_start + ctx->bs->md_len, ctx->bs->pages_per_cluster);
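	/*
	 * For example (a sketch with 4 KiB pages and 1 MiB clusters, i.e.
	 * pages_per_cluster == 256): md_start == 4 and md_len == 1024 reserve
	 * ceil(1028 / 256) == 5 clusters, leaving the remaining clusters
	 * for data.
	 */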
4079 	ctx->bs->super_blob = ctx->super->super_blob;
4080 	memcpy(&ctx->bs->bstype, &ctx->super->bstype, sizeof(ctx->super->bstype));
4081 
4082 	if (ctx->super->used_blobid_mask_len == 0 || ctx->super->clean == 0) {
4083 		_spdk_bs_recover(ctx);
4084 	} else {
4085 		_spdk_bs_load_read_used_pages(ctx);
4086 	}
4087 }
4088 
4089 void
4090 spdk_bs_load(struct spdk_bs_dev *dev, struct spdk_bs_opts *o,
4091 	     spdk_bs_op_with_handle_complete cb_fn, void *cb_arg)
4092 {
4093 	struct spdk_blob_store	*bs;
4094 	struct spdk_bs_cpl	cpl;
4095 	struct spdk_bs_load_ctx *ctx;
4096 	struct spdk_bs_opts	opts = {};
4097 	int err;
4098 
4099 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Loading blobstore from dev %p\n", dev);
4100 
4101 	if ((SPDK_BS_PAGE_SIZE % dev->blocklen) != 0) {
4102 		SPDK_ERRLOG("unsupported dev block length of %d\n", dev->blocklen);
4103 		dev->destroy(dev);
4104 		cb_fn(cb_arg, NULL, -EINVAL);
4105 		return;
4106 	}
4107 
4108 	if (o) {
4109 		opts = *o;
4110 	} else {
4111 		spdk_bs_opts_init(&opts);
4112 	}
4113 
4114 	if (opts.max_md_ops == 0 || opts.max_channel_ops == 0) {
4115 		dev->destroy(dev);
4116 		cb_fn(cb_arg, NULL, -EINVAL);
4117 		return;
4118 	}
4119 
4120 	err = _spdk_bs_alloc(dev, &opts, &bs);
4121 	if (err) {
4122 		dev->destroy(dev);
4123 		cb_fn(cb_arg, NULL, err);
4124 		return;
4125 	}
4126 
4127 	ctx = calloc(1, sizeof(*ctx));
4128 	if (!ctx) {
4129 		_spdk_bs_free(bs);
4130 		cb_fn(cb_arg, NULL, -ENOMEM);
4131 		return;
4132 	}
4133 
4134 	ctx->bs = bs;
4135 	ctx->iter_cb_fn = opts.iter_cb_fn;
4136 	ctx->iter_cb_arg = opts.iter_cb_arg;
4137 
4138 	/* Allocate memory for the super block */
4139 	ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL,
4140 				  SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
4141 	if (!ctx->super) {
4142 		free(ctx);
4143 		_spdk_bs_free(bs);
4144 		cb_fn(cb_arg, NULL, -ENOMEM);
4145 		return;
4146 	}
4147 
4148 	cpl.type = SPDK_BS_CPL_TYPE_BS_HANDLE;
4149 	cpl.u.bs_handle.cb_fn = cb_fn;
4150 	cpl.u.bs_handle.cb_arg = cb_arg;
4151 	cpl.u.bs_handle.bs = bs;
4152 
4153 	ctx->seq = spdk_bs_sequence_start(bs->md_channel, &cpl);
4154 	if (!ctx->seq) {
4155 		spdk_free(ctx->super);
4156 		free(ctx);
4157 		_spdk_bs_free(bs);
4158 		cb_fn(cb_arg, NULL, -ENOMEM);
4159 		return;
4160 	}
4161 
4162 	/* Read the super block */
4163 	spdk_bs_sequence_read_dev(ctx->seq, ctx->super, _spdk_bs_page_to_lba(bs, 0),
4164 				  _spdk_bs_byte_to_lba(bs, sizeof(*ctx->super)),
4165 				  _spdk_bs_load_super_cpl, ctx);
4166 }
4167 
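/*
 * Example usage (a minimal sketch; creating the spdk_bs_dev and running an
 * SPDK reactor are assumed to happen elsewhere):
 *
 *	static void
 *	load_done(void *cb_arg, struct spdk_blob_store *bs, int bserrno)
 *	{
 *		if (bserrno != 0) {
 *			SPDK_ERRLOG("Blobstore load failed: %d\n", bserrno);
 *			return;
 *		}
 *		SPDK_NOTICELOG("Blobstore loaded\n");
 *	}
 *
 *	spdk_bs_load(dev, NULL, load_done, NULL);
 */
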
4168 /* END spdk_bs_load */
4169 
4170 /* START spdk_bs_dump */
4171 
4172 struct spdk_bs_dump_ctx {
4173 	struct spdk_blob_store		*bs;
4174 	struct spdk_bs_super_block	*super;
4175 	uint32_t			cur_page;
4176 	struct spdk_blob_md_page	*page;
4177 	spdk_bs_sequence_t		*seq;
4178 	FILE				*fp;
4179 	spdk_bs_dump_print_xattr	print_xattr_fn;
4180 	char				xattr_name[4096];
4181 };
4182 
4183 static void
4184 _spdk_bs_dump_finish(spdk_bs_sequence_t *seq, struct spdk_bs_dump_ctx *ctx, int bserrno)
4185 {
4186 	spdk_free(ctx->super);
4187 
4188 	/*
4189 	 * We need to defer calling spdk_bs_call_cpl() until after
4190 	 * dev destruction, so tuck these away for later use.
4191 	 */
4192 	ctx->bs->unload_err = bserrno;
4193 	memcpy(&ctx->bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl));
4194 	seq->cpl.type = SPDK_BS_CPL_TYPE_NONE;
4195 
4196 	spdk_bs_sequence_finish(seq, 0);
4197 	_spdk_bs_free(ctx->bs);
4198 	free(ctx);
4199 }
4200 
4201 static void _spdk_bs_dump_read_md_page(spdk_bs_sequence_t *seq, void *cb_arg);
4202 
4203 static void
4204 _spdk_bs_dump_print_md_page(struct spdk_bs_dump_ctx *ctx)
4205 {
4206 	uint32_t page_idx = ctx->cur_page;
4207 	struct spdk_blob_md_page *page = ctx->page;
4208 	struct spdk_blob_md_descriptor *desc;
4209 	size_t cur_desc = 0;
4210 	uint32_t crc;
4211 
4212 	fprintf(ctx->fp, "=========\n");
4213 	fprintf(ctx->fp, "Metadata Page Index: %" PRIu32 " (0x%" PRIx32 ")\n", page_idx, page_idx);
4214 	fprintf(ctx->fp, "Blob ID: 0x%" PRIx64 "\n", page->id);
4215 
4216 	crc = _spdk_blob_md_page_calc_crc(page);
4217 	fprintf(ctx->fp, "CRC: 0x%" PRIx32 " (%s)\n", page->crc, crc == page->crc ? "OK" : "Mismatch");
4218 
4219 	desc = (struct spdk_blob_md_descriptor *)page->descriptors;
4220 	while (cur_desc < sizeof(page->descriptors)) {
4221 		if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) {
4222 			if (desc->length == 0) {
4223 				/* If padding and length are 0, this terminates the page */
4224 				break;
4225 			}
4226 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE) {
4227 			struct spdk_blob_md_descriptor_extent_rle	*desc_extent_rle;
4228 			unsigned int				i;
4229 
4230 			desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)desc;
4231 
4232 			for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) {
4233 				if (desc_extent_rle->extents[i].cluster_idx != 0) {
4234 					fprintf(ctx->fp, "Allocated Extent - Start: %" PRIu32,
4235 						desc_extent_rle->extents[i].cluster_idx);
4236 				} else {
4237 					fprintf(ctx->fp, "Unallocated Extent - ");
4238 				}
4239 				fprintf(ctx->fp, " Length: %" PRIu32, desc_extent_rle->extents[i].length);
4240 				fprintf(ctx->fp, "\n");
4241 			}
4242 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) {
4243 			struct spdk_blob_md_descriptor_extent_page	*desc_extent;
4244 			unsigned int					i;
4245 
4246 			desc_extent = (struct spdk_blob_md_descriptor_extent_page *)desc;
4247 
4248 			for (i = 0; i < desc_extent->length / sizeof(desc_extent->cluster_idx[0]); i++) {
4249 				if (desc_extent->cluster_idx[i] != 0) {
4250 					fprintf(ctx->fp, "Allocated Extent - Start: %" PRIu32,
4251 						desc_extent->cluster_idx[i]);
4252 				} else {
4253 					fprintf(ctx->fp, "Unallocated Extent");
4254 				}
4255 				fprintf(ctx->fp, "\n");
4256 			}
4257 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) {
4258 			struct spdk_blob_md_descriptor_xattr *desc_xattr;
4259 			uint32_t i;
4260 
4261 			desc_xattr = (struct spdk_blob_md_descriptor_xattr *)desc;
4262 
4263 			if (desc_xattr->length !=
4264 			    sizeof(desc_xattr->name_length) + sizeof(desc_xattr->value_length) +
4265 			    desc_xattr->name_length + desc_xattr->value_length) {
				/* Inconsistent descriptor; flag it in the dump but
				 * keep printing whatever can be recovered. */
				fprintf(ctx->fp, "XATTR: descriptor length mismatch\n");
4266 			}
4267 
4268 			memcpy(ctx->xattr_name, desc_xattr->name, desc_xattr->name_length);
4269 			ctx->xattr_name[desc_xattr->name_length] = '\0';
4270 			fprintf(ctx->fp, "XATTR: name = \"%s\"\n", ctx->xattr_name);
4271 			fprintf(ctx->fp, "       value = \"");
4272 			ctx->print_xattr_fn(ctx->fp, ctx->super->bstype.bstype, ctx->xattr_name,
4273 					    (void *)((uintptr_t)desc_xattr->name + desc_xattr->name_length),
4274 					    desc_xattr->value_length);
4275 			fprintf(ctx->fp, "\"\n");
4276 			for (i = 0; i < desc_xattr->value_length; i++) {
4277 				if (i % 16 == 0) {
4278 					fprintf(ctx->fp, "               ");
4279 				}
4280 				fprintf(ctx->fp, "%02" PRIx8 " ", *((uint8_t *)desc_xattr->name + desc_xattr->name_length + i));
4281 				if ((i + 1) % 16 == 0) {
4282 					fprintf(ctx->fp, "\n");
4283 				}
4284 			}
4285 			if (i % 16 != 0) {
4286 				fprintf(ctx->fp, "\n");
4287 			}
4288 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) {
4289 			/* TODO */
4290 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) {
4291 			/* TODO */
4292 		} else {
4293 			/* Error */
4294 		}
4295 		/* Advance to the next descriptor */
4296 		cur_desc += sizeof(*desc) + desc->length;
4297 		if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) {
4298 			break;
4299 		}
4300 		desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc);
4301 	}
4302 }
4303 
4304 static void
4305 _spdk_bs_dump_read_md_page_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4306 {
4307 	struct spdk_bs_dump_ctx *ctx = cb_arg;
4308 
4309 	if (bserrno != 0) {
4310 		_spdk_bs_dump_finish(seq, ctx, bserrno);
4311 		return;
4312 	}
4313 
4314 	if (ctx->page->id != 0) {
4315 		_spdk_bs_dump_print_md_page(ctx);
4316 	}
4317 
4318 	ctx->cur_page++;
4319 
4320 	if (ctx->cur_page < ctx->super->md_len) {
4321 		_spdk_bs_dump_read_md_page(seq, ctx);
4322 	} else {
4323 		spdk_free(ctx->page);
4324 		_spdk_bs_dump_finish(seq, ctx, 0);
4325 	}
4326 }
4327 
4328 static void
4329 _spdk_bs_dump_read_md_page(spdk_bs_sequence_t *seq, void *cb_arg)
4330 {
4331 	struct spdk_bs_dump_ctx *ctx = cb_arg;
4332 	uint64_t lba;
4333 
4334 	assert(ctx->cur_page < ctx->super->md_len);
4335 	lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->md_start + ctx->cur_page);
4336 	spdk_bs_sequence_read_dev(seq, ctx->page, lba,
4337 				  _spdk_bs_byte_to_lba(ctx->bs, SPDK_BS_PAGE_SIZE),
4338 				  _spdk_bs_dump_read_md_page_cpl, ctx);
4339 }
4340 
4341 static void
4342 _spdk_bs_dump_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4343 {
4344 	struct spdk_bs_dump_ctx *ctx = cb_arg;
4345 
4346 	fprintf(ctx->fp, "Signature: \"%.8s\" ", ctx->super->signature);
4347 	if (memcmp(ctx->super->signature, SPDK_BS_SUPER_BLOCK_SIG,
4348 		   sizeof(ctx->super->signature)) != 0) {
4349 		fprintf(ctx->fp, "(Mismatch)\n");
4350 		_spdk_bs_dump_finish(seq, ctx, bserrno);
4351 		return;
4352 	} else {
4353 		fprintf(ctx->fp, "(OK)\n");
4354 	}
4355 	fprintf(ctx->fp, "Version: %" PRIu32 "\n", ctx->super->version);
4356 	fprintf(ctx->fp, "CRC: 0x%x (%s)\n", ctx->super->crc,
4357 		(ctx->super->crc == _spdk_blob_md_page_calc_crc(ctx->super)) ? "OK" : "Mismatch");
4358 	fprintf(ctx->fp, "Blobstore Type: %.*s\n", SPDK_BLOBSTORE_TYPE_LENGTH, ctx->super->bstype.bstype);
4359 	fprintf(ctx->fp, "Cluster Size: %" PRIu32 "\n", ctx->super->cluster_size);
4360 	fprintf(ctx->fp, "Super Blob ID: ");
4361 	if (ctx->super->super_blob == SPDK_BLOBID_INVALID) {
4362 		fprintf(ctx->fp, "(None)\n");
4363 	} else {
4364 		fprintf(ctx->fp, "%" PRIu64 "\n", ctx->super->super_blob);
4365 	}
4366 	fprintf(ctx->fp, "Clean: %" PRIu32 "\n", ctx->super->clean);
4367 	fprintf(ctx->fp, "Used Metadata Page Mask Start: %" PRIu32 "\n", ctx->super->used_page_mask_start);
4368 	fprintf(ctx->fp, "Used Metadata Page Mask Length: %" PRIu32 "\n", ctx->super->used_page_mask_len);
4369 	fprintf(ctx->fp, "Used Cluster Mask Start: %" PRIu32 "\n", ctx->super->used_cluster_mask_start);
4370 	fprintf(ctx->fp, "Used Cluster Mask Length: %" PRIu32 "\n", ctx->super->used_cluster_mask_len);
4371 	fprintf(ctx->fp, "Used Blob ID Mask Start: %" PRIu32 "\n", ctx->super->used_blobid_mask_start);
4372 	fprintf(ctx->fp, "Used Blob ID Mask Length: %" PRIu32 "\n", ctx->super->used_blobid_mask_len);
4373 	fprintf(ctx->fp, "Metadata Start: %" PRIu32 "\n", ctx->super->md_start);
4374 	fprintf(ctx->fp, "Metadata Length: %" PRIu32 "\n", ctx->super->md_len);
4375 
4376 	ctx->cur_page = 0;
4377 	ctx->page = spdk_zmalloc(SPDK_BS_PAGE_SIZE, SPDK_BS_PAGE_SIZE,
4378 				 NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
4379 	if (!ctx->page) {
4380 		_spdk_bs_dump_finish(seq, ctx, -ENOMEM);
4381 		return;
4382 	}
4383 	_spdk_bs_dump_read_md_page(seq, ctx);
4384 }
4385 
4386 void
4387 spdk_bs_dump(struct spdk_bs_dev *dev, FILE *fp, spdk_bs_dump_print_xattr print_xattr_fn,
4388 	     spdk_bs_op_complete cb_fn, void *cb_arg)
4389 {
4390 	struct spdk_blob_store	*bs;
4391 	struct spdk_bs_cpl	cpl;
4392 	spdk_bs_sequence_t	*seq;
4393 	struct spdk_bs_dump_ctx *ctx;
4394 	struct spdk_bs_opts	opts = {};
4395 	int err;
4396 
4397 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Dumping blobstore from dev %p\n", dev);
4398 
4399 	spdk_bs_opts_init(&opts);
4400 
4401 	err = _spdk_bs_alloc(dev, &opts, &bs);
4402 	if (err) {
4403 		dev->destroy(dev);
4404 		cb_fn(cb_arg, err);
4405 		return;
4406 	}
4407 
4408 	ctx = calloc(1, sizeof(*ctx));
4409 	if (!ctx) {
4410 		_spdk_bs_free(bs);
4411 		cb_fn(cb_arg, -ENOMEM);
4412 		return;
4413 	}
4414 
4415 	ctx->bs = bs;
4416 	ctx->fp = fp;
4417 	ctx->print_xattr_fn = print_xattr_fn;
4418 
4419 	/* Allocate memory for the super block */
4420 	ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL,
4421 				  SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
4422 	if (!ctx->super) {
4423 		free(ctx);
4424 		_spdk_bs_free(bs);
4425 		cb_fn(cb_arg, -ENOMEM);
4426 		return;
4427 	}
4428 
4429 	cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
4430 	cpl.u.bs_basic.cb_fn = cb_fn;
4431 	cpl.u.bs_basic.cb_arg = cb_arg;
4432 
4433 	seq = spdk_bs_sequence_start(bs->md_channel, &cpl);
4434 	if (!seq) {
4435 		spdk_free(ctx->super);
4436 		free(ctx);
4437 		_spdk_bs_free(bs);
4438 		cb_fn(cb_arg, -ENOMEM);
4439 		return;
4440 	}
4441 
4442 	/* Read the super block */
4443 	spdk_bs_sequence_read_dev(seq, ctx->super, _spdk_bs_page_to_lba(bs, 0),
4444 				  _spdk_bs_byte_to_lba(bs, sizeof(*ctx->super)),
4445 				  _spdk_bs_dump_super_cpl, ctx);
4446 }
4447 
4448 /* END spdk_bs_dump */
4449 
4450 /* START spdk_bs_init */
4451 
4452 struct spdk_bs_init_ctx {
4453 	struct spdk_blob_store		*bs;
4454 	struct spdk_bs_super_block	*super;
4455 };
4456 
4457 static void
4458 _spdk_bs_init_persist_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4459 {
4460 	struct spdk_bs_init_ctx *ctx = cb_arg;
4461 
4462 	spdk_free(ctx->super);
4463 	free(ctx);
4464 
4465 	spdk_bs_sequence_finish(seq, bserrno);
4466 }
4467 
4468 static void
4469 _spdk_bs_init_trim_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4470 {
4471 	struct spdk_bs_init_ctx *ctx = cb_arg;
4472 
4473 	/* Write super block */
4474 	spdk_bs_sequence_write_dev(seq, ctx->super, _spdk_bs_page_to_lba(ctx->bs, 0),
4475 				   _spdk_bs_byte_to_lba(ctx->bs, sizeof(*ctx->super)),
4476 				   _spdk_bs_init_persist_super_cpl, ctx);
4477 }
4478 
4479 void
4480 spdk_bs_init(struct spdk_bs_dev *dev, struct spdk_bs_opts *o,
4481 	     spdk_bs_op_with_handle_complete cb_fn, void *cb_arg)
4482 {
4483 	struct spdk_bs_init_ctx *ctx;
4484 	struct spdk_blob_store	*bs;
4485 	struct spdk_bs_cpl	cpl;
4486 	spdk_bs_sequence_t	*seq;
4487 	spdk_bs_batch_t		*batch;
4488 	uint64_t		num_md_lba;
4489 	uint64_t		num_md_pages;
4490 	uint64_t		num_md_clusters;
4491 	uint32_t		i;
4492 	struct spdk_bs_opts	opts = {};
4493 	int			rc;
4494 
4495 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Initializing blobstore on dev %p\n", dev);
4496 
4497 	if ((SPDK_BS_PAGE_SIZE % dev->blocklen) != 0) {
4498 		SPDK_ERRLOG("unsupported dev block length of %d\n",
4499 			    dev->blocklen);
4500 		dev->destroy(dev);
4501 		cb_fn(cb_arg, NULL, -EINVAL);
4502 		return;
4503 	}
4504 
4505 	if (o) {
4506 		opts = *o;
4507 	} else {
4508 		spdk_bs_opts_init(&opts);
4509 	}
4510 
4511 	if (_spdk_bs_opts_verify(&opts) != 0) {
4512 		dev->destroy(dev);
4513 		cb_fn(cb_arg, NULL, -EINVAL);
4514 		return;
4515 	}
4516 
4517 	rc = _spdk_bs_alloc(dev, &opts, &bs);
4518 	if (rc) {
4519 		dev->destroy(dev);
4520 		cb_fn(cb_arg, NULL, rc);
4521 		return;
4522 	}
4523 
4524 	if (opts.num_md_pages == SPDK_BLOB_OPTS_NUM_MD_PAGES) {
4525 		/* By default, allocate 1 page per cluster.
4526 		 * Technically, this over-allocates metadata
4527 		 * because more metadata will reduce the number
4528 		 * of usable clusters. This can be addressed with
4529 		 * more complex math in the future.
4530 		 */
4531 		bs->md_len = bs->total_clusters;
4532 	} else {
4533 		bs->md_len = opts.num_md_pages;
4534 	}
4535 	rc = spdk_bit_array_resize(&bs->used_md_pages, bs->md_len);
4536 	if (rc < 0) {
4537 		_spdk_bs_free(bs);
4538 		cb_fn(cb_arg, NULL, -ENOMEM);
4539 		return;
4540 	}
4541 
4542 	rc = spdk_bit_array_resize(&bs->used_blobids, bs->md_len);
4543 	if (rc < 0) {
4544 		_spdk_bs_free(bs);
4545 		cb_fn(cb_arg, NULL, -ENOMEM);
4546 		return;
4547 	}
4548 
4549 	ctx = calloc(1, sizeof(*ctx));
4550 	if (!ctx) {
4551 		_spdk_bs_free(bs);
4552 		cb_fn(cb_arg, NULL, -ENOMEM);
4553 		return;
4554 	}
4555 
4556 	ctx->bs = bs;
4557 
4558 	/* Allocate memory for the super block */
4559 	ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL,
4560 				  SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
4561 	if (!ctx->super) {
4562 		free(ctx);
4563 		_spdk_bs_free(bs);
4564 		cb_fn(cb_arg, NULL, -ENOMEM);
4565 		return;
4566 	}
4567 	memcpy(ctx->super->signature, SPDK_BS_SUPER_BLOCK_SIG,
4568 	       sizeof(ctx->super->signature));
4569 	ctx->super->version = SPDK_BS_VERSION;
4570 	ctx->super->length = sizeof(*ctx->super);
4571 	ctx->super->super_blob = bs->super_blob;
4572 	ctx->super->clean = 0;
4573 	ctx->super->cluster_size = bs->cluster_sz;
4574 	ctx->super->io_unit_size = bs->io_unit_size;
4575 	memcpy(&ctx->super->bstype, &bs->bstype, sizeof(bs->bstype));
4576 
4577 	/* Calculate how many pages the metadata consumes at the front
4578 	 * of the disk.
4579 	 */
4580 
4581 	/* The super block uses 1 page */
4582 	num_md_pages = 1;
4583 
4584 	/* The used_md_pages mask requires 1 bit per metadata page, rounded
4585 	 * up to the nearest page, plus a header.
4586 	 */
4587 	ctx->super->used_page_mask_start = num_md_pages;
4588 	ctx->super->used_page_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) +
4589 					 spdk_divide_round_up(bs->md_len, 8),
4590 					 SPDK_BS_PAGE_SIZE);
4591 	num_md_pages += ctx->super->used_page_mask_len;
4592 
4593 	/* The used_clusters mask requires 1 bit per cluster, rounded
4594 	 * up to the nearest page, plus a header.
4595 	 */
4596 	ctx->super->used_cluster_mask_start = num_md_pages;
4597 	ctx->super->used_cluster_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) +
4598 					    spdk_divide_round_up(bs->total_clusters, 8),
4599 					    SPDK_BS_PAGE_SIZE);
4600 	num_md_pages += ctx->super->used_cluster_mask_len;
4601 
4602 	/* The used_blobids mask requires 1 bit per metadata page, rounded
4603 	 * up to the nearest page, plus a header.
4604 	 */
4605 	ctx->super->used_blobid_mask_start = num_md_pages;
4606 	ctx->super->used_blobid_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) +
4607 					   spdk_divide_round_up(bs->md_len, 8),
4608 					   SPDK_BS_PAGE_SIZE);
4609 	num_md_pages += ctx->super->used_blobid_mask_len;
4610 
4611 	/* The metadata region size was chosen above */
4612 	ctx->super->md_start = bs->md_start = num_md_pages;
4613 	ctx->super->md_len = bs->md_len;
4614 	num_md_pages += bs->md_len;
4615 
4616 	num_md_lba = _spdk_bs_page_to_lba(bs, num_md_pages);
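	/*
	 * Worked example (a sketch assuming a 1 GiB device with the default
	 * 4 KiB md page and 1 MiB cluster, so total_clusters == md_len == 1024):
	 * each mask fits in one page, giving page 0 (super block), page 1
	 * (used_page mask), page 2 (used_cluster mask), page 3 (used_blobid
	 * mask) and pages 4-1027 (metadata region).
	 */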
4617 
4618 	ctx->super->size = dev->blockcnt * dev->blocklen;
4619 
4620 	ctx->super->crc = _spdk_blob_md_page_calc_crc(ctx->super);
4621 
4622 	num_md_clusters = spdk_divide_round_up(num_md_pages, bs->pages_per_cluster);
4623 	if (num_md_clusters > bs->total_clusters) {
4624 		SPDK_ERRLOG("Blobstore metadata cannot use more clusters than are available, "
4625 			    "please decrease number of pages reserved for metadata "
4626 			    "or increase cluster size.\n");
4627 		spdk_free(ctx->super);
4628 		free(ctx);
4629 		_spdk_bs_free(bs);
4630 		cb_fn(cb_arg, NULL, -ENOMEM);
4631 		return;
4632 	}
4633 	/* Claim all of the clusters used by the metadata */
4634 	for (i = 0; i < num_md_clusters; i++) {
4635 		_spdk_bs_claim_cluster(bs, i);
4636 	}
4637 
4638 	bs->total_data_clusters = bs->num_free_clusters;
4639 
4640 	cpl.type = SPDK_BS_CPL_TYPE_BS_HANDLE;
4641 	cpl.u.bs_handle.cb_fn = cb_fn;
4642 	cpl.u.bs_handle.cb_arg = cb_arg;
4643 	cpl.u.bs_handle.bs = bs;
4644 
4645 	seq = spdk_bs_sequence_start(bs->md_channel, &cpl);
4646 	if (!seq) {
4647 		spdk_free(ctx->super);
4648 		free(ctx);
4649 		_spdk_bs_free(bs);
4650 		cb_fn(cb_arg, NULL, -ENOMEM);
4651 		return;
4652 	}
4653 
4654 	batch = spdk_bs_sequence_to_batch(seq, _spdk_bs_init_trim_cpl, ctx);
4655 
4656 	/* Clear metadata space */
4657 	spdk_bs_batch_write_zeroes_dev(batch, 0, num_md_lba);
4658 
4659 	switch (opts.clear_method) {
4660 	case BS_CLEAR_WITH_UNMAP:
4661 		/* Trim data clusters */
4662 		spdk_bs_batch_unmap_dev(batch, num_md_lba, ctx->bs->dev->blockcnt - num_md_lba);
4663 		break;
4664 	case BS_CLEAR_WITH_WRITE_ZEROES:
4665 		/* Write_zeroes to data clusters */
4666 		spdk_bs_batch_write_zeroes_dev(batch, num_md_lba, ctx->bs->dev->blockcnt - num_md_lba);
4667 		break;
4668 	case BS_CLEAR_WITH_NONE:
4669 	default:
4670 		break;
4671 	}
4672 
4673 	spdk_bs_batch_close(batch);
4674 }
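
/*
 * Example usage (a minimal sketch; init_done is a hypothetical
 * spdk_bs_op_with_handle_complete callback supplied by the caller):
 *
 *	struct spdk_bs_opts opts;
 *
 *	spdk_bs_opts_init(&opts);
 *	opts.cluster_sz = 1024 * 1024;
 *	opts.clear_method = BS_CLEAR_WITH_UNMAP;
 *	snprintf(opts.bstype.bstype, SPDK_BLOBSTORE_TYPE_LENGTH, "example");
 *	spdk_bs_init(dev, &opts, init_done, NULL);
 */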
4675 
4676 /* END spdk_bs_init */
4677 
4678 /* START spdk_bs_destroy */
4679 
4680 static void
4681 _spdk_bs_destroy_trim_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4682 {
4683 	struct spdk_bs_init_ctx *ctx = cb_arg;
4684 	struct spdk_blob_store *bs = ctx->bs;
4685 
4686 	/*
4687 	 * We need to defer calling spdk_bs_call_cpl() until after
4688 	 * dev destruction, so tuck these away for later use.
4689 	 */
4690 	bs->unload_err = bserrno;
4691 	memcpy(&bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl));
4692 	seq->cpl.type = SPDK_BS_CPL_TYPE_NONE;
4693 
4694 	spdk_bs_sequence_finish(seq, bserrno);
4695 
4696 	_spdk_bs_free(bs);
4697 	free(ctx);
4698 }
4699 
4700 void
4701 spdk_bs_destroy(struct spdk_blob_store *bs, spdk_bs_op_complete cb_fn,
4702 		void *cb_arg)
4703 {
4704 	struct spdk_bs_cpl	cpl;
4705 	spdk_bs_sequence_t	*seq;
4706 	struct spdk_bs_init_ctx *ctx;
4707 
4708 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Destroying blobstore\n");
4709 
4710 	if (!TAILQ_EMPTY(&bs->blobs)) {
4711 		SPDK_ERRLOG("Blobstore still has open blobs\n");
4712 		cb_fn(cb_arg, -EBUSY);
4713 		return;
4714 	}
4715 
4716 	cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
4717 	cpl.u.bs_basic.cb_fn = cb_fn;
4718 	cpl.u.bs_basic.cb_arg = cb_arg;
4719 
4720 	ctx = calloc(1, sizeof(*ctx));
4721 	if (!ctx) {
4722 		cb_fn(cb_arg, -ENOMEM);
4723 		return;
4724 	}
4725 
4726 	ctx->bs = bs;
4727 
4728 	seq = spdk_bs_sequence_start(bs->md_channel, &cpl);
4729 	if (!seq) {
4730 		free(ctx);
4731 		cb_fn(cb_arg, -ENOMEM);
4732 		return;
4733 	}
4734 
4735 	/* Write zeroes to the super block */
4736 	spdk_bs_sequence_write_zeroes_dev(seq,
4737 					  _spdk_bs_page_to_lba(bs, 0),
4738 					  _spdk_bs_byte_to_lba(bs, sizeof(struct spdk_bs_super_block)),
4739 					  _spdk_bs_destroy_trim_cpl, ctx);
4740 }
4741 
4742 /* END spdk_bs_destroy */
4743 
4744 /* START spdk_bs_unload */
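
/*
 * Unload persists state in a fixed order: used md pages mask, used blobids
 * mask, used clusters mask and finally the super block with clean == 1.
 * A crash before that final write leaves clean == 0, forcing the
 * recovery/replay path on the next load.
 */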
4745 
4746 static void
4747 _spdk_bs_unload_finish(struct spdk_bs_load_ctx *ctx, int bserrno)
4748 {
4749 	spdk_bs_sequence_t *seq = ctx->seq;
4750 
4751 	spdk_free(ctx->super);
4752 
4753 	/*
4754 	 * We need to defer calling spdk_bs_call_cpl() until after
4755 	 * dev destruction, so tuck these away for later use.
4756 	 */
4757 	ctx->bs->unload_err = bserrno;
4758 	memcpy(&ctx->bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl));
4759 	seq->cpl.type = SPDK_BS_CPL_TYPE_NONE;
4760 
4761 	spdk_bs_sequence_finish(seq, bserrno);
4762 
4763 	_spdk_bs_free(ctx->bs);
4764 	free(ctx);
4765 }
4766 
4767 static void
4768 _spdk_bs_unload_write_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4769 {
4770 	struct spdk_bs_load_ctx	*ctx = cb_arg;
4771 
4772 	_spdk_bs_unload_finish(ctx, bserrno);
4773 }
4774 
4775 static void
4776 _spdk_bs_unload_write_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4777 {
4778 	struct spdk_bs_load_ctx	*ctx = cb_arg;
4779 
4780 	spdk_free(ctx->mask);
4781 
4782 	if (bserrno != 0) {
4783 		_spdk_bs_unload_finish(ctx, bserrno);
4784 		return;
4785 	}
4786 
4787 	ctx->super->clean = 1;
4788 
4789 	_spdk_bs_write_super(seq, ctx->bs, ctx->super, _spdk_bs_unload_write_super_cpl, ctx);
4790 }
4791 
4792 static void
4793 _spdk_bs_unload_write_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4794 {
4795 	struct spdk_bs_load_ctx	*ctx = cb_arg;
4796 
4797 	spdk_free(ctx->mask);
4798 	ctx->mask = NULL;
4799 
4800 	if (bserrno != 0) {
4801 		_spdk_bs_unload_finish(ctx, bserrno);
4802 		return;
4803 	}
4804 
4805 	_spdk_bs_write_used_clusters(seq, ctx, _spdk_bs_unload_write_used_clusters_cpl);
4806 }
4807 
4808 static void
4809 _spdk_bs_unload_write_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4810 {
4811 	struct spdk_bs_load_ctx	*ctx = cb_arg;
4812 
4813 	spdk_free(ctx->mask);
4814 	ctx->mask = NULL;
4815 
4816 	if (bserrno != 0) {
4817 		_spdk_bs_unload_finish(ctx, bserrno);
4818 		return;
4819 	}
4820 
4821 	_spdk_bs_write_used_blobids(seq, ctx, _spdk_bs_unload_write_used_blobids_cpl);
4822 }
4823 
4824 static void
4825 _spdk_bs_unload_read_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4826 {
4827 	struct spdk_bs_load_ctx	*ctx = cb_arg;
4828 
4829 	if (bserrno != 0) {
4830 		_spdk_bs_unload_finish(ctx, bserrno);
4831 		return;
4832 	}
4833 
4834 	_spdk_bs_write_used_md(seq, cb_arg, _spdk_bs_unload_write_used_pages_cpl);
4835 }
4836 
4837 void
4838 spdk_bs_unload(struct spdk_blob_store *bs, spdk_bs_op_complete cb_fn, void *cb_arg)
4839 {
4840 	struct spdk_bs_cpl	cpl;
4841 	struct spdk_bs_load_ctx *ctx;
4842 
4843 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Syncing blobstore\n");
4844 
4845 	if (!TAILQ_EMPTY(&bs->blobs)) {
4846 		SPDK_ERRLOG("Blobstore still has open blobs\n");
4847 		cb_fn(cb_arg, -EBUSY);
4848 		return;
4849 	}
4850 
4851 	ctx = calloc(1, sizeof(*ctx));
4852 	if (!ctx) {
4853 		cb_fn(cb_arg, -ENOMEM);
4854 		return;
4855 	}
4856 
4857 	ctx->bs = bs;
4858 
4859 	ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL,
4860 				  SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
4861 	if (!ctx->super) {
4862 		free(ctx);
4863 		cb_fn(cb_arg, -ENOMEM);
4864 		return;
4865 	}
4866 
4867 	cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
4868 	cpl.u.bs_basic.cb_fn = cb_fn;
4869 	cpl.u.bs_basic.cb_arg = cb_arg;
4870 
4871 	ctx->seq = spdk_bs_sequence_start(bs->md_channel, &cpl);
4872 	if (!ctx->seq) {
4873 		spdk_free(ctx->super);
4874 		free(ctx);
4875 		cb_fn(cb_arg, -ENOMEM);
4876 		return;
4877 	}
4878 
4879 	/* Read super block */
4880 	spdk_bs_sequence_read_dev(ctx->seq, ctx->super, _spdk_bs_page_to_lba(bs, 0),
4881 				  _spdk_bs_byte_to_lba(bs, sizeof(*ctx->super)),
4882 				  _spdk_bs_unload_read_super_cpl, ctx);
4883 }
4884 
4885 /* END spdk_bs_unload */
4886 
4887 /* START spdk_bs_set_super */
4888 
4889 struct spdk_bs_set_super_ctx {
4890 	struct spdk_blob_store		*bs;
4891 	struct spdk_bs_super_block	*super;
4892 };
4893 
4894 static void
4895 _spdk_bs_set_super_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4896 {
4897 	struct spdk_bs_set_super_ctx	*ctx = cb_arg;
4898 
4899 	if (bserrno != 0) {
4900 		SPDK_ERRLOG("Unable to write to super block of blobstore\n");
4901 	}
4902 
4903 	spdk_free(ctx->super);
4904 
4905 	spdk_bs_sequence_finish(seq, bserrno);
4906 
4907 	free(ctx);
4908 }
4909 
4910 static void
4911 _spdk_bs_set_super_read_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4912 {
4913 	struct spdk_bs_set_super_ctx	*ctx = cb_arg;
4914 
4915 	if (bserrno != 0) {
4916 		SPDK_ERRLOG("Unable to read super block of blobstore\n");
4917 		spdk_free(ctx->super);
4918 		spdk_bs_sequence_finish(seq, bserrno);
4919 		free(ctx);
4920 		return;
4921 	}
4922 
4923 	_spdk_bs_write_super(seq, ctx->bs, ctx->super, _spdk_bs_set_super_write_cpl, ctx);
4924 }
4925 
4926 void
4927 spdk_bs_set_super(struct spdk_blob_store *bs, spdk_blob_id blobid,
4928 		  spdk_bs_op_complete cb_fn, void *cb_arg)
4929 {
4930 	struct spdk_bs_cpl		cpl;
4931 	spdk_bs_sequence_t		*seq;
4932 	struct spdk_bs_set_super_ctx	*ctx;
4933 
4934 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Setting super blob id on blobstore\n");
4935 
4936 	ctx = calloc(1, sizeof(*ctx));
4937 	if (!ctx) {
4938 		cb_fn(cb_arg, -ENOMEM);
4939 		return;
4940 	}
4941 
4942 	ctx->bs = bs;
4943 
4944 	ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL,
4945 				  SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
4946 	if (!ctx->super) {
4947 		free(ctx);
4948 		cb_fn(cb_arg, -ENOMEM);
4949 		return;
4950 	}
4951 
4952 	cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
4953 	cpl.u.bs_basic.cb_fn = cb_fn;
4954 	cpl.u.bs_basic.cb_arg = cb_arg;
4955 
4956 	seq = spdk_bs_sequence_start(bs->md_channel, &cpl);
4957 	if (!seq) {
4958 		spdk_free(ctx->super);
4959 		free(ctx);
4960 		cb_fn(cb_arg, -ENOMEM);
4961 		return;
4962 	}
4963 
4964 	bs->super_blob = blobid;
4965 
4966 	/* Read super block */
4967 	spdk_bs_sequence_read_dev(seq, ctx->super, _spdk_bs_page_to_lba(bs, 0),
4968 				  _spdk_bs_byte_to_lba(bs, sizeof(*ctx->super)),
4969 				  _spdk_bs_set_super_read_cpl, ctx);
4970 }
4971 
4972 /* END spdk_bs_set_super */
4973 
4974 void
4975 spdk_bs_get_super(struct spdk_blob_store *bs,
4976 		  spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
4977 {
4978 	if (bs->super_blob == SPDK_BLOBID_INVALID) {
4979 		cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOENT);
4980 	} else {
4981 		cb_fn(cb_arg, bs->super_blob, 0);
4982 	}
4983 }
4984 
4985 uint64_t
4986 spdk_bs_get_cluster_size(struct spdk_blob_store *bs)
4987 {
4988 	return bs->cluster_sz;
4989 }
4990 
4991 uint64_t
4992 spdk_bs_get_page_size(struct spdk_blob_store *bs)
4993 {
4994 	return SPDK_BS_PAGE_SIZE;
4995 }
4996 
4997 uint64_t
4998 spdk_bs_get_io_unit_size(struct spdk_blob_store *bs)
4999 {
5000 	return bs->io_unit_size;
5001 }
5002 
5003 uint64_t
5004 spdk_bs_free_cluster_count(struct spdk_blob_store *bs)
5005 {
5006 	return bs->num_free_clusters;
5007 }
5008 
5009 uint64_t
5010 spdk_bs_total_data_cluster_count(struct spdk_blob_store *bs)
5011 {
5012 	return bs->total_data_clusters;
5013 }
5014 
5015 static int
5016 spdk_bs_register_md_thread(struct spdk_blob_store *bs)
5017 {
5018 	bs->md_channel = spdk_get_io_channel(bs);
5019 	if (!bs->md_channel) {
5020 		SPDK_ERRLOG("Failed to get IO channel.\n");
5021 		return -1;
5022 	}
5023 
5024 	return 0;
5025 }
5026 
5027 static int
5028 spdk_bs_unregister_md_thread(struct spdk_blob_store *bs)
5029 {
5030 	spdk_put_io_channel(bs->md_channel);
5031 
5032 	return 0;
5033 }
5034 
5035 spdk_blob_id spdk_blob_get_id(struct spdk_blob *blob)
5036 {
5037 	assert(blob != NULL);
5038 
5039 	return blob->id;
5040 }
5041 
5042 uint64_t spdk_blob_get_num_pages(struct spdk_blob *blob)
5043 {
5044 	assert(blob != NULL);
5045 
5046 	return _spdk_bs_cluster_to_page(blob->bs, blob->active.num_clusters);
5047 }
5048 
5049 uint64_t spdk_blob_get_num_io_units(struct spdk_blob *blob)
5050 {
5051 	assert(blob != NULL);
5052 
5053 	return spdk_blob_get_num_pages(blob) * _spdk_bs_io_unit_per_page(blob->bs);
5054 }
5055 
5056 uint64_t spdk_blob_get_num_clusters(struct spdk_blob *blob)
5057 {
5058 	assert(blob != NULL);
5059 
5060 	return blob->active.num_clusters;
5061 }
5062 
5063 /* START spdk_bs_create_blob */
5064 
5065 static void
5066 _spdk_bs_create_blob_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
5067 {
5068 	struct spdk_blob *blob = cb_arg;
5069 
5070 	_spdk_blob_free(blob);
5071 
5072 	spdk_bs_sequence_finish(seq, bserrno);
5073 }
5074 
5075 static int
5076 _spdk_blob_set_xattrs(struct spdk_blob *blob, const struct spdk_blob_xattr_opts *xattrs,
5077 		      bool internal)
5078 {
5079 	uint64_t i;
5080 	size_t value_len = 0;
5081 	int rc;
5082 	const void *value = NULL;
5083 	if (xattrs->count > 0 && xattrs->get_value == NULL) {
5084 		return -EINVAL;
5085 	}
5086 	for (i = 0; i < xattrs->count; i++) {
5087 		xattrs->get_value(xattrs->ctx, xattrs->names[i], &value, &value_len);
5088 		if (value == NULL || value_len == 0) {
5089 			return -EINVAL;
5090 		}
5091 		rc = _spdk_blob_set_xattr(blob, xattrs->names[i], value, value_len, internal);
5092 		if (rc < 0) {
5093 			return rc;
5094 		}
5095 	}
5096 	return 0;
5097 }
5098 
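/*
 * Example xattr getter (a hypothetical sketch of the get_value callback that
 * _spdk_blob_set_xattrs invokes; not part of this file):
 *
 *	static void
 *	xattr_get_value(void *ctx, const char *name,
 *			const void **value, size_t *value_len)
 *	{
 *		*value = "example";
 *		*value_len = sizeof("example");
 *	}
 *
 * A caller would set opts.xattrs.names, opts.xattrs.count and
 * opts.xattrs.get_value before calling spdk_bs_create_blob_ext().
 */
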
5099 static void
5100 _spdk_bs_create_blob(struct spdk_blob_store *bs,
5101 		     const struct spdk_blob_opts *opts,
5102 		     const struct spdk_blob_xattr_opts *internal_xattrs,
5103 		     spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
5104 {
5105 	struct spdk_blob	*blob;
5106 	uint32_t		page_idx;
5107 	struct spdk_bs_cpl	cpl;
5108 	struct spdk_blob_opts	opts_default;
5109 	struct spdk_blob_xattr_opts internal_xattrs_default;
5110 	spdk_bs_sequence_t	*seq;
5111 	spdk_blob_id		id;
5112 	int rc;
5113 
5114 	assert(spdk_get_thread() == bs->md_thread);
5115 
5116 	page_idx = spdk_bit_array_find_first_clear(bs->used_md_pages, 0);
5117 	if (page_idx == UINT32_MAX) {
5118 		cb_fn(cb_arg, 0, -ENOMEM);
5119 		return;
5120 	}
5121 	spdk_bit_array_set(bs->used_blobids, page_idx);
5122 	_spdk_bs_claim_md_page(bs, page_idx);
5123 
5124 	id = _spdk_bs_page_to_blobid(page_idx);
5125 
5126 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Creating blob with id %lu at page %u\n", id, page_idx);
5127 
5128 	blob = _spdk_blob_alloc(bs, id);
5129 	if (!blob) {
5130 		cb_fn(cb_arg, 0, -ENOMEM);
5131 		return;
5132 	}
5133 
5134 	if (!opts) {
5135 		spdk_blob_opts_init(&opts_default);
5136 		opts = &opts_default;
5137 	}
5138 
5139 	blob->use_extent_table = opts->use_extent_table;
5140 
5141 	if (!internal_xattrs) {
5142 		_spdk_blob_xattrs_init(&internal_xattrs_default);
5143 		internal_xattrs = &internal_xattrs_default;
5144 	}
5145 
5146 	rc = _spdk_blob_set_xattrs(blob, &opts->xattrs, false);
5147 	if (rc < 0) {
5148 		_spdk_blob_free(blob);
5149 		cb_fn(cb_arg, 0, rc);
5150 		return;
5151 	}
5152 
5153 	rc = _spdk_blob_set_xattrs(blob, internal_xattrs, true);
5154 	if (rc < 0) {
5155 		_spdk_blob_free(blob);
5156 		cb_fn(cb_arg, 0, rc);
5157 		return;
5158 	}
5159 
5160 	if (opts->thin_provision) {
5161 		_spdk_blob_set_thin_provision(blob);
5162 	}
5163 
5164 	_spdk_blob_set_clear_method(blob, opts->clear_method);
5165 
5166 	rc = _spdk_blob_resize(blob, opts->num_clusters);
5167 	if (rc < 0) {
5168 		_spdk_blob_free(blob);
5169 		cb_fn(cb_arg, 0, rc);
5170 		return;
5171 	}
5172 	cpl.type = SPDK_BS_CPL_TYPE_BLOBID;
5173 	cpl.u.blobid.cb_fn = cb_fn;
5174 	cpl.u.blobid.cb_arg = cb_arg;
5175 	cpl.u.blobid.blobid = blob->id;
5176 
5177 	seq = spdk_bs_sequence_start(bs->md_channel, &cpl);
5178 	if (!seq) {
5179 		_spdk_blob_free(blob);
5180 		cb_fn(cb_arg, 0, -ENOMEM);
5181 		return;
5182 	}
5183 
5184 	_spdk_blob_persist(seq, blob, _spdk_bs_create_blob_cpl, blob);
5185 }
5186 
5187 void spdk_bs_create_blob(struct spdk_blob_store *bs,
5188 			 spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
5189 {
5190 	_spdk_bs_create_blob(bs, NULL, NULL, cb_fn, cb_arg);
5191 }
5192 
5193 void spdk_bs_create_blob_ext(struct spdk_blob_store *bs, const struct spdk_blob_opts *opts,
5194 			     spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
5195 {
5196 	_spdk_bs_create_blob(bs, opts, NULL, cb_fn, cb_arg);
5197 }
5198 
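/*
 * Example usage (a minimal sketch; blob_create_done is a hypothetical
 * spdk_blob_op_with_id_complete callback supplied by the caller):
 *
 *	struct spdk_blob_opts opts;
 *
 *	spdk_blob_opts_init(&opts);
 *	opts.num_clusters = 8;
 *	opts.thin_provision = true;
 *	spdk_bs_create_blob_ext(bs, &opts, blob_create_done, NULL);
 */
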
5199 /* END spdk_bs_create_blob */
5200 
5201 /* START blob_cleanup */
5202 
5203 struct spdk_clone_snapshot_ctx {
5204 	struct spdk_bs_cpl      cpl;
5205 	int bserrno;
5206 	bool frozen;
5207 
5208 	struct spdk_io_channel *channel;
5209 
5210 	/* Current cluster for inflate operation */
5211 	uint64_t cluster;
5212 
5213 	/* For inflation, force allocation of all unallocated clusters and remove
5214 	 * thin provisioning. Otherwise, only decouple the parent and keep the clone thin. */
5215 	bool allocate_all;
5216 
5217 	struct {
5218 		spdk_blob_id id;
5219 		struct spdk_blob *blob;
5220 	} original;
5221 	struct {
5222 		spdk_blob_id id;
5223 		struct spdk_blob *blob;
5224 	} new;
5225 
5226 	/* xattrs specified for snapshots/clones only. They have no impact on
5227 	 * the original blob's xattrs. */
5228 	const struct spdk_blob_xattr_opts *xattrs;
5229 };
5230 
5231 static void
5232 _spdk_bs_clone_snapshot_cleanup_finish(void *cb_arg, int bserrno)
5233 {
5234 	struct spdk_clone_snapshot_ctx *ctx = cb_arg;
5235 	struct spdk_bs_cpl *cpl = &ctx->cpl;
5236 
5237 	if (bserrno != 0) {
5238 		if (ctx->bserrno != 0) {
5239 			SPDK_ERRLOG("Cleanup error %d\n", bserrno);
5240 		} else {
5241 			ctx->bserrno = bserrno;
5242 		}
5243 	}
5244 
5245 	switch (cpl->type) {
5246 	case SPDK_BS_CPL_TYPE_BLOBID:
5247 		cpl->u.blobid.cb_fn(cpl->u.blobid.cb_arg, cpl->u.blobid.blobid, ctx->bserrno);
5248 		break;
5249 	case SPDK_BS_CPL_TYPE_BLOB_BASIC:
5250 		cpl->u.blob_basic.cb_fn(cpl->u.blob_basic.cb_arg, ctx->bserrno);
5251 		break;
5252 	default:
5253 		SPDK_UNREACHABLE();
5254 		break;
5255 	}
5256 
5257 	free(ctx);
5258 }
5259 
5260 static void
5261 _spdk_bs_snapshot_unfreeze_cpl(void *cb_arg, int bserrno)
5262 {
5263 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
5264 	struct spdk_blob *origblob = ctx->original.blob;
5265 
5266 	if (bserrno != 0) {
5267 		if (ctx->bserrno != 0) {
5268 			SPDK_ERRLOG("Unfreeze error %d\n", bserrno);
5269 		} else {
5270 			ctx->bserrno = bserrno;
5271 		}
5272 	}
5273 
5274 	ctx->original.id = origblob->id;
5275 	origblob->locked_operation_in_progress = false;
5276 
5277 	spdk_blob_close(origblob, _spdk_bs_clone_snapshot_cleanup_finish, ctx);
5278 }
5279 
5280 static void
5281 _spdk_bs_clone_snapshot_origblob_cleanup(void *cb_arg, int bserrno)
5282 {
5283 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
5284 	struct spdk_blob *origblob = ctx->original.blob;
5285 
5286 	if (bserrno != 0) {
5287 		if (ctx->bserrno != 0) {
5288 			SPDK_ERRLOG("Cleanup error %d\n", bserrno);
5289 		} else {
5290 			ctx->bserrno = bserrno;
5291 		}
5292 	}
5293 
5294 	if (ctx->frozen) {
5295 		/* Unfreeze any outstanding I/O */
5296 		_spdk_blob_unfreeze_io(origblob, _spdk_bs_snapshot_unfreeze_cpl, ctx);
5297 	} else {
5298 		_spdk_bs_snapshot_unfreeze_cpl(ctx, 0);
	}
}
5302 
5303 static void
5304 _spdk_bs_clone_snapshot_newblob_cleanup(void *cb_arg, int bserrno)
5305 {
5306 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
5307 	struct spdk_blob *newblob = ctx->new.blob;
5308 
5309 	if (bserrno != 0) {
5310 		if (ctx->bserrno != 0) {
5311 			SPDK_ERRLOG("Cleanup error %d\n", bserrno);
5312 		} else {
5313 			ctx->bserrno = bserrno;
5314 		}
5315 	}
5316 
5317 	ctx->new.id = newblob->id;
5318 	spdk_blob_close(newblob, _spdk_bs_clone_snapshot_origblob_cleanup, ctx);
5319 }
5320 
5321 /* END blob_cleanup */
5322 
5323 /* START spdk_bs_create_snapshot */
5324 
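/*
 * Snapshot creation proceeds as a chain of callbacks (summarized here for the
 * reader; the functions below appear roughly in reverse order of invocation):
 *   1. spdk_bs_create_snapshot() opens the original blob.
 *   2. A new blob is created with the same number of clusters, thin-provisioned,
 *      carrying the SNAPSHOT_IN_PROGRESS xattr for crash recovery.
 *   3. I/O on the original blob is frozen.
 *   4. The cluster maps of the two blobs are swapped - the snapshot takes over
 *      the allocated clusters and the original becomes a thin clone of it.
 *   5. The snapshot metadata is synced, then the original (now clone) metadata;
 *      the SNAPSHOT_IN_PROGRESS marker is removed, the snapshot is marked
 *      read-only, and finally I/O is unfrozen and both blobs are closed.
 * On any failure the cluster maps are swapped back and the chain unwinds
 * through the cleanup callbacks above.
 */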
5325 static void
5326 _spdk_bs_snapshot_swap_cluster_maps(struct spdk_blob *blob1, struct spdk_blob *blob2)
5327 {
5328 	uint64_t *cluster_temp;
5329 	uint32_t *extent_page_temp;
5330 
5331 	cluster_temp = blob1->active.clusters;
5332 	blob1->active.clusters = blob2->active.clusters;
5333 	blob2->active.clusters = cluster_temp;
5334 
5335 	extent_page_temp = blob1->active.extent_pages;
5336 	blob1->active.extent_pages = blob2->active.extent_pages;
5337 	blob2->active.extent_pages = extent_page_temp;
5338 }
5339 
5340 static void
5341 _spdk_bs_snapshot_origblob_sync_cpl(void *cb_arg, int bserrno)
5342 {
5343 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
5344 	struct spdk_blob *origblob = ctx->original.blob;
5345 	struct spdk_blob *newblob = ctx->new.blob;
5346 
5347 	if (bserrno != 0) {
5348 		_spdk_bs_snapshot_swap_cluster_maps(newblob, origblob);
5349 		_spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
5350 		return;
5351 	}
5352 
5353 	/* Remove metadata descriptor SNAPSHOT_IN_PROGRESS */
5354 	bserrno = _spdk_blob_remove_xattr(newblob, SNAPSHOT_IN_PROGRESS, true);
5355 	if (bserrno != 0) {
5356 		_spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
5357 		return;
5358 	}
5359 
5360 	_spdk_bs_blob_list_add(ctx->original.blob);
5361 
5362 	spdk_blob_set_read_only(newblob);
5363 
5364 	/* sync snapshot metadata */
5365 	spdk_blob_sync_md(newblob, _spdk_bs_clone_snapshot_origblob_cleanup, ctx);
5366 }
5367 
5368 static void
5369 _spdk_bs_snapshot_newblob_sync_cpl(void *cb_arg, int bserrno)
5370 {
5371 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
5372 	struct spdk_blob *origblob = ctx->original.blob;
5373 	struct spdk_blob *newblob = ctx->new.blob;
5374 
5375 	if (bserrno != 0) {
5376 		/* return cluster map back to original */
5377 		_spdk_bs_snapshot_swap_cluster_maps(newblob, origblob);
5378 		_spdk_bs_clone_snapshot_newblob_cleanup(ctx, bserrno);
5379 		return;
5380 	}
5381 
5382 	/* Set internal xattr for snapshot id */
5383 	bserrno = _spdk_blob_set_xattr(origblob, BLOB_SNAPSHOT, &newblob->id, sizeof(spdk_blob_id), true);
5384 	if (bserrno != 0) {
5385 		/* return cluster map back to original */
5386 		_spdk_bs_snapshot_swap_cluster_maps(newblob, origblob);
5387 		_spdk_bs_clone_snapshot_newblob_cleanup(ctx, bserrno);
5388 		return;
5389 	}
5390 
5391 	_spdk_bs_blob_list_remove(origblob);
5392 	origblob->parent_id = newblob->id;
5393 
5394 	/* Create new back_bs_dev for snapshot */
5395 	origblob->back_bs_dev = spdk_bs_create_blob_bs_dev(newblob);
5396 	if (origblob->back_bs_dev == NULL) {
5397 		/* return cluster map back to original */
5398 		_spdk_bs_snapshot_swap_cluster_maps(newblob, origblob);
5399 		_spdk_bs_clone_snapshot_newblob_cleanup(ctx, -EINVAL);
5400 		return;
5401 	}
5402 
5403 	/* set clone blob as thin provisioned */
5404 	_spdk_blob_set_thin_provision(origblob);
5405 
5406 	_spdk_bs_blob_list_add(newblob);
5407 
5408 	/* sync clone metadata */
5409 	spdk_blob_sync_md(origblob, _spdk_bs_snapshot_origblob_sync_cpl, ctx);
5410 }
5411 
5412 static void
5413 _spdk_bs_snapshot_freeze_cpl(void *cb_arg, int rc)
5414 {
5415 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
5416 	struct spdk_blob *origblob = ctx->original.blob;
5417 	struct spdk_blob *newblob = ctx->new.blob;
5418 	int bserrno;
5419 
5420 	if (rc != 0) {
5421 		_spdk_bs_clone_snapshot_newblob_cleanup(ctx, rc);
5422 		return;
5423 	}
5424 
5425 	ctx->frozen = true;
5426 
5427 	/* set new back_bs_dev for snapshot */
5428 	newblob->back_bs_dev = origblob->back_bs_dev;
5429 	/* Set invalid flags from origblob */
5430 	newblob->invalid_flags = origblob->invalid_flags;
5431 
5432 	/* inherit parent from original blob if set */
5433 	newblob->parent_id = origblob->parent_id;
5434 	if (origblob->parent_id != SPDK_BLOBID_INVALID) {
5435 		/* Set internal xattr for snapshot id */
5436 		bserrno = _spdk_blob_set_xattr(newblob, BLOB_SNAPSHOT,
5437 					       &origblob->parent_id, sizeof(spdk_blob_id), true);
5438 		if (bserrno != 0) {
5439 			_spdk_bs_clone_snapshot_newblob_cleanup(ctx, bserrno);
5440 			return;
5441 		}
5442 	}
5443 
5444 	/* swap cluster maps */
5445 	_spdk_bs_snapshot_swap_cluster_maps(newblob, origblob);
5446 
5447 	/* Set the clear method on the new blob to match the original. */
5448 	_spdk_blob_set_clear_method(newblob, origblob->clear_method);
5449 
5450 	/* sync snapshot metadata */
5451 	spdk_blob_sync_md(newblob, _spdk_bs_snapshot_newblob_sync_cpl, ctx);
5452 }
5453 
5454 static void
5455 _spdk_bs_snapshot_newblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
5456 {
5457 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
5458 	struct spdk_blob *origblob = ctx->original.blob;
5459 	struct spdk_blob *newblob = _blob;
5460 
5461 	if (bserrno != 0) {
5462 		_spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
5463 		return;
5464 	}
5465 
5466 	ctx->new.blob = newblob;
5467 	assert(spdk_blob_is_thin_provisioned(newblob));
5468 	assert(spdk_mem_all_zero(newblob->active.clusters,
5469 				 newblob->active.num_clusters * sizeof(*newblob->active.clusters)));
5470 	assert(spdk_mem_all_zero(newblob->active.extent_pages,
5471 				 newblob->active.num_extent_pages * sizeof(*newblob->active.extent_pages)));
5472 
5473 	_spdk_blob_freeze_io(origblob, _spdk_bs_snapshot_freeze_cpl, ctx);
5474 }
5475 
5476 static void
5477 _spdk_bs_snapshot_newblob_create_cpl(void *cb_arg, spdk_blob_id blobid, int bserrno)
5478 {
5479 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
5480 	struct spdk_blob *origblob = ctx->original.blob;
5481 
5482 	if (bserrno != 0) {
5483 		_spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
5484 		return;
5485 	}
5486 
5487 	ctx->new.id = blobid;
5488 	ctx->cpl.u.blobid.blobid = blobid;
5489 
5490 	spdk_bs_open_blob(origblob->bs, ctx->new.id, _spdk_bs_snapshot_newblob_open_cpl, ctx);
5491 }
5492 
5493 
5494 static void
5495 _spdk_bs_xattr_snapshot(void *arg, const char *name,
5496 			const void **value, size_t *value_len)
{
	struct spdk_blob *blob = (struct spdk_blob *)arg;

	assert(strncmp(name, SNAPSHOT_IN_PROGRESS, sizeof(SNAPSHOT_IN_PROGRESS)) == 0);

	*value = &blob->id;
	*value_len = sizeof(blob->id);
}
5504 
5505 static void
5506 _spdk_bs_snapshot_origblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
5507 {
5508 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
5509 	struct spdk_blob_opts opts;
5510 	struct spdk_blob_xattr_opts internal_xattrs;
5511 	char *xattrs_names[] = { SNAPSHOT_IN_PROGRESS };
5512 
5513 	if (bserrno != 0) {
5514 		_spdk_bs_clone_snapshot_cleanup_finish(ctx, bserrno);
5515 		return;
5516 	}
5517 
5518 	ctx->original.blob = _blob;
5519 
5520 	if (_blob->data_ro || _blob->md_ro) {
		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Cannot create snapshot from read-only blob with id %lu\n",
5522 			      _blob->id);
5523 		ctx->bserrno = -EINVAL;
5524 		spdk_blob_close(_blob, _spdk_bs_clone_snapshot_cleanup_finish, ctx);
5525 		return;
5526 	}
5527 
5528 	if (_blob->locked_operation_in_progress) {
5529 		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Cannot create snapshot - another operation in progress\n");
5530 		ctx->bserrno = -EBUSY;
5531 		spdk_blob_close(_blob, _spdk_bs_clone_snapshot_cleanup_finish, ctx);
5532 		return;
5533 	}
5534 
5535 	_blob->locked_operation_in_progress = true;
5536 
5537 	spdk_blob_opts_init(&opts);
5538 	_spdk_blob_xattrs_init(&internal_xattrs);
5539 
	/* Change the size of the new blob to be the same as in the original blob,
	 * but do not allocate clusters */
5542 	opts.thin_provision = true;
5543 	opts.num_clusters = spdk_blob_get_num_clusters(_blob);
5544 	opts.use_extent_table = _blob->use_extent_table;
5545 
5546 	/* If there are any xattrs specified for snapshot, set them now */
5547 	if (ctx->xattrs) {
5548 		memcpy(&opts.xattrs, ctx->xattrs, sizeof(*ctx->xattrs));
5549 	}
5550 	/* Set internal xattr SNAPSHOT_IN_PROGRESS */
5551 	internal_xattrs.count = 1;
5552 	internal_xattrs.ctx = _blob;
5553 	internal_xattrs.names = xattrs_names;
5554 	internal_xattrs.get_value = _spdk_bs_xattr_snapshot;
5555 
5556 	_spdk_bs_create_blob(_blob->bs, &opts, &internal_xattrs,
5557 			     _spdk_bs_snapshot_newblob_create_cpl, ctx);
5558 }
5559 
5560 void spdk_bs_create_snapshot(struct spdk_blob_store *bs, spdk_blob_id blobid,
5561 			     const struct spdk_blob_xattr_opts *snapshot_xattrs,
5562 			     spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
5563 {
5564 	struct spdk_clone_snapshot_ctx *ctx = calloc(1, sizeof(*ctx));
5565 
5566 	if (!ctx) {
5567 		cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOMEM);
5568 		return;
5569 	}
5570 	ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOBID;
5571 	ctx->cpl.u.blobid.cb_fn = cb_fn;
5572 	ctx->cpl.u.blobid.cb_arg = cb_arg;
5573 	ctx->cpl.u.blobid.blobid = SPDK_BLOBID_INVALID;
5574 	ctx->bserrno = 0;
5575 	ctx->frozen = false;
5576 	ctx->original.id = blobid;
5577 	ctx->xattrs = snapshot_xattrs;
5578 
5579 	spdk_bs_open_blob(bs, ctx->original.id, _spdk_bs_snapshot_origblob_open_cpl, ctx);
5580 }
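
/*
 * Usage sketch (illustrative only): take a snapshot of an existing blob.
 * 'bs', 'blobid' and 'snapshot_done' are hypothetical application names.
 * On success the original blob becomes a thin-provisioned, writable clone
 * of the newly created read-only snapshot.
 *
 *	static void
 *	snapshot_done(void *cb_arg, spdk_blob_id snapshot_id, int bserrno)
 *	{
 *		if (bserrno != 0) {
 *			SPDK_ERRLOG("Snapshot failed: %d\n", bserrno);
 *			return;
 *		}
 *		SPDK_NOTICELOG("Snapshot blob 0x%" PRIx64 "\n", snapshot_id);
 *	}
 *
 *	spdk_bs_create_snapshot(bs, blobid, NULL, snapshot_done, NULL);
 */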
5581 /* END spdk_bs_create_snapshot */
5582 
5583 /* START spdk_bs_create_clone */
5584 
5585 static void
5586 _spdk_bs_xattr_clone(void *arg, const char *name,
5587 		     const void **value, size_t *value_len)
{
	struct spdk_blob *blob = (struct spdk_blob *)arg;

	assert(strncmp(name, BLOB_SNAPSHOT, sizeof(BLOB_SNAPSHOT)) == 0);

	*value = &blob->id;
	*value_len = sizeof(blob->id);
}
5595 
5596 static void
5597 _spdk_bs_clone_newblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
5598 {
5599 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
5600 	struct spdk_blob *clone = _blob;

	if (bserrno != 0) {
		/* Opening the newly created clone failed - clean up the original blob
		 * and report the error through the stored completion. */
		_spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
		return;
	}

	ctx->new.blob = clone;
	_spdk_bs_blob_list_add(clone);
5604 
5605 	spdk_blob_close(clone, _spdk_bs_clone_snapshot_origblob_cleanup, ctx);
5606 }
5607 
5608 static void
5609 _spdk_bs_clone_newblob_create_cpl(void *cb_arg, spdk_blob_id blobid, int bserrno)
5610 {
5611 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;

	if (bserrno != 0) {
		/* Creating the clone blob failed - clean up the original blob. */
		_spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
		return;
	}

	ctx->cpl.u.blobid.blobid = blobid;
5614 	spdk_bs_open_blob(ctx->original.blob->bs, blobid, _spdk_bs_clone_newblob_open_cpl, ctx);
5615 }
5616 
5617 static void
5618 _spdk_bs_clone_origblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
5619 {
5620 	struct spdk_clone_snapshot_ctx	*ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
5621 	struct spdk_blob_opts		opts;
5622 	struct spdk_blob_xattr_opts internal_xattrs;
5623 	char *xattr_names[] = { BLOB_SNAPSHOT };
5624 
5625 	if (bserrno != 0) {
5626 		_spdk_bs_clone_snapshot_cleanup_finish(ctx, bserrno);
5627 		return;
5628 	}
5629 
5630 	ctx->original.blob = _blob;
5631 
5632 	if (!_blob->data_ro || !_blob->md_ro) {
		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Cannot create clone - source blob must be read-only\n");
5634 		ctx->bserrno = -EINVAL;
5635 		spdk_blob_close(_blob, _spdk_bs_clone_snapshot_cleanup_finish, ctx);
5636 		return;
5637 	}
5638 
5639 	if (_blob->locked_operation_in_progress) {
5640 		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Cannot create clone - another operation in progress\n");
5641 		ctx->bserrno = -EBUSY;
5642 		spdk_blob_close(_blob, _spdk_bs_clone_snapshot_cleanup_finish, ctx);
5643 		return;
5644 	}
5645 
5646 	_blob->locked_operation_in_progress = true;
5647 
5648 	spdk_blob_opts_init(&opts);
5649 	_spdk_blob_xattrs_init(&internal_xattrs);
5650 
5651 	opts.thin_provision = true;
5652 	opts.num_clusters = spdk_blob_get_num_clusters(_blob);
5653 	opts.use_extent_table = _blob->use_extent_table;
5654 	if (ctx->xattrs) {
5655 		memcpy(&opts.xattrs, ctx->xattrs, sizeof(*ctx->xattrs));
5656 	}
5657 
5658 	/* Set internal xattr BLOB_SNAPSHOT */
5659 	internal_xattrs.count = 1;
5660 	internal_xattrs.ctx = _blob;
5661 	internal_xattrs.names = xattr_names;
5662 	internal_xattrs.get_value = _spdk_bs_xattr_clone;
5663 
5664 	_spdk_bs_create_blob(_blob->bs, &opts, &internal_xattrs,
5665 			     _spdk_bs_clone_newblob_create_cpl, ctx);
5666 }
5667 
5668 void spdk_bs_create_clone(struct spdk_blob_store *bs, spdk_blob_id blobid,
5669 			  const struct spdk_blob_xattr_opts *clone_xattrs,
5670 			  spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
5671 {
5672 	struct spdk_clone_snapshot_ctx	*ctx = calloc(1, sizeof(*ctx));
5673 
5674 	if (!ctx) {
5675 		cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOMEM);
5676 		return;
5677 	}
5678 
5679 	ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOBID;
5680 	ctx->cpl.u.blobid.cb_fn = cb_fn;
5681 	ctx->cpl.u.blobid.cb_arg = cb_arg;
5682 	ctx->cpl.u.blobid.blobid = SPDK_BLOBID_INVALID;
5683 	ctx->bserrno = 0;
5684 	ctx->xattrs = clone_xattrs;
5685 	ctx->original.id = blobid;
5686 
5687 	spdk_bs_open_blob(bs, ctx->original.id, _spdk_bs_clone_origblob_open_cpl, ctx);
5688 }
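
/*
 * Usage sketch (illustrative only): create a writable clone of a read-only
 * blob (typically a snapshot). 'bs', 'snapshot_id' and 'clone_done' are
 * hypothetical application names; the callback has the same signature as
 * the 'create_done' example shown earlier.
 *
 *	spdk_bs_create_clone(bs, snapshot_id, NULL, clone_done, NULL);
 *
 * Note that _spdk_bs_clone_origblob_open_cpl() above rejects the request
 * with -EINVAL unless the source blob is both data and metadata read-only.
 */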
5689 
5690 /* END spdk_bs_create_clone */
5691 
5692 /* START spdk_bs_inflate_blob */
5693 
5694 static void
5695 _spdk_bs_inflate_blob_set_parent_cpl(void *cb_arg, struct spdk_blob *_parent, int bserrno)
5696 {
5697 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
5698 	struct spdk_blob *_blob = ctx->original.blob;
5699 
5700 	if (bserrno != 0) {
5701 		_spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
5702 		return;
5703 	}
5704 
5705 	assert(_parent != NULL);
5706 
5707 	_spdk_bs_blob_list_remove(_blob);
5708 	_blob->parent_id = _parent->id;
5709 	_spdk_blob_set_xattr(_blob, BLOB_SNAPSHOT, &_blob->parent_id,
5710 			     sizeof(spdk_blob_id), true);
5711 
5712 	_blob->back_bs_dev->destroy(_blob->back_bs_dev);
5713 	_blob->back_bs_dev = spdk_bs_create_blob_bs_dev(_parent);
5714 	_spdk_bs_blob_list_add(_blob);
5715 
5716 	spdk_blob_sync_md(_blob, _spdk_bs_clone_snapshot_origblob_cleanup, ctx);
5717 }
5718 
5719 static void
5720 _spdk_bs_inflate_blob_done(void *cb_arg, int bserrno)
5721 {
5722 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
5723 	struct spdk_blob *_blob = ctx->original.blob;
5724 	struct spdk_blob *_parent;
5725 
5726 	if (bserrno != 0) {
5727 		_spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
5728 		return;
5729 	}
5730 
5731 	if (ctx->allocate_all) {
5732 		/* remove thin provisioning */
5733 		_spdk_bs_blob_list_remove(_blob);
5734 		_spdk_blob_remove_xattr(_blob, BLOB_SNAPSHOT, true);
5735 		_blob->invalid_flags = _blob->invalid_flags & ~SPDK_BLOB_THIN_PROV;
5736 		_blob->back_bs_dev->destroy(_blob->back_bs_dev);
5737 		_blob->back_bs_dev = NULL;
5738 		_blob->parent_id = SPDK_BLOBID_INVALID;
5739 	} else {
5740 		_parent = ((struct spdk_blob_bs_dev *)(_blob->back_bs_dev))->blob;
5741 		if (_parent->parent_id != SPDK_BLOBID_INVALID) {
5742 			/* We must change the parent of the inflated blob */
5743 			spdk_bs_open_blob(_blob->bs, _parent->parent_id,
5744 					  _spdk_bs_inflate_blob_set_parent_cpl, ctx);
5745 			return;
5746 		}
5747 
5748 		_spdk_bs_blob_list_remove(_blob);
5749 		_spdk_blob_remove_xattr(_blob, BLOB_SNAPSHOT, true);
5750 		_blob->parent_id = SPDK_BLOBID_INVALID;
5751 		_blob->back_bs_dev->destroy(_blob->back_bs_dev);
5752 		_blob->back_bs_dev = spdk_bs_create_zeroes_dev();
5753 	}
5754 
5755 	_blob->state = SPDK_BLOB_STATE_DIRTY;
5756 	spdk_blob_sync_md(_blob, _spdk_bs_clone_snapshot_origblob_cleanup, ctx);
5757 }
5758 
5759 /* Check if cluster needs allocation */
5760 static inline bool
5761 _spdk_bs_cluster_needs_allocation(struct spdk_blob *blob, uint64_t cluster, bool allocate_all)
5762 {
5763 	struct spdk_blob_bs_dev *b;
5764 
5765 	assert(blob != NULL);
5766 
5767 	if (blob->active.clusters[cluster] != 0) {
5768 		/* Cluster is already allocated */
5769 		return false;
5770 	}
5771 
5772 	if (blob->parent_id == SPDK_BLOBID_INVALID) {
		/* Blob has no parent */
5774 		return allocate_all;
5775 	}
5776 
5777 	b = (struct spdk_blob_bs_dev *)blob->back_bs_dev;
5778 	return (allocate_all || b->blob->active.clusters[cluster] != 0);
5779 }
5780 
5781 static void
5782 _spdk_bs_inflate_blob_touch_next(void *cb_arg, int bserrno)
5783 {
5784 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
5785 	struct spdk_blob *_blob = ctx->original.blob;
5786 	uint64_t offset;
5787 
5788 	if (bserrno != 0) {
5789 		_spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
5790 		return;
5791 	}
5792 
5793 	for (; ctx->cluster < _blob->active.num_clusters; ctx->cluster++) {
5794 		if (_spdk_bs_cluster_needs_allocation(_blob, ctx->cluster, ctx->allocate_all)) {
5795 			break;
5796 		}
5797 	}
5798 
5799 	if (ctx->cluster < _blob->active.num_clusters) {
5800 		offset = _spdk_bs_cluster_to_lba(_blob->bs, ctx->cluster);
5801 
		/* It is safe to advance the cluster index before issuing the write */
		ctx->cluster++;

		/* Use a zero-length write to touch (and thereby allocate) the cluster */
5806 		spdk_blob_io_write(_blob, ctx->channel, NULL, offset, 0,
5807 				   _spdk_bs_inflate_blob_touch_next, ctx);
5808 	} else {
5809 		_spdk_bs_inflate_blob_done(cb_arg, bserrno);
5810 	}
5811 }
5812 
5813 static void
5814 _spdk_bs_inflate_blob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
5815 {
5816 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
5817 	uint64_t lfc; /* lowest free cluster */
5818 	uint64_t i;
5819 
5820 	if (bserrno != 0) {
5821 		_spdk_bs_clone_snapshot_cleanup_finish(ctx, bserrno);
5822 		return;
5823 	}
5824 
5825 	ctx->original.blob = _blob;
5826 
5827 	if (_blob->locked_operation_in_progress) {
5828 		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Cannot inflate blob - another operation in progress\n");
5829 		ctx->bserrno = -EBUSY;
5830 		spdk_blob_close(_blob, _spdk_bs_clone_snapshot_cleanup_finish, ctx);
5831 		return;
5832 	}
5833 
5834 	_blob->locked_operation_in_progress = true;
5835 
5836 	if (!ctx->allocate_all && _blob->parent_id == SPDK_BLOBID_INVALID) {
		/* This blob has no parent, so we cannot decouple it. */
5838 		SPDK_ERRLOG("Cannot decouple parent of blob with no parent.\n");
5839 		_spdk_bs_clone_snapshot_origblob_cleanup(ctx, -EINVAL);
5840 		return;
5841 	}
5842 
5843 	if (spdk_blob_is_thin_provisioned(_blob) == false) {
		/* This is not a thin-provisioned blob. There is no need to inflate it. */
5845 		_spdk_bs_clone_snapshot_origblob_cleanup(ctx, 0);
5846 		return;
5847 	}
5848 
5849 	/* Do two passes - one to verify that we can obtain enough clusters
5850 	 * and another to actually claim them.
5851 	 */
5852 	lfc = 0;
5853 	for (i = 0; i < _blob->active.num_clusters; i++) {
5854 		if (_spdk_bs_cluster_needs_allocation(_blob, i, ctx->allocate_all)) {
5855 			lfc = spdk_bit_array_find_first_clear(_blob->bs->used_clusters, lfc);
5856 			if (lfc == UINT32_MAX) {
5857 				/* No more free clusters. Cannot satisfy the request */
5858 				_spdk_bs_clone_snapshot_origblob_cleanup(ctx, -ENOSPC);
5859 				return;
5860 			}
5861 			lfc++;
5862 		}
5863 	}
5864 
5865 	ctx->cluster = 0;
5866 	_spdk_bs_inflate_blob_touch_next(ctx, 0);
5867 }
5868 
5869 static void
5870 _spdk_bs_inflate_blob(struct spdk_blob_store *bs, struct spdk_io_channel *channel,
5871 		      spdk_blob_id blobid, bool allocate_all, spdk_blob_op_complete cb_fn, void *cb_arg)
5872 {
5873 	struct spdk_clone_snapshot_ctx *ctx = calloc(1, sizeof(*ctx));
5874 
5875 	if (!ctx) {
5876 		cb_fn(cb_arg, -ENOMEM);
5877 		return;
5878 	}
5879 	ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
	ctx->cpl.u.blob_basic.cb_fn = cb_fn;
	ctx->cpl.u.blob_basic.cb_arg = cb_arg;
5882 	ctx->bserrno = 0;
5883 	ctx->original.id = blobid;
5884 	ctx->channel = channel;
5885 	ctx->allocate_all = allocate_all;
5886 
5887 	spdk_bs_open_blob(bs, ctx->original.id, _spdk_bs_inflate_blob_open_cpl, ctx);
5888 }
5889 
5890 void
5891 spdk_bs_inflate_blob(struct spdk_blob_store *bs, struct spdk_io_channel *channel,
5892 		     spdk_blob_id blobid, spdk_blob_op_complete cb_fn, void *cb_arg)
5893 {
5894 	_spdk_bs_inflate_blob(bs, channel, blobid, true, cb_fn, cb_arg);
5895 }
5896 
5897 void
5898 spdk_bs_blob_decouple_parent(struct spdk_blob_store *bs, struct spdk_io_channel *channel,
5899 			     spdk_blob_id blobid, spdk_blob_op_complete cb_fn, void *cb_arg)
5900 {
5901 	_spdk_bs_inflate_blob(bs, channel, blobid, false, cb_fn, cb_arg);
5902 }
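
/*
 * Usage sketch (illustrative only): both calls require an I/O channel because
 * clusters are populated with zero-length writes on that channel. 'bs',
 * 'channel', 'blobid' and 'op_done' are hypothetical application names.
 *
 *	// Allocate every cluster and drop thin-provisioning entirely:
 *	spdk_bs_inflate_blob(bs, channel, blobid, op_done, NULL);
 *
 *	// Only copy the clusters backed by the immediate parent, keeping the
 *	// blob thin and re-parenting it to its grandparent (if any):
 *	spdk_bs_blob_decouple_parent(bs, channel, blobid, op_done, NULL);
 */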
5903 /* END spdk_bs_inflate_blob */
5904 
5905 /* START spdk_blob_resize */
5906 struct spdk_bs_resize_ctx {
5907 	spdk_blob_op_complete cb_fn;
5908 	void *cb_arg;
5909 	struct spdk_blob *blob;
5910 	uint64_t sz;
5911 	int rc;
5912 };
5913 
5914 static void
5915 _spdk_bs_resize_unfreeze_cpl(void *cb_arg, int rc)
5916 {
5917 	struct spdk_bs_resize_ctx *ctx = (struct spdk_bs_resize_ctx *)cb_arg;
5918 
5919 	if (rc != 0) {
5920 		SPDK_ERRLOG("Unfreeze failed, rc=%d\n", rc);
5921 	}
5922 
5923 	if (ctx->rc != 0) {
		SPDK_ERRLOG("Resize failed, ctx->rc=%d\n", ctx->rc);
5925 		rc = ctx->rc;
5926 	}
5927 
5928 	ctx->blob->locked_operation_in_progress = false;
5929 
5930 	ctx->cb_fn(ctx->cb_arg, rc);
5931 	free(ctx);
5932 }
5933 
5934 static void
5935 _spdk_bs_resize_freeze_cpl(void *cb_arg, int rc)
5936 {
5937 	struct spdk_bs_resize_ctx *ctx = (struct spdk_bs_resize_ctx *)cb_arg;
5938 
5939 	if (rc != 0) {
5940 		ctx->blob->locked_operation_in_progress = false;
5941 		ctx->cb_fn(ctx->cb_arg, rc);
5942 		free(ctx);
5943 		return;
5944 	}
5945 
5946 	ctx->rc = _spdk_blob_resize(ctx->blob, ctx->sz);
5947 
5948 	_spdk_blob_unfreeze_io(ctx->blob, _spdk_bs_resize_unfreeze_cpl, ctx);
5949 }
5950 
5951 void
5952 spdk_blob_resize(struct spdk_blob *blob, uint64_t sz, spdk_blob_op_complete cb_fn, void *cb_arg)
5953 {
5954 	struct spdk_bs_resize_ctx *ctx;
5955 
5956 	_spdk_blob_verify_md_op(blob);
5957 
5958 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Resizing blob %lu to %lu clusters\n", blob->id, sz);
5959 
5960 	if (blob->md_ro) {
5961 		cb_fn(cb_arg, -EPERM);
5962 		return;
5963 	}
5964 
5965 	if (sz == blob->active.num_clusters) {
5966 		cb_fn(cb_arg, 0);
5967 		return;
5968 	}
5969 
5970 	if (blob->locked_operation_in_progress) {
5971 		cb_fn(cb_arg, -EBUSY);
5972 		return;
5973 	}
5974 
5975 	ctx = calloc(1, sizeof(*ctx));
5976 	if (!ctx) {
5977 		cb_fn(cb_arg, -ENOMEM);
5978 		return;
5979 	}
5980 
5981 	blob->locked_operation_in_progress = true;
5982 	ctx->cb_fn = cb_fn;
5983 	ctx->cb_arg = cb_arg;
5984 	ctx->blob = blob;
5985 	ctx->sz = sz;
5986 	_spdk_blob_freeze_io(blob, _spdk_bs_resize_freeze_cpl, ctx);
5987 }
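
/*
 * Usage sketch (illustrative only): grow a blob to 'new_sz' clusters and then
 * persist the new size. spdk_blob_resize() only updates in-memory state; the
 * metadata must still be synced (or the blob closed) for the resize to be
 * durable. 'blob', 'new_sz', 'resize_done' and 'sync_done' are hypothetical
 * application names.
 *
 *	static void
 *	resize_done(void *cb_arg, int bserrno)
 *	{
 *		struct spdk_blob *blob = cb_arg;
 *
 *		if (bserrno != 0) {
 *			SPDK_ERRLOG("Resize failed: %d\n", bserrno);
 *			return;
 *		}
 *		spdk_blob_sync_md(blob, sync_done, NULL);
 *	}
 *
 *	spdk_blob_resize(blob, new_sz, resize_done, blob);
 */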
5988 
5989 /* END spdk_blob_resize */
5990 
5991 
5992 /* START spdk_bs_delete_blob */
5993 
5994 static void
5995 _spdk_bs_delete_close_cpl(void *cb_arg, int bserrno)
5996 {
5997 	spdk_bs_sequence_t *seq = cb_arg;
5998 
5999 	spdk_bs_sequence_finish(seq, bserrno);
6000 }
6001 
6002 static void
6003 _spdk_bs_delete_persist_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
6004 {
6005 	struct spdk_blob *blob = cb_arg;
6006 
6007 	if (bserrno != 0) {
6008 		/*
6009 		 * We already removed this blob from the blobstore tailq, so
6010 		 *  we need to free it here since this is the last reference
6011 		 *  to it.
6012 		 */
6013 		_spdk_blob_free(blob);
6014 		_spdk_bs_delete_close_cpl(seq, bserrno);
6015 		return;
6016 	}
6017 
6018 	/*
6019 	 * This will immediately decrement the ref_count and call
6020 	 *  the completion routine since the metadata state is clean.
6021 	 *  By calling spdk_blob_close, we reduce the number of call
6022 	 *  points into code that touches the blob->open_ref count
6023 	 *  and the blobstore's blob list.
6024 	 */
6025 	spdk_blob_close(blob, _spdk_bs_delete_close_cpl, seq);
6026 }
6027 
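/*
 * Deleting a snapshot that still has a single clone is handled specially:
 * the snapshot's clusters are merged down into the clone (only for entries
 * the clone has not overwritten), the clone is re-parented to the snapshot's
 * own parent (or to the zeroes device), and only then is the snapshot blob
 * itself removed. The SNAPSHOT_PENDING_REMOVAL xattr marks the snapshot so
 * the operation can be recovered after a power failure. The context below
 * carries both blobs through that callback chain.
 */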
6028 struct delete_snapshot_ctx {
6029 	struct spdk_blob_list *parent_snapshot_entry;
6030 	struct spdk_blob *snapshot;
6031 	bool snapshot_md_ro;
6032 	struct spdk_blob *clone;
6033 	bool clone_md_ro;
6034 	spdk_blob_op_with_handle_complete cb_fn;
6035 	void *cb_arg;
6036 	int bserrno;
6037 };
6038 
6039 static void
6040 _spdk_delete_blob_cleanup_finish(void *cb_arg, int bserrno)
6041 {
6042 	struct delete_snapshot_ctx *ctx = cb_arg;
6043 
6044 	if (bserrno != 0) {
6045 		SPDK_ERRLOG("Snapshot cleanup error %d\n", bserrno);
6046 	}
6047 
6048 	assert(ctx != NULL);
6049 
6050 	if (bserrno != 0 && ctx->bserrno == 0) {
6051 		ctx->bserrno = bserrno;
6052 	}
6053 
6054 	ctx->cb_fn(ctx->cb_arg, ctx->snapshot, ctx->bserrno);
6055 	free(ctx);
6056 }
6057 
6058 static void
6059 _spdk_delete_snapshot_cleanup_snapshot(void *cb_arg, int bserrno)
6060 {
6061 	struct delete_snapshot_ctx *ctx = cb_arg;
6062 
6063 	if (bserrno != 0) {
6064 		ctx->bserrno = bserrno;
6065 		SPDK_ERRLOG("Clone cleanup error %d\n", bserrno);
6066 	}
6067 
	/* open_ref == 1 means that only the deletion context has opened this snapshot.
	 * open_ref == 2 means that the clone has opened this snapshot as well,
	 * so we have to add it back to the blobs list. */
6071 	if (ctx->snapshot->open_ref == 2) {
6072 		TAILQ_INSERT_HEAD(&ctx->snapshot->bs->blobs, ctx->snapshot, link);
6073 	}
6074 
6075 	ctx->snapshot->locked_operation_in_progress = false;
6076 	ctx->snapshot->md_ro = ctx->snapshot_md_ro;
6077 
6078 	spdk_blob_close(ctx->snapshot, _spdk_delete_blob_cleanup_finish, ctx);
6079 }
6080 
6081 static void
6082 _spdk_delete_snapshot_cleanup_clone(void *cb_arg, int bserrno)
6083 {
6084 	struct delete_snapshot_ctx *ctx = cb_arg;
6085 
6086 	ctx->clone->locked_operation_in_progress = false;
6087 	ctx->clone->md_ro = ctx->clone_md_ro;
6088 
6089 	spdk_blob_close(ctx->clone, _spdk_delete_snapshot_cleanup_snapshot, ctx);
6090 }
6091 
6092 static void
6093 _spdk_delete_snapshot_unfreeze_cpl(void *cb_arg, int bserrno)
6094 {
6095 	struct delete_snapshot_ctx *ctx = cb_arg;
6096 
6097 	if (bserrno) {
6098 		ctx->bserrno = bserrno;
6099 		_spdk_delete_snapshot_cleanup_clone(ctx, 0);
6100 		return;
6101 	}
6102 
6103 	ctx->clone->locked_operation_in_progress = false;
6104 	spdk_blob_close(ctx->clone, _spdk_delete_blob_cleanup_finish, ctx);
6105 }
6106 
6107 static void
6108 _spdk_delete_snapshot_sync_snapshot_cpl(void *cb_arg, int bserrno)
6109 {
6110 	struct delete_snapshot_ctx *ctx = cb_arg;
6111 	struct spdk_blob_list *parent_snapshot_entry = NULL;
6112 	struct spdk_blob_list *snapshot_entry = NULL;
6113 	struct spdk_blob_list *clone_entry = NULL;
6114 	struct spdk_blob_list *snapshot_clone_entry = NULL;
6115 
6116 	if (bserrno) {
6117 		SPDK_ERRLOG("Failed to sync MD on blob\n");
6118 		ctx->bserrno = bserrno;
6119 		_spdk_delete_snapshot_cleanup_clone(ctx, 0);
6120 		return;
6121 	}
6122 
6123 	/* Get snapshot entry for the snapshot we want to remove */
6124 	snapshot_entry = _spdk_bs_get_snapshot_entry(ctx->snapshot->bs, ctx->snapshot->id);
6125 
6126 	assert(snapshot_entry != NULL);
6127 
6128 	/* Remove clone entry in this snapshot (at this point there can be only one clone) */
6129 	clone_entry = TAILQ_FIRST(&snapshot_entry->clones);
6130 	assert(clone_entry != NULL);
6131 	TAILQ_REMOVE(&snapshot_entry->clones, clone_entry, link);
6132 	snapshot_entry->clone_count--;
6133 	assert(TAILQ_EMPTY(&snapshot_entry->clones));
6134 
6135 	if (ctx->snapshot->parent_id != SPDK_BLOBID_INVALID) {
6136 		/* This snapshot is at the same time a clone of another snapshot - we need to
6137 		 * update parent snapshot (remove current clone, add new one inherited from
6138 		 * the snapshot that is being removed) */
6139 
6140 		/* Get snapshot entry for parent snapshot and clone entry within that snapshot for
6141 		 * snapshot that we are removing */
6142 		_spdk_blob_get_snapshot_and_clone_entries(ctx->snapshot, &parent_snapshot_entry,
6143 				&snapshot_clone_entry);
6144 
6145 		/* Switch clone entry in parent snapshot */
6146 		TAILQ_INSERT_TAIL(&parent_snapshot_entry->clones, clone_entry, link);
6147 		TAILQ_REMOVE(&parent_snapshot_entry->clones, snapshot_clone_entry, link);
6148 		free(snapshot_clone_entry);
6149 	} else {
6150 		/* No parent snapshot - just remove clone entry */
6151 		free(clone_entry);
6152 	}
6153 
6154 	/* Restore md_ro flags */
6155 	ctx->clone->md_ro = ctx->clone_md_ro;
6156 	ctx->snapshot->md_ro = ctx->snapshot_md_ro;
6157 
6158 	_spdk_blob_unfreeze_io(ctx->clone, _spdk_delete_snapshot_unfreeze_cpl, ctx);
6159 }
6160 
6161 static void
6162 _spdk_delete_snapshot_sync_clone_cpl(void *cb_arg, int bserrno)
6163 {
6164 	struct delete_snapshot_ctx *ctx = cb_arg;
6165 	uint64_t i;
6166 
6167 	ctx->snapshot->md_ro = false;
6168 
6169 	if (bserrno) {
6170 		SPDK_ERRLOG("Failed to sync MD on clone\n");
6171 		ctx->bserrno = bserrno;
6172 
6173 		/* Restore snapshot to previous state */
6174 		bserrno = _spdk_blob_remove_xattr(ctx->snapshot, SNAPSHOT_PENDING_REMOVAL, true);
6175 		if (bserrno != 0) {
6176 			_spdk_delete_snapshot_cleanup_clone(ctx, bserrno);
6177 			return;
6178 		}
6179 
6180 		spdk_blob_sync_md(ctx->snapshot, _spdk_delete_snapshot_cleanup_clone, ctx);
6181 		return;
6182 	}
6183 
6184 	/* Clear cluster map entries for snapshot */
6185 	for (i = 0; i < ctx->snapshot->active.num_clusters && i < ctx->clone->active.num_clusters; i++) {
6186 		if (ctx->clone->active.clusters[i] == ctx->snapshot->active.clusters[i]) {
6187 			ctx->snapshot->active.clusters[i] = 0;
6188 		}
6189 	}
6190 
6191 	ctx->snapshot->state = SPDK_BLOB_STATE_DIRTY;
6192 
6193 	if (ctx->parent_snapshot_entry != NULL) {
6194 		ctx->snapshot->back_bs_dev = NULL;
6195 	}
6196 
6197 	spdk_blob_sync_md(ctx->snapshot, _spdk_delete_snapshot_sync_snapshot_cpl, ctx);
6198 }
6199 
6200 static void
6201 _spdk_delete_snapshot_sync_snapshot_xattr_cpl(void *cb_arg, int bserrno)
6202 {
6203 	struct delete_snapshot_ctx *ctx = cb_arg;
6204 	uint64_t i;
6205 
6206 	/* Temporarily override md_ro flag for clone for MD modification */
6207 	ctx->clone_md_ro = ctx->clone->md_ro;
6208 	ctx->clone->md_ro = false;
6209 
6210 	if (bserrno) {
6211 		SPDK_ERRLOG("Failed to sync MD with xattr on blob\n");
6212 		ctx->bserrno = bserrno;
6213 		_spdk_delete_snapshot_cleanup_clone(ctx, 0);
6214 		return;
6215 	}
6216 
6217 	/* Copy snapshot map to clone map (only unallocated clusters in clone) */
6218 	for (i = 0; i < ctx->snapshot->active.num_clusters && i < ctx->clone->active.num_clusters; i++) {
6219 		if (ctx->clone->active.clusters[i] == 0) {
6220 			ctx->clone->active.clusters[i] = ctx->snapshot->active.clusters[i];
6221 		}
6222 	}
6223 
6224 	/* Delete old backing bs_dev from clone (related to snapshot that will be removed) */
6225 	ctx->clone->back_bs_dev->destroy(ctx->clone->back_bs_dev);
6226 
6227 	/* Set/remove snapshot xattr and switch parent ID and backing bs_dev on clone... */
6228 	if (ctx->parent_snapshot_entry != NULL) {
6229 		/* ...to parent snapshot */
6230 		ctx->clone->parent_id = ctx->parent_snapshot_entry->id;
6231 		ctx->clone->back_bs_dev = ctx->snapshot->back_bs_dev;
6232 		_spdk_blob_set_xattr(ctx->clone, BLOB_SNAPSHOT, &ctx->parent_snapshot_entry->id,
6233 				     sizeof(spdk_blob_id),
6234 				     true);
6235 	} else {
6236 		/* ...to blobid invalid and zeroes dev */
6237 		ctx->clone->parent_id = SPDK_BLOBID_INVALID;
6238 		ctx->clone->back_bs_dev = spdk_bs_create_zeroes_dev();
6239 		_spdk_blob_remove_xattr(ctx->clone, BLOB_SNAPSHOT, true);
6240 	}
6241 
6242 	spdk_blob_sync_md(ctx->clone, _spdk_delete_snapshot_sync_clone_cpl, ctx);
6243 }
6244 
6245 static void
6246 _spdk_delete_snapshot_freeze_io_cb(void *cb_arg, int bserrno)
6247 {
6248 	struct delete_snapshot_ctx *ctx = cb_arg;
6249 
6250 	if (bserrno) {
6251 		SPDK_ERRLOG("Failed to freeze I/O on clone\n");
6252 		ctx->bserrno = bserrno;
6253 		_spdk_delete_snapshot_cleanup_clone(ctx, 0);
6254 		return;
6255 	}
6256 
6257 	/* Temporarily override md_ro flag for snapshot for MD modification */
6258 	ctx->snapshot_md_ro = ctx->snapshot->md_ro;
6259 	ctx->snapshot->md_ro = false;
6260 
6261 	/* Mark blob as pending for removal for power failure safety, use clone id for recovery */
6262 	ctx->bserrno = _spdk_blob_set_xattr(ctx->snapshot, SNAPSHOT_PENDING_REMOVAL, &ctx->clone->id,
6263 					    sizeof(spdk_blob_id), true);
6264 	if (ctx->bserrno != 0) {
6265 		_spdk_delete_snapshot_cleanup_clone(ctx, 0);
6266 		return;
6267 	}
6268 
6269 	spdk_blob_sync_md(ctx->snapshot, _spdk_delete_snapshot_sync_snapshot_xattr_cpl, ctx);
6270 }
6271 
6272 static void
6273 _spdk_delete_snapshot_open_clone_cb(void *cb_arg, struct spdk_blob *clone, int bserrno)
6274 {
6275 	struct delete_snapshot_ctx *ctx = cb_arg;
6276 
6277 	if (bserrno) {
6278 		SPDK_ERRLOG("Failed to open clone\n");
6279 		ctx->bserrno = bserrno;
6280 		_spdk_delete_snapshot_cleanup_snapshot(ctx, 0);
6281 		return;
6282 	}
6283 
6284 	ctx->clone = clone;
6285 
6286 	if (clone->locked_operation_in_progress) {
6287 		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Cannot remove blob - another operation in progress on its clone\n");
6288 		ctx->bserrno = -EBUSY;
6289 		spdk_blob_close(ctx->clone, _spdk_delete_snapshot_cleanup_snapshot, ctx);
6290 		return;
6291 	}
6292 
6293 	clone->locked_operation_in_progress = true;
6294 
6295 	_spdk_blob_freeze_io(clone, _spdk_delete_snapshot_freeze_io_cb, ctx);
6296 }
6297 
6298 static void
6299 _spdk_update_clone_on_snapshot_deletion(struct spdk_blob *snapshot, struct delete_snapshot_ctx *ctx)
6300 {
6301 	struct spdk_blob_list *snapshot_entry = NULL;
6302 	struct spdk_blob_list *clone_entry = NULL;
6303 	struct spdk_blob_list *snapshot_clone_entry = NULL;
6304 
6305 	/* Get snapshot entry for the snapshot we want to remove */
6306 	snapshot_entry = _spdk_bs_get_snapshot_entry(snapshot->bs, snapshot->id);
6307 
6308 	assert(snapshot_entry != NULL);
6309 
6310 	/* Get clone of the snapshot (at this point there can be only one clone) */
6311 	clone_entry = TAILQ_FIRST(&snapshot_entry->clones);
6312 	assert(snapshot_entry->clone_count == 1);
6313 	assert(clone_entry != NULL);
6314 
6315 	/* Get snapshot entry for parent snapshot and clone entry within that snapshot for
6316 	 * snapshot that we are removing */
6317 	_spdk_blob_get_snapshot_and_clone_entries(snapshot, &ctx->parent_snapshot_entry,
6318 			&snapshot_clone_entry);
6319 
6320 	spdk_bs_open_blob(snapshot->bs, clone_entry->id, _spdk_delete_snapshot_open_clone_cb, ctx);
6321 }
6322 
6323 static void
6324 _spdk_bs_delete_blob_finish(void *cb_arg, struct spdk_blob *blob, int bserrno)
6325 {
6326 	spdk_bs_sequence_t *seq = cb_arg;
6327 	struct spdk_blob_list *snapshot_entry = NULL;
6328 	uint32_t page_num;
6329 
6330 	if (bserrno) {
6331 		SPDK_ERRLOG("Failed to remove blob\n");
6332 		spdk_bs_sequence_finish(seq, bserrno);
6333 		return;
6334 	}
6335 
6336 	/* Remove snapshot from the list */
6337 	snapshot_entry = _spdk_bs_get_snapshot_entry(blob->bs, blob->id);
6338 	if (snapshot_entry != NULL) {
6339 		TAILQ_REMOVE(&blob->bs->snapshots, snapshot_entry, link);
6340 		free(snapshot_entry);
6341 	}
6342 
6343 	page_num = _spdk_bs_blobid_to_page(blob->id);
6344 	spdk_bit_array_clear(blob->bs->used_blobids, page_num);
6345 	blob->state = SPDK_BLOB_STATE_DIRTY;
6346 	blob->active.num_pages = 0;
6347 	_spdk_blob_resize(blob, 0);
6348 
6349 	_spdk_blob_persist(seq, blob, _spdk_bs_delete_persist_cpl, blob);
6350 }
6351 
6352 static int
6353 _spdk_bs_is_blob_deletable(struct spdk_blob *blob, bool *update_clone)
6354 {
6355 	struct spdk_blob_list *snapshot_entry = NULL;
6356 	struct spdk_blob_list *clone_entry = NULL;
6357 	struct spdk_blob *clone = NULL;
6358 	bool has_one_clone = false;
6359 
6360 	/* Check if this is a snapshot with clones */
6361 	snapshot_entry = _spdk_bs_get_snapshot_entry(blob->bs, blob->id);
6362 	if (snapshot_entry != NULL) {
6363 		if (snapshot_entry->clone_count > 1) {
6364 			SPDK_ERRLOG("Cannot remove snapshot with more than one clone\n");
6365 			return -EBUSY;
6366 		} else if (snapshot_entry->clone_count == 1) {
6367 			has_one_clone = true;
6368 		}
6369 	}
6370 
6371 	/* Check if someone has this blob open (besides this delete context):
	 * - open_ref == 1 - only this context has opened the blob, so it is ok to remove it
	 * - open_ref <= 2 && has_one_clone == true - the clone is holding the snapshot open,
	 *	which is ok because we will update it accordingly */
6375 	if (blob->open_ref <= 2 && has_one_clone) {
6376 		clone_entry = TAILQ_FIRST(&snapshot_entry->clones);
6377 		assert(clone_entry != NULL);
6378 		clone = _spdk_blob_lookup(blob->bs, clone_entry->id);
6379 
6380 		if (blob->open_ref == 2 && clone == NULL) {
6381 			/* Clone is closed and someone else opened this blob */
6382 			SPDK_ERRLOG("Cannot remove snapshot because it is open\n");
6383 			return -EBUSY;
6384 		}
6385 
6386 		*update_clone = true;
6387 		return 0;
6388 	}
6389 
6390 	if (blob->open_ref > 1) {
6391 		SPDK_ERRLOG("Cannot remove snapshot because it is open\n");
6392 		return -EBUSY;
6393 	}
6394 
6395 	assert(has_one_clone == false);
6396 	*update_clone = false;
6397 	return 0;
6398 }
6399 
6400 static void
6401 _spdk_bs_delete_enomem_close_cpl(void *cb_arg, int bserrno)
6402 {
6403 	spdk_bs_sequence_t *seq = cb_arg;
6404 
6405 	spdk_bs_sequence_finish(seq, -ENOMEM);
6406 }
6407 
6408 static void
6409 _spdk_bs_delete_open_cpl(void *cb_arg, struct spdk_blob *blob, int bserrno)
6410 {
6411 	spdk_bs_sequence_t *seq = cb_arg;
6412 	struct delete_snapshot_ctx *ctx;
6413 	bool update_clone = false;
6414 
6415 	if (bserrno != 0) {
6416 		spdk_bs_sequence_finish(seq, bserrno);
6417 		return;
6418 	}
6419 
6420 	_spdk_blob_verify_md_op(blob);
6421 
6422 	ctx = calloc(1, sizeof(*ctx));
6423 	if (ctx == NULL) {
6424 		spdk_blob_close(blob, _spdk_bs_delete_enomem_close_cpl, seq);
6425 		return;
6426 	}
6427 
6428 	ctx->snapshot = blob;
6429 	ctx->cb_fn = _spdk_bs_delete_blob_finish;
6430 	ctx->cb_arg = seq;
6431 
6432 	/* Check if blob can be removed and if it is a snapshot with clone on top of it */
6433 	ctx->bserrno = _spdk_bs_is_blob_deletable(blob, &update_clone);
6434 	if (ctx->bserrno) {
6435 		spdk_blob_close(blob, _spdk_delete_blob_cleanup_finish, ctx);
6436 		return;
6437 	}
6438 
6439 	if (blob->locked_operation_in_progress) {
6440 		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Cannot remove blob - another operation in progress\n");
6441 		ctx->bserrno = -EBUSY;
6442 		spdk_blob_close(blob, _spdk_delete_blob_cleanup_finish, ctx);
6443 		return;
6444 	}
6445 
6446 	blob->locked_operation_in_progress = true;
6447 
6448 	/*
6449 	 * Remove the blob from the blob_store list now, to ensure it does not
6450 	 *  get returned after this point by _spdk_blob_lookup().
6451 	 */
6452 	TAILQ_REMOVE(&blob->bs->blobs, blob, link);
6453 
6454 	if (update_clone) {
6455 		/* This blob is a snapshot with active clone - update clone first */
6456 		_spdk_update_clone_on_snapshot_deletion(blob, ctx);
6457 	} else {
6458 		/* This blob does not have any clones - just remove it */
6459 		_spdk_bs_blob_list_remove(blob);
6460 		_spdk_bs_delete_blob_finish(seq, blob, 0);
6461 		free(ctx);
6462 	}
6463 }
6464 
6465 void
6466 spdk_bs_delete_blob(struct spdk_blob_store *bs, spdk_blob_id blobid,
6467 		    spdk_blob_op_complete cb_fn, void *cb_arg)
6468 {
6469 	struct spdk_bs_cpl	cpl;
6470 	spdk_bs_sequence_t	*seq;
6471 
6472 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Deleting blob %lu\n", blobid);
6473 
6474 	assert(spdk_get_thread() == bs->md_thread);
6475 
6476 	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
6477 	cpl.u.blob_basic.cb_fn = cb_fn;
6478 	cpl.u.blob_basic.cb_arg = cb_arg;
6479 
6480 	seq = spdk_bs_sequence_start(bs->md_channel, &cpl);
6481 	if (!seq) {
6482 		cb_fn(cb_arg, -ENOMEM);
6483 		return;
6484 	}
6485 
6486 	spdk_bs_open_blob(bs, blobid, _spdk_bs_delete_open_cpl, seq);
6487 }
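
/*
 * Usage sketch (illustrative only): delete a blob by id from the metadata
 * thread. 'bs', 'blobid' and 'delete_done' are hypothetical application names.
 *
 *	static void
 *	delete_done(void *cb_arg, int bserrno)
 *	{
 *		if (bserrno == -EBUSY) {
 *			SPDK_ERRLOG("Blob is still open or has multiple clones\n");
 *		}
 *	}
 *
 *	spdk_bs_delete_blob(bs, blobid, delete_done, NULL);
 */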
6488 
6489 /* END spdk_bs_delete_blob */
6490 
6491 /* START spdk_bs_open_blob */
6492 
6493 static void
6494 _spdk_bs_open_blob_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
6495 {
6496 	struct spdk_blob *blob = cb_arg;
6497 
6498 	if (bserrno != 0) {
6499 		_spdk_blob_free(blob);
6500 		seq->cpl.u.blob_handle.blob = NULL;
6501 		spdk_bs_sequence_finish(seq, bserrno);
6502 		return;
6503 	}
6504 
6505 	blob->open_ref++;
6506 
6507 	TAILQ_INSERT_HEAD(&blob->bs->blobs, blob, link);
6508 
6509 	spdk_bs_sequence_finish(seq, bserrno);
6510 }
6511 
6512 static void _spdk_bs_open_blob(struct spdk_blob_store *bs, spdk_blob_id blobid,
6513 			       struct spdk_blob_open_opts *opts, spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
6514 {
6515 	struct spdk_blob		*blob;
6516 	struct spdk_bs_cpl		cpl;
6517 	struct spdk_blob_open_opts	opts_default;
6518 	spdk_bs_sequence_t		*seq;
6519 	uint32_t			page_num;
6520 
6521 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Opening blob %lu\n", blobid);
6522 	assert(spdk_get_thread() == bs->md_thread);
6523 
6524 	page_num = _spdk_bs_blobid_to_page(blobid);
6525 	if (spdk_bit_array_get(bs->used_blobids, page_num) == false) {
6526 		/* Invalid blobid */
6527 		cb_fn(cb_arg, NULL, -ENOENT);
6528 		return;
6529 	}
6530 
6531 	blob = _spdk_blob_lookup(bs, blobid);
6532 	if (blob) {
6533 		blob->open_ref++;
6534 		cb_fn(cb_arg, blob, 0);
6535 		return;
6536 	}
6537 
6538 	blob = _spdk_blob_alloc(bs, blobid);
6539 	if (!blob) {
6540 		cb_fn(cb_arg, NULL, -ENOMEM);
6541 		return;
6542 	}
6543 
6544 	if (!opts) {
6545 		spdk_blob_open_opts_init(&opts_default);
6546 		opts = &opts_default;
6547 	}
6548 
6549 	blob->clear_method = opts->clear_method;
6550 
6551 	cpl.type = SPDK_BS_CPL_TYPE_BLOB_HANDLE;
6552 	cpl.u.blob_handle.cb_fn = cb_fn;
6553 	cpl.u.blob_handle.cb_arg = cb_arg;
6554 	cpl.u.blob_handle.blob = blob;
6555 
6556 	seq = spdk_bs_sequence_start(bs->md_channel, &cpl);
6557 	if (!seq) {
6558 		_spdk_blob_free(blob);
6559 		cb_fn(cb_arg, NULL, -ENOMEM);
6560 		return;
6561 	}
6562 
6563 	_spdk_blob_load(seq, blob, _spdk_bs_open_blob_cpl, blob);
6564 }
6565 
6566 void spdk_bs_open_blob(struct spdk_blob_store *bs, spdk_blob_id blobid,
6567 		       spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
6568 {
6569 	_spdk_bs_open_blob(bs, blobid, NULL, cb_fn, cb_arg);
6570 }
6571 
6572 void spdk_bs_open_blob_ext(struct spdk_blob_store *bs, spdk_blob_id blobid,
6573 			   struct spdk_blob_open_opts *opts, spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
6574 {
6575 	_spdk_bs_open_blob(bs, blobid, opts, cb_fn, cb_arg);
6576 }
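
/*
 * Usage sketch (illustrative only): open a blob, overriding the clear method
 * used when its clusters are released. 'bs', 'blobid' and 'open_done' are
 * hypothetical application names.
 *
 *	static void
 *	open_done(void *cb_arg, struct spdk_blob *blob, int bserrno)
 *	{
 *		if (bserrno != 0) {
 *			SPDK_ERRLOG("Open failed: %d\n", bserrno);
 *			return;
 *		}
 *		// 'blob' stays valid until the matching spdk_blob_close().
 *	}
 *
 *	struct spdk_blob_open_opts opts;
 *
 *	spdk_blob_open_opts_init(&opts);
 *	opts.clear_method = BLOB_CLEAR_WITH_DEFAULT;
 *	spdk_bs_open_blob_ext(bs, blobid, &opts, open_done, NULL);
 */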
6577 
6578 /* END spdk_bs_open_blob */
6579 
6580 /* START spdk_blob_set_read_only */
6581 int spdk_blob_set_read_only(struct spdk_blob *blob)
6582 {
6583 	_spdk_blob_verify_md_op(blob);
6584 
6585 	blob->data_ro_flags |= SPDK_BLOB_READ_ONLY;
6586 
6587 	blob->state = SPDK_BLOB_STATE_DIRTY;
6588 	return 0;
6589 }
6590 /* END spdk_blob_set_read_only */
6591 
6592 /* START spdk_blob_sync_md */
6593 
6594 static void
6595 _spdk_blob_sync_md_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
6596 {
6597 	struct spdk_blob *blob = cb_arg;
6598 
6599 	if (bserrno == 0 && (blob->data_ro_flags & SPDK_BLOB_READ_ONLY)) {
6600 		blob->data_ro = true;
6601 		blob->md_ro = true;
6602 	}
6603 
6604 	spdk_bs_sequence_finish(seq, bserrno);
6605 }
6606 
6607 static void
6608 _spdk_blob_sync_md(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
6609 {
6610 	struct spdk_bs_cpl	cpl;
6611 	spdk_bs_sequence_t	*seq;
6612 
6613 	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
6614 	cpl.u.blob_basic.cb_fn = cb_fn;
6615 	cpl.u.blob_basic.cb_arg = cb_arg;
6616 
6617 	seq = spdk_bs_sequence_start(blob->bs->md_channel, &cpl);
6618 	if (!seq) {
6619 		cb_fn(cb_arg, -ENOMEM);
6620 		return;
6621 	}
6622 
6623 	_spdk_blob_persist(seq, blob, _spdk_blob_sync_md_cpl, blob);
6624 }
6625 
6626 void
6627 spdk_blob_sync_md(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
6628 {
6629 	_spdk_blob_verify_md_op(blob);
6630 
6631 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Syncing blob %lu\n", blob->id);
6632 
6633 	if (blob->md_ro) {
6634 		assert(blob->state == SPDK_BLOB_STATE_CLEAN);
6635 		cb_fn(cb_arg, 0);
6636 		return;
6637 	}
6638 
6639 	_spdk_blob_sync_md(blob, cb_fn, cb_arg);
6640 }
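
/*
 * Usage sketch (illustrative only): spdk_blob_set_read_only() above only sets
 * the SPDK_BLOB_READ_ONLY flag and marks the metadata dirty; the flag takes
 * effect (data_ro/md_ro become true) once the metadata is synced. 'blob' and
 * 'ro_done' are hypothetical application names.
 *
 *	spdk_blob_set_read_only(blob);
 *	spdk_blob_sync_md(blob, ro_done, NULL);
 */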
6641 
6642 /* END spdk_blob_sync_md */
6643 
6644 struct spdk_blob_insert_cluster_ctx {
6645 	struct spdk_thread	*thread;
6646 	struct spdk_blob	*blob;
6647 	uint32_t		cluster_num;	/* cluster index in blob */
	uint64_t		cluster;	/* cluster on disk */
6649 	uint32_t		extent_page;	/* extent page on disk */
6650 	int			rc;
6651 	spdk_blob_op_complete	cb_fn;
6652 	void			*cb_arg;
6653 };
6654 
6655 static void
6656 _spdk_blob_insert_cluster_msg_cpl(void *arg)
6657 {
6658 	struct spdk_blob_insert_cluster_ctx *ctx = arg;
6659 
6660 	ctx->cb_fn(ctx->cb_arg, ctx->rc);
6661 	free(ctx);
6662 }
6663 
6664 static void
6665 _spdk_blob_insert_cluster_msg_cb(void *arg, int bserrno)
6666 {
6667 	struct spdk_blob_insert_cluster_ctx *ctx = arg;
6668 
6669 	ctx->rc = bserrno;
6670 	spdk_thread_send_msg(ctx->thread, _spdk_blob_insert_cluster_msg_cpl, ctx);
6671 }
6672 
6673 static void
6674 _spdk_blob_persist_extent_page_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
6675 {
6676 	struct spdk_blob_md_page        *page = cb_arg;
6677 
6678 	spdk_bs_sequence_finish(seq, bserrno);
6679 	spdk_free(page);
6680 }
6681 
6682 static void
6683 _spdk_blob_insert_extent(struct spdk_blob *blob, uint32_t extent, uint64_t cluster_num,
6684 			 spdk_blob_op_complete cb_fn, void *cb_arg)
6685 {
6686 	spdk_bs_sequence_t		*seq;
6687 	struct spdk_bs_cpl		cpl;
6688 	struct spdk_blob_md_page	*page = NULL;
6689 	uint32_t			page_count = 0;
6690 	int				rc;
6691 
6692 	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
6693 	cpl.u.blob_basic.cb_fn = cb_fn;
6694 	cpl.u.blob_basic.cb_arg = cb_arg;
6695 
6696 	seq = spdk_bs_sequence_start(blob->bs->md_channel, &cpl);
6697 	if (!seq) {
6698 		cb_fn(cb_arg, -ENOMEM);
6699 		return;
6700 	}
6701 	rc = _spdk_blob_serialize_add_page(blob, &page, &page_count, &page);
6702 	if (rc < 0) {
6703 		spdk_bs_sequence_finish(seq, rc);
6704 		return;
6705 	}
6706 
6707 	_spdk_blob_serialize_extent_page(blob, cluster_num, page);
6708 
6709 	page->crc = _spdk_blob_md_page_calc_crc(page);
6710 
6711 	assert(spdk_bit_array_get(blob->bs->used_md_pages, extent) == true);
6712 
6713 	spdk_bs_sequence_write_dev(seq, page, _spdk_bs_md_page_to_lba(blob->bs, extent),
6714 				   _spdk_bs_byte_to_lba(blob->bs, SPDK_BS_PAGE_SIZE),
6715 				   _spdk_blob_persist_extent_page_cpl, page);
6716 }
6717 
6718 static void
6719 _spdk_blob_insert_cluster_msg(void *arg)
6720 {
6721 	struct spdk_blob_insert_cluster_ctx *ctx = arg;
6722 	uint32_t *extent_page;
6723 
6724 	ctx->rc = _spdk_blob_insert_cluster(ctx->blob, ctx->cluster_num, ctx->cluster);
6725 	if (ctx->rc != 0) {
6726 		spdk_thread_send_msg(ctx->thread, _spdk_blob_insert_cluster_msg_cpl, ctx);
6727 		return;
6728 	}
6729 
6730 	if (ctx->blob->use_extent_table == false) {
6731 		/* Extent table is not used, proceed with sync of md that will only use extents_rle. */
6732 		ctx->blob->state = SPDK_BLOB_STATE_DIRTY;
6733 		_spdk_blob_sync_md(ctx->blob, _spdk_blob_insert_cluster_msg_cb, ctx);
6734 		return;
6735 	}
6736 
6737 	extent_page = _spdk_bs_cluster_to_extent_page(ctx->blob, ctx->cluster_num);
6738 	if (*extent_page == 0) {
6739 		/* Extent page requires allocation.
6740 		 * It was already claimed in the used_md_pages map and placed in ctx.
6741 		 * Blob persist will take care of writing out new extent page on disk. */
6742 		assert(ctx->extent_page != 0);
6743 		assert(spdk_bit_array_get(ctx->blob->bs->used_md_pages, ctx->extent_page) == true);
6744 		*extent_page = ctx->extent_page;
6745 		ctx->blob->state = SPDK_BLOB_STATE_DIRTY;
6746 		_spdk_blob_sync_md(ctx->blob, _spdk_blob_insert_cluster_msg_cb, ctx);
6747 	} else {
6748 		assert(ctx->extent_page == 0);
6749 		/* Extent page already allocated.
		 * Every cluster allocation requires just an update of a single extent page. */
6751 		_spdk_blob_insert_extent(ctx->blob, ctx->extent_page, ctx->cluster_num,
6752 					 _spdk_blob_insert_cluster_msg_cb, ctx);
6753 	}
6754 }
6755 
6756 static void
6757 _spdk_blob_insert_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num,
6758 				       uint64_t cluster, uint32_t extent_page, spdk_blob_op_complete cb_fn, void *cb_arg)
6759 {
6760 	struct spdk_blob_insert_cluster_ctx *ctx;
6761 
6762 	ctx = calloc(1, sizeof(*ctx));
6763 	if (ctx == NULL) {
6764 		cb_fn(cb_arg, -ENOMEM);
6765 		return;
6766 	}
6767 
6768 	ctx->thread = spdk_get_thread();
6769 	ctx->blob = blob;
6770 	ctx->cluster_num = cluster_num;
6771 	ctx->cluster = cluster;
6772 	ctx->extent_page = extent_page;
6773 	ctx->cb_fn = cb_fn;
6774 	ctx->cb_arg = cb_arg;
6775 
6776 	spdk_thread_send_msg(blob->bs->md_thread, _spdk_blob_insert_cluster_msg, ctx);
6777 }
6778 
6779 /* START spdk_blob_close */
6780 
6781 static void
6782 _spdk_blob_close_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
6783 {
6784 	struct spdk_blob *blob = cb_arg;
6785 
6786 	if (bserrno == 0) {
6787 		blob->open_ref--;
6788 		if (blob->open_ref == 0) {
6789 			/*
6790 			 * Blobs with active.num_pages == 0 are deleted blobs.
			 *  These blobs are removed from the blob_store list
6792 			 *  when the deletion process starts - so don't try to
6793 			 *  remove them again.
6794 			 */
6795 			if (blob->active.num_pages > 0) {
6796 				TAILQ_REMOVE(&blob->bs->blobs, blob, link);
6797 			}
6798 			_spdk_blob_free(blob);
6799 		}
6800 	}
6801 
6802 	spdk_bs_sequence_finish(seq, bserrno);
6803 }
6804 
6805 void spdk_blob_close(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
6806 {
6807 	struct spdk_bs_cpl	cpl;
6808 	spdk_bs_sequence_t	*seq;
6809 
6810 	_spdk_blob_verify_md_op(blob);
6811 
6812 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Closing blob %lu\n", blob->id);
6813 
6814 	if (blob->open_ref == 0) {
6815 		cb_fn(cb_arg, -EBADF);
6816 		return;
6817 	}
6818 
6819 	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
6820 	cpl.u.blob_basic.cb_fn = cb_fn;
6821 	cpl.u.blob_basic.cb_arg = cb_arg;
6822 
6823 	seq = spdk_bs_sequence_start(blob->bs->md_channel, &cpl);
6824 	if (!seq) {
6825 		cb_fn(cb_arg, -ENOMEM);
6826 		return;
6827 	}
6828 
6829 	/* Sync metadata */
6830 	_spdk_blob_persist(seq, blob, _spdk_blob_close_cpl, blob);
6831 }
6832 
6833 /* END spdk_blob_close */
6834 
6835 struct spdk_io_channel *spdk_bs_alloc_io_channel(struct spdk_blob_store *bs)
6836 {
6837 	return spdk_get_io_channel(bs);
6838 }
6839 
6840 void spdk_bs_free_io_channel(struct spdk_io_channel *channel)
6841 {
6842 	spdk_put_io_channel(channel);
6843 }
6844 
6845 void spdk_blob_io_unmap(struct spdk_blob *blob, struct spdk_io_channel *channel,
6846 			uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg)
6847 {
6848 	_spdk_blob_request_submit_op(blob, channel, NULL, offset, length, cb_fn, cb_arg,
6849 				     SPDK_BLOB_UNMAP);
6850 }
6851 
6852 void spdk_blob_io_write_zeroes(struct spdk_blob *blob, struct spdk_io_channel *channel,
6853 			       uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg)
6854 {
6855 	_spdk_blob_request_submit_op(blob, channel, NULL, offset, length, cb_fn, cb_arg,
6856 				     SPDK_BLOB_WRITE_ZEROES);
6857 }
6858 
6859 void spdk_blob_io_write(struct spdk_blob *blob, struct spdk_io_channel *channel,
6860 			void *payload, uint64_t offset, uint64_t length,
6861 			spdk_blob_op_complete cb_fn, void *cb_arg)
6862 {
6863 	_spdk_blob_request_submit_op(blob, channel, payload, offset, length, cb_fn, cb_arg,
6864 				     SPDK_BLOB_WRITE);
6865 }
6866 
6867 void spdk_blob_io_read(struct spdk_blob *blob, struct spdk_io_channel *channel,
6868 		       void *payload, uint64_t offset, uint64_t length,
6869 		       spdk_blob_op_complete cb_fn, void *cb_arg)
6870 {
6871 	_spdk_blob_request_submit_op(blob, channel, payload, offset, length, cb_fn, cb_arg,
6872 				     SPDK_BLOB_READ);
6873 }
6874 
6875 void spdk_blob_io_writev(struct spdk_blob *blob, struct spdk_io_channel *channel,
6876 			 struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
6877 			 spdk_blob_op_complete cb_fn, void *cb_arg)
6878 {
6879 	_spdk_blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, false);
6880 }
6881 
6882 void spdk_blob_io_readv(struct spdk_blob *blob, struct spdk_io_channel *channel,
6883 			struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
6884 			spdk_blob_op_complete cb_fn, void *cb_arg)
6885 {
6886 	_spdk_blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, true);
6887 }
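
/*
 * Usage sketch (illustrative only): write one io_unit at offset 0. Offsets and
 * lengths for blob I/O are expressed in io_units, and the payload must come
 * from DMA-safe memory. 'blob', 'bs', 'write_done' and 'channel' (obtained via
 * spdk_bs_alloc_io_channel() on the calling thread) are hypothetical
 * application names.
 *
 *	uint64_t io_unit_size = spdk_bs_get_io_unit_size(bs);
 *	uint8_t *buf = spdk_malloc(io_unit_size, 0x1000, NULL,
 *				   SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
 *
 *	memset(buf, 0xab, io_unit_size);
 *	spdk_blob_io_write(blob, channel, buf, 0, 1, write_done, buf);
 */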
6888 
6889 struct spdk_bs_iter_ctx {
6890 	int64_t page_num;
6891 	struct spdk_blob_store *bs;
6892 
6893 	spdk_blob_op_with_handle_complete cb_fn;
6894 	void *cb_arg;
6895 };
6896 
6897 static void
6898 _spdk_bs_iter_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
6899 {
6900 	struct spdk_bs_iter_ctx *ctx = cb_arg;
6901 	struct spdk_blob_store *bs = ctx->bs;
6902 	spdk_blob_id id;
6903 
6904 	if (bserrno == 0) {
6905 		ctx->cb_fn(ctx->cb_arg, _blob, bserrno);
6906 		free(ctx);
6907 		return;
6908 	}
6909 
6910 	ctx->page_num++;
6911 	ctx->page_num = spdk_bit_array_find_first_set(bs->used_blobids, ctx->page_num);
6912 	if (ctx->page_num >= spdk_bit_array_capacity(bs->used_blobids)) {
6913 		ctx->cb_fn(ctx->cb_arg, NULL, -ENOENT);
6914 		free(ctx);
6915 		return;
6916 	}
6917 
6918 	id = _spdk_bs_page_to_blobid(ctx->page_num);
6919 
6920 	spdk_bs_open_blob(bs, id, _spdk_bs_iter_cpl, ctx);
6921 }
6922 
6923 void
6924 spdk_bs_iter_first(struct spdk_blob_store *bs,
6925 		   spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
6926 {
6927 	struct spdk_bs_iter_ctx *ctx;
6928 
6929 	ctx = calloc(1, sizeof(*ctx));
6930 	if (!ctx) {
6931 		cb_fn(cb_arg, NULL, -ENOMEM);
6932 		return;
6933 	}
6934 
6935 	ctx->page_num = -1;
6936 	ctx->bs = bs;
6937 	ctx->cb_fn = cb_fn;
6938 	ctx->cb_arg = cb_arg;
6939 
6940 	_spdk_bs_iter_cpl(ctx, NULL, -1);
6941 }
6942 
6943 static void
6944 _spdk_bs_iter_close_cpl(void *cb_arg, int bserrno)
6945 {
6946 	struct spdk_bs_iter_ctx *ctx = cb_arg;
6947 
6948 	_spdk_bs_iter_cpl(ctx, NULL, -1);
6949 }
6950 
6951 void
6952 spdk_bs_iter_next(struct spdk_blob_store *bs, struct spdk_blob *blob,
6953 		  spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
6954 {
6955 	struct spdk_bs_iter_ctx *ctx;
6956 
6957 	assert(blob != NULL);
6958 
6959 	ctx = calloc(1, sizeof(*ctx));
6960 	if (!ctx) {
6961 		cb_fn(cb_arg, NULL, -ENOMEM);
6962 		return;
6963 	}
6964 
6965 	ctx->page_num = _spdk_bs_blobid_to_page(blob->id);
6966 	ctx->bs = bs;
6967 	ctx->cb_fn = cb_fn;
6968 	ctx->cb_arg = cb_arg;
6969 
6970 	/* Close the existing blob */
6971 	spdk_blob_close(blob, _spdk_bs_iter_close_cpl, ctx);
6972 }
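
/*
 * Usage sketch (illustrative only): iterate over every blob in the blobstore.
 * The iterator opens each blob in turn; spdk_bs_iter_next() closes the current
 * blob before opening the following one, and the callback receives -ENOENT
 * when iteration is complete. 'bs' and 'iter_cb' are hypothetical names.
 *
 *	static void
 *	iter_cb(void *cb_arg, struct spdk_blob *blob, int bserrno)
 *	{
 *		struct spdk_blob_store *bs = cb_arg;
 *
 *		if (bserrno != 0) {
 *			return;	// -ENOENT: no more blobs
 *		}
 *		SPDK_NOTICELOG("Visiting blob 0x%" PRIx64 "\n", spdk_blob_get_id(blob));
 *		spdk_bs_iter_next(bs, blob, iter_cb, bs);
 *	}
 *
 *	spdk_bs_iter_first(bs, iter_cb, bs);
 */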
6973 
6974 static int
6975 _spdk_blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value,
6976 		     uint16_t value_len, bool internal)
6977 {
	struct spdk_xattr_tailq *xattrs;
	struct spdk_xattr	*xattr;
	size_t			desc_size;
	void			*tmp;
6981 
6982 	_spdk_blob_verify_md_op(blob);
6983 
6984 	if (blob->md_ro) {
6985 		return -EPERM;
6986 	}
6987 
6988 	desc_size = sizeof(struct spdk_blob_md_descriptor_xattr) + strlen(name) + value_len;
6989 	if (desc_size > SPDK_BS_MAX_DESC_SIZE) {
		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Xattr '%s' of size %zu does not fit into single page %zu\n",
			      name, desc_size, (size_t)SPDK_BS_MAX_DESC_SIZE);
6992 		return -ENOMEM;
6993 	}
6994 
6995 	if (internal) {
6996 		xattrs = &blob->xattrs_internal;
6997 		blob->invalid_flags |= SPDK_BLOB_INTERNAL_XATTR;
6998 	} else {
6999 		xattrs = &blob->xattrs;
7000 	}
7001 
	TAILQ_FOREACH(xattr, xattrs, link) {
		if (!strcmp(name, xattr->name)) {
			/* Allocate the new value before freeing the old one so
			 * the existing xattr is left intact if allocation fails. */
			tmp = malloc(value_len);
			if (!tmp) {
				return -ENOMEM;
			}
			memcpy(tmp, value, value_len);

			free(xattr->value);
			xattr->value_len = value_len;
			xattr->value = tmp;

			blob->state = SPDK_BLOB_STATE_DIRTY;

			return 0;
		}
	}
7014 
	xattr = calloc(1, sizeof(*xattr));
	if (!xattr) {
		return -ENOMEM;
	}
	xattr->name = strdup(name);
	if (!xattr->name) {
		free(xattr);
		return -ENOMEM;
	}
	xattr->value_len = value_len;
	xattr->value = malloc(value_len);
	if (!xattr->value) {
		free(xattr->name);
		free(xattr);
		return -ENOMEM;
	}
	memcpy(xattr->value, value, value_len);
	TAILQ_INSERT_TAIL(xattrs, xattr, link);
7024 
7025 	blob->state = SPDK_BLOB_STATE_DIRTY;
7026 
7027 	return 0;
7028 }
7029 
7030 int
7031 spdk_blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value,
7032 		    uint16_t value_len)
7033 {
7034 	return _spdk_blob_set_xattr(blob, name, value, value_len, false);
7035 }
7036 
7037 static int
7038 _spdk_blob_remove_xattr(struct spdk_blob *blob, const char *name, bool internal)
7039 {
7040 	struct spdk_xattr_tailq *xattrs;
7041 	struct spdk_xattr	*xattr;
7042 
7043 	_spdk_blob_verify_md_op(blob);
7044 
7045 	if (blob->md_ro) {
7046 		return -EPERM;
7047 	}
7048 	xattrs = internal ? &blob->xattrs_internal : &blob->xattrs;
7049 
7050 	TAILQ_FOREACH(xattr, xattrs, link) {
7051 		if (!strcmp(name, xattr->name)) {
7052 			TAILQ_REMOVE(xattrs, xattr, link);
7053 			free(xattr->value);
7054 			free(xattr->name);
7055 			free(xattr);
7056 
7057 			if (internal && TAILQ_EMPTY(&blob->xattrs_internal)) {
7058 				blob->invalid_flags &= ~SPDK_BLOB_INTERNAL_XATTR;
7059 			}
7060 			blob->state = SPDK_BLOB_STATE_DIRTY;
7061 
7062 			return 0;
7063 		}
7064 	}
7065 
7066 	return -ENOENT;
7067 }
7068 
7069 int
7070 spdk_blob_remove_xattr(struct spdk_blob *blob, const char *name)
7071 {
7072 	return _spdk_blob_remove_xattr(blob, name, false);
7073 }
7074 
7075 static int
7076 _spdk_blob_get_xattr_value(struct spdk_blob *blob, const char *name,
7077 			   const void **value, size_t *value_len, bool internal)
7078 {
7079 	struct spdk_xattr	*xattr;
7080 	struct spdk_xattr_tailq *xattrs;
7081 
7082 	xattrs = internal ? &blob->xattrs_internal : &blob->xattrs;
7083 
7084 	TAILQ_FOREACH(xattr, xattrs, link) {
7085 		if (!strcmp(name, xattr->name)) {
7086 			*value = xattr->value;
7087 			*value_len = xattr->value_len;
7088 			return 0;
7089 		}
7090 	}
7091 	return -ENOENT;
7092 }
7093 
7094 int
7095 spdk_blob_get_xattr_value(struct spdk_blob *blob, const char *name,
7096 			  const void **value, size_t *value_len)
7097 {
7098 	_spdk_blob_verify_md_op(blob);
7099 
7100 	return _spdk_blob_get_xattr_value(blob, name, value, value_len, false);
7101 }
7102 
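/*
 * Usage sketch (illustrative only): setting and reading back an xattr from
 * the metadata thread. The key and value here are hypothetical. Note that
 * the pointer returned by spdk_blob_get_xattr_value() refers to memory owned
 * by the blob and is only valid until the xattr is modified or removed.
 *
 *	const void *value;
 *	size_t value_len;
 *	int rc;
 *
 *	rc = spdk_blob_set_xattr(blob, "name", "example", sizeof("example"));
 *	if (rc == 0) {
 *		rc = spdk_blob_get_xattr_value(blob, "name", &value, &value_len);
 *	}
 *	if (rc == 0) {
 *		rc = spdk_blob_remove_xattr(blob, "name");
 *	}
 */
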
7103 struct spdk_xattr_names {
7104 	uint32_t	count;
7105 	const char	*names[0];
7106 };
7107 
7108 static int
7109 _spdk_blob_get_xattr_names(struct spdk_xattr_tailq *xattrs, struct spdk_xattr_names **names)
7110 {
7111 	struct spdk_xattr	*xattr;
7112 	int			count = 0;
7113 
7114 	TAILQ_FOREACH(xattr, xattrs, link) {
7115 		count++;
7116 	}
7117 
7118 	*names = calloc(1, sizeof(struct spdk_xattr_names) + count * sizeof(char *));
7119 	if (*names == NULL) {
7120 		return -ENOMEM;
7121 	}
7122 
7123 	TAILQ_FOREACH(xattr, xattrs, link) {
7124 		(*names)->names[(*names)->count++] = xattr->name;
7125 	}
7126 
7127 	return 0;
7128 }
7129 
7130 int
7131 spdk_blob_get_xattr_names(struct spdk_blob *blob, struct spdk_xattr_names **names)
7132 {
7133 	_spdk_blob_verify_md_op(blob);
7134 
7135 	return _spdk_blob_get_xattr_names(&blob->xattrs, names);
7136 }
7137 
7138 uint32_t
7139 spdk_xattr_names_get_count(struct spdk_xattr_names *names)
7140 {
7141 	assert(names != NULL);
7142 
7143 	return names->count;
7144 }
7145 
7146 const char *
7147 spdk_xattr_names_get_name(struct spdk_xattr_names *names, uint32_t index)
7148 {
7149 	if (index >= names->count) {
7150 		return NULL;
7151 	}
7152 
7153 	return names->names[index];
7154 }
7155 
7156 void
7157 spdk_xattr_names_free(struct spdk_xattr_names *names)
7158 {
7159 	free(names);
7160 }
7161 
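/*
 * Usage sketch (illustrative only): enumerating xattr names. The returned
 * strings point into the blob's xattr list, so the caller should copy
 * anything it needs before freeing the container.
 *
 *	struct spdk_xattr_names *names;
 *	uint32_t i;
 *
 *	if (spdk_blob_get_xattr_names(blob, &names) == 0) {
 *		for (i = 0; i < spdk_xattr_names_get_count(names); i++) {
 *			printf("%s\n", spdk_xattr_names_get_name(names, i));
 *		}
 *		spdk_xattr_names_free(names);
 *	}
 */
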
7162 struct spdk_bs_type
7163 spdk_bs_get_bstype(struct spdk_blob_store *bs)
7164 {
7165 	return bs->bstype;
7166 }
7167 
7168 void
7169 spdk_bs_set_bstype(struct spdk_blob_store *bs, struct spdk_bs_type bstype)
7170 {
7171 	memcpy(&bs->bstype, &bstype, sizeof(bstype));
7172 }
7173 
7174 bool
7175 spdk_blob_is_read_only(struct spdk_blob *blob)
7176 {
7177 	assert(blob != NULL);
7178 	return (blob->data_ro || blob->md_ro);
7179 }
7180 
7181 bool
7182 spdk_blob_is_snapshot(struct spdk_blob *blob)
7183 {
7184 	struct spdk_blob_list *snapshot_entry;
7185 
7186 	assert(blob != NULL);
7187 
7188 	snapshot_entry = _spdk_bs_get_snapshot_entry(blob->bs, blob->id);
7189 	if (snapshot_entry == NULL) {
7190 		return false;
7191 	}
7192 
7193 	return true;
7194 }
7195 
7196 bool
7197 spdk_blob_is_clone(struct spdk_blob *blob)
7198 {
7199 	assert(blob != NULL);
7200 
7201 	if (blob->parent_id != SPDK_BLOBID_INVALID) {
7202 		assert(spdk_blob_is_thin_provisioned(blob));
7203 		return true;
7204 	}
7205 
7206 	return false;
7207 }
7208 
7209 bool
7210 spdk_blob_is_thin_provisioned(struct spdk_blob *blob)
7211 {
7212 	assert(blob != NULL);
7213 	return !!(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
7214 }
7215 
7216 static void
7217 _spdk_blob_update_clear_method(struct spdk_blob *blob)
7218 {
7219 	enum blob_clear_method stored_cm;
7220 
7221 	assert(blob != NULL);
7222 
7223 	/* If BLOB_CLEAR_WITH_DEFAULT was passed in, use the setting stored
7224 	 * in metadata previously.  If something other than the default was
	 * specified, ignore the stored value and use what was passed in.
7226 	 */
7227 	stored_cm = ((blob->md_ro_flags & SPDK_BLOB_CLEAR_METHOD) >> SPDK_BLOB_CLEAR_METHOD_SHIFT);
7228 
7229 	if (blob->clear_method == BLOB_CLEAR_WITH_DEFAULT) {
7230 		blob->clear_method = stored_cm;
7231 	} else if (blob->clear_method != stored_cm) {
7232 		SPDK_WARNLOG("Using passed in clear method 0x%x instead of stored value of 0x%x\n",
7233 			     blob->clear_method, stored_cm);
7234 	}
7235 }
7236 
7237 spdk_blob_id
7238 spdk_blob_get_parent_snapshot(struct spdk_blob_store *bs, spdk_blob_id blob_id)
7239 {
7240 	struct spdk_blob_list *snapshot_entry = NULL;
7241 	struct spdk_blob_list *clone_entry = NULL;
7242 
7243 	TAILQ_FOREACH(snapshot_entry, &bs->snapshots, link) {
7244 		TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) {
7245 			if (clone_entry->id == blob_id) {
7246 				return snapshot_entry->id;
7247 			}
7248 		}
7249 	}
7250 
7251 	return SPDK_BLOBID_INVALID;
7252 }
7253 
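/*
 * Usage sketch (illustrative only): checking whether a blob is a clone and,
 * if so, resolving its parent snapshot.
 *
 *	spdk_blob_id parent;
 *
 *	if (spdk_blob_is_clone(blob)) {
 *		parent = spdk_blob_get_parent_snapshot(bs, spdk_blob_get_id(blob));
 *		// parent is SPDK_BLOBID_INVALID if no snapshot entry is found.
 *	}
 */
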
7254 int
7255 spdk_blob_get_clones(struct spdk_blob_store *bs, spdk_blob_id blobid, spdk_blob_id *ids,
7256 		     size_t *count)
7257 {
7258 	struct spdk_blob_list *snapshot_entry, *clone_entry;
7259 	size_t n;
7260 
7261 	snapshot_entry = _spdk_bs_get_snapshot_entry(bs, blobid);
7262 	if (snapshot_entry == NULL) {
7263 		*count = 0;
7264 		return 0;
7265 	}
7266 
7267 	if (ids == NULL || *count < snapshot_entry->clone_count) {
7268 		*count = snapshot_entry->clone_count;
7269 		return -ENOMEM;
7270 	}
7271 	*count = snapshot_entry->clone_count;
7272 
7273 	n = 0;
7274 	TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) {
7275 		ids[n++] = clone_entry->id;
7276 	}
7277 
7278 	return 0;
7279 }
7280 
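/*
 * Usage sketch (illustrative only): the usual two-call pattern for
 * spdk_blob_get_clones(). A first call with ids == NULL fails with -ENOMEM
 * and reports the required count; the caller then allocates and retries.
 *
 *	size_t count = 0;
 *	spdk_blob_id *ids = NULL;
 *
 *	if (spdk_blob_get_clones(bs, snapshot_id, NULL, &count) == -ENOMEM) {
 *		ids = calloc(count, sizeof(*ids));
 *		if (ids != NULL && spdk_blob_get_clones(bs, snapshot_id, ids, &count) == 0) {
 *			// ids[0..count-1] now hold the clone blob ids.
 *		}
 *		free(ids);
 *	}
 */
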
7281 SPDK_LOG_REGISTER_COMPONENT("blob", SPDK_LOG_BLOB)
7282