/* xref: /spdk/lib/blob/blobstore.c (revision a83f91c29a4740e4bea5f9509b7036e9e7dc2788) */
/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "spdk/blob.h"
#include "spdk/crc32.h"
#include "spdk/env.h"
#include "spdk/queue.h"
#include "spdk/thread.h"
#include "spdk/bit_array.h"
#include "spdk/likely.h"

#include "spdk_internal/assert.h"
#include "spdk_internal/log.h"

#include "blobstore.h"

#define BLOB_CRC32C_INITIAL    0xffffffffUL

static int spdk_bs_register_md_thread(struct spdk_blob_store *bs);
static int spdk_bs_unregister_md_thread(struct spdk_blob_store *bs);
static void _spdk_blob_close_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno);
static void _spdk_blob_insert_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num,
		uint64_t cluster, spdk_blob_op_complete cb_fn, void *cb_arg);

static int _spdk_blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value,
				uint16_t value_len, bool internal);
static int _spdk_blob_get_xattr_value(struct spdk_blob *blob, const char *name,
				      const void **value, size_t *value_len, bool internal);
static int _spdk_blob_remove_xattr(struct spdk_blob *blob, const char *name, bool internal);

static void
_spdk_blob_verify_md_op(struct spdk_blob *blob)
{
	assert(blob != NULL);
	assert(spdk_get_thread() == blob->bs->md_thread);
	assert(blob->state != SPDK_BLOB_STATE_LOADING);
}

static inline size_t
divide_round_up(size_t num, size_t divisor)
{
	return (num + divisor - 1) / divisor;
}
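
/* Worked example: divide_round_up(10, 4) == 3, since ten bytes occupy three
 * 4-byte units, while divide_round_up(8, 4) == 2 exactly.
 */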

static void
_spdk_bs_claim_cluster(struct spdk_blob_store *bs, uint32_t cluster_num)
{
	assert(cluster_num < spdk_bit_array_capacity(bs->used_clusters));
	assert(spdk_bit_array_get(bs->used_clusters, cluster_num) == false);
	assert(bs->num_free_clusters > 0);

	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Claiming cluster %u\n", cluster_num);

	spdk_bit_array_set(bs->used_clusters, cluster_num);
	bs->num_free_clusters--;
}

static int
_spdk_blob_insert_cluster(struct spdk_blob *blob, uint32_t cluster_num, uint64_t cluster)
{
	uint64_t *cluster_lba = &blob->active.clusters[cluster_num];

	_spdk_blob_verify_md_op(blob);

	if (*cluster_lba != 0) {
		return -EEXIST;
	}

	*cluster_lba = _spdk_bs_cluster_to_lba(blob->bs, cluster);
	return 0;
}

static int
_spdk_bs_allocate_cluster(struct spdk_blob *blob, uint32_t cluster_num,
			  uint64_t *lowest_free_cluster, bool update_map)
{
	pthread_mutex_lock(&blob->bs->used_clusters_mutex);
	*lowest_free_cluster = spdk_bit_array_find_first_clear(blob->bs->used_clusters,
			       *lowest_free_cluster);
	if (*lowest_free_cluster >= blob->bs->total_clusters) {
		/* No more free clusters. Cannot satisfy the request */
		pthread_mutex_unlock(&blob->bs->used_clusters_mutex);
		return -ENOSPC;
	}

	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Claiming cluster %lu for blob %lu\n", *lowest_free_cluster, blob->id);
	_spdk_bs_claim_cluster(blob->bs, *lowest_free_cluster);
	pthread_mutex_unlock(&blob->bs->used_clusters_mutex);

	if (update_map) {
		_spdk_blob_insert_cluster(blob, cluster_num, *lowest_free_cluster);
	}

	return 0;
}

static void
_spdk_bs_release_cluster(struct spdk_blob_store *bs, uint32_t cluster_num)
{
	assert(cluster_num < spdk_bit_array_capacity(bs->used_clusters));
	assert(spdk_bit_array_get(bs->used_clusters, cluster_num) == true);
	assert(bs->num_free_clusters < bs->total_clusters);

	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Releasing cluster %u\n", cluster_num);

	pthread_mutex_lock(&bs->used_clusters_mutex);
	spdk_bit_array_clear(bs->used_clusters, cluster_num);
	bs->num_free_clusters++;
	pthread_mutex_unlock(&bs->used_clusters_mutex);
}

static void
_spdk_blob_xattrs_init(struct spdk_blob_xattr_opts *xattrs)
{
	xattrs->count = 0;
	xattrs->names = NULL;
	xattrs->ctx = NULL;
	xattrs->get_value = NULL;
}

void
spdk_blob_opts_init(struct spdk_blob_opts *opts)
{
	opts->num_clusters = 0;
	opts->thin_provision = false;
	_spdk_blob_xattrs_init(&opts->xattrs);
}
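
/* Usage sketch (illustrative; create_cb and cb_arg are application-defined):
 *
 *	struct spdk_blob_opts opts;
 *
 *	spdk_blob_opts_init(&opts);
 *	opts.num_clusters = 16;
 *	opts.thin_provision = true;
 *	spdk_bs_create_blob_ext(bs, &opts, create_cb, cb_arg);
 *
 * Initializing the structure first ensures that any fields added in newer
 * versions of the opts structure keep sane defaults.
 */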

static struct spdk_blob *
_spdk_blob_alloc(struct spdk_blob_store *bs, spdk_blob_id id)
{
	struct spdk_blob *blob;

	blob = calloc(1, sizeof(*blob));
	if (!blob) {
		return NULL;
	}

	blob->id = id;
	blob->bs = bs;

	blob->parent_id = SPDK_BLOBID_INVALID;

	blob->state = SPDK_BLOB_STATE_DIRTY;
	blob->active.num_pages = 1;
	blob->active.pages = calloc(1, sizeof(*blob->active.pages));
	if (!blob->active.pages) {
		free(blob);
		return NULL;
	}

	blob->active.pages[0] = _spdk_bs_blobid_to_page(id);

	TAILQ_INIT(&blob->xattrs);
	TAILQ_INIT(&blob->xattrs_internal);

	return blob;
}

static void
_spdk_xattrs_free(struct spdk_xattr_tailq *xattrs)
{
	struct spdk_xattr	*xattr, *xattr_tmp;

	TAILQ_FOREACH_SAFE(xattr, xattrs, link, xattr_tmp) {
		TAILQ_REMOVE(xattrs, xattr, link);
		free(xattr->name);
		free(xattr->value);
		free(xattr);
	}
}

static void
_spdk_blob_free(struct spdk_blob *blob)
{
	assert(blob != NULL);

	free(blob->active.clusters);
	free(blob->clean.clusters);
	free(blob->active.pages);
	free(blob->clean.pages);

	_spdk_xattrs_free(&blob->xattrs);
	_spdk_xattrs_free(&blob->xattrs_internal);

	if (blob->back_bs_dev) {
		blob->back_bs_dev->destroy(blob->back_bs_dev);
	}

	free(blob);
}

struct freeze_io_ctx {
	struct spdk_bs_cpl cpl;
	struct spdk_blob *blob;
};

static void
_spdk_blob_io_sync(struct spdk_io_channel_iter *i)
{
	spdk_for_each_channel_continue(i, 0);
}

static void
_spdk_blob_execute_queued_io(struct spdk_io_channel_iter *i)
{
	struct spdk_io_channel *_ch = spdk_io_channel_iter_get_channel(i);
	struct spdk_bs_channel *ch = spdk_io_channel_get_ctx(_ch);
	struct freeze_io_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
	struct spdk_bs_request_set	*set;
	struct spdk_bs_user_op_args	*args;
	spdk_bs_user_op_t *op, *tmp;

	TAILQ_FOREACH_SAFE(op, &ch->queued_io, link, tmp) {
		set = (struct spdk_bs_request_set *)op;
		args = &set->u.user_op;

		if (args->blob == ctx->blob) {
			TAILQ_REMOVE(&ch->queued_io, op, link);
			spdk_bs_user_op_execute(op);
		}
	}

	spdk_for_each_channel_continue(i, 0);
}

static void
_spdk_blob_io_cpl(struct spdk_io_channel_iter *i, int status)
{
	struct freeze_io_ctx *ctx = spdk_io_channel_iter_get_ctx(i);

	ctx->cpl.u.blob_basic.cb_fn(ctx->cpl.u.blob_basic.cb_arg, 0);

	free(ctx);
}

static void
_spdk_blob_freeze_io(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
{
	struct freeze_io_ctx *ctx;

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	ctx->cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
	ctx->cpl.u.blob_basic.cb_fn = cb_fn;
	ctx->cpl.u.blob_basic.cb_arg = cb_arg;
	ctx->blob = blob;

	/* Freeze I/O on blob */
	blob->frozen_refcnt++;

	if (blob->frozen_refcnt == 1) {
		spdk_for_each_channel(blob->bs, _spdk_blob_io_sync, ctx, _spdk_blob_io_cpl);
	} else {
		cb_fn(cb_arg, 0);
		free(ctx);
	}
}

static void
_spdk_blob_unfreeze_io(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
{
	struct freeze_io_ctx *ctx;

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	ctx->cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
	ctx->cpl.u.blob_basic.cb_fn = cb_fn;
	ctx->cpl.u.blob_basic.cb_arg = cb_arg;
	ctx->blob = blob;

	assert(blob->frozen_refcnt > 0);

	blob->frozen_refcnt--;

	if (blob->frozen_refcnt == 0) {
		spdk_for_each_channel(blob->bs, _spdk_blob_execute_queued_io, ctx, _spdk_blob_io_cpl);
	} else {
		cb_fn(cb_arg, 0);
		free(ctx);
	}
}
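
/* Freeze/unfreeze protocol, summarized: frozen_refcnt counts outstanding
 * freeze requests. The first freeze walks every I/O channel with
 * spdk_for_each_channel() purely as a barrier, so the completion only fires
 * once all channel threads have observed the new refcount; while frozen,
 * new I/O for this blob is parked on each channel's queued_io list (see
 * _spdk_blob_request_submit_op_single). The final unfreeze replays the
 * parked operations via _spdk_blob_execute_queued_io.
 */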

static int
_spdk_blob_mark_clean(struct spdk_blob *blob)
{
	uint64_t *clusters = NULL;
	uint32_t *pages = NULL;

	assert(blob != NULL);

	if (blob->active.num_clusters) {
		assert(blob->active.clusters);
		clusters = calloc(blob->active.num_clusters, sizeof(*blob->active.clusters));
		if (!clusters) {
			return -ENOMEM;
		}
		memcpy(clusters, blob->active.clusters, blob->active.num_clusters * sizeof(*clusters));
	}

	if (blob->active.num_pages) {
		assert(blob->active.pages);
		pages = calloc(blob->active.num_pages, sizeof(*blob->active.pages));
		if (!pages) {
			free(clusters);
			return -ENOMEM;
		}
		memcpy(pages, blob->active.pages, blob->active.num_pages * sizeof(*pages));
	}

	free(blob->clean.clusters);
	free(blob->clean.pages);

	blob->clean.num_clusters = blob->active.num_clusters;
	blob->clean.clusters = blob->active.clusters;
	blob->clean.num_pages = blob->active.num_pages;
	blob->clean.pages = blob->active.pages;

	blob->active.clusters = clusters;
	blob->active.pages = pages;

	/* If the metadata was dirtied again while the metadata was being written to disk,
	 *  we do not want to revert the DIRTY state back to CLEAN here.
	 */
	if (blob->state == SPDK_BLOB_STATE_LOADING) {
		blob->state = SPDK_BLOB_STATE_CLEAN;
	}

	return 0;
}

static int
_spdk_blob_deserialize_xattr(struct spdk_blob *blob,
			     struct spdk_blob_md_descriptor_xattr *desc_xattr, bool internal)
{
	struct spdk_xattr                       *xattr;

	if (desc_xattr->length != sizeof(desc_xattr->name_length) +
	    sizeof(desc_xattr->value_length) +
	    desc_xattr->name_length + desc_xattr->value_length) {
		return -EINVAL;
	}

	xattr = calloc(1, sizeof(*xattr));
	if (xattr == NULL) {
		return -ENOMEM;
	}

	xattr->name = malloc(desc_xattr->name_length + 1);
	if (xattr->name == NULL) {
		free(xattr);
		return -ENOMEM;
	}
	memcpy(xattr->name, desc_xattr->name, desc_xattr->name_length);
	xattr->name[desc_xattr->name_length] = '\0';

	xattr->value = malloc(desc_xattr->value_length);
	if (xattr->value == NULL) {
		free(xattr->name);
		free(xattr);
		return -ENOMEM;
	}
	xattr->value_len = desc_xattr->value_length;
	memcpy(xattr->value,
	       (void *)((uintptr_t)desc_xattr->name + desc_xattr->name_length),
	       desc_xattr->value_length);

	TAILQ_INSERT_TAIL(internal ? &blob->xattrs_internal : &blob->xattrs, xattr, link);

	return 0;
}

static int
_spdk_blob_parse_page(const struct spdk_blob_md_page *page, struct spdk_blob *blob)
{
	struct spdk_blob_md_descriptor *desc;
	size_t	cur_desc = 0;
	void *tmp;

	desc = (struct spdk_blob_md_descriptor *)page->descriptors;
	while (cur_desc < sizeof(page->descriptors)) {
		if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) {
			if (desc->length == 0) {
				/* If padding and length are 0, this terminates the page */
				break;
			}
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) {
			struct spdk_blob_md_descriptor_flags	*desc_flags;

			desc_flags = (struct spdk_blob_md_descriptor_flags *)desc;

			if (desc_flags->length != sizeof(*desc_flags) - sizeof(*desc)) {
				return -EINVAL;
			}

			if ((desc_flags->invalid_flags | SPDK_BLOB_INVALID_FLAGS_MASK) !=
			    SPDK_BLOB_INVALID_FLAGS_MASK) {
				return -EINVAL;
			}

			if ((desc_flags->data_ro_flags | SPDK_BLOB_DATA_RO_FLAGS_MASK) !=
			    SPDK_BLOB_DATA_RO_FLAGS_MASK) {
				blob->data_ro = true;
				blob->md_ro = true;
			}

			if ((desc_flags->md_ro_flags | SPDK_BLOB_MD_RO_FLAGS_MASK) !=
			    SPDK_BLOB_MD_RO_FLAGS_MASK) {
				blob->md_ro = true;
			}

			if ((desc_flags->data_ro_flags & SPDK_BLOB_READ_ONLY)) {
				blob->data_ro = true;
				blob->md_ro = true;
			}

			blob->invalid_flags = desc_flags->invalid_flags;
			blob->data_ro_flags = desc_flags->data_ro_flags;
			blob->md_ro_flags = desc_flags->md_ro_flags;

		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT) {
			struct spdk_blob_md_descriptor_extent	*desc_extent;
			unsigned int				i, j;
			unsigned int				cluster_count = blob->active.num_clusters;

			desc_extent = (struct spdk_blob_md_descriptor_extent *)desc;

			if (desc_extent->length == 0 ||
			    (desc_extent->length % sizeof(desc_extent->extents[0]) != 0)) {
				return -EINVAL;
			}

			for (i = 0; i < desc_extent->length / sizeof(desc_extent->extents[0]); i++) {
				for (j = 0; j < desc_extent->extents[i].length; j++) {
					if (!spdk_bit_array_get(blob->bs->used_clusters,
								desc_extent->extents[i].cluster_idx + j)) {
						return -EINVAL;
					}
					cluster_count++;
				}
			}

			if (cluster_count == 0) {
				return -EINVAL;
			}
			tmp = realloc(blob->active.clusters, cluster_count * sizeof(uint64_t));
			if (tmp == NULL) {
				return -ENOMEM;
			}
			blob->active.clusters = tmp;
			blob->active.cluster_array_size = cluster_count;

			for (i = 0; i < desc_extent->length / sizeof(desc_extent->extents[0]); i++) {
				for (j = 0; j < desc_extent->extents[i].length; j++) {
					if (desc_extent->extents[i].cluster_idx != 0) {
						blob->active.clusters[blob->active.num_clusters++] = _spdk_bs_cluster_to_lba(blob->bs,
								desc_extent->extents[i].cluster_idx + j);
					} else if (spdk_blob_is_thin_provisioned(blob)) {
						blob->active.clusters[blob->active.num_clusters++] = 0;
					} else {
						return -EINVAL;
					}
				}
			}

		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) {
			int rc;

			rc = _spdk_blob_deserialize_xattr(blob,
							  (struct spdk_blob_md_descriptor_xattr *) desc, false);
			if (rc != 0) {
				return rc;
			}
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) {
			int rc;

			rc = _spdk_blob_deserialize_xattr(blob,
							  (struct spdk_blob_md_descriptor_xattr *) desc, true);
			if (rc != 0) {
				return rc;
			}
		} else {
			/* Unrecognized descriptor type.  Do not fail - just continue to the
			 *  next descriptor.  If this descriptor is associated with some feature
			 *  defined in a newer version of blobstore, that version of blobstore
			 *  should create and set an associated feature flag to specify if this
			 *  blob can be loaded or not.
			 */
		}

		/* Advance to the next descriptor */
		cur_desc += sizeof(*desc) + desc->length;
		if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) {
			break;
		}
		desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc);
	}

	return 0;
}
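
/* Metadata page layout, for reference: page->descriptors holds a packed
 * sequence of TLV-style entries, each beginning with the common
 * spdk_blob_md_descriptor header (type, length) followed by desc->length
 * bytes of type-specific payload. A PADDING descriptor with length 0
 * terminates the page early; otherwise the walk above stops as soon as the
 * next header would no longer fit within sizeof(page->descriptors).
 */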

static int
_spdk_blob_parse(const struct spdk_blob_md_page *pages, uint32_t page_count,
		 struct spdk_blob *blob)
{
	const struct spdk_blob_md_page *page;
	uint32_t i;
	int rc;

	assert(page_count > 0);
	assert(pages[0].sequence_num == 0);
	assert(blob != NULL);
	assert(blob->state == SPDK_BLOB_STATE_LOADING);
	assert(blob->active.clusters == NULL);

	/* The blobid provided doesn't match what's in the MD; this can happen,
	 * for example, if a bogus blobid is passed in through open.
	 */
	if (blob->id != pages[0].id) {
		SPDK_ERRLOG("Blobid (%lu) doesn't match what's in metadata (%lu)\n",
			    blob->id, pages[0].id);
		return -ENOENT;
	}

	for (i = 0; i < page_count; i++) {
		page = &pages[i];

		assert(page->id == blob->id);
		assert(page->sequence_num == i);

		rc = _spdk_blob_parse_page(page, blob);
		if (rc != 0) {
			return rc;
		}
	}

	return 0;
}

static int
_spdk_blob_serialize_add_page(const struct spdk_blob *blob,
			      struct spdk_blob_md_page **pages,
			      uint32_t *page_count,
			      struct spdk_blob_md_page **last_page)
{
	struct spdk_blob_md_page *page;

	assert(pages != NULL);
	assert(page_count != NULL);

	if (*page_count == 0) {
		assert(*pages == NULL);
		*page_count = 1;
		*pages = spdk_dma_malloc(SPDK_BS_PAGE_SIZE,
					 SPDK_BS_PAGE_SIZE,
					 NULL);
	} else {
		assert(*pages != NULL);
		(*page_count)++;
		*pages = spdk_dma_realloc(*pages,
					  SPDK_BS_PAGE_SIZE * (*page_count),
					  SPDK_BS_PAGE_SIZE,
					  NULL);
	}

	if (*pages == NULL) {
		*page_count = 0;
		*last_page = NULL;
		return -ENOMEM;
	}

	page = &(*pages)[*page_count - 1];
	memset(page, 0, sizeof(*page));
	page->id = blob->id;
	page->sequence_num = *page_count - 1;
	page->next = SPDK_INVALID_MD_PAGE;
	*last_page = page;

	return 0;
}

/* Transform the in-memory representation 'xattr' into an on-disk xattr descriptor.
 * Update required_sz on both success and failure.
 */
static int
_spdk_blob_serialize_xattr(const struct spdk_xattr *xattr,
			   uint8_t *buf, size_t buf_sz,
			   size_t *required_sz, bool internal)
{
	struct spdk_blob_md_descriptor_xattr	*desc;

	*required_sz = sizeof(struct spdk_blob_md_descriptor_xattr) +
		       strlen(xattr->name) +
		       xattr->value_len;

	if (buf_sz < *required_sz) {
		return -1;
	}

	desc = (struct spdk_blob_md_descriptor_xattr *)buf;

	desc->type = internal ? SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL : SPDK_MD_DESCRIPTOR_TYPE_XATTR;
	desc->length = sizeof(desc->name_length) +
		       sizeof(desc->value_length) +
		       strlen(xattr->name) +
		       xattr->value_len;
	desc->name_length = strlen(xattr->name);
	desc->value_length = xattr->value_len;

	memcpy(desc->name, xattr->name, desc->name_length);
	memcpy((void *)((uintptr_t)desc->name + desc->name_length),
	       xattr->value,
	       desc->value_length);

	return 0;
}

static void
_spdk_blob_serialize_extent(const struct spdk_blob *blob,
			    uint64_t start_cluster, uint64_t *next_cluster,
			    uint8_t *buf, size_t buf_sz)
{
	struct spdk_blob_md_descriptor_extent *desc;
	size_t cur_sz;
	uint64_t i, extent_idx;
	uint32_t lba, lba_per_cluster, lba_count;

	/* The buffer must have room for at least one extent */
	cur_sz = sizeof(struct spdk_blob_md_descriptor) + sizeof(desc->extents[0]);
	if (buf_sz < cur_sz) {
		*next_cluster = start_cluster;
		return;
	}

	desc = (struct spdk_blob_md_descriptor_extent *)buf;
	desc->type = SPDK_MD_DESCRIPTOR_TYPE_EXTENT;

	lba_per_cluster = _spdk_bs_cluster_to_lba(blob->bs, 1);

	lba = blob->active.clusters[start_cluster];
	lba_count = lba_per_cluster;
	extent_idx = 0;
	for (i = start_cluster + 1; i < blob->active.num_clusters; i++) {
		if ((lba + lba_count) == blob->active.clusters[i]) {
			lba_count += lba_per_cluster;
			continue;
		}
		desc->extents[extent_idx].cluster_idx = lba / lba_per_cluster;
		desc->extents[extent_idx].length = lba_count / lba_per_cluster;
		extent_idx++;

		cur_sz += sizeof(desc->extents[extent_idx]);

		if (buf_sz < cur_sz) {
			/* If we ran out of buffer space, return */
			desc->length = sizeof(desc->extents[0]) * extent_idx;
			*next_cluster = i;
			return;
		}

		lba = blob->active.clusters[i];
		lba_count = lba_per_cluster;
	}

	desc->extents[extent_idx].cluster_idx = lba / lba_per_cluster;
	desc->extents[extent_idx].length = lba_count / lba_per_cluster;
	extent_idx++;

	desc->length = sizeof(desc->extents[0]) * extent_idx;
	*next_cluster = blob->active.num_clusters;

	return;
}
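
/* Worked example (hypothetical values): with one LBA per cluster and
 * active.clusters = { 8, 9, 10, 20 }, the loop above coalesces the
 * contiguous run 8-10 into extents[0] = { .cluster_idx = 8, .length = 3 }
 * and then emits extents[1] = { .cluster_idx = 20, .length = 1 }, so
 * desc->length ends up as 2 * sizeof(desc->extents[0]).
 */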

static void
_spdk_blob_serialize_flags(const struct spdk_blob *blob,
			   uint8_t *buf, size_t *buf_sz)
{
	struct spdk_blob_md_descriptor_flags *desc;

	/*
	 * Flags get serialized first, so we should always have room for the flags
	 *  descriptor.
	 */
	assert(*buf_sz >= sizeof(*desc));

	desc = (struct spdk_blob_md_descriptor_flags *)buf;
	desc->type = SPDK_MD_DESCRIPTOR_TYPE_FLAGS;
	desc->length = sizeof(*desc) - sizeof(struct spdk_blob_md_descriptor);
	desc->invalid_flags = blob->invalid_flags;
	desc->data_ro_flags = blob->data_ro_flags;
	desc->md_ro_flags = blob->md_ro_flags;

	*buf_sz -= sizeof(*desc);
}

static int
_spdk_blob_serialize_xattrs(const struct spdk_blob *blob,
			    const struct spdk_xattr_tailq *xattrs, bool internal,
			    struct spdk_blob_md_page **pages,
			    struct spdk_blob_md_page *cur_page,
			    uint32_t *page_count, uint8_t **buf,
			    size_t *remaining_sz)
{
	const struct spdk_xattr	*xattr;
	int	rc;

	TAILQ_FOREACH(xattr, xattrs, link) {
		size_t required_sz = 0;

		rc = _spdk_blob_serialize_xattr(xattr,
						*buf, *remaining_sz,
						&required_sz, internal);
		if (rc < 0) {
			/* Need to add a new page to the chain */
			rc = _spdk_blob_serialize_add_page(blob, pages, page_count,
							   &cur_page);
			if (rc < 0) {
				spdk_dma_free(*pages);
				*pages = NULL;
				*page_count = 0;
				return rc;
			}

			*buf = (uint8_t *)cur_page->descriptors;
			*remaining_sz = sizeof(cur_page->descriptors);

			/* Try again */
			required_sz = 0;
			rc = _spdk_blob_serialize_xattr(xattr,
							*buf, *remaining_sz,
							&required_sz, internal);

			if (rc < 0) {
				spdk_dma_free(*pages);
				*pages = NULL;
				*page_count = 0;
				return rc;
			}
		}

		*remaining_sz -= required_sz;
		*buf += required_sz;
	}

	return 0;
}

static int
_spdk_blob_serialize(const struct spdk_blob *blob, struct spdk_blob_md_page **pages,
		     uint32_t *page_count)
{
	struct spdk_blob_md_page		*cur_page;
	int					rc;
	uint8_t					*buf;
	size_t					remaining_sz;
	uint64_t				last_cluster;

	assert(pages != NULL);
	assert(page_count != NULL);
	assert(blob != NULL);
	assert(blob->state == SPDK_BLOB_STATE_DIRTY);

	*pages = NULL;
	*page_count = 0;

	/* A blob always has at least 1 page, even if it has no descriptors */
	rc = _spdk_blob_serialize_add_page(blob, pages, page_count, &cur_page);
	if (rc < 0) {
		return rc;
	}

	buf = (uint8_t *)cur_page->descriptors;
	remaining_sz = sizeof(cur_page->descriptors);

	/* Serialize flags */
	_spdk_blob_serialize_flags(blob, buf, &remaining_sz);
	buf += sizeof(struct spdk_blob_md_descriptor_flags);

	/* Serialize xattrs */
	rc = _spdk_blob_serialize_xattrs(blob, &blob->xattrs, false,
					 pages, cur_page, page_count, &buf, &remaining_sz);
	if (rc < 0) {
		return rc;
	}

	/* Serialize internal xattrs */
	rc = _spdk_blob_serialize_xattrs(blob, &blob->xattrs_internal, true,
					 pages, cur_page, page_count, &buf, &remaining_sz);
	if (rc < 0) {
		return rc;
	}

	/* Serialize extents */
	last_cluster = 0;
	while (last_cluster < blob->active.num_clusters) {
		_spdk_blob_serialize_extent(blob, last_cluster, &last_cluster,
					    buf, remaining_sz);

		if (last_cluster == blob->active.num_clusters) {
			break;
		}

		rc = _spdk_blob_serialize_add_page(blob, pages, page_count,
						   &cur_page);
		if (rc < 0) {
			return rc;
		}

		buf = (uint8_t *)cur_page->descriptors;
		remaining_sz = sizeof(cur_page->descriptors);
	}

	return 0;
}

struct spdk_blob_load_ctx {
	struct spdk_blob		*blob;

	struct spdk_blob_md_page	*pages;
	uint32_t			num_pages;
	spdk_bs_sequence_t	        *seq;

	spdk_bs_sequence_cpl		cb_fn;
	void				*cb_arg;
};

static uint32_t
_spdk_blob_md_page_calc_crc(void *page)
{
	uint32_t		crc;

	crc = BLOB_CRC32C_INITIAL;
	crc = spdk_crc32c_update(page, SPDK_BS_PAGE_SIZE - 4, crc);
	crc ^= BLOB_CRC32C_INITIAL;

	return crc;
}
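
/* The CRC covers the first SPDK_BS_PAGE_SIZE - 4 bytes of the page, i.e.
 * everything except the 4-byte crc field that sits at the very end of
 * spdk_blob_md_page; the computed value is stored there when the page is
 * persisted (see _spdk_blob_persist_start).
 */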

static void
_spdk_blob_load_final(void *cb_arg, int bserrno)
{
	struct spdk_blob_load_ctx	*ctx = cb_arg;
	struct spdk_blob		*blob = ctx->blob;

	_spdk_blob_mark_clean(blob);

	ctx->cb_fn(ctx->seq, ctx->cb_arg, bserrno);

	/* Free the memory */
	spdk_dma_free(ctx->pages);
	free(ctx);
}

static void
_spdk_blob_load_snapshot_cpl(void *cb_arg, struct spdk_blob *snapshot, int bserrno)
{
	struct spdk_blob_load_ctx	*ctx = cb_arg;
	struct spdk_blob		*blob = ctx->blob;

	if (bserrno != 0) {
		goto error;
	}

	blob->back_bs_dev = spdk_bs_create_blob_bs_dev(snapshot);

	if (blob->back_bs_dev == NULL) {
		bserrno = -ENOMEM;
		goto error;
	}

	_spdk_blob_load_final(ctx, bserrno);
	return;

error:
	SPDK_ERRLOG("Failed to open snapshot\n");
	_spdk_blob_free(blob);
	ctx->cb_fn(ctx->seq, NULL, bserrno);
	spdk_dma_free(ctx->pages);
	free(ctx);
}

static void
_spdk_blob_load_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_load_ctx	*ctx = cb_arg;
	struct spdk_blob		*blob = ctx->blob;
	struct spdk_blob_md_page	*page;
	const void			*value;
	size_t				len;
	int				rc;
	uint32_t			crc;

	page = &ctx->pages[ctx->num_pages - 1];
	crc = _spdk_blob_md_page_calc_crc(page);
	if (crc != page->crc) {
		SPDK_ERRLOG("Metadata page %u crc mismatch\n", ctx->num_pages);
		_spdk_blob_free(blob);
		ctx->cb_fn(seq, NULL, -EINVAL);
		spdk_dma_free(ctx->pages);
		free(ctx);
		return;
	}

	if (page->next != SPDK_INVALID_MD_PAGE) {
		uint32_t next_page = page->next;
		uint64_t next_lba = _spdk_bs_page_to_lba(blob->bs, blob->bs->md_start + next_page);

		assert(next_lba < (blob->bs->md_start + blob->bs->md_len));

		/* Read the next page */
		ctx->num_pages++;
		ctx->pages = spdk_dma_realloc(ctx->pages, (sizeof(*page) * ctx->num_pages),
					      sizeof(*page), NULL);
		if (ctx->pages == NULL) {
			ctx->cb_fn(seq, ctx->cb_arg, -ENOMEM);
			free(ctx);
			return;
		}

		spdk_bs_sequence_read_dev(seq, &ctx->pages[ctx->num_pages - 1],
					  next_lba,
					  _spdk_bs_byte_to_lba(blob->bs, sizeof(*page)),
					  _spdk_blob_load_cpl, ctx);
		return;
	}

	/* Parse the pages */
	rc = _spdk_blob_parse(ctx->pages, ctx->num_pages, blob);
	if (rc) {
		_spdk_blob_free(blob);
		ctx->cb_fn(seq, NULL, rc);
		spdk_dma_free(ctx->pages);
		free(ctx);
		return;
	}
	ctx->seq = seq;

	if (spdk_blob_is_thin_provisioned(blob)) {
		rc = _spdk_blob_get_xattr_value(blob, BLOB_SNAPSHOT, &value, &len, true);
		if (rc == 0) {
			if (len != sizeof(spdk_blob_id)) {
				_spdk_blob_free(blob);
				ctx->cb_fn(seq, NULL, -EINVAL);
				spdk_dma_free(ctx->pages);
				free(ctx);
				return;
			}
			/* open snapshot blob and continue in the callback function */
			blob->parent_id = *(spdk_blob_id *)value;
			spdk_bs_open_blob(blob->bs, blob->parent_id,
					  _spdk_blob_load_snapshot_cpl, ctx);
			return;
		} else {
			/* add zeroes_dev for thin provisioned blob */
			blob->back_bs_dev = spdk_bs_create_zeroes_dev();
		}
	} else {
		/* standard blob */
		blob->back_bs_dev = NULL;
	}
	_spdk_blob_load_final(ctx, bserrno);
}

/* Load a blob from disk given a blobid */
static void
_spdk_blob_load(spdk_bs_sequence_t *seq, struct spdk_blob *blob,
		spdk_bs_sequence_cpl cb_fn, void *cb_arg)
{
	struct spdk_blob_load_ctx *ctx;
	struct spdk_blob_store *bs;
	uint32_t page_num;
	uint64_t lba;

	_spdk_blob_verify_md_op(blob);

	bs = blob->bs;

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(seq, cb_arg, -ENOMEM);
		return;
	}

	ctx->blob = blob;
	ctx->pages = spdk_dma_realloc(ctx->pages, SPDK_BS_PAGE_SIZE,
				      SPDK_BS_PAGE_SIZE, NULL);
	if (!ctx->pages) {
		free(ctx);
		cb_fn(seq, cb_arg, -ENOMEM);
		return;
	}
	ctx->num_pages = 1;
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;

	page_num = _spdk_bs_blobid_to_page(blob->id);
	lba = _spdk_bs_page_to_lba(blob->bs, bs->md_start + page_num);

	blob->state = SPDK_BLOB_STATE_LOADING;

	spdk_bs_sequence_read_dev(seq, &ctx->pages[0], lba,
				  _spdk_bs_byte_to_lba(bs, SPDK_BS_PAGE_SIZE),
				  _spdk_blob_load_cpl, ctx);
}
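
/* Load sequence, summarized: the first metadata page of a blob lives at a
 * fixed page derived from its blobid (bs->md_start + page_num), so a load
 * always starts with a single page read. _spdk_blob_load_cpl then follows
 * page->next links, growing ctx->pages by one page per hop, until a page
 * with next == SPDK_INVALID_MD_PAGE terminates the chain and the
 * accumulated pages are parsed.
 */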

struct spdk_blob_persist_ctx {
	struct spdk_blob		*blob;

	struct spdk_bs_super_block	*super;

	struct spdk_blob_md_page	*pages;

	uint64_t			idx;

	spdk_bs_sequence_t		*seq;
	spdk_bs_sequence_cpl		cb_fn;
	void				*cb_arg;
};

static void
_spdk_blob_persist_complete(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_persist_ctx	*ctx = cb_arg;
	struct spdk_blob		*blob = ctx->blob;

	if (bserrno == 0) {
		_spdk_blob_mark_clean(blob);
	}

	/* Call user callback */
	ctx->cb_fn(seq, ctx->cb_arg, bserrno);

	/* Free the memory */
	spdk_dma_free(ctx->pages);
	free(ctx);
}

static void
_spdk_blob_persist_unmap_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_persist_ctx	*ctx = cb_arg;
	struct spdk_blob		*blob = ctx->blob;
	struct spdk_blob_store		*bs = blob->bs;
	void				*tmp;
	size_t				i;

	/* Release all clusters that were truncated */
	for (i = blob->active.num_clusters; i < blob->active.cluster_array_size; i++) {
		uint32_t cluster_num = _spdk_bs_lba_to_cluster(bs, blob->active.clusters[i]);

		/* Nothing to release if it was not allocated */
		if (blob->active.clusters[i] != 0) {
			_spdk_bs_release_cluster(bs, cluster_num);
		}
	}

	if (blob->active.num_clusters == 0) {
		free(blob->active.clusters);
		blob->active.clusters = NULL;
		blob->active.cluster_array_size = 0;
	} else {
		tmp = realloc(blob->active.clusters, sizeof(uint64_t) * blob->active.num_clusters);
		assert(tmp != NULL);
		blob->active.clusters = tmp;
		blob->active.cluster_array_size = blob->active.num_clusters;
	}

	_spdk_blob_persist_complete(seq, ctx, bserrno);
}

static void
_spdk_blob_persist_unmap_clusters(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_persist_ctx	*ctx = cb_arg;
	struct spdk_blob		*blob = ctx->blob;
	struct spdk_blob_store		*bs = blob->bs;
	spdk_bs_batch_t			*batch;
	size_t				i;
	uint64_t			lba;
	uint32_t			lba_count;

	/* Clusters don't move around in blobs. The list shrinks or grows
	 * at the end, but no changes ever occur in the middle of the list.
	 */

	batch = spdk_bs_sequence_to_batch(seq, _spdk_blob_persist_unmap_clusters_cpl, ctx);

	/* Unmap all clusters that were truncated */
	lba = 0;
	lba_count = 0;
	for (i = blob->active.num_clusters; i < blob->active.cluster_array_size; i++) {
		uint64_t next_lba = blob->active.clusters[i];
		uint32_t next_lba_count = _spdk_bs_cluster_to_lba(bs, 1);

		if (next_lba > 0 && (lba + lba_count) == next_lba) {
			/* This cluster is contiguous with the previous one. */
			lba_count += next_lba_count;
			continue;
		}

		/* This cluster is not contiguous with the previous one. */

		/* If a run of LBAs previously existed, send it as one unmap. */
		if (lba_count > 0) {
			spdk_bs_batch_unmap_dev(batch, lba, lba_count);
		}

		/* Start building the next batch */
		lba = next_lba;
		if (next_lba > 0) {
			lba_count = next_lba_count;
		} else {
			lba_count = 0;
		}
	}

	/* If we ended with a contiguous set of LBAs, send the unmap now */
	if (lba_count > 0) {
		spdk_bs_batch_unmap_dev(batch, lba, lba_count);
	}

	spdk_bs_batch_close(batch);
}

static void
_spdk_blob_persist_zero_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_persist_ctx	*ctx = cb_arg;
	struct spdk_blob		*blob = ctx->blob;
	struct spdk_blob_store		*bs = blob->bs;
	size_t				i;

	/* This loop starts at 1 because the first page is special and handled
	 * below. The pages (except the first) are never written in place, so
	 * every page in the clean list must be released from the used_md_pages
	 * mask now that it has been zeroed on disk.
	 */
	for (i = 1; i < blob->clean.num_pages; i++) {
		spdk_bit_array_clear(bs->used_md_pages, blob->clean.pages[i]);
	}

	if (blob->active.num_pages == 0) {
		uint32_t page_num;

		page_num = _spdk_bs_blobid_to_page(blob->id);
		spdk_bit_array_clear(bs->used_md_pages, page_num);
	}

	/* Move on to unmapping clusters */
	_spdk_blob_persist_unmap_clusters(seq, ctx, 0);
}

static void
_spdk_blob_persist_zero_pages(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_persist_ctx	*ctx = cb_arg;
	struct spdk_blob		*blob = ctx->blob;
	struct spdk_blob_store		*bs = blob->bs;
	uint64_t			lba;
	uint32_t			lba_count;
	spdk_bs_batch_t			*batch;
	size_t				i;

	batch = spdk_bs_sequence_to_batch(seq, _spdk_blob_persist_zero_pages_cpl, ctx);

	lba_count = _spdk_bs_byte_to_lba(bs, SPDK_BS_PAGE_SIZE);

	/* This loop starts at 1 because the first page is special and handled
	 * below. The pages (except the first) are never written in place,
	 * so any pages in the clean list must be zeroed.
	 */
	for (i = 1; i < blob->clean.num_pages; i++) {
		lba = _spdk_bs_page_to_lba(bs, bs->md_start + blob->clean.pages[i]);

		spdk_bs_batch_write_zeroes_dev(batch, lba, lba_count);
	}

	/* The first page will only be zeroed if this is a delete. */
	if (blob->active.num_pages == 0) {
		uint32_t page_num;

		/* The first page in the metadata goes where the blobid indicates */
		page_num = _spdk_bs_blobid_to_page(blob->id);
		lba = _spdk_bs_page_to_lba(bs, bs->md_start + page_num);

		spdk_bs_batch_write_zeroes_dev(batch, lba, lba_count);
	}

	spdk_bs_batch_close(batch);
}

static void
_spdk_blob_persist_write_page_root(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_persist_ctx	*ctx = cb_arg;
	struct spdk_blob		*blob = ctx->blob;
	struct spdk_blob_store		*bs = blob->bs;
	uint64_t			lba;
	uint32_t			lba_count;
	struct spdk_blob_md_page	*page;

	if (blob->active.num_pages == 0) {
		/* Move on to the next step */
		_spdk_blob_persist_zero_pages(seq, ctx, 0);
		return;
	}

	lba_count = _spdk_bs_byte_to_lba(bs, sizeof(*page));

	page = &ctx->pages[0];
	/* The first page in the metadata goes where the blobid indicates */
	lba = _spdk_bs_page_to_lba(bs, bs->md_start + _spdk_bs_blobid_to_page(blob->id));

	spdk_bs_sequence_write_dev(seq, page, lba, lba_count,
				   _spdk_blob_persist_zero_pages, ctx);
}

static void
_spdk_blob_persist_write_page_chain(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_persist_ctx	*ctx = cb_arg;
	struct spdk_blob		*blob = ctx->blob;
	struct spdk_blob_store		*bs = blob->bs;
	uint64_t			lba;
	uint32_t			lba_count;
	struct spdk_blob_md_page	*page;
	spdk_bs_batch_t			*batch;
	size_t				i;

	/* Clusters don't move around in blobs. The list shrinks or grows
	 * at the end, but no changes ever occur in the middle of the list.
	 */

	lba_count = _spdk_bs_byte_to_lba(bs, sizeof(*page));

	batch = spdk_bs_sequence_to_batch(seq, _spdk_blob_persist_write_page_root, ctx);

	/* This starts at 1. The root page is not written until
	 * all of the others are finished.
	 */
	for (i = 1; i < blob->active.num_pages; i++) {
		page = &ctx->pages[i];
		assert(page->sequence_num == i);

		lba = _spdk_bs_page_to_lba(bs, bs->md_start + blob->active.pages[i]);

		spdk_bs_batch_write_dev(batch, page, lba, lba_count);
	}

	spdk_bs_batch_close(batch);
}

static int
_spdk_blob_resize(struct spdk_blob *blob, uint64_t sz)
{
	uint64_t	i;
	uint64_t	*tmp;
	uint64_t	lfc; /* lowest free cluster */
	uint64_t	num_clusters;
	struct spdk_blob_store *bs;

	bs = blob->bs;

	_spdk_blob_verify_md_op(blob);

	if (blob->active.num_clusters == sz) {
		return 0;
	}

	if (blob->active.num_clusters < blob->active.cluster_array_size) {
		/* If this blob was resized to be larger, then smaller, then
		 * larger without syncing, then the cluster array already
		 * contains spare assigned clusters we can use.
		 */
		num_clusters = spdk_min(blob->active.cluster_array_size,
					sz);
	} else {
		num_clusters = blob->active.num_clusters;
	}

	/* Do two passes - one to verify that we can obtain enough clusters
	 * and another to actually claim them.
	 */

	if (spdk_blob_is_thin_provisioned(blob) == false) {
		lfc = 0;
		for (i = num_clusters; i < sz; i++) {
			lfc = spdk_bit_array_find_first_clear(bs->used_clusters, lfc);
			if (lfc >= bs->total_clusters) {
				/* No more free clusters. Cannot satisfy the request */
				return -ENOSPC;
			}
			lfc++;
		}
	}

	if (sz > num_clusters) {
		/* Expand the cluster array if necessary.
		 * We only shrink the array when persisting.
		 */
		tmp = realloc(blob->active.clusters, sizeof(uint64_t) * sz);
		if (sz > 0 && tmp == NULL) {
			return -ENOMEM;
		}
		memset(tmp + blob->active.cluster_array_size, 0,
		       sizeof(uint64_t) * (sz - blob->active.cluster_array_size));
		blob->active.clusters = tmp;
		blob->active.cluster_array_size = sz;
	}

	blob->state = SPDK_BLOB_STATE_DIRTY;

	if (spdk_blob_is_thin_provisioned(blob) == false) {
		lfc = 0;
		for (i = num_clusters; i < sz; i++) {
			_spdk_bs_allocate_cluster(blob, i, &lfc, true);
			lfc++;
		}
	}

	blob->active.num_clusters = sz;

	return 0;
}
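
/* Note on the two-pass structure above: the verification pass makes the
 * common failure mode (-ENOSPC) side-effect free, so a failed grow leaves
 * the blob exactly as it was. The new size only becomes durable once the
 * metadata is persisted again (e.g. by a subsequent md sync or close).
 */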

static void
_spdk_blob_persist_start(struct spdk_blob_persist_ctx *ctx)
{
	spdk_bs_sequence_t *seq = ctx->seq;
	struct spdk_blob *blob = ctx->blob;
	struct spdk_blob_store *bs = blob->bs;
	uint64_t i;
	uint32_t page_num;
	void *tmp;
	int rc;

	if (blob->active.num_pages == 0) {
		/* This is the signal that the blob should be deleted.
		 * Immediately jump to the clean up routine. */
		assert(blob->clean.num_pages > 0);
		ctx->idx = blob->clean.num_pages - 1;
		blob->state = SPDK_BLOB_STATE_CLEAN;
		_spdk_blob_persist_zero_pages(seq, ctx, 0);
		return;
	}

	/* Generate the new metadata */
	rc = _spdk_blob_serialize(blob, &ctx->pages, &blob->active.num_pages);
	if (rc < 0) {
		_spdk_blob_persist_complete(seq, ctx, rc);
		return;
	}

	assert(blob->active.num_pages >= 1);

	/* Resize the cache of page indices */
	tmp = realloc(blob->active.pages, blob->active.num_pages * sizeof(*blob->active.pages));
	if (!tmp) {
		_spdk_blob_persist_complete(seq, ctx, -ENOMEM);
		return;
	}
	blob->active.pages = tmp;

	/* Assign this metadata to pages. This requires two passes -
	 * one to verify that there are enough pages and a second
	 * to actually claim them. */
	page_num = 0;
	/* Note that this loop starts at one. The first page location is fixed by the blobid. */
	for (i = 1; i < blob->active.num_pages; i++) {
		page_num = spdk_bit_array_find_first_clear(bs->used_md_pages, page_num);
		if (page_num >= spdk_bit_array_capacity(bs->used_md_pages)) {
			_spdk_blob_persist_complete(seq, ctx, -ENOMEM);
			return;
		}
		page_num++;
	}

	page_num = 0;
	blob->active.pages[0] = _spdk_bs_blobid_to_page(blob->id);
	for (i = 1; i < blob->active.num_pages; i++) {
		page_num = spdk_bit_array_find_first_clear(bs->used_md_pages, page_num);
		ctx->pages[i - 1].next = page_num;
		/* Now that previous metadata page is complete, calculate the crc for it. */
		ctx->pages[i - 1].crc = _spdk_blob_md_page_calc_crc(&ctx->pages[i - 1]);
		blob->active.pages[i] = page_num;
		spdk_bit_array_set(bs->used_md_pages, page_num);
		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Claiming page %u for blob %lu\n", page_num, blob->id);
		page_num++;
	}
	ctx->pages[i - 1].crc = _spdk_blob_md_page_calc_crc(&ctx->pages[i - 1]);
	/* Start writing the metadata from last page to first */
	ctx->idx = blob->active.num_pages - 1;
	blob->state = SPDK_BLOB_STATE_CLEAN;
	_spdk_blob_persist_write_page_chain(seq, ctx, 0);
}

static void
_spdk_blob_persist_dirty_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_persist_ctx *ctx = cb_arg;

	ctx->blob->bs->clean = 0;

	spdk_dma_free(ctx->super);

	_spdk_blob_persist_start(ctx);
}

static void
_spdk_bs_write_super(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs,
		     struct spdk_bs_super_block *super, spdk_bs_sequence_cpl cb_fn, void *cb_arg);

static void
_spdk_blob_persist_dirty(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_persist_ctx *ctx = cb_arg;

	ctx->super->clean = 0;

	_spdk_bs_write_super(seq, ctx->blob->bs, ctx->super, _spdk_blob_persist_dirty_cpl, ctx);
}

/* Write a blob to disk */
static void
_spdk_blob_persist(spdk_bs_sequence_t *seq, struct spdk_blob *blob,
		   spdk_bs_sequence_cpl cb_fn, void *cb_arg)
{
	struct spdk_blob_persist_ctx *ctx;

	_spdk_blob_verify_md_op(blob);

	if (blob->state == SPDK_BLOB_STATE_CLEAN) {
		cb_fn(seq, cb_arg, 0);
		return;
	}

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(seq, cb_arg, -ENOMEM);
		return;
	}
	ctx->blob = blob;
	ctx->seq = seq;
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;

	if (blob->bs->clean) {
		ctx->super = spdk_dma_zmalloc(sizeof(*ctx->super), 0x1000, NULL);
		if (!ctx->super) {
			cb_fn(seq, cb_arg, -ENOMEM);
			free(ctx);
			return;
		}

		spdk_bs_sequence_read_dev(seq, ctx->super, _spdk_bs_page_to_lba(blob->bs, 0),
					  _spdk_bs_byte_to_lba(blob->bs, sizeof(*ctx->super)),
					  _spdk_blob_persist_dirty, ctx);
	} else {
		_spdk_blob_persist_start(ctx);
	}
}

struct spdk_blob_copy_cluster_ctx {
	struct spdk_blob *blob;
	uint8_t *buf;
	uint64_t page;
	uint64_t new_cluster;
	spdk_bs_sequence_t *seq;
};

static void
_spdk_blob_allocate_and_copy_cluster_cpl(void *cb_arg, int bserrno)
{
	struct spdk_blob_copy_cluster_ctx *ctx = cb_arg;
	struct spdk_bs_request_set *set = (struct spdk_bs_request_set *)ctx->seq;
	TAILQ_HEAD(, spdk_bs_request_set) requests;
	spdk_bs_user_op_t *op;

	TAILQ_INIT(&requests);
	TAILQ_SWAP(&set->channel->need_cluster_alloc, &requests, spdk_bs_request_set, link);

	while (!TAILQ_EMPTY(&requests)) {
		op = TAILQ_FIRST(&requests);
		TAILQ_REMOVE(&requests, op, link);
		if (bserrno == 0) {
			spdk_bs_user_op_execute(op);
		} else {
			spdk_bs_user_op_abort(op);
		}
	}

	spdk_dma_free(ctx->buf);
	free(ctx);
}

static void
_spdk_blob_insert_cluster_cpl(void *cb_arg, int bserrno)
{
	struct spdk_blob_copy_cluster_ctx *ctx = cb_arg;

	if (bserrno) {
		uint32_t cluster_number;

		if (bserrno == -EEXIST) {
			/* The metadata insert failed because another thread
			 * allocated the cluster first. Free our cluster
			 * but continue without error. */
			bserrno = 0;
		}

		cluster_number = _spdk_bs_page_to_cluster(ctx->blob->bs, ctx->page);
		_spdk_bs_release_cluster(ctx->blob->bs, cluster_number);
	}

	spdk_bs_sequence_finish(ctx->seq, bserrno);
}

static void
_spdk_blob_write_copy_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_copy_cluster_ctx *ctx = cb_arg;
	uint32_t cluster_number;

	if (bserrno) {
		/* The write failed, so jump to the final completion handler */
		spdk_bs_sequence_finish(seq, bserrno);
		return;
	}

	cluster_number = _spdk_bs_page_to_cluster(ctx->blob->bs, ctx->page);

	_spdk_blob_insert_cluster_on_md_thread(ctx->blob, cluster_number, ctx->new_cluster,
					       _spdk_blob_insert_cluster_cpl, ctx);
}

static void
_spdk_blob_write_copy(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_copy_cluster_ctx *ctx = cb_arg;

	if (bserrno != 0) {
		/* The read failed, so jump to the final completion handler */
		spdk_bs_sequence_finish(seq, bserrno);
		return;
	}

	/* Write whole cluster */
	spdk_bs_sequence_write_dev(seq, ctx->buf,
				   _spdk_bs_cluster_to_lba(ctx->blob->bs, ctx->new_cluster),
				   _spdk_bs_cluster_to_lba(ctx->blob->bs, 1),
				   _spdk_blob_write_copy_cpl, ctx);
}

static void
_spdk_bs_allocate_and_copy_cluster(struct spdk_blob *blob,
				   struct spdk_io_channel *_ch,
				   uint64_t offset, spdk_bs_user_op_t *op)
{
	struct spdk_bs_cpl cpl;
	struct spdk_bs_channel *ch;
	struct spdk_blob_copy_cluster_ctx *ctx;
	uint32_t cluster_start_page;
	uint32_t cluster_number;
	int rc;

	ch = spdk_io_channel_get_ctx(_ch);

	if (!TAILQ_EMPTY(&ch->need_cluster_alloc)) {
		/* There are already operations pending. Queue this user op
		 * and return because it will be re-executed when the outstanding
		 * cluster allocation completes. */
		TAILQ_INSERT_TAIL(&ch->need_cluster_alloc, op, link);
		return;
	}

	/* Round the page offset down to the first page in the cluster */
	cluster_start_page = _spdk_bs_page_to_cluster_start(blob, offset);

	/* Calculate which index in the metadata cluster array the corresponding
	 * cluster is supposed to be at. */
	cluster_number = _spdk_bs_page_to_cluster(blob->bs, cluster_start_page);

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		spdk_bs_user_op_abort(op);
		return;
	}

	assert(blob->bs->cluster_sz % blob->back_bs_dev->blocklen == 0);

	ctx->blob = blob;
	ctx->page = cluster_start_page;

	ctx->buf = spdk_dma_malloc(blob->bs->cluster_sz, blob->back_bs_dev->blocklen, NULL);
	if (!ctx->buf) {
		SPDK_ERRLOG("DMA allocation for cluster of size = %" PRIu32 " failed.\n",
			    blob->bs->cluster_sz);
		free(ctx);
		spdk_bs_user_op_abort(op);
		return;
	}

	rc = _spdk_bs_allocate_cluster(blob, cluster_number, &ctx->new_cluster, false);
	if (rc != 0) {
		spdk_dma_free(ctx->buf);
		free(ctx);
		spdk_bs_user_op_abort(op);
		return;
	}

	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
	cpl.u.blob_basic.cb_fn = _spdk_blob_allocate_and_copy_cluster_cpl;
	cpl.u.blob_basic.cb_arg = ctx;

	ctx->seq = spdk_bs_sequence_start(_ch, &cpl);
	if (!ctx->seq) {
		_spdk_bs_release_cluster(blob->bs, ctx->new_cluster);
		spdk_dma_free(ctx->buf);
		free(ctx);
		spdk_bs_user_op_abort(op);
		return;
	}

	/* Queue the user op to block other incoming operations */
	TAILQ_INSERT_TAIL(&ch->need_cluster_alloc, op, link);

	/* Read cluster from backing device */
	spdk_bs_sequence_read_bs_dev(ctx->seq, blob->back_bs_dev, ctx->buf,
				     _spdk_bs_dev_page_to_lba(blob->back_bs_dev, cluster_start_page),
				     _spdk_bs_dev_byte_to_lba(blob->back_bs_dev, blob->bs->cluster_sz),
				     _spdk_blob_write_copy, ctx);
}
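
/* Copy-on-write flow, summarized: allocate a free cluster (without updating
 * the cluster map yet), read the whole backing cluster into a bounce buffer,
 * write it to the newly allocated cluster, and only then insert the cluster
 * into the blob's map on the metadata thread. Writers that arrive in the
 * meantime are parked on need_cluster_alloc and are replayed (or aborted) in
 * _spdk_blob_allocate_and_copy_cluster_cpl once the insert completes.
 */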

static void
_spdk_blob_calculate_lba_and_lba_count(struct spdk_blob *blob, uint64_t page, uint64_t length,
				       uint64_t *lba,	uint32_t *lba_count)
{
	*lba_count = _spdk_bs_page_to_lba(blob->bs, length);

	if (!_spdk_bs_page_is_allocated(blob, page)) {
		assert(blob->back_bs_dev != NULL);
		*lba = _spdk_bs_dev_page_to_lba(blob->back_bs_dev, page);
		*lba_count = _spdk_bs_blob_lba_to_back_dev_lba(blob, *lba_count);
	} else {
		*lba = _spdk_bs_blob_page_to_lba(blob, page);
	}
}
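
/* Routing rule, for reference: for an unallocated page of a thin-provisioned
 * blob, *lba and *lba_count are expressed in the backing device's block size
 * (a zeroes device or a snapshot), so a read is serviced by back_bs_dev.
 * For an allocated page both values refer to the blobstore's own device.
 */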

struct op_split_ctx {
	struct spdk_blob *blob;
	struct spdk_io_channel *channel;
	uint64_t page_offset;
	uint64_t pages_remaining;
	void *curr_payload;
	enum spdk_blob_op_type op_type;
	spdk_bs_sequence_t *seq;
};

static void
_spdk_blob_request_submit_op_split_next(void *cb_arg, int bserrno)
{
	struct op_split_ctx	*ctx = cb_arg;
	struct spdk_blob	*blob = ctx->blob;
	struct spdk_io_channel	*ch = ctx->channel;
	enum spdk_blob_op_type	op_type = ctx->op_type;
	uint8_t			*buf = ctx->curr_payload;
	uint64_t		offset = ctx->page_offset;
	uint64_t		length = ctx->pages_remaining;
	uint64_t		op_length;

	if (bserrno != 0 || ctx->pages_remaining == 0) {
		spdk_bs_sequence_finish(ctx->seq, bserrno);
		free(ctx);
		return;
	}

	op_length = spdk_min(length, _spdk_bs_num_pages_to_cluster_boundary(blob, offset));

	/* Update length and payload for next operation */
	ctx->pages_remaining -= op_length;
	ctx->page_offset += op_length;
	if (op_type == SPDK_BLOB_WRITE || op_type == SPDK_BLOB_READ) {
		ctx->curr_payload += (op_length * SPDK_BS_PAGE_SIZE);
	}

	switch (op_type) {
	case SPDK_BLOB_READ:
		spdk_blob_io_read(blob, ch, buf, offset, op_length,
				  _spdk_blob_request_submit_op_split_next, ctx);
		break;
	case SPDK_BLOB_WRITE:
		spdk_blob_io_write(blob, ch, buf, offset, op_length,
				   _spdk_blob_request_submit_op_split_next, ctx);
		break;
	case SPDK_BLOB_UNMAP:
		spdk_blob_io_unmap(blob, ch, offset, op_length,
				   _spdk_blob_request_submit_op_split_next, ctx);
		break;
	case SPDK_BLOB_WRITE_ZEROES:
		spdk_blob_io_write_zeroes(blob, ch, offset, op_length,
					  _spdk_blob_request_submit_op_split_next, ctx);
		break;
	case SPDK_BLOB_READV:
	case SPDK_BLOB_WRITEV:
		SPDK_ERRLOG("readv/writev not valid for %s\n", __func__);
		spdk_bs_sequence_finish(ctx->seq, -EINVAL);
		free(ctx);
		break;
	}
}

static void
_spdk_blob_request_submit_op_split(struct spdk_io_channel *ch, struct spdk_blob *blob,
				   void *payload, uint64_t offset, uint64_t length,
				   spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type)
{
	struct op_split_ctx *ctx;
	spdk_bs_sequence_t *seq;
	struct spdk_bs_cpl cpl;

	assert(blob != NULL);

	ctx = calloc(1, sizeof(struct op_split_ctx));
	if (ctx == NULL) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
	cpl.u.blob_basic.cb_fn = cb_fn;
	cpl.u.blob_basic.cb_arg = cb_arg;

	seq = spdk_bs_sequence_start(ch, &cpl);
	if (!seq) {
		free(ctx);
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	ctx->blob = blob;
	ctx->channel = ch;
	ctx->curr_payload = payload;
	ctx->page_offset = offset;
	ctx->pages_remaining = length;
	ctx->op_type = op_type;
	ctx->seq = seq;

	_spdk_blob_request_submit_op_split_next(ctx, 0);
}
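
/* Worked example (hypothetical geometry): with 256 pages per cluster, a
 * write at page offset 250 with length 300 is split at cluster boundaries
 * into sub-operations of 6 pages (250-255), then 256 pages (one full
 * cluster), then the remaining 38 pages, each issued from
 * _spdk_blob_request_submit_op_split_next as the previous one completes.
 */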
1800 
1801 static void
1802 _spdk_blob_request_submit_op_single(struct spdk_io_channel *_ch, struct spdk_blob *blob,
1803 				    void *payload, uint64_t offset, uint64_t length,
1804 				    spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type)
1805 {
1806 	struct spdk_bs_cpl cpl;
1807 	uint64_t lba;
1808 	uint32_t lba_count;
1809 
1810 	assert(blob != NULL);
1811 
1812 	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
1813 	cpl.u.blob_basic.cb_fn = cb_fn;
1814 	cpl.u.blob_basic.cb_arg = cb_arg;
1815 
1816 	_spdk_blob_calculate_lba_and_lba_count(blob, offset, length, &lba, &lba_count);
1817 
1818 	if (blob->frozen_refcnt) {
1819 		/* This blob I/O is frozen */
1820 		spdk_bs_user_op_t *op;
1821 		struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(_ch);
1822 
1823 		op = spdk_bs_user_op_alloc(_ch, &cpl, op_type, blob, payload, 0, offset, length);
1824 		if (!op) {
1825 			cb_fn(cb_arg, -ENOMEM);
1826 			return;
1827 		}
1828 
1829 		TAILQ_INSERT_TAIL(&bs_channel->queued_io, op, link);
1830 
1831 		return;
1832 	}
1833 
1834 	switch (op_type) {
1835 	case SPDK_BLOB_READ: {
1836 		spdk_bs_batch_t *batch;
1837 
1838 		batch = spdk_bs_batch_open(_ch, &cpl);
1839 		if (!batch) {
1840 			cb_fn(cb_arg, -ENOMEM);
1841 			return;
1842 		}
1843 
1844 		if (_spdk_bs_page_is_allocated(blob, offset)) {
1845 			/* Read from the blob */
1846 			spdk_bs_batch_read_dev(batch, payload, lba, lba_count);
1847 		} else {
1848 			/* Read from the backing block device */
1849 			spdk_bs_batch_read_bs_dev(batch, blob->back_bs_dev, payload, lba, lba_count);
1850 		}
1851 
1852 		spdk_bs_batch_close(batch);
1853 		break;
1854 	}
1855 	case SPDK_BLOB_WRITE:
1856 	case SPDK_BLOB_WRITE_ZEROES: {
1857 		if (_spdk_bs_page_is_allocated(blob, offset)) {
1858 			/* Write to the blob */
1859 			spdk_bs_batch_t *batch;
1860 
1861 			if (lba_count == 0) {
1862 				cb_fn(cb_arg, 0);
1863 				return;
1864 			}
1865 
1866 			batch = spdk_bs_batch_open(_ch, &cpl);
1867 			if (!batch) {
1868 				cb_fn(cb_arg, -ENOMEM);
1869 				return;
1870 			}
1871 
1872 			if (op_type == SPDK_BLOB_WRITE) {
1873 				spdk_bs_batch_write_dev(batch, payload, lba, lba_count);
1874 			} else {
1875 				spdk_bs_batch_write_zeroes_dev(batch, lba, lba_count);
1876 			}
1877 
1878 			spdk_bs_batch_close(batch);
1879 		} else {
1880 			/* Queue this operation and allocate the cluster */
1881 			spdk_bs_user_op_t *op;
1882 
1883 			op = spdk_bs_user_op_alloc(_ch, &cpl, op_type, blob, payload, 0, offset, length);
1884 			if (!op) {
1885 				cb_fn(cb_arg, -ENOMEM);
1886 				return;
1887 			}
1888 
1889 			_spdk_bs_allocate_and_copy_cluster(blob, _ch, offset, op);
1890 		}
1891 		break;
1892 	}
1893 	case SPDK_BLOB_UNMAP: {
1894 		spdk_bs_batch_t *batch;
1895 
1896 		batch = spdk_bs_batch_open(_ch, &cpl);
1897 		if (!batch) {
1898 			cb_fn(cb_arg, -ENOMEM);
1899 			return;
1900 		}
1901 
1902 		if (_spdk_bs_page_is_allocated(blob, offset)) {
1903 			spdk_bs_batch_unmap_dev(batch, lba, lba_count);
1904 		}
1905 
1906 		spdk_bs_batch_close(batch);
1907 		break;
1908 	}
1909 	case SPDK_BLOB_READV:
1910 	case SPDK_BLOB_WRITEV:
1911 		SPDK_ERRLOG("readv/writev not valid\n");
1912 		cb_fn(cb_arg, -EINVAL);
1913 		break;
1914 	}
1915 }
1916 
1917 static void
1918 _spdk_blob_request_submit_op(struct spdk_blob *blob, struct spdk_io_channel *_channel,
1919 			     void *payload, uint64_t offset, uint64_t length,
1920 			     spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type)
1921 {
1922 	assert(blob != NULL);
1923 
1924 	if (blob->data_ro && op_type != SPDK_BLOB_READ) {
1925 		cb_fn(cb_arg, -EPERM);
1926 		return;
1927 	}
1928 
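	/* offset and length are in pages; reject I/O extending past the end of the blob */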
1929 	if (offset + length > blob->active.num_clusters * blob->bs->pages_per_cluster) {
1930 		cb_fn(cb_arg, -EINVAL);
1931 		return;
1932 	}
1933 
1934 	if (length <= _spdk_bs_num_pages_to_cluster_boundary(blob, offset)) {
1935 		_spdk_blob_request_submit_op_single(_channel, blob, payload, offset, length,
1936 						    cb_fn, cb_arg, op_type);
1937 	} else {
1938 		_spdk_blob_request_submit_op_split(_channel, blob, payload, offset, length,
1939 						   cb_fn, cb_arg, op_type);
1940 	}
1941 }
1942 
1943 struct rw_iov_ctx {
1944 	struct spdk_blob *blob;
1945 	struct spdk_io_channel *channel;
1946 	spdk_blob_op_complete cb_fn;
1947 	void *cb_arg;
1948 	bool read;
1949 	int iovcnt;
1950 	struct iovec *orig_iov;
1951 	uint64_t page_offset;
1952 	uint64_t pages_remaining;
1953 	uint64_t pages_done;
1954 	struct iovec iov[0];
1955 };
1956 
1957 static void
1958 _spdk_rw_iov_done(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
1959 {
1960 	assert(cb_arg == NULL);
1961 	spdk_bs_sequence_finish(seq, bserrno);
1962 }
1963 
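/*
 * Worked example for the iov rebuild below (a sketch): with
 *  orig_iov = { {base0, 8192}, {base1, 8192} } and pages_done = 1, the first
 *  scan lands on orig_iov[0] with orig_iovoff = 4096.  A subsequent 2-page
 *  sub-I/O is then built as iov[0] = {base0 + 4096, 4096} and
 *  iov[1] = {base1, 4096}.
 */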
1964 static void
1965 _spdk_rw_iov_split_next(void *cb_arg, int bserrno)
1966 {
1967 	struct rw_iov_ctx *ctx = cb_arg;
1968 	struct spdk_blob *blob = ctx->blob;
1969 	struct iovec *iov, *orig_iov;
1970 	int iovcnt;
1971 	size_t orig_iovoff;
1972 	uint64_t page_count, pages_to_boundary, page_offset;
1973 	uint64_t byte_count;
1974 
1975 	if (bserrno != 0 || ctx->pages_remaining == 0) {
1976 		ctx->cb_fn(ctx->cb_arg, bserrno);
1977 		free(ctx);
1978 		return;
1979 	}
1980 
1981 	page_offset = ctx->page_offset;
1982 	pages_to_boundary = _spdk_bs_num_pages_to_cluster_boundary(blob, page_offset);
1983 	page_count = spdk_min(ctx->pages_remaining, pages_to_boundary);
1984 
1985 	/*
1986 	 * Get the index and offset into the original iov array for our current position in the I/O
1987 	 *  sequence.  byte_count tracks how many bytes remain to be skipped before orig_iov and
1988 	 *  orig_iovoff point to the current position.  (sizeof(struct spdk_blob_md_page) == SPDK_BS_PAGE_SIZE.)
1989 	 */
1990 	byte_count = ctx->pages_done * sizeof(struct spdk_blob_md_page);
1991 	orig_iov = &ctx->orig_iov[0];
1992 	orig_iovoff = 0;
1993 	while (byte_count > 0) {
1994 		if (byte_count >= orig_iov->iov_len) {
1995 			byte_count -= orig_iov->iov_len;
1996 			orig_iov++;
1997 		} else {
1998 			orig_iovoff = byte_count;
1999 			byte_count = 0;
2000 		}
2001 	}
2002 
2003 	/*
2004 	 * Build an iov array for the next I/O in the sequence.  byte_count will keep track of how many
2005 	 *  bytes of this next I/O remain to be accounted for in the new iov array.
2006 	 */
2007 	byte_count = page_count * sizeof(struct spdk_blob_md_page);
2008 	iov = &ctx->iov[0];
2009 	iovcnt = 0;
2010 	while (byte_count > 0) {
2011 		iov->iov_len = spdk_min(byte_count, orig_iov->iov_len - orig_iovoff);
2012 		iov->iov_base = orig_iov->iov_base + orig_iovoff;
2013 		byte_count -= iov->iov_len;
2014 		orig_iovoff = 0;
2015 		orig_iov++;
2016 		iov++;
2017 		iovcnt++;
2018 	}
2019 
2020 	ctx->page_offset += page_count;
2021 	ctx->pages_done += page_count;
2022 	ctx->pages_remaining -= page_count;
2023 	iov = &ctx->iov[0];
2024 
2025 	if (ctx->read) {
2026 		spdk_blob_io_readv(ctx->blob, ctx->channel, iov, iovcnt, page_offset,
2027 				   page_count, _spdk_rw_iov_split_next, ctx);
2028 	} else {
2029 		spdk_blob_io_writev(ctx->blob, ctx->channel, iov, iovcnt, page_offset,
2030 				    page_count, _spdk_rw_iov_split_next, ctx);
2031 	}
2032 }
2033 
2034 static void
2035 _spdk_blob_request_submit_rw_iov(struct spdk_blob *blob, struct spdk_io_channel *_channel,
2036 				 struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
2037 				 spdk_blob_op_complete cb_fn, void *cb_arg, bool read)
2038 {
2039 	struct spdk_bs_cpl	cpl;
2040 
2041 	assert(blob != NULL);
2042 
2043 	if (!read && blob->data_ro) {
2044 		cb_fn(cb_arg, -EPERM);
2045 		return;
2046 	}
2047 
2048 	if (length == 0) {
2049 		cb_fn(cb_arg, 0);
2050 		return;
2051 	}
2052 
2053 	if (offset + length > blob->active.num_clusters * blob->bs->pages_per_cluster) {
2054 		cb_fn(cb_arg, -EINVAL);
2055 		return;
2056 	}
2057 
2058 	/*
2059 	 * For now, we implement readv/writev using a sequence (instead of a batch) to account for having
2060 	 *  to split a request that spans a cluster boundary.  For I/O that do not span a cluster boundary,
2061 	 *  there will be no noticeable difference compared to using a batch.  For I/O that do span a cluster
2062 	 *  boundary, the target LBAs (after blob offset to LBA translation) may not be contiguous, so we need
2063 	 *  to allocate a separate iov array and split the I/O such that none of the resulting
2064 	 *  smaller I/O cross a cluster boundary.  These smaller I/O will be issued in sequence (not in parallel)
2065 	 *  but since this case happens very infrequently, any performance impact will be negligible.
2066 	 *
2067 	 * This could be optimized in the future to allocate a big enough iov array to account for all of the iovs
2068 	 *  for all of the smaller I/Os, pre-build all of the iov arrays for the smaller I/Os, then issue them
2069 	 *  in a batch.  That would also require creating an intermediate spdk_bs_cpl that would get called
2070 	 *  when the batch was completed, to allow for freeing the memory for the iov arrays.
2071 	 */
2072 	if (spdk_likely(length <= _spdk_bs_num_pages_to_cluster_boundary(blob, offset))) {
2073 		uint32_t lba_count;
2074 		uint64_t lba;
2075 
2076 		_spdk_blob_calculate_lba_and_lba_count(blob, offset, length, &lba, &lba_count);
2077 
2078 		cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
2079 		cpl.u.blob_basic.cb_fn = cb_fn;
2080 		cpl.u.blob_basic.cb_arg = cb_arg;
2081 		if (blob->frozen_refcnt) {
2082 			/* This blob I/O is frozen */
2083 			spdk_bs_user_op_t *op;
2084 			struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(_channel);
2085 
2086 			op = spdk_bs_user_op_alloc(_channel, &cpl, read, blob, iov, iovcnt, offset, length);
2087 			if (!op) {
2088 				cb_fn(cb_arg, -ENOMEM);
2089 				return;
2090 			}
2091 
2092 			TAILQ_INSERT_TAIL(&bs_channel->queued_io, op, link);
2093 
2094 			return;
2095 		}
2096 
2097 		if (read) {
2098 			spdk_bs_sequence_t *seq;
2099 
2100 			seq = spdk_bs_sequence_start(_channel, &cpl);
2101 			if (!seq) {
2102 				cb_fn(cb_arg, -ENOMEM);
2103 				return;
2104 			}
2105 
2106 			if (_spdk_bs_page_is_allocated(blob, offset)) {
2107 				spdk_bs_sequence_readv_dev(seq, iov, iovcnt, lba, lba_count, _spdk_rw_iov_done, NULL);
2108 			} else {
2109 				spdk_bs_sequence_readv_bs_dev(seq, blob->back_bs_dev, iov, iovcnt, lba, lba_count,
2110 							      _spdk_rw_iov_done, NULL);
2111 			}
2112 		} else {
2113 			if (_spdk_bs_page_is_allocated(blob, offset)) {
2114 				spdk_bs_sequence_t *seq;
2115 
2116 				seq = spdk_bs_sequence_start(_channel, &cpl);
2117 				if (!seq) {
2118 					cb_fn(cb_arg, -ENOMEM);
2119 					return;
2120 				}
2121 
2122 				spdk_bs_sequence_writev_dev(seq, iov, iovcnt, lba, lba_count, _spdk_rw_iov_done, NULL);
2123 			} else {
2124 				/* Queue this operation and allocate the cluster */
2125 				spdk_bs_user_op_t *op;
2126 
2127 				op = spdk_bs_user_op_alloc(_channel, &cpl, SPDK_BLOB_WRITEV, blob, iov, iovcnt, offset, length);
2128 				if (!op) {
2129 					cb_fn(cb_arg, -ENOMEM);
2130 					return;
2131 				}
2132 
2133 				_spdk_bs_allocate_and_copy_cluster(blob, _channel, offset, op);
2134 			}
2135 		}
2136 	} else {
2137 		struct rw_iov_ctx *ctx;
2138 
2139 		ctx = calloc(1, sizeof(struct rw_iov_ctx) + iovcnt * sizeof(struct iovec));
2140 		if (ctx == NULL) {
2141 			cb_fn(cb_arg, -ENOMEM);
2142 			return;
2143 		}
2144 
2145 		ctx->blob = blob;
2146 		ctx->channel = _channel;
2147 		ctx->cb_fn = cb_fn;
2148 		ctx->cb_arg = cb_arg;
2149 		ctx->read = read;
2150 		ctx->orig_iov = iov;
2151 		ctx->iovcnt = iovcnt;
2152 		ctx->page_offset = offset;
2153 		ctx->pages_remaining = length;
2154 		ctx->pages_done = 0;
2155 
2156 		_spdk_rw_iov_split_next(ctx, 0);
2157 	}
2158 }
2159 
2160 static struct spdk_blob *
2161 _spdk_blob_lookup(struct spdk_blob_store *bs, spdk_blob_id blobid)
2162 {
2163 	struct spdk_blob *blob;
2164 
2165 	TAILQ_FOREACH(blob, &bs->blobs, link) {
2166 		if (blob->id == blobid) {
2167 			return blob;
2168 		}
2169 	}
2170 
2171 	return NULL;
2172 }
2173 
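/*
 * Per-thread channel setup: each spdk_bs_channel pre-allocates a pool of
 *  max_channel_ops request sets.  Sequences and batches draw from this pool,
 *  so when it is exhausted their *_start/_open helpers return NULL and
 *  callers complete with -ENOMEM instead of allocating on the I/O path.
 */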
2174 static int
2175 _spdk_bs_channel_create(void *io_device, void *ctx_buf)
2176 {
2177 	struct spdk_blob_store		*bs = io_device;
2178 	struct spdk_bs_channel		*channel = ctx_buf;
2179 	struct spdk_bs_dev		*dev;
2180 	uint32_t			max_ops = bs->max_channel_ops;
2181 	uint32_t			i;
2182 
2183 	dev = bs->dev;
2184 
2185 	channel->req_mem = calloc(max_ops, sizeof(struct spdk_bs_request_set));
2186 	if (!channel->req_mem) {
2187 		return -1;
2188 	}
2189 
2190 	TAILQ_INIT(&channel->reqs);
2191 
2192 	for (i = 0; i < max_ops; i++) {
2193 		TAILQ_INSERT_TAIL(&channel->reqs, &channel->req_mem[i], link);
2194 	}
2195 
2196 	channel->bs = bs;
2197 	channel->dev = dev;
2198 	channel->dev_channel = dev->create_channel(dev);
2199 
2200 	if (!channel->dev_channel) {
2201 		SPDK_ERRLOG("Failed to create device channel.\n");
2202 		free(channel->req_mem);
2203 		return -1;
2204 	}
2205 
2206 	TAILQ_INIT(&channel->need_cluster_alloc);
2207 	TAILQ_INIT(&channel->queued_io);
2208 
2209 	return 0;
2210 }
2211 
2212 static void
2213 _spdk_bs_channel_destroy(void *io_device, void *ctx_buf)
2214 {
2215 	struct spdk_bs_channel *channel = ctx_buf;
2216 	spdk_bs_user_op_t *op;
2217 
2218 	while (!TAILQ_EMPTY(&channel->need_cluster_alloc)) {
2219 		op = TAILQ_FIRST(&channel->need_cluster_alloc);
2220 		TAILQ_REMOVE(&channel->need_cluster_alloc, op, link);
2221 		spdk_bs_user_op_abort(op);
2222 	}
2223 
2224 	while (!TAILQ_EMPTY(&channel->queued_io)) {
2225 		op = TAILQ_FIRST(&channel->queued_io);
2226 		TAILQ_REMOVE(&channel->queued_io, op, link);
2227 		spdk_bs_user_op_abort(op);
2228 	}
2229 
2230 	free(channel->req_mem);
2231 	channel->dev->destroy_channel(channel->dev, channel->dev_channel);
2232 }
2233 
2234 static void
2235 _spdk_bs_dev_destroy(void *io_device)
2236 {
2237 	struct spdk_blob_store *bs = io_device;
2238 	struct spdk_blob	*blob, *blob_tmp;
2239 
2240 	bs->dev->destroy(bs->dev);
2241 
2242 	TAILQ_FOREACH_SAFE(blob, &bs->blobs, link, blob_tmp) {
2243 		TAILQ_REMOVE(&bs->blobs, blob, link);
2244 		_spdk_blob_free(blob);
2245 	}
2246 
2247 	pthread_mutex_destroy(&bs->used_clusters_mutex);
2248 
2249 	spdk_bit_array_free(&bs->used_blobids);
2250 	spdk_bit_array_free(&bs->used_md_pages);
2251 	spdk_bit_array_free(&bs->used_clusters);
2252 	/*
2253 	 * If this function is called for any reason except a successful unload,
2254 	 * the unload_cpl type will be NONE and this will be a nop.
2255 	 */
2256 	spdk_bs_call_cpl(&bs->unload_cpl, bs->unload_err);
2257 
2258 	free(bs);
2259 }
2260 
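/*
 * The blobstore tracks snapshot/clone relationships in memory: bs->snapshots
 *  holds one spdk_blob_list entry per snapshot, and each entry carries its
 *  own list of clone entries plus a clone_count.  A snapshot with two clones
 *  is tracked as:
 *
 *    bs->snapshots -> { id: snap, clones: { id: clone1 } -> { id: clone2 } }
 */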
2261 static int
2262 _spdk_bs_blob_list_add(struct spdk_blob *blob)
2263 {
2264 	spdk_blob_id snapshot_id;
2265 	struct spdk_blob_list *snapshot_entry = NULL;
2266 	struct spdk_blob_list *clone_entry = NULL;
2267 
2268 	assert(blob != NULL);
2269 
2270 	snapshot_id = blob->parent_id;
2271 	if (snapshot_id == SPDK_BLOBID_INVALID) {
2272 		return 0;
2273 	}
2274 
2275 	TAILQ_FOREACH(snapshot_entry, &blob->bs->snapshots, link) {
2276 		if (snapshot_entry->id == snapshot_id) {
2277 			break;
2278 		}
2279 	}
2280 
2281 	if (snapshot_entry == NULL) {
2282 		/* Snapshot not found */
2283 		snapshot_entry = calloc(1, sizeof(struct spdk_blob_list));
2284 		if (snapshot_entry == NULL) {
2285 			return -ENOMEM;
2286 		}
2287 		snapshot_entry->id = snapshot_id;
2288 		TAILQ_INIT(&snapshot_entry->clones);
2289 		TAILQ_INSERT_TAIL(&blob->bs->snapshots, snapshot_entry, link);
2290 	} else {
2291 		TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) {
2292 			if (clone_entry->id == blob->id) {
2293 				break;
2294 			}
2295 		}
2296 	}
2297 
2298 	if (clone_entry == NULL) {
2299 		/* Clone not found */
2300 		clone_entry = calloc(1, sizeof(struct spdk_blob_list));
2301 		if (clone_entry == NULL) {
2302 			return -ENOMEM;
2303 		}
2304 		clone_entry->id = blob->id;
2305 		TAILQ_INIT(&clone_entry->clones);
2306 		TAILQ_INSERT_TAIL(&snapshot_entry->clones, clone_entry, link);
2307 		snapshot_entry->clone_count++;
2308 	}
2309 
2310 	return 0;
2311 }
2312 
2313 static int
2314 _spdk_bs_blob_list_remove(struct spdk_blob *blob)
2315 {
2316 	struct spdk_blob_list *snapshot_entry = NULL;
2317 	struct spdk_blob_list *clone_entry = NULL;
2318 	spdk_blob_id snapshot_id;
2319 
2320 	assert(blob != NULL);
2321 
2322 	snapshot_id = blob->parent_id;
2323 	if (snapshot_id == SPDK_BLOBID_INVALID) {
2324 		return 0;
2325 	}
2326 
2327 	TAILQ_FOREACH(snapshot_entry, &blob->bs->snapshots, link) {
2328 		if (snapshot_entry->id == snapshot_id) {
2329 			break;
2330 		}
2331 	}
2332 
2333 	assert(snapshot_entry != NULL);
2334 
2335 	TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) {
2336 		if (clone_entry->id == blob->id) {
2337 			break;
2338 		}
2339 	}
2340 
2341 	assert(clone_entry != NULL);
2342 
2343 	blob->parent_id = SPDK_BLOBID_INVALID;
2344 	TAILQ_REMOVE(&snapshot_entry->clones, clone_entry, link);
2345 	free(clone_entry);
2346 
2347 	snapshot_entry->clone_count--;
2348 	if (snapshot_entry->clone_count == 0) {
2349 		/* Snapshot has no more clones */
2350 		TAILQ_REMOVE(&blob->bs->snapshots, snapshot_entry, link);
2351 		free(snapshot_entry);
2352 	}
2353 
2354 	return 0;
2355 }
2356 
2357 static int
2358 _spdk_bs_blob_list_free(struct spdk_blob_store *bs)
2359 {
2360 	struct spdk_blob_list *snapshot_entry;
2361 	struct spdk_blob_list *snapshot_entry_tmp;
2362 	struct spdk_blob_list *clone_entry;
2363 	struct spdk_blob_list *clone_entry_tmp;
2364 
2365 	TAILQ_FOREACH_SAFE(snapshot_entry, &bs->snapshots, link, snapshot_entry_tmp) {
2366 		TAILQ_FOREACH_SAFE(clone_entry, &snapshot_entry->clones, link, clone_entry_tmp) {
2367 			TAILQ_REMOVE(&snapshot_entry->clones, clone_entry, link);
2368 			free(clone_entry);
2369 		}
2370 		TAILQ_REMOVE(&bs->snapshots, snapshot_entry, link);
2371 		free(snapshot_entry);
2372 	}
2373 
2374 	return 0;
2375 }
2376 
2377 static void
2378 _spdk_bs_free(struct spdk_blob_store *bs)
2379 {
2380 	_spdk_bs_blob_list_free(bs);
2381 
2382 	spdk_bs_unregister_md_thread(bs);
2383 	spdk_io_device_unregister(bs, _spdk_bs_dev_destroy);
2384 }
2385 
2386 void
2387 spdk_bs_opts_init(struct spdk_bs_opts *opts)
2388 {
2389 	opts->cluster_sz = SPDK_BLOB_OPTS_CLUSTER_SZ;
2390 	opts->num_md_pages = SPDK_BLOB_OPTS_NUM_MD_PAGES;
2391 	opts->max_md_ops = SPDK_BLOB_OPTS_MAX_MD_OPS;
2392 	opts->max_channel_ops = SPDK_BLOB_OPTS_DEFAULT_CHANNEL_OPS;
2393 	memset(&opts->bstype, 0, sizeof(opts->bstype));
2394 	opts->iter_cb_fn = NULL;
2395 	opts->iter_cb_arg = NULL;
2396 }
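
/*
 * Typical initialization flow using these defaults (a sketch; the callback
 *  and its argument are illustrative):
 *
 *	struct spdk_bs_opts opts;
 *
 *	spdk_bs_opts_init(&opts);
 *	opts.cluster_sz = 4 * 1024 * 1024;
 *	spdk_bs_init(dev, &opts, init_complete_cb, cb_arg);
 */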
2397 
2398 static int
2399 _spdk_bs_opts_verify(struct spdk_bs_opts *opts)
2400 {
2401 	if (opts->cluster_sz == 0 || opts->num_md_pages == 0 || opts->max_md_ops == 0 ||
2402 	    opts->max_channel_ops == 0) {
2403 		SPDK_ERRLOG("Blobstore options cannot be set to 0\n");
2404 		return -1;
2405 	}
2406 
2407 	return 0;
2408 }
2409 
2410 static struct spdk_blob_store *
2411 _spdk_bs_alloc(struct spdk_bs_dev *dev, struct spdk_bs_opts *opts)
2412 {
2413 	struct spdk_blob_store	*bs;
2414 	uint64_t dev_size;
2415 	int rc;
2416 
2417 	dev_size = dev->blocklen * dev->blockcnt;
2418 	if (dev_size < opts->cluster_sz) {
2419 		/* Device size cannot be smaller than the cluster size of the blobstore */
2420 		SPDK_ERRLOG("Device size %" PRIu64 " is smaller than cluster size %" PRIu32 "\n",
2421 			    dev_size, opts->cluster_sz);
2422 		return NULL;
2423 	}
2424 	if (opts->cluster_sz < SPDK_BS_PAGE_SIZE) {
2425 		/* Cluster size cannot be smaller than page size */
2426 		SPDK_ERRLOG("Cluster size %" PRIu32 " is smaller than page size %d\n",
2427 			    opts->cluster_sz, SPDK_BS_PAGE_SIZE);
2428 		return NULL;
2429 	}
2430 	bs = calloc(1, sizeof(struct spdk_blob_store));
2431 	if (!bs) {
2432 		return NULL;
2433 	}
2434 
2435 	TAILQ_INIT(&bs->blobs);
2436 	TAILQ_INIT(&bs->snapshots);
2437 	bs->dev = dev;
2438 	bs->md_thread = spdk_get_thread();
2439 	assert(bs->md_thread != NULL);
2440 
2441 	/*
2442 	 * Do not use _spdk_bs_lba_to_cluster() here since blockcnt may not be an
2443 	 *  even multiple of the cluster size.
2444 	 */
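	/*
	 * For example (a sketch), a dev with 512-byte blocks and a 1MiB cluster
	 *  has 2048 blocks per cluster, so total_clusters below is
	 *  blockcnt / 2048; a partial trailing cluster is simply not counted.
	 */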
2445 	bs->cluster_sz = opts->cluster_sz;
2446 	bs->total_clusters = dev->blockcnt / (bs->cluster_sz / dev->blocklen);
2447 	bs->pages_per_cluster = bs->cluster_sz / SPDK_BS_PAGE_SIZE;
2448 	bs->num_free_clusters = bs->total_clusters;
2449 	bs->used_clusters = spdk_bit_array_create(bs->total_clusters);
2450 	if (bs->used_clusters == NULL) {
2451 		free(bs);
2452 		return NULL;
2453 	}
2454 
2455 	bs->max_channel_ops = opts->max_channel_ops;
2456 	bs->super_blob = SPDK_BLOBID_INVALID;
2457 	memcpy(&bs->bstype, &opts->bstype, sizeof(opts->bstype));
2458 
2459 	/* The metadata is assumed to be at least 1 page */
2460 	bs->used_md_pages = spdk_bit_array_create(1);
2461 	bs->used_blobids = spdk_bit_array_create(0);
2462 
2463 	pthread_mutex_init(&bs->used_clusters_mutex, NULL);
2464 
2465 	spdk_io_device_register(bs, _spdk_bs_channel_create, _spdk_bs_channel_destroy,
2466 				sizeof(struct spdk_bs_channel));
2467 	rc = spdk_bs_register_md_thread(bs);
2468 	if (rc == -1) {
2469 		spdk_io_device_unregister(bs, NULL);
2470 		pthread_mutex_destroy(&bs->used_clusters_mutex);
2471 		spdk_bit_array_free(&bs->used_blobids);
2472 		spdk_bit_array_free(&bs->used_md_pages);
2473 		spdk_bit_array_free(&bs->used_clusters);
2474 		free(bs);
2475 		return NULL;
2476 	}
2477 
2478 	return bs;
2479 }
2480 
2481 /* START spdk_bs_load.  spdk_bs_load_ctx is used for both load and unload. */
2482 
2483 struct spdk_bs_load_ctx {
2484 	struct spdk_blob_store		*bs;
2485 	struct spdk_bs_super_block	*super;
2486 
2487 	struct spdk_bs_md_mask		*mask;
2488 	bool				in_page_chain;
2489 	uint32_t			page_index;
2490 	uint32_t			cur_page;
2491 	struct spdk_blob_md_page	*page;
2492 	bool				is_load;
2493 
2494 	spdk_bs_sequence_t			*seq;
2495 	spdk_blob_op_with_handle_complete	iter_cb_fn;
2496 	void					*iter_cb_arg;
2497 };
2498 
2499 static void
2500 _spdk_bs_load_ctx_fail(spdk_bs_sequence_t *seq, struct spdk_bs_load_ctx *ctx, int bserrno)
2501 {
2502 	assert(bserrno != 0);
2503 
2504 	spdk_dma_free(ctx->super);
2505 	spdk_bs_sequence_finish(seq, bserrno);
2506 	/*
2507 	 * Only free the blobstore when a load fails.  If an unload fails (for some reason)
2508 	 *  we want to keep the blobstore in case the caller wants to try again.
2509 	 */
2510 	if (ctx->is_load) {
2511 		_spdk_bs_free(ctx->bs);
2512 	}
2513 	free(ctx);
2514 }
2515 
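/*
 * Serialize a bit array into an on-disk mask: bit i of the array lands in
 *  bit (i % 8) of byte mask->mask[i / 8].  For example, bit 10 set in the
 *  array becomes bit 2 of mask->mask[1].
 */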
2516 static void
2517 _spdk_bs_set_mask(struct spdk_bit_array *array, struct spdk_bs_md_mask *mask)
2518 {
2519 	uint32_t i = 0;
2520 
2521 	while (true) {
2522 		i = spdk_bit_array_find_first_set(array, i);
2523 		if (i >= mask->length) {
2524 			break;
2525 		}
2526 		mask->mask[i / 8] |= 1U << (i % 8);
2527 		i++;
2528 	}
2529 }
2530 
2531 static void
2532 _spdk_bs_write_super(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs,
2533 		     struct spdk_bs_super_block *super, spdk_bs_sequence_cpl cb_fn, void *cb_arg)
2534 {
2535 	/* Update the values in the super block */
2536 	super->super_blob = bs->super_blob;
2537 	memcpy(&super->bstype, &bs->bstype, sizeof(bs->bstype));
2538 	super->crc = _spdk_blob_md_page_calc_crc(super);
2539 	spdk_bs_sequence_write_dev(seq, super, _spdk_bs_page_to_lba(bs, 0),
2540 				   _spdk_bs_byte_to_lba(bs, sizeof(*super)),
2541 				   cb_fn, cb_arg);
2542 }
2543 
2544 static void
2545 _spdk_bs_write_used_clusters(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn)
2546 {
2547 	struct spdk_bs_load_ctx	*ctx = arg;
2548 	uint64_t	mask_size, lba, lba_count;
2549 
2550 	/* Write out the used clusters mask */
2551 	mask_size = ctx->super->used_cluster_mask_len * SPDK_BS_PAGE_SIZE;
2552 	ctx->mask = spdk_dma_zmalloc(mask_size, 0x1000, NULL);
2553 	if (!ctx->mask) {
2554 		_spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM);
2555 		return;
2556 	}
2557 
2558 	ctx->mask->type = SPDK_MD_MASK_TYPE_USED_CLUSTERS;
2559 	ctx->mask->length = ctx->bs->total_clusters;
2560 	assert(ctx->mask->length == spdk_bit_array_capacity(ctx->bs->used_clusters));
2561 
2562 	_spdk_bs_set_mask(ctx->bs->used_clusters, ctx->mask);
2563 	lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start);
2564 	lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len);
2565 	spdk_bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg);
2566 }
2567 
2568 static void
2569 _spdk_bs_write_used_md(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn)
2570 {
2571 	struct spdk_bs_load_ctx	*ctx = arg;
2572 	uint64_t	mask_size, lba, lba_count;
2573 
2574 	mask_size = ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE;
2575 	ctx->mask = spdk_dma_zmalloc(mask_size, 0x1000, NULL);
2576 	if (!ctx->mask) {
2577 		_spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM);
2578 		return;
2579 	}
2580 
2581 	ctx->mask->type = SPDK_MD_MASK_TYPE_USED_PAGES;
2582 	ctx->mask->length = ctx->super->md_len;
2583 	assert(ctx->mask->length == spdk_bit_array_capacity(ctx->bs->used_md_pages));
2584 
2585 	_spdk_bs_set_mask(ctx->bs->used_md_pages, ctx->mask);
2586 	lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_start);
2587 	lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_len);
2588 	spdk_bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg);
2589 }
2590 
2591 static void
2592 _spdk_bs_write_used_blobids(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn)
2593 {
2594 	struct spdk_bs_load_ctx	*ctx = arg;
2595 	uint64_t	mask_size, lba, lba_count;
2596 
2597 	if (ctx->super->used_blobid_mask_len == 0) {
2598 		/*
2599 		 * This is a pre-v3 on-disk format where the blobid mask does not get
2600 		 *  written to disk.
2601 		 */
2602 		cb_fn(seq, arg, 0);
2603 		return;
2604 	}
2605 
2606 	mask_size = ctx->super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE;
2607 	ctx->mask = spdk_dma_zmalloc(mask_size, 0x1000, NULL);
2608 	if (!ctx->mask) {
2609 		_spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM);
2610 		return;
2611 	}
2612 
2613 	ctx->mask->type = SPDK_MD_MASK_TYPE_USED_BLOBIDS;
2614 	ctx->mask->length = ctx->super->md_len;
2615 	assert(ctx->mask->length == spdk_bit_array_capacity(ctx->bs->used_blobids));
2616 
2617 	_spdk_bs_set_mask(ctx->bs->used_blobids, ctx->mask);
2618 	lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_start);
2619 	lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_len);
2620 	spdk_bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg);
2621 }
2622 
2623 static void
2624 _spdk_bs_load_iter(void *arg, struct spdk_blob *blob, int bserrno)
2625 {
2626 	struct spdk_bs_load_ctx *ctx = arg;
2627 
2628 	if (bserrno == 0) {
2629 		if (ctx->iter_cb_fn) {
2630 			ctx->iter_cb_fn(ctx->iter_cb_arg, blob, 0);
2631 		}
2632 		_spdk_bs_blob_list_add(blob);
2633 		spdk_bs_iter_next(ctx->bs, blob, _spdk_bs_load_iter, ctx);
2634 		return;
2635 	}
2636 
2637 	if (bserrno == -ENOENT) {
2638 		bserrno = 0;
2639 	} else {
2640 		/*
2641 		 * This case needs to be looked at further.  The same problem
2642 		 *  exists with applications that rely on explicit blob
2643 		 *  iteration.  We should just skip the blob that failed
2644 		 *  to load and continue on to the next one.
2645 		 */
2646 		SPDK_ERRLOG("Error in iterating blobs\n");
2647 	}
2648 
2649 	ctx->iter_cb_fn = NULL;
2650 
2651 	spdk_dma_free(ctx->super);
2652 	spdk_dma_free(ctx->mask);
2653 	spdk_bs_sequence_finish(ctx->seq, bserrno);
2654 	free(ctx);
2655 }
2656 
2657 static void
2658 _spdk_bs_load_complete(spdk_bs_sequence_t *seq, struct spdk_bs_load_ctx *ctx, int bserrno)
2659 {
2660 	ctx->seq = seq;
2661 	spdk_bs_iter_first(ctx->bs, _spdk_bs_load_iter, ctx);
2662 }
2663 
2664 static void
2665 _spdk_bs_load_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2666 {
2667 	struct spdk_bs_load_ctx *ctx = cb_arg;
2668 	uint32_t i, j;
2669 	int rc;
2670 
2671 	/* The type must be correct */
2672 	assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_BLOBIDS);
2673 
2674 	/* The length of the mask (in bits) must not be greater than
2675 	 * the length of the buffer (converted to bits) */
2676 	assert(ctx->mask->length <= (ctx->super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE * 8));
2677 
2678 	/* The length of the mask must be exactly equal to the size
2679 	 * (in pages) of the metadata region */
2680 	assert(ctx->mask->length == ctx->super->md_len);
2681 
2682 	rc = spdk_bit_array_resize(&ctx->bs->used_blobids, ctx->mask->length);
2683 	if (rc < 0) {
2684 		spdk_dma_free(ctx->mask);
2685 		_spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM);
2686 		return;
2687 	}
2688 
2689 	for (i = 0; i < ctx->mask->length / 8; i++) {
2690 		uint8_t segment = ctx->mask->mask[i];
2691 		for (j = 0; segment && (j < 8); j++) {
2692 			if (segment & 1U) {
2693 				spdk_bit_array_set(ctx->bs->used_blobids, (i * 8) + j);
2694 			}
2695 			segment >>= 1U;
2696 		}
2697 	}
2698 
2699 	_spdk_bs_load_complete(seq, ctx, bserrno);
2700 }
2701 
2702 static void
2703 _spdk_bs_load_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2704 {
2705 	struct spdk_bs_load_ctx *ctx = cb_arg;
2706 	uint64_t		lba, lba_count, mask_size;
2707 	uint32_t		i, j;
2708 	int			rc;
2709 
2710 	/* The type must be correct */
2711 	assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_CLUSTERS);
2712 	/* The length of the mask (in bits) must not be greater than the length of the buffer (converted to bits) */
2713 	assert(ctx->mask->length <= (ctx->super->used_cluster_mask_len * sizeof(
2714 					     struct spdk_blob_md_page) * 8));
2715 	/* The length of the mask must be exactly equal to the total number of clusters */
2716 	assert(ctx->mask->length == ctx->bs->total_clusters);
2717 
2718 	rc = spdk_bit_array_resize(&ctx->bs->used_clusters, ctx->bs->total_clusters);
2719 	if (rc < 0) {
2720 		spdk_dma_free(ctx->mask);
2721 		_spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM);
2722 		return;
2723 	}
2724 
2725 	ctx->bs->num_free_clusters = ctx->bs->total_clusters;
2726 	for (i = 0; i < ctx->mask->length / 8; i++) {
2727 		uint8_t segment = ctx->mask->mask[i];
2728 		for (j = 0; segment && (j < 8); j++) {
2729 			if (segment & 1U) {
2730 				spdk_bit_array_set(ctx->bs->used_clusters, (i * 8) + j);
2731 				assert(ctx->bs->num_free_clusters > 0);
2732 				ctx->bs->num_free_clusters--;
2733 			}
2734 			segment >>= 1U;
2735 		}
2736 	}
2737 
2738 	spdk_dma_free(ctx->mask);
2739 
2740 	/* Read the used blobids mask */
2741 	mask_size = ctx->super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE;
2742 	ctx->mask = spdk_dma_zmalloc(mask_size, 0x1000, NULL);
2743 	if (!ctx->mask) {
2744 		_spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM);
2745 		return;
2746 	}
2747 	lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_start);
2748 	lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_len);
2749 	spdk_bs_sequence_read_dev(seq, ctx->mask, lba, lba_count,
2750 				  _spdk_bs_load_used_blobids_cpl, ctx);
2751 }
2752 
2753 static void
2754 _spdk_bs_load_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2755 {
2756 	struct spdk_bs_load_ctx *ctx = cb_arg;
2757 	uint64_t		lba, lba_count, mask_size;
2758 	uint32_t		i, j;
2759 	int			rc;
2760 
2761 	/* The type must be correct */
2762 	assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_PAGES);
2763 	/* The length of the mask (in bits) must not be greater than the length of the buffer (converted to bits) */
2764 	assert(ctx->mask->length <= (ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE *
2765 				     8));
2766 	/* The length of the mask must be exactly equal to the size (in pages) of the metadata region */
2767 	assert(ctx->mask->length == ctx->super->md_len);
2768 
2769 	rc = spdk_bit_array_resize(&ctx->bs->used_md_pages, ctx->mask->length);
2770 	if (rc < 0) {
2771 		spdk_dma_free(ctx->mask);
2772 		_spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM);
2773 		return;
2774 	}
2775 
2776 	for (i = 0; i < ctx->mask->length / 8; i++) {
2777 		uint8_t segment = ctx->mask->mask[i];
2778 		for (j = 0; segment && (j < 8); j++) {
2779 			if (segment & 1U) {
2780 				spdk_bit_array_set(ctx->bs->used_md_pages, (i * 8) + j);
2781 			}
2782 			segment >>= 1U;
2783 		}
2784 	}
2785 	spdk_dma_free(ctx->mask);
2786 
2787 	/* Read the used clusters mask */
2788 	mask_size = ctx->super->used_cluster_mask_len * SPDK_BS_PAGE_SIZE;
2789 	ctx->mask = spdk_dma_zmalloc(mask_size, 0x1000, NULL);
2790 	if (!ctx->mask) {
2791 		_spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM);
2792 		return;
2793 	}
2794 	lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start);
2795 	lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len);
2796 	spdk_bs_sequence_read_dev(seq, ctx->mask, lba, lba_count,
2797 				  _spdk_bs_load_used_clusters_cpl, ctx);
2798 }
2799 
2800 static void
2801 _spdk_bs_load_read_used_pages(spdk_bs_sequence_t *seq, void *cb_arg)
2802 {
2803 	struct spdk_bs_load_ctx	*ctx = cb_arg;
2804 	uint64_t lba, lba_count, mask_size;
2805 
2806 	/* Read the used pages mask */
2807 	mask_size = ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE;
2808 	ctx->mask = spdk_dma_zmalloc(mask_size, 0x1000, NULL);
2809 	if (!ctx->mask) {
2810 		_spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM);
2811 		return;
2812 	}
2813 
2814 	lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_start);
2815 	lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_len);
2816 	spdk_bs_sequence_read_dev(seq, ctx->mask, lba, lba_count,
2817 				  _spdk_bs_load_used_pages_cpl, ctx);
2818 }
2819 
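/*
 * Replay helper: walk the descriptors in a single metadata page and re-mark
 *  every cluster referenced by an extent descriptor in used_clusters.
 *  Extents store a starting cluster_idx plus a run length, so an extent of
 *  { cluster_idx = 8, length = 3 } claims clusters 8, 9 and 10.
 */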
2820 static int
2821 _spdk_bs_load_replay_md_parse_page(const struct spdk_blob_md_page *page, struct spdk_blob_store *bs)
2822 {
2823 	struct spdk_blob_md_descriptor *desc;
2824 	size_t	cur_desc = 0;
2825 
2826 	desc = (struct spdk_blob_md_descriptor *)page->descriptors;
2827 	while (cur_desc < sizeof(page->descriptors)) {
2828 		if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) {
2829 			if (desc->length == 0) {
2830 				/* If padding and length are 0, this terminates the page */
2831 				break;
2832 			}
2833 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT) {
2834 			struct spdk_blob_md_descriptor_extent	*desc_extent;
2835 			unsigned int				i, j;
2836 			unsigned int				cluster_count = 0;
2837 			uint32_t				cluster_idx;
2838 
2839 			desc_extent = (struct spdk_blob_md_descriptor_extent *)desc;
2840 
2841 			for (i = 0; i < desc_extent->length / sizeof(desc_extent->extents[0]); i++) {
2842 				for (j = 0; j < desc_extent->extents[i].length; j++) {
2843 					cluster_idx = desc_extent->extents[i].cluster_idx;
2844 					/*
2845 					 * cluster_idx = 0 means an unallocated cluster - don't mark that
2846 					 * in the used cluster map.
2847 					 */
2848 					if (cluster_idx != 0) {
2849 						spdk_bit_array_set(bs->used_clusters, cluster_idx + j);
2850 						if (bs->num_free_clusters == 0) {
2851 							return -ENOSPC;
2852 						}
2853 						bs->num_free_clusters--;
2854 					}
2855 					cluster_count++;
2856 				}
2857 			}
2858 			if (cluster_count == 0) {
2859 				return -EINVAL;
2860 			}
2861 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) {
2862 			/* Skip this item */
2863 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) {
2864 			/* Skip this item */
2865 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) {
2866 			/* Skip this item */
2867 		} else {
2868 			/* Error */
2869 			return -EINVAL;
2870 		}
2871 		/* Advance to the next descriptor */
2872 		cur_desc += sizeof(*desc) + desc->length;
2873 		if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) {
2874 			break;
2875 		}
2876 		desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc);
2877 	}
2878 	return 0;
2879 }
2880 
2881 static bool
_spdk_bs_load_cur_md_page_valid(struct spdk_bs_load_ctx *ctx)
2882 {
2883 	uint32_t crc;
2884 
2885 	crc = _spdk_blob_md_page_calc_crc(ctx->page);
2886 	if (crc != ctx->page->crc) {
2887 		return false;
2888 	}
2889 
2890 	if (_spdk_bs_page_to_blobid(ctx->cur_page) != ctx->page->id) {
2891 		return false;
2892 	}
2893 	return true;
2894 }
2895 
2896 static void
2897 _spdk_bs_load_replay_cur_md_page(spdk_bs_sequence_t *seq, void *cb_arg);
2898 
2899 static void
2900 _spdk_bs_load_write_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2901 {
2902 	struct spdk_bs_load_ctx	*ctx = cb_arg;
2903 
2904 	_spdk_bs_load_complete(seq, ctx, bserrno);
2905 }
2906 
2907 static void
2908 _spdk_bs_load_write_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2909 {
2910 	struct spdk_bs_load_ctx	*ctx = cb_arg;
2911 
2912 	spdk_dma_free(ctx->mask);
2913 	ctx->mask = NULL;
2914 
2915 	_spdk_bs_write_used_clusters(seq, cb_arg, _spdk_bs_load_write_used_clusters_cpl);
2916 }
2917 
2918 static void
2919 _spdk_bs_load_write_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2920 {
2921 	struct spdk_bs_load_ctx	*ctx = cb_arg;
2922 
2923 	spdk_dma_free(ctx->mask);
2924 	ctx->mask = NULL;
2925 
2926 	_spdk_bs_write_used_blobids(seq, cb_arg, _spdk_bs_load_write_used_blobids_cpl);
2927 }
2928 
2929 static void
2930 _spdk_bs_load_write_used_md(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2931 {
2932 	_spdk_bs_write_used_md(seq, cb_arg, _spdk_bs_load_write_used_pages_cpl);
2933 }
2934 
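/*
 * The replay below visits every metadata page: a valid page with
 *  sequence_num == 0 starts a blob's chain, claiming both a used_md_pages
 *  bit and a used_blobids bit (a blob's first page index determines its
 *  blobid).  Chained pages (page->next != SPDK_INVALID_MD_PAGE) are followed
 *  before moving on to the next page index not already marked as used.
 */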
2935 static void
2936 _spdk_bs_load_replay_md_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2937 {
2938 	struct spdk_bs_load_ctx *ctx = cb_arg;
2939 	uint64_t num_md_clusters;
2940 	uint64_t i;
2941 	uint32_t page_num;
2942 
2943 	if (bserrno != 0) {
2944 		_spdk_bs_load_ctx_fail(seq, ctx, bserrno);
2945 		return;
2946 	}
2947 
2948 	page_num = ctx->cur_page;
2949 	if (_spdk_bs_load_cur_md_page_valid(ctx) == true) {
2950 		if (ctx->page->sequence_num == 0 || ctx->in_page_chain == true) {
2951 			spdk_bit_array_set(ctx->bs->used_md_pages, page_num);
2952 			if (ctx->page->sequence_num == 0) {
2953 				spdk_bit_array_set(ctx->bs->used_blobids, page_num);
2954 			}
2955 			if (_spdk_bs_load_replay_md_parse_page(ctx->page, ctx->bs)) {
2956 				_spdk_bs_load_ctx_fail(seq, ctx, -EILSEQ);
2957 				return;
2958 			}
2959 			if (ctx->page->next != SPDK_INVALID_MD_PAGE) {
2960 				ctx->in_page_chain = true;
2961 				ctx->cur_page = ctx->page->next;
2962 				_spdk_bs_load_replay_cur_md_page(seq, cb_arg);
2963 				return;
2964 			}
2965 		}
2966 	}
2967 
2968 	ctx->in_page_chain = false;
2969 
2970 	do {
2971 		ctx->page_index++;
2972 	} while (spdk_bit_array_get(ctx->bs->used_md_pages, ctx->page_index) == true);
2973 
2974 	if (ctx->page_index < ctx->super->md_len) {
2975 		ctx->cur_page = ctx->page_index;
2976 		_spdk_bs_load_replay_cur_md_page(seq, cb_arg);
2977 	} else {
2978 		/* Claim all of the clusters used by the metadata */
2979 		num_md_clusters = divide_round_up(ctx->super->md_len, ctx->bs->pages_per_cluster);
2980 		for (i = 0; i < num_md_clusters; i++) {
2981 			_spdk_bs_claim_cluster(ctx->bs, i);
2982 		}
2983 		spdk_dma_free(ctx->page);
2984 		_spdk_bs_load_write_used_md(seq, ctx, bserrno);
2985 	}
2986 }
2987 
2988 static void
2989 _spdk_bs_load_replay_cur_md_page(spdk_bs_sequence_t *seq, void *cb_arg)
2990 {
2991 	struct spdk_bs_load_ctx *ctx = cb_arg;
2992 	uint64_t lba;
2993 
2994 	assert(ctx->cur_page < ctx->super->md_len);
2995 	lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->md_start + ctx->cur_page);
2996 	spdk_bs_sequence_read_dev(seq, ctx->page, lba,
2997 				  _spdk_bs_byte_to_lba(ctx->bs, SPDK_BS_PAGE_SIZE),
2998 				  _spdk_bs_load_replay_md_cpl, ctx);
2999 }
3000 
3001 static void
3002 _spdk_bs_load_replay_md(spdk_bs_sequence_t *seq, void *cb_arg)
3003 {
3004 	struct spdk_bs_load_ctx *ctx = cb_arg;
3005 
3006 	ctx->page_index = 0;
3007 	ctx->cur_page = 0;
3008 	ctx->page = spdk_dma_zmalloc(SPDK_BS_PAGE_SIZE,
3009 				     SPDK_BS_PAGE_SIZE,
3010 				     NULL);
3011 	if (!ctx->page) {
3012 		_spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM);
3013 		return;
3014 	}
3015 	_spdk_bs_load_replay_cur_md_page(seq, cb_arg);
3016 }
3017 
3018 static void
3019 _spdk_bs_recover(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
3020 {
3021 	struct spdk_bs_load_ctx *ctx = cb_arg;
3022 	int		rc;
3023 
3024 	if (bserrno != 0) {
3025 		_spdk_bs_load_ctx_fail(seq, ctx, -EIO);
3026 		return;
3027 	}
3028 
3029 	rc = spdk_bit_array_resize(&ctx->bs->used_md_pages, ctx->super->md_len);
3030 	if (rc < 0) {
3031 		_spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM);
3032 		return;
3033 	}
3034 
3035 	rc = spdk_bit_array_resize(&ctx->bs->used_blobids, ctx->super->md_len);
3036 	if (rc < 0) {
3037 		_spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM);
3038 		return;
3039 	}
3040 
3041 	rc = spdk_bit_array_resize(&ctx->bs->used_clusters, ctx->bs->total_clusters);
3042 	if (rc < 0) {
3043 		_spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM);
3044 		return;
3045 	}
3046 
3047 	ctx->bs->num_free_clusters = ctx->bs->total_clusters;
3048 	_spdk_bs_load_replay_md(seq, cb_arg);
3049 }
3050 
3051 static void
3052 _spdk_bs_load_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
3053 {
3054 	struct spdk_bs_load_ctx *ctx = cb_arg;
3055 	uint32_t	crc;
3056 	static const char zeros[SPDK_BLOBSTORE_TYPE_LENGTH];
3057 
3058 	if (ctx->super->version > SPDK_BS_VERSION ||
3059 	    ctx->super->version < SPDK_BS_INITIAL_VERSION) {
3060 		_spdk_bs_load_ctx_fail(seq, ctx, -EILSEQ);
3061 		return;
3062 	}
3063 
3064 	if (memcmp(ctx->super->signature, SPDK_BS_SUPER_BLOCK_SIG,
3065 		   sizeof(ctx->super->signature)) != 0) {
3066 		_spdk_bs_load_ctx_fail(seq, ctx, -EILSEQ);
3067 		return;
3068 	}
3069 
3070 	crc = _spdk_blob_md_page_calc_crc(ctx->super);
3071 	if (crc != ctx->super->crc) {
3072 		_spdk_bs_load_ctx_fail(seq, ctx, -EILSEQ);
3073 		return;
3074 	}
3075 
3076 	if (memcmp(&ctx->bs->bstype, &ctx->super->bstype, SPDK_BLOBSTORE_TYPE_LENGTH) == 0) {
3077 		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Bstype matched - loading blobstore\n");
3078 	} else if (memcmp(&ctx->bs->bstype, zeros, SPDK_BLOBSTORE_TYPE_LENGTH) == 0) {
3079 		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Bstype wildcard used - loading blobstore regardless of bstype\n");
3080 	} else {
3081 		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Unexpected bstype\n");
3082 		SPDK_TRACEDUMP(SPDK_LOG_BLOB, "Expected:", ctx->bs->bstype.bstype, SPDK_BLOBSTORE_TYPE_LENGTH);
3083 		SPDK_TRACEDUMP(SPDK_LOG_BLOB, "Found:", ctx->super->bstype.bstype, SPDK_BLOBSTORE_TYPE_LENGTH);
3084 		_spdk_bs_load_ctx_fail(seq, ctx, -ENXIO);
3085 		return;
3086 	}
3087 
3088 	/* Parse the super block */
3089 	ctx->bs->clean = 1;
3090 	ctx->bs->cluster_sz = ctx->super->cluster_size;
3091 	ctx->bs->total_clusters = ctx->bs->dev->blockcnt / (ctx->bs->cluster_sz / ctx->bs->dev->blocklen);
3092 	ctx->bs->pages_per_cluster = ctx->bs->cluster_sz / SPDK_BS_PAGE_SIZE;
3093 	ctx->bs->md_start = ctx->super->md_start;
3094 	ctx->bs->md_len = ctx->super->md_len;
3095 	ctx->bs->total_data_clusters = ctx->bs->total_clusters - divide_round_up(
3096 					       ctx->bs->md_start + ctx->bs->md_len, ctx->bs->pages_per_cluster);
3097 	ctx->bs->super_blob = ctx->super->super_blob;
3098 	memcpy(&ctx->bs->bstype, &ctx->super->bstype, sizeof(ctx->super->bstype));
3099 
3100 	if (ctx->super->clean == 0) {
3101 		_spdk_bs_recover(seq, ctx, 0);
3102 	} else if (ctx->super->used_blobid_mask_len == 0) {
3103 		/*
3104 		 * Metadata is clean, but this is an old metadata format without
3105 		 *  a blobid mask.  Clear the clean bit and then build the masks
3106 		 *  using _spdk_bs_recover.
3107 		 */
3108 		ctx->super->clean = 0;
3109 		ctx->bs->clean = 0;
3110 		_spdk_bs_write_super(seq, ctx->bs, ctx->super, _spdk_bs_recover, ctx);
3111 	} else {
3112 		_spdk_bs_load_read_used_pages(seq, ctx);
3113 	}
3114 }
3115 
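/*
 * Typical usage (a sketch; the callback name is illustrative):
 *
 *	spdk_bs_load(dev, NULL, load_complete_cb, cb_arg);
 *
 * Passing NULL for the opts selects the defaults from spdk_bs_opts_init();
 * on success the callback receives the spdk_blob_store handle.
 */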
3116 void
3117 spdk_bs_load(struct spdk_bs_dev *dev, struct spdk_bs_opts *o,
3118 	     spdk_bs_op_with_handle_complete cb_fn, void *cb_arg)
3119 {
3120 	struct spdk_blob_store	*bs;
3121 	struct spdk_bs_cpl	cpl;
3122 	spdk_bs_sequence_t	*seq;
3123 	struct spdk_bs_load_ctx *ctx;
3124 	struct spdk_bs_opts	opts = {};
3125 
3126 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Loading blobstore from dev %p\n", dev);
3127 
3128 	if ((SPDK_BS_PAGE_SIZE % dev->blocklen) != 0) {
3129 		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "unsupported dev block length of %" PRIu32 "\n", dev->blocklen);
3130 		dev->destroy(dev);
3131 		cb_fn(cb_arg, NULL, -EINVAL);
3132 		return;
3133 	}
3134 
3135 	if (o) {
3136 		opts = *o;
3137 	} else {
3138 		spdk_bs_opts_init(&opts);
3139 	}
3140 
3141 	if (opts.max_md_ops == 0 || opts.max_channel_ops == 0) {
3142 		dev->destroy(dev);
3143 		cb_fn(cb_arg, NULL, -EINVAL);
3144 		return;
3145 	}
3146 
3147 	bs = _spdk_bs_alloc(dev, &opts);
3148 	if (!bs) {
3149 		dev->destroy(dev);
3150 		cb_fn(cb_arg, NULL, -ENOMEM);
3151 		return;
3152 	}
3153 
3154 	ctx = calloc(1, sizeof(*ctx));
3155 	if (!ctx) {
3156 		_spdk_bs_free(bs);
3157 		cb_fn(cb_arg, NULL, -ENOMEM);
3158 		return;
3159 	}
3160 
3161 	ctx->bs = bs;
3162 	ctx->is_load = true;
3163 	ctx->iter_cb_fn = opts.iter_cb_fn;
3164 	ctx->iter_cb_arg = opts.iter_cb_arg;
3165 
3166 	/* Allocate memory for the super block */
3167 	ctx->super = spdk_dma_zmalloc(sizeof(*ctx->super), 0x1000, NULL);
3168 	if (!ctx->super) {
3169 		free(ctx);
3170 		_spdk_bs_free(bs);
3171 		cb_fn(cb_arg, NULL, -ENOMEM);
3172 		return;
3173 	}
3174 
3175 	cpl.type = SPDK_BS_CPL_TYPE_BS_HANDLE;
3176 	cpl.u.bs_handle.cb_fn = cb_fn;
3177 	cpl.u.bs_handle.cb_arg = cb_arg;
3178 	cpl.u.bs_handle.bs = bs;
3179 
3180 	seq = spdk_bs_sequence_start(bs->md_channel, &cpl);
3181 	if (!seq) {
3182 		spdk_dma_free(ctx->super);
3183 		free(ctx);
3184 		_spdk_bs_free(bs);
3185 		cb_fn(cb_arg, NULL, -ENOMEM);
3186 		return;
3187 	}
3188 
3189 	/* Read the super block */
3190 	spdk_bs_sequence_read_dev(seq, ctx->super, _spdk_bs_page_to_lba(bs, 0),
3191 				  _spdk_bs_byte_to_lba(bs, sizeof(*ctx->super)),
3192 				  _spdk_bs_load_super_cpl, ctx);
3193 }
3194 
3195 /* END spdk_bs_load */
3196 
3197 /* START spdk_bs_init */
3198 
3199 struct spdk_bs_init_ctx {
3200 	struct spdk_blob_store		*bs;
3201 	struct spdk_bs_super_block	*super;
3202 };
3203 
3204 static void
3205 _spdk_bs_init_persist_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
3206 {
3207 	struct spdk_bs_init_ctx *ctx = cb_arg;
3208 
3209 	spdk_dma_free(ctx->super);
3210 	free(ctx);
3211 
3212 	spdk_bs_sequence_finish(seq, bserrno);
3213 }
3214 
3215 static void
3216 _spdk_bs_init_trim_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
3217 {
3218 	struct spdk_bs_init_ctx *ctx = cb_arg;
3219 
3220 	/* Write super block */
3221 	spdk_bs_sequence_write_dev(seq, ctx->super, _spdk_bs_page_to_lba(ctx->bs, 0),
3222 				   _spdk_bs_byte_to_lba(ctx->bs, sizeof(*ctx->super)),
3223 				   _spdk_bs_init_persist_super_cpl, ctx);
3224 }
3225 
3226 void
3227 spdk_bs_init(struct spdk_bs_dev *dev, struct spdk_bs_opts *o,
3228 	     spdk_bs_op_with_handle_complete cb_fn, void *cb_arg)
3229 {
3230 	struct spdk_bs_init_ctx *ctx;
3231 	struct spdk_blob_store	*bs;
3232 	struct spdk_bs_cpl	cpl;
3233 	spdk_bs_sequence_t	*seq;
3234 	spdk_bs_batch_t		*batch;
3235 	uint64_t		num_md_lba;
3236 	uint64_t		num_md_pages;
3237 	uint64_t		num_md_clusters;
3238 	uint32_t		i;
3239 	struct spdk_bs_opts	opts = {};
3240 	int			rc;
3241 
3242 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Initializing blobstore on dev %p\n", dev);
3243 
3244 	if ((SPDK_BS_PAGE_SIZE % dev->blocklen) != 0) {
3245 		SPDK_ERRLOG("unsupported dev block length of %" PRIu32 "\n",
3246 			    dev->blocklen);
3247 		dev->destroy(dev);
3248 		cb_fn(cb_arg, NULL, -EINVAL);
3249 		return;
3250 	}
3251 
3252 	if (o) {
3253 		opts = *o;
3254 	} else {
3255 		spdk_bs_opts_init(&opts);
3256 	}
3257 
3258 	if (_spdk_bs_opts_verify(&opts) != 0) {
3259 		dev->destroy(dev);
3260 		cb_fn(cb_arg, NULL, -EINVAL);
3261 		return;
3262 	}
3263 
3264 	bs = _spdk_bs_alloc(dev, &opts);
3265 	if (!bs) {
3266 		dev->destroy(dev);
3267 		cb_fn(cb_arg, NULL, -ENOMEM);
3268 		return;
3269 	}
3270 
3271 	if (opts.num_md_pages == SPDK_BLOB_OPTS_NUM_MD_PAGES) {
3272 		/* By default, allocate 1 page per cluster.
3273 		 * Technically, this over-allocates metadata
3274 		 * because more metadata will reduce the number
3275 		 * of usable clusters. This can be addressed with
3276 		 * more complex math in the future.
3277 		 */
3278 		bs->md_len = bs->total_clusters;
3279 	} else {
3280 		bs->md_len = opts.num_md_pages;
3281 	}
3282 
3283 	rc = spdk_bit_array_resize(&bs->used_md_pages, bs->md_len);
3284 	if (rc < 0) {
3285 		_spdk_bs_free(bs);
3286 		cb_fn(cb_arg, NULL, -ENOMEM);
3287 		return;
3288 	}
3289 
3290 	rc = spdk_bit_array_resize(&bs->used_blobids, bs->md_len);
3291 	if (rc < 0) {
3292 		_spdk_bs_free(bs);
3293 		cb_fn(cb_arg, NULL, -ENOMEM);
3294 		return;
3295 	}
3296 
3297 	ctx = calloc(1, sizeof(*ctx));
3298 	if (!ctx) {
3299 		_spdk_bs_free(bs);
3300 		cb_fn(cb_arg, NULL, -ENOMEM);
3301 		return;
3302 	}
3303 
3304 	ctx->bs = bs;
3305 
3306 	/* Allocate memory for the super block */
3307 	ctx->super = spdk_dma_zmalloc(sizeof(*ctx->super), 0x1000, NULL);
3308 	if (!ctx->super) {
3309 		free(ctx);
3310 		_spdk_bs_free(bs);
3311 		cb_fn(cb_arg, NULL, -ENOMEM);
3312 		return;
3313 	}
3314 	memcpy(ctx->super->signature, SPDK_BS_SUPER_BLOCK_SIG,
3315 	       sizeof(ctx->super->signature));
3316 	ctx->super->version = SPDK_BS_VERSION;
3317 	ctx->super->length = sizeof(*ctx->super);
3318 	ctx->super->super_blob = bs->super_blob;
3319 	ctx->super->clean = 0;
3320 	ctx->super->cluster_size = bs->cluster_sz;
3321 	memcpy(&ctx->super->bstype, &bs->bstype, sizeof(bs->bstype));
3322 
3323 	/* Calculate how many pages the metadata consumes at the front
3324 	 * of the disk.
3325 	 */
3326 
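	/*
	 * Rough worked example (a sketch, assuming a 10GiB dev with a 1MiB
	 *  cluster and the default num_md_pages): total_clusters and md_len are
	 *  both 10240, so each mask below needs ceil(10240 / 8) = 1280 bytes
	 *  plus a small header, i.e. one 4KiB page.  The metadata region then
	 *  starts at page 4 and the front of the disk spans 4 + 10240 pages.
	 */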
3327 	/* The super block uses 1 page */
3328 	num_md_pages = 1;
3329 
3330 	/* The used_md_pages mask requires 1 bit per metadata page, rounded
3331 	 * up to the nearest page, plus a header.
3332 	 */
3333 	ctx->super->used_page_mask_start = num_md_pages;
3334 	ctx->super->used_page_mask_len = divide_round_up(sizeof(struct spdk_bs_md_mask) +
3335 					 divide_round_up(bs->md_len, 8),
3336 					 SPDK_BS_PAGE_SIZE);
3337 	num_md_pages += ctx->super->used_page_mask_len;
3338 
3339 	/* The used_clusters mask requires 1 bit per cluster, rounded
3340 	 * up to the nearest page, plus a header.
3341 	 */
3342 	ctx->super->used_cluster_mask_start = num_md_pages;
3343 	ctx->super->used_cluster_mask_len = divide_round_up(sizeof(struct spdk_bs_md_mask) +
3344 					    divide_round_up(bs->total_clusters, 8),
3345 					    SPDK_BS_PAGE_SIZE);
3346 	num_md_pages += ctx->super->used_cluster_mask_len;
3347 
3348 	/* The used_blobids mask requires 1 bit per metadata page, rounded
3349 	 * up to the nearest page, plus a header.
3350 	 */
3351 	ctx->super->used_blobid_mask_start = num_md_pages;
3352 	ctx->super->used_blobid_mask_len = divide_round_up(sizeof(struct spdk_bs_md_mask) +
3353 					   divide_round_up(bs->md_len, 8),
3354 					   SPDK_BS_PAGE_SIZE);
3355 	num_md_pages += ctx->super->used_blobid_mask_len;
3356 
3357 	/* The metadata region size was chosen above */
3358 	ctx->super->md_start = bs->md_start = num_md_pages;
3359 	ctx->super->md_len = bs->md_len;
3360 	num_md_pages += bs->md_len;
3361 
3362 	num_md_lba = _spdk_bs_page_to_lba(bs, num_md_pages);
3363 
3364 	ctx->super->crc = _spdk_blob_md_page_calc_crc(ctx->super);
3365 
3366 	num_md_clusters = divide_round_up(num_md_pages, bs->pages_per_cluster);
3367 	if (num_md_clusters > bs->total_clusters) {
3368 		SPDK_ERRLOG("Blobstore metadata cannot use more clusters than are available, "
3369 			    "please decrease number of pages reserved for metadata "
3370 			    "or increase cluster size.\n");
3371 		spdk_dma_free(ctx->super);
3372 		free(ctx);
3373 		_spdk_bs_free(bs);
3374 		cb_fn(cb_arg, NULL, -ENOMEM);
3375 		return;
3376 	}
3377 	/* Claim all of the clusters used by the metadata */
3378 	for (i = 0; i < num_md_clusters; i++) {
3379 		_spdk_bs_claim_cluster(bs, i);
3380 	}
3381 
3382 	bs->total_data_clusters = bs->num_free_clusters;
3383 
3384 	cpl.type = SPDK_BS_CPL_TYPE_BS_HANDLE;
3385 	cpl.u.bs_handle.cb_fn = cb_fn;
3386 	cpl.u.bs_handle.cb_arg = cb_arg;
3387 	cpl.u.bs_handle.bs = bs;
3388 
3389 	seq = spdk_bs_sequence_start(bs->md_channel, &cpl);
3390 	if (!seq) {
3391 		spdk_dma_free(ctx->super);
3392 		free(ctx);
3393 		_spdk_bs_free(bs);
3394 		cb_fn(cb_arg, NULL, -ENOMEM);
3395 		return;
3396 	}
3397 
3398 	batch = spdk_bs_sequence_to_batch(seq, _spdk_bs_init_trim_cpl, ctx);
3399 
3400 	/* Clear metadata space */
3401 	spdk_bs_batch_write_zeroes_dev(batch, 0, num_md_lba);
3402 	/* Trim data clusters */
3403 	spdk_bs_batch_unmap_dev(batch, num_md_lba, ctx->bs->dev->blockcnt - num_md_lba);
3404 
3405 	spdk_bs_batch_close(batch);
3406 }
3407 
3408 /* END spdk_bs_init */
3409 
3410 /* START spdk_bs_destroy */
3411 
3412 static void
3413 _spdk_bs_destroy_trim_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
3414 {
3415 	struct spdk_bs_init_ctx *ctx = cb_arg;
3416 	struct spdk_blob_store *bs = ctx->bs;
3417 
3418 	/*
3419 	 * We need to defer calling spdk_bs_call_cpl() until after
3420 	 * dev destruction, so tuck these away for later use.
3421 	 */
3422 	bs->unload_err = bserrno;
3423 	memcpy(&bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl));
3424 	seq->cpl.type = SPDK_BS_CPL_TYPE_NONE;
3425 
3426 	spdk_bs_sequence_finish(seq, bserrno);
3427 
3428 	_spdk_bs_free(bs);
3429 	free(ctx);
3430 }
3431 
3432 void
3433 spdk_bs_destroy(struct spdk_blob_store *bs, spdk_bs_op_complete cb_fn,
3434 		void *cb_arg)
3435 {
3436 	struct spdk_bs_cpl	cpl;
3437 	spdk_bs_sequence_t	*seq;
3438 	struct spdk_bs_init_ctx *ctx;
3439 
3440 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Destroying blobstore\n");
3441 
3442 	if (!TAILQ_EMPTY(&bs->blobs)) {
3443 		SPDK_ERRLOG("Blobstore still has open blobs\n");
3444 		cb_fn(cb_arg, -EBUSY);
3445 		return;
3446 	}
3447 
3448 	cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
3449 	cpl.u.bs_basic.cb_fn = cb_fn;
3450 	cpl.u.bs_basic.cb_arg = cb_arg;
3451 
3452 	ctx = calloc(1, sizeof(*ctx));
3453 	if (!ctx) {
3454 		cb_fn(cb_arg, -ENOMEM);
3455 		return;
3456 	}
3457 
3458 	ctx->bs = bs;
3459 
3460 	seq = spdk_bs_sequence_start(bs->md_channel, &cpl);
3461 	if (!seq) {
3462 		free(ctx);
3463 		cb_fn(cb_arg, -ENOMEM);
3464 		return;
3465 	}
3466 
3467 	/* Write zeroes to the super block */
3468 	spdk_bs_sequence_write_zeroes_dev(seq,
3469 					  _spdk_bs_page_to_lba(bs, 0),
3470 					  _spdk_bs_byte_to_lba(bs, sizeof(struct spdk_bs_super_block)),
3471 					  _spdk_bs_destroy_trim_cpl, ctx);
3472 }
3473 
3474 /* END spdk_bs_destroy */
3475 
3476 /* START spdk_bs_unload */
3477 
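/*
 * Unload persists state in a fixed chain: read the super block, write the
 *  used_md_pages, used_blobids and used_clusters masks in turn, then write
 *  the super block back with the clean flag set to 1.
 */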
3478 static void
3479 _spdk_bs_unload_write_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
3480 {
3481 	struct spdk_bs_load_ctx	*ctx = cb_arg;
3482 
3483 	spdk_dma_free(ctx->super);
3484 
3485 	/*
3486 	 * We need to defer calling spdk_bs_call_cpl() until after
3487 	 * dev destruction, so tuck these away for later use.
3488 	 */
3489 	ctx->bs->unload_err = bserrno;
3490 	memcpy(&ctx->bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl));
3491 	seq->cpl.type = SPDK_BS_CPL_TYPE_NONE;
3492 
3493 	spdk_bs_sequence_finish(seq, bserrno);
3494 
3495 	_spdk_bs_free(ctx->bs);
3496 	free(ctx);
3497 }
3498 
3499 static void
3500 _spdk_bs_unload_write_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
3501 {
3502 	struct spdk_bs_load_ctx	*ctx = cb_arg;
3503 
3504 	spdk_dma_free(ctx->mask);
3505 	ctx->super->clean = 1;
3506 
3507 	_spdk_bs_write_super(seq, ctx->bs, ctx->super, _spdk_bs_unload_write_super_cpl, ctx);
3508 }
3509 
3510 static void
3511 _spdk_bs_unload_write_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
3512 {
3513 	struct spdk_bs_load_ctx	*ctx = cb_arg;
3514 
3515 	spdk_dma_free(ctx->mask);
3516 	ctx->mask = NULL;
3517 
3518 	_spdk_bs_write_used_clusters(seq, cb_arg, _spdk_bs_unload_write_used_clusters_cpl);
3519 }
3520 
3521 static void
3522 _spdk_bs_unload_write_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
3523 {
3524 	struct spdk_bs_load_ctx	*ctx = cb_arg;
3525 
3526 	spdk_dma_free(ctx->mask);
3527 	ctx->mask = NULL;
3528 
3529 	_spdk_bs_write_used_blobids(seq, cb_arg, _spdk_bs_unload_write_used_blobids_cpl);
3530 }
3531 
3532 static void
3533 _spdk_bs_unload_read_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
3534 {
3535 	_spdk_bs_write_used_md(seq, cb_arg, _spdk_bs_unload_write_used_pages_cpl);
3536 }
3537 
3538 void
3539 spdk_bs_unload(struct spdk_blob_store *bs, spdk_bs_op_complete cb_fn, void *cb_arg)
3540 {
3541 	struct spdk_bs_cpl	cpl;
3542 	spdk_bs_sequence_t	*seq;
3543 	struct spdk_bs_load_ctx *ctx;
3544 
3545 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Syncing blobstore\n");
3546 
3547 	if (!TAILQ_EMPTY(&bs->blobs)) {
3548 		SPDK_ERRLOG("Blobstore still has open blobs\n");
3549 		cb_fn(cb_arg, -EBUSY);
3550 		return;
3551 	}
3552 
3553 	ctx = calloc(1, sizeof(*ctx));
3554 	if (!ctx) {
3555 		cb_fn(cb_arg, -ENOMEM);
3556 		return;
3557 	}
3558 
3559 	ctx->bs = bs;
3560 	ctx->is_load = false;
3561 
3562 	ctx->super = spdk_dma_zmalloc(sizeof(*ctx->super), 0x1000, NULL);
3563 	if (!ctx->super) {
3564 		free(ctx);
3565 		cb_fn(cb_arg, -ENOMEM);
3566 		return;
3567 	}
3568 
3569 	cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
3570 	cpl.u.bs_basic.cb_fn = cb_fn;
3571 	cpl.u.bs_basic.cb_arg = cb_arg;
3572 
3573 	seq = spdk_bs_sequence_start(bs->md_channel, &cpl);
3574 	if (!seq) {
3575 		spdk_dma_free(ctx->super);
3576 		free(ctx);
3577 		cb_fn(cb_arg, -ENOMEM);
3578 		return;
3579 	}
3580 
3581 	/* Read super block */
3582 	spdk_bs_sequence_read_dev(seq, ctx->super, _spdk_bs_page_to_lba(bs, 0),
3583 				  _spdk_bs_byte_to_lba(bs, sizeof(*ctx->super)),
3584 				  _spdk_bs_unload_read_super_cpl, ctx);
3585 }
3586 
3587 /* END spdk_bs_unload */
3588 
3589 /* START spdk_bs_set_super */
3590 
3591 struct spdk_bs_set_super_ctx {
3592 	struct spdk_blob_store		*bs;
3593 	struct spdk_bs_super_block	*super;
3594 };
3595 
3596 static void
3597 _spdk_bs_set_super_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
3598 {
3599 	struct spdk_bs_set_super_ctx	*ctx = cb_arg;
3600 
3601 	if (bserrno != 0) {
3602 		SPDK_ERRLOG("Unable to write to super block of blobstore\n");
3603 	}
3604 
3605 	spdk_dma_free(ctx->super);
3606 
3607 	spdk_bs_sequence_finish(seq, bserrno);
3608 
3609 	free(ctx);
3610 }
3611 
3612 static void
3613 _spdk_bs_set_super_read_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
3614 {
3615 	struct spdk_bs_set_super_ctx	*ctx = cb_arg;
3616 
3617 	if (bserrno != 0) {
3618 		SPDK_ERRLOG("Unable to read super block of blobstore\n");
3619 		spdk_dma_free(ctx->super);
3620 		spdk_bs_sequence_finish(seq, bserrno);
3621 		free(ctx);
3622 		return;
3623 	}
3624 
3625 	_spdk_bs_write_super(seq, ctx->bs, ctx->super, _spdk_bs_set_super_write_cpl, ctx);
3626 }
3627 
3628 void
3629 spdk_bs_set_super(struct spdk_blob_store *bs, spdk_blob_id blobid,
3630 		  spdk_bs_op_complete cb_fn, void *cb_arg)
3631 {
3632 	struct spdk_bs_cpl		cpl;
3633 	spdk_bs_sequence_t		*seq;
3634 	struct spdk_bs_set_super_ctx	*ctx;
3635 
3636 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Setting super blob id on blobstore\n");
3637 
3638 	ctx = calloc(1, sizeof(*ctx));
3639 	if (!ctx) {
3640 		cb_fn(cb_arg, -ENOMEM);
3641 		return;
3642 	}
3643 
3644 	ctx->bs = bs;
3645 
3646 	ctx->super = spdk_dma_zmalloc(sizeof(*ctx->super), 0x1000, NULL);
3647 	if (!ctx->super) {
3648 		free(ctx);
3649 		cb_fn(cb_arg, -ENOMEM);
3650 		return;
3651 	}
3652 
3653 	cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
3654 	cpl.u.bs_basic.cb_fn = cb_fn;
3655 	cpl.u.bs_basic.cb_arg = cb_arg;
3656 
3657 	seq = spdk_bs_sequence_start(bs->md_channel, &cpl);
3658 	if (!seq) {
3659 		spdk_dma_free(ctx->super);
3660 		free(ctx);
3661 		cb_fn(cb_arg, -ENOMEM);
3662 		return;
3663 	}
3664 
3665 	bs->super_blob = blobid;
3666 
3667 	/* Read super block */
3668 	spdk_bs_sequence_read_dev(seq, ctx->super, _spdk_bs_page_to_lba(bs, 0),
3669 				  _spdk_bs_byte_to_lba(bs, sizeof(*ctx->super)),
3670 				  _spdk_bs_set_super_read_cpl, ctx);
3671 }
3672 
3673 /* END spdk_bs_set_super */
3674 
3675 void
3676 spdk_bs_get_super(struct spdk_blob_store *bs,
3677 		  spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
3678 {
3679 	if (bs->super_blob == SPDK_BLOBID_INVALID) {
3680 		cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOENT);
3681 	} else {
3682 		cb_fn(cb_arg, bs->super_blob, 0);
3683 	}
3684 }
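
/*
 * Usage sketch: persisting a "super blob" id so the application can find
 * its root object after the blobstore is reloaded. The callback names are
 * hypothetical; blobid is assumed to come from a prior blob creation.
 *
 *	static void
 *	set_super_done(void *cb_arg, int bserrno)
 *	{
 *		if (bserrno != 0) {
 *			SPDK_ERRLOG("Failed to set super blob: %d\n", bserrno);
 *		}
 *	}
 *
 *	static void
 *	get_super_done(void *cb_arg, spdk_blob_id blobid, int bserrno)
 *	{
 *		if (bserrno == -ENOENT) {
 *			SPDK_ERRLOG("No super blob has been set\n");
 *		}
 *	}
 *
 *	spdk_bs_set_super(bs, blobid, set_super_done, NULL);
 *	spdk_bs_get_super(bs, get_super_done, NULL);
 */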
3685 
3686 uint64_t
3687 spdk_bs_get_cluster_size(struct spdk_blob_store *bs)
3688 {
3689 	return bs->cluster_sz;
3690 }
3691 
3692 uint64_t
3693 spdk_bs_get_page_size(struct spdk_blob_store *bs)
3694 {
3695 	return SPDK_BS_PAGE_SIZE;
3696 }
3697 
3698 uint64_t
3699 spdk_bs_free_cluster_count(struct spdk_blob_store *bs)
3700 {
3701 	return bs->num_free_clusters;
3702 }
3703 
3704 uint64_t
3705 spdk_bs_total_data_cluster_count(struct spdk_blob_store *bs)
3706 {
3707 	return bs->total_data_clusters;
3708 }
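
/*
 * These four getters are synchronous, so derived values can be computed
 * inline. For example (sketch, assuming bs is a loaded blobstore), free
 * space in bytes and the number of pages per cluster:
 *
 *	uint64_t free_bytes = spdk_bs_free_cluster_count(bs) *
 *			      spdk_bs_get_cluster_size(bs);
 *	uint64_t pages_per_cluster = spdk_bs_get_cluster_size(bs) /
 *				     spdk_bs_get_page_size(bs);
 */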
3709 
3710 static int
3711 spdk_bs_register_md_thread(struct spdk_blob_store *bs)
3712 {
3713 	bs->md_channel = spdk_get_io_channel(bs);
3714 	if (!bs->md_channel) {
3715 		SPDK_ERRLOG("Failed to get IO channel.\n");
3716 		return -1;
3717 	}
3718 
3719 	return 0;
3720 }
3721 
3722 static int
3723 spdk_bs_unregister_md_thread(struct spdk_blob_store *bs)
3724 {
3725 	spdk_put_io_channel(bs->md_channel);
3726 
3727 	return 0;
3728 }
3729 
3730 spdk_blob_id spdk_blob_get_id(struct spdk_blob *blob)
3731 {
3732 	assert(blob != NULL);
3733 
3734 	return blob->id;
3735 }
3736 
3737 uint64_t spdk_blob_get_num_pages(struct spdk_blob *blob)
3738 {
3739 	assert(blob != NULL);
3740 
3741 	return _spdk_bs_cluster_to_page(blob->bs, blob->active.num_clusters);
3742 }
3743 
3744 uint64_t spdk_blob_get_num_clusters(struct spdk_blob *blob)
3745 {
3746 	assert(blob != NULL);
3747 
3748 	return blob->active.num_clusters;
3749 }
3750 
3751 /* START spdk_bs_create_blob */
3752 
3753 static void
3754 _spdk_bs_create_blob_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
3755 {
3756 	struct spdk_blob *blob = cb_arg;
3757 
3758 	_spdk_blob_free(blob);
3759 
3760 	spdk_bs_sequence_finish(seq, bserrno);
3761 }
3762 
3763 static int
3764 _spdk_blob_set_xattrs(struct spdk_blob *blob, const struct spdk_blob_xattr_opts *xattrs,
3765 		      bool internal)
3766 {
3767 	uint64_t i;
3768 	size_t value_len = 0;
3769 	int rc;
3770 	const void *value = NULL;
3771 	if (xattrs->count > 0 && xattrs->get_value == NULL) {
3772 		return -EINVAL;
3773 	}
3774 	for (i = 0; i < xattrs->count; i++) {
3775 		xattrs->get_value(xattrs->ctx, xattrs->names[i], &value, &value_len);
3776 		if (value == NULL || value_len == 0) {
3777 			return -EINVAL;
3778 		}
3779 		rc = _spdk_blob_set_xattr(blob, xattrs->names[i], value, value_len, internal);
3780 		if (rc < 0) {
3781 			return rc;
3782 		}
3783 	}
3784 	return 0;
3785 }
3786 
3787 static void
3788 _spdk_blob_set_thin_provision(struct spdk_blob *blob)
3789 {
3790 	_spdk_blob_verify_md_op(blob);
3791 	blob->invalid_flags |= SPDK_BLOB_THIN_PROV;
3792 	blob->state = SPDK_BLOB_STATE_DIRTY;
3793 }
3794 
3795 static void
3796 _spdk_bs_create_blob(struct spdk_blob_store *bs,
3797 		     const struct spdk_blob_opts *opts,
3798 		     const struct spdk_blob_xattr_opts *internal_xattrs,
3799 		     spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
3800 {
3801 	struct spdk_blob	*blob;
3802 	uint32_t		page_idx;
3803 	struct spdk_bs_cpl	cpl;
3804 	struct spdk_blob_opts	opts_default;
3805 	struct spdk_blob_xattr_opts internal_xattrs_default;
3806 	spdk_bs_sequence_t	*seq;
3807 	spdk_blob_id		id;
3808 	int rc;
3809 
3810 	assert(spdk_get_thread() == bs->md_thread);
3811 
3812 	page_idx = spdk_bit_array_find_first_clear(bs->used_md_pages, 0);
3813 	if (page_idx >= spdk_bit_array_capacity(bs->used_md_pages)) {
3814 		cb_fn(cb_arg, 0, -ENOMEM);
3815 		return;
3816 	}
3817 	spdk_bit_array_set(bs->used_blobids, page_idx);
3818 	spdk_bit_array_set(bs->used_md_pages, page_idx);
3819 
3820 	id = _spdk_bs_page_to_blobid(page_idx);
3821 
3822 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Creating blob with id %lu at page %u\n", id, page_idx);
3823 
3824 	blob = _spdk_blob_alloc(bs, id);
3825 	if (!blob) {
3826 		cb_fn(cb_arg, 0, -ENOMEM);
3827 		return;
3828 	}
3829 
3830 	if (!opts) {
3831 		spdk_blob_opts_init(&opts_default);
3832 		opts = &opts_default;
3833 	}
3834 	if (!internal_xattrs) {
3835 		_spdk_blob_xattrs_init(&internal_xattrs_default);
3836 		internal_xattrs = &internal_xattrs_default;
3837 	}
3838 
3839 	rc = _spdk_blob_set_xattrs(blob, &opts->xattrs, false);
3840 	if (rc < 0) {
3841 		_spdk_blob_free(blob);
3842 		cb_fn(cb_arg, 0, rc);
3843 		return;
3844 	}
3845 
3846 	rc = _spdk_blob_set_xattrs(blob, internal_xattrs, true);
3847 	if (rc < 0) {
3848 		_spdk_blob_free(blob);
3849 		cb_fn(cb_arg, 0, rc);
3850 		return;
3851 	}
3852 
3853 	if (opts->thin_provision) {
3854 		_spdk_blob_set_thin_provision(blob);
3855 	}
3856 
3857 	rc = _spdk_blob_resize(blob, opts->num_clusters);
3858 	if (rc < 0) {
3859 		_spdk_blob_free(blob);
3860 		cb_fn(cb_arg, 0, rc);
3861 		return;
3862 	}
3863 	cpl.type = SPDK_BS_CPL_TYPE_BLOBID;
3864 	cpl.u.blobid.cb_fn = cb_fn;
3865 	cpl.u.blobid.cb_arg = cb_arg;
3866 	cpl.u.blobid.blobid = blob->id;
3867 
3868 	seq = spdk_bs_sequence_start(bs->md_channel, &cpl);
3869 	if (!seq) {
3870 		_spdk_blob_free(blob);
3871 		cb_fn(cb_arg, 0, -ENOMEM);
3872 		return;
3873 	}
3874 
3875 	_spdk_blob_persist(seq, blob, _spdk_bs_create_blob_cpl, blob);
3876 }
3877 
3878 void spdk_bs_create_blob(struct spdk_blob_store *bs,
3879 			 spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
3880 {
3881 	_spdk_bs_create_blob(bs, NULL, NULL, cb_fn, cb_arg);
3882 }
3883 
3884 void spdk_bs_create_blob_ext(struct spdk_blob_store *bs, const struct spdk_blob_opts *opts,
3885 			     spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
3886 {
3887 	_spdk_bs_create_blob(bs, opts, NULL, cb_fn, cb_arg);
3888 }
3889 
3890 /* END spdk_bs_create_blob */
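
/*
 * Usage sketch: creating a thin-provisioned blob sized in clusters via the
 * _ext variant. The callbacks are hypothetical; with thin_provision set,
 * clusters are allocated lazily on first write rather than at creation.
 * open_done would have the spdk_blob_op_with_handle_complete signature.
 *
 *	static void
 *	blob_create_done(void *cb_arg, spdk_blob_id blobid, int bserrno)
 *	{
 *		struct spdk_blob_store *bs = cb_arg;
 *
 *		if (bserrno == 0) {
 *			spdk_bs_open_blob(bs, blobid, open_done, NULL);
 *		}
 *	}
 *
 *	struct spdk_blob_opts opts;
 *
 *	spdk_blob_opts_init(&opts);
 *	opts.num_clusters = 16;
 *	opts.thin_provision = true;
 *	spdk_bs_create_blob_ext(bs, &opts, blob_create_done, bs);
 */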
3891 
3892 /* START blob_cleanup */
3893 
3894 struct spdk_clone_snapshot_ctx {
3895 	struct spdk_bs_cpl      cpl;
3896 	int bserrno;
3897 	bool frozen;
3898 
3899 	struct spdk_io_channel *channel;
3900 
3901 	/* Current cluster for inflate operation */
3902 	uint64_t cluster;
3903 
3904 	struct {
3905 		spdk_blob_id id;
3906 		struct spdk_blob *blob;
3907 	} original;
3908 	struct {
3909 		spdk_blob_id id;
3910 		struct spdk_blob *blob;
3911 	} new;
3912 
3913 	/* xattrs specified for snapshot/clones only. They have no impact on
3914 	 * the original blob's xattrs. */
3915 	const struct spdk_blob_xattr_opts *xattrs;
3916 };
3917 
3918 static void
3919 _spdk_bs_clone_snapshot_cleanup_finish(void *cb_arg, int bserrno)
3920 {
3921 	struct spdk_clone_snapshot_ctx *ctx = cb_arg;
3922 	struct spdk_bs_cpl *cpl = &ctx->cpl;
3923 
3924 	if (bserrno != 0) {
3925 		if (ctx->bserrno != 0) {
3926 			SPDK_ERRLOG("Cleanup error %d\n", bserrno);
3927 		} else {
3928 			ctx->bserrno = bserrno;
3929 		}
3930 	}
3931 
3932 	switch (cpl->type) {
3933 	case SPDK_BS_CPL_TYPE_BLOBID:
3934 		cpl->u.blobid.cb_fn(cpl->u.blobid.cb_arg, cpl->u.blobid.blobid, ctx->bserrno);
3935 		break;
3936 	case SPDK_BS_CPL_TYPE_BLOB_BASIC:
3937 		cpl->u.blob_basic.cb_fn(cpl->u.blob_basic.cb_arg, ctx->bserrno);
3938 		break;
3939 	default:
3940 		SPDK_UNREACHABLE();
3941 		break;
3942 	}
3943 
3944 	free(ctx);
3945 }
3946 
3947 static void
3948 _spdk_bs_snapshot_unfreeze_cpl(void *cb_arg, int bserrno)
3949 {
3950 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
3951 	struct spdk_blob *origblob = ctx->original.blob;
3952 
3953 	if (bserrno != 0) {
3954 		if (ctx->bserrno != 0) {
3955 			SPDK_ERRLOG("Unfreeze error %d\n", bserrno);
3956 		} else {
3957 			ctx->bserrno = bserrno;
3958 		}
3959 	}
3960 
3961 	ctx->original.id = origblob->id;
3962 	spdk_blob_close(origblob, _spdk_bs_clone_snapshot_cleanup_finish, ctx);
3963 }
3964 
3965 static void
3966 _spdk_bs_clone_snapshot_origblob_cleanup(void *cb_arg, int bserrno)
3967 {
3968 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
3969 	struct spdk_blob *origblob = ctx->original.blob;
3970 
3971 	if (bserrno != 0) {
3972 		if (ctx->bserrno != 0) {
3973 			SPDK_ERRLOG("Cleanup error %d\n", bserrno);
3974 		} else {
3975 			ctx->bserrno = bserrno;
3976 		}
3977 	}
3978 
3979 	if (ctx->frozen) {
3980 		/* Unfreeze any outstanding I/O */
3981 		_spdk_blob_unfreeze_io(origblob, _spdk_bs_snapshot_unfreeze_cpl, ctx);
3982 	} else {
3983 		_spdk_bs_snapshot_unfreeze_cpl(ctx, 0);
3984 	}
3986 }
3987 
3988 static void
3989 _spdk_bs_clone_snapshot_newblob_cleanup(void *cb_arg, int bserrno)
3990 {
3991 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
3992 	struct spdk_blob *newblob = ctx->new.blob;
3993 
3994 	if (bserrno != 0) {
3995 		if (ctx->bserrno != 0) {
3996 			SPDK_ERRLOG("Cleanup error %d\n", bserrno);
3997 		} else {
3998 			ctx->bserrno = bserrno;
3999 		}
4000 	}
4001 
4002 	ctx->new.id = newblob->id;
4003 	spdk_blob_close(newblob, _spdk_bs_clone_snapshot_origblob_cleanup, ctx);
4004 }
4005 
4006 /* END blob_cleanup */
4007 
4008 /* START spdk_bs_create_snapshot */
4009 
4010 static void
4011 _spdk_bs_snapshot_origblob_sync_cpl(void *cb_arg, int bserrno)
4012 {
4013 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
4014 	struct spdk_blob *newblob = ctx->new.blob;
4015 
4016 	if (bserrno != 0) {
4017 		_spdk_bs_clone_snapshot_newblob_cleanup(ctx, bserrno);
4018 		return;
4019 	}
4020 
4021 	/* Remove metadata descriptor SNAPSHOT_IN_PROGRESS */
4022 	bserrno = _spdk_blob_remove_xattr(newblob, SNAPSHOT_IN_PROGRESS, true);
4023 	if (bserrno != 0) {
4024 		_spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
4025 		return;
4026 	}
4027 
4028 	_spdk_bs_blob_list_add(ctx->original.blob);
4029 
4030 	spdk_blob_set_read_only(newblob);
4031 
4032 	/* sync snapshot metadata */
4033 	spdk_blob_sync_md(newblob, _spdk_bs_clone_snapshot_origblob_cleanup, ctx);
4034 }
4035 
4036 static void
4037 _spdk_bs_snapshot_newblob_sync_cpl(void *cb_arg, int bserrno)
4038 {
4039 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
4040 	struct spdk_blob *origblob = ctx->original.blob;
4041 	struct spdk_blob *newblob = ctx->new.blob;
4042 
4043 	if (bserrno != 0) {
4044 		_spdk_bs_clone_snapshot_newblob_cleanup(ctx, bserrno);
4045 		return;
4046 	}
4047 
4048 	/* Set internal xattr for snapshot id */
4049 	bserrno = _spdk_blob_set_xattr(origblob, BLOB_SNAPSHOT, &newblob->id, sizeof(spdk_blob_id), true);
4050 	if (bserrno != 0) {
4051 		_spdk_bs_clone_snapshot_newblob_cleanup(ctx, bserrno);
4052 		return;
4053 	}
4054 	origblob->parent_id = newblob->id;
4055 
4056 	/* Create new back_bs_dev for snapshot */
4057 	origblob->back_bs_dev = spdk_bs_create_blob_bs_dev(newblob);
4058 	if (origblob->back_bs_dev == NULL) {
4059 		_spdk_bs_clone_snapshot_newblob_cleanup(ctx, -EINVAL);
4060 		return;
4061 	}
4062 
4063 	/* set clone blob as thin provisioned */
4064 	_spdk_blob_set_thin_provision(origblob);
4065 
4066 	_spdk_bs_blob_list_add(newblob);
4067 
4068 	/* Zero out origblob cluster map */
4069 	memset(origblob->active.clusters, 0,
4070 	       origblob->active.num_clusters * sizeof(*origblob->active.clusters));
4071 
4072 	/* sync clone metadata */
4073 	spdk_blob_sync_md(origblob, _spdk_bs_snapshot_origblob_sync_cpl, ctx);
4074 }
4075 
4076 static void
4077 _spdk_bs_snapshot_freeze_cpl(void *cb_arg, int rc)
4078 {
4079 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
4080 	struct spdk_blob *origblob = ctx->original.blob;
4081 	struct spdk_blob *newblob = ctx->new.blob;
4082 
	if (rc != 0) {
		_spdk_bs_clone_snapshot_newblob_cleanup(ctx, rc);
		return;
	}

	ctx->frozen = true;
4084 
4085 	/* set new back_bs_dev for snapshot */
4086 	newblob->back_bs_dev = origblob->back_bs_dev;
4087 	/* Set invalid flags from origblob */
4088 	newblob->invalid_flags = origblob->invalid_flags;
4089 
4090 	/* Copy cluster map to snapshot */
4091 	memcpy(newblob->active.clusters, origblob->active.clusters,
4092 	       origblob->active.num_clusters * sizeof(*origblob->active.clusters));
4093 
4094 	/* sync snapshot metadata */
4095 	spdk_blob_sync_md(newblob, _spdk_bs_snapshot_newblob_sync_cpl, ctx);
4096 }
4097 
4098 static void
4099 _spdk_bs_snapshot_newblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
4100 {
4101 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
4102 	struct spdk_blob *origblob = ctx->original.blob;
4103 	struct spdk_blob *newblob = _blob;
4104 
4105 	if (bserrno != 0) {
4106 		_spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
4107 		return;
4108 	}
4109 
4110 	ctx->new.blob = newblob;
4111 
4112 	_spdk_blob_freeze_io(origblob, _spdk_bs_snapshot_freeze_cpl, ctx);
4113 }
4114 
4115 static void
4116 _spdk_bs_snapshot_newblob_create_cpl(void *cb_arg, spdk_blob_id blobid, int bserrno)
4117 {
4118 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
4119 	struct spdk_blob *origblob = ctx->original.blob;
4120 
4121 	if (bserrno != 0) {
4122 		_spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
4123 		return;
4124 	}
4125 
4126 	ctx->new.id = blobid;
4127 	ctx->cpl.u.blobid.blobid = blobid;
4128 
4129 	spdk_bs_open_blob(origblob->bs, ctx->new.id, _spdk_bs_snapshot_newblob_open_cpl, ctx);
4130 }
4131 
4133 static void
4134 _spdk_bs_xattr_snapshot(void *arg, const char *name,
4135 			const void **value, size_t *value_len)
4136 {
4137 	struct spdk_blob *blob = (struct spdk_blob *)arg;
4138 
4139 	assert(strncmp(name, SNAPSHOT_IN_PROGRESS, sizeof(SNAPSHOT_IN_PROGRESS)) == 0);
4140 	*value = &blob->id;
4141 	*value_len = sizeof(blob->id);
4142 }
4143 
4144 static void
4145 _spdk_bs_snapshot_origblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
4146 {
4147 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
4148 	struct spdk_blob_opts opts;
4149 	struct spdk_blob_xattr_opts internal_xattrs;
4150 	char *xattrs_names[] = { SNAPSHOT_IN_PROGRESS };
4151 
4152 	if (bserrno != 0) {
4153 		_spdk_bs_clone_snapshot_cleanup_finish(ctx, bserrno);
4154 		return;
4155 	}
4156 
4157 	ctx->original.blob = _blob;
4158 
4159 	if (_blob->data_ro || _blob->md_ro) {
4160 		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Cannot create snapshot from read only blob with id %lu\n",
4161 			      _blob->id);
4162 		_spdk_bs_clone_snapshot_origblob_cleanup(ctx, -EINVAL);
4163 		return;
4164 	}
4165 
4166 	spdk_blob_opts_init(&opts);
4167 	_spdk_blob_xattrs_init(&internal_xattrs);
4168 
4169 	/* Change the size of new blob to the same as in original blob,
4170 	 * but do not allocate clusters */
4171 	opts.thin_provision = true;
4172 	opts.num_clusters = spdk_blob_get_num_clusters(_blob);
4173 
4174 	/* If there are any xattrs specified for snapshot, set them now */
4175 	if (ctx->xattrs) {
4176 		memcpy(&opts.xattrs, ctx->xattrs, sizeof(*ctx->xattrs));
4177 	}
4178 	/* Set internal xattr SNAPSHOT_IN_PROGRESS */
4179 	internal_xattrs.count = 1;
4180 	internal_xattrs.ctx = _blob;
4181 	internal_xattrs.names = xattrs_names;
4182 	internal_xattrs.get_value = _spdk_bs_xattr_snapshot;
4183 
4184 	_spdk_bs_create_blob(_blob->bs, &opts, &internal_xattrs,
4185 			     _spdk_bs_snapshot_newblob_create_cpl, ctx);
4186 }
4187 
4188 void spdk_bs_create_snapshot(struct spdk_blob_store *bs, spdk_blob_id blobid,
4189 			     const struct spdk_blob_xattr_opts *snapshot_xattrs,
4190 			     spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
4191 {
4192 	struct spdk_clone_snapshot_ctx *ctx = calloc(1, sizeof(*ctx));
4193 
4194 	if (!ctx) {
4195 		cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOMEM);
4196 		return;
4197 	}
4198 	ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOBID;
4199 	ctx->cpl.u.blobid.cb_fn = cb_fn;
4200 	ctx->cpl.u.blobid.cb_arg = cb_arg;
4201 	ctx->cpl.u.blobid.blobid = SPDK_BLOBID_INVALID;
4202 	ctx->bserrno = 0;
4203 	ctx->frozen = false;
4204 	ctx->original.id = blobid;
4205 	ctx->xattrs = snapshot_xattrs;
4206 
4207 	spdk_bs_open_blob(bs, ctx->original.id, _spdk_bs_snapshot_origblob_open_cpl, ctx);
4208 }
4209 /* END spdk_bs_create_snapshot */
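
/*
 * Usage sketch: taking a snapshot of a writable blob. On success the new
 * blob is the read-only snapshot and, per the sync completions above, the
 * original blob becomes a thin-provisioned clone backed by it.
 * snapshot_xattrs may be NULL; snapshot_done is a hypothetical
 * spdk_blob_op_with_id_complete callback.
 *
 *	spdk_bs_create_snapshot(bs, blobid, NULL, snapshot_done, NULL);
 */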
4210 
4211 /* START spdk_bs_create_clone */
4212 
4213 static void
4214 _spdk_bs_xattr_clone(void *arg, const char *name,
4215 		     const void **value, size_t *value_len)
4216 {
4217 	struct spdk_blob *blob = (struct spdk_blob *)arg;
4218 
4219 	assert(strncmp(name, BLOB_SNAPSHOT, sizeof(BLOB_SNAPSHOT)) == 0);
4220 	*value = &blob->id;
4221 	*value_len = sizeof(blob->id);
4222 }
4223 
4224 static void
4225 _spdk_bs_clone_newblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
4226 {
4227 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
	struct spdk_blob *clone = _blob;

	if (bserrno != 0) {
		_spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
		return;
	}

	ctx->new.blob = clone;
4231 	_spdk_bs_blob_list_add(clone);
4232 
4233 	spdk_blob_close(clone, _spdk_bs_clone_snapshot_origblob_cleanup, ctx);
4234 }
4235 
4236 static void
4237 _spdk_bs_clone_newblob_create_cpl(void *cb_arg, spdk_blob_id blobid, int bserrno)
4238 {
	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;

	if (bserrno != 0) {
		_spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
		return;
	}

	ctx->cpl.u.blobid.blobid = blobid;
4242 	spdk_bs_open_blob(ctx->original.blob->bs, blobid, _spdk_bs_clone_newblob_open_cpl, ctx);
4243 }
4244 
4245 static void
4246 _spdk_bs_clone_origblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
4247 {
4248 	struct spdk_clone_snapshot_ctx	*ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
4249 	struct spdk_blob_opts		opts;
4250 	struct spdk_blob_xattr_opts internal_xattrs;
4251 	char *xattr_names[] = { BLOB_SNAPSHOT };
4252 
4253 	if (bserrno != 0) {
4254 		_spdk_bs_clone_snapshot_cleanup_finish(ctx, bserrno);
4255 		return;
4256 	}
4257 
4258 	ctx->original.blob = _blob;
4259 
4260 	if (!_blob->data_ro || !_blob->md_ro) {
4261 		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Clone not from read-only blob\n");
4262 		_spdk_bs_clone_snapshot_origblob_cleanup(ctx, -EINVAL);
4263 		return;
4264 	}
4265 
4266 	spdk_blob_opts_init(&opts);
4267 	_spdk_blob_xattrs_init(&internal_xattrs);
4268 
4269 	opts.thin_provision = true;
4270 	opts.num_clusters = spdk_blob_get_num_clusters(_blob);
4271 	if (ctx->xattrs) {
4272 		memcpy(&opts.xattrs, ctx->xattrs, sizeof(*ctx->xattrs));
4273 	}
4274 
4275 	/* Set internal xattr BLOB_SNAPSHOT */
4276 	internal_xattrs.count = 1;
4277 	internal_xattrs.ctx = _blob;
4278 	internal_xattrs.names = xattr_names;
4279 	internal_xattrs.get_value = _spdk_bs_xattr_clone;
4280 
4281 	_spdk_bs_create_blob(_blob->bs, &opts, &internal_xattrs,
4282 			     _spdk_bs_clone_newblob_create_cpl, ctx);
4283 }
4284 
4285 void spdk_bs_create_clone(struct spdk_blob_store *bs, spdk_blob_id blobid,
4286 			  const struct spdk_blob_xattr_opts *clone_xattrs,
4287 			  spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
4288 {
4289 	struct spdk_clone_snapshot_ctx	*ctx = calloc(1, sizeof(*ctx));
4290 
4291 	if (!ctx) {
4292 		cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOMEM);
4293 		return;
4294 	}
4295 
4296 	ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOBID;
4297 	ctx->cpl.u.blobid.cb_fn = cb_fn;
4298 	ctx->cpl.u.blobid.cb_arg = cb_arg;
4299 	ctx->cpl.u.blobid.blobid = SPDK_BLOBID_INVALID;
4300 	ctx->bserrno = 0;
4301 	ctx->xattrs = clone_xattrs;
4302 	ctx->original.id = blobid;
4303 
4304 	spdk_bs_open_blob(bs, ctx->original.id, _spdk_bs_clone_origblob_open_cpl, ctx);
4305 }
4306 
4307 /* END spdk_bs_create_clone */
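
/*
 * Usage sketch: creating a writable clone. The open completion above
 * requires the source blob to be read-only (data_ro and md_ro), so a
 * typical flow snapshots first and then clones the snapshot. clone_done
 * is a hypothetical spdk_blob_op_with_id_complete callback, and
 * snapshot_id is assumed to come from a prior spdk_bs_create_snapshot()
 * completion.
 *
 *	spdk_bs_create_clone(bs, snapshot_id, NULL, clone_done, NULL);
 */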
4308 
4309 /* START spdk_bs_inflate_blob */
4310 
4311 static void
4312 _spdk_bs_inflate_blob_sync(void *cb_arg, int bserrno)
4313 {
4314 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
4315 	struct spdk_blob *_blob = ctx->original.blob;
4316 
4317 	if (bserrno != 0) {
4318 		_spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
4319 		return;
4320 	}
4321 
4322 	/* Destroy back_bs_dev */
4323 	_blob->back_bs_dev->destroy(_blob->back_bs_dev);
4324 	_blob->back_bs_dev = NULL;
4325 
4326 	_spdk_bs_clone_snapshot_origblob_cleanup(ctx, 0);
4327 }
4328 
4329 static void
4330 _spdk_bs_inflate_blob_done(void *cb_arg, int bserrno)
4331 {
4332 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
4333 	struct spdk_blob *_blob = ctx->original.blob;
4334 
4335 	if (bserrno != 0) {
4336 		_spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
4337 		return;
4338 	}
4339 
4340 	_spdk_bs_blob_list_remove(_blob);
4341 
4342 	_spdk_blob_remove_xattr(_blob, BLOB_SNAPSHOT, true);
4343 
4344 	/* Unset thin provision */
4345 	_blob->invalid_flags = _blob->invalid_flags & ~SPDK_BLOB_THIN_PROV;
4346 	_blob->state = SPDK_BLOB_STATE_DIRTY;
4347 
4348 	spdk_blob_sync_md(_blob, _spdk_bs_inflate_blob_sync, ctx);
4349 }
4350 
4351 static void
4352 _spdk_bs_inflate_blob_touch_next(void *cb_arg, int bserrno)
4353 {
4354 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
4355 	struct spdk_blob *_blob = ctx->original.blob;
4356 	uint64_t offset;
4357 
4358 	if (bserrno != 0) {
4359 		_spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
4360 		return;
4361 	}
4362 
4363 	for (; ctx->cluster < _blob->active.num_clusters; ctx->cluster++) {
4364 		if (_blob->active.clusters[ctx->cluster] == 0) {
4365 			break;
4366 		}
4367 	}
4368 
4369 	if (ctx->cluster < _blob->active.num_clusters) {
4370 		offset = _spdk_bs_cluster_to_page(_blob->bs, ctx->cluster);
4371 
4372 		/* It is safe to advance ctx->cluster before the write is issued */
4373 		ctx->cluster++;
4374 
4375 		/* Use a zero-length write to touch the cluster */
4376 		spdk_blob_io_write(_blob, ctx->channel, NULL, offset, 0,
4377 				   _spdk_bs_inflate_blob_touch_next, ctx);
4378 	} else {
4379 		_spdk_bs_inflate_blob_done(cb_arg, bserrno);
4380 	}
4381 }
4382 
4383 static void
4384 _spdk_bs_inflate_blob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
4385 {
4386 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
4387 	uint64_t lfc; /* lowest free cluster */
4388 	uint64_t i;
4389 
4390 	if (bserrno != 0) {
4391 		_spdk_bs_clone_snapshot_cleanup_finish(ctx, bserrno);
4392 		return;
4393 	}
4394 	ctx->original.blob = _blob;
4395 
4396 	if (spdk_blob_is_thin_provisioned(_blob) == false) {
4397 		/* This is not a thin-provisioned blob, so there is no need to inflate it. */
4398 		_spdk_bs_clone_snapshot_origblob_cleanup(ctx, 0);
4399 		return;
4400 	}
4401 
4402 	/* Do two passes - one to verify that we can obtain enough clusters
4403 	 * and another to actually claim them.
4404 	 */
4405 	lfc = 0;
4406 	for (i = 0; i < _blob->active.num_clusters; i++) {
4407 		if (_blob->active.clusters[i] == 0) {
4408 			lfc = spdk_bit_array_find_first_clear(_blob->bs->used_clusters, lfc);
4409 			if (lfc >= _blob->bs->total_clusters) {
4410 				/* No more free clusters. Cannot satisfy the request */
4411 				_spdk_bs_clone_snapshot_origblob_cleanup(ctx, -ENOSPC);
4412 				return;
4413 			}
4414 			lfc++;
4415 		}
4416 	}
4417 
4418 	ctx->cluster = 0;
4419 	_spdk_bs_inflate_blob_touch_next(ctx, 0);
4420 }
4421 
4422 void spdk_bs_inflate_blob(struct spdk_blob_store *bs, struct spdk_io_channel *channel,
4423 			  spdk_blob_id blobid, spdk_blob_op_complete cb_fn, void *cb_arg)
4424 {
4425 	struct spdk_clone_snapshot_ctx *ctx = calloc(1, sizeof(*ctx));
4426 
4427 	if (!ctx) {
4428 		cb_fn(cb_arg, -ENOMEM);
4429 		return;
4430 	}
4431 	ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
4432 	ctx->cpl.u.bs_basic.cb_fn = cb_fn;
4433 	ctx->cpl.u.bs_basic.cb_arg = cb_arg;
4434 	ctx->bserrno = 0;
4435 	ctx->original.id = blobid;
4436 	ctx->channel = channel;
4437 
4438 	spdk_bs_open_blob(bs, ctx->original.id, _spdk_bs_inflate_blob_open_cpl, ctx);
4439 }
4440 
4441 /* END spdk_bs_inflate_blob */
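
/*
 * Usage sketch: inflating a clone so it no longer depends on its parent.
 * Inflation touches each unallocated cluster with a zero-length write (see
 * _spdk_bs_inflate_blob_touch_next() above), so an I/O channel is needed
 * in addition to running on the metadata thread. inflate_done is a
 * hypothetical spdk_blob_op_complete callback.
 *
 *	struct spdk_io_channel *channel = spdk_bs_alloc_io_channel(bs);
 *
 *	spdk_bs_inflate_blob(bs, channel, blobid, inflate_done, NULL);
 *
 * The channel must remain valid until inflate_done runs, after which it
 * can be released with spdk_bs_free_io_channel().
 */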
4442 
4443 /* START spdk_blob_resize */
4444 struct spdk_bs_resize_ctx {
4445 	spdk_blob_op_complete cb_fn;
4446 	void *cb_arg;
4447 	struct spdk_blob *blob;
4448 	uint64_t sz;
4449 	int rc;
4450 };
4451 
4452 static void
4453 _spdk_bs_resize_unfreeze_cpl(void *cb_arg, int rc)
4454 {
4455 	struct spdk_bs_resize_ctx *ctx = (struct spdk_bs_resize_ctx *)cb_arg;
4456 
4457 	if (rc != 0) {
4458 		SPDK_ERRLOG("Unfreeze failed, rc=%d\n", rc);
4459 	}
4460 
4461 	if (ctx->rc != 0) {
4462 		SPDK_ERRLOG("Blob resize failed, rc=%d\n", ctx->rc);
4463 		rc = ctx->rc;
4464 	}
4465 
4466 	ctx->blob->resize_in_progress = false;
4467 
4468 	ctx->cb_fn(ctx->cb_arg, rc);
4469 	free(ctx);
4470 }
4471 
4472 static void
4473 _spdk_bs_resize_freeze_cpl(void *cb_arg, int rc)
4474 {
4475 	struct spdk_bs_resize_ctx *ctx = (struct spdk_bs_resize_ctx *)cb_arg;
4476 
4477 	if (rc != 0) {
4478 		ctx->blob->resize_in_progress = false;
4479 		ctx->cb_fn(ctx->cb_arg, rc);
4480 		free(ctx);
4481 		return;
4482 	}
4483 
4484 	ctx->rc = _spdk_blob_resize(ctx->blob, ctx->sz);
4485 
4486 	_spdk_blob_unfreeze_io(ctx->blob, _spdk_bs_resize_unfreeze_cpl, ctx);
4487 }
4488 
4489 void
4490 spdk_blob_resize(struct spdk_blob *blob, uint64_t sz, spdk_blob_op_complete cb_fn, void *cb_arg)
4491 {
4492 	struct spdk_bs_resize_ctx *ctx;
4493 
4494 	_spdk_blob_verify_md_op(blob);
4495 
4496 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Resizing blob %lu to %lu clusters\n", blob->id, sz);
4497 
4498 	if (blob->md_ro) {
4499 		cb_fn(cb_arg, -EPERM);
4500 		return;
4501 	}
4502 
4503 	if (sz == blob->active.num_clusters) {
4504 		cb_fn(cb_arg, 0);
4505 		return;
4506 	}
4507 
4508 	if (blob->resize_in_progress) {
4509 		cb_fn(cb_arg, -EBUSY);
4510 		return;
4511 	}
4512 
4513 	ctx = calloc(1, sizeof(*ctx));
4514 	if (!ctx) {
4515 		cb_fn(cb_arg, -ENOMEM);
4516 		return;
4517 	}
4518 
4519 	blob->resize_in_progress = true;
4520 	ctx->cb_fn = cb_fn;
4521 	ctx->cb_arg = cb_arg;
4522 	ctx->blob = blob;
4523 	ctx->sz = sz;
4524 	_spdk_blob_freeze_io(blob, _spdk_bs_resize_freeze_cpl, ctx);
4525 }
4526 
4527 /* END spdk_blob_resize */
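
/*
 * Usage sketch: growing an open blob to 32 clusters. The new size only
 * lives in memory until the metadata is persisted, so callers commonly
 * chain spdk_blob_sync_md() from the resize callback. sync_done is a
 * hypothetical spdk_blob_op_complete callback.
 *
 *	static void
 *	resize_done(void *cb_arg, int bserrno)
 *	{
 *		struct spdk_blob *blob = cb_arg;
 *
 *		if (bserrno == 0) {
 *			spdk_blob_sync_md(blob, sync_done, NULL);
 *		}
 *	}
 *
 *	spdk_blob_resize(blob, 32, resize_done, blob);
 */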
4528 
4529 
4530 /* START spdk_bs_delete_blob */
4531 
4532 static void
4533 _spdk_bs_delete_close_cpl(void *cb_arg, int bserrno)
4534 {
4535 	spdk_bs_sequence_t *seq = cb_arg;
4536 
4537 	spdk_bs_sequence_finish(seq, bserrno);
4538 }
4539 
4540 static void
4541 _spdk_bs_delete_persist_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4542 {
4543 	struct spdk_blob *blob = cb_arg;
4544 
4545 	if (bserrno != 0) {
4546 		/*
4547 		 * We already removed this blob from the blobstore tailq, so
4548 		 *  we need to free it here since this is the last reference
4549 		 *  to it.
4550 		 */
4551 		_spdk_blob_free(blob);
4552 		_spdk_bs_delete_close_cpl(seq, bserrno);
4553 		return;
4554 	}
4555 
4556 	/*
4557 	 * This will immediately decrement the ref_count and call
4558 	 *  the completion routine since the metadata state is clean.
4559 	 *  By calling spdk_blob_close, we reduce the number of call
4560 	 *  points into code that touches the blob->open_ref count
4561 	 *  and the blobstore's blob list.
4562 	 */
4563 	spdk_blob_close(blob, _spdk_bs_delete_close_cpl, seq);
4564 }
4565 
4566 static void
4567 _spdk_bs_delete_open_cpl(void *cb_arg, struct spdk_blob *blob, int bserrno)
4568 {
4569 	spdk_bs_sequence_t *seq = cb_arg;
4570 	uint32_t page_num;
4571 
4572 	if (bserrno != 0) {
4573 		spdk_bs_sequence_finish(seq, bserrno);
4574 		return;
4575 	}
4576 
4577 	_spdk_blob_verify_md_op(blob);
4578 
4579 	if (blob->open_ref > 1) {
4580 		/*
4581 		 * Someone has this blob open (besides this delete context).
4582 		 *  Decrement the ref count directly and return -EBUSY.
4583 		 */
4584 		blob->open_ref--;
4585 		spdk_bs_sequence_finish(seq, -EBUSY);
4586 		return;
4587 	}
4588 
4589 	bserrno = _spdk_bs_blob_list_remove(blob);
4590 	if (bserrno != 0) {
4591 		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Failed to remove blob #%" PRIu64 " from the blob list\n", blob->id);
4592 		spdk_bs_sequence_finish(seq, bserrno);
4593 		return;
4594 	}
4595 
4596 	/*
4597 	 * Remove the blob from the blob_store list now, to ensure it does not
4598 	 *  get returned after this point by _spdk_blob_lookup().
4599 	 */
4600 	TAILQ_REMOVE(&blob->bs->blobs, blob, link);
4601 	page_num = _spdk_bs_blobid_to_page(blob->id);
4602 	spdk_bit_array_clear(blob->bs->used_blobids, page_num);
4603 	blob->state = SPDK_BLOB_STATE_DIRTY;
4604 	blob->active.num_pages = 0;
4605 	_spdk_blob_resize(blob, 0);
4606 
4607 	_spdk_blob_persist(seq, blob, _spdk_bs_delete_persist_cpl, blob);
4608 }
4609 
4610 void
4611 spdk_bs_delete_blob(struct spdk_blob_store *bs, spdk_blob_id blobid,
4612 		    spdk_blob_op_complete cb_fn, void *cb_arg)
4613 {
4614 	struct spdk_bs_cpl	cpl;
4615 	spdk_bs_sequence_t	*seq;
4616 	struct spdk_blob_list	*snapshot_entry = NULL;
4617 
4618 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Deleting blob %lu\n", blobid);
4619 
4620 	assert(spdk_get_thread() == bs->md_thread);
4621 
4622 	/* Check if this is a snapshot with clones */
4623 	TAILQ_FOREACH(snapshot_entry, &bs->snapshots, link) {
4624 		if (snapshot_entry->id == blobid) {
4625 			break;
4626 		}
4627 	}
4628 	if (snapshot_entry != NULL) {
4629 		/* If the snapshot has clones, it cannot be removed */
4630 		if (!TAILQ_EMPTY(&snapshot_entry->clones)) {
4631 			SPDK_ERRLOG("Cannot remove snapshot with clones\n");
4632 			cb_fn(cb_arg, -EBUSY);
4633 			return;
4634 		}
4635 	}
4636 
4637 	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
4638 	cpl.u.blob_basic.cb_fn = cb_fn;
4639 	cpl.u.blob_basic.cb_arg = cb_arg;
4640 
4641 	seq = spdk_bs_sequence_start(bs->md_channel, &cpl);
4642 	if (!seq) {
4643 		cb_fn(cb_arg, -ENOMEM);
4644 		return;
4645 	}
4646 
4647 	spdk_bs_open_blob(bs, blobid, _spdk_bs_delete_open_cpl, seq);
4648 }
4649 
4650 /* END spdk_bs_delete_blob */
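
/*
 * Usage sketch: deleting a blob by id. Per _spdk_bs_delete_open_cpl()
 * above, the delete fails with -EBUSY while any other handle to the blob
 * remains open, and deleting a snapshot that still has clones is rejected.
 * delete_done is a hypothetical spdk_blob_op_complete callback.
 *
 *	spdk_bs_delete_blob(bs, blobid, delete_done, NULL);
 */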
4651 
4652 /* START spdk_bs_open_blob */
4653 
4654 static void
4655 _spdk_bs_open_blob_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4656 {
4657 	struct spdk_blob *blob = cb_arg;
4658 
4659 	/* If the blob has a crc error, we just return NULL. */
4660 	if (blob == NULL) {
4661 		seq->cpl.u.blob_handle.blob = NULL;
4662 		spdk_bs_sequence_finish(seq, bserrno);
4663 		return;
4664 	}
4665 
4666 	blob->open_ref++;
4667 
4668 	TAILQ_INSERT_HEAD(&blob->bs->blobs, blob, link);
4669 
4670 	spdk_bs_sequence_finish(seq, bserrno);
4671 }
4672 
4673 void spdk_bs_open_blob(struct spdk_blob_store *bs, spdk_blob_id blobid,
4674 		       spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
4675 {
4676 	struct spdk_blob		*blob;
4677 	struct spdk_bs_cpl		cpl;
4678 	spdk_bs_sequence_t		*seq;
4679 	uint32_t			page_num;
4680 
4681 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Opening blob %lu\n", blobid);
4682 	assert(spdk_get_thread() == bs->md_thread);
4683 
4684 	page_num = _spdk_bs_blobid_to_page(blobid);
4685 	if (spdk_bit_array_get(bs->used_blobids, page_num) == false) {
4686 		/* Invalid blobid */
4687 		cb_fn(cb_arg, NULL, -ENOENT);
4688 		return;
4689 	}
4690 
4691 	blob = _spdk_blob_lookup(bs, blobid);
4692 	if (blob) {
4693 		blob->open_ref++;
4694 		cb_fn(cb_arg, blob, 0);
4695 		return;
4696 	}
4697 
4698 	blob = _spdk_blob_alloc(bs, blobid);
4699 	if (!blob) {
4700 		cb_fn(cb_arg, NULL, -ENOMEM);
4701 		return;
4702 	}
4703 
4704 	cpl.type = SPDK_BS_CPL_TYPE_BLOB_HANDLE;
4705 	cpl.u.blob_handle.cb_fn = cb_fn;
4706 	cpl.u.blob_handle.cb_arg = cb_arg;
4707 	cpl.u.blob_handle.blob = blob;
4708 
4709 	seq = spdk_bs_sequence_start(bs->md_channel, &cpl);
4710 	if (!seq) {
4711 		_spdk_blob_free(blob);
4712 		cb_fn(cb_arg, NULL, -ENOMEM);
4713 		return;
4714 	}
4715 
4716 	_spdk_blob_load(seq, blob, _spdk_bs_open_blob_cpl, blob);
4717 }
4718 /* END spdk_bs_open_blob */
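
/*
 * Usage sketch: opening a blob and closing it when finished. Both calls
 * must be made from the metadata thread, per the asserts above; close_done
 * is a hypothetical spdk_blob_op_complete callback.
 *
 *	static void
 *	open_done(void *cb_arg, struct spdk_blob *blob, int bserrno)
 *	{
 *		if (bserrno != 0) {
 *			return;
 *		}
 *		(use the blob here, then eventually close it:)
 *		spdk_blob_close(blob, close_done, NULL);
 *	}
 *
 *	spdk_bs_open_blob(bs, blobid, open_done, NULL);
 */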
4719 
4720 /* START spdk_blob_set_read_only */
4721 int spdk_blob_set_read_only(struct spdk_blob *blob)
4722 {
4723 	_spdk_blob_verify_md_op(blob);
4724 
4725 	blob->data_ro_flags |= SPDK_BLOB_READ_ONLY;
4726 
4727 	blob->state = SPDK_BLOB_STATE_DIRTY;
4728 	return 0;
4729 }
4730 /* END spdk_blob_set_read_only */
4731 
4732 /* START spdk_blob_sync_md */
4733 
4734 static void
4735 _spdk_blob_sync_md_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4736 {
4737 	struct spdk_blob *blob = cb_arg;
4738 
4739 	if (bserrno == 0 && (blob->data_ro_flags & SPDK_BLOB_READ_ONLY)) {
4740 		blob->data_ro = true;
4741 		blob->md_ro = true;
4742 	}
4743 
4744 	spdk_bs_sequence_finish(seq, bserrno);
4745 }
4746 
4747 static void
4748 _spdk_blob_sync_md(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
4749 {
4750 	struct spdk_bs_cpl	cpl;
4751 	spdk_bs_sequence_t	*seq;
4752 
4753 	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
4754 	cpl.u.blob_basic.cb_fn = cb_fn;
4755 	cpl.u.blob_basic.cb_arg = cb_arg;
4756 
4757 	seq = spdk_bs_sequence_start(blob->bs->md_channel, &cpl);
4758 	if (!seq) {
4759 		cb_fn(cb_arg, -ENOMEM);
4760 		return;
4761 	}
4762 
4763 	_spdk_blob_persist(seq, blob, _spdk_blob_sync_md_cpl, blob);
4764 }
4765 
4766 void
4767 spdk_blob_sync_md(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
4768 {
4769 	_spdk_blob_verify_md_op(blob);
4770 
4771 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Syncing blob %lu\n", blob->id);
4772 
4773 	if (blob->md_ro) {
4774 		assert(blob->state == SPDK_BLOB_STATE_CLEAN);
4775 		cb_fn(cb_arg, 0);
4776 		return;
4777 	}
4778 
4779 	_spdk_blob_sync_md(blob, cb_fn, cb_arg);
4780 }
4781 
4782 /* END spdk_blob_sync_md */
4783 
4784 struct spdk_blob_insert_cluster_ctx {
4785 	struct spdk_thread	*thread;
4786 	struct spdk_blob	*blob;
4787 	uint32_t		cluster_num;	/* cluster index in blob */
4788 	uint32_t		cluster;	/* cluster on disk */
4789 	int			rc;
4790 	spdk_blob_op_complete	cb_fn;
4791 	void			*cb_arg;
4792 };
4793 
4794 static void
4795 _spdk_blob_insert_cluster_msg_cpl(void *arg)
4796 {
4797 	struct spdk_blob_insert_cluster_ctx *ctx = arg;
4798 
4799 	ctx->cb_fn(ctx->cb_arg, ctx->rc);
4800 	free(ctx);
4801 }
4802 
4803 static void
4804 _spdk_blob_insert_cluster_msg_cb(void *arg, int bserrno)
4805 {
4806 	struct spdk_blob_insert_cluster_ctx *ctx = arg;
4807 
4808 	ctx->rc = bserrno;
4809 	spdk_thread_send_msg(ctx->thread, _spdk_blob_insert_cluster_msg_cpl, ctx);
4810 }
4811 
4812 static void
4813 _spdk_blob_insert_cluster_msg(void *arg)
4814 {
4815 	struct spdk_blob_insert_cluster_ctx *ctx = arg;
4816 
4817 	ctx->rc = _spdk_blob_insert_cluster(ctx->blob, ctx->cluster_num, ctx->cluster);
4818 	if (ctx->rc != 0) {
4819 		spdk_thread_send_msg(ctx->thread, _spdk_blob_insert_cluster_msg_cpl, ctx);
4820 		return;
4821 	}
4822 
4823 	ctx->blob->state = SPDK_BLOB_STATE_DIRTY;
4824 	_spdk_blob_sync_md(ctx->blob, _spdk_blob_insert_cluster_msg_cb, ctx);
4825 }
4826 
4827 static void
4828 _spdk_blob_insert_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num,
4829 				       uint64_t cluster, spdk_blob_op_complete cb_fn, void *cb_arg)
4830 {
4831 	struct spdk_blob_insert_cluster_ctx *ctx;
4832 
4833 	ctx = calloc(1, sizeof(*ctx));
4834 	if (ctx == NULL) {
4835 		cb_fn(cb_arg, -ENOMEM);
4836 		return;
4837 	}
4838 
4839 	ctx->thread = spdk_get_thread();
4840 	ctx->blob = blob;
4841 	ctx->cluster_num = cluster_num;
4842 	ctx->cluster = cluster;
4843 	ctx->cb_fn = cb_fn;
4844 	ctx->cb_arg = cb_arg;
4845 
4846 	spdk_thread_send_msg(blob->bs->md_thread, _spdk_blob_insert_cluster_msg, ctx);
4847 }
4848 
4849 /* START spdk_blob_close */
4850 
4851 static void
4852 _spdk_blob_close_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4853 {
4854 	struct spdk_blob *blob = cb_arg;
4855 
4856 	if (bserrno == 0) {
4857 		blob->open_ref--;
4858 		if (blob->open_ref == 0) {
4859 			/*
4860 			 * Blobs with active.num_pages == 0 are deleted blobs.
4861 			 *  These blobs are removed from the blob_store list
4862 			 *  when the deletion process starts - so don't try to
4863 			 *  remove them again.
4864 			 */
4865 			if (blob->active.num_pages > 0) {
4866 				TAILQ_REMOVE(&blob->bs->blobs, blob, link);
4867 			}
4868 			_spdk_blob_free(blob);
4869 		}
4870 	}
4871 
4872 	spdk_bs_sequence_finish(seq, bserrno);
4873 }
4874 
4875 void spdk_blob_close(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
4876 {
4877 	struct spdk_bs_cpl	cpl;
4878 	spdk_bs_sequence_t	*seq;
4879 
4880 	_spdk_blob_verify_md_op(blob);
4881 
4882 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Closing blob %lu\n", blob->id);
4883 
4884 	if (blob->open_ref == 0) {
4885 		cb_fn(cb_arg, -EBADF);
4886 		return;
4887 	}
4888 
4889 	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
4890 	cpl.u.blob_basic.cb_fn = cb_fn;
4891 	cpl.u.blob_basic.cb_arg = cb_arg;
4892 
4893 	seq = spdk_bs_sequence_start(blob->bs->md_channel, &cpl);
4894 	if (!seq) {
4895 		cb_fn(cb_arg, -ENOMEM);
4896 		return;
4897 	}
4898 
4899 	/* Sync metadata */
4900 	_spdk_blob_persist(seq, blob, _spdk_blob_close_cpl, blob);
4901 }
4902 
4903 /* END spdk_blob_close */
4904 
4905 struct spdk_io_channel *spdk_bs_alloc_io_channel(struct spdk_blob_store *bs)
4906 {
4907 	return spdk_get_io_channel(bs);
4908 }
4909 
4910 void spdk_bs_free_io_channel(struct spdk_io_channel *channel)
4911 {
4912 	spdk_put_io_channel(channel);
4913 }
4914 
4915 void spdk_blob_io_unmap(struct spdk_blob *blob, struct spdk_io_channel *channel,
4916 			uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg)
4917 {
4918 	_spdk_blob_request_submit_op(blob, channel, NULL, offset, length, cb_fn, cb_arg,
4919 				     SPDK_BLOB_UNMAP);
4920 }
4921 
4922 void spdk_blob_io_write_zeroes(struct spdk_blob *blob, struct spdk_io_channel *channel,
4923 			       uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg)
4924 {
4925 	_spdk_blob_request_submit_op(blob, channel, NULL, offset, length, cb_fn, cb_arg,
4926 				     SPDK_BLOB_WRITE_ZEROES);
4927 }
4928 
4929 void spdk_blob_io_write(struct spdk_blob *blob, struct spdk_io_channel *channel,
4930 			void *payload, uint64_t offset, uint64_t length,
4931 			spdk_blob_op_complete cb_fn, void *cb_arg)
4932 {
4933 	_spdk_blob_request_submit_op(blob, channel, payload, offset, length, cb_fn, cb_arg,
4934 				     SPDK_BLOB_WRITE);
4935 }
4936 
4937 void spdk_blob_io_read(struct spdk_blob *blob, struct spdk_io_channel *channel,
4938 		       void *payload, uint64_t offset, uint64_t length,
4939 		       spdk_blob_op_complete cb_fn, void *cb_arg)
4940 {
4941 	_spdk_blob_request_submit_op(blob, channel, payload, offset, length, cb_fn, cb_arg,
4942 				     SPDK_BLOB_READ);
4943 }
4944 
4945 void spdk_blob_io_writev(struct spdk_blob *blob, struct spdk_io_channel *channel,
4946 			 struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
4947 			 spdk_blob_op_complete cb_fn, void *cb_arg)
4948 {
4949 	_spdk_blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, false);
4950 }
4951 
4952 void spdk_blob_io_readv(struct spdk_blob *blob, struct spdk_io_channel *channel,
4953 			struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
4954 			spdk_blob_op_complete cb_fn, void *cb_arg)
4955 {
4956 	_spdk_blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, true);
4957 }
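
/*
 * Usage sketch: writing one page into a blob. For the spdk_blob_io_*
 * calls, offset and length are expressed in pages (see
 * spdk_bs_get_page_size()), and payload buffers must be DMA-safe, hence
 * spdk_dma_zmalloc(). write_done is a hypothetical spdk_blob_op_complete
 * callback.
 *
 *	struct spdk_io_channel *channel = spdk_bs_alloc_io_channel(bs);
 *	uint64_t page_sz = spdk_bs_get_page_size(bs);
 *	void *buf = spdk_dma_zmalloc(page_sz, 0x1000, NULL);
 *
 *	memcpy(buf, "hello", 5);
 *	spdk_blob_io_write(blob, channel, buf, 0, 1, write_done, NULL);
 *
 * The buffer and channel must stay valid until write_done runs; release
 * them with spdk_dma_free() and spdk_bs_free_io_channel().
 */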
4958 
4959 struct spdk_bs_iter_ctx {
4960 	int64_t page_num;
4961 	struct spdk_blob_store *bs;
4962 
4963 	spdk_blob_op_with_handle_complete cb_fn;
4964 	void *cb_arg;
4965 };
4966 
4967 static void
4968 _spdk_bs_iter_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
4969 {
4970 	struct spdk_bs_iter_ctx *ctx = cb_arg;
4971 	struct spdk_blob_store *bs = ctx->bs;
4972 	spdk_blob_id id;
4973 
4974 	if (bserrno == 0) {
4975 		ctx->cb_fn(ctx->cb_arg, _blob, bserrno);
4976 		free(ctx);
4977 		return;
4978 	}
4979 
4980 	ctx->page_num++;
4981 	ctx->page_num = spdk_bit_array_find_first_set(bs->used_blobids, ctx->page_num);
4982 	if (ctx->page_num >= spdk_bit_array_capacity(bs->used_blobids)) {
4983 		ctx->cb_fn(ctx->cb_arg, NULL, -ENOENT);
4984 		free(ctx);
4985 		return;
4986 	}
4987 
4988 	id = _spdk_bs_page_to_blobid(ctx->page_num);
4989 
4990 	spdk_bs_open_blob(bs, id, _spdk_bs_iter_cpl, ctx);
4991 }
4992 
4993 void
4994 spdk_bs_iter_first(struct spdk_blob_store *bs,
4995 		   spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
4996 {
4997 	struct spdk_bs_iter_ctx *ctx;
4998 
4999 	ctx = calloc(1, sizeof(*ctx));
5000 	if (!ctx) {
5001 		cb_fn(cb_arg, NULL, -ENOMEM);
5002 		return;
5003 	}
5004 
5005 	ctx->page_num = -1;
5006 	ctx->bs = bs;
5007 	ctx->cb_fn = cb_fn;
5008 	ctx->cb_arg = cb_arg;
5009 
5010 	_spdk_bs_iter_cpl(ctx, NULL, -1);
5011 }
5012 
5013 static void
5014 _spdk_bs_iter_close_cpl(void *cb_arg, int bserrno)
5015 {
5016 	struct spdk_bs_iter_ctx *ctx = cb_arg;
5017 
5018 	_spdk_bs_iter_cpl(ctx, NULL, -1);
5019 }
5020 
5021 void
5022 spdk_bs_iter_next(struct spdk_blob_store *bs, struct spdk_blob *blob,
5023 		  spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
5024 {
5025 	struct spdk_bs_iter_ctx *ctx;
5026 
5027 	assert(blob != NULL);
5028 
5029 	ctx = calloc(1, sizeof(*ctx));
5030 	if (!ctx) {
5031 		cb_fn(cb_arg, NULL, -ENOMEM);
5032 		return;
5033 	}
5034 
5035 	ctx->page_num = _spdk_bs_blobid_to_page(blob->id);
5036 	ctx->bs = bs;
5037 	ctx->cb_fn = cb_fn;
5038 	ctx->cb_arg = cb_arg;
5039 
5040 	/* Close the existing blob */
5041 	spdk_blob_close(blob, _spdk_bs_iter_close_cpl, ctx);
5042 }
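
/*
 * Usage sketch: iterating over every blob in the blobstore. The iterator
 * opens each blob in turn; spdk_bs_iter_next() closes the previous handle
 * before advancing, and -ENOENT signals that the id space is exhausted.
 *
 *	static void
 *	iter_cb(void *cb_arg, struct spdk_blob *blob, int bserrno)
 *	{
 *		struct spdk_blob_store *bs = cb_arg;
 *
 *		if (bserrno != 0) {
 *			return;
 *		}
 *		SPDK_NOTICELOG("blob %lu\n", spdk_blob_get_id(blob));
 *		spdk_bs_iter_next(bs, blob, iter_cb, bs);
 *	}
 *
 *	spdk_bs_iter_first(bs, iter_cb, bs);
 */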
5043 
5044 static int
5045 _spdk_blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value,
5046 		     uint16_t value_len, bool internal)
5047 {
5048 	struct spdk_xattr_tailq *xattrs;
5049 	struct spdk_xattr	*xattr;
5050 
5051 	_spdk_blob_verify_md_op(blob);
5052 
5053 	if (blob->md_ro) {
5054 		return -EPERM;
5055 	}
5056 
5057 	if (internal) {
5058 		xattrs = &blob->xattrs_internal;
5059 		blob->invalid_flags |= SPDK_BLOB_INTERNAL_XATTR;
5060 	} else {
5061 		xattrs = &blob->xattrs;
5062 	}
5063 
5064 	TAILQ_FOREACH(xattr, xattrs, link) {
5065 		if (!strcmp(name, xattr->name)) {
			/* Allocate the new value first so a failed allocation
			 * leaves the existing xattr intact.
			 */
			void *tmp = malloc(value_len);

			if (tmp == NULL) {
				return -ENOMEM;
			}

			memcpy(tmp, value, value_len);
			free(xattr->value);
			xattr->value = tmp;
			xattr->value_len = value_len;
5070 
5071 			blob->state = SPDK_BLOB_STATE_DIRTY;
5072 
5073 			return 0;
5074 		}
5075 	}
5076 
5077 	xattr = calloc(1, sizeof(*xattr));
5078 	if (!xattr) {
5079 		return -ENOMEM;
5080 	}
	xattr->name = strdup(name);
	if (!xattr->name) {
		free(xattr);
		return -ENOMEM;
	}
	xattr->value_len = value_len;
	xattr->value = malloc(value_len);
	if (!xattr->value) {
		free(xattr->name);
		free(xattr);
		return -ENOMEM;
	}
	memcpy(xattr->value, value, value_len);
5085 	TAILQ_INSERT_TAIL(xattrs, xattr, link);
5086 
5087 	blob->state = SPDK_BLOB_STATE_DIRTY;
5088 
5089 	return 0;
5090 }
5091 
5092 int
5093 spdk_blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value,
5094 		    uint16_t value_len)
5095 {
5096 	return _spdk_blob_set_xattr(blob, name, value, value_len, false);
5097 }
5098 
5099 static int
5100 _spdk_blob_remove_xattr(struct spdk_blob *blob, const char *name, bool internal)
5101 {
5102 	struct spdk_xattr_tailq *xattrs;
5103 	struct spdk_xattr	*xattr;
5104 
5105 	_spdk_blob_verify_md_op(blob);
5106 
5107 	if (blob->md_ro) {
5108 		return -EPERM;
5109 	}
5110 	xattrs = internal ? &blob->xattrs_internal : &blob->xattrs;
5111 
5112 	TAILQ_FOREACH(xattr, xattrs, link) {
5113 		if (!strcmp(name, xattr->name)) {
5114 			TAILQ_REMOVE(xattrs, xattr, link);
5115 			free(xattr->value);
5116 			free(xattr->name);
5117 			free(xattr);
5118 
5119 			if (internal && TAILQ_EMPTY(&blob->xattrs_internal)) {
5120 				blob->invalid_flags &= ~SPDK_BLOB_INTERNAL_XATTR;
5121 			}
5122 			blob->state = SPDK_BLOB_STATE_DIRTY;
5123 
5124 			return 0;
5125 		}
5126 	}
5127 
5128 	return -ENOENT;
5129 }
5130 
5131 int
5132 spdk_blob_remove_xattr(struct spdk_blob *blob, const char *name)
5133 {
5134 	return _spdk_blob_remove_xattr(blob, name, false);
5135 }
5136 
5137 static int
5138 _spdk_blob_get_xattr_value(struct spdk_blob *blob, const char *name,
5139 			   const void **value, size_t *value_len, bool internal)
5140 {
5141 	struct spdk_xattr	*xattr;
5142 	struct spdk_xattr_tailq *xattrs;
5143 
5144 	xattrs = internal ? &blob->xattrs_internal : &blob->xattrs;
5145 
5146 	TAILQ_FOREACH(xattr, xattrs, link) {
5147 		if (!strcmp(name, xattr->name)) {
5148 			*value = xattr->value;
5149 			*value_len = xattr->value_len;
5150 			return 0;
5151 		}
5152 	}
5153 	return -ENOENT;
5154 }
5155 
5156 int
5157 spdk_blob_get_xattr_value(struct spdk_blob *blob, const char *name,
5158 			  const void **value, size_t *value_len)
5159 {
5160 	_spdk_blob_verify_md_op(blob);
5161 
5162 	return _spdk_blob_get_xattr_value(blob, name, value, value_len, false);
5163 }
5164 
5165 struct spdk_xattr_names {
5166 	uint32_t	count;
5167 	const char	*names[0];
5168 };
5169 
5170 static int
5171 _spdk_blob_get_xattr_names(struct spdk_xattr_tailq *xattrs, struct spdk_xattr_names **names)
5172 {
5173 	struct spdk_xattr	*xattr;
5174 	int			count = 0;
5175 
5176 	TAILQ_FOREACH(xattr, xattrs, link) {
5177 		count++;
5178 	}
5179 
5180 	*names = calloc(1, sizeof(struct spdk_xattr_names) + count * sizeof(char *));
5181 	if (*names == NULL) {
5182 		return -ENOMEM;
5183 	}
5184 
5185 	TAILQ_FOREACH(xattr, xattrs, link) {
5186 		(*names)->names[(*names)->count++] = xattr->name;
5187 	}
5188 
5189 	return 0;
5190 }
5191 
5192 int
5193 spdk_blob_get_xattr_names(struct spdk_blob *blob, struct spdk_xattr_names **names)
5194 {
5195 	_spdk_blob_verify_md_op(blob);
5196 
5197 	return _spdk_blob_get_xattr_names(&blob->xattrs, names);
5198 }
5199 
5200 uint32_t
5201 spdk_xattr_names_get_count(struct spdk_xattr_names *names)
5202 {
5203 	assert(names != NULL);
5204 
5205 	return names->count;
5206 }
5207 
5208 const char *
5209 spdk_xattr_names_get_name(struct spdk_xattr_names *names, uint32_t index)
5210 {
5211 	if (index >= names->count) {
5212 		return NULL;
5213 	}
5214 
5215 	return names->names[index];
5216 }
5217 
5218 void
5219 spdk_xattr_names_free(struct spdk_xattr_names *names)
5220 {
5221 	free(names);
5222 }
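
/*
 * Usage sketch: the xattr accessors above operate on in-memory metadata
 * and take effect on disk after spdk_blob_sync_md(). Setting a value,
 * reading it back, and enumerating all names:
 *
 *	struct spdk_xattr_names *names;
 *	const void *val;
 *	size_t val_len;
 *	uint32_t i;
 *
 *	spdk_blob_set_xattr(blob, "name", "archive", strlen("archive") + 1);
 *	spdk_blob_get_xattr_value(blob, "name", &val, &val_len);
 *
 *	if (spdk_blob_get_xattr_names(blob, &names) == 0) {
 *		for (i = 0; i < spdk_xattr_names_get_count(names); i++) {
 *			SPDK_NOTICELOG("%s\n", spdk_xattr_names_get_name(names, i));
 *		}
 *		spdk_xattr_names_free(names);
 *	}
 */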
5223 
5224 struct spdk_bs_type
5225 spdk_bs_get_bstype(struct spdk_blob_store *bs)
5226 {
5227 	return bs->bstype;
5228 }
5229 
5230 void
5231 spdk_bs_set_bstype(struct spdk_blob_store *bs, struct spdk_bs_type bstype)
5232 {
5233 	memcpy(&bs->bstype, &bstype, sizeof(bstype));
5234 }
5235 
5236 bool
5237 spdk_blob_is_read_only(struct spdk_blob *blob)
5238 {
5239 	assert(blob != NULL);
5240 	return (blob->data_ro || blob->md_ro);
5241 }
5242 
5243 bool
5244 spdk_blob_is_snapshot(struct spdk_blob *blob)
5245 {
5246 	struct spdk_blob_list *snapshot_entry;
5247 
5248 	assert(blob != NULL);
5249 
5250 	TAILQ_FOREACH(snapshot_entry, &blob->bs->snapshots, link) {
5251 		if (snapshot_entry->id == blob->id) {
5252 			break;
5253 		}
5254 	}
5255 
5256 	if (snapshot_entry == NULL) {
5257 		return false;
5258 	}
5259 
5260 	return true;
5261 }
5262 
5263 bool
5264 spdk_blob_is_clone(struct spdk_blob *blob)
5265 {
5266 	assert(blob != NULL);
5267 
5268 	if (blob->parent_id != SPDK_BLOBID_INVALID) {
5269 		assert(spdk_blob_is_thin_provisioned(blob));
5270 		return true;
5271 	}
5272 
5273 	return false;
5274 }
5275 
5276 bool
5277 spdk_blob_is_thin_provisioned(struct spdk_blob *blob)
5278 {
5279 	assert(blob != NULL);
5280 	return !!(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
5281 }
5282 
5283 spdk_blob_id
5284 spdk_blob_get_parent_snapshot(struct spdk_blob_store *bs, spdk_blob_id blob_id)
5285 {
5286 	struct spdk_blob_list *snapshot_entry = NULL;
5287 	struct spdk_blob_list *clone_entry = NULL;
5288 
5289 	TAILQ_FOREACH(snapshot_entry, &bs->snapshots, link) {
5290 		TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) {
5291 			if (clone_entry->id == blob_id) {
5292 				return snapshot_entry->id;
5293 			}
5294 		}
5295 	}
5296 
5297 	return SPDK_BLOBID_INVALID;
5298 }
5299 
5300 int
5301 spdk_blob_get_clones(struct spdk_blob_store *bs, spdk_blob_id blobid, spdk_blob_id *ids,
5302 		     size_t *count)
5303 {
5304 	struct spdk_blob_list *snapshot_entry, *clone_entry;
5305 	size_t n;
5306 
5307 	TAILQ_FOREACH(snapshot_entry, &bs->snapshots, link) {
5308 		if (snapshot_entry->id == blobid) {
5309 			break;
5310 		}
5311 	}
5312 	if (snapshot_entry == NULL) {
5313 		*count = 0;
5314 		return 0;
5315 	}
5316 
5317 	if (ids == NULL || *count < snapshot_entry->clone_count) {
5318 		*count = snapshot_entry->clone_count;
5319 		return -ENOMEM;
5320 	}
5321 	*count = snapshot_entry->clone_count;
5322 
5323 	n = 0;
5324 	TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) {
5325 		ids[n++] = clone_entry->id;
5326 	}
5327 
5328 	return 0;
5329 }
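
/*
 * Usage sketch: the two-call pattern for spdk_blob_get_clones(). When ids
 * is NULL (or *count is too small), the function returns -ENOMEM and sets
 * *count to the required size, which the caller then uses to allocate:
 *
 *	size_t count = 0;
 *	spdk_blob_id *ids = NULL;
 *
 *	if (spdk_blob_get_clones(bs, snapshot_id, NULL, &count) == -ENOMEM) {
 *		ids = calloc(count, sizeof(*ids));
 *		if (ids != NULL) {
 *			spdk_blob_get_clones(bs, snapshot_id, ids, &count);
 *		}
 *	}
 */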
5330 
5331 SPDK_LOG_REGISTER_COMPONENT("blob", SPDK_LOG_BLOB)
5332