xref: /spdk/lib/blob/blobstore.c (revision fa68a0fdbd11c085a2975699c65a5b7237ccbadb)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include "spdk/stdinc.h"
35 
36 #include "spdk/blob.h"
37 #include "spdk/crc32.h"
38 #include "spdk/env.h"
39 #include "spdk/queue.h"
40 #include "spdk/io_channel.h"
41 #include "spdk/bit_array.h"
42 #include "spdk/likely.h"
43 
44 #include "spdk_internal/assert.h"
45 #include "spdk_internal/log.h"
46 
47 #include "blobstore.h"
48 
49 #define BLOB_CRC32C_INITIAL    0xffffffffUL
50 
51 static int spdk_bs_register_md_thread(struct spdk_blob_store *bs);
52 static int spdk_bs_unregister_md_thread(struct spdk_blob_store *bs);
53 static void _spdk_blob_close_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno);
54 void _spdk_blob_insert_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num,
55 		uint64_t cluster, spdk_blob_op_complete cb_fn, void *cb_arg);
56 
57 static int _spdk_blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value,
58 				uint16_t value_len, bool internal);
59 static int _spdk_blob_get_xattr_value(struct spdk_blob *blob, const char *name,
60 				      const void **value, size_t *value_len, bool internal);
61 static int _spdk_blob_remove_xattr(struct spdk_blob *blob, const char *name, bool internal);
62 
63 static void
64 _spdk_blob_verify_md_op(struct spdk_blob *blob)
65 {
66 	assert(blob != NULL);
67 	assert(spdk_get_thread() == blob->bs->md_thread);
68 	assert(blob->state != SPDK_BLOB_STATE_LOADING);
69 }
70 
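/* Integer ceiling division, e.g. divide_round_up(10, 4) == 3. */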
71 static inline size_t
72 divide_round_up(size_t num, size_t divisor)
73 {
74 	return (num + divisor - 1) / divisor;
75 }
76 
77 static void
78 _spdk_bs_claim_cluster(struct spdk_blob_store *bs, uint32_t cluster_num)
79 {
80 	assert(cluster_num < spdk_bit_array_capacity(bs->used_clusters));
81 	assert(spdk_bit_array_get(bs->used_clusters, cluster_num) == false);
82 	assert(bs->num_free_clusters > 0);
83 
84 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Claiming cluster %u\n", cluster_num);
85 
86 	spdk_bit_array_set(bs->used_clusters, cluster_num);
87 	bs->num_free_clusters--;
88 }
89 
90 static int
91 _spdk_blob_insert_cluster(struct spdk_blob *blob, uint32_t cluster_num, uint64_t cluster)
92 {
93 	uint64_t *cluster_lba = &blob->active.clusters[cluster_num];
94 
95 	_spdk_blob_verify_md_op(blob);
96 
97 	if (*cluster_lba != 0) {
98 		return -EEXIST;
99 	}
100 
101 	*cluster_lba = _spdk_bs_cluster_to_lba(blob->bs, cluster);
102 	return 0;
103 }
104 
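/*
 * Claim the first free cluster at or after *lowest_free_cluster, returning its
 * index through the same pointer. When update_map is true the cluster is also
 * recorded in the blob's cluster map; the copy-on-write path passes false and
 * inserts the cluster on the metadata thread once the data copy completes.
 */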
105 static int
106 _spdk_bs_allocate_cluster(struct spdk_blob *blob, uint32_t cluster_num,
107 			  uint64_t *lowest_free_cluster, bool update_map)
108 {
109 	pthread_mutex_lock(&blob->bs->used_clusters_mutex);
110 	*lowest_free_cluster = spdk_bit_array_find_first_clear(blob->bs->used_clusters,
111 			       *lowest_free_cluster);
112 	if (*lowest_free_cluster >= blob->bs->total_clusters) {
113 		/* No more free clusters. Cannot satisfy the request */
114 		pthread_mutex_unlock(&blob->bs->used_clusters_mutex);
115 		return -ENOSPC;
116 	}
117 
118 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Claiming cluster %lu for blob %lu\n", *lowest_free_cluster, blob->id);
119 	_spdk_bs_claim_cluster(blob->bs, *lowest_free_cluster);
120 	pthread_mutex_unlock(&blob->bs->used_clusters_mutex);
121 
122 	if (update_map) {
123 		_spdk_blob_insert_cluster(blob, cluster_num, *lowest_free_cluster);
124 	}
125 
126 	return 0;
127 }
128 
129 static void
130 _spdk_bs_release_cluster(struct spdk_blob_store *bs, uint32_t cluster_num)
131 {
132 	assert(cluster_num < spdk_bit_array_capacity(bs->used_clusters));
133 	assert(spdk_bit_array_get(bs->used_clusters, cluster_num) == true);
134 	assert(bs->num_free_clusters < bs->total_clusters);
135 
136 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Releasing cluster %u\n", cluster_num);
137 
138 	pthread_mutex_lock(&bs->used_clusters_mutex);
139 	spdk_bit_array_clear(bs->used_clusters, cluster_num);
140 	bs->num_free_clusters++;
141 	pthread_mutex_unlock(&bs->used_clusters_mutex);
142 }
143 
144 static void
145 _spdk_blob_xattrs_init(struct spdk_blob_xattr_opts *xattrs)
146 {
147 	xattrs->count = 0;
148 	xattrs->names = NULL;
149 	xattrs->ctx = NULL;
150 	xattrs->get_value = NULL;
151 }
152 
153 void
154 spdk_blob_opts_init(struct spdk_blob_opts *opts)
155 {
156 	opts->num_clusters = 0;
157 	opts->thin_provision = false;
158 	_spdk_blob_xattrs_init(&opts->xattrs);
159 }
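
/*
 * Typical usage (illustrative sketch only; spdk_bs_create_blob_ext() and the
 * completion callback come from the public blob API and are not defined in
 * this file):
 *
 *     struct spdk_blob_opts opts;
 *
 *     spdk_blob_opts_init(&opts);
 *     opts.num_clusters = 16;
 *     opts.thin_provision = true;
 *     spdk_bs_create_blob_ext(bs, &opts, create_cb, cb_arg);
 */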
160 
161 static struct spdk_blob *
162 _spdk_blob_alloc(struct spdk_blob_store *bs, spdk_blob_id id)
163 {
164 	struct spdk_blob *blob;
165 
166 	blob = calloc(1, sizeof(*blob));
167 	if (!blob) {
168 		return NULL;
169 	}
170 
171 	blob->id = id;
172 	blob->bs = bs;
173 
174 	blob->parent_id = SPDK_BLOBID_INVALID;
175 
176 	blob->state = SPDK_BLOB_STATE_DIRTY;
177 	blob->active.num_pages = 1;
178 	blob->active.pages = calloc(1, sizeof(*blob->active.pages));
179 	if (!blob->active.pages) {
180 		free(blob);
181 		return NULL;
182 	}
183 
184 	blob->active.pages[0] = _spdk_bs_blobid_to_page(id);
185 
186 	TAILQ_INIT(&blob->xattrs);
187 	TAILQ_INIT(&blob->xattrs_internal);
188 
189 	return blob;
190 }
191 
192 static void
193 _spdk_xattrs_free(struct spdk_xattr_tailq *xattrs)
194 {
195 	struct spdk_xattr	*xattr, *xattr_tmp;
196 
197 	TAILQ_FOREACH_SAFE(xattr, xattrs, link, xattr_tmp) {
198 		TAILQ_REMOVE(xattrs, xattr, link);
199 		free(xattr->name);
200 		free(xattr->value);
201 		free(xattr);
202 	}
203 }
204 
205 static void
206 _spdk_blob_free(struct spdk_blob *blob)
207 {
208 	assert(blob != NULL);
209 
210 	free(blob->active.clusters);
211 	free(blob->clean.clusters);
212 	free(blob->active.pages);
213 	free(blob->clean.pages);
214 
215 	_spdk_xattrs_free(&blob->xattrs);
216 	_spdk_xattrs_free(&blob->xattrs_internal);
217 
218 	if (blob->back_bs_dev) {
219 		blob->back_bs_dev->destroy(blob->back_bs_dev);
220 	}
221 
222 	free(blob);
223 }
224 
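/*
 * Move the blob's active cluster and page arrays over to the "clean" set and
 * leave the active set pointing at fresh copies. Returns -1 on allocation
 * failure, in which case the blob is left untouched.
 */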
225 static int
226 _spdk_blob_mark_clean(struct spdk_blob *blob)
227 {
228 	uint64_t *clusters = NULL;
229 	uint32_t *pages = NULL;
230 
231 	assert(blob != NULL);
232 
233 	if (blob->active.num_clusters) {
234 		assert(blob->active.clusters);
235 		clusters = calloc(blob->active.num_clusters, sizeof(*blob->active.clusters));
236 		if (!clusters) {
237 			return -1;
238 		}
239 		memcpy(clusters, blob->active.clusters, blob->active.num_clusters * sizeof(*clusters));
240 	}
241 
242 	if (blob->active.num_pages) {
243 		assert(blob->active.pages);
244 		pages = calloc(blob->active.num_pages, sizeof(*blob->active.pages));
245 		if (!pages) {
246 			free(clusters);
247 			return -1;
248 		}
249 		memcpy(pages, blob->active.pages, blob->active.num_pages * sizeof(*pages));
250 	}
251 
252 	free(blob->clean.clusters);
253 	free(blob->clean.pages);
254 
255 	blob->clean.num_clusters = blob->active.num_clusters;
256 	blob->clean.clusters = blob->active.clusters;
257 	blob->clean.num_pages = blob->active.num_pages;
258 	blob->clean.pages = blob->active.pages;
259 
260 	blob->active.clusters = clusters;
261 	blob->active.pages = pages;
262 
263 	/* If the metadata was dirtied again while the metadata was being written to disk,
264 	 *  we do not want to revert the DIRTY state back to CLEAN here.
265 	 */
266 	if (blob->state == SPDK_BLOB_STATE_LOADING) {
267 		blob->state = SPDK_BLOB_STATE_CLEAN;
268 	}
269 
270 	return 0;
271 }
272 
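/*
 * Decode one on-disk xattr descriptor into an in-memory spdk_xattr and append
 * it to the blob's regular or internal xattr list. After the common type and
 * length fields, the descriptor payload is laid out as:
 *
 *     name_length | value_length | name bytes | value bytes
 */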
273 static int
274 _spdk_blob_deserialize_xattr(struct spdk_blob *blob,
275 			     struct spdk_blob_md_descriptor_xattr *desc_xattr, bool internal)
276 {
277 	struct spdk_xattr                       *xattr;
278 
279 	if (desc_xattr->length != sizeof(desc_xattr->name_length) +
280 	    sizeof(desc_xattr->value_length) +
281 	    desc_xattr->name_length + desc_xattr->value_length) {
282 		return -EINVAL;
283 	}
284 
285 	xattr = calloc(1, sizeof(*xattr));
286 	if (xattr == NULL) {
287 		return -ENOMEM;
288 	}
289 
290 	xattr->name = malloc(desc_xattr->name_length + 1);
291 	if (xattr->name == NULL) {
292 		free(xattr);
293 		return -ENOMEM;
294 	}
295 	memcpy(xattr->name, desc_xattr->name, desc_xattr->name_length);
296 	xattr->name[desc_xattr->name_length] = '\0';
297 
298 	xattr->value = malloc(desc_xattr->value_length);
299 	if (xattr->value == NULL) {
300 		free(xattr->name);
301 		free(xattr);
302 		return -ENOMEM;
303 	}
304 	xattr->value_len = desc_xattr->value_length;
305 	memcpy(xattr->value,
306 	       (void *)((uintptr_t)desc_xattr->name + desc_xattr->name_length),
307 	       desc_xattr->value_length);
308 
309 	TAILQ_INSERT_TAIL(internal ? &blob->xattrs_internal : &blob->xattrs, xattr, link);
310 
311 	return 0;
312 }
313 
314 
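/*
 * Apply every descriptor found in one metadata page to the in-memory blob:
 * flags, extents and xattrs. Unrecognized descriptor types are skipped rather
 * than treated as errors (see the comment at the end of the loop).
 */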
315 static int
316 _spdk_blob_parse_page(const struct spdk_blob_md_page *page, struct spdk_blob *blob)
317 {
318 	struct spdk_blob_md_descriptor *desc;
319 	size_t	cur_desc = 0;
320 	void *tmp;
321 
322 	desc = (struct spdk_blob_md_descriptor *)page->descriptors;
323 	while (cur_desc < sizeof(page->descriptors)) {
324 		if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) {
325 			if (desc->length == 0) {
326 				/* If padding and length are 0, this terminates the page */
327 				break;
328 			}
329 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) {
330 			struct spdk_blob_md_descriptor_flags	*desc_flags;
331 
332 			desc_flags = (struct spdk_blob_md_descriptor_flags *)desc;
333 
334 			if (desc_flags->length != sizeof(*desc_flags) - sizeof(*desc)) {
335 				return -EINVAL;
336 			}
337 
338 			if ((desc_flags->invalid_flags | SPDK_BLOB_INVALID_FLAGS_MASK) !=
339 			    SPDK_BLOB_INVALID_FLAGS_MASK) {
340 				return -EINVAL;
341 			}
342 
343 			if ((desc_flags->data_ro_flags | SPDK_BLOB_DATA_RO_FLAGS_MASK) !=
344 			    SPDK_BLOB_DATA_RO_FLAGS_MASK) {
345 				blob->data_ro = true;
346 				blob->md_ro = true;
347 			}
348 
349 			if ((desc_flags->md_ro_flags | SPDK_BLOB_MD_RO_FLAGS_MASK) !=
350 			    SPDK_BLOB_MD_RO_FLAGS_MASK) {
351 				blob->md_ro = true;
352 			}
353 
354 			if ((desc_flags->data_ro_flags & SPDK_BLOB_READ_ONLY)) {
355 				blob->data_ro = true;
356 				blob->md_ro = true;
357 			}
358 
359 			blob->invalid_flags = desc_flags->invalid_flags;
360 			blob->data_ro_flags = desc_flags->data_ro_flags;
361 			blob->md_ro_flags = desc_flags->md_ro_flags;
362 
363 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT) {
364 			struct spdk_blob_md_descriptor_extent	*desc_extent;
365 			unsigned int				i, j;
366 			unsigned int				cluster_count = blob->active.num_clusters;
367 
368 			desc_extent = (struct spdk_blob_md_descriptor_extent *)desc;
369 
370 			if (desc_extent->length == 0 ||
371 			    (desc_extent->length % sizeof(desc_extent->extents[0]) != 0)) {
372 				return -EINVAL;
373 			}
374 
375 			for (i = 0; i < desc_extent->length / sizeof(desc_extent->extents[0]); i++) {
376 				for (j = 0; j < desc_extent->extents[i].length; j++) {
377 					if (!spdk_bit_array_get(blob->bs->used_clusters,
378 								desc_extent->extents[i].cluster_idx + j)) {
379 						return -EINVAL;
380 					}
381 					cluster_count++;
382 				}
383 			}
384 
385 			if (cluster_count == 0) {
386 				return -EINVAL;
387 			}
388 			tmp = realloc(blob->active.clusters, cluster_count * sizeof(uint64_t));
389 			if (tmp == NULL) {
390 				return -ENOMEM;
391 			}
392 			blob->active.clusters = tmp;
393 			blob->active.cluster_array_size = cluster_count;
394 
395 			for (i = 0; i < desc_extent->length / sizeof(desc_extent->extents[0]); i++) {
396 				for (j = 0; j < desc_extent->extents[i].length; j++) {
397 					if (desc_extent->extents[i].cluster_idx != 0) {
398 						blob->active.clusters[blob->active.num_clusters++] = _spdk_bs_cluster_to_lba(blob->bs,
399 								desc_extent->extents[i].cluster_idx + j);
400 					} else if (spdk_blob_is_thin_provisioned(blob)) {
401 						blob->active.clusters[blob->active.num_clusters++] = 0;
402 					} else {
403 						return -EINVAL;
404 					}
405 				}
406 			}
407 
408 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) {
409 			int rc;
410 
411 			rc = _spdk_blob_deserialize_xattr(blob,
412 							  (struct spdk_blob_md_descriptor_xattr *) desc, false);
413 			if (rc != 0) {
414 				return rc;
415 			}
416 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) {
417 			int rc;
418 
419 			rc = _spdk_blob_deserialize_xattr(blob,
420 							  (struct spdk_blob_md_descriptor_xattr *) desc, true);
421 			if (rc != 0) {
422 				return rc;
423 			}
424 		} else {
425 			/* Unrecognized descriptor type.  Do not fail - just continue to the
426 			 *  next descriptor.  If this descriptor is associated with some feature
427 			 *  defined in a newer version of blobstore, that version of blobstore
428 			 *  should create and set an associated feature flag to specify if this
429 			 *  blob can be loaded or not.
430 			 */
431 		}
432 
433 		/* Advance to the next descriptor */
434 		cur_desc += sizeof(*desc) + desc->length;
435 		if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) {
436 			break;
437 		}
438 		desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc);
439 	}
440 
441 	return 0;
442 }
443 
444 static int
445 _spdk_blob_parse(const struct spdk_blob_md_page *pages, uint32_t page_count,
446 		 struct spdk_blob *blob)
447 {
448 	const struct spdk_blob_md_page *page;
449 	uint32_t i;
450 	int rc;
451 
452 	assert(page_count > 0);
453 	assert(pages[0].sequence_num == 0);
454 	assert(blob != NULL);
455 	assert(blob->state == SPDK_BLOB_STATE_LOADING);
456 	assert(blob->active.clusters == NULL);
457 
458 	/* The blobid provided doesn't match what's in the metadata. This can
459 	 * happen, for example, if a bogus blobid is passed in through open.
460 	 */
461 	if (blob->id != pages[0].id) {
462 		SPDK_ERRLOG("Blobid (%lu) doesn't match what's in metadata (%lu)\n",
463 			    blob->id, pages[0].id);
464 		return -ENOENT;
465 	}
466 
467 	for (i = 0; i < page_count; i++) {
468 		page = &pages[i];
469 
470 		assert(page->id == blob->id);
471 		assert(page->sequence_num == i);
472 
473 		rc = _spdk_blob_parse_page(page, blob);
474 		if (rc != 0) {
475 			return rc;
476 		}
477 	}
478 
479 	return 0;
480 }
481 
482 static int
483 _spdk_blob_serialize_add_page(const struct spdk_blob *blob,
484 			      struct spdk_blob_md_page **pages,
485 			      uint32_t *page_count,
486 			      struct spdk_blob_md_page **last_page)
487 {
488 	struct spdk_blob_md_page *page;
489 
490 	assert(pages != NULL);
491 	assert(page_count != NULL);
492 
493 	if (*page_count == 0) {
494 		assert(*pages == NULL);
495 		*page_count = 1;
496 		*pages = spdk_dma_malloc(SPDK_BS_PAGE_SIZE,
497 					 SPDK_BS_PAGE_SIZE,
498 					 NULL);
499 	} else {
500 		assert(*pages != NULL);
501 		(*page_count)++;
502 		*pages = spdk_dma_realloc(*pages,
503 					  SPDK_BS_PAGE_SIZE * (*page_count),
504 					  SPDK_BS_PAGE_SIZE,
505 					  NULL);
506 	}
507 
508 	if (*pages == NULL) {
509 		*page_count = 0;
510 		*last_page = NULL;
511 		return -ENOMEM;
512 	}
513 
514 	page = &(*pages)[*page_count - 1];
515 	memset(page, 0, sizeof(*page));
516 	page->id = blob->id;
517 	page->sequence_num = *page_count - 1;
518 	page->next = SPDK_INVALID_MD_PAGE;
519 	*last_page = page;
520 
521 	return 0;
522 }
523 
524 /* Transform the in-memory representation 'xattr' into an on-disk xattr descriptor
525  * at 'buf'. Update required_sz on both success and failure so the caller knows how
526  * much buffer space the descriptor needs.
527  */
528 static int
529 _spdk_blob_serialize_xattr(const struct spdk_xattr *xattr,
530 			   uint8_t *buf, size_t buf_sz,
531 			   size_t *required_sz, bool internal)
532 {
533 	struct spdk_blob_md_descriptor_xattr	*desc;
534 
535 	*required_sz = sizeof(struct spdk_blob_md_descriptor_xattr) +
536 		       strlen(xattr->name) +
537 		       xattr->value_len;
538 
539 	if (buf_sz < *required_sz) {
540 		return -1;
541 	}
542 
543 	desc = (struct spdk_blob_md_descriptor_xattr *)buf;
544 
545 	desc->type = internal ? SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL : SPDK_MD_DESCRIPTOR_TYPE_XATTR;
546 	desc->length = sizeof(desc->name_length) +
547 		       sizeof(desc->value_length) +
548 		       strlen(xattr->name) +
549 		       xattr->value_len;
550 	desc->name_length = strlen(xattr->name);
551 	desc->value_length = xattr->value_len;
552 
553 	memcpy(desc->name, xattr->name, desc->name_length);
554 	memcpy((void *)((uintptr_t)desc->name + desc->name_length),
555 	       xattr->value,
556 	       desc->value_length);
557 
558 	return 0;
559 }
560 
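/*
 * Serialize the cluster map, starting at start_cluster, into extent
 * descriptors, merging runs of physically contiguous clusters. For example,
 * logical clusters backed by physical clusters 10, 11, 12 and 40 are emitted
 * as two extents: {cluster_idx = 10, length = 3} and {cluster_idx = 40,
 * length = 1}. *next_cluster reports how far serialization got before the
 * buffer was exhausted.
 */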
561 static void
562 _spdk_blob_serialize_extent(const struct spdk_blob *blob,
563 			    uint64_t start_cluster, uint64_t *next_cluster,
564 			    uint8_t *buf, size_t buf_sz)
565 {
566 	struct spdk_blob_md_descriptor_extent *desc;
567 	size_t cur_sz;
568 	uint64_t i, extent_idx;
569 	uint32_t lba, lba_per_cluster, lba_count;
570 
571 	/* The buffer must have room for at least one extent */
572 	cur_sz = sizeof(struct spdk_blob_md_descriptor) + sizeof(desc->extents[0]);
573 	if (buf_sz < cur_sz) {
574 		*next_cluster = start_cluster;
575 		return;
576 	}
577 
578 	desc = (struct spdk_blob_md_descriptor_extent *)buf;
579 	desc->type = SPDK_MD_DESCRIPTOR_TYPE_EXTENT;
580 
581 	lba_per_cluster = _spdk_bs_cluster_to_lba(blob->bs, 1);
582 
583 	lba = blob->active.clusters[start_cluster];
584 	lba_count = lba_per_cluster;
585 	extent_idx = 0;
586 	for (i = start_cluster + 1; i < blob->active.num_clusters; i++) {
587 		if ((lba + lba_count) == blob->active.clusters[i]) {
588 			lba_count += lba_per_cluster;
589 			continue;
590 		}
591 		desc->extents[extent_idx].cluster_idx = lba / lba_per_cluster;
592 		desc->extents[extent_idx].length = lba_count / lba_per_cluster;
593 		extent_idx++;
594 
595 		cur_sz += sizeof(desc->extents[extent_idx]);
596 
597 		if (buf_sz < cur_sz) {
598 			/* If we ran out of buffer space, return */
599 			desc->length = sizeof(desc->extents[0]) * extent_idx;
600 			*next_cluster = i;
601 			return;
602 		}
603 
604 		lba = blob->active.clusters[i];
605 		lba_count = lba_per_cluster;
606 	}
607 
608 	desc->extents[extent_idx].cluster_idx = lba / lba_per_cluster;
609 	desc->extents[extent_idx].length = lba_count / lba_per_cluster;
610 	extent_idx++;
611 
612 	desc->length = sizeof(desc->extents[0]) * extent_idx;
613 	*next_cluster = blob->active.num_clusters;
614 
615 	return;
616 }
617 
618 static void
619 _spdk_blob_serialize_flags(const struct spdk_blob *blob,
620 			   uint8_t *buf, size_t *buf_sz)
621 {
622 	struct spdk_blob_md_descriptor_flags *desc;
623 
624 	/*
625 	 * Flags get serialized first, so we should always have room for the flags
626 	 *  descriptor.
627 	 */
628 	assert(*buf_sz >= sizeof(*desc));
629 
630 	desc = (struct spdk_blob_md_descriptor_flags *)buf;
631 	desc->type = SPDK_MD_DESCRIPTOR_TYPE_FLAGS;
632 	desc->length = sizeof(*desc) - sizeof(struct spdk_blob_md_descriptor);
633 	desc->invalid_flags = blob->invalid_flags;
634 	desc->data_ro_flags = blob->data_ro_flags;
635 	desc->md_ro_flags = blob->md_ro_flags;
636 
637 	*buf_sz -= sizeof(*desc);
638 }
639 
640 static int
641 _spdk_blob_serialize_xattrs(const struct spdk_blob *blob,
642 			    const struct spdk_xattr_tailq *xattrs, bool internal,
643 			    struct spdk_blob_md_page **pages,
644 			    struct spdk_blob_md_page *cur_page,
645 			    uint32_t *page_count, uint8_t **buf,
646 			    size_t *remaining_sz)
647 {
648 	const struct spdk_xattr	*xattr;
649 	int	rc;
650 
651 	TAILQ_FOREACH(xattr, xattrs, link) {
652 		size_t required_sz = 0;
653 
654 		rc = _spdk_blob_serialize_xattr(xattr,
655 						*buf, *remaining_sz,
656 						&required_sz, internal);
657 		if (rc < 0) {
658 			/* Need to add a new page to the chain */
659 			rc = _spdk_blob_serialize_add_page(blob, pages, page_count,
660 							   &cur_page);
661 			if (rc < 0) {
662 				spdk_dma_free(*pages);
663 				*pages = NULL;
664 				*page_count = 0;
665 				return rc;
666 			}
667 
668 			*buf = (uint8_t *)cur_page->descriptors;
669 			*remaining_sz = sizeof(cur_page->descriptors);
670 
671 			/* Try again */
672 			required_sz = 0;
673 			rc = _spdk_blob_serialize_xattr(xattr,
674 							*buf, *remaining_sz,
675 							&required_sz, internal);
676 
677 			if (rc < 0) {
678 				spdk_dma_free(*pages);
679 				*pages = NULL;
680 				*page_count = 0;
681 				return -1;
682 			}
683 		}
684 
685 		*remaining_sz -= required_sz;
686 		*buf += required_sz;
687 	}
688 
689 	return 0;
690 }
691 
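/*
 * Serialize the blob's metadata (flags, xattrs, internal xattrs, then extents)
 * into a chain of DMA-able metadata pages, adding pages as the descriptor
 * space in the current page fills up. On success the caller owns *pages and
 * must free it with spdk_dma_free().
 */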
692 static int
693 _spdk_blob_serialize(const struct spdk_blob *blob, struct spdk_blob_md_page **pages,
694 		     uint32_t *page_count)
695 {
696 	struct spdk_blob_md_page		*cur_page;
697 	int					rc;
698 	uint8_t					*buf;
699 	size_t					remaining_sz;
700 	uint64_t				last_cluster;
701 
702 	assert(pages != NULL);
703 	assert(page_count != NULL);
704 	assert(blob != NULL);
705 	assert(blob->state == SPDK_BLOB_STATE_DIRTY);
706 
707 	*pages = NULL;
708 	*page_count = 0;
709 
710 	/* A blob always has at least 1 page, even if it has no descriptors */
711 	rc = _spdk_blob_serialize_add_page(blob, pages, page_count, &cur_page);
712 	if (rc < 0) {
713 		return rc;
714 	}
715 
716 	buf = (uint8_t *)cur_page->descriptors;
717 	remaining_sz = sizeof(cur_page->descriptors);
718 
719 	/* Serialize flags */
720 	_spdk_blob_serialize_flags(blob, buf, &remaining_sz);
721 	buf += sizeof(struct spdk_blob_md_descriptor_flags);
722 
723 	/* Serialize xattrs */
724 	rc = _spdk_blob_serialize_xattrs(blob, &blob->xattrs, false,
725 					 pages, cur_page, page_count, &buf, &remaining_sz);
726 	if (rc < 0) {
727 		return rc;
728 	}
729 
730 	/* Serialize internal xattrs */
731 	rc = _spdk_blob_serialize_xattrs(blob, &blob->xattrs_internal, true,
732 					 pages, cur_page, page_count, &buf, &remaining_sz);
733 	if (rc < 0) {
734 		return rc;
735 	}
736 
737 	/* Serialize extents */
738 	last_cluster = 0;
739 	while (last_cluster < blob->active.num_clusters) {
740 		_spdk_blob_serialize_extent(blob, last_cluster, &last_cluster,
741 					    buf, remaining_sz);
742 
743 		if (last_cluster == blob->active.num_clusters) {
744 			break;
745 		}
746 
747 		rc = _spdk_blob_serialize_add_page(blob, pages, page_count,
748 						   &cur_page);
749 		if (rc < 0) {
750 			return rc;
751 		}
752 
753 		buf = (uint8_t *)cur_page->descriptors;
754 		remaining_sz = sizeof(cur_page->descriptors);
755 	}
756 
757 	return 0;
758 }
759 
760 struct spdk_blob_load_ctx {
761 	struct spdk_blob		*blob;
762 
763 	struct spdk_blob_md_page	*pages;
764 	uint32_t			num_pages;
765 	spdk_bs_sequence_t	        *seq;
766 
767 	spdk_bs_sequence_cpl		cb_fn;
768 	void				*cb_arg;
769 };
770 
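/*
 * CRC32-C over the entire metadata page except its final 4 bytes, which hold
 * the page's crc field itself.
 */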
771 static uint32_t
772 _spdk_blob_md_page_calc_crc(void *page)
773 {
774 	uint32_t		crc;
775 
776 	crc = BLOB_CRC32C_INITIAL;
777 	crc = spdk_crc32c_update(page, SPDK_BS_PAGE_SIZE - 4, crc);
778 	crc ^= BLOB_CRC32C_INITIAL;
779 
780 	return crc;
781 
782 }
783 
784 static void
785 _spdk_blob_load_final(void *cb_arg, int bserrno)
786 {
787 	struct spdk_blob_load_ctx	*ctx = cb_arg;
788 	struct spdk_blob		*blob = ctx->blob;
789 
790 	_spdk_blob_mark_clean(blob);
791 
792 	ctx->cb_fn(ctx->seq, ctx->cb_arg, bserrno);
793 
794 	/* Free the memory */
795 	spdk_dma_free(ctx->pages);
796 	free(ctx);
797 }
798 
799 static void
800 _spdk_blob_load_snapshot_cpl(void *cb_arg, struct spdk_blob *snapshot, int bserrno)
801 {
802 	struct spdk_blob_load_ctx	*ctx = cb_arg;
803 	struct spdk_blob		*blob = ctx->blob;
804 
805 	if (bserrno != 0) {
806 		goto error;
807 	}
808 
809 	blob->back_bs_dev = spdk_bs_create_blob_bs_dev(snapshot);
810 
811 	if (blob->back_bs_dev == NULL) {
812 		bserrno = -ENOMEM;
813 		goto error;
814 	}
815 
816 	_spdk_blob_load_final(ctx, bserrno);
817 	return;
818 
819 error:
820 	SPDK_ERRLOG("Failed to load snapshot\n");
821 	_spdk_blob_free(blob);
822 	ctx->cb_fn(ctx->seq, NULL, bserrno);
823 	spdk_dma_free(ctx->pages);
824 	free(ctx);
825 }
826 
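/*
 * Completion callback for each metadata page read during blob load. Verifies
 * the page crc, follows the page->next chain one page at a time, and once the
 * last page has been read parses all pages into the blob. For a thin
 * provisioned blob the snapshot named by the BLOB_SNAPSHOT internal xattr is
 * then opened before the load completes.
 */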
827 static void
828 _spdk_blob_load_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
829 {
830 	struct spdk_blob_load_ctx	*ctx = cb_arg;
831 	struct spdk_blob		*blob = ctx->blob;
832 	struct spdk_blob_md_page	*page;
833 	const void			*value;
834 	size_t				len;
835 	int				rc;
836 	uint32_t			crc;
837 
838 	page = &ctx->pages[ctx->num_pages - 1];
839 	crc = _spdk_blob_md_page_calc_crc(page);
840 	if (crc != page->crc) {
841 		SPDK_ERRLOG("Metadata page %d crc mismatch\n", ctx->num_pages);
842 		_spdk_blob_free(blob);
843 		ctx->cb_fn(seq, NULL, -EINVAL);
844 		spdk_dma_free(ctx->pages);
845 		free(ctx);
846 		return;
847 	}
848 
849 	if (page->next != SPDK_INVALID_MD_PAGE) {
850 		uint32_t next_page = page->next;
851 		uint64_t next_lba = _spdk_bs_page_to_lba(blob->bs, blob->bs->md_start + next_page);
852 
853 
854 		assert(next_lba < (blob->bs->md_start + blob->bs->md_len));
855 
856 		/* Read the next page */
857 		ctx->num_pages++;
858 		ctx->pages = spdk_dma_realloc(ctx->pages, (sizeof(*page) * ctx->num_pages),
859 					      sizeof(*page), NULL);
860 		if (ctx->pages == NULL) {
861 			ctx->cb_fn(seq, ctx->cb_arg, -ENOMEM);
862 			free(ctx);
863 			return;
864 		}
865 
866 		spdk_bs_sequence_read_dev(seq, &ctx->pages[ctx->num_pages - 1],
867 					  next_lba,
868 					  _spdk_bs_byte_to_lba(blob->bs, sizeof(*page)),
869 					  _spdk_blob_load_cpl, ctx);
870 		return;
871 	}
872 
873 	/* Parse the pages */
874 	rc = _spdk_blob_parse(ctx->pages, ctx->num_pages, blob);
875 	if (rc) {
876 		_spdk_blob_free(blob);
877 		ctx->cb_fn(seq, NULL, rc);
878 		spdk_dma_free(ctx->pages);
879 		free(ctx);
880 		return;
881 	}
882 	ctx->seq = seq;
883 
884 
885 	if (spdk_blob_is_thin_provisioned(blob)) {
886 		rc = _spdk_blob_get_xattr_value(blob, BLOB_SNAPSHOT, &value, &len, true);
887 		if (rc == 0) {
888 			if (len != sizeof(spdk_blob_id)) {
889 				_spdk_blob_free(blob);
890 				ctx->cb_fn(seq, NULL, -EINVAL);
891 				spdk_dma_free(ctx->pages);
892 				free(ctx);
893 				return;
894 			}
895 			/* open snapshot blob and continue in the callback function */
896 			blob->parent_id = *(spdk_blob_id *)value;
897 			spdk_bs_open_blob(blob->bs, blob->parent_id,
898 					  _spdk_blob_load_snapshot_cpl, ctx);
899 			return;
900 		} else {
901 			/* add zeroes_dev for thin provisioned blob */
902 			blob->back_bs_dev = spdk_bs_create_zeroes_dev();
903 		}
904 	} else {
905 		/* standard blob */
906 		blob->back_bs_dev = NULL;
907 	}
908 	_spdk_blob_load_final(ctx, bserrno);
909 }
910 
911 /* Load a blob from disk given a blobid */
912 static void
913 _spdk_blob_load(spdk_bs_sequence_t *seq, struct spdk_blob *blob,
914 		spdk_bs_sequence_cpl cb_fn, void *cb_arg)
915 {
916 	struct spdk_blob_load_ctx *ctx;
917 	struct spdk_blob_store *bs;
918 	uint32_t page_num;
919 	uint64_t lba;
920 
921 	_spdk_blob_verify_md_op(blob);
922 
923 	bs = blob->bs;
924 
925 	ctx = calloc(1, sizeof(*ctx));
926 	if (!ctx) {
927 		cb_fn(seq, cb_arg, -ENOMEM);
928 		return;
929 	}
930 
931 	ctx->blob = blob;
932 	ctx->pages = spdk_dma_realloc(ctx->pages, SPDK_BS_PAGE_SIZE,
933 				      SPDK_BS_PAGE_SIZE, NULL);
934 	if (!ctx->pages) {
935 		free(ctx);
936 		cb_fn(seq, cb_arg, -ENOMEM);
937 		return;
938 	}
939 	ctx->num_pages = 1;
940 	ctx->cb_fn = cb_fn;
941 	ctx->cb_arg = cb_arg;
942 
943 	page_num = _spdk_bs_blobid_to_page(blob->id);
944 	lba = _spdk_bs_page_to_lba(blob->bs, bs->md_start + page_num);
945 
946 	blob->state = SPDK_BLOB_STATE_LOADING;
947 
948 	spdk_bs_sequence_read_dev(seq, &ctx->pages[0], lba,
949 				  _spdk_bs_byte_to_lba(bs, SPDK_BS_PAGE_SIZE),
950 				  _spdk_blob_load_cpl, ctx);
951 }
952 
953 struct spdk_blob_persist_ctx {
954 	struct spdk_blob		*blob;
955 
956 	struct spdk_blob_md_page	*pages;
957 
958 	uint64_t			idx;
959 
960 	spdk_bs_sequence_t		*seq;
961 	spdk_bs_sequence_cpl		cb_fn;
962 	void				*cb_arg;
963 };
964 
965 static void
966 _spdk_blob_persist_complete(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
967 {
968 	struct spdk_blob_persist_ctx	*ctx = cb_arg;
969 	struct spdk_blob		*blob = ctx->blob;
970 
971 	if (bserrno == 0) {
972 		_spdk_blob_mark_clean(blob);
973 	}
974 
975 	/* Call user callback */
976 	ctx->cb_fn(seq, ctx->cb_arg, bserrno);
977 
978 	/* Free the memory */
979 	spdk_dma_free(ctx->pages);
980 	free(ctx);
981 }
982 
983 static void
984 _spdk_blob_persist_unmap_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
985 {
986 	struct spdk_blob_persist_ctx	*ctx = cb_arg;
987 	struct spdk_blob		*blob = ctx->blob;
988 	struct spdk_blob_store		*bs = blob->bs;
989 	void				*tmp;
990 	size_t				i;
991 
992 	/* Release all clusters that were truncated */
993 	for (i = blob->active.num_clusters; i < blob->active.cluster_array_size; i++) {
994 		uint32_t cluster_num = _spdk_bs_lba_to_cluster(bs, blob->active.clusters[i]);
995 
996 		/* Nothing to release if it was not allocated */
997 		if (blob->active.clusters[i] != 0) {
998 			_spdk_bs_release_cluster(bs, cluster_num);
999 		}
1000 	}
1001 
1002 	if (blob->active.num_clusters == 0) {
1003 		free(blob->active.clusters);
1004 		blob->active.clusters = NULL;
1005 		blob->active.cluster_array_size = 0;
1006 	} else {
1007 		tmp = realloc(blob->active.clusters, sizeof(uint64_t) * blob->active.num_clusters);
1008 		assert(tmp != NULL);
1009 		blob->active.clusters = tmp;
1010 		blob->active.cluster_array_size = blob->active.num_clusters;
1011 	}
1012 
1013 	_spdk_blob_persist_complete(seq, ctx, bserrno);
1014 }
1015 
1016 static void
1017 _spdk_blob_persist_unmap_clusters(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
1018 {
1019 	struct spdk_blob_persist_ctx	*ctx = cb_arg;
1020 	struct spdk_blob		*blob = ctx->blob;
1021 	struct spdk_blob_store		*bs = blob->bs;
1022 	spdk_bs_batch_t			*batch;
1023 	size_t				i;
1024 	uint64_t			lba;
1025 	uint32_t			lba_count;
1026 
1027 	/* Clusters don't move around in blobs. The list shrinks or grows
1028 	 * at the end, but no changes ever occur in the middle of the list.
1029 	 */
1030 
1031 	batch = spdk_bs_sequence_to_batch(seq, _spdk_blob_persist_unmap_clusters_cpl, ctx);
1032 
1033 	/* Unmap all clusters that were truncated */
1034 	lba = 0;
1035 	lba_count = 0;
1036 	for (i = blob->active.num_clusters; i < blob->active.cluster_array_size; i++) {
1037 		uint64_t next_lba = blob->active.clusters[i];
1038 		uint32_t next_lba_count = _spdk_bs_cluster_to_lba(bs, 1);
1039 
1040 		if (next_lba > 0 && (lba + lba_count) == next_lba) {
1041 			/* This cluster is contiguous with the previous one. */
1042 			lba_count += next_lba_count;
1043 			continue;
1044 		}
1045 
1046 		/* This cluster is not contiguous with the previous one. */
1047 
1048 	/* If a run of LBAs previously existed, send it
1049 	 * as an unmap now.
1050 	 */
1051 		if (lba_count > 0) {
1052 			spdk_bs_batch_unmap_dev(batch, lba, lba_count);
1053 		}
1054 
1055 		/* Start building the next batch */
1056 		lba = next_lba;
1057 		if (next_lba > 0) {
1058 			lba_count = next_lba_count;
1059 		} else {
1060 			lba_count = 0;
1061 		}
1062 	}
1063 
1064 	/* If we ended with a contiguous set of LBAs, send the unmap now */
1065 	if (lba_count > 0) {
1066 		spdk_bs_batch_unmap_dev(batch, lba, lba_count);
1067 	}
1068 
1069 	spdk_bs_batch_close(batch);
1070 }
1071 
1072 static void
1073 _spdk_blob_persist_zero_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
1074 {
1075 	struct spdk_blob_persist_ctx	*ctx = cb_arg;
1076 	struct spdk_blob		*blob = ctx->blob;
1077 	struct spdk_blob_store		*bs = blob->bs;
1078 	size_t				i;
1079 
1080 	/* This loop starts at 1 because the first page is special and handled
1081 	 * below. The other pages were zeroed in the previous step, so their
1082 	 * used_md_pages bits can now be released here.
1083 	 */
1084 	for (i = 1; i < blob->clean.num_pages; i++) {
1085 		spdk_bit_array_clear(bs->used_md_pages, blob->clean.pages[i]);
1086 	}
1087 
1088 	if (blob->active.num_pages == 0) {
1089 		uint32_t page_num;
1090 
1091 		page_num = _spdk_bs_blobid_to_page(blob->id);
1092 		spdk_bit_array_clear(bs->used_md_pages, page_num);
1093 	}
1094 
1095 	/* Move on to unmapping clusters */
1096 	_spdk_blob_persist_unmap_clusters(seq, ctx, 0);
1097 }
1098 
1099 static void
1100 _spdk_blob_persist_zero_pages(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
1101 {
1102 	struct spdk_blob_persist_ctx	*ctx = cb_arg;
1103 	struct spdk_blob		*blob = ctx->blob;
1104 	struct spdk_blob_store		*bs = blob->bs;
1105 	uint64_t			lba;
1106 	uint32_t			lba_count;
1107 	spdk_bs_batch_t			*batch;
1108 	size_t				i;
1109 
1110 	batch = spdk_bs_sequence_to_batch(seq, _spdk_blob_persist_zero_pages_cpl, ctx);
1111 
1112 	lba_count = _spdk_bs_byte_to_lba(bs, SPDK_BS_PAGE_SIZE);
1113 
1114 	/* This loop starts at 1 because the first page is special and handled
1115 	 * below. The pages (except the first) are never written in place,
1116 	 * so any pages in the clean list must be zeroed.
1117 	 */
1118 	for (i = 1; i < blob->clean.num_pages; i++) {
1119 		lba = _spdk_bs_page_to_lba(bs, bs->md_start + blob->clean.pages[i]);
1120 
1121 		spdk_bs_batch_write_zeroes_dev(batch, lba, lba_count);
1122 	}
1123 
1124 	/* The first page will only be zeroed if this is a delete. */
1125 	if (blob->active.num_pages == 0) {
1126 		uint32_t page_num;
1127 
1128 		/* The first page in the metadata goes where the blobid indicates */
1129 		page_num = _spdk_bs_blobid_to_page(blob->id);
1130 		lba = _spdk_bs_page_to_lba(bs, bs->md_start + page_num);
1131 
1132 		spdk_bs_batch_write_zeroes_dev(batch, lba, lba_count);
1133 	}
1134 
1135 	spdk_bs_batch_close(batch);
1136 }
1137 
1138 static void
1139 _spdk_blob_persist_write_page_root(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
1140 {
1141 	struct spdk_blob_persist_ctx	*ctx = cb_arg;
1142 	struct spdk_blob		*blob = ctx->blob;
1143 	struct spdk_blob_store		*bs = blob->bs;
1144 	uint64_t			lba;
1145 	uint32_t			lba_count;
1146 	struct spdk_blob_md_page	*page;
1147 
1148 	if (blob->active.num_pages == 0) {
1149 		/* Move on to the next step */
1150 		_spdk_blob_persist_zero_pages(seq, ctx, 0);
1151 		return;
1152 	}
1153 
1154 	lba_count = _spdk_bs_byte_to_lba(bs, sizeof(*page));
1155 
1156 	page = &ctx->pages[0];
1157 	/* The first page in the metadata goes where the blobid indicates */
1158 	lba = _spdk_bs_page_to_lba(bs, bs->md_start + _spdk_bs_blobid_to_page(blob->id));
1159 
1160 	spdk_bs_sequence_write_dev(seq, page, lba, lba_count,
1161 				   _spdk_blob_persist_zero_pages, ctx);
1162 }
1163 
1164 static void
1165 _spdk_blob_persist_write_page_chain(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
1166 {
1167 	struct spdk_blob_persist_ctx	*ctx = cb_arg;
1168 	struct spdk_blob		*blob = ctx->blob;
1169 	struct spdk_blob_store		*bs = blob->bs;
1170 	uint64_t			lba;
1171 	uint32_t			lba_count;
1172 	struct spdk_blob_md_page	*page;
1173 	spdk_bs_batch_t			*batch;
1174 	size_t				i;
1175 
1176 	/* Metadata pages (except the first) are never written in place, so all
1177 	 * of them can be written out in a single batch before the root page.
1178 	 */
1179 
1180 	lba_count = _spdk_bs_byte_to_lba(bs, sizeof(*page));
1181 
1182 	batch = spdk_bs_sequence_to_batch(seq, _spdk_blob_persist_write_page_root, ctx);
1183 
1184 	/* This starts at 1. The root page is not written until
1185 	 * all of the others are finished
1186 	 */
1187 	for (i = 1; i < blob->active.num_pages; i++) {
1188 		page = &ctx->pages[i];
1189 		assert(page->sequence_num == i);
1190 
1191 		lba = _spdk_bs_page_to_lba(bs, bs->md_start + blob->active.pages[i]);
1192 
1193 		spdk_bs_batch_write_dev(batch, page, lba, lba_count);
1194 	}
1195 
1196 	spdk_bs_batch_close(batch);
1197 }
1198 
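/*
 * Resize the blob to sz clusters (in-memory state only; nothing is persisted
 * here). Spare clusters left over in an oversized cluster array are reused
 * first. For thick provisioned blobs one pass verifies that enough free
 * clusters exist before a second pass claims them; thin provisioned blobs
 * just get zero entries that are allocated later on first write.
 */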
1199 static int
1200 _spdk_blob_resize(struct spdk_blob *blob, uint64_t sz)
1201 {
1202 	uint64_t	i;
1203 	uint64_t	*tmp;
1204 	uint64_t	lfc; /* lowest free cluster */
1205 	uint64_t	num_clusters;
1206 	struct spdk_blob_store *bs;
1207 
1208 	bs = blob->bs;
1209 
1210 	_spdk_blob_verify_md_op(blob);
1211 
1212 	if (blob->active.num_clusters == sz) {
1213 		return 0;
1214 	}
1215 
1216 	if (blob->active.num_clusters < blob->active.cluster_array_size) {
1217 		/* If this blob was resized to be larger, then smaller, then
1218 		 * larger without syncing, then the cluster array already
1219 		 * contains spare assigned clusters we can use.
1220 		 */
1221 		num_clusters = spdk_min(blob->active.cluster_array_size,
1222 					sz);
1223 	} else {
1224 		num_clusters = blob->active.num_clusters;
1225 	}
1226 
1227 	/* Do two passes - one to verify that we can obtain enough clusters
1228 	 * and another to actually claim them.
1229 	 */
1230 
1231 	if (spdk_blob_is_thin_provisioned(blob) == false) {
1232 		lfc = 0;
1233 		for (i = num_clusters; i < sz; i++) {
1234 			lfc = spdk_bit_array_find_first_clear(bs->used_clusters, lfc);
1235 			if (lfc >= bs->total_clusters) {
1236 				/* No more free clusters. Cannot satisfy the request */
1237 				return -ENOSPC;
1238 			}
1239 			lfc++;
1240 		}
1241 	}
1242 
1243 	if (sz > num_clusters) {
1244 		/* Expand the cluster array if necessary.
1245 		 * We only shrink the array when persisting.
1246 		 */
1247 		tmp = realloc(blob->active.clusters, sizeof(uint64_t) * sz);
1248 		if (sz > 0 && tmp == NULL) {
1249 			return -ENOMEM;
1250 		}
1251 		memset(tmp + blob->active.cluster_array_size, 0,
1252 		       sizeof(uint64_t) * (sz - blob->active.cluster_array_size));
1253 		blob->active.clusters = tmp;
1254 		blob->active.cluster_array_size = sz;
1255 	}
1256 
1257 	blob->state = SPDK_BLOB_STATE_DIRTY;
1258 
1259 	if (spdk_blob_is_thin_provisioned(blob) == false) {
1260 		lfc = 0;
1261 		for (i = num_clusters; i < sz; i++) {
1262 			_spdk_bs_allocate_cluster(blob, i, &lfc, true);
1263 			lfc++;
1264 		}
1265 	}
1266 
1267 	blob->active.num_clusters = sz;
1268 
1269 	return 0;
1270 }
1271 
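/*
 * Kick off the metadata persist sequence. Driven by completion callbacks, the
 * stages run in this order:
 *
 *   _spdk_blob_persist_start
 *     -> _spdk_blob_persist_write_page_chain   (all pages except the root)
 *     -> _spdk_blob_persist_write_page_root    (root page written last)
 *     -> _spdk_blob_persist_zero_pages         (zero pages no longer used)
 *     -> _spdk_blob_persist_unmap_clusters     (unmap truncated clusters)
 *     -> _spdk_blob_persist_complete
 *
 * A blob whose active.num_pages is 0 is being deleted and jumps directly to
 * the zero-pages stage.
 */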
1272 static void
1273 _spdk_blob_persist_start(struct spdk_blob_persist_ctx *ctx)
1274 {
1275 	spdk_bs_sequence_t *seq = ctx->seq;
1276 	struct spdk_blob *blob = ctx->blob;
1277 	struct spdk_blob_store *bs = blob->bs;
1278 	uint64_t i;
1279 	uint32_t page_num;
1280 	int rc;
1281 
1282 	if (blob->active.num_pages == 0) {
1283 		/* This is the signal that the blob should be deleted.
1284 		 * Immediately jump to the clean up routine. */
1285 		assert(blob->clean.num_pages > 0);
1286 		ctx->idx = blob->clean.num_pages - 1;
1287 		blob->state = SPDK_BLOB_STATE_CLEAN;
1288 		_spdk_blob_persist_zero_pages(seq, ctx, 0);
1289 		return;
1290 
1291 	}
1292 
1293 	/* Generate the new metadata */
1294 	rc = _spdk_blob_serialize(blob, &ctx->pages, &blob->active.num_pages);
1295 	if (rc < 0) {
1296 		_spdk_blob_persist_complete(seq, ctx, rc);
1297 		return;
1298 	}
1299 
1300 	assert(blob->active.num_pages >= 1);
1301 
1302 	/* Resize the cache of page indices */
1303 	blob->active.pages = realloc(blob->active.pages,
1304 				     blob->active.num_pages * sizeof(*blob->active.pages));
1305 	if (!blob->active.pages) {
1306 		_spdk_blob_persist_complete(seq, ctx, -ENOMEM);
1307 		return;
1308 	}
1309 
1310 	/* Assign this metadata to pages. This requires two passes -
1311 	 * one to verify that there are enough pages and a second
1312 	 * to actually claim them. */
1313 	page_num = 0;
1314 	/* Note that this loop starts at one. The first page location is fixed by the blobid. */
1315 	for (i = 1; i < blob->active.num_pages; i++) {
1316 		page_num = spdk_bit_array_find_first_clear(bs->used_md_pages, page_num);
1317 		if (page_num >= spdk_bit_array_capacity(bs->used_md_pages)) {
1318 			_spdk_blob_persist_complete(seq, ctx, -ENOMEM);
1319 			return;
1320 		}
1321 		page_num++;
1322 	}
1323 
1324 	page_num = 0;
1325 	blob->active.pages[0] = _spdk_bs_blobid_to_page(blob->id);
1326 	for (i = 1; i < blob->active.num_pages; i++) {
1327 		page_num = spdk_bit_array_find_first_clear(bs->used_md_pages, page_num);
1328 		ctx->pages[i - 1].next = page_num;
1329 		/* Now that the previous metadata page is complete, calculate its crc. */
1330 		ctx->pages[i - 1].crc = _spdk_blob_md_page_calc_crc(&ctx->pages[i - 1]);
1331 		blob->active.pages[i] = page_num;
1332 		spdk_bit_array_set(bs->used_md_pages, page_num);
1333 		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Claiming page %u for blob %lu\n", page_num, blob->id);
1334 		page_num++;
1335 	}
1336 	ctx->pages[i - 1].crc = _spdk_blob_md_page_calc_crc(&ctx->pages[i - 1]);
1337 	/* Start writing the metadata from last page to first */
1338 	ctx->idx = blob->active.num_pages - 1;
1339 	blob->state = SPDK_BLOB_STATE_CLEAN;
1340 	_spdk_blob_persist_write_page_chain(seq, ctx, 0);
1341 }
1342 
1343 /* Write a blob to disk */
1344 static void
1345 _spdk_blob_persist(spdk_bs_sequence_t *seq, struct spdk_blob *blob,
1346 		   spdk_bs_sequence_cpl cb_fn, void *cb_arg)
1347 {
1348 	struct spdk_blob_persist_ctx *ctx;
1349 
1350 	_spdk_blob_verify_md_op(blob);
1351 
1352 	if (blob->state == SPDK_BLOB_STATE_CLEAN) {
1353 		cb_fn(seq, cb_arg, 0);
1354 		return;
1355 	}
1356 
1357 	ctx = calloc(1, sizeof(*ctx));
1358 	if (!ctx) {
1359 		cb_fn(seq, cb_arg, -ENOMEM);
1360 		return;
1361 	}
1362 	ctx->blob = blob;
1363 	ctx->seq = seq;
1364 	ctx->cb_fn = cb_fn;
1365 	ctx->cb_arg = cb_arg;
1366 
1367 	_spdk_blob_persist_start(ctx);
1368 }
1369 
1370 struct spdk_blob_copy_cluster_ctx {
1371 	struct spdk_blob *blob;
1372 	uint8_t *buf;
1373 	uint64_t page;
1374 	uint64_t new_cluster;
1375 	spdk_bs_sequence_t *seq;
1376 };
1377 
1378 static void
1379 _spdk_blob_allocate_and_copy_cluster_cpl(void *cb_arg, int bserrno)
1380 {
1381 	struct spdk_blob_copy_cluster_ctx *ctx = cb_arg;
1382 	struct spdk_bs_request_set *set = (struct spdk_bs_request_set *)ctx->seq;
1383 	TAILQ_HEAD(, spdk_bs_request_set) requests;
1384 	spdk_bs_user_op_t *op;
1385 
1386 	TAILQ_INIT(&requests);
1387 	TAILQ_SWAP(&set->channel->need_cluster_alloc, &requests, spdk_bs_request_set, link);
1388 
1389 	while (!TAILQ_EMPTY(&requests)) {
1390 		op = TAILQ_FIRST(&requests);
1391 		TAILQ_REMOVE(&requests, op, link);
1392 		if (bserrno == 0) {
1393 			spdk_bs_user_op_execute(op);
1394 		} else {
1395 			spdk_bs_user_op_abort(op);
1396 		}
1397 	}
1398 
1399 	spdk_dma_free(ctx->buf);
1400 	free(ctx);
1401 }
1402 
1403 static void
1404 _spdk_blob_insert_cluster_cpl(void *cb_arg, int bserrno)
1405 {
1406 	struct spdk_blob_copy_cluster_ctx *ctx = cb_arg;
1407 
1408 	if (bserrno) {
1409 		uint32_t cluster_number;
1410 
1411 		if (bserrno == -EEXIST) {
1412 			/* The metadata insert failed because another thread
1413 			 * allocated the cluster first. Free our cluster
1414 			 * but continue without error. */
1415 			bserrno = 0;
1416 		}
1417 
1418 		cluster_number = _spdk_bs_page_to_cluster(ctx->blob->bs, ctx->page);
1419 		_spdk_bs_release_cluster(ctx->blob->bs, cluster_number);
1420 	}
1421 
1422 	spdk_bs_sequence_finish(ctx->seq, bserrno);
1423 }
1424 
1425 static void
1426 _spdk_blob_write_copy_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
1427 {
1428 	struct spdk_blob_copy_cluster_ctx *ctx = cb_arg;
1429 	uint32_t cluster_number;
1430 
1431 	if (bserrno) {
1432 		/* The write failed, so jump to the final completion handler */
1433 		spdk_bs_sequence_finish(seq, bserrno);
1434 		return;
1435 	}
1436 
1437 	cluster_number = _spdk_bs_page_to_cluster(ctx->blob->bs, ctx->page);
1438 
1439 	_spdk_blob_insert_cluster_on_md_thread(ctx->blob, cluster_number, ctx->new_cluster,
1440 					       _spdk_blob_insert_cluster_cpl, ctx);
1441 }
1442 
1443 static void
1444 _spdk_blob_write_copy(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
1445 {
1446 	struct spdk_blob_copy_cluster_ctx *ctx = cb_arg;
1447 
1448 	if (bserrno != 0) {
1449 		/* The read failed, so jump to the final completion handler */
1450 		spdk_bs_sequence_finish(seq, bserrno);
1451 		return;
1452 	}
1453 
1454 	/* Write whole cluster */
1455 	spdk_bs_sequence_write_dev(seq, ctx->buf,
1456 				   _spdk_bs_cluster_to_lba(ctx->blob->bs, ctx->new_cluster),
1457 				   _spdk_bs_cluster_to_lba(ctx->blob->bs, 1),
1458 				   _spdk_blob_write_copy_cpl, ctx);
1459 }
1460 
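/*
 * Copy-on-write path for writes that target an unallocated cluster of a thin
 * provisioned blob or clone: claim a new cluster, read the backing cluster
 * into a bounce buffer, write it to the new cluster, then record the new
 * cluster in the blob's metadata on the metadata thread. Other user
 * operations arriving on this channel while the allocation is in flight are
 * parked on need_cluster_alloc and re-issued (or aborted) from
 * _spdk_blob_allocate_and_copy_cluster_cpl().
 */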
1461 static void
1462 _spdk_bs_allocate_and_copy_cluster(struct spdk_blob *blob,
1463 				   struct spdk_io_channel *_ch,
1464 				   uint64_t offset, spdk_bs_user_op_t *op)
1465 {
1466 	struct spdk_bs_cpl cpl;
1467 	struct spdk_bs_channel *ch;
1468 	struct spdk_blob_copy_cluster_ctx *ctx;
1469 	uint32_t cluster_start_page;
1470 	uint32_t cluster_number;
1471 	int rc;
1472 
1473 	ch = spdk_io_channel_get_ctx(_ch);
1474 
1475 	if (!TAILQ_EMPTY(&ch->need_cluster_alloc)) {
1476 		/* There are already operations pending. Queue this user op
1477 		 * and return because it will be re-executed when the outstanding
1478 		 * cluster allocation completes. */
1479 		TAILQ_INSERT_TAIL(&ch->need_cluster_alloc, op, link);
1480 		return;
1481 	}
1482 
1483 	/* Round the page offset down to the first page in the cluster */
1484 	cluster_start_page = _spdk_bs_page_to_cluster_start(blob, offset);
1485 
1486 	/* Calculate which index in the metadata cluster array the corresponding
1487 	 * cluster is supposed to be at. */
1488 	cluster_number = _spdk_bs_page_to_cluster(blob->bs, cluster_start_page);
1489 
1490 	ctx = calloc(1, sizeof(*ctx));
1491 	if (!ctx) {
1492 		spdk_bs_user_op_abort(op);
1493 		return;
1494 	}
1495 
1496 	assert(blob->bs->cluster_sz % blob->back_bs_dev->blocklen == 0);
1497 
1498 	ctx->blob = blob;
1499 	ctx->page = cluster_start_page;
1500 
1501 	ctx->buf = spdk_dma_malloc(blob->bs->cluster_sz, blob->back_bs_dev->blocklen, NULL);
1502 	if (!ctx->buf) {
1503 		SPDK_ERRLOG("DMA allocation for cluster of size = %" PRIu32 " failed.\n",
1504 			    blob->bs->cluster_sz);
1505 		free(ctx);
1506 		spdk_bs_user_op_abort(op);
1507 		return;
1508 	}
1509 
1510 	rc = _spdk_bs_allocate_cluster(blob, cluster_number, &ctx->new_cluster, false);
1511 	if (rc != 0) {
1512 		spdk_dma_free(ctx->buf);
1513 		free(ctx);
1514 		spdk_bs_user_op_abort(op);
1515 		return;
1516 	}
1517 
1518 	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
1519 	cpl.u.blob_basic.cb_fn = _spdk_blob_allocate_and_copy_cluster_cpl;
1520 	cpl.u.blob_basic.cb_arg = ctx;
1521 
1522 	ctx->seq = spdk_bs_sequence_start(_ch, &cpl);
1523 	if (!ctx->seq) {
1524 		_spdk_bs_release_cluster(blob->bs, ctx->new_cluster);
1525 		spdk_dma_free(ctx->buf);
1526 		free(ctx);
1527 		spdk_bs_user_op_abort(op);
1528 		return;
1529 	}
1530 
1531 	/* Queue the user op to block other incoming operations */
1532 	TAILQ_INSERT_TAIL(&ch->need_cluster_alloc, op, link);
1533 
1534 	/* Read cluster from backing device */
1535 	spdk_bs_sequence_read_bs_dev(ctx->seq, blob->back_bs_dev, ctx->buf,
1536 				     _spdk_bs_dev_page_to_lba(blob->back_bs_dev, cluster_start_page),
1537 				     _spdk_bs_dev_byte_to_lba(blob->back_bs_dev, blob->bs->cluster_sz),
1538 				     _spdk_blob_write_copy, ctx);
1539 }
1540 
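/*
 * Translate a blob-relative page offset and length into a device LBA range.
 * Unallocated pages resolve to the backing device (snapshot or zeroes device)
 * rather than the blobstore's base device.
 */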
1541 static void
1542 _spdk_blob_calculate_lba_and_lba_count(struct spdk_blob *blob, uint64_t page, uint64_t length,
1543 				       uint64_t *lba,	uint32_t *lba_count)
1544 {
1545 	*lba_count = _spdk_bs_page_to_lba(blob->bs, length);
1546 
1547 	if (!_spdk_bs_page_is_allocated(blob, page)) {
1548 		assert(blob->back_bs_dev != NULL);
1549 		*lba = _spdk_bs_dev_page_to_lba(blob->back_bs_dev, page);
1550 		*lba_count = _spdk_bs_blob_lba_to_back_dev_lba(blob, *lba_count);
1551 	} else {
1552 		*lba = _spdk_bs_blob_page_to_lba(blob, page);
1553 	}
1554 }
1555 
1556 struct op_split_ctx {
1557 	struct spdk_blob *blob;
1558 	struct spdk_io_channel *channel;
1559 	uint64_t page_offset;
1560 	uint64_t pages_remaining;
1561 	void *curr_payload;
1562 	enum spdk_blob_op_type op_type;
1563 	spdk_bs_sequence_t *seq;
1564 };
1565 
1566 static void
1567 _spdk_blob_request_submit_op_split_next(void *cb_arg, int bserrno)
1568 {
1569 	struct op_split_ctx	*ctx = cb_arg;
1570 	struct spdk_blob	*blob = ctx->blob;
1571 	struct spdk_io_channel	*ch = ctx->channel;
1572 	enum spdk_blob_op_type	op_type = ctx->op_type;
1573 	uint8_t			*buf = ctx->curr_payload;
1574 	uint64_t		offset = ctx->page_offset;
1575 	uint64_t		length = ctx->pages_remaining;
1576 	uint64_t		op_length;
1577 
1578 	if (bserrno != 0 || ctx->pages_remaining == 0) {
1579 		spdk_bs_sequence_finish(ctx->seq, bserrno);
1580 		free(ctx);
1581 		return;
1582 	}
1583 
1584 	op_length = spdk_min(length, _spdk_bs_num_pages_to_cluster_boundary(blob, offset));
1585 
1586 	/* Update length and payload for next operation */
1587 	ctx->pages_remaining -= op_length;
1588 	ctx->page_offset += op_length;
1589 	if (op_type == SPDK_BLOB_WRITE || op_type == SPDK_BLOB_READ) {
1590 		ctx->curr_payload += (op_length * SPDK_BS_PAGE_SIZE);
1591 	}
1592 
1593 	switch (op_type) {
1594 	case SPDK_BLOB_READ:
1595 		spdk_blob_io_read(blob, ch, buf, offset, op_length,
1596 				  _spdk_blob_request_submit_op_split_next, ctx);
1597 		break;
1598 	case SPDK_BLOB_WRITE:
1599 		spdk_blob_io_write(blob, ch, buf, offset, op_length,
1600 				   _spdk_blob_request_submit_op_split_next, ctx);
1601 		break;
1602 	case SPDK_BLOB_UNMAP:
1603 		spdk_blob_io_unmap(blob, ch, offset, op_length,
1604 				   _spdk_blob_request_submit_op_split_next, ctx);
1605 		break;
1606 	case SPDK_BLOB_WRITE_ZEROES:
1607 		spdk_blob_io_write_zeroes(blob, ch, offset, op_length,
1608 					  _spdk_blob_request_submit_op_split_next, ctx);
1609 		break;
1610 	case SPDK_BLOB_READV:
1611 	case SPDK_BLOB_WRITEV:
1612 		SPDK_ERRLOG("readv/writev not valid for %s\n", __func__);
1613 		spdk_bs_sequence_finish(ctx->seq, -EINVAL);
1614 		free(ctx);
1615 		break;
1616 	}
1617 }
1618 
1619 static void
1620 _spdk_blob_request_submit_op_split(struct spdk_io_channel *ch, struct spdk_blob *blob,
1621 				   void *payload, uint64_t offset, uint64_t length,
1622 				   spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type)
1623 {
1624 	struct op_split_ctx *ctx;
1625 	spdk_bs_sequence_t *seq;
1626 	struct spdk_bs_cpl cpl;
1627 
1628 	assert(blob != NULL);
1629 
1630 	ctx = calloc(1, sizeof(struct op_split_ctx));
1631 	if (ctx == NULL) {
1632 		cb_fn(cb_arg, -ENOMEM);
1633 		return;
1634 	}
1635 
1636 	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
1637 	cpl.u.blob_basic.cb_fn = cb_fn;
1638 	cpl.u.blob_basic.cb_arg = cb_arg;
1639 
1640 	seq = spdk_bs_sequence_start(ch, &cpl);
1641 	if (!seq) {
1642 		free(ctx);
1643 		cb_fn(cb_arg, -ENOMEM);
1644 		return;
1645 	}
1646 
1647 	ctx->blob = blob;
1648 	ctx->channel = ch;
1649 	ctx->curr_payload = payload;
1650 	ctx->page_offset = offset;
1651 	ctx->pages_remaining = length;
1652 	ctx->op_type = op_type;
1653 	ctx->seq = seq;
1654 
1655 	_spdk_blob_request_submit_op_split_next(ctx, 0);
1656 }
1657 
1658 static void
1659 _spdk_blob_request_submit_op_single(struct spdk_io_channel *_ch, struct spdk_blob *blob,
1660 				    void *payload, uint64_t offset, uint64_t length,
1661 				    spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type)
1662 {
1663 	struct spdk_bs_cpl cpl;
1664 	uint64_t lba;
1665 	uint32_t lba_count;
1666 
1667 	assert(blob != NULL);
1668 
1669 	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
1670 	cpl.u.blob_basic.cb_fn = cb_fn;
1671 	cpl.u.blob_basic.cb_arg = cb_arg;
1672 
1673 	_spdk_blob_calculate_lba_and_lba_count(blob, offset, length, &lba, &lba_count);
1674 
1675 	switch (op_type) {
1676 	case SPDK_BLOB_READ: {
1677 		spdk_bs_batch_t *batch;
1678 
1679 		batch = spdk_bs_batch_open(_ch, &cpl);
1680 		if (!batch) {
1681 			cb_fn(cb_arg, -ENOMEM);
1682 			return;
1683 		}
1684 
1685 		if (_spdk_bs_page_is_allocated(blob, offset)) {
1686 			/* Read from the blob */
1687 			spdk_bs_batch_read_dev(batch, payload, lba, lba_count);
1688 		} else {
1689 			/* Read from the backing block device */
1690 			spdk_bs_batch_read_bs_dev(batch, blob->back_bs_dev, payload, lba, lba_count);
1691 		}
1692 
1693 		spdk_bs_batch_close(batch);
1694 		break;
1695 	}
1696 	case SPDK_BLOB_WRITE:
1697 	case SPDK_BLOB_WRITE_ZEROES: {
1698 		if (_spdk_bs_page_is_allocated(blob, offset)) {
1699 			/* Write to the blob */
1700 			spdk_bs_batch_t *batch;
1701 
1702 			batch = spdk_bs_batch_open(_ch, &cpl);
1703 			if (!batch) {
1704 				cb_fn(cb_arg, -ENOMEM);
1705 				return;
1706 			}
1707 
1708 			if (op_type == SPDK_BLOB_WRITE) {
1709 				spdk_bs_batch_write_dev(batch, payload, lba, lba_count);
1710 			} else {
1711 				spdk_bs_batch_write_zeroes_dev(batch, lba, lba_count);
1712 			}
1713 
1714 			spdk_bs_batch_close(batch);
1715 		} else {
1716 			/* Queue this operation and allocate the cluster */
1717 			spdk_bs_user_op_t *op;
1718 
1719 			op = spdk_bs_user_op_alloc(_ch, &cpl, op_type, blob, payload, 0, offset, length);
1720 			if (!op) {
1721 				cb_fn(cb_arg, -ENOMEM);
1722 				return;
1723 			}
1724 
1725 			_spdk_bs_allocate_and_copy_cluster(blob, _ch, offset, op);
1726 		}
1727 		break;
1728 	}
1729 	case SPDK_BLOB_UNMAP: {
1730 		spdk_bs_batch_t *batch;
1731 
1732 		batch = spdk_bs_batch_open(_ch, &cpl);
1733 		if (!batch) {
1734 			cb_fn(cb_arg, -ENOMEM);
1735 			return;
1736 		}
1737 
1738 		if (_spdk_bs_page_is_allocated(blob, offset)) {
1739 			spdk_bs_batch_unmap_dev(batch, lba, lba_count);
1740 		}
1741 
1742 		spdk_bs_batch_close(batch);
1743 		break;
1744 	}
1745 	case SPDK_BLOB_READV:
1746 	case SPDK_BLOB_WRITEV:
1747 		SPDK_ERRLOG("readv/writev not valid\n");
1748 		cb_fn(cb_arg, -EINVAL);
1749 		break;
1750 	}
1751 }
1752 
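/*
 * Common entry point for page-granularity read, write, unmap and write-zeroes
 * requests. Rejects writes to read-only blobs and out-of-range requests, then
 * dispatches to the single-op path when the request fits inside one cluster,
 * or to the split path when it crosses a cluster boundary.
 */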
1753 static void
1754 _spdk_blob_request_submit_op(struct spdk_blob *blob, struct spdk_io_channel *_channel,
1755 			     void *payload, uint64_t offset, uint64_t length,
1756 			     spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type)
1757 {
1758 	assert(blob != NULL);
1759 
1760 	if (blob->data_ro && op_type != SPDK_BLOB_READ) {
1761 		cb_fn(cb_arg, -EPERM);
1762 		return;
1763 	}
1764 
1765 	if (offset + length > blob->active.num_clusters * blob->bs->pages_per_cluster) {
1766 		cb_fn(cb_arg, -EINVAL);
1767 		return;
1768 	}
1769 
1770 	if (length <= _spdk_bs_num_pages_to_cluster_boundary(blob, offset)) {
1771 		_spdk_blob_request_submit_op_single(_channel, blob, payload, offset, length,
1772 						    cb_fn, cb_arg, op_type);
1773 	} else {
1774 		_spdk_blob_request_submit_op_split(_channel, blob, payload, offset, length,
1775 						   cb_fn, cb_arg, op_type);
1776 	}
1777 }
1778 
1779 struct rw_iov_ctx {
1780 	struct spdk_blob *blob;
1781 	struct spdk_io_channel *channel;
1782 	spdk_blob_op_complete cb_fn;
1783 	void *cb_arg;
1784 	bool read;
1785 	int iovcnt;
1786 	struct iovec *orig_iov;
1787 	uint64_t page_offset;
1788 	uint64_t pages_remaining;
1789 	uint64_t pages_done;
1790 	struct iovec iov[0];
1791 };
1792 
1793 static void
1794 _spdk_rw_iov_done(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
1795 {
1796 	assert(cb_arg == NULL);
1797 	spdk_bs_sequence_finish(seq, bserrno);
1798 }
1799 
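/*
 * Issue the next single-cluster piece of a readv/writev that crosses cluster
 * boundaries, rebuilding ctx->iov from the caller's original iovs each time.
 * For example, given two original iovs of 3 and 5 pages with a cluster
 * boundary after page 4, the first piece is issued with two iovs (3 pages +
 * 1 page) and the second piece with a single iov covering the remaining 4
 * pages of the caller's second buffer.
 */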
1800 static void
1801 _spdk_rw_iov_split_next(void *cb_arg, int bserrno)
1802 {
1803 	struct rw_iov_ctx *ctx = cb_arg;
1804 	struct spdk_blob *blob = ctx->blob;
1805 	struct iovec *iov, *orig_iov;
1806 	int iovcnt;
1807 	size_t orig_iovoff;
1808 	uint64_t page_count, pages_to_boundary, page_offset;
1809 	uint64_t byte_count;
1810 
1811 	if (bserrno != 0 || ctx->pages_remaining == 0) {
1812 		ctx->cb_fn(ctx->cb_arg, bserrno);
1813 		free(ctx);
1814 		return;
1815 	}
1816 
1817 	page_offset = ctx->page_offset;
1818 	pages_to_boundary = _spdk_bs_num_pages_to_cluster_boundary(blob, page_offset);
1819 	page_count = spdk_min(ctx->pages_remaining, pages_to_boundary);
1820 
1821 	/*
1822 	 * Get index and offset into the original iov array for our current position in the I/O sequence.
1823 	 *  byte_count keeps track of how many bytes remain until orig_iov and orig_iovoff
1824 	 *  point to the current position in the I/O sequence.
1825 	 */
1826 	byte_count = ctx->pages_done * sizeof(struct spdk_blob_md_page);
1827 	orig_iov = &ctx->orig_iov[0];
1828 	orig_iovoff = 0;
1829 	while (byte_count > 0) {
1830 		if (byte_count >= orig_iov->iov_len) {
1831 			byte_count -= orig_iov->iov_len;
1832 			orig_iov++;
1833 		} else {
1834 			orig_iovoff = byte_count;
1835 			byte_count = 0;
1836 		}
1837 	}
1838 
1839 	/*
1840 	 * Build an iov array for the next I/O in the sequence.  byte_count will keep track of how many
1841 	 *  bytes of this next I/O remain to be accounted for in the new iov array.
1842 	 */
1843 	byte_count = page_count * sizeof(struct spdk_blob_md_page);
1844 	iov = &ctx->iov[0];
1845 	iovcnt = 0;
1846 	while (byte_count > 0) {
1847 		iov->iov_len = spdk_min(byte_count, orig_iov->iov_len - orig_iovoff);
1848 		iov->iov_base = orig_iov->iov_base + orig_iovoff;
1849 		byte_count -= iov->iov_len;
1850 		orig_iovoff = 0;
1851 		orig_iov++;
1852 		iov++;
1853 		iovcnt++;
1854 	}
1855 
1856 	ctx->page_offset += page_count;
1857 	ctx->pages_done += page_count;
1858 	ctx->pages_remaining -= page_count;
1859 	iov = &ctx->iov[0];
1860 
1861 	if (ctx->read) {
1862 		spdk_blob_io_readv(ctx->blob, ctx->channel, iov, iovcnt, page_offset,
1863 				   page_count, _spdk_rw_iov_split_next, ctx);
1864 	} else {
1865 		spdk_blob_io_writev(ctx->blob, ctx->channel, iov, iovcnt, page_offset,
1866 				    page_count, _spdk_rw_iov_split_next, ctx);
1867 	}
1868 }
1869 
1870 static void
1871 _spdk_blob_request_submit_rw_iov(struct spdk_blob *blob, struct spdk_io_channel *_channel,
1872 				 struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
1873 				 spdk_blob_op_complete cb_fn, void *cb_arg, bool read)
1874 {
1875 	struct spdk_bs_cpl	cpl;
1876 
1877 	assert(blob != NULL);
1878 
1879 	if (!read && blob->data_ro) {
1880 		cb_fn(cb_arg, -EPERM);
1881 		return;
1882 	}
1883 
1884 	if (length == 0) {
1885 		cb_fn(cb_arg, 0);
1886 		return;
1887 	}
1888 
1889 	if (offset + length > blob->active.num_clusters * blob->bs->pages_per_cluster) {
1890 		cb_fn(cb_arg, -EINVAL);
1891 		return;
1892 	}
1893 
1894 	/*
1895 	 * For now, we implement readv/writev using a sequence (instead of a batch) to account for having
1896 	 *  to split a request that spans a cluster boundary.  For I/O that do not span a cluster boundary,
1897 	 *  there will be no noticeable difference compared to using a batch.  For I/O that do span a cluster
1898 	 *  boundary, the target LBAs (after blob offset to LBA translation) may not be contiguous, so we need
1899 	 *  to allocate a separate iov array and split the I/O such that none of the resulting
1900 	 *  smaller I/O cross a cluster boundary.  These smaller I/O will be issued in sequence (not in parallel)
1901 	 *  but since this case happens very infrequently, any performance impact will be negligible.
1902 	 *
1903 	 * This could be optimized in the future to allocate a big enough iov array to account for all of the iovs
1904 	 *  for all of the smaller I/Os, pre-build all of the iov arrays for the smaller I/Os, then issue them
1905 	 *  in a batch.  That would also require creating an intermediate spdk_bs_cpl that would get called
1906 	 *  when the batch was completed, to allow for freeing the memory for the iov arrays.
1907 	 */
1908 	if (spdk_likely(length <= _spdk_bs_num_pages_to_cluster_boundary(blob, offset))) {
1909 		uint32_t lba_count;
1910 		uint64_t lba;
1911 
1912 		_spdk_blob_calculate_lba_and_lba_count(blob, offset, length, &lba, &lba_count);
1913 
1914 		cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
1915 		cpl.u.blob_basic.cb_fn = cb_fn;
1916 		cpl.u.blob_basic.cb_arg = cb_arg;
1917 
1918 		if (read) {
1919 			spdk_bs_sequence_t *seq;
1920 
1921 			seq = spdk_bs_sequence_start(_channel, &cpl);
1922 			if (!seq) {
1923 				cb_fn(cb_arg, -ENOMEM);
1924 				return;
1925 			}
1926 
1927 			if (_spdk_bs_page_is_allocated(blob, offset)) {
1928 				spdk_bs_sequence_readv_dev(seq, iov, iovcnt, lba, lba_count, _spdk_rw_iov_done, NULL);
1929 			} else {
1930 				spdk_bs_sequence_readv_bs_dev(seq, blob->back_bs_dev, iov, iovcnt, lba, lba_count,
1931 							      _spdk_rw_iov_done, NULL);
1932 			}
1933 		} else {
1934 			if (_spdk_bs_page_is_allocated(blob, offset)) {
1935 				spdk_bs_sequence_t *seq;
1936 
1937 				seq = spdk_bs_sequence_start(_channel, &cpl);
1938 				if (!seq) {
1939 					cb_fn(cb_arg, -ENOMEM);
1940 					return;
1941 				}
1942 
1943 				spdk_bs_sequence_writev_dev(seq, iov, iovcnt, lba, lba_count, _spdk_rw_iov_done, NULL);
1944 			} else {
1945 				/* Queue this operation and allocate the cluster */
1946 				spdk_bs_user_op_t *op;
1947 
1948 				op = spdk_bs_user_op_alloc(_channel, &cpl, SPDK_BLOB_WRITEV, blob, iov, iovcnt, offset, length);
1949 				if (!op) {
1950 					cb_fn(cb_arg, -ENOMEM);
1951 					return;
1952 				}
1953 
1954 				_spdk_bs_allocate_and_copy_cluster(blob, _channel, offset, op);
1955 			}
1956 		}
1957 	} else {
1958 		struct rw_iov_ctx *ctx;
1959 
1960 		ctx = calloc(1, sizeof(struct rw_iov_ctx) + iovcnt * sizeof(struct iovec));
1961 		if (ctx == NULL) {
1962 			cb_fn(cb_arg, -ENOMEM);
1963 			return;
1964 		}
1965 
1966 		ctx->blob = blob;
1967 		ctx->channel = _channel;
1968 		ctx->cb_fn = cb_fn;
1969 		ctx->cb_arg = cb_arg;
1970 		ctx->read = read;
1971 		ctx->orig_iov = iov;
1972 		ctx->iovcnt = iovcnt;
1973 		ctx->page_offset = offset;
1974 		ctx->pages_remaining = length;
1975 		ctx->pages_done = 0;
1976 
1977 		_spdk_rw_iov_split_next(ctx, 0);
1978 	}
1979 }
1980 
1981 static struct spdk_blob *
1982 _spdk_blob_lookup(struct spdk_blob_store *bs, spdk_blob_id blobid)
1983 {
1984 	struct spdk_blob *blob;
1985 
1986 	TAILQ_FOREACH(blob, &bs->blobs, link) {
1987 		if (blob->id == blobid) {
1988 			return blob;
1989 		}
1990 	}
1991 
1992 	return NULL;
1993 }
1994 
1995 static int
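/*
 * Per-thread channel constructor registered via spdk_io_device_register().
 * Preallocates max_channel_ops request sets for this thread and opens a
 * channel on the underlying bs_dev.
 */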
1996 _spdk_bs_channel_create(void *io_device, void *ctx_buf)
1997 {
1998 	struct spdk_blob_store		*bs = io_device;
1999 	struct spdk_bs_channel		*channel = ctx_buf;
2000 	struct spdk_bs_dev		*dev;
2001 	uint32_t			max_ops = bs->max_channel_ops;
2002 	uint32_t			i;
2003 
2004 	dev = bs->dev;
2005 
2006 	channel->req_mem = calloc(max_ops, sizeof(struct spdk_bs_request_set));
2007 	if (!channel->req_mem) {
2008 		return -1;
2009 	}
2010 
2011 	TAILQ_INIT(&channel->reqs);
2012 
2013 	for (i = 0; i < max_ops; i++) {
2014 		TAILQ_INSERT_TAIL(&channel->reqs, &channel->req_mem[i], link);
2015 	}
2016 
2017 	channel->bs = bs;
2018 	channel->dev = dev;
2019 	channel->dev_channel = dev->create_channel(dev);
2020 
2021 	if (!channel->dev_channel) {
2022 		SPDK_ERRLOG("Failed to create device channel.\n");
2023 		free(channel->req_mem);
2024 		return -1;
2025 	}
2026 
2027 	TAILQ_INIT(&channel->need_cluster_alloc);
2028 
2029 	return 0;
2030 }
2031 
2032 static void
2033 _spdk_bs_channel_destroy(void *io_device, void *ctx_buf)
2034 {
2035 	struct spdk_bs_channel *channel = ctx_buf;
2036 	spdk_bs_user_op_t *op;
2037 
2038 	while (!TAILQ_EMPTY(&channel->need_cluster_alloc)) {
2039 		op = TAILQ_FIRST(&channel->need_cluster_alloc);
2040 		TAILQ_REMOVE(&channel->need_cluster_alloc, op, link);
2041 		spdk_bs_user_op_abort(op);
2042 	}
2043 
2044 	free(channel->req_mem);
2045 	channel->dev->destroy_channel(channel->dev, channel->dev_channel);
2046 }
2047 
2048 static void
2049 _spdk_bs_dev_destroy(void *io_device)
2050 {
2051 	struct spdk_blob_store *bs = io_device;
2052 	struct spdk_blob	*blob, *blob_tmp;
2053 
2054 	bs->dev->destroy(bs->dev);
2055 
2056 	TAILQ_FOREACH_SAFE(blob, &bs->blobs, link, blob_tmp) {
2057 		TAILQ_REMOVE(&bs->blobs, blob, link);
2058 		_spdk_blob_free(blob);
2059 	}
2060 
2061 	pthread_mutex_destroy(&bs->used_clusters_mutex);
2062 
2063 	spdk_bit_array_free(&bs->used_blobids);
2064 	spdk_bit_array_free(&bs->used_md_pages);
2065 	spdk_bit_array_free(&bs->used_clusters);
2066 	/*
2067 	 * If this function is called for any reason except a successful unload,
2068 	 * the unload_cpl type will be NONE and this will be a nop.
2069 	 */
2070 	spdk_bs_call_cpl(&bs->unload_cpl, bs->unload_err);
2071 
2072 	free(bs);
2073 }
2074 
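/*
 * Record the blob as a clone in its parent snapshot's clone list, creating the
 * snapshot entry in bs->snapshots on first use.  A no-op for blobs that have
 * no parent.
 */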
2075 static int
2076 _spdk_bs_blob_list_add(struct spdk_blob *blob)
2077 {
2078 	spdk_blob_id snapshot_id;
2079 	struct spdk_blob_list *snapshot_entry = NULL;
2080 	struct spdk_blob_list *clone_entry = NULL;
2081 
2082 	assert(blob != NULL);
2083 
2084 	snapshot_id = blob->parent_id;
2085 	if (snapshot_id == SPDK_BLOBID_INVALID) {
2086 		return 0;
2087 	}
2088 
2089 	TAILQ_FOREACH(snapshot_entry, &blob->bs->snapshots, link) {
2090 		if (snapshot_entry->id == snapshot_id) {
2091 			break;
2092 		}
2093 	}
2094 
2095 	if (snapshot_entry == NULL) {
2096 		/* Snapshot not found */
2097 		snapshot_entry = calloc(1, sizeof(struct spdk_blob_list));
2098 		if (snapshot_entry == NULL) {
2099 			return -ENOMEM;
2100 		}
2101 		snapshot_entry->id = snapshot_id;
2102 		TAILQ_INIT(&snapshot_entry->clones);
2103 		TAILQ_INSERT_TAIL(&blob->bs->snapshots, snapshot_entry, link);
2104 	} else {
2105 		TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) {
2106 			if (clone_entry->id == blob->id) {
2107 				break;
2108 			}
2109 		}
2110 	}
2111 
2112 	if (clone_entry == NULL) {
2113 		/* Clone not found */
2114 		clone_entry = calloc(1, sizeof(struct spdk_blob_list));
2115 		if (clone_entry == NULL) {
2116 			return -ENOMEM;
2117 		}
2118 		clone_entry->id = blob->id;
2119 		TAILQ_INIT(&clone_entry->clones);
2120 		TAILQ_INSERT_TAIL(&snapshot_entry->clones, clone_entry, link);
2121 		snapshot_entry->clone_count++;
2122 	}
2123 
2124 	return 0;
2125 }
2126 
2127 static int
2128 _spdk_bs_blob_list_remove(struct spdk_blob *blob)
2129 {
2130 	struct spdk_blob_list *snapshot_entry = NULL;
2131 	struct spdk_blob_list *clone_entry = NULL;
2132 	spdk_blob_id snapshot_id;
2133 
2134 	assert(blob != NULL);
2135 
2136 	snapshot_id = blob->parent_id;
2137 	if (snapshot_id == SPDK_BLOBID_INVALID) {
2138 		return 0;
2139 	}
2140 
2141 	TAILQ_FOREACH(snapshot_entry, &blob->bs->snapshots, link) {
2142 		if (snapshot_entry->id == snapshot_id) {
2143 			break;
2144 		}
2145 	}
2146 
2147 	assert(snapshot_entry != NULL);
2148 
2149 	TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) {
2150 		if (clone_entry->id == blob->id) {
2151 			break;
2152 		}
2153 	}
2154 
2155 	assert(clone_entry != NULL);
2156 
2157 	blob->parent_id = SPDK_BLOBID_INVALID;
2158 	TAILQ_REMOVE(&snapshot_entry->clones, clone_entry, link);
2159 	free(clone_entry);
2160 
2161 	snapshot_entry->clone_count--;
2162 	if (snapshot_entry->clone_count == 0) {
2163 		/* Snapshot has no more clones */
2164 		TAILQ_REMOVE(&blob->bs->snapshots, snapshot_entry, link);
2165 		free(snapshot_entry);
2166 	}
2167 
2168 	return 0;
2169 }
2170 
2171 static int
2172 _spdk_bs_blob_list_free(struct spdk_blob_store *bs)
2173 {
2174 	struct spdk_blob_list *snapshot_entry;
2175 	struct spdk_blob_list *snapshot_entry_tmp;
2176 	struct spdk_blob_list *clone_entry;
2177 	struct spdk_blob_list *clone_entry_tmp;
2178 
2179 	TAILQ_FOREACH_SAFE(snapshot_entry, &bs->snapshots, link, snapshot_entry_tmp) {
2180 		TAILQ_FOREACH_SAFE(clone_entry, &snapshot_entry->clones, link, clone_entry_tmp) {
2181 			TAILQ_REMOVE(&snapshot_entry->clones, clone_entry, link);
2182 			free(clone_entry);
2183 		}
2184 		TAILQ_REMOVE(&bs->snapshots, snapshot_entry, link);
2185 		free(snapshot_entry);
2186 	}
2187 
2188 	return 0;
2189 }
2190 
2191 static void
2192 _spdk_bs_free(struct spdk_blob_store *bs)
2193 {
2194 	_spdk_bs_blob_list_free(bs);
2195 
2196 	spdk_bs_unregister_md_thread(bs);
2197 	spdk_io_device_unregister(bs, _spdk_bs_dev_destroy);
2198 }
2199 
2200 void
2201 spdk_bs_opts_init(struct spdk_bs_opts *opts)
2202 {
2203 	opts->cluster_sz = SPDK_BLOB_OPTS_CLUSTER_SZ;
2204 	opts->num_md_pages = SPDK_BLOB_OPTS_NUM_MD_PAGES;
2205 	opts->max_md_ops = SPDK_BLOB_OPTS_MAX_MD_OPS;
2206 	opts->max_channel_ops = SPDK_BLOB_OPTS_DEFAULT_CHANNEL_OPS;
2207 	memset(&opts->bstype, 0, sizeof(opts->bstype));
2208 	opts->iter_cb_fn = NULL;
2209 	opts->iter_cb_arg = NULL;
2210 }
2211 
2212 static int
2213 _spdk_bs_opts_verify(struct spdk_bs_opts *opts)
2214 {
2215 	if (opts->cluster_sz == 0 || opts->num_md_pages == 0 || opts->max_md_ops == 0 ||
2216 	    opts->max_channel_ops == 0) {
2217 		SPDK_ERRLOG("Blobstore options cannot be set to 0\n");
2218 		return -1;
2219 	}
2220 
2221 	return 0;
2222 }
2223 
2224 static struct spdk_blob_store *
2225 _spdk_bs_alloc(struct spdk_bs_dev *dev, struct spdk_bs_opts *opts)
2226 {
2227 	struct spdk_blob_store	*bs;
2228 	uint64_t dev_size;
2229 	int rc;
2230 
2231 	dev_size = dev->blocklen * dev->blockcnt;
2232 	if (dev_size < opts->cluster_sz) {
2233 		/* Device size cannot be smaller than cluster size of blobstore */
2234 		SPDK_ERRLOG("Device size %" PRIu64 " is smaller than cluster size %" PRIu32 "\n",
2235 			    dev_size, opts->cluster_sz);
2236 		return NULL;
2237 	}
2238 	if (opts->cluster_sz < SPDK_BS_PAGE_SIZE) {
2239 		/* Cluster size cannot be smaller than page size */
2240 		SPDK_ERRLOG("Cluster size %" PRIu32 " is smaller than page size %d\n",
2241 			    opts->cluster_sz, SPDK_BS_PAGE_SIZE);
2242 		return NULL;
2243 	}
2244 	bs = calloc(1, sizeof(struct spdk_blob_store));
2245 	if (!bs) {
2246 		return NULL;
2247 	}
2248 
2249 	TAILQ_INIT(&bs->blobs);
2250 	TAILQ_INIT(&bs->snapshots);
2251 	bs->dev = dev;
2252 	bs->md_thread = spdk_get_thread();
2253 	assert(bs->md_thread != NULL);
2254 
2255 	/*
2256 	 * Do not use _spdk_bs_lba_to_cluster() here since blockcnt may not be an
2257 	 *  even multiple of the cluster size.
2258 	 */
2259 	bs->cluster_sz = opts->cluster_sz;
2260 	bs->total_clusters = dev->blockcnt / (bs->cluster_sz / dev->blocklen);
2261 	bs->pages_per_cluster = bs->cluster_sz / SPDK_BS_PAGE_SIZE;
2262 	bs->num_free_clusters = bs->total_clusters;
2263 	bs->used_clusters = spdk_bit_array_create(bs->total_clusters);
2264 	if (bs->used_clusters == NULL) {
2265 		free(bs);
2266 		return NULL;
2267 	}
2268 
2269 	bs->max_channel_ops = opts->max_channel_ops;
2270 	bs->super_blob = SPDK_BLOBID_INVALID;
2271 	memcpy(&bs->bstype, &opts->bstype, sizeof(opts->bstype));
2272 
2273 	/* The metadata is assumed to be at least 1 page */
2274 	bs->used_md_pages = spdk_bit_array_create(1);
2275 	bs->used_blobids = spdk_bit_array_create(0);
2276 
2277 	pthread_mutex_init(&bs->used_clusters_mutex, NULL);
2278 
2279 	spdk_io_device_register(bs, _spdk_bs_channel_create, _spdk_bs_channel_destroy,
2280 				sizeof(struct spdk_bs_channel));
2281 	rc = spdk_bs_register_md_thread(bs);
2282 	if (rc == -1) {
2283 		spdk_io_device_unregister(bs, NULL);
2284 		pthread_mutex_destroy(&bs->used_clusters_mutex);
2285 		spdk_bit_array_free(&bs->used_blobids);
2286 		spdk_bit_array_free(&bs->used_md_pages);
2287 		spdk_bit_array_free(&bs->used_clusters);
2288 		free(bs);
2289 		return NULL;
2290 	}
2291 
2292 	return bs;
2293 }
2294 
2295 /* START spdk_bs_load, spdk_bs_load_ctx will be used for both load and unload. */
2296 
2297 struct spdk_bs_load_ctx {
2298 	struct spdk_blob_store		*bs;
2299 	struct spdk_bs_super_block	*super;
2300 
2301 	struct spdk_bs_md_mask		*mask;
2302 	bool				in_page_chain;
2303 	uint32_t			page_index;
2304 	uint32_t			cur_page;
2305 	struct spdk_blob_md_page	*page;
2306 	bool				is_load;
2307 
2308 	spdk_bs_sequence_t			*seq;
2309 	spdk_blob_op_with_handle_complete	iter_cb_fn;
2310 	void					*iter_cb_arg;
2311 };
2312 
2313 static void
2314 _spdk_bs_load_ctx_fail(spdk_bs_sequence_t *seq, struct spdk_bs_load_ctx *ctx, int bserrno)
2315 {
2316 	assert(bserrno != 0);
2317 
2318 	spdk_dma_free(ctx->super);
2319 	spdk_bs_sequence_finish(seq, bserrno);
2320 	/*
2321 	 * Only free the blobstore when a load fails.  If an unload fails (for some reason)
2322 	 *  we want to keep the blobstore in case the caller wants to try again.
2323 	 */
2324 	if (ctx->is_load) {
2325 		_spdk_bs_free(ctx->bs);
2326 	}
2327 	free(ctx);
2328 }
2329 
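/* Serialize a bit array into the on-disk mask format (LSB first within each byte). */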
2330 static void
2331 _spdk_bs_set_mask(struct spdk_bit_array *array, struct spdk_bs_md_mask *mask)
2332 {
2333 	uint32_t i = 0;
2334 
2335 	while (true) {
2336 		i = spdk_bit_array_find_first_set(array, i);
2337 		if (i >= mask->length) {
2338 			break;
2339 		}
2340 		mask->mask[i / 8] |= 1U << (i % 8);
2341 		i++;
2342 	}
2343 }
2344 
2345 static void
2346 _spdk_bs_write_super(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs,
2347 		     struct spdk_bs_super_block *super, spdk_bs_sequence_cpl cb_fn, void *cb_arg)
2348 {
2349 	/* Update the values in the super block */
2350 	super->super_blob = bs->super_blob;
2351 	memcpy(&super->bstype, &bs->bstype, sizeof(bs->bstype));
2352 	super->crc = _spdk_blob_md_page_calc_crc(super);
2353 	spdk_bs_sequence_write_dev(seq, super, _spdk_bs_page_to_lba(bs, 0),
2354 				   _spdk_bs_byte_to_lba(bs, sizeof(*super)),
2355 				   cb_fn, cb_arg);
2356 }
2357 
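/*
 * The three helpers below serialize the used_clusters, used_md_pages and
 * used_blobids bitmaps into the mask regions recorded in the super block and
 * write them to disk.  They are shared by the unload and recovery paths.
 */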
2358 static void
2359 _spdk_bs_write_used_clusters(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn)
2360 {
2361 	struct spdk_bs_load_ctx	*ctx = arg;
2362 	uint64_t	mask_size, lba, lba_count;
2363 
2364 	/* Write out the used clusters mask */
2365 	mask_size = ctx->super->used_cluster_mask_len * SPDK_BS_PAGE_SIZE;
2366 	ctx->mask = spdk_dma_zmalloc(mask_size, 0x1000, NULL);
2367 	if (!ctx->mask) {
2368 		_spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM);
2369 		return;
2370 	}
2371 
2372 	ctx->mask->type = SPDK_MD_MASK_TYPE_USED_CLUSTERS;
2373 	ctx->mask->length = ctx->bs->total_clusters;
2374 	assert(ctx->mask->length == spdk_bit_array_capacity(ctx->bs->used_clusters));
2375 
2376 	_spdk_bs_set_mask(ctx->bs->used_clusters, ctx->mask);
2377 	lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start);
2378 	lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len);
2379 	spdk_bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg);
2380 }
2381 
2382 static void
2383 _spdk_bs_write_used_md(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn)
2384 {
2385 	struct spdk_bs_load_ctx	*ctx = arg;
2386 	uint64_t	mask_size, lba, lba_count;
2387 
2388 	mask_size = ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE;
2389 	ctx->mask = spdk_dma_zmalloc(mask_size, 0x1000, NULL);
2390 	if (!ctx->mask) {
2391 		_spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM);
2392 		return;
2393 	}
2394 
2395 	ctx->mask->type = SPDK_MD_MASK_TYPE_USED_PAGES;
2396 	ctx->mask->length = ctx->super->md_len;
2397 	assert(ctx->mask->length == spdk_bit_array_capacity(ctx->bs->used_md_pages));
2398 
2399 	_spdk_bs_set_mask(ctx->bs->used_md_pages, ctx->mask);
2400 	lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_start);
2401 	lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_len);
2402 	spdk_bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg);
2403 }
2404 
2405 static void
2406 _spdk_bs_write_used_blobids(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn)
2407 {
2408 	struct spdk_bs_load_ctx	*ctx = arg;
2409 	uint64_t	mask_size, lba, lba_count;
2410 
2411 	if (ctx->super->used_blobid_mask_len == 0) {
2412 		/*
2413 		 * This is a pre-v3 on-disk format where the blobid mask does not get
2414 		 *  written to disk.
2415 		 */
2416 		cb_fn(seq, arg, 0);
2417 		return;
2418 	}
2419 
2420 	mask_size = ctx->super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE;
2421 	ctx->mask = spdk_dma_zmalloc(mask_size, 0x1000, NULL);
2422 	if (!ctx->mask) {
2423 		_spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM);
2424 		return;
2425 	}
2426 
2427 	ctx->mask->type = SPDK_MD_MASK_TYPE_USED_BLOBIDS;
2428 	ctx->mask->length = ctx->super->md_len;
2429 	assert(ctx->mask->length == spdk_bit_array_capacity(ctx->bs->used_blobids));
2430 
2431 	_spdk_bs_set_mask(ctx->bs->used_blobids, ctx->mask);
2432 	lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_start);
2433 	lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_len);
2434 	spdk_bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg);
2435 }
2436 
2437 static void
2438 _spdk_bs_load_iter(void *arg, struct spdk_blob *blob, int bserrno)
2439 {
2440 	struct spdk_bs_load_ctx *ctx = arg;
2441 
2442 	if (bserrno == 0) {
2443 		if (ctx->iter_cb_fn) {
2444 			ctx->iter_cb_fn(ctx->iter_cb_arg, blob, 0);
2445 		}
2446 		_spdk_bs_blob_list_add(blob);
2447 		spdk_bs_iter_next(ctx->bs, blob, _spdk_bs_load_iter, ctx);
2448 		return;
2449 	}
2450 
2451 	if (bserrno == -ENOENT) {
2452 		bserrno = 0;
2453 	} else {
2454 		/*
2455 		 * This case needs to be looked at further.  Same problem
2456 		 *  exists with applications that rely on explicit blob
2457 		 *  iteration.  We should just skip the blob that failed
2458 		 *  to load and continue on to the next one.
2459 		 */
2460 		SPDK_ERRLOG("Error in iterating blobs\n");
2461 	}
2462 
2463 	ctx->iter_cb_fn = NULL;
2464 
2465 	spdk_dma_free(ctx->super);
2466 	spdk_dma_free(ctx->mask);
2467 	spdk_bs_sequence_finish(ctx->seq, bserrno);
2468 	free(ctx);
2469 }
2470 
2471 static void
2472 _spdk_bs_load_complete(spdk_bs_sequence_t *seq, struct spdk_bs_load_ctx *ctx, int bserrno)
2473 {
2474 	ctx->seq = seq;
2475 	spdk_bs_iter_first(ctx->bs, _spdk_bs_load_iter, ctx);
2476 }
2477 
2478 static void
2479 _spdk_bs_load_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2480 {
2481 	struct spdk_bs_load_ctx *ctx = cb_arg;
2482 	uint32_t i, j;
2483 	int rc;
2484 
2485 	/* The type must be correct */
2486 	assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_BLOBIDS);
2487 
2488 	/* The length of the mask (in bits) must not be greater than
2489 	 * the length of the buffer (converted to bits) */
2490 	assert(ctx->mask->length <= (ctx->super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE * 8));
2491 
2492 	/* The length of the mask must be exactly equal to the size
2493 	 * (in pages) of the metadata region */
2494 	assert(ctx->mask->length == ctx->super->md_len);
2495 
2496 	rc = spdk_bit_array_resize(&ctx->bs->used_blobids, ctx->mask->length);
2497 	if (rc < 0) {
2498 		spdk_dma_free(ctx->mask);
2499 		_spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM);
2500 		return;
2501 	}
2502 
2503 	for (i = 0; i < ctx->mask->length / 8; i++) {
2504 		uint8_t segment = ctx->mask->mask[i];
2505 		for (j = 0; segment; j++) {
2506 			if (segment & 1U) {
2507 				spdk_bit_array_set(ctx->bs->used_blobids, (i * 8) + j);
2508 			}
2509 			segment >>= 1U;
2510 		}
2511 	}
2512 
2513 	_spdk_bs_load_complete(seq, ctx, bserrno);
2514 }
2515 
2516 static void
2517 _spdk_bs_load_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2518 {
2519 	struct spdk_bs_load_ctx *ctx = cb_arg;
2520 	uint64_t		lba, lba_count, mask_size;
2521 	uint32_t		i, j;
2522 	int			rc;
2523 
2524 	/* The type must be correct */
2525 	assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_CLUSTERS);
2526 	/* The length of the mask (in bits) must not be greater than the length of the buffer (converted to bits) */
2527 	assert(ctx->mask->length <= (ctx->super->used_cluster_mask_len * sizeof(
2528 					     struct spdk_blob_md_page) * 8));
2529 	/* The length of the mask must be exactly equal to the total number of clusters */
2530 	assert(ctx->mask->length == ctx->bs->total_clusters);
2531 
2532 	rc = spdk_bit_array_resize(&ctx->bs->used_clusters, ctx->bs->total_clusters);
2533 	if (rc < 0) {
2534 		spdk_dma_free(ctx->mask);
2535 		_spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM);
2536 		return;
2537 	}
2538 
2539 	ctx->bs->num_free_clusters = ctx->bs->total_clusters;
2540 	for (i = 0; i < ctx->mask->length / 8; i++) {
2541 		uint8_t segment = ctx->mask->mask[i];
2542 		for (j = 0; segment && (j < 8); j++) {
2543 			if (segment & 1U) {
2544 				spdk_bit_array_set(ctx->bs->used_clusters, (i * 8) + j);
2545 				assert(ctx->bs->num_free_clusters > 0);
2546 				ctx->bs->num_free_clusters--;
2547 			}
2548 			segment >>= 1U;
2549 		}
2550 	}
2551 
2552 	spdk_dma_free(ctx->mask);
2553 
2554 	/* Read the used blobids mask */
2555 	mask_size = ctx->super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE;
2556 	ctx->mask = spdk_dma_zmalloc(mask_size, 0x1000, NULL);
2557 	if (!ctx->mask) {
2558 		_spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM);
2559 		return;
2560 	}
2561 	lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_start);
2562 	lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_len);
2563 	spdk_bs_sequence_read_dev(seq, ctx->mask, lba, lba_count,
2564 				  _spdk_bs_load_used_blobids_cpl, ctx);
2565 }
2566 
2567 static void
2568 _spdk_bs_load_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2569 {
2570 	struct spdk_bs_load_ctx *ctx = cb_arg;
2571 	uint64_t		lba, lba_count, mask_size;
2572 	uint32_t		i, j;
2573 	int			rc;
2574 
2575 	/* The type must be correct */
2576 	assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_PAGES);
2577 	/* The length of the mask (in bits) must not be greater than the length of the buffer (converted to bits) */
2578 	assert(ctx->mask->length <= (ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE *
2579 				     8));
2580 	/* The length of the mask must be exactly equal to the size (in pages) of the metadata region */
2581 	assert(ctx->mask->length == ctx->super->md_len);
2582 
2583 	rc = spdk_bit_array_resize(&ctx->bs->used_md_pages, ctx->mask->length);
2584 	if (rc < 0) {
2585 		spdk_dma_free(ctx->mask);
2586 		_spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM);
2587 		return;
2588 	}
2589 
2590 	for (i = 0; i < ctx->mask->length / 8; i++) {
2591 		uint8_t segment = ctx->mask->mask[i];
2592 		for (j = 0; segment && (j < 8); j++) {
2593 			if (segment & 1U) {
2594 				spdk_bit_array_set(ctx->bs->used_md_pages, (i * 8) + j);
2595 			}
2596 			segment >>= 1U;
2597 		}
2598 	}
2599 	spdk_dma_free(ctx->mask);
2600 
2601 	/* Read the used clusters mask */
2602 	mask_size = ctx->super->used_cluster_mask_len * SPDK_BS_PAGE_SIZE;
2603 	ctx->mask = spdk_dma_zmalloc(mask_size, 0x1000, NULL);
2604 	if (!ctx->mask) {
2605 		_spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM);
2606 		return;
2607 	}
2608 	lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start);
2609 	lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len);
2610 	spdk_bs_sequence_read_dev(seq, ctx->mask, lba, lba_count,
2611 				  _spdk_bs_load_used_clusters_cpl, ctx);
2612 }
2613 
2614 static void
2615 _spdk_bs_load_write_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2616 {
2617 	struct spdk_bs_load_ctx	*ctx = cb_arg;
2618 	uint64_t lba, lba_count, mask_size;
2619 
2620 	/* Read the used pages mask */
2621 	mask_size = ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE;
2622 	ctx->mask = spdk_dma_zmalloc(mask_size, 0x1000, NULL);
2623 	if (!ctx->mask) {
2624 		_spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM);
2625 		return;
2626 	}
2627 
2628 	lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_start);
2629 	lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_len);
2630 	spdk_bs_sequence_read_dev(seq, ctx->mask, lba, lba_count,
2631 				  _spdk_bs_load_used_pages_cpl, ctx);
2632 }
2633 
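/*
 * Used during recovery: walk one metadata page's descriptors and mark every
 * cluster referenced by its extent descriptors as used.  Returns -1 if the
 * page is malformed.
 */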
2634 static int
2635 _spdk_bs_load_replay_md_parse_page(const struct spdk_blob_md_page *page, struct spdk_blob_store *bs)
2636 {
2637 	struct spdk_blob_md_descriptor *desc;
2638 	size_t	cur_desc = 0;
2639 
2640 	desc = (struct spdk_blob_md_descriptor *)page->descriptors;
2641 	while (cur_desc < sizeof(page->descriptors)) {
2642 		if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) {
2643 			if (desc->length == 0) {
2644 				/* If padding and length are 0, this terminates the page */
2645 				break;
2646 			}
2647 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT) {
2648 			struct spdk_blob_md_descriptor_extent	*desc_extent;
2649 			unsigned int				i, j;
2650 			unsigned int				cluster_count = 0;
2651 			uint32_t				cluster_idx;
2652 
2653 			desc_extent = (struct spdk_blob_md_descriptor_extent *)desc;
2654 
2655 			for (i = 0; i < desc_extent->length / sizeof(desc_extent->extents[0]); i++) {
2656 				for (j = 0; j < desc_extent->extents[i].length; j++) {
2657 					cluster_idx = desc_extent->extents[i].cluster_idx;
2658 					/*
2659 					 * cluster_idx = 0 means an unallocated cluster - don't mark that
2660 					 * in the used cluster map.
2661 					 */
2662 					if (cluster_idx != 0) {
2663 						spdk_bit_array_set(bs->used_clusters, cluster_idx + j);
2664 						if (bs->num_free_clusters == 0) {
2665 							return -1;
2666 						}
2667 						bs->num_free_clusters--;
2668 					}
2669 					cluster_count++;
2670 				}
2671 			}
2672 			if (cluster_count == 0) {
2673 				return -1;
2674 			}
2675 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) {
2676 			/* Skip this item */
2677 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) {
2678 			/* Skip this item */
2679 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) {
2680 			/* Skip this item */
2681 		} else {
2682 			/* Error */
2683 			return -1;
2684 		}
2685 		/* Advance to the next descriptor */
2686 		cur_desc += sizeof(*desc) + desc->length;
2687 		if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) {
2688 			break;
2689 		}
2690 		desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc);
2691 	}
2692 	return 0;
2693 }
2694 
2695 static bool
_spdk_bs_load_cur_md_page_valid(struct spdk_bs_load_ctx *ctx)
2696 {
2697 	uint32_t crc;
2698 
2699 	crc = _spdk_blob_md_page_calc_crc(ctx->page);
2700 	if (crc != ctx->page->crc) {
2701 		return false;
2702 	}
2703 
2704 	if (_spdk_bs_page_to_blobid(ctx->cur_page) != ctx->page->id) {
2705 		return false;
2706 	}
2707 	return true;
2708 }
2709 
2710 static void
2711 _spdk_bs_load_replay_cur_md_page(spdk_bs_sequence_t *seq, void *cb_arg);
2712 
2713 static void
2714 _spdk_bs_load_write_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2715 {
2716 	struct spdk_bs_load_ctx	*ctx = cb_arg;
2717 
2718 	_spdk_bs_load_complete(seq, ctx, bserrno);
2719 }
2720 
2721 static void
2722 _spdk_bs_load_write_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2723 {
2724 	struct spdk_bs_load_ctx	*ctx = cb_arg;
2725 
2726 	spdk_dma_free(ctx->mask);
2727 	ctx->mask = NULL;
2728 
2729 	_spdk_bs_write_used_clusters(seq, cb_arg, _spdk_bs_load_write_used_clusters_cpl);
2730 }
2731 
2732 static void
2733 _spdk_bs_load_write_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2734 {
2735 	struct spdk_bs_load_ctx	*ctx = cb_arg;
2736 
2737 	spdk_dma_free(ctx->mask);
2738 	ctx->mask = NULL;
2739 
2740 	_spdk_bs_write_used_blobids(seq, cb_arg, _spdk_bs_load_write_used_blobids_cpl);
2741 }
2742 
2743 static void
2744 _spdk_bs_load_write_used_md(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2745 {
2746 	_spdk_bs_write_used_md(seq, cb_arg, _spdk_bs_load_write_used_pages_cpl);
2747 }
2748 
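/*
 * Completion for each metadata page read during replay.  Valid pages are
 * parsed and their next-page chains followed; once every metadata page has
 * been visited, the clusters holding the metadata region are claimed and the
 * rebuilt masks are written back to disk.
 */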
2749 static void
2750 _spdk_bs_load_replay_md_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2751 {
2752 	struct spdk_bs_load_ctx *ctx = cb_arg;
2753 	uint64_t num_md_clusters;
2754 	uint64_t i;
2755 	uint32_t page_num;
2756 
2757 	if (bserrno != 0) {
2758 		_spdk_bs_load_ctx_fail(seq, ctx, bserrno);
2759 		return;
2760 	}
2761 
2762 	page_num = ctx->cur_page;
2763 	if (_spdk_bs_load_cur_md_page_valid(ctx) == true) {
2764 		if (ctx->page->sequence_num == 0 || ctx->in_page_chain == true) {
2765 			spdk_bit_array_set(ctx->bs->used_md_pages, page_num);
2766 			if (ctx->page->sequence_num == 0) {
2767 				spdk_bit_array_set(ctx->bs->used_blobids, page_num);
2768 			}
2769 			if (_spdk_bs_load_replay_md_parse_page(ctx->page, ctx->bs)) {
2770 				_spdk_bs_load_ctx_fail(seq, ctx, -EILSEQ);
2771 				return;
2772 			}
2773 			if (ctx->page->next != SPDK_INVALID_MD_PAGE) {
2774 				ctx->in_page_chain = true;
2775 				ctx->cur_page = ctx->page->next;
2776 				_spdk_bs_load_replay_cur_md_page(seq, cb_arg);
2777 				return;
2778 			}
2779 		}
2780 	}
2781 
2782 	ctx->in_page_chain = false;
2783 
2784 	do {
2785 		ctx->page_index++;
2786 	} while (spdk_bit_array_get(ctx->bs->used_md_pages, ctx->page_index) == true);
2787 
2788 	if (ctx->page_index < ctx->super->md_len) {
2789 		ctx->cur_page = ctx->page_index;
2790 		_spdk_bs_load_replay_cur_md_page(seq, cb_arg);
2791 	} else {
2792 		/* Claim all of the clusters used by the metadata */
2793 		num_md_clusters = divide_round_up(ctx->super->md_len, ctx->bs->pages_per_cluster);
2794 		for (i = 0; i < num_md_clusters; i++) {
2795 			_spdk_bs_claim_cluster(ctx->bs, i);
2796 		}
2797 		spdk_dma_free(ctx->page);
2798 		_spdk_bs_load_write_used_md(seq, ctx, bserrno);
2799 	}
2800 }
2801 
2802 static void
2803 _spdk_bs_load_replay_cur_md_page(spdk_bs_sequence_t *seq, void *cb_arg)
2804 {
2805 	struct spdk_bs_load_ctx *ctx = cb_arg;
2806 	uint64_t lba;
2807 
2808 	assert(ctx->cur_page < ctx->super->md_len);
2809 	lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->md_start + ctx->cur_page);
2810 	spdk_bs_sequence_read_dev(seq, ctx->page, lba,
2811 				  _spdk_bs_byte_to_lba(ctx->bs, SPDK_BS_PAGE_SIZE),
2812 				  _spdk_bs_load_replay_md_cpl, ctx);
2813 }
2814 
2815 static void
2816 _spdk_bs_load_replay_md(spdk_bs_sequence_t *seq, void *cb_arg)
2817 {
2818 	struct spdk_bs_load_ctx *ctx = cb_arg;
2819 
2820 	ctx->page_index = 0;
2821 	ctx->cur_page = 0;
2822 	ctx->page = spdk_dma_zmalloc(SPDK_BS_PAGE_SIZE,
2823 				     SPDK_BS_PAGE_SIZE,
2824 				     NULL);
2825 	if (!ctx->page) {
2826 		_spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM);
2827 		return;
2828 	}
2829 	_spdk_bs_load_replay_cur_md_page(seq, cb_arg);
2830 }
2831 
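/*
 * Entered when the super block is not marked clean (or predates the blobid
 * mask).  Resizes the in-memory bitmaps and starts a full metadata replay to
 * rebuild the used page, blobid and cluster masks.
 */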
2832 static void
2833 _spdk_bs_recover(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2834 {
2835 	struct spdk_bs_load_ctx *ctx = cb_arg;
2836 	int		rc;
2837 
2838 	if (bserrno != 0) {
2839 		_spdk_bs_load_ctx_fail(seq, ctx, -EIO);
2840 		return;
2841 	}
2842 
2843 	rc = spdk_bit_array_resize(&ctx->bs->used_md_pages, ctx->super->md_len);
2844 	if (rc < 0) {
2845 		_spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM);
2846 		return;
2847 	}
2848 
2849 	rc = spdk_bit_array_resize(&ctx->bs->used_blobids, ctx->super->md_len);
2850 	if (rc < 0) {
2851 		_spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM);
2852 		return;
2853 	}
2854 
2855 	rc = spdk_bit_array_resize(&ctx->bs->used_clusters, ctx->bs->total_clusters);
2856 	if (rc < 0) {
2857 		_spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM);
2858 		return;
2859 	}
2860 
2861 	ctx->bs->num_free_clusters = ctx->bs->total_clusters;
2862 	_spdk_bs_load_replay_md(seq, cb_arg);
2863 }
2864 
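/*
 * Super block read completion for spdk_bs_load().  Validates the version,
 * signature, CRC and bstype, copies the on-disk geometry into the blobstore,
 * then either reads the persisted masks (clean shutdown) or falls back to
 * recovery.
 */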
2865 static void
2866 _spdk_bs_load_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2867 {
2868 	struct spdk_bs_load_ctx *ctx = cb_arg;
2869 	uint32_t	crc;
2870 	static const char zeros[SPDK_BLOBSTORE_TYPE_LENGTH];
2871 
2872 	if (ctx->super->version > SPDK_BS_VERSION ||
2873 	    ctx->super->version < SPDK_BS_INITIAL_VERSION) {
2874 		_spdk_bs_load_ctx_fail(seq, ctx, -EILSEQ);
2875 		return;
2876 	}
2877 
2878 	if (memcmp(ctx->super->signature, SPDK_BS_SUPER_BLOCK_SIG,
2879 		   sizeof(ctx->super->signature)) != 0) {
2880 		_spdk_bs_load_ctx_fail(seq, ctx, -EILSEQ);
2881 		return;
2882 	}
2883 
2884 	crc = _spdk_blob_md_page_calc_crc(ctx->super);
2885 	if (crc != ctx->super->crc) {
2886 		_spdk_bs_load_ctx_fail(seq, ctx, -EILSEQ);
2887 		return;
2888 	}
2889 
2890 	if (memcmp(&ctx->bs->bstype, &ctx->super->bstype, SPDK_BLOBSTORE_TYPE_LENGTH) == 0) {
2891 		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Bstype matched - loading blobstore\n");
2892 	} else if (memcmp(&ctx->bs->bstype, zeros, SPDK_BLOBSTORE_TYPE_LENGTH) == 0) {
2893 		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Bstype wildcard used - loading blobstore regardless of bstype\n");
2894 	} else {
2895 		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Unexpected bstype\n");
2896 		SPDK_TRACEDUMP(SPDK_LOG_BLOB, "Expected:", ctx->bs->bstype.bstype, SPDK_BLOBSTORE_TYPE_LENGTH);
2897 		SPDK_TRACEDUMP(SPDK_LOG_BLOB, "Found:", ctx->super->bstype.bstype, SPDK_BLOBSTORE_TYPE_LENGTH);
2898 		_spdk_bs_load_ctx_fail(seq, ctx, -ENXIO);
2899 		return;
2900 	}
2901 
2902 	/* Parse the super block */
2903 	ctx->bs->cluster_sz = ctx->super->cluster_size;
2904 	ctx->bs->total_clusters = ctx->bs->dev->blockcnt / (ctx->bs->cluster_sz / ctx->bs->dev->blocklen);
2905 	ctx->bs->pages_per_cluster = ctx->bs->cluster_sz / SPDK_BS_PAGE_SIZE;
2906 	ctx->bs->md_start = ctx->super->md_start;
2907 	ctx->bs->md_len = ctx->super->md_len;
2908 	ctx->bs->total_data_clusters = ctx->bs->total_clusters - divide_round_up(
2909 					       ctx->bs->md_start + ctx->bs->md_len, ctx->bs->pages_per_cluster);
2910 	ctx->bs->super_blob = ctx->super->super_blob;
2911 	memcpy(&ctx->bs->bstype, &ctx->super->bstype, sizeof(ctx->super->bstype));
2912 
2913 	if (ctx->super->clean == 0) {
2914 		_spdk_bs_recover(seq, ctx, 0);
2915 	} else if (ctx->super->used_blobid_mask_len == 0) {
2916 		/*
2917 		 * Metadata is clean, but this is an old metadata format without
2918 		 *  a blobid mask.  Clear the clean bit and then build the masks
2919 		 *  using _spdk_bs_recover.
2920 		 */
2921 		ctx->super->clean = 0;
2922 		_spdk_bs_write_super(seq, ctx->bs, ctx->super, _spdk_bs_recover, ctx);
2923 	} else {
2924 		ctx->super->clean = 0;
2925 		_spdk_bs_write_super(seq, ctx->bs, ctx->super, _spdk_bs_load_write_super_cpl, ctx);
2926 	}
2927 }
2928 
2929 void
2930 spdk_bs_load(struct spdk_bs_dev *dev, struct spdk_bs_opts *o,
2931 	     spdk_bs_op_with_handle_complete cb_fn, void *cb_arg)
2932 {
2933 	struct spdk_blob_store	*bs;
2934 	struct spdk_bs_cpl	cpl;
2935 	spdk_bs_sequence_t	*seq;
2936 	struct spdk_bs_load_ctx *ctx;
2937 	struct spdk_bs_opts	opts = {};
2938 
2939 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Loading blobstore from dev %p\n", dev);
2940 
2941 	if ((SPDK_BS_PAGE_SIZE % dev->blocklen) != 0) {
2942 		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "unsupported dev block length of %d\n", dev->blocklen);
2943 		dev->destroy(dev);
2944 		cb_fn(cb_arg, NULL, -EINVAL);
2945 		return;
2946 	}
2947 
2948 	if (o) {
2949 		opts = *o;
2950 	} else {
2951 		spdk_bs_opts_init(&opts);
2952 	}
2953 
2954 	if (opts.max_md_ops == 0 || opts.max_channel_ops == 0) {
2955 		dev->destroy(dev);
2956 		cb_fn(cb_arg, NULL, -EINVAL);
2957 		return;
2958 	}
2959 
2960 	bs = _spdk_bs_alloc(dev, &opts);
2961 	if (!bs) {
2962 		dev->destroy(dev);
2963 		cb_fn(cb_arg, NULL, -ENOMEM);
2964 		return;
2965 	}
2966 
2967 	ctx = calloc(1, sizeof(*ctx));
2968 	if (!ctx) {
2969 		_spdk_bs_free(bs);
2970 		cb_fn(cb_arg, NULL, -ENOMEM);
2971 		return;
2972 	}
2973 
2974 	ctx->bs = bs;
2975 	ctx->is_load = true;
2976 	ctx->iter_cb_fn = opts.iter_cb_fn;
2977 	ctx->iter_cb_arg = opts.iter_cb_arg;
2978 
2979 	/* Allocate memory for the super block */
2980 	ctx->super = spdk_dma_zmalloc(sizeof(*ctx->super), 0x1000, NULL);
2981 	if (!ctx->super) {
2982 		free(ctx);
2983 		_spdk_bs_free(bs);
		cb_fn(cb_arg, NULL, -ENOMEM);
2984 		return;
2985 	}
2986 
2987 	cpl.type = SPDK_BS_CPL_TYPE_BS_HANDLE;
2988 	cpl.u.bs_handle.cb_fn = cb_fn;
2989 	cpl.u.bs_handle.cb_arg = cb_arg;
2990 	cpl.u.bs_handle.bs = bs;
2991 
2992 	seq = spdk_bs_sequence_start(bs->md_channel, &cpl);
2993 	if (!seq) {
2994 		spdk_dma_free(ctx->super);
2995 		free(ctx);
2996 		_spdk_bs_free(bs);
2997 		cb_fn(cb_arg, NULL, -ENOMEM);
2998 		return;
2999 	}
3000 
3001 	/* Read the super block */
3002 	spdk_bs_sequence_read_dev(seq, ctx->super, _spdk_bs_page_to_lba(bs, 0),
3003 				  _spdk_bs_byte_to_lba(bs, sizeof(*ctx->super)),
3004 				  _spdk_bs_load_super_cpl, ctx);
3005 }
3006 
3007 /* END spdk_bs_load */
3008 
3009 /* START spdk_bs_init */
3010 
3011 struct spdk_bs_init_ctx {
3012 	struct spdk_blob_store		*bs;
3013 	struct spdk_bs_super_block	*super;
3014 };
3015 
3016 static void
3017 _spdk_bs_init_persist_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
3018 {
3019 	struct spdk_bs_init_ctx *ctx = cb_arg;
3020 
3021 	spdk_dma_free(ctx->super);
3022 	free(ctx);
3023 
3024 	spdk_bs_sequence_finish(seq, bserrno);
3025 }
3026 
3027 static void
3028 _spdk_bs_init_trim_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
3029 {
3030 	struct spdk_bs_init_ctx *ctx = cb_arg;
3031 
3032 	/* Write super block */
3033 	spdk_bs_sequence_write_dev(seq, ctx->super, _spdk_bs_page_to_lba(ctx->bs, 0),
3034 				   _spdk_bs_byte_to_lba(ctx->bs, sizeof(*ctx->super)),
3035 				   _spdk_bs_init_persist_super_cpl, ctx);
3036 }
3037 
3038 void
3039 spdk_bs_init(struct spdk_bs_dev *dev, struct spdk_bs_opts *o,
3040 	     spdk_bs_op_with_handle_complete cb_fn, void *cb_arg)
3041 {
3042 	struct spdk_bs_init_ctx *ctx;
3043 	struct spdk_blob_store	*bs;
3044 	struct spdk_bs_cpl	cpl;
3045 	spdk_bs_sequence_t	*seq;
3046 	spdk_bs_batch_t		*batch;
3047 	uint64_t		num_md_lba;
3048 	uint64_t		num_md_pages;
3049 	uint64_t		num_md_clusters;
3050 	uint32_t		i;
3051 	struct spdk_bs_opts	opts = {};
3052 	int			rc;
3053 
3054 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Initializing blobstore on dev %p\n", dev);
3055 
3056 	if ((SPDK_BS_PAGE_SIZE % dev->blocklen) != 0) {
3057 		SPDK_ERRLOG("unsupported dev block length of %d\n",
3058 			    dev->blocklen);
3059 		dev->destroy(dev);
3060 		cb_fn(cb_arg, NULL, -EINVAL);
3061 		return;
3062 	}
3063 
3064 	if (o) {
3065 		opts = *o;
3066 	} else {
3067 		spdk_bs_opts_init(&opts);
3068 	}
3069 
3070 	if (_spdk_bs_opts_verify(&opts) != 0) {
3071 		dev->destroy(dev);
3072 		cb_fn(cb_arg, NULL, -EINVAL);
3073 		return;
3074 	}
3075 
3076 	bs = _spdk_bs_alloc(dev, &opts);
3077 	if (!bs) {
3078 		dev->destroy(dev);
3079 		cb_fn(cb_arg, NULL, -ENOMEM);
3080 		return;
3081 	}
3082 
3083 	if (opts.num_md_pages == SPDK_BLOB_OPTS_NUM_MD_PAGES) {
3084 		/* By default, allocate 1 metadata page per cluster.
3085 		 * Technically this over-allocates, because the metadata
3086 		 * region itself consumes clusters and therefore reduces
3087 		 * the number of usable (data) clusters. This can be
3088 		 * addressed with more complex math in the future.
3089 		 */
3090 		bs->md_len = bs->total_clusters;
3091 	} else {
3092 		bs->md_len = opts.num_md_pages;
3093 	}
3094 
3095 	rc = spdk_bit_array_resize(&bs->used_md_pages, bs->md_len);
3096 	if (rc < 0) {
3097 		_spdk_bs_free(bs);
3098 		cb_fn(cb_arg, NULL, -ENOMEM);
3099 		return;
3100 	}
3101 
3102 	rc = spdk_bit_array_resize(&bs->used_blobids, bs->md_len);
3103 	if (rc < 0) {
3104 		_spdk_bs_free(bs);
3105 		cb_fn(cb_arg, NULL, -ENOMEM);
3106 		return;
3107 	}
3108 
3109 	ctx = calloc(1, sizeof(*ctx));
3110 	if (!ctx) {
3111 		_spdk_bs_free(bs);
3112 		cb_fn(cb_arg, NULL, -ENOMEM);
3113 		return;
3114 	}
3115 
3116 	ctx->bs = bs;
3117 
3118 	/* Allocate memory for the super block */
3119 	ctx->super = spdk_dma_zmalloc(sizeof(*ctx->super), 0x1000, NULL);
3120 	if (!ctx->super) {
3121 		free(ctx);
3122 		_spdk_bs_free(bs);
		cb_fn(cb_arg, NULL, -ENOMEM);
3123 		return;
3124 	}
3125 	memcpy(ctx->super->signature, SPDK_BS_SUPER_BLOCK_SIG,
3126 	       sizeof(ctx->super->signature));
3127 	ctx->super->version = SPDK_BS_VERSION;
3128 	ctx->super->length = sizeof(*ctx->super);
3129 	ctx->super->super_blob = bs->super_blob;
3130 	ctx->super->clean = 0;
3131 	ctx->super->cluster_size = bs->cluster_sz;
3132 	memcpy(&ctx->super->bstype, &bs->bstype, sizeof(bs->bstype));
3133 
3134 	/* Calculate how many pages the metadata consumes at the front
3135 	 * of the disk.
3136 	 */
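	/*
	 * Resulting on-disk layout, in pages starting at page 0:
	 *   [super block][used_page mask][used_cluster mask][used_blobid mask][metadata region]
	 */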
3137 
3138 	/* The super block uses 1 page */
3139 	num_md_pages = 1;
3140 
3141 	/* The used_md_pages mask requires 1 bit per metadata page, rounded
3142 	 * up to the nearest page, plus a header.
3143 	 */
3144 	ctx->super->used_page_mask_start = num_md_pages;
3145 	ctx->super->used_page_mask_len = divide_round_up(sizeof(struct spdk_bs_md_mask) +
3146 					 divide_round_up(bs->md_len, 8),
3147 					 SPDK_BS_PAGE_SIZE);
3148 	num_md_pages += ctx->super->used_page_mask_len;
3149 
3150 	/* The used_clusters mask requires 1 bit per cluster, rounded
3151 	 * up to the nearest page, plus a header.
3152 	 */
3153 	ctx->super->used_cluster_mask_start = num_md_pages;
3154 	ctx->super->used_cluster_mask_len = divide_round_up(sizeof(struct spdk_bs_md_mask) +
3155 					    divide_round_up(bs->total_clusters, 8),
3156 					    SPDK_BS_PAGE_SIZE);
3157 	num_md_pages += ctx->super->used_cluster_mask_len;
3158 
3159 	/* The used_blobids mask requires 1 bit per metadata page, rounded
3160 	 * up to the nearest page, plus a header.
3161 	 */
3162 	ctx->super->used_blobid_mask_start = num_md_pages;
3163 	ctx->super->used_blobid_mask_len = divide_round_up(sizeof(struct spdk_bs_md_mask) +
3164 					   divide_round_up(bs->md_len, 8),
3165 					   SPDK_BS_PAGE_SIZE);
3166 	num_md_pages += ctx->super->used_blobid_mask_len;
3167 
3168 	/* The metadata region size was chosen above */
3169 	ctx->super->md_start = bs->md_start = num_md_pages;
3170 	ctx->super->md_len = bs->md_len;
3171 	num_md_pages += bs->md_len;
3172 
3173 	num_md_lba = _spdk_bs_page_to_lba(bs, num_md_pages);
3174 
3175 	ctx->super->crc = _spdk_blob_md_page_calc_crc(ctx->super);
3176 
3177 	num_md_clusters = divide_round_up(num_md_pages, bs->pages_per_cluster);
3178 	if (num_md_clusters > bs->total_clusters) {
3179 		SPDK_ERRLOG("Blobstore metadata cannot use more clusters than are available, "
3180 			    "please decrease number of pages reserved for metadata "
3181 			    "or increase cluster size.\n");
3182 		spdk_dma_free(ctx->super);
3183 		free(ctx);
3184 		_spdk_bs_free(bs);
3185 		cb_fn(cb_arg, NULL, -ENOMEM);
3186 		return;
3187 	}
3188 	/* Claim all of the clusters used by the metadata */
3189 	for (i = 0; i < num_md_clusters; i++) {
3190 		_spdk_bs_claim_cluster(bs, i);
3191 	}
3192 
3193 	bs->total_data_clusters = bs->num_free_clusters;
3194 
3195 	cpl.type = SPDK_BS_CPL_TYPE_BS_HANDLE;
3196 	cpl.u.bs_handle.cb_fn = cb_fn;
3197 	cpl.u.bs_handle.cb_arg = cb_arg;
3198 	cpl.u.bs_handle.bs = bs;
3199 
3200 	seq = spdk_bs_sequence_start(bs->md_channel, &cpl);
3201 	if (!seq) {
3202 		spdk_dma_free(ctx->super);
3203 		free(ctx);
3204 		_spdk_bs_free(bs);
3205 		cb_fn(cb_arg, NULL, -ENOMEM);
3206 		return;
3207 	}
3208 
3209 	batch = spdk_bs_sequence_to_batch(seq, _spdk_bs_init_trim_cpl, ctx);
3210 
3211 	/* Clear metadata space */
3212 	spdk_bs_batch_write_zeroes_dev(batch, 0, num_md_lba);
3213 	/* Trim data clusters */
3214 	spdk_bs_batch_unmap_dev(batch, num_md_lba, ctx->bs->dev->blockcnt - num_md_lba);
3215 
3216 	spdk_bs_batch_close(batch);
3217 }
3218 
3219 /* END spdk_bs_init */
3220 
3221 /* START spdk_bs_destroy */
3222 
3223 static void
3224 _spdk_bs_destroy_trim_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
3225 {
3226 	struct spdk_bs_init_ctx *ctx = cb_arg;
3227 	struct spdk_blob_store *bs = ctx->bs;
3228 
3229 	/*
3230 	 * We need to defer calling spdk_bs_call_cpl() until after
3231 	 * dev destruction, so tuck these away for later use.
3232 	 */
3233 	bs->unload_err = bserrno;
3234 	memcpy(&bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl));
3235 	seq->cpl.type = SPDK_BS_CPL_TYPE_NONE;
3236 
3237 	spdk_bs_sequence_finish(seq, bserrno);
3238 
3239 	_spdk_bs_free(bs);
3240 	free(ctx);
3241 }
3242 
3243 void
3244 spdk_bs_destroy(struct spdk_blob_store *bs, spdk_bs_op_complete cb_fn,
3245 		void *cb_arg)
3246 {
3247 	struct spdk_bs_cpl	cpl;
3248 	spdk_bs_sequence_t	*seq;
3249 	struct spdk_bs_init_ctx *ctx;
3250 
3251 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Destroying blobstore\n");
3252 
3253 	if (!TAILQ_EMPTY(&bs->blobs)) {
3254 		SPDK_ERRLOG("Blobstore still has open blobs\n");
3255 		cb_fn(cb_arg, -EBUSY);
3256 		return;
3257 	}
3258 
3259 	cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
3260 	cpl.u.bs_basic.cb_fn = cb_fn;
3261 	cpl.u.bs_basic.cb_arg = cb_arg;
3262 
3263 	ctx = calloc(1, sizeof(*ctx));
3264 	if (!ctx) {
3265 		cb_fn(cb_arg, -ENOMEM);
3266 		return;
3267 	}
3268 
3269 	ctx->bs = bs;
3270 
3271 	seq = spdk_bs_sequence_start(bs->md_channel, &cpl);
3272 	if (!seq) {
3273 		free(ctx);
3274 		cb_fn(cb_arg, -ENOMEM);
3275 		return;
3276 	}
3277 
3278 	/* Write zeroes to the super block */
3279 	spdk_bs_sequence_write_zeroes_dev(seq,
3280 					  _spdk_bs_page_to_lba(bs, 0),
3281 					  _spdk_bs_byte_to_lba(bs, sizeof(struct spdk_bs_super_block)),
3282 					  _spdk_bs_destroy_trim_cpl, ctx);
3283 }
3284 
3285 /* END spdk_bs_destroy */
3286 
3287 /* START spdk_bs_unload */
3288 
3289 static void
3290 _spdk_bs_unload_write_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
3291 {
3292 	struct spdk_bs_load_ctx	*ctx = cb_arg;
3293 
3294 	spdk_dma_free(ctx->super);
3295 
3296 	/*
3297 	 * We need to defer calling spdk_bs_call_cpl() until after
3298 	 * dev destruction, so tuck these away for later use.
3299 	 */
3300 	ctx->bs->unload_err = bserrno;
3301 	memcpy(&ctx->bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl));
3302 	seq->cpl.type = SPDK_BS_CPL_TYPE_NONE;
3303 
3304 	spdk_bs_sequence_finish(seq, bserrno);
3305 
3306 	_spdk_bs_free(ctx->bs);
3307 	free(ctx);
3308 }
3309 
3310 static void
3311 _spdk_bs_unload_write_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
3312 {
3313 	struct spdk_bs_load_ctx	*ctx = cb_arg;
3314 
3315 	spdk_dma_free(ctx->mask);
3316 	ctx->super->clean = 1;
3317 
3318 	_spdk_bs_write_super(seq, ctx->bs, ctx->super, _spdk_bs_unload_write_super_cpl, ctx);
3319 }
3320 
3321 static void
3322 _spdk_bs_unload_write_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
3323 {
3324 	struct spdk_bs_load_ctx	*ctx = cb_arg;
3325 
3326 	spdk_dma_free(ctx->mask);
3327 	ctx->mask = NULL;
3328 
3329 	_spdk_bs_write_used_clusters(seq, cb_arg, _spdk_bs_unload_write_used_clusters_cpl);
3330 }
3331 
3332 static void
3333 _spdk_bs_unload_write_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
3334 {
3335 	struct spdk_bs_load_ctx	*ctx = cb_arg;
3336 
3337 	spdk_dma_free(ctx->mask);
3338 	ctx->mask = NULL;
3339 
3340 	_spdk_bs_write_used_blobids(seq, cb_arg, _spdk_bs_unload_write_used_blobids_cpl);
3341 }
3342 
3343 static void
3344 _spdk_bs_unload_read_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
3345 {
3346 	_spdk_bs_write_used_md(seq, cb_arg, _spdk_bs_unload_write_used_pages_cpl);
3347 }
3348 
3349 void
3350 spdk_bs_unload(struct spdk_blob_store *bs, spdk_bs_op_complete cb_fn, void *cb_arg)
3351 {
3352 	struct spdk_bs_cpl	cpl;
3353 	spdk_bs_sequence_t	*seq;
3354 	struct spdk_bs_load_ctx *ctx;
3355 
3356 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Syncing blobstore\n");
3357 
3358 	if (!TAILQ_EMPTY(&bs->blobs)) {
3359 		SPDK_ERRLOG("Blobstore still has open blobs\n");
3360 		cb_fn(cb_arg, -EBUSY);
3361 		return;
3362 	}
3363 
3364 	ctx = calloc(1, sizeof(*ctx));
3365 	if (!ctx) {
3366 		cb_fn(cb_arg, -ENOMEM);
3367 		return;
3368 	}
3369 
3370 	ctx->bs = bs;
3371 	ctx->is_load = false;
3372 
3373 	ctx->super = spdk_dma_zmalloc(sizeof(*ctx->super), 0x1000, NULL);
3374 	if (!ctx->super) {
3375 		free(ctx);
3376 		cb_fn(cb_arg, -ENOMEM);
3377 		return;
3378 	}
3379 
3380 	cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
3381 	cpl.u.bs_basic.cb_fn = cb_fn;
3382 	cpl.u.bs_basic.cb_arg = cb_arg;
3383 
3384 	seq = spdk_bs_sequence_start(bs->md_channel, &cpl);
3385 	if (!seq) {
3386 		spdk_dma_free(ctx->super);
3387 		free(ctx);
3388 		cb_fn(cb_arg, -ENOMEM);
3389 		return;
3390 	}
3391 
3392 	/* Read super block */
3393 	spdk_bs_sequence_read_dev(seq, ctx->super, _spdk_bs_page_to_lba(bs, 0),
3394 				  _spdk_bs_byte_to_lba(bs, sizeof(*ctx->super)),
3395 				  _spdk_bs_unload_read_super_cpl, ctx);
3396 }
3397 
3398 /* END spdk_bs_unload */
3399 
3400 /* START spdk_bs_set_super */
3401 
3402 struct spdk_bs_set_super_ctx {
3403 	struct spdk_blob_store		*bs;
3404 	struct spdk_bs_super_block	*super;
3405 };
3406 
3407 static void
3408 _spdk_bs_set_super_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
3409 {
3410 	struct spdk_bs_set_super_ctx	*ctx = cb_arg;
3411 
3412 	if (bserrno != 0) {
3413 		SPDK_ERRLOG("Unable to write to super block of blobstore\n");
3414 	}
3415 
3416 	spdk_dma_free(ctx->super);
3417 
3418 	spdk_bs_sequence_finish(seq, bserrno);
3419 
3420 	free(ctx);
3421 }
3422 
3423 static void
3424 _spdk_bs_set_super_read_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
3425 {
3426 	struct spdk_bs_set_super_ctx	*ctx = cb_arg;
3427 
3428 	if (bserrno != 0) {
3429 		SPDK_ERRLOG("Unable to read super block of blobstore\n");
3430 		spdk_dma_free(ctx->super);
3431 		spdk_bs_sequence_finish(seq, bserrno);
3432 		free(ctx);
3433 		return;
3434 	}
3435 
3436 	_spdk_bs_write_super(seq, ctx->bs, ctx->super, _spdk_bs_set_super_write_cpl, ctx);
3437 }
3438 
3439 void
3440 spdk_bs_set_super(struct spdk_blob_store *bs, spdk_blob_id blobid,
3441 		  spdk_bs_op_complete cb_fn, void *cb_arg)
3442 {
3443 	struct spdk_bs_cpl		cpl;
3444 	spdk_bs_sequence_t		*seq;
3445 	struct spdk_bs_set_super_ctx	*ctx;
3446 
3447 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Setting super blob id on blobstore\n");
3448 
3449 	ctx = calloc(1, sizeof(*ctx));
3450 	if (!ctx) {
3451 		cb_fn(cb_arg, -ENOMEM);
3452 		return;
3453 	}
3454 
3455 	ctx->bs = bs;
3456 
3457 	ctx->super = spdk_dma_zmalloc(sizeof(*ctx->super), 0x1000, NULL);
3458 	if (!ctx->super) {
3459 		free(ctx);
3460 		cb_fn(cb_arg, -ENOMEM);
3461 		return;
3462 	}
3463 
3464 	cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
3465 	cpl.u.bs_basic.cb_fn = cb_fn;
3466 	cpl.u.bs_basic.cb_arg = cb_arg;
3467 
3468 	seq = spdk_bs_sequence_start(bs->md_channel, &cpl);
3469 	if (!seq) {
3470 		spdk_dma_free(ctx->super);
3471 		free(ctx);
3472 		cb_fn(cb_arg, -ENOMEM);
3473 		return;
3474 	}
3475 
3476 	bs->super_blob = blobid;
3477 
3478 	/* Read super block */
3479 	spdk_bs_sequence_read_dev(seq, ctx->super, _spdk_bs_page_to_lba(bs, 0),
3480 				  _spdk_bs_byte_to_lba(bs, sizeof(*ctx->super)),
3481 				  _spdk_bs_set_super_read_cpl, ctx);
3482 }
3483 
3484 /* END spdk_bs_set_super */
3485 
3486 void
3487 spdk_bs_get_super(struct spdk_blob_store *bs,
3488 		  spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
3489 {
3490 	if (bs->super_blob == SPDK_BLOBID_INVALID) {
3491 		cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOENT);
3492 	} else {
3493 		cb_fn(cb_arg, bs->super_blob, 0);
3494 	}
3495 }
3496 
3497 uint64_t
3498 spdk_bs_get_cluster_size(struct spdk_blob_store *bs)
3499 {
3500 	return bs->cluster_sz;
3501 }
3502 
3503 uint64_t
3504 spdk_bs_get_page_size(struct spdk_blob_store *bs)
3505 {
3506 	return SPDK_BS_PAGE_SIZE;
3507 }
3508 
3509 uint64_t
3510 spdk_bs_free_cluster_count(struct spdk_blob_store *bs)
3511 {
3512 	return bs->num_free_clusters;
3513 }
3514 
3515 uint64_t
3516 spdk_bs_total_data_cluster_count(struct spdk_blob_store *bs)
3517 {
3518 	return bs->total_data_clusters;
3519 }
3520 
3521 static int
3522 spdk_bs_register_md_thread(struct spdk_blob_store *bs)
3523 {
3524 	bs->md_channel = spdk_get_io_channel(bs);
3525 	if (!bs->md_channel) {
3526 		SPDK_ERRLOG("Failed to get IO channel.\n");
3527 		return -1;
3528 	}
3529 
3530 	return 0;
3531 }
3532 
3533 static int
3534 spdk_bs_unregister_md_thread(struct spdk_blob_store *bs)
3535 {
3536 	spdk_put_io_channel(bs->md_channel);
3537 
3538 	return 0;
3539 }
3540 
3541 spdk_blob_id spdk_blob_get_id(struct spdk_blob *blob)
3542 {
3543 	assert(blob != NULL);
3544 
3545 	return blob->id;
3546 }
3547 
3548 uint64_t spdk_blob_get_num_pages(struct spdk_blob *blob)
3549 {
3550 	assert(blob != NULL);
3551 
3552 	return _spdk_bs_cluster_to_page(blob->bs, blob->active.num_clusters);
3553 }
3554 
3555 uint64_t spdk_blob_get_num_clusters(struct spdk_blob *blob)
3556 {
3557 	assert(blob != NULL);
3558 
3559 	return blob->active.num_clusters;
3560 }
3561 
3562 /* START spdk_bs_create_blob */
3563 
3564 static void
3565 _spdk_bs_create_blob_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
3566 {
3567 	struct spdk_blob *blob = cb_arg;
3568 
3569 	_spdk_blob_free(blob);
3570 
3571 	spdk_bs_sequence_finish(seq, bserrno);
3572 }
3573 
3574 static int
3575 _spdk_blob_set_xattrs(struct spdk_blob *blob, const struct spdk_blob_xattr_opts *xattrs,
3576 		      bool internal)
3577 {
3578 	uint64_t i;
3579 	size_t value_len = 0;
3580 	int rc;
3581 	const void *value = NULL;
3582 	if (xattrs->count > 0 && xattrs->get_value == NULL) {
3583 		return -EINVAL;
3584 	}
3585 	for (i = 0; i < xattrs->count; i++) {
3586 		xattrs->get_value(xattrs->ctx, xattrs->names[i], &value, &value_len);
3587 		if (value == NULL || value_len == 0) {
3588 			return -EINVAL;
3589 		}
3590 		rc = _spdk_blob_set_xattr(blob, xattrs->names[i], value, value_len, internal);
3591 		if (rc < 0) {
3592 			return rc;
3593 		}
3594 	}
3595 	return 0;
3596 }
3597 
3598 static void
3599 _spdk_blob_set_thin_provision(struct spdk_blob *blob)
3600 {
3601 	_spdk_blob_verify_md_op(blob);
3602 	blob->invalid_flags |= SPDK_BLOB_THIN_PROV;
3603 	blob->state = SPDK_BLOB_STATE_DIRTY;
3604 }
3605 
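/*
 * Common implementation behind spdk_bs_create_blob() and spdk_bs_create_blob_ext().
 * Claims a free metadata page (whose index becomes the blob id), applies user
 * and internal xattrs, optionally marks the blob thin provisioned, resizes it
 * and persists the initial metadata.
 */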
3606 static void
3607 _spdk_bs_create_blob(struct spdk_blob_store *bs,
3608 		     const struct spdk_blob_opts *opts,
3609 		     const struct spdk_blob_xattr_opts *internal_xattrs,
3610 		     spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
3611 {
3612 	struct spdk_blob	*blob;
3613 	uint32_t		page_idx;
3614 	struct spdk_bs_cpl	cpl;
3615 	struct spdk_blob_opts	opts_default;
3616 	struct spdk_blob_xattr_opts internal_xattrs_default;
3617 	spdk_bs_sequence_t	*seq;
3618 	spdk_blob_id		id;
3619 	int rc;
3620 
3621 	assert(spdk_get_thread() == bs->md_thread);
3622 
3623 	page_idx = spdk_bit_array_find_first_clear(bs->used_md_pages, 0);
3624 	if (page_idx >= spdk_bit_array_capacity(bs->used_md_pages)) {
3625 		cb_fn(cb_arg, 0, -ENOMEM);
3626 		return;
3627 	}
3628 	spdk_bit_array_set(bs->used_blobids, page_idx);
3629 	spdk_bit_array_set(bs->used_md_pages, page_idx);
3630 
3631 	id = _spdk_bs_page_to_blobid(page_idx);
3632 
3633 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Creating blob with id %lu at page %u\n", id, page_idx);
3634 
3635 	blob = _spdk_blob_alloc(bs, id);
3636 	if (!blob) {
3637 		cb_fn(cb_arg, 0, -ENOMEM);
3638 		return;
3639 	}
3640 
3641 	if (!opts) {
3642 		spdk_blob_opts_init(&opts_default);
3643 		opts = &opts_default;
3644 	}
3645 	if (!internal_xattrs) {
3646 		_spdk_blob_xattrs_init(&internal_xattrs_default);
3647 		internal_xattrs = &internal_xattrs_default;
3648 	}
3649 
3650 	rc = _spdk_blob_set_xattrs(blob, &opts->xattrs, false);
3651 	if (rc < 0) {
3652 		_spdk_blob_free(blob);
3653 		cb_fn(cb_arg, 0, rc);
3654 		return;
3655 	}
3656 
3657 	rc = _spdk_blob_set_xattrs(blob, internal_xattrs, true);
3658 	if (rc < 0) {
3659 		_spdk_blob_free(blob);
3660 		cb_fn(cb_arg, 0, rc);
3661 		return;
3662 	}
3663 
3664 	if (opts->thin_provision) {
3665 		_spdk_blob_set_thin_provision(blob);
3666 	}
3667 
3668 	rc = _spdk_blob_resize(blob, opts->num_clusters);
3669 	if (rc < 0) {
3670 		_spdk_blob_free(blob);
3671 		cb_fn(cb_arg, 0, rc);
3672 		return;
3673 	}
3674 	cpl.type = SPDK_BS_CPL_TYPE_BLOBID;
3675 	cpl.u.blobid.cb_fn = cb_fn;
3676 	cpl.u.blobid.cb_arg = cb_arg;
3677 	cpl.u.blobid.blobid = blob->id;
3678 
3679 	seq = spdk_bs_sequence_start(bs->md_channel, &cpl);
3680 	if (!seq) {
3681 		_spdk_blob_free(blob);
3682 		cb_fn(cb_arg, 0, -ENOMEM);
3683 		return;
3684 	}
3685 
3686 	_spdk_blob_persist(seq, blob, _spdk_bs_create_blob_cpl, blob);
3687 }
3688 
3689 void spdk_bs_create_blob(struct spdk_blob_store *bs,
3690 			 spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
3691 {
3692 	_spdk_bs_create_blob(bs, NULL, NULL, cb_fn, cb_arg);
3693 }
3694 
3695 void spdk_bs_create_blob_ext(struct spdk_blob_store *bs, const struct spdk_blob_opts *opts,
3696 			     spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
3697 {
3698 	_spdk_bs_create_blob(bs, opts, NULL, cb_fn, cb_arg);
3699 }
3700 
3701 /* END spdk_bs_create_blob */
3702 
3703 /* START blob_cleanup */
3704 
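     /*
      * Shared context for the snapshot and clone creation paths.  It tracks the
      *  original blob, the newly created blob and the first error seen so the
      *  cleanup chain below can close both blobs and report a single status to
      *  the caller.
      */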
3705 struct spdk_clone_snapshot_ctx {
3706 	struct spdk_bs_cpl      cpl;
3707 	int bserrno;
3708 
3709 	struct {
3710 		spdk_blob_id id;
3711 		struct spdk_blob *blob;
3712 	} original;
3713 	struct {
3714 		spdk_blob_id id;
3715 		struct spdk_blob *blob;
3716 	} new;
3717 
3718 	/* xattrs specified for snapshot/clones only. They have no impact on
3719 	 * the original blob's xattrs. */
3720 	const struct spdk_blob_xattr_opts *xattrs;
3721 };
3722 
3723 static void
3724 _spdk_bs_clone_snapshot_cleanup_finish(void *cb_arg, int bserrno)
3725 {
3726 	struct spdk_clone_snapshot_ctx *ctx = cb_arg;
3727 	struct spdk_bs_cpl *cpl = &ctx->cpl;
3728 
3729 	if (bserrno != 0) {
3730 		if (ctx->bserrno != 0) {
3731 			SPDK_ERRLOG("Cleanup error %d\n", bserrno);
3732 		} else {
3733 			ctx->bserrno = bserrno;
3734 		}
3735 	}
3736 
3737 	switch (cpl->type) {
3738 	case SPDK_BS_CPL_TYPE_BLOBID:
3739 		cpl->u.blobid.cb_fn(cpl->u.blobid.cb_arg, cpl->u.blobid.blobid, ctx->bserrno);
3740 		break;
3741 	default:
3742 		SPDK_UNREACHABLE();
3743 		break;
3744 	}
3745 
3746 	free(ctx);
3747 }
3748 
3749 static void
3750 _spdk_bs_clone_snapshot_origblob_cleanup(void *cb_arg, int bserrno)
3751 {
3752 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
3753 	struct spdk_blob *origblob = ctx->original.blob;
3754 
3755 	if (bserrno != 0) {
3756 		if (ctx->bserrno != 0) {
3757 			SPDK_ERRLOG("Cleanup error %d\n", bserrno);
3758 		} else {
3759 			ctx->bserrno = bserrno;
3760 		}
3761 	}
3762 
3763 	ctx->original.id = origblob->id;
3764 	spdk_blob_close(origblob, _spdk_bs_clone_snapshot_cleanup_finish, ctx);
3765 }
3766 
3767 static void
3768 _spdk_bs_clone_snapshot_newblob_cleanup(void *cb_arg, int bserrno)
3769 {
3770 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
3771 	struct spdk_blob *newblob = ctx->new.blob;
3772 
3773 	if (bserrno != 0) {
3774 		if (ctx->bserrno != 0) {
3775 			SPDK_ERRLOG("Cleanup error %d\n", bserrno);
3776 		} else {
3777 			ctx->bserrno = bserrno;
3778 		}
3779 	}
3780 
3781 	ctx->new.id = newblob->id;
3782 	spdk_blob_close(newblob, _spdk_bs_clone_snapshot_origblob_cleanup, ctx);
3783 }
3784 
3785 /* END blob_cleanup */
3786 
3787 /* START spdk_bs_create_snapshot */
3788 
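     /*
      * The original blob's metadata has been synced.  Drop the temporary
      *  SNAPSHOT_IN_PROGRESS xattr from the snapshot, register the original
      *  blob as a clone of the snapshot, mark the snapshot read-only and sync
      *  its metadata one final time before cleaning up.
      */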
3789 static void
3790 _spdk_bs_snapshot_origblob_sync_cpl(void *cb_arg, int bserrno)
3791 {
3792 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
3793 	struct spdk_blob *newblob = ctx->new.blob;
3794 
3795 	if (bserrno != 0) {
3796 		_spdk_bs_clone_snapshot_newblob_cleanup(ctx, bserrno);
3797 		return;
3798 	}
3799 
3800 	/* Remove metadata descriptor SNAPSHOT_IN_PROGRESS */
3801 	bserrno = _spdk_blob_remove_xattr(newblob, SNAPSHOT_IN_PROGRESS, true);
3802 	if (bserrno != 0) {
3803 		_spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
3804 		return;
3805 	}
3806 
3807 	_spdk_bs_blob_list_add(ctx->original.blob);
3808 
3809 	spdk_blob_set_read_only(newblob);
3810 
3811 	/* sync snapshot metadata */
3812 	spdk_blob_sync_md(newblob, _spdk_bs_clone_snapshot_origblob_cleanup, ctx);
3813 }
3814 
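     /*
      * The snapshot's metadata is on disk.  Re-point the original blob at the
      *  snapshot: record the snapshot id as its parent, route reads of
      *  unallocated clusters through the snapshot's bs_dev, mark the original
      *  blob thin provisioned and clear its cluster map, then sync it.
      */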
3815 static void
3816 _spdk_bs_snapshot_newblob_sync_cpl(void *cb_arg, int bserrno)
3817 {
3818 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
3819 	struct spdk_blob *origblob = ctx->original.blob;
3820 	struct spdk_blob *newblob = ctx->new.blob;
3821 
3822 	if (bserrno != 0) {
3823 		_spdk_bs_clone_snapshot_newblob_cleanup(ctx, bserrno);
3824 		return;
3825 	}
3826 
3827 	/* Set internal xattr for snapshot id */
3828 	bserrno = _spdk_blob_set_xattr(origblob, BLOB_SNAPSHOT, &newblob->id, sizeof(spdk_blob_id), true);
3829 	if (bserrno != 0) {
3830 		_spdk_bs_clone_snapshot_newblob_cleanup(ctx, bserrno);
3831 		return;
3832 	}
3833 	origblob->parent_id = newblob->id;
3834 
3835 	/* Create new back_bs_dev for snapshot */
3836 	origblob->back_bs_dev = spdk_bs_create_blob_bs_dev(newblob);
3837 	if (origblob->back_bs_dev == NULL) {
3838 		_spdk_bs_clone_snapshot_newblob_cleanup(ctx, -EINVAL);
3839 		return;
3840 	}
3841 
3842 	/* set clone blob as thin provisioned */
3843 	_spdk_blob_set_thin_provision(origblob);
3844 
3845 	_spdk_bs_blob_list_add(newblob);
3846 
3847 	/* Zero out origblob cluster map */
3848 	memset(origblob->active.clusters, 0,
3849 	       origblob->active.num_clusters * sizeof(*origblob->active.clusters));
3850 
3851 	/* sync clone metadata */
3852 	spdk_blob_sync_md(origblob, _spdk_bs_snapshot_origblob_sync_cpl, ctx);
3853 }
3854 
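     /*
      * The newly created snapshot blob has been opened.  Hand it the original
      *  blob's backing device and invalid flags, copy the cluster map over and
      *  sync the snapshot's metadata.
      */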
3855 static void
3856 _spdk_bs_snapshot_newblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
3857 {
3858 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
3859 	struct spdk_blob *origblob = ctx->original.blob;
3860 	struct spdk_blob *newblob = _blob;
3861 
3862 	if (bserrno != 0) {
3863 		_spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
3864 		return;
3865 	}
3866 
3867 	ctx->new.blob = newblob;
3868 
3869 	/* set new back_bs_dev for snapshot */
3870 	newblob->back_bs_dev = origblob->back_bs_dev;
3871 	/* Set invalid flags from origblob */
3872 	newblob->invalid_flags = origblob->invalid_flags;
3873 
3874 	/* Copy cluster map to snapshot */
3875 	memcpy(newblob->active.clusters, origblob->active.clusters,
3876 	       origblob->active.num_clusters * sizeof(*origblob->active.clusters));
3877 
3878 	/* sync snapshot metadata */
3879 	spdk_blob_sync_md(newblob, _spdk_bs_snapshot_newblob_sync_cpl, ctx);
3880 }
3881 
3882 static void
3883 _spdk_bs_snapshot_newblob_create_cpl(void *cb_arg, spdk_blob_id blobid, int bserrno)
3884 {
3885 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
3886 	struct spdk_blob *origblob = ctx->original.blob;
3887 
3888 	if (bserrno != 0) {
3889 		_spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
3890 		return;
3891 	}
3892 
3893 	ctx->new.id = blobid;
3894 	ctx->cpl.u.blobid.blobid = blobid;
3895 
3896 	spdk_bs_open_blob(origblob->bs, ctx->new.id, _spdk_bs_snapshot_newblob_open_cpl, ctx);
3897 }
3898 
3899 
3900 static void
3901 _spdk_bs_xattr_snapshot(void *arg, const char *name,
3902 			const void **value, size_t *value_len)
3903 {
3904 	struct spdk_blob *blob = (struct spdk_blob *)arg;
3905 
3906 	assert(strncmp(name, SNAPSHOT_IN_PROGRESS, sizeof(SNAPSHOT_IN_PROGRESS)) == 0);
3907 	*value = &blob->id;
3908 	*value_len = sizeof(blob->id);
3909 }
3910 
3911 static void
3912 _spdk_bs_snapshot_origblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
3913 {
3914 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
3915 	struct spdk_blob_opts opts;
3916 	struct spdk_blob_xattr_opts internal_xattrs;
3917 	char *xattrs_names[] = { SNAPSHOT_IN_PROGRESS };
3918 
3919 	if (bserrno != 0) {
3920 		_spdk_bs_clone_snapshot_cleanup_finish(ctx, bserrno);
3921 		return;
3922 	}
3923 
3924 	ctx->original.blob = _blob;
3925 
3926 	if (_blob->data_ro || _blob->md_ro) {
3927 		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Cannot create snapshot from read-only blob with id %lu\n",
3928 			      _blob->id);
3929 		_spdk_bs_clone_snapshot_origblob_cleanup(ctx, -EINVAL);
3930 		return;
3931 	}
3932 
3933 	spdk_blob_opts_init(&opts);
3934 	_spdk_blob_xattrs_init(&internal_xattrs);
3935 
3936 	/* Change the size of new blob to the same as in original blob,
3937 	 * but do not allocate clusters */
3938 	opts.thin_provision = true;
3939 	opts.num_clusters = spdk_blob_get_num_clusters(_blob);
3940 
3941 	/* If there are any xattrs specified for snapshot, set them now */
3942 	if (ctx->xattrs) {
3943 		memcpy(&opts.xattrs, ctx->xattrs, sizeof(*ctx->xattrs));
3944 	}
3945 	/* Set internal xattr SNAPSHOT_IN_PROGRESS */
3946 	internal_xattrs.count = 1;
3947 	internal_xattrs.ctx = _blob;
3948 	internal_xattrs.names = xattrs_names;
3949 	internal_xattrs.get_value = _spdk_bs_xattr_snapshot;
3950 
3951 	_spdk_bs_create_blob(_blob->bs, &opts, &internal_xattrs,
3952 			     _spdk_bs_snapshot_newblob_create_cpl, ctx);
3953 }
3954 
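     /*
      * Public entry point for snapshot creation.  Allocate the shared context
      *  and open the blob to snapshot; the remaining steps run in the chain of
      *  completion callbacks above.
      */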
3955 void spdk_bs_create_snapshot(struct spdk_blob_store *bs, spdk_blob_id blobid,
3956 			     const struct spdk_blob_xattr_opts *snapshot_xattrs,
3957 			     spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
3958 {
3959 	struct spdk_clone_snapshot_ctx *ctx = calloc(1, sizeof(*ctx));
3960 
3961 	if (!ctx) {
3962 		cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOMEM);
3963 		return;
3964 	}
3965 	ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOBID;
3966 	ctx->cpl.u.blobid.cb_fn = cb_fn;
3967 	ctx->cpl.u.blobid.cb_arg = cb_arg;
3968 	ctx->cpl.u.blobid.blobid = SPDK_BLOBID_INVALID;
3969 	ctx->bserrno = 0;
3970 	ctx->original.id = blobid;
3971 	ctx->xattrs = snapshot_xattrs;
3972 
3973 	spdk_bs_open_blob(bs, ctx->original.id, _spdk_bs_snapshot_origblob_open_cpl, ctx);
3974 }
3975 /* END spdk_bs_create_snapshot */
3976 
3977 /* START spdk_bs_create_clone */
3978 
3979 static void
3980 _spdk_bs_xattr_clone(void *arg, const char *name,
3981 		     const void **value, size_t *value_len)
3982 {
3983 	struct spdk_blob *blob = (struct spdk_blob *)arg;
3984 
3985 	assert(strncmp(name, BLOB_SNAPSHOT, sizeof(BLOB_SNAPSHOT)) == 0);
3986 	*value = &blob->id;
3987 	*value_len = sizeof(blob->id);
3988 }
3989 
3990 static void
3991 _spdk_bs_clone_newblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
3992 {
3993 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
3994 	struct spdk_blob *clone = _blob;
3995 
     	if (bserrno != 0) {
     		_spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
     		return;
     	}
     
3996 	ctx->new.blob = clone;
3997 	_spdk_bs_blob_list_add(clone);
3998 
3999 	spdk_blob_close(clone, _spdk_bs_clone_snapshot_origblob_cleanup, ctx);
4000 }
4001 
4002 static void
4003 _spdk_bs_clone_newblob_create_cpl(void *cb_arg, spdk_blob_id blobid, int bserrno)
4004 {
4005 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
4006 
     	if (bserrno != 0) {
     		_spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
     		return;
     	}
     
4007 	ctx->cpl.u.blobid.blobid = blobid;
4008 	spdk_bs_open_blob(ctx->original.blob->bs, blobid, _spdk_bs_clone_newblob_open_cpl, ctx);
4009 }
4010 
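     /*
      * The clone source has been opened.  Only read-only blobs (snapshots) may
      *  be cloned; the clone is created thin provisioned with the same cluster
      *  count and an internal BLOB_SNAPSHOT xattr pointing back at the source.
      */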
4011 static void
4012 _spdk_bs_clone_origblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
4013 {
4014 	struct spdk_clone_snapshot_ctx	*ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
4015 	struct spdk_blob_opts		opts;
4016 	struct spdk_blob_xattr_opts internal_xattrs;
4017 	char *xattr_names[] = { BLOB_SNAPSHOT };
4018 
4019 	if (bserrno != 0) {
4020 		_spdk_bs_clone_snapshot_cleanup_finish(ctx, bserrno);
4021 		return;
4022 	}
4023 
4024 	ctx->original.blob = _blob;
4025 
4026 	if (!_blob->data_ro || !_blob->md_ro) {
4027 		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Cannot create clone from a blob that is not read-only\n");
4028 		_spdk_bs_clone_snapshot_origblob_cleanup(ctx, -EINVAL);
4029 		return;
4030 	}
4031 
4032 	spdk_blob_opts_init(&opts);
4033 	_spdk_blob_xattrs_init(&internal_xattrs);
4034 
4035 	opts.thin_provision = true;
4036 	opts.num_clusters = spdk_blob_get_num_clusters(_blob);
4037 	if (ctx->xattrs) {
4038 		memcpy(&opts.xattrs, ctx->xattrs, sizeof(*ctx->xattrs));
4039 	}
4040 
4041 	/* Set internal xattr BLOB_SNAPSHOT */
4042 	internal_xattrs.count = 1;
4043 	internal_xattrs.ctx = _blob;
4044 	internal_xattrs.names = xattr_names;
4045 	internal_xattrs.get_value = _spdk_bs_xattr_clone;
4046 
4047 	_spdk_bs_create_blob(_blob->bs, &opts, &internal_xattrs,
4048 			     _spdk_bs_clone_newblob_create_cpl, ctx);
4049 }
4050 
4051 void spdk_bs_create_clone(struct spdk_blob_store *bs, spdk_blob_id blobid,
4052 			  const struct spdk_blob_xattr_opts *clone_xattrs,
4053 			  spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
4054 {
4055 	struct spdk_clone_snapshot_ctx	*ctx = calloc(1, sizeof(*ctx));
4056 
4057 	if (!ctx) {
4058 		cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOMEM);
4059 		return;
4060 	}
4061 
4062 	ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOBID;
4063 	ctx->cpl.u.blobid.cb_fn = cb_fn;
4064 	ctx->cpl.u.blobid.cb_arg = cb_arg;
4065 	ctx->cpl.u.blobid.blobid = SPDK_BLOBID_INVALID;
4066 	ctx->bserrno = 0;
4067 	ctx->xattrs = clone_xattrs;
4068 	ctx->original.id = blobid;
4069 
4070 	spdk_bs_open_blob(bs, ctx->original.id, _spdk_bs_clone_origblob_open_cpl, ctx);
4071 }
4072 
4073 /* END spdk_bs_create_clone */
4074 
4075 /* START spdk_blob_resize */
4076 void
4077 spdk_blob_resize(struct spdk_blob *blob, uint64_t sz, spdk_blob_op_complete cb_fn, void *cb_arg)
4078 {
4079 	int			rc;
4080 
4081 	_spdk_blob_verify_md_op(blob);
4082 
4083 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Resizing blob %lu to %lu clusters\n", blob->id, sz);
4084 
4085 	if (blob->md_ro) {
4086 		cb_fn(cb_arg, -EPERM);
4087 		return;
4088 	}
4089 
4090 	if (sz == blob->active.num_clusters) {
4091 		cb_fn(cb_arg, 0);
4092 		return;
4093 	}
4094 
4095 	rc = _spdk_blob_resize(blob, sz);
4096 	cb_fn(cb_arg, rc);
4097 }
4098 
4099 /* END spdk_blob_resize */
4100 
4101 
4102 /* START spdk_bs_delete_blob */
4103 
4104 static void
4105 _spdk_bs_delete_close_cpl(void *cb_arg, int bserrno)
4106 {
4107 	spdk_bs_sequence_t *seq = cb_arg;
4108 
4109 	spdk_bs_sequence_finish(seq, bserrno);
4110 }
4111 
4112 static void
4113 _spdk_bs_delete_persist_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4114 {
4115 	struct spdk_blob *blob = cb_arg;
4116 
4117 	if (bserrno != 0) {
4118 		/*
4119 		 * We already removed this blob from the blobstore tailq, so
4120 		 *  we need to free it here since this is the last reference
4121 		 *  to it.
4122 		 */
4123 		_spdk_blob_free(blob);
4124 		_spdk_bs_delete_close_cpl(seq, bserrno);
4125 		return;
4126 	}
4127 
4128 	/*
4129 	 * This will immediately decrement the ref_count and call
4130 	 *  the completion routine since the metadata state is clean.
4131 	 *  By calling spdk_blob_close, we reduce the number of call
4132 	 *  points into code that touches the blob->open_ref count
4133 	 *  and the blobstore's blob list.
4134 	 */
4135 	spdk_blob_close(blob, _spdk_bs_delete_close_cpl, seq);
4136 }
4137 
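     /*
      * The blob to be deleted has been opened.  Refuse the delete if anyone
      *  else still holds it open, otherwise detach it from the snapshot/clone
      *  lists, release its blob id, shrink it to zero clusters and persist the
      *  now-empty metadata.
      */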
4138 static void
4139 _spdk_bs_delete_open_cpl(void *cb_arg, struct spdk_blob *blob, int bserrno)
4140 {
4141 	spdk_bs_sequence_t *seq = cb_arg;
4142 	uint32_t page_num;
4143 
4144 	if (bserrno != 0) {
4145 		spdk_bs_sequence_finish(seq, bserrno);
4146 		return;
4147 	}
4148 
4149 	_spdk_blob_verify_md_op(blob);
4150 
4151 	if (blob->open_ref > 1) {
4152 		/*
4153 		 * Someone has this blob open (besides this delete context).
4154 		 *  Decrement the ref count directly and return -EBUSY.
4155 		 */
4156 		blob->open_ref--;
4157 		spdk_bs_sequence_finish(seq, -EBUSY);
4158 		return;
4159 	}
4160 
4161 	bserrno = _spdk_bs_blob_list_remove(blob);
4162 	if (bserrno != 0) {
4163 		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Failed to remove blob #%" PRIu64 " from the blob list\n", blob->id);
4164 		spdk_bs_sequence_finish(seq, bserrno);
4165 		return;
4166 	}
4167 
4168 	/*
4169 	 * Remove the blob from the blob_store list now, to ensure it does not
4170 	 *  get returned after this point by _spdk_blob_lookup().
4171 	 */
4172 	TAILQ_REMOVE(&blob->bs->blobs, blob, link);
4173 	page_num = _spdk_bs_blobid_to_page(blob->id);
4174 	spdk_bit_array_clear(blob->bs->used_blobids, page_num);
4175 	blob->state = SPDK_BLOB_STATE_DIRTY;
4176 	blob->active.num_pages = 0;
4177 	_spdk_blob_resize(blob, 0);
4178 
4179 	_spdk_blob_persist(seq, blob, _spdk_bs_delete_persist_cpl, blob);
4180 }
4181 
4182 void
4183 spdk_bs_delete_blob(struct spdk_blob_store *bs, spdk_blob_id blobid,
4184 		    spdk_blob_op_complete cb_fn, void *cb_arg)
4185 {
4186 	struct spdk_bs_cpl	cpl;
4187 	spdk_bs_sequence_t	*seq;
4188 	struct spdk_blob_list	*snapshot_entry = NULL;
4189 
4190 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Deleting blob %lu\n", blobid);
4191 
4192 	assert(spdk_get_thread() == bs->md_thread);
4193 
4194 	/* Check if this is a snapshot with clones */
4195 	TAILQ_FOREACH(snapshot_entry, &bs->snapshots, link) {
4196 		if (snapshot_entry->id == blobid) {
4197 			break;
4198 		}
4199 	}
4200 	if (snapshot_entry != NULL) {
4201 		/* If the snapshot has clones, we cannot remove it */
4202 		if (!TAILQ_EMPTY(&snapshot_entry->clones)) {
4203 			SPDK_ERRLOG("Cannot remove snapshot with clones\n");
4204 			cb_fn(cb_arg, -EBUSY);
4205 			return;
4206 		}
4207 	}
4208 
4209 	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
4210 	cpl.u.blob_basic.cb_fn = cb_fn;
4211 	cpl.u.blob_basic.cb_arg = cb_arg;
4212 
4213 	seq = spdk_bs_sequence_start(bs->md_channel, &cpl);
4214 	if (!seq) {
4215 		cb_fn(cb_arg, -ENOMEM);
4216 		return;
4217 	}
4218 
4219 	spdk_bs_open_blob(bs, blobid, _spdk_bs_delete_open_cpl, seq);
4220 }
4221 
4222 /* END spdk_bs_delete_blob */
4223 
4224 /* START spdk_bs_open_blob */
4225 
4226 static void
4227 _spdk_bs_open_blob_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4228 {
4229 	struct spdk_blob *blob = cb_arg;
4230 
4231 	/* If the blob has a crc error, we just return NULL. */
4232 	if (blob == NULL) {
4233 		seq->cpl.u.blob_handle.blob = NULL;
4234 		spdk_bs_sequence_finish(seq, bserrno);
4235 		return;
4236 	}
4237 
4238 	blob->open_ref++;
4239 
4240 	TAILQ_INSERT_HEAD(&blob->bs->blobs, blob, link);
4241 
4242 	spdk_bs_sequence_finish(seq, bserrno);
4243 }
4244 
4245 void spdk_bs_open_blob(struct spdk_blob_store *bs, spdk_blob_id blobid,
4246 		       spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
4247 {
4248 	struct spdk_blob		*blob;
4249 	struct spdk_bs_cpl		cpl;
4250 	spdk_bs_sequence_t		*seq;
4251 	uint32_t			page_num;
4252 
4253 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Opening blob %lu\n", blobid);
4254 	assert(spdk_get_thread() == bs->md_thread);
4255 
4256 	page_num = _spdk_bs_blobid_to_page(blobid);
4257 	if (spdk_bit_array_get(bs->used_blobids, page_num) == false) {
4258 		/* Invalid blobid */
4259 		cb_fn(cb_arg, NULL, -ENOENT);
4260 		return;
4261 	}
4262 
4263 	blob = _spdk_blob_lookup(bs, blobid);
4264 	if (blob) {
4265 		blob->open_ref++;
4266 		cb_fn(cb_arg, blob, 0);
4267 		return;
4268 	}
4269 
4270 	blob = _spdk_blob_alloc(bs, blobid);
4271 	if (!blob) {
4272 		cb_fn(cb_arg, NULL, -ENOMEM);
4273 		return;
4274 	}
4275 
4276 	cpl.type = SPDK_BS_CPL_TYPE_BLOB_HANDLE;
4277 	cpl.u.blob_handle.cb_fn = cb_fn;
4278 	cpl.u.blob_handle.cb_arg = cb_arg;
4279 	cpl.u.blob_handle.blob = blob;
4280 
4281 	seq = spdk_bs_sequence_start(bs->md_channel, &cpl);
4282 	if (!seq) {
4283 		_spdk_blob_free(blob);
4284 		cb_fn(cb_arg, NULL, -ENOMEM);
4285 		return;
4286 	}
4287 
4288 	_spdk_blob_load(seq, blob, _spdk_bs_open_blob_cpl, blob);
4289 }
4290 /* END spdk_bs_open_blob */
4291 
4292 /* START spdk_blob_set_read_only */
4293 int spdk_blob_set_read_only(struct spdk_blob *blob)
4294 {
4295 	_spdk_blob_verify_md_op(blob);
4296 
4297 	blob->data_ro_flags |= SPDK_BLOB_READ_ONLY;
4298 
4299 	blob->state = SPDK_BLOB_STATE_DIRTY;
4300 	return 0;
4301 }
4302 /* END spdk_blob_set_read_only */
4303 
4304 /* START spdk_blob_sync_md */
4305 
4306 static void
4307 _spdk_blob_sync_md_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4308 {
4309 	struct spdk_blob *blob = cb_arg;
4310 
4311 	if (bserrno == 0 && (blob->data_ro_flags & SPDK_BLOB_READ_ONLY)) {
4312 		blob->data_ro = true;
4313 		blob->md_ro = true;
4314 	}
4315 
4316 	spdk_bs_sequence_finish(seq, bserrno);
4317 }
4318 
4319 static void
4320 _spdk_blob_sync_md(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
4321 {
4322 	struct spdk_bs_cpl	cpl;
4323 	spdk_bs_sequence_t	*seq;
4324 
4325 	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
4326 	cpl.u.blob_basic.cb_fn = cb_fn;
4327 	cpl.u.blob_basic.cb_arg = cb_arg;
4328 
4329 	seq = spdk_bs_sequence_start(blob->bs->md_channel, &cpl);
4330 	if (!seq) {
4331 		cb_fn(cb_arg, -ENOMEM);
4332 		return;
4333 	}
4334 
4335 	_spdk_blob_persist(seq, blob, _spdk_blob_sync_md_cpl, blob);
4336 }
4337 
4338 void
4339 spdk_blob_sync_md(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
4340 {
4341 	_spdk_blob_verify_md_op(blob);
4342 
4343 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Syncing blob %lu\n", blob->id);
4344 
4345 	if (blob->md_ro) {
4346 		assert(blob->state == SPDK_BLOB_STATE_CLEAN);
4347 		cb_fn(cb_arg, 0);
4348 		return;
4349 	}
4350 
4351 	_spdk_blob_sync_md(blob, cb_fn, cb_arg);
4352 }
4353 
4354 /* END spdk_blob_sync_md */
4355 
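     /*
      * Cluster insertion is funneled through the metadata thread: the thread
      *  that allocated the cluster sends a message to the md thread, which
      *  updates the cluster map and syncs the metadata, then messages the
      *  result back to the originating thread.
      */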
4356 struct spdk_blob_insert_cluster_ctx {
4357 	struct spdk_thread	*thread;
4358 	struct spdk_blob	*blob;
4359 	uint32_t		cluster_num;	/* cluster index in blob */
4360 	uint32_t		cluster;	/* cluster on disk */
4361 	int			rc;
4362 	spdk_blob_op_complete	cb_fn;
4363 	void			*cb_arg;
4364 };
4365 
4366 static void
4367 _spdk_blob_insert_cluster_msg_cpl(void *arg)
4368 {
4369 	struct spdk_blob_insert_cluster_ctx *ctx = arg;
4370 
4371 	ctx->cb_fn(ctx->cb_arg, ctx->rc);
4372 	free(ctx);
4373 }
4374 
4375 static void
4376 _spdk_blob_insert_cluster_msg_cb(void *arg, int bserrno)
4377 {
4378 	struct spdk_blob_insert_cluster_ctx *ctx = arg;
4379 
4380 	ctx->rc = bserrno;
4381 	spdk_thread_send_msg(ctx->thread, _spdk_blob_insert_cluster_msg_cpl, ctx);
4382 }
4383 
4384 static void
4385 _spdk_blob_insert_cluster_msg(void *arg)
4386 {
4387 	struct spdk_blob_insert_cluster_ctx *ctx = arg;
4388 
4389 	ctx->rc = _spdk_blob_insert_cluster(ctx->blob, ctx->cluster_num, ctx->cluster);
4390 	if (ctx->rc != 0) {
4391 		spdk_thread_send_msg(ctx->thread, _spdk_blob_insert_cluster_msg_cpl, ctx);
4392 		return;
4393 	}
4394 
4395 	ctx->blob->state = SPDK_BLOB_STATE_DIRTY;
4396 	_spdk_blob_sync_md(ctx->blob, _spdk_blob_insert_cluster_msg_cb, ctx);
4397 }
4398 
4399 void
4400 _spdk_blob_insert_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num,
4401 				       uint64_t cluster, spdk_blob_op_complete cb_fn, void *cb_arg)
4402 {
4403 	struct spdk_blob_insert_cluster_ctx *ctx;
4404 
4405 	ctx = calloc(1, sizeof(*ctx));
4406 	if (ctx == NULL) {
4407 		cb_fn(cb_arg, -ENOMEM);
4408 		return;
4409 	}
4410 
4411 	ctx->thread = spdk_get_thread();
4412 	ctx->blob = blob;
4413 	ctx->cluster_num = cluster_num;
4414 	ctx->cluster = cluster;
4415 	ctx->cb_fn = cb_fn;
4416 	ctx->cb_arg = cb_arg;
4417 
4418 	spdk_thread_send_msg(blob->bs->md_thread, _spdk_blob_insert_cluster_msg, ctx);
4419 }
4420 
4421 /* START spdk_blob_close */
4422 
4423 static void
4424 _spdk_blob_close_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4425 {
4426 	struct spdk_blob *blob = cb_arg;
4427 
4428 	if (bserrno == 0) {
4429 		blob->open_ref--;
4430 		if (blob->open_ref == 0) {
4431 			/*
4432 			 * Blobs with active.num_pages == 0 are deleted blobs.
4433 			 *  These blobs are removed from the blob_store list
4434 			 *  when the deletion process starts, so don't try to
4435 			 *  remove them again.
4436 			 */
4437 			if (blob->active.num_pages > 0) {
4438 				TAILQ_REMOVE(&blob->bs->blobs, blob, link);
4439 			}
4440 			_spdk_blob_free(blob);
4441 		}
4442 	}
4443 
4444 	spdk_bs_sequence_finish(seq, bserrno);
4445 }
4446 
4447 void spdk_blob_close(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
4448 {
4449 	struct spdk_bs_cpl	cpl;
4450 	spdk_bs_sequence_t	*seq;
4451 
4452 	_spdk_blob_verify_md_op(blob);
4453 
4454 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Closing blob %lu\n", blob->id);
4455 
4456 	if (blob->open_ref == 0) {
4457 		cb_fn(cb_arg, -EBADF);
4458 		return;
4459 	}
4460 
4461 	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
4462 	cpl.u.blob_basic.cb_fn = cb_fn;
4463 	cpl.u.blob_basic.cb_arg = cb_arg;
4464 
4465 	seq = spdk_bs_sequence_start(blob->bs->md_channel, &cpl);
4466 	if (!seq) {
4467 		cb_fn(cb_arg, -ENOMEM);
4468 		return;
4469 	}
4470 
4471 	/* Sync metadata */
4472 	_spdk_blob_persist(seq, blob, _spdk_blob_close_cpl, blob);
4473 }
4474 
4475 /* END spdk_blob_close */
4476 
4477 struct spdk_io_channel *spdk_bs_alloc_io_channel(struct spdk_blob_store *bs)
4478 {
4479 	return spdk_get_io_channel(bs);
4480 }
4481 
4482 void spdk_bs_free_io_channel(struct spdk_io_channel *channel)
4483 {
4484 	spdk_put_io_channel(channel);
4485 }
4486 
4487 void spdk_blob_io_unmap(struct spdk_blob *blob, struct spdk_io_channel *channel,
4488 			uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg)
4489 {
4490 	_spdk_blob_request_submit_op(blob, channel, NULL, offset, length, cb_fn, cb_arg,
4491 				     SPDK_BLOB_UNMAP);
4492 }
4493 
4494 void spdk_blob_io_write_zeroes(struct spdk_blob *blob, struct spdk_io_channel *channel,
4495 			       uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg)
4496 {
4497 	_spdk_blob_request_submit_op(blob, channel, NULL, offset, length, cb_fn, cb_arg,
4498 				     SPDK_BLOB_WRITE_ZEROES);
4499 }
4500 
4501 void spdk_blob_io_write(struct spdk_blob *blob, struct spdk_io_channel *channel,
4502 			void *payload, uint64_t offset, uint64_t length,
4503 			spdk_blob_op_complete cb_fn, void *cb_arg)
4504 {
4505 	_spdk_blob_request_submit_op(blob, channel, payload, offset, length, cb_fn, cb_arg,
4506 				     SPDK_BLOB_WRITE);
4507 }
4508 
4509 void spdk_blob_io_read(struct spdk_blob *blob, struct spdk_io_channel *channel,
4510 		       void *payload, uint64_t offset, uint64_t length,
4511 		       spdk_blob_op_complete cb_fn, void *cb_arg)
4512 {
4513 	_spdk_blob_request_submit_op(blob, channel, payload, offset, length, cb_fn, cb_arg,
4514 				     SPDK_BLOB_READ);
4515 }
4516 
4517 void spdk_blob_io_writev(struct spdk_blob *blob, struct spdk_io_channel *channel,
4518 			 struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
4519 			 spdk_blob_op_complete cb_fn, void *cb_arg)
4520 {
4521 	_spdk_blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, false);
4522 }
4523 
4524 void spdk_blob_io_readv(struct spdk_blob *blob, struct spdk_io_channel *channel,
4525 			struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
4526 			spdk_blob_op_complete cb_fn, void *cb_arg)
4527 {
4528 	_spdk_blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, true);
4529 }
4530 
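     /*
      * The spdk_bs_io_*_blob functions below are thin wrappers that simply
      *  forward to the corresponding spdk_blob_io_* calls.
      */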
4531 void spdk_bs_io_unmap_blob(struct spdk_blob *blob, struct spdk_io_channel *channel,
4532 			   uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg)
4533 {
4534 	spdk_blob_io_unmap(blob, channel, offset, length, cb_fn, cb_arg);
4535 }
4536 
4537 void spdk_bs_io_write_zeroes_blob(struct spdk_blob *blob, struct spdk_io_channel *channel,
4538 				  uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg)
4539 {
4540 	spdk_blob_io_write_zeroes(blob, channel, offset, length, cb_fn, cb_arg);
4541 }
4542 
4543 void spdk_bs_io_write_blob(struct spdk_blob *blob, struct spdk_io_channel *channel,
4544 			   void *payload, uint64_t offset, uint64_t length,
4545 			   spdk_blob_op_complete cb_fn, void *cb_arg)
4546 {
4547 	spdk_blob_io_write(blob, channel, payload, offset, length, cb_fn, cb_arg);
4548 }
4549 
4550 void spdk_bs_io_read_blob(struct spdk_blob *blob, struct spdk_io_channel *channel,
4551 			  void *payload, uint64_t offset, uint64_t length,
4552 			  spdk_blob_op_complete cb_fn, void *cb_arg)
4553 {
4554 	spdk_blob_io_read(blob, channel, payload, offset, length, cb_fn, cb_arg);
4555 }
4556 
4557 void spdk_bs_io_writev_blob(struct spdk_blob *blob, struct spdk_io_channel *channel,
4558 			    struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
4559 			    spdk_blob_op_complete cb_fn, void *cb_arg)
4560 {
4561 	spdk_blob_io_writev(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg);
4562 }
4563 
4564 void spdk_bs_io_readv_blob(struct spdk_blob *blob, struct spdk_io_channel *channel,
4565 			   struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
4566 			   spdk_blob_op_complete cb_fn, void *cb_arg)
4567 {
4568 	spdk_blob_io_readv(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg);
4569 }
4570 
4571 struct spdk_bs_iter_ctx {
4572 	int64_t page_num;
4573 	struct spdk_blob_store *bs;
4574 
4575 	spdk_blob_op_with_handle_complete cb_fn;
4576 	void *cb_arg;
4577 };
4578 
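     /*
      * Iteration helper.  On a successful open the blob is handed to the
      *  caller; otherwise advance to the next bit set in used_blobids and try
      *  to open that blob, reporting -ENOENT once the end of the array is
      *  reached.
      */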
4579 static void
4580 _spdk_bs_iter_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
4581 {
4582 	struct spdk_bs_iter_ctx *ctx = cb_arg;
4583 	struct spdk_blob_store *bs = ctx->bs;
4584 	spdk_blob_id id;
4585 
4586 	if (bserrno == 0) {
4587 		ctx->cb_fn(ctx->cb_arg, _blob, bserrno);
4588 		free(ctx);
4589 		return;
4590 	}
4591 
4592 	ctx->page_num++;
4593 	ctx->page_num = spdk_bit_array_find_first_set(bs->used_blobids, ctx->page_num);
4594 	if (ctx->page_num >= spdk_bit_array_capacity(bs->used_blobids)) {
4595 		ctx->cb_fn(ctx->cb_arg, NULL, -ENOENT);
4596 		free(ctx);
4597 		return;
4598 	}
4599 
4600 	id = _spdk_bs_page_to_blobid(ctx->page_num);
4601 
4602 	spdk_bs_open_blob(bs, id, _spdk_bs_iter_cpl, ctx);
4603 }
4604 
4605 void
4606 spdk_bs_iter_first(struct spdk_blob_store *bs,
4607 		   spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
4608 {
4609 	struct spdk_bs_iter_ctx *ctx;
4610 
4611 	ctx = calloc(1, sizeof(*ctx));
4612 	if (!ctx) {
4613 		cb_fn(cb_arg, NULL, -ENOMEM);
4614 		return;
4615 	}
4616 
4617 	ctx->page_num = -1;
4618 	ctx->bs = bs;
4619 	ctx->cb_fn = cb_fn;
4620 	ctx->cb_arg = cb_arg;
4621 
4622 	_spdk_bs_iter_cpl(ctx, NULL, -1);
4623 }
4624 
4625 static void
4626 _spdk_bs_iter_close_cpl(void *cb_arg, int bserrno)
4627 {
4628 	struct spdk_bs_iter_ctx *ctx = cb_arg;
4629 
4630 	_spdk_bs_iter_cpl(ctx, NULL, -1);
4631 }
4632 
4633 void
4634 spdk_bs_iter_next(struct spdk_blob_store *bs, struct spdk_blob *blob,
4635 		  spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
4636 {
4637 	struct spdk_bs_iter_ctx *ctx;
4638 
4639 	assert(blob != NULL);
4640 
4641 	ctx = calloc(1, sizeof(*ctx));
4642 	if (!ctx) {
4643 		cb_fn(cb_arg, NULL, -ENOMEM);
4644 		return;
4645 	}
4646 
4647 	ctx->page_num = _spdk_bs_blobid_to_page(blob->id);
4648 	ctx->bs = bs;
4649 	ctx->cb_fn = cb_fn;
4650 	ctx->cb_arg = cb_arg;
4651 
4652 	/* Close the existing blob */
4653 	spdk_blob_close(blob, _spdk_bs_iter_close_cpl, ctx);
4654 }
4655 
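     /*
      * Set or replace an xattr.  Internal xattrs are kept on a separate list
      *  and their presence is recorded in the blob's invalid_flags.
      */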
4656 static int
4657 _spdk_blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value,
4658 		     uint16_t value_len, bool internal)
4659 {
4660 	struct spdk_xattr_tailq *xattrs;
4661 	struct spdk_xattr	*xattr;
4662 
4663 	_spdk_blob_verify_md_op(blob);
4664 
4665 	if (blob->md_ro) {
4666 		return -EPERM;
4667 	}
4668 
4669 	if (internal) {
4670 		xattrs = &blob->xattrs_internal;
4671 		blob->invalid_flags |= SPDK_BLOB_INTERNAL_XATTR;
4672 	} else {
4673 		xattrs = &blob->xattrs;
4674 	}
4675 
4676 	TAILQ_FOREACH(xattr, xattrs, link) {
4677 		if (!strcmp(name, xattr->name)) {
     			void *tmp = malloc(value_len);
     
     			if (!tmp) {
     				return -ENOMEM;
     			}
4678 			free(xattr->value);
4679 			xattr->value_len = value_len;
4680 			xattr->value = tmp;
4681 			memcpy(xattr->value, value, value_len);
4682 
4683 			blob->state = SPDK_BLOB_STATE_DIRTY;
4684 
4685 			return 0;
4686 		}
4687 	}
4688 
4689 	xattr = calloc(1, sizeof(*xattr));
4690 	if (!xattr) {
4691 		return -ENOMEM;
4692 	}
4693 	xattr->name = strdup(name);
     	if (!xattr->name) {
     		free(xattr);
     		return -ENOMEM;
     	}
4694 	xattr->value_len = value_len;
4695 	xattr->value = malloc(value_len);
     	if (!xattr->value) {
     		free(xattr->name);
     		free(xattr);
     		return -ENOMEM;
     	}
4696 	memcpy(xattr->value, value, value_len);
4697 	TAILQ_INSERT_TAIL(xattrs, xattr, link);
4698 
4699 	blob->state = SPDK_BLOB_STATE_DIRTY;
4700 
4701 	return 0;
4702 }
4703 
4704 int
4705 spdk_blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value,
4706 		    uint16_t value_len)
4707 {
4708 	return _spdk_blob_set_xattr(blob, name, value, value_len, false);
4709 }
4710 
4711 static int
4712 _spdk_blob_remove_xattr(struct spdk_blob *blob, const char *name, bool internal)
4713 {
4714 	struct spdk_xattr_tailq *xattrs;
4715 	struct spdk_xattr	*xattr;
4716 
4717 	_spdk_blob_verify_md_op(blob);
4718 
4719 	if (blob->md_ro) {
4720 		return -EPERM;
4721 	}
4722 	xattrs = internal ? &blob->xattrs_internal : &blob->xattrs;
4723 
4724 	TAILQ_FOREACH(xattr, xattrs, link) {
4725 		if (!strcmp(name, xattr->name)) {
4726 			TAILQ_REMOVE(xattrs, xattr, link);
4727 			free(xattr->value);
4728 			free(xattr->name);
4729 			free(xattr);
4730 
4731 			if (internal && TAILQ_EMPTY(&blob->xattrs_internal)) {
4732 				blob->invalid_flags &= ~SPDK_BLOB_INTERNAL_XATTR;
4733 			}
4734 			blob->state = SPDK_BLOB_STATE_DIRTY;
4735 
4736 			return 0;
4737 		}
4738 	}
4739 
4740 	return -ENOENT;
4741 }
4742 
4743 int
4744 spdk_blob_remove_xattr(struct spdk_blob *blob, const char *name)
4745 {
4746 	return _spdk_blob_remove_xattr(blob, name, false);
4747 }
4748 
4749 static int
4750 _spdk_blob_get_xattr_value(struct spdk_blob *blob, const char *name,
4751 			   const void **value, size_t *value_len, bool internal)
4752 {
4753 	struct spdk_xattr	*xattr;
4754 	struct spdk_xattr_tailq *xattrs;
4755 
4756 	xattrs = internal ? &blob->xattrs_internal : &blob->xattrs;
4757 
4758 	TAILQ_FOREACH(xattr, xattrs, link) {
4759 		if (!strcmp(name, xattr->name)) {
4760 			*value = xattr->value;
4761 			*value_len = xattr->value_len;
4762 			return 0;
4763 		}
4764 	}
4765 	return -ENOENT;
4766 }
4767 
4768 int
4769 spdk_blob_get_xattr_value(struct spdk_blob *blob, const char *name,
4770 			  const void **value, size_t *value_len)
4771 {
4772 	_spdk_blob_verify_md_op(blob);
4773 
4774 	return _spdk_blob_get_xattr_value(blob, name, value, value_len, false);
4775 }
4776 
4777 struct spdk_xattr_names {
4778 	uint32_t	count;
4779 	const char	*names[0];
4780 };
4781 
4782 static int
4783 _spdk_blob_get_xattr_names(struct spdk_xattr_tailq *xattrs, struct spdk_xattr_names **names)
4784 {
4785 	struct spdk_xattr	*xattr;
4786 	int			count = 0;
4787 
4788 	TAILQ_FOREACH(xattr, xattrs, link) {
4789 		count++;
4790 	}
4791 
4792 	*names = calloc(1, sizeof(struct spdk_xattr_names) + count * sizeof(char *));
4793 	if (*names == NULL) {
4794 		return -ENOMEM;
4795 	}
4796 
4797 	TAILQ_FOREACH(xattr, xattrs, link) {
4798 		(*names)->names[(*names)->count++] = xattr->name;
4799 	}
4800 
4801 	return 0;
4802 }
4803 
4804 int
4805 spdk_blob_get_xattr_names(struct spdk_blob *blob, struct spdk_xattr_names **names)
4806 {
4807 	_spdk_blob_verify_md_op(blob);
4808 
4809 	return _spdk_blob_get_xattr_names(&blob->xattrs, names);
4810 }
4811 
4812 uint32_t
4813 spdk_xattr_names_get_count(struct spdk_xattr_names *names)
4814 {
4815 	assert(names != NULL);
4816 
4817 	return names->count;
4818 }
4819 
4820 const char *
4821 spdk_xattr_names_get_name(struct spdk_xattr_names *names, uint32_t index)
4822 {
4823 	if (index >= names->count) {
4824 		return NULL;
4825 	}
4826 
4827 	return names->names[index];
4828 }
4829 
4830 void
4831 spdk_xattr_names_free(struct spdk_xattr_names *names)
4832 {
4833 	free(names);
4834 }
4835 
4836 struct spdk_bs_type
4837 spdk_bs_get_bstype(struct spdk_blob_store *bs)
4838 {
4839 	return bs->bstype;
4840 }
4841 
4842 void
4843 spdk_bs_set_bstype(struct spdk_blob_store *bs, struct spdk_bs_type bstype)
4844 {
4845 	memcpy(&bs->bstype, &bstype, sizeof(bstype));
4846 }
4847 
4848 bool
4849 spdk_blob_is_read_only(struct spdk_blob *blob)
4850 {
4851 	assert(blob != NULL);
4852 	return (blob->data_ro || blob->md_ro);
4853 }
4854 
4855 bool
4856 spdk_blob_is_snapshot(struct spdk_blob *blob)
4857 {
4858 	struct spdk_blob_list *snapshot_entry;
4859 
4860 	assert(blob != NULL);
4861 
4862 	TAILQ_FOREACH(snapshot_entry, &blob->bs->snapshots, link) {
4863 		if (snapshot_entry->id == blob->id) {
4864 			break;
4865 		}
4866 	}
4867 
4868 	if (snapshot_entry == NULL) {
4869 		return false;
4870 	}
4871 
4872 	return true;
4873 }
4874 
4875 bool
4876 spdk_blob_is_clone(struct spdk_blob *blob)
4877 {
4878 	assert(blob != NULL);
4879 
4880 	if (blob->parent_id != SPDK_BLOBID_INVALID) {
4881 		assert(spdk_blob_is_thin_provisioned(blob));
4882 		return true;
4883 	}
4884 
4885 	return false;
4886 }
4887 
4888 bool
4889 spdk_blob_is_thin_provisioned(struct spdk_blob *blob)
4890 {
4891 	assert(blob != NULL);
4892 	return !!(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
4893 }
4894 
4895 spdk_blob_id
4896 spdk_blob_get_parent_snapshot(struct spdk_blob_store *bs, spdk_blob_id blob_id)
4897 {
4898 	struct spdk_blob_list *snapshot_entry = NULL;
4899 	struct spdk_blob_list *clone_entry = NULL;
4900 
4901 	TAILQ_FOREACH(snapshot_entry, &bs->snapshots, link) {
4902 		TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) {
4903 			if (clone_entry->id == blob_id) {
4904 				return snapshot_entry->id;
4905 			}
4906 		}
4907 	}
4908 
4909 	return SPDK_BLOBID_INVALID;
4910 }
4911 
4912 int
4913 spdk_blob_get_clones(struct spdk_blob_store *bs, spdk_blob_id blobid, spdk_blob_id *ids,
4914 		     size_t *count)
4915 {
4916 	struct spdk_blob_list *snapshot_entry, *clone_entry;
4917 	size_t n;
4918 
4919 	TAILQ_FOREACH(snapshot_entry, &bs->snapshots, link) {
4920 		if (snapshot_entry->id == blobid) {
4921 			break;
4922 		}
4923 	}
4924 	if (snapshot_entry == NULL) {
4925 		*count = 0;
4926 		return 0;
4927 	}
4928 
4929 	if (ids == NULL || *count < snapshot_entry->clone_count) {
4930 		*count = snapshot_entry->clone_count;
4931 		return -ENOMEM;
4932 	}
4933 	*count = snapshot_entry->clone_count;
4934 
4935 	n = 0;
4936 	TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) {
4937 		ids[n++] = clone_entry->id;
4938 	}
4939 
4940 	return 0;
4941 }
4942 
4943 SPDK_LOG_REGISTER_COMPONENT("blob", SPDK_LOG_BLOB)
4944