xref: /spdk/lib/blob/blobstore.c (revision aa67900a2e42b33386033ed906844d751a57599a)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include "spdk/stdinc.h"
35 
36 #include "spdk/blob.h"
37 #include "spdk/crc32.h"
38 #include "spdk/env.h"
39 #include "spdk/queue.h"
40 #include "spdk/io_channel.h"
41 #include "spdk/bit_array.h"
42 #include "spdk/likely.h"
43 
44 #include "spdk_internal/assert.h"
45 #include "spdk_internal/log.h"
46 
47 #include "blobstore.h"
48 
49 #define BLOB_CRC32C_INITIAL    0xffffffffUL
50 
51 static int spdk_bs_register_md_thread(struct spdk_blob_store *bs);
52 static int spdk_bs_unregister_md_thread(struct spdk_blob_store *bs);
53 static void _spdk_blob_close_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno);
54 void _spdk_blob_insert_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num,
55 		uint64_t cluster, spdk_blob_op_complete cb_fn, void *cb_arg);
56 
57 static int _spdk_blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value,
58 				uint16_t value_len, bool internal);
59 static int _spdk_blob_get_xattr_value(struct spdk_blob *blob, const char *name,
60 				      const void **value, size_t *value_len, bool internal);
61 static int _spdk_blob_remove_xattr(struct spdk_blob *blob, const char *name, bool internal);
62 
63 static void
64 _spdk_blob_verify_md_op(struct spdk_blob *blob)
65 {
66 	assert(blob != NULL);
67 	assert(spdk_get_thread() == blob->bs->md_thread);
68 	assert(blob->state != SPDK_BLOB_STATE_LOADING);
69 }
70 
71 static inline size_t
72 divide_round_up(size_t num, size_t divisor)
73 {
74 	return (num + divisor - 1) / divisor;
75 }
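
/*
 * Worked example of the ceiling-division idiom above:
 * divide_round_up(10, 4) == (10 + 3) / 4 == 3, where plain integer
 * division would truncate to 2. Callers must ensure divisor > 0 and that
 * num + divisor - 1 cannot overflow size_t.
 */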
76 
77 static void
78 _spdk_bs_claim_cluster(struct spdk_blob_store *bs, uint32_t cluster_num)
79 {
80 	assert(cluster_num < spdk_bit_array_capacity(bs->used_clusters));
81 	assert(spdk_bit_array_get(bs->used_clusters, cluster_num) == false);
82 	assert(bs->num_free_clusters > 0);
83 
84 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Claiming cluster %u\n", cluster_num);
85 
86 	spdk_bit_array_set(bs->used_clusters, cluster_num);
87 	bs->num_free_clusters--;
88 }
89 
90 static int
91 _spdk_blob_insert_cluster(struct spdk_blob *blob, uint32_t cluster_num, uint64_t cluster)
92 {
93 	uint64_t *cluster_lba = &blob->active.clusters[cluster_num];
94 
95 	_spdk_blob_verify_md_op(blob);
96 
97 	if (*cluster_lba != 0) {
98 		return -EEXIST;
99 	}
100 
101 	*cluster_lba = _spdk_bs_cluster_to_lba(blob->bs, cluster);
102 	return 0;
103 }
104 
105 static int
106 _spdk_bs_allocate_cluster(struct spdk_blob *blob, uint32_t cluster_num,
107 			  uint64_t *lowest_free_cluster, bool update_map)
108 {
109 	pthread_mutex_lock(&blob->bs->used_clusters_mutex);
110 	*lowest_free_cluster = spdk_bit_array_find_first_clear(blob->bs->used_clusters,
111 			       *lowest_free_cluster);
112 	if (*lowest_free_cluster >= blob->bs->total_clusters) {
113 		/* No more free clusters. Cannot satisfy the request */
114 		pthread_mutex_unlock(&blob->bs->used_clusters_mutex);
115 		return -ENOSPC;
116 	}
117 
118 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Claiming cluster %lu for blob %lu\n", *lowest_free_cluster, blob->id);
119 	_spdk_bs_claim_cluster(blob->bs, *lowest_free_cluster);
120 	pthread_mutex_unlock(&blob->bs->used_clusters_mutex);
121 
122 	if (update_map) {
123 		_spdk_blob_insert_cluster(blob, cluster_num, *lowest_free_cluster);
124 	}
125 
126 	return 0;
127 }
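
/*
 * Usage note for _spdk_bs_allocate_cluster(): *lowest_free_cluster is an
 * in/out scan cursor, so a caller claiming several clusters in a row (as
 * _spdk_blob_resize() does) can resume the bitmap search where the last
 * one left off. With update_map == false only the bitmap is updated; the
 * caller must insert the cluster into blob->active.clusters itself, which
 * the copy-on-write path does later on the metadata thread.
 */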
128 
129 static void
130 _spdk_bs_release_cluster(struct spdk_blob_store *bs, uint32_t cluster_num)
131 {
132 	assert(cluster_num < spdk_bit_array_capacity(bs->used_clusters));
133 	assert(spdk_bit_array_get(bs->used_clusters, cluster_num) == true);
134 	assert(bs->num_free_clusters < bs->total_clusters);
135 
136 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Releasing cluster %u\n", cluster_num);
137 
138 	pthread_mutex_lock(&bs->used_clusters_mutex);
139 	spdk_bit_array_clear(bs->used_clusters, cluster_num);
140 	bs->num_free_clusters++;
141 	pthread_mutex_unlock(&bs->used_clusters_mutex);
142 }
143 
144 static void
145 _spdk_blob_xattrs_init(struct spdk_blob_xattr_opts *xattrs)
146 {
147 	xattrs->count = 0;
148 	xattrs->names = NULL;
149 	xattrs->ctx = NULL;
150 	xattrs->get_value = NULL;
151 }
152 
153 void
154 spdk_blob_opts_init(struct spdk_blob_opts *opts)
155 {
156 	opts->num_clusters = 0;
157 	opts->thin_provision = false;
158 	_spdk_blob_xattrs_init(&opts->xattrs);
159 }
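
/*
 * Minimal creation sketch using these opts (illustrative only - error
 * handling is omitted, `bs`, `blob_create_cb` and `cb_arg` are assumed to
 * exist, and spdk_bs_create_blob_ext() is declared in spdk/blob.h rather
 * than in this file):
 *
 *	struct spdk_blob_opts opts;
 *
 *	spdk_blob_opts_init(&opts);
 *	opts.num_clusters = 4;
 *	opts.thin_provision = true;
 *	spdk_bs_create_blob_ext(bs, &opts, blob_create_cb, cb_arg);
 */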
160 
161 static struct spdk_blob *
162 _spdk_blob_alloc(struct spdk_blob_store *bs, spdk_blob_id id)
163 {
164 	struct spdk_blob *blob;
165 
166 	blob = calloc(1, sizeof(*blob));
167 	if (!blob) {
168 		return NULL;
169 	}
170 
171 	blob->id = id;
172 	blob->bs = bs;
173 
174 	blob->parent_id = SPDK_BLOBID_INVALID;
175 
176 	blob->state = SPDK_BLOB_STATE_DIRTY;
177 	blob->active.num_pages = 1;
178 	blob->active.pages = calloc(1, sizeof(*blob->active.pages));
179 	if (!blob->active.pages) {
180 		free(blob);
181 		return NULL;
182 	}
183 
184 	blob->active.pages[0] = _spdk_bs_blobid_to_page(id);
185 
186 	TAILQ_INIT(&blob->xattrs);
187 	TAILQ_INIT(&blob->xattrs_internal);
188 
189 	return blob;
190 }
191 
192 static void
193 _spdk_xattrs_free(struct spdk_xattr_tailq *xattrs)
194 {
195 	struct spdk_xattr	*xattr, *xattr_tmp;
196 
197 	TAILQ_FOREACH_SAFE(xattr, xattrs, link, xattr_tmp) {
198 		TAILQ_REMOVE(xattrs, xattr, link);
199 		free(xattr->name);
200 		free(xattr->value);
201 		free(xattr);
202 	}
203 }
204 
205 static void
206 _spdk_blob_free(struct spdk_blob *blob)
207 {
208 	assert(blob != NULL);
209 
210 	free(blob->active.clusters);
211 	free(blob->clean.clusters);
212 	free(blob->active.pages);
213 	free(blob->clean.pages);
214 
215 	_spdk_xattrs_free(&blob->xattrs);
216 	_spdk_xattrs_free(&blob->xattrs_internal);
217 
218 	if (blob->back_bs_dev) {
219 		blob->back_bs_dev->destroy(blob->back_bs_dev);
220 	}
221 
222 	free(blob);
223 }
224 
225 static int
226 _spdk_blob_mark_clean(struct spdk_blob *blob)
227 {
228 	uint64_t *clusters = NULL;
229 	uint32_t *pages = NULL;
230 
231 	assert(blob != NULL);
232 
233 	if (blob->active.num_clusters) {
234 		assert(blob->active.clusters);
235 		clusters = calloc(blob->active.num_clusters, sizeof(*blob->active.clusters));
236 		if (!clusters) {
237 			return -1;
238 		}
239 		memcpy(clusters, blob->active.clusters, blob->active.num_clusters * sizeof(*clusters));
240 	}
241 
242 	if (blob->active.num_pages) {
243 		assert(blob->active.pages);
244 		pages = calloc(blob->active.num_pages, sizeof(*blob->active.pages));
245 		if (!pages) {
246 			free(clusters);
247 			return -1;
248 		}
249 		memcpy(pages, blob->active.pages, blob->active.num_pages * sizeof(*pages));
250 	}
251 
252 	free(blob->clean.clusters);
253 	free(blob->clean.pages);
254 
255 	blob->clean.num_clusters = blob->active.num_clusters;
256 	blob->clean.clusters = blob->active.clusters;
257 	blob->clean.num_pages = blob->active.num_pages;
258 	blob->clean.pages = blob->active.pages;
259 
260 	blob->active.clusters = clusters;
261 	blob->active.pages = pages;
262 
263 	/* If the metadata was dirtied again while the metadata was being written to disk,
264 	 *  we do not want to revert the DIRTY state back to CLEAN here.
265 	 */
266 	if (blob->state == SPDK_BLOB_STATE_LOADING) {
267 		blob->state = SPDK_BLOB_STATE_CLEAN;
268 	}
269 
270 	return 0;
271 }
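
/*
 * After _spdk_blob_mark_clean() succeeds, blob->clean owns the cluster and
 * page arrays that were just persisted, while blob->active points at fresh
 * copies of the same data. Later metadata mutations touch only the active
 * copy, leaving clean as a consistent snapshot of the on-disk state until
 * the next persist completes.
 */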
272 
273 static int
274 _spdk_blob_deserialize_xattr(struct spdk_blob *blob,
275 			     struct spdk_blob_md_descriptor_xattr *desc_xattr, bool internal)
276 {
277 	struct spdk_xattr                       *xattr;
278 
279 	if (desc_xattr->length != sizeof(desc_xattr->name_length) +
280 	    sizeof(desc_xattr->value_length) +
281 	    desc_xattr->name_length + desc_xattr->value_length) {
282 		return -EINVAL;
283 	}
284 
285 	xattr = calloc(1, sizeof(*xattr));
286 	if (xattr == NULL) {
287 		return -ENOMEM;
288 	}
289 
290 	xattr->name = malloc(desc_xattr->name_length + 1);
291 	if (xattr->name == NULL) {
292 		free(xattr);
293 		return -ENOMEM;
294 	}
295 	memcpy(xattr->name, desc_xattr->name, desc_xattr->name_length);
296 	xattr->name[desc_xattr->name_length] = '\0';
297 
298 	xattr->value = malloc(desc_xattr->value_length);
299 	if (xattr->value == NULL) {
300 		free(xattr->name);
301 		free(xattr);
302 		return -ENOMEM;
303 	}
304 	xattr->value_len = desc_xattr->value_length;
305 	memcpy(xattr->value,
306 	       (void *)((uintptr_t)desc_xattr->name + desc_xattr->name_length),
307 	       desc_xattr->value_length);
308 
309 	TAILQ_INSERT_TAIL(internal ? &blob->xattrs_internal : &blob->xattrs, xattr, link);
310 
311 	return 0;
312 }
313 
315 static int
316 _spdk_blob_parse_page(const struct spdk_blob_md_page *page, struct spdk_blob *blob)
317 {
318 	struct spdk_blob_md_descriptor *desc;
319 	size_t	cur_desc = 0;
320 	void *tmp;
321 
322 	desc = (struct spdk_blob_md_descriptor *)page->descriptors;
323 	while (cur_desc < sizeof(page->descriptors)) {
324 		if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) {
325 			if (desc->length == 0) {
326 				/* If padding and length are 0, this terminates the page */
327 				break;
328 			}
329 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) {
330 			struct spdk_blob_md_descriptor_flags	*desc_flags;
331 
332 			desc_flags = (struct spdk_blob_md_descriptor_flags *)desc;
333 
334 			if (desc_flags->length != sizeof(*desc_flags) - sizeof(*desc)) {
335 				return -EINVAL;
336 			}
337 
338 			if ((desc_flags->invalid_flags | SPDK_BLOB_INVALID_FLAGS_MASK) !=
339 			    SPDK_BLOB_INVALID_FLAGS_MASK) {
340 				return -EINVAL;
341 			}
342 
343 			if ((desc_flags->data_ro_flags | SPDK_BLOB_DATA_RO_FLAGS_MASK) !=
344 			    SPDK_BLOB_DATA_RO_FLAGS_MASK) {
345 				blob->data_ro = true;
346 				blob->md_ro = true;
347 			}
348 
349 			if ((desc_flags->md_ro_flags | SPDK_BLOB_MD_RO_FLAGS_MASK) !=
350 			    SPDK_BLOB_MD_RO_FLAGS_MASK) {
351 				blob->md_ro = true;
352 			}
353 
354 			if ((desc_flags->data_ro_flags & SPDK_BLOB_READ_ONLY)) {
355 				blob->data_ro = true;
356 				blob->md_ro = true;
357 			}
358 
359 			blob->invalid_flags = desc_flags->invalid_flags;
360 			blob->data_ro_flags = desc_flags->data_ro_flags;
361 			blob->md_ro_flags = desc_flags->md_ro_flags;
362 
363 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT) {
364 			struct spdk_blob_md_descriptor_extent	*desc_extent;
365 			unsigned int				i, j;
366 			unsigned int				cluster_count = blob->active.num_clusters;
367 
368 			desc_extent = (struct spdk_blob_md_descriptor_extent *)desc;
369 
370 			if (desc_extent->length == 0 ||
371 			    (desc_extent->length % sizeof(desc_extent->extents[0]) != 0)) {
372 				return -EINVAL;
373 			}
374 
375 			for (i = 0; i < desc_extent->length / sizeof(desc_extent->extents[0]); i++) {
376 				for (j = 0; j < desc_extent->extents[i].length; j++) {
377 					if (!spdk_bit_array_get(blob->bs->used_clusters,
378 								desc_extent->extents[i].cluster_idx + j)) {
379 						return -EINVAL;
380 					}
381 					cluster_count++;
382 				}
383 			}
384 
385 			if (cluster_count == 0) {
386 				return -EINVAL;
387 			}
388 			tmp = realloc(blob->active.clusters, cluster_count * sizeof(uint64_t));
389 			if (tmp == NULL) {
390 				return -ENOMEM;
391 			}
392 			blob->active.clusters = tmp;
393 			blob->active.cluster_array_size = cluster_count;
394 
395 			for (i = 0; i < desc_extent->length / sizeof(desc_extent->extents[0]); i++) {
396 				for (j = 0; j < desc_extent->extents[i].length; j++) {
397 					if (desc_extent->extents[i].cluster_idx != 0) {
398 						blob->active.clusters[blob->active.num_clusters++] = _spdk_bs_cluster_to_lba(blob->bs,
399 								desc_extent->extents[i].cluster_idx + j);
400 					} else if (spdk_blob_is_thin_provisioned(blob)) {
401 						blob->active.clusters[blob->active.num_clusters++] = 0;
402 					} else {
403 						return -EINVAL;
404 					}
405 				}
406 			}
407 
408 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) {
409 			int rc;
410 
411 			rc = _spdk_blob_deserialize_xattr(blob,
412 							  (struct spdk_blob_md_descriptor_xattr *) desc, false);
413 			if (rc != 0) {
414 				return rc;
415 			}
416 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) {
417 			int rc;
418 
419 			rc = _spdk_blob_deserialize_xattr(blob,
420 							  (struct spdk_blob_md_descriptor_xattr *) desc, true);
421 			if (rc != 0) {
422 				return rc;
423 			}
424 		} else {
425 			/* Unrecognized descriptor type.  Do not fail - just continue to the
426 			 *  next descriptor.  If this descriptor is associated with some feature
427 			 *  defined in a newer version of blobstore, that version of blobstore
428 			 *  should create and set an associated feature flag to specify if this
429 			 *  blob can be loaded or not.
430 			 */
431 		}
432 
433 		/* Advance to the next descriptor */
434 		cur_desc += sizeof(*desc) + desc->length;
435 		if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) {
436 			break;
437 		}
438 		desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc);
439 	}
440 
441 	return 0;
442 }
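
/*
 * On-disk layout assumed by the parser above: page->descriptors is a byte
 * array packed with back-to-back descriptors, each a {type, length} header
 * followed by `length` bytes of payload. The walk terminates at a
 * zero-length PADDING descriptor or once fewer than sizeof(struct
 * spdk_blob_md_descriptor) bytes remain in the page.
 */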
443 
444 static int
445 _spdk_blob_parse(const struct spdk_blob_md_page *pages, uint32_t page_count,
446 		 struct spdk_blob *blob)
447 {
448 	const struct spdk_blob_md_page *page;
449 	uint32_t i;
450 	int rc;
451 
452 	assert(page_count > 0);
453 	assert(pages[0].sequence_num == 0);
454 	assert(blob != NULL);
455 	assert(blob->state == SPDK_BLOB_STATE_LOADING);
456 	assert(blob->active.clusters == NULL);
457 
	/* The blobid provided doesn't match what's in the MD. This can
	 * happen, for example, if a bogus blobid is passed in through open.
	 */
461 	if (blob->id != pages[0].id) {
462 		SPDK_ERRLOG("Blobid (%lu) doesn't match what's in metadata (%lu)\n",
463 			    blob->id, pages[0].id);
464 		return -ENOENT;
465 	}
466 
467 	for (i = 0; i < page_count; i++) {
468 		page = &pages[i];
469 
470 		assert(page->id == blob->id);
471 		assert(page->sequence_num == i);
472 
473 		rc = _spdk_blob_parse_page(page, blob);
474 		if (rc != 0) {
475 			return rc;
476 		}
477 	}
478 
479 	return 0;
480 }
481 
482 static int
483 _spdk_blob_serialize_add_page(const struct spdk_blob *blob,
484 			      struct spdk_blob_md_page **pages,
485 			      uint32_t *page_count,
486 			      struct spdk_blob_md_page **last_page)
487 {
488 	struct spdk_blob_md_page *page;
489 
490 	assert(pages != NULL);
491 	assert(page_count != NULL);
492 
493 	if (*page_count == 0) {
494 		assert(*pages == NULL);
495 		*page_count = 1;
496 		*pages = spdk_dma_malloc(SPDK_BS_PAGE_SIZE,
497 					 SPDK_BS_PAGE_SIZE,
498 					 NULL);
499 	} else {
500 		assert(*pages != NULL);
501 		(*page_count)++;
502 		*pages = spdk_dma_realloc(*pages,
503 					  SPDK_BS_PAGE_SIZE * (*page_count),
504 					  SPDK_BS_PAGE_SIZE,
505 					  NULL);
506 	}
507 
508 	if (*pages == NULL) {
509 		*page_count = 0;
510 		*last_page = NULL;
511 		return -ENOMEM;
512 	}
513 
514 	page = &(*pages)[*page_count - 1];
515 	memset(page, 0, sizeof(*page));
516 	page->id = blob->id;
517 	page->sequence_num = *page_count - 1;
518 	page->next = SPDK_INVALID_MD_PAGE;
519 	*last_page = page;
520 
521 	return 0;
522 }
523 
/* Transform the in-memory representation 'xattr' into an on-disk xattr
 * descriptor. required_sz is updated on both success and failure, so a
 * caller that ran out of buffer space knows how much room a retry needs.
 */
528 static int
529 _spdk_blob_serialize_xattr(const struct spdk_xattr *xattr,
530 			   uint8_t *buf, size_t buf_sz,
531 			   size_t *required_sz, bool internal)
532 {
533 	struct spdk_blob_md_descriptor_xattr	*desc;
534 
535 	*required_sz = sizeof(struct spdk_blob_md_descriptor_xattr) +
536 		       strlen(xattr->name) +
537 		       xattr->value_len;
538 
539 	if (buf_sz < *required_sz) {
540 		return -1;
541 	}
542 
543 	desc = (struct spdk_blob_md_descriptor_xattr *)buf;
544 
545 	desc->type = internal ? SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL : SPDK_MD_DESCRIPTOR_TYPE_XATTR;
546 	desc->length = sizeof(desc->name_length) +
547 		       sizeof(desc->value_length) +
548 		       strlen(xattr->name) +
549 		       xattr->value_len;
550 	desc->name_length = strlen(xattr->name);
551 	desc->value_length = xattr->value_len;
552 
553 	memcpy(desc->name, xattr->name, desc->name_length);
554 	memcpy((void *)((uintptr_t)desc->name + desc->name_length),
555 	       xattr->value,
556 	       desc->value_length);
557 
558 	return 0;
559 }
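
/*
 * The xattr descriptor payload produced above is laid out as
 * [name_length][value_length][name bytes][value bytes] with no padding;
 * the name's terminating NUL is not stored (it is re-added on load). On
 * failure, *required_sz tells the caller how much descriptor space to make
 * available - typically by starting a new metadata page - before retrying.
 */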
560 
561 static void
562 _spdk_blob_serialize_extent(const struct spdk_blob *blob,
563 			    uint64_t start_cluster, uint64_t *next_cluster,
564 			    uint8_t *buf, size_t buf_sz)
565 {
566 	struct spdk_blob_md_descriptor_extent *desc;
567 	size_t cur_sz;
568 	uint64_t i, extent_idx;
569 	uint32_t lba, lba_per_cluster, lba_count;
570 
571 	/* The buffer must have room for at least one extent */
572 	cur_sz = sizeof(struct spdk_blob_md_descriptor) + sizeof(desc->extents[0]);
573 	if (buf_sz < cur_sz) {
574 		*next_cluster = start_cluster;
575 		return;
576 	}
577 
578 	desc = (struct spdk_blob_md_descriptor_extent *)buf;
579 	desc->type = SPDK_MD_DESCRIPTOR_TYPE_EXTENT;
580 
581 	lba_per_cluster = _spdk_bs_cluster_to_lba(blob->bs, 1);
582 
583 	lba = blob->active.clusters[start_cluster];
584 	lba_count = lba_per_cluster;
585 	extent_idx = 0;
586 	for (i = start_cluster + 1; i < blob->active.num_clusters; i++) {
587 		if ((lba + lba_count) == blob->active.clusters[i]) {
588 			lba_count += lba_per_cluster;
589 			continue;
590 		}
591 		desc->extents[extent_idx].cluster_idx = lba / lba_per_cluster;
592 		desc->extents[extent_idx].length = lba_count / lba_per_cluster;
593 		extent_idx++;
594 
		cur_sz += sizeof(desc->extents[0]);
596 
597 		if (buf_sz < cur_sz) {
598 			/* If we ran out of buffer space, return */
599 			desc->length = sizeof(desc->extents[0]) * extent_idx;
600 			*next_cluster = i;
601 			return;
602 		}
603 
604 		lba = blob->active.clusters[i];
605 		lba_count = lba_per_cluster;
606 	}
607 
608 	desc->extents[extent_idx].cluster_idx = lba / lba_per_cluster;
609 	desc->extents[extent_idx].length = lba_count / lba_per_cluster;
610 	extent_idx++;
611 
612 	desc->length = sizeof(desc->extents[0]) * extent_idx;
613 	*next_cluster = blob->active.num_clusters;
614 
615 	return;
616 }
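
/*
 * The extent descriptor is a simple run-length encoding. For illustration,
 * assume one cluster spans 64 LBAs: a blob whose clusters sit at LBAs 64,
 * 128 and 192 is contiguous and serializes to the single extent
 * { cluster_idx = 1, length = 3 } instead of three one-cluster extents. A
 * run breaks whenever the next cluster's LBA is not exactly lba + lba_count,
 * or when the descriptor buffer runs out of room.
 */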
617 
618 static void
619 _spdk_blob_serialize_flags(const struct spdk_blob *blob,
620 			   uint8_t *buf, size_t *buf_sz)
621 {
622 	struct spdk_blob_md_descriptor_flags *desc;
623 
624 	/*
625 	 * Flags get serialized first, so we should always have room for the flags
626 	 *  descriptor.
627 	 */
628 	assert(*buf_sz >= sizeof(*desc));
629 
630 	desc = (struct spdk_blob_md_descriptor_flags *)buf;
631 	desc->type = SPDK_MD_DESCRIPTOR_TYPE_FLAGS;
632 	desc->length = sizeof(*desc) - sizeof(struct spdk_blob_md_descriptor);
633 	desc->invalid_flags = blob->invalid_flags;
634 	desc->data_ro_flags = blob->data_ro_flags;
635 	desc->md_ro_flags = blob->md_ro_flags;
636 
637 	*buf_sz -= sizeof(*desc);
638 }
639 
640 static int
641 _spdk_blob_serialize_xattrs(const struct spdk_blob *blob,
642 			    const struct spdk_xattr_tailq *xattrs, bool internal,
643 			    struct spdk_blob_md_page **pages,
644 			    struct spdk_blob_md_page *cur_page,
645 			    uint32_t *page_count, uint8_t **buf,
646 			    size_t *remaining_sz)
647 {
648 	const struct spdk_xattr	*xattr;
649 	int	rc;
650 
651 	TAILQ_FOREACH(xattr, xattrs, link) {
652 		size_t required_sz = 0;
653 
654 		rc = _spdk_blob_serialize_xattr(xattr,
655 						*buf, *remaining_sz,
656 						&required_sz, internal);
657 		if (rc < 0) {
658 			/* Need to add a new page to the chain */
659 			rc = _spdk_blob_serialize_add_page(blob, pages, page_count,
660 							   &cur_page);
661 			if (rc < 0) {
662 				spdk_dma_free(*pages);
663 				*pages = NULL;
664 				*page_count = 0;
665 				return rc;
666 			}
667 
668 			*buf = (uint8_t *)cur_page->descriptors;
669 			*remaining_sz = sizeof(cur_page->descriptors);
670 
671 			/* Try again */
672 			required_sz = 0;
673 			rc = _spdk_blob_serialize_xattr(xattr,
674 							*buf, *remaining_sz,
675 							&required_sz, internal);
676 
677 			if (rc < 0) {
678 				spdk_dma_free(*pages);
679 				*pages = NULL;
680 				*page_count = 0;
681 				return -1;
682 			}
683 		}
684 
685 		*remaining_sz -= required_sz;
686 		*buf += required_sz;
687 	}
688 
689 	return 0;
690 }
691 
692 static int
693 _spdk_blob_serialize(const struct spdk_blob *blob, struct spdk_blob_md_page **pages,
694 		     uint32_t *page_count)
695 {
696 	struct spdk_blob_md_page		*cur_page;
697 	int					rc;
698 	uint8_t					*buf;
699 	size_t					remaining_sz;
700 	uint64_t				last_cluster;
701 
702 	assert(pages != NULL);
703 	assert(page_count != NULL);
704 	assert(blob != NULL);
705 	assert(blob->state == SPDK_BLOB_STATE_DIRTY);
706 
707 	*pages = NULL;
708 	*page_count = 0;
709 
710 	/* A blob always has at least 1 page, even if it has no descriptors */
711 	rc = _spdk_blob_serialize_add_page(blob, pages, page_count, &cur_page);
712 	if (rc < 0) {
713 		return rc;
714 	}
715 
716 	buf = (uint8_t *)cur_page->descriptors;
717 	remaining_sz = sizeof(cur_page->descriptors);
718 
719 	/* Serialize flags */
720 	_spdk_blob_serialize_flags(blob, buf, &remaining_sz);
721 	buf += sizeof(struct spdk_blob_md_descriptor_flags);
722 
723 	/* Serialize xattrs */
724 	rc = _spdk_blob_serialize_xattrs(blob, &blob->xattrs, false,
725 					 pages, cur_page, page_count, &buf, &remaining_sz);
726 	if (rc < 0) {
727 		return rc;
728 	}
729 
730 	/* Serialize internal xattrs */
731 	rc = _spdk_blob_serialize_xattrs(blob, &blob->xattrs_internal, true,
732 					 pages, cur_page, page_count, &buf, &remaining_sz);
733 	if (rc < 0) {
734 		return rc;
735 	}
736 
737 	/* Serialize extents */
738 	last_cluster = 0;
739 	while (last_cluster < blob->active.num_clusters) {
740 		_spdk_blob_serialize_extent(blob, last_cluster, &last_cluster,
741 					    buf, remaining_sz);
742 
743 		if (last_cluster == blob->active.num_clusters) {
744 			break;
745 		}
746 
747 		rc = _spdk_blob_serialize_add_page(blob, pages, page_count,
748 						   &cur_page);
749 		if (rc < 0) {
750 			return rc;
751 		}
752 
753 		buf = (uint8_t *)cur_page->descriptors;
754 		remaining_sz = sizeof(cur_page->descriptors);
755 	}
756 
757 	return 0;
758 }
759 
760 struct spdk_blob_load_ctx {
761 	struct spdk_blob		*blob;
762 
763 	struct spdk_blob_md_page	*pages;
764 	uint32_t			num_pages;
765 	spdk_bs_sequence_t	        *seq;
766 
767 	spdk_bs_sequence_cpl		cb_fn;
768 	void				*cb_arg;
769 };
770 
771 static uint32_t
772 _spdk_blob_md_page_calc_crc(void *page)
773 {
774 	uint32_t		crc;
775 
776 	crc = BLOB_CRC32C_INITIAL;
777 	crc = spdk_crc32c_update(page, SPDK_BS_PAGE_SIZE - 4, crc);
778 	crc ^= BLOB_CRC32C_INITIAL;
779 
780 	return crc;
782 }
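
/*
 * The CRC above covers the first SPDK_BS_PAGE_SIZE - 4 bytes of the page,
 * which assumes the 4-byte crc field sits at the very end of the 4 KiB
 * metadata page. Seeding with 0xffffffff and XOR-ing the result with the
 * same constant is the conventional CRC-32C initial/final conditioning.
 */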
783 
784 static void
785 _spdk_blob_load_final(void *cb_arg, int bserrno)
786 {
787 	struct spdk_blob_load_ctx	*ctx = cb_arg;
788 	struct spdk_blob		*blob = ctx->blob;
789 
790 	_spdk_blob_mark_clean(blob);
791 
792 	ctx->cb_fn(ctx->seq, ctx->cb_arg, bserrno);
793 
794 	/* Free the memory */
795 	spdk_dma_free(ctx->pages);
796 	free(ctx);
797 }
798 
799 static void
800 _spdk_blob_load_snapshot_cpl(void *cb_arg, struct spdk_blob *snapshot, int bserrno)
801 {
802 	struct spdk_blob_load_ctx	*ctx = cb_arg;
803 	struct spdk_blob		*blob = ctx->blob;
804 
805 	if (bserrno != 0) {
806 		goto error;
807 	}
808 
809 	blob->back_bs_dev = spdk_bs_create_blob_bs_dev(snapshot);
810 
811 	if (blob->back_bs_dev == NULL) {
812 		bserrno = -ENOMEM;
813 		goto error;
814 	}
815 
816 	_spdk_blob_load_final(ctx, bserrno);
817 	return;
818 
819 error:
	SPDK_ERRLOG("Failed to load snapshot\n");
821 	_spdk_blob_free(blob);
822 	ctx->cb_fn(ctx->seq, NULL, bserrno);
823 	spdk_dma_free(ctx->pages);
824 	free(ctx);
825 }
826 
827 static void
828 _spdk_blob_load_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
829 {
830 	struct spdk_blob_load_ctx	*ctx = cb_arg;
831 	struct spdk_blob		*blob = ctx->blob;
832 	struct spdk_blob_md_page	*page;
833 	const void			*value;
834 	size_t				len;
835 	int				rc;
	uint32_t			crc;
	void				*tmp;
837 
838 	page = &ctx->pages[ctx->num_pages - 1];
839 	crc = _spdk_blob_md_page_calc_crc(page);
840 	if (crc != page->crc) {
841 		SPDK_ERRLOG("Metadata page %d crc mismatch\n", ctx->num_pages);
842 		_spdk_blob_free(blob);
843 		ctx->cb_fn(seq, NULL, -EINVAL);
844 		spdk_dma_free(ctx->pages);
845 		free(ctx);
846 		return;
847 	}
848 
849 	if (page->next != SPDK_INVALID_MD_PAGE) {
850 		uint32_t next_page = page->next;
851 		uint64_t next_lba = _spdk_bs_page_to_lba(blob->bs, blob->bs->md_start + next_page);
852 
		assert(next_lba < _spdk_bs_page_to_lba(blob->bs, blob->bs->md_start + blob->bs->md_len));
855 
856 		/* Read the next page */
		ctx->num_pages++;
		tmp = spdk_dma_realloc(ctx->pages, (sizeof(*page) * ctx->num_pages),
				       sizeof(*page), NULL);
		if (tmp == NULL) {
			/* Match the other error paths: free the blob and the
			 * original (still valid) page buffer, then the ctx. */
			_spdk_blob_free(blob);
			ctx->cb_fn(seq, NULL, -ENOMEM);
			spdk_dma_free(ctx->pages);
			free(ctx);
			return;
		}
		ctx->pages = tmp;
865 
866 		spdk_bs_sequence_read_dev(seq, &ctx->pages[ctx->num_pages - 1],
867 					  next_lba,
868 					  _spdk_bs_byte_to_lba(blob->bs, sizeof(*page)),
869 					  _spdk_blob_load_cpl, ctx);
870 		return;
871 	}
872 
873 	/* Parse the pages */
874 	rc = _spdk_blob_parse(ctx->pages, ctx->num_pages, blob);
875 	if (rc) {
876 		_spdk_blob_free(blob);
877 		ctx->cb_fn(seq, NULL, rc);
878 		spdk_dma_free(ctx->pages);
879 		free(ctx);
880 		return;
881 	}
882 	ctx->seq = seq;
883 
885 	if (spdk_blob_is_thin_provisioned(blob)) {
886 		rc = _spdk_blob_get_xattr_value(blob, BLOB_SNAPSHOT, &value, &len, true);
887 		if (rc == 0) {
888 			if (len != sizeof(spdk_blob_id)) {
889 				_spdk_blob_free(blob);
890 				ctx->cb_fn(seq, NULL, -EINVAL);
891 				spdk_dma_free(ctx->pages);
892 				free(ctx);
893 				return;
894 			}
895 			/* open snapshot blob and continue in the callback function */
896 			blob->parent_id = *(spdk_blob_id *)value;
897 			spdk_bs_open_blob(blob->bs, blob->parent_id,
898 					  _spdk_blob_load_snapshot_cpl, ctx);
899 			return;
900 		} else {
901 			/* add zeroes_dev for thin provisioned blob */
902 			blob->back_bs_dev = spdk_bs_create_zeroes_dev();
903 		}
904 	} else {
905 		/* standard blob */
906 		blob->back_bs_dev = NULL;
907 	}
908 	_spdk_blob_load_final(ctx, bserrno);
909 }
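
/*
 * Metadata pages form a singly linked on-disk list: each page's `next`
 * field holds the md page index of its successor, terminated by
 * SPDK_INVALID_MD_PAGE. _spdk_blob_load_cpl() therefore re-arms itself as
 * the read completion callback, growing ctx->pages by one page per hop,
 * and only parses the chain once the tail has been read.
 */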
910 
911 /* Load a blob from disk given a blobid */
912 static void
913 _spdk_blob_load(spdk_bs_sequence_t *seq, struct spdk_blob *blob,
914 		spdk_bs_sequence_cpl cb_fn, void *cb_arg)
915 {
916 	struct spdk_blob_load_ctx *ctx;
917 	struct spdk_blob_store *bs;
918 	uint32_t page_num;
919 	uint64_t lba;
920 
921 	_spdk_blob_verify_md_op(blob);
922 
923 	bs = blob->bs;
924 
925 	ctx = calloc(1, sizeof(*ctx));
926 	if (!ctx) {
927 		cb_fn(seq, cb_arg, -ENOMEM);
928 		return;
929 	}
930 
931 	ctx->blob = blob;
	ctx->pages = spdk_dma_malloc(SPDK_BS_PAGE_SIZE,
				     SPDK_BS_PAGE_SIZE, NULL);
934 	if (!ctx->pages) {
935 		free(ctx);
936 		cb_fn(seq, cb_arg, -ENOMEM);
937 		return;
938 	}
939 	ctx->num_pages = 1;
940 	ctx->cb_fn = cb_fn;
941 	ctx->cb_arg = cb_arg;
942 
943 	page_num = _spdk_bs_blobid_to_page(blob->id);
944 	lba = _spdk_bs_page_to_lba(blob->bs, bs->md_start + page_num);
945 
946 	blob->state = SPDK_BLOB_STATE_LOADING;
947 
948 	spdk_bs_sequence_read_dev(seq, &ctx->pages[0], lba,
949 				  _spdk_bs_byte_to_lba(bs, SPDK_BS_PAGE_SIZE),
950 				  _spdk_blob_load_cpl, ctx);
951 }
952 
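/*
 * The persist path below is written callback-first, so it reads in reverse
 * of execution: _spdk_blob_persist_start() serializes the metadata, the
 * page chain (every page except the root) is written, then the root page,
 * then pages orphaned by the update are zeroed on disk and released from
 * used_md_pages, and finally truncated clusters are unmapped and released.
 */
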
953 struct spdk_blob_persist_ctx {
954 	struct spdk_blob		*blob;
955 
956 	struct spdk_blob_md_page	*pages;
957 
958 	uint64_t			idx;
959 
960 	spdk_bs_sequence_t		*seq;
961 	spdk_bs_sequence_cpl		cb_fn;
962 	void				*cb_arg;
963 };
964 
965 static void
966 _spdk_blob_persist_complete(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
967 {
968 	struct spdk_blob_persist_ctx	*ctx = cb_arg;
969 	struct spdk_blob		*blob = ctx->blob;
970 
971 	if (bserrno == 0) {
972 		_spdk_blob_mark_clean(blob);
973 	}
974 
975 	/* Call user callback */
976 	ctx->cb_fn(seq, ctx->cb_arg, bserrno);
977 
978 	/* Free the memory */
979 	spdk_dma_free(ctx->pages);
980 	free(ctx);
981 }
982 
983 static void
984 _spdk_blob_persist_unmap_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
985 {
986 	struct spdk_blob_persist_ctx	*ctx = cb_arg;
987 	struct spdk_blob		*blob = ctx->blob;
988 	struct spdk_blob_store		*bs = blob->bs;
989 	void				*tmp;
990 	size_t				i;
991 
992 	/* Release all clusters that were truncated */
993 	for (i = blob->active.num_clusters; i < blob->active.cluster_array_size; i++) {
994 		uint32_t cluster_num = _spdk_bs_lba_to_cluster(bs, blob->active.clusters[i]);
995 
996 		/* Nothing to release if it was not allocated */
997 		if (blob->active.clusters[i] != 0) {
998 			_spdk_bs_release_cluster(bs, cluster_num);
999 		}
1000 	}
1001 
1002 	if (blob->active.num_clusters == 0) {
1003 		free(blob->active.clusters);
1004 		blob->active.clusters = NULL;
1005 		blob->active.cluster_array_size = 0;
1006 	} else {
1007 		tmp = realloc(blob->active.clusters, sizeof(uint64_t) * blob->active.num_clusters);
1008 		assert(tmp != NULL);
1009 		blob->active.clusters = tmp;
1010 		blob->active.cluster_array_size = blob->active.num_clusters;
1011 	}
1012 
1013 	_spdk_blob_persist_complete(seq, ctx, bserrno);
1014 }
1015 
1016 static void
1017 _spdk_blob_persist_unmap_clusters(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
1018 {
1019 	struct spdk_blob_persist_ctx	*ctx = cb_arg;
1020 	struct spdk_blob		*blob = ctx->blob;
1021 	struct spdk_blob_store		*bs = blob->bs;
1022 	spdk_bs_batch_t			*batch;
1023 	size_t				i;
1024 	uint64_t			lba;
1025 	uint32_t			lba_count;
1026 
1027 	/* Clusters don't move around in blobs. The list shrinks or grows
1028 	 * at the end, but no changes ever occur in the middle of the list.
1029 	 */
1030 
1031 	batch = spdk_bs_sequence_to_batch(seq, _spdk_blob_persist_unmap_clusters_cpl, ctx);
1032 
1033 	/* Unmap all clusters that were truncated */
1034 	lba = 0;
1035 	lba_count = 0;
1036 	for (i = blob->active.num_clusters; i < blob->active.cluster_array_size; i++) {
1037 		uint64_t next_lba = blob->active.clusters[i];
1038 		uint32_t next_lba_count = _spdk_bs_cluster_to_lba(bs, 1);
1039 
1040 		if (next_lba > 0 && (lba + lba_count) == next_lba) {
1041 			/* This cluster is contiguous with the previous one. */
1042 			lba_count += next_lba_count;
1043 			continue;
1044 		}
1045 
1046 		/* This cluster is not contiguous with the previous one. */
1047 
		/* If a run of allocated LBAs was being tracked, send it
		 * as a single unmap before starting the next run.
		 */
1051 		if (lba_count > 0) {
1052 			spdk_bs_batch_unmap_dev(batch, lba, lba_count);
1053 		}
1054 
1055 		/* Start building the next batch */
1056 		lba = next_lba;
1057 		if (next_lba > 0) {
1058 			lba_count = next_lba_count;
1059 		} else {
1060 			lba_count = 0;
1061 		}
1062 	}
1063 
1064 	/* If we ended with a contiguous set of LBAs, send the unmap now */
1065 	if (lba_count > 0) {
1066 		spdk_bs_batch_unmap_dev(batch, lba, lba_count);
1067 	}
1068 
1069 	spdk_bs_batch_close(batch);
1070 }
1071 
1072 static void
1073 _spdk_blob_persist_zero_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
1074 {
1075 	struct spdk_blob_persist_ctx	*ctx = cb_arg;
1076 	struct spdk_blob		*blob = ctx->blob;
1077 	struct spdk_blob_store		*bs = blob->bs;
1078 	size_t				i;
1079 
	/* This loop starts at 1 because the first page is special and handled
	 * below. The previous step already zeroed the old non-root pages on
	 * disk, so here they only need to be released in the used_md_pages
	 * bit array.
	 */
1084 	for (i = 1; i < blob->clean.num_pages; i++) {
1085 		spdk_bit_array_clear(bs->used_md_pages, blob->clean.pages[i]);
1086 	}
1087 
1088 	if (blob->active.num_pages == 0) {
1089 		uint32_t page_num;
1090 
1091 		page_num = _spdk_bs_blobid_to_page(blob->id);
1092 		spdk_bit_array_clear(bs->used_md_pages, page_num);
1093 	}
1094 
1095 	/* Move on to unmapping clusters */
1096 	_spdk_blob_persist_unmap_clusters(seq, ctx, 0);
1097 }
1098 
1099 static void
1100 _spdk_blob_persist_zero_pages(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
1101 {
1102 	struct spdk_blob_persist_ctx	*ctx = cb_arg;
1103 	struct spdk_blob		*blob = ctx->blob;
1104 	struct spdk_blob_store		*bs = blob->bs;
1105 	uint64_t			lba;
1106 	uint32_t			lba_count;
1107 	spdk_bs_batch_t			*batch;
1108 	size_t				i;
1109 
1110 	batch = spdk_bs_sequence_to_batch(seq, _spdk_blob_persist_zero_pages_cpl, ctx);
1111 
1112 	lba_count = _spdk_bs_byte_to_lba(bs, SPDK_BS_PAGE_SIZE);
1113 
1114 	/* This loop starts at 1 because the first page is special and handled
1115 	 * below. The pages (except the first) are never written in place,
1116 	 * so any pages in the clean list must be zeroed.
1117 	 */
1118 	for (i = 1; i < blob->clean.num_pages; i++) {
1119 		lba = _spdk_bs_page_to_lba(bs, bs->md_start + blob->clean.pages[i]);
1120 
1121 		spdk_bs_batch_write_zeroes_dev(batch, lba, lba_count);
1122 	}
1123 
1124 	/* The first page will only be zeroed if this is a delete. */
1125 	if (blob->active.num_pages == 0) {
1126 		uint32_t page_num;
1127 
1128 		/* The first page in the metadata goes where the blobid indicates */
1129 		page_num = _spdk_bs_blobid_to_page(blob->id);
1130 		lba = _spdk_bs_page_to_lba(bs, bs->md_start + page_num);
1131 
1132 		spdk_bs_batch_write_zeroes_dev(batch, lba, lba_count);
1133 	}
1134 
1135 	spdk_bs_batch_close(batch);
1136 }
1137 
1138 static void
1139 _spdk_blob_persist_write_page_root(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
1140 {
1141 	struct spdk_blob_persist_ctx	*ctx = cb_arg;
1142 	struct spdk_blob		*blob = ctx->blob;
1143 	struct spdk_blob_store		*bs = blob->bs;
1144 	uint64_t			lba;
1145 	uint32_t			lba_count;
1146 	struct spdk_blob_md_page	*page;
1147 
1148 	if (blob->active.num_pages == 0) {
1149 		/* Move on to the next step */
1150 		_spdk_blob_persist_zero_pages(seq, ctx, 0);
1151 		return;
1152 	}
1153 
1154 	lba_count = _spdk_bs_byte_to_lba(bs, sizeof(*page));
1155 
1156 	page = &ctx->pages[0];
1157 	/* The first page in the metadata goes where the blobid indicates */
1158 	lba = _spdk_bs_page_to_lba(bs, bs->md_start + _spdk_bs_blobid_to_page(blob->id));
1159 
1160 	spdk_bs_sequence_write_dev(seq, page, lba, lba_count,
1161 				   _spdk_blob_persist_zero_pages, ctx);
1162 }
1163 
1164 static void
1165 _spdk_blob_persist_write_page_chain(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
1166 {
1167 	struct spdk_blob_persist_ctx	*ctx = cb_arg;
1168 	struct spdk_blob		*blob = ctx->blob;
1169 	struct spdk_blob_store		*bs = blob->bs;
1170 	uint64_t			lba;
1171 	uint32_t			lba_count;
1172 	struct spdk_blob_md_page	*page;
1173 	spdk_bs_batch_t			*batch;
1174 	size_t				i;
1175 
1176 	/* Clusters don't move around in blobs. The list shrinks or grows
1177 	 * at the end, but no changes ever occur in the middle of the list.
1178 	 */
1179 
1180 	lba_count = _spdk_bs_byte_to_lba(bs, sizeof(*page));
1181 
1182 	batch = spdk_bs_sequence_to_batch(seq, _spdk_blob_persist_write_page_root, ctx);
1183 
	/* This starts at 1. The root page is not written until
	 * all of the other pages are finished.
	 */
1187 	for (i = 1; i < blob->active.num_pages; i++) {
1188 		page = &ctx->pages[i];
1189 		assert(page->sequence_num == i);
1190 
1191 		lba = _spdk_bs_page_to_lba(bs, bs->md_start + blob->active.pages[i]);
1192 
1193 		spdk_bs_batch_write_dev(batch, page, lba, lba_count);
1194 	}
1195 
1196 	spdk_bs_batch_close(batch);
1197 }
1198 
1199 static int
1200 _spdk_blob_resize(struct spdk_blob *blob, uint64_t sz)
1201 {
1202 	uint64_t	i;
1203 	uint64_t	*tmp;
1204 	uint64_t	lfc; /* lowest free cluster */
1205 	uint64_t	num_clusters;
1206 	struct spdk_blob_store *bs;
1207 
1208 	bs = blob->bs;
1209 
1210 	_spdk_blob_verify_md_op(blob);
1211 
1212 	if (blob->active.num_clusters == sz) {
1213 		return 0;
1214 	}
1215 
1216 	if (blob->active.num_clusters < blob->active.cluster_array_size) {
1217 		/* If this blob was resized to be larger, then smaller, then
1218 		 * larger without syncing, then the cluster array already
1219 		 * contains spare assigned clusters we can use.
1220 		 */
1221 		num_clusters = spdk_min(blob->active.cluster_array_size,
1222 					sz);
1223 	} else {
1224 		num_clusters = blob->active.num_clusters;
1225 	}
1226 
1227 	/* Do two passes - one to verify that we can obtain enough clusters
1228 	 * and another to actually claim them.
1229 	 */
1230 
1231 	if (spdk_blob_is_thin_provisioned(blob) == false) {
1232 		lfc = 0;
1233 		for (i = num_clusters; i < sz; i++) {
1234 			lfc = spdk_bit_array_find_first_clear(bs->used_clusters, lfc);
1235 			if (lfc >= bs->total_clusters) {
1236 				/* No more free clusters. Cannot satisfy the request */
1237 				return -ENOSPC;
1238 			}
1239 			lfc++;
1240 		}
1241 	}
1242 
1243 	if (sz > num_clusters) {
1244 		/* Expand the cluster array if necessary.
1245 		 * We only shrink the array when persisting.
1246 		 */
1247 		tmp = realloc(blob->active.clusters, sizeof(uint64_t) * sz);
1248 		if (sz > 0 && tmp == NULL) {
1249 			return -ENOMEM;
1250 		}
1251 		memset(tmp + blob->active.cluster_array_size, 0,
1252 		       sizeof(uint64_t) * (sz - blob->active.cluster_array_size));
1253 		blob->active.clusters = tmp;
1254 		blob->active.cluster_array_size = sz;
1255 	}
1256 
1257 	blob->state = SPDK_BLOB_STATE_DIRTY;
1258 
1259 	if (spdk_blob_is_thin_provisioned(blob) == false) {
1260 		lfc = 0;
1261 		for (i = num_clusters; i < sz; i++) {
1262 			_spdk_bs_allocate_cluster(blob, i, &lfc, true);
1263 			lfc++;
1264 		}
1265 	}
1266 
1267 	blob->active.num_clusters = sz;
1268 
1269 	return 0;
1270 }
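
/*
 * Note that both cluster-claiming passes above are skipped for
 * thin-provisioned blobs: resizing such a blob only grows the zero-filled
 * cluster array, and real clusters are claimed on first write via
 * _spdk_bs_allocate_cluster() in the copy-on-write path.
 */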
1271 
1272 static void
1273 _spdk_blob_persist_start(struct spdk_blob_persist_ctx *ctx)
1274 {
1275 	spdk_bs_sequence_t *seq = ctx->seq;
1276 	struct spdk_blob *blob = ctx->blob;
1277 	struct spdk_blob_store *bs = blob->bs;
1278 	uint64_t i;
1279 	uint32_t page_num;
	void *tmp;
	int rc;
1281 
1282 	if (blob->active.num_pages == 0) {
1283 		/* This is the signal that the blob should be deleted.
1284 		 * Immediately jump to the clean up routine. */
1285 		assert(blob->clean.num_pages > 0);
1286 		ctx->idx = blob->clean.num_pages - 1;
1287 		blob->state = SPDK_BLOB_STATE_CLEAN;
1288 		_spdk_blob_persist_zero_pages(seq, ctx, 0);
1289 		return;
1290 
1291 	}
1292 
1293 	/* Generate the new metadata */
1294 	rc = _spdk_blob_serialize(blob, &ctx->pages, &blob->active.num_pages);
1295 	if (rc < 0) {
1296 		_spdk_blob_persist_complete(seq, ctx, rc);
1297 		return;
1298 	}
1299 
1300 	assert(blob->active.num_pages >= 1);
1301 
	/* Resize the cache of page indices. Use a temporary so that the old
	 * array is neither leaked nor lost if the allocation fails. */
	tmp = realloc(blob->active.pages,
		      blob->active.num_pages * sizeof(*blob->active.pages));
	if (!tmp) {
		_spdk_blob_persist_complete(seq, ctx, -ENOMEM);
		return;
	}
	blob->active.pages = tmp;
1309 
1310 	/* Assign this metadata to pages. This requires two passes -
1311 	 * one to verify that there are enough pages and a second
1312 	 * to actually claim them. */
1313 	page_num = 0;
1314 	/* Note that this loop starts at one. The first page location is fixed by the blobid. */
1315 	for (i = 1; i < blob->active.num_pages; i++) {
1316 		page_num = spdk_bit_array_find_first_clear(bs->used_md_pages, page_num);
1317 		if (page_num >= spdk_bit_array_capacity(bs->used_md_pages)) {
1318 			_spdk_blob_persist_complete(seq, ctx, -ENOMEM);
1319 			return;
1320 		}
1321 		page_num++;
1322 	}
1323 
1324 	page_num = 0;
1325 	blob->active.pages[0] = _spdk_bs_blobid_to_page(blob->id);
1326 	for (i = 1; i < blob->active.num_pages; i++) {
1327 		page_num = spdk_bit_array_find_first_clear(bs->used_md_pages, page_num);
1328 		ctx->pages[i - 1].next = page_num;
1329 		/* Now that previous metadata page is complete, calculate the crc for it. */
1330 		ctx->pages[i - 1].crc = _spdk_blob_md_page_calc_crc(&ctx->pages[i - 1]);
1331 		blob->active.pages[i] = page_num;
1332 		spdk_bit_array_set(bs->used_md_pages, page_num);
1333 		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Claiming page %u for blob %lu\n", page_num, blob->id);
1334 		page_num++;
1335 	}
1336 	ctx->pages[i - 1].crc = _spdk_blob_md_page_calc_crc(&ctx->pages[i - 1]);
1337 	/* Start writing the metadata from last page to first */
1338 	ctx->idx = blob->active.num_pages - 1;
1339 	blob->state = SPDK_BLOB_STATE_CLEAN;
1340 	_spdk_blob_persist_write_page_chain(seq, ctx, 0);
1341 }
1342 
1343 /* Write a blob to disk */
1344 static void
1345 _spdk_blob_persist(spdk_bs_sequence_t *seq, struct spdk_blob *blob,
1346 		   spdk_bs_sequence_cpl cb_fn, void *cb_arg)
1347 {
1348 	struct spdk_blob_persist_ctx *ctx;
1349 
1350 	_spdk_blob_verify_md_op(blob);
1351 
1352 	if (blob->state == SPDK_BLOB_STATE_CLEAN) {
1353 		cb_fn(seq, cb_arg, 0);
1354 		return;
1355 	}
1356 
1357 	ctx = calloc(1, sizeof(*ctx));
1358 	if (!ctx) {
1359 		cb_fn(seq, cb_arg, -ENOMEM);
1360 		return;
1361 	}
1362 	ctx->blob = blob;
1363 	ctx->seq = seq;
1364 	ctx->cb_fn = cb_fn;
1365 	ctx->cb_arg = cb_arg;
1366 
1367 	_spdk_blob_persist_start(ctx);
1368 }
1369 
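/*
 * Copy-on-write path for thin-provisioned blobs, also written
 * callback-first. On the first write to an unallocated cluster the code
 * below claims a fresh cluster, reads the old contents from the backing
 * dev (a snapshot or the zeroes device), writes them into the new cluster,
 * inserts the cluster into the blob's metadata on the md thread, and then
 * re-executes any user operations queued while the allocation was in
 * flight.
 */
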
1370 struct spdk_blob_copy_cluster_ctx {
1371 	struct spdk_blob *blob;
1372 	uint8_t *buf;
1373 	uint64_t page;
1374 	uint64_t new_cluster;
1375 	spdk_bs_sequence_t *seq;
1376 };
1377 
1378 static void
1379 _spdk_blob_allocate_and_copy_cluster_cpl(void *cb_arg, int bserrno)
1380 {
1381 	struct spdk_blob_copy_cluster_ctx *ctx = cb_arg;
1382 	struct spdk_bs_request_set *set = (struct spdk_bs_request_set *)ctx->seq;
1383 	TAILQ_HEAD(, spdk_bs_request_set) requests;
1384 	spdk_bs_user_op_t *op;
1385 
1386 	TAILQ_INIT(&requests);
1387 	TAILQ_SWAP(&set->channel->need_cluster_alloc, &requests, spdk_bs_request_set, link);
1388 
1389 	while (!TAILQ_EMPTY(&requests)) {
1390 		op = TAILQ_FIRST(&requests);
1391 		TAILQ_REMOVE(&requests, op, link);
1392 		if (bserrno == 0) {
1393 			spdk_bs_user_op_execute(op);
1394 		} else {
1395 			spdk_bs_user_op_abort(op);
1396 		}
1397 	}
1398 
1399 	spdk_dma_free(ctx->buf);
1400 	free(ctx);
1401 }
1402 
1403 static void
1404 _spdk_blob_insert_cluster_cpl(void *cb_arg, int bserrno)
1405 {
1406 	struct spdk_blob_copy_cluster_ctx *ctx = cb_arg;
1407 
1408 	if (bserrno) {
1409 		uint32_t cluster_number;
1410 
1411 		if (bserrno == -EEXIST) {
1412 			/* The metadata insert failed because another thread
1413 			 * allocated the cluster first. Free our cluster
1414 			 * but continue without error. */
1415 			bserrno = 0;
1416 		}
1417 
1418 		cluster_number = _spdk_bs_page_to_cluster(ctx->blob->bs, ctx->page);
1419 		_spdk_bs_release_cluster(ctx->blob->bs, cluster_number);
1420 	}
1421 
1422 	spdk_bs_sequence_finish(ctx->seq, bserrno);
1423 }
1424 
1425 static void
1426 _spdk_blob_write_copy_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
1427 {
1428 	struct spdk_blob_copy_cluster_ctx *ctx = cb_arg;
1429 	uint32_t cluster_number;
1430 
1431 	if (bserrno) {
1432 		/* The write failed, so jump to the final completion handler */
1433 		spdk_bs_sequence_finish(seq, bserrno);
1434 		return;
1435 	}
1436 
1437 	cluster_number = _spdk_bs_page_to_cluster(ctx->blob->bs, ctx->page);
1438 
1439 	_spdk_blob_insert_cluster_on_md_thread(ctx->blob, cluster_number, ctx->new_cluster,
1440 					       _spdk_blob_insert_cluster_cpl, ctx);
1441 }
1442 
1443 static void
1444 _spdk_blob_write_copy(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
1445 {
1446 	struct spdk_blob_copy_cluster_ctx *ctx = cb_arg;
1447 
1448 	if (bserrno != 0) {
1449 		/* The read failed, so jump to the final completion handler */
1450 		spdk_bs_sequence_finish(seq, bserrno);
1451 		return;
1452 	}
1453 
1454 	/* Write whole cluster */
1455 	spdk_bs_sequence_write_dev(seq, ctx->buf,
1456 				   _spdk_bs_cluster_to_lba(ctx->blob->bs, ctx->new_cluster),
1457 				   _spdk_bs_cluster_to_lba(ctx->blob->bs, 1),
1458 				   _spdk_blob_write_copy_cpl, ctx);
1459 }
1460 
1461 static void
1462 _spdk_bs_allocate_and_copy_cluster(struct spdk_blob *blob,
1463 				   struct spdk_io_channel *_ch,
1464 				   uint64_t offset, spdk_bs_user_op_t *op)
1465 {
1466 	struct spdk_bs_cpl cpl;
1467 	struct spdk_bs_channel *ch;
1468 	struct spdk_blob_copy_cluster_ctx *ctx;
1469 	uint32_t cluster_start_page;
1470 	uint32_t cluster_number;
1471 	int rc;
1472 
1473 	ch = spdk_io_channel_get_ctx(_ch);
1474 
1475 	if (!TAILQ_EMPTY(&ch->need_cluster_alloc)) {
1476 		/* There are already operations pending. Queue this user op
1477 		 * and return because it will be re-executed when the outstanding
1478 		 * cluster allocation completes. */
1479 		TAILQ_INSERT_TAIL(&ch->need_cluster_alloc, op, link);
1480 		return;
1481 	}
1482 
1483 	/* Round the page offset down to the first page in the cluster */
1484 	cluster_start_page = _spdk_bs_page_to_cluster_start(blob, offset);
1485 
1486 	/* Calculate which index in the metadata cluster array the corresponding
1487 	 * cluster is supposed to be at. */
1488 	cluster_number = _spdk_bs_page_to_cluster(blob->bs, cluster_start_page);
1489 
1490 	ctx = calloc(1, sizeof(*ctx));
1491 	if (!ctx) {
1492 		spdk_bs_user_op_abort(op);
1493 		return;
1494 	}
1495 
1496 	assert(blob->bs->cluster_sz % blob->back_bs_dev->blocklen == 0);
1497 
1498 	ctx->blob = blob;
1499 	ctx->page = cluster_start_page;
1500 
1501 	ctx->buf = spdk_dma_malloc(blob->bs->cluster_sz, blob->back_bs_dev->blocklen, NULL);
1502 	if (!ctx->buf) {
1503 		SPDK_ERRLOG("DMA allocation for cluster of size = %" PRIu32 " failed.\n",
1504 			    blob->bs->cluster_sz);
1505 		free(ctx);
1506 		spdk_bs_user_op_abort(op);
1507 		return;
1508 	}
1509 
1510 	rc = _spdk_bs_allocate_cluster(blob, cluster_number, &ctx->new_cluster, false);
1511 	if (rc != 0) {
1512 		spdk_dma_free(ctx->buf);
1513 		free(ctx);
1514 		spdk_bs_user_op_abort(op);
1515 		return;
1516 	}
1517 
1518 	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
1519 	cpl.u.blob_basic.cb_fn = _spdk_blob_allocate_and_copy_cluster_cpl;
1520 	cpl.u.blob_basic.cb_arg = ctx;
1521 
1522 	ctx->seq = spdk_bs_sequence_start(_ch, &cpl);
1523 	if (!ctx->seq) {
1524 		_spdk_bs_release_cluster(blob->bs, ctx->new_cluster);
1525 		spdk_dma_free(ctx->buf);
1526 		free(ctx);
1527 		spdk_bs_user_op_abort(op);
1528 		return;
1529 	}
1530 
1531 	/* Queue the user op to block other incoming operations */
1532 	TAILQ_INSERT_TAIL(&ch->need_cluster_alloc, op, link);
1533 
1534 	/* Read cluster from backing device */
1535 	spdk_bs_sequence_read_bs_dev(ctx->seq, blob->back_bs_dev, ctx->buf,
1536 				     _spdk_bs_dev_page_to_lba(blob->back_bs_dev, cluster_start_page),
1537 				     _spdk_bs_dev_byte_to_lba(blob->back_bs_dev, blob->bs->cluster_sz),
1538 				     _spdk_blob_write_copy, ctx);
1539 }
1540 
1541 static void
1542 _spdk_blob_calculate_lba_and_lba_count(struct spdk_blob *blob, uint64_t page, uint64_t length,
1543 				       uint64_t *lba,	uint32_t *lba_count)
1544 {
1545 	*lba_count = _spdk_bs_page_to_lba(blob->bs, length);
1546 
1547 	if (!_spdk_bs_page_is_allocated(blob, page)) {
1548 		assert(blob->back_bs_dev != NULL);
1549 		*lba = _spdk_bs_dev_page_to_lba(blob->back_bs_dev, page);
1550 		*lba_count = _spdk_bs_blob_lba_to_back_dev_lba(blob, *lba_count);
1551 	} else {
1552 		*lba = _spdk_bs_blob_page_to_lba(blob, page);
1553 	}
1554 }
1555 
1556 struct op_split_ctx {
1557 	struct spdk_blob *blob;
1558 	struct spdk_io_channel *channel;
1559 	uint64_t page_offset;
1560 	uint64_t pages_remaining;
1561 	void *curr_payload;
1562 	enum spdk_blob_op_type op_type;
1563 	spdk_bs_sequence_t *seq;
1564 };
1565 
1566 static void
1567 _spdk_blob_request_submit_op_split_next(void *cb_arg, int bserrno)
1568 {
1569 	struct op_split_ctx	*ctx = cb_arg;
1570 	struct spdk_blob	*blob = ctx->blob;
1571 	struct spdk_io_channel	*ch = ctx->channel;
1572 	enum spdk_blob_op_type	op_type = ctx->op_type;
1573 	uint8_t			*buf = ctx->curr_payload;
1574 	uint64_t		offset = ctx->page_offset;
1575 	uint64_t		length = ctx->pages_remaining;
1576 	uint64_t		op_length;
1577 
1578 	if (bserrno != 0 || ctx->pages_remaining == 0) {
1579 		spdk_bs_sequence_finish(ctx->seq, bserrno);
1580 		free(ctx);
1581 		return;
1582 	}
1583 
1584 	op_length = spdk_min(length, _spdk_bs_num_pages_to_cluster_boundary(blob, offset));
1585 
1586 	/* Update length and payload for next operation */
1587 	ctx->pages_remaining -= op_length;
1588 	ctx->page_offset += op_length;
1589 	if (op_type == SPDK_BLOB_WRITE || op_type == SPDK_BLOB_READ) {
1590 		ctx->curr_payload += (op_length * SPDK_BS_PAGE_SIZE);
1591 	}
1592 
1593 	switch (op_type) {
1594 	case SPDK_BLOB_READ:
1595 		spdk_blob_io_read(blob, ch, buf, offset, op_length,
1596 				  _spdk_blob_request_submit_op_split_next, ctx);
1597 		break;
1598 	case SPDK_BLOB_WRITE:
1599 		spdk_blob_io_write(blob, ch, buf, offset, op_length,
1600 				   _spdk_blob_request_submit_op_split_next, ctx);
1601 		break;
1602 	case SPDK_BLOB_UNMAP:
1603 		spdk_blob_io_unmap(blob, ch, offset, op_length,
1604 				   _spdk_blob_request_submit_op_split_next, ctx);
1605 		break;
1606 	case SPDK_BLOB_WRITE_ZEROES:
1607 		spdk_blob_io_write_zeroes(blob, ch, offset, op_length,
1608 					  _spdk_blob_request_submit_op_split_next, ctx);
1609 		break;
1610 	case SPDK_BLOB_READV:
1611 	case SPDK_BLOB_WRITEV:
		SPDK_ERRLOG("readv/writev not valid for %s\n", __func__);
1613 		spdk_bs_sequence_finish(ctx->seq, -EINVAL);
1614 		free(ctx);
1615 		break;
1616 	}
1617 }
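
/*
 * The splitter above is effectively a tail-recursive loop driven by
 * completion callbacks: each pass issues at most
 * _spdk_bs_num_pages_to_cluster_boundary() pages, so no sub-operation ever
 * crosses a cluster boundary, then re-enters itself from the completion
 * until pages_remaining reaches zero or an error finishes the sequence.
 */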
1618 
1619 static void
1620 _spdk_blob_request_submit_op_split(struct spdk_io_channel *ch, struct spdk_blob *blob,
1621 				   void *payload, uint64_t offset, uint64_t length,
1622 				   spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type)
1623 {
1624 	struct op_split_ctx *ctx;
1625 	spdk_bs_sequence_t *seq;
1626 	struct spdk_bs_cpl cpl;
1627 
1628 	assert(blob != NULL);
1629 
1630 	ctx = calloc(1, sizeof(struct op_split_ctx));
1631 	if (ctx == NULL) {
1632 		cb_fn(cb_arg, -ENOMEM);
1633 		return;
1634 	}
1635 
1636 	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
1637 	cpl.u.blob_basic.cb_fn = cb_fn;
1638 	cpl.u.blob_basic.cb_arg = cb_arg;
1639 
1640 	seq = spdk_bs_sequence_start(ch, &cpl);
1641 	if (!seq) {
1642 		free(ctx);
1643 		cb_fn(cb_arg, -ENOMEM);
1644 		return;
1645 	}
1646 
1647 	ctx->blob = blob;
1648 	ctx->channel = ch;
1649 	ctx->curr_payload = payload;
1650 	ctx->page_offset = offset;
1651 	ctx->pages_remaining = length;
1652 	ctx->op_type = op_type;
1653 	ctx->seq = seq;
1654 
1655 	_spdk_blob_request_submit_op_split_next(ctx, 0);
1656 }
1657 
1658 static void
1659 _spdk_blob_request_submit_op_single(struct spdk_io_channel *_ch, struct spdk_blob *blob,
1660 				    void *payload, uint64_t offset, uint64_t length,
1661 				    spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type)
1662 {
1663 	struct spdk_bs_cpl cpl;
1664 	uint64_t lba;
1665 	uint32_t lba_count;
1666 
1667 	assert(blob != NULL);
1668 
1669 	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
1670 	cpl.u.blob_basic.cb_fn = cb_fn;
1671 	cpl.u.blob_basic.cb_arg = cb_arg;
1672 
1673 	_spdk_blob_calculate_lba_and_lba_count(blob, offset, length, &lba, &lba_count);
1674 
1675 	switch (op_type) {
1676 	case SPDK_BLOB_READ: {
1677 		spdk_bs_batch_t *batch;
1678 
1679 		batch = spdk_bs_batch_open(_ch, &cpl);
1680 		if (!batch) {
1681 			cb_fn(cb_arg, -ENOMEM);
1682 			return;
1683 		}
1684 
1685 		if (_spdk_bs_page_is_allocated(blob, offset)) {
1686 			/* Read from the blob */
1687 			spdk_bs_batch_read_dev(batch, payload, lba, lba_count);
1688 		} else {
1689 			/* Read from the backing block device */
1690 			spdk_bs_batch_read_bs_dev(batch, blob->back_bs_dev, payload, lba, lba_count);
1691 		}
1692 
1693 		spdk_bs_batch_close(batch);
1694 		break;
1695 	}
1696 	case SPDK_BLOB_WRITE:
1697 	case SPDK_BLOB_WRITE_ZEROES: {
1698 		if (_spdk_bs_page_is_allocated(blob, offset)) {
1699 			/* Write to the blob */
1700 			spdk_bs_batch_t *batch;
1701 
1702 			if (lba_count == 0) {
1703 				cb_fn(cb_arg, 0);
1704 				return;
1705 			}
1706 
1707 			batch = spdk_bs_batch_open(_ch, &cpl);
1708 			if (!batch) {
1709 				cb_fn(cb_arg, -ENOMEM);
1710 				return;
1711 			}
1712 
1713 			if (op_type == SPDK_BLOB_WRITE) {
1714 				spdk_bs_batch_write_dev(batch, payload, lba, lba_count);
1715 			} else {
1716 				spdk_bs_batch_write_zeroes_dev(batch, lba, lba_count);
1717 			}
1718 
1719 			spdk_bs_batch_close(batch);
1720 		} else {
1721 			/* Queue this operation and allocate the cluster */
1722 			spdk_bs_user_op_t *op;
1723 
1724 			op = spdk_bs_user_op_alloc(_ch, &cpl, op_type, blob, payload, 0, offset, length);
1725 			if (!op) {
1726 				cb_fn(cb_arg, -ENOMEM);
1727 				return;
1728 			}
1729 
1730 			_spdk_bs_allocate_and_copy_cluster(blob, _ch, offset, op);
1731 		}
1732 		break;
1733 	}
1734 	case SPDK_BLOB_UNMAP: {
1735 		spdk_bs_batch_t *batch;
1736 
1737 		batch = spdk_bs_batch_open(_ch, &cpl);
1738 		if (!batch) {
1739 			cb_fn(cb_arg, -ENOMEM);
1740 			return;
1741 		}
1742 
1743 		if (_spdk_bs_page_is_allocated(blob, offset)) {
1744 			spdk_bs_batch_unmap_dev(batch, lba, lba_count);
1745 		}
1746 
1747 		spdk_bs_batch_close(batch);
1748 		break;
1749 	}
1750 	case SPDK_BLOB_READV:
1751 	case SPDK_BLOB_WRITEV:
		SPDK_ERRLOG("readv/writev not valid\n");
1753 		cb_fn(cb_arg, -EINVAL);
1754 		break;
1755 	}
1756 }
1757 
1758 static void
1759 _spdk_blob_request_submit_op(struct spdk_blob *blob, struct spdk_io_channel *_channel,
1760 			     void *payload, uint64_t offset, uint64_t length,
1761 			     spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type)
1762 {
1763 	assert(blob != NULL);
1764 
1765 	if (blob->data_ro && op_type != SPDK_BLOB_READ) {
1766 		cb_fn(cb_arg, -EPERM);
1767 		return;
1768 	}
1769 
1770 	if (offset + length > blob->active.num_clusters * blob->bs->pages_per_cluster) {
1771 		cb_fn(cb_arg, -EINVAL);
1772 		return;
1773 	}
1774 
1775 	if (length <= _spdk_bs_num_pages_to_cluster_boundary(blob, offset)) {
1776 		_spdk_blob_request_submit_op_single(_channel, blob, payload, offset, length,
1777 						    cb_fn, cb_arg, op_type);
1778 	} else {
1779 		_spdk_blob_request_submit_op_split(_channel, blob, payload, offset, length,
1780 						   cb_fn, cb_arg, op_type);
1781 	}
1782 }
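
/*
 * Units note for the dispatcher above: offset and length are expressed in
 * 4 KiB blobstore pages, not bytes or device LBAs. Requests that fit
 * within a single cluster are issued directly; anything larger goes
 * through the boundary-splitting path above.
 */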
1783 
1784 struct rw_iov_ctx {
1785 	struct spdk_blob *blob;
1786 	struct spdk_io_channel *channel;
1787 	spdk_blob_op_complete cb_fn;
1788 	void *cb_arg;
1789 	bool read;
1790 	int iovcnt;
1791 	struct iovec *orig_iov;
1792 	uint64_t page_offset;
1793 	uint64_t pages_remaining;
1794 	uint64_t pages_done;
1795 	struct iovec iov[0];
1796 };
1797 
1798 static void
1799 _spdk_rw_iov_done(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
1800 {
1801 	assert(cb_arg == NULL);
1802 	spdk_bs_sequence_finish(seq, bserrno);
1803 }
1804 
1805 static void
1806 _spdk_rw_iov_split_next(void *cb_arg, int bserrno)
1807 {
1808 	struct rw_iov_ctx *ctx = cb_arg;
1809 	struct spdk_blob *blob = ctx->blob;
1810 	struct iovec *iov, *orig_iov;
1811 	int iovcnt;
1812 	size_t orig_iovoff;
1813 	uint64_t page_count, pages_to_boundary, page_offset;
1814 	uint64_t byte_count;
1815 
1816 	if (bserrno != 0 || ctx->pages_remaining == 0) {
1817 		ctx->cb_fn(ctx->cb_arg, bserrno);
1818 		free(ctx);
1819 		return;
1820 	}
1821 
1822 	page_offset = ctx->page_offset;
1823 	pages_to_boundary = _spdk_bs_num_pages_to_cluster_boundary(blob, page_offset);
1824 	page_count = spdk_min(ctx->pages_remaining, pages_to_boundary);
1825 
1826 	/*
1827 	 * Get the index and offset into the original iov array for our current position
1828 	 *  in the I/O sequence.  byte_count tracks how many bytes remain until orig_iov
1829 	 *  and orig_iovoff point to the current position in the I/O sequence.
1830 	 */
1831 	byte_count = ctx->pages_done * sizeof(struct spdk_blob_md_page);
1832 	orig_iov = &ctx->orig_iov[0];
1833 	orig_iovoff = 0;
1834 	while (byte_count > 0) {
1835 		if (byte_count >= orig_iov->iov_len) {
1836 			byte_count -= orig_iov->iov_len;
1837 			orig_iov++;
1838 		} else {
1839 			orig_iovoff = byte_count;
1840 			byte_count = 0;
1841 		}
1842 	}
1843 
1844 	/*
1845 	 * Build an iov array for the next I/O in the sequence.  byte_count will keep track of how many
1846 	 *  bytes of this next I/O remain to be accounted for in the new iov array.
1847 	 */
1848 	byte_count = page_count * sizeof(struct spdk_blob_md_page);
1849 	iov = &ctx->iov[0];
1850 	iovcnt = 0;
1851 	while (byte_count > 0) {
1852 		iov->iov_len = spdk_min(byte_count, orig_iov->iov_len - orig_iovoff);
1853 		iov->iov_base = orig_iov->iov_base + orig_iovoff;
1854 		byte_count -= iov->iov_len;
1855 		orig_iovoff = 0;
1856 		orig_iov++;
1857 		iov++;
1858 		iovcnt++;
1859 	}
1860 
1861 	ctx->page_offset += page_count;
1862 	ctx->pages_done += page_count;
1863 	ctx->pages_remaining -= page_count;
1864 	iov = &ctx->iov[0];
1865 
1866 	if (ctx->read) {
1867 		spdk_blob_io_readv(ctx->blob, ctx->channel, iov, iovcnt, page_offset,
1868 				   page_count, _spdk_rw_iov_split_next, ctx);
1869 	} else {
1870 		spdk_blob_io_writev(ctx->blob, ctx->channel, iov, iovcnt, page_offset,
1871 				    page_count, _spdk_rw_iov_split_next, ctx);
1872 	}
1873 }
1874 
1875 static void
1876 _spdk_blob_request_submit_rw_iov(struct spdk_blob *blob, struct spdk_io_channel *_channel,
1877 				 struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
1878 				 spdk_blob_op_complete cb_fn, void *cb_arg, bool read)
1879 {
1880 	struct spdk_bs_cpl	cpl;
1881 
1882 	assert(blob != NULL);
1883 
1884 	if (!read && blob->data_ro) {
1885 		cb_fn(cb_arg, -EPERM);
1886 		return;
1887 	}
1888 
1889 	if (length == 0) {
1890 		cb_fn(cb_arg, 0);
1891 		return;
1892 	}
1893 
1894 	if (offset + length > blob->active.num_clusters * blob->bs->pages_per_cluster) {
1895 		cb_fn(cb_arg, -EINVAL);
1896 		return;
1897 	}
1898 
1899 	/*
1900 	 * For now, we implement readv/writev using a sequence (instead of a batch) to account for having
1901 	 *  to split a request that spans a cluster boundary.  For I/O that do not span a cluster boundary,
1902 	 *  there will be no noticeable difference compared to using a batch.  For I/O that do span a cluster
1903 	 *  boundary, the target LBAs (after blob offset to LBA translation) may not be contiguous, so we need
1904 	 *  to allocate a separate iov array and split the I/O such that none of the resulting
1905 	 *  smaller I/O cross a cluster boundary.  These smaller I/O will be issued in sequence (not in parallel)
1906 	 *  but since this case happens very infrequently, any performance impact will be negligible.
1907 	 *
1908 	 * This could be optimized in the future to allocate a big enough iov array to account for all of the iovs
1909 	 *  for all of the smaller I/Os, pre-build all of the iov arrays for the smaller I/Os, then issue them
1910 	 *  in a batch.  That would also require creating an intermediate spdk_bs_cpl that would get called
1911 	 *  when the batch was completed, to allow for freeing the memory for the iov arrays.
1912 	 */
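	/*
	 * Worked example (sketch): with 4 KiB pages and a 1 MiB cluster (256 pages per
	 *  cluster), a 10-page I/O starting at page offset 250 crosses a cluster
	 *  boundary.  The split path below issues a 6-page I/O (pages 250-255) followed
	 *  by a 4-page I/O (pages 256-259), rebuilding the iov array for each piece.
	 */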
1913 	if (spdk_likely(length <= _spdk_bs_num_pages_to_cluster_boundary(blob, offset))) {
1914 		uint32_t lba_count;
1915 		uint64_t lba;
1916 
1917 		_spdk_blob_calculate_lba_and_lba_count(blob, offset, length, &lba, &lba_count);
1918 
1919 		cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
1920 		cpl.u.blob_basic.cb_fn = cb_fn;
1921 		cpl.u.blob_basic.cb_arg = cb_arg;
1922 
1923 		if (read) {
1924 			spdk_bs_sequence_t *seq;
1925 
1926 			seq = spdk_bs_sequence_start(_channel, &cpl);
1927 			if (!seq) {
1928 				cb_fn(cb_arg, -ENOMEM);
1929 				return;
1930 			}
1931 
1932 			if (_spdk_bs_page_is_allocated(blob, offset)) {
1933 				spdk_bs_sequence_readv_dev(seq, iov, iovcnt, lba, lba_count, _spdk_rw_iov_done, NULL);
1934 			} else {
1935 				spdk_bs_sequence_readv_bs_dev(seq, blob->back_bs_dev, iov, iovcnt, lba, lba_count,
1936 							      _spdk_rw_iov_done, NULL);
1937 			}
1938 		} else {
1939 			if (_spdk_bs_page_is_allocated(blob, offset)) {
1940 				spdk_bs_sequence_t *seq;
1941 
1942 				seq = spdk_bs_sequence_start(_channel, &cpl);
1943 				if (!seq) {
1944 					cb_fn(cb_arg, -ENOMEM);
1945 					return;
1946 				}
1947 
1948 				spdk_bs_sequence_writev_dev(seq, iov, iovcnt, lba, lba_count, _spdk_rw_iov_done, NULL);
1949 			} else {
1950 				/* Queue this operation and allocate the cluster */
1951 				spdk_bs_user_op_t *op;
1952 
1953 				op = spdk_bs_user_op_alloc(_channel, &cpl, SPDK_BLOB_WRITEV, blob, iov, iovcnt, offset, length);
1954 				if (!op) {
1955 					cb_fn(cb_arg, -ENOMEM);
1956 					return;
1957 				}
1958 
1959 				_spdk_bs_allocate_and_copy_cluster(blob, _channel, offset, op);
1960 			}
1961 		}
1962 	} else {
1963 		struct rw_iov_ctx *ctx;
1964 
1965 		ctx = calloc(1, sizeof(struct rw_iov_ctx) + iovcnt * sizeof(struct iovec));
1966 		if (ctx == NULL) {
1967 			cb_fn(cb_arg, -ENOMEM);
1968 			return;
1969 		}
1970 
1971 		ctx->blob = blob;
1972 		ctx->channel = _channel;
1973 		ctx->cb_fn = cb_fn;
1974 		ctx->cb_arg = cb_arg;
1975 		ctx->read = read;
1976 		ctx->orig_iov = iov;
1977 		ctx->iovcnt = iovcnt;
1978 		ctx->page_offset = offset;
1979 		ctx->pages_remaining = length;
1980 		ctx->pages_done = 0;
1981 
1982 		_spdk_rw_iov_split_next(ctx, 0);
1983 	}
1984 }
1985 
1986 static struct spdk_blob *
1987 _spdk_blob_lookup(struct spdk_blob_store *bs, spdk_blob_id blobid)
1988 {
1989 	struct spdk_blob *blob;
1990 
1991 	TAILQ_FOREACH(blob, &bs->blobs, link) {
1992 		if (blob->id == blobid) {
1993 			return blob;
1994 		}
1995 	}
1996 
1997 	return NULL;
1998 }
1999 
2000 static int
2001 _spdk_bs_channel_create(void *io_device, void *ctx_buf)
2002 {
2003 	struct spdk_blob_store		*bs = io_device;
2004 	struct spdk_bs_channel		*channel = ctx_buf;
2005 	struct spdk_bs_dev		*dev;
2006 	uint32_t			max_ops = bs->max_channel_ops;
2007 	uint32_t			i;
2008 
2009 	dev = bs->dev;
2010 
2011 	channel->req_mem = calloc(max_ops, sizeof(struct spdk_bs_request_set));
2012 	if (!channel->req_mem) {
2013 		return -1;
2014 	}
2015 
2016 	TAILQ_INIT(&channel->reqs);
2017 
2018 	for (i = 0; i < max_ops; i++) {
2019 		TAILQ_INSERT_TAIL(&channel->reqs, &channel->req_mem[i], link);
2020 	}
2021 
2022 	channel->bs = bs;
2023 	channel->dev = dev;
2024 	channel->dev_channel = dev->create_channel(dev);
2025 
2026 	if (!channel->dev_channel) {
2027 		SPDK_ERRLOG("Failed to create device channel.\n");
2028 		free(channel->req_mem);
2029 		return -1;
2030 	}
2031 
2032 	TAILQ_INIT(&channel->need_cluster_alloc);
2033 
2034 	return 0;
2035 }
2036 
2037 static void
2038 _spdk_bs_channel_destroy(void *io_device, void *ctx_buf)
2039 {
2040 	struct spdk_bs_channel *channel = ctx_buf;
2041 	spdk_bs_user_op_t *op;
2042 
2043 	while (!TAILQ_EMPTY(&channel->need_cluster_alloc)) {
2044 		op = TAILQ_FIRST(&channel->need_cluster_alloc);
2045 		TAILQ_REMOVE(&channel->need_cluster_alloc, op, link);
2046 		spdk_bs_user_op_abort(op);
2047 	}
2048 
2049 	free(channel->req_mem);
2050 	channel->dev->destroy_channel(channel->dev, channel->dev_channel);
2051 }
2052 
2053 static void
2054 _spdk_bs_dev_destroy(void *io_device)
2055 {
2056 	struct spdk_blob_store *bs = io_device;
2057 	struct spdk_blob	*blob, *blob_tmp;
2058 
2059 	bs->dev->destroy(bs->dev);
2060 
2061 	TAILQ_FOREACH_SAFE(blob, &bs->blobs, link, blob_tmp) {
2062 		TAILQ_REMOVE(&bs->blobs, blob, link);
2063 		_spdk_blob_free(blob);
2064 	}
2065 
2066 	pthread_mutex_destroy(&bs->used_clusters_mutex);
2067 
2068 	spdk_bit_array_free(&bs->used_blobids);
2069 	spdk_bit_array_free(&bs->used_md_pages);
2070 	spdk_bit_array_free(&bs->used_clusters);
2071 	/*
2072 	 * If this function is called for any reason except a successful unload,
2073 	 * the unload_cpl type will be NONE and this will be a nop.
2074 	 */
2075 	spdk_bs_call_cpl(&bs->unload_cpl, bs->unload_err);
2076 
2077 	free(bs);
2078 }
2079 
2080 static int
2081 _spdk_bs_blob_list_add(struct spdk_blob *blob)
2082 {
2083 	spdk_blob_id snapshot_id;
2084 	struct spdk_blob_list *snapshot_entry = NULL;
2085 	struct spdk_blob_list *clone_entry = NULL;
2086 
2087 	assert(blob != NULL);
2088 
2089 	snapshot_id = blob->parent_id;
2090 	if (snapshot_id == SPDK_BLOBID_INVALID) {
2091 		return 0;
2092 	}
2093 
2094 	TAILQ_FOREACH(snapshot_entry, &blob->bs->snapshots, link) {
2095 		if (snapshot_entry->id == snapshot_id) {
2096 			break;
2097 		}
2098 	}
2099 
2100 	if (snapshot_entry == NULL) {
2101 		/* Snapshot not found */
2102 		snapshot_entry = calloc(1, sizeof(struct spdk_blob_list));
2103 		if (snapshot_entry == NULL) {
2104 			return -ENOMEM;
2105 		}
2106 		snapshot_entry->id = snapshot_id;
2107 		TAILQ_INIT(&snapshot_entry->clones);
2108 		TAILQ_INSERT_TAIL(&blob->bs->snapshots, snapshot_entry, link);
2109 	} else {
2110 		TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) {
2111 			if (clone_entry->id == blob->id) {
2112 				break;
2113 			}
2114 		}
2115 	}
2116 
2117 	if (clone_entry == NULL) {
2118 		/* Clone not found */
2119 		clone_entry = calloc(1, sizeof(struct spdk_blob_list));
2120 		if (clone_entry == NULL) {
2121 			return -ENOMEM;
2122 		}
2123 		clone_entry->id = blob->id;
2124 		TAILQ_INIT(&clone_entry->clones);
2125 		TAILQ_INSERT_TAIL(&snapshot_entry->clones, clone_entry, link);
2126 		snapshot_entry->clone_count++;
2127 	}
2128 
2129 	return 0;
2130 }
2131 
2132 static int
2133 _spdk_bs_blob_list_remove(struct spdk_blob *blob)
2134 {
2135 	struct spdk_blob_list *snapshot_entry = NULL;
2136 	struct spdk_blob_list *clone_entry = NULL;
2137 	spdk_blob_id snapshot_id;
2138 
2139 	assert(blob != NULL);
2140 
2141 	snapshot_id = blob->parent_id;
2142 	if (snapshot_id == SPDK_BLOBID_INVALID) {
2143 		return 0;
2144 	}
2145 
2146 	TAILQ_FOREACH(snapshot_entry, &blob->bs->snapshots, link) {
2147 		if (snapshot_entry->id == snapshot_id) {
2148 			break;
2149 		}
2150 	}
2151 
2152 	assert(snapshot_entry != NULL);
2153 
2154 	TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) {
2155 		if (clone_entry->id == blob->id) {
2156 			break;
2157 		}
2158 	}
2159 
2160 	assert(clone_entry != NULL);
2161 
2162 	blob->parent_id = SPDK_BLOBID_INVALID;
2163 	TAILQ_REMOVE(&snapshot_entry->clones, clone_entry, link);
2164 	free(clone_entry);
2165 
2166 	snapshot_entry->clone_count--;
2167 	if (snapshot_entry->clone_count == 0) {
2168 		/* Snapshot has no more clones */
2169 		TAILQ_REMOVE(&blob->bs->snapshots, snapshot_entry, link);
2170 		free(snapshot_entry);
2171 	}
2172 
2173 	return 0;
2174 }
2175 
2176 static int
2177 _spdk_bs_blob_list_free(struct spdk_blob_store *bs)
2178 {
2179 	struct spdk_blob_list *snapshot_entry;
2180 	struct spdk_blob_list *snapshot_entry_tmp;
2181 	struct spdk_blob_list *clone_entry;
2182 	struct spdk_blob_list *clone_entry_tmp;
2183 
2184 	TAILQ_FOREACH_SAFE(snapshot_entry, &bs->snapshots, link, snapshot_entry_tmp) {
2185 		TAILQ_FOREACH_SAFE(clone_entry, &snapshot_entry->clones, link, clone_entry_tmp) {
2186 			TAILQ_REMOVE(&snapshot_entry->clones, clone_entry, link);
2187 			free(clone_entry);
2188 		}
2189 		TAILQ_REMOVE(&bs->snapshots, snapshot_entry, link);
2190 		free(snapshot_entry);
2191 	}
2192 
2193 	return 0;
2194 }
2195 
2196 static void
2197 _spdk_bs_free(struct spdk_blob_store *bs)
2198 {
2199 	_spdk_bs_blob_list_free(bs);
2200 
2201 	spdk_bs_unregister_md_thread(bs);
2202 	spdk_io_device_unregister(bs, _spdk_bs_dev_destroy);
2203 }
2204 
2205 void
2206 spdk_bs_opts_init(struct spdk_bs_opts *opts)
2207 {
2208 	opts->cluster_sz = SPDK_BLOB_OPTS_CLUSTER_SZ;
2209 	opts->num_md_pages = SPDK_BLOB_OPTS_NUM_MD_PAGES;
2210 	opts->max_md_ops = SPDK_BLOB_OPTS_MAX_MD_OPS;
2211 	opts->max_channel_ops = SPDK_BLOB_OPTS_DEFAULT_CHANNEL_OPS;
2212 	memset(&opts->bstype, 0, sizeof(opts->bstype));
2213 	opts->iter_cb_fn = NULL;
2214 	opts->iter_cb_arg = NULL;
2215 }
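
/*
 * Example (sketch; "init_done" is a hypothetical caller-provided callback of type
 *  spdk_bs_op_with_handle_complete):
 *
 *	struct spdk_bs_opts opts;
 *
 *	spdk_bs_opts_init(&opts);
 *	opts.cluster_sz = 4 * 1024 * 1024;	<- override only the fields you need
 *	spdk_bs_init(dev, &opts, init_done, NULL);
 */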
2216 
2217 static int
2218 _spdk_bs_opts_verify(struct spdk_bs_opts *opts)
2219 {
2220 	if (opts->cluster_sz == 0 || opts->num_md_pages == 0 || opts->max_md_ops == 0 ||
2221 	    opts->max_channel_ops == 0) {
2222 		SPDK_ERRLOG("Blobstore options cannot be set to 0\n");
2223 		return -1;
2224 	}
2225 
2226 	return 0;
2227 }
2228 
2229 static struct spdk_blob_store *
2230 _spdk_bs_alloc(struct spdk_bs_dev *dev, struct spdk_bs_opts *opts)
2231 {
2232 	struct spdk_blob_store	*bs;
2233 	uint64_t dev_size;
2234 	int rc;
2235 
2236 	dev_size = dev->blocklen * dev->blockcnt;
2237 	if (dev_size < opts->cluster_sz) {
2238 		/* Device size cannot be smaller than cluster size of blobstore */
2239 		SPDK_ERRLOG("Device size %" PRIu64 " is smaller than cluster size %" PRIu32 "\n",
2240 			    dev_size, opts->cluster_sz);
2241 		return NULL;
2242 	}
2243 	if (opts->cluster_sz < SPDK_BS_PAGE_SIZE) {
2244 		/* Cluster size cannot be smaller than page size */
2245 		SPDK_ERRLOG("Cluster size %" PRIu32 " is smaller than page size %d\n",
2246 			    opts->cluster_sz, SPDK_BS_PAGE_SIZE);
2247 		return NULL;
2248 	}
2249 	bs = calloc(1, sizeof(struct spdk_blob_store));
2250 	if (!bs) {
2251 		return NULL;
2252 	}
2253 
2254 	TAILQ_INIT(&bs->blobs);
2255 	TAILQ_INIT(&bs->snapshots);
2256 	bs->dev = dev;
2257 	bs->md_thread = spdk_get_thread();
2258 	assert(bs->md_thread != NULL);
2259 
2260 	/*
2261 	 * Do not use _spdk_bs_lba_to_cluster() here since blockcnt may not be an
2262 	 *  even multiple of the cluster size.
2263 	 */
2264 	bs->cluster_sz = opts->cluster_sz;
2265 	bs->total_clusters = dev->blockcnt / (bs->cluster_sz / dev->blocklen);
2266 	bs->pages_per_cluster = bs->cluster_sz / SPDK_BS_PAGE_SIZE;
2267 	bs->num_free_clusters = bs->total_clusters;
2268 	bs->used_clusters = spdk_bit_array_create(bs->total_clusters);
2269 	if (bs->used_clusters == NULL) {
2270 		free(bs);
2271 		return NULL;
2272 	}
2273 
2274 	bs->max_channel_ops = opts->max_channel_ops;
2275 	bs->super_blob = SPDK_BLOBID_INVALID;
2276 	memcpy(&bs->bstype, &opts->bstype, sizeof(opts->bstype));
2277 
2278 	/* The metadata is assumed to be at least 1 page */
2279 	bs->used_md_pages = spdk_bit_array_create(1);
2280 	bs->used_blobids = spdk_bit_array_create(0);
2281 
2282 	pthread_mutex_init(&bs->used_clusters_mutex, NULL);
2283 
2284 	spdk_io_device_register(bs, _spdk_bs_channel_create, _spdk_bs_channel_destroy,
2285 				sizeof(struct spdk_bs_channel));
2286 	rc = spdk_bs_register_md_thread(bs);
2287 	if (rc == -1) {
2288 		spdk_io_device_unregister(bs, NULL);
2289 		pthread_mutex_destroy(&bs->used_clusters_mutex);
2290 		spdk_bit_array_free(&bs->used_blobids);
2291 		spdk_bit_array_free(&bs->used_md_pages);
2292 		spdk_bit_array_free(&bs->used_clusters);
2293 		free(bs);
2294 		return NULL;
2295 	}
2296 
2297 	return bs;
2298 }
2299 
2300 /* START spdk_bs_load.  spdk_bs_load_ctx will be used for both load and unload. */
2301 
2302 struct spdk_bs_load_ctx {
2303 	struct spdk_blob_store		*bs;
2304 	struct spdk_bs_super_block	*super;
2305 
2306 	struct spdk_bs_md_mask		*mask;
2307 	bool				in_page_chain;
2308 	uint32_t			page_index;
2309 	uint32_t			cur_page;
2310 	struct spdk_blob_md_page	*page;
2311 	bool				is_load;
2312 
2313 	spdk_bs_sequence_t			*seq;
2314 	spdk_blob_op_with_handle_complete	iter_cb_fn;
2315 	void					*iter_cb_arg;
2316 };
2317 
2318 static void
2319 _spdk_bs_load_ctx_fail(spdk_bs_sequence_t *seq, struct spdk_bs_load_ctx *ctx, int bserrno)
2320 {
2321 	assert(bserrno != 0);
2322 
2323 	spdk_dma_free(ctx->super);
2324 	spdk_bs_sequence_finish(seq, bserrno);
2325 	/*
2326 	 * Only free the blobstore when a load fails.  If an unload fails (for some reason)
2327 	 *  we want to keep the blobstore in case the caller wants to try again.
2328 	 */
2329 	if (ctx->is_load) {
2330 		_spdk_bs_free(ctx->bs);
2331 	}
2332 	free(ctx);
2333 }
2334 
2335 static void
2336 _spdk_bs_set_mask(struct spdk_bit_array *array, struct spdk_bs_md_mask *mask)
2337 {
2338 	uint32_t i = 0;
2339 
2340 	while (true) {
2341 		i = spdk_bit_array_find_first_set(array, i);
2342 		if (i >= mask->length) {
2343 			break;
2344 		}
2345 		mask->mask[i / 8] |= 1U << (i % 8);
2346 		i++;
2347 	}
2348 }
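
/*
 * Worked example: if bits 0, 3 and 9 are set in the bit array and mask->length >= 10,
 *  the serialized bytes become mask->mask[0] == 0x09 (bits 0 and 3) and
 *  mask->mask[1] == 0x02 (bit 9).  Set bits at or beyond mask->length are not
 *  serialized.
 */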
2349 
2350 static void
2351 _spdk_bs_write_super(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs,
2352 		     struct spdk_bs_super_block *super, spdk_bs_sequence_cpl cb_fn, void *cb_arg)
2353 {
2354 	/* Update the values in the super block */
2355 	super->super_blob = bs->super_blob;
2356 	memcpy(&super->bstype, &bs->bstype, sizeof(bs->bstype));
2357 	super->crc = _spdk_blob_md_page_calc_crc(super);
2358 	spdk_bs_sequence_write_dev(seq, super, _spdk_bs_page_to_lba(bs, 0),
2359 				   _spdk_bs_byte_to_lba(bs, sizeof(*super)),
2360 				   cb_fn, cb_arg);
2361 }
2362 
2363 static void
2364 _spdk_bs_write_used_clusters(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn)
2365 {
2366 	struct spdk_bs_load_ctx	*ctx = arg;
2367 	uint64_t	mask_size, lba, lba_count;
2368 
2369 	/* Write out the used clusters mask */
2370 	mask_size = ctx->super->used_cluster_mask_len * SPDK_BS_PAGE_SIZE;
2371 	ctx->mask = spdk_dma_zmalloc(mask_size, 0x1000, NULL);
2372 	if (!ctx->mask) {
2373 		_spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM);
2374 		return;
2375 	}
2376 
2377 	ctx->mask->type = SPDK_MD_MASK_TYPE_USED_CLUSTERS;
2378 	ctx->mask->length = ctx->bs->total_clusters;
2379 	assert(ctx->mask->length == spdk_bit_array_capacity(ctx->bs->used_clusters));
2380 
2381 	_spdk_bs_set_mask(ctx->bs->used_clusters, ctx->mask);
2382 	lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start);
2383 	lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len);
2384 	spdk_bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg);
2385 }
2386 
2387 static void
2388 _spdk_bs_write_used_md(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn)
2389 {
2390 	struct spdk_bs_load_ctx	*ctx = arg;
2391 	uint64_t	mask_size, lba, lba_count;
2392 
2393 	mask_size = ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE;
2394 	ctx->mask = spdk_dma_zmalloc(mask_size, 0x1000, NULL);
2395 	if (!ctx->mask) {
2396 		_spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM);
2397 		return;
2398 	}
2399 
2400 	ctx->mask->type = SPDK_MD_MASK_TYPE_USED_PAGES;
2401 	ctx->mask->length = ctx->super->md_len;
2402 	assert(ctx->mask->length == spdk_bit_array_capacity(ctx->bs->used_md_pages));
2403 
2404 	_spdk_bs_set_mask(ctx->bs->used_md_pages, ctx->mask);
2405 	lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_start);
2406 	lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_len);
2407 	spdk_bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg);
2408 }
2409 
2410 static void
2411 _spdk_bs_write_used_blobids(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn)
2412 {
2413 	struct spdk_bs_load_ctx	*ctx = arg;
2414 	uint64_t	mask_size, lba, lba_count;
2415 
2416 	if (ctx->super->used_blobid_mask_len == 0) {
2417 		/*
2418 		 * This is a pre-v3 on-disk format where the blobid mask does not get
2419 		 *  written to disk.
2420 		 */
2421 		cb_fn(seq, arg, 0);
2422 		return;
2423 	}
2424 
2425 	mask_size = ctx->super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE;
2426 	ctx->mask = spdk_dma_zmalloc(mask_size, 0x1000, NULL);
2427 	if (!ctx->mask) {
2428 		_spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM);
2429 		return;
2430 	}
2431 
2432 	ctx->mask->type = SPDK_MD_MASK_TYPE_USED_BLOBIDS;
2433 	ctx->mask->length = ctx->super->md_len;
2434 	assert(ctx->mask->length == spdk_bit_array_capacity(ctx->bs->used_blobids));
2435 
2436 	_spdk_bs_set_mask(ctx->bs->used_blobids, ctx->mask);
2437 	lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_start);
2438 	lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_len);
2439 	spdk_bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg);
2440 }
2441 
2442 static void
2443 _spdk_bs_load_iter(void *arg, struct spdk_blob *blob, int bserrno)
2444 {
2445 	struct spdk_bs_load_ctx *ctx = arg;
2446 
2447 	if (bserrno == 0) {
2448 		if (ctx->iter_cb_fn) {
2449 			ctx->iter_cb_fn(ctx->iter_cb_arg, blob, 0);
2450 		}
2451 		_spdk_bs_blob_list_add(blob);
2452 		spdk_bs_iter_next(ctx->bs, blob, _spdk_bs_load_iter, ctx);
2453 		return;
2454 	}
2455 
2456 	if (bserrno == -ENOENT) {
2457 		bserrno = 0;
2458 	} else {
2459 		/*
2460 		 * This case needs to be looked at further.  Same problem
2461 		 *  exists with applications that rely on explicit blob
2462 		 *  iteration.  We should just skip the blob that failed
2463 		 *  to load and continue on to the next one.
2464 		 */
2465 		SPDK_ERRLOG("Error in iterating blobs\n");
2466 	}
2467 
2468 	ctx->iter_cb_fn = NULL;
2469 
2470 	spdk_dma_free(ctx->super);
2471 	spdk_dma_free(ctx->mask);
2472 	spdk_bs_sequence_finish(ctx->seq, bserrno);
2473 	free(ctx);
2474 }
2475 
2476 static void
2477 _spdk_bs_load_complete(spdk_bs_sequence_t *seq, struct spdk_bs_load_ctx *ctx, int bserrno)
2478 {
2479 	ctx->seq = seq;
2480 	spdk_bs_iter_first(ctx->bs, _spdk_bs_load_iter, ctx);
2481 }
2482 
2483 static void
2484 _spdk_bs_load_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2485 {
2486 	struct spdk_bs_load_ctx *ctx = cb_arg;
2487 	uint32_t i, j;
2488 	int rc;
2489 
2490 	/* The type must be correct */
2491 	assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_BLOBIDS);
2492 
2493 	/* The length of the mask (in bits) must not be greater than
2494 	 * the length of the buffer (converted to bits) */
2495 	assert(ctx->mask->length <= (ctx->super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE * 8));
2496 
2497 	/* The length of the mask must be exactly equal to the size
2498 	 * (in pages) of the metadata region */
2499 	assert(ctx->mask->length == ctx->super->md_len);
2500 
2501 	rc = spdk_bit_array_resize(&ctx->bs->used_blobids, ctx->mask->length);
2502 	if (rc < 0) {
2503 		spdk_dma_free(ctx->mask);
2504 		_spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM);
2505 		return;
2506 	}
2507 
2508 	for (i = 0; i < ctx->mask->length / 8; i++) {
2509 		uint8_t segment = ctx->mask->mask[i];
2510 		for (j = 0; segment && (j < 8); j++) {
2511 			if (segment & 1U) {
2512 				spdk_bit_array_set(ctx->bs->used_blobids, (i * 8) + j);
2513 			}
2514 			segment >>= 1U;
2515 		}
2516 	}
2517 
2518 	_spdk_bs_load_complete(seq, ctx, bserrno);
2519 }
2520 
2521 static void
2522 _spdk_bs_load_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2523 {
2524 	struct spdk_bs_load_ctx *ctx = cb_arg;
2525 	uint64_t		lba, lba_count, mask_size;
2526 	uint32_t		i, j;
2527 	int			rc;
2528 
2529 	/* The type must be correct */
2530 	assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_CLUSTERS);
2531 	/* The length of the mask (in bits) must not be greater than the length of the buffer (converted to bits) */
2532 	assert(ctx->mask->length <= (ctx->super->used_cluster_mask_len * sizeof(
2533 					     struct spdk_blob_md_page) * 8));
2534 	/* The length of the mask must be exactly equal to the total number of clusters */
2535 	assert(ctx->mask->length == ctx->bs->total_clusters);
2536 
2537 	rc = spdk_bit_array_resize(&ctx->bs->used_clusters, ctx->bs->total_clusters);
2538 	if (rc < 0) {
2539 		spdk_dma_free(ctx->mask);
2540 		_spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM);
2541 		return;
2542 	}
2543 
2544 	ctx->bs->num_free_clusters = ctx->bs->total_clusters;
2545 	for (i = 0; i < ctx->mask->length / 8; i++) {
2546 		uint8_t segment = ctx->mask->mask[i];
2547 		for (j = 0; segment && (j < 8); j++) {
2548 			if (segment & 1U) {
2549 				spdk_bit_array_set(ctx->bs->used_clusters, (i * 8) + j);
2550 				assert(ctx->bs->num_free_clusters > 0);
2551 				ctx->bs->num_free_clusters--;
2552 			}
2553 			segment >>= 1U;
2554 		}
2555 	}
2556 
2557 	spdk_dma_free(ctx->mask);
2558 
2559 	/* Read the used blobids mask */
2560 	mask_size = ctx->super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE;
2561 	ctx->mask = spdk_dma_zmalloc(mask_size, 0x1000, NULL);
2562 	if (!ctx->mask) {
2563 		_spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM);
2564 		return;
2565 	}
2566 	lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_start);
2567 	lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_len);
2568 	spdk_bs_sequence_read_dev(seq, ctx->mask, lba, lba_count,
2569 				  _spdk_bs_load_used_blobids_cpl, ctx);
2570 }
2571 
2572 static void
2573 _spdk_bs_load_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2574 {
2575 	struct spdk_bs_load_ctx *ctx = cb_arg;
2576 	uint64_t		lba, lba_count, mask_size;
2577 	uint32_t		i, j;
2578 	int			rc;
2579 
2580 	/* The type must be correct */
2581 	assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_PAGES);
2582 	/* The length of the mask (in bits) must not be greater than the length of the buffer (converted to bits) */
2583 	assert(ctx->mask->length <= (ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE *
2584 				     8));
2585 	/* The length of the mask must be exactly equal to the size (in pages) of the metadata region */
2586 	assert(ctx->mask->length == ctx->super->md_len);
2587 
2588 	rc = spdk_bit_array_resize(&ctx->bs->used_md_pages, ctx->mask->length);
2589 	if (rc < 0) {
2590 		spdk_dma_free(ctx->mask);
2591 		_spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM);
2592 		return;
2593 	}
2594 
2595 	for (i = 0; i < ctx->mask->length / 8; i++) {
2596 		uint8_t segment = ctx->mask->mask[i];
2597 		for (j = 0; segment && (j < 8); j++) {
2598 			if (segment & 1U) {
2599 				spdk_bit_array_set(ctx->bs->used_md_pages, (i * 8) + j);
2600 			}
2601 			segment >>= 1U;
2602 		}
2603 	}
2604 	spdk_dma_free(ctx->mask);
2605 
2606 	/* Read the used clusters mask */
2607 	mask_size = ctx->super->used_cluster_mask_len * SPDK_BS_PAGE_SIZE;
2608 	ctx->mask = spdk_dma_zmalloc(mask_size, 0x1000, NULL);
2609 	if (!ctx->mask) {
2610 		_spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM);
2611 		return;
2612 	}
2613 	lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start);
2614 	lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len);
2615 	spdk_bs_sequence_read_dev(seq, ctx->mask, lba, lba_count,
2616 				  _spdk_bs_load_used_clusters_cpl, ctx);
2617 }
2618 
2619 static void
2620 _spdk_bs_load_write_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2621 {
2622 	struct spdk_bs_load_ctx	*ctx = cb_arg;
2623 	uint64_t lba, lba_count, mask_size;
2624 
2625 	/* Read the used pages mask */
2626 	mask_size = ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE;
2627 	ctx->mask = spdk_dma_zmalloc(mask_size, 0x1000, NULL);
2628 	if (!ctx->mask) {
2629 		_spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM);
2630 		return;
2631 	}
2632 
2633 	lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_start);
2634 	lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_len);
2635 	spdk_bs_sequence_read_dev(seq, ctx->mask, lba, lba_count,
2636 				  _spdk_bs_load_used_pages_cpl, ctx);
2637 }
2638 
2639 static int
2640 _spdk_bs_load_replay_md_parse_page(const struct spdk_blob_md_page *page, struct spdk_blob_store *bs)
2641 {
2642 	struct spdk_blob_md_descriptor *desc;
2643 	size_t	cur_desc = 0;
2644 
2645 	desc = (struct spdk_blob_md_descriptor *)page->descriptors;
2646 	while (cur_desc < sizeof(page->descriptors)) {
2647 		if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) {
2648 			if (desc->length == 0) {
2649 				/* If padding and length are 0, this terminates the page */
2650 				break;
2651 			}
2652 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT) {
2653 			struct spdk_blob_md_descriptor_extent	*desc_extent;
2654 			unsigned int				i, j;
2655 			unsigned int				cluster_count = 0;
2656 			uint32_t				cluster_idx;
2657 
2658 			desc_extent = (struct spdk_blob_md_descriptor_extent *)desc;
2659 
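			/*
			 * Each extent entry describes a run of extents[i].length contiguous
			 *  clusters starting at extents[i].cluster_idx.  For example, the
			 *  entry { cluster_idx = 5, length = 3 } marks clusters 5, 6 and 7
			 *  as used.
			 */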
2660 			for (i = 0; i < desc_extent->length / sizeof(desc_extent->extents[0]); i++) {
2661 				for (j = 0; j < desc_extent->extents[i].length; j++) {
2662 					cluster_idx = desc_extent->extents[i].cluster_idx;
2663 					/*
2664 					 * cluster_idx = 0 means an unallocated cluster - don't mark that
2665 					 * in the used cluster map.
2666 					 */
2667 					if (cluster_idx != 0) {
2668 						spdk_bit_array_set(bs->used_clusters, cluster_idx + j);
2669 						if (bs->num_free_clusters == 0) {
2670 							return -1;
2671 						}
2672 						bs->num_free_clusters--;
2673 					}
2674 					cluster_count++;
2675 				}
2676 			}
2677 			if (cluster_count == 0) {
2678 				return -1;
2679 			}
2680 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) {
2681 			/* Skip this item */
2682 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) {
2683 			/* Skip this item */
2684 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) {
2685 			/* Skip this item */
2686 		} else {
2687 			/* Error */
2688 			return -1;
2689 		}
2690 		/* Advance to the next descriptor */
2691 		cur_desc += sizeof(*desc) + desc->length;
2692 		if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) {
2693 			break;
2694 		}
2695 		desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc);
2696 	}
2697 	return 0;
2698 }
2699 
2700 static bool
_spdk_bs_load_cur_md_page_valid(struct spdk_bs_load_ctx *ctx)
2701 {
2702 	uint32_t crc;
2703 
2704 	crc = _spdk_blob_md_page_calc_crc(ctx->page);
2705 	if (crc != ctx->page->crc) {
2706 		return false;
2707 	}
2708 
2709 	if (_spdk_bs_page_to_blobid(ctx->cur_page) != ctx->page->id) {
2710 		return false;
2711 	}
2712 	return true;
2713 }
2714 
2715 static void
2716 _spdk_bs_load_replay_cur_md_page(spdk_bs_sequence_t *seq, void *cb_arg);
2717 
2718 static void
2719 _spdk_bs_load_write_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2720 {
2721 	struct spdk_bs_load_ctx	*ctx = cb_arg;
2722 
2723 	_spdk_bs_load_complete(seq, ctx, bserrno);
2724 }
2725 
2726 static void
2727 _spdk_bs_load_write_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2728 {
2729 	struct spdk_bs_load_ctx	*ctx = cb_arg;
2730 
2731 	spdk_dma_free(ctx->mask);
2732 	ctx->mask = NULL;
2733 
2734 	_spdk_bs_write_used_clusters(seq, cb_arg, _spdk_bs_load_write_used_clusters_cpl);
2735 }
2736 
2737 static void
2738 _spdk_bs_load_write_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2739 {
2740 	struct spdk_bs_load_ctx	*ctx = cb_arg;
2741 
2742 	spdk_dma_free(ctx->mask);
2743 	ctx->mask = NULL;
2744 
2745 	_spdk_bs_write_used_blobids(seq, cb_arg, _spdk_bs_load_write_used_blobids_cpl);
2746 }
2747 
2748 static void
2749 _spdk_bs_load_write_used_md(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2750 {
2751 	_spdk_bs_write_used_md(seq, cb_arg, _spdk_bs_load_write_used_pages_cpl);
2752 }
2753 
2754 static void
2755 _spdk_bs_load_replay_md_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2756 {
2757 	struct spdk_bs_load_ctx *ctx = cb_arg;
2758 	uint64_t num_md_clusters;
2759 	uint64_t i;
2760 	uint32_t page_num;
2761 
2762 	if (bserrno != 0) {
2763 		_spdk_bs_load_ctx_fail(seq, ctx, bserrno);
2764 		return;
2765 	}
2766 
2767 	page_num = ctx->cur_page;
2768 	if (_spdk_bs_load_cur_md_page_valid(ctx) == true) {
2769 		if (ctx->page->sequence_num == 0 || ctx->in_page_chain == true) {
2770 			spdk_bit_array_set(ctx->bs->used_md_pages, page_num);
2771 			if (ctx->page->sequence_num == 0) {
2772 				spdk_bit_array_set(ctx->bs->used_blobids, page_num);
2773 			}
2774 			if (_spdk_bs_load_replay_md_parse_page(ctx->page, ctx->bs)) {
2775 				_spdk_bs_load_ctx_fail(seq, ctx, -EILSEQ);
2776 				return;
2777 			}
2778 			if (ctx->page->next != SPDK_INVALID_MD_PAGE) {
2779 				ctx->in_page_chain = true;
2780 				ctx->cur_page = ctx->page->next;
2781 				_spdk_bs_load_replay_cur_md_page(seq, cb_arg);
2782 				return;
2783 			}
2784 		}
2785 	}
2786 
2787 	ctx->in_page_chain = false;
2788 
2789 	do {
2790 		ctx->page_index++;
2791 	} while (spdk_bit_array_get(ctx->bs->used_md_pages, ctx->page_index) == true);
2792 
2793 	if (ctx->page_index < ctx->super->md_len) {
2794 		ctx->cur_page = ctx->page_index;
2795 		_spdk_bs_load_replay_cur_md_page(seq, cb_arg);
2796 	} else {
2797 		/* Claim all of the clusters used by the metadata */
2798 		num_md_clusters = divide_round_up(ctx->super->md_len, ctx->bs->pages_per_cluster);
2799 		for (i = 0; i < num_md_clusters; i++) {
2800 			_spdk_bs_claim_cluster(ctx->bs, i);
2801 		}
2802 		spdk_dma_free(ctx->page);
2803 		_spdk_bs_load_write_used_md(seq, ctx, bserrno);
2804 	}
2805 }
2806 
2807 static void
2808 _spdk_bs_load_replay_cur_md_page(spdk_bs_sequence_t *seq, void *cb_arg)
2809 {
2810 	struct spdk_bs_load_ctx *ctx = cb_arg;
2811 	uint64_t lba;
2812 
2813 	assert(ctx->cur_page < ctx->super->md_len);
2814 	lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->md_start + ctx->cur_page);
2815 	spdk_bs_sequence_read_dev(seq, ctx->page, lba,
2816 				  _spdk_bs_byte_to_lba(ctx->bs, SPDK_BS_PAGE_SIZE),
2817 				  _spdk_bs_load_replay_md_cpl, ctx);
2818 }
2819 
2820 static void
2821 _spdk_bs_load_replay_md(spdk_bs_sequence_t *seq, void *cb_arg)
2822 {
2823 	struct spdk_bs_load_ctx *ctx = cb_arg;
2824 
2825 	ctx->page_index = 0;
2826 	ctx->cur_page = 0;
2827 	ctx->page = spdk_dma_zmalloc(SPDK_BS_PAGE_SIZE,
2828 				     SPDK_BS_PAGE_SIZE,
2829 				     NULL);
2830 	if (!ctx->page) {
2831 		_spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM);
2832 		return;
2833 	}
2834 	_spdk_bs_load_replay_cur_md_page(seq, cb_arg);
2835 }
2836 
2837 static void
2838 _spdk_bs_recover(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2839 {
2840 	struct spdk_bs_load_ctx *ctx = cb_arg;
2841 	int		rc;
2842 
2843 	if (bserrno != 0) {
2844 		_spdk_bs_load_ctx_fail(seq, ctx, -EIO);
2845 		return;
2846 	}
2847 
2848 	rc = spdk_bit_array_resize(&ctx->bs->used_md_pages, ctx->super->md_len);
2849 	if (rc < 0) {
2850 		_spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM);
2851 		return;
2852 	}
2853 
2854 	rc = spdk_bit_array_resize(&ctx->bs->used_blobids, ctx->super->md_len);
2855 	if (rc < 0) {
2856 		_spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM);
2857 		return;
2858 	}
2859 
2860 	rc = spdk_bit_array_resize(&ctx->bs->used_clusters, ctx->bs->total_clusters);
2861 	if (rc < 0) {
2862 		_spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM);
2863 		return;
2864 	}
2865 
2866 	ctx->bs->num_free_clusters = ctx->bs->total_clusters;
2867 	_spdk_bs_load_replay_md(seq, cb_arg);
2868 }
2869 
2870 static void
2871 _spdk_bs_load_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2872 {
2873 	struct spdk_bs_load_ctx *ctx = cb_arg;
2874 	uint32_t	crc;
2875 	static const char zeros[SPDK_BLOBSTORE_TYPE_LENGTH];
2876 
2877 	if (ctx->super->version > SPDK_BS_VERSION ||
2878 	    ctx->super->version < SPDK_BS_INITIAL_VERSION) {
2879 		_spdk_bs_load_ctx_fail(seq, ctx, -EILSEQ);
2880 		return;
2881 	}
2882 
2883 	if (memcmp(ctx->super->signature, SPDK_BS_SUPER_BLOCK_SIG,
2884 		   sizeof(ctx->super->signature)) != 0) {
2885 		_spdk_bs_load_ctx_fail(seq, ctx, -EILSEQ);
2886 		return;
2887 	}
2888 
2889 	crc = _spdk_blob_md_page_calc_crc(ctx->super);
2890 	if (crc != ctx->super->crc) {
2891 		_spdk_bs_load_ctx_fail(seq, ctx, -EILSEQ);
2892 		return;
2893 	}
2894 
2895 	if (memcmp(&ctx->bs->bstype, &ctx->super->bstype, SPDK_BLOBSTORE_TYPE_LENGTH) == 0) {
2896 		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Bstype matched - loading blobstore\n");
2897 	} else if (memcmp(&ctx->bs->bstype, zeros, SPDK_BLOBSTORE_TYPE_LENGTH) == 0) {
2898 		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Bstype wildcard used - loading blobstore regardless of bstype\n");
2899 	} else {
2900 		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Unexpected bstype\n");
2901 		SPDK_TRACEDUMP(SPDK_LOG_BLOB, "Expected:", ctx->bs->bstype.bstype, SPDK_BLOBSTORE_TYPE_LENGTH);
2902 		SPDK_TRACEDUMP(SPDK_LOG_BLOB, "Found:", ctx->super->bstype.bstype, SPDK_BLOBSTORE_TYPE_LENGTH);
2903 		_spdk_bs_load_ctx_fail(seq, ctx, -ENXIO);
2904 		return;
2905 	}
2906 
2907 	/* Parse the super block */
2908 	ctx->bs->cluster_sz = ctx->super->cluster_size;
2909 	ctx->bs->total_clusters = ctx->bs->dev->blockcnt / (ctx->bs->cluster_sz / ctx->bs->dev->blocklen);
2910 	ctx->bs->pages_per_cluster = ctx->bs->cluster_sz / SPDK_BS_PAGE_SIZE;
2911 	ctx->bs->md_start = ctx->super->md_start;
2912 	ctx->bs->md_len = ctx->super->md_len;
2913 	ctx->bs->total_data_clusters = ctx->bs->total_clusters - divide_round_up(
2914 					       ctx->bs->md_start + ctx->bs->md_len, ctx->bs->pages_per_cluster);
2915 	ctx->bs->super_blob = ctx->super->super_blob;
2916 	memcpy(&ctx->bs->bstype, &ctx->super->bstype, sizeof(ctx->super->bstype));
2917 
2918 	if (ctx->super->clean == 0) {
2919 		_spdk_bs_recover(seq, ctx, 0);
2920 	} else if (ctx->super->used_blobid_mask_len == 0) {
2921 		/*
2922 		 * Metadata is clean, but this is an old metadata format without
2923 		 *  a blobid mask.  Clear the clean bit and then build the masks
2924 		 *  using _spdk_bs_recover.
2925 		 */
2926 		ctx->super->clean = 0;
2927 		_spdk_bs_write_super(seq, ctx->bs, ctx->super, _spdk_bs_recover, ctx);
2928 	} else {
2929 		ctx->super->clean = 0;
2930 		_spdk_bs_write_super(seq, ctx->bs, ctx->super, _spdk_bs_load_write_super_cpl, ctx);
2931 	}
2932 }
2933 
2934 void
2935 spdk_bs_load(struct spdk_bs_dev *dev, struct spdk_bs_opts *o,
2936 	     spdk_bs_op_with_handle_complete cb_fn, void *cb_arg)
2937 {
2938 	struct spdk_blob_store	*bs;
2939 	struct spdk_bs_cpl	cpl;
2940 	spdk_bs_sequence_t	*seq;
2941 	struct spdk_bs_load_ctx *ctx;
2942 	struct spdk_bs_opts	opts = {};
2943 
2944 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Loading blobstore from dev %p\n", dev);
2945 
2946 	if ((SPDK_BS_PAGE_SIZE % dev->blocklen) != 0) {
2947 		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "unsupported dev block length of %d\n", dev->blocklen);
2948 		dev->destroy(dev);
2949 		cb_fn(cb_arg, NULL, -EINVAL);
2950 		return;
2951 	}
2952 
2953 	if (o) {
2954 		opts = *o;
2955 	} else {
2956 		spdk_bs_opts_init(&opts);
2957 	}
2958 
2959 	if (opts.max_md_ops == 0 || opts.max_channel_ops == 0) {
2960 		dev->destroy(dev);
2961 		cb_fn(cb_arg, NULL, -EINVAL);
2962 		return;
2963 	}
2964 
2965 	bs = _spdk_bs_alloc(dev, &opts);
2966 	if (!bs) {
2967 		dev->destroy(dev);
2968 		cb_fn(cb_arg, NULL, -ENOMEM);
2969 		return;
2970 	}
2971 
2972 	ctx = calloc(1, sizeof(*ctx));
2973 	if (!ctx) {
2974 		_spdk_bs_free(bs);
2975 		cb_fn(cb_arg, NULL, -ENOMEM);
2976 		return;
2977 	}
2978 
2979 	ctx->bs = bs;
2980 	ctx->is_load = true;
2981 	ctx->iter_cb_fn = opts.iter_cb_fn;
2982 	ctx->iter_cb_arg = opts.iter_cb_arg;
2983 
2984 	/* Allocate memory for the super block */
2985 	ctx->super = spdk_dma_zmalloc(sizeof(*ctx->super), 0x1000, NULL);
2986 	if (!ctx->super) {
2987 		free(ctx);
2988 		_spdk_bs_free(bs);
		cb_fn(cb_arg, NULL, -ENOMEM);
2989 		return;
2990 	}
2991 
2992 	cpl.type = SPDK_BS_CPL_TYPE_BS_HANDLE;
2993 	cpl.u.bs_handle.cb_fn = cb_fn;
2994 	cpl.u.bs_handle.cb_arg = cb_arg;
2995 	cpl.u.bs_handle.bs = bs;
2996 
2997 	seq = spdk_bs_sequence_start(bs->md_channel, &cpl);
2998 	if (!seq) {
2999 		spdk_dma_free(ctx->super);
3000 		free(ctx);
3001 		_spdk_bs_free(bs);
3002 		cb_fn(cb_arg, NULL, -ENOMEM);
3003 		return;
3004 	}
3005 
3006 	/* Read the super block */
3007 	spdk_bs_sequence_read_dev(seq, ctx->super, _spdk_bs_page_to_lba(bs, 0),
3008 				  _spdk_bs_byte_to_lba(bs, sizeof(*ctx->super)),
3009 				  _spdk_bs_load_super_cpl, ctx);
3010 }
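
/*
 * Example (sketch; "load_done" is a hypothetical callback matching
 *  spdk_bs_op_with_handle_complete):
 *
 *	static void
 *	load_done(void *cb_arg, struct spdk_blob_store *bs, int bserrno)
 *	{
 *		if (bserrno == 0) {
 *			... bs is now ready for blob operations ...
 *		}
 *	}
 *
 *	spdk_bs_load(dev, NULL, load_done, NULL);
 */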
3011 
3012 /* END spdk_bs_load */
3013 
3014 /* START spdk_bs_init */
3015 
3016 struct spdk_bs_init_ctx {
3017 	struct spdk_blob_store		*bs;
3018 	struct spdk_bs_super_block	*super;
3019 };
3020 
3021 static void
3022 _spdk_bs_init_persist_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
3023 {
3024 	struct spdk_bs_init_ctx *ctx = cb_arg;
3025 
3026 	spdk_dma_free(ctx->super);
3027 	free(ctx);
3028 
3029 	spdk_bs_sequence_finish(seq, bserrno);
3030 }
3031 
3032 static void
3033 _spdk_bs_init_trim_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
3034 {
3035 	struct spdk_bs_init_ctx *ctx = cb_arg;
3036 
3037 	/* Write super block */
3038 	spdk_bs_sequence_write_dev(seq, ctx->super, _spdk_bs_page_to_lba(ctx->bs, 0),
3039 				   _spdk_bs_byte_to_lba(ctx->bs, sizeof(*ctx->super)),
3040 				   _spdk_bs_init_persist_super_cpl, ctx);
3041 }
3042 
3043 void
3044 spdk_bs_init(struct spdk_bs_dev *dev, struct spdk_bs_opts *o,
3045 	     spdk_bs_op_with_handle_complete cb_fn, void *cb_arg)
3046 {
3047 	struct spdk_bs_init_ctx *ctx;
3048 	struct spdk_blob_store	*bs;
3049 	struct spdk_bs_cpl	cpl;
3050 	spdk_bs_sequence_t	*seq;
3051 	spdk_bs_batch_t		*batch;
3052 	uint64_t		num_md_lba;
3053 	uint64_t		num_md_pages;
3054 	uint64_t		num_md_clusters;
3055 	uint32_t		i;
3056 	struct spdk_bs_opts	opts = {};
3057 	int			rc;
3058 
3059 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Initializing blobstore on dev %p\n", dev);
3060 
3061 	if ((SPDK_BS_PAGE_SIZE % dev->blocklen) != 0) {
3062 		SPDK_ERRLOG("unsupported dev block length of %d\n",
3063 			    dev->blocklen);
3064 		dev->destroy(dev);
3065 		cb_fn(cb_arg, NULL, -EINVAL);
3066 		return;
3067 	}
3068 
3069 	if (o) {
3070 		opts = *o;
3071 	} else {
3072 		spdk_bs_opts_init(&opts);
3073 	}
3074 
3075 	if (_spdk_bs_opts_verify(&opts) != 0) {
3076 		dev->destroy(dev);
3077 		cb_fn(cb_arg, NULL, -EINVAL);
3078 		return;
3079 	}
3080 
3081 	bs = _spdk_bs_alloc(dev, &opts);
3082 	if (!bs) {
3083 		dev->destroy(dev);
3084 		cb_fn(cb_arg, NULL, -ENOMEM);
3085 		return;
3086 	}
3087 
3088 	if (opts.num_md_pages == SPDK_BLOB_OPTS_NUM_MD_PAGES) {
3089 		/* By default, allocate 1 page per cluster.
3090 		 * Technically, this over-allocates metadata
3091 		 * because more metadata will reduce the number
3092 		 * of usable clusters. This can be addressed with
3093 		 * more complex math in the future.
3094 		 */
3095 		bs->md_len = bs->total_clusters;
3096 	} else {
3097 		bs->md_len = opts.num_md_pages;
3098 	}
3099 
3100 	rc = spdk_bit_array_resize(&bs->used_md_pages, bs->md_len);
3101 	if (rc < 0) {
3102 		_spdk_bs_free(bs);
3103 		cb_fn(cb_arg, NULL, -ENOMEM);
3104 		return;
3105 	}
3106 
3107 	rc = spdk_bit_array_resize(&bs->used_blobids, bs->md_len);
3108 	if (rc < 0) {
3109 		_spdk_bs_free(bs);
3110 		cb_fn(cb_arg, NULL, -ENOMEM);
3111 		return;
3112 	}
3113 
3114 	ctx = calloc(1, sizeof(*ctx));
3115 	if (!ctx) {
3116 		_spdk_bs_free(bs);
3117 		cb_fn(cb_arg, NULL, -ENOMEM);
3118 		return;
3119 	}
3120 
3121 	ctx->bs = bs;
3122 
3123 	/* Allocate memory for the super block */
3124 	ctx->super = spdk_dma_zmalloc(sizeof(*ctx->super), 0x1000, NULL);
3125 	if (!ctx->super) {
3126 		free(ctx);
3127 		_spdk_bs_free(bs);
		cb_fn(cb_arg, NULL, -ENOMEM);
3128 		return;
3129 	}
3130 	memcpy(ctx->super->signature, SPDK_BS_SUPER_BLOCK_SIG,
3131 	       sizeof(ctx->super->signature));
3132 	ctx->super->version = SPDK_BS_VERSION;
3133 	ctx->super->length = sizeof(*ctx->super);
3134 	ctx->super->super_blob = bs->super_blob;
3135 	ctx->super->clean = 0;
3136 	ctx->super->cluster_size = bs->cluster_sz;
3137 	memcpy(&ctx->super->bstype, &bs->bstype, sizeof(bs->bstype));
3138 
3139 	/* Calculate how many pages the metadata consumes at the front
3140 	 * of the disk.
3141 	 */
3142 
3143 	/* The super block uses 1 page */
3144 	num_md_pages = 1;
3145 
3146 	/* The used_md_pages mask requires 1 bit per metadata page, rounded
3147 	 * up to the nearest page, plus a header.
3148 	 */
3149 	ctx->super->used_page_mask_start = num_md_pages;
3150 	ctx->super->used_page_mask_len = divide_round_up(sizeof(struct spdk_bs_md_mask) +
3151 					 divide_round_up(bs->md_len, 8),
3152 					 SPDK_BS_PAGE_SIZE);
3153 	num_md_pages += ctx->super->used_page_mask_len;
3154 
3155 	/* The used_clusters mask requires 1 bit per cluster, rounded
3156 	 * up to the nearest page, plus a header.
3157 	 */
3158 	ctx->super->used_cluster_mask_start = num_md_pages;
3159 	ctx->super->used_cluster_mask_len = divide_round_up(sizeof(struct spdk_bs_md_mask) +
3160 					    divide_round_up(bs->total_clusters, 8),
3161 					    SPDK_BS_PAGE_SIZE);
3162 	num_md_pages += ctx->super->used_cluster_mask_len;
3163 
3164 	/* The used_blobids mask requires 1 bit per metadata page, rounded
3165 	 * up to the nearest page, plus a header.
3166 	 */
3167 	ctx->super->used_blobid_mask_start = num_md_pages;
3168 	ctx->super->used_blobid_mask_len = divide_round_up(sizeof(struct spdk_bs_md_mask) +
3169 					   divide_round_up(bs->md_len, 8),
3170 					   SPDK_BS_PAGE_SIZE);
3171 	num_md_pages += ctx->super->used_blobid_mask_len;
3172 
3173 	/* The metadata region size was chosen above */
3174 	ctx->super->md_start = bs->md_start = num_md_pages;
3175 	ctx->super->md_len = bs->md_len;
3176 	num_md_pages += bs->md_len;
3177 
3178 	num_md_lba = _spdk_bs_page_to_lba(bs, num_md_pages);
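
	/*
	 * Worked layout example (assuming the default 4 KiB metadata page): page 0 holds
	 *  the super block, followed by the used_page, used_cluster and used_blobid
	 *  masks, followed by the md_len metadata pages themselves.  num_md_pages is the
	 *  running total, and every LBA below num_md_lba is reserved for metadata.
	 */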
3179 
3180 	ctx->super->crc = _spdk_blob_md_page_calc_crc(ctx->super);
3181 
3182 	num_md_clusters = divide_round_up(num_md_pages, bs->pages_per_cluster);
3183 	if (num_md_clusters > bs->total_clusters) {
3184 		SPDK_ERRLOG("Blobstore metadata cannot use more clusters than are available; "
3185 			    "please decrease the number of pages reserved for metadata "
3186 			    "or increase the cluster size.\n");
3187 		spdk_dma_free(ctx->super);
3188 		free(ctx);
3189 		_spdk_bs_free(bs);
3190 		cb_fn(cb_arg, NULL, -ENOMEM);
3191 		return;
3192 	}
3193 	/* Claim all of the clusters used by the metadata */
3194 	for (i = 0; i < num_md_clusters; i++) {
3195 		_spdk_bs_claim_cluster(bs, i);
3196 	}
3197 
3198 	bs->total_data_clusters = bs->num_free_clusters;
3199 
3200 	cpl.type = SPDK_BS_CPL_TYPE_BS_HANDLE;
3201 	cpl.u.bs_handle.cb_fn = cb_fn;
3202 	cpl.u.bs_handle.cb_arg = cb_arg;
3203 	cpl.u.bs_handle.bs = bs;
3204 
3205 	seq = spdk_bs_sequence_start(bs->md_channel, &cpl);
3206 	if (!seq) {
3207 		spdk_dma_free(ctx->super);
3208 		free(ctx);
3209 		_spdk_bs_free(bs);
3210 		cb_fn(cb_arg, NULL, -ENOMEM);
3211 		return;
3212 	}
3213 
3214 	batch = spdk_bs_sequence_to_batch(seq, _spdk_bs_init_trim_cpl, ctx);
3215 
3216 	/* Clear metadata space */
3217 	spdk_bs_batch_write_zeroes_dev(batch, 0, num_md_lba);
3218 	/* Trim data clusters */
3219 	spdk_bs_batch_unmap_dev(batch, num_md_lba, ctx->bs->dev->blockcnt - num_md_lba);
3220 
3221 	spdk_bs_batch_close(batch);
3222 }
3223 
3224 /* END spdk_bs_init */
3225 
3226 /* START spdk_bs_destroy */
3227 
3228 static void
3229 _spdk_bs_destroy_trim_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
3230 {
3231 	struct spdk_bs_init_ctx *ctx = cb_arg;
3232 	struct spdk_blob_store *bs = ctx->bs;
3233 
3234 	/*
3235 	 * We need to defer calling spdk_bs_call_cpl() until after
3236 	 * dev destruction, so tuck these away for later use.
3237 	 */
3238 	bs->unload_err = bserrno;
3239 	memcpy(&bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl));
3240 	seq->cpl.type = SPDK_BS_CPL_TYPE_NONE;
3241 
3242 	spdk_bs_sequence_finish(seq, bserrno);
3243 
3244 	_spdk_bs_free(bs);
3245 	free(ctx);
3246 }
3247 
3248 void
3249 spdk_bs_destroy(struct spdk_blob_store *bs, spdk_bs_op_complete cb_fn,
3250 		void *cb_arg)
3251 {
3252 	struct spdk_bs_cpl	cpl;
3253 	spdk_bs_sequence_t	*seq;
3254 	struct spdk_bs_init_ctx *ctx;
3255 
3256 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Destroying blobstore\n");
3257 
3258 	if (!TAILQ_EMPTY(&bs->blobs)) {
3259 		SPDK_ERRLOG("Blobstore still has open blobs\n");
3260 		cb_fn(cb_arg, -EBUSY);
3261 		return;
3262 	}
3263 
3264 	cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
3265 	cpl.u.bs_basic.cb_fn = cb_fn;
3266 	cpl.u.bs_basic.cb_arg = cb_arg;
3267 
3268 	ctx = calloc(1, sizeof(*ctx));
3269 	if (!ctx) {
3270 		cb_fn(cb_arg, -ENOMEM);
3271 		return;
3272 	}
3273 
3274 	ctx->bs = bs;
3275 
3276 	seq = spdk_bs_sequence_start(bs->md_channel, &cpl);
3277 	if (!seq) {
3278 		free(ctx);
3279 		cb_fn(cb_arg, -ENOMEM);
3280 		return;
3281 	}
3282 
3283 	/* Write zeroes to the super block */
3284 	spdk_bs_sequence_write_zeroes_dev(seq,
3285 					  _spdk_bs_page_to_lba(bs, 0),
3286 					  _spdk_bs_byte_to_lba(bs, sizeof(struct spdk_bs_super_block)),
3287 					  _spdk_bs_destroy_trim_cpl, ctx);
3288 }
3289 
3290 /* END spdk_bs_destroy */
3291 
3292 /* START spdk_bs_unload */
3293 
3294 static void
3295 _spdk_bs_unload_write_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
3296 {
3297 	struct spdk_bs_load_ctx	*ctx = cb_arg;
3298 
3299 	spdk_dma_free(ctx->super);
3300 
3301 	/*
3302 	 * We need to defer calling spdk_bs_call_cpl() until after
3303 	 * dev destruction, so tuck these away for later use.
3304 	 */
3305 	ctx->bs->unload_err = bserrno;
3306 	memcpy(&ctx->bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl));
3307 	seq->cpl.type = SPDK_BS_CPL_TYPE_NONE;
3308 
3309 	spdk_bs_sequence_finish(seq, bserrno);
3310 
3311 	_spdk_bs_free(ctx->bs);
3312 	free(ctx);
3313 }
3314 
3315 static void
3316 _spdk_bs_unload_write_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
3317 {
3318 	struct spdk_bs_load_ctx	*ctx = cb_arg;
3319 
3320 	spdk_dma_free(ctx->mask);
3321 	ctx->super->clean = 1;
3322 
3323 	_spdk_bs_write_super(seq, ctx->bs, ctx->super, _spdk_bs_unload_write_super_cpl, ctx);
3324 }
3325 
3326 static void
3327 _spdk_bs_unload_write_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
3328 {
3329 	struct spdk_bs_load_ctx	*ctx = cb_arg;
3330 
3331 	spdk_dma_free(ctx->mask);
3332 	ctx->mask = NULL;
3333 
3334 	_spdk_bs_write_used_clusters(seq, cb_arg, _spdk_bs_unload_write_used_clusters_cpl);
3335 }
3336 
3337 static void
3338 _spdk_bs_unload_write_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
3339 {
3340 	struct spdk_bs_load_ctx	*ctx = cb_arg;
3341 
3342 	spdk_dma_free(ctx->mask);
3343 	ctx->mask = NULL;
3344 
3345 	_spdk_bs_write_used_blobids(seq, cb_arg, _spdk_bs_unload_write_used_blobids_cpl);
3346 }
3347 
3348 static void
3349 _spdk_bs_unload_read_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
3350 {
3351 	_spdk_bs_write_used_md(seq, cb_arg, _spdk_bs_unload_write_used_pages_cpl);
3352 }
3353 
3354 void
3355 spdk_bs_unload(struct spdk_blob_store *bs, spdk_bs_op_complete cb_fn, void *cb_arg)
3356 {
3357 	struct spdk_bs_cpl	cpl;
3358 	spdk_bs_sequence_t	*seq;
3359 	struct spdk_bs_load_ctx *ctx;
3360 
3361 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Syncing blobstore\n");
3362 
3363 	if (!TAILQ_EMPTY(&bs->blobs)) {
3364 		SPDK_ERRLOG("Blobstore still has open blobs\n");
3365 		cb_fn(cb_arg, -EBUSY);
3366 		return;
3367 	}
3368 
3369 	ctx = calloc(1, sizeof(*ctx));
3370 	if (!ctx) {
3371 		cb_fn(cb_arg, -ENOMEM);
3372 		return;
3373 	}
3374 
3375 	ctx->bs = bs;
3376 	ctx->is_load = false;
3377 
3378 	ctx->super = spdk_dma_zmalloc(sizeof(*ctx->super), 0x1000, NULL);
3379 	if (!ctx->super) {
3380 		free(ctx);
3381 		cb_fn(cb_arg, -ENOMEM);
3382 		return;
3383 	}
3384 
3385 	cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
3386 	cpl.u.bs_basic.cb_fn = cb_fn;
3387 	cpl.u.bs_basic.cb_arg = cb_arg;
3388 
3389 	seq = spdk_bs_sequence_start(bs->md_channel, &cpl);
3390 	if (!seq) {
3391 		spdk_dma_free(ctx->super);
3392 		free(ctx);
3393 		cb_fn(cb_arg, -ENOMEM);
3394 		return;
3395 	}
3396 
3397 	/* Read super block */
3398 	spdk_bs_sequence_read_dev(seq, ctx->super, _spdk_bs_page_to_lba(bs, 0),
3399 				  _spdk_bs_byte_to_lba(bs, sizeof(*ctx->super)),
3400 				  _spdk_bs_unload_read_super_cpl, ctx);
3401 }
3402 
3403 /* END spdk_bs_unload */
3404 
3405 /* START spdk_bs_set_super */
3406 
3407 struct spdk_bs_set_super_ctx {
3408 	struct spdk_blob_store		*bs;
3409 	struct spdk_bs_super_block	*super;
3410 };
3411 
3412 static void
3413 _spdk_bs_set_super_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
3414 {
3415 	struct spdk_bs_set_super_ctx	*ctx = cb_arg;
3416 
3417 	if (bserrno != 0) {
3418 		SPDK_ERRLOG("Unable to write to super block of blobstore\n");
3419 	}
3420 
3421 	spdk_dma_free(ctx->super);
3422 
3423 	spdk_bs_sequence_finish(seq, bserrno);
3424 
3425 	free(ctx);
3426 }
3427 
3428 static void
3429 _spdk_bs_set_super_read_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
3430 {
3431 	struct spdk_bs_set_super_ctx	*ctx = cb_arg;
3432 
3433 	if (bserrno != 0) {
3434 		SPDK_ERRLOG("Unable to read super block of blobstore\n");
3435 		spdk_dma_free(ctx->super);
3436 		spdk_bs_sequence_finish(seq, bserrno);
3437 		free(ctx);
3438 		return;
3439 	}
3440 
3441 	_spdk_bs_write_super(seq, ctx->bs, ctx->super, _spdk_bs_set_super_write_cpl, ctx);
3442 }
3443 
3444 void
3445 spdk_bs_set_super(struct spdk_blob_store *bs, spdk_blob_id blobid,
3446 		  spdk_bs_op_complete cb_fn, void *cb_arg)
3447 {
3448 	struct spdk_bs_cpl		cpl;
3449 	spdk_bs_sequence_t		*seq;
3450 	struct spdk_bs_set_super_ctx	*ctx;
3451 
3452 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Setting super blob id on blobstore\n");
3453 
3454 	ctx = calloc(1, sizeof(*ctx));
3455 	if (!ctx) {
3456 		cb_fn(cb_arg, -ENOMEM);
3457 		return;
3458 	}
3459 
3460 	ctx->bs = bs;
3461 
3462 	ctx->super = spdk_dma_zmalloc(sizeof(*ctx->super), 0x1000, NULL);
3463 	if (!ctx->super) {
3464 		free(ctx);
3465 		cb_fn(cb_arg, -ENOMEM);
3466 		return;
3467 	}
3468 
3469 	cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
3470 	cpl.u.bs_basic.cb_fn = cb_fn;
3471 	cpl.u.bs_basic.cb_arg = cb_arg;
3472 
3473 	seq = spdk_bs_sequence_start(bs->md_channel, &cpl);
3474 	if (!seq) {
3475 		spdk_dma_free(ctx->super);
3476 		free(ctx);
3477 		cb_fn(cb_arg, -ENOMEM);
3478 		return;
3479 	}
3480 
3481 	bs->super_blob = blobid;
3482 
3483 	/* Read super block */
3484 	spdk_bs_sequence_read_dev(seq, ctx->super, _spdk_bs_page_to_lba(bs, 0),
3485 				  _spdk_bs_byte_to_lba(bs, sizeof(*ctx->super)),
3486 				  _spdk_bs_set_super_read_cpl, ctx);
3487 }
3488 
3489 /* END spdk_bs_set_super */
3490 
3491 void
3492 spdk_bs_get_super(struct spdk_blob_store *bs,
3493 		  spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
3494 {
3495 	if (bs->super_blob == SPDK_BLOBID_INVALID) {
3496 		cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOENT);
3497 	} else {
3498 		cb_fn(cb_arg, bs->super_blob, 0);
3499 	}
3500 }
3501 
3502 uint64_t
3503 spdk_bs_get_cluster_size(struct spdk_blob_store *bs)
3504 {
3505 	return bs->cluster_sz;
3506 }
3507 
3508 uint64_t
3509 spdk_bs_get_page_size(struct spdk_blob_store *bs)
3510 {
3511 	return SPDK_BS_PAGE_SIZE;
3512 }
3513 
3514 uint64_t
3515 spdk_bs_free_cluster_count(struct spdk_blob_store *bs)
3516 {
3517 	return bs->num_free_clusters;
3518 }
3519 
3520 uint64_t
3521 spdk_bs_total_data_cluster_count(struct spdk_blob_store *bs)
3522 {
3523 	return bs->total_data_clusters;
3524 }
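
/*
 * Example: the usable data capacity of a loaded blobstore, in bytes, follows
 *  directly from these getters:
 *
 *	uint64_t data_bytes = spdk_bs_total_data_cluster_count(bs) *
 *			      spdk_bs_get_cluster_size(bs);
 */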
3525 
3526 static int
3527 spdk_bs_register_md_thread(struct spdk_blob_store *bs)
3528 {
3529 	bs->md_channel = spdk_get_io_channel(bs);
3530 	if (!bs->md_channel) {
3531 		SPDK_ERRLOG("Failed to get IO channel.\n");
3532 		return -1;
3533 	}
3534 
3535 	return 0;
3536 }
3537 
3538 static int
3539 spdk_bs_unregister_md_thread(struct spdk_blob_store *bs)
3540 {
3541 	spdk_put_io_channel(bs->md_channel);
3542 
3543 	return 0;
3544 }
3545 
3546 spdk_blob_id
spdk_blob_get_id(struct spdk_blob *blob)
3547 {
3548 	assert(blob != NULL);
3549 
3550 	return blob->id;
3551 }
3552 
3553 uint64_t
spdk_blob_get_num_pages(struct spdk_blob *blob)
3554 {
3555 	assert(blob != NULL);
3556 
3557 	return _spdk_bs_cluster_to_page(blob->bs, blob->active.num_clusters);
3558 }
3559 
3560 uint64_t
spdk_blob_get_num_clusters(struct spdk_blob *blob)
3561 {
3562 	assert(blob != NULL);
3563 
3564 	return blob->active.num_clusters;
3565 }
3566 
3567 /* START spdk_bs_create_blob */
3568 
3569 static void
3570 _spdk_bs_create_blob_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
3571 {
3572 	struct spdk_blob *blob = cb_arg;
3573 
3574 	_spdk_blob_free(blob);
3575 
3576 	spdk_bs_sequence_finish(seq, bserrno);
3577 }
3578 
3579 static int
3580 _spdk_blob_set_xattrs(struct spdk_blob *blob, const struct spdk_blob_xattr_opts *xattrs,
3581 		      bool internal)
3582 {
3583 	uint64_t i;
3584 	size_t value_len = 0;
3585 	int rc;
3586 	const void *value = NULL;
3587 	if (xattrs->count > 0 && xattrs->get_value == NULL) {
3588 		return -EINVAL;
3589 	}
3590 	for (i = 0; i < xattrs->count; i++) {
3591 		xattrs->get_value(xattrs->ctx, xattrs->names[i], &value, &value_len);
3592 		if (value == NULL || value_len == 0) {
3593 			return -EINVAL;
3594 		}
3595 		rc = _spdk_blob_set_xattr(blob, xattrs->names[i], value, value_len, internal);
3596 		if (rc < 0) {
3597 			return rc;
3598 		}
3599 	}
3600 	return 0;
3601 }
3602 
3603 static void
3604 _spdk_blob_set_thin_provision(struct spdk_blob *blob)
3605 {
3606 	_spdk_blob_verify_md_op(blob);
3607 	blob->invalid_flags |= SPDK_BLOB_THIN_PROV;
3608 	blob->state = SPDK_BLOB_STATE_DIRTY;
3609 }
3610 
3611 static void
3612 _spdk_bs_create_blob(struct spdk_blob_store *bs,
3613 		     const struct spdk_blob_opts *opts,
3614 		     const struct spdk_blob_xattr_opts *internal_xattrs,
3615 		     spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
3616 {
3617 	struct spdk_blob	*blob;
3618 	uint32_t		page_idx;
3619 	struct spdk_bs_cpl	cpl;
3620 	struct spdk_blob_opts	opts_default;
3621 	struct spdk_blob_xattr_opts internal_xattrs_default;
3622 	spdk_bs_sequence_t	*seq;
3623 	spdk_blob_id		id;
3624 	int rc;
3625 
3626 	assert(spdk_get_thread() == bs->md_thread);
3627 
3628 	page_idx = spdk_bit_array_find_first_clear(bs->used_md_pages, 0);
3629 	if (page_idx >= spdk_bit_array_capacity(bs->used_md_pages)) {
3630 		cb_fn(cb_arg, 0, -ENOMEM);
3631 		return;
3632 	}

	id = _spdk_bs_page_to_blobid(page_idx);
3637 
3638 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Creating blob with id %lu at page %u\n", id, page_idx);
3639 
3640 	blob = _spdk_blob_alloc(bs, id);
3641 	if (!blob) {
3642 		cb_fn(cb_arg, 0, -ENOMEM);
3643 		return;
3644 	}
3645 
3646 	if (!opts) {
3647 		spdk_blob_opts_init(&opts_default);
3648 		opts = &opts_default;
3649 	}
3650 	if (!internal_xattrs) {
3651 		_spdk_blob_xattrs_init(&internal_xattrs_default);
3652 		internal_xattrs = &internal_xattrs_default;
3653 	}
3654 
3655 	rc = _spdk_blob_set_xattrs(blob, &opts->xattrs, false);
3656 	if (rc < 0) {
3657 		_spdk_blob_free(blob);
3658 		cb_fn(cb_arg, 0, rc);
3659 		return;
3660 	}
3661 
3662 	rc = _spdk_blob_set_xattrs(blob, internal_xattrs, true);
3663 	if (rc < 0) {
3664 		_spdk_blob_free(blob);
3665 		cb_fn(cb_arg, 0, rc);
3666 		return;
3667 	}
3668 
3669 	if (opts->thin_provision) {
3670 		_spdk_blob_set_thin_provision(blob);
3671 	}
3672 
3673 	rc = _spdk_blob_resize(blob, opts->num_clusters);
3674 	if (rc < 0) {
3675 		_spdk_blob_free(blob);
3676 		cb_fn(cb_arg, 0, rc);
3677 		return;
3678 	}
3679 	cpl.type = SPDK_BS_CPL_TYPE_BLOBID;
3680 	cpl.u.blobid.cb_fn = cb_fn;
3681 	cpl.u.blobid.cb_arg = cb_arg;
3682 	cpl.u.blobid.blobid = blob->id;
3683 
3684 	seq = spdk_bs_sequence_start(bs->md_channel, &cpl);
3685 	if (!seq) {
3686 		_spdk_blob_free(blob);
3687 		cb_fn(cb_arg, 0, -ENOMEM);
3688 		return;
3689 	}
3690 
	/* Claim the metadata page and blobid only now, after every failure path
	 *  has been passed, so an early error return cannot leak them. */
	spdk_bit_array_set(bs->used_blobids, page_idx);
	spdk_bit_array_set(bs->used_md_pages, page_idx);

	_spdk_blob_persist(seq, blob, _spdk_bs_create_blob_cpl, blob);
3692 }
3693 
3694 void spdk_bs_create_blob(struct spdk_blob_store *bs,
3695 			 spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
3696 {
3697 	_spdk_bs_create_blob(bs, NULL, NULL, cb_fn, cb_arg);
3698 }
3699 
3700 void spdk_bs_create_blob_ext(struct spdk_blob_store *bs, const struct spdk_blob_opts *opts,
3701 			     spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
3702 {
3703 	_spdk_bs_create_blob(bs, opts, NULL, cb_fn, cb_arg);
3704 }
3705 
3706 /* END spdk_bs_create_blob */
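
/*
 * Usage sketch (illustrative only, not part of this library): creating a
 * thin-provisioned blob with spdk_bs_create_blob_ext(). The names bs and
 * create_done, and the size chosen, are hypothetical.
 *
 *	static void
 *	create_done(void *cb_arg, spdk_blob_id blobid, int bserrno)
 *	{
 *		if (bserrno != 0) {
 *			SPDK_ERRLOG("blob creation failed: %d\n", bserrno);
 *			return;
 *		}
 *		// blobid may now be passed to spdk_bs_open_blob()
 *	}
 *
 *	struct spdk_blob_opts opts;
 *
 *	spdk_blob_opts_init(&opts);
 *	opts.num_clusters = 10;
 *	opts.thin_provision = true;
 *	spdk_bs_create_blob_ext(bs, &opts, create_done, NULL);
 */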
3707 
3708 /* START blob_cleanup */
3709 
3710 struct spdk_clone_snapshot_ctx {
3711 	struct spdk_bs_cpl      cpl;
3712 	int bserrno;
3713 
3714 	struct spdk_io_channel *channel;
3715 
3716 	/* Current cluster for inflate operation */
3717 	uint64_t cluster;
3718 
3719 	struct {
3720 		spdk_blob_id id;
3721 		struct spdk_blob *blob;
3722 	} original;
3723 	struct {
3724 		spdk_blob_id id;
3725 		struct spdk_blob *blob;
3726 	} new;
3727 
	/* xattrs specified for the snapshot/clone only. They have no impact on
	 * the original blob's xattrs. */
3730 	const struct spdk_blob_xattr_opts *xattrs;
3731 };
3732 
3733 static void
3734 _spdk_bs_clone_snapshot_cleanup_finish(void *cb_arg, int bserrno)
3735 {
3736 	struct spdk_clone_snapshot_ctx *ctx = cb_arg;
3737 	struct spdk_bs_cpl *cpl = &ctx->cpl;
3738 
3739 	if (bserrno != 0) {
3740 		if (ctx->bserrno != 0) {
3741 			SPDK_ERRLOG("Cleanup error %d\n", bserrno);
3742 		} else {
3743 			ctx->bserrno = bserrno;
3744 		}
3745 	}
3746 
3747 	switch (cpl->type) {
3748 	case SPDK_BS_CPL_TYPE_BLOBID:
3749 		cpl->u.blobid.cb_fn(cpl->u.blobid.cb_arg, cpl->u.blobid.blobid, ctx->bserrno);
3750 		break;
3751 	case SPDK_BS_CPL_TYPE_BLOB_BASIC:
3752 		cpl->u.blob_basic.cb_fn(cpl->u.blob_basic.cb_arg, ctx->bserrno);
3753 		break;
3754 	default:
3755 		SPDK_UNREACHABLE();
3756 		break;
3757 	}
3758 
3759 	free(ctx);
3760 }
3761 
3762 static void
3763 _spdk_bs_clone_snapshot_origblob_cleanup(void *cb_arg, int bserrno)
3764 {
3765 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
3766 	struct spdk_blob *origblob = ctx->original.blob;
3767 
3768 	if (bserrno != 0) {
3769 		if (ctx->bserrno != 0) {
3770 			SPDK_ERRLOG("Cleanup error %d\n", bserrno);
3771 		} else {
3772 			ctx->bserrno = bserrno;
3773 		}
3774 	}
3775 
3776 	ctx->original.id = origblob->id;
3777 	spdk_blob_close(origblob, _spdk_bs_clone_snapshot_cleanup_finish, ctx);
3778 }
3779 
3780 static void
3781 _spdk_bs_clone_snapshot_newblob_cleanup(void *cb_arg, int bserrno)
3782 {
3783 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
3784 	struct spdk_blob *newblob = ctx->new.blob;
3785 
3786 	if (bserrno != 0) {
3787 		if (ctx->bserrno != 0) {
3788 			SPDK_ERRLOG("Cleanup error %d\n", bserrno);
3789 		} else {
3790 			ctx->bserrno = bserrno;
3791 		}
3792 	}
3793 
3794 	ctx->new.id = newblob->id;
3795 	spdk_blob_close(newblob, _spdk_bs_clone_snapshot_origblob_cleanup, ctx);
3796 }
3797 
3798 /* END blob_cleanup */
3799 
3800 /* START spdk_bs_create_snapshot */
3801 
3802 static void
3803 _spdk_bs_snapshot_origblob_sync_cpl(void *cb_arg, int bserrno)
3804 {
3805 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
3806 	struct spdk_blob *newblob = ctx->new.blob;
3807 
3808 	if (bserrno != 0) {
3809 		_spdk_bs_clone_snapshot_newblob_cleanup(ctx, bserrno);
3810 		return;
3811 	}
3812 
3813 	/* Remove metadata descriptor SNAPSHOT_IN_PROGRESS */
3814 	bserrno = _spdk_blob_remove_xattr(newblob, SNAPSHOT_IN_PROGRESS, true);
3815 	if (bserrno != 0) {
3816 		_spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
3817 		return;
3818 	}
3819 
3820 	_spdk_bs_blob_list_add(ctx->original.blob);
3821 
3822 	spdk_blob_set_read_only(newblob);
3823 
3824 	/* sync snapshot metadata */
	spdk_blob_sync_md(newblob, _spdk_bs_clone_snapshot_origblob_cleanup, ctx);
3826 }
3827 
3828 static void
3829 _spdk_bs_snapshot_newblob_sync_cpl(void *cb_arg, int bserrno)
3830 {
3831 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
3832 	struct spdk_blob *origblob = ctx->original.blob;
3833 	struct spdk_blob *newblob = ctx->new.blob;
3834 
3835 	if (bserrno != 0) {
3836 		_spdk_bs_clone_snapshot_newblob_cleanup(ctx, bserrno);
3837 		return;
3838 	}
3839 
3840 	/* Set internal xattr for snapshot id */
3841 	bserrno = _spdk_blob_set_xattr(origblob, BLOB_SNAPSHOT, &newblob->id, sizeof(spdk_blob_id), true);
3842 	if (bserrno != 0) {
3843 		_spdk_bs_clone_snapshot_newblob_cleanup(ctx, bserrno);
3844 		return;
3845 	}
3846 	origblob->parent_id = newblob->id;
3847 
3848 	/* Create new back_bs_dev for snapshot */
3849 	origblob->back_bs_dev = spdk_bs_create_blob_bs_dev(newblob);
3850 	if (origblob->back_bs_dev == NULL) {
3851 		_spdk_bs_clone_snapshot_newblob_cleanup(ctx, -EINVAL);
3852 		return;
3853 	}
3854 
3855 	/* set clone blob as thin provisioned */
3856 	_spdk_blob_set_thin_provision(origblob);
3857 
3858 	_spdk_bs_blob_list_add(newblob);
3859 
3860 	/* Zero out origblob cluster map */
	memset(origblob->active.clusters, 0,
	       origblob->active.num_clusters * sizeof(*origblob->active.clusters));
3863 
3864 	/* sync clone metadata */
3865 	spdk_blob_sync_md(origblob, _spdk_bs_snapshot_origblob_sync_cpl, ctx);
3866 }
3867 
3868 static void
3869 _spdk_bs_snapshot_newblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
3870 {
3871 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
3872 	struct spdk_blob *origblob = ctx->original.blob;
3873 	struct spdk_blob *newblob = _blob;
3874 
3875 	if (bserrno != 0) {
3876 		_spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
3877 		return;
3878 	}
3879 
3880 	ctx->new.blob = newblob;
3881 
3882 	/* set new back_bs_dev for snapshot */
3883 	newblob->back_bs_dev = origblob->back_bs_dev;
3884 	/* Set invalid flags from origblob */
3885 	newblob->invalid_flags = origblob->invalid_flags;
3886 
3887 	/* Copy cluster map to snapshot */
	memcpy(newblob->active.clusters, origblob->active.clusters,
	       origblob->active.num_clusters * sizeof(*origblob->active.clusters));
3890 
3891 	/* sync snapshot metadata */
3892 	spdk_blob_sync_md(newblob, _spdk_bs_snapshot_newblob_sync_cpl, ctx);
3893 }
3894 
3895 static void
3896 _spdk_bs_snapshot_newblob_create_cpl(void *cb_arg, spdk_blob_id blobid, int bserrno)
3897 {
3898 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
3899 	struct spdk_blob *origblob = ctx->original.blob;
3900 
3901 	if (bserrno != 0) {
3902 		_spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
3903 		return;
3904 	}
3905 
3906 	ctx->new.id = blobid;
3907 	ctx->cpl.u.blobid.blobid = blobid;
3908 
3909 	spdk_bs_open_blob(origblob->bs, ctx->new.id, _spdk_bs_snapshot_newblob_open_cpl, ctx);
3910 }
3911 
3913 static void
3914 _spdk_bs_xattr_snapshot(void *arg, const char *name,
3915 			const void **value, size_t *value_len)
3916 {
	struct spdk_blob *blob = (struct spdk_blob *)arg;

	assert(strncmp(name, SNAPSHOT_IN_PROGRESS, sizeof(SNAPSHOT_IN_PROGRESS)) == 0);

	*value = &blob->id;
	*value_len = sizeof(blob->id);
3922 }
3923 
3924 static void
3925 _spdk_bs_snapshot_origblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
3926 {
3927 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
3928 	struct spdk_blob_opts opts;
3929 	struct spdk_blob_xattr_opts internal_xattrs;
3930 	char *xattrs_names[] = { SNAPSHOT_IN_PROGRESS };
3931 
3932 	if (bserrno != 0) {
3933 		_spdk_bs_clone_snapshot_cleanup_finish(ctx, bserrno);
3934 		return;
3935 	}
3936 
3937 	ctx->original.blob = _blob;
3938 
3939 	if (_blob->data_ro || _blob->md_ro) {
		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Cannot create snapshot from read-only blob with id %lu\n",
3941 			      _blob->id);
3942 		_spdk_bs_clone_snapshot_origblob_cleanup(ctx, -EINVAL);
3943 		return;
3944 	}
3945 
3946 	spdk_blob_opts_init(&opts);
3947 	_spdk_blob_xattrs_init(&internal_xattrs);
3948 
	/* Make the new blob the same size as the original blob,
	 * but do not allocate clusters */
3951 	opts.thin_provision = true;
3952 	opts.num_clusters = spdk_blob_get_num_clusters(_blob);
3953 
3954 	/* If there are any xattrs specified for snapshot, set them now */
3955 	if (ctx->xattrs) {
3956 		memcpy(&opts.xattrs, ctx->xattrs, sizeof(*ctx->xattrs));
3957 	}
3958 	/* Set internal xattr SNAPSHOT_IN_PROGRESS */
3959 	internal_xattrs.count = 1;
3960 	internal_xattrs.ctx = _blob;
3961 	internal_xattrs.names = xattrs_names;
3962 	internal_xattrs.get_value = _spdk_bs_xattr_snapshot;
3963 
3964 	_spdk_bs_create_blob(_blob->bs, &opts, &internal_xattrs,
3965 			     _spdk_bs_snapshot_newblob_create_cpl, ctx);
3966 }
3967 
3968 void spdk_bs_create_snapshot(struct spdk_blob_store *bs, spdk_blob_id blobid,
3969 			     const struct spdk_blob_xattr_opts *snapshot_xattrs,
3970 			     spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
3971 {
3972 	struct spdk_clone_snapshot_ctx *ctx = calloc(1, sizeof(*ctx));
3973 
3974 	if (!ctx) {
3975 		cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOMEM);
3976 		return;
3977 	}
3978 	ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOBID;
3979 	ctx->cpl.u.blobid.cb_fn = cb_fn;
3980 	ctx->cpl.u.blobid.cb_arg = cb_arg;
3981 	ctx->cpl.u.blobid.blobid = SPDK_BLOBID_INVALID;
3982 	ctx->bserrno = 0;
3983 	ctx->original.id = blobid;
3984 	ctx->xattrs = snapshot_xattrs;
3985 
3986 	spdk_bs_open_blob(bs, ctx->original.id, _spdk_bs_snapshot_origblob_open_cpl, ctx);
3987 }
3988 /* END spdk_bs_create_snapshot */
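
/*
 * Usage sketch (illustrative only): taking a snapshot. On success the new
 * snapshot id is handed to the callback, the snapshot becomes the read-only
 * parent, and the original blob is turned into a thin-provisioned clone of
 * it. The names bs, blobid and snapshot_done are hypothetical.
 *
 *	static void
 *	snapshot_done(void *cb_arg, spdk_blob_id snapshot_id, int bserrno)
 *	{
 *		// on success, snapshot_id names the new read-only snapshot
 *	}
 *
 *	spdk_bs_create_snapshot(bs, blobid, NULL, snapshot_done, NULL);
 */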
3989 
3990 /* START spdk_bs_create_clone */
3991 
3992 static void
3993 _spdk_bs_xattr_clone(void *arg, const char *name,
3994 		     const void **value, size_t *value_len)
3995 {
	struct spdk_blob *blob = (struct spdk_blob *)arg;

	assert(strncmp(name, BLOB_SNAPSHOT, sizeof(BLOB_SNAPSHOT)) == 0);

	*value = &blob->id;
	*value_len = sizeof(blob->id);
4001 }
4002 
4003 static void
4004 _spdk_bs_clone_newblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
4005 {
4006 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
	struct spdk_blob *clone = _blob;

	if (bserrno != 0) {
		/* The clone was created but could not be opened; report the error. */
		_spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
		return;
	}

	ctx->new.blob = clone;
4010 	_spdk_bs_blob_list_add(clone);
4011 
4012 	spdk_blob_close(clone, _spdk_bs_clone_snapshot_origblob_cleanup, ctx);
4013 }
4014 
4015 static void
4016 _spdk_bs_clone_newblob_create_cpl(void *cb_arg, spdk_blob_id blobid, int bserrno)
4017 {
4018 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
4019 
4020 	ctx->cpl.u.blobid.blobid = blobid;
4021 	spdk_bs_open_blob(ctx->original.blob->bs, blobid, _spdk_bs_clone_newblob_open_cpl, ctx);
4022 }
4023 
4024 static void
4025 _spdk_bs_clone_origblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
4026 {
4027 	struct spdk_clone_snapshot_ctx	*ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
4028 	struct spdk_blob_opts		opts;
4029 	struct spdk_blob_xattr_opts internal_xattrs;
4030 	char *xattr_names[] = { BLOB_SNAPSHOT };
4031 
4032 	if (bserrno != 0) {
4033 		_spdk_bs_clone_snapshot_cleanup_finish(ctx, bserrno);
4034 		return;
4035 	}
4036 
4037 	ctx->original.blob = _blob;
4038 
4039 	if (!_blob->data_ro || !_blob->md_ro) {
		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Cannot create clone from a blob that is not read-only\n");
4041 		_spdk_bs_clone_snapshot_origblob_cleanup(ctx, -EINVAL);
4042 		return;
4043 	}
4044 
4045 	spdk_blob_opts_init(&opts);
4046 	_spdk_blob_xattrs_init(&internal_xattrs);
4047 
4048 	opts.thin_provision = true;
4049 	opts.num_clusters = spdk_blob_get_num_clusters(_blob);
4050 	if (ctx->xattrs) {
4051 		memcpy(&opts.xattrs, ctx->xattrs, sizeof(*ctx->xattrs));
4052 	}
4053 
4054 	/* Set internal xattr BLOB_SNAPSHOT */
4055 	internal_xattrs.count = 1;
4056 	internal_xattrs.ctx = _blob;
4057 	internal_xattrs.names = xattr_names;
4058 	internal_xattrs.get_value = _spdk_bs_xattr_clone;
4059 
4060 	_spdk_bs_create_blob(_blob->bs, &opts, &internal_xattrs,
4061 			     _spdk_bs_clone_newblob_create_cpl, ctx);
4062 }
4063 
4064 void spdk_bs_create_clone(struct spdk_blob_store *bs, spdk_blob_id blobid,
4065 			  const struct spdk_blob_xattr_opts *clone_xattrs,
4066 			  spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
4067 {
4068 	struct spdk_clone_snapshot_ctx	*ctx = calloc(1, sizeof(*ctx));
4069 
4070 	if (!ctx) {
4071 		cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOMEM);
4072 		return;
4073 	}
4074 
4075 	ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOBID;
4076 	ctx->cpl.u.blobid.cb_fn = cb_fn;
4077 	ctx->cpl.u.blobid.cb_arg = cb_arg;
4078 	ctx->cpl.u.blobid.blobid = SPDK_BLOBID_INVALID;
4079 	ctx->bserrno = 0;
4080 	ctx->xattrs = clone_xattrs;
4081 	ctx->original.id = blobid;
4082 
4083 	spdk_bs_open_blob(bs, ctx->original.id, _spdk_bs_clone_origblob_open_cpl, ctx);
4084 }
4085 
4086 /* END spdk_bs_create_clone */
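
/*
 * Usage sketch (illustrative only): creating a writable clone. As checked in
 * _spdk_bs_clone_origblob_open_cpl() above, the source blob must already be
 * read-only (e.g. a snapshot), so a typical flow snapshots first and then
 * clones. The names are hypothetical.
 *
 *	static void
 *	clone_done(void *cb_arg, spdk_blob_id clone_id, int bserrno)
 *	{
 *		// clone_id is thin-provisioned and backed by the snapshot
 *	}
 *
 *	spdk_bs_create_clone(bs, snapshot_id, NULL, clone_done, NULL);
 */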
4087 
4088 /* START spdk_bs_inflate_blob */
4089 
4090 static void
4091 _spdk_bs_inflate_blob_sync(void *cb_arg, int bserrno)
4092 {
4093 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
4094 	struct spdk_blob *_blob = ctx->original.blob;
4095 
4096 	if (bserrno != 0) {
4097 		_spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
4098 		return;
4099 	}
4100 
4101 	/* Destroy back_bs_dev */
4102 	_blob->back_bs_dev->destroy(_blob->back_bs_dev);
4103 	_blob->back_bs_dev = NULL;
4104 
4105 	_spdk_bs_clone_snapshot_origblob_cleanup(ctx, 0);
4106 }
4107 
4108 static void
4109 _spdk_bs_inflate_blob_done(void *cb_arg, int bserrno)
4110 {
4111 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
4112 	struct spdk_blob *_blob = ctx->original.blob;
4113 
4114 	if (bserrno != 0) {
4115 		_spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
4116 		return;
4117 	}
4118 
4119 	_spdk_bs_blob_list_remove(_blob);
4120 
4121 	_spdk_blob_remove_xattr(_blob, BLOB_SNAPSHOT, true);
4122 
4123 	/* Unset thin provision */
4124 	_blob->invalid_flags = _blob->invalid_flags & ~SPDK_BLOB_THIN_PROV;
4125 	_blob->state = SPDK_BLOB_STATE_DIRTY;
4126 
4127 	spdk_blob_sync_md(_blob, _spdk_bs_inflate_blob_sync, ctx);
4128 }
4129 
4130 static void
4131 _spdk_bs_inflate_blob_touch_next(void *cb_arg, int bserrno)
4132 {
4133 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
4134 	struct spdk_blob *_blob = ctx->original.blob;
4135 	uint64_t offset;
4136 
4137 	if (bserrno != 0) {
4138 		_spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
4139 		return;
4140 	}
4141 
4142 	for (; ctx->cluster < _blob->active.num_clusters; ctx->cluster++) {
4143 		if (_blob->active.clusters[ctx->cluster] == 0) {
4144 			break;
4145 		}
4146 	}
4147 
4148 	if (ctx->cluster < _blob->active.num_clusters) {
4149 		offset = _spdk_bs_cluster_to_page(_blob->bs, ctx->cluster);
4150 
		/* It is safe to advance the cluster index before issuing the write */
4152 		ctx->cluster++;
4153 
4154 		/* Use zero length write to touch a cluster */
4155 		spdk_blob_io_write(_blob, ctx->channel, NULL, offset, 0,
4156 				   _spdk_bs_inflate_blob_touch_next, ctx);
4157 	} else {
4158 		_spdk_bs_inflate_blob_done(cb_arg, bserrno);
4159 	}
4160 }
4161 
4162 static void
4163 _spdk_bs_inflate_blob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
4164 {
4165 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
4166 	uint64_t lfc; /* lowest free cluster */
4167 	uint64_t i;
4168 
4169 	if (bserrno != 0) {
4170 		_spdk_bs_clone_snapshot_cleanup_finish(ctx, bserrno);
4171 		return;
4172 	}
4173 	ctx->original.blob = _blob;
4174 
4175 	if (spdk_blob_is_thin_provisioned(_blob) == false) {
		/* This is not a thin-provisioned blob, so there is nothing to inflate. */
4177 		_spdk_bs_clone_snapshot_origblob_cleanup(ctx, 0);
4178 		return;
4179 	}
4180 
4181 	/* Do two passes - one to verify that we can obtain enough clusters
4182 	 * and another to actually claim them.
4183 	 */
4184 	lfc = 0;
4185 	for (i = 0; i < _blob->active.num_clusters; i++) {
4186 		if (_blob->active.clusters[i] == 0) {
4187 			lfc = spdk_bit_array_find_first_clear(_blob->bs->used_clusters, lfc);
4188 			if (lfc >= _blob->bs->total_clusters) {
4189 				/* No more free clusters. Cannot satisfy the request */
4190 				_spdk_bs_clone_snapshot_origblob_cleanup(ctx, -ENOSPC);
4191 				return;
4192 			}
4193 			lfc++;
4194 		}
4195 	}
4196 
4197 	ctx->cluster = 0;
4198 	_spdk_bs_inflate_blob_touch_next(ctx, 0);
4199 }
4200 
4201 void spdk_bs_inflate_blob(struct spdk_blob_store *bs, struct spdk_io_channel *channel,
4202 			  spdk_blob_id blobid, spdk_blob_op_complete cb_fn, void *cb_arg)
4203 {
4204 	struct spdk_clone_snapshot_ctx *ctx = calloc(1, sizeof(*ctx));
4205 
4206 	if (!ctx) {
4207 		cb_fn(cb_arg, -ENOMEM);
4208 		return;
4209 	}
4210 	ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
	ctx->cpl.u.blob_basic.cb_fn = cb_fn;
	ctx->cpl.u.blob_basic.cb_arg = cb_arg;
4213 	ctx->bserrno = 0;
4214 	ctx->original.id = blobid;
4215 	ctx->channel = channel;
4216 
4217 	spdk_bs_open_blob(bs, ctx->original.id, _spdk_bs_inflate_blob_open_cpl, ctx);
4218 }
4219 
4220 /* END spdk_bs_inflate_blob */
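
/*
 * Usage sketch (illustrative only): inflating a clone so that it no longer
 * depends on its parent snapshot. An I/O channel is required because the
 * unallocated clusters are claimed with zero-length writes. The names bs,
 * blobid and inflate_done are hypothetical.
 *
 *	struct spdk_io_channel *channel = spdk_bs_alloc_io_channel(bs);
 *
 *	spdk_bs_inflate_blob(bs, channel, blobid, inflate_done, NULL);
 */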
4221 
4222 /* START spdk_blob_resize */
4223 void
4224 spdk_blob_resize(struct spdk_blob *blob, uint64_t sz, spdk_blob_op_complete cb_fn, void *cb_arg)
4225 {
4226 	int			rc;
4227 
4228 	_spdk_blob_verify_md_op(blob);
4229 
4230 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Resizing blob %lu to %lu clusters\n", blob->id, sz);
4231 
4232 	if (blob->md_ro) {
4233 		cb_fn(cb_arg, -EPERM);
4234 		return;
4235 	}
4236 
4237 	if (sz == blob->active.num_clusters) {
4238 		cb_fn(cb_arg, 0);
4239 		return;
4240 	}
4241 
4242 	rc = _spdk_blob_resize(blob, sz);
4243 	cb_fn(cb_arg, rc);
4244 }
4245 
4246 /* END spdk_blob_resize */
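
/*
 * Usage sketch (illustrative only): growing a blob to 20 clusters. The new
 * size only becomes persistent after a later spdk_blob_sync_md() or
 * spdk_blob_close(). The names blob and resize_done are hypothetical.
 *
 *	spdk_blob_resize(blob, 20, resize_done, NULL);
 */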
4247 
4248 
4249 /* START spdk_bs_delete_blob */
4250 
4251 static void
4252 _spdk_bs_delete_close_cpl(void *cb_arg, int bserrno)
4253 {
4254 	spdk_bs_sequence_t *seq = cb_arg;
4255 
4256 	spdk_bs_sequence_finish(seq, bserrno);
4257 }
4258 
4259 static void
4260 _spdk_bs_delete_persist_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4261 {
4262 	struct spdk_blob *blob = cb_arg;
4263 
4264 	if (bserrno != 0) {
4265 		/*
4266 		 * We already removed this blob from the blobstore tailq, so
4267 		 *  we need to free it here since this is the last reference
4268 		 *  to it.
4269 		 */
4270 		_spdk_blob_free(blob);
4271 		_spdk_bs_delete_close_cpl(seq, bserrno);
4272 		return;
4273 	}
4274 
4275 	/*
4276 	 * This will immediately decrement the ref_count and call
4277 	 *  the completion routine since the metadata state is clean.
4278 	 *  By calling spdk_blob_close, we reduce the number of call
4279 	 *  points into code that touches the blob->open_ref count
4280 	 *  and the blobstore's blob list.
4281 	 */
4282 	spdk_blob_close(blob, _spdk_bs_delete_close_cpl, seq);
4283 }
4284 
4285 static void
4286 _spdk_bs_delete_open_cpl(void *cb_arg, struct spdk_blob *blob, int bserrno)
4287 {
4288 	spdk_bs_sequence_t *seq = cb_arg;
4289 	uint32_t page_num;
4290 
4291 	if (bserrno != 0) {
4292 		spdk_bs_sequence_finish(seq, bserrno);
4293 		return;
4294 	}
4295 
4296 	_spdk_blob_verify_md_op(blob);
4297 
4298 	if (blob->open_ref > 1) {
4299 		/*
4300 		 * Someone has this blob open (besides this delete context).
4301 		 *  Decrement the ref count directly and return -EBUSY.
4302 		 */
4303 		blob->open_ref--;
4304 		spdk_bs_sequence_finish(seq, -EBUSY);
4305 		return;
4306 	}
4307 
4308 	bserrno = _spdk_bs_blob_list_remove(blob);
4309 	if (bserrno != 0) {
		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Failed to remove blob #%" PRIu64 " from the blob list\n", blob->id);
4311 		spdk_bs_sequence_finish(seq, bserrno);
4312 		return;
4313 	}
4314 
4315 	/*
4316 	 * Remove the blob from the blob_store list now, to ensure it does not
4317 	 *  get returned after this point by _spdk_blob_lookup().
4318 	 */
4319 	TAILQ_REMOVE(&blob->bs->blobs, blob, link);
4320 	page_num = _spdk_bs_blobid_to_page(blob->id);
4321 	spdk_bit_array_clear(blob->bs->used_blobids, page_num);
4322 	blob->state = SPDK_BLOB_STATE_DIRTY;
4323 	blob->active.num_pages = 0;
4324 	_spdk_blob_resize(blob, 0);
4325 
4326 	_spdk_blob_persist(seq, blob, _spdk_bs_delete_persist_cpl, blob);
4327 }
4328 
4329 void
4330 spdk_bs_delete_blob(struct spdk_blob_store *bs, spdk_blob_id blobid,
4331 		    spdk_blob_op_complete cb_fn, void *cb_arg)
4332 {
4333 	struct spdk_bs_cpl	cpl;
4334 	spdk_bs_sequence_t	*seq;
4335 	struct spdk_blob_list	*snapshot_entry = NULL;
4336 
4337 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Deleting blob %lu\n", blobid);
4338 
4339 	assert(spdk_get_thread() == bs->md_thread);
4340 
4341 	/* Check if this is a snapshot with clones */
4342 	TAILQ_FOREACH(snapshot_entry, &bs->snapshots, link) {
4343 		if (snapshot_entry->id == blobid) {
4344 			break;
4345 		}
4346 	}
4347 	if (snapshot_entry != NULL) {
		/* If the snapshot has clones, it cannot be removed */
4349 		if (!TAILQ_EMPTY(&snapshot_entry->clones)) {
4350 			SPDK_ERRLOG("Cannot remove snapshot with clones\n");
4351 			cb_fn(cb_arg, -EBUSY);
4352 			return;
4353 		}
4354 	}
4355 
4356 	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
4357 	cpl.u.blob_basic.cb_fn = cb_fn;
4358 	cpl.u.blob_basic.cb_arg = cb_arg;
4359 
4360 	seq = spdk_bs_sequence_start(bs->md_channel, &cpl);
4361 	if (!seq) {
4362 		cb_fn(cb_arg, -ENOMEM);
4363 		return;
4364 	}
4365 
4366 	spdk_bs_open_blob(bs, blobid, _spdk_bs_delete_open_cpl, seq);
4367 }
4368 
4369 /* END spdk_bs_delete_blob */
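
/*
 * Usage sketch (illustrative only): deleting a blob by id. The call fails
 * with -EBUSY if another context still holds the blob open, or if the blob
 * is a snapshot that still has clones. The names bs, blobid and delete_done
 * are hypothetical.
 *
 *	static void
 *	delete_done(void *cb_arg, int bserrno)
 *	{
 *		// bserrno == 0: metadata persisted and in-memory blob freed
 *	}
 *
 *	spdk_bs_delete_blob(bs, blobid, delete_done, NULL);
 */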
4370 
4371 /* START spdk_bs_open_blob */
4372 
4373 static void
4374 _spdk_bs_open_blob_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4375 {
4376 	struct spdk_blob *blob = cb_arg;
4377 
	/* If the blob has a CRC error, we just return NULL. */
4379 	if (blob == NULL) {
4380 		seq->cpl.u.blob_handle.blob = NULL;
4381 		spdk_bs_sequence_finish(seq, bserrno);
4382 		return;
4383 	}
4384 
4385 	blob->open_ref++;
4386 
4387 	TAILQ_INSERT_HEAD(&blob->bs->blobs, blob, link);
4388 
4389 	spdk_bs_sequence_finish(seq, bserrno);
4390 }
4391 
4392 void spdk_bs_open_blob(struct spdk_blob_store *bs, spdk_blob_id blobid,
4393 		       spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
4394 {
4395 	struct spdk_blob		*blob;
4396 	struct spdk_bs_cpl		cpl;
4397 	spdk_bs_sequence_t		*seq;
4398 	uint32_t			page_num;
4399 
4400 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Opening blob %lu\n", blobid);
4401 	assert(spdk_get_thread() == bs->md_thread);
4402 
4403 	page_num = _spdk_bs_blobid_to_page(blobid);
4404 	if (spdk_bit_array_get(bs->used_blobids, page_num) == false) {
4405 		/* Invalid blobid */
4406 		cb_fn(cb_arg, NULL, -ENOENT);
4407 		return;
4408 	}
4409 
4410 	blob = _spdk_blob_lookup(bs, blobid);
4411 	if (blob) {
4412 		blob->open_ref++;
4413 		cb_fn(cb_arg, blob, 0);
4414 		return;
4415 	}
4416 
4417 	blob = _spdk_blob_alloc(bs, blobid);
4418 	if (!blob) {
4419 		cb_fn(cb_arg, NULL, -ENOMEM);
4420 		return;
4421 	}
4422 
4423 	cpl.type = SPDK_BS_CPL_TYPE_BLOB_HANDLE;
4424 	cpl.u.blob_handle.cb_fn = cb_fn;
4425 	cpl.u.blob_handle.cb_arg = cb_arg;
4426 	cpl.u.blob_handle.blob = blob;
4427 
4428 	seq = spdk_bs_sequence_start(bs->md_channel, &cpl);
4429 	if (!seq) {
4430 		_spdk_blob_free(blob);
4431 		cb_fn(cb_arg, NULL, -ENOMEM);
4432 		return;
4433 	}
4434 
4435 	_spdk_blob_load(seq, blob, _spdk_bs_open_blob_cpl, blob);
4436 }
4437 /* END spdk_bs_open_blob */
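
/*
 * Usage sketch (illustrative only): opening a blob. Opening an already-open
 * blob returns the same handle with an incremented open_ref, so every
 * successful open must eventually be paired with spdk_blob_close(). The
 * names bs, blobid and open_done are hypothetical.
 *
 *	static void
 *	open_done(void *cb_arg, struct spdk_blob *blob, int bserrno)
 *	{
 *		if (bserrno == 0) {
 *			// use blob; call spdk_blob_close() when finished
 *		}
 *	}
 *
 *	spdk_bs_open_blob(bs, blobid, open_done, NULL);
 */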
4438 
4439 /* START spdk_blob_set_read_only */
4440 int spdk_blob_set_read_only(struct spdk_blob *blob)
4441 {
4442 	_spdk_blob_verify_md_op(blob);
4443 
4444 	blob->data_ro_flags |= SPDK_BLOB_READ_ONLY;
4445 
4446 	blob->state = SPDK_BLOB_STATE_DIRTY;
4447 	return 0;
4448 }
4449 /* END spdk_blob_set_read_only */
4450 
4451 /* START spdk_blob_sync_md */
4452 
4453 static void
4454 _spdk_blob_sync_md_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4455 {
4456 	struct spdk_blob *blob = cb_arg;
4457 
4458 	if (bserrno == 0 && (blob->data_ro_flags & SPDK_BLOB_READ_ONLY)) {
4459 		blob->data_ro = true;
4460 		blob->md_ro = true;
4461 	}
4462 
4463 	spdk_bs_sequence_finish(seq, bserrno);
4464 }
4465 
4466 static void
4467 _spdk_blob_sync_md(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
4468 {
4469 	struct spdk_bs_cpl	cpl;
4470 	spdk_bs_sequence_t	*seq;
4471 
4472 	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
4473 	cpl.u.blob_basic.cb_fn = cb_fn;
4474 	cpl.u.blob_basic.cb_arg = cb_arg;
4475 
4476 	seq = spdk_bs_sequence_start(blob->bs->md_channel, &cpl);
4477 	if (!seq) {
4478 		cb_fn(cb_arg, -ENOMEM);
4479 		return;
4480 	}
4481 
4482 	_spdk_blob_persist(seq, blob, _spdk_blob_sync_md_cpl, blob);
4483 }
4484 
4485 void
4486 spdk_blob_sync_md(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
4487 {
4488 	_spdk_blob_verify_md_op(blob);
4489 
4490 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Syncing blob %lu\n", blob->id);
4491 
4492 	if (blob->md_ro) {
4493 		assert(blob->state == SPDK_BLOB_STATE_CLEAN);
4494 		cb_fn(cb_arg, 0);
4495 		return;
4496 	}
4497 
4498 	_spdk_blob_sync_md(blob, cb_fn, cb_arg);
4499 }
4500 
4501 /* END spdk_blob_sync_md */
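
/*
 * Usage sketch (illustrative only): spdk_blob_set_read_only() only marks the
 * in-memory metadata dirty; data_ro/md_ro take effect once the metadata is
 * persisted, as done in _spdk_blob_sync_md_cpl() above. The name sync_done
 * is hypothetical.
 *
 *	spdk_blob_set_read_only(blob);
 *	spdk_blob_sync_md(blob, sync_done, NULL);
 */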
4502 
4503 struct spdk_blob_insert_cluster_ctx {
4504 	struct spdk_thread	*thread;
4505 	struct spdk_blob	*blob;
4506 	uint32_t		cluster_num;	/* cluster index in blob */
4507 	uint32_t		cluster;	/* cluster on disk */
4508 	int			rc;
4509 	spdk_blob_op_complete	cb_fn;
4510 	void			*cb_arg;
4511 };
4512 
4513 static void
4514 _spdk_blob_insert_cluster_msg_cpl(void *arg)
4515 {
4516 	struct spdk_blob_insert_cluster_ctx *ctx = arg;
4517 
4518 	ctx->cb_fn(ctx->cb_arg, ctx->rc);
4519 	free(ctx);
4520 }
4521 
4522 static void
4523 _spdk_blob_insert_cluster_msg_cb(void *arg, int bserrno)
4524 {
4525 	struct spdk_blob_insert_cluster_ctx *ctx = arg;
4526 
4527 	ctx->rc = bserrno;
4528 	spdk_thread_send_msg(ctx->thread, _spdk_blob_insert_cluster_msg_cpl, ctx);
4529 }
4530 
4531 static void
4532 _spdk_blob_insert_cluster_msg(void *arg)
4533 {
4534 	struct spdk_blob_insert_cluster_ctx *ctx = arg;
4535 
4536 	ctx->rc = _spdk_blob_insert_cluster(ctx->blob, ctx->cluster_num, ctx->cluster);
4537 	if (ctx->rc != 0) {
4538 		spdk_thread_send_msg(ctx->thread, _spdk_blob_insert_cluster_msg_cpl, ctx);
4539 		return;
4540 	}
4541 
4542 	ctx->blob->state = SPDK_BLOB_STATE_DIRTY;
4543 	_spdk_blob_sync_md(ctx->blob, _spdk_blob_insert_cluster_msg_cb, ctx);
4544 }
4545 
4546 void
4547 _spdk_blob_insert_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num,
4548 				       uint64_t cluster, spdk_blob_op_complete cb_fn, void *cb_arg)
4549 {
4550 	struct spdk_blob_insert_cluster_ctx *ctx;
4551 
4552 	ctx = calloc(1, sizeof(*ctx));
4553 	if (ctx == NULL) {
4554 		cb_fn(cb_arg, -ENOMEM);
4555 		return;
4556 	}
4557 
4558 	ctx->thread = spdk_get_thread();
4559 	ctx->blob = blob;
4560 	ctx->cluster_num = cluster_num;
4561 	ctx->cluster = cluster;
4562 	ctx->cb_fn = cb_fn;
4563 	ctx->cb_arg = cb_arg;
4564 
4565 	spdk_thread_send_msg(blob->bs->md_thread, _spdk_blob_insert_cluster_msg, ctx);
4566 }
4567 
4568 /* START spdk_blob_close */
4569 
4570 static void
4571 _spdk_blob_close_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4572 {
4573 	struct spdk_blob *blob = cb_arg;
4574 
4575 	if (bserrno == 0) {
4576 		blob->open_ref--;
4577 		if (blob->open_ref == 0) {
4578 			/*
4579 			 * Blobs with active.num_pages == 0 are deleted blobs.
			 *  These blobs are removed from the blob_store list
			 *  when the deletion process starts, so don't try to
4582 			 *  remove them again.
4583 			 */
4584 			if (blob->active.num_pages > 0) {
4585 				TAILQ_REMOVE(&blob->bs->blobs, blob, link);
4586 			}
4587 			_spdk_blob_free(blob);
4588 		}
4589 	}
4590 
4591 	spdk_bs_sequence_finish(seq, bserrno);
4592 }
4593 
4594 void spdk_blob_close(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
4595 {
4596 	struct spdk_bs_cpl	cpl;
4597 	spdk_bs_sequence_t	*seq;
4598 
4599 	_spdk_blob_verify_md_op(blob);
4600 
4601 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Closing blob %lu\n", blob->id);
4602 
4603 	if (blob->open_ref == 0) {
4604 		cb_fn(cb_arg, -EBADF);
4605 		return;
4606 	}
4607 
4608 	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
4609 	cpl.u.blob_basic.cb_fn = cb_fn;
4610 	cpl.u.blob_basic.cb_arg = cb_arg;
4611 
4612 	seq = spdk_bs_sequence_start(blob->bs->md_channel, &cpl);
4613 	if (!seq) {
4614 		cb_fn(cb_arg, -ENOMEM);
4615 		return;
4616 	}
4617 
4618 	/* Sync metadata */
4619 	_spdk_blob_persist(seq, blob, _spdk_blob_close_cpl, blob);
4620 }
4621 
4622 /* END spdk_blob_close */
4623 
4624 struct spdk_io_channel *spdk_bs_alloc_io_channel(struct spdk_blob_store *bs)
4625 {
4626 	return spdk_get_io_channel(bs);
4627 }
4628 
4629 void spdk_bs_free_io_channel(struct spdk_io_channel *channel)
4630 {
4631 	spdk_put_io_channel(channel);
4632 }
4633 
4634 void spdk_blob_io_unmap(struct spdk_blob *blob, struct spdk_io_channel *channel,
4635 			uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg)
4636 {
4637 	_spdk_blob_request_submit_op(blob, channel, NULL, offset, length, cb_fn, cb_arg,
4638 				     SPDK_BLOB_UNMAP);
4639 }
4640 
4641 void spdk_blob_io_write_zeroes(struct spdk_blob *blob, struct spdk_io_channel *channel,
4642 			       uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg)
4643 {
4644 	_spdk_blob_request_submit_op(blob, channel, NULL, offset, length, cb_fn, cb_arg,
4645 				     SPDK_BLOB_WRITE_ZEROES);
4646 }
4647 
4648 void spdk_blob_io_write(struct spdk_blob *blob, struct spdk_io_channel *channel,
4649 			void *payload, uint64_t offset, uint64_t length,
4650 			spdk_blob_op_complete cb_fn, void *cb_arg)
4651 {
4652 	_spdk_blob_request_submit_op(blob, channel, payload, offset, length, cb_fn, cb_arg,
4653 				     SPDK_BLOB_WRITE);
4654 }
4655 
4656 void spdk_blob_io_read(struct spdk_blob *blob, struct spdk_io_channel *channel,
4657 		       void *payload, uint64_t offset, uint64_t length,
4658 		       spdk_blob_op_complete cb_fn, void *cb_arg)
4659 {
4660 	_spdk_blob_request_submit_op(blob, channel, payload, offset, length, cb_fn, cb_arg,
4661 				     SPDK_BLOB_READ);
4662 }
4663 
4664 void spdk_blob_io_writev(struct spdk_blob *blob, struct spdk_io_channel *channel,
4665 			 struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
4666 			 spdk_blob_op_complete cb_fn, void *cb_arg)
4667 {
4668 	_spdk_blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, false);
4669 }
4670 
4671 void spdk_blob_io_readv(struct spdk_blob *blob, struct spdk_io_channel *channel,
4672 			struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
4673 			spdk_blob_op_complete cb_fn, void *cb_arg)
4674 {
4675 	_spdk_blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, true);
4676 }
4677 
4678 struct spdk_bs_iter_ctx {
4679 	int64_t page_num;
4680 	struct spdk_blob_store *bs;
4681 
4682 	spdk_blob_op_with_handle_complete cb_fn;
4683 	void *cb_arg;
4684 };
4685 
4686 static void
4687 _spdk_bs_iter_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
4688 {
4689 	struct spdk_bs_iter_ctx *ctx = cb_arg;
4690 	struct spdk_blob_store *bs = ctx->bs;
4691 	spdk_blob_id id;
4692 
4693 	if (bserrno == 0) {
4694 		ctx->cb_fn(ctx->cb_arg, _blob, bserrno);
4695 		free(ctx);
4696 		return;
4697 	}
4698 
4699 	ctx->page_num++;
4700 	ctx->page_num = spdk_bit_array_find_first_set(bs->used_blobids, ctx->page_num);
4701 	if (ctx->page_num >= spdk_bit_array_capacity(bs->used_blobids)) {
4702 		ctx->cb_fn(ctx->cb_arg, NULL, -ENOENT);
4703 		free(ctx);
4704 		return;
4705 	}
4706 
4707 	id = _spdk_bs_page_to_blobid(ctx->page_num);
4708 
4709 	spdk_bs_open_blob(bs, id, _spdk_bs_iter_cpl, ctx);
4710 }
4711 
4712 void
4713 spdk_bs_iter_first(struct spdk_blob_store *bs,
4714 		   spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
4715 {
4716 	struct spdk_bs_iter_ctx *ctx;
4717 
4718 	ctx = calloc(1, sizeof(*ctx));
4719 	if (!ctx) {
4720 		cb_fn(cb_arg, NULL, -ENOMEM);
4721 		return;
4722 	}
4723 
4724 	ctx->page_num = -1;
4725 	ctx->bs = bs;
4726 	ctx->cb_fn = cb_fn;
4727 	ctx->cb_arg = cb_arg;
4728 
4729 	_spdk_bs_iter_cpl(ctx, NULL, -1);
4730 }
4731 
4732 static void
4733 _spdk_bs_iter_close_cpl(void *cb_arg, int bserrno)
4734 {
4735 	struct spdk_bs_iter_ctx *ctx = cb_arg;
4736 
4737 	_spdk_bs_iter_cpl(ctx, NULL, -1);
4738 }
4739 
4740 void
4741 spdk_bs_iter_next(struct spdk_blob_store *bs, struct spdk_blob *blob,
4742 		  spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
4743 {
4744 	struct spdk_bs_iter_ctx *ctx;
4745 
4746 	assert(blob != NULL);
4747 
4748 	ctx = calloc(1, sizeof(*ctx));
4749 	if (!ctx) {
4750 		cb_fn(cb_arg, NULL, -ENOMEM);
4751 		return;
4752 	}
4753 
4754 	ctx->page_num = _spdk_bs_blobid_to_page(blob->id);
4755 	ctx->bs = bs;
4756 	ctx->cb_fn = cb_fn;
4757 	ctx->cb_arg = cb_arg;
4758 
4759 	/* Close the existing blob */
4760 	spdk_blob_close(blob, _spdk_bs_iter_close_cpl, ctx);
4761 }
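
/*
 * Usage sketch (illustrative only): iterating over all blobs. Each blob is
 * opened in turn; spdk_bs_iter_next() closes the current blob before opening
 * the next one, and the callback receives -ENOENT once the iteration is
 * complete. A global bs is assumed for brevity; iter_cb is hypothetical.
 *
 *	static void
 *	iter_cb(void *cb_arg, struct spdk_blob *blob, int bserrno)
 *	{
 *		if (bserrno != 0) {
 *			return;		// -ENOENT: no more blobs
 *		}
 *		// inspect blob here ...
 *		spdk_bs_iter_next(bs, blob, iter_cb, cb_arg);
 *	}
 *
 *	spdk_bs_iter_first(bs, iter_cb, NULL);
 */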
4762 
4763 static int
4764 _spdk_blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value,
4765 		     uint16_t value_len, bool internal)
4766 {
4767 	struct spdk_xattr_tailq *xattrs;
4768 	struct spdk_xattr	*xattr;
4769 
4770 	_spdk_blob_verify_md_op(blob);
4771 
4772 	if (blob->md_ro) {
4773 		return -EPERM;
4774 	}
4775 
4776 	if (internal) {
4777 		xattrs = &blob->xattrs_internal;
4778 		blob->invalid_flags |= SPDK_BLOB_INTERNAL_XATTR;
4779 	} else {
4780 		xattrs = &blob->xattrs;
4781 	}
4782 
4783 	TAILQ_FOREACH(xattr, xattrs, link) {
4784 		if (!strcmp(name, xattr->name)) {
4785 			free(xattr->value);
4786 			xattr->value_len = value_len;
			xattr->value = malloc(value_len);
			if (xattr->value == NULL) {
				return -ENOMEM;
			}
			memcpy(xattr->value, value, value_len);
4789 
4790 			blob->state = SPDK_BLOB_STATE_DIRTY;
4791 
4792 			return 0;
4793 		}
4794 	}
4795 
	xattr = calloc(1, sizeof(*xattr));
	if (!xattr) {
		return -ENOMEM;
	}

	xattr->name = strdup(name);
	xattr->value = malloc(value_len);
	if (xattr->name == NULL || xattr->value == NULL) {
		free(xattr->name);
		free(xattr->value);
		free(xattr);
		return -ENOMEM;
	}
	xattr->value_len = value_len;
	memcpy(xattr->value, value, value_len);
4804 	TAILQ_INSERT_TAIL(xattrs, xattr, link);
4805 
4806 	blob->state = SPDK_BLOB_STATE_DIRTY;
4807 
4808 	return 0;
4809 }
4810 
4811 int
4812 spdk_blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value,
4813 		    uint16_t value_len)
4814 {
4815 	return _spdk_blob_set_xattr(blob, name, value, value_len, false);
4816 }
4817 
4818 static int
4819 _spdk_blob_remove_xattr(struct spdk_blob *blob, const char *name, bool internal)
4820 {
4821 	struct spdk_xattr_tailq *xattrs;
4822 	struct spdk_xattr	*xattr;
4823 
4824 	_spdk_blob_verify_md_op(blob);
4825 
4826 	if (blob->md_ro) {
4827 		return -EPERM;
4828 	}
4829 	xattrs = internal ? &blob->xattrs_internal : &blob->xattrs;
4830 
4831 	TAILQ_FOREACH(xattr, xattrs, link) {
4832 		if (!strcmp(name, xattr->name)) {
4833 			TAILQ_REMOVE(xattrs, xattr, link);
4834 			free(xattr->value);
4835 			free(xattr->name);
4836 			free(xattr);
4837 
4838 			if (internal && TAILQ_EMPTY(&blob->xattrs_internal)) {
4839 				blob->invalid_flags &= ~SPDK_BLOB_INTERNAL_XATTR;
4840 			}
4841 			blob->state = SPDK_BLOB_STATE_DIRTY;
4842 
4843 			return 0;
4844 		}
4845 	}
4846 
4847 	return -ENOENT;
4848 }
4849 
4850 int
4851 spdk_blob_remove_xattr(struct spdk_blob *blob, const char *name)
4852 {
4853 	return _spdk_blob_remove_xattr(blob, name, false);
4854 }
4855 
4856 static int
4857 _spdk_blob_get_xattr_value(struct spdk_blob *blob, const char *name,
4858 			   const void **value, size_t *value_len, bool internal)
4859 {
4860 	struct spdk_xattr	*xattr;
4861 	struct spdk_xattr_tailq *xattrs;
4862 
4863 	xattrs = internal ? &blob->xattrs_internal : &blob->xattrs;
4864 
4865 	TAILQ_FOREACH(xattr, xattrs, link) {
4866 		if (!strcmp(name, xattr->name)) {
4867 			*value = xattr->value;
4868 			*value_len = xattr->value_len;
4869 			return 0;
4870 		}
4871 	}
4872 	return -ENOENT;
4873 }
4874 
4875 int
4876 spdk_blob_get_xattr_value(struct spdk_blob *blob, const char *name,
4877 			  const void **value, size_t *value_len)
4878 {
4879 	_spdk_blob_verify_md_op(blob);
4880 
4881 	return _spdk_blob_get_xattr_value(blob, name, value, value_len, false);
4882 }
4883 
4884 struct spdk_xattr_names {
4885 	uint32_t	count;
4886 	const char	*names[0];
4887 };
4888 
4889 static int
4890 _spdk_blob_get_xattr_names(struct spdk_xattr_tailq *xattrs, struct spdk_xattr_names **names)
4891 {
4892 	struct spdk_xattr	*xattr;
4893 	int			count = 0;
4894 
4895 	TAILQ_FOREACH(xattr, xattrs, link) {
4896 		count++;
4897 	}
4898 
4899 	*names = calloc(1, sizeof(struct spdk_xattr_names) + count * sizeof(char *));
4900 	if (*names == NULL) {
4901 		return -ENOMEM;
4902 	}
4903 
4904 	TAILQ_FOREACH(xattr, xattrs, link) {
4905 		(*names)->names[(*names)->count++] = xattr->name;
4906 	}
4907 
4908 	return 0;
4909 }
4910 
4911 int
4912 spdk_blob_get_xattr_names(struct spdk_blob *blob, struct spdk_xattr_names **names)
4913 {
4914 	_spdk_blob_verify_md_op(blob);
4915 
4916 	return _spdk_blob_get_xattr_names(&blob->xattrs, names);
4917 }
4918 
4919 uint32_t
4920 spdk_xattr_names_get_count(struct spdk_xattr_names *names)
4921 {
4922 	assert(names != NULL);
4923 
4924 	return names->count;
4925 }
4926 
4927 const char *
4928 spdk_xattr_names_get_name(struct spdk_xattr_names *names, uint32_t index)
4929 {
4930 	if (index >= names->count) {
4931 		return NULL;
4932 	}
4933 
4934 	return names->names[index];
4935 }
4936 
4937 void
4938 spdk_xattr_names_free(struct spdk_xattr_names *names)
4939 {
4940 	free(names);
4941 }
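
/*
 * Usage sketch (illustrative only): listing a blob's xattr names. The
 * returned names point into the blob's metadata, so only the container is
 * freed; blob is a hypothetical open handle.
 *
 *	struct spdk_xattr_names *names;
 *	uint32_t i;
 *
 *	if (spdk_blob_get_xattr_names(blob, &names) == 0) {
 *		for (i = 0; i < spdk_xattr_names_get_count(names); i++) {
 *			printf("%s\n", spdk_xattr_names_get_name(names, i));
 *		}
 *		spdk_xattr_names_free(names);
 *	}
 */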
4942 
4943 struct spdk_bs_type
4944 spdk_bs_get_bstype(struct spdk_blob_store *bs)
4945 {
4946 	return bs->bstype;
4947 }
4948 
4949 void
4950 spdk_bs_set_bstype(struct spdk_blob_store *bs, struct spdk_bs_type bstype)
4951 {
4952 	memcpy(&bs->bstype, &bstype, sizeof(bstype));
4953 }
4954 
4955 bool
4956 spdk_blob_is_read_only(struct spdk_blob *blob)
4957 {
4958 	assert(blob != NULL);
4959 	return (blob->data_ro || blob->md_ro);
4960 }
4961 
4962 bool
4963 spdk_blob_is_snapshot(struct spdk_blob *blob)
4964 {
4965 	struct spdk_blob_list *snapshot_entry;
4966 
4967 	assert(blob != NULL);
4968 
4969 	TAILQ_FOREACH(snapshot_entry, &blob->bs->snapshots, link) {
4970 		if (snapshot_entry->id == blob->id) {
4971 			break;
4972 		}
4973 	}
4974 
4975 	if (snapshot_entry == NULL) {
4976 		return false;
4977 	}
4978 
4979 	return true;
4980 }
4981 
4982 bool
4983 spdk_blob_is_clone(struct spdk_blob *blob)
4984 {
4985 	assert(blob != NULL);
4986 
4987 	if (blob->parent_id != SPDK_BLOBID_INVALID) {
4988 		assert(spdk_blob_is_thin_provisioned(blob));
4989 		return true;
4990 	}
4991 
4992 	return false;
4993 }
4994 
4995 bool
4996 spdk_blob_is_thin_provisioned(struct spdk_blob *blob)
4997 {
4998 	assert(blob != NULL);
4999 	return !!(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
5000 }
5001 
5002 spdk_blob_id
5003 spdk_blob_get_parent_snapshot(struct spdk_blob_store *bs, spdk_blob_id blob_id)
5004 {
5005 	struct spdk_blob_list *snapshot_entry = NULL;
5006 	struct spdk_blob_list *clone_entry = NULL;
5007 
5008 	TAILQ_FOREACH(snapshot_entry, &bs->snapshots, link) {
5009 		TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) {
5010 			if (clone_entry->id == blob_id) {
5011 				return snapshot_entry->id;
5012 			}
5013 		}
5014 	}
5015 
5016 	return SPDK_BLOBID_INVALID;
5017 }
5018 
5019 int
5020 spdk_blob_get_clones(struct spdk_blob_store *bs, spdk_blob_id blobid, spdk_blob_id *ids,
5021 		     size_t *count)
5022 {
5023 	struct spdk_blob_list *snapshot_entry, *clone_entry;
5024 	size_t n;
5025 
5026 	TAILQ_FOREACH(snapshot_entry, &bs->snapshots, link) {
5027 		if (snapshot_entry->id == blobid) {
5028 			break;
5029 		}
5030 	}
5031 	if (snapshot_entry == NULL) {
5032 		*count = 0;
5033 		return 0;
5034 	}
5035 
5036 	if (ids == NULL || *count < snapshot_entry->clone_count) {
5037 		*count = snapshot_entry->clone_count;
5038 		return -ENOMEM;
5039 	}
5040 	*count = snapshot_entry->clone_count;
5041 
5042 	n = 0;
5043 	TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) {
5044 		ids[n++] = clone_entry->id;
5045 	}
5046 
5047 	return 0;
5048 }
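
/*
 * Usage sketch (illustrative only): the two-call pattern for
 * spdk_blob_get_clones(): first query the required count, then fetch the
 * ids. Error handling is omitted; bs and snapshot_id are hypothetical.
 *
 *	size_t count = 0;
 *	spdk_blob_id *ids = NULL;
 *
 *	if (spdk_blob_get_clones(bs, snapshot_id, NULL, &count) == -ENOMEM) {
 *		ids = calloc(count, sizeof(spdk_blob_id));
 *		spdk_blob_get_clones(bs, snapshot_id, ids, &count);
 *	}
 */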
5049 
5050 SPDK_LOG_REGISTER_COMPONENT("blob", SPDK_LOG_BLOB)
5051