xref: /spdk/lib/blob/blobstore.c (revision 1f4f4cc75a522f897856e980a0b35d3c8fac24ed)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include "spdk/stdinc.h"
35 
36 #include "spdk/blob.h"
37 #include "spdk/crc32.h"
38 #include "spdk/env.h"
39 #include "spdk/queue.h"
40 #include "spdk/thread.h"
41 #include "spdk/bit_array.h"
42 #include "spdk/likely.h"
43 #include "spdk/util.h"
44 #include "spdk/string.h"
45 
46 #include "spdk_internal/assert.h"
47 #include "spdk_internal/log.h"
48 
49 #include "blobstore.h"
50 
51 #define BLOB_CRC32C_INITIAL    0xffffffffUL
52 
53 static int spdk_bs_register_md_thread(struct spdk_blob_store *bs);
54 static int spdk_bs_unregister_md_thread(struct spdk_blob_store *bs);
55 static void _spdk_blob_close_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno);
56 static void _spdk_blob_insert_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num,
57 		uint64_t cluster, uint32_t extent, spdk_blob_op_complete cb_fn, void *cb_arg);
58 
59 static int _spdk_blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value,
60 				uint16_t value_len, bool internal);
61 static int _spdk_blob_get_xattr_value(struct spdk_blob *blob, const char *name,
62 				      const void **value, size_t *value_len, bool internal);
63 static int _spdk_blob_remove_xattr(struct spdk_blob *blob, const char *name, bool internal);
64 
65 static void _spdk_blob_insert_extent(struct spdk_blob *blob, uint32_t extent, uint64_t cluster_num,
66 				     spdk_blob_op_complete cb_fn, void *cb_arg);
67 
68 static void
69 _spdk_blob_verify_md_op(struct spdk_blob *blob)
70 {
71 	assert(blob != NULL);
72 	assert(spdk_get_thread() == blob->bs->md_thread);
73 	assert(blob->state != SPDK_BLOB_STATE_LOADING);
74 }
75 
76 static struct spdk_blob_list *
77 _spdk_bs_get_snapshot_entry(struct spdk_blob_store *bs, spdk_blob_id blobid)
78 {
79 	struct spdk_blob_list *snapshot_entry = NULL;
80 
81 	TAILQ_FOREACH(snapshot_entry, &bs->snapshots, link) {
82 		if (snapshot_entry->id == blobid) {
83 			break;
84 		}
85 	}
86 
87 	return snapshot_entry;
88 }
89 
90 static void
91 _spdk_bs_claim_md_page(struct spdk_blob_store *bs, uint32_t page)
92 {
93 	assert(page < spdk_bit_array_capacity(bs->used_md_pages));
94 	assert(spdk_bit_array_get(bs->used_md_pages, page) == false);
95 
96 	spdk_bit_array_set(bs->used_md_pages, page);
97 }
98 
99 static void
100 _spdk_bs_release_md_page(struct spdk_blob_store *bs, uint32_t page)
101 {
102 	assert(page < spdk_bit_array_capacity(bs->used_md_pages));
103 	assert(spdk_bit_array_get(bs->used_md_pages, page) == true);
104 
105 	spdk_bit_array_clear(bs->used_md_pages, page);
106 }
107 
108 static void
109 _spdk_bs_claim_cluster(struct spdk_blob_store *bs, uint32_t cluster_num)
110 {
111 	assert(cluster_num < spdk_bit_array_capacity(bs->used_clusters));
112 	assert(spdk_bit_array_get(bs->used_clusters, cluster_num) == false);
113 	assert(bs->num_free_clusters > 0);
114 
115 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Claiming cluster %u\n", cluster_num);
116 
117 	spdk_bit_array_set(bs->used_clusters, cluster_num);
118 	bs->num_free_clusters--;
119 }
120 
121 static int
122 _spdk_blob_insert_cluster(struct spdk_blob *blob, uint32_t cluster_num, uint64_t cluster)
123 {
124 	uint64_t *cluster_lba = &blob->active.clusters[cluster_num];
125 
126 	_spdk_blob_verify_md_op(blob);
127 
128 	if (*cluster_lba != 0) {
129 		return -EEXIST;
130 	}
131 
132 	*cluster_lba = _spdk_bs_cluster_to_lba(blob->bs, cluster);
133 	return 0;
134 }
135 
136 static int
137 _spdk_bs_allocate_cluster(struct spdk_blob *blob, uint32_t cluster_num,
138 			  uint64_t *lowest_free_cluster, uint32_t *lowest_free_md_page, bool update_map)
139 {
140 	uint32_t *extent_page = NULL;
141 
142 	pthread_mutex_lock(&blob->bs->used_clusters_mutex);
143 	*lowest_free_cluster = spdk_bit_array_find_first_clear(blob->bs->used_clusters,
144 			       *lowest_free_cluster);
145 	if (*lowest_free_cluster == UINT32_MAX) {
146 		/* No more free clusters. Cannot satisfy the request */
147 		pthread_mutex_unlock(&blob->bs->used_clusters_mutex);
148 		return -ENOSPC;
149 	}
150 
151 	if (blob->use_extent_table) {
152 		extent_page = _spdk_bs_cluster_to_extent_page(blob, cluster_num);
153 		if (*extent_page == 0) {
154 			/* No extent_page is allocated for the cluster */
155 			*lowest_free_md_page = spdk_bit_array_find_first_clear(blob->bs->used_md_pages,
156 					       *lowest_free_md_page);
157 			if (*lowest_free_md_page == UINT32_MAX) {
158 				/* No more free md pages. Cannot satisfy the request */
159 				pthread_mutex_unlock(&blob->bs->used_clusters_mutex);
160 				return -ENOSPC;
161 			}
162 			_spdk_bs_claim_md_page(blob->bs, *lowest_free_md_page);
163 		}
164 	}
165 
166 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Claiming cluster %lu for blob %lu\n", *lowest_free_cluster, blob->id);
167 	_spdk_bs_claim_cluster(blob->bs, *lowest_free_cluster);
168 
169 	pthread_mutex_unlock(&blob->bs->used_clusters_mutex);
170 
171 	if (update_map) {
172 		_spdk_blob_insert_cluster(blob, cluster_num, *lowest_free_cluster);
173 		if (blob->use_extent_table && *extent_page == 0) {
174 			*extent_page = *lowest_free_md_page;
175 		}
176 	}
177 
178 	return 0;
179 }
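
/*
 * Note on the locking in _spdk_bs_allocate_cluster() above: the lowest free
 * cluster (and, for extent table blobs, a free md page for a missing extent
 * page) is found and claimed under used_clusters_mutex, while the cluster
 * map update happens after the unlock. The map update only touches
 * blob->active, which is protected by the metadata thread rather than the
 * mutex.
 */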
180 
181 static void
182 _spdk_bs_release_cluster(struct spdk_blob_store *bs, uint32_t cluster_num)
183 {
184 	assert(cluster_num < spdk_bit_array_capacity(bs->used_clusters));
185 	assert(spdk_bit_array_get(bs->used_clusters, cluster_num) == true);
186 	assert(bs->num_free_clusters < bs->total_clusters);
187 
188 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Releasing cluster %u\n", cluster_num);
189 
190 	pthread_mutex_lock(&bs->used_clusters_mutex);
191 	spdk_bit_array_clear(bs->used_clusters, cluster_num);
192 	bs->num_free_clusters++;
193 	pthread_mutex_unlock(&bs->used_clusters_mutex);
194 }
195 
196 static void
197 _spdk_blob_xattrs_init(struct spdk_blob_xattr_opts *xattrs)
198 {
199 	xattrs->count = 0;
200 	xattrs->names = NULL;
201 	xattrs->ctx = NULL;
202 	xattrs->get_value = NULL;
203 }
204 
205 void
206 spdk_blob_opts_init(struct spdk_blob_opts *opts)
207 {
208 	opts->num_clusters = 0;
209 	opts->thin_provision = false;
210 	opts->clear_method = BLOB_CLEAR_WITH_DEFAULT;
211 	_spdk_blob_xattrs_init(&opts->xattrs);
212 	opts->use_extent_table = true;
213 }
214 
215 void
216 spdk_blob_open_opts_init(struct spdk_blob_open_opts *opts)
217 {
218 	opts->clear_method = BLOB_CLEAR_WITH_DEFAULT;
219 }
220 
221 static struct spdk_blob *
222 _spdk_blob_alloc(struct spdk_blob_store *bs, spdk_blob_id id)
223 {
224 	struct spdk_blob *blob;
225 
226 	blob = calloc(1, sizeof(*blob));
227 	if (!blob) {
228 		return NULL;
229 	}
230 
231 	blob->id = id;
232 	blob->bs = bs;
233 
234 	blob->parent_id = SPDK_BLOBID_INVALID;
235 
236 	blob->state = SPDK_BLOB_STATE_DIRTY;
237 	blob->extent_rle_found = false;
238 	blob->extent_table_found = false;
239 	blob->active.num_pages = 1;
240 	blob->active.pages = calloc(1, sizeof(*blob->active.pages));
241 	if (!blob->active.pages) {
242 		free(blob);
243 		return NULL;
244 	}
245 
246 	blob->active.pages[0] = _spdk_bs_blobid_to_page(id);
247 
248 	TAILQ_INIT(&blob->xattrs);
249 	TAILQ_INIT(&blob->xattrs_internal);
250 	TAILQ_INIT(&blob->pending_persists);
251 
252 	return blob;
253 }
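
/*
 * A freshly allocated blob starts out in the DIRTY state with a single
 * metadata page whose index is derived from the blobid through
 * _spdk_bs_blobid_to_page(), so the root md page location is fixed before
 * anything is persisted.
 */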
254 
255 static void
256 _spdk_xattrs_free(struct spdk_xattr_tailq *xattrs)
257 {
258 	struct spdk_xattr	*xattr, *xattr_tmp;
259 
260 	TAILQ_FOREACH_SAFE(xattr, xattrs, link, xattr_tmp) {
261 		TAILQ_REMOVE(xattrs, xattr, link);
262 		free(xattr->name);
263 		free(xattr->value);
264 		free(xattr);
265 	}
266 }
267 
268 static void
269 _spdk_blob_free(struct spdk_blob *blob)
270 {
271 	assert(blob != NULL);
272 	assert(TAILQ_EMPTY(&blob->pending_persists));
273 
274 	free(blob->active.extent_pages);
275 	free(blob->clean.extent_pages);
276 	free(blob->active.clusters);
277 	free(blob->clean.clusters);
278 	free(blob->active.pages);
279 	free(blob->clean.pages);
280 
281 	_spdk_xattrs_free(&blob->xattrs);
282 	_spdk_xattrs_free(&blob->xattrs_internal);
283 
284 	if (blob->back_bs_dev) {
285 		blob->back_bs_dev->destroy(blob->back_bs_dev);
286 	}
287 
288 	free(blob);
289 }
290 
291 struct freeze_io_ctx {
292 	struct spdk_bs_cpl cpl;
293 	struct spdk_blob *blob;
294 };
295 
296 static void
297 _spdk_blob_io_sync(struct spdk_io_channel_iter *i)
298 {
299 	spdk_for_each_channel_continue(i, 0);
300 }
301 
302 static void
303 _spdk_blob_execute_queued_io(struct spdk_io_channel_iter *i)
304 {
305 	struct spdk_io_channel *_ch = spdk_io_channel_iter_get_channel(i);
306 	struct spdk_bs_channel *ch = spdk_io_channel_get_ctx(_ch);
307 	struct freeze_io_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
308 	struct spdk_bs_request_set	*set;
309 	struct spdk_bs_user_op_args	*args;
310 	spdk_bs_user_op_t *op, *tmp;
311 
312 	TAILQ_FOREACH_SAFE(op, &ch->queued_io, link, tmp) {
313 		set = (struct spdk_bs_request_set *)op;
314 		args = &set->u.user_op;
315 
316 		if (args->blob == ctx->blob) {
317 			TAILQ_REMOVE(&ch->queued_io, op, link);
318 			bs_user_op_execute(op);
319 		}
320 	}
321 
322 	spdk_for_each_channel_continue(i, 0);
323 }
324 
325 static void
326 _spdk_blob_io_cpl(struct spdk_io_channel_iter *i, int status)
327 {
328 	struct freeze_io_ctx *ctx = spdk_io_channel_iter_get_ctx(i);
329 
330 	ctx->cpl.u.blob_basic.cb_fn(ctx->cpl.u.blob_basic.cb_arg, 0);
331 
332 	free(ctx);
333 }
334 
335 static void
336 _spdk_blob_freeze_io(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
337 {
338 	struct freeze_io_ctx *ctx;
339 
340 	ctx = calloc(1, sizeof(*ctx));
341 	if (!ctx) {
342 		cb_fn(cb_arg, -ENOMEM);
343 		return;
344 	}
345 
346 	ctx->cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
347 	ctx->cpl.u.blob_basic.cb_fn = cb_fn;
348 	ctx->cpl.u.blob_basic.cb_arg = cb_arg;
349 	ctx->blob = blob;
350 
351 	/* Freeze I/O on blob */
352 	blob->frozen_refcnt++;
353 
354 	if (blob->frozen_refcnt == 1) {
355 		spdk_for_each_channel(blob->bs, _spdk_blob_io_sync, ctx, _spdk_blob_io_cpl);
356 	} else {
357 		cb_fn(cb_arg, 0);
358 		free(ctx);
359 	}
360 }
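
/*
 * Freezing is reference counted: only the 0 -> 1 transition iterates all
 * channels (via _spdk_blob_io_sync) so that in-flight I/O drains; nested
 * freezes complete immediately. _spdk_blob_unfreeze_io() below mirrors this,
 * re-executing queued I/O only when the count drops back to 0.
 */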
361 
362 static void
363 _spdk_blob_unfreeze_io(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
364 {
365 	struct freeze_io_ctx *ctx;
366 
367 	ctx = calloc(1, sizeof(*ctx));
368 	if (!ctx) {
369 		cb_fn(cb_arg, -ENOMEM);
370 		return;
371 	}
372 
373 	ctx->cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
374 	ctx->cpl.u.blob_basic.cb_fn = cb_fn;
375 	ctx->cpl.u.blob_basic.cb_arg = cb_arg;
376 	ctx->blob = blob;
377 
378 	assert(blob->frozen_refcnt > 0);
379 
380 	blob->frozen_refcnt--;
381 
382 	if (blob->frozen_refcnt == 0) {
383 		spdk_for_each_channel(blob->bs, _spdk_blob_execute_queued_io, ctx, _spdk_blob_io_cpl);
384 	} else {
385 		cb_fn(cb_arg, 0);
386 		free(ctx);
387 	}
388 }
389 
390 static int
391 _spdk_blob_mark_clean(struct spdk_blob *blob)
392 {
393 	uint32_t *extent_pages = NULL;
394 	uint64_t *clusters = NULL;
395 	uint32_t *pages = NULL;
396 
397 	assert(blob != NULL);
398 
399 	if (blob->active.num_extent_pages) {
400 		assert(blob->active.extent_pages);
401 		extent_pages = calloc(blob->active.num_extent_pages, sizeof(*blob->active.extent_pages));
402 		if (!extent_pages) {
403 			return -ENOMEM;
404 		}
405 		memcpy(extent_pages, blob->active.extent_pages,
406 		       blob->active.num_extent_pages * sizeof(*extent_pages));
407 	}
408 
409 	if (blob->active.num_clusters) {
410 		assert(blob->active.clusters);
411 		clusters = calloc(blob->active.num_clusters, sizeof(*blob->active.clusters));
412 		if (!clusters) {
413 			free(extent_pages);
414 			return -ENOMEM;
415 		}
416 		memcpy(clusters, blob->active.clusters, blob->active.num_clusters * sizeof(*blob->active.clusters));
417 	}
418 
419 	if (blob->active.num_pages) {
420 		assert(blob->active.pages);
421 		pages = calloc(blob->active.num_pages, sizeof(*blob->active.pages));
422 		if (!pages) {
423 			free(extent_pages);
424 			free(clusters);
425 			return -ENOMEM;
426 		}
427 		memcpy(pages, blob->active.pages, blob->active.num_pages * sizeof(*blob->active.pages));
428 	}
429 
430 	free(blob->clean.extent_pages);
431 	free(blob->clean.clusters);
432 	free(blob->clean.pages);
433 
434 	blob->clean.num_extent_pages = blob->active.num_extent_pages;
435 	blob->clean.extent_pages = blob->active.extent_pages;
436 	blob->clean.num_clusters = blob->active.num_clusters;
437 	blob->clean.clusters = blob->active.clusters;
438 	blob->clean.num_pages = blob->active.num_pages;
439 	blob->clean.pages = blob->active.pages;
440 
441 	blob->active.extent_pages = extent_pages;
442 	blob->active.clusters = clusters;
443 	blob->active.pages = pages;
444 
445 	/* If the metadata was dirtied again while the metadata was being written to disk,
446 	 *  we do not want to revert the DIRTY state back to CLEAN here.
447 	 */
448 	if (blob->state == SPDK_BLOB_STATE_LOADING) {
449 		blob->state = SPDK_BLOB_STATE_CLEAN;
450 	}
451 
452 	return 0;
453 }
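
/*
 * _spdk_blob_mark_clean() hands the active extent_page, cluster and page
 * arrays over to blob->clean and leaves freshly copied duplicates in
 * blob->active, so later in-place edits of the active state cannot corrupt
 * the last persisted (clean) view.
 */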
454 
455 static int
456 _spdk_blob_deserialize_xattr(struct spdk_blob *blob,
457 			     struct spdk_blob_md_descriptor_xattr *desc_xattr, bool internal)
458 {
459 	struct spdk_xattr                       *xattr;
460 
461 	if (desc_xattr->length != sizeof(desc_xattr->name_length) +
462 	    sizeof(desc_xattr->value_length) +
463 	    desc_xattr->name_length + desc_xattr->value_length) {
464 		return -EINVAL;
465 	}
466 
467 	xattr = calloc(1, sizeof(*xattr));
468 	if (xattr == NULL) {
469 		return -ENOMEM;
470 	}
471 
472 	xattr->name = malloc(desc_xattr->name_length + 1);
473 	if (xattr->name == NULL) {
474 		free(xattr);
475 		return -ENOMEM;
476 	}
477 	memcpy(xattr->name, desc_xattr->name, desc_xattr->name_length);
478 	xattr->name[desc_xattr->name_length] = '\0';
479 
480 	xattr->value = malloc(desc_xattr->value_length);
481 	if (xattr->value == NULL) {
482 		free(xattr->name);
483 		free(xattr);
484 		return -ENOMEM;
485 	}
486 	xattr->value_len = desc_xattr->value_length;
487 	memcpy(xattr->value,
488 	       (void *)((uintptr_t)desc_xattr->name + desc_xattr->name_length),
489 	       desc_xattr->value_length);
490 
491 	TAILQ_INSERT_TAIL(internal ? &blob->xattrs_internal : &blob->xattrs, xattr, link);
492 
493 	return 0;
494 }
495 
496 
497 static int
498 _spdk_blob_parse_page(const struct spdk_blob_md_page *page, struct spdk_blob *blob)
499 {
500 	struct spdk_blob_md_descriptor *desc;
501 	size_t	cur_desc = 0;
502 	void *tmp;
503 
504 	desc = (struct spdk_blob_md_descriptor *)page->descriptors;
505 	while (cur_desc < sizeof(page->descriptors)) {
506 		if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) {
507 			if (desc->length == 0) {
508 				/* If padding and length are 0, this terminates the page */
509 				break;
510 			}
511 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) {
512 			struct spdk_blob_md_descriptor_flags	*desc_flags;
513 
514 			desc_flags = (struct spdk_blob_md_descriptor_flags *)desc;
515 
516 			if (desc_flags->length != sizeof(*desc_flags) - sizeof(*desc)) {
517 				return -EINVAL;
518 			}
519 
520 			if ((desc_flags->invalid_flags | SPDK_BLOB_INVALID_FLAGS_MASK) !=
521 			    SPDK_BLOB_INVALID_FLAGS_MASK) {
522 				return -EINVAL;
523 			}
524 
525 			if ((desc_flags->data_ro_flags | SPDK_BLOB_DATA_RO_FLAGS_MASK) !=
526 			    SPDK_BLOB_DATA_RO_FLAGS_MASK) {
527 				blob->data_ro = true;
528 				blob->md_ro = true;
529 			}
530 
531 			if ((desc_flags->md_ro_flags | SPDK_BLOB_MD_RO_FLAGS_MASK) !=
532 			    SPDK_BLOB_MD_RO_FLAGS_MASK) {
533 				blob->md_ro = true;
534 			}
535 
536 			if ((desc_flags->data_ro_flags & SPDK_BLOB_READ_ONLY)) {
537 				blob->data_ro = true;
538 				blob->md_ro = true;
539 			}
540 
541 			blob->invalid_flags = desc_flags->invalid_flags;
542 			blob->data_ro_flags = desc_flags->data_ro_flags;
543 			blob->md_ro_flags = desc_flags->md_ro_flags;
544 
545 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE) {
546 			struct spdk_blob_md_descriptor_extent_rle	*desc_extent_rle;
547 			unsigned int				i, j;
548 			unsigned int				cluster_count = blob->active.num_clusters;
549 
550 			if (blob->extent_table_found) {
551 				/* Extent Table already present in the md,
552 				 * both descriptor types must never be present at the same time. */
553 				return -EINVAL;
554 			}
555 			blob->extent_rle_found = true;
556 
557 			desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)desc;
558 
559 			if (desc_extent_rle->length == 0 ||
560 			    (desc_extent_rle->length % sizeof(desc_extent_rle->extents[0]) != 0)) {
561 				return -EINVAL;
562 			}
563 
564 			for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) {
565 				for (j = 0; j < desc_extent_rle->extents[i].length; j++) {
566 					if (desc_extent_rle->extents[i].cluster_idx != 0) {
567 						if (!spdk_bit_array_get(blob->bs->used_clusters,
568 									desc_extent_rle->extents[i].cluster_idx + j)) {
569 							return -EINVAL;
570 						}
571 					}
572 					cluster_count++;
573 				}
574 			}
575 
576 			if (cluster_count == 0) {
577 				return -EINVAL;
578 			}
579 			tmp = realloc(blob->active.clusters, cluster_count * sizeof(*blob->active.clusters));
580 			if (tmp == NULL) {
581 				return -ENOMEM;
582 			}
583 			blob->active.clusters = tmp;
584 			blob->active.cluster_array_size = cluster_count;
585 
586 			for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) {
587 				for (j = 0; j < desc_extent_rle->extents[i].length; j++) {
588 					if (desc_extent_rle->extents[i].cluster_idx != 0) {
589 						blob->active.clusters[blob->active.num_clusters++] = _spdk_bs_cluster_to_lba(blob->bs,
590 								desc_extent_rle->extents[i].cluster_idx + j);
591 					} else if (spdk_blob_is_thin_provisioned(blob)) {
592 						blob->active.clusters[blob->active.num_clusters++] = 0;
593 					} else {
594 						return -EINVAL;
595 					}
596 				}
597 			}
598 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE) {
599 			struct spdk_blob_md_descriptor_extent_table *desc_extent_table;
600 			uint32_t num_extent_pages = blob->active.num_extent_pages;
601 			uint32_t i, j;
602 			size_t extent_pages_length;
603 
604 			desc_extent_table = (struct spdk_blob_md_descriptor_extent_table *)desc;
605 			extent_pages_length = desc_extent_table->length - sizeof(desc_extent_table->num_clusters);
606 
607 			if (blob->extent_rle_found) {
608 				/* This means that Extent RLE is already present in the MD;
609 				 * both descriptor types must never be present at the same time. */
610 				return -EINVAL;
611 			} else if (blob->extent_table_found &&
612 				   desc_extent_table->num_clusters != blob->remaining_clusters_in_et) {
613 				/* Number of clusters in this ET does not match number
614 				 * from previously read EXTENT_TABLE. */
615 				return -EINVAL;
616 			}
617 
618 			blob->extent_table_found = true;
619 
620 			if (desc_extent_table->length == 0 ||
621 			    (extent_pages_length % sizeof(desc_extent_table->extent_page[0]) != 0)) {
622 				return -EINVAL;
623 			}
624 
625 			for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) {
626 				num_extent_pages += desc_extent_table->extent_page[i].num_pages;
627 			}
628 
629 			tmp = realloc(blob->active.extent_pages, num_extent_pages * sizeof(uint32_t));
630 			if (tmp == NULL) {
631 				return -ENOMEM;
632 			}
633 			blob->active.extent_pages = tmp;
634 			blob->active.extent_pages_array_size = num_extent_pages;
635 
636 			blob->remaining_clusters_in_et = desc_extent_table->num_clusters;
637 
638 			/* Extent table entries contain md page numbers for extent pages.
639 			 * Zeroes represent unallocated extent pages, those are run-length-encoded.
640 			 */
641 			for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) {
642 				if (desc_extent_table->extent_page[i].page_idx != 0) {
643 					assert(desc_extent_table->extent_page[i].num_pages == 1);
644 					blob->active.extent_pages[blob->active.num_extent_pages++] =
645 						desc_extent_table->extent_page[i].page_idx;
646 				} else if (spdk_blob_is_thin_provisioned(blob)) {
647 					for (j = 0; j < desc_extent_table->extent_page[i].num_pages; j++) {
648 						blob->active.extent_pages[blob->active.num_extent_pages++] = 0;
649 					}
650 				} else {
651 					return -EINVAL;
652 				}
653 			}
654 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) {
655 			struct spdk_blob_md_descriptor_extent_page	*desc_extent;
656 			unsigned int					i;
657 			unsigned int					cluster_count = 0;
658 			size_t						cluster_idx_length;
659 
660 			if (blob->extent_rle_found) {
661 				/* This means that Extent RLE is already present in the MD;
662 				 * both descriptor types must never be present at the same time. */
663 				return -EINVAL;
664 			}
665 
666 			desc_extent = (struct spdk_blob_md_descriptor_extent_page *)desc;
667 			cluster_idx_length = desc_extent->length - sizeof(desc_extent->start_cluster_idx);
668 
669 			if (desc_extent->length <= sizeof(desc_extent->start_cluster_idx) ||
670 			    (cluster_idx_length % sizeof(desc_extent->cluster_idx[0]) != 0)) {
671 				return -EINVAL;
672 			}
673 
674 			for (i = 0; i < cluster_idx_length / sizeof(desc_extent->cluster_idx[0]); i++) {
675 				if (desc_extent->cluster_idx[i] != 0) {
676 					if (!spdk_bit_array_get(blob->bs->used_clusters, desc_extent->cluster_idx[i])) {
677 						return -EINVAL;
678 					}
679 				}
680 				cluster_count++;
681 			}
682 
683 			if (cluster_count == 0) {
684 				return -EINVAL;
685 			}
686 
687 			/* When reading extent pages sequentially, the starting cluster idx should match
688 			 * the current size of the blob.
689 			 * If this is ever changed to batch reading, this check shall be removed. */
690 			if (desc_extent->start_cluster_idx != blob->active.num_clusters) {
691 				return -EINVAL;
692 			}
693 
694 			tmp = realloc(blob->active.clusters,
695 				      (cluster_count + blob->active.num_clusters) * sizeof(*blob->active.clusters));
696 			if (tmp == NULL) {
697 				return -ENOMEM;
698 			}
699 			blob->active.clusters = tmp;
700 			blob->active.cluster_array_size = (cluster_count + blob->active.num_clusters);
701 
702 			for (i = 0; i < cluster_idx_length / sizeof(desc_extent->cluster_idx[0]); i++) {
703 				if (desc_extent->cluster_idx[i] != 0) {
704 					blob->active.clusters[blob->active.num_clusters++] = _spdk_bs_cluster_to_lba(blob->bs,
705 							desc_extent->cluster_idx[i]);
706 				} else if (spdk_blob_is_thin_provisioned(blob)) {
707 					blob->active.clusters[blob->active.num_clusters++] = 0;
708 				} else {
709 					return -EINVAL;
710 				}
711 			}
712 			assert(desc_extent->start_cluster_idx + cluster_count == blob->active.num_clusters);
713 			assert(blob->remaining_clusters_in_et >= cluster_count);
714 			blob->remaining_clusters_in_et -= cluster_count;
715 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) {
716 			int rc;
717 
718 			rc = _spdk_blob_deserialize_xattr(blob,
719 							  (struct spdk_blob_md_descriptor_xattr *) desc, false);
720 			if (rc != 0) {
721 				return rc;
722 			}
723 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) {
724 			int rc;
725 
726 			rc = _spdk_blob_deserialize_xattr(blob,
727 							  (struct spdk_blob_md_descriptor_xattr *) desc, true);
728 			if (rc != 0) {
729 				return rc;
730 			}
731 		} else {
732 			/* Unrecognized descriptor type.  Do not fail - just continue to the
733 			 *  next descriptor.  If this descriptor is associated with some feature
734 			 *  defined in a newer version of blobstore, that version of blobstore
735 			 *  should create and set an associated feature flag to specify if this
736 			 *  blob can be loaded or not.
737 			 */
738 		}
739 
740 		/* Advance to the next descriptor */
741 		cur_desc += sizeof(*desc) + desc->length;
742 		if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) {
743 			break;
744 		}
745 		desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc);
746 	}
747 
748 	return 0;
749 }
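
/*
 * The descriptor walk above is bounded by sizeof(page->descriptors): a
 * PADDING descriptor with length 0 terminates the page early, unknown
 * descriptor types are skipped for forward compatibility, and each
 * iteration advances by sizeof(struct spdk_blob_md_descriptor) plus
 * desc->length.
 */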
750 
751 static bool _spdk_bs_load_cur_extent_page_valid(struct spdk_blob_md_page *page);
752 
753 static int
754 _spdk_blob_parse_extent_page(struct spdk_blob_md_page *extent_page, struct spdk_blob *blob)
755 {
756 	assert(blob != NULL);
757 	assert(blob->state == SPDK_BLOB_STATE_LOADING);
758 
759 	if (_spdk_bs_load_cur_extent_page_valid(extent_page) == false) {
760 		return -ENOENT;
761 	}
762 
763 	return _spdk_blob_parse_page(extent_page, blob);
764 }
765 
766 static int
767 _spdk_blob_parse(const struct spdk_blob_md_page *pages, uint32_t page_count,
768 		 struct spdk_blob *blob)
769 {
770 	const struct spdk_blob_md_page *page;
771 	uint32_t i;
772 	int rc;
773 
774 	assert(page_count > 0);
775 	assert(pages[0].sequence_num == 0);
776 	assert(blob != NULL);
777 	assert(blob->state == SPDK_BLOB_STATE_LOADING);
778 	assert(blob->active.clusters == NULL);
779 
780 	/* The blobid provided doesn't match what's in the MD; this can
781 	 * happen, for example, if a bogus blobid is passed in through open.
782 	 */
783 	if (blob->id != pages[0].id) {
784 		SPDK_ERRLOG("Blobid (%lu) doesn't match what's in metadata (%lu)\n",
785 			    blob->id, pages[0].id);
786 		return -ENOENT;
787 	}
788 
789 	for (i = 0; i < page_count; i++) {
790 		page = &pages[i];
791 
792 		assert(page->id == blob->id);
793 		assert(page->sequence_num == i);
794 
795 		rc = _spdk_blob_parse_page(page, blob);
796 		if (rc != 0) {
797 			return rc;
798 		}
799 	}
800 
801 	return 0;
802 }
803 
804 static int
805 _spdk_blob_serialize_add_page(const struct spdk_blob *blob,
806 			      struct spdk_blob_md_page **pages,
807 			      uint32_t *page_count,
808 			      struct spdk_blob_md_page **last_page)
809 {
810 	struct spdk_blob_md_page *page;
811 
812 	assert(pages != NULL);
813 	assert(page_count != NULL);
814 
815 	if (*page_count == 0) {
816 		assert(*pages == NULL);
817 		*page_count = 1;
818 		*pages = spdk_malloc(SPDK_BS_PAGE_SIZE, SPDK_BS_PAGE_SIZE,
819 				     NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
820 	} else {
821 		assert(*pages != NULL);
822 		(*page_count)++;
823 		*pages = spdk_realloc(*pages,
824 				      SPDK_BS_PAGE_SIZE * (*page_count),
825 				      SPDK_BS_PAGE_SIZE);
826 	}
827 
828 	if (*pages == NULL) {
829 		*page_count = 0;
830 		*last_page = NULL;
831 		return -ENOMEM;
832 	}
833 
834 	page = &(*pages)[*page_count - 1];
835 	memset(page, 0, sizeof(*page));
836 	page->id = blob->id;
837 	page->sequence_num = *page_count - 1;
838 	page->next = SPDK_INVALID_MD_PAGE;
839 	*last_page = page;
840 
841 	return 0;
842 }
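
/*
 * Metadata pages are allocated with spdk_malloc()/spdk_realloc() from
 * DMA-safe memory because the same buffers are later handed directly to the
 * bs_dev for writing. Each appended page records its position in the chain
 * through sequence_num and carries SPDK_INVALID_MD_PAGE in page->next until
 * the persist path links a successor.
 */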
843 
844 /* Transform the in-memory representation 'xattr' into an on-disk xattr descriptor.
845  * Update required_sz on both success and failure.
846  */
848 static int
849 _spdk_blob_serialize_xattr(const struct spdk_xattr *xattr,
850 			   uint8_t *buf, size_t buf_sz,
851 			   size_t *required_sz, bool internal)
852 {
853 	struct spdk_blob_md_descriptor_xattr	*desc;
854 
855 	*required_sz = sizeof(struct spdk_blob_md_descriptor_xattr) +
856 		       strlen(xattr->name) +
857 		       xattr->value_len;
858 
859 	if (buf_sz < *required_sz) {
860 		return -1;
861 	}
862 
863 	desc = (struct spdk_blob_md_descriptor_xattr *)buf;
864 
865 	desc->type = internal ? SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL : SPDK_MD_DESCRIPTOR_TYPE_XATTR;
866 	desc->length = sizeof(desc->name_length) +
867 		       sizeof(desc->value_length) +
868 		       strlen(xattr->name) +
869 		       xattr->value_len;
870 	desc->name_length = strlen(xattr->name);
871 	desc->value_length = xattr->value_len;
872 
873 	memcpy(desc->name, xattr->name, desc->name_length);
874 	memcpy((void *)((uintptr_t)desc->name + desc->name_length),
875 	       xattr->value,
876 	       desc->value_length);
877 
878 	return 0;
879 }
880 
881 static void
882 _spdk_blob_serialize_extent_table_entry(const struct spdk_blob *blob,
883 					uint64_t start_ep, uint64_t *next_ep,
884 					uint8_t **buf, size_t *remaining_sz)
885 {
886 	struct spdk_blob_md_descriptor_extent_table *desc;
887 	size_t cur_sz;
888 	uint64_t i, et_idx;
889 	uint32_t extent_page, ep_len;
890 
891 	/* The buffer must have room for at least the num_clusters entry */
892 	cur_sz = sizeof(struct spdk_blob_md_descriptor) + sizeof(desc->num_clusters);
893 	if (*remaining_sz < cur_sz) {
894 		*next_ep = start_ep;
895 		return;
896 	}
897 
898 	desc = (struct spdk_blob_md_descriptor_extent_table *)*buf;
899 	desc->type = SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE;
900 
901 	desc->num_clusters = blob->active.num_clusters;
902 
903 	ep_len = 1;
904 	et_idx = 0;
905 	for (i = start_ep; i < blob->active.num_extent_pages; i++) {
906 		if (*remaining_sz < cur_sz + sizeof(desc->extent_page[0])) {
907 			/* If we ran out of buffer space, return */
908 			break;
909 		}
910 
911 		extent_page = blob->active.extent_pages[i];
912 		/* If both this and the next extent_page are unallocated, extend the run */
913 		if (extent_page == 0 &&
914 		    (i + 1 < blob->active.num_extent_pages && blob->active.extent_pages[i + 1] == 0)) {
915 			ep_len++;
916 			continue;
917 		}
918 		desc->extent_page[et_idx].page_idx = extent_page;
919 		desc->extent_page[et_idx].num_pages = ep_len;
920 		et_idx++;
921 
922 		ep_len = 1;
923 		cur_sz += sizeof(desc->extent_page[0]);
924 	}
925 	*next_ep = i;
926 
927 	desc->length = sizeof(desc->num_clusters) + sizeof(desc->extent_page[0]) * et_idx;
928 	*remaining_sz -= sizeof(struct spdk_blob_md_descriptor) + desc->length;
929 	*buf += sizeof(struct spdk_blob_md_descriptor) + desc->length;
930 }
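
/*
 * Only runs of unallocated extent pages are run-length encoded here; an
 * allocated extent page is always emitted as its own entry with
 * num_pages == 1 (the parser in _spdk_blob_parse_page() asserts exactly
 * that). For example, the page list [7, 0, 0, 0, 9] would serialize as
 * {7, 1}, {0, 3}, {9, 1}.
 */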
931 
932 static int
933 _spdk_blob_serialize_extent_table(const struct spdk_blob *blob,
934 				  struct spdk_blob_md_page **pages,
935 				  struct spdk_blob_md_page *cur_page,
936 				  uint32_t *page_count, uint8_t **buf,
937 				  size_t *remaining_sz)
938 {
939 	uint64_t				last_extent_page;
940 	int					rc;
941 
942 	last_extent_page = 0;
943 	/* At least a single extent table entry always has to be persisted.
944 	 * This case occurs when num_extent_pages == 0. */
945 	while (last_extent_page <= blob->active.num_extent_pages) {
946 		_spdk_blob_serialize_extent_table_entry(blob, last_extent_page, &last_extent_page, buf,
947 							remaining_sz);
948 
949 		if (last_extent_page == blob->active.num_extent_pages) {
950 			break;
951 		}
952 
953 		rc = _spdk_blob_serialize_add_page(blob, pages, page_count, &cur_page);
954 		if (rc < 0) {
955 			return rc;
956 		}
957 
958 		*buf = (uint8_t *)cur_page->descriptors;
959 		*remaining_sz = sizeof(cur_page->descriptors);
960 	}
961 
962 	return 0;
963 }
964 
965 static void
966 _spdk_blob_serialize_extent_rle(const struct spdk_blob *blob,
967 				uint64_t start_cluster, uint64_t *next_cluster,
968 				uint8_t **buf, size_t *buf_sz)
969 {
970 	struct spdk_blob_md_descriptor_extent_rle *desc_extent_rle;
971 	size_t cur_sz;
972 	uint64_t i, extent_idx;
973 	uint64_t lba, lba_per_cluster, lba_count;
974 
975 	/* The buffer must have room for at least one extent */
976 	cur_sz = sizeof(struct spdk_blob_md_descriptor) + sizeof(desc_extent_rle->extents[0]);
977 	if (*buf_sz < cur_sz) {
978 		*next_cluster = start_cluster;
979 		return;
980 	}
981 
982 	desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)*buf;
983 	desc_extent_rle->type = SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE;
984 
985 	lba_per_cluster = _spdk_bs_cluster_to_lba(blob->bs, 1);
986 
987 	lba = blob->active.clusters[start_cluster];
988 	lba_count = lba_per_cluster;
989 	extent_idx = 0;
990 	for (i = start_cluster + 1; i < blob->active.num_clusters; i++) {
991 		if ((lba + lba_count) == blob->active.clusters[i] && lba != 0) {
992 			/* Run-length encode sequential non-zero LBA */
993 			lba_count += lba_per_cluster;
994 			continue;
995 		} else if (lba == 0 && blob->active.clusters[i] == 0) {
996 			/* Run-length encode unallocated clusters */
997 			lba_count += lba_per_cluster;
998 			continue;
999 		}
1000 		desc_extent_rle->extents[extent_idx].cluster_idx = lba / lba_per_cluster;
1001 		desc_extent_rle->extents[extent_idx].length = lba_count / lba_per_cluster;
1002 		extent_idx++;
1003 
1004 		cur_sz += sizeof(desc_extent_rle->extents[extent_idx]);
1005 
1006 		if (*buf_sz < cur_sz) {
1007 			/* If we ran out of buffer space, return */
1008 			*next_cluster = i;
1009 			break;
1010 		}
1011 
1012 		lba = blob->active.clusters[i];
1013 		lba_count = lba_per_cluster;
1014 	}
1015 
1016 	if (*buf_sz >= cur_sz) {
1017 		desc_extent_rle->extents[extent_idx].cluster_idx = lba / lba_per_cluster;
1018 		desc_extent_rle->extents[extent_idx].length = lba_count / lba_per_cluster;
1019 		extent_idx++;
1020 
1021 		*next_cluster = blob->active.num_clusters;
1022 	}
1023 
1024 	desc_extent_rle->length = sizeof(desc_extent_rle->extents[0]) * extent_idx;
1025 	*buf_sz -= sizeof(struct spdk_blob_md_descriptor) + desc_extent_rle->length;
1026 	*buf += sizeof(struct spdk_blob_md_descriptor) + desc_extent_rle->length;
1027 }
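
/*
 * Illustrative sketch of the RLE encoding above, with cluster indices shown
 * instead of LBAs: an active cluster list resolving to [5, 6, 7, 0, 0, 9]
 * serializes as three extents: {cluster_idx = 5, length = 3} for the
 * contiguous run, {0, 2} for the unallocated gap, and {9, 1}.
 */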
1028 
1029 static int
1030 _spdk_blob_serialize_extents_rle(const struct spdk_blob *blob,
1031 				 struct spdk_blob_md_page **pages,
1032 				 struct spdk_blob_md_page *cur_page,
1033 				 uint32_t *page_count, uint8_t **buf,
1034 				 size_t *remaining_sz)
1035 {
1036 	uint64_t				last_cluster;
1037 	int					rc;
1038 
1039 	last_cluster = 0;
1040 	while (last_cluster < blob->active.num_clusters) {
1041 		_spdk_blob_serialize_extent_rle(blob, last_cluster, &last_cluster, buf, remaining_sz);
1042 
1043 		if (last_cluster == blob->active.num_clusters) {
1044 			break;
1045 		}
1046 
1047 		rc = _spdk_blob_serialize_add_page(blob, pages, page_count, &cur_page);
1048 		if (rc < 0) {
1049 			return rc;
1050 		}
1051 
1052 		*buf = (uint8_t *)cur_page->descriptors;
1053 		*remaining_sz = sizeof(cur_page->descriptors);
1054 	}
1055 
1056 	return 0;
1057 }
1058 
1059 static void
1060 _spdk_blob_serialize_extent_page(const struct spdk_blob *blob,
1061 				 uint64_t cluster, struct spdk_blob_md_page *page)
1062 {
1063 	struct spdk_blob_md_descriptor_extent_page *desc_extent;
1064 	uint64_t i, extent_idx;
1065 	uint64_t lba, lba_per_cluster;
1066 	uint64_t start_cluster_idx = (cluster / SPDK_EXTENTS_PER_EP) * SPDK_EXTENTS_PER_EP;
1067 
1068 	desc_extent = (struct spdk_blob_md_descriptor_extent_page *) page->descriptors;
1069 	desc_extent->type = SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE;
1070 
1071 	lba_per_cluster = _spdk_bs_cluster_to_lba(blob->bs, 1);
1072 
1073 	desc_extent->start_cluster_idx = start_cluster_idx;
1074 	extent_idx = 0;
1075 	for (i = start_cluster_idx; i < blob->active.num_clusters; i++) {
1076 		lba = blob->active.clusters[i];
1077 		desc_extent->cluster_idx[extent_idx++] = lba / lba_per_cluster;
1078 		if (extent_idx >= SPDK_EXTENTS_PER_EP) {
1079 			break;
1080 		}
1081 	}
1082 	desc_extent->length = sizeof(desc_extent->start_cluster_idx) +
1083 			      sizeof(desc_extent->cluster_idx[0]) * extent_idx;
1084 }
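
/*
 * Each extent page covers a fixed window of SPDK_EXTENTS_PER_EP clusters:
 * start_cluster_idx is rounded down to a multiple of SPDK_EXTENTS_PER_EP,
 * and, unlike the EXTENT_RLE descriptor, cluster indices are stored one per
 * entry without run-length encoding.
 */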
1085 
1086 static void
1087 _spdk_blob_serialize_flags(const struct spdk_blob *blob,
1088 			   uint8_t *buf, size_t *buf_sz)
1089 {
1090 	struct spdk_blob_md_descriptor_flags *desc;
1091 
1092 	/*
1093 	 * Flags get serialized first, so we should always have room for the flags
1094 	 *  descriptor.
1095 	 */
1096 	assert(*buf_sz >= sizeof(*desc));
1097 
1098 	desc = (struct spdk_blob_md_descriptor_flags *)buf;
1099 	desc->type = SPDK_MD_DESCRIPTOR_TYPE_FLAGS;
1100 	desc->length = sizeof(*desc) - sizeof(struct spdk_blob_md_descriptor);
1101 	desc->invalid_flags = blob->invalid_flags;
1102 	desc->data_ro_flags = blob->data_ro_flags;
1103 	desc->md_ro_flags = blob->md_ro_flags;
1104 
1105 	*buf_sz -= sizeof(*desc);
1106 }
1107 
1108 static int
1109 _spdk_blob_serialize_xattrs(const struct spdk_blob *blob,
1110 			    const struct spdk_xattr_tailq *xattrs, bool internal,
1111 			    struct spdk_blob_md_page **pages,
1112 			    struct spdk_blob_md_page *cur_page,
1113 			    uint32_t *page_count, uint8_t **buf,
1114 			    size_t *remaining_sz)
1115 {
1116 	const struct spdk_xattr	*xattr;
1117 	int	rc;
1118 
1119 	TAILQ_FOREACH(xattr, xattrs, link) {
1120 		size_t required_sz = 0;
1121 
1122 		rc = _spdk_blob_serialize_xattr(xattr,
1123 						*buf, *remaining_sz,
1124 						&required_sz, internal);
1125 		if (rc < 0) {
1126 			/* Need to add a new page to the chain */
1127 			rc = _spdk_blob_serialize_add_page(blob, pages, page_count,
1128 							   &cur_page);
1129 			if (rc < 0) {
1130 				spdk_free(*pages);
1131 				*pages = NULL;
1132 				*page_count = 0;
1133 				return rc;
1134 			}
1135 
1136 			*buf = (uint8_t *)cur_page->descriptors;
1137 			*remaining_sz = sizeof(cur_page->descriptors);
1138 
1139 			/* Try again */
1140 			required_sz = 0;
1141 			rc = _spdk_blob_serialize_xattr(xattr,
1142 							*buf, *remaining_sz,
1143 							&required_sz, internal);
1144 
1145 			if (rc < 0) {
1146 				spdk_free(*pages);
1147 				*pages = NULL;
1148 				*page_count = 0;
1149 				return rc;
1150 			}
1151 		}
1152 
1153 		*remaining_sz -= required_sz;
1154 		*buf += required_sz;
1155 	}
1156 
1157 	return 0;
1158 }
1159 
1160 static int
1161 _spdk_blob_serialize(const struct spdk_blob *blob, struct spdk_blob_md_page **pages,
1162 		     uint32_t *page_count)
1163 {
1164 	struct spdk_blob_md_page		*cur_page;
1165 	int					rc;
1166 	uint8_t					*buf;
1167 	size_t					remaining_sz;
1168 
1169 	assert(pages != NULL);
1170 	assert(page_count != NULL);
1171 	assert(blob != NULL);
1172 	assert(blob->state == SPDK_BLOB_STATE_DIRTY);
1173 
1174 	*pages = NULL;
1175 	*page_count = 0;
1176 
1177 	/* A blob always has at least 1 page, even if it has no descriptors */
1178 	rc = _spdk_blob_serialize_add_page(blob, pages, page_count, &cur_page);
1179 	if (rc < 0) {
1180 		return rc;
1181 	}
1182 
1183 	buf = (uint8_t *)cur_page->descriptors;
1184 	remaining_sz = sizeof(cur_page->descriptors);
1185 
1186 	/* Serialize flags */
1187 	_spdk_blob_serialize_flags(blob, buf, &remaining_sz);
1188 	buf += sizeof(struct spdk_blob_md_descriptor_flags);
1189 
1190 	/* Serialize xattrs */
1191 	rc = _spdk_blob_serialize_xattrs(blob, &blob->xattrs, false,
1192 					 pages, cur_page, page_count, &buf, &remaining_sz);
1193 	if (rc < 0) {
1194 		return rc;
1195 	}
1196 
1197 	/* Serialize internal xattrs */
1198 	rc = _spdk_blob_serialize_xattrs(blob, &blob->xattrs_internal, true,
1199 					 pages, cur_page, page_count, &buf, &remaining_sz);
1200 	if (rc < 0) {
1201 		return rc;
1202 	}
1203 
1204 	if (blob->use_extent_table) {
1205 		/* Serialize extent table */
1206 		rc = _spdk_blob_serialize_extent_table(blob, pages, cur_page, page_count, &buf, &remaining_sz);
1207 	} else {
1208 		/* Serialize extents */
1209 		rc = _spdk_blob_serialize_extents_rle(blob, pages, cur_page, page_count, &buf, &remaining_sz);
1210 	}
1211 
1212 	return rc;
1213 }
1214 
1215 struct spdk_blob_load_ctx {
1216 	struct spdk_blob		*blob;
1217 
1218 	struct spdk_blob_md_page	*pages;
1219 	uint32_t			num_pages;
1220 	uint32_t			next_extent_page;
1221 	spdk_bs_sequence_t	        *seq;
1222 
1223 	spdk_bs_sequence_cpl		cb_fn;
1224 	void				*cb_arg;
1225 };
1226 
1227 static uint32_t
1228 _spdk_blob_md_page_calc_crc(void *page)
1229 {
1230 	uint32_t		crc;
1231 
1232 	crc = BLOB_CRC32C_INITIAL;
1233 	crc = spdk_crc32c_update(page, SPDK_BS_PAGE_SIZE - 4, crc);
1234 	crc ^= BLOB_CRC32C_INITIAL;
1235 
1236 	return crc;
1238 }
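
/*
 * The CRC covers the entire 4 KiB metadata page except its own trailing
 * 4-byte crc field (hence SPDK_BS_PAGE_SIZE - 4), seeded and finalized with
 * BLOB_CRC32C_INITIAL per the usual CRC-32C convention.
 */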
1239 
1240 static void
1241 _spdk_blob_load_final(void *cb_arg, int bserrno)
1242 {
1243 	struct spdk_blob_load_ctx	*ctx = cb_arg;
1244 	struct spdk_blob		*blob = ctx->blob;
1245 
1246 	if (bserrno == 0) {
1247 		_spdk_blob_mark_clean(blob);
1248 	}
1249 
1250 	ctx->cb_fn(ctx->seq, ctx->cb_arg, bserrno);
1251 
1252 	/* Free the memory */
1253 	spdk_free(ctx->pages);
1254 	free(ctx);
1255 }
1256 
1257 static void
1258 _spdk_blob_load_snapshot_cpl(void *cb_arg, struct spdk_blob *snapshot, int bserrno)
1259 {
1260 	struct spdk_blob_load_ctx	*ctx = cb_arg;
1261 	struct spdk_blob		*blob = ctx->blob;
1262 
1263 	if (bserrno == 0) {
1264 		blob->back_bs_dev = bs_create_blob_bs_dev(snapshot);
1265 		if (blob->back_bs_dev == NULL) {
1266 			bserrno = -ENOMEM;
1267 		}
1268 	}
1269 	if (bserrno != 0) {
1270 		SPDK_ERRLOG("Snapshot open failed\n");
1271 	}
1272 
1273 	_spdk_blob_load_final(ctx, bserrno);
1274 }
1275 
1276 static void _spdk_blob_update_clear_method(struct spdk_blob *blob);
1277 
1278 static void
1279 _spdk_blob_load_backing_dev(void *cb_arg)
1280 {
1281 	struct spdk_blob_load_ctx	*ctx = cb_arg;
1282 	struct spdk_blob		*blob = ctx->blob;
1283 	const void			*value;
1284 	size_t				len;
1285 	int				rc;
1286 
1287 	if (spdk_blob_is_thin_provisioned(blob)) {
1288 		rc = _spdk_blob_get_xattr_value(blob, BLOB_SNAPSHOT, &value, &len, true);
1289 		if (rc == 0) {
1290 			if (len != sizeof(spdk_blob_id)) {
1291 				_spdk_blob_load_final(ctx, -EINVAL);
1292 				return;
1293 			}
1294 			/* open snapshot blob and continue in the callback function */
1295 			blob->parent_id = *(spdk_blob_id *)value;
1296 			spdk_bs_open_blob(blob->bs, blob->parent_id,
1297 					  _spdk_blob_load_snapshot_cpl, ctx);
1298 			return;
1299 		} else {
1300 			/* add zeroes_dev for thin provisioned blob */
1301 			blob->back_bs_dev = bs_create_zeroes_dev();
1302 		}
1303 	} else {
1304 		/* standard blob */
1305 		blob->back_bs_dev = NULL;
1306 	}
1307 	_spdk_blob_load_final(ctx, 0);
1308 }
1309 
1310 static void
1311 _spdk_blob_load_cpl_extents_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
1312 {
1313 	struct spdk_blob_load_ctx	*ctx = cb_arg;
1314 	struct spdk_blob		*blob = ctx->blob;
1315 	struct spdk_blob_md_page	*page;
1316 	uint64_t			i;
1317 	uint32_t			crc;
1318 	uint64_t			lba;
1319 	void				*tmp;
1320 	uint64_t			sz;
1321 
1322 	if (bserrno) {
1323 		SPDK_ERRLOG("Extent page read failed: %d\n", bserrno);
1324 		_spdk_blob_load_final(ctx, bserrno);
1325 		return;
1326 	}
1327 
1328 	if (ctx->pages == NULL) {
1329 		/* First iteration of this function; allocate a buffer for a single EXTENT_PAGE */
1330 		ctx->pages = spdk_zmalloc(SPDK_BS_PAGE_SIZE, SPDK_BS_PAGE_SIZE, NULL, SPDK_ENV_SOCKET_ID_ANY,
1331 					  SPDK_MALLOC_DMA);
1332 		if (!ctx->pages) {
1333 			_spdk_blob_load_final(ctx, -ENOMEM);
1334 			return;
1335 		}
1336 		ctx->num_pages = 1;
1337 		ctx->next_extent_page = 0;
1338 	} else {
1339 		page = &ctx->pages[0];
1340 		crc = _spdk_blob_md_page_calc_crc(page);
1341 		if (crc != page->crc) {
1342 			_spdk_blob_load_final(ctx, -EINVAL);
1343 			return;
1344 		}
1345 
1346 		if (page->next != SPDK_INVALID_MD_PAGE) {
1347 			_spdk_blob_load_final(ctx, -EINVAL);
1348 			return;
1349 		}
1350 
1351 		bserrno = _spdk_blob_parse_extent_page(page, blob);
1352 		if (bserrno) {
1353 			_spdk_blob_load_final(ctx, bserrno);
1354 			return;
1355 		}
1356 	}
1357 
1358 	for (i = ctx->next_extent_page; i < blob->active.num_extent_pages; i++) {
1359 		if (blob->active.extent_pages[i] != 0) {
1360 			/* Extent page was allocated, read and parse it. */
1361 			lba = _spdk_bs_md_page_to_lba(blob->bs, blob->active.extent_pages[i]);
1362 			ctx->next_extent_page = i + 1;
1363 
1364 			bs_sequence_read_dev(seq, &ctx->pages[0], lba,
1365 					     _spdk_bs_byte_to_lba(blob->bs, SPDK_BS_PAGE_SIZE),
1366 					     _spdk_blob_load_cpl_extents_cpl, ctx);
1367 			return;
1368 		} else {
1369 			/* Thin provisioned blobs can point to unallocated extent pages.
1370 			 * In this case the blob size is increased by up to the number of clusters left in remaining_clusters_in_et. */
1371 
1372 			sz = spdk_min(blob->remaining_clusters_in_et, SPDK_EXTENTS_PER_EP);
1373 			blob->active.num_clusters += sz;
1374 			blob->remaining_clusters_in_et -= sz;
1375 
1376 			assert(spdk_blob_is_thin_provisioned(blob));
1377 			assert(i + 1 < blob->active.num_extent_pages || blob->remaining_clusters_in_et == 0);
1378 
1379 			tmp = realloc(blob->active.clusters, blob->active.num_clusters * sizeof(*blob->active.clusters));
1380 			if (tmp == NULL) {
1381 				_spdk_blob_load_final(ctx, -ENOMEM);
1382 				return;
1383 			}
1384 			memset(tmp + sizeof(*blob->active.clusters) * blob->active.cluster_array_size, 0,
1385 			       sizeof(*blob->active.clusters) * (blob->active.num_clusters - blob->active.cluster_array_size));
1386 			blob->active.clusters = tmp;
1387 			blob->active.cluster_array_size = blob->active.num_clusters;
1388 		}
1389 	}
1390 
1391 	_spdk_blob_load_backing_dev(ctx);
1392 }
1393 
1394 static void
1395 _spdk_blob_load_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
1396 {
1397 	struct spdk_blob_load_ctx	*ctx = cb_arg;
1398 	struct spdk_blob		*blob = ctx->blob;
1399 	struct spdk_blob_md_page	*page;
1400 	int				rc;
1401 	uint32_t			crc;
1402 
1403 	if (bserrno) {
1404 		SPDK_ERRLOG("Metadata page read failed: %d\n", bserrno);
1405 		_spdk_blob_load_final(ctx, bserrno);
1406 		return;
1407 	}
1408 
1409 	page = &ctx->pages[ctx->num_pages - 1];
1410 	crc = _spdk_blob_md_page_calc_crc(page);
1411 	if (crc != page->crc) {
1412 		SPDK_ERRLOG("Metadata page %d crc mismatch\n", ctx->num_pages);
1413 		_spdk_blob_load_final(ctx, -EINVAL);
1414 		return;
1415 	}
1416 
1417 	if (page->next != SPDK_INVALID_MD_PAGE) {
1418 		uint32_t next_page = page->next;
1419 		uint64_t next_lba = _spdk_bs_md_page_to_lba(blob->bs, next_page);
1420 
1421 		/* Read the next page */
1422 		ctx->num_pages++;
1423 		ctx->pages = spdk_realloc(ctx->pages, (sizeof(*page) * ctx->num_pages),
1424 					  sizeof(*page));
1425 		if (ctx->pages == NULL) {
1426 			_spdk_blob_load_final(ctx, -ENOMEM);
1427 			return;
1428 		}
1429 
1430 		bs_sequence_read_dev(seq, &ctx->pages[ctx->num_pages - 1],
1431 				     next_lba,
1432 				     _spdk_bs_byte_to_lba(blob->bs, sizeof(*page)),
1433 				     _spdk_blob_load_cpl, ctx);
1434 		return;
1435 	}
1436 
1437 	/* Parse the pages */
1438 	rc = _spdk_blob_parse(ctx->pages, ctx->num_pages, blob);
1439 	if (rc) {
1440 		_spdk_blob_load_final(ctx, rc);
1441 		return;
1442 	}
1443 
1444 	if (blob->extent_table_found == true) {
1445 		/* If EXTENT_TABLE was found, that means support for it should be enabled. */
1446 		assert(blob->extent_rle_found == false);
1447 		blob->use_extent_table = true;
1448 	} else {
1449 		/* If EXTENT_RLE or no extent_* descriptor was found, disable support
1450 		 * for the extent table. No extent_* descriptors means that the blob has a length of 0
1451 		 * and no extent_rle descriptors were persisted for it.
1452 		 * EXTENT_TABLE, if used, is always present in the metadata regardless of length. */
1453 		blob->use_extent_table = false;
1454 	}
1455 
1456 	/* Check the clear_method stored in metadata vs what may have been passed
1457 	 * via spdk_bs_open_blob_ext() and update accordingly.
1458 	 */
1459 	_spdk_blob_update_clear_method(blob);
1460 
1461 	spdk_free(ctx->pages);
1462 	ctx->pages = NULL;
1463 
1464 	if (blob->extent_table_found) {
1465 		_spdk_blob_load_cpl_extents_cpl(seq, ctx, 0);
1466 	} else {
1467 		_spdk_blob_load_backing_dev(ctx);
1468 	}
1469 }
1470 
1471 /* Load a blob from disk given a blobid */
1472 static void
1473 _spdk_blob_load(spdk_bs_sequence_t *seq, struct spdk_blob *blob,
1474 		spdk_bs_sequence_cpl cb_fn, void *cb_arg)
1475 {
1476 	struct spdk_blob_load_ctx *ctx;
1477 	struct spdk_blob_store *bs;
1478 	uint32_t page_num;
1479 	uint64_t lba;
1480 
1481 	_spdk_blob_verify_md_op(blob);
1482 
1483 	bs = blob->bs;
1484 
1485 	ctx = calloc(1, sizeof(*ctx));
1486 	if (!ctx) {
1487 		cb_fn(seq, cb_arg, -ENOMEM);
1488 		return;
1489 	}
1490 
1491 	ctx->blob = blob;
1492 	ctx->pages = spdk_realloc(ctx->pages, SPDK_BS_PAGE_SIZE, SPDK_BS_PAGE_SIZE);
1493 	if (!ctx->pages) {
1494 		free(ctx);
1495 		cb_fn(seq, cb_arg, -ENOMEM);
1496 		return;
1497 	}
1498 	ctx->num_pages = 1;
1499 	ctx->cb_fn = cb_fn;
1500 	ctx->cb_arg = cb_arg;
1501 	ctx->seq = seq;
1502 
1503 	page_num = _spdk_bs_blobid_to_page(blob->id);
1504 	lba = _spdk_bs_md_page_to_lba(blob->bs, page_num);
1505 
1506 	blob->state = SPDK_BLOB_STATE_LOADING;
1507 
1508 	bs_sequence_read_dev(seq, &ctx->pages[0], lba,
1509 			     _spdk_bs_byte_to_lba(bs, SPDK_BS_PAGE_SIZE),
1510 			     _spdk_blob_load_cpl, ctx);
1511 }
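
/*
 * Load sequence summary: the root metadata page is read from the LBA derived
 * from the blobid, _spdk_blob_load_cpl() then follows the page->next chain
 * one page at a time, parses the accumulated pages, reads any allocated
 * extent pages (extent table blobs only), and finally resolves the backing
 * device (snapshot, zeroes_dev, or none) before completing via ctx->cb_fn.
 */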
1512 
1513 struct spdk_blob_persist_ctx {
1514 	struct spdk_blob		*blob;
1515 
1516 	struct spdk_bs_super_block	*super;
1517 
1518 	struct spdk_blob_md_page	*pages;
1519 	uint32_t			next_extent_page;
1520 	struct spdk_blob_md_page	*extent_page;
1521 
1522 	spdk_bs_sequence_t		*seq;
1523 	spdk_bs_sequence_cpl		cb_fn;
1524 	void				*cb_arg;
1525 	TAILQ_ENTRY(spdk_blob_persist_ctx) link;
1526 };
1527 
1528 static void
1529 spdk_bs_batch_clear_dev(struct spdk_blob_persist_ctx *ctx, spdk_bs_batch_t *batch, uint64_t lba,
1530 			uint32_t lba_count)
1531 {
1532 	switch (ctx->blob->clear_method) {
1533 	case BLOB_CLEAR_WITH_DEFAULT:
1534 	case BLOB_CLEAR_WITH_UNMAP:
1535 		bs_batch_unmap_dev(batch, lba, lba_count);
1536 		break;
1537 	case BLOB_CLEAR_WITH_WRITE_ZEROES:
1538 		bs_batch_write_zeroes_dev(batch, lba, lba_count);
1539 		break;
1540 	case BLOB_CLEAR_WITH_NONE:
1541 	default:
1542 		break;
1543 	}
1544 }
1545 
1546 static void _spdk_blob_persist_check_dirty(struct spdk_blob_persist_ctx *ctx);
1547 
1548 static void
1549 _spdk_blob_persist_complete(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
1550 {
1551 	struct spdk_blob_persist_ctx	*ctx = cb_arg;
1552 	struct spdk_blob_persist_ctx	*next_persist;
1553 	struct spdk_blob		*blob = ctx->blob;
1554 
1555 	if (bserrno == 0) {
1556 		_spdk_blob_mark_clean(blob);
1557 	}
1558 
1559 	assert(ctx == TAILQ_FIRST(&blob->pending_persists));
1560 	TAILQ_REMOVE(&blob->pending_persists, ctx, link);
1561 
1562 	next_persist = TAILQ_FIRST(&blob->pending_persists);
1563 
1564 	/* Call user callback */
1565 	ctx->cb_fn(seq, ctx->cb_arg, bserrno);
1566 
1567 	/* Free the memory */
1568 	spdk_free(ctx->pages);
1569 	free(ctx);
1570 
1571 	if (next_persist != NULL) {
1572 		_spdk_blob_persist_check_dirty(next_persist);
1573 	}
1574 }
1575 
1576 static void
1577 _spdk_blob_persist_clear_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
1578 {
1579 	struct spdk_blob_persist_ctx	*ctx = cb_arg;
1580 	struct spdk_blob		*blob = ctx->blob;
1581 	struct spdk_blob_store		*bs = blob->bs;
1582 	size_t				i;
1583 
1584 	if (bserrno != 0) {
1585 		_spdk_blob_persist_complete(seq, ctx, bserrno);
1586 		return;
1587 	}
1588 
1589 	/* Release all clusters that were truncated */
1590 	for (i = blob->active.num_clusters; i < blob->active.cluster_array_size; i++) {
1591 		uint32_t cluster_num = _spdk_bs_lba_to_cluster(bs, blob->active.clusters[i]);
1592 
1593 		/* Nothing to release if it was not allocated */
1594 		if (blob->active.clusters[i] != 0) {
1595 			_spdk_bs_release_cluster(bs, cluster_num);
1596 		}
1597 	}
1598 
1599 	if (blob->active.num_clusters == 0) {
1600 		free(blob->active.clusters);
1601 		blob->active.clusters = NULL;
1602 		blob->active.cluster_array_size = 0;
1603 	} else if (blob->active.num_clusters != blob->active.cluster_array_size) {
1604 #ifndef __clang_analyzer__
1605 		void *tmp;
1606 
1607 		/* scan-build cannot reason about reallocs; work around it */
1608 		tmp = realloc(blob->active.clusters, sizeof(*blob->active.clusters) * blob->active.num_clusters);
1609 		assert(tmp != NULL);
1610 		blob->active.clusters = tmp;
1611 
1612 		tmp = realloc(blob->active.extent_pages, sizeof(uint32_t) * blob->active.num_extent_pages);
1613 		assert(tmp != NULL);
1614 		blob->active.extent_pages = tmp;
1615 #endif
1616 		blob->active.extent_pages_array_size = blob->active.num_extent_pages;
1617 		blob->active.cluster_array_size = blob->active.num_clusters;
1618 	}
1619 
1620 	/* TODO: Add path to persist clear extent pages. */
1621 	_spdk_blob_persist_complete(seq, ctx, bserrno);
1622 }
1623 
1624 static void
1625 _spdk_blob_persist_clear_clusters(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
1626 {
1627 	struct spdk_blob_persist_ctx	*ctx = cb_arg;
1628 	struct spdk_blob		*blob = ctx->blob;
1629 	struct spdk_blob_store		*bs = blob->bs;
1630 	spdk_bs_batch_t			*batch;
1631 	size_t				i;
1632 	uint64_t			lba;
1633 	uint32_t			lba_count;
1634 
1635 	if (bserrno != 0) {
1636 		_spdk_blob_persist_complete(seq, ctx, bserrno);
1637 		return;
1638 	}
1639 
1640 	/* Clusters don't move around in blobs. The list shrinks or grows
1641 	 * at the end, but no changes ever occur in the middle of the list.
1642 	 */
1643 
1644 	batch = bs_sequence_to_batch(seq, _spdk_blob_persist_clear_clusters_cpl, ctx);
1645 
1646 	/* Clear all clusters that were truncated */
1647 	lba = 0;
1648 	lba_count = 0;
1649 	for (i = blob->active.num_clusters; i < blob->active.cluster_array_size; i++) {
1650 		uint64_t next_lba = blob->active.clusters[i];
1651 		uint32_t next_lba_count = _spdk_bs_cluster_to_lba(bs, 1);
1652 
1653 		if (next_lba > 0 && (lba + lba_count) == next_lba) {
1654 			/* This cluster is contiguous with the previous one. */
1655 			lba_count += next_lba_count;
1656 			continue;
1657 		}
1658 
1659 		/* This cluster is not contiguous with the previous one. */
1660 
1661 		/* If a run of LBAs previously existed, clear them now */
1662 		if (lba_count > 0) {
1663 			spdk_bs_batch_clear_dev(ctx, batch, lba, lba_count);
1664 		}
1665 
1666 		/* Start building the next batch */
1667 		lba = next_lba;
1668 		if (next_lba > 0) {
1669 			lba_count = next_lba_count;
1670 		} else {
1671 			lba_count = 0;
1672 		}
1673 	}
1674 
1675 	/* If we ended with a contiguous set of LBAs, clear them now */
1676 	if (lba_count > 0) {
1677 		spdk_bs_batch_clear_dev(ctx, batch, lba, lba_count);
1678 	}
1679 
1680 	bs_batch_close(batch);
1681 }
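
/*
 * Truncated clusters are cleared in contiguous LBA runs: adjacent allocated
 * clusters are coalesced into a single clear operation, and how a run is
 * cleared (unmap, write_zeroes, or not at all) is decided by
 * spdk_bs_batch_clear_dev() from the blob's clear_method.
 */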
1682 
1683 static void
1684 _spdk_blob_persist_zero_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
1685 {
1686 	struct spdk_blob_persist_ctx	*ctx = cb_arg;
1687 	struct spdk_blob		*blob = ctx->blob;
1688 	struct spdk_blob_store		*bs = blob->bs;
1689 	size_t				i;
1690 
1691 	if (bserrno != 0) {
1692 		_spdk_blob_persist_complete(seq, ctx, bserrno);
1693 		return;
1694 	}
1695 
1696 	/* This loop starts at 1 because the first page is special and handled
1697 	 * below. The pages (except the first) are never written in place,
1698 	 * so any pages in the clean list must be released.
1699 	 */
1700 	for (i = 1; i < blob->clean.num_pages; i++) {
1701 		_spdk_bs_release_md_page(bs, blob->clean.pages[i]);
1702 	}
1703 
1704 	if (blob->active.num_pages == 0) {
1705 		uint32_t page_num;
1706 
1707 		page_num = _spdk_bs_blobid_to_page(blob->id);
1708 		_spdk_bs_release_md_page(bs, page_num);
1709 	}
1710 
1711 	/* Move on to clearing clusters */
1712 	_spdk_blob_persist_clear_clusters(seq, ctx, 0);
1713 }
1714 
1715 static void
1716 _spdk_blob_persist_zero_pages(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
1717 {
1718 	struct spdk_blob_persist_ctx	*ctx = cb_arg;
1719 	struct spdk_blob		*blob = ctx->blob;
1720 	struct spdk_blob_store		*bs = blob->bs;
1721 	uint64_t			lba;
1722 	uint32_t			lba_count;
1723 	spdk_bs_batch_t			*batch;
1724 	size_t				i;
1725 
1726 	if (bserrno != 0) {
1727 		_spdk_blob_persist_complete(seq, ctx, bserrno);
1728 		return;
1729 	}
1730 
1731 	batch = bs_sequence_to_batch(seq, _spdk_blob_persist_zero_pages_cpl, ctx);
1732 
1733 	lba_count = _spdk_bs_byte_to_lba(bs, SPDK_BS_PAGE_SIZE);
1734 
1735 	/* This loop starts at 1 because the first page is special and handled
1736 	 * below. The pages (except the first) are never written in place,
1737 	 * so any pages in the clean list must be zeroed.
1738 	 */
1739 	for (i = 1; i < blob->clean.num_pages; i++) {
1740 		lba = _spdk_bs_md_page_to_lba(bs, blob->clean.pages[i]);
1741 
1742 		bs_batch_write_zeroes_dev(batch, lba, lba_count);
1743 	}
1744 
1745 	/* The first page will only be zeroed if this is a delete. */
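	/* Zeroing the first page is what makes the deletion effective: on load,
	 * a blob's metadata chain is discovered starting from the page at the
	 * blobid-fixed location, and a zeroed page presumably fails the crc
	 * check, so the blobid slot reads as free. */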
1746 	if (blob->active.num_pages == 0) {
1747 		uint32_t page_num;
1748 
1749 		/* The first page in the metadata goes where the blobid indicates */
1750 		page_num = _spdk_bs_blobid_to_page(blob->id);
1751 		lba = _spdk_bs_md_page_to_lba(bs, page_num);
1752 
1753 		bs_batch_write_zeroes_dev(batch, lba, lba_count);
1754 	}
1755 
1756 	bs_batch_close(batch);
1757 }
1758 
1759 static void
1760 _spdk_blob_persist_write_page_root(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
1761 {
1762 	struct spdk_blob_persist_ctx	*ctx = cb_arg;
1763 	struct spdk_blob		*blob = ctx->blob;
1764 	struct spdk_blob_store		*bs = blob->bs;
1765 	uint64_t			lba;
1766 	uint32_t			lba_count;
1767 	struct spdk_blob_md_page	*page;
1768 
1769 	if (bserrno != 0) {
1770 		_spdk_blob_persist_complete(seq, ctx, bserrno);
1771 		return;
1772 	}
1773 
1774 	if (blob->active.num_pages == 0) {
1775 		/* Move on to the next step */
1776 		_spdk_blob_persist_zero_pages(seq, ctx, 0);
1777 		return;
1778 	}
1779 
1780 	lba_count = _spdk_bs_byte_to_lba(bs, sizeof(*page));
1781 
1782 	page = &ctx->pages[0];
1783 	/* The first page in the metadata goes where the blobid indicates */
1784 	lba = _spdk_bs_md_page_to_lba(bs, _spdk_bs_blobid_to_page(blob->id));
1785 
1786 	bs_sequence_write_dev(seq, page, lba, lba_count,
1787 			      _spdk_blob_persist_zero_pages, ctx);
1788 }
1789 
1790 static void
1791 _spdk_blob_persist_write_page_chain(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
1792 {
1793 	struct spdk_blob_persist_ctx	*ctx = cb_arg;
1794 	struct spdk_blob		*blob = ctx->blob;
1795 	struct spdk_blob_store		*bs = blob->bs;
1796 	uint64_t			lba;
1797 	uint32_t			lba_count;
1798 	struct spdk_blob_md_page	*page;
1799 	spdk_bs_batch_t			*batch;
1800 	size_t				i;
1801 
1802 	if (bserrno != 0) {
1803 		_spdk_blob_persist_complete(seq, ctx, bserrno);
1804 		return;
1805 	}
1806 
1807 	/* Clusters don't move around in blobs. The list shrinks or grows
1808 	 * at the end, but no changes ever occur in the middle of the list.
1809 	 */
1810 
1811 	lba_count = _spdk_bs_byte_to_lba(bs, sizeof(*page));
1812 
1813 	batch = bs_sequence_to_batch(seq, _spdk_blob_persist_write_page_root, ctx);
1814 
1815 	/* This loop starts at 1. The root page is not written until all of the
1816 	 * other pages are finished, so a crash mid-persist never leaves a
1817 	 * valid root page referencing unwritten metadata. */
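	/* e.g. for a 3-page blob, pages[1] and pages[2] are written in this
	 * batch, and the batch completion (_spdk_blob_persist_write_page_root)
	 * is what finally writes pages[0]. */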
1818 	for (i = 1; i < blob->active.num_pages; i++) {
1819 		page = &ctx->pages[i];
1820 		assert(page->sequence_num == i);
1821 
1822 		lba = _spdk_bs_md_page_to_lba(bs, blob->active.pages[i]);
1823 
1824 		bs_batch_write_dev(batch, page, lba, lba_count);
1825 	}
1826 
1827 	bs_batch_close(batch);
1828 }
1829 
1830 static int
1831 _spdk_blob_resize(struct spdk_blob *blob, uint64_t sz)
1832 {
1833 	uint64_t	i;
1834 	uint64_t	*tmp;
1835 	uint64_t	lfc; /* lowest free cluster */
1836 	uint32_t	lfmd; /* lowest free md page */
1837 	uint64_t	num_clusters;
1838 	uint32_t	*ep_tmp;
1839 	uint64_t	new_num_ep = 0, current_num_ep = 0;
1840 	struct spdk_blob_store *bs;
1841 
1842 	bs = blob->bs;
1843 
1844 	_spdk_blob_verify_md_op(blob);
1845 
1846 	if (blob->active.num_clusters == sz) {
1847 		return 0;
1848 	}
1849 
1850 	if (blob->active.num_clusters < blob->active.cluster_array_size) {
1851 		/* If this blob was resized to be larger, then smaller, then
1852 		 * larger without syncing, then the cluster array already
1853 		 * contains spare assigned clusters we can use.
1854 		 */
1855 		num_clusters = spdk_min(blob->active.cluster_array_size,
1856 					sz);
1857 	} else {
1858 		num_clusters = blob->active.num_clusters;
1859 	}
1860 
1861 	if (blob->use_extent_table) {
1862 		/* Round up, since every cluster beyond the current extent table
1863 		 * size requires a new extent page. */
1864 		new_num_ep = spdk_divide_round_up(sz, SPDK_EXTENTS_PER_EP);
1865 		current_num_ep = spdk_divide_round_up(num_clusters, SPDK_EXTENTS_PER_EP);
1866 	}
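	/* Worked example (hypothetical value, assuming SPDK_EXTENTS_PER_EP were
	 * 512): growing from 1000 to 2000 clusters gives current_num_ep == 2
	 * and new_num_ep == 4, i.e. two additional extent pages must be
	 * allocatable. */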
1867 
1868 	/* Do two passes - one to verify that we can obtain enough clusters
1869 	 * and md pages, another to actually claim them.
1870 	 */
1871 
1872 	if (spdk_blob_is_thin_provisioned(blob) == false) {
1873 		lfc = 0;
1874 		for (i = num_clusters; i < sz; i++) {
1875 			lfc = spdk_bit_array_find_first_clear(bs->used_clusters, lfc);
1876 			if (lfc == UINT32_MAX) {
1877 				/* No more free clusters. Cannot satisfy the request */
1878 				return -ENOSPC;
1879 			}
1880 			lfc++;
1881 		}
1882 		lfmd = 0;
1883 		for (i = current_num_ep; i < new_num_ep; i++) {
1884 			lfmd = spdk_bit_array_find_first_clear(blob->bs->used_md_pages, lfmd);
1885 			if (lfmd == UINT32_MAX) {
1886 				/* No more free md pages. Cannot satisfy the request */
1887 				return -ENOSPC;
1888 			}
			/* Advance past the free page just found; without this the
			 * loop would re-check the same page on every iteration and
			 * under-count the md pages actually required. */
			lfmd++;
1889 		}
1890 	}
1891 
1892 	if (sz > num_clusters) {
1893 		/* Expand the cluster array if necessary.
1894 		 * We only shrink the array when persisting.
1895 		 */
1896 		tmp = realloc(blob->active.clusters, sizeof(*blob->active.clusters) * sz);
1897 		if (sz > 0 && tmp == NULL) {
1898 			return -ENOMEM;
1899 		}
1900 		memset(tmp + blob->active.cluster_array_size, 0,
1901 		       sizeof(*blob->active.clusters) * (sz - blob->active.cluster_array_size));
1902 		blob->active.clusters = tmp;
1903 		blob->active.cluster_array_size = sz;
1904 
1905 		/* Expand the extents table, only if enough clusters were added */
1906 		if (new_num_ep > current_num_ep && blob->use_extent_table) {
1907 			ep_tmp = realloc(blob->active.extent_pages, sizeof(*blob->active.extent_pages) * new_num_ep);
1908 			if (new_num_ep > 0 && ep_tmp == NULL) {
1909 				return -ENOMEM;
1910 			}
1911 			memset(ep_tmp + blob->active.extent_pages_array_size, 0,
1912 			       sizeof(*blob->active.extent_pages) * (new_num_ep - blob->active.extent_pages_array_size));
1913 			blob->active.extent_pages = ep_tmp;
1914 			blob->active.extent_pages_array_size = new_num_ep;
1915 		}
1916 	}
1917 
1918 	blob->state = SPDK_BLOB_STATE_DIRTY;
1919 
1920 	if (spdk_blob_is_thin_provisioned(blob) == false) {
1921 		lfc = 0;
1922 		lfmd = 0;
1923 		for (i = num_clusters; i < sz; i++) {
1924 			_spdk_bs_allocate_cluster(blob, i, &lfc, &lfmd, true);
1925 			lfc++;
1926 			lfmd++;
1927 		}
1928 	}
1929 
1930 	blob->active.num_clusters = sz;
1931 	blob->active.num_extent_pages = new_num_ep;
1932 
1933 	return 0;
1934 }
1935 
1936 static void
1937 _spdk_blob_persist_generate_new_md(struct spdk_blob_persist_ctx *ctx)
1938 {
1939 	spdk_bs_sequence_t *seq = ctx->seq;
1940 	struct spdk_blob *blob = ctx->blob;
1941 	struct spdk_blob_store *bs = blob->bs;
1942 	uint64_t i;
1943 	uint32_t page_num;
1944 	void *tmp;
1945 	int rc;
1946 
1947 	/* Generate the new metadata */
1948 	rc = _spdk_blob_serialize(blob, &ctx->pages, &blob->active.num_pages);
1949 	if (rc < 0) {
1950 		_spdk_blob_persist_complete(seq, ctx, rc);
1951 		return;
1952 	}
1953 
1954 	assert(blob->active.num_pages >= 1);
1955 
1956 	/* Resize the cache of page indices */
1957 	tmp = realloc(blob->active.pages, blob->active.num_pages * sizeof(*blob->active.pages));
1958 	if (!tmp) {
1959 		_spdk_blob_persist_complete(seq, ctx, -ENOMEM);
1960 		return;
1961 	}
1962 	blob->active.pages = tmp;
1963 
1964 	/* Assign this metadata to pages. This requires two passes -
1965 	 * one to verify that there are enough pages and a second
1966 	 * to actually claim them. */
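	/* Sketch of the resulting chain for a hypothetical 3-page blob: page 0
	 * lives at the slot fixed by the blobid and stores next == pages[1];
	 * pages[1] stores next == pages[2]. Note the crc of each page is only
	 * computed once its next pointer is final. */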
1967 	page_num = 0;
1968 	/* Note that this loop starts at one. The first page location is fixed by the blobid. */
1969 	for (i = 1; i < blob->active.num_pages; i++) {
1970 		page_num = spdk_bit_array_find_first_clear(bs->used_md_pages, page_num);
1971 		if (page_num == UINT32_MAX) {
1972 			_spdk_blob_persist_complete(seq, ctx, -ENOMEM);
1973 			return;
1974 		}
1975 		page_num++;
1976 	}
1977 
1978 	page_num = 0;
1979 	blob->active.pages[0] = _spdk_bs_blobid_to_page(blob->id);
1980 	for (i = 1; i < blob->active.num_pages; i++) {
1981 		page_num = spdk_bit_array_find_first_clear(bs->used_md_pages, page_num);
1982 		ctx->pages[i - 1].next = page_num;
1983 		/* Now that previous metadata page is complete, calculate the crc for it. */
1984 		ctx->pages[i - 1].crc = _spdk_blob_md_page_calc_crc(&ctx->pages[i - 1]);
1985 		blob->active.pages[i] = page_num;
1986 		_spdk_bs_claim_md_page(bs, page_num);
1987 		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Claiming page %u for blob %" PRIu64 "\n", page_num, blob->id);
1988 		page_num++;
1989 	}
1990 	ctx->pages[i - 1].crc = _spdk_blob_md_page_calc_crc(&ctx->pages[i - 1]);
1991 	/* Start writing the metadata from last page to first */
1992 	blob->state = SPDK_BLOB_STATE_CLEAN;
1993 	_spdk_blob_persist_write_page_chain(seq, ctx, 0);
1994 }
1995 
1996 static void
1997 _spdk_blob_persist_write_extent_pages(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
1998 {
1999 	struct spdk_blob_persist_ctx	*ctx = cb_arg;
2000 	struct spdk_blob		*blob = ctx->blob;
2001 	size_t				i;
2002 	uint32_t			extent_page_id;
2003 	uint32_t			page_count = 0;
2004 	int				rc;
2005 
2006 	if (ctx->extent_page != NULL) {
2007 		spdk_free(ctx->extent_page);
2008 		ctx->extent_page = NULL;
2009 	}
2010 
2011 	if (bserrno != 0) {
2012 		_spdk_blob_persist_complete(seq, ctx, bserrno);
2013 		return;
2014 	}
2015 
2016 	/* Only write out changed extent pages */
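	/* This function acts as its own write completion: ctx->next_extent_page
	 * records where to resume the scan, so at most one extent page write is
	 * in flight at a time and _spdk_blob_persist_generate_new_md() runs
	 * only after every changed extent page has been written. */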
2017 	for (i = ctx->next_extent_page; i < blob->active.num_extent_pages; i++) {
2018 		extent_page_id = blob->active.extent_pages[i];
2019 		if (extent_page_id == 0) {
2020 			/* No Extent Page to persist */
2021 			assert(spdk_blob_is_thin_provisioned(blob));
2022 			continue;
2023 		}
2024 		/* Writing out a new extent page for the first time. Either the active extent page
2025 		 * array is larger than the clean one, or no extent page was assigned yet due to thin provisioning. */
2026 		if (i >= blob->clean.extent_pages_array_size || blob->clean.extent_pages[i] == 0) {
2027 			blob->state = SPDK_BLOB_STATE_DIRTY;
2028 			assert(spdk_bit_array_get(blob->bs->used_md_pages, extent_page_id));
2029 			ctx->next_extent_page = i + 1;
2030 			rc = _spdk_blob_serialize_add_page(ctx->blob, &ctx->extent_page, &page_count, &ctx->extent_page);
2031 			if (rc < 0) {
2032 				_spdk_blob_persist_complete(seq, ctx, rc);
2033 				return;
2034 			}
2035 
2036 			_spdk_blob_serialize_extent_page(blob, i * SPDK_EXTENTS_PER_EP, ctx->extent_page);
2037 
2038 			ctx->extent_page->crc = _spdk_blob_md_page_calc_crc(ctx->extent_page);
2039 
2040 			bs_sequence_write_dev(seq, ctx->extent_page, _spdk_bs_md_page_to_lba(blob->bs, extent_page_id),
2041 					      _spdk_bs_byte_to_lba(blob->bs, SPDK_BS_PAGE_SIZE),
2042 					      _spdk_blob_persist_write_extent_pages, ctx);
2043 			return;
2044 		}
2045 		assert(blob->clean.extent_pages[i] != 0);
2046 	}
2047 
2048 	_spdk_blob_persist_generate_new_md(ctx);
2049 }
2050 
2051 static void
2052 _spdk_blob_persist_start(struct spdk_blob_persist_ctx *ctx)
2053 {
2054 	spdk_bs_sequence_t *seq = ctx->seq;
2055 	struct spdk_blob *blob = ctx->blob;
2056 
2057 	if (blob->active.num_pages == 0) {
2058 		/* This is the signal that the blob should be deleted.
2059 		 * Immediately jump to the cleanup routine. */
2060 		assert(blob->clean.num_pages > 0);
2061 		blob->state = SPDK_BLOB_STATE_CLEAN;
2062 		_spdk_blob_persist_zero_pages(seq, ctx, 0);
2063 		return;
2065 	}
2066 
2067 	_spdk_blob_persist_write_extent_pages(seq, ctx, 0);
2068 }
2069 
2070 static void
2071 _spdk_blob_persist_dirty_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2072 {
2073 	struct spdk_blob_persist_ctx *ctx = cb_arg;
2074 
2075 	spdk_free(ctx->super);
2076 
2077 	if (bserrno != 0) {
2078 		_spdk_blob_persist_complete(seq, ctx, bserrno);
2079 		return;
2080 	}
2081 
2082 	ctx->blob->bs->clean = 0;
2083 
2084 	_spdk_blob_persist_start(ctx);
2085 }
2086 
2087 static void
2088 _spdk_bs_write_super(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs,
2089 		     struct spdk_bs_super_block *super, spdk_bs_sequence_cpl cb_fn, void *cb_arg);
2090 
2092 static void
2093 _spdk_blob_persist_dirty(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2094 {
2095 	struct spdk_blob_persist_ctx *ctx = cb_arg;
2096 
2097 	if (bserrno != 0) {
2098 		spdk_free(ctx->super);
2099 		_spdk_blob_persist_complete(seq, ctx, bserrno);
2100 		return;
2101 	}
2102 
2103 	ctx->super->clean = 0;
2104 	if (ctx->super->size == 0) {
2105 		ctx->super->size = ctx->blob->bs->dev->blockcnt * ctx->blob->bs->dev->blocklen;
2106 	}
2107 
2108 	_spdk_bs_write_super(seq, ctx->blob->bs, ctx->super, _spdk_blob_persist_dirty_cpl, ctx);
2109 }
2110 
2111 static void
2112 _spdk_blob_persist_check_dirty(struct spdk_blob_persist_ctx *ctx)
2113 {
2114 	if (ctx->blob->bs->clean) {
2115 		ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL,
2116 					  SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
2117 		if (!ctx->super) {
2118 			_spdk_blob_persist_complete(ctx->seq, ctx, -ENOMEM);
2119 			return;
2120 		}
2121 
2122 		bs_sequence_read_dev(ctx->seq, ctx->super, _spdk_bs_page_to_lba(ctx->blob->bs, 0),
2123 				     _spdk_bs_byte_to_lba(ctx->blob->bs, sizeof(*ctx->super)),
2124 				     _spdk_blob_persist_dirty, ctx);
2125 	} else {
2126 		_spdk_blob_persist_start(ctx);
2127 	}
2128 }
2129 
2130 /* Write a blob to disk */
2131 static void
2132 _spdk_blob_persist(spdk_bs_sequence_t *seq, struct spdk_blob *blob,
2133 		   spdk_bs_sequence_cpl cb_fn, void *cb_arg)
2134 {
2135 	struct spdk_blob_persist_ctx *ctx;
2136 
2137 	_spdk_blob_verify_md_op(blob);
2138 
2139 	if (blob->state == SPDK_BLOB_STATE_CLEAN && TAILQ_EMPTY(&blob->pending_persists)) {
2140 		cb_fn(seq, cb_arg, 0);
2141 		return;
2142 	}
2143 
2144 	ctx = calloc(1, sizeof(*ctx));
2145 	if (!ctx) {
2146 		cb_fn(seq, cb_arg, -ENOMEM);
2147 		return;
2148 	}
2149 	ctx->blob = blob;
2150 	ctx->seq = seq;
2151 	ctx->cb_fn = cb_fn;
2152 	ctx->cb_arg = cb_arg;
2153 	ctx->next_extent_page = 0;
2154 
2155 	/* Multiple blob persists can affect one another, via blob->state or
2156 	 * blob mutable data changes. To prevent this, queue up the persists. */
2157 	if (!TAILQ_EMPTY(&blob->pending_persists)) {
2158 		TAILQ_INSERT_TAIL(&blob->pending_persists, ctx, link);
2159 		return;
2160 	}
2161 	TAILQ_INSERT_HEAD(&blob->pending_persists, ctx, link);
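	/* The head of pending_persists is the persist currently in flight;
	 * the completion path is expected to pop it and start the next queued
	 * context, which is why later arrivals above only queue themselves. */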
2162 
2163 	_spdk_blob_persist_check_dirty(ctx);
2164 }
2165 
2166 struct spdk_blob_copy_cluster_ctx {
2167 	struct spdk_blob *blob;
2168 	uint8_t *buf;
2169 	uint64_t page;
2170 	uint64_t new_cluster;
2171 	uint32_t new_extent_page;
2172 	spdk_bs_sequence_t *seq;
2173 };
2174 
2175 static void
2176 _spdk_blob_allocate_and_copy_cluster_cpl(void *cb_arg, int bserrno)
2177 {
2178 	struct spdk_blob_copy_cluster_ctx *ctx = cb_arg;
2179 	struct spdk_bs_request_set *set = (struct spdk_bs_request_set *)ctx->seq;
2180 	TAILQ_HEAD(, spdk_bs_request_set) requests;
2181 	spdk_bs_user_op_t *op;
2182 
2183 	TAILQ_INIT(&requests);
2184 	TAILQ_SWAP(&set->channel->need_cluster_alloc, &requests, spdk_bs_request_set, link);
2185 
2186 	while (!TAILQ_EMPTY(&requests)) {
2187 		op = TAILQ_FIRST(&requests);
2188 		TAILQ_REMOVE(&requests, op, link);
2189 		if (bserrno == 0) {
2190 			bs_user_op_execute(op);
2191 		} else {
2192 			bs_user_op_abort(op);
2193 		}
2194 	}
2195 
2196 	spdk_free(ctx->buf);
2197 	free(ctx);
2198 }
2199 
2200 static void
2201 _spdk_blob_insert_cluster_cpl(void *cb_arg, int bserrno)
2202 {
2203 	struct spdk_blob_copy_cluster_ctx *ctx = cb_arg;
2204 
2205 	if (bserrno) {
2206 		if (bserrno == -EEXIST) {
2207 			/* The metadata insert failed because another thread
2208 			 * allocated the cluster first. Free our cluster
2209 			 * but continue without error. */
2210 			bserrno = 0;
2211 		}
2212 		_spdk_bs_release_cluster(ctx->blob->bs, ctx->new_cluster);
2213 		if (ctx->new_extent_page != 0) {
2214 			_spdk_bs_release_md_page(ctx->blob->bs, ctx->new_extent_page);
2215 		}
2216 	}
2217 
2218 	bs_sequence_finish(ctx->seq, bserrno);
2219 }
2220 
2221 static void
2222 _spdk_blob_write_copy_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2223 {
2224 	struct spdk_blob_copy_cluster_ctx *ctx = cb_arg;
2225 	uint32_t cluster_number;
2226 
2227 	if (bserrno) {
2228 		/* The write failed, so jump to the final completion handler */
2229 		bs_sequence_finish(seq, bserrno);
2230 		return;
2231 	}
2232 
2233 	cluster_number = _spdk_bs_page_to_cluster(ctx->blob->bs, ctx->page);
2234 
2235 	_spdk_blob_insert_cluster_on_md_thread(ctx->blob, cluster_number, ctx->new_cluster,
2236 					       ctx->new_extent_page, _spdk_blob_insert_cluster_cpl, ctx);
2237 }
2238 
2239 static void
2240 _spdk_blob_write_copy(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2241 {
2242 	struct spdk_blob_copy_cluster_ctx *ctx = cb_arg;
2243 
2244 	if (bserrno != 0) {
2245 		/* The read failed, so jump to the final completion handler */
2246 		bs_sequence_finish(seq, bserrno);
2247 		return;
2248 	}
2249 
2250 	/* Write whole cluster */
2251 	bs_sequence_write_dev(seq, ctx->buf,
2252 			      _spdk_bs_cluster_to_lba(ctx->blob->bs, ctx->new_cluster),
2253 			      _spdk_bs_cluster_to_lba(ctx->blob->bs, 1),
2254 			      _spdk_blob_write_copy_cpl, ctx);
2255 }
2256 
2257 static void
2258 _spdk_bs_allocate_and_copy_cluster(struct spdk_blob *blob,
2259 				   struct spdk_io_channel *_ch,
2260 				   uint64_t io_unit, spdk_bs_user_op_t *op)
2261 {
2262 	struct spdk_bs_cpl cpl;
2263 	struct spdk_bs_channel *ch;
2264 	struct spdk_blob_copy_cluster_ctx *ctx;
2265 	uint32_t cluster_start_page;
2266 	uint32_t cluster_number;
2267 	int rc;
2268 
2269 	ch = spdk_io_channel_get_ctx(_ch);
2270 
2271 	if (!TAILQ_EMPTY(&ch->need_cluster_alloc)) {
2272 		/* There are already operations pending. Queue this user op
2273 		 * and return because it will be re-executed when the outstanding
2274 		 * cluster allocation completes. */
2275 		TAILQ_INSERT_TAIL(&ch->need_cluster_alloc, op, link);
2276 		return;
2277 	}
2278 
2279 	/* Round the io_unit offset down to the first page in the cluster */
2280 	cluster_start_page = _spdk_bs_io_unit_to_cluster_start(blob, io_unit);
2281 
2282 	/* Calculate the index in the metadata cluster array where the
2283 	 * corresponding cluster is supposed to be. */
2284 	cluster_number = _spdk_bs_io_unit_to_cluster_number(blob, io_unit);
2285 
2286 	ctx = calloc(1, sizeof(*ctx));
2287 	if (!ctx) {
2288 		bs_user_op_abort(op);
2289 		return;
2290 	}
2291 
2292 	assert(blob->bs->cluster_sz % blob->back_bs_dev->blocklen == 0);
2293 
2294 	ctx->blob = blob;
2295 	ctx->page = cluster_start_page;
2296 
2297 	if (blob->parent_id != SPDK_BLOBID_INVALID) {
2298 		ctx->buf = spdk_malloc(blob->bs->cluster_sz, blob->back_bs_dev->blocklen,
2299 				       NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
2300 		if (!ctx->buf) {
2301 			SPDK_ERRLOG("DMA allocation for cluster of size = %" PRIu32 " failed.\n",
2302 				    blob->bs->cluster_sz);
2303 			free(ctx);
2304 			bs_user_op_abort(op);
2305 			return;
2306 		}
2307 	}
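	/* The bounce buffer is only needed when the blob has a parent: the
	 * cluster's prior contents must be read from the backing device and
	 * written into the newly allocated cluster. With no parent there is
	 * nothing to copy, so the code at the end of this function skips
	 * straight to inserting the new cluster into the metadata. */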
2308 
2309 	rc = _spdk_bs_allocate_cluster(blob, cluster_number, &ctx->new_cluster, &ctx->new_extent_page,
2310 				       false);
2311 	if (rc != 0) {
2312 		spdk_free(ctx->buf);
2313 		free(ctx);
2314 		bs_user_op_abort(op);
2315 		return;
2316 	}
2317 
2318 	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
2319 	cpl.u.blob_basic.cb_fn = _spdk_blob_allocate_and_copy_cluster_cpl;
2320 	cpl.u.blob_basic.cb_arg = ctx;
2321 
2322 	ctx->seq = bs_sequence_start(_ch, &cpl);
2323 	if (!ctx->seq) {
2324 		_spdk_bs_release_cluster(blob->bs, ctx->new_cluster);
2325 		spdk_free(ctx->buf);
2326 		free(ctx);
2327 		bs_user_op_abort(op);
2328 		return;
2329 	}
2330 
2331 	/* Queue the user op to block other incoming operations */
2332 	TAILQ_INSERT_TAIL(&ch->need_cluster_alloc, op, link);
2333 
2334 	if (blob->parent_id != SPDK_BLOBID_INVALID) {
2335 		/* Read cluster from backing device */
2336 		bs_sequence_read_bs_dev(ctx->seq, blob->back_bs_dev, ctx->buf,
2337 					_spdk_bs_dev_page_to_lba(blob->back_bs_dev, cluster_start_page),
2338 					_spdk_bs_dev_byte_to_lba(blob->back_bs_dev, blob->bs->cluster_sz),
2339 					_spdk_blob_write_copy, ctx);
2340 	} else {
2341 		_spdk_blob_insert_cluster_on_md_thread(ctx->blob, cluster_number, ctx->new_cluster,
2342 						       ctx->new_extent_page, _spdk_blob_insert_cluster_cpl, ctx);
2343 	}
2344 }
2345 
2346 static inline void
2347 _spdk_blob_calculate_lba_and_lba_count(struct spdk_blob *blob, uint64_t io_unit, uint64_t length,
2348 				       uint64_t *lba, uint32_t *lba_count)
2349 {
2350 	*lba_count = length;
2351 
2352 	if (!_spdk_bs_io_unit_is_allocated(blob, io_unit)) {
2353 		assert(blob->back_bs_dev != NULL);
2354 		*lba = _spdk_bs_io_unit_to_back_dev_lba(blob, io_unit);
2355 		*lba_count = _spdk_bs_io_unit_to_back_dev_lba(blob, *lba_count);
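		/* Worked example (hypothetical geometry): if the backing device
		 * block size equals the blobstore io unit size, both conversions
		 * are the identity; with 512-byte io units over a 4096-byte
		 * backing block, io_unit 64 becomes back-dev LBA 8 and a count
		 * of 16 io units becomes 2 back-dev blocks. */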
2356 	} else {
2357 		*lba = _spdk_bs_blob_io_unit_to_lba(blob, io_unit);
2358 	}
2359 }
2360 
2361 struct op_split_ctx {
2362 	struct spdk_blob *blob;
2363 	struct spdk_io_channel *channel;
2364 	uint64_t io_unit_offset;
2365 	uint64_t io_units_remaining;
2366 	void *curr_payload;
2367 	enum spdk_blob_op_type op_type;
2368 	spdk_bs_sequence_t *seq;
2369 };
2370 
2371 static void
2372 _spdk_blob_request_submit_op_split_next(void *cb_arg, int bserrno)
2373 {
2374 	struct op_split_ctx	*ctx = cb_arg;
2375 	struct spdk_blob	*blob = ctx->blob;
2376 	struct spdk_io_channel	*ch = ctx->channel;
2377 	enum spdk_blob_op_type	op_type = ctx->op_type;
2378 	uint8_t			*buf = ctx->curr_payload;
2379 	uint64_t		offset = ctx->io_unit_offset;
2380 	uint64_t		length = ctx->io_units_remaining;
2381 	uint64_t		op_length;
2382 
2383 	if (bserrno != 0 || ctx->io_units_remaining == 0) {
2384 		bs_sequence_finish(ctx->seq, bserrno);
2385 		free(ctx);
2386 		return;
2387 	}
2388 
2389 	op_length = spdk_min(length,
2390 			     _spdk_bs_num_io_units_to_cluster_boundary(blob, offset));
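	/* e.g. (hypothetical sizes, 32 io units per cluster): a request with
	 * offset == 40 and length == 100 is split into sub-ops of 24, 32, 32
	 * and 12 io units, each ending exactly on a cluster boundary. */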
2391 
2392 	/* Update length and payload for next operation */
2393 	ctx->io_units_remaining -= op_length;
2394 	ctx->io_unit_offset += op_length;
2395 	if (op_type == SPDK_BLOB_WRITE || op_type == SPDK_BLOB_READ) {
2396 		ctx->curr_payload += op_length * blob->bs->io_unit_size;
2397 	}
2398 
2399 	switch (op_type) {
2400 	case SPDK_BLOB_READ:
2401 		spdk_blob_io_read(blob, ch, buf, offset, op_length,
2402 				  _spdk_blob_request_submit_op_split_next, ctx);
2403 		break;
2404 	case SPDK_BLOB_WRITE:
2405 		spdk_blob_io_write(blob, ch, buf, offset, op_length,
2406 				   _spdk_blob_request_submit_op_split_next, ctx);
2407 		break;
2408 	case SPDK_BLOB_UNMAP:
2409 		spdk_blob_io_unmap(blob, ch, offset, op_length,
2410 				   _spdk_blob_request_submit_op_split_next, ctx);
2411 		break;
2412 	case SPDK_BLOB_WRITE_ZEROES:
2413 		spdk_blob_io_write_zeroes(blob, ch, offset, op_length,
2414 					  _spdk_blob_request_submit_op_split_next, ctx);
2415 		break;
2416 	case SPDK_BLOB_READV:
2417 	case SPDK_BLOB_WRITEV:
2418 		SPDK_ERRLOG("readv/writev not valid\n");
2419 		bs_sequence_finish(ctx->seq, -EINVAL);
2420 		free(ctx);
2421 		break;
2422 	}
2423 }
2424 
2425 static void
2426 _spdk_blob_request_submit_op_split(struct spdk_io_channel *ch, struct spdk_blob *blob,
2427 				   void *payload, uint64_t offset, uint64_t length,
2428 				   spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type)
2429 {
2430 	struct op_split_ctx *ctx;
2431 	spdk_bs_sequence_t *seq;
2432 	struct spdk_bs_cpl cpl;
2433 
2434 	assert(blob != NULL);
2435 
2436 	ctx = calloc(1, sizeof(struct op_split_ctx));
2437 	if (ctx == NULL) {
2438 		cb_fn(cb_arg, -ENOMEM);
2439 		return;
2440 	}
2441 
2442 	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
2443 	cpl.u.blob_basic.cb_fn = cb_fn;
2444 	cpl.u.blob_basic.cb_arg = cb_arg;
2445 
2446 	seq = bs_sequence_start(ch, &cpl);
2447 	if (!seq) {
2448 		free(ctx);
2449 		cb_fn(cb_arg, -ENOMEM);
2450 		return;
2451 	}
2452 
2453 	ctx->blob = blob;
2454 	ctx->channel = ch;
2455 	ctx->curr_payload = payload;
2456 	ctx->io_unit_offset = offset;
2457 	ctx->io_units_remaining = length;
2458 	ctx->op_type = op_type;
2459 	ctx->seq = seq;
2460 
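	/* Kick off the first sub-operation by invoking the completion callback
	 * directly with bserrno == 0; each completed sub-op re-invokes it until
	 * io_units_remaining reaches zero and the sequence is finished. */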
2461 	_spdk_blob_request_submit_op_split_next(ctx, 0);
2462 }
2463 
2464 static void
2465 _spdk_blob_request_submit_op_single(struct spdk_io_channel *_ch, struct spdk_blob *blob,
2466 				    void *payload, uint64_t offset, uint64_t length,
2467 				    spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type)
2468 {
2469 	struct spdk_bs_cpl cpl;
2470 	uint64_t lba;
2471 	uint32_t lba_count;
2472 
2473 	assert(blob != NULL);
2474 
2475 	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
2476 	cpl.u.blob_basic.cb_fn = cb_fn;
2477 	cpl.u.blob_basic.cb_arg = cb_arg;
2478 
2479 	_spdk_blob_calculate_lba_and_lba_count(blob, offset, length, &lba, &lba_count);
2480 
2481 	if (blob->frozen_refcnt) {
2482 		/* This blob I/O is frozen */
2483 		spdk_bs_user_op_t *op;
2484 		struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(_ch);
2485 
2486 		op = bs_user_op_alloc(_ch, &cpl, op_type, blob, payload, 0, offset, length);
2487 		if (!op) {
2488 			cb_fn(cb_arg, -ENOMEM);
2489 			return;
2490 		}
2491 
2492 		TAILQ_INSERT_TAIL(&bs_channel->queued_io, op, link);
2493 
2494 		return;
2495 	}
2496 
2497 	switch (op_type) {
2498 	case SPDK_BLOB_READ: {
2499 		spdk_bs_batch_t *batch;
2500 
2501 		batch = bs_batch_open(_ch, &cpl);
2502 		if (!batch) {
2503 			cb_fn(cb_arg, -ENOMEM);
2504 			return;
2505 		}
2506 
2507 		if (_spdk_bs_io_unit_is_allocated(blob, offset)) {
2508 			/* Read from the blob */
2509 			bs_batch_read_dev(batch, payload, lba, lba_count);
2510 		} else {
2511 			/* Read from the backing block device */
2512 			spdk_bs_batch_read_bs_dev(batch, blob->back_bs_dev, payload, lba, lba_count);
2513 		}
2514 
2515 		bs_batch_close(batch);
2516 		break;
2517 	}
2518 	case SPDK_BLOB_WRITE:
2519 	case SPDK_BLOB_WRITE_ZEROES: {
2520 		if (_spdk_bs_io_unit_is_allocated(blob, offset)) {
2521 			/* Write to the blob */
2522 			spdk_bs_batch_t *batch;
2523 
2524 			if (lba_count == 0) {
2525 				cb_fn(cb_arg, 0);
2526 				return;
2527 			}
2528 
2529 			batch = bs_batch_open(_ch, &cpl);
2530 			if (!batch) {
2531 				cb_fn(cb_arg, -ENOMEM);
2532 				return;
2533 			}
2534 
2535 			if (op_type == SPDK_BLOB_WRITE) {
2536 				bs_batch_write_dev(batch, payload, lba, lba_count);
2537 			} else {
2538 				bs_batch_write_zeroes_dev(batch, lba, lba_count);
2539 			}
2540 
2541 			bs_batch_close(batch);
2542 		} else {
2543 			/* Queue this operation and allocate the cluster */
2544 			spdk_bs_user_op_t *op;
2545 
2546 			op = bs_user_op_alloc(_ch, &cpl, op_type, blob, payload, 0, offset, length);
2547 			if (!op) {
2548 				cb_fn(cb_arg, -ENOMEM);
2549 				return;
2550 			}
2551 
2552 			_spdk_bs_allocate_and_copy_cluster(blob, _ch, offset, op);
2553 		}
2554 		break;
2555 	}
2556 	case SPDK_BLOB_UNMAP: {
2557 		spdk_bs_batch_t *batch;
2558 
2559 		batch = bs_batch_open(_ch, &cpl);
2560 		if (!batch) {
2561 			cb_fn(cb_arg, -ENOMEM);
2562 			return;
2563 		}
2564 
2565 		if (_spdk_bs_io_unit_is_allocated(blob, offset)) {
2566 			bs_batch_unmap_dev(batch, lba, lba_count);
2567 		}
2568 
2569 		bs_batch_close(batch);
2570 		break;
2571 	}
2572 	case SPDK_BLOB_READV:
2573 	case SPDK_BLOB_WRITEV:
2574 		SPDK_ERRLOG("readv/writev not valid\n");
2575 		cb_fn(cb_arg, -EINVAL);
2576 		break;
2577 	}
2578 }
2579 
2580 static void
2581 _spdk_blob_request_submit_op(struct spdk_blob *blob, struct spdk_io_channel *_channel,
2582 			     void *payload, uint64_t offset, uint64_t length,
2583 			     spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type)
2584 {
2585 	assert(blob != NULL);
2586 
2587 	if (blob->data_ro && op_type != SPDK_BLOB_READ) {
2588 		cb_fn(cb_arg, -EPERM);
2589 		return;
2590 	}
2591 
2592 	if (offset + length > _spdk_bs_cluster_to_lba(blob->bs, blob->active.num_clusters)) {
2593 		cb_fn(cb_arg, -EINVAL);
2594 		return;
2595 	}
2596 	if (length <= _spdk_bs_num_io_units_to_cluster_boundary(blob, offset)) {
2597 		_spdk_blob_request_submit_op_single(_channel, blob, payload, offset, length,
2598 						    cb_fn, cb_arg, op_type);
2599 	} else {
2600 		_spdk_blob_request_submit_op_split(_channel, blob, payload, offset, length,
2601 						   cb_fn, cb_arg, op_type);
2602 	}
2603 }
2604 
2605 struct rw_iov_ctx {
2606 	struct spdk_blob *blob;
2607 	struct spdk_io_channel *channel;
2608 	spdk_blob_op_complete cb_fn;
2609 	void *cb_arg;
2610 	bool read;
2611 	int iovcnt;
2612 	struct iovec *orig_iov;
2613 	uint64_t io_unit_offset;
2614 	uint64_t io_units_remaining;
2615 	uint64_t io_units_done;
2616 	struct iovec iov[0];
2617 };
2618 
2619 static void
2620 _spdk_rw_iov_done(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2621 {
2622 	assert(cb_arg == NULL);
2623 	bs_sequence_finish(seq, bserrno);
2624 }
2625 
2626 static void
2627 _spdk_rw_iov_split_next(void *cb_arg, int bserrno)
2628 {
2629 	struct rw_iov_ctx *ctx = cb_arg;
2630 	struct spdk_blob *blob = ctx->blob;
2631 	struct iovec *iov, *orig_iov;
2632 	int iovcnt;
2633 	size_t orig_iovoff;
2634 	uint64_t io_units_count, io_units_to_boundary, io_unit_offset;
2635 	uint64_t byte_count;
2636 
2637 	if (bserrno != 0 || ctx->io_units_remaining == 0) {
2638 		ctx->cb_fn(ctx->cb_arg, bserrno);
2639 		free(ctx);
2640 		return;
2641 	}
2642 
2643 	io_unit_offset = ctx->io_unit_offset;
2644 	io_units_to_boundary = _spdk_bs_num_io_units_to_cluster_boundary(blob, io_unit_offset);
2645 	io_units_count = spdk_min(ctx->io_units_remaining, io_units_to_boundary);
2646 	/*
2647 	 * Get the index and offset into the original iov array for our current position
2648 	 *  in the I/O sequence.  byte_count tracks how many bytes remain until orig_iov
2649 	 *  and orig_iovoff point to the current position in the I/O sequence.
2650 	 */
2651 	byte_count = ctx->io_units_done * blob->bs->io_unit_size;
2652 	orig_iov = &ctx->orig_iov[0];
2653 	orig_iovoff = 0;
2654 	while (byte_count > 0) {
2655 		if (byte_count >= orig_iov->iov_len) {
2656 			byte_count -= orig_iov->iov_len;
2657 			orig_iov++;
2658 		} else {
2659 			orig_iovoff = byte_count;
2660 			byte_count = 0;
2661 		}
2662 	}
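	/* e.g. (hypothetical iovs): with orig_iov lengths { 4096, 4096 } and
	 * io_units_done * io_unit_size == 6144, the loop above leaves orig_iov
	 * pointing at the second element with orig_iovoff == 2048. */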
2663 
2664 	/*
2665 	 * Build an iov array for the next I/O in the sequence.  byte_count will keep track of how many
2666 	 *  bytes of this next I/O remain to be accounted for in the new iov array.
2667 	 */
2668 	byte_count = io_units_count * blob->bs->io_unit_size;
2669 	iov = &ctx->iov[0];
2670 	iovcnt = 0;
2671 	while (byte_count > 0) {
2672 		assert(iovcnt < ctx->iovcnt);
2673 		iov->iov_len = spdk_min(byte_count, orig_iov->iov_len - orig_iovoff);
2674 		iov->iov_base = orig_iov->iov_base + orig_iovoff;
2675 		byte_count -= iov->iov_len;
2676 		orig_iovoff = 0;
2677 		orig_iov++;
2678 		iov++;
2679 		iovcnt++;
2680 	}
2681 
2682 	ctx->io_unit_offset += io_units_count;
2683 	ctx->io_units_remaining -= io_units_count;
2684 	ctx->io_units_done += io_units_count;
2685 	iov = &ctx->iov[0];
2686 
2687 	if (ctx->read) {
2688 		spdk_blob_io_readv(ctx->blob, ctx->channel, iov, iovcnt, io_unit_offset,
2689 				   io_units_count, _spdk_rw_iov_split_next, ctx);
2690 	} else {
2691 		spdk_blob_io_writev(ctx->blob, ctx->channel, iov, iovcnt, io_unit_offset,
2692 				    io_units_count, _spdk_rw_iov_split_next, ctx);
2693 	}
2694 }
2695 
2696 static void
2697 _spdk_blob_request_submit_rw_iov(struct spdk_blob *blob, struct spdk_io_channel *_channel,
2698 				 struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
2699 				 spdk_blob_op_complete cb_fn, void *cb_arg, bool read)
2700 {
2701 	struct spdk_bs_cpl	cpl;
2702 
2703 	assert(blob != NULL);
2704 
2705 	if (!read && blob->data_ro) {
2706 		cb_fn(cb_arg, -EPERM);
2707 		return;
2708 	}
2709 
2710 	if (length == 0) {
2711 		cb_fn(cb_arg, 0);
2712 		return;
2713 	}
2714 
2715 	if (offset + length > _spdk_bs_cluster_to_lba(blob->bs, blob->active.num_clusters)) {
2716 		cb_fn(cb_arg, -EINVAL);
2717 		return;
2718 	}
2719 
2720 	/*
2721 	 * For now, we implement readv/writev using a sequence (instead of a batch) to account for having
2722 	 *  to split a request that spans a cluster boundary.  For I/O that do not span a cluster boundary,
2723 	 *  there will be no noticeable difference compared to using a batch.  For I/O that do span a cluster
2724 	 *  boundary, the target LBAs (after blob offset to LBA translation) may not be contiguous, so we need
2725 	 *  to allocate a separate iov array and split the I/O such that none of the resulting
2726 	 *  smaller I/O cross a cluster boundary.  These smaller I/O will be issued in sequence (not in parallel)
2727 	 *  but since this case happens very infrequently, any performance impact will be negligible.
2728 	 *
2729 	 * This could be optimized in the future to allocate a big enough iov array to account for all of the iovs
2730 	 *  for all of the smaller I/Os, pre-build all of the iov arrays for the smaller I/Os, then issue them
2731 	 *  in a batch.  That would also require creating an intermediate spdk_bs_cpl that would get called
2732 	 *  when the batch was completed, to allow for freeing the memory for the iov arrays.
2733 	 */
2734 	if (spdk_likely(length <= _spdk_bs_num_io_units_to_cluster_boundary(blob, offset))) {
2735 		uint32_t lba_count;
2736 		uint64_t lba;
2737 
2738 		cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
2739 		cpl.u.blob_basic.cb_fn = cb_fn;
2740 		cpl.u.blob_basic.cb_arg = cb_arg;
2741 
2742 		if (blob->frozen_refcnt) {
2743 			/* This blob I/O is frozen */
2744 			enum spdk_blob_op_type op_type;
2745 			spdk_bs_user_op_t *op;
2746 			struct spdk_bs_channel *bs_channel = spdk_io_channel_get_ctx(_channel);
2747 
2748 			op_type = read ? SPDK_BLOB_READV : SPDK_BLOB_WRITEV;
2749 			op = bs_user_op_alloc(_channel, &cpl, op_type, blob, iov, iovcnt, offset, length);
2750 			if (!op) {
2751 				cb_fn(cb_arg, -ENOMEM);
2752 				return;
2753 			}
2754 
2755 			TAILQ_INSERT_TAIL(&bs_channel->queued_io, op, link);
2756 
2757 			return;
2758 		}
2759 
2760 		_spdk_blob_calculate_lba_and_lba_count(blob, offset, length, &lba, &lba_count);
2761 
2762 		if (read) {
2763 			spdk_bs_sequence_t *seq;
2764 
2765 			seq = bs_sequence_start(_channel, &cpl);
2766 			if (!seq) {
2767 				cb_fn(cb_arg, -ENOMEM);
2768 				return;
2769 			}
2770 
2771 			if (_spdk_bs_io_unit_is_allocated(blob, offset)) {
2772 				bs_sequence_readv_dev(seq, iov, iovcnt, lba, lba_count, _spdk_rw_iov_done, NULL);
2773 			} else {
2774 				bs_sequence_readv_bs_dev(seq, blob->back_bs_dev, iov, iovcnt, lba, lba_count,
2775 							 _spdk_rw_iov_done, NULL);
2776 			}
2777 		} else {
2778 			if (_spdk_bs_io_unit_is_allocated(blob, offset)) {
2779 				spdk_bs_sequence_t *seq;
2780 
2781 				seq = bs_sequence_start(_channel, &cpl);
2782 				if (!seq) {
2783 					cb_fn(cb_arg, -ENOMEM);
2784 					return;
2785 				}
2786 
2787 				bs_sequence_writev_dev(seq, iov, iovcnt, lba, lba_count, _spdk_rw_iov_done, NULL);
2788 			} else {
2789 				/* Queue this operation and allocate the cluster */
2790 				spdk_bs_user_op_t *op;
2791 
2792 				op = bs_user_op_alloc(_channel, &cpl, SPDK_BLOB_WRITEV, blob, iov, iovcnt, offset,
2793 						      length);
2794 				if (!op) {
2795 					cb_fn(cb_arg, -ENOMEM);
2796 					return;
2797 				}
2798 
2799 				_spdk_bs_allocate_and_copy_cluster(blob, _channel, offset, op);
2800 			}
2801 		}
2802 	} else {
2803 		struct rw_iov_ctx *ctx;
2804 
2805 		ctx = calloc(1, sizeof(struct rw_iov_ctx) + iovcnt * sizeof(struct iovec));
2806 		if (ctx == NULL) {
2807 			cb_fn(cb_arg, -ENOMEM);
2808 			return;
2809 		}
2810 
2811 		ctx->blob = blob;
2812 		ctx->channel = _channel;
2813 		ctx->cb_fn = cb_fn;
2814 		ctx->cb_arg = cb_arg;
2815 		ctx->read = read;
2816 		ctx->orig_iov = iov;
2817 		ctx->iovcnt = iovcnt;
2818 		ctx->io_unit_offset = offset;
2819 		ctx->io_units_remaining = length;
2820 		ctx->io_units_done = 0;
2821 
2822 		_spdk_rw_iov_split_next(ctx, 0);
2823 	}
2824 }
2825 
2826 static struct spdk_blob *
2827 _spdk_blob_lookup(struct spdk_blob_store *bs, spdk_blob_id blobid)
2828 {
2829 	struct spdk_blob *blob;
2830 
2831 	TAILQ_FOREACH(blob, &bs->blobs, link) {
2832 		if (blob->id == blobid) {
2833 			return blob;
2834 		}
2835 	}
2836 
2837 	return NULL;
2838 }
2839 
2840 static void
2841 _spdk_blob_get_snapshot_and_clone_entries(struct spdk_blob *blob,
2842 		struct spdk_blob_list **snapshot_entry, struct spdk_blob_list **clone_entry)
2843 {
2844 	assert(blob != NULL);
2845 	*snapshot_entry = NULL;
2846 	*clone_entry = NULL;
2847 
2848 	if (blob->parent_id == SPDK_BLOBID_INVALID) {
2849 		return;
2850 	}
2851 
2852 	TAILQ_FOREACH(*snapshot_entry, &blob->bs->snapshots, link) {
2853 		if ((*snapshot_entry)->id == blob->parent_id) {
2854 			break;
2855 		}
2856 	}
2857 
2858 	if (*snapshot_entry != NULL) {
2859 		TAILQ_FOREACH(*clone_entry, &(*snapshot_entry)->clones, link) {
2860 			if ((*clone_entry)->id == blob->id) {
2861 				break;
2862 			}
2863 		}
2864 
2865 		assert(*clone_entry != NULL);
2866 	}
2867 }
2868 
2869 static int
2870 _spdk_bs_channel_create(void *io_device, void *ctx_buf)
2871 {
2872 	struct spdk_blob_store		*bs = io_device;
2873 	struct spdk_bs_channel		*channel = ctx_buf;
2874 	struct spdk_bs_dev		*dev;
2875 	uint32_t			max_ops = bs->max_channel_ops;
2876 	uint32_t			i;
2877 
2878 	dev = bs->dev;
2879 
2880 	channel->req_mem = calloc(max_ops, sizeof(struct spdk_bs_request_set));
2881 	if (!channel->req_mem) {
2882 		return -1;
2883 	}
2884 
2885 	TAILQ_INIT(&channel->reqs);
2886 
2887 	for (i = 0; i < max_ops; i++) {
2888 		TAILQ_INSERT_TAIL(&channel->reqs, &channel->req_mem[i], link);
2889 	}
2890 
2891 	channel->bs = bs;
2892 	channel->dev = dev;
2893 	channel->dev_channel = dev->create_channel(dev);
2894 
2895 	if (!channel->dev_channel) {
2896 		SPDK_ERRLOG("Failed to create device channel.\n");
2897 		free(channel->req_mem);
2898 		return -1;
2899 	}
2900 
2901 	TAILQ_INIT(&channel->need_cluster_alloc);
2902 	TAILQ_INIT(&channel->queued_io);
2903 
2904 	return 0;
2905 }
2906 
2907 static void
2908 _spdk_bs_channel_destroy(void *io_device, void *ctx_buf)
2909 {
2910 	struct spdk_bs_channel *channel = ctx_buf;
2911 	spdk_bs_user_op_t *op;
2912 
2913 	while (!TAILQ_EMPTY(&channel->need_cluster_alloc)) {
2914 		op = TAILQ_FIRST(&channel->need_cluster_alloc);
2915 		TAILQ_REMOVE(&channel->need_cluster_alloc, op, link);
2916 		bs_user_op_abort(op);
2917 	}
2918 
2919 	while (!TAILQ_EMPTY(&channel->queued_io)) {
2920 		op = TAILQ_FIRST(&channel->queued_io);
2921 		TAILQ_REMOVE(&channel->queued_io, op, link);
2922 		bs_user_op_abort(op);
2923 	}
2924 
2925 	free(channel->req_mem);
2926 	channel->dev->destroy_channel(channel->dev, channel->dev_channel);
2927 }
2928 
2929 static void
2930 _spdk_bs_dev_destroy(void *io_device)
2931 {
2932 	struct spdk_blob_store *bs = io_device;
2933 	struct spdk_blob	*blob, *blob_tmp;
2934 
2935 	bs->dev->destroy(bs->dev);
2936 
2937 	TAILQ_FOREACH_SAFE(blob, &bs->blobs, link, blob_tmp) {
2938 		TAILQ_REMOVE(&bs->blobs, blob, link);
2939 		_spdk_blob_free(blob);
2940 	}
2941 
2942 	pthread_mutex_destroy(&bs->used_clusters_mutex);
2943 
2944 	spdk_bit_array_free(&bs->used_blobids);
2945 	spdk_bit_array_free(&bs->used_md_pages);
2946 	spdk_bit_array_free(&bs->used_clusters);
2947 	/*
2948 	 * If this function is called for any reason except a successful unload,
2949 	 * the unload_cpl type will be NONE and this will be a nop.
2950 	 */
2951 	bs_call_cpl(&bs->unload_cpl, bs->unload_err);
2952 
2953 	free(bs);
2954 }
2955 
2956 static int
2957 _spdk_bs_blob_list_add(struct spdk_blob *blob)
2958 {
2959 	spdk_blob_id snapshot_id;
2960 	struct spdk_blob_list *snapshot_entry = NULL;
2961 	struct spdk_blob_list *clone_entry = NULL;
2962 
2963 	assert(blob != NULL);
2964 
2965 	snapshot_id = blob->parent_id;
2966 	if (snapshot_id == SPDK_BLOBID_INVALID) {
2967 		return 0;
2968 	}
2969 
2970 	snapshot_entry = _spdk_bs_get_snapshot_entry(blob->bs, snapshot_id);
2971 	if (snapshot_entry == NULL) {
2972 		/* Snapshot not found */
2973 		snapshot_entry = calloc(1, sizeof(struct spdk_blob_list));
2974 		if (snapshot_entry == NULL) {
2975 			return -ENOMEM;
2976 		}
2977 		snapshot_entry->id = snapshot_id;
2978 		TAILQ_INIT(&snapshot_entry->clones);
2979 		TAILQ_INSERT_TAIL(&blob->bs->snapshots, snapshot_entry, link);
2980 	} else {
2981 		TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) {
2982 			if (clone_entry->id == blob->id) {
2983 				break;
2984 			}
2985 		}
2986 	}
2987 
2988 	if (clone_entry == NULL) {
2989 		/* Clone not found */
2990 		clone_entry = calloc(1, sizeof(struct spdk_blob_list));
2991 		if (clone_entry == NULL) {
2992 			return -ENOMEM;
2993 		}
2994 		clone_entry->id = blob->id;
2995 		TAILQ_INIT(&clone_entry->clones);
2996 		TAILQ_INSERT_TAIL(&snapshot_entry->clones, clone_entry, link);
2997 		snapshot_entry->clone_count++;
2998 	}
2999 
3000 	return 0;
3001 }
3002 
3003 static void
3004 _spdk_bs_blob_list_remove(struct spdk_blob *blob)
3005 {
3006 	struct spdk_blob_list *snapshot_entry = NULL;
3007 	struct spdk_blob_list *clone_entry = NULL;
3008 
3009 	_spdk_blob_get_snapshot_and_clone_entries(blob, &snapshot_entry, &clone_entry);
3010 
3011 	if (snapshot_entry == NULL) {
3012 		return;
3013 	}
3014 
3015 	blob->parent_id = SPDK_BLOBID_INVALID;
3016 	TAILQ_REMOVE(&snapshot_entry->clones, clone_entry, link);
3017 	free(clone_entry);
3018 
3019 	snapshot_entry->clone_count--;
3020 }
3021 
3022 static int
3023 _spdk_bs_blob_list_free(struct spdk_blob_store *bs)
3024 {
3025 	struct spdk_blob_list *snapshot_entry;
3026 	struct spdk_blob_list *snapshot_entry_tmp;
3027 	struct spdk_blob_list *clone_entry;
3028 	struct spdk_blob_list *clone_entry_tmp;
3029 
3030 	TAILQ_FOREACH_SAFE(snapshot_entry, &bs->snapshots, link, snapshot_entry_tmp) {
3031 		TAILQ_FOREACH_SAFE(clone_entry, &snapshot_entry->clones, link, clone_entry_tmp) {
3032 			TAILQ_REMOVE(&snapshot_entry->clones, clone_entry, link);
3033 			free(clone_entry);
3034 		}
3035 		TAILQ_REMOVE(&bs->snapshots, snapshot_entry, link);
3036 		free(snapshot_entry);
3037 	}
3038 
3039 	return 0;
3040 }
3041 
3042 static void
3043 _spdk_bs_free(struct spdk_blob_store *bs)
3044 {
3045 	_spdk_bs_blob_list_free(bs);
3046 
3047 	spdk_bs_unregister_md_thread(bs);
3048 	spdk_io_device_unregister(bs, _spdk_bs_dev_destroy);
3049 }
3050 
3051 void
3052 spdk_bs_opts_init(struct spdk_bs_opts *opts)
3053 {
3054 	opts->cluster_sz = SPDK_BLOB_OPTS_CLUSTER_SZ;
3055 	opts->num_md_pages = SPDK_BLOB_OPTS_NUM_MD_PAGES;
3056 	opts->max_md_ops = SPDK_BLOB_OPTS_MAX_MD_OPS;
3057 	opts->max_channel_ops = SPDK_BLOB_OPTS_DEFAULT_CHANNEL_OPS;
3058 	opts->clear_method = BS_CLEAR_WITH_UNMAP;
3059 	memset(&opts->bstype, 0, sizeof(opts->bstype));
3060 	opts->iter_cb_fn = NULL;
3061 	opts->iter_cb_arg = NULL;
3062 }
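/* A typical call sequence (sketch only; spdk_bs_load() is the public loader
 * declared in include/spdk/blob.h, and the callback names are hypothetical):
 *
 *	struct spdk_bs_opts opts;
 *
 *	spdk_bs_opts_init(&opts);
 *	opts.cluster_sz = 1024 * 1024;
 *	spdk_bs_load(dev, &opts, load_done_cb, load_done_ctx);
 *
 * Defaults should always be initialized first, then individual fields
 * overridden, so that newly added options keep sane values.
 */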
3063 
3064 static int
3065 _spdk_bs_opts_verify(struct spdk_bs_opts *opts)
3066 {
3067 	if (opts->cluster_sz == 0 || opts->num_md_pages == 0 || opts->max_md_ops == 0 ||
3068 	    opts->max_channel_ops == 0) {
3069 		SPDK_ERRLOG("Blobstore options cannot be set to 0\n");
3070 		return -1;
3071 	}
3072 
3073 	return 0;
3074 }
3075 
3076 static int
3077 _spdk_bs_alloc(struct spdk_bs_dev *dev, struct spdk_bs_opts *opts, struct spdk_blob_store **_bs)
3078 {
3079 	struct spdk_blob_store	*bs;
3080 	uint64_t dev_size;
3081 	int rc;
3082 
3083 	dev_size = dev->blocklen * dev->blockcnt;
3084 	if (dev_size < opts->cluster_sz) {
3085 		/* Device size cannot be smaller than cluster size of blobstore */
3086 		SPDK_INFOLOG(SPDK_LOG_BLOB, "Device size %" PRIu64 " is smaller than cluster size %" PRIu32 "\n",
3087 			     dev_size, opts->cluster_sz);
3088 		return -ENOSPC;
3089 	}
3090 	if (opts->cluster_sz < SPDK_BS_PAGE_SIZE) {
3091 		/* Cluster size cannot be smaller than page size */
3092 		SPDK_ERRLOG("Cluster size %" PRIu32 " is smaller than page size %d\n",
3093 			    opts->cluster_sz, SPDK_BS_PAGE_SIZE);
3094 		return -EINVAL;
3095 	}
3096 	bs = calloc(1, sizeof(struct spdk_blob_store));
3097 	if (!bs) {
3098 		return -ENOMEM;
3099 	}
3100 
3101 	TAILQ_INIT(&bs->blobs);
3102 	TAILQ_INIT(&bs->snapshots);
3103 	bs->dev = dev;
3104 	bs->md_thread = spdk_get_thread();
3105 	assert(bs->md_thread != NULL);
3106 
3107 	/*
3108 	 * Do not use _spdk_bs_lba_to_cluster() here since blockcnt may not be an
3109 	 *  even multiple of the cluster size.
3110 	 */
3111 	bs->cluster_sz = opts->cluster_sz;
3112 	bs->total_clusters = dev->blockcnt / (bs->cluster_sz / dev->blocklen);
3113 	bs->pages_per_cluster = bs->cluster_sz / SPDK_BS_PAGE_SIZE;
3114 	if (spdk_u32_is_pow2(bs->pages_per_cluster)) {
3115 		bs->pages_per_cluster_shift = spdk_u32log2(bs->pages_per_cluster);
3116 	}
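	/* Worked example (hypothetical geometry): blocklen == 512, blockcnt ==
	 * 2,000,000 and a 1 MiB cluster give 2048 blocks per cluster, so
	 * total_clusters == 976 and the 1,152 trailing blocks that do not fill
	 * a whole cluster are ignored. pages_per_cluster == 256 (1 MiB / 4 KiB)
	 * is a power of two, so the shift above is 8 and page-to-cluster math
	 * can use a shift instead of a division. */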
3117 	bs->num_free_clusters = bs->total_clusters;
3118 	bs->used_clusters = spdk_bit_array_create(bs->total_clusters);
3119 	bs->io_unit_size = dev->blocklen;
3120 	if (bs->used_clusters == NULL) {
3121 		free(bs);
3122 		return -ENOMEM;
3123 	}
3124 
3125 	bs->max_channel_ops = opts->max_channel_ops;
3126 	bs->super_blob = SPDK_BLOBID_INVALID;
3127 	memcpy(&bs->bstype, &opts->bstype, sizeof(opts->bstype));
3128 
3129 	/* The metadata is assumed to be at least 1 page */
3130 	bs->used_md_pages = spdk_bit_array_create(1);
3131 	bs->used_blobids = spdk_bit_array_create(0);
3132 
3133 	pthread_mutex_init(&bs->used_clusters_mutex, NULL);
3134 
3135 	spdk_io_device_register(bs, _spdk_bs_channel_create, _spdk_bs_channel_destroy,
3136 				sizeof(struct spdk_bs_channel), "blobstore");
3137 	rc = spdk_bs_register_md_thread(bs);
3138 	if (rc == -1) {
3139 		spdk_io_device_unregister(bs, NULL);
3140 		pthread_mutex_destroy(&bs->used_clusters_mutex);
3141 		spdk_bit_array_free(&bs->used_blobids);
3142 		spdk_bit_array_free(&bs->used_md_pages);
3143 		spdk_bit_array_free(&bs->used_clusters);
3144 		free(bs);
3145 		/* FIXME: this is a lie but don't know how to get a proper error code here */
3146 		return -ENOMEM;
3147 	}
3148 
3149 	*_bs = bs;
3150 	return 0;
3151 }
3152 
3153 /* START spdk_bs_load, spdk_bs_load_ctx will be used for both load and unload. */
3154 
3155 struct spdk_bs_load_ctx {
3156 	struct spdk_blob_store		*bs;
3157 	struct spdk_bs_super_block	*super;
3158 
3159 	struct spdk_bs_md_mask		*mask;
3160 	bool				in_page_chain;
3161 	uint32_t			page_index;
3162 	uint32_t			cur_page;
3163 	struct spdk_blob_md_page	*page;
3164 
3165 	uint64_t			num_extent_pages;
3166 	uint32_t			*extent_page_num;
3167 	struct spdk_blob_md_page	*extent_pages;
3168 
3169 	spdk_bs_sequence_t			*seq;
3170 	spdk_blob_op_with_handle_complete	iter_cb_fn;
3171 	void					*iter_cb_arg;
3172 	struct spdk_blob			*blob;
3173 	spdk_blob_id				blobid;
3174 };
3175 
3176 static void
3177 _spdk_bs_load_ctx_fail(struct spdk_bs_load_ctx *ctx, int bserrno)
3178 {
3179 	assert(bserrno != 0);
3180 
3181 	spdk_free(ctx->super);
3182 	bs_sequence_finish(ctx->seq, bserrno);
3183 	_spdk_bs_free(ctx->bs);
3184 	free(ctx);
3185 }
3186 
3187 static void
3188 _spdk_bs_set_mask(struct spdk_bit_array *array, struct spdk_bs_md_mask *mask)
3189 {
3190 	uint32_t i = 0;
3191 
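	/* Pack each set bit i of the in-memory bit array into bit (i % 8) of
	 * byte (i / 8) of the on-disk mask. e.g.: bits 0, 3 and 9 set (with
	 * mask->length > 9) yield mask->mask[0] == 0x09 and
	 * mask->mask[1] == 0x02. */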
3192 	while (true) {
3193 		i = spdk_bit_array_find_first_set(array, i);
3194 		if (i >= mask->length) {
3195 			break;
3196 		}
3197 		mask->mask[i / 8] |= 1U << (i % 8);
3198 		i++;
3199 	}
3200 }
3201 
3202 static int
3203 _spdk_bs_load_mask(struct spdk_bit_array **array_ptr, struct spdk_bs_md_mask *mask)
3204 {
3205 	struct spdk_bit_array *array;
3206 	uint32_t i;
3207 
3208 	if (spdk_bit_array_resize(array_ptr, mask->length) < 0) {
3209 		return -ENOMEM;
3210 	}
3211 
3212 	array = *array_ptr;
3213 	for (i = 0; i < mask->length; i++) {
3214 		if (mask->mask[i / 8] & (1U << (i % 8))) {
3215 			spdk_bit_array_set(array, i);
3216 		}
3217 	}
3218 
3219 	return 0;
3220 }
3221 
3222 static void
3223 _spdk_bs_write_super(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs,
3224 		     struct spdk_bs_super_block *super, spdk_bs_sequence_cpl cb_fn, void *cb_arg)
3225 {
3226 	/* Update the values in the super block */
3227 	super->super_blob = bs->super_blob;
3228 	memcpy(&super->bstype, &bs->bstype, sizeof(bs->bstype));
3229 	super->crc = _spdk_blob_md_page_calc_crc(super);
3230 	bs_sequence_write_dev(seq, super, _spdk_bs_page_to_lba(bs, 0),
3231 			      _spdk_bs_byte_to_lba(bs, sizeof(*super)),
3232 			      cb_fn, cb_arg);
3233 }
3234 
3235 static void
3236 _spdk_bs_write_used_clusters(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn)
3237 {
3238 	struct spdk_bs_load_ctx	*ctx = arg;
3239 	uint64_t	mask_size, lba, lba_count;
3240 
3241 	/* Write out the used clusters mask */
3242 	mask_size = ctx->super->used_cluster_mask_len * SPDK_BS_PAGE_SIZE;
3243 	ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL,
3244 				 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
3245 	if (!ctx->mask) {
3246 		_spdk_bs_load_ctx_fail(ctx, -ENOMEM);
3247 		return;
3248 	}
3249 
3250 	ctx->mask->type = SPDK_MD_MASK_TYPE_USED_CLUSTERS;
3251 	ctx->mask->length = ctx->bs->total_clusters;
3252 	assert(ctx->mask->length == spdk_bit_array_capacity(ctx->bs->used_clusters));
3253 
3254 	_spdk_bs_set_mask(ctx->bs->used_clusters, ctx->mask);
3255 	lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start);
3256 	lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len);
3257 	bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg);
3258 }
3259 
3260 static void
3261 _spdk_bs_write_used_md(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn)
3262 {
3263 	struct spdk_bs_load_ctx	*ctx = arg;
3264 	uint64_t	mask_size, lba, lba_count;
3265 
3266 	mask_size = ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE;
3267 	ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL,
3268 				 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
3269 	if (!ctx->mask) {
3270 		_spdk_bs_load_ctx_fail(ctx, -ENOMEM);
3271 		return;
3272 	}
3273 
3274 	ctx->mask->type = SPDK_MD_MASK_TYPE_USED_PAGES;
3275 	ctx->mask->length = ctx->super->md_len;
3276 	assert(ctx->mask->length == spdk_bit_array_capacity(ctx->bs->used_md_pages));
3277 
3278 	_spdk_bs_set_mask(ctx->bs->used_md_pages, ctx->mask);
3279 	lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_start);
3280 	lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_len);
3281 	bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg);
3282 }
3283 
3284 static void
3285 _spdk_bs_write_used_blobids(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn)
3286 {
3287 	struct spdk_bs_load_ctx	*ctx = arg;
3288 	uint64_t	mask_size, lba, lba_count;
3289 
3290 	if (ctx->super->used_blobid_mask_len == 0) {
3291 		/*
3292 		 * This is a pre-v3 on-disk format where the blobid mask does not get
3293 		 *  written to disk.
3294 		 */
3295 		cb_fn(seq, arg, 0);
3296 		return;
3297 	}
3298 
3299 	mask_size = ctx->super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE;
3300 	ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_SOCKET_ID_ANY,
3301 				 SPDK_MALLOC_DMA);
3302 	if (!ctx->mask) {
3303 		_spdk_bs_load_ctx_fail(ctx, -ENOMEM);
3304 		return;
3305 	}
3306 
3307 	ctx->mask->type = SPDK_MD_MASK_TYPE_USED_BLOBIDS;
3308 	ctx->mask->length = ctx->super->md_len;
3309 	assert(ctx->mask->length == spdk_bit_array_capacity(ctx->bs->used_blobids));
3310 
3311 	_spdk_bs_set_mask(ctx->bs->used_blobids, ctx->mask);
3312 	lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_start);
3313 	lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_len);
3314 	bs_sequence_write_dev(seq, ctx->mask, lba, lba_count, cb_fn, arg);
3315 }
3316 
3317 static void
3318 _spdk_blob_set_thin_provision(struct spdk_blob *blob)
3319 {
3320 	_spdk_blob_verify_md_op(blob);
3321 	blob->invalid_flags |= SPDK_BLOB_THIN_PROV;
3322 	blob->state = SPDK_BLOB_STATE_DIRTY;
3323 }
3324 
3325 static void
3326 _spdk_blob_set_clear_method(struct spdk_blob *blob, enum blob_clear_method clear_method)
3327 {
3328 	_spdk_blob_verify_md_op(blob);
3329 	blob->clear_method = clear_method;
3330 	blob->md_ro_flags |= (clear_method << SPDK_BLOB_CLEAR_METHOD_SHIFT);
3331 	blob->state = SPDK_BLOB_STATE_DIRTY;
3332 }
3333 
3334 static void _spdk_bs_load_iter(void *arg, struct spdk_blob *blob, int bserrno);
3335 
3336 static void
3337 _spdk_bs_delete_corrupted_blob_cpl(void *cb_arg, int bserrno)
3338 {
3339 	struct spdk_bs_load_ctx *ctx = cb_arg;
3340 	spdk_blob_id id;
3341 	int64_t page_num;
3342 
3343 	/* Iterate to the next blob (we can't use the spdk_bs_iter_next function
3344 	 * here since our last blob has already been removed). */
3345 	page_num = _spdk_bs_blobid_to_page(ctx->blobid);
3346 	page_num++;
3347 	page_num = spdk_bit_array_find_first_set(ctx->bs->used_blobids, page_num);
3348 	if (page_num >= spdk_bit_array_capacity(ctx->bs->used_blobids)) {
3349 		_spdk_bs_load_iter(ctx, NULL, -ENOENT);
3350 		return;
3351 	}
3352 
3353 	id = _spdk_bs_page_to_blobid(page_num);
3354 
3355 	spdk_bs_open_blob(ctx->bs, id, _spdk_bs_load_iter, ctx);
3356 }
3357 
3358 static void
3359 _spdk_bs_delete_corrupted_close_cb(void *cb_arg, int bserrno)
3360 {
3361 	struct spdk_bs_load_ctx *ctx = cb_arg;
3362 
3363 	if (bserrno != 0) {
3364 		SPDK_ERRLOG("Failed to close corrupted blob\n");
3365 		spdk_bs_iter_next(ctx->bs, ctx->blob, _spdk_bs_load_iter, ctx);
3366 		return;
3367 	}
3368 
3369 	spdk_bs_delete_blob(ctx->bs, ctx->blobid, _spdk_bs_delete_corrupted_blob_cpl, ctx);
3370 }
3371 
3372 static void
3373 _spdk_bs_delete_corrupted_blob(void *cb_arg, int bserrno)
3374 {
3375 	struct spdk_bs_load_ctx *ctx = cb_arg;
3376 	uint64_t i;
3377 
3378 	if (bserrno != 0) {
3379 		SPDK_ERRLOG("Failed to close clone of a corrupted blob\n");
3380 		spdk_bs_iter_next(ctx->bs, ctx->blob, _spdk_bs_load_iter, ctx);
3381 		return;
3382 	}
3383 
3384 	/* Snapshot and clone have the same copy of the cluster map and extent pages
3385 	 * at this point. Let's clear both for the snapshot now, so that they won't
3386 	 * be cleared for the clone later when we remove the snapshot. Also set
3387 	 * thin provisioning to pass the data corruption check. */
3388 	for (i = 0; i < ctx->blob->active.num_clusters; i++) {
3389 		ctx->blob->active.clusters[i] = 0;
3390 	}
3391 	for (i = 0; i < ctx->blob->active.num_extent_pages; i++) {
3392 		ctx->blob->active.extent_pages[i] = 0;
3393 	}
3394 
3395 	ctx->blob->md_ro = false;
3396 
3397 	_spdk_blob_set_thin_provision(ctx->blob);
3398 
3399 	ctx->blobid = ctx->blob->id;
3400 
3401 	spdk_blob_close(ctx->blob, _spdk_bs_delete_corrupted_close_cb, ctx);
3402 }
3403 
3404 static void
3405 _spdk_bs_update_corrupted_blob(void *cb_arg, int bserrno)
3406 {
3407 	struct spdk_bs_load_ctx *ctx = cb_arg;
3408 
3409 	if (bserrno != 0) {
3410 		SPDK_ERRLOG("Failed to close clone of a corrupted blob\n");
3411 		spdk_bs_iter_next(ctx->bs, ctx->blob, _spdk_bs_load_iter, ctx);
3412 		return;
3413 	}
3414 
3415 	ctx->blob->md_ro = false;
3416 	_spdk_blob_remove_xattr(ctx->blob, SNAPSHOT_PENDING_REMOVAL, true);
3417 	_spdk_blob_remove_xattr(ctx->blob, SNAPSHOT_IN_PROGRESS, true);
3418 	spdk_blob_set_read_only(ctx->blob);
3419 
3420 	if (ctx->iter_cb_fn) {
3421 		ctx->iter_cb_fn(ctx->iter_cb_arg, ctx->blob, 0);
3422 	}
3423 	_spdk_bs_blob_list_add(ctx->blob);
3424 
3425 	spdk_bs_iter_next(ctx->bs, ctx->blob, _spdk_bs_load_iter, ctx);
3426 }
3427 
3428 static void
3429 _spdk_bs_examine_clone(void *cb_arg, struct spdk_blob *blob, int bserrno)
3430 {
3431 	struct spdk_bs_load_ctx *ctx = cb_arg;
3432 
3433 	if (bserrno != 0) {
3434 		SPDK_ERRLOG("Failed to open clone of a corrupted blob\n");
3435 		spdk_bs_iter_next(ctx->bs, ctx->blob, _spdk_bs_load_iter, ctx);
3436 		return;
3437 	}
3438 
3439 	if (blob->parent_id == ctx->blob->id) {
3440 		/* Power failure occurred before updating the clone (snapshot delete case)
3441 		 * or after updating the clone (snapshot create case) - keep the snapshot */
3442 		spdk_blob_close(blob, _spdk_bs_update_corrupted_blob, ctx);
3443 	} else {
3444 		/* Power failure occurred after updating the clone (snapshot delete case)
3445 		 * or before updating the clone (snapshot create case) - remove the snapshot */
3446 		spdk_blob_close(blob, _spdk_bs_delete_corrupted_blob, ctx);
3447 	}
3448 }
3449 
3450 static void
3451 _spdk_bs_load_iter(void *arg, struct spdk_blob *blob, int bserrno)
3452 {
3453 	struct spdk_bs_load_ctx *ctx = arg;
3454 	const void *value;
3455 	size_t len;
3456 	int rc = 0;
3457 
3458 	if (bserrno == 0) {
3459 		/* Examine the blob to see if it was corrupted by a power failure. Fix
3460 		 * the ones that can be fixed and remove any other corrupted ones.
3461 		 * If it is not corrupted, just process it. */
3462 		rc = _spdk_blob_get_xattr_value(blob, SNAPSHOT_PENDING_REMOVAL, &value, &len, true);
3463 		if (rc != 0) {
3464 			rc = _spdk_blob_get_xattr_value(blob, SNAPSHOT_IN_PROGRESS, &value, &len, true);
3465 			if (rc != 0) {
3466 				/* Not corrupted - process it and continue with iterating through blobs */
3467 				if (ctx->iter_cb_fn) {
3468 					ctx->iter_cb_fn(ctx->iter_cb_arg, blob, 0);
3469 				}
3470 				_spdk_bs_blob_list_add(blob);
3471 				spdk_bs_iter_next(ctx->bs, blob, _spdk_bs_load_iter, ctx);
3472 				return;
3473 			}
3474 
3475 		}
3476 
3477 		assert(len == sizeof(spdk_blob_id));
3478 
3479 		ctx->blob = blob;
3480 
3481 		/* Open clone to check if we are able to fix this blob or should we remove it */
3482 		spdk_bs_open_blob(ctx->bs, *(spdk_blob_id *)value, _spdk_bs_examine_clone, ctx);
3483 		return;
3484 	} else if (bserrno == -ENOENT) {
3485 		bserrno = 0;
3486 	} else {
3487 		/*
3488 		 * This case needs to be looked at further.  Same problem
3489 		 *  exists with applications that rely on explicit blob
3490 		 *  iteration.  We should just skip the blob that failed
3491 		 *  to load and continue on to the next one.
3492 		 */
3493 		SPDK_ERRLOG("Error in iterating blobs\n");
3494 	}
3495 
3496 	ctx->iter_cb_fn = NULL;
3497 
3498 	spdk_free(ctx->super);
3499 	spdk_free(ctx->mask);
3500 	bs_sequence_finish(ctx->seq, bserrno);
3501 	free(ctx);
3502 }
3503 
3504 static void
3505 _spdk_bs_load_complete(struct spdk_bs_load_ctx *ctx)
3506 {
3507 	spdk_bs_iter_first(ctx->bs, _spdk_bs_load_iter, ctx);
3508 }
3509 
3510 static void
3511 _spdk_bs_load_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
3512 {
3513 	struct spdk_bs_load_ctx *ctx = cb_arg;
3514 	int rc;
3515 
	/* Mirror the error handling of the other mask-read completions */
	if (bserrno != 0) {
		_spdk_bs_load_ctx_fail(ctx, bserrno);
		return;
	}

3516 	/* The type must be correct */
3517 	assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_BLOBIDS);
3518 
3519 	/* The length of the mask (in bits) must not be greater than
3520 	 * the length of the buffer (converted to bits) */
3521 	assert(ctx->mask->length <= (ctx->super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE * 8));
3522 
3523 	/* The length of the mask must be exactly equal to the size
3524 	 * (in pages) of the metadata region */
3525 	assert(ctx->mask->length == ctx->super->md_len);
3526 
3527 	rc = _spdk_bs_load_mask(&ctx->bs->used_blobids, ctx->mask);
3528 	if (rc < 0) {
3529 		spdk_free(ctx->mask);
3530 		_spdk_bs_load_ctx_fail(ctx, rc);
3531 		return;
3532 	}
3533 
3534 	_spdk_bs_load_complete(ctx);
3535 }
3536 
3537 static void
3538 _spdk_bs_load_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
3539 {
3540 	struct spdk_bs_load_ctx *ctx = cb_arg;
3541 	uint64_t		lba, lba_count, mask_size;
3542 	int			rc;
3543 
3544 	if (bserrno != 0) {
3545 		_spdk_bs_load_ctx_fail(ctx, bserrno);
3546 		return;
3547 	}
3548 
3549 	/* The type must be correct */
3550 	assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_CLUSTERS);
3551 	/* The length of the mask (in bits) must not be greater than the length of the buffer (converted to bits) */
3552 	assert(ctx->mask->length <= (ctx->super->used_cluster_mask_len * sizeof(
3553 					     struct spdk_blob_md_page) * 8));
3554 	/* The length of the mask must be exactly equal to the total number of clusters */
3555 	assert(ctx->mask->length == ctx->bs->total_clusters);
3556 
3557 	rc = _spdk_bs_load_mask(&ctx->bs->used_clusters, ctx->mask);
3558 	if (rc < 0) {
3559 		spdk_free(ctx->mask);
3560 		_spdk_bs_load_ctx_fail(ctx, rc);
3561 		return;
3562 	}
3563 
3564 	ctx->bs->num_free_clusters = spdk_bit_array_count_clear(ctx->bs->used_clusters);
3565 	assert(ctx->bs->num_free_clusters <= ctx->bs->total_clusters);
3566 
3567 	spdk_free(ctx->mask);
3568 
3569 	/* Read the used blobids mask */
3570 	mask_size = ctx->super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE;
3571 	ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_SOCKET_ID_ANY,
3572 				 SPDK_MALLOC_DMA);
3573 	if (!ctx->mask) {
3574 		_spdk_bs_load_ctx_fail(ctx, -ENOMEM);
3575 		return;
3576 	}
3577 	lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_start);
3578 	lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_len);
3579 	bs_sequence_read_dev(seq, ctx->mask, lba, lba_count,
3580 			     _spdk_bs_load_used_blobids_cpl, ctx);
3581 }
3582 
3583 static void
3584 _spdk_bs_load_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
3585 {
3586 	struct spdk_bs_load_ctx *ctx = cb_arg;
3587 	uint64_t		lba, lba_count, mask_size;
3588 	int			rc;
3589 
3590 	if (bserrno != 0) {
3591 		_spdk_bs_load_ctx_fail(ctx, bserrno);
3592 		return;
3593 	}
3594 
3595 	/* The type must be correct */
3596 	assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_PAGES);
3597 	/* The length of the mask (in bits) must not be greater than the length of the buffer (converted to bits) */
3598 	assert(ctx->mask->length <= (ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE *
3599 				     8));
3600 	/* The length of the mask must be exactly equal to the size (in pages) of the metadata region */
3601 	assert(ctx->mask->length == ctx->super->md_len);
3602 
3603 	rc = _spdk_bs_load_mask(&ctx->bs->used_md_pages, ctx->mask);
3604 	if (rc < 0) {
3605 		spdk_free(ctx->mask);
3606 		_spdk_bs_load_ctx_fail(ctx, rc);
3607 		return;
3608 	}
3609 
3610 	spdk_free(ctx->mask);
3611 
3612 	/* Read the used clusters mask */
3613 	mask_size = ctx->super->used_cluster_mask_len * SPDK_BS_PAGE_SIZE;
3614 	ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL, SPDK_ENV_SOCKET_ID_ANY,
3615 				 SPDK_MALLOC_DMA);
3616 	if (!ctx->mask) {
3617 		_spdk_bs_load_ctx_fail(ctx, -ENOMEM);
3618 		return;
3619 	}
3620 	lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start);
3621 	lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len);
3622 	bs_sequence_read_dev(seq, ctx->mask, lba, lba_count,
3623 			     _spdk_bs_load_used_clusters_cpl, ctx);
3624 }
3625 
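/* Clean-load path: read the three persisted allocation masks in sequence -
 * used metadata pages, then used clusters, then used blobids - with each
 * completion callback kicking off the next read. */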
3626 static void
3627 _spdk_bs_load_read_used_pages(struct spdk_bs_load_ctx *ctx)
3628 {
3629 	uint64_t lba, lba_count, mask_size;
3630 
3631 	/* Read the used pages mask */
3632 	mask_size = ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE;
3633 	ctx->mask = spdk_zmalloc(mask_size, 0x1000, NULL,
3634 				 SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
3635 	if (!ctx->mask) {
3636 		_spdk_bs_load_ctx_fail(ctx, -ENOMEM);
3637 		return;
3638 	}
3639 
3640 	lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_start);
3641 	lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_len);
3642 	bs_sequence_read_dev(ctx->seq, ctx->mask, lba, lba_count,
3643 			     _spdk_bs_load_used_pages_cpl, ctx);
3644 }
3645 
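/* Walk the descriptors of one metadata page during replay: mark clusters
 * referenced by EXTENT_RLE and EXTENT_PAGE descriptors as used, and record
 * the extent page indices found in EXTENT_TABLE descriptors so they can be
 * read and replayed later in one batch. */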
3646 static int
3647 _spdk_bs_load_replay_md_parse_page(struct spdk_bs_load_ctx *ctx, struct spdk_blob_md_page *page)
3648 {
3649 	struct spdk_blob_store *bs = ctx->bs;
3650 	struct spdk_blob_md_descriptor *desc;
3651 	size_t	cur_desc = 0;
3652 
3653 	desc = (struct spdk_blob_md_descriptor *)page->descriptors;
3654 	while (cur_desc < sizeof(page->descriptors)) {
3655 		if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) {
3656 			if (desc->length == 0) {
3657 				/* If padding and length are 0, this terminates the page */
3658 				break;
3659 			}
3660 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE) {
3661 			struct spdk_blob_md_descriptor_extent_rle	*desc_extent_rle;
3662 			unsigned int				i, j;
3663 			unsigned int				cluster_count = 0;
3664 			uint32_t				cluster_idx;
3665 
3666 			desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)desc;
3667 
3668 			for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) {
3669 				for (j = 0; j < desc_extent_rle->extents[i].length; j++) {
3670 					cluster_idx = desc_extent_rle->extents[i].cluster_idx;
3671 					/*
3672 					 * cluster_idx = 0 means an unallocated cluster - don't mark that
3673 					 * in the used cluster map.
3674 					 */
3675 					if (cluster_idx != 0) {
3676 						spdk_bit_array_set(bs->used_clusters, cluster_idx + j);
3677 						if (bs->num_free_clusters == 0) {
3678 							return -ENOSPC;
3679 						}
3680 						bs->num_free_clusters--;
3681 					}
3682 					cluster_count++;
3683 				}
3684 			}
3685 			if (cluster_count == 0) {
3686 				return -EINVAL;
3687 			}
3688 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) {
3689 			struct spdk_blob_md_descriptor_extent_page	*desc_extent;
3690 			uint32_t					i;
3691 			uint32_t					cluster_count = 0;
3692 			uint32_t					cluster_idx;
3693 			size_t						cluster_idx_length;
3694 
3695 			desc_extent = (struct spdk_blob_md_descriptor_extent_page *)desc;
3696 			cluster_idx_length = desc_extent->length - sizeof(desc_extent->start_cluster_idx);
3697 
3698 			if (desc_extent->length <= sizeof(desc_extent->start_cluster_idx) ||
3699 			    (cluster_idx_length % sizeof(desc_extent->cluster_idx[0]) != 0)) {
3700 				return -EINVAL;
3701 			}
3702 
3703 			for (i = 0; i < cluster_idx_length / sizeof(desc_extent->cluster_idx[0]); i++) {
3704 				cluster_idx = desc_extent->cluster_idx[i];
3705 				/*
3706 				 * cluster_idx = 0 means an unallocated cluster - don't mark that
3707 				 * in the used cluster map.
3708 				 */
3709 				if (cluster_idx != 0) {
3710 					if (cluster_idx < desc_extent->start_cluster_idx &&
3711 					    cluster_idx >= desc_extent->start_cluster_idx + cluster_count) {
3712 						return -EINVAL;
3713 					}
3714 					spdk_bit_array_set(bs->used_clusters, cluster_idx);
3715 					if (bs->num_free_clusters == 0) {
3716 						return -ENOSPC;
3717 					}
3718 					bs->num_free_clusters--;
3719 				}
3720 				cluster_count++;
3721 			}
3722 
3723 			if (cluster_count == 0) {
3724 				return -EINVAL;
3725 			}
3726 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) {
3727 			/* Skip this item */
3728 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) {
3729 			/* Skip this item */
3730 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) {
3731 			/* Skip this item */
3732 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_TABLE) {
3733 			struct spdk_blob_md_descriptor_extent_table *desc_extent_table;
3734 			uint32_t num_extent_pages = ctx->num_extent_pages;
3735 			uint32_t i;
3736 			size_t extent_pages_length;
3737 			void *tmp;
3738 
3739 			desc_extent_table = (struct spdk_blob_md_descriptor_extent_table *)desc;
3740 			extent_pages_length = desc_extent_table->length - sizeof(desc_extent_table->num_clusters);
3741 
3742 			if (desc_extent_table->length == 0 ||
3743 			    (extent_pages_length % sizeof(desc_extent_table->extent_page[0]) != 0)) {
3744 				return -EINVAL;
3745 			}
3746 
3747 			for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) {
3748 				if (desc_extent_table->extent_page[i].page_idx != 0) {
3749 					if (desc_extent_table->extent_page[i].num_pages != 1) {
3750 						return -EINVAL;
3751 					}
3752 					num_extent_pages += 1;
3753 				}
3754 			}
3755 
3756 			if (num_extent_pages > 0) {
3757 				tmp = realloc(ctx->extent_page_num, num_extent_pages * sizeof(uint32_t));
3758 				if (tmp == NULL) {
3759 					return -ENOMEM;
3760 				}
3761 				ctx->extent_page_num = tmp;
3762 
3763 				/* Extent table entries contain md page numbers for extent pages.
3764 				 * Zeroes represent unallocated extent pages, those are run-length-encoded.
3765 				 */
3766 				for (i = 0; i < extent_pages_length / sizeof(desc_extent_table->extent_page[0]); i++) {
3767 					if (desc_extent_table->extent_page[i].page_idx != 0) {
3768 						ctx->extent_page_num[ctx->num_extent_pages] = desc_extent_table->extent_page[i].page_idx;
3769 						ctx->num_extent_pages += 1;
3770 					}
3771 				}
3772 			}
3773 		} else {
3774 			/* Error */
3775 			return -EINVAL;
3776 		}
3777 		/* Advance to the next descriptor */
3778 		cur_desc += sizeof(*desc) + desc->length;
3779 		if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) {
3780 			break;
3781 		}
3782 		desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc);
3783 	}
3784 	return 0;
3785 }
3786 
3787 static bool _spdk_bs_load_cur_extent_page_valid(struct spdk_blob_md_page *page)
3788 {
3789 	uint32_t crc;
3790 	struct spdk_blob_md_descriptor *desc = (struct spdk_blob_md_descriptor *)page->descriptors;
3791 	size_t desc_len;
3792 
3793 	crc = _spdk_blob_md_page_calc_crc(page);
3794 	if (crc != page->crc) {
3795 		return false;
3796 	}
3797 
3798 	/* An extent page always has sequence number 0. */
3799 	if (page->sequence_num != 0) {
3800 		return false;
3801 	}
3802 
3803 	/* Descriptor type must be EXTENT_PAGE. */
3804 	if (desc->type != SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) {
3805 		return false;
3806 	}
3807 
3808 	/* Descriptor length cannot exceed the page. */
3809 	desc_len = sizeof(*desc) + desc->length;
3810 	if (desc_len > sizeof(page->descriptors)) {
3811 		return false;
3812 	}
3813 
3814 	/* It has to be the only descriptor in the page. */
3815 	if (desc_len + sizeof(*desc) <= sizeof(page->descriptors)) {
3816 		desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + desc_len);
3817 		if (desc->length != 0) {
3818 			return false;
3819 		}
3820 	}
3821 
3822 	return true;
3823 }
3824 
3825 static bool _spdk_bs_load_cur_md_page_valid(struct spdk_bs_load_ctx *ctx)
3826 {
3827 	uint32_t crc;
3828 	struct spdk_blob_md_page *page = ctx->page;
3829 
3830 	crc = _spdk_blob_md_page_calc_crc(page);
3831 	if (crc != page->crc) {
3832 		return false;
3833 	}
3834 
3835 	/* First page of a sequence should match the blobid. */
3836 	if (page->sequence_num == 0 &&
3837 	    _spdk_bs_page_to_blobid(ctx->cur_page) != page->id) {
3838 		return false;
3839 	}
3840 	assert(_spdk_bs_load_cur_extent_page_valid(page) == false);
3841 
3842 	return true;
3843 }
3844 
3845 static void
3846 _spdk_bs_load_replay_cur_md_page(struct spdk_bs_load_ctx *ctx);
3847 
3848 static void
3849 _spdk_bs_load_write_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
3850 {
3851 	struct spdk_bs_load_ctx	*ctx = cb_arg;
3852 
3853 	if (bserrno != 0) {
3854 		_spdk_bs_load_ctx_fail(ctx, bserrno);
3855 		return;
3856 	}
3857 
3858 	_spdk_bs_load_complete(ctx);
3859 }
3860 
3861 static void
3862 _spdk_bs_load_write_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
3863 {
3864 	struct spdk_bs_load_ctx	*ctx = cb_arg;
3865 
3866 	spdk_free(ctx->mask);
3867 	ctx->mask = NULL;
3868 
3869 	if (bserrno != 0) {
3870 		_spdk_bs_load_ctx_fail(ctx, bserrno);
3871 		return;
3872 	}
3873 
3874 	_spdk_bs_write_used_clusters(seq, ctx, _spdk_bs_load_write_used_clusters_cpl);
3875 }
3876 
3877 static void
3878 _spdk_bs_load_write_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
3879 {
3880 	struct spdk_bs_load_ctx	*ctx = cb_arg;
3881 
3882 	spdk_free(ctx->mask);
3883 	ctx->mask = NULL;
3884 
3885 	if (bserrno != 0) {
3886 		_spdk_bs_load_ctx_fail(ctx, bserrno);
3887 		return;
3888 	}
3889 
3890 	_spdk_bs_write_used_blobids(seq, ctx, _spdk_bs_load_write_used_blobids_cpl);
3891 }
3892 
3893 static void
3894 _spdk_bs_load_write_used_md(struct spdk_bs_load_ctx *ctx)
3895 {
3896 	_spdk_bs_write_used_md(ctx->seq, ctx, _spdk_bs_load_write_used_pages_cpl);
3897 }
3898 
3899 static void
3900 _spdk_bs_load_replay_md_chain_cpl(struct spdk_bs_load_ctx *ctx)
3901 {
3902 	uint64_t num_md_clusters;
3903 	uint64_t i;
3904 
3905 	ctx->in_page_chain = false;
3906 
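	/* Skip ahead to the next metadata page that has not already been
	 * claimed while replaying an earlier page chain. */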
3907 	do {
3908 		ctx->page_index++;
3909 	} while (spdk_bit_array_get(ctx->bs->used_md_pages, ctx->page_index) == true);
3910 
3911 	if (ctx->page_index < ctx->super->md_len) {
3912 		ctx->cur_page = ctx->page_index;
3913 		_spdk_bs_load_replay_cur_md_page(ctx);
3914 	} else {
3915 		/* Claim all of the clusters used by the metadata */
3916 		num_md_clusters = spdk_divide_round_up(ctx->super->md_len, ctx->bs->pages_per_cluster);
3917 		for (i = 0; i < num_md_clusters; i++) {
3918 			_spdk_bs_claim_cluster(ctx->bs, i);
3919 		}
3920 		spdk_free(ctx->page);
3921 		_spdk_bs_load_write_used_md(ctx);
3922 	}
3923 }
3924 
3925 static void
3926 _spdk_bs_load_replay_extent_page_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
3927 {
3928 	struct spdk_bs_load_ctx *ctx = cb_arg;
3929 	uint32_t page_num;
3930 	uint64_t i;
3931 
3932 	if (bserrno != 0) {
3933 		spdk_free(ctx->extent_pages);
3934 		_spdk_bs_load_ctx_fail(ctx, bserrno);
3935 		return;
3936 	}
3937 
3938 	for (i = 0; i < ctx->num_extent_pages; i++) {
3939 		/* Extent pages are only read when referenced from chain metadata.
3940 		 * Metadata integrity is broken if such a page is not a valid extent page. */
3941 		if (_spdk_bs_load_cur_extent_page_valid(&ctx->extent_pages[i]) != true) {
3942 			spdk_free(ctx->extent_pages);
3943 			_spdk_bs_load_ctx_fail(ctx, -EILSEQ);
3944 			return;
3945 		}
3946 
3947 		page_num = ctx->extent_page_num[i];
3948 		spdk_bit_array_set(ctx->bs->used_md_pages, page_num);
3949 		if (_spdk_bs_load_replay_md_parse_page(ctx, &ctx->extent_pages[i])) {
3950 			spdk_free(ctx->extent_pages);
3951 			_spdk_bs_load_ctx_fail(ctx, -EILSEQ);
3952 			return;
3953 		}
3954 	}
3955 
3956 	spdk_free(ctx->extent_pages);
3957 	free(ctx->extent_page_num);
3958 	ctx->extent_page_num = NULL;
3959 	ctx->num_extent_pages = 0;
3960 
3961 	_spdk_bs_load_replay_md_chain_cpl(ctx);
3962 }
3963 
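/* Read every extent page recorded while parsing the extent tables in a
 * single batch; the completion callback validates and replays each one. */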
3964 static void
3965 _spdk_bs_load_replay_extent_pages(struct spdk_bs_load_ctx *ctx)
3966 {
3967 	spdk_bs_batch_t *batch;
3968 	uint32_t page;
3969 	uint64_t lba;
3970 	uint64_t i;
3971 
3972 	ctx->extent_pages = spdk_zmalloc(SPDK_BS_PAGE_SIZE * ctx->num_extent_pages, SPDK_BS_PAGE_SIZE,
3973 					 NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
3974 	if (!ctx->extent_pages) {
3975 		_spdk_bs_load_ctx_fail(ctx, -ENOMEM);
3976 		return;
3977 	}
3978 
3979 	batch = bs_sequence_to_batch(ctx->seq, _spdk_bs_load_replay_extent_page_cpl, ctx);
3980 
3981 	for (i = 0; i < ctx->num_extent_pages; i++) {
3982 		page = ctx->extent_page_num[i];
3983 		assert(page < ctx->super->md_len);
3984 		lba = _spdk_bs_md_page_to_lba(ctx->bs, page);
3985 		bs_batch_read_dev(batch, &ctx->extent_pages[i], lba,
3986 				  _spdk_bs_byte_to_lba(ctx->bs, SPDK_BS_PAGE_SIZE));
3987 	}
3988 
3989 	bs_batch_close(batch);
3990 }
3991 
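/* Replay one metadata page: a valid page that starts a blob (sequence_num 0)
 * claims a blobid, chained pages are followed via page->next, and any extent
 * pages collected along the chain are read once the chain ends. */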
3992 static void
3993 _spdk_bs_load_replay_md_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
3994 {
3995 	struct spdk_bs_load_ctx *ctx = cb_arg;
3996 	uint32_t page_num;
3997 	struct spdk_blob_md_page *page;
3998 
3999 	if (bserrno != 0) {
4000 		_spdk_bs_load_ctx_fail(ctx, bserrno);
4001 		return;
4002 	}
4003 
4004 	page_num = ctx->cur_page;
4005 	page = ctx->page;
4006 	if (_spdk_bs_load_cur_md_page_valid(ctx) == true) {
4007 		if (page->sequence_num == 0 || ctx->in_page_chain == true) {
4008 			_spdk_bs_claim_md_page(ctx->bs, page_num);
4009 			if (page->sequence_num == 0) {
4010 				spdk_bit_array_set(ctx->bs->used_blobids, page_num);
4011 			}
4012 			if (_spdk_bs_load_replay_md_parse_page(ctx, page)) {
4013 				_spdk_bs_load_ctx_fail(ctx, -EILSEQ);
4014 				return;
4015 			}
4016 			if (page->next != SPDK_INVALID_MD_PAGE) {
4017 				ctx->in_page_chain = true;
4018 				ctx->cur_page = page->next;
4019 				_spdk_bs_load_replay_cur_md_page(ctx);
4020 				return;
4021 			}
4022 			if (ctx->num_extent_pages != 0) {
4023 				_spdk_bs_load_replay_extent_pages(ctx);
4024 				return;
4025 			}
4026 		}
4027 	}
4028 	_spdk_bs_load_replay_md_chain_cpl(ctx);
4029 }
4030 
4031 static void
4032 _spdk_bs_load_replay_cur_md_page(struct spdk_bs_load_ctx *ctx)
4033 {
4034 	uint64_t lba;
4035 
4036 	assert(ctx->cur_page < ctx->super->md_len);
4037 	lba = _spdk_bs_md_page_to_lba(ctx->bs, ctx->cur_page);
4038 	bs_sequence_read_dev(ctx->seq, ctx->page, lba,
4039 			     _spdk_bs_byte_to_lba(ctx->bs, SPDK_BS_PAGE_SIZE),
4040 			     _spdk_bs_load_replay_md_cpl, ctx);
4041 }
4042 
4043 static void
4044 _spdk_bs_load_replay_md(struct spdk_bs_load_ctx *ctx)
4045 {
4046 	ctx->page_index = 0;
4047 	ctx->cur_page = 0;
4048 	ctx->page = spdk_zmalloc(SPDK_BS_PAGE_SIZE, SPDK_BS_PAGE_SIZE,
4049 				 NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
4050 	if (!ctx->page) {
4051 		_spdk_bs_load_ctx_fail(ctx, -ENOMEM);
4052 		return;
4053 	}
4054 	_spdk_bs_load_replay_cur_md_page(ctx);
4055 }
4056 
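/* Recovery path: the persisted masks cannot be trusted, so size fresh
 * in-memory masks and rebuild them by replaying all metadata pages. */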
4057 static void
4058 _spdk_bs_recover(struct spdk_bs_load_ctx *ctx)
4059 {
4060 	int		rc;
4061 
4062 	rc = spdk_bit_array_resize(&ctx->bs->used_md_pages, ctx->super->md_len);
4063 	if (rc < 0) {
4064 		_spdk_bs_load_ctx_fail(ctx, -ENOMEM);
4065 		return;
4066 	}
4067 
4068 	rc = spdk_bit_array_resize(&ctx->bs->used_blobids, ctx->super->md_len);
4069 	if (rc < 0) {
4070 		_spdk_bs_load_ctx_fail(ctx, -ENOMEM);
4071 		return;
4072 	}
4073 
4074 	rc = spdk_bit_array_resize(&ctx->bs->used_clusters, ctx->bs->total_clusters);
4075 	if (rc < 0) {
4076 		_spdk_bs_load_ctx_fail(ctx, -ENOMEM);
4077 		return;
4078 	}
4079 
4080 	ctx->bs->num_free_clusters = ctx->bs->total_clusters;
4081 	_spdk_bs_load_replay_md(ctx);
4082 }
4083 
4084 static void
4085 _spdk_bs_load_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4086 {
4087 	struct spdk_bs_load_ctx *ctx = cb_arg;
4088 	uint32_t	crc;
4089 	int		rc;
4090 	static const char zeros[SPDK_BLOBSTORE_TYPE_LENGTH];
4091 
4092 	if (ctx->super->version > SPDK_BS_VERSION ||
4093 	    ctx->super->version < SPDK_BS_INITIAL_VERSION) {
4094 		_spdk_bs_load_ctx_fail(ctx, -EILSEQ);
4095 		return;
4096 	}
4097 
4098 	if (memcmp(ctx->super->signature, SPDK_BS_SUPER_BLOCK_SIG,
4099 		   sizeof(ctx->super->signature)) != 0) {
4100 		_spdk_bs_load_ctx_fail(ctx, -EILSEQ);
4101 		return;
4102 	}
4103 
4104 	crc = _spdk_blob_md_page_calc_crc(ctx->super);
4105 	if (crc != ctx->super->crc) {
4106 		_spdk_bs_load_ctx_fail(ctx, -EILSEQ);
4107 		return;
4108 	}
4109 
4110 	if (memcmp(&ctx->bs->bstype, &ctx->super->bstype, SPDK_BLOBSTORE_TYPE_LENGTH) == 0) {
4111 		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Bstype matched - loading blobstore\n");
4112 	} else if (memcmp(&ctx->bs->bstype, zeros, SPDK_BLOBSTORE_TYPE_LENGTH) == 0) {
4113 		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Bstype wildcard used - loading blobstore regardless of bstype\n");
4114 	} else {
4115 		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Unexpected bstype\n");
4116 		SPDK_LOGDUMP(SPDK_LOG_BLOB, "Expected:", ctx->bs->bstype.bstype, SPDK_BLOBSTORE_TYPE_LENGTH);
4117 		SPDK_LOGDUMP(SPDK_LOG_BLOB, "Found:", ctx->super->bstype.bstype, SPDK_BLOBSTORE_TYPE_LENGTH);
4118 		_spdk_bs_load_ctx_fail(ctx, -ENXIO);
4119 		return;
4120 	}
4121 
4122 	if (ctx->super->size > ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen) {
4123 		SPDK_NOTICELOG("Size mismatch, dev size: %lu, blobstore size: %lu\n",
4124 			       ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen, ctx->super->size);
4125 		_spdk_bs_load_ctx_fail(ctx, -EILSEQ);
4126 		return;
4127 	}
4128 
4129 	if (ctx->super->size == 0) {
4130 		ctx->super->size = ctx->bs->dev->blockcnt * ctx->bs->dev->blocklen;
4131 	}
4132 
4133 	if (ctx->super->io_unit_size == 0) {
4134 		ctx->super->io_unit_size = SPDK_BS_PAGE_SIZE;
4135 	}
4136 
4137 	/* Parse the super block */
4138 	ctx->bs->clean = 1;
4139 	ctx->bs->cluster_sz = ctx->super->cluster_size;
4140 	ctx->bs->total_clusters = ctx->super->size / ctx->super->cluster_size;
4141 	ctx->bs->pages_per_cluster = ctx->bs->cluster_sz / SPDK_BS_PAGE_SIZE;
4142 	if (spdk_u32_is_pow2(ctx->bs->pages_per_cluster)) {
4143 		ctx->bs->pages_per_cluster_shift = spdk_u32log2(ctx->bs->pages_per_cluster);
4144 	}
4145 	ctx->bs->io_unit_size = ctx->super->io_unit_size;
4146 	rc = spdk_bit_array_resize(&ctx->bs->used_clusters, ctx->bs->total_clusters);
4147 	if (rc < 0) {
4148 		_spdk_bs_load_ctx_fail(ctx, -ENOMEM);
4149 		return;
4150 	}
4151 	ctx->bs->md_start = ctx->super->md_start;
4152 	ctx->bs->md_len = ctx->super->md_len;
4153 	ctx->bs->total_data_clusters = ctx->bs->total_clusters - spdk_divide_round_up(
4154 					       ctx->bs->md_start + ctx->bs->md_len, ctx->bs->pages_per_cluster);
4155 	ctx->bs->super_blob = ctx->super->super_blob;
4156 	memcpy(&ctx->bs->bstype, &ctx->super->bstype, sizeof(ctx->super->bstype));
4157 
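	/* A superblock without a used_blobids mask (e.g. written by an older
	 * blobstore version) or one that was not marked clean on unload forces
	 * a full metadata replay to rebuild the allocation masks. */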
4158 	if (ctx->super->used_blobid_mask_len == 0 || ctx->super->clean == 0) {
4159 		_spdk_bs_recover(ctx);
4160 	} else {
4161 		_spdk_bs_load_read_used_pages(ctx);
4162 	}
4163 }
4164 
4165 void
4166 spdk_bs_load(struct spdk_bs_dev *dev, struct spdk_bs_opts *o,
4167 	     spdk_bs_op_with_handle_complete cb_fn, void *cb_arg)
4168 {
4169 	struct spdk_blob_store	*bs;
4170 	struct spdk_bs_cpl	cpl;
4171 	struct spdk_bs_load_ctx *ctx;
4172 	struct spdk_bs_opts	opts = {};
4173 	int err;
4174 
4175 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Loading blobstore from dev %p\n", dev);
4176 
4177 	if ((SPDK_BS_PAGE_SIZE % dev->blocklen) != 0) {
4178 		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "unsupported dev block length of %d\n", dev->blocklen);
4179 		dev->destroy(dev);
4180 		cb_fn(cb_arg, NULL, -EINVAL);
4181 		return;
4182 	}
4183 
4184 	if (o) {
4185 		opts = *o;
4186 	} else {
4187 		spdk_bs_opts_init(&opts);
4188 	}
4189 
4190 	if (opts.max_md_ops == 0 || opts.max_channel_ops == 0) {
4191 		dev->destroy(dev);
4192 		cb_fn(cb_arg, NULL, -EINVAL);
4193 		return;
4194 	}
4195 
4196 	err = _spdk_bs_alloc(dev, &opts, &bs);
4197 	if (err) {
4198 		dev->destroy(dev);
4199 		cb_fn(cb_arg, NULL, err);
4200 		return;
4201 	}
4202 
4203 	ctx = calloc(1, sizeof(*ctx));
4204 	if (!ctx) {
4205 		_spdk_bs_free(bs);
4206 		cb_fn(cb_arg, NULL, -ENOMEM);
4207 		return;
4208 	}
4209 
4210 	ctx->bs = bs;
4211 	ctx->iter_cb_fn = opts.iter_cb_fn;
4212 	ctx->iter_cb_arg = opts.iter_cb_arg;
4213 
4214 	/* Allocate memory for the super block */
4215 	ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL,
4216 				  SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
4217 	if (!ctx->super) {
4218 		free(ctx);
4219 		_spdk_bs_free(bs);
4220 		cb_fn(cb_arg, NULL, -ENOMEM);
4221 		return;
4222 	}
4223 
4224 	cpl.type = SPDK_BS_CPL_TYPE_BS_HANDLE;
4225 	cpl.u.bs_handle.cb_fn = cb_fn;
4226 	cpl.u.bs_handle.cb_arg = cb_arg;
4227 	cpl.u.bs_handle.bs = bs;
4228 
4229 	ctx->seq = bs_sequence_start(bs->md_channel, &cpl);
4230 	if (!ctx->seq) {
4231 		spdk_free(ctx->super);
4232 		free(ctx);
4233 		_spdk_bs_free(bs);
4234 		cb_fn(cb_arg, NULL, -ENOMEM);
4235 		return;
4236 	}
4237 
4238 	/* Read the super block */
4239 	bs_sequence_read_dev(ctx->seq, ctx->super, _spdk_bs_page_to_lba(bs, 0),
4240 			     _spdk_bs_byte_to_lba(bs, sizeof(*ctx->super)),
4241 			     _spdk_bs_load_super_cpl, ctx);
4242 }
4243 
4244 /* END spdk_bs_load */
4245 
4246 /* START spdk_bs_dump */
4247 
4248 struct spdk_bs_dump_ctx {
4249 	struct spdk_blob_store		*bs;
4250 	struct spdk_bs_super_block	*super;
4251 	uint32_t			cur_page;
4252 	struct spdk_blob_md_page	*page;
4253 	spdk_bs_sequence_t		*seq;
4254 	FILE				*fp;
4255 	spdk_bs_dump_print_xattr	print_xattr_fn;
4256 	char				xattr_name[4096];
4257 };
4258 
4259 static void
4260 _spdk_bs_dump_finish(spdk_bs_sequence_t *seq, struct spdk_bs_dump_ctx *ctx, int bserrno)
4261 {
4262 	spdk_free(ctx->super);
4263 
4264 	/*
4265 	 * We need to defer calling bs_call_cpl() until after
4266 	 * dev destruction, so tuck these away for later use.
4267 	 */
4268 	ctx->bs->unload_err = bserrno;
4269 	memcpy(&ctx->bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl));
4270 	seq->cpl.type = SPDK_BS_CPL_TYPE_NONE;
4271 
4272 	bs_sequence_finish(seq, 0);
4273 	_spdk_bs_free(ctx->bs);
4274 	free(ctx);
4275 }
4276 
4277 static void _spdk_bs_dump_read_md_page(spdk_bs_sequence_t *seq, void *cb_arg);
4278 
4279 static void
4280 _spdk_bs_dump_print_md_page(struct spdk_bs_dump_ctx *ctx)
4281 {
4282 	uint32_t page_idx = ctx->cur_page;
4283 	struct spdk_blob_md_page *page = ctx->page;
4284 	struct spdk_blob_md_descriptor *desc;
4285 	size_t cur_desc = 0;
4286 	uint32_t crc;
4287 
4288 	fprintf(ctx->fp, "=========\n");
4289 	fprintf(ctx->fp, "Metadata Page Index: %" PRIu32 " (0x%" PRIx32 ")\n", page_idx, page_idx);
4290 	fprintf(ctx->fp, "Blob ID: 0x%" PRIx64 "\n", page->id);
4291 
4292 	crc = _spdk_blob_md_page_calc_crc(page);
4293 	fprintf(ctx->fp, "CRC: 0x%" PRIx32 " (%s)\n", page->crc, crc == page->crc ? "OK" : "Mismatch");
4294 
4295 	desc = (struct spdk_blob_md_descriptor *)page->descriptors;
4296 	while (cur_desc < sizeof(page->descriptors)) {
4297 		if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) {
4298 			if (desc->length == 0) {
4299 				/* If padding and length are 0, this terminates the page */
4300 				break;
4301 			}
4302 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_RLE) {
4303 			struct spdk_blob_md_descriptor_extent_rle	*desc_extent_rle;
4304 			unsigned int				i;
4305 
4306 			desc_extent_rle = (struct spdk_blob_md_descriptor_extent_rle *)desc;
4307 
4308 			for (i = 0; i < desc_extent_rle->length / sizeof(desc_extent_rle->extents[0]); i++) {
4309 				if (desc_extent_rle->extents[i].cluster_idx != 0) {
4310 					fprintf(ctx->fp, "Allocated Extent - Start: %" PRIu32,
4311 						desc_extent_rle->extents[i].cluster_idx);
4312 				} else {
4313 					fprintf(ctx->fp, "Unallocated Extent - ");
4314 				}
4315 				fprintf(ctx->fp, " Length: %" PRIu32, desc_extent_rle->extents[i].length);
4316 				fprintf(ctx->fp, "\n");
4317 			}
4318 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT_PAGE) {
4319 			struct spdk_blob_md_descriptor_extent_page	*desc_extent;
4320 			unsigned int					i;
4321 
4322 			desc_extent = (struct spdk_blob_md_descriptor_extent_page *)desc;
4323 
4324 			for (i = 0; i < desc_extent->length / sizeof(desc_extent->cluster_idx[0]); i++) {
4325 				if (desc_extent->cluster_idx[i] != 0) {
4326 					fprintf(ctx->fp, "Allocated Extent - Start: %" PRIu32,
4327 						desc_extent->cluster_idx[i]);
4328 				} else {
4329 					fprintf(ctx->fp, "Unallocated Extent");
4330 				}
4331 				fprintf(ctx->fp, "\n");
4332 			}
4333 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) {
4334 			struct spdk_blob_md_descriptor_xattr *desc_xattr;
4335 			uint32_t i;
4336 
4337 			desc_xattr = (struct spdk_blob_md_descriptor_xattr *)desc;
4338 
4339 			if (desc_xattr->length !=
4340 			    sizeof(desc_xattr->name_length) + sizeof(desc_xattr->value_length) +
4341 			    desc_xattr->name_length + desc_xattr->value_length) {
4342 				fprintf(ctx->fp, "XATTR: Invalid descriptor length\n");
			}
4343 
4344 			memcpy(ctx->xattr_name, desc_xattr->name, desc_xattr->name_length);
4345 			ctx->xattr_name[desc_xattr->name_length] = '\0';
4346 			fprintf(ctx->fp, "XATTR: name = \"%s\"\n", ctx->xattr_name);
4347 			fprintf(ctx->fp, "       value = \"");
4348 			ctx->print_xattr_fn(ctx->fp, ctx->super->bstype.bstype, ctx->xattr_name,
4349 					    (void *)((uintptr_t)desc_xattr->name + desc_xattr->name_length),
4350 					    desc_xattr->value_length);
4351 			fprintf(ctx->fp, "\"\n");
4352 			for (i = 0; i < desc_xattr->value_length; i++) {
4353 				if (i % 16 == 0) {
4354 					fprintf(ctx->fp, "               ");
4355 				}
4356 				fprintf(ctx->fp, "%02" PRIx8 " ", *((uint8_t *)desc_xattr->name + desc_xattr->name_length + i));
4357 				if ((i + 1) % 16 == 0) {
4358 					fprintf(ctx->fp, "\n");
4359 				}
4360 			}
4361 			if (i % 16 != 0) {
4362 				fprintf(ctx->fp, "\n");
4363 			}
4364 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR_INTERNAL) {
4365 			/* TODO */
4366 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) {
4367 			/* TODO */
4368 		} else {
4369 			/* Error */
4370 		}
4371 		/* Advance to the next descriptor */
4372 		cur_desc += sizeof(*desc) + desc->length;
4373 		if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) {
4374 			break;
4375 		}
4376 		desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc);
4377 	}
4378 }
4379 
4380 static void
4381 _spdk_bs_dump_read_md_page_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4382 {
4383 	struct spdk_bs_dump_ctx *ctx = cb_arg;
4384 
4385 	if (bserrno != 0) {
4386 		_spdk_bs_dump_finish(seq, ctx, bserrno);
4387 		return;
4388 	}
4389 
4390 	if (ctx->page->id != 0) {
4391 		_spdk_bs_dump_print_md_page(ctx);
4392 	}
4393 
4394 	ctx->cur_page++;
4395 
4396 	if (ctx->cur_page < ctx->super->md_len) {
4397 		_spdk_bs_dump_read_md_page(seq, ctx);
4398 	} else {
4399 		spdk_free(ctx->page);
4400 		_spdk_bs_dump_finish(seq, ctx, 0);
4401 	}
4402 }
4403 
4404 static void
4405 _spdk_bs_dump_read_md_page(spdk_bs_sequence_t *seq, void *cb_arg)
4406 {
4407 	struct spdk_bs_dump_ctx *ctx = cb_arg;
4408 	uint64_t lba;
4409 
4410 	assert(ctx->cur_page < ctx->super->md_len);
4411 	lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->md_start + ctx->cur_page);
4412 	bs_sequence_read_dev(seq, ctx->page, lba,
4413 			     _spdk_bs_byte_to_lba(ctx->bs, SPDK_BS_PAGE_SIZE),
4414 			     _spdk_bs_dump_read_md_page_cpl, ctx);
4415 }
4416 
4417 static void
4418 _spdk_bs_dump_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4419 {
4420 	struct spdk_bs_dump_ctx *ctx = cb_arg;
4421 
4422 	fprintf(ctx->fp, "Signature: \"%.8s\" ", ctx->super->signature);
4423 	if (memcmp(ctx->super->signature, SPDK_BS_SUPER_BLOCK_SIG,
4424 		   sizeof(ctx->super->signature)) != 0) {
4425 		fprintf(ctx->fp, "(Mismatch)\n");
4426 		_spdk_bs_dump_finish(seq, ctx, bserrno);
4427 		return;
4428 	} else {
4429 		fprintf(ctx->fp, "(OK)\n");
4430 	}
4431 	fprintf(ctx->fp, "Version: %" PRIu32 "\n", ctx->super->version);
4432 	fprintf(ctx->fp, "CRC: 0x%x (%s)\n", ctx->super->crc,
4433 		(ctx->super->crc == _spdk_blob_md_page_calc_crc(ctx->super)) ? "OK" : "Mismatch");
4434 	fprintf(ctx->fp, "Blobstore Type: %.*s\n", SPDK_BLOBSTORE_TYPE_LENGTH, ctx->super->bstype.bstype);
4435 	fprintf(ctx->fp, "Cluster Size: %" PRIu32 "\n", ctx->super->cluster_size);
4436 	fprintf(ctx->fp, "Super Blob ID: ");
4437 	if (ctx->super->super_blob == SPDK_BLOBID_INVALID) {
4438 		fprintf(ctx->fp, "(None)\n");
4439 	} else {
4440 		fprintf(ctx->fp, "%" PRIu64 "\n", ctx->super->super_blob);
4441 	}
4442 	fprintf(ctx->fp, "Clean: %" PRIu32 "\n", ctx->super->clean);
4443 	fprintf(ctx->fp, "Used Metadata Page Mask Start: %" PRIu32 "\n", ctx->super->used_page_mask_start);
4444 	fprintf(ctx->fp, "Used Metadata Page Mask Length: %" PRIu32 "\n", ctx->super->used_page_mask_len);
4445 	fprintf(ctx->fp, "Used Cluster Mask Start: %" PRIu32 "\n", ctx->super->used_cluster_mask_start);
4446 	fprintf(ctx->fp, "Used Cluster Mask Length: %" PRIu32 "\n", ctx->super->used_cluster_mask_len);
4447 	fprintf(ctx->fp, "Used Blob ID Mask Start: %" PRIu32 "\n", ctx->super->used_blobid_mask_start);
4448 	fprintf(ctx->fp, "Used Blob ID Mask Length: %" PRIu32 "\n", ctx->super->used_blobid_mask_len);
4449 	fprintf(ctx->fp, "Metadata Start: %" PRIu32 "\n", ctx->super->md_start);
4450 	fprintf(ctx->fp, "Metadata Length: %" PRIu32 "\n", ctx->super->md_len);
4451 
4452 	ctx->cur_page = 0;
4453 	ctx->page = spdk_zmalloc(SPDK_BS_PAGE_SIZE, SPDK_BS_PAGE_SIZE,
4454 				 NULL, SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
4455 	if (!ctx->page) {
4456 		_spdk_bs_dump_finish(seq, ctx, -ENOMEM);
4457 		return;
4458 	}
4459 	_spdk_bs_dump_read_md_page(seq, ctx);
4460 }
4461 
4462 void
4463 spdk_bs_dump(struct spdk_bs_dev *dev, FILE *fp, spdk_bs_dump_print_xattr print_xattr_fn,
4464 	     spdk_bs_op_complete cb_fn, void *cb_arg)
4465 {
4466 	struct spdk_blob_store	*bs;
4467 	struct spdk_bs_cpl	cpl;
4468 	spdk_bs_sequence_t	*seq;
4469 	struct spdk_bs_dump_ctx *ctx;
4470 	struct spdk_bs_opts	opts = {};
4471 	int err;
4472 
4473 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Dumping blobstore from dev %p\n", dev);
4474 
4475 	spdk_bs_opts_init(&opts);
4476 
4477 	err = _spdk_bs_alloc(dev, &opts, &bs);
4478 	if (err) {
4479 		dev->destroy(dev);
4480 		cb_fn(cb_arg, err);
4481 		return;
4482 	}
4483 
4484 	ctx = calloc(1, sizeof(*ctx));
4485 	if (!ctx) {
4486 		_spdk_bs_free(bs);
4487 		cb_fn(cb_arg, -ENOMEM);
4488 		return;
4489 	}
4490 
4491 	ctx->bs = bs;
4492 	ctx->fp = fp;
4493 	ctx->print_xattr_fn = print_xattr_fn;
4494 
4495 	/* Allocate memory for the super block */
4496 	ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL,
4497 				  SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
4498 	if (!ctx->super) {
4499 		free(ctx);
4500 		_spdk_bs_free(bs);
4501 		cb_fn(cb_arg, -ENOMEM);
4502 		return;
4503 	}
4504 
4505 	cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
4506 	cpl.u.bs_basic.cb_fn = cb_fn;
4507 	cpl.u.bs_basic.cb_arg = cb_arg;
4508 
4509 	seq = bs_sequence_start(bs->md_channel, &cpl);
4510 	if (!seq) {
4511 		spdk_free(ctx->super);
4512 		free(ctx);
4513 		_spdk_bs_free(bs);
4514 		cb_fn(cb_arg, -ENOMEM);
4515 		return;
4516 	}
4517 
4518 	/* Read the super block */
4519 	bs_sequence_read_dev(seq, ctx->super, _spdk_bs_page_to_lba(bs, 0),
4520 			     _spdk_bs_byte_to_lba(bs, sizeof(*ctx->super)),
4521 			     _spdk_bs_dump_super_cpl, ctx);
4522 }
4523 
4524 /* END spdk_bs_dump */
4525 
4526 /* START spdk_bs_init */
4527 
4528 struct spdk_bs_init_ctx {
4529 	struct spdk_blob_store		*bs;
4530 	struct spdk_bs_super_block	*super;
4531 };
4532 
4533 static void
4534 _spdk_bs_init_persist_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4535 {
4536 	struct spdk_bs_init_ctx *ctx = cb_arg;
4537 
4538 	spdk_free(ctx->super);
4539 	free(ctx);
4540 
4541 	bs_sequence_finish(seq, bserrno);
4542 }
4543 
4544 static void
4545 _spdk_bs_init_trim_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4546 {
4547 	struct spdk_bs_init_ctx *ctx = cb_arg;
4548 
4549 	/* Write super block */
4550 	bs_sequence_write_dev(seq, ctx->super, _spdk_bs_page_to_lba(ctx->bs, 0),
4551 			      _spdk_bs_byte_to_lba(ctx->bs, sizeof(*ctx->super)),
4552 			      _spdk_bs_init_persist_super_cpl, ctx);
4553 }
4554 
4555 void
4556 spdk_bs_init(struct spdk_bs_dev *dev, struct spdk_bs_opts *o,
4557 	     spdk_bs_op_with_handle_complete cb_fn, void *cb_arg)
4558 {
4559 	struct spdk_bs_init_ctx *ctx;
4560 	struct spdk_blob_store	*bs;
4561 	struct spdk_bs_cpl	cpl;
4562 	spdk_bs_sequence_t	*seq;
4563 	spdk_bs_batch_t		*batch;
4564 	uint64_t		num_md_lba;
4565 	uint64_t		num_md_pages;
4566 	uint64_t		num_md_clusters;
4567 	uint32_t		i;
4568 	struct spdk_bs_opts	opts = {};
4569 	int			rc;
4570 
4571 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Initializing blobstore on dev %p\n", dev);
4572 
4573 	if ((SPDK_BS_PAGE_SIZE % dev->blocklen) != 0) {
4574 		SPDK_ERRLOG("unsupported dev block length of %d\n",
4575 			    dev->blocklen);
4576 		dev->destroy(dev);
4577 		cb_fn(cb_arg, NULL, -EINVAL);
4578 		return;
4579 	}
4580 
4581 	if (o) {
4582 		opts = *o;
4583 	} else {
4584 		spdk_bs_opts_init(&opts);
4585 	}
4586 
4587 	if (_spdk_bs_opts_verify(&opts) != 0) {
4588 		dev->destroy(dev);
4589 		cb_fn(cb_arg, NULL, -EINVAL);
4590 		return;
4591 	}
4592 
4593 	rc = _spdk_bs_alloc(dev, &opts, &bs);
4594 	if (rc) {
4595 		dev->destroy(dev);
4596 		cb_fn(cb_arg, NULL, rc);
4597 		return;
4598 	}
4599 
4600 	if (opts.num_md_pages == SPDK_BLOB_OPTS_NUM_MD_PAGES) {
4601 		/* By default, allocate 1 page per cluster.
4602 		 * Technically, this over-allocates metadata
4603 		 * because more metadata will reduce the number
4604 		 * of usable clusters. This can be addressed with
4605 		 * more complex math in the future.
4606 		 */
4607 		bs->md_len = bs->total_clusters;
4608 	} else {
4609 		bs->md_len = opts.num_md_pages;
4610 	}
4611 	rc = spdk_bit_array_resize(&bs->used_md_pages, bs->md_len);
4612 	if (rc < 0) {
4613 		_spdk_bs_free(bs);
4614 		cb_fn(cb_arg, NULL, -ENOMEM);
4615 		return;
4616 	}
4617 
4618 	rc = spdk_bit_array_resize(&bs->used_blobids, bs->md_len);
4619 	if (rc < 0) {
4620 		_spdk_bs_free(bs);
4621 		cb_fn(cb_arg, NULL, -ENOMEM);
4622 		return;
4623 	}
4624 
4625 	ctx = calloc(1, sizeof(*ctx));
4626 	if (!ctx) {
4627 		_spdk_bs_free(bs);
4628 		cb_fn(cb_arg, NULL, -ENOMEM);
4629 		return;
4630 	}
4631 
4632 	ctx->bs = bs;
4633 
4634 	/* Allocate memory for the super block */
4635 	ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL,
4636 				  SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
4637 	if (!ctx->super) {
4638 		free(ctx);
4639 		_spdk_bs_free(bs);
4640 		cb_fn(cb_arg, NULL, -ENOMEM);
4641 		return;
4642 	}
4643 	memcpy(ctx->super->signature, SPDK_BS_SUPER_BLOCK_SIG,
4644 	       sizeof(ctx->super->signature));
4645 	ctx->super->version = SPDK_BS_VERSION;
4646 	ctx->super->length = sizeof(*ctx->super);
4647 	ctx->super->super_blob = bs->super_blob;
4648 	ctx->super->clean = 0;
4649 	ctx->super->cluster_size = bs->cluster_sz;
4650 	ctx->super->io_unit_size = bs->io_unit_size;
4651 	memcpy(&ctx->super->bstype, &bs->bstype, sizeof(bs->bstype));
4652 
4653 	/* Calculate how many pages the metadata consumes at the front
4654 	 * of the disk.
4655 	 */
4656 
4657 	/* The super block uses 1 page */
4658 	num_md_pages = 1;
4659 
4660 	/* The used_md_pages mask requires 1 bit per metadata page, rounded
4661 	 * up to the nearest page, plus a header.
4662 	 */
4663 	ctx->super->used_page_mask_start = num_md_pages;
4664 	ctx->super->used_page_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) +
4665 					 spdk_divide_round_up(bs->md_len, 8),
4666 					 SPDK_BS_PAGE_SIZE);
4667 	num_md_pages += ctx->super->used_page_mask_len;
4668 
4669 	/* The used_clusters mask requires 1 bit per cluster, rounded
4670 	 * up to the nearest page, plus a header.
4671 	 */
4672 	ctx->super->used_cluster_mask_start = num_md_pages;
4673 	ctx->super->used_cluster_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) +
4674 					    spdk_divide_round_up(bs->total_clusters, 8),
4675 					    SPDK_BS_PAGE_SIZE);
4676 	num_md_pages += ctx->super->used_cluster_mask_len;
4677 
4678 	/* The used_blobids mask requires 1 bit per metadata page, rounded
4679 	 * up to the nearest page, plus a header.
4680 	 */
4681 	ctx->super->used_blobid_mask_start = num_md_pages;
4682 	ctx->super->used_blobid_mask_len = spdk_divide_round_up(sizeof(struct spdk_bs_md_mask) +
4683 					   spdk_divide_round_up(bs->md_len, 8),
4684 					   SPDK_BS_PAGE_SIZE);
4685 	num_md_pages += ctx->super->used_blobid_mask_len;
4686 
4687 	/* The metadata region size was chosen above */
4688 	ctx->super->md_start = bs->md_start = num_md_pages;
4689 	ctx->super->md_len = bs->md_len;
4690 	num_md_pages += bs->md_len;
4691 
4692 	num_md_lba = _spdk_bs_page_to_lba(bs, num_md_pages);
4693 
4694 	ctx->super->size = dev->blockcnt * dev->blocklen;
4695 
4696 	ctx->super->crc = _spdk_blob_md_page_calc_crc(ctx->super);
4697 
4698 	num_md_clusters = spdk_divide_round_up(num_md_pages, bs->pages_per_cluster);
4699 	if (num_md_clusters > bs->total_clusters) {
4700 		SPDK_ERRLOG("Blobstore metadata cannot use more clusters than are available, "
4701 			    "please decrease the number of pages reserved for metadata "
4702 			    "or increase the cluster size.\n");
4703 		spdk_free(ctx->super);
4704 		free(ctx);
4705 		_spdk_bs_free(bs);
4706 		cb_fn(cb_arg, NULL, -ENOMEM);
4707 		return;
4708 	}
4709 	/* Claim all of the clusters used by the metadata */
4710 	for (i = 0; i < num_md_clusters; i++) {
4711 		_spdk_bs_claim_cluster(bs, i);
4712 	}
4713 
4714 	bs->total_data_clusters = bs->num_free_clusters;
4715 
4716 	cpl.type = SPDK_BS_CPL_TYPE_BS_HANDLE;
4717 	cpl.u.bs_handle.cb_fn = cb_fn;
4718 	cpl.u.bs_handle.cb_arg = cb_arg;
4719 	cpl.u.bs_handle.bs = bs;
4720 
4721 	seq = bs_sequence_start(bs->md_channel, &cpl);
4722 	if (!seq) {
4723 		spdk_free(ctx->super);
4724 		free(ctx);
4725 		_spdk_bs_free(bs);
4726 		cb_fn(cb_arg, NULL, -ENOMEM);
4727 		return;
4728 	}
4729 
4730 	batch = bs_sequence_to_batch(seq, _spdk_bs_init_trim_cpl, ctx);
4731 
4732 	/* Clear metadata space */
4733 	bs_batch_write_zeroes_dev(batch, 0, num_md_lba);
4734 
4735 	switch (opts.clear_method) {
4736 	case BS_CLEAR_WITH_UNMAP:
4737 		/* Trim data clusters */
4738 		bs_batch_unmap_dev(batch, num_md_lba, ctx->bs->dev->blockcnt - num_md_lba);
4739 		break;
4740 	case BS_CLEAR_WITH_WRITE_ZEROES:
4741 		/* Write_zeroes to data clusters */
4742 		bs_batch_write_zeroes_dev(batch, num_md_lba, ctx->bs->dev->blockcnt - num_md_lba);
4743 		break;
4744 	case BS_CLEAR_WITH_NONE:
4745 	default:
4746 		break;
4747 	}
4748 
4749 	bs_batch_close(batch);
4750 }
4751 
4752 /* END spdk_bs_init */
4753 
4754 /* START spdk_bs_destroy */
4755 
4756 static void
4757 _spdk_bs_destroy_trim_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4758 {
4759 	struct spdk_bs_init_ctx *ctx = cb_arg;
4760 	struct spdk_blob_store *bs = ctx->bs;
4761 
4762 	/*
4763 	 * We need to defer calling bs_call_cpl() until after
4764 	 * dev destruction, so tuck these away for later use.
4765 	 */
4766 	bs->unload_err = bserrno;
4767 	memcpy(&bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl));
4768 	seq->cpl.type = SPDK_BS_CPL_TYPE_NONE;
4769 
4770 	bs_sequence_finish(seq, bserrno);
4771 
4772 	_spdk_bs_free(bs);
4773 	free(ctx);
4774 }
4775 
4776 void
4777 spdk_bs_destroy(struct spdk_blob_store *bs, spdk_bs_op_complete cb_fn,
4778 		void *cb_arg)
4779 {
4780 	struct spdk_bs_cpl	cpl;
4781 	spdk_bs_sequence_t	*seq;
4782 	struct spdk_bs_init_ctx *ctx;
4783 
4784 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Destroying blobstore\n");
4785 
4786 	if (!TAILQ_EMPTY(&bs->blobs)) {
4787 		SPDK_ERRLOG("Blobstore still has open blobs\n");
4788 		cb_fn(cb_arg, -EBUSY);
4789 		return;
4790 	}
4791 
4792 	cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
4793 	cpl.u.bs_basic.cb_fn = cb_fn;
4794 	cpl.u.bs_basic.cb_arg = cb_arg;
4795 
4796 	ctx = calloc(1, sizeof(*ctx));
4797 	if (!ctx) {
4798 		cb_fn(cb_arg, -ENOMEM);
4799 		return;
4800 	}
4801 
4802 	ctx->bs = bs;
4803 
4804 	seq = bs_sequence_start(bs->md_channel, &cpl);
4805 	if (!seq) {
4806 		free(ctx);
4807 		cb_fn(cb_arg, -ENOMEM);
4808 		return;
4809 	}
4810 
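	/* Only the super block is zeroed; without a valid super block the
	 * device can no longer be loaded as a blobstore, so the remaining
	 * metadata and data are effectively orphaned. */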
4811 	/* Write zeroes to the super block */
4812 	bs_sequence_write_zeroes_dev(seq,
4813 				     _spdk_bs_page_to_lba(bs, 0),
4814 				     _spdk_bs_byte_to_lba(bs, sizeof(struct spdk_bs_super_block)),
4815 				     _spdk_bs_destroy_trim_cpl, ctx);
4816 }
4817 
4818 /* END spdk_bs_destroy */
4819 
4820 /* START spdk_bs_unload */
4821 
4822 static void
4823 _spdk_bs_unload_finish(struct spdk_bs_load_ctx *ctx, int bserrno)
4824 {
4825 	spdk_bs_sequence_t *seq = ctx->seq;
4826 
4827 	spdk_free(ctx->super);
4828 
4829 	/*
4830 	 * We need to defer calling bs_call_cpl() until after
4831 	 * dev destruction, so tuck these away for later use.
4832 	 */
4833 	ctx->bs->unload_err = bserrno;
4834 	memcpy(&ctx->bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl));
4835 	seq->cpl.type = SPDK_BS_CPL_TYPE_NONE;
4836 
4837 	bs_sequence_finish(seq, bserrno);
4838 
4839 	_spdk_bs_free(ctx->bs);
4840 	free(ctx);
4841 }
4842 
4843 static void
4844 _spdk_bs_unload_write_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4845 {
4846 	struct spdk_bs_load_ctx	*ctx = cb_arg;
4847 
4848 	_spdk_bs_unload_finish(ctx, bserrno);
4849 }
4850 
4851 static void
4852 _spdk_bs_unload_write_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4853 {
4854 	struct spdk_bs_load_ctx	*ctx = cb_arg;
4855 
4856 	spdk_free(ctx->mask);
4857 
4858 	if (bserrno != 0) {
4859 		_spdk_bs_unload_finish(ctx, bserrno);
4860 		return;
4861 	}
4862 
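	/* All masks have been persisted - mark the superblock clean so the next
	 * load can trust them and skip metadata replay. */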
4863 	ctx->super->clean = 1;
4864 
4865 	_spdk_bs_write_super(seq, ctx->bs, ctx->super, _spdk_bs_unload_write_super_cpl, ctx);
4866 }
4867 
4868 static void
4869 _spdk_bs_unload_write_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4870 {
4871 	struct spdk_bs_load_ctx	*ctx = cb_arg;
4872 
4873 	spdk_free(ctx->mask);
4874 	ctx->mask = NULL;
4875 
4876 	if (bserrno != 0) {
4877 		_spdk_bs_unload_finish(ctx, bserrno);
4878 		return;
4879 	}
4880 
4881 	_spdk_bs_write_used_clusters(seq, ctx, _spdk_bs_unload_write_used_clusters_cpl);
4882 }
4883 
4884 static void
4885 _spdk_bs_unload_write_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4886 {
4887 	struct spdk_bs_load_ctx	*ctx = cb_arg;
4888 
4889 	spdk_free(ctx->mask);
4890 	ctx->mask = NULL;
4891 
4892 	if (bserrno != 0) {
4893 		_spdk_bs_unload_finish(ctx, bserrno);
4894 		return;
4895 	}
4896 
4897 	_spdk_bs_write_used_blobids(seq, ctx, _spdk_bs_unload_write_used_blobids_cpl);
4898 }
4899 
4900 static void
4901 _spdk_bs_unload_read_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4902 {
4903 	struct spdk_bs_load_ctx	*ctx = cb_arg;
4904 
4905 	if (bserrno != 0) {
4906 		_spdk_bs_unload_finish(ctx, bserrno);
4907 		return;
4908 	}
4909 
4910 	_spdk_bs_write_used_md(seq, cb_arg, _spdk_bs_unload_write_used_pages_cpl);
4911 }
4912 
4913 void
4914 spdk_bs_unload(struct spdk_blob_store *bs, spdk_bs_op_complete cb_fn, void *cb_arg)
4915 {
4916 	struct spdk_bs_cpl	cpl;
4917 	struct spdk_bs_load_ctx *ctx;
4918 
4919 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Syncing blobstore\n");
4920 
4921 	if (!TAILQ_EMPTY(&bs->blobs)) {
4922 		SPDK_ERRLOG("Blobstore still has open blobs\n");
4923 		cb_fn(cb_arg, -EBUSY);
4924 		return;
4925 	}
4926 
4927 	ctx = calloc(1, sizeof(*ctx));
4928 	if (!ctx) {
4929 		cb_fn(cb_arg, -ENOMEM);
4930 		return;
4931 	}
4932 
4933 	ctx->bs = bs;
4934 
4935 	ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL,
4936 				  SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
4937 	if (!ctx->super) {
4938 		free(ctx);
4939 		cb_fn(cb_arg, -ENOMEM);
4940 		return;
4941 	}
4942 
4943 	cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
4944 	cpl.u.bs_basic.cb_fn = cb_fn;
4945 	cpl.u.bs_basic.cb_arg = cb_arg;
4946 
4947 	ctx->seq = bs_sequence_start(bs->md_channel, &cpl);
4948 	if (!ctx->seq) {
4949 		spdk_free(ctx->super);
4950 		free(ctx);
4951 		cb_fn(cb_arg, -ENOMEM);
4952 		return;
4953 	}
4954 
4955 	/* Read super block */
4956 	bs_sequence_read_dev(ctx->seq, ctx->super, _spdk_bs_page_to_lba(bs, 0),
4957 			     _spdk_bs_byte_to_lba(bs, sizeof(*ctx->super)),
4958 			     _spdk_bs_unload_read_super_cpl, ctx);
4959 }
4960 
4961 /* END spdk_bs_unload */
4962 
4963 /* START spdk_bs_set_super */
4964 
4965 struct spdk_bs_set_super_ctx {
4966 	struct spdk_blob_store		*bs;
4967 	struct spdk_bs_super_block	*super;
4968 };
4969 
4970 static void
4971 _spdk_bs_set_super_write_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4972 {
4973 	struct spdk_bs_set_super_ctx	*ctx = cb_arg;
4974 
4975 	if (bserrno != 0) {
4976 		SPDK_ERRLOG("Unable to write to super block of blobstore\n");
4977 	}
4978 
4979 	spdk_free(ctx->super);
4980 
4981 	bs_sequence_finish(seq, bserrno);
4982 
4983 	free(ctx);
4984 }
4985 
4986 static void
4987 _spdk_bs_set_super_read_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
4988 {
4989 	struct spdk_bs_set_super_ctx	*ctx = cb_arg;
4990 
4991 	if (bserrno != 0) {
4992 		SPDK_ERRLOG("Unable to read super block of blobstore\n");
4993 		spdk_free(ctx->super);
4994 		bs_sequence_finish(seq, bserrno);
4995 		free(ctx);
4996 		return;
4997 	}
4998 
4999 	_spdk_bs_write_super(seq, ctx->bs, ctx->super, _spdk_bs_set_super_write_cpl, ctx);
5000 }
5001 
5002 void
5003 spdk_bs_set_super(struct spdk_blob_store *bs, spdk_blob_id blobid,
5004 		  spdk_bs_op_complete cb_fn, void *cb_arg)
5005 {
5006 	struct spdk_bs_cpl		cpl;
5007 	spdk_bs_sequence_t		*seq;
5008 	struct spdk_bs_set_super_ctx	*ctx;
5009 
5010 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Setting super blob id on blobstore\n");
5011 
5012 	ctx = calloc(1, sizeof(*ctx));
5013 	if (!ctx) {
5014 		cb_fn(cb_arg, -ENOMEM);
5015 		return;
5016 	}
5017 
5018 	ctx->bs = bs;
5019 
5020 	ctx->super = spdk_zmalloc(sizeof(*ctx->super), 0x1000, NULL,
5021 				  SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
5022 	if (!ctx->super) {
5023 		free(ctx);
5024 		cb_fn(cb_arg, -ENOMEM);
5025 		return;
5026 	}
5027 
5028 	cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
5029 	cpl.u.bs_basic.cb_fn = cb_fn;
5030 	cpl.u.bs_basic.cb_arg = cb_arg;
5031 
5032 	seq = bs_sequence_start(bs->md_channel, &cpl);
5033 	if (!seq) {
5034 		spdk_free(ctx->super);
5035 		free(ctx);
5036 		cb_fn(cb_arg, -ENOMEM);
5037 		return;
5038 	}
5039 
5040 	bs->super_blob = blobid;
5041 
5042 	/* Read super block */
5043 	bs_sequence_read_dev(seq, ctx->super, _spdk_bs_page_to_lba(bs, 0),
5044 			     _spdk_bs_byte_to_lba(bs, sizeof(*ctx->super)),
5045 			     _spdk_bs_set_super_read_cpl, ctx);
5046 }
5047 
5048 /* END spdk_bs_set_super */
5049 
5050 void
5051 spdk_bs_get_super(struct spdk_blob_store *bs,
5052 		  spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
5053 {
5054 	if (bs->super_blob == SPDK_BLOBID_INVALID) {
5055 		cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOENT);
5056 	} else {
5057 		cb_fn(cb_arg, bs->super_blob, 0);
5058 	}
5059 }
5060 
5061 uint64_t
5062 spdk_bs_get_cluster_size(struct spdk_blob_store *bs)
5063 {
5064 	return bs->cluster_sz;
5065 }
5066 
5067 uint64_t
5068 spdk_bs_get_page_size(struct spdk_blob_store *bs)
5069 {
5070 	return SPDK_BS_PAGE_SIZE;
5071 }
5072 
5073 uint64_t
5074 spdk_bs_get_io_unit_size(struct spdk_blob_store *bs)
5075 {
5076 	return bs->io_unit_size;
5077 }
5078 
5079 uint64_t
5080 spdk_bs_free_cluster_count(struct spdk_blob_store *bs)
5081 {
5082 	return bs->num_free_clusters;
5083 }
5084 
5085 uint64_t
5086 spdk_bs_total_data_cluster_count(struct spdk_blob_store *bs)
5087 {
5088 	return bs->total_data_clusters;
5089 }
5090 
5091 static int
5092 spdk_bs_register_md_thread(struct spdk_blob_store *bs)
5093 {
5094 	bs->md_channel = spdk_get_io_channel(bs);
5095 	if (!bs->md_channel) {
5096 		SPDK_ERRLOG("Failed to get IO channel.\n");
5097 		return -1;
5098 	}
5099 
5100 	return 0;
5101 }
5102 
5103 static int
5104 spdk_bs_unregister_md_thread(struct spdk_blob_store *bs)
5105 {
5106 	spdk_put_io_channel(bs->md_channel);
5107 
5108 	return 0;
5109 }
5110 
5111 spdk_blob_id spdk_blob_get_id(struct spdk_blob *blob)
5112 {
5113 	assert(blob != NULL);
5114 
5115 	return blob->id;
5116 }
5117 
5118 uint64_t spdk_blob_get_num_pages(struct spdk_blob *blob)
5119 {
5120 	assert(blob != NULL);
5121 
5122 	return _spdk_bs_cluster_to_page(blob->bs, blob->active.num_clusters);
5123 }
5124 
5125 uint64_t spdk_blob_get_num_io_units(struct spdk_blob *blob)
5126 {
5127 	assert(blob != NULL);
5128 
5129 	return spdk_blob_get_num_pages(blob) * _spdk_bs_io_unit_per_page(blob->bs);
5130 }
5131 
5132 uint64_t spdk_blob_get_num_clusters(struct spdk_blob *blob)
5133 {
5134 	assert(blob != NULL);
5135 
5136 	return blob->active.num_clusters;
5137 }
5138 
5139 /* START spdk_bs_create_blob */
5140 
5141 static void
5142 _spdk_bs_create_blob_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
5143 {
5144 	struct spdk_blob *blob = cb_arg;
5145 	uint32_t page_idx = _spdk_bs_blobid_to_page(blob->id);
5146 
5147 	if (bserrno != 0) {
5148 		spdk_bit_array_clear(blob->bs->used_blobids, page_idx);
5149 		_spdk_bs_release_md_page(blob->bs, page_idx);
5150 	}
5151 
5152 	_spdk_blob_free(blob);
5153 
5154 	bs_sequence_finish(seq, bserrno);
5155 }
5156 
5157 static int
5158 _spdk_blob_set_xattrs(struct spdk_blob *blob, const struct spdk_blob_xattr_opts *xattrs,
5159 		      bool internal)
5160 {
5161 	uint64_t i;
5162 	size_t value_len = 0;
5163 	int rc;
5164 	const void *value = NULL;
5165 	if (xattrs->count > 0 && xattrs->get_value == NULL) {
5166 		return -EINVAL;
5167 	}
5168 	for (i = 0; i < xattrs->count; i++) {
5169 		xattrs->get_value(xattrs->ctx, xattrs->names[i], &value, &value_len);
5170 		if (value == NULL || value_len == 0) {
5171 			return -EINVAL;
5172 		}
5173 		rc = _spdk_blob_set_xattr(blob, xattrs->names[i], value, value_len, internal);
5174 		if (rc < 0) {
5175 			return rc;
5176 		}
5177 	}
5178 	return 0;
5179 }
5180 
5181 static void
5182 _spdk_bs_create_blob(struct spdk_blob_store *bs,
5183 		     const struct spdk_blob_opts *opts,
5184 		     const struct spdk_blob_xattr_opts *internal_xattrs,
5185 		     spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
5186 {
5187 	struct spdk_blob	*blob;
5188 	uint32_t		page_idx;
5189 	struct spdk_bs_cpl	cpl;
5190 	struct spdk_blob_opts	opts_default;
5191 	struct spdk_blob_xattr_opts internal_xattrs_default;
5192 	spdk_bs_sequence_t	*seq;
5193 	spdk_blob_id		id;
5194 	int rc;
5195 
5196 	assert(spdk_get_thread() == bs->md_thread);
5197 
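	/* A blob id maps directly to the index of the blob's first metadata
	 * page, so claiming the first free md page also reserves the id. */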
5198 	page_idx = spdk_bit_array_find_first_clear(bs->used_md_pages, 0);
5199 	if (page_idx == UINT32_MAX) {
5200 		cb_fn(cb_arg, 0, -ENOMEM);
5201 		return;
5202 	}
5203 	spdk_bit_array_set(bs->used_blobids, page_idx);
5204 	_spdk_bs_claim_md_page(bs, page_idx);
5205 
5206 	id = _spdk_bs_page_to_blobid(page_idx);
5207 
5208 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Creating blob with id %lu at page %u\n", id, page_idx);
5209 
5210 	blob = _spdk_blob_alloc(bs, id);
5211 	if (!blob) {
5212 		spdk_bit_array_clear(bs->used_blobids, page_idx);
5213 		_spdk_bs_release_md_page(bs, page_idx);
5214 		cb_fn(cb_arg, 0, -ENOMEM);
5215 		return;
5216 	}
5217 
5218 	if (!opts) {
5219 		spdk_blob_opts_init(&opts_default);
5220 		opts = &opts_default;
5221 	}
5222 
5223 	blob->use_extent_table = opts->use_extent_table;
5224 	if (blob->use_extent_table) {
5225 		blob->invalid_flags |= SPDK_BLOB_EXTENT_TABLE;
5226 	}
5227 
5228 	if (!internal_xattrs) {
5229 		_spdk_blob_xattrs_init(&internal_xattrs_default);
5230 		internal_xattrs = &internal_xattrs_default;
5231 	}
5232 
5233 	rc = _spdk_blob_set_xattrs(blob, &opts->xattrs, false);
5234 	if (rc < 0) {
5235 		_spdk_blob_free(blob);
5236 		spdk_bit_array_clear(bs->used_blobids, page_idx);
5237 		_spdk_bs_release_md_page(bs, page_idx);
5238 		cb_fn(cb_arg, 0, rc);
5239 		return;
5240 	}
5241 
5242 	rc = _spdk_blob_set_xattrs(blob, internal_xattrs, true);
5243 	if (rc < 0) {
5244 		_spdk_blob_free(blob);
5245 		spdk_bit_array_clear(bs->used_blobids, page_idx);
5246 		_spdk_bs_release_md_page(bs, page_idx);
5247 		cb_fn(cb_arg, 0, rc);
5248 		return;
5249 	}
5250 
5251 	if (opts->thin_provision) {
5252 		_spdk_blob_set_thin_provision(blob);
5253 	}
5254 
5255 	_spdk_blob_set_clear_method(blob, opts->clear_method);
5256 
5257 	rc = _spdk_blob_resize(blob, opts->num_clusters);
5258 	if (rc < 0) {
5259 		_spdk_blob_free(blob);
5260 		spdk_bit_array_clear(bs->used_blobids, page_idx);
5261 		_spdk_bs_release_md_page(bs, page_idx);
5262 		cb_fn(cb_arg, 0, rc);
5263 		return;
5264 	}
5265 	cpl.type = SPDK_BS_CPL_TYPE_BLOBID;
5266 	cpl.u.blobid.cb_fn = cb_fn;
5267 	cpl.u.blobid.cb_arg = cb_arg;
5268 	cpl.u.blobid.blobid = blob->id;
5269 
5270 	seq = bs_sequence_start(bs->md_channel, &cpl);
5271 	if (!seq) {
5272 		_spdk_blob_free(blob);
5273 		spdk_bit_array_clear(bs->used_blobids, page_idx);
5274 		_spdk_bs_release_md_page(bs, page_idx);
5275 		cb_fn(cb_arg, 0, -ENOMEM);
5276 		return;
5277 	}
5278 
5279 	_spdk_blob_persist(seq, blob, _spdk_bs_create_blob_cpl, blob);
5280 }
5281 
5282 void spdk_bs_create_blob(struct spdk_blob_store *bs,
5283 			 spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
5284 {
5285 	_spdk_bs_create_blob(bs, NULL, NULL, cb_fn, cb_arg);
5286 }
5287 
5288 void spdk_bs_create_blob_ext(struct spdk_blob_store *bs, const struct spdk_blob_opts *opts,
5289 			     spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
5290 {
5291 	_spdk_bs_create_blob(bs, opts, NULL, cb_fn, cb_arg);
5292 }
5293 
5294 /* END spdk_bs_create_blob */
5295 
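/*
 * Minimal usage sketch for blob creation (the callback name and the
 * cluster count are hypothetical). Must be called from the blobstore's
 * metadata thread:
 *
 *	static void
 *	create_done(void *cb_arg, spdk_blob_id blobid, int bserrno)
 *	{
 *		// on success, blobid identifies the new blob
 *	}
 *
 *	struct spdk_blob_opts opts;
 *
 *	spdk_blob_opts_init(&opts);
 *	opts.num_clusters = 16;
 *	opts.thin_provision = true;
 *	spdk_bs_create_blob_ext(bs, &opts, create_done, NULL);
 */
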
5296 /* START blob_cleanup */
5297 
5298 struct spdk_clone_snapshot_ctx {
5299 	struct spdk_bs_cpl      cpl;
5300 	int bserrno;
5301 	bool frozen;
5302 
5303 	struct spdk_io_channel *channel;
5304 
5305 	/* Current cluster for inflate operation */
5306 	uint64_t cluster;
5307 
5308 	/* For inflation, force allocation of all unallocated clusters and remove
5309 	 * thin-provisioning. Otherwise, only decouple the parent and keep the clone thin. */
5310 	bool allocate_all;
5311 
5312 	struct {
5313 		spdk_blob_id id;
5314 		struct spdk_blob *blob;
5315 	} original;
5316 	struct {
5317 		spdk_blob_id id;
5318 		struct spdk_blob *blob;
5319 	} new;
5320 
5321 	/* xattrs specified for snapshot/clones only. They have no impact on
5322 	 * the original blob's xattrs. */
5323 	const struct spdk_blob_xattr_opts *xattrs;
5324 };
5325 
5326 static void
5327 _spdk_bs_clone_snapshot_cleanup_finish(void *cb_arg, int bserrno)
5328 {
5329 	struct spdk_clone_snapshot_ctx *ctx = cb_arg;
5330 	struct spdk_bs_cpl *cpl = &ctx->cpl;
5331 
5332 	if (bserrno != 0) {
5333 		if (ctx->bserrno != 0) {
5334 			SPDK_ERRLOG("Cleanup error %d\n", bserrno);
5335 		} else {
5336 			ctx->bserrno = bserrno;
5337 		}
5338 	}
5339 
5340 	switch (cpl->type) {
5341 	case SPDK_BS_CPL_TYPE_BLOBID:
5342 		cpl->u.blobid.cb_fn(cpl->u.blobid.cb_arg, cpl->u.blobid.blobid, ctx->bserrno);
5343 		break;
5344 	case SPDK_BS_CPL_TYPE_BLOB_BASIC:
5345 		cpl->u.blob_basic.cb_fn(cpl->u.blob_basic.cb_arg, ctx->bserrno);
5346 		break;
5347 	default:
5348 		SPDK_UNREACHABLE();
5349 		break;
5350 	}
5351 
5352 	free(ctx);
5353 }
5354 
5355 static void
5356 _spdk_bs_snapshot_unfreeze_cpl(void *cb_arg, int bserrno)
5357 {
5358 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
5359 	struct spdk_blob *origblob = ctx->original.blob;
5360 
5361 	if (bserrno != 0) {
5362 		if (ctx->bserrno != 0) {
5363 			SPDK_ERRLOG("Unfreeze error %d\n", bserrno);
5364 		} else {
5365 			ctx->bserrno = bserrno;
5366 		}
5367 	}
5368 
5369 	ctx->original.id = origblob->id;
5370 	origblob->locked_operation_in_progress = false;
5371 
5372 	spdk_blob_close(origblob, _spdk_bs_clone_snapshot_cleanup_finish, ctx);
5373 }
5374 
5375 static void
5376 _spdk_bs_clone_snapshot_origblob_cleanup(void *cb_arg, int bserrno)
5377 {
5378 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
5379 	struct spdk_blob *origblob = ctx->original.blob;
5380 
5381 	if (bserrno != 0) {
5382 		if (ctx->bserrno != 0) {
5383 			SPDK_ERRLOG("Cleanup error %d\n", bserrno);
5384 		} else {
5385 			ctx->bserrno = bserrno;
5386 		}
5387 	}
5388 
5389 	if (ctx->frozen) {
5390 		/* Unfreeze any outstanding I/O */
5391 		_spdk_blob_unfreeze_io(origblob, _spdk_bs_snapshot_unfreeze_cpl, ctx);
5392 	} else {
5393 		_spdk_bs_snapshot_unfreeze_cpl(ctx, 0);
5394 	}
5395 
5396 }
5397 
5398 static void
5399 _spdk_bs_clone_snapshot_newblob_cleanup(void *cb_arg, int bserrno)
5400 {
5401 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
5402 	struct spdk_blob *newblob = ctx->new.blob;
5403 
5404 	if (bserrno != 0) {
5405 		if (ctx->bserrno != 0) {
5406 			SPDK_ERRLOG("Cleanup error %d\n", bserrno);
5407 		} else {
5408 			ctx->bserrno = bserrno;
5409 		}
5410 	}
5411 
5412 	ctx->new.id = newblob->id;
5413 	spdk_blob_close(newblob, _spdk_bs_clone_snapshot_origblob_cleanup, ctx);
5414 }
5415 
5416 /* END blob_cleanup */
5417 
5418 /* START spdk_bs_create_snapshot */
5419 
5420 static void
5421 _spdk_bs_snapshot_swap_cluster_maps(struct spdk_blob *blob1, struct spdk_blob *blob2)
5422 {
5423 	uint64_t *cluster_temp;
5424 	uint32_t *extent_page_temp;
5425 
5426 	cluster_temp = blob1->active.clusters;
5427 	blob1->active.clusters = blob2->active.clusters;
5428 	blob2->active.clusters = cluster_temp;
5429 
5430 	extent_page_temp = blob1->active.extent_pages;
5431 	blob1->active.extent_pages = blob2->active.extent_pages;
5432 	blob2->active.extent_pages = extent_page_temp;
5433 }
5434 
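/*
 * Snapshot creation runs as a chain of completions: the new blob is
 * created and opened, I/O on the original blob is frozen, the cluster
 * maps of the two blobs are swapped (see above), the snapshot metadata
 * is synced, the original blob is re-parented onto the snapshot and
 * synced, and finally I/O is unfrozen and both blobs are closed.
 */
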
5435 static void
5436 _spdk_bs_snapshot_origblob_sync_cpl(void *cb_arg, int bserrno)
5437 {
5438 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
5439 	struct spdk_blob *origblob = ctx->original.blob;
5440 	struct spdk_blob *newblob = ctx->new.blob;
5441 
5442 	if (bserrno != 0) {
5443 		_spdk_bs_snapshot_swap_cluster_maps(newblob, origblob);
5444 		_spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
5445 		return;
5446 	}
5447 
5448 	/* Remove metadata descriptor SNAPSHOT_IN_PROGRESS */
5449 	bserrno = _spdk_blob_remove_xattr(newblob, SNAPSHOT_IN_PROGRESS, true);
5450 	if (bserrno != 0) {
5451 		_spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
5452 		return;
5453 	}
5454 
5455 	_spdk_bs_blob_list_add(ctx->original.blob);
5456 
5457 	spdk_blob_set_read_only(newblob);
5458 
5459 	/* sync snapshot metadata */
5460 	spdk_blob_sync_md(newblob, _spdk_bs_clone_snapshot_origblob_cleanup, ctx);
5461 }
5462 
5463 static void
5464 _spdk_bs_snapshot_newblob_sync_cpl(void *cb_arg, int bserrno)
5465 {
5466 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
5467 	struct spdk_blob *origblob = ctx->original.blob;
5468 	struct spdk_blob *newblob = ctx->new.blob;
5469 
5470 	if (bserrno != 0) {
5471 		/* return cluster map back to original */
5472 		_spdk_bs_snapshot_swap_cluster_maps(newblob, origblob);
5473 
5474 		/* Newblob md sync failed. Valid clusters are only present in origblob.
5475 		 * Since I/O is frozen on origblob, no changes to the zeroed out cluster map should have occurred.
5476 		 * Newblob needs to be reverted to its thin provisioned state at creation to properly close. */
5477 		_spdk_blob_set_thin_provision(newblob);
5478 		assert(spdk_mem_all_zero(newblob->active.clusters,
5479 					 newblob->active.num_clusters * sizeof(*newblob->active.clusters)));
5480 		assert(spdk_mem_all_zero(newblob->active.extent_pages,
5481 					 newblob->active.num_extent_pages * sizeof(*newblob->active.extent_pages)));
5482 
5483 		_spdk_bs_clone_snapshot_newblob_cleanup(ctx, bserrno);
5484 		return;
5485 	}
5486 
5487 	/* Set internal xattr for snapshot id */
5488 	bserrno = _spdk_blob_set_xattr(origblob, BLOB_SNAPSHOT, &newblob->id, sizeof(spdk_blob_id), true);
5489 	if (bserrno != 0) {
5490 		/* return cluster map back to original */
5491 		_spdk_bs_snapshot_swap_cluster_maps(newblob, origblob);
5492 		_spdk_bs_clone_snapshot_newblob_cleanup(ctx, bserrno);
5493 		return;
5494 	}
5495 
5496 	_spdk_bs_blob_list_remove(origblob);
5497 	origblob->parent_id = newblob->id;
5498 
5499 	/* Create new back_bs_dev for snapshot */
5500 	origblob->back_bs_dev = bs_create_blob_bs_dev(newblob);
5501 	if (origblob->back_bs_dev == NULL) {
5502 		/* return cluster map back to original */
5503 		_spdk_bs_snapshot_swap_cluster_maps(newblob, origblob);
5504 		_spdk_bs_clone_snapshot_newblob_cleanup(ctx, -EINVAL);
5505 		return;
5506 	}
5507 
5508 	/* set clone blob as thin provisioned */
5509 	_spdk_blob_set_thin_provision(origblob);
5510 
5511 	_spdk_bs_blob_list_add(newblob);
5512 
5513 	/* sync clone metadata */
5514 	spdk_blob_sync_md(origblob, _spdk_bs_snapshot_origblob_sync_cpl, ctx);
5515 }
5516 
5517 static void
5518 _spdk_bs_snapshot_freeze_cpl(void *cb_arg, int rc)
5519 {
5520 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
5521 	struct spdk_blob *origblob = ctx->original.blob;
5522 	struct spdk_blob *newblob = ctx->new.blob;
5523 	int bserrno;
5524 
5525 	if (rc != 0) {
5526 		_spdk_bs_clone_snapshot_newblob_cleanup(ctx, rc);
5527 		return;
5528 	}
5529 
5530 	ctx->frozen = true;
5531 
5532 	/* set new back_bs_dev for snapshot */
5533 	newblob->back_bs_dev = origblob->back_bs_dev;
5534 	/* Set invalid flags from origblob */
5535 	newblob->invalid_flags = origblob->invalid_flags;
5536 
5537 	/* inherit parent from original blob if set */
5538 	newblob->parent_id = origblob->parent_id;
5539 	if (origblob->parent_id != SPDK_BLOBID_INVALID) {
5540 		/* Set internal xattr for snapshot id */
5541 		bserrno = _spdk_blob_set_xattr(newblob, BLOB_SNAPSHOT,
5542 					       &origblob->parent_id, sizeof(spdk_blob_id), true);
5543 		if (bserrno != 0) {
5544 			_spdk_bs_clone_snapshot_newblob_cleanup(ctx, bserrno);
5545 			return;
5546 		}
5547 	}
5548 
5549 	/* swap cluster maps */
5550 	_spdk_bs_snapshot_swap_cluster_maps(newblob, origblob);
5551 
5552 	/* Set the clear method on the new blob to match the original. */
5553 	_spdk_blob_set_clear_method(newblob, origblob->clear_method);
5554 
5555 	/* sync snapshot metadata */
5556 	spdk_blob_sync_md(newblob, _spdk_bs_snapshot_newblob_sync_cpl, ctx);
5557 }
5558 
5559 static void
5560 _spdk_bs_snapshot_newblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
5561 {
5562 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
5563 	struct spdk_blob *origblob = ctx->original.blob;
5564 	struct spdk_blob *newblob = _blob;
5565 
5566 	if (bserrno != 0) {
5567 		_spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
5568 		return;
5569 	}
5570 
5571 	ctx->new.blob = newblob;
5572 	assert(spdk_blob_is_thin_provisioned(newblob));
5573 	assert(spdk_mem_all_zero(newblob->active.clusters,
5574 				 newblob->active.num_clusters * sizeof(*newblob->active.clusters)));
5575 	assert(spdk_mem_all_zero(newblob->active.extent_pages,
5576 				 newblob->active.num_extent_pages * sizeof(*newblob->active.extent_pages)));
5577 
5578 	_spdk_blob_freeze_io(origblob, _spdk_bs_snapshot_freeze_cpl, ctx);
5579 }
5580 
5581 static void
5582 _spdk_bs_snapshot_newblob_create_cpl(void *cb_arg, spdk_blob_id blobid, int bserrno)
5583 {
5584 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
5585 	struct spdk_blob *origblob = ctx->original.blob;
5586 
5587 	if (bserrno != 0) {
5588 		_spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
5589 		return;
5590 	}
5591 
5592 	ctx->new.id = blobid;
5593 	ctx->cpl.u.blobid.blobid = blobid;
5594 
5595 	spdk_bs_open_blob(origblob->bs, ctx->new.id, _spdk_bs_snapshot_newblob_open_cpl, ctx);
5596 }
5597 
5598 
5599 static void
5600 _spdk_bs_xattr_snapshot(void *arg, const char *name,
5601 			const void **value, size_t *value_len)
5602 {
5603 	struct spdk_blob *blob = (struct spdk_blob *)arg;
5604 
5605 	assert(strncmp(name, SNAPSHOT_IN_PROGRESS, sizeof(SNAPSHOT_IN_PROGRESS)) == 0);
5606 	*value = &blob->id;
5607 	*value_len = sizeof(blob->id);
5608 }
5609 
5610 static void
5611 _spdk_bs_snapshot_origblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
5612 {
5613 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
5614 	struct spdk_blob_opts opts;
5615 	struct spdk_blob_xattr_opts internal_xattrs;
5616 	char *xattrs_names[] = { SNAPSHOT_IN_PROGRESS };
5617 
5618 	if (bserrno != 0) {
5619 		_spdk_bs_clone_snapshot_cleanup_finish(ctx, bserrno);
5620 		return;
5621 	}
5622 
5623 	ctx->original.blob = _blob;
5624 
5625 	if (_blob->data_ro || _blob->md_ro) {
5626 		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Cannot create snapshot from read-only blob with id %lu\n",
5627 			      _blob->id);
5628 		ctx->bserrno = -EINVAL;
5629 		spdk_blob_close(_blob, _spdk_bs_clone_snapshot_cleanup_finish, ctx);
5630 		return;
5631 	}
5632 
5633 	if (_blob->locked_operation_in_progress) {
5634 		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Cannot create snapshot - another operation in progress\n");
5635 		ctx->bserrno = -EBUSY;
5636 		spdk_blob_close(_blob, _spdk_bs_clone_snapshot_cleanup_finish, ctx);
5637 		return;
5638 	}
5639 
5640 	_blob->locked_operation_in_progress = true;
5641 
5642 	spdk_blob_opts_init(&opts);
5643 	_spdk_blob_xattrs_init(&internal_xattrs);
5644 
5645 	/* Change the size of new blob to the same as in original blob,
5646 	 * but do not allocate clusters */
5647 	opts.thin_provision = true;
5648 	opts.num_clusters = spdk_blob_get_num_clusters(_blob);
5649 	opts.use_extent_table = _blob->use_extent_table;
5650 
5651 	/* If there are any xattrs specified for snapshot, set them now */
5652 	if (ctx->xattrs) {
5653 		memcpy(&opts.xattrs, ctx->xattrs, sizeof(*ctx->xattrs));
5654 	}
5655 	/* Set internal xattr SNAPSHOT_IN_PROGRESS */
5656 	internal_xattrs.count = 1;
5657 	internal_xattrs.ctx = _blob;
5658 	internal_xattrs.names = xattrs_names;
5659 	internal_xattrs.get_value = _spdk_bs_xattr_snapshot;
5660 
5661 	_spdk_bs_create_blob(_blob->bs, &opts, &internal_xattrs,
5662 			     _spdk_bs_snapshot_newblob_create_cpl, ctx);
5663 }
5664 
5665 void spdk_bs_create_snapshot(struct spdk_blob_store *bs, spdk_blob_id blobid,
5666 			     const struct spdk_blob_xattr_opts *snapshot_xattrs,
5667 			     spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
5668 {
5669 	struct spdk_clone_snapshot_ctx *ctx = calloc(1, sizeof(*ctx));
5670 
5671 	if (!ctx) {
5672 		cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOMEM);
5673 		return;
5674 	}
5675 	ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOBID;
5676 	ctx->cpl.u.blobid.cb_fn = cb_fn;
5677 	ctx->cpl.u.blobid.cb_arg = cb_arg;
5678 	ctx->cpl.u.blobid.blobid = SPDK_BLOBID_INVALID;
5679 	ctx->bserrno = 0;
5680 	ctx->frozen = false;
5681 	ctx->original.id = blobid;
5682 	ctx->xattrs = snapshot_xattrs;
5683 
5684 	spdk_bs_open_blob(bs, ctx->original.id, _spdk_bs_snapshot_origblob_open_cpl, ctx);
5685 }
5686 /* END spdk_bs_create_snapshot */
5687 
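/*
 * Usage sketch (the callback name is hypothetical); on success the
 * completion receives the id of the newly created snapshot, and the
 * original blob becomes a thin provisioned clone of it:
 *
 *	static void
 *	snapshot_done(void *cb_arg, spdk_blob_id snapshot_id, int bserrno)
 *	{
 *		...
 *	}
 *
 *	spdk_bs_create_snapshot(bs, blobid, NULL, snapshot_done, NULL);
 */
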
5688 /* START spdk_bs_create_clone */
5689 
5690 static void
5691 _spdk_bs_xattr_clone(void *arg, const char *name,
5692 		     const void **value, size_t *value_len)
5693 {
5694 	struct spdk_blob *blob = (struct spdk_blob *)arg;
5695 
5696 	assert(strncmp(name, BLOB_SNAPSHOT, sizeof(BLOB_SNAPSHOT)) == 0);
5697 	*value = &blob->id;
5698 	*value_len = sizeof(blob->id);
5699 }
5700 
5701 static void
5702 _spdk_bs_clone_newblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
5703 {
5704 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
5705 	struct spdk_blob *clone = _blob;
5706 
5707 	ctx->new.blob = clone;
5708 	_spdk_bs_blob_list_add(clone);
5709 
5710 	spdk_blob_close(clone, _spdk_bs_clone_snapshot_origblob_cleanup, ctx);
5711 }
5712 
5713 static void
5714 _spdk_bs_clone_newblob_create_cpl(void *cb_arg, spdk_blob_id blobid, int bserrno)
5715 {
5716 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
5717 
5718 	ctx->cpl.u.blobid.blobid = blobid;
5719 	spdk_bs_open_blob(ctx->original.blob->bs, blobid, _spdk_bs_clone_newblob_open_cpl, ctx);
5720 }
5721 
5722 static void
5723 _spdk_bs_clone_origblob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
5724 {
5725 	struct spdk_clone_snapshot_ctx	*ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
5726 	struct spdk_blob_opts		opts;
5727 	struct spdk_blob_xattr_opts internal_xattrs;
5728 	char *xattr_names[] = { BLOB_SNAPSHOT };
5729 
5730 	if (bserrno != 0) {
5731 		_spdk_bs_clone_snapshot_cleanup_finish(ctx, bserrno);
5732 		return;
5733 	}
5734 
5735 	ctx->original.blob = _blob;
5736 
5737 	if (!_blob->data_ro || !_blob->md_ro) {
5738 		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Cannot create clone from a blob that is not read-only\n");
5739 		ctx->bserrno = -EINVAL;
5740 		spdk_blob_close(_blob, _spdk_bs_clone_snapshot_cleanup_finish, ctx);
5741 		return;
5742 	}
5743 
5744 	if (_blob->locked_operation_in_progress) {
5745 		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Cannot create clone - another operation in progress\n");
5746 		ctx->bserrno = -EBUSY;
5747 		spdk_blob_close(_blob, _spdk_bs_clone_snapshot_cleanup_finish, ctx);
5748 		return;
5749 	}
5750 
5751 	_blob->locked_operation_in_progress = true;
5752 
5753 	spdk_blob_opts_init(&opts);
5754 	_spdk_blob_xattrs_init(&internal_xattrs);
5755 
5756 	opts.thin_provision = true;
5757 	opts.num_clusters = spdk_blob_get_num_clusters(_blob);
5758 	opts.use_extent_table = _blob->use_extent_table;
5759 	if (ctx->xattrs) {
5760 		memcpy(&opts.xattrs, ctx->xattrs, sizeof(*ctx->xattrs));
5761 	}
5762 
5763 	/* Set internal xattr BLOB_SNAPSHOT */
5764 	internal_xattrs.count = 1;
5765 	internal_xattrs.ctx = _blob;
5766 	internal_xattrs.names = xattr_names;
5767 	internal_xattrs.get_value = _spdk_bs_xattr_clone;
5768 
5769 	_spdk_bs_create_blob(_blob->bs, &opts, &internal_xattrs,
5770 			     _spdk_bs_clone_newblob_create_cpl, ctx);
5771 }
5772 
5773 void spdk_bs_create_clone(struct spdk_blob_store *bs, spdk_blob_id blobid,
5774 			  const struct spdk_blob_xattr_opts *clone_xattrs,
5775 			  spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
5776 {
5777 	struct spdk_clone_snapshot_ctx	*ctx = calloc(1, sizeof(*ctx));
5778 
5779 	if (!ctx) {
5780 		cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOMEM);
5781 		return;
5782 	}
5783 
5784 	ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOBID;
5785 	ctx->cpl.u.blobid.cb_fn = cb_fn;
5786 	ctx->cpl.u.blobid.cb_arg = cb_arg;
5787 	ctx->cpl.u.blobid.blobid = SPDK_BLOBID_INVALID;
5788 	ctx->bserrno = 0;
5789 	ctx->xattrs = clone_xattrs;
5790 	ctx->original.id = blobid;
5791 
5792 	spdk_bs_open_blob(bs, ctx->original.id, _spdk_bs_clone_origblob_open_cpl, ctx);
5793 }
5794 
5795 /* END spdk_bs_create_clone */
5796 
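/*
 * Usage sketch (the callback name is hypothetical). The source blob
 * must be read-only, so a typical sequence clones a snapshot:
 *
 *	spdk_bs_create_clone(bs, snapshot_id, NULL, clone_done, NULL);
 */
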
5797 /* START spdk_bs_inflate_blob */
5798 
5799 static void
5800 _spdk_bs_inflate_blob_set_parent_cpl(void *cb_arg, struct spdk_blob *_parent, int bserrno)
5801 {
5802 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
5803 	struct spdk_blob *_blob = ctx->original.blob;
5804 
5805 	if (bserrno != 0) {
5806 		_spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
5807 		return;
5808 	}
5809 
5810 	assert(_parent != NULL);
5811 
5812 	_spdk_bs_blob_list_remove(_blob);
5813 	_blob->parent_id = _parent->id;
5814 	_spdk_blob_set_xattr(_blob, BLOB_SNAPSHOT, &_blob->parent_id,
5815 			     sizeof(spdk_blob_id), true);
5816 
5817 	_blob->back_bs_dev->destroy(_blob->back_bs_dev);
5818 	_blob->back_bs_dev = bs_create_blob_bs_dev(_parent);
5819 	_spdk_bs_blob_list_add(_blob);
5820 
5821 	spdk_blob_sync_md(_blob, _spdk_bs_clone_snapshot_origblob_cleanup, ctx);
5822 }
5823 
5824 static void
5825 _spdk_bs_inflate_blob_done(void *cb_arg, int bserrno)
5826 {
5827 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
5828 	struct spdk_blob *_blob = ctx->original.blob;
5829 	struct spdk_blob *_parent;
5830 
5831 	if (bserrno != 0) {
5832 		_spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
5833 		return;
5834 	}
5835 
5836 	if (ctx->allocate_all) {
5837 		/* remove thin provisioning */
5838 		_spdk_bs_blob_list_remove(_blob);
5839 		_spdk_blob_remove_xattr(_blob, BLOB_SNAPSHOT, true);
5840 		_blob->invalid_flags = _blob->invalid_flags & ~SPDK_BLOB_THIN_PROV;
5841 		_blob->back_bs_dev->destroy(_blob->back_bs_dev);
5842 		_blob->back_bs_dev = NULL;
5843 		_blob->parent_id = SPDK_BLOBID_INVALID;
5844 	} else {
5845 		_parent = ((struct spdk_blob_bs_dev *)(_blob->back_bs_dev))->blob;
5846 		if (_parent->parent_id != SPDK_BLOBID_INVALID) {
5847 			/* We must change the parent of the inflated blob */
5848 			spdk_bs_open_blob(_blob->bs, _parent->parent_id,
5849 					  _spdk_bs_inflate_blob_set_parent_cpl, ctx);
5850 			return;
5851 		}
5852 
5853 		_spdk_bs_blob_list_remove(_blob);
5854 		_spdk_blob_remove_xattr(_blob, BLOB_SNAPSHOT, true);
5855 		_blob->parent_id = SPDK_BLOBID_INVALID;
5856 		_blob->back_bs_dev->destroy(_blob->back_bs_dev);
5857 		_blob->back_bs_dev = bs_create_zeroes_dev();
5858 	}
5859 
5860 	_blob->state = SPDK_BLOB_STATE_DIRTY;
5861 	spdk_blob_sync_md(_blob, _spdk_bs_clone_snapshot_origblob_cleanup, ctx);
5862 }
5863 
5864 /* Check if cluster needs allocation */
5865 static inline bool
5866 _spdk_bs_cluster_needs_allocation(struct spdk_blob *blob, uint64_t cluster, bool allocate_all)
5867 {
5868 	struct spdk_blob_bs_dev *b;
5869 
5870 	assert(blob != NULL);
5871 
5872 	if (blob->active.clusters[cluster] != 0) {
5873 		/* Cluster is already allocated */
5874 		return false;
5875 	}
5876 
5877 	if (blob->parent_id == SPDK_BLOBID_INVALID) {
5878 		/* Blob has no parent blob */
5879 		return allocate_all;
5880 	}
5881 
5882 	b = (struct spdk_blob_bs_dev *)blob->back_bs_dev;
5883 	return (allocate_all || b->blob->active.clusters[cluster] != 0);
5884 }
5885 
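/*
 * In other words: an already allocated cluster never needs allocation;
 * an unallocated cluster with no parent needs allocation only when
 * allocating all; and an unallocated cluster with a parent needs
 * allocation when allocating all or when the direct parent actually
 * holds data for it (decouple-parent copies only those clusters).
 */
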
5886 static void
5887 _spdk_bs_inflate_blob_touch_next(void *cb_arg, int bserrno)
5888 {
5889 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
5890 	struct spdk_blob *_blob = ctx->original.blob;
5891 	uint64_t offset;
5892 
5893 	if (bserrno != 0) {
5894 		_spdk_bs_clone_snapshot_origblob_cleanup(ctx, bserrno);
5895 		return;
5896 	}
5897 
5898 	for (; ctx->cluster < _blob->active.num_clusters; ctx->cluster++) {
5899 		if (_spdk_bs_cluster_needs_allocation(_blob, ctx->cluster, ctx->allocate_all)) {
5900 			break;
5901 		}
5902 	}
5903 
5904 	if (ctx->cluster < _blob->active.num_clusters) {
5905 		offset = _spdk_bs_cluster_to_lba(_blob->bs, ctx->cluster);
5906 
5907 		/* We may safely increment the cluster number before the write */
5908 		ctx->cluster++;
5909 
5910 		/* Use a zero-length write to touch the cluster */
5911 		spdk_blob_io_write(_blob, ctx->channel, NULL, offset, 0,
5912 				   _spdk_bs_inflate_blob_touch_next, ctx);
5913 	} else {
5914 		_spdk_bs_inflate_blob_done(cb_arg, bserrno);
5915 	}
5916 }
5917 
5918 static void
5919 _spdk_bs_inflate_blob_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
5920 {
5921 	struct spdk_clone_snapshot_ctx *ctx = (struct spdk_clone_snapshot_ctx *)cb_arg;
5922 	uint64_t lfc; /* lowest free cluster */
5923 	uint64_t i;
5924 
5925 	if (bserrno != 0) {
5926 		_spdk_bs_clone_snapshot_cleanup_finish(ctx, bserrno);
5927 		return;
5928 	}
5929 
5930 	ctx->original.blob = _blob;
5931 
5932 	if (_blob->locked_operation_in_progress) {
5933 		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Cannot inflate blob - another operation in progress\n");
5934 		ctx->bserrno = -EBUSY;
5935 		spdk_blob_close(_blob, _spdk_bs_clone_snapshot_cleanup_finish, ctx);
5936 		return;
5937 	}
5938 
5939 	_blob->locked_operation_in_progress = true;
5940 
5941 	if (!ctx->allocate_all && _blob->parent_id == SPDK_BLOBID_INVALID) {
5942 		/* This blob has no parent, so we cannot decouple it. */
5943 		SPDK_ERRLOG("Cannot decouple parent of blob with no parent.\n");
5944 		_spdk_bs_clone_snapshot_origblob_cleanup(ctx, -EINVAL);
5945 		return;
5946 	}
5947 
5948 	if (spdk_blob_is_thin_provisioned(_blob) == false) {
5949 		/* This is not a thin provisioned blob. No need to inflate. */
5950 		_spdk_bs_clone_snapshot_origblob_cleanup(ctx, 0);
5951 		return;
5952 	}
5953 
5954 	/* Do two passes - one to verify that we can obtain enough clusters
5955 	 * and another to actually claim them.
5956 	 */
5957 	lfc = 0;
5958 	for (i = 0; i < _blob->active.num_clusters; i++) {
5959 		if (_spdk_bs_cluster_needs_allocation(_blob, i, ctx->allocate_all)) {
5960 			lfc = spdk_bit_array_find_first_clear(_blob->bs->used_clusters, lfc);
5961 			if (lfc == UINT32_MAX) {
5962 				/* No more free clusters. Cannot satisfy the request */
5963 				_spdk_bs_clone_snapshot_origblob_cleanup(ctx, -ENOSPC);
5964 				return;
5965 			}
5966 			lfc++;
5967 		}
5968 	}
5969 
5970 	ctx->cluster = 0;
5971 	_spdk_bs_inflate_blob_touch_next(ctx, 0);
5972 }
5973 
5974 static void
5975 _spdk_bs_inflate_blob(struct spdk_blob_store *bs, struct spdk_io_channel *channel,
5976 		      spdk_blob_id blobid, bool allocate_all, spdk_blob_op_complete cb_fn, void *cb_arg)
5977 {
5978 	struct spdk_clone_snapshot_ctx *ctx = calloc(1, sizeof(*ctx));
5979 
5980 	if (!ctx) {
5981 		cb_fn(cb_arg, -ENOMEM);
5982 		return;
5983 	}
5984 	ctx->cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
5985 	ctx->cpl.u.bs_basic.cb_fn = cb_fn;
5986 	ctx->cpl.u.bs_basic.cb_arg = cb_arg;
5987 	ctx->bserrno = 0;
5988 	ctx->original.id = blobid;
5989 	ctx->channel = channel;
5990 	ctx->allocate_all = allocate_all;
5991 
5992 	spdk_bs_open_blob(bs, ctx->original.id, _spdk_bs_inflate_blob_open_cpl, ctx);
5993 }
5994 
5995 void
5996 spdk_bs_inflate_blob(struct spdk_blob_store *bs, struct spdk_io_channel *channel,
5997 		     spdk_blob_id blobid, spdk_blob_op_complete cb_fn, void *cb_arg)
5998 {
5999 	_spdk_bs_inflate_blob(bs, channel, blobid, true, cb_fn, cb_arg);
6000 }
6001 
6002 void
6003 spdk_bs_blob_decouple_parent(struct spdk_blob_store *bs, struct spdk_io_channel *channel,
6004 			     spdk_blob_id blobid, spdk_blob_op_complete cb_fn, void *cb_arg)
6005 {
6006 	_spdk_bs_inflate_blob(bs, channel, blobid, false, cb_fn, cb_arg);
6007 }
6008 /* END spdk_bs_inflate_blob */
6009 
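/*
 * Both entry points above share the same machinery and differ only in
 * allocate_all: inflating allocates every unallocated cluster and drops
 * thin provisioning, while decoupling copies only the clusters owned by
 * the direct parent and keeps the blob thin. Usage sketch (op_done is a
 * hypothetical spdk_blob_op_complete callback):
 *
 *	spdk_bs_inflate_blob(bs, channel, blobid, op_done, NULL);
 *	spdk_bs_blob_decouple_parent(bs, channel, blobid, op_done, NULL);
 */
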
6010 /* START spdk_blob_resize */
6011 struct spdk_bs_resize_ctx {
6012 	spdk_blob_op_complete cb_fn;
6013 	void *cb_arg;
6014 	struct spdk_blob *blob;
6015 	uint64_t sz;
6016 	int rc;
6017 };
6018 
6019 static void
6020 _spdk_bs_resize_unfreeze_cpl(void *cb_arg, int rc)
6021 {
6022 	struct spdk_bs_resize_ctx *ctx = (struct spdk_bs_resize_ctx *)cb_arg;
6023 
6024 	if (rc != 0) {
6025 		SPDK_ERRLOG("Unfreeze failed, rc=%d\n", rc);
6026 	}
6027 
6028 	if (ctx->rc != 0) {
6029 		SPDK_ERRLOG("Blob resize failed, ctx->rc=%d\n", ctx->rc);
6030 		rc = ctx->rc;
6031 	}
6032 
6033 	ctx->blob->locked_operation_in_progress = false;
6034 
6035 	ctx->cb_fn(ctx->cb_arg, rc);
6036 	free(ctx);
6037 }
6038 
6039 static void
6040 _spdk_bs_resize_freeze_cpl(void *cb_arg, int rc)
6041 {
6042 	struct spdk_bs_resize_ctx *ctx = (struct spdk_bs_resize_ctx *)cb_arg;
6043 
6044 	if (rc != 0) {
6045 		ctx->blob->locked_operation_in_progress = false;
6046 		ctx->cb_fn(ctx->cb_arg, rc);
6047 		free(ctx);
6048 		return;
6049 	}
6050 
6051 	ctx->rc = _spdk_blob_resize(ctx->blob, ctx->sz);
6052 
6053 	_spdk_blob_unfreeze_io(ctx->blob, _spdk_bs_resize_unfreeze_cpl, ctx);
6054 }
6055 
6056 void
6057 spdk_blob_resize(struct spdk_blob *blob, uint64_t sz, spdk_blob_op_complete cb_fn, void *cb_arg)
6058 {
6059 	struct spdk_bs_resize_ctx *ctx;
6060 
6061 	_spdk_blob_verify_md_op(blob);
6062 
6063 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Resizing blob %lu to %lu clusters\n", blob->id, sz);
6064 
6065 	if (blob->md_ro) {
6066 		cb_fn(cb_arg, -EPERM);
6067 		return;
6068 	}
6069 
6070 	if (sz == blob->active.num_clusters) {
6071 		cb_fn(cb_arg, 0);
6072 		return;
6073 	}
6074 
6075 	if (blob->locked_operation_in_progress) {
6076 		cb_fn(cb_arg, -EBUSY);
6077 		return;
6078 	}
6079 
6080 	ctx = calloc(1, sizeof(*ctx));
6081 	if (!ctx) {
6082 		cb_fn(cb_arg, -ENOMEM);
6083 		return;
6084 	}
6085 
6086 	blob->locked_operation_in_progress = true;
6087 	ctx->cb_fn = cb_fn;
6088 	ctx->cb_arg = cb_arg;
6089 	ctx->blob = blob;
6090 	ctx->sz = sz;
6091 	_spdk_blob_freeze_io(blob, _spdk_bs_resize_freeze_cpl, ctx);
6092 }
6093 
6094 /* END spdk_blob_resize */
6095 
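/*
 * Usage sketch (the callback name and size are hypothetical). The
 * resize only updates in-memory metadata; it becomes persistent on the
 * next spdk_blob_sync_md() or blob close:
 *
 *	spdk_blob_resize(blob, 32, resize_done, NULL);
 */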
6096 
6097 /* START spdk_bs_delete_blob */
6098 
6099 static void
6100 _spdk_bs_delete_close_cpl(void *cb_arg, int bserrno)
6101 {
6102 	spdk_bs_sequence_t *seq = cb_arg;
6103 
6104 	bs_sequence_finish(seq, bserrno);
6105 }
6106 
6107 static void
6108 _spdk_bs_delete_persist_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
6109 {
6110 	struct spdk_blob *blob = cb_arg;
6111 
6112 	if (bserrno != 0) {
6113 		/*
6114 		 * We already removed this blob from the blobstore tailq, so
6115 		 *  we need to free it here since this is the last reference
6116 		 *  to it.
6117 		 */
6118 		_spdk_blob_free(blob);
6119 		_spdk_bs_delete_close_cpl(seq, bserrno);
6120 		return;
6121 	}
6122 
6123 	/*
6124 	 * This will immediately decrement the ref_count and call
6125 	 *  the completion routine since the metadata state is clean.
6126 	 *  By calling spdk_blob_close, we reduce the number of call
6127 	 *  points into code that touches the blob->open_ref count
6128 	 *  and the blobstore's blob list.
6129 	 */
6130 	spdk_blob_close(blob, _spdk_bs_delete_close_cpl, seq);
6131 }
6132 
6133 struct delete_snapshot_ctx {
6134 	struct spdk_blob_list *parent_snapshot_entry;
6135 	struct spdk_blob *snapshot;
6136 	bool snapshot_md_ro;
6137 	struct spdk_blob *clone;
6138 	bool clone_md_ro;
6139 	spdk_blob_op_with_handle_complete cb_fn;
6140 	void *cb_arg;
6141 	int bserrno;
6142 };
6143 
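/*
 * Deleting a snapshot that still has a clone runs as a chain of
 * completions: the clone is opened and its I/O frozen, the snapshot is
 * marked SNAPSHOT_PENDING_REMOVAL and synced, the snapshot's cluster
 * map is merged into the clone and the clone is re-parented and synced,
 * the now shared clusters are cleared from the snapshot and it is
 * synced again, and finally I/O is unfrozen and both blobs are closed.
 */
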
6144 static void
6145 _spdk_delete_blob_cleanup_finish(void *cb_arg, int bserrno)
6146 {
6147 	struct delete_snapshot_ctx *ctx = cb_arg;
6148 
6149 	if (bserrno != 0) {
6150 		SPDK_ERRLOG("Snapshot cleanup error %d\n", bserrno);
6151 	}
6152 
6153 	assert(ctx != NULL);
6154 
6155 	if (bserrno != 0 && ctx->bserrno == 0) {
6156 		ctx->bserrno = bserrno;
6157 	}
6158 
6159 	ctx->cb_fn(ctx->cb_arg, ctx->snapshot, ctx->bserrno);
6160 	free(ctx);
6161 }
6162 
6163 static void
6164 _spdk_delete_snapshot_cleanup_snapshot(void *cb_arg, int bserrno)
6165 {
6166 	struct delete_snapshot_ctx *ctx = cb_arg;
6167 
6168 	if (bserrno != 0) {
6169 		ctx->bserrno = bserrno;
6170 		SPDK_ERRLOG("Clone cleanup error %d\n", bserrno);
6171 	}
6172 
6173 	if (ctx->bserrno != 0) {
6174 		assert(_spdk_blob_lookup(ctx->snapshot->bs, ctx->snapshot->id) == NULL);
6175 		TAILQ_INSERT_HEAD(&ctx->snapshot->bs->blobs, ctx->snapshot, link);
6176 	}
6177 
6178 	ctx->snapshot->locked_operation_in_progress = false;
6179 	ctx->snapshot->md_ro = ctx->snapshot_md_ro;
6180 
6181 	spdk_blob_close(ctx->snapshot, _spdk_delete_blob_cleanup_finish, ctx);
6182 }
6183 
6184 static void
6185 _spdk_delete_snapshot_cleanup_clone(void *cb_arg, int bserrno)
6186 {
6187 	struct delete_snapshot_ctx *ctx = cb_arg;
6188 
6189 	ctx->clone->locked_operation_in_progress = false;
6190 	ctx->clone->md_ro = ctx->clone_md_ro;
6191 
6192 	spdk_blob_close(ctx->clone, _spdk_delete_snapshot_cleanup_snapshot, ctx);
6193 }
6194 
6195 static void
6196 _spdk_delete_snapshot_unfreeze_cpl(void *cb_arg, int bserrno)
6197 {
6198 	struct delete_snapshot_ctx *ctx = cb_arg;
6199 
6200 	if (bserrno) {
6201 		ctx->bserrno = bserrno;
6202 		_spdk_delete_snapshot_cleanup_clone(ctx, 0);
6203 		return;
6204 	}
6205 
6206 	ctx->clone->locked_operation_in_progress = false;
6207 	spdk_blob_close(ctx->clone, _spdk_delete_blob_cleanup_finish, ctx);
6208 }
6209 
6210 static void
6211 _spdk_delete_snapshot_sync_snapshot_cpl(void *cb_arg, int bserrno)
6212 {
6213 	struct delete_snapshot_ctx *ctx = cb_arg;
6214 	struct spdk_blob_list *parent_snapshot_entry = NULL;
6215 	struct spdk_blob_list *snapshot_entry = NULL;
6216 	struct spdk_blob_list *clone_entry = NULL;
6217 	struct spdk_blob_list *snapshot_clone_entry = NULL;
6218 
6219 	if (bserrno) {
6220 		SPDK_ERRLOG("Failed to sync MD on blob\n");
6221 		ctx->bserrno = bserrno;
6222 		_spdk_delete_snapshot_cleanup_clone(ctx, 0);
6223 		return;
6224 	}
6225 
6226 	/* Get snapshot entry for the snapshot we want to remove */
6227 	snapshot_entry = _spdk_bs_get_snapshot_entry(ctx->snapshot->bs, ctx->snapshot->id);
6228 
6229 	assert(snapshot_entry != NULL);
6230 
6231 	/* Remove clone entry in this snapshot (at this point there can be only one clone) */
6232 	clone_entry = TAILQ_FIRST(&snapshot_entry->clones);
6233 	assert(clone_entry != NULL);
6234 	TAILQ_REMOVE(&snapshot_entry->clones, clone_entry, link);
6235 	snapshot_entry->clone_count--;
6236 	assert(TAILQ_EMPTY(&snapshot_entry->clones));
6237 
6238 	if (ctx->snapshot->parent_id != SPDK_BLOBID_INVALID) {
6239 		/* This snapshot is at the same time a clone of another snapshot - we need to
6240 		 * update parent snapshot (remove current clone, add new one inherited from
6241 		 * the snapshot that is being removed) */
6242 
6243 		/* Get snapshot entry for parent snapshot and clone entry within that snapshot for
6244 		 * snapshot that we are removing */
6245 		_spdk_blob_get_snapshot_and_clone_entries(ctx->snapshot, &parent_snapshot_entry,
6246 				&snapshot_clone_entry);
6247 
6248 		/* Switch clone entry in parent snapshot */
6249 		TAILQ_INSERT_TAIL(&parent_snapshot_entry->clones, clone_entry, link);
6250 		TAILQ_REMOVE(&parent_snapshot_entry->clones, snapshot_clone_entry, link);
6251 		free(snapshot_clone_entry);
6252 	} else {
6253 		/* No parent snapshot - just remove clone entry */
6254 		free(clone_entry);
6255 	}
6256 
6257 	/* Restore md_ro flags */
6258 	ctx->clone->md_ro = ctx->clone_md_ro;
6259 	ctx->snapshot->md_ro = ctx->snapshot_md_ro;
6260 
6261 	_spdk_blob_unfreeze_io(ctx->clone, _spdk_delete_snapshot_unfreeze_cpl, ctx);
6262 }
6263 
6264 static void
6265 _spdk_delete_snapshot_sync_clone_cpl(void *cb_arg, int bserrno)
6266 {
6267 	struct delete_snapshot_ctx *ctx = cb_arg;
6268 	uint64_t i;
6269 
6270 	ctx->snapshot->md_ro = false;
6271 
6272 	if (bserrno) {
6273 		SPDK_ERRLOG("Failed to sync MD on clone\n");
6274 		ctx->bserrno = bserrno;
6275 
6276 		/* Restore snapshot to previous state */
6277 		bserrno = _spdk_blob_remove_xattr(ctx->snapshot, SNAPSHOT_PENDING_REMOVAL, true);
6278 		if (bserrno != 0) {
6279 			_spdk_delete_snapshot_cleanup_clone(ctx, bserrno);
6280 			return;
6281 		}
6282 
6283 		spdk_blob_sync_md(ctx->snapshot, _spdk_delete_snapshot_cleanup_clone, ctx);
6284 		return;
6285 	}
6286 
6287 	/* Clear cluster map entries for snapshot */
6288 	for (i = 0; i < ctx->snapshot->active.num_clusters && i < ctx->clone->active.num_clusters; i++) {
6289 		if (ctx->clone->active.clusters[i] == ctx->snapshot->active.clusters[i]) {
6290 			ctx->snapshot->active.clusters[i] = 0;
6291 		}
6292 	}
6293 	for (i = 0; i < ctx->snapshot->active.num_extent_pages &&
6294 	     i < ctx->clone->active.num_extent_pages; i++) {
6295 		if (ctx->clone->active.extent_pages[i] == ctx->snapshot->active.extent_pages[i]) {
6296 			ctx->snapshot->active.extent_pages[i] = 0;
6297 		}
6298 	}
6299 
6300 	_spdk_blob_set_thin_provision(ctx->snapshot);
6301 	ctx->snapshot->state = SPDK_BLOB_STATE_DIRTY;
6302 
6303 	if (ctx->parent_snapshot_entry != NULL) {
6304 		ctx->snapshot->back_bs_dev = NULL;
6305 	}
6306 
6307 	spdk_blob_sync_md(ctx->snapshot, _spdk_delete_snapshot_sync_snapshot_cpl, ctx);
6308 }
6309 
6310 static void
6311 _spdk_delete_snapshot_sync_snapshot_xattr_cpl(void *cb_arg, int bserrno)
6312 {
6313 	struct delete_snapshot_ctx *ctx = cb_arg;
6314 	uint64_t i;
6315 
6316 	/* Temporarily override md_ro flag for clone for MD modification */
6317 	ctx->clone_md_ro = ctx->clone->md_ro;
6318 	ctx->clone->md_ro = false;
6319 
6320 	if (bserrno) {
6321 		SPDK_ERRLOG("Failed to sync MD with xattr on blob\n");
6322 		ctx->bserrno = bserrno;
6323 		_spdk_delete_snapshot_cleanup_clone(ctx, 0);
6324 		return;
6325 	}
6326 
6327 	/* Copy snapshot map to clone map (only unallocated clusters in clone) */
6328 	for (i = 0; i < ctx->snapshot->active.num_clusters && i < ctx->clone->active.num_clusters; i++) {
6329 		if (ctx->clone->active.clusters[i] == 0) {
6330 			ctx->clone->active.clusters[i] = ctx->snapshot->active.clusters[i];
6331 		}
6332 	}
6333 	for (i = 0; i < ctx->snapshot->active.num_extent_pages &&
6334 	     i < ctx->clone->active.num_extent_pages; i++) {
6335 		if (ctx->clone->active.extent_pages[i] == 0) {
6336 			ctx->clone->active.extent_pages[i] = ctx->snapshot->active.extent_pages[i];
6337 		}
6338 	}
6339 
6340 	/* Delete old backing bs_dev from clone (related to snapshot that will be removed) */
6341 	ctx->clone->back_bs_dev->destroy(ctx->clone->back_bs_dev);
6342 
6343 	/* Set/remove snapshot xattr and switch parent ID and backing bs_dev on clone... */
6344 	if (ctx->parent_snapshot_entry != NULL) {
6345 		/* ...to parent snapshot */
6346 		ctx->clone->parent_id = ctx->parent_snapshot_entry->id;
6347 		ctx->clone->back_bs_dev = ctx->snapshot->back_bs_dev;
6348 		_spdk_blob_set_xattr(ctx->clone, BLOB_SNAPSHOT, &ctx->parent_snapshot_entry->id,
6349 				     sizeof(spdk_blob_id),
6350 				     true);
6351 	} else {
6352 		/* ...to blobid invalid and zeroes dev */
6353 		ctx->clone->parent_id = SPDK_BLOBID_INVALID;
6354 		ctx->clone->back_bs_dev = bs_create_zeroes_dev();
6355 		_spdk_blob_remove_xattr(ctx->clone, BLOB_SNAPSHOT, true);
6356 	}
6357 
6358 	spdk_blob_sync_md(ctx->clone, _spdk_delete_snapshot_sync_clone_cpl, ctx);
6359 }
6360 
6361 static void
6362 _spdk_delete_snapshot_freeze_io_cb(void *cb_arg, int bserrno)
6363 {
6364 	struct delete_snapshot_ctx *ctx = cb_arg;
6365 
6366 	if (bserrno) {
6367 		SPDK_ERRLOG("Failed to freeze I/O on clone\n");
6368 		ctx->bserrno = bserrno;
6369 		_spdk_delete_snapshot_cleanup_clone(ctx, 0);
6370 		return;
6371 	}
6372 
6373 	/* Temporarily override md_ro flag for snapshot for MD modification */
6374 	ctx->snapshot_md_ro = ctx->snapshot->md_ro;
6375 	ctx->snapshot->md_ro = false;
6376 
6377 	/* Mark blob as pending for removal for power failure safety, use clone id for recovery */
6378 	ctx->bserrno = _spdk_blob_set_xattr(ctx->snapshot, SNAPSHOT_PENDING_REMOVAL, &ctx->clone->id,
6379 					    sizeof(spdk_blob_id), true);
6380 	if (ctx->bserrno != 0) {
6381 		_spdk_delete_snapshot_cleanup_clone(ctx, 0);
6382 		return;
6383 	}
6384 
6385 	spdk_blob_sync_md(ctx->snapshot, _spdk_delete_snapshot_sync_snapshot_xattr_cpl, ctx);
6386 }
6387 
6388 static void
6389 _spdk_delete_snapshot_open_clone_cb(void *cb_arg, struct spdk_blob *clone, int bserrno)
6390 {
6391 	struct delete_snapshot_ctx *ctx = cb_arg;
6392 
6393 	if (bserrno) {
6394 		SPDK_ERRLOG("Failed to open clone\n");
6395 		ctx->bserrno = bserrno;
6396 		_spdk_delete_snapshot_cleanup_snapshot(ctx, 0);
6397 		return;
6398 	}
6399 
6400 	ctx->clone = clone;
6401 
6402 	if (clone->locked_operation_in_progress) {
6403 		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Cannot remove blob - another operation in progress on its clone\n");
6404 		ctx->bserrno = -EBUSY;
6405 		spdk_blob_close(ctx->clone, _spdk_delete_snapshot_cleanup_snapshot, ctx);
6406 		return;
6407 	}
6408 
6409 	clone->locked_operation_in_progress = true;
6410 
6411 	_spdk_blob_freeze_io(clone, _spdk_delete_snapshot_freeze_io_cb, ctx);
6412 }
6413 
6414 static void
6415 _spdk_update_clone_on_snapshot_deletion(struct spdk_blob *snapshot, struct delete_snapshot_ctx *ctx)
6416 {
6417 	struct spdk_blob_list *snapshot_entry = NULL;
6418 	struct spdk_blob_list *clone_entry = NULL;
6419 	struct spdk_blob_list *snapshot_clone_entry = NULL;
6420 
6421 	/* Get snapshot entry for the snapshot we want to remove */
6422 	snapshot_entry = _spdk_bs_get_snapshot_entry(snapshot->bs, snapshot->id);
6423 
6424 	assert(snapshot_entry != NULL);
6425 
6426 	/* Get clone of the snapshot (at this point there can be only one clone) */
6427 	clone_entry = TAILQ_FIRST(&snapshot_entry->clones);
6428 	assert(snapshot_entry->clone_count == 1);
6429 	assert(clone_entry != NULL);
6430 
6431 	/* Get snapshot entry for parent snapshot and clone entry within that snapshot for
6432 	 * snapshot that we are removing */
6433 	_spdk_blob_get_snapshot_and_clone_entries(snapshot, &ctx->parent_snapshot_entry,
6434 			&snapshot_clone_entry);
6435 
6436 	spdk_bs_open_blob(snapshot->bs, clone_entry->id, _spdk_delete_snapshot_open_clone_cb, ctx);
6437 }
6438 
6439 static void
6440 _spdk_bs_delete_blob_finish(void *cb_arg, struct spdk_blob *blob, int bserrno)
6441 {
6442 	spdk_bs_sequence_t *seq = cb_arg;
6443 	struct spdk_blob_list *snapshot_entry = NULL;
6444 	uint32_t page_num;
6445 
6446 	if (bserrno) {
6447 		SPDK_ERRLOG("Failed to remove blob\n");
6448 		bs_sequence_finish(seq, bserrno);
6449 		return;
6450 	}
6451 
6452 	/* Remove snapshot from the list */
6453 	snapshot_entry = _spdk_bs_get_snapshot_entry(blob->bs, blob->id);
6454 	if (snapshot_entry != NULL) {
6455 		TAILQ_REMOVE(&blob->bs->snapshots, snapshot_entry, link);
6456 		free(snapshot_entry);
6457 	}
6458 
6459 	page_num = _spdk_bs_blobid_to_page(blob->id);
6460 	spdk_bit_array_clear(blob->bs->used_blobids, page_num);
6461 	blob->state = SPDK_BLOB_STATE_DIRTY;
6462 	blob->active.num_pages = 0;
6463 	_spdk_blob_resize(blob, 0);
6464 
6465 	_spdk_blob_persist(seq, blob, _spdk_bs_delete_persist_cpl, blob);
6466 }
6467 
6468 static int
6469 _spdk_bs_is_blob_deletable(struct spdk_blob *blob, bool *update_clone)
6470 {
6471 	struct spdk_blob_list *snapshot_entry = NULL;
6472 	struct spdk_blob_list *clone_entry = NULL;
6473 	struct spdk_blob *clone = NULL;
6474 	bool has_one_clone = false;
6475 
6476 	/* Check if this is a snapshot with clones */
6477 	snapshot_entry = _spdk_bs_get_snapshot_entry(blob->bs, blob->id);
6478 	if (snapshot_entry != NULL) {
6479 		if (snapshot_entry->clone_count > 1) {
6480 			SPDK_ERRLOG("Cannot remove snapshot with more than one clone\n");
6481 			return -EBUSY;
6482 		} else if (snapshot_entry->clone_count == 1) {
6483 			has_one_clone = true;
6484 		}
6485 	}
6486 
6487 	/* Check if someone has this blob open (besides this delete context):
6488 	 * - open_ref == 1 - only this context opened the blob, so it is ok to remove it
6489 	 * - open_ref <= 2 && has_one_clone == true - the clone is holding the snapshot
6490 	 *	open, and that is ok, because we will update it accordingly */
6491 	if (blob->open_ref <= 2 && has_one_clone) {
6492 		clone_entry = TAILQ_FIRST(&snapshot_entry->clones);
6493 		assert(clone_entry != NULL);
6494 		clone = _spdk_blob_lookup(blob->bs, clone_entry->id);
6495 
6496 		if (blob->open_ref == 2 && clone == NULL) {
6497 			/* Clone is closed and someone else opened this blob */
6498 			SPDK_ERRLOG("Cannot remove snapshot because it is open\n");
6499 			return -EBUSY;
6500 		}
6501 
6502 		*update_clone = true;
6503 		return 0;
6504 	}
6505 
6506 	if (blob->open_ref > 1) {
6507 		SPDK_ERRLOG("Cannot remove snapshot because it is open\n");
6508 		return -EBUSY;
6509 	}
6510 
6511 	assert(has_one_clone == false);
6512 	*update_clone = false;
6513 	return 0;
6514 }
6515 
6516 static void
6517 _spdk_bs_delete_enomem_close_cpl(void *cb_arg, int bserrno)
6518 {
6519 	spdk_bs_sequence_t *seq = cb_arg;
6520 
6521 	bs_sequence_finish(seq, -ENOMEM);
6522 }
6523 
6524 static void
6525 _spdk_bs_delete_open_cpl(void *cb_arg, struct spdk_blob *blob, int bserrno)
6526 {
6527 	spdk_bs_sequence_t *seq = cb_arg;
6528 	struct delete_snapshot_ctx *ctx;
6529 	bool update_clone = false;
6530 
6531 	if (bserrno != 0) {
6532 		bs_sequence_finish(seq, bserrno);
6533 		return;
6534 	}
6535 
6536 	_spdk_blob_verify_md_op(blob);
6537 
6538 	ctx = calloc(1, sizeof(*ctx));
6539 	if (ctx == NULL) {
6540 		spdk_blob_close(blob, _spdk_bs_delete_enomem_close_cpl, seq);
6541 		return;
6542 	}
6543 
6544 	ctx->snapshot = blob;
6545 	ctx->cb_fn = _spdk_bs_delete_blob_finish;
6546 	ctx->cb_arg = seq;
6547 
6548 	/* Check if blob can be removed and if it is a snapshot with clone on top of it */
6549 	ctx->bserrno = _spdk_bs_is_blob_deletable(blob, &update_clone);
6550 	if (ctx->bserrno) {
6551 		spdk_blob_close(blob, _spdk_delete_blob_cleanup_finish, ctx);
6552 		return;
6553 	}
6554 
6555 	if (blob->locked_operation_in_progress) {
6556 		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Cannot remove blob - another operation in progress\n");
6557 		ctx->bserrno = -EBUSY;
6558 		spdk_blob_close(blob, _spdk_delete_blob_cleanup_finish, ctx);
6559 		return;
6560 	}
6561 
6562 	blob->locked_operation_in_progress = true;
6563 
6564 	/*
6565 	 * Remove the blob from the blob_store list now, to ensure it does not
6566 	 *  get returned after this point by _spdk_blob_lookup().
6567 	 */
6568 	TAILQ_REMOVE(&blob->bs->blobs, blob, link);
6569 
6570 	if (update_clone) {
6571 		/* This blob is a snapshot with active clone - update clone first */
6572 		_spdk_update_clone_on_snapshot_deletion(blob, ctx);
6573 	} else {
6574 		/* This blob does not have any clones - just remove it */
6575 		_spdk_bs_blob_list_remove(blob);
6576 		_spdk_bs_delete_blob_finish(seq, blob, 0);
6577 		free(ctx);
6578 	}
6579 }
6580 
6581 void
6582 spdk_bs_delete_blob(struct spdk_blob_store *bs, spdk_blob_id blobid,
6583 		    spdk_blob_op_complete cb_fn, void *cb_arg)
6584 {
6585 	struct spdk_bs_cpl	cpl;
6586 	spdk_bs_sequence_t	*seq;
6587 
6588 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Deleting blob %lu\n", blobid);
6589 
6590 	assert(spdk_get_thread() == bs->md_thread);
6591 
6592 	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
6593 	cpl.u.blob_basic.cb_fn = cb_fn;
6594 	cpl.u.blob_basic.cb_arg = cb_arg;
6595 
6596 	seq = bs_sequence_start(bs->md_channel, &cpl);
6597 	if (!seq) {
6598 		cb_fn(cb_arg, -ENOMEM);
6599 		return;
6600 	}
6601 
6602 	spdk_bs_open_blob(bs, blobid, _spdk_bs_delete_open_cpl, seq);
6603 }
6604 
6605 /* END spdk_bs_delete_blob */
6606 
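/*
 * Usage sketch (the callback name is hypothetical); must be called from
 * the metadata thread, and fails with -EBUSY if the blob is open
 * elsewhere or is a snapshot with more than one clone:
 *
 *	spdk_bs_delete_blob(bs, blobid, delete_done, NULL);
 */
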
6607 /* START spdk_bs_open_blob */
6608 
6609 static void
6610 _spdk_bs_open_blob_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
6611 {
6612 	struct spdk_blob *blob = cb_arg;
6613 
6614 	if (bserrno != 0) {
6615 		_spdk_blob_free(blob);
6616 		seq->cpl.u.blob_handle.blob = NULL;
6617 		bs_sequence_finish(seq, bserrno);
6618 		return;
6619 	}
6620 
6621 	blob->open_ref++;
6622 
6623 	TAILQ_INSERT_HEAD(&blob->bs->blobs, blob, link);
6624 
6625 	bs_sequence_finish(seq, bserrno);
6626 }
6627 
6628 static void _spdk_bs_open_blob(struct spdk_blob_store *bs, spdk_blob_id blobid,
6629 			       struct spdk_blob_open_opts *opts, spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
6630 {
6631 	struct spdk_blob		*blob;
6632 	struct spdk_bs_cpl		cpl;
6633 	struct spdk_blob_open_opts	opts_default;
6634 	spdk_bs_sequence_t		*seq;
6635 	uint32_t			page_num;
6636 
6637 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Opening blob %lu\n", blobid);
6638 	assert(spdk_get_thread() == bs->md_thread);
6639 
6640 	page_num = _spdk_bs_blobid_to_page(blobid);
6641 	if (spdk_bit_array_get(bs->used_blobids, page_num) == false) {
6642 		/* Invalid blobid */
6643 		cb_fn(cb_arg, NULL, -ENOENT);
6644 		return;
6645 	}
6646 
6647 	blob = _spdk_blob_lookup(bs, blobid);
6648 	if (blob) {
6649 		blob->open_ref++;
6650 		cb_fn(cb_arg, blob, 0);
6651 		return;
6652 	}
6653 
6654 	blob = _spdk_blob_alloc(bs, blobid);
6655 	if (!blob) {
6656 		cb_fn(cb_arg, NULL, -ENOMEM);
6657 		return;
6658 	}
6659 
6660 	if (!opts) {
6661 		spdk_blob_open_opts_init(&opts_default);
6662 		opts = &opts_default;
6663 	}
6664 
6665 	blob->clear_method = opts->clear_method;
6666 
6667 	cpl.type = SPDK_BS_CPL_TYPE_BLOB_HANDLE;
6668 	cpl.u.blob_handle.cb_fn = cb_fn;
6669 	cpl.u.blob_handle.cb_arg = cb_arg;
6670 	cpl.u.blob_handle.blob = blob;
6671 
6672 	seq = bs_sequence_start(bs->md_channel, &cpl);
6673 	if (!seq) {
6674 		_spdk_blob_free(blob);
6675 		cb_fn(cb_arg, NULL, -ENOMEM);
6676 		return;
6677 	}
6678 
6679 	_spdk_blob_load(seq, blob, _spdk_bs_open_blob_cpl, blob);
6680 }
6681 
6682 void spdk_bs_open_blob(struct spdk_blob_store *bs, spdk_blob_id blobid,
6683 		       spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
6684 {
6685 	_spdk_bs_open_blob(bs, blobid, NULL, cb_fn, cb_arg);
6686 }
6687 
6688 void spdk_bs_open_blob_ext(struct spdk_blob_store *bs, spdk_blob_id blobid,
6689 			   struct spdk_blob_open_opts *opts, spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
6690 {
6691 	_spdk_bs_open_blob(bs, blobid, opts, cb_fn, cb_arg);
6692 }
6693 
6694 /* END spdk_bs_open_blob */
6695 
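/*
 * Usage sketch for the extended open (the callback name is
 * hypothetical; BLOB_CLEAR_WITH_DEFAULT is assumed to be the default
 * clear method). Opening an already open blob just bumps open_ref and
 * returns the same handle:
 *
 *	struct spdk_blob_open_opts opts;
 *
 *	spdk_blob_open_opts_init(&opts);
 *	opts.clear_method = BLOB_CLEAR_WITH_DEFAULT;
 *	spdk_bs_open_blob_ext(bs, blobid, &opts, open_done, NULL);
 */
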
6696 /* START spdk_blob_set_read_only */
6697 int spdk_blob_set_read_only(struct spdk_blob *blob)
6698 {
6699 	_spdk_blob_verify_md_op(blob);
6700 
6701 	blob->data_ro_flags |= SPDK_BLOB_READ_ONLY;
6702 
6703 	blob->state = SPDK_BLOB_STATE_DIRTY;
6704 	return 0;
6705 }
6706 /* END spdk_blob_set_read_only */
6707 
6708 /* START spdk_blob_sync_md */
6709 
6710 static void
6711 _spdk_blob_sync_md_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
6712 {
6713 	struct spdk_blob *blob = cb_arg;
6714 
6715 	if (bserrno == 0 && (blob->data_ro_flags & SPDK_BLOB_READ_ONLY)) {
6716 		blob->data_ro = true;
6717 		blob->md_ro = true;
6718 	}
6719 
6720 	bs_sequence_finish(seq, bserrno);
6721 }
6722 
6723 static void
6724 _spdk_blob_sync_md(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
6725 {
6726 	struct spdk_bs_cpl	cpl;
6727 	spdk_bs_sequence_t	*seq;
6728 
6729 	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
6730 	cpl.u.blob_basic.cb_fn = cb_fn;
6731 	cpl.u.blob_basic.cb_arg = cb_arg;
6732 
6733 	seq = bs_sequence_start(blob->bs->md_channel, &cpl);
6734 	if (!seq) {
6735 		cb_fn(cb_arg, -ENOMEM);
6736 		return;
6737 	}
6738 
6739 	_spdk_blob_persist(seq, blob, _spdk_blob_sync_md_cpl, blob);
6740 }
6741 
6742 void
6743 spdk_blob_sync_md(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
6744 {
6745 	_spdk_blob_verify_md_op(blob);
6746 
6747 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Syncing blob %lu\n", blob->id);
6748 
6749 	if (blob->md_ro) {
6750 		assert(blob->state == SPDK_BLOB_STATE_CLEAN);
6751 		cb_fn(cb_arg, 0);
6752 		return;
6753 	}
6754 
6755 	_spdk_blob_sync_md(blob, cb_fn, cb_arg);
6756 }
6757 
6758 /* END spdk_blob_sync_md */
6759 
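/*
 * spdk_blob_set_read_only() above only marks the in-memory metadata
 * dirty; pairing it with spdk_blob_sync_md() persists the flag and, on
 * completion, sets data_ro/md_ro on the handle (see
 * _spdk_blob_sync_md_cpl). Sketch, with a hypothetical callback:
 *
 *	spdk_blob_set_read_only(blob);
 *	spdk_blob_sync_md(blob, sync_done, NULL);
 */
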
6760 struct spdk_blob_insert_cluster_ctx {
6761 	struct spdk_thread	*thread;
6762 	struct spdk_blob	*blob;
6763 	uint32_t		cluster_num;	/* cluster index in blob */
6764 	uint32_t		cluster;	/* cluster on disk */
6765 	uint32_t		extent_page;	/* extent page on disk */
6766 	int			rc;
6767 	spdk_blob_op_complete	cb_fn;
6768 	void			*cb_arg;
6769 };
6770 
6771 static void
6772 _spdk_blob_insert_cluster_msg_cpl(void *arg)
6773 {
6774 	struct spdk_blob_insert_cluster_ctx *ctx = arg;
6775 
6776 	ctx->cb_fn(ctx->cb_arg, ctx->rc);
6777 	free(ctx);
6778 }
6779 
6780 static void
6781 _spdk_blob_insert_cluster_msg_cb(void *arg, int bserrno)
6782 {
6783 	struct spdk_blob_insert_cluster_ctx *ctx = arg;
6784 
6785 	ctx->rc = bserrno;
6786 	spdk_thread_send_msg(ctx->thread, _spdk_blob_insert_cluster_msg_cpl, ctx);
6787 }
6788 
6789 static void
6790 _spdk_blob_persist_extent_page_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
6791 {
6792 	struct spdk_blob_md_page        *page = cb_arg;
6793 
6794 	bs_sequence_finish(seq, bserrno);
6795 	spdk_free(page);
6796 }
6797 
6798 static void
6799 _spdk_blob_insert_extent(struct spdk_blob *blob, uint32_t extent, uint64_t cluster_num,
6800 			 spdk_blob_op_complete cb_fn, void *cb_arg)
6801 {
6802 	spdk_bs_sequence_t		*seq;
6803 	struct spdk_bs_cpl		cpl;
6804 	struct spdk_blob_md_page	*page = NULL;
6805 	uint32_t			page_count = 0;
6806 	int				rc;
6807 
6808 	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
6809 	cpl.u.blob_basic.cb_fn = cb_fn;
6810 	cpl.u.blob_basic.cb_arg = cb_arg;
6811 
6812 	seq = bs_sequence_start(blob->bs->md_channel, &cpl);
6813 	if (!seq) {
6814 		cb_fn(cb_arg, -ENOMEM);
6815 		return;
6816 	}
6817 	rc = _spdk_blob_serialize_add_page(blob, &page, &page_count, &page);
6818 	if (rc < 0) {
6819 		bs_sequence_finish(seq, rc);
6820 		return;
6821 	}
6822 
6823 	_spdk_blob_serialize_extent_page(blob, cluster_num, page);
6824 
6825 	page->crc = _spdk_blob_md_page_calc_crc(page);
6826 
6827 	assert(spdk_bit_array_get(blob->bs->used_md_pages, extent) == true);
6828 
6829 	bs_sequence_write_dev(seq, page, _spdk_bs_md_page_to_lba(blob->bs, extent),
6830 			      _spdk_bs_byte_to_lba(blob->bs, SPDK_BS_PAGE_SIZE),
6831 			      _spdk_blob_persist_extent_page_cpl, page);
6832 }
6833 
6834 static void
6835 _spdk_blob_insert_cluster_msg(void *arg)
6836 {
6837 	struct spdk_blob_insert_cluster_ctx *ctx = arg;
6838 	uint32_t *extent_page;
6839 
6840 	ctx->rc = _spdk_blob_insert_cluster(ctx->blob, ctx->cluster_num, ctx->cluster);
6841 	if (ctx->rc != 0) {
6842 		spdk_thread_send_msg(ctx->thread, _spdk_blob_insert_cluster_msg_cpl, ctx);
6843 		return;
6844 	}
6845 
6846 	if (ctx->blob->use_extent_table == false) {
6847 		/* Extent table is not used, proceed with sync of md that will only use extents_rle. */
6848 		ctx->blob->state = SPDK_BLOB_STATE_DIRTY;
6849 		_spdk_blob_sync_md(ctx->blob, _spdk_blob_insert_cluster_msg_cb, ctx);
6850 		return;
6851 	}
6852 
6853 	extent_page = _spdk_bs_cluster_to_extent_page(ctx->blob, ctx->cluster_num);
6854 	if (*extent_page == 0) {
6855 		/* Extent page requires allocation.
6856 		 * It was already claimed in the used_md_pages map and placed in ctx.
6857 		 * Blob persist will take care of writing out the new extent page on disk. */
6858 		assert(ctx->extent_page != 0);
6859 		assert(spdk_bit_array_get(ctx->blob->bs->used_md_pages, ctx->extent_page) == true);
6860 		*extent_page = ctx->extent_page;
6861 		ctx->blob->state = SPDK_BLOB_STATE_DIRTY;
6862 		_spdk_blob_sync_md(ctx->blob, _spdk_blob_insert_cluster_msg_cb, ctx);
6863 	} else {
6864 		/* It is possible for the original thread to have allocated an extent page for
6865 		 * a different cluster in the same extent page. In that case proceed with
6866 		 * updating the existing extent page, but release the additional one. */
6867 		if (ctx->extent_page != 0) {
6868 			assert(spdk_bit_array_get(ctx->blob->bs->used_md_pages, ctx->extent_page) == true);
6869 			_spdk_bs_release_md_page(ctx->blob->bs, ctx->extent_page);
6870 		}
6871 		/* Extent page already allocated.
6872 		 * Every cluster allocation requires just an update of a single extent page. */
6873 		_spdk_blob_insert_extent(ctx->blob, *extent_page, ctx->cluster_num,
6874 					 _spdk_blob_insert_cluster_msg_cb, ctx);
6875 	}
6876 }
6877 
6878 static void
6879 _spdk_blob_insert_cluster_on_md_thread(struct spdk_blob *blob, uint32_t cluster_num,
6880 				       uint64_t cluster, uint32_t extent_page, spdk_blob_op_complete cb_fn, void *cb_arg)
6881 {
6882 	struct spdk_blob_insert_cluster_ctx *ctx;
6883 
6884 	ctx = calloc(1, sizeof(*ctx));
6885 	if (ctx == NULL) {
6886 		cb_fn(cb_arg, -ENOMEM);
6887 		return;
6888 	}
6889 
6890 	ctx->thread = spdk_get_thread();
6891 	ctx->blob = blob;
6892 	ctx->cluster_num = cluster_num;
6893 	ctx->cluster = cluster;
6894 	ctx->extent_page = extent_page;
6895 	ctx->cb_fn = cb_fn;
6896 	ctx->cb_arg = cb_arg;
6897 
6898 	spdk_thread_send_msg(blob->bs->md_thread, _spdk_blob_insert_cluster_msg, ctx);
6899 }
6900 
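/*
 * Cluster insertion is marshalled across threads: the I/O thread that
 * allocated the cluster (and possibly an extent page) sends a message
 * to the metadata thread, which updates the cluster map and syncs
 * metadata, and the completion is then messaged back to the
 * originating thread.
 */
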
6901 /* START spdk_blob_close */
6902 
6903 static void
6904 _spdk_blob_close_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
6905 {
6906 	struct spdk_blob *blob = cb_arg;
6907 
6908 	if (bserrno == 0) {
6909 		blob->open_ref--;
6910 		if (blob->open_ref == 0) {
6911 			/*
6912 			 * Blobs with active.num_pages == 0 are deleted blobs.
6913 			 *  These blobs are removed from the blob_store list
6914 			 *  when the deletion process starts - so don't try to
6915 			 *  remove them again.
6916 			 */
6917 			if (blob->active.num_pages > 0) {
6918 				TAILQ_REMOVE(&blob->bs->blobs, blob, link);
6919 			}
6920 			_spdk_blob_free(blob);
6921 		}
6922 	}
6923 
6924 	bs_sequence_finish(seq, bserrno);
6925 }
6926 
6927 void spdk_blob_close(struct spdk_blob *blob, spdk_blob_op_complete cb_fn, void *cb_arg)
6928 {
6929 	struct spdk_bs_cpl	cpl;
6930 	spdk_bs_sequence_t	*seq;
6931 
6932 	_spdk_blob_verify_md_op(blob);
6933 
6934 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Closing blob %lu\n", blob->id);
6935 
6936 	if (blob->open_ref == 0) {
6937 		cb_fn(cb_arg, -EBADF);
6938 		return;
6939 	}
6940 
6941 	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
6942 	cpl.u.blob_basic.cb_fn = cb_fn;
6943 	cpl.u.blob_basic.cb_arg = cb_arg;
6944 
6945 	seq = bs_sequence_start(blob->bs->md_channel, &cpl);
6946 	if (!seq) {
6947 		cb_fn(cb_arg, -ENOMEM);
6948 		return;
6949 	}
6950 
6951 	/* Sync metadata */
6952 	_spdk_blob_persist(seq, blob, _spdk_blob_close_cpl, blob);
6953 }
6954 
6955 /* END spdk_blob_close */
6956 
6957 struct spdk_io_channel *spdk_bs_alloc_io_channel(struct spdk_blob_store *bs)
6958 {
6959 	return spdk_get_io_channel(bs);
6960 }
6961 
6962 void spdk_bs_free_io_channel(struct spdk_io_channel *channel)
6963 {
6964 	spdk_put_io_channel(channel);
6965 }
6966 
void spdk_blob_io_unmap(struct spdk_blob *blob, struct spdk_io_channel *channel,
			uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg)
{
	_spdk_blob_request_submit_op(blob, channel, NULL, offset, length, cb_fn, cb_arg,
				     SPDK_BLOB_UNMAP);
}

void spdk_blob_io_write_zeroes(struct spdk_blob *blob, struct spdk_io_channel *channel,
			       uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg)
{
	_spdk_blob_request_submit_op(blob, channel, NULL, offset, length, cb_fn, cb_arg,
				     SPDK_BLOB_WRITE_ZEROES);
}

void spdk_blob_io_write(struct spdk_blob *blob, struct spdk_io_channel *channel,
			void *payload, uint64_t offset, uint64_t length,
			spdk_blob_op_complete cb_fn, void *cb_arg)
{
	_spdk_blob_request_submit_op(blob, channel, payload, offset, length, cb_fn, cb_arg,
				     SPDK_BLOB_WRITE);
}

void spdk_blob_io_read(struct spdk_blob *blob, struct spdk_io_channel *channel,
		       void *payload, uint64_t offset, uint64_t length,
		       spdk_blob_op_complete cb_fn, void *cb_arg)
{
	_spdk_blob_request_submit_op(blob, channel, payload, offset, length, cb_fn, cb_arg,
				     SPDK_BLOB_READ);
}

void spdk_blob_io_writev(struct spdk_blob *blob, struct spdk_io_channel *channel,
			 struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
			 spdk_blob_op_complete cb_fn, void *cb_arg)
{
	_spdk_blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, false);
}

void spdk_blob_io_readv(struct spdk_blob *blob, struct spdk_io_channel *channel,
			struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
			spdk_blob_op_complete cb_fn, void *cb_arg)
{
	_spdk_blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, true);
}

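/*
 * Usage sketch (illustrative): offset and length for the spdk_blob_io_*
 * calls are expressed in io units, not bytes, and the payload is handed to
 * the backing bs_dev, so it should generally come from DMA-safe memory
 * (e.g. spdk_malloc()). Reading one io unit at offset 0, with a
 * hypothetical my_read_done callback:
 *
 *	uint64_t io_unit_size = spdk_bs_get_io_unit_size(bs);
 *	uint8_t *buf = spdk_malloc(io_unit_size, 0x1000, NULL,
 *				   SPDK_ENV_SOCKET_ID_ANY, SPDK_MALLOC_DMA);
 *
 *	if (buf != NULL) {
 *		spdk_blob_io_read(blob, channel, buf, 0, 1, my_read_done, NULL);
 *	}
 */
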
struct spdk_bs_iter_ctx {
	int64_t page_num;
	struct spdk_blob_store *bs;

	spdk_blob_op_with_handle_complete cb_fn;
	void *cb_arg;
};

static void
_spdk_bs_iter_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
{
	struct spdk_bs_iter_ctx *ctx = cb_arg;
	struct spdk_blob_store *bs = ctx->bs;
	spdk_blob_id id;

	if (bserrno == 0) {
		ctx->cb_fn(ctx->cb_arg, _blob, bserrno);
		free(ctx);
		return;
	}

	ctx->page_num++;
	ctx->page_num = spdk_bit_array_find_first_set(bs->used_blobids, ctx->page_num);
	if (ctx->page_num >= spdk_bit_array_capacity(bs->used_blobids)) {
		ctx->cb_fn(ctx->cb_arg, NULL, -ENOENT);
		free(ctx);
		return;
	}

	id = _spdk_bs_page_to_blobid(ctx->page_num);

	spdk_bs_open_blob(bs, id, _spdk_bs_iter_cpl, ctx);
}

void
spdk_bs_iter_first(struct spdk_blob_store *bs,
		   spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
{
	struct spdk_bs_iter_ctx *ctx;

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(cb_arg, NULL, -ENOMEM);
		return;
	}

	ctx->page_num = -1;
	ctx->bs = bs;
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;

	_spdk_bs_iter_cpl(ctx, NULL, -1);
}

static void
_spdk_bs_iter_close_cpl(void *cb_arg, int bserrno)
{
	struct spdk_bs_iter_ctx *ctx = cb_arg;

	_spdk_bs_iter_cpl(ctx, NULL, -1);
}

void
spdk_bs_iter_next(struct spdk_blob_store *bs, struct spdk_blob *blob,
		  spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
{
	struct spdk_bs_iter_ctx *ctx;

	assert(blob != NULL);

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(cb_arg, NULL, -ENOMEM);
		return;
	}

	ctx->page_num = _spdk_bs_blobid_to_page(blob->id);
	ctx->bs = bs;
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;

	/* Close the existing blob */
	spdk_blob_close(blob, _spdk_bs_iter_close_cpl, ctx);
}
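
/*
 * Usage sketch (illustrative): the iterator walks used_blobids in page
 * order, opening each blob in turn, and reports -ENOENT once exhausted.
 * Note that spdk_bs_iter_next() closes the blob handed to it, so the
 * caller must be done with it. my_iter_cb is hypothetical.
 *
 *	static void
 *	my_iter_cb(void *cb_arg, struct spdk_blob *blob, int bserrno)
 *	{
 *		if (bserrno != 0) {
 *			// -ENOENT means iteration is complete; anything
 *			// else means a blob failed to open.
 *			return;
 *		}
 *		...inspect blob...
 *		spdk_bs_iter_next(bs, blob, my_iter_cb, cb_arg);
 *	}
 *
 *	spdk_bs_iter_first(bs, my_iter_cb, NULL);
 */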

static int
_spdk_blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value,
		     uint16_t value_len, bool internal)
{
	struct spdk_xattr_tailq *xattrs;
	struct spdk_xattr	*xattr;
	size_t			desc_size;
	void			*tmp;

	_spdk_blob_verify_md_op(blob);

	if (blob->md_ro) {
		return -EPERM;
	}

	desc_size = sizeof(struct spdk_blob_md_descriptor_xattr) + strlen(name) + value_len;
	if (desc_size > SPDK_BS_MAX_DESC_SIZE) {
		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Xattr '%s' of size %zu does not fit into single page %zu\n", name,
			      desc_size, SPDK_BS_MAX_DESC_SIZE);
		return -ENOMEM;
	}

	if (internal) {
		xattrs = &blob->xattrs_internal;
		blob->invalid_flags |= SPDK_BLOB_INTERNAL_XATTR;
	} else {
		xattrs = &blob->xattrs;
	}

	TAILQ_FOREACH(xattr, xattrs, link) {
		if (!strcmp(name, xattr->name)) {
			/* Allocate the new value first so the old one is
			 * left intact if allocation fails. */
			tmp = malloc(value_len);
			if (!tmp) {
				return -ENOMEM;
			}
			memcpy(tmp, value, value_len);
			free(xattr->value);
			xattr->value_len = value_len;
			xattr->value = tmp;

			blob->state = SPDK_BLOB_STATE_DIRTY;

			return 0;
		}
	}

	xattr = calloc(1, sizeof(*xattr));
	if (!xattr) {
		return -ENOMEM;
	}

	xattr->name = strdup(name);
	if (!xattr->name) {
		free(xattr);
		return -ENOMEM;
	}

	xattr->value_len = value_len;
	xattr->value = malloc(value_len);
	if (!xattr->value) {
		free(xattr->name);
		free(xattr);
		return -ENOMEM;
	}
	memcpy(xattr->value, value, value_len);
	TAILQ_INSERT_TAIL(xattrs, xattr, link);

	blob->state = SPDK_BLOB_STATE_DIRTY;

	return 0;
}

int
spdk_blob_set_xattr(struct spdk_blob *blob, const char *name, const void *value,
		    uint16_t value_len)
{
	return _spdk_blob_set_xattr(blob, name, value, value_len, false);
}
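
/*
 * Usage sketch (illustrative): xattr values are copied on set, so the
 * caller's buffer may live on the stack. Setting requires the md thread
 * and a blob whose metadata is writable (not md_ro):
 *
 *	const char *val = "bar";
 *	int rc;
 *
 *	rc = spdk_blob_set_xattr(blob, "foo", val, strlen(val) + 1);
 *	if (rc != 0) {
 *		// -EPERM if the blob metadata is read-only; -ENOMEM if the
 *		// descriptor would not fit in a metadata page or an
 *		// allocation failed.
 *	}
 */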

static int
_spdk_blob_remove_xattr(struct spdk_blob *blob, const char *name, bool internal)
{
	struct spdk_xattr_tailq *xattrs;
	struct spdk_xattr	*xattr;

	_spdk_blob_verify_md_op(blob);

	if (blob->md_ro) {
		return -EPERM;
	}
	xattrs = internal ? &blob->xattrs_internal : &blob->xattrs;

	TAILQ_FOREACH(xattr, xattrs, link) {
		if (!strcmp(name, xattr->name)) {
			TAILQ_REMOVE(xattrs, xattr, link);
			free(xattr->value);
			free(xattr->name);
			free(xattr);

			if (internal && TAILQ_EMPTY(&blob->xattrs_internal)) {
				blob->invalid_flags &= ~SPDK_BLOB_INTERNAL_XATTR;
			}
			blob->state = SPDK_BLOB_STATE_DIRTY;

			return 0;
		}
	}

	return -ENOENT;
}

int
spdk_blob_remove_xattr(struct spdk_blob *blob, const char *name)
{
	return _spdk_blob_remove_xattr(blob, name, false);
}

static int
_spdk_blob_get_xattr_value(struct spdk_blob *blob, const char *name,
			   const void **value, size_t *value_len, bool internal)
{
	struct spdk_xattr	*xattr;
	struct spdk_xattr_tailq *xattrs;

	xattrs = internal ? &blob->xattrs_internal : &blob->xattrs;

	TAILQ_FOREACH(xattr, xattrs, link) {
		if (!strcmp(name, xattr->name)) {
			*value = xattr->value;
			*value_len = xattr->value_len;
			return 0;
		}
	}
	return -ENOENT;
}

int
spdk_blob_get_xattr_value(struct spdk_blob *blob, const char *name,
			  const void **value, size_t *value_len)
{
	_spdk_blob_verify_md_op(blob);

	return _spdk_blob_get_xattr_value(blob, name, value, value_len, false);
}

struct spdk_xattr_names {
	uint32_t	count;
	const char	*names[0];
};

static int
_spdk_blob_get_xattr_names(struct spdk_xattr_tailq *xattrs, struct spdk_xattr_names **names)
{
	struct spdk_xattr	*xattr;
	int			count = 0;

	TAILQ_FOREACH(xattr, xattrs, link) {
		count++;
	}

	*names = calloc(1, sizeof(struct spdk_xattr_names) + count * sizeof(char *));
	if (*names == NULL) {
		return -ENOMEM;
	}

	TAILQ_FOREACH(xattr, xattrs, link) {
		(*names)->names[(*names)->count++] = xattr->name;
	}

	return 0;
}

int
spdk_blob_get_xattr_names(struct spdk_blob *blob, struct spdk_xattr_names **names)
{
	_spdk_blob_verify_md_op(blob);

	return _spdk_blob_get_xattr_names(&blob->xattrs, names);
}

uint32_t
spdk_xattr_names_get_count(struct spdk_xattr_names *names)
{
	assert(names != NULL);

	return names->count;
}

const char *
spdk_xattr_names_get_name(struct spdk_xattr_names *names, uint32_t index)
{
	if (index >= names->count) {
		return NULL;
	}

	return names->names[index];
}

void
spdk_xattr_names_free(struct spdk_xattr_names *names)
{
	free(names);
}
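
/*
 * Usage sketch (illustrative): enumerating xattr names. The returned
 * strings point into the blob's own xattr list, so they remain valid only
 * while the blob stays open and the xattrs are not removed; free only the
 * container.
 *
 *	struct spdk_xattr_names *names;
 *	uint32_t i;
 *
 *	if (spdk_blob_get_xattr_names(blob, &names) == 0) {
 *		for (i = 0; i < spdk_xattr_names_get_count(names); i++) {
 *			printf("xattr: %s\n", spdk_xattr_names_get_name(names, i));
 *		}
 *		spdk_xattr_names_free(names);
 *	}
 */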

struct spdk_bs_type
spdk_bs_get_bstype(struct spdk_blob_store *bs)
{
	return bs->bstype;
}

void
spdk_bs_set_bstype(struct spdk_blob_store *bs, struct spdk_bs_type bstype)
{
	memcpy(&bs->bstype, &bstype, sizeof(bstype));
}

bool
spdk_blob_is_read_only(struct spdk_blob *blob)
{
	assert(blob != NULL);
	return (blob->data_ro || blob->md_ro);
}

bool
spdk_blob_is_snapshot(struct spdk_blob *blob)
{
	struct spdk_blob_list *snapshot_entry;

	assert(blob != NULL);

	snapshot_entry = _spdk_bs_get_snapshot_entry(blob->bs, blob->id);
	if (snapshot_entry == NULL) {
		return false;
	}

	return true;
}

bool
spdk_blob_is_clone(struct spdk_blob *blob)
{
	assert(blob != NULL);

	if (blob->parent_id != SPDK_BLOBID_INVALID) {
		assert(spdk_blob_is_thin_provisioned(blob));
		return true;
	}

	return false;
}

bool
spdk_blob_is_thin_provisioned(struct spdk_blob *blob)
{
	assert(blob != NULL);
	return !!(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
}

static void
_spdk_blob_update_clear_method(struct spdk_blob *blob)
{
	enum blob_clear_method stored_cm;

	assert(blob != NULL);

	/* If BLOB_CLEAR_WITH_DEFAULT was passed in, use the setting stored
	 * in metadata previously.  If something other than the default was
	 * specified, ignore the stored value and use what was passed in.
	 */
	stored_cm = ((blob->md_ro_flags & SPDK_BLOB_CLEAR_METHOD) >> SPDK_BLOB_CLEAR_METHOD_SHIFT);

	if (blob->clear_method == BLOB_CLEAR_WITH_DEFAULT) {
		blob->clear_method = stored_cm;
	} else if (blob->clear_method != stored_cm) {
		SPDK_WARNLOG("Using passed in clear method 0x%x instead of stored value of 0x%x\n",
			     blob->clear_method, stored_cm);
	}
}

spdk_blob_id
spdk_blob_get_parent_snapshot(struct spdk_blob_store *bs, spdk_blob_id blob_id)
{
	struct spdk_blob_list *snapshot_entry = NULL;
	struct spdk_blob_list *clone_entry = NULL;

	TAILQ_FOREACH(snapshot_entry, &bs->snapshots, link) {
		TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) {
			if (clone_entry->id == blob_id) {
				return snapshot_entry->id;
			}
		}
	}

	return SPDK_BLOBID_INVALID;
}

int
spdk_blob_get_clones(struct spdk_blob_store *bs, spdk_blob_id blobid, spdk_blob_id *ids,
		     size_t *count)
{
	struct spdk_blob_list *snapshot_entry, *clone_entry;
	size_t n;

	snapshot_entry = _spdk_bs_get_snapshot_entry(bs, blobid);
	if (snapshot_entry == NULL) {
		*count = 0;
		return 0;
	}

	if (ids == NULL || *count < snapshot_entry->clone_count) {
		*count = snapshot_entry->clone_count;
		return -ENOMEM;
	}
	*count = snapshot_entry->clone_count;

	n = 0;
	TAILQ_FOREACH(clone_entry, &snapshot_entry->clones, link) {
		ids[n++] = clone_entry->id;
	}

	return 0;
}
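
/*
 * Usage sketch (illustrative): spdk_blob_get_clones() follows the usual
 * two-call sizing pattern - query with ids == NULL (or a too-small count)
 * to learn the required size via -ENOMEM, then call again with a large
 * enough array. A return of 0 on the first call means there are no clones.
 *
 *	size_t count = 0;
 *	spdk_blob_id *ids;
 *
 *	if (spdk_blob_get_clones(bs, snapshot_id, NULL, &count) == -ENOMEM &&
 *	    count > 0) {
 *		ids = calloc(count, sizeof(*ids));
 *		if (ids != NULL &&
 *		    spdk_blob_get_clones(bs, snapshot_id, ids, &count) == 0) {
 *			...count clone ids are now in ids...
 *		}
 *		free(ids);
 *	}
 */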

SPDK_LOG_REGISTER_COMPONENT("blob", SPDK_LOG_BLOB)