/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "spdk/blob.h"
#include "spdk/crc32.h"
#include "spdk/env.h"
#include "spdk/queue.h"
#include "spdk/io_channel.h"
#include "spdk/bit_array.h"
#include "spdk/likely.h"

#include "spdk_internal/log.h"

#include "blobstore.h"

#define BLOB_CRC32C_INITIAL    0xffffffffUL

static int spdk_bs_register_md_thread(struct spdk_blob_store *bs);
static int spdk_bs_unregister_md_thread(struct spdk_blob_store *bs);
static void _spdk_blob_close_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno);

static inline size_t
divide_round_up(size_t num, size_t divisor)
{
	return (num + divisor - 1) / divisor;
}
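
/*
 * For example, divide_round_up(10, 4) == 3: plain integer division would
 * yield 2, and adding (divisor - 1) before dividing rounds any remainder up.
 */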

static void
_spdk_bs_claim_cluster(struct spdk_blob_store *bs, uint32_t cluster_num)
{
	assert(cluster_num < spdk_bit_array_capacity(bs->used_clusters));
	assert(spdk_bit_array_get(bs->used_clusters, cluster_num) == false);
	assert(bs->num_free_clusters > 0);

	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Claiming cluster %u\n", cluster_num);

	spdk_bit_array_set(bs->used_clusters, cluster_num);
	bs->num_free_clusters--;
}

static int
_spdk_bs_allocate_cluster(struct spdk_blob_data *blob, uint32_t cluster_num,
			  uint64_t *lowest_free_cluster)
{
	*lowest_free_cluster = spdk_bit_array_find_first_clear(blob->bs->used_clusters,
			       *lowest_free_cluster);
	if (*lowest_free_cluster >= blob->bs->total_clusters) {
		/* No more free clusters. Cannot satisfy the request */
		return -ENOSPC;
	}

	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Claiming cluster %lu for blob %lu\n", *lowest_free_cluster, blob->id);
	_spdk_bs_claim_cluster(blob->bs, *lowest_free_cluster);
	blob->active.clusters[cluster_num] = _spdk_bs_cluster_to_lba(blob->bs, *lowest_free_cluster);

	return 0;
}

static void
_spdk_bs_release_cluster(struct spdk_blob_store *bs, uint32_t cluster_num)
{
	assert(cluster_num < spdk_bit_array_capacity(bs->used_clusters));
	assert(spdk_bit_array_get(bs->used_clusters, cluster_num) == true);
	assert(bs->num_free_clusters < bs->total_clusters);

	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Releasing cluster %u\n", cluster_num);

	spdk_bit_array_clear(bs->used_clusters, cluster_num);
	bs->num_free_clusters++;
}

void
spdk_blob_opts_init(struct spdk_blob_opts *opts)
{
	opts->num_clusters = 0;
	opts->thin_provision = false;
	opts->xattr_count = 0;
	opts->xattr_names = NULL;
	opts->xattr_ctx = NULL;
	opts->get_xattr_value = NULL;
}
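
/*
 * Typical usage (a sketch; spdk_bs_create_blob_ext() is the opts-aware
 * creation call declared in include/spdk/blob.h, and blob_create_cb is a
 * caller-supplied completion callback):
 *
 *	struct spdk_blob_opts opts;
 *
 *	spdk_blob_opts_init(&opts);
 *	opts.num_clusters = 16;
 *	spdk_bs_create_blob_ext(bs, &opts, blob_create_cb, cb_arg);
 *
 * Initializing with spdk_blob_opts_init() first keeps callers forward
 * compatible if new fields are added to spdk_blob_opts.
 */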

static struct spdk_blob_data *
_spdk_blob_alloc(struct spdk_blob_store *bs, spdk_blob_id id)
{
	struct spdk_blob_data *blob;

	blob = calloc(1, sizeof(*blob));
	if (!blob) {
		return NULL;
	}

	blob->id = id;
	blob->bs = bs;

	blob->state = SPDK_BLOB_STATE_DIRTY;
	blob->active.num_pages = 1;
	blob->active.pages = calloc(1, sizeof(*blob->active.pages));
	if (!blob->active.pages) {
		free(blob);
		return NULL;
	}

	blob->active.pages[0] = _spdk_bs_blobid_to_page(id);

	TAILQ_INIT(&blob->xattrs);

	return blob;
}

static void
_spdk_blob_free(struct spdk_blob_data *blob)
{
	struct spdk_xattr	*xattr, *xattr_tmp;

	assert(blob != NULL);

	free(blob->active.clusters);
	free(blob->clean.clusters);
	free(blob->active.pages);
	free(blob->clean.pages);

	TAILQ_FOREACH_SAFE(xattr, &blob->xattrs, link, xattr_tmp) {
		TAILQ_REMOVE(&blob->xattrs, xattr, link);
		free(xattr->name);
		free(xattr->value);
		free(xattr);
	}

	free(blob);
}

static int
_spdk_blob_mark_clean(struct spdk_blob_data *blob)
{
	uint64_t *clusters = NULL;
	uint32_t *pages = NULL;

	assert(blob != NULL);
	assert(blob->state == SPDK_BLOB_STATE_LOADING ||
	       blob->state == SPDK_BLOB_STATE_SYNCING);

	if (blob->active.num_clusters) {
		assert(blob->active.clusters);
		clusters = calloc(blob->active.num_clusters, sizeof(*blob->active.clusters));
		if (!clusters) {
			return -1;
		}
		memcpy(clusters, blob->active.clusters, blob->active.num_clusters * sizeof(*clusters));
	}

	if (blob->active.num_pages) {
		assert(blob->active.pages);
		pages = calloc(blob->active.num_pages, sizeof(*blob->active.pages));
		if (!pages) {
			free(clusters);
			return -1;
		}
		memcpy(pages, blob->active.pages, blob->active.num_pages * sizeof(*pages));
	}

	free(blob->clean.clusters);
	free(blob->clean.pages);

	blob->clean.num_clusters = blob->active.num_clusters;
	blob->clean.clusters = blob->active.clusters;
	blob->clean.num_pages = blob->active.num_pages;
	blob->clean.pages = blob->active.pages;

	blob->active.clusters = clusters;
	blob->active.pages = pages;

	blob->state = SPDK_BLOB_STATE_CLEAN;

	return 0;
}
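
/*
 * Note on the swap above: the arrays that described the "active" state are
 * moved into "clean" (they now describe exactly what is on disk), and
 * "active" receives freshly allocated copies of the same contents. Later
 * mutations of the active arrays therefore never disturb the clean snapshot.
 */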

static int
_spdk_blob_parse_page(const struct spdk_blob_md_page *page, struct spdk_blob_data *blob)
{
	struct spdk_blob_md_descriptor *desc;
	size_t	cur_desc = 0;
	void *tmp;

	desc = (struct spdk_blob_md_descriptor *)page->descriptors;
	while (cur_desc < sizeof(page->descriptors)) {
		if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) {
			if (desc->length == 0) {
				/* If padding and length are 0, this terminates the page */
				break;
			}
		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) {
			struct spdk_blob_md_descriptor_flags	*desc_flags;

			desc_flags = (struct spdk_blob_md_descriptor_flags *)desc;

			if (desc_flags->length != sizeof(*desc_flags) - sizeof(*desc)) {
				return -EINVAL;
			}

			if ((desc_flags->invalid_flags | SPDK_BLOB_INVALID_FLAGS_MASK) !=
			    SPDK_BLOB_INVALID_FLAGS_MASK) {
				return -EINVAL;
			}

			if ((desc_flags->data_ro_flags | SPDK_BLOB_DATA_RO_FLAGS_MASK) !=
			    SPDK_BLOB_DATA_RO_FLAGS_MASK) {
				blob->data_ro = true;
				blob->md_ro = true;
			}

			if ((desc_flags->md_ro_flags | SPDK_BLOB_MD_RO_FLAGS_MASK) !=
			    SPDK_BLOB_MD_RO_FLAGS_MASK) {
				blob->md_ro = true;
			}

			if ((desc_flags->data_ro_flags & SPDK_BLOB_READ_ONLY)) {
				blob->data_ro = true;
				blob->md_ro = true;
			}

			blob->invalid_flags = desc_flags->invalid_flags;
			blob->data_ro_flags = desc_flags->data_ro_flags;
			blob->md_ro_flags = desc_flags->md_ro_flags;

		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT) {
			struct spdk_blob_md_descriptor_extent	*desc_extent;
			unsigned int				i, j;
			unsigned int				cluster_count = blob->active.num_clusters;

			desc_extent = (struct spdk_blob_md_descriptor_extent *)desc;

			if (desc_extent->length == 0 ||
			    (desc_extent->length % sizeof(desc_extent->extents[0]) != 0)) {
				return -EINVAL;
			}

			for (i = 0; i < desc_extent->length / sizeof(desc_extent->extents[0]); i++) {
				for (j = 0; j < desc_extent->extents[i].length; j++) {
					if (!spdk_bit_array_get(blob->bs->used_clusters,
								desc_extent->extents[i].cluster_idx + j)) {
						return -EINVAL;
					}
					cluster_count++;
				}
			}

			if (cluster_count == 0) {
				return -EINVAL;
			}
			tmp = realloc(blob->active.clusters, cluster_count * sizeof(uint64_t));
			if (tmp == NULL) {
				return -ENOMEM;
			}
			blob->active.clusters = tmp;
			blob->active.cluster_array_size = cluster_count;

			for (i = 0; i < desc_extent->length / sizeof(desc_extent->extents[0]); i++) {
				for (j = 0; j < desc_extent->extents[i].length; j++) {
					if (desc_extent->extents[i].cluster_idx != 0) {
						blob->active.clusters[blob->active.num_clusters++] = _spdk_bs_cluster_to_lba(blob->bs,
								desc_extent->extents[i].cluster_idx + j);
					} else if (spdk_blob_is_thin_provisioned(blob)) {
						blob->active.clusters[blob->active.num_clusters++] = 0;
					} else {
						return -EINVAL;
					}
				}
			}

		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) {
			struct spdk_blob_md_descriptor_xattr	*desc_xattr;
			struct spdk_xattr			*xattr;

			desc_xattr = (struct spdk_blob_md_descriptor_xattr *)desc;

			if (desc_xattr->length != sizeof(desc_xattr->name_length) +
			    sizeof(desc_xattr->value_length) +
			    desc_xattr->name_length + desc_xattr->value_length) {
				return -EINVAL;
			}

			xattr = calloc(1, sizeof(*xattr));
			if (xattr == NULL) {
				return -ENOMEM;
			}

			xattr->name = malloc(desc_xattr->name_length + 1);
			if (xattr->name == NULL) {
				free(xattr);
				return -ENOMEM;
			}
			strncpy(xattr->name, desc_xattr->name, desc_xattr->name_length);
			xattr->name[desc_xattr->name_length] = '\0';

			xattr->value = malloc(desc_xattr->value_length);
			if (xattr->value == NULL) {
				free(xattr->name);
				free(xattr);
				return -ENOMEM;
			}
			xattr->value_len = desc_xattr->value_length;
			memcpy(xattr->value,
			       (void *)((uintptr_t)desc_xattr->name + desc_xattr->name_length),
			       desc_xattr->value_length);

			TAILQ_INSERT_TAIL(&blob->xattrs, xattr, link);
		} else {
			/* Unrecognized descriptor type.  Do not fail - just continue to the
			 *  next descriptor.  If this descriptor is associated with some feature
			 *  defined in a newer version of blobstore, that version of blobstore
			 *  should create and set an associated feature flag to specify if this
			 *  blob can be loaded or not.
			 */
		}

		/* Advance to the next descriptor */
		cur_desc += sizeof(*desc) + desc->length;
		if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) {
			break;
		}
		desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc);
	}

	return 0;
}

static int
_spdk_blob_parse(const struct spdk_blob_md_page *pages, uint32_t page_count,
		 struct spdk_blob_data *blob)
{
	const struct spdk_blob_md_page *page;
	uint32_t i;
	int rc;

	assert(page_count > 0);
	assert(pages[0].sequence_num == 0);
	assert(blob != NULL);
	assert(blob->state == SPDK_BLOB_STATE_LOADING);
	assert(blob->active.clusters == NULL);

	/* The blobid provided doesn't match what's in the MD; this can
	 * happen, for example, if a bogus blobid is passed in through open.
	 */
	if (blob->id != pages[0].id) {
		SPDK_ERRLOG("Blobid (%lu) doesn't match what's in metadata (%lu)\n",
			    blob->id, pages[0].id);
		return -ENOENT;
	}

	for (i = 0; i < page_count; i++) {
		page = &pages[i];

		assert(page->id == blob->id);
		assert(page->sequence_num == i);

		rc = _spdk_blob_parse_page(page, blob);
		if (rc != 0) {
			return rc;
		}
	}

	return 0;
}

static int
_spdk_blob_serialize_add_page(const struct spdk_blob_data *blob,
			      struct spdk_blob_md_page **pages,
			      uint32_t *page_count,
			      struct spdk_blob_md_page **last_page)
{
	struct spdk_blob_md_page *page;

	assert(pages != NULL);
	assert(page_count != NULL);

	if (*page_count == 0) {
		assert(*pages == NULL);
		*page_count = 1;
		*pages = spdk_dma_malloc(SPDK_BS_PAGE_SIZE,
					 SPDK_BS_PAGE_SIZE,
					 NULL);
	} else {
		assert(*pages != NULL);
		(*page_count)++;
		*pages = spdk_dma_realloc(*pages,
					  SPDK_BS_PAGE_SIZE * (*page_count),
					  SPDK_BS_PAGE_SIZE,
					  NULL);
	}

	if (*pages == NULL) {
		*page_count = 0;
		*last_page = NULL;
		return -ENOMEM;
	}

	page = &(*pages)[*page_count - 1];
	memset(page, 0, sizeof(*page));
	page->id = blob->id;
	page->sequence_num = *page_count - 1;
	page->next = SPDK_INVALID_MD_PAGE;
	*last_page = page;

	return 0;
}

/* Transform the in-memory representation 'xattr' into an on-disk xattr descriptor.
 * Update required_sz on both success and failure.
 */
static int
_spdk_blob_serialize_xattr(const struct spdk_xattr *xattr,
			   uint8_t *buf, size_t buf_sz,
			   size_t *required_sz)
{
	struct spdk_blob_md_descriptor_xattr	*desc;

	*required_sz = sizeof(struct spdk_blob_md_descriptor_xattr) +
		       strlen(xattr->name) +
		       xattr->value_len;

	if (buf_sz < *required_sz) {
		return -1;
	}

	desc = (struct spdk_blob_md_descriptor_xattr *)buf;

	desc->type = SPDK_MD_DESCRIPTOR_TYPE_XATTR;
	desc->length = sizeof(desc->name_length) +
		       sizeof(desc->value_length) +
		       strlen(xattr->name) +
		       xattr->value_len;
	desc->name_length = strlen(xattr->name);
	desc->value_length = xattr->value_len;

	memcpy(desc->name, xattr->name, desc->name_length);
	memcpy((void *)((uintptr_t)desc->name + desc->name_length),
	       xattr->value,
	       desc->value_length);

	return 0;
}
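
/*
 * On-disk layout produced above; the name and value bytes are packed back to
 * back, with no padding, immediately after the fixed-size fields:
 *
 *	+------+--------+-------------+--------------+----------+-----------+
 *	| type | length | name_length | value_length | name ... | value ... |
 *	+------+--------+-------------+--------------+----------+-----------+
 *
 * desc->length counts everything after the generic type/length header, which
 * is why it is name_length + value_length plus the two length fields.
 */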

static void
_spdk_blob_serialize_extent(const struct spdk_blob_data *blob,
			    uint64_t start_cluster, uint64_t *next_cluster,
			    uint8_t *buf, size_t buf_sz)
{
	struct spdk_blob_md_descriptor_extent *desc;
	size_t cur_sz;
	uint64_t i, extent_idx;
	/* LBAs are 64-bit values; narrower locals could truncate them on large devices */
	uint64_t lba, lba_per_cluster, lba_count;

	/* The buffer must have room for at least one extent */
	cur_sz = sizeof(struct spdk_blob_md_descriptor) + sizeof(desc->extents[0]);
	if (buf_sz < cur_sz) {
		*next_cluster = start_cluster;
		return;
	}

	desc = (struct spdk_blob_md_descriptor_extent *)buf;
	desc->type = SPDK_MD_DESCRIPTOR_TYPE_EXTENT;

	lba_per_cluster = _spdk_bs_cluster_to_lba(blob->bs, 1);

	lba = blob->active.clusters[start_cluster];
	lba_count = lba_per_cluster;
	extent_idx = 0;
	for (i = start_cluster + 1; i < blob->active.num_clusters; i++) {
		if ((lba + lba_count) == blob->active.clusters[i]) {
			lba_count += lba_per_cluster;
			continue;
		}
		desc->extents[extent_idx].cluster_idx = lba / lba_per_cluster;
		desc->extents[extent_idx].length = lba_count / lba_per_cluster;
		extent_idx++;

		cur_sz += sizeof(desc->extents[extent_idx]);

		if (buf_sz < cur_sz) {
			/* If we ran out of buffer space, return */
			desc->length = sizeof(desc->extents[0]) * extent_idx;
			*next_cluster = i;
			return;
		}

		lba = blob->active.clusters[i];
		lba_count = lba_per_cluster;
	}

	desc->extents[extent_idx].cluster_idx = lba / lba_per_cluster;
	desc->extents[extent_idx].length = lba_count / lba_per_cluster;
	extent_idx++;

	desc->length = sizeof(desc->extents[0]) * extent_idx;
	*next_cluster = blob->active.num_clusters;

	return;
}
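
/*
 * Worked example of the run-length coalescing above: assume one cluster is
 * 2048 LBAs (lba_per_cluster == 2048) and active.clusters[] holds LBAs
 * {2048, 4096, 6144, 20480}. The first three clusters are physically
 * contiguous, so they collapse into a single extent
 * {cluster_idx = 1, length = 3}, followed by {cluster_idx = 10, length = 1}.
 */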

static void
_spdk_blob_serialize_flags(const struct spdk_blob_data *blob,
			   uint8_t *buf, size_t *buf_sz)
{
	struct spdk_blob_md_descriptor_flags *desc;

	/*
	 * Flags get serialized first, so we should always have room for the flags
	 *  descriptor.
	 */
	assert(*buf_sz >= sizeof(*desc));

	desc = (struct spdk_blob_md_descriptor_flags *)buf;
	desc->type = SPDK_MD_DESCRIPTOR_TYPE_FLAGS;
	desc->length = sizeof(*desc) - sizeof(struct spdk_blob_md_descriptor);
	desc->invalid_flags = blob->invalid_flags;
	desc->data_ro_flags = blob->data_ro_flags;
	desc->md_ro_flags = blob->md_ro_flags;

	*buf_sz -= sizeof(*desc);
}

static int
_spdk_blob_serialize(const struct spdk_blob_data *blob, struct spdk_blob_md_page **pages,
		     uint32_t *page_count)
{
	struct spdk_blob_md_page		*cur_page;
	const struct spdk_xattr			*xattr;
	int					rc;
	uint8_t					*buf;
	size_t					remaining_sz;
	uint64_t				last_cluster;

	assert(pages != NULL);
	assert(page_count != NULL);
	assert(blob != NULL);
	assert(blob->state == SPDK_BLOB_STATE_SYNCING);

	*pages = NULL;
	*page_count = 0;

	/* A blob always has at least 1 page, even if it has no descriptors */
	rc = _spdk_blob_serialize_add_page(blob, pages, page_count, &cur_page);
	if (rc < 0) {
		return rc;
	}

	buf = (uint8_t *)cur_page->descriptors;
	remaining_sz = sizeof(cur_page->descriptors);

	/* Serialize flags */
	_spdk_blob_serialize_flags(blob, buf, &remaining_sz);
	buf += sizeof(struct spdk_blob_md_descriptor_flags);

	/* Serialize xattrs */
	TAILQ_FOREACH(xattr, &blob->xattrs, link) {
		size_t required_sz = 0;
		rc = _spdk_blob_serialize_xattr(xattr,
						buf, remaining_sz,
						&required_sz);
		if (rc < 0) {
			/* Need to add a new page to the chain */
			rc = _spdk_blob_serialize_add_page(blob, pages, page_count,
							   &cur_page);
			if (rc < 0) {
				spdk_dma_free(*pages);
				*pages = NULL;
				*page_count = 0;
				return rc;
			}

			buf = (uint8_t *)cur_page->descriptors;
			remaining_sz = sizeof(cur_page->descriptors);

			/* Try again */
			required_sz = 0;
			rc = _spdk_blob_serialize_xattr(xattr,
							buf, remaining_sz,
							&required_sz);

			if (rc < 0) {
				spdk_dma_free(*pages);
				*pages = NULL;
				*page_count = 0;
				return -1;
			}
		}

		remaining_sz -= required_sz;
		buf += required_sz;
	}

	/* Serialize extents */
	last_cluster = 0;
	while (last_cluster < blob->active.num_clusters) {
		_spdk_blob_serialize_extent(blob, last_cluster, &last_cluster,
					    buf, remaining_sz);

		if (last_cluster == blob->active.num_clusters) {
			break;
		}

		rc = _spdk_blob_serialize_add_page(blob, pages, page_count,
						   &cur_page);
		if (rc < 0) {
			return rc;
		}

		buf = (uint8_t *)cur_page->descriptors;
		remaining_sz = sizeof(cur_page->descriptors);
	}

	return 0;
}
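
/*
 * The serialized metadata forms a singly linked chain of fixed-size pages:
 * page 0 (stored at the slot implied by the blobid) begins with the flags
 * descriptor, followed by as many xattr and extent descriptors as fit. Each
 * overflow page is reached through the previous page's 'next' index and
 * continues with the descriptor that did not fit; a 'next' of
 * SPDK_INVALID_MD_PAGE terminates the chain.
 */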

struct spdk_blob_load_ctx {
	struct spdk_blob_data		*blob;

	struct spdk_blob_md_page	*pages;
	uint32_t			num_pages;

	spdk_bs_sequence_cpl		cb_fn;
	void				*cb_arg;
};

static uint32_t
_spdk_blob_md_page_calc_crc(void *page)
{
	uint32_t		crc;

	crc = BLOB_CRC32C_INITIAL;
	crc = spdk_crc32c_update(page, SPDK_BS_PAGE_SIZE - 4, crc);
	crc ^= BLOB_CRC32C_INITIAL;

	return crc;
}
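
/*
 * The CRC covers the first SPDK_BS_PAGE_SIZE - 4 bytes because the final
 * 4 bytes of a metadata page (and of the super block, which reuses this
 * helper) hold the CRC itself. The same function therefore generates the
 * stored value on persist and recomputes it for comparison on load.
 */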

static void
_spdk_blob_load_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_load_ctx	*ctx = cb_arg;
	struct spdk_blob_data		*blob = ctx->blob;
	struct spdk_blob_md_page	*page;
	int				rc;
	uint32_t			crc;

	page = &ctx->pages[ctx->num_pages - 1];
	crc = _spdk_blob_md_page_calc_crc(page);
	if (crc != page->crc) {
		SPDK_ERRLOG("Metadata page %d crc mismatch\n", ctx->num_pages);
		_spdk_blob_free(blob);
		ctx->cb_fn(seq, NULL, -EINVAL);
		spdk_dma_free(ctx->pages);
		free(ctx);
		return;
	}

	if (page->next != SPDK_INVALID_MD_PAGE) {
		uint32_t next_page = page->next;
		uint64_t next_lba = _spdk_bs_page_to_lba(blob->bs, blob->bs->md_start + next_page);

		/* The next page index must fall within the metadata region */
		assert(next_page < blob->bs->md_len);

		/* Read the next page */
		ctx->num_pages++;
		ctx->pages = spdk_dma_realloc(ctx->pages, (sizeof(*page) * ctx->num_pages),
					      sizeof(*page), NULL);
		if (ctx->pages == NULL) {
			ctx->cb_fn(seq, ctx->cb_arg, -ENOMEM);
			free(ctx);
			return;
		}

		spdk_bs_sequence_read(seq, &ctx->pages[ctx->num_pages - 1],
				      next_lba,
				      _spdk_bs_byte_to_lba(blob->bs, sizeof(*page)),
				      _spdk_blob_load_cpl, ctx);
		return;
	}

	/* Parse the pages */
	rc = _spdk_blob_parse(ctx->pages, ctx->num_pages, blob);
	if (rc) {
		_spdk_blob_free(blob);
		ctx->cb_fn(seq, NULL, rc);
		spdk_dma_free(ctx->pages);
		free(ctx);
		return;
	}

	_spdk_blob_mark_clean(blob);

	ctx->cb_fn(seq, ctx->cb_arg, rc);

	/* Free the memory */
	spdk_dma_free(ctx->pages);
	free(ctx);
}

/* Load a blob from disk given a blobid */
static void
_spdk_blob_load(spdk_bs_sequence_t *seq, struct spdk_blob_data *blob,
		spdk_bs_sequence_cpl cb_fn, void *cb_arg)
{
	struct spdk_blob_load_ctx *ctx;
	struct spdk_blob_store *bs;
	uint32_t page_num;
	uint64_t lba;

	assert(blob != NULL);
	assert(blob->state == SPDK_BLOB_STATE_CLEAN ||
	       blob->state == SPDK_BLOB_STATE_DIRTY);

	bs = blob->bs;

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(seq, cb_arg, -ENOMEM);
		return;
	}

	ctx->blob = blob;
	ctx->pages = spdk_dma_realloc(ctx->pages, SPDK_BS_PAGE_SIZE,
				      SPDK_BS_PAGE_SIZE, NULL);
	if (!ctx->pages) {
		free(ctx);
		cb_fn(seq, cb_arg, -ENOMEM);
		return;
	}
	ctx->num_pages = 1;
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;

	page_num = _spdk_bs_blobid_to_page(blob->id);
	lba = _spdk_bs_page_to_lba(blob->bs, bs->md_start + page_num);

	blob->state = SPDK_BLOB_STATE_LOADING;

	spdk_bs_sequence_read(seq, &ctx->pages[0], lba,
			      _spdk_bs_byte_to_lba(bs, SPDK_BS_PAGE_SIZE),
			      _spdk_blob_load_cpl, ctx);
}

struct spdk_blob_persist_ctx {
	struct spdk_blob_data		*blob;

	struct spdk_blob_md_page	*pages;

	uint64_t			idx;

	spdk_bs_sequence_cpl		cb_fn;
	void				*cb_arg;
};

static void
_spdk_blob_persist_complete(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_persist_ctx	*ctx = cb_arg;
	struct spdk_blob_data		*blob = ctx->blob;

	if (bserrno == 0) {
		_spdk_blob_mark_clean(blob);
	}

	/* Call user callback */
	ctx->cb_fn(seq, ctx->cb_arg, bserrno);

	/* Free the memory */
	spdk_dma_free(ctx->pages);
	free(ctx);
}

static void
_spdk_blob_persist_unmap_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_persist_ctx	*ctx = cb_arg;
	struct spdk_blob_data		*blob = ctx->blob;
	struct spdk_blob_store		*bs = blob->bs;
	void				*tmp;
	size_t				i;

	/* Release all clusters that were truncated */
	for (i = blob->active.num_clusters; i < blob->active.cluster_array_size; i++) {
		uint32_t cluster_num = _spdk_bs_lba_to_cluster(bs, blob->active.clusters[i]);

		/* Nothing to release if it was not allocated */
		if (blob->active.clusters[i] != 0) {
			_spdk_bs_release_cluster(bs, cluster_num);
		}
	}

	if (blob->active.num_clusters == 0) {
		free(blob->active.clusters);
		blob->active.clusters = NULL;
		blob->active.cluster_array_size = 0;
	} else {
		tmp = realloc(blob->active.clusters, sizeof(uint64_t) * blob->active.num_clusters);
		assert(tmp != NULL);
		blob->active.clusters = tmp;
		blob->active.cluster_array_size = blob->active.num_clusters;
	}

	_spdk_blob_persist_complete(seq, ctx, bserrno);
}

static void
_spdk_blob_persist_unmap_clusters(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_persist_ctx	*ctx = cb_arg;
	struct spdk_blob_data		*blob = ctx->blob;
	struct spdk_blob_store		*bs = blob->bs;
	spdk_bs_batch_t			*batch;
	size_t				i;
	uint64_t			lba;
	uint32_t			lba_count;

	/* Clusters don't move around in blobs. The list shrinks or grows
	 * at the end, but no changes ever occur in the middle of the list.
	 */

	batch = spdk_bs_sequence_to_batch(seq, _spdk_blob_persist_unmap_clusters_cpl, ctx);

	/* Unmap all clusters that were truncated */
	lba = 0;
	lba_count = 0;
	for (i = blob->active.num_clusters; i < blob->active.cluster_array_size; i++) {
		uint64_t next_lba = blob->active.clusters[i];
		uint32_t next_lba_count = _spdk_bs_cluster_to_lba(bs, 1);

		if (next_lba > 0 && (lba + lba_count) == next_lba) {
			/* This cluster is contiguous with the previous one. */
			lba_count += next_lba_count;
			continue;
		}

		/* This cluster is not contiguous with the previous one. */

		/* If a run of LBAs previously existed, send it
		 * as an unmap.
		 */
		if (lba_count > 0) {
			spdk_bs_batch_unmap(batch, lba, lba_count);
		}

		/* Start building the next batch */
		lba = next_lba;
		if (next_lba > 0) {
			lba_count = next_lba_count;
		} else {
			lba_count = 0;
		}
	}

	/* If we ended with a contiguous set of LBAs, send the unmap now */
	if (lba_count > 0) {
		spdk_bs_batch_unmap(batch, lba, lba_count);
	}

	spdk_bs_batch_close(batch);
}
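
/*
 * Example of the coalescing above: if the truncated tail of the cluster list
 * maps to LBAs {1000, 1008, 1016, 5000} with 8 LBAs per cluster, the loop
 * issues one unmap covering LBAs 1000-1023 (three contiguous clusters) and a
 * second covering 5000-5007, rather than four separate unmaps. Unallocated
 * (zero) entries from a thin-provisioned blob simply reset the running span.
 */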

static void
_spdk_blob_persist_zero_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_persist_ctx	*ctx = cb_arg;
	struct spdk_blob_data		*blob = ctx->blob;
	struct spdk_blob_store		*bs = blob->bs;
	size_t				i;

	/* This loop starts at 1 because the first page is special and handled
	 * below. The pages (except the first) are never written in place,
	 * so any pages in the clean list must be zeroed.
	 */
	for (i = 1; i < blob->clean.num_pages; i++) {
		spdk_bit_array_clear(bs->used_md_pages, blob->clean.pages[i]);
	}

	if (blob->active.num_pages == 0) {
		uint32_t page_num;

		page_num = _spdk_bs_blobid_to_page(blob->id);
		spdk_bit_array_clear(bs->used_md_pages, page_num);
	}

	/* Move on to unmapping clusters */
	_spdk_blob_persist_unmap_clusters(seq, ctx, 0);
}

static void
_spdk_blob_persist_zero_pages(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_persist_ctx	*ctx = cb_arg;
	struct spdk_blob_data		*blob = ctx->blob;
	struct spdk_blob_store		*bs = blob->bs;
	uint64_t			lba;
	uint32_t			lba_count;
	spdk_bs_batch_t			*batch;
	size_t				i;

	batch = spdk_bs_sequence_to_batch(seq, _spdk_blob_persist_zero_pages_cpl, ctx);

	lba_count = _spdk_bs_byte_to_lba(bs, SPDK_BS_PAGE_SIZE);

	/* This loop starts at 1 because the first page is special and handled
	 * below. The pages (except the first) are never written in place,
	 * so any pages in the clean list must be zeroed.
	 */
	for (i = 1; i < blob->clean.num_pages; i++) {
		lba = _spdk_bs_page_to_lba(bs, bs->md_start + blob->clean.pages[i]);

		spdk_bs_batch_write_zeroes(batch, lba, lba_count);
	}

	/* The first page will only be zeroed if this is a delete. */
	if (blob->active.num_pages == 0) {
		uint32_t page_num;

		/* The first page in the metadata goes where the blobid indicates */
		page_num = _spdk_bs_blobid_to_page(blob->id);
		lba = _spdk_bs_page_to_lba(bs, bs->md_start + page_num);

		spdk_bs_batch_write_zeroes(batch, lba, lba_count);
	}

	spdk_bs_batch_close(batch);
}

static void
_spdk_blob_persist_write_page_root(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_persist_ctx	*ctx = cb_arg;
	struct spdk_blob_data		*blob = ctx->blob;
	struct spdk_blob_store		*bs = blob->bs;
	uint64_t			lba;
	uint32_t			lba_count;
	struct spdk_blob_md_page	*page;

	if (blob->active.num_pages == 0) {
		/* Move on to the next step */
		_spdk_blob_persist_zero_pages(seq, ctx, 0);
		return;
	}

	lba_count = _spdk_bs_byte_to_lba(bs, sizeof(*page));

	page = &ctx->pages[0];
	/* The first page in the metadata goes where the blobid indicates */
	lba = _spdk_bs_page_to_lba(bs, bs->md_start + _spdk_bs_blobid_to_page(blob->id));

	spdk_bs_sequence_write(seq, page, lba, lba_count,
			       _spdk_blob_persist_zero_pages, ctx);
}

static void
_spdk_blob_persist_write_page_chain(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct spdk_blob_persist_ctx	*ctx = cb_arg;
	struct spdk_blob_data		*blob = ctx->blob;
	struct spdk_blob_store		*bs = blob->bs;
	uint64_t			lba;
	uint32_t			lba_count;
	struct spdk_blob_md_page	*page;
	spdk_bs_batch_t			*batch;
	size_t				i;

	/* Clusters don't move around in blobs. The list shrinks or grows
	 * at the end, but no changes ever occur in the middle of the list.
	 */

	lba_count = _spdk_bs_byte_to_lba(bs, sizeof(*page));

	batch = spdk_bs_sequence_to_batch(seq, _spdk_blob_persist_write_page_root, ctx);

	/* This starts at 1. The root page is not written until
	 * all of the others are finished
	 */
	for (i = 1; i < blob->active.num_pages; i++) {
		page = &ctx->pages[i];
		assert(page->sequence_num == i);

		lba = _spdk_bs_page_to_lba(bs, bs->md_start + blob->active.pages[i]);

		spdk_bs_batch_write(batch, page, lba, lba_count);
	}

	spdk_bs_batch_close(batch);
}
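
/*
 * Writing the non-root pages first and the root page last is what makes a
 * metadata sync safe against power loss: the new chain only becomes
 * reachable once the root page (whose location is fixed by the blobid)
 * lands on disk, so an interrupted sync leaves either the old metadata or
 * the complete new chain visible.
 */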

static int
_spdk_resize_blob(struct spdk_blob_data *blob, uint64_t sz)
{
	uint64_t	i;
	uint64_t	*tmp;
	uint64_t	lfc; /* lowest free cluster */
	uint64_t	num_clusters;
	struct spdk_blob_store *bs;

	bs = blob->bs;

	assert(blob->state != SPDK_BLOB_STATE_LOADING &&
	       blob->state != SPDK_BLOB_STATE_SYNCING);

	if (blob->active.num_clusters == sz) {
		return 0;
	}

	if (blob->active.num_clusters < blob->active.cluster_array_size) {
		/* If this blob was resized to be larger, then smaller, then
		 * larger without syncing, then the cluster array already
		 * contains spare assigned clusters we can use.
		 */
		num_clusters = spdk_min(blob->active.cluster_array_size,
					sz);
	} else {
		num_clusters = blob->active.num_clusters;
	}

	/* Do two passes - one to verify that we can obtain enough clusters
	 * and another to actually claim them.
	 */

	if (spdk_blob_is_thin_provisioned(blob) == false) {
		lfc = 0;
		for (i = num_clusters; i < sz; i++) {
			lfc = spdk_bit_array_find_first_clear(bs->used_clusters, lfc);
			if (lfc >= bs->total_clusters) {
				/* No more free clusters. Cannot satisfy the request */
				return -ENOSPC;
			}
			lfc++;
		}
	}

	if (sz > num_clusters) {
		/* Expand the cluster array if necessary.
		 * We only shrink the array when persisting.
		 */
		tmp = realloc(blob->active.clusters, sizeof(uint64_t) * sz);
		if (sz > 0 && tmp == NULL) {
			return -ENOMEM;
		}
		memset(tmp + blob->active.cluster_array_size, 0,
		       sizeof(uint64_t) * (sz - blob->active.cluster_array_size));
		blob->active.clusters = tmp;
		blob->active.cluster_array_size = sz;
	}

	blob->state = SPDK_BLOB_STATE_DIRTY;

	if (spdk_blob_is_thin_provisioned(blob) == false) {
		lfc = 0;
		for (i = num_clusters; i < sz; i++) {
			_spdk_bs_allocate_cluster(blob, i, &lfc);
			lfc++;
		}
	}

	blob->active.num_clusters = sz;

	return 0;
}
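
/*
 * The verify-then-claim structure above makes a grow all-or-nothing: the
 * first pass only walks the free-cluster bitmap, so returning -ENOSPC
 * leaves the blob untouched, and the second pass can then claim the same
 * clusters without failing (metadata operations are serialized on the
 * blobstore's metadata thread, so nothing can take them in between).
 */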

/* Write a blob to disk */
static void
_spdk_blob_persist(spdk_bs_sequence_t *seq, struct spdk_blob_data *blob,
		   spdk_bs_sequence_cpl cb_fn, void *cb_arg)
{
	struct spdk_blob_persist_ctx *ctx;
	int rc;
	uint64_t i;
	uint32_t page_num;
	struct spdk_blob_store *bs;

	assert(blob != NULL);
	assert(blob->state == SPDK_BLOB_STATE_CLEAN ||
	       blob->state == SPDK_BLOB_STATE_DIRTY);

	if (blob->state == SPDK_BLOB_STATE_CLEAN) {
		cb_fn(seq, cb_arg, 0);
		return;
	}

	bs = blob->bs;

	ctx = calloc(1, sizeof(*ctx));
	if (!ctx) {
		cb_fn(seq, cb_arg, -ENOMEM);
		return;
	}
	ctx->blob = blob;
	ctx->cb_fn = cb_fn;
	ctx->cb_arg = cb_arg;

	blob->state = SPDK_BLOB_STATE_SYNCING;

	if (blob->active.num_pages == 0) {
		/* This is the signal that the blob should be deleted.
		 * Immediately jump to the clean up routine. */
		assert(blob->clean.num_pages > 0);
		ctx->idx = blob->clean.num_pages - 1;
		_spdk_blob_persist_zero_pages(seq, ctx, 0);
		return;
	}

	/* Generate the new metadata */
	rc = _spdk_blob_serialize(blob, &ctx->pages, &blob->active.num_pages);
	if (rc < 0) {
		free(ctx);
		cb_fn(seq, cb_arg, rc);
		return;
	}

	assert(blob->active.num_pages >= 1);

	/* Resize the cache of page indices */
	blob->active.pages = realloc(blob->active.pages,
				     blob->active.num_pages * sizeof(*blob->active.pages));
	if (!blob->active.pages) {
		free(ctx);
		cb_fn(seq, cb_arg, -ENOMEM);
		return;
	}

	/* Assign this metadata to pages. This requires two passes -
	 * one to verify that there are enough pages and a second
	 * to actually claim them. */
	page_num = 0;
	/* Note that this loop starts at one. The first page location is fixed by the blobid. */
	for (i = 1; i < blob->active.num_pages; i++) {
		page_num = spdk_bit_array_find_first_clear(bs->used_md_pages, page_num);
		if (page_num >= spdk_bit_array_capacity(bs->used_md_pages)) {
			spdk_dma_free(ctx->pages);
			free(ctx);
			blob->state = SPDK_BLOB_STATE_DIRTY;
			cb_fn(seq, cb_arg, -ENOMEM);
			return;
		}
		page_num++;
	}

	page_num = 0;
	blob->active.pages[0] = _spdk_bs_blobid_to_page(blob->id);
	for (i = 1; i < blob->active.num_pages; i++) {
		page_num = spdk_bit_array_find_first_clear(bs->used_md_pages, page_num);
		ctx->pages[i - 1].next = page_num;
		/* Now that previous metadata page is complete, calculate the crc for it. */
		ctx->pages[i - 1].crc = _spdk_blob_md_page_calc_crc(&ctx->pages[i - 1]);
		blob->active.pages[i] = page_num;
		spdk_bit_array_set(bs->used_md_pages, page_num);
		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Claiming page %u for blob %lu\n", page_num, blob->id);
		page_num++;
	}
	ctx->pages[i - 1].crc = _spdk_blob_md_page_calc_crc(&ctx->pages[i - 1]);
	/* Start writing the metadata from last page to first */
	ctx->idx = blob->active.num_pages - 1;
	_spdk_blob_persist_write_page_chain(seq, ctx, 0);
}

static void
_spdk_blob_request_submit_op(struct spdk_blob *_blob, struct spdk_io_channel *_channel,
			     void *payload, uint64_t offset, uint64_t length,
			     spdk_blob_op_complete cb_fn, void *cb_arg, enum spdk_blob_op_type op_type)
{
	struct spdk_blob_data		*blob = __blob_to_data(_blob);
	spdk_bs_batch_t			*batch;
	struct spdk_bs_cpl		cpl;
	uint64_t			lba;
	uint32_t			lba_count;
	uint8_t				*buf;
	uint64_t			page;

	assert(blob != NULL);

	if (blob->data_ro && op_type != SPDK_BLOB_READ) {
		cb_fn(cb_arg, -EPERM);
		return;
	}

	if (offset + length > blob->active.num_clusters * blob->bs->pages_per_cluster) {
		cb_fn(cb_arg, -EINVAL);
		return;
	}

	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
	cpl.u.blob_basic.cb_fn = cb_fn;
	cpl.u.blob_basic.cb_arg = cb_arg;

	batch = spdk_bs_batch_open(_channel, &cpl);
	if (!batch) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	length = _spdk_bs_page_to_lba(blob->bs, length);
	page = offset;
	buf = payload;
	while (length > 0) {
		lba = _spdk_bs_blob_page_to_lba(blob, page);
		lba_count = spdk_min(length,
				     _spdk_bs_page_to_lba(blob->bs,
						     _spdk_bs_num_pages_to_cluster_boundary(blob, page)));

		switch (op_type) {
		case SPDK_BLOB_READ:
			spdk_bs_batch_read(batch, buf, lba, lba_count);
			break;
		case SPDK_BLOB_WRITE:
			spdk_bs_batch_write(batch, buf, lba, lba_count);
			break;
		case SPDK_BLOB_UNMAP:
			spdk_bs_batch_unmap(batch, lba, lba_count);
			break;
		case SPDK_BLOB_WRITE_ZEROES:
			spdk_bs_batch_write_zeroes(batch, lba, lba_count);
			break;
		}

		length -= lba_count;
		page += _spdk_bs_lba_to_page(blob->bs, lba_count);
		if (op_type == SPDK_BLOB_WRITE || op_type == SPDK_BLOB_READ) {
			buf += _spdk_bs_lba_to_byte(blob->bs, lba_count);
		}
	}

	spdk_bs_batch_close(batch);
}
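
/*
 * Example of the splitting above: with a 1MiB cluster (256 pages of 4KiB),
 * a 3-page write starting at blob page offset 254 is issued as two batched
 * operations, one covering pages 254-255 (the tail of the first cluster)
 * and one covering page 256 (the start of the next), because consecutive
 * clusters of a blob may live at entirely unrelated LBAs on disk.
 */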

struct rw_iov_ctx {
	struct spdk_blob_data *blob;
	bool read;
	int iovcnt;
	struct iovec *orig_iov;
	uint64_t page_offset;
	uint64_t pages_remaining;
	uint64_t pages_done;
	struct iovec iov[0];
};

static void
_spdk_rw_iov_done(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	assert(cb_arg == NULL);
	spdk_bs_sequence_finish(seq, bserrno);
}

static void
_spdk_rw_iov_split_next(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
{
	struct rw_iov_ctx *ctx = cb_arg;
	struct iovec *iov, *orig_iov;
	int iovcnt;
	size_t orig_iovoff;
	uint64_t lba;
	uint64_t page_count, pages_to_boundary;
	uint32_t lba_count;
	uint64_t byte_count;

	if (bserrno != 0 || ctx->pages_remaining == 0) {
		free(ctx);
		spdk_bs_sequence_finish(seq, bserrno);
		return;
	}

	pages_to_boundary = _spdk_bs_num_pages_to_cluster_boundary(ctx->blob, ctx->page_offset);
	page_count = spdk_min(ctx->pages_remaining, pages_to_boundary);
	lba = _spdk_bs_blob_page_to_lba(ctx->blob, ctx->page_offset);
	lba_count = _spdk_bs_page_to_lba(ctx->blob->bs, page_count);

	/*
	 * Get index and offset into the original iov array for our current position in the I/O sequence.
	 *  byte_count will keep track of how many bytes remaining until orig_iov and orig_iovoff will
	 *  point to the current position in the I/O sequence.
	 */
	byte_count = ctx->pages_done * sizeof(struct spdk_blob_md_page);
	orig_iov = &ctx->orig_iov[0];
	orig_iovoff = 0;
	while (byte_count > 0) {
		if (byte_count >= orig_iov->iov_len) {
			byte_count -= orig_iov->iov_len;
			orig_iov++;
		} else {
			orig_iovoff = byte_count;
			byte_count = 0;
		}
	}

	/*
	 * Build an iov array for the next I/O in the sequence.  byte_count will keep track of how many
	 *  bytes of this next I/O remain to be accounted for in the new iov array.
	 */
	byte_count = page_count * sizeof(struct spdk_blob_md_page);
	iov = &ctx->iov[0];
	iovcnt = 0;
	while (byte_count > 0) {
		iov->iov_len = spdk_min(byte_count, orig_iov->iov_len - orig_iovoff);
		iov->iov_base = orig_iov->iov_base + orig_iovoff;
		byte_count -= iov->iov_len;
		orig_iovoff = 0;
		orig_iov++;
		iov++;
		iovcnt++;
	}

	ctx->page_offset += page_count;
	ctx->pages_done += page_count;
	ctx->pages_remaining -= page_count;
	iov = &ctx->iov[0];

	if (ctx->read) {
		spdk_bs_sequence_readv(seq, iov, iovcnt, lba, lba_count, _spdk_rw_iov_split_next, ctx);
	} else {
		spdk_bs_sequence_writev(seq, iov, iovcnt, lba, lba_count, _spdk_rw_iov_split_next, ctx);
	}
}

static void
_spdk_blob_request_submit_rw_iov(struct spdk_blob *_blob, struct spdk_io_channel *_channel,
				 struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
				 spdk_blob_op_complete cb_fn, void *cb_arg, bool read)
{
	struct spdk_blob_data		*blob = __blob_to_data(_blob);
	spdk_bs_sequence_t		*seq;
	struct spdk_bs_cpl		cpl;

	assert(blob != NULL);

	if (!read && blob->data_ro) {
		cb_fn(cb_arg, -EPERM);
		return;
	}

	if (length == 0) {
		cb_fn(cb_arg, 0);
		return;
	}

	if (offset + length > blob->active.num_clusters * blob->bs->pages_per_cluster) {
		cb_fn(cb_arg, -EINVAL);
		return;
	}

	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
	cpl.u.blob_basic.cb_fn = cb_fn;
	cpl.u.blob_basic.cb_arg = cb_arg;

	/*
	 * For now, we implement readv/writev using a sequence (instead of a batch) to account for having
	 *  to split a request that spans a cluster boundary.  For I/O that do not span a cluster boundary,
	 *  there will be no noticeable difference compared to using a batch.  For I/O that do span a cluster
	 *  boundary, the target LBAs (after blob offset to LBA translation) may not be contiguous, so we need
	 *  to allocate a separate iov array and split the I/O such that none of the resulting
	 *  smaller I/O cross a cluster boundary.  These smaller I/O will be issued in sequence (not in parallel)
	 *  but since this case happens very infrequently, any performance impact will be negligible.
	 *
	 * This could be optimized in the future to allocate a big enough iov array to account for all of the iovs
	 *  for all of the smaller I/Os, pre-build all of the iov arrays for the smaller I/Os, then issue them
	 *  in a batch.  That would also require creating an intermediate spdk_bs_cpl that would get called
	 *  when the batch was completed, to allow for freeing the memory for the iov arrays.
	 */
	seq = spdk_bs_sequence_start(_channel, &cpl);
	if (!seq) {
		cb_fn(cb_arg, -ENOMEM);
		return;
	}

	if (spdk_likely(length <= _spdk_bs_num_pages_to_cluster_boundary(blob, offset))) {
		uint64_t lba = _spdk_bs_blob_page_to_lba(blob, offset);
		uint32_t lba_count = _spdk_bs_page_to_lba(blob->bs, length);

		if (read) {
			spdk_bs_sequence_readv(seq, iov, iovcnt, lba, lba_count, _spdk_rw_iov_done, NULL);
		} else {
			spdk_bs_sequence_writev(seq, iov, iovcnt, lba, lba_count, _spdk_rw_iov_done, NULL);
		}
	} else {
		struct rw_iov_ctx *ctx;

		ctx = calloc(1, sizeof(struct rw_iov_ctx) + iovcnt * sizeof(struct iovec));
		if (ctx == NULL) {
			spdk_bs_sequence_finish(seq, -ENOMEM);
			return;
		}

		ctx->blob = blob;
		ctx->read = read;
		ctx->orig_iov = iov;
		ctx->iovcnt = iovcnt;
		ctx->page_offset = offset;
		ctx->pages_remaining = length;
		ctx->pages_done = 0;

		_spdk_rw_iov_split_next(seq, ctx, 0);
	}
}

static struct spdk_blob_data *
_spdk_blob_lookup(struct spdk_blob_store *bs, spdk_blob_id blobid)
{
	struct spdk_blob_data *blob;

	TAILQ_FOREACH(blob, &bs->blobs, link) {
		if (blob->id == blobid) {
			return blob;
		}
	}

	return NULL;
}

static int
_spdk_bs_channel_create(void *io_device, void *ctx_buf)
{
	struct spdk_blob_store		*bs = io_device;
	struct spdk_bs_channel		*channel = ctx_buf;
	struct spdk_bs_dev		*dev;
	uint32_t			max_ops = bs->max_channel_ops;
	uint32_t			i;

	dev = bs->dev;

	channel->req_mem = calloc(max_ops, sizeof(struct spdk_bs_request_set));
	if (!channel->req_mem) {
		return -1;
	}

	TAILQ_INIT(&channel->reqs);

	for (i = 0; i < max_ops; i++) {
		TAILQ_INSERT_TAIL(&channel->reqs, &channel->req_mem[i], link);
	}

	channel->bs = bs;
	channel->dev = dev;
	channel->dev_channel = dev->create_channel(dev);

	if (!channel->dev_channel) {
		SPDK_ERRLOG("Failed to create device channel.\n");
		free(channel->req_mem);
		return -1;
	}

	return 0;
}

static void
_spdk_bs_channel_destroy(void *io_device, void *ctx_buf)
{
	struct spdk_bs_channel *channel = ctx_buf;

	free(channel->req_mem);
	channel->dev->destroy_channel(channel->dev, channel->dev_channel);
}

static void
_spdk_bs_dev_destroy(void *io_device)
{
	struct spdk_blob_store *bs = io_device;
	struct spdk_blob_data	*blob, *blob_tmp;

	bs->dev->destroy(bs->dev);

	TAILQ_FOREACH_SAFE(blob, &bs->blobs, link, blob_tmp) {
		TAILQ_REMOVE(&bs->blobs, blob, link);
		_spdk_blob_free(blob);
	}

	spdk_bit_array_free(&bs->used_blobids);
	spdk_bit_array_free(&bs->used_md_pages);
	spdk_bit_array_free(&bs->used_clusters);
	/*
	 * If this function is called for any reason except a successful unload,
	 * the unload_cpl type will be NONE and this will be a nop.
	 */
	spdk_bs_call_cpl(&bs->unload_cpl, bs->unload_err);

	free(bs);
}

static void
_spdk_bs_free(struct spdk_blob_store *bs)
{
	spdk_bs_unregister_md_thread(bs);
	spdk_io_device_unregister(bs, _spdk_bs_dev_destroy);
}

void
spdk_bs_opts_init(struct spdk_bs_opts *opts)
{
	opts->cluster_sz = SPDK_BLOB_OPTS_CLUSTER_SZ;
	opts->num_md_pages = SPDK_BLOB_OPTS_NUM_MD_PAGES;
	opts->max_md_ops = SPDK_BLOB_OPTS_MAX_MD_OPS;
	opts->max_channel_ops = SPDK_BLOB_OPTS_DEFAULT_CHANNEL_OPS;
	memset(&opts->bstype, 0, sizeof(opts->bstype));
}
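
/*
 * Typical usage (a sketch; spdk_bs_init() is declared in include/spdk/blob.h
 * and bs_init_cb is a caller-supplied completion callback):
 *
 *	struct spdk_bs_opts opts;
 *
 *	spdk_bs_opts_init(&opts);
 *	opts.cluster_sz = 4 * 1024 * 1024;
 *	spdk_bs_init(dev, &opts, bs_init_cb, cb_arg);
 *
 * As with spdk_blob_opts, initialize with the helper before overriding
 * individual fields so that the remaining fields keep sane defaults.
 */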

static int
_spdk_bs_opts_verify(struct spdk_bs_opts *opts)
{
	if (opts->cluster_sz == 0 || opts->num_md_pages == 0 || opts->max_md_ops == 0 ||
	    opts->max_channel_ops == 0) {
		SPDK_ERRLOG("Blobstore options cannot be set to 0\n");
		return -1;
	}

	return 0;
}

static struct spdk_blob_store *
_spdk_bs_alloc(struct spdk_bs_dev *dev, struct spdk_bs_opts *opts)
{
	struct spdk_blob_store	*bs;
	uint64_t dev_size;
	int rc;

	dev_size = dev->blocklen * dev->blockcnt;
	if (dev_size < opts->cluster_sz) {
		/* Device size cannot be smaller than cluster size of blobstore */
		SPDK_ERRLOG("Device size %" PRIu64 " is smaller than cluster size %d\n", dev_size,
			    opts->cluster_sz);
		return NULL;
	}
	if (opts->cluster_sz < SPDK_BS_PAGE_SIZE) {
		/* Cluster size cannot be smaller than page size */
		SPDK_ERRLOG("Cluster size %d is smaller than page size %d\n",
			    opts->cluster_sz, SPDK_BS_PAGE_SIZE);
		return NULL;
	}
	bs = calloc(1, sizeof(struct spdk_blob_store));
	if (!bs) {
		return NULL;
	}

	TAILQ_INIT(&bs->blobs);
	bs->dev = dev;

	/*
	 * Do not use _spdk_bs_lba_to_cluster() here since blockcnt may not be an
	 *  even multiple of the cluster size.
	 */
	bs->cluster_sz = opts->cluster_sz;
	bs->total_clusters = dev->blockcnt / (bs->cluster_sz / dev->blocklen);
	bs->pages_per_cluster = bs->cluster_sz / SPDK_BS_PAGE_SIZE;
	bs->num_free_clusters = bs->total_clusters;
	bs->used_clusters = spdk_bit_array_create(bs->total_clusters);
	if (bs->used_clusters == NULL) {
		free(bs);
		return NULL;
	}

	bs->max_channel_ops = opts->max_channel_ops;
	bs->super_blob = SPDK_BLOBID_INVALID;
	memcpy(&bs->bstype, &opts->bstype, sizeof(opts->bstype));

	/* The metadata is assumed to be at least 1 page */
	bs->used_md_pages = spdk_bit_array_create(1);
	bs->used_blobids = spdk_bit_array_create(0);

	spdk_io_device_register(bs, _spdk_bs_channel_create, _spdk_bs_channel_destroy,
				sizeof(struct spdk_bs_channel));
	rc = spdk_bs_register_md_thread(bs);
	if (rc == -1) {
		spdk_io_device_unregister(bs, NULL);
		spdk_bit_array_free(&bs->used_blobids);
		spdk_bit_array_free(&bs->used_md_pages);
		spdk_bit_array_free(&bs->used_clusters);
		free(bs);
		return NULL;
	}

	return bs;
}
/* START spdk_bs_load; spdk_bs_load_ctx will be used for both load and unload. */

struct spdk_bs_load_ctx {
	struct spdk_blob_store		*bs;
	struct spdk_bs_super_block	*super;

	struct spdk_bs_md_mask		*mask;
	bool				in_page_chain;
	uint32_t			page_index;
	uint32_t			cur_page;
	struct spdk_blob_md_page	*page;
	bool				is_load;
};

static void
_spdk_bs_load_ctx_fail(spdk_bs_sequence_t *seq, struct spdk_bs_load_ctx *ctx, int bserrno)
{
	assert(bserrno != 0);

	spdk_dma_free(ctx->super);
	/*
	 * Only free the blobstore when a load fails.  If an unload fails (for some reason)
	 *  we want to keep the blobstore in case the caller wants to try again.
	 */
	if (ctx->is_load) {
		_spdk_bs_free(ctx->bs);
	}
	free(ctx);
	spdk_bs_sequence_finish(seq, bserrno);
}

static void
_spdk_bs_set_mask(struct spdk_bit_array *array, struct spdk_bs_md_mask *mask)
{
	uint32_t i = 0;

	while (true) {
		i = spdk_bit_array_find_first_set(array, i);
		if (i >= mask->length) {
			break;
		}
		mask->mask[i / 8] |= 1U << (i % 8);
		i++;
	}
}
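
/*
 * Bit i of the in-memory array lands in byte i / 8, bit position i % 8, of
 * the on-disk mask. For example, set bits {0, 9, 10} produce mask[0] == 0x01
 * and mask[1] == 0x06. The unpack loops in the load completion callbacks
 * below invert this mapping.
 */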
1655 
1656 static void
1657 _spdk_bs_write_super(spdk_bs_sequence_t *seq, struct spdk_blob_store *bs,
1658 		     struct spdk_bs_super_block *super, spdk_bs_sequence_cpl cb_fn, void *cb_arg)
1659 {
1660 	/* Update the values in the super block */
1661 	super->super_blob = bs->super_blob;
1662 	memcpy(&super->bstype, &bs->bstype, sizeof(bs->bstype));
1663 	super->crc = _spdk_blob_md_page_calc_crc(super);
1664 	spdk_bs_sequence_write(seq, super, _spdk_bs_page_to_lba(bs, 0),
1665 			       _spdk_bs_byte_to_lba(bs, sizeof(*super)),
1666 			       cb_fn, cb_arg);
1667 }
1668 
1669 static void
1670 _spdk_bs_write_used_clusters(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn)
1671 {
1672 	struct spdk_bs_load_ctx	*ctx = arg;
1673 	uint64_t	mask_size, lba, lba_count;
1674 
1675 	/* Write out the used clusters mask */
1676 	mask_size = ctx->super->used_cluster_mask_len * SPDK_BS_PAGE_SIZE;
1677 	ctx->mask = spdk_dma_zmalloc(mask_size, 0x1000, NULL);
1678 	if (!ctx->mask) {
1679 		_spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM);
1680 		return;
1681 	}
1682 
1683 	ctx->mask->type = SPDK_MD_MASK_TYPE_USED_CLUSTERS;
1684 	ctx->mask->length = ctx->bs->total_clusters;
1685 	assert(ctx->mask->length == spdk_bit_array_capacity(ctx->bs->used_clusters));
1686 
1687 	_spdk_bs_set_mask(ctx->bs->used_clusters, ctx->mask);
1688 	lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start);
1689 	lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len);
1690 	spdk_bs_sequence_write(seq, ctx->mask, lba, lba_count, cb_fn, arg);
1691 }
1692 
1693 static void
1694 _spdk_bs_write_used_md(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn)
1695 {
1696 	struct spdk_bs_load_ctx	*ctx = arg;
1697 	uint64_t	mask_size, lba, lba_count;
1698 
1699 	mask_size = ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE;
1700 	ctx->mask = spdk_dma_zmalloc(mask_size, 0x1000, NULL);
1701 	if (!ctx->mask) {
1702 		_spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM);
1703 		return;
1704 	}
1705 
1706 	ctx->mask->type = SPDK_MD_MASK_TYPE_USED_PAGES;
1707 	ctx->mask->length = ctx->super->md_len;
1708 	assert(ctx->mask->length == spdk_bit_array_capacity(ctx->bs->used_md_pages));
1709 
1710 	_spdk_bs_set_mask(ctx->bs->used_md_pages, ctx->mask);
1711 	lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_start);
1712 	lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_len);
1713 	spdk_bs_sequence_write(seq, ctx->mask, lba, lba_count, cb_fn, arg);
1714 }
1715 
1716 static void
1717 _spdk_bs_write_used_blobids(spdk_bs_sequence_t *seq, void *arg, spdk_bs_sequence_cpl cb_fn)
1718 {
1719 	struct spdk_bs_load_ctx	*ctx = arg;
1720 	uint64_t	mask_size, lba, lba_count;
1721 
1722 	if (ctx->super->used_blobid_mask_len == 0) {
1723 		/*
1724 		 * This is a pre-v3 on-disk format where the blobid mask does not get
1725 		 *  written to disk.
1726 		 */
1727 		cb_fn(seq, arg, 0);
1728 		return;
1729 	}
1730 
1731 	mask_size = ctx->super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE;
1732 	ctx->mask = spdk_dma_zmalloc(mask_size, 0x1000, NULL);
1733 	if (!ctx->mask) {
1734 		_spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM);
1735 		return;
1736 	}
1737 
1738 	ctx->mask->type = SPDK_MD_MASK_TYPE_USED_BLOBIDS;
1739 	ctx->mask->length = ctx->super->md_len;
1740 	assert(ctx->mask->length == spdk_bit_array_capacity(ctx->bs->used_blobids));
1741 
1742 	_spdk_bs_set_mask(ctx->bs->used_blobids, ctx->mask);
1743 	lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_start);
1744 	lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_len);
1745 	spdk_bs_sequence_write(seq, ctx->mask, lba, lba_count, cb_fn, arg);
1746 }
1747 
1748 static void
1749 _spdk_bs_load_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
1750 {
1751 	struct spdk_bs_load_ctx *ctx = cb_arg;
1752 	uint32_t i, j;
1753 	int rc;
1754 
1755 	/* The type must be correct */
1756 	assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_BLOBIDS);
1757 
1758 	/* The length of the mask (in bits) must not be greater than
1759 	 * the length of the buffer (converted to bits) */
1760 	assert(ctx->mask->length <= (ctx->super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE * 8));
1761 
1762 	/* The length of the mask must be exactly equal to the size
1763 	 * (in pages) of the metadata region */
1764 	assert(ctx->mask->length == ctx->super->md_len);
1765 
1766 	rc = spdk_bit_array_resize(&ctx->bs->used_blobids, ctx->mask->length);
1767 	if (rc < 0) {
1768 		spdk_dma_free(ctx->mask);
1769 		_spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM);
1770 		return;
1771 	}
1772 
1773 	for (i = 0; i < ctx->mask->length / 8; i++) {
1774 		uint8_t segment = ctx->mask->mask[i];
1775 		for (j = 0; segment; j++) {
1776 			if (segment & 1U) {
1777 				spdk_bit_array_set(ctx->bs->used_blobids, (i * 8) + j);
1778 			}
1779 			segment >>= 1U;
1780 		}
1781 	}
1782 
1783 	spdk_dma_free(ctx->super);
1784 	spdk_dma_free(ctx->mask);
1785 	free(ctx);
1786 
1787 	spdk_bs_sequence_finish(seq, bserrno);
1788 }
1789 
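/*
 * Completion for the used_clusters mask read: rebuild the used_clusters bit
 * array, decrementing num_free_clusters for every set bit, then chain the
 * read of the used_blobids mask.
 */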
1790 static void
1791 _spdk_bs_load_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
1792 {
1793 	struct spdk_bs_load_ctx *ctx = cb_arg;
1794 	uint64_t		lba, lba_count, mask_size;
1795 	uint32_t		i, j;
1796 	int			rc;
1797 
1798 	/* The type must be correct */
1799 	assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_CLUSTERS);
1800 	/* The length of the mask (in bits) must not be greater than the length of the buffer (converted to bits) */
1801 	assert(ctx->mask->length <= (ctx->super->used_cluster_mask_len * sizeof(
1802 					     struct spdk_blob_md_page) * 8));
1803 	/* The length of the mask must be exactly equal to the total number of clusters */
1804 	assert(ctx->mask->length == ctx->bs->total_clusters);
1805 
1806 	rc = spdk_bit_array_resize(&ctx->bs->used_clusters, ctx->bs->total_clusters);
1807 	if (rc < 0) {
1808 		spdk_dma_free(ctx->mask);
1809 		_spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM);
1810 		return;
1811 	}
1812 
1813 	ctx->bs->num_free_clusters = ctx->bs->total_clusters;
1814 	for (i = 0; i < ctx->mask->length / 8; i++) {
1815 		uint8_t segment = ctx->mask->mask[i];
1816 		for (j = 0; segment && (j < 8); j++) {
1817 			if (segment & 1U) {
1818 				spdk_bit_array_set(ctx->bs->used_clusters, (i * 8) + j);
1819 				assert(ctx->bs->num_free_clusters > 0);
1820 				ctx->bs->num_free_clusters--;
1821 			}
1822 			segment >>= 1U;
1823 		}
1824 	}
1825 
1826 	spdk_dma_free(ctx->mask);
1827 
1828 	/* Read the used blobids mask */
1829 	mask_size = ctx->super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE;
1830 	ctx->mask = spdk_dma_zmalloc(mask_size, 0x1000, NULL);
1831 	if (!ctx->mask) {
1832 		_spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM);
1833 		return;
1834 	}
1835 	lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_start);
1836 	lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_blobid_mask_len);
1837 	spdk_bs_sequence_read(seq, ctx->mask, lba, lba_count,
1838 			      _spdk_bs_load_used_blobids_cpl, ctx);
1839 }
1840 
1841 static void
1842 _spdk_bs_load_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
1843 {
1844 	struct spdk_bs_load_ctx *ctx = cb_arg;
1845 	uint64_t		lba, lba_count, mask_size;
1846 	uint32_t		i, j;
1847 	int			rc;
1848 
1849 	/* The type must be correct */
1850 	assert(ctx->mask->type == SPDK_MD_MASK_TYPE_USED_PAGES);
1851 	/* The length of the mask (in bits) must not be greater than the length of the buffer (converted to bits) */
1852 	assert(ctx->mask->length <= (ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE *
1853 				     8));
1854 	/* The length of the mask must be exactly equal to the size (in pages) of the metadata region */
1855 	assert(ctx->mask->length == ctx->super->md_len);
1856 
1857 	rc = spdk_bit_array_resize(&ctx->bs->used_md_pages, ctx->mask->length);
1858 	if (rc < 0) {
1859 		spdk_dma_free(ctx->mask);
1860 		_spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM);
1861 		return;
1862 	}
1863 
1864 	for (i = 0; i < ctx->mask->length / 8; i++) {
1865 		uint8_t segment = ctx->mask->mask[i];
1866 		for (j = 0; segment && (j < 8); j++) {
1867 			if (segment & 1U) {
1868 				spdk_bit_array_set(ctx->bs->used_md_pages, (i * 8) + j);
1869 			}
1870 			segment >>= 1U;
1871 		}
1872 	}
1873 	spdk_dma_free(ctx->mask);
1874 
1875 	/* Read the used clusters mask */
1876 	mask_size = ctx->super->used_cluster_mask_len * SPDK_BS_PAGE_SIZE;
1877 	ctx->mask = spdk_dma_zmalloc(mask_size, 0x1000, NULL);
1878 	if (!ctx->mask) {
1879 		_spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM);
1880 		return;
1881 	}
1882 	lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_start);
1883 	lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_cluster_mask_len);
1884 	spdk_bs_sequence_read(seq, ctx->mask, lba, lba_count,
1885 			      _spdk_bs_load_used_clusters_cpl, ctx);
1886 }
1887 
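/*
 * The super block has been rewritten with the clean flag cleared; continue
 * the clean-load path by reading back the used-pages mask.
 */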
1888 static void
1889 _spdk_bs_load_write_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
1890 {
1891 	struct spdk_bs_load_ctx	*ctx = cb_arg;
1892 	uint64_t lba, lba_count, mask_size;
1893 
1894 	/* Read the used pages mask */
1895 	mask_size = ctx->super->used_page_mask_len * SPDK_BS_PAGE_SIZE;
1896 	ctx->mask = spdk_dma_zmalloc(mask_size, 0x1000, NULL);
1897 	if (!ctx->mask) {
1898 		_spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM);
1899 		return;
1900 	}
1901 
1902 	lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_start);
1903 	lba_count = _spdk_bs_page_to_lba(ctx->bs, ctx->super->used_page_mask_len);
1904 	spdk_bs_sequence_read(seq, ctx->mask, lba, lba_count,
1905 			      _spdk_bs_load_used_pages_cpl, ctx);
1906 }
1907 
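/*
 * Walk all descriptors in one metadata page during recovery. Extent
 * descriptors re-claim their clusters in the used_clusters bit array; xattr
 * and flags descriptors carry no allocation state and are skipped. Returns
 * -1 if the page is malformed (unknown descriptor type, an empty extent
 * descriptor, or more clusters claimed than exist).
 */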
1908 static int
1909 _spdk_bs_load_replay_md_parse_page(const struct spdk_blob_md_page *page, struct spdk_blob_store *bs)
1910 {
1911 	struct spdk_blob_md_descriptor *desc;
1912 	size_t	cur_desc = 0;
1913 
1914 	desc = (struct spdk_blob_md_descriptor *)page->descriptors;
1915 	while (cur_desc < sizeof(page->descriptors)) {
1916 		if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_PADDING) {
1917 			if (desc->length == 0) {
1918 				/* If padding and length are 0, this terminates the page */
1919 				break;
1920 			}
1921 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_EXTENT) {
1922 			struct spdk_blob_md_descriptor_extent	*desc_extent;
1923 			unsigned int				i, j;
1924 			unsigned int				cluster_count = 0;
1925 
1926 			desc_extent = (struct spdk_blob_md_descriptor_extent *)desc;
1927 
1928 			for (i = 0; i < desc_extent->length / sizeof(desc_extent->extents[0]); i++) {
1929 				for (j = 0; j < desc_extent->extents[i].length; j++) {
1930 					spdk_bit_array_set(bs->used_clusters, desc_extent->extents[i].cluster_idx + j);
1931 					if (bs->num_free_clusters == 0) {
1932 						return -1;
1933 					}
1934 					bs->num_free_clusters--;
1935 					cluster_count++;
1936 				}
1937 			}
1938 			if (cluster_count == 0) {
1939 				return -1;
1940 			}
1941 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_XATTR) {
1942 			/* Skip this item */
1943 		} else if (desc->type == SPDK_MD_DESCRIPTOR_TYPE_FLAGS) {
1944 			/* Skip this item */
1945 		} else {
1946 			/* Error */
1947 			return -1;
1948 		}
1949 		/* Advance to the next descriptor */
1950 		cur_desc += sizeof(*desc) + desc->length;
1951 		if (cur_desc + sizeof(*desc) > sizeof(page->descriptors)) {
1952 			break;
1953 		}
1954 		desc = (struct spdk_blob_md_descriptor *)((uintptr_t)page->descriptors + cur_desc);
1955 	}
1956 	return 0;
1957 }
1958 
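/*
 * A replayed metadata page is accepted only if its CRC matches and the blob
 * id stored in the page agrees with the page's position in the metadata
 * region.
 */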
1959 static bool _spdk_bs_load_cur_md_page_valid(struct spdk_bs_load_ctx *ctx)
1960 {
1961 	uint32_t crc;
1962 
1963 	crc = _spdk_blob_md_page_calc_crc(ctx->page);
1964 	if (crc != ctx->page->crc) {
1965 		return false;
1966 	}
1967 
1968 	if (_spdk_bs_page_to_blobid(ctx->cur_page) != ctx->page->id) {
1969 		return false;
1970 	}
1971 	return true;
1972 }
1973 
1974 static void
1975 _spdk_bs_load_replay_cur_md_page(spdk_bs_sequence_t *seq, void *cb_arg);
1976 
1977 static void
1978 _spdk_bs_load_write_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
1979 {
1980 	struct spdk_bs_load_ctx	*ctx = cb_arg;
1981 
1982 	spdk_dma_free(ctx->mask);
1983 	spdk_dma_free(ctx->super);
1984 	spdk_bs_sequence_finish(seq, bserrno);
1985 	free(ctx);
1986 }
1987 
1988 static void
1989 _spdk_bs_load_write_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
1990 {
1991 	struct spdk_bs_load_ctx	*ctx = cb_arg;
1992 
1993 	spdk_dma_free(ctx->mask);
1994 	ctx->mask = NULL;
1995 
1996 	_spdk_bs_write_used_clusters(seq, cb_arg, _spdk_bs_load_write_used_clusters_cpl);
1997 }
1998 
1999 static void
2000 _spdk_bs_load_write_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2001 {
2002 	struct spdk_bs_load_ctx	*ctx = cb_arg;
2003 
2004 	spdk_dma_free(ctx->mask);
2005 	ctx->mask = NULL;
2006 
2007 	_spdk_bs_write_used_blobids(seq, cb_arg, _spdk_bs_load_write_used_blobids_cpl);
2008 }
2009 
2010 static void
2011 _spdk_bs_load_write_used_md(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2012 {
2013 	_spdk_bs_write_used_md(seq, cb_arg, _spdk_bs_load_write_used_pages_cpl);
2014 }
2015 
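/*
 * Recovery state machine, called once per metadata page read. A valid page
 * marks its md page (and, if sequence_num == 0, its blob id) as used and is
 * parsed for extents; pages chained via page->next are followed first. Once
 * every page has been visited, the clusters at the front of the disk that
 * hold the metadata are claimed and the rebuilt masks are persisted.
 */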
2016 static void
2017 _spdk_bs_load_replay_md_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2018 {
2019 	struct spdk_bs_load_ctx *ctx = cb_arg;
2020 	uint64_t num_md_clusters;
2021 	uint64_t i;
2022 	uint32_t page_num;
2023 
2024 	if (bserrno != 0) {
2025 		_spdk_bs_load_ctx_fail(seq, ctx, bserrno);
2026 		return;
2027 	}
2028 
2029 	page_num = ctx->cur_page;
2030 	if (_spdk_bs_load_cur_md_page_valid(ctx) == true) {
2031 		if (ctx->page->sequence_num == 0 || ctx->in_page_chain == true) {
2032 			spdk_bit_array_set(ctx->bs->used_md_pages, page_num);
2033 			if (ctx->page->sequence_num == 0) {
2034 				spdk_bit_array_set(ctx->bs->used_blobids, page_num);
2035 			}
2036 			if (_spdk_bs_load_replay_md_parse_page(ctx->page, ctx->bs)) {
2037 				_spdk_bs_load_ctx_fail(seq, ctx, -EILSEQ);
2038 				return;
2039 			}
2040 			if (ctx->page->next != SPDK_INVALID_MD_PAGE) {
2041 				ctx->in_page_chain = true;
2042 				ctx->cur_page = ctx->page->next;
2043 				_spdk_bs_load_replay_cur_md_page(seq, cb_arg);
2044 				return;
2045 			}
2046 		}
2047 	}
2048 
2049 	ctx->in_page_chain = false;
2050 
2051 	do {
2052 		ctx->page_index++;
2053 	} while (spdk_bit_array_get(ctx->bs->used_md_pages, ctx->page_index) == true);
2054 
2055 	if (ctx->page_index < ctx->super->md_len) {
2056 		ctx->cur_page = ctx->page_index;
2057 		_spdk_bs_load_replay_cur_md_page(seq, cb_arg);
2058 	} else {
2059 		/* Claim all of the clusters used by the metadata */
2060 		num_md_clusters = divide_round_up(ctx->super->md_len, ctx->bs->pages_per_cluster);
2061 		for (i = 0; i < num_md_clusters; i++) {
2062 			_spdk_bs_claim_cluster(ctx->bs, i);
2063 		}
2064 		spdk_dma_free(ctx->page);
2065 		_spdk_bs_load_write_used_md(seq, ctx, bserrno);
2066 	}
2067 }
2068 
2069 static void
2070 _spdk_bs_load_replay_cur_md_page(spdk_bs_sequence_t *seq, void *cb_arg)
2071 {
2072 	struct spdk_bs_load_ctx *ctx = cb_arg;
2073 	uint64_t lba;
2074 
2075 	assert(ctx->cur_page < ctx->super->md_len);
2076 	lba = _spdk_bs_page_to_lba(ctx->bs, ctx->super->md_start + ctx->cur_page);
2077 	spdk_bs_sequence_read(seq, ctx->page, lba,
2078 			      _spdk_bs_byte_to_lba(ctx->bs, SPDK_BS_PAGE_SIZE),
2079 			      _spdk_bs_load_replay_md_cpl, ctx);
2080 }
2081 
2082 static void
2083 _spdk_bs_load_replay_md(spdk_bs_sequence_t *seq, void *cb_arg)
2084 {
2085 	struct spdk_bs_load_ctx *ctx = cb_arg;
2086 
2087 	ctx->page_index = 0;
2088 	ctx->cur_page = 0;
2089 	ctx->page = spdk_dma_zmalloc(SPDK_BS_PAGE_SIZE,
2090 				     SPDK_BS_PAGE_SIZE,
2091 				     NULL);
2092 	if (!ctx->page) {
2093 		_spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM);
2094 		return;
2095 	}
2096 	_spdk_bs_load_replay_cur_md_page(seq, cb_arg);
2097 }
2098 
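/*
 * Recovery entry point, used when the super block is not marked clean (or
 * predates the blobid mask): size the in-memory bit arrays from the super
 * block and rebuild them by replaying the entire metadata region.
 */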
2099 static void
2100 _spdk_bs_recover(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2101 {
2102 	struct spdk_bs_load_ctx *ctx = cb_arg;
2103 	int 		rc;
2104 
2105 	if (bserrno != 0) {
2106 		_spdk_bs_load_ctx_fail(seq, ctx, -EIO);
2107 		return;
2108 	}
2109 
2110 	rc = spdk_bit_array_resize(&ctx->bs->used_md_pages, ctx->super->md_len);
2111 	if (rc < 0) {
2112 		_spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM);
2113 		return;
2114 	}
2115 
2116 	rc = spdk_bit_array_resize(&ctx->bs->used_blobids, ctx->super->md_len);
2117 	if (rc < 0) {
2118 		_spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM);
2119 		return;
2120 	}
2121 
2122 	rc = spdk_bit_array_resize(&ctx->bs->used_clusters, ctx->bs->total_clusters);
2123 	if (rc < 0) {
2124 		_spdk_bs_load_ctx_fail(seq, ctx, -ENOMEM);
2125 		return;
2126 	}
2127 
2128 	ctx->bs->num_free_clusters = ctx->bs->total_clusters;
2129 	_spdk_bs_load_replay_md(seq, cb_arg);
2130 }
2131 
2132 static void
2133 _spdk_bs_load_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2134 {
2135 	struct spdk_bs_load_ctx *ctx = cb_arg;
2136 	uint32_t	crc;
2137 	static const char zeros[SPDK_BLOBSTORE_TYPE_LENGTH];
2138 
2139 	if (ctx->super->version > SPDK_BS_VERSION ||
2140 	    ctx->super->version < SPDK_BS_INITIAL_VERSION) {
2141 		_spdk_bs_load_ctx_fail(seq, ctx, -EILSEQ);
2142 		return;
2143 	}
2144 
2145 	if (memcmp(ctx->super->signature, SPDK_BS_SUPER_BLOCK_SIG,
2146 		   sizeof(ctx->super->signature)) != 0) {
2147 		_spdk_bs_load_ctx_fail(seq, ctx, -EILSEQ);
2148 		return;
2149 	}
2150 
2151 	crc = _spdk_blob_md_page_calc_crc(ctx->super);
2152 	if (crc != ctx->super->crc) {
2153 		_spdk_bs_load_ctx_fail(seq, ctx, -EILSEQ);
2154 		return;
2155 	}
2156 
2157 	if (memcmp(&ctx->bs->bstype, &ctx->super->bstype, SPDK_BLOBSTORE_TYPE_LENGTH) == 0) {
2158 		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Bstype matched - loading blobstore\n");
2159 	} else if (memcmp(&ctx->bs->bstype, zeros, SPDK_BLOBSTORE_TYPE_LENGTH) == 0) {
2160 		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Bstype wildcard used - loading blobstore regardless of bstype\n");
2161 	} else {
2162 		SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Unexpected bstype\n");
2163 		SPDK_TRACEDUMP(SPDK_LOG_BLOB, "Expected:", ctx->bs->bstype.bstype, SPDK_BLOBSTORE_TYPE_LENGTH);
2164 		SPDK_TRACEDUMP(SPDK_LOG_BLOB, "Found:", ctx->super->bstype.bstype, SPDK_BLOBSTORE_TYPE_LENGTH);
2165 		_spdk_bs_load_ctx_fail(seq, ctx, -ENXIO);
2166 		return;
2167 	}
2168 
2169 	/* Parse the super block */
2170 	ctx->bs->cluster_sz = ctx->super->cluster_size;
2171 	ctx->bs->total_clusters = ctx->bs->dev->blockcnt / (ctx->bs->cluster_sz / ctx->bs->dev->blocklen);
2172 	ctx->bs->pages_per_cluster = ctx->bs->cluster_sz / SPDK_BS_PAGE_SIZE;
2173 	ctx->bs->md_start = ctx->super->md_start;
2174 	ctx->bs->md_len = ctx->super->md_len;
2175 	ctx->bs->total_data_clusters = ctx->bs->total_clusters - divide_round_up(
2176 					       ctx->bs->md_start + ctx->bs->md_len, ctx->bs->pages_per_cluster);
2177 	ctx->bs->super_blob = ctx->super->super_blob;
2178 	memcpy(&ctx->bs->bstype, &ctx->super->bstype, sizeof(ctx->super->bstype));
2179 
2180 	if (ctx->super->clean == 0) {
2181 		_spdk_bs_recover(seq, ctx, 0);
2182 	} else if (ctx->super->used_blobid_mask_len == 0) {
2183 		/*
2184 		 * Metadata is clean, but this is an old metadata format without
2185 		 *  a blobid mask.  Clear the clean bit and then build the masks
2186 		 *  using _spdk_bs_recover.
2187 		 */
2188 		ctx->super->clean = 0;
2189 		_spdk_bs_write_super(seq, ctx->bs, ctx->super, _spdk_bs_recover, ctx);
2190 	} else {
2191 		ctx->super->clean = 0;
2192 		_spdk_bs_write_super(seq, ctx->bs, ctx->super, _spdk_bs_load_write_super_cpl, ctx);
2193 	}
2194 }
2195 
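/*
 * A minimal usage sketch for loading an existing blobstore. The bs_dev here
 * is assumed to come from a helper such as spdk_bdev_create_bs_dev(); any
 * struct spdk_bs_dev implementation works:
 *
 *	static void
 *	load_done(void *cb_arg, struct spdk_blob_store *bs, int bserrno)
 *	{
 *		if (bserrno == 0) {
 *			// blobstore is ready for use
 *		}
 *	}
 *
 *	struct spdk_bs_opts opts;
 *
 *	spdk_bs_opts_init(&opts);
 *	spdk_bs_load(bs_dev, &opts, load_done, NULL);
 */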
2196 void
2197 spdk_bs_load(struct spdk_bs_dev *dev, struct spdk_bs_opts *o,
2198 	     spdk_bs_op_with_handle_complete cb_fn, void *cb_arg)
2199 {
2200 	struct spdk_blob_store	*bs;
2201 	struct spdk_bs_cpl	cpl;
2202 	spdk_bs_sequence_t	*seq;
2203 	struct spdk_bs_load_ctx *ctx;
2204 	struct spdk_bs_opts	opts = {};
2205 
2206 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Loading blobstore from dev %p\n", dev);
2207 
2208 	if (o) {
2209 		opts = *o;
2210 	} else {
2211 		spdk_bs_opts_init(&opts);
2212 	}
2213 
2214 	if (opts.max_md_ops == 0 || opts.max_channel_ops == 0) {
2215 		cb_fn(cb_arg, NULL, -EINVAL);
2216 		return;
2217 	}
2218 
2219 	bs = _spdk_bs_alloc(dev, &opts);
2220 	if (!bs) {
2221 		cb_fn(cb_arg, NULL, -ENOMEM);
2222 		return;
2223 	}
2224 
2225 	ctx = calloc(1, sizeof(*ctx));
2226 	if (!ctx) {
2227 		_spdk_bs_free(bs);
2228 		cb_fn(cb_arg, NULL, -ENOMEM);
2229 		return;
2230 	}
2231 
2232 	ctx->bs = bs;
2233 	ctx->is_load = true;
2234 
2235 	/* Allocate memory for the super block */
2236 	ctx->super = spdk_dma_zmalloc(sizeof(*ctx->super), 0x1000, NULL);
2237 	if (!ctx->super) {
2238 		free(ctx);
2239 		_spdk_bs_free(bs);
		cb_fn(cb_arg, NULL, -ENOMEM);
2240 		return;
2241 	}
2242 
2243 	cpl.type = SPDK_BS_CPL_TYPE_BS_HANDLE;
2244 	cpl.u.bs_handle.cb_fn = cb_fn;
2245 	cpl.u.bs_handle.cb_arg = cb_arg;
2246 	cpl.u.bs_handle.bs = bs;
2247 
2248 	seq = spdk_bs_sequence_start(bs->md_channel, &cpl);
2249 	if (!seq) {
2250 		spdk_dma_free(ctx->super);
2251 		free(ctx);
2252 		_spdk_bs_free(bs);
2253 		cb_fn(cb_arg, NULL, -ENOMEM);
2254 		return;
2255 	}
2256 
2257 	/* Read the super block */
2258 	spdk_bs_sequence_read(seq, ctx->super, _spdk_bs_page_to_lba(bs, 0),
2259 			      _spdk_bs_byte_to_lba(bs, sizeof(*ctx->super)),
2260 			      _spdk_bs_load_super_cpl, ctx);
2261 }
2262 
2263 /* END spdk_bs_load */
2264 
2265 /* START spdk_bs_init */
2266 
2267 struct spdk_bs_init_ctx {
2268 	struct spdk_blob_store		*bs;
2269 	struct spdk_bs_super_block	*super;
2270 };
2271 
2272 static void
2273 _spdk_bs_init_persist_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2274 {
2275 	struct spdk_bs_init_ctx *ctx = cb_arg;
2276 
2277 	spdk_dma_free(ctx->super);
2278 	free(ctx);
2279 
2280 	spdk_bs_sequence_finish(seq, bserrno);
2281 }
2282 
2283 static void
2284 _spdk_bs_init_trim_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2285 {
2286 	struct spdk_bs_init_ctx *ctx = cb_arg;
2287 
2288 	/* Write super block */
2289 	spdk_bs_sequence_write(seq, ctx->super, _spdk_bs_page_to_lba(ctx->bs, 0),
2290 			       _spdk_bs_byte_to_lba(ctx->bs, sizeof(*ctx->super)),
2291 			       _spdk_bs_init_persist_super_cpl, ctx);
2292 }
2293 
2294 void
2295 spdk_bs_init(struct spdk_bs_dev *dev, struct spdk_bs_opts *o,
2296 	     spdk_bs_op_with_handle_complete cb_fn, void *cb_arg)
2297 {
2298 	struct spdk_bs_init_ctx *ctx;
2299 	struct spdk_blob_store	*bs;
2300 	struct spdk_bs_cpl	cpl;
2301 	spdk_bs_sequence_t	*seq;
2302 	spdk_bs_batch_t		*batch;
2303 	uint64_t		num_md_lba;
2304 	uint64_t		num_md_pages;
2305 	uint64_t		num_md_clusters;
2306 	uint32_t		i;
2307 	struct spdk_bs_opts	opts = {};
2308 	int			rc;
2309 
2310 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Initializing blobstore on dev %p\n", dev);
2311 
2312 	if ((SPDK_BS_PAGE_SIZE % dev->blocklen) != 0) {
2313 		SPDK_ERRLOG("unsupported dev block length of %d\n",
2314 			    dev->blocklen);
2315 		dev->destroy(dev);
2316 		cb_fn(cb_arg, NULL, -EINVAL);
2317 		return;
2318 	}
2319 
2320 	if (o) {
2321 		opts = *o;
2322 	} else {
2323 		spdk_bs_opts_init(&opts);
2324 	}
2325 
2326 	if (_spdk_bs_opts_verify(&opts) != 0) {
2327 		dev->destroy(dev);
2328 		cb_fn(cb_arg, NULL, -EINVAL);
2329 		return;
2330 	}
2331 
2332 	bs = _spdk_bs_alloc(dev, &opts);
2333 	if (!bs) {
2334 		dev->destroy(dev);
2335 		cb_fn(cb_arg, NULL, -ENOMEM);
2336 		return;
2337 	}
2338 
2339 	if (opts.num_md_pages == SPDK_BLOB_OPTS_NUM_MD_PAGES) {
2340 		/* By default, allocate 1 page per cluster.
2341 		 * Technically, this over-allocates metadata
2342 		 * because more metadata will reduce the number
2343 		 * of usable clusters. This can be addressed with
2344 		 * more complex math in the future.
2345 		 */
2346 		bs->md_len = bs->total_clusters;
2347 	} else {
2348 		bs->md_len = opts.num_md_pages;
2349 	}
2350 
2351 	rc = spdk_bit_array_resize(&bs->used_md_pages, bs->md_len);
2352 	if (rc < 0) {
2353 		_spdk_bs_free(bs);
2354 		cb_fn(cb_arg, NULL, -ENOMEM);
2355 		return;
2356 	}
2357 
2358 	rc = spdk_bit_array_resize(&bs->used_blobids, bs->md_len);
2359 	if (rc < 0) {
2360 		_spdk_bs_free(bs);
2361 		cb_fn(cb_arg, NULL, -ENOMEM);
2362 		return;
2363 	}
2364 
2365 	ctx = calloc(1, sizeof(*ctx));
2366 	if (!ctx) {
2367 		_spdk_bs_free(bs);
2368 		cb_fn(cb_arg, NULL, -ENOMEM);
2369 		return;
2370 	}
2371 
2372 	ctx->bs = bs;
2373 
2374 	/* Allocate memory for the super block */
2375 	ctx->super = spdk_dma_zmalloc(sizeof(*ctx->super), 0x1000, NULL);
2376 	if (!ctx->super) {
2377 		free(ctx);
2378 		_spdk_bs_free(bs);
		cb_fn(cb_arg, NULL, -ENOMEM);
2379 		return;
2380 	}
2381 	memcpy(ctx->super->signature, SPDK_BS_SUPER_BLOCK_SIG,
2382 	       sizeof(ctx->super->signature));
2383 	ctx->super->version = SPDK_BS_VERSION;
2384 	ctx->super->length = sizeof(*ctx->super);
2385 	ctx->super->super_blob = bs->super_blob;
2386 	ctx->super->clean = 0;
2387 	ctx->super->cluster_size = bs->cluster_sz;
2388 	memcpy(&ctx->super->bstype, &bs->bstype, sizeof(bs->bstype));
2389 
2390 	/* Calculate how many pages the metadata consumes at the front
2391 	 * of the disk.
2392 	 */
2393 
2394 	/* The super block uses 1 page */
2395 	num_md_pages = 1;
2396 
2397 	/* The used_md_pages mask requires 1 bit per metadata page, rounded
2398 	 * up to the nearest page, plus a header.
2399 	 */
2400 	ctx->super->used_page_mask_start = num_md_pages;
2401 	ctx->super->used_page_mask_len = divide_round_up(sizeof(struct spdk_bs_md_mask) +
2402 					 divide_round_up(bs->md_len, 8),
2403 					 SPDK_BS_PAGE_SIZE);
2404 	num_md_pages += ctx->super->used_page_mask_len;
2405 
2406 	/* The used_clusters mask requires 1 bit per cluster, rounded
2407 	 * up to the nearest page, plus a header.
2408 	 */
2409 	ctx->super->used_cluster_mask_start = num_md_pages;
2410 	ctx->super->used_cluster_mask_len = divide_round_up(sizeof(struct spdk_bs_md_mask) +
2411 					    divide_round_up(bs->total_clusters, 8),
2412 					    SPDK_BS_PAGE_SIZE);
2413 	num_md_pages += ctx->super->used_cluster_mask_len;
2414 
2415 	/* The used_blobids mask requires 1 bit per metadata page, rounded
2416 	 * up to the nearest page, plus a header.
2417 	 */
2418 	ctx->super->used_blobid_mask_start = num_md_pages;
2419 	ctx->super->used_blobid_mask_len = divide_round_up(sizeof(struct spdk_bs_md_mask) +
2420 					   divide_round_up(bs->md_len, 8),
2421 					   SPDK_BS_PAGE_SIZE);
2422 	num_md_pages += ctx->super->used_blobid_mask_len;
2423 
2424 	/* The metadata region size was chosen above */
2425 	ctx->super->md_start = bs->md_start = num_md_pages;
2426 	ctx->super->md_len = bs->md_len;
2427 	num_md_pages += bs->md_len;
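
	/*
	 * Worked example, assuming a 1 GiB device with 1 MiB clusters and
	 * 4 KiB pages: total_clusters == md_len == 1024, so each mask needs
	 * divide_round_up(sizeof(struct spdk_bs_md_mask) + 128, 4096) == 1
	 * page. Page 0 then holds the super block, pages 1-3 hold the three
	 * masks, and the metadata region occupies pages 4 through 1027.
	 */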
2428 
2429 	num_md_lba = _spdk_bs_page_to_lba(bs, num_md_pages);
2430 
2431 	ctx->super->crc = _spdk_blob_md_page_calc_crc(ctx->super);
2432 
2433 	num_md_clusters = divide_round_up(num_md_pages, bs->pages_per_cluster);
2434 	if (num_md_clusters > bs->total_clusters) {
2435 		SPDK_ERRLOG("Blobstore metadata cannot use more clusters than are available, "
2436 			    "please decrease number of pages reserved for metadata "
2437 			    "or increase cluster size.\n");
2438 		spdk_dma_free(ctx->super);
2439 		free(ctx);
2440 		_spdk_bs_free(bs);
2441 		cb_fn(cb_arg, NULL, -ENOMEM);
2442 		return;
2443 	}
2444 	/* Claim all of the clusters used by the metadata */
2445 	for (i = 0; i < num_md_clusters; i++) {
2446 		_spdk_bs_claim_cluster(bs, i);
2447 	}
2448 
2449 	bs->total_data_clusters = bs->num_free_clusters;
2450 
2451 	cpl.type = SPDK_BS_CPL_TYPE_BS_HANDLE;
2452 	cpl.u.bs_handle.cb_fn = cb_fn;
2453 	cpl.u.bs_handle.cb_arg = cb_arg;
2454 	cpl.u.bs_handle.bs = bs;
2455 
2456 	seq = spdk_bs_sequence_start(bs->md_channel, &cpl);
2457 	if (!seq) {
2458 		spdk_dma_free(ctx->super);
2459 		free(ctx);
2460 		_spdk_bs_free(bs);
2461 		cb_fn(cb_arg, NULL, -ENOMEM);
2462 		return;
2463 	}
2464 
2465 	batch = spdk_bs_sequence_to_batch(seq, _spdk_bs_init_trim_cpl, ctx);
2466 
2467 	/* Clear metadata space */
2468 	spdk_bs_batch_write_zeroes(batch, 0, num_md_lba);
2469 	/* Trim data clusters */
2470 	spdk_bs_batch_unmap(batch, num_md_lba, ctx->bs->dev->blockcnt - num_md_lba);
2471 
2472 	spdk_bs_batch_close(batch);
2473 }
2474 
2475 /* END spdk_bs_init */
2476 
2477 /* START spdk_bs_destroy */
2478 
2479 static void
2480 _spdk_bs_destroy_trim_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2481 {
2482 	struct spdk_bs_init_ctx *ctx = cb_arg;
2483 	struct spdk_blob_store *bs = ctx->bs;
2484 
2485 	/*
2486 	 * We need to defer calling spdk_bs_call_cpl() until after
2487 	 * dev destruction, so tuck these away for later use.
2488 	 */
2489 	bs->unload_err = bserrno;
2490 	memcpy(&bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl));
2491 	seq->cpl.type = SPDK_BS_CPL_TYPE_NONE;
2492 
2493 	spdk_bs_sequence_finish(seq, bserrno);
2494 
2495 	_spdk_bs_free(bs);
2496 	free(ctx);
2497 }
2498 
2499 void
2500 spdk_bs_destroy(struct spdk_blob_store *bs, spdk_bs_op_complete cb_fn,
2501 		void *cb_arg)
2502 {
2503 	struct spdk_bs_cpl	cpl;
2504 	spdk_bs_sequence_t	*seq;
2505 	struct spdk_bs_init_ctx *ctx;
2506 
2507 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Destroying blobstore\n");
2508 
2509 	if (!TAILQ_EMPTY(&bs->blobs)) {
2510 		SPDK_ERRLOG("Blobstore still has open blobs\n");
2511 		cb_fn(cb_arg, -EBUSY);
2512 		return;
2513 	}
2514 
2515 	cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
2516 	cpl.u.bs_basic.cb_fn = cb_fn;
2517 	cpl.u.bs_basic.cb_arg = cb_arg;
2518 
2519 	ctx = calloc(1, sizeof(*ctx));
2520 	if (!ctx) {
2521 		cb_fn(cb_arg, -ENOMEM);
2522 		return;
2523 	}
2524 
2525 	ctx->bs = bs;
2526 
2527 	seq = spdk_bs_sequence_start(bs->md_channel, &cpl);
2528 	if (!seq) {
2529 		free(ctx);
2530 		cb_fn(cb_arg, -ENOMEM);
2531 		return;
2532 	}
2533 
2534 	/* Write zeroes to the super block */
2535 	spdk_bs_sequence_write_zeroes(seq,
2536 				      _spdk_bs_page_to_lba(bs, 0),
2537 				      _spdk_bs_byte_to_lba(bs, sizeof(struct spdk_bs_super_block)),
2538 				      _spdk_bs_destroy_trim_cpl, ctx);
2539 }
2540 
2541 /* END spdk_bs_destroy */
2542 
2543 /* START spdk_bs_unload */
2544 
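/*
 * Unload persists in-memory state in a fixed chain: read the super block,
 * write out the used_md_pages, used_blobids and used_clusters masks, set
 * clean = 1, rewrite the super block, and only then tear down the blobstore.
 * The callbacks below each run one link of that chain.
 */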
2545 static void
2546 _spdk_bs_unload_write_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2547 {
2548 	struct spdk_bs_load_ctx	*ctx = cb_arg;
2549 
2550 	spdk_dma_free(ctx->super);
2551 
2552 	/*
2553 	 * We need to defer calling spdk_bs_call_cpl() until after
2554 	 * dev destruction, so tuck these away for later use.
2555 	 */
2556 	ctx->bs->unload_err = bserrno;
2557 	memcpy(&ctx->bs->unload_cpl, &seq->cpl, sizeof(struct spdk_bs_cpl));
2558 	seq->cpl.type = SPDK_BS_CPL_TYPE_NONE;
2559 
2560 	spdk_bs_sequence_finish(seq, bserrno);
2561 
2562 	_spdk_bs_free(ctx->bs);
2563 	free(ctx);
2564 }
2565 
2566 static void
2567 _spdk_bs_unload_write_used_clusters_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2568 {
2569 	struct spdk_bs_load_ctx	*ctx = cb_arg;
2570 
2571 	spdk_dma_free(ctx->mask);
2572 	ctx->super->clean = 1;
2573 
2574 	_spdk_bs_write_super(seq, ctx->bs, ctx->super, _spdk_bs_unload_write_super_cpl, ctx);
2575 }
2576 
2577 static void
2578 _spdk_bs_unload_write_used_blobids_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2579 {
2580 	struct spdk_bs_load_ctx	*ctx = cb_arg;
2581 
2582 	spdk_dma_free(ctx->mask);
2583 	ctx->mask = NULL;
2584 
2585 	_spdk_bs_write_used_clusters(seq, cb_arg, _spdk_bs_unload_write_used_clusters_cpl);
2586 }
2587 
2588 static void
2589 _spdk_bs_unload_write_used_pages_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2590 {
2591 	struct spdk_bs_load_ctx	*ctx = cb_arg;
2592 
2593 	spdk_dma_free(ctx->mask);
2594 	ctx->mask = NULL;
2595 
2596 	_spdk_bs_write_used_blobids(seq, cb_arg, _spdk_bs_unload_write_used_blobids_cpl);
2597 }
2598 
2599 static void
2600 _spdk_bs_unload_read_super_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2601 {
2602 	_spdk_bs_write_used_md(seq, cb_arg, _spdk_bs_unload_write_used_pages_cpl);
2603 }
2604 
2605 void
2606 spdk_bs_unload(struct spdk_blob_store *bs, spdk_bs_op_complete cb_fn, void *cb_arg)
2607 {
2608 	struct spdk_bs_cpl	cpl;
2609 	spdk_bs_sequence_t	*seq;
2610 	struct spdk_bs_load_ctx *ctx;
2611 
2612 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Syncing blobstore\n");
2613 
2614 	if (!TAILQ_EMPTY(&bs->blobs)) {
2615 		SPDK_ERRLOG("Blobstore still has open blobs\n");
2616 		cb_fn(cb_arg, -EBUSY);
2617 		return;
2618 	}
2619 
2620 	ctx = calloc(1, sizeof(*ctx));
2621 	if (!ctx) {
2622 		cb_fn(cb_arg, -ENOMEM);
2623 		return;
2624 	}
2625 
2626 	ctx->bs = bs;
2627 	ctx->is_load = false;
2628 
2629 	ctx->super = spdk_dma_zmalloc(sizeof(*ctx->super), 0x1000, NULL);
2630 	if (!ctx->super) {
2631 		free(ctx);
2632 		cb_fn(cb_arg, -ENOMEM);
2633 		return;
2634 	}
2635 
2636 	cpl.type = SPDK_BS_CPL_TYPE_BS_BASIC;
2637 	cpl.u.bs_basic.cb_fn = cb_fn;
2638 	cpl.u.bs_basic.cb_arg = cb_arg;
2639 
2640 	seq = spdk_bs_sequence_start(bs->md_channel, &cpl);
2641 	if (!seq) {
2642 		spdk_dma_free(ctx->super);
2643 		free(ctx);
2644 		cb_fn(cb_arg, -ENOMEM);
2645 		return;
2646 	}
2647 
2648 	/* Read super block */
2649 	spdk_bs_sequence_read(seq, ctx->super, _spdk_bs_page_to_lba(bs, 0),
2650 			      _spdk_bs_byte_to_lba(bs, sizeof(*ctx->super)),
2651 			      _spdk_bs_unload_read_super_cpl, ctx);
2652 }
2653 
2654 /* END spdk_bs_unload */
2655 
2656 void
2657 spdk_bs_set_super(struct spdk_blob_store *bs, spdk_blob_id blobid,
2658 		  spdk_bs_op_complete cb_fn, void *cb_arg)
2659 {
2660 	bs->super_blob = blobid;
2661 	cb_fn(cb_arg, 0);
2662 }
2663 
2664 void
2665 spdk_bs_get_super(struct spdk_blob_store *bs,
2666 		  spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
2667 {
2668 	if (bs->super_blob == SPDK_BLOBID_INVALID) {
2669 		cb_fn(cb_arg, SPDK_BLOBID_INVALID, -ENOENT);
2670 	} else {
2671 		cb_fn(cb_arg, bs->super_blob, 0);
2672 	}
2673 }
2674 
2675 uint64_t
2676 spdk_bs_get_cluster_size(struct spdk_blob_store *bs)
2677 {
2678 	return bs->cluster_sz;
2679 }
2680 
2681 uint64_t
2682 spdk_bs_get_page_size(struct spdk_blob_store *bs)
2683 {
2684 	return SPDK_BS_PAGE_SIZE;
2685 }
2686 
2687 uint64_t
2688 spdk_bs_free_cluster_count(struct spdk_blob_store *bs)
2689 {
2690 	return bs->num_free_clusters;
2691 }
2692 
2693 uint64_t
2694 spdk_bs_total_data_cluster_count(struct spdk_blob_store *bs)
2695 {
2696 	return bs->total_data_clusters;
2697 }
2698 
2699 static int
2700 spdk_bs_register_md_thread(struct spdk_blob_store *bs)
2701 {
2702 	bs->md_channel = spdk_get_io_channel(bs);
2703 	if (!bs->md_channel) {
2704 		SPDK_ERRLOG("Failed to get IO channel.\n");
2705 		return -1;
2706 	}
2707 
2708 	return 0;
2709 }
2710 
2711 static int
2712 spdk_bs_unregister_md_thread(struct spdk_blob_store *bs)
2713 {
2714 	spdk_put_io_channel(bs->md_channel);
2715 
2716 	return 0;
2717 }
2718 
2719 spdk_blob_id spdk_blob_get_id(struct spdk_blob *_blob)
2720 {
2721 	struct spdk_blob_data *blob = __blob_to_data(_blob);
2722 
2723 	assert(blob != NULL);
2724 
2725 	return blob->id;
2726 }
2727 
2728 uint64_t spdk_blob_get_num_pages(struct spdk_blob *_blob)
2729 {
2730 	struct spdk_blob_data *blob = __blob_to_data(_blob);
2731 
2732 	assert(blob != NULL);
2733 
2734 	return _spdk_bs_cluster_to_page(blob->bs, blob->active.num_clusters);
2735 }
2736 
2737 uint64_t spdk_blob_get_num_clusters(struct spdk_blob *_blob)
2738 {
2739 	struct spdk_blob_data *blob = __blob_to_data(_blob);
2740 
2741 	assert(blob != NULL);
2742 
2743 	return blob->active.num_clusters;
2744 }
2745 
2746 /* START spdk_bs_create_blob */
2747 
2748 static void
2749 _spdk_bs_create_blob_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2750 {
2751 	struct spdk_blob_data *blob = cb_arg;
2752 
2753 	_spdk_blob_free(blob);
2754 
2755 	spdk_bs_sequence_finish(seq, bserrno);
2756 }
2757 
2758 static int
2759 _spdk_blob_set_xattrs(struct spdk_blob	*blob, const struct spdk_blob_opts *opts)
2760 {
2761 	uint64_t i;
2762 	size_t value_len = 0;
2763 	int rc;
2764 	const void *value = NULL;
2765 	if (opts->xattr_count > 0 && opts->get_xattr_value == NULL) {
2766 		return -EINVAL;
2767 	}
2768 	for (i = 0; i < opts->xattr_count; i++) {
2769 		opts->get_xattr_value(opts->xattr_ctx, opts->xattr_names[i], &value, &value_len);
2770 		if (value == NULL || value_len == 0) {
2771 			return -EINVAL;
2772 		}
2773 		rc = spdk_blob_set_xattr(blob, opts->xattr_names[i], value, value_len);
2774 		if (rc < 0) {
2775 			return rc;
2776 		}
2777 	}
2778 	return 0;
2779 }
2780 
2781 static void
2782 _spdk_blob_set_thin_provision(struct spdk_blob_data *blob)
2783 {
2784 	blob->invalid_flags |= SPDK_BLOB_THIN_PROV;
2785 	blob->state = SPDK_BLOB_STATE_DIRTY;
2786 }
2787 
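/*
 * A minimal creation sketch. create_done is a hypothetical callback matching
 * spdk_blob_op_with_id_complete; the option values are illustrative only:
 *
 *	struct spdk_blob_opts opts;
 *
 *	spdk_blob_opts_init(&opts);
 *	opts.num_clusters = 16;
 *	opts.thin_provision = true;
 *	spdk_bs_create_blob_ext(bs, &opts, create_done, NULL);
 */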
2788 void spdk_bs_create_blob_ext(struct spdk_blob_store *bs, const struct spdk_blob_opts *opts,
2789 			     spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
2790 {
2791 	struct spdk_blob_data	*blob;
2792 	uint32_t		page_idx;
2793 	struct spdk_bs_cpl 	cpl;
2794 	struct spdk_blob_opts	opts_default;
2795 	spdk_bs_sequence_t	*seq;
2796 	spdk_blob_id		id;
2797 	int rc;
2798 
2799 	page_idx = spdk_bit_array_find_first_clear(bs->used_md_pages, 0);
2800 	if (page_idx >= spdk_bit_array_capacity(bs->used_md_pages)) {
2801 		cb_fn(cb_arg, 0, -ENOMEM);
2802 		return;
2803 	}
2804 	spdk_bit_array_set(bs->used_blobids, page_idx);
2805 	spdk_bit_array_set(bs->used_md_pages, page_idx);
2806 
2807 	id = _spdk_bs_page_to_blobid(page_idx);
2808 
2809 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Creating blob with id %lu at page %u\n", id, page_idx);
2810 
2811 	blob = _spdk_blob_alloc(bs, id);
2812 	if (!blob) {
		/* Release the md page claimed above so it is not leaked. */
		spdk_bit_array_clear(bs->used_blobids, page_idx);
		spdk_bit_array_clear(bs->used_md_pages, page_idx);
2813 		cb_fn(cb_arg, 0, -ENOMEM);
2814 		return;
2815 	}
2816 
2817 	if (!opts) {
2818 		spdk_blob_opts_init(&opts_default);
2819 		opts = &opts_default;
2820 	}
2821 	rc = _spdk_blob_set_xattrs(__data_to_blob(blob), opts);
2822 	if (rc < 0) {
		spdk_bit_array_clear(bs->used_blobids, page_idx);
		spdk_bit_array_clear(bs->used_md_pages, page_idx);
2823 		_spdk_blob_free(blob);
2824 		cb_fn(cb_arg, 0, rc);
2825 		return;
2826 	}
2827 	if (opts->thin_provision) {
2828 		_spdk_blob_set_thin_provision(blob);
2829 	}
2830 
2831 	rc = spdk_blob_resize(__data_to_blob(blob), opts->num_clusters);
2832 	if (rc < 0) {
		spdk_bit_array_clear(bs->used_blobids, page_idx);
		spdk_bit_array_clear(bs->used_md_pages, page_idx);
2833 		_spdk_blob_free(blob);
2834 		cb_fn(cb_arg, 0, rc);
2835 		return;
2836 	}
2837 	cpl.type = SPDK_BS_CPL_TYPE_BLOBID;
2838 	cpl.u.blobid.cb_fn = cb_fn;
2839 	cpl.u.blobid.cb_arg = cb_arg;
2840 	cpl.u.blobid.blobid = blob->id;
2841 
2842 	seq = spdk_bs_sequence_start(bs->md_channel, &cpl);
2843 	if (!seq) {
		spdk_bit_array_clear(bs->used_blobids, page_idx);
		spdk_bit_array_clear(bs->used_md_pages, page_idx);
2844 		_spdk_blob_free(blob);
2845 		cb_fn(cb_arg, 0, -ENOMEM);
2846 		return;
2847 	}
2848 
2849 	_spdk_blob_persist(seq, blob, _spdk_bs_create_blob_cpl, blob);
2850 }
2851 
2852 void spdk_bs_create_blob(struct spdk_blob_store *bs,
2853 			 spdk_blob_op_with_id_complete cb_fn, void *cb_arg)
2854 {
2855 	spdk_bs_create_blob_ext(bs, NULL, cb_fn, cb_arg);
2856 }
2857 
2858 /* END spdk_bs_create_blob */
2859 
2860 /* START spdk_blob_resize */
2861 int
2862 spdk_blob_resize(struct spdk_blob *_blob, uint64_t sz)
2863 {
2864 	struct spdk_blob_data	*blob = __blob_to_data(_blob);
2865 	int			rc;
2866 
2867 	assert(blob != NULL);
2868 
2869 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Resizing blob %lu to %lu clusters\n", blob->id, sz);
2870 
2871 	if (blob->md_ro) {
2872 		return -EPERM;
2873 	}
2874 
2875 	if (sz == blob->active.num_clusters) {
2876 		return 0;
2877 	}
2878 
2879 	rc = _spdk_resize_blob(blob, sz);
2880 	if (rc < 0) {
2881 		return rc;
2882 	}
2883 
2884 	return 0;
2885 }
2886 
2887 /* END spdk_blob_resize */
2888 
2889 
2890 /* START spdk_bs_delete_blob */
2891 
2892 static void
2893 _spdk_bs_delete_close_cpl(void *cb_arg, int bserrno)
2894 {
2895 	spdk_bs_sequence_t *seq = cb_arg;
2896 
2897 	spdk_bs_sequence_finish(seq, bserrno);
2898 }
2899 
2900 static void
2901 _spdk_bs_delete_persist_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2902 {
2903 	struct spdk_blob *_blob = cb_arg;
2904 	struct spdk_blob_data *blob = __blob_to_data(_blob);
2905 
2906 	if (bserrno != 0) {
2907 		/*
2908 		 * We already removed this blob from the blobstore tailq, so
2909 		 *  we need to free it here since this is the last reference
2910 		 *  to it.
2911 		 */
2912 		_spdk_blob_free(blob);
2913 		_spdk_bs_delete_close_cpl(seq, bserrno);
2914 		return;
2915 	}
2916 
2917 	/*
2918 	 * This will immediately decrement the ref_count and call
2919 	 *  the completion routine since the metadata state is clean.
2920 	 *  By calling spdk_blob_close, we reduce the number of call
2921 	 *  points into code that touches the blob->open_ref count
2922 	 *  and the blobstore's blob list.
2923 	 */
2924 	spdk_blob_close(_blob, _spdk_bs_delete_close_cpl, seq);
2925 }
2926 
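/*
 * Deletion is layered on open: the blob is opened, checked for other
 * references, unlinked from the blobstore list (releasing its blobid bit),
 * resized to zero clusters and persisted with zero pages, then closed to
 * drop the final reference.
 */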
2927 static void
2928 _spdk_bs_delete_open_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
2929 {
2930 	spdk_bs_sequence_t *seq = cb_arg;
2931 	struct spdk_blob_data *blob = __blob_to_data(_blob);
2932 	uint32_t page_num;
2933 
2934 	if (bserrno != 0) {
2935 		spdk_bs_sequence_finish(seq, bserrno);
2936 		return;
2937 	}
2938 
2939 	if (blob->open_ref > 1) {
2940 		/*
2941 		 * Someone has this blob open (besides this delete context).
2942 		 *  Decrement the ref count directly and return -EBUSY.
2943 		 */
2944 		blob->open_ref--;
2945 		spdk_bs_sequence_finish(seq, -EBUSY);
2946 		return;
2947 	}
2948 
2949 	/*
2950 	 * Remove the blob from the blob_store list now, to ensure it does not
2951 	 *  get returned after this point by _spdk_blob_lookup().
2952 	 */
2953 	TAILQ_REMOVE(&blob->bs->blobs, blob, link);
2954 	page_num = _spdk_bs_blobid_to_page(blob->id);
2955 	spdk_bit_array_clear(blob->bs->used_blobids, page_num);
2956 	blob->state = SPDK_BLOB_STATE_DIRTY;
2957 	blob->active.num_pages = 0;
2958 	_spdk_resize_blob(blob, 0);
2959 
2960 	_spdk_blob_persist(seq, blob, _spdk_bs_delete_persist_cpl, _blob);
2961 }
2962 
2963 void
2964 spdk_bs_delete_blob(struct spdk_blob_store *bs, spdk_blob_id blobid,
2965 		    spdk_blob_op_complete cb_fn, void *cb_arg)
2966 {
2967 	struct spdk_bs_cpl	cpl;
2968 	spdk_bs_sequence_t 	*seq;
2969 
2970 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Deleting blob %lu\n", blobid);
2971 
2972 	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
2973 	cpl.u.blob_basic.cb_fn = cb_fn;
2974 	cpl.u.blob_basic.cb_arg = cb_arg;
2975 
2976 	seq = spdk_bs_sequence_start(bs->md_channel, &cpl);
2977 	if (!seq) {
2978 		cb_fn(cb_arg, -ENOMEM);
2979 		return;
2980 	}
2981 
2982 	spdk_bs_open_blob(bs, blobid, _spdk_bs_delete_open_cpl, seq);
2983 }
2984 
2985 /* END spdk_bs_delete_blob */
2986 
2987 /* START spdk_bs_open_blob */
2988 
2989 static void
2990 _spdk_bs_open_blob_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
2991 {
2992 	struct spdk_blob_data *blob = cb_arg;
2993 
2994 	/* If the blob has a CRC error, we just return NULL. */
2995 	if (blob == NULL) {
2996 		seq->cpl.u.blob_handle.blob = NULL;
2997 		spdk_bs_sequence_finish(seq, bserrno);
2998 		return;
2999 	}
3000 
3001 	blob->open_ref++;
3002 
3003 	TAILQ_INSERT_HEAD(&blob->bs->blobs, blob, link);
3004 
3005 	spdk_bs_sequence_finish(seq, bserrno);
3006 }
3007 
3008 void spdk_bs_open_blob(struct spdk_blob_store *bs, spdk_blob_id blobid,
3009 		       spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
3010 {
3011 	struct spdk_blob_data		*blob;
3012 	struct spdk_bs_cpl		cpl;
3013 	spdk_bs_sequence_t		*seq;
3014 	uint32_t			page_num;
3015 
3016 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Opening blob %lu\n", blobid);
3017 
3018 	page_num = _spdk_bs_blobid_to_page(blobid);
3019 	if (spdk_bit_array_get(bs->used_blobids, page_num) == false) {
3020 		/* Invalid blobid */
3021 		cb_fn(cb_arg, NULL, -ENOENT);
3022 		return;
3023 	}
3024 
3025 	blob = _spdk_blob_lookup(bs, blobid);
3026 	if (blob) {
3027 		blob->open_ref++;
3028 		cb_fn(cb_arg, __data_to_blob(blob), 0);
3029 		return;
3030 	}
3031 
3032 	blob = _spdk_blob_alloc(bs, blobid);
3033 	if (!blob) {
3034 		cb_fn(cb_arg, NULL, -ENOMEM);
3035 		return;
3036 	}
3037 
3038 	cpl.type = SPDK_BS_CPL_TYPE_BLOB_HANDLE;
3039 	cpl.u.blob_handle.cb_fn = cb_fn;
3040 	cpl.u.blob_handle.cb_arg = cb_arg;
3041 	cpl.u.blob_handle.blob = __data_to_blob(blob);
3042 
3043 	seq = spdk_bs_sequence_start(bs->md_channel, &cpl);
3044 	if (!seq) {
3045 		_spdk_blob_free(blob);
3046 		cb_fn(cb_arg, NULL, -ENOMEM);
3047 		return;
3048 	}
3049 
3050 	_spdk_blob_load(seq, blob, _spdk_bs_open_blob_cpl, blob);
3051 }
3052 /* END spdk_bs_open_blob */
3053 
3054 /* START spdk_blob_set_read_only */
3055 void spdk_blob_set_read_only(struct spdk_blob *b)
3056 {
3057 	struct spdk_blob_data *blob = __blob_to_data(b);
3058 
3059 	blob->data_ro = true;
3060 	blob->md_ro = true;
3061 	blob->data_ro_flags |= SPDK_BLOB_READ_ONLY;
3062 
3063 	blob->state = SPDK_BLOB_STATE_DIRTY;
3064 }
3065 /* END spdk_blob_set_read_only */
3066 
3067 /* START spdk_blob_sync_md */
3068 
3069 static void
3070 _spdk_blob_sync_md_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
3071 {
3072 	spdk_bs_sequence_finish(seq, bserrno);
3073 }
3074 
3075 void
3076 spdk_blob_sync_md(struct spdk_blob *_blob, spdk_blob_op_complete cb_fn, void *cb_arg)
3077 {
3078 	struct spdk_blob_data	*blob = __blob_to_data(_blob);
3079 	struct spdk_bs_cpl	cpl;
3080 	spdk_bs_sequence_t	*seq;
3081 
3082 	assert(blob != NULL);
3083 
3084 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Syncing blob %lu\n", blob->id);
3085 
3086 	assert(blob->state != SPDK_BLOB_STATE_LOADING &&
3087 	       blob->state != SPDK_BLOB_STATE_SYNCING);
3088 
3089 	if (blob->md_ro) {
3090 		assert(blob->state == SPDK_BLOB_STATE_CLEAN);
3091 		cb_fn(cb_arg, 0);
3092 		return;
3093 	}
3094 
3095 	if (blob->state == SPDK_BLOB_STATE_CLEAN) {
3096 		cb_fn(cb_arg, 0);
3097 		return;
3098 	}
3099 
3100 	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
3101 	cpl.u.blob_basic.cb_fn = cb_fn;
3102 	cpl.u.blob_basic.cb_arg = cb_arg;
3103 
3104 	seq = spdk_bs_sequence_start(blob->bs->md_channel, &cpl);
3105 	if (!seq) {
3106 		cb_fn(cb_arg, -ENOMEM);
3107 		return;
3108 	}
3109 
3110 	_spdk_blob_persist(seq, blob, _spdk_blob_sync_md_cpl, blob);
3111 }
3112 
3113 /* END spdk_blob_sync_md */
3114 
3115 /* START spdk_blob_close */
3116 
3117 static void
3118 _spdk_blob_close_cpl(spdk_bs_sequence_t *seq, void *cb_arg, int bserrno)
3119 {
3120 	struct spdk_blob_data *blob = cb_arg;
3121 
3122 	if (bserrno == 0) {
3123 		blob->open_ref--;
3124 		if (blob->open_ref == 0) {
3125 			/*
3126 			 * Blobs with active.num_pages == 0 are deleted blobs.
3127 			 *  These blobs are removed from the blob_store list
3128 			 *  when the deletion process starts, so don't try to
3129 			 *  remove them again.
3130 			 */
3131 			if (blob->active.num_pages > 0) {
3132 				TAILQ_REMOVE(&blob->bs->blobs, blob, link);
3133 			}
3134 			_spdk_blob_free(blob);
3135 		}
3136 	}
3137 
3138 	spdk_bs_sequence_finish(seq, bserrno);
3139 }
3140 
3141 void spdk_blob_close(struct spdk_blob *b, spdk_blob_op_complete cb_fn, void *cb_arg)
3142 {
3143 	struct spdk_bs_cpl	cpl;
3144 	struct spdk_blob_data	*blob;
3145 	spdk_bs_sequence_t	*seq;
3146 
3147 	assert(b != NULL);
3148 	blob = __blob_to_data(b);
3149 	assert(blob != NULL);
3150 
3151 	SPDK_DEBUGLOG(SPDK_LOG_BLOB, "Closing blob %lu\n", blob->id);
3152 
3153 	assert(blob->state != SPDK_BLOB_STATE_LOADING &&
3154 	       blob->state != SPDK_BLOB_STATE_SYNCING);
3155 
3156 	if (blob->open_ref == 0) {
3157 		cb_fn(cb_arg, -EBADF);
3158 		return;
3159 	}
3160 
3161 	cpl.type = SPDK_BS_CPL_TYPE_BLOB_BASIC;
3162 	cpl.u.blob_basic.cb_fn = cb_fn;
3163 	cpl.u.blob_basic.cb_arg = cb_arg;
3164 
3165 	seq = spdk_bs_sequence_start(blob->bs->md_channel, &cpl);
3166 	if (!seq) {
3167 		cb_fn(cb_arg, -ENOMEM);
3168 		return;
3169 	}
3170 
3171 	if (blob->state == SPDK_BLOB_STATE_CLEAN) {
3172 		_spdk_blob_close_cpl(seq, blob, 0);
3173 		return;
3174 	}
3175 
3176 	/* Sync metadata */
3177 	_spdk_blob_persist(seq, blob, _spdk_blob_close_cpl, blob);
3178 }
3179 
3180 /* END spdk_blob_close */
3181 
3182 struct spdk_io_channel *spdk_bs_alloc_io_channel(struct spdk_blob_store *bs)
3183 {
3184 	return spdk_get_io_channel(bs);
3185 }
3186 
3187 void spdk_bs_free_io_channel(struct spdk_io_channel *channel)
3188 {
3189 	spdk_put_io_channel(channel);
3190 }
3191 
3192 void spdk_bs_io_unmap_blob(struct spdk_blob *blob, struct spdk_io_channel *channel,
3193 			   uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg)
3194 {
3195 	_spdk_blob_request_submit_op(blob, channel, NULL, offset, length, cb_fn, cb_arg,
3196 				     SPDK_BLOB_UNMAP);
3197 }
3198 
3199 void spdk_bs_io_write_zeroes_blob(struct spdk_blob *blob, struct spdk_io_channel *channel,
3200 				  uint64_t offset, uint64_t length, spdk_blob_op_complete cb_fn, void *cb_arg)
3201 {
3202 	_spdk_blob_request_submit_op(blob, channel, NULL, offset, length, cb_fn, cb_arg,
3203 				     SPDK_BLOB_WRITE_ZEROES);
3204 }
3205 
3206 void spdk_bs_io_write_blob(struct spdk_blob *blob, struct spdk_io_channel *channel,
3207 			   void *payload, uint64_t offset, uint64_t length,
3208 			   spdk_blob_op_complete cb_fn, void *cb_arg)
3209 {
3210 	_spdk_blob_request_submit_op(blob, channel, payload, offset, length, cb_fn, cb_arg,
3211 				     SPDK_BLOB_WRITE);
3212 }
3213 
3214 void spdk_bs_io_read_blob(struct spdk_blob *blob, struct spdk_io_channel *channel,
3215 			  void *payload, uint64_t offset, uint64_t length,
3216 			  spdk_blob_op_complete cb_fn, void *cb_arg)
3217 {
3218 	_spdk_blob_request_submit_op(blob, channel, payload, offset, length, cb_fn, cb_arg,
3219 				     SPDK_BLOB_READ);
3220 }
3221 
3222 void spdk_bs_io_writev_blob(struct spdk_blob *blob, struct spdk_io_channel *channel,
3223 			    struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
3224 			    spdk_blob_op_complete cb_fn, void *cb_arg)
3225 {
3226 	_spdk_blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, false);
3227 }
3228 
3229 void spdk_bs_io_readv_blob(struct spdk_blob *blob, struct spdk_io_channel *channel,
3230 			   struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
3231 			   spdk_blob_op_complete cb_fn, void *cb_arg)
3232 {
3233 	_spdk_blob_request_submit_rw_iov(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, true);
3234 }
3235 
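/*
 * Blob iteration sketch. spdk_bs_iter_next() closes the current blob before
 * opening its successor, so the callback must not close it as well. iter_cb
 * is a hypothetical callback and bs is assumed reachable (e.g. via cb_arg):
 *
 *	static void
 *	iter_cb(void *cb_arg, struct spdk_blob *blob, int bserrno)
 *	{
 *		if (bserrno != 0) {
 *			return;	// -ENOENT means iteration is complete
 *		}
 *		// ... inspect blob ...
 *		spdk_bs_iter_next(bs, blob, iter_cb, cb_arg);
 *	}
 *
 *	spdk_bs_iter_first(bs, iter_cb, NULL);
 */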
3236 struct spdk_bs_iter_ctx {
3237 	int64_t page_num;
3238 	struct spdk_blob_store *bs;
3239 
3240 	spdk_blob_op_with_handle_complete cb_fn;
3241 	void *cb_arg;
3242 };
3243 
3244 static void
3245 _spdk_bs_iter_cpl(void *cb_arg, struct spdk_blob *_blob, int bserrno)
3246 {
3247 	struct spdk_bs_iter_ctx *ctx = cb_arg;
3248 	struct spdk_blob_store *bs = ctx->bs;
3249 	spdk_blob_id id;
3250 
3251 	if (bserrno == 0) {
3252 		ctx->cb_fn(ctx->cb_arg, _blob, bserrno);
3253 		free(ctx);
3254 		return;
3255 	}
3256 
3257 	ctx->page_num++;
3258 	ctx->page_num = spdk_bit_array_find_first_set(bs->used_blobids, ctx->page_num);
3259 	if (ctx->page_num >= spdk_bit_array_capacity(bs->used_blobids)) {
3260 		ctx->cb_fn(ctx->cb_arg, NULL, -ENOENT);
3261 		free(ctx);
3262 		return;
3263 	}
3264 
3265 	id = _spdk_bs_page_to_blobid(ctx->page_num);
3266 
3267 	spdk_bs_open_blob(bs, id, _spdk_bs_iter_cpl, ctx);
3268 }
3269 
3270 void
3271 spdk_bs_iter_first(struct spdk_blob_store *bs,
3272 		   spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
3273 {
3274 	struct spdk_bs_iter_ctx *ctx;
3275 
3276 	ctx = calloc(1, sizeof(*ctx));
3277 	if (!ctx) {
3278 		cb_fn(cb_arg, NULL, -ENOMEM);
3279 		return;
3280 	}
3281 
3282 	ctx->page_num = -1;
3283 	ctx->bs = bs;
3284 	ctx->cb_fn = cb_fn;
3285 	ctx->cb_arg = cb_arg;
3286 
3287 	_spdk_bs_iter_cpl(ctx, NULL, -1);
3288 }
3289 
3290 static void
3291 _spdk_bs_iter_close_cpl(void *cb_arg, int bserrno)
3292 {
3293 	struct spdk_bs_iter_ctx *ctx = cb_arg;
3294 
3295 	_spdk_bs_iter_cpl(ctx, NULL, -1);
3296 }
3297 
3298 void
3299 spdk_bs_iter_next(struct spdk_blob_store *bs, struct spdk_blob *b,
3300 		  spdk_blob_op_with_handle_complete cb_fn, void *cb_arg)
3301 {
3302 	struct spdk_bs_iter_ctx *ctx;
3303 	struct spdk_blob_data	*blob;
3304 
3305 	assert(b != NULL);
3306 	blob = __blob_to_data(b);
3307 	assert(blob != NULL);
3308 
3309 	ctx = calloc(1, sizeof(*ctx));
3310 	if (!ctx) {
3311 		cb_fn(cb_arg, NULL, -ENOMEM);
3312 		return;
3313 	}
3314 
3315 	ctx->page_num = _spdk_bs_blobid_to_page(blob->id);
3316 	ctx->bs = bs;
3317 	ctx->cb_fn = cb_fn;
3318 	ctx->cb_arg = cb_arg;
3319 
3320 	/* Close the existing blob */
3321 	spdk_blob_close(b, _spdk_bs_iter_close_cpl, ctx);
3322 }
3323 
3324 int
3325 spdk_blob_set_xattr(struct spdk_blob *_blob, const char *name, const void *value,
3326 		    uint16_t value_len)
3327 {
3328 	struct spdk_blob_data	*blob = __blob_to_data(_blob);
3329 	struct spdk_xattr 	*xattr;
3330 
3331 	assert(blob != NULL);
3332 
3333 	assert(blob->state != SPDK_BLOB_STATE_LOADING &&
3334 	       blob->state != SPDK_BLOB_STATE_SYNCING);
3335 
3336 	if (blob->md_ro) {
3337 		return -EPERM;
3338 	}
3339 
3340 	TAILQ_FOREACH(xattr, &blob->xattrs, link) {
3341 		if (!strcmp(name, xattr->name)) {
3342 			free(xattr->value);
3343 			xattr->value_len = value_len;
3344 			xattr->value = malloc(value_len);
			if (!xattr->value) {
				return -ENOMEM;
			}
3345 			memcpy(xattr->value, value, value_len);
3346 
3347 			blob->state = SPDK_BLOB_STATE_DIRTY;
3348 
3349 			return 0;
3350 		}
3351 	}
3352 
3353 	xattr = calloc(1, sizeof(*xattr));
3354 	if (!xattr) {
3355 		return -ENOMEM;
3356 	}
3357 	xattr->name = strdup(name);
3358 	xattr->value_len = value_len;
3359 	xattr->value = malloc(value_len);
	if (!xattr->name || !xattr->value) {
		free(xattr->name);
		free(xattr->value);
		free(xattr);
		return -ENOMEM;
	}
3360 	memcpy(xattr->value, value, value_len);
3361 	TAILQ_INSERT_TAIL(&blob->xattrs, xattr, link);
3362 
3363 	blob->state = SPDK_BLOB_STATE_DIRTY;
3364 
3365 	return 0;
3366 }
3367 
3368 int
3369 spdk_blob_remove_xattr(struct spdk_blob *_blob, const char *name)
3370 {
3371 	struct spdk_blob_data	*blob = __blob_to_data(_blob);
3372 	struct spdk_xattr	*xattr;
3373 
3374 	assert(blob != NULL);
3375 
3376 	assert(blob->state != SPDK_BLOB_STATE_LOADING &&
3377 	       blob->state != SPDK_BLOB_STATE_SYNCING);
3378 
3379 	if (blob->md_ro) {
3380 		return -EPERM;
3381 	}
3382 
3383 	TAILQ_FOREACH(xattr, &blob->xattrs, link) {
3384 		if (!strcmp(name, xattr->name)) {
3385 			TAILQ_REMOVE(&blob->xattrs, xattr, link);
3386 			free(xattr->value);
3387 			free(xattr->name);
3388 			free(xattr);
3389 
3390 			blob->state = SPDK_BLOB_STATE_DIRTY;
3391 
3392 			return 0;
3393 		}
3394 	}
3395 
3396 	return -ENOENT;
3397 }
3398 
3399 int
3400 spdk_blob_get_xattr_value(struct spdk_blob *_blob, const char *name,
3401 			  const void **value, size_t *value_len)
3402 {
3403 	struct spdk_blob_data	*blob = __blob_to_data(_blob);
3404 	struct spdk_xattr	*xattr;
3405 
3406 	TAILQ_FOREACH(xattr, &blob->xattrs, link) {
3407 		if (!strcmp(name, xattr->name)) {
3408 			*value = xattr->value;
3409 			*value_len = xattr->value_len;
3410 			return 0;
3411 		}
3412 	}
3413 
3414 	return -ENOENT;
3415 }
3416 
3417 struct spdk_xattr_names {
3418 	uint32_t	count;
3419 	const char	*names[0];
3420 };
3421 
3422 int
3423 spdk_blob_get_xattr_names(struct spdk_blob *_blob, struct spdk_xattr_names **names)
3424 {
3425 	struct spdk_blob_data	*blob = __blob_to_data(_blob);
3426 	struct spdk_xattr	*xattr;
3427 	int			count = 0;
3428 
3429 	TAILQ_FOREACH(xattr, &blob->xattrs, link) {
3430 		count++;
3431 	}
3432 
3433 	*names = calloc(1, sizeof(struct spdk_xattr_names) + count * sizeof(char *));
3434 	if (*names == NULL) {
3435 		return -ENOMEM;
3436 	}
3437 
3438 	TAILQ_FOREACH(xattr, &blob->xattrs, link) {
3439 		(*names)->names[(*names)->count++] = xattr->name;
3440 	}
3441 
3442 	return 0;
3443 }
3444 
3445 uint32_t
3446 spdk_xattr_names_get_count(struct spdk_xattr_names *names)
3447 {
3448 	assert(names != NULL);
3449 
3450 	return names->count;
3451 }
3452 
3453 const char *
3454 spdk_xattr_names_get_name(struct spdk_xattr_names *names, uint32_t index)
3455 {
3456 	if (index >= names->count) {
3457 		return NULL;
3458 	}
3459 
3460 	return names->names[index];
3461 }
3462 
3463 void
3464 spdk_xattr_names_free(struct spdk_xattr_names *names)
3465 {
3466 	free(names);
3467 }
3468 
3469 struct spdk_bs_type
3470 spdk_bs_get_bstype(struct spdk_blob_store *bs)
3471 {
3472 	return bs->bstype;
3473 }
3474 
3475 void
3476 spdk_bs_set_bstype(struct spdk_blob_store *bs, struct spdk_bs_type bstype)
3477 {
3478 	memcpy(&bs->bstype, &bstype, sizeof(bstype));
3479 }
3480 
3481 SPDK_LOG_REGISTER_COMPONENT("blob", SPDK_LOG_BLOB)
3482