xref: /spdk/test/unit/lib/blob/blob.c/blob_ut.c (revision 7bcd316de1f71f5ca5303d08ac26df20dcd05669)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation.
5  *   All rights reserved.
6  *   Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
7  *
8  *   Redistribution and use in source and binary forms, with or without
9  *   modification, are permitted provided that the following conditions
10  *   are met:
11  *
12  *     * Redistributions of source code must retain the above copyright
13  *       notice, this list of conditions and the following disclaimer.
14  *     * Redistributions in binary form must reproduce the above copyright
15  *       notice, this list of conditions and the following disclaimer in
16  *       the documentation and/or other materials provided with the
17  *       distribution.
18  *     * Neither the name of Intel Corporation nor the names of its
19  *       contributors may be used to endorse or promote products derived
20  *       from this software without specific prior written permission.
21  *
22  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
25  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
26  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
27  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
28  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
32  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 
35 #include "spdk/stdinc.h"
36 
37 #include "spdk_cunit.h"
38 #include "spdk/blob.h"
39 #include "spdk/string.h"
40 
41 #include "common/lib/ut_multithread.c"
42 #include "../bs_dev_common.c"
43 #include "blob/blobstore.c"
44 #include "blob/request.c"
45 #include "blob/zeroes.c"
46 #include "blob/blob_bs_dev.c"
47 
48 struct spdk_blob_store *g_bs;
49 spdk_blob_id g_blobid;
50 struct spdk_blob *g_blob, *g_blob2;
51 int g_bserrno, g_bserrno2;
52 struct spdk_xattr_names *g_names;
53 int g_done;
54 char *g_xattr_names[] = {"first", "second", "third"};
55 char *g_xattr_values[] = {"one", "two", "three"};
56 uint64_t g_ctx = 1729;
57 bool g_use_extent_table = false;
58 
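/*
 * Local definition of the version 1 super block layout (one 4 KiB metadata
 * page, enforced by the static assert below), for tests that work with the
 * old on-disk super block format.
 */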
59 struct spdk_bs_super_block_ver1 {
60 	uint8_t		signature[8];
61 	uint32_t	version;
62 	uint32_t	length;
63 	uint32_t	clean; /* If there was a clean shutdown, this is 1. */
64 	spdk_blob_id	super_blob;
65 
66 	uint32_t	cluster_size; /* In bytes */
67 
68 	uint32_t	used_page_mask_start; /* Offset from beginning of disk, in pages */
69 	uint32_t	used_page_mask_len; /* Count, in pages */
70 
71 	uint32_t	used_cluster_mask_start; /* Offset from beginning of disk, in pages */
72 	uint32_t	used_cluster_mask_len; /* Count, in pages */
73 
74 	uint32_t	md_start; /* Offset from beginning of disk, in pages */
75 	uint32_t	md_len; /* Count, in pages */
76 
77 	uint8_t		reserved[4036];
78 	uint32_t	crc;
79 } __attribute__((packed));
80 SPDK_STATIC_ASSERT(sizeof(struct spdk_bs_super_block_ver1) == 0x1000, "Invalid super block size");
81 
82 static struct spdk_blob *ut_blob_create_and_open(struct spdk_blob_store *bs,
83 		struct spdk_blob_opts *blob_opts);
84 static void ut_blob_close_and_delete(struct spdk_blob_store *bs, struct spdk_blob *blob);
85 static void suite_blob_setup(void);
86 static void suite_blob_cleanup(void);
87 
88 DEFINE_STUB(spdk_memory_domain_memzero, int, (struct spdk_memory_domain *src_domain,
89 		void *src_domain_ctx, struct iovec *iov, uint32_t iovcnt, void (*cpl_cb)(void *, int),
90 		void *cpl_cb_arg), 0);
91 
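/*
 * xattr get_value callback used when creating blobs, snapshots and clones
 * with xattrs: looks up 'name' in g_xattr_names and returns the matching
 * entry from g_xattr_values along with its length.
 */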
92 static void
93 _get_xattr_value(void *arg, const char *name,
94 		 const void **value, size_t *value_len)
95 {
96 	uint64_t i;
97 
98 	SPDK_CU_ASSERT_FATAL(value_len != NULL);
99 	SPDK_CU_ASSERT_FATAL(value != NULL);
100 	CU_ASSERT(arg == &g_ctx);
101 
102 	for (i = 0; i < sizeof(g_xattr_names) / sizeof(g_xattr_names[0]); i++) {
103 		if (!strcmp(name, g_xattr_names[i])) {
104 			*value_len = strlen(g_xattr_values[i]);
105 			*value = g_xattr_values[i];
106 			break;
107 		}
108 	}
109 }
110 
111 static void
112 _get_xattr_value_null(void *arg, const char *name,
113 		      const void **value, size_t *value_len)
114 {
115 	SPDK_CU_ASSERT_FATAL(value_len != NULL);
116 	SPDK_CU_ASSERT_FATAL(value != NULL);
117 	CU_ASSERT(arg == NULL);
118 
119 	*value_len = 0;
120 	*value = NULL;
121 }
122 
123 static int
124 _get_snapshots_count(struct spdk_blob_store *bs)
125 {
126 	struct spdk_blob_list *snapshot = NULL;
127 	int count = 0;
128 
129 	TAILQ_FOREACH(snapshot, &bs->snapshots, link) {
130 		count += 1;
131 	}
132 
133 	return count;
134 }
135 
136 static void
137 ut_spdk_blob_opts_init(struct spdk_blob_opts *opts)
138 {
139 	spdk_blob_opts_init(opts, sizeof(*opts));
140 	opts->use_extent_table = g_use_extent_table;
141 }
142 
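/*
 * The completion callbacks below stash their results in globals (g_bserrno,
 * g_bs, g_blobid, g_blob, ...).  Tests issue an async call, poll_threads(),
 * and then assert on these globals.
 */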
143 static void
144 bs_op_complete(void *cb_arg, int bserrno)
145 {
146 	g_bserrno = bserrno;
147 }
148 
149 static void
150 bs_op_with_handle_complete(void *cb_arg, struct spdk_blob_store *bs,
151 			   int bserrno)
152 {
153 	g_bs = bs;
154 	g_bserrno = bserrno;
155 }
156 
157 static void
158 blob_op_complete(void *cb_arg, int bserrno)
159 {
160 	g_bserrno = bserrno;
161 }
162 
163 static void
164 blob_op_with_id_complete(void *cb_arg, spdk_blob_id blobid, int bserrno)
165 {
166 	g_blobid = blobid;
167 	g_bserrno = bserrno;
168 }
169 
170 static void
171 blob_op_with_handle_complete(void *cb_arg, struct spdk_blob *blb, int bserrno)
172 {
173 	g_blob = blb;
174 	g_bserrno = bserrno;
175 }
176 
177 static void
178 blob_op_with_handle_complete2(void *cb_arg, struct spdk_blob *blob, int bserrno)
179 {
180 	if (g_blob == NULL) {
181 		g_blob = blob;
182 		g_bserrno = bserrno;
183 	} else {
184 		g_blob2 = blob;
185 		g_bserrno2 = bserrno;
186 	}
187 }
188 
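/*
 * Cleanly unload the blob store and load it back from the same device,
 * returning the new handle through *bs.
 */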
189 static void
190 ut_bs_reload(struct spdk_blob_store **bs, struct spdk_bs_opts *opts)
191 {
192 	struct spdk_bs_dev *dev;
193 
194 	/* Unload the blob store */
195 	spdk_bs_unload(*bs, bs_op_complete, NULL);
196 	poll_threads();
197 	CU_ASSERT(g_bserrno == 0);
198 
199 	dev = init_dev();
200 	/* Load an existing blob store */
201 	spdk_bs_load(dev, opts, bs_op_with_handle_complete, NULL);
202 	poll_threads();
203 	CU_ASSERT(g_bserrno == 0);
204 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
205 	*bs = g_bs;
206 
207 	g_bserrno = -1;
208 }
209 
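/*
 * Simulate a dirty shutdown by freeing the blob store without unloading it,
 * then load it back so the recovery path is exercised.
 */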
210 static void
211 ut_bs_dirty_load(struct spdk_blob_store **bs, struct spdk_bs_opts *opts)
212 {
213 	struct spdk_bs_dev *dev;
214 
215 	/* Dirty shutdown */
216 	bs_free(*bs);
217 
218 	dev = init_dev();
219 	/* Load an existing blob store */
220 	spdk_bs_load(dev, opts, bs_op_with_handle_complete, NULL);
221 	poll_threads();
222 	CU_ASSERT(g_bserrno == 0);
223 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
224 	*bs = g_bs;
225 
226 	g_bserrno = -1;
227 }
228 
229 static void
230 blob_init(void)
231 {
232 	struct spdk_blob_store *bs;
233 	struct spdk_bs_dev *dev;
234 
235 	dev = init_dev();
236 
237 	/* should fail for an unsupported blocklen */
238 	dev->blocklen = 500;
239 	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
240 	poll_threads();
241 	CU_ASSERT(g_bserrno == -EINVAL);
242 
243 	dev = init_dev();
244 	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
245 	poll_threads();
246 	CU_ASSERT(g_bserrno == 0);
247 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
248 	bs = g_bs;
249 
250 	spdk_bs_unload(bs, bs_op_complete, NULL);
251 	poll_threads();
252 	CU_ASSERT(g_bserrno == 0);
253 	g_bs = NULL;
254 }
255 
256 static void
257 blob_super(void)
258 {
259 	struct spdk_blob_store *bs = g_bs;
260 	spdk_blob_id blobid;
261 	struct spdk_blob_opts blob_opts;
262 
263 	/* Get the super blob without having set one */
264 	spdk_bs_get_super(bs, blob_op_with_id_complete, NULL);
265 	poll_threads();
266 	CU_ASSERT(g_bserrno == -ENOENT);
267 	CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID);
268 
269 	/* Create a blob */
270 	ut_spdk_blob_opts_init(&blob_opts);
271 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
272 	poll_threads();
273 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
274 	CU_ASSERT(g_blobid !=  SPDK_BLOBID_INVALID);
275 	blobid = g_blobid;
276 
277 	/* Set the blob as the super blob */
278 	spdk_bs_set_super(bs, blobid, blob_op_complete, NULL);
279 	poll_threads();
280 	CU_ASSERT(g_bserrno == 0);
281 
282 	/* Get the super blob */
283 	spdk_bs_get_super(bs, blob_op_with_id_complete, NULL);
284 	poll_threads();
285 	CU_ASSERT(g_bserrno == 0);
286 	CU_ASSERT(blobid == g_blobid);
287 }
288 
289 static void
290 blob_open(void)
291 {
292 	struct spdk_blob_store *bs = g_bs;
293 	struct spdk_blob *blob;
294 	struct spdk_blob_opts blob_opts;
295 	spdk_blob_id blobid, blobid2;
296 
297 	ut_spdk_blob_opts_init(&blob_opts);
298 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
299 	poll_threads();
300 	CU_ASSERT(g_bserrno == 0);
301 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
302 	blobid = g_blobid;
303 
304 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
305 	poll_threads();
306 	CU_ASSERT(g_bserrno == 0);
307 	CU_ASSERT(g_blob != NULL);
308 	blob = g_blob;
309 
310 	blobid2 = spdk_blob_get_id(blob);
311 	CU_ASSERT(blobid == blobid2);
312 
313 	/* Try to open the blob again.  It should succeed. */
314 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
315 	poll_threads();
316 	CU_ASSERT(g_bserrno == 0);
317 	CU_ASSERT(blob == g_blob);
318 
319 	spdk_blob_close(blob, blob_op_complete, NULL);
320 	poll_threads();
321 	CU_ASSERT(g_bserrno == 0);
322 
323 	/*
324 	 * Close the blob a second time, releasing the second reference.  This
325 	 *  should succeed.
326 	 */
327 	blob = g_blob;
328 	spdk_blob_close(blob, blob_op_complete, NULL);
329 	poll_threads();
330 	CU_ASSERT(g_bserrno == 0);
331 
332 	/*
333 	 * Try to open the blob again.  It should succeed.  This tests the case
334 	 *  where the blob is opened, closed, then re-opened.
335 	 */
336 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
337 	poll_threads();
338 	CU_ASSERT(g_bserrno == 0);
339 	CU_ASSERT(g_blob != NULL);
340 	blob = g_blob;
341 	spdk_blob_close(blob, blob_op_complete, NULL);
342 	poll_threads();
343 	CU_ASSERT(g_bserrno == 0);
344 
345 	/* Try to open the blob twice in succession.  This should return the same
346 	 * blob object.
347 	 */
348 	g_blob = NULL;
349 	g_blob2 = NULL;
350 	g_bserrno = -1;
351 	g_bserrno2 = -1;
352 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete2, NULL);
353 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete2, NULL);
354 	poll_threads();
355 	CU_ASSERT(g_bserrno == 0);
356 	CU_ASSERT(g_bserrno2 == 0);
357 	CU_ASSERT(g_blob != NULL);
358 	CU_ASSERT(g_blob2 != NULL);
359 	CU_ASSERT(g_blob == g_blob2);
360 
361 	g_bserrno = -1;
362 	spdk_blob_close(g_blob, blob_op_complete, NULL);
363 	poll_threads();
364 	CU_ASSERT(g_bserrno == 0);
365 
366 	ut_blob_close_and_delete(bs, g_blob);
367 }
368 
369 static void
370 blob_create(void)
371 {
372 	struct spdk_blob_store *bs = g_bs;
373 	struct spdk_blob *blob;
374 	struct spdk_blob_opts opts;
375 	spdk_blob_id blobid;
376 
377 	/* Create blob with 10 clusters */
378 
379 	ut_spdk_blob_opts_init(&opts);
380 	opts.num_clusters = 10;
381 
382 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
383 	poll_threads();
384 	CU_ASSERT(g_bserrno == 0);
385 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
386 	blobid = g_blobid;
387 
388 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
389 	poll_threads();
390 	CU_ASSERT(g_bserrno == 0);
391 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
392 	blob = g_blob;
393 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
394 
395 	spdk_blob_close(blob, blob_op_complete, NULL);
396 	poll_threads();
397 	CU_ASSERT(g_bserrno == 0);
398 
399 	/* Create blob with 0 clusters */
400 
401 	ut_spdk_blob_opts_init(&opts);
402 	opts.num_clusters = 0;
403 
404 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
405 	poll_threads();
406 	CU_ASSERT(g_bserrno == 0);
407 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
408 	blobid = g_blobid;
409 
410 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
411 	poll_threads();
412 	CU_ASSERT(g_bserrno == 0);
413 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
414 	blob = g_blob;
415 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
416 
417 	spdk_blob_close(blob, blob_op_complete, NULL);
418 	poll_threads();
419 	CU_ASSERT(g_bserrno == 0);
420 
421 	/* Create blob with default options (opts == NULL) */
422 
423 	spdk_bs_create_blob_ext(bs, NULL, blob_op_with_id_complete, NULL);
424 	poll_threads();
425 	CU_ASSERT(g_bserrno == 0);
426 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
427 	blobid = g_blobid;
428 
429 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
430 	poll_threads();
431 	CU_ASSERT(g_bserrno == 0);
432 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
433 	blob = g_blob;
434 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
435 
436 	spdk_blob_close(blob, blob_op_complete, NULL);
437 	poll_threads();
438 	CU_ASSERT(g_bserrno == 0);
439 
440 	/* Try to create blob with size larger than blobstore */
441 
442 	ut_spdk_blob_opts_init(&opts);
443 	opts.num_clusters = bs->total_clusters + 1;
444 
445 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
446 	poll_threads();
447 	CU_ASSERT(g_bserrno == -ENOSPC);
448 }
449 
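/*
 * Verify that a blob created with zero clusters ends up with an empty extent
 * page array (no extent pages allocated), both through the public create API
 * and through bs_create_blob() with NULL internal options.
 */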
450 static void
451 blob_create_zero_extent(void)
452 {
453 	struct spdk_blob_store *bs = g_bs;
454 	struct spdk_blob *blob;
455 	spdk_blob_id blobid;
456 
457 	/* Create blob with default options (opts == NULL) */
458 	spdk_bs_create_blob_ext(bs, NULL, blob_op_with_id_complete, NULL);
459 	poll_threads();
460 	CU_ASSERT(g_bserrno == 0);
461 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
462 	blobid = g_blobid;
463 
464 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
465 	poll_threads();
466 	CU_ASSERT(g_bserrno == 0);
467 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
468 	blob = g_blob;
469 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
470 	CU_ASSERT(blob->extent_table_found == true);
471 	CU_ASSERT(blob->active.extent_pages_array_size == 0);
472 	CU_ASSERT(blob->active.extent_pages == NULL);
473 
474 	spdk_blob_close(blob, blob_op_complete, NULL);
475 	poll_threads();
476 	CU_ASSERT(g_bserrno == 0);
477 
478 	/* Create blob with NULL internal options */
479 	bs_create_blob(bs, NULL, NULL, blob_op_with_id_complete, NULL);
480 	poll_threads();
481 	CU_ASSERT(g_bserrno == 0);
482 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
483 	blobid = g_blobid;
484 
485 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
486 	poll_threads();
487 	CU_ASSERT(g_bserrno == 0);
488 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
489 	blob = g_blob;
490 	CU_ASSERT(TAILQ_FIRST(&blob->xattrs_internal) == NULL);
491 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
492 	CU_ASSERT(blob->extent_table_found == true);
493 	CU_ASSERT(blob->active.extent_pages_array_size == 0);
494 	CU_ASSERT(blob->active.extent_pages == NULL);
495 
496 	spdk_blob_close(blob, blob_op_complete, NULL);
497 	poll_threads();
498 	CU_ASSERT(g_bserrno == 0);
499 }
500 
501 /*
502  * Create and delete one blob in a loop over and over again.  This helps ensure
503  * that the internal bit masks tracking used clusters and md_pages are
504  * maintained correctly.
505  */
506 static void
507 blob_create_loop(void)
508 {
509 	struct spdk_blob_store *bs = g_bs;
510 	struct spdk_blob_opts opts;
511 	uint32_t i, loop_count;
512 
513 	loop_count = 4 * spdk_max(spdk_bit_array_capacity(bs->used_md_pages),
514 				  spdk_bit_pool_capacity(bs->used_clusters));
515 
516 	for (i = 0; i < loop_count; i++) {
517 		ut_spdk_blob_opts_init(&opts);
518 		opts.num_clusters = 1;
519 		g_bserrno = -1;
520 		g_blobid = SPDK_BLOBID_INVALID;
521 		spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
522 		poll_threads();
523 		CU_ASSERT(g_bserrno == 0);
524 		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
525 		spdk_bs_delete_blob(bs, g_blobid, blob_op_complete, NULL);
526 		poll_threads();
527 		CU_ASSERT(g_bserrno == 0);
528 	}
529 }
530 
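/*
 * Verify that a failed creation (xattr names given but the get_value
 * callback left NULL) releases its blobid and metadata page, leaving the
 * used_blobids/used_md_pages counts unchanged even across a reload.
 */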
531 static void
532 blob_create_fail(void)
533 {
534 	struct spdk_blob_store *bs = g_bs;
535 	struct spdk_blob_opts opts;
536 	spdk_blob_id blobid;
537 	uint32_t used_blobids_count = spdk_bit_array_count_set(bs->used_blobids);
538 	uint32_t used_md_pages_count = spdk_bit_array_count_set(bs->used_md_pages);
539 
540 	/* NULL callback */
541 	ut_spdk_blob_opts_init(&opts);
542 	opts.xattrs.names = g_xattr_names;
543 	opts.xattrs.get_value = NULL;
544 	opts.xattrs.count = 1;
545 	opts.xattrs.ctx = &g_ctx;
546 
547 	blobid = spdk_bit_array_find_first_clear(bs->used_md_pages, 0);
548 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
549 	poll_threads();
550 	CU_ASSERT(g_bserrno == -EINVAL);
551 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
552 	CU_ASSERT(spdk_bit_array_count_set(bs->used_blobids) == used_blobids_count);
553 	CU_ASSERT(spdk_bit_array_count_set(bs->used_md_pages) == used_md_pages_count);
554 
555 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
556 	poll_threads();
557 	CU_ASSERT(g_bserrno == -ENOENT);
558 	SPDK_CU_ASSERT_FATAL(g_blob == NULL);
559 
560 	ut_bs_reload(&bs, NULL);
561 	CU_ASSERT(spdk_bit_array_count_set(bs->used_blobids) == used_blobids_count);
562 	CU_ASSERT(spdk_bit_array_count_set(bs->used_md_pages) == used_md_pages_count);
563 
564 	spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL);
565 	poll_threads();
566 	CU_ASSERT(g_blob == NULL);
567 	CU_ASSERT(g_bserrno == -ENOENT);
568 }
569 
570 static void
571 blob_create_internal(void)
572 {
573 	struct spdk_blob_store *bs = g_bs;
574 	struct spdk_blob *blob;
575 	struct spdk_blob_opts opts;
576 	struct spdk_blob_xattr_opts internal_xattrs;
577 	const void *value;
578 	size_t value_len;
579 	spdk_blob_id blobid;
580 	int rc;
581 
582 	/* Create blob with custom xattrs */
583 
584 	ut_spdk_blob_opts_init(&opts);
585 	blob_xattrs_init(&internal_xattrs);
586 	internal_xattrs.count = 3;
587 	internal_xattrs.names = g_xattr_names;
588 	internal_xattrs.get_value = _get_xattr_value;
589 	internal_xattrs.ctx = &g_ctx;
590 
591 	bs_create_blob(bs, &opts, &internal_xattrs, blob_op_with_id_complete, NULL);
592 	poll_threads();
593 	CU_ASSERT(g_bserrno == 0);
594 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
595 	blobid = g_blobid;
596 
597 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
598 	poll_threads();
599 	CU_ASSERT(g_bserrno == 0);
600 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
601 	blob = g_blob;
602 
603 	rc = blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len, true);
604 	CU_ASSERT(rc == 0);
605 	SPDK_CU_ASSERT_FATAL(value != NULL);
606 	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
607 	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);
608 
609 	rc = blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len, true);
610 	CU_ASSERT(rc == 0);
611 	SPDK_CU_ASSERT_FATAL(value != NULL);
612 	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
613 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);
614 
615 	rc = blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len, true);
616 	CU_ASSERT(rc == 0);
617 	SPDK_CU_ASSERT_FATAL(value != NULL);
618 	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
619 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);
620 
621 	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len);
622 	CU_ASSERT(rc != 0);
623 
624 	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len);
625 	CU_ASSERT(rc != 0);
626 
627 	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len);
628 	CU_ASSERT(rc != 0);
629 
630 	spdk_blob_close(blob, blob_op_complete, NULL);
631 	poll_threads();
632 	CU_ASSERT(g_bserrno == 0);
633 
634 	/* Create blob with NULL internal options */
635 
636 	bs_create_blob(bs, NULL, NULL, blob_op_with_id_complete, NULL);
637 	poll_threads();
638 	CU_ASSERT(g_bserrno == 0);
639 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
640 	blobid = g_blobid;
641 
642 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
643 	poll_threads();
644 	CU_ASSERT(g_bserrno == 0);
645 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
646 	CU_ASSERT(TAILQ_FIRST(&g_blob->xattrs_internal) == NULL);
647 	CU_ASSERT(spdk_blob_get_num_clusters(g_blob) == 0);
648 
649 	blob = g_blob;
650 
651 	spdk_blob_close(blob, blob_op_complete, NULL);
652 	poll_threads();
653 	CU_ASSERT(g_bserrno == 0);
654 }
655 
656 static void
657 blob_thin_provision(void)
658 {
659 	struct spdk_blob_store *bs;
660 	struct spdk_bs_dev *dev;
661 	struct spdk_blob *blob;
662 	struct spdk_blob_opts opts;
663 	struct spdk_bs_opts bs_opts;
664 	spdk_blob_id blobid;
665 
666 	dev = init_dev();
667 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
668 	snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");
669 
670 	/* Initialize a new blob store */
671 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
672 	poll_threads();
673 	CU_ASSERT(g_bserrno == 0);
674 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
675 
676 	bs = g_bs;
677 
678 	/* Create blob with thin provisioning enabled */
679 
680 	ut_spdk_blob_opts_init(&opts);
681 	opts.thin_provision = true;
682 	opts.num_clusters = 10;
683 
684 	blob = ut_blob_create_and_open(bs, &opts);
685 	blobid = spdk_blob_get_id(blob);
686 	CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
687 	/* With thin provisioning and num_clusters set, no clusters are
688 	 * allocated at create time. The extent page array is allocated
689 	 * only when the extent table is in use. */
690 	if (blob->extent_table_found == true) {
691 		CU_ASSERT(blob->active.extent_pages_array_size > 0);
692 		CU_ASSERT(blob->active.extent_pages != NULL);
693 	} else {
694 		CU_ASSERT(blob->active.extent_pages_array_size == 0);
695 		CU_ASSERT(blob->active.extent_pages == NULL);
696 	}
697 
698 	spdk_blob_close(blob, blob_op_complete, NULL);
699 	CU_ASSERT(g_bserrno == 0);
700 
701 	/* Do not shut down cleanly.  This makes sure that when we load again
702 	 *  and try to recover a valid used_cluster map, the blobstore will
703 	 *  ignore clusters with index 0, since these are unallocated clusters.
704 	 */
705 	ut_bs_dirty_load(&bs, &bs_opts);
706 
707 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
708 	poll_threads();
709 	CU_ASSERT(g_bserrno == 0);
710 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
711 	blob = g_blob;
712 	CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
713 
714 	ut_blob_close_and_delete(bs, blob);
715 
716 	spdk_bs_unload(bs, bs_op_complete, NULL);
717 	poll_threads();
718 	CU_ASSERT(g_bserrno == 0);
719 	g_bs = NULL;
720 }
721 
722 static void
723 blob_snapshot(void)
724 {
725 	struct spdk_blob_store *bs = g_bs;
726 	struct spdk_blob *blob;
727 	struct spdk_blob *snapshot, *snapshot2;
728 	struct spdk_blob_bs_dev *blob_bs_dev;
729 	struct spdk_blob_opts opts;
730 	struct spdk_blob_xattr_opts xattrs;
731 	spdk_blob_id blobid;
732 	spdk_blob_id snapshotid;
733 	spdk_blob_id snapshotid2;
734 	const void *value;
735 	size_t value_len;
736 	int rc;
737 	spdk_blob_id ids[2];
738 	size_t count;
739 
740 	/* Create blob with 10 clusters */
741 	ut_spdk_blob_opts_init(&opts);
742 	opts.num_clusters = 10;
743 
744 	blob = ut_blob_create_and_open(bs, &opts);
745 	blobid = spdk_blob_get_id(blob);
746 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
747 
748 	/* Create snapshot from blob */
749 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 0);
750 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
751 	poll_threads();
752 	CU_ASSERT(g_bserrno == 0);
753 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
754 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
755 	snapshotid = g_blobid;
756 
757 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
758 	poll_threads();
759 	CU_ASSERT(g_bserrno == 0);
760 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
761 	snapshot = g_blob;
762 	CU_ASSERT(snapshot->data_ro == true);
763 	CU_ASSERT(snapshot->md_ro == true);
764 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10);
765 
766 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
767 	CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
768 	CU_ASSERT(spdk_mem_all_zero(blob->active.clusters,
769 				    blob->active.num_clusters * sizeof(blob->active.clusters[0])));
770 
771 	/* Try to create snapshot from clone with xattrs */
772 	xattrs.names = g_xattr_names;
773 	xattrs.get_value = _get_xattr_value;
774 	xattrs.count = 3;
775 	xattrs.ctx = &g_ctx;
776 	spdk_bs_create_snapshot(bs, blobid, &xattrs, blob_op_with_id_complete, NULL);
777 	poll_threads();
778 	CU_ASSERT(g_bserrno == 0);
779 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
780 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 2);
781 	snapshotid2 = g_blobid;
782 
783 	spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL);
784 	CU_ASSERT(g_bserrno == 0);
785 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
786 	snapshot2 = g_blob;
787 	CU_ASSERT(snapshot2->data_ro == true);
788 	CU_ASSERT(snapshot2->md_ro == true);
789 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot2) == 10);
790 
791 	/* Confirm that blob is backed by snapshot2 and snapshot2 is backed by snapshot */
792 	CU_ASSERT(snapshot->back_bs_dev == NULL);
793 	SPDK_CU_ASSERT_FATAL(blob->back_bs_dev != NULL);
794 	SPDK_CU_ASSERT_FATAL(snapshot2->back_bs_dev != NULL);
795 
796 	blob_bs_dev = (struct spdk_blob_bs_dev *)blob->back_bs_dev;
797 	CU_ASSERT(blob_bs_dev->blob == snapshot2);
798 
799 	blob_bs_dev = (struct spdk_blob_bs_dev *)snapshot2->back_bs_dev;
800 	CU_ASSERT(blob_bs_dev->blob == snapshot);
801 
802 	rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[0], &value, &value_len);
803 	CU_ASSERT(rc == 0);
804 	SPDK_CU_ASSERT_FATAL(value != NULL);
805 	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
806 	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);
807 
808 	rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[1], &value, &value_len);
809 	CU_ASSERT(rc == 0);
810 	SPDK_CU_ASSERT_FATAL(value != NULL);
811 	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
812 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);
813 
814 	rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[2], &value, &value_len);
815 	CU_ASSERT(rc == 0);
816 	SPDK_CU_ASSERT_FATAL(value != NULL);
817 	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
818 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);
819 
820 	/* Confirm that blob is clone of snapshot2, and snapshot2 is clone of snapshot */
821 	count = 2;
822 	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0);
823 	CU_ASSERT(count == 1);
824 	CU_ASSERT(ids[0] == blobid);
825 
826 	count = 2;
827 	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);
828 	CU_ASSERT(count == 1);
829 	CU_ASSERT(ids[0] == snapshotid2);
830 
831 	/* Try to create snapshot from snapshot */
832 	spdk_bs_create_snapshot(bs, snapshotid, NULL, blob_op_with_id_complete, NULL);
833 	poll_threads();
834 	CU_ASSERT(g_bserrno == -EINVAL);
835 	CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID);
836 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 2);
837 
838 	/* Delete blob and confirm that it is no longer on snapshot2 clone list */
839 	ut_blob_close_and_delete(bs, blob);
840 	count = 2;
841 	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0);
842 	CU_ASSERT(count == 0);
843 
844 	/* Delete snapshot2 and confirm that it is no longer on snapshot clone list */
845 	ut_blob_close_and_delete(bs, snapshot2);
846 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
847 	count = 2;
848 	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0);
849 	CU_ASSERT(count == 0);
850 
851 	ut_blob_close_and_delete(bs, snapshot);
852 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 0);
853 }
854 
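/*
 * Verify that blob I/O submitted while a snapshot is being taken is queued
 * on the channel and completes with the expected payload once the snapshot
 * finishes and the blob is unfrozen.
 */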
855 static void
856 blob_snapshot_freeze_io(void)
857 {
858 	struct spdk_io_channel *channel;
859 	struct spdk_bs_channel *bs_channel;
860 	struct spdk_blob_store *bs = g_bs;
861 	struct spdk_blob *blob;
862 	struct spdk_blob_opts opts;
863 	spdk_blob_id blobid;
864 	uint32_t num_of_pages = 10;
865 	uint8_t payload_read[num_of_pages * SPDK_BS_PAGE_SIZE];
866 	uint8_t payload_write[num_of_pages * SPDK_BS_PAGE_SIZE];
867 	uint8_t payload_zero[num_of_pages * SPDK_BS_PAGE_SIZE];
868 
869 	memset(payload_write, 0xE5, sizeof(payload_write));
870 	memset(payload_read, 0x00, sizeof(payload_read));
871 	memset(payload_zero, 0x00, sizeof(payload_zero));
872 
873 	/* Test freeze I/O during snapshot */
874 	channel = spdk_bs_alloc_io_channel(bs);
875 	bs_channel = spdk_io_channel_get_ctx(channel);
876 
877 	/* Create blob with 10 clusters */
878 	ut_spdk_blob_opts_init(&opts);
879 	opts.num_clusters = 10;
880 	opts.thin_provision = false;
881 
882 	blob = ut_blob_create_and_open(bs, &opts);
883 	blobid = spdk_blob_get_id(blob);
884 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
885 
886 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
887 
888 	/* This is implementation specific.
889 	 * Flag 'frozen_io' is set in _spdk_bs_snapshot_freeze_cpl callback.
890 	 * Four async I/O operations happen before that. */
891 	poll_thread_times(0, 5);
892 
893 	CU_ASSERT(TAILQ_EMPTY(&bs_channel->queued_io));
894 
895 	/* Blob I/O should be frozen here */
896 	CU_ASSERT(blob->frozen_refcnt == 1);
897 
898 	/* Write to the blob */
899 	spdk_blob_io_write(blob, channel, payload_write, 0, num_of_pages, blob_op_complete, NULL);
900 
901 	/* Verify that I/O is queued */
902 	CU_ASSERT(!TAILQ_EMPTY(&bs_channel->queued_io));
903 	/* Verify that the payload is not yet written to disk; at this point the blobs have already been switched */
904 	CU_ASSERT(blob->active.clusters[0] == 0);
905 
906 	/* Finish all operations including spdk_bs_create_snapshot */
907 	poll_threads();
908 
909 	/* Verify snapshot */
910 	CU_ASSERT(g_bserrno == 0);
911 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
912 
913 	/* Verify that the blob is no longer frozen for I/O */
914 	CU_ASSERT(blob->frozen_refcnt == 0);
915 
916 	/* Verify that postponed I/O completed successfully by comparing payload */
917 	spdk_blob_io_read(blob, channel, payload_read, 0, num_of_pages, blob_op_complete, NULL);
918 	poll_threads();
919 	CU_ASSERT(g_bserrno == 0);
920 	CU_ASSERT(memcmp(payload_write, payload_read, num_of_pages * SPDK_BS_PAGE_SIZE) == 0);
921 
922 	spdk_bs_free_io_channel(channel);
923 	poll_threads();
924 
925 	ut_blob_close_and_delete(bs, blob);
926 }
927 
928 static void
929 blob_clone(void)
930 {
931 	struct spdk_blob_store *bs = g_bs;
932 	struct spdk_blob_opts opts;
933 	struct spdk_blob *blob, *snapshot, *clone;
934 	spdk_blob_id blobid, cloneid, snapshotid;
935 	struct spdk_blob_xattr_opts xattrs;
936 	const void *value;
937 	size_t value_len;
938 	int rc;
939 
940 	/* Create blob with 10 clusters */
941 
942 	ut_spdk_blob_opts_init(&opts);
943 	opts.num_clusters = 10;
944 
945 	blob = ut_blob_create_and_open(bs, &opts);
946 	blobid = spdk_blob_get_id(blob);
947 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
948 
949 	/* Create snapshot */
950 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
951 	poll_threads();
952 	CU_ASSERT(g_bserrno == 0);
953 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
954 	snapshotid = g_blobid;
955 
956 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
957 	poll_threads();
958 	CU_ASSERT(g_bserrno == 0);
959 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
960 	snapshot = g_blob;
961 	CU_ASSERT(snapshot->data_ro == true);
962 	CU_ASSERT(snapshot->md_ro == true);
963 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10);
964 
965 	spdk_blob_close(snapshot, blob_op_complete, NULL);
966 	poll_threads();
967 	CU_ASSERT(g_bserrno == 0);
968 
969 	/* Create clone from snapshot with xattrs */
970 	xattrs.names = g_xattr_names;
971 	xattrs.get_value = _get_xattr_value;
972 	xattrs.count = 3;
973 	xattrs.ctx = &g_ctx;
974 
975 	spdk_bs_create_clone(bs, snapshotid, &xattrs, blob_op_with_id_complete, NULL);
976 	poll_threads();
977 	CU_ASSERT(g_bserrno == 0);
978 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
979 	cloneid = g_blobid;
980 
981 	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
982 	poll_threads();
983 	CU_ASSERT(g_bserrno == 0);
984 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
985 	clone = g_blob;
986 	CU_ASSERT(clone->data_ro == false);
987 	CU_ASSERT(clone->md_ro == false);
988 	CU_ASSERT(spdk_blob_get_num_clusters(clone) == 10);
989 
990 	rc = spdk_blob_get_xattr_value(clone, g_xattr_names[0], &value, &value_len);
991 	CU_ASSERT(rc == 0);
992 	SPDK_CU_ASSERT_FATAL(value != NULL);
993 	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
994 	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);
995 
996 	rc = spdk_blob_get_xattr_value(clone, g_xattr_names[1], &value, &value_len);
997 	CU_ASSERT(rc == 0);
998 	SPDK_CU_ASSERT_FATAL(value != NULL);
999 	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
1000 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);
1001 
1002 	rc = spdk_blob_get_xattr_value(clone, g_xattr_names[2], &value, &value_len);
1003 	CU_ASSERT(rc == 0);
1004 	SPDK_CU_ASSERT_FATAL(value != NULL);
1005 	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
1006 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);
1007 
1008 
1009 	spdk_blob_close(clone, blob_op_complete, NULL);
1010 	poll_threads();
1011 	CU_ASSERT(g_bserrno == 0);
1012 
1013 	/* Try to create a clone from a blob that is not read-only */
1014 	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
1015 	poll_threads();
1016 	CU_ASSERT(g_bserrno == -EINVAL);
1017 	CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID);
1018 
1019 	/* Mark blob as read only */
1020 	spdk_blob_set_read_only(blob);
1021 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
1022 	poll_threads();
1023 	CU_ASSERT(g_bserrno == 0);
1024 
1025 	/* Create clone from read only blob */
1026 	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
1027 	poll_threads();
1028 	CU_ASSERT(g_bserrno == 0);
1029 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
1030 	cloneid = g_blobid;
1031 
1032 	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
1033 	poll_threads();
1034 	CU_ASSERT(g_bserrno == 0);
1035 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
1036 	clone = g_blob;
1037 	CU_ASSERT(clone->data_ro == false);
1038 	CU_ASSERT(clone->md_ro == false);
1039 	CU_ASSERT(spdk_blob_get_num_clusters(clone) == 10);
1040 
1041 	ut_blob_close_and_delete(bs, clone);
1042 	ut_blob_close_and_delete(bs, blob);
1043 }
1044 
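/*
 * Common body for blob_inflate(): with decouple_parent == false this
 * exercises spdk_bs_inflate_blob(), with decouple_parent == true it
 * exercises spdk_bs_blob_decouple_parent(), first on a blob without a
 * parent and then on a clone of a snapshot.
 */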
1045 static void
1046 _blob_inflate(bool decouple_parent)
1047 {
1048 	struct spdk_blob_store *bs = g_bs;
1049 	struct spdk_blob_opts opts;
1050 	struct spdk_blob *blob, *snapshot;
1051 	spdk_blob_id blobid, snapshotid;
1052 	struct spdk_io_channel *channel;
1053 	uint64_t free_clusters;
1054 
1055 	channel = spdk_bs_alloc_io_channel(bs);
1056 	SPDK_CU_ASSERT_FATAL(channel != NULL);
1057 
1058 	/* Create blob with 10 clusters */
1059 
1060 	ut_spdk_blob_opts_init(&opts);
1061 	opts.num_clusters = 10;
1062 	opts.thin_provision = true;
1063 
1064 	blob = ut_blob_create_and_open(bs, &opts);
1065 	blobid = spdk_blob_get_id(blob);
1066 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
1067 	CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == true);
1068 
1069 	/* 1) Blob with no parent */
1070 	if (decouple_parent) {
1071 		/* Decouple parent of blob with no parent (should fail) */
1072 		spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
1073 		poll_threads();
1074 		CU_ASSERT(g_bserrno != 0);
1075 	} else {
1076 		/* Inflating a thin blob with no parent should make it thick-provisioned */
1077 		spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
1078 		poll_threads();
1079 		CU_ASSERT(g_bserrno == 0);
1080 		CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == false);
1081 	}
1082 
1083 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
1084 	poll_threads();
1085 	CU_ASSERT(g_bserrno == 0);
1086 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
1087 	snapshotid = g_blobid;
1088 
1089 	CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == true);
1090 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
1091 
1092 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
1093 	poll_threads();
1094 	CU_ASSERT(g_bserrno == 0);
1095 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
1096 	snapshot = g_blob;
1097 	CU_ASSERT(snapshot->data_ro == true);
1098 	CU_ASSERT(snapshot->md_ro == true);
1099 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10);
1100 
1101 	spdk_blob_close(snapshot, blob_op_complete, NULL);
1102 	poll_threads();
1103 	CU_ASSERT(g_bserrno == 0);
1104 
1105 	free_clusters = spdk_bs_free_cluster_count(bs);
1106 
1107 	/* 2) Blob with parent */
1108 	if (!decouple_parent) {
1109 		/* Do full blob inflation */
1110 		spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
1111 		poll_threads();
1112 		CU_ASSERT(g_bserrno == 0);
1113 		/* all 10 clusters should be allocated */
1114 		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 10);
1115 	} else {
1116 		/* Decouple parent of blob */
1117 		spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
1118 		poll_threads();
1119 		CU_ASSERT(g_bserrno == 0);
1120 		/* when only parent is removed, none of the clusters should be allocated */
1121 		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters);
1122 	}
1123 
1124 	/* Now, it should be possible to delete snapshot */
1125 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
1126 	poll_threads();
1127 	CU_ASSERT(g_bserrno == 0);
1128 
1129 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
1130 	CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == decouple_parent);
1131 
1132 	spdk_bs_free_io_channel(channel);
1133 	poll_threads();
1134 
1135 	ut_blob_close_and_delete(bs, blob);
1136 }
1137 
1138 static void
1139 blob_inflate(void)
1140 {
1141 	_blob_inflate(false);
1142 	_blob_inflate(true);
1143 }
1144 
1145 static void
1146 blob_delete(void)
1147 {
1148 	struct spdk_blob_store *bs = g_bs;
1149 	struct spdk_blob_opts blob_opts;
1150 	spdk_blob_id blobid;
1151 
1152 	/* Create a blob and then delete it. */
1153 	ut_spdk_blob_opts_init(&blob_opts);
1154 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
1155 	poll_threads();
1156 	CU_ASSERT(g_bserrno == 0);
1157 	CU_ASSERT(g_blobid > 0);
1158 	blobid = g_blobid;
1159 
1160 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
1161 	poll_threads();
1162 	CU_ASSERT(g_bserrno == 0);
1163 
1164 	/* Try to open the blob */
1165 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
1166 	poll_threads();
1167 	CU_ASSERT(g_bserrno == -ENOENT);
1168 }
1169 
1170 static void
1171 blob_resize_test(void)
1172 {
1173 	struct spdk_blob_store *bs = g_bs;
1174 	struct spdk_blob *blob;
1175 	uint64_t free_clusters;
1176 
1177 	free_clusters = spdk_bs_free_cluster_count(bs);
1178 
1179 	blob = ut_blob_create_and_open(bs, NULL);
1180 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
1181 
1182 	/* Confirm that resize fails if blob is marked read-only. */
1183 	blob->md_ro = true;
1184 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
1185 	poll_threads();
1186 	CU_ASSERT(g_bserrno == -EPERM);
1187 	blob->md_ro = false;
1188 
1189 	/* The blob started at 0 clusters. Resize it to be 5. */
1190 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
1191 	poll_threads();
1192 	CU_ASSERT(g_bserrno == 0);
1193 	CU_ASSERT((free_clusters - 5) == spdk_bs_free_cluster_count(bs));
1194 
1195 	/* Shrink the blob to 3 clusters. This will not actually release
1196 	 * the old clusters until the blob is synced.
1197 	 */
1198 	spdk_blob_resize(blob, 3, blob_op_complete, NULL);
1199 	poll_threads();
1200 	CU_ASSERT(g_bserrno == 0);
1201 	/* Verify there are still 5 clusters in use */
1202 	CU_ASSERT((free_clusters - 5) == spdk_bs_free_cluster_count(bs));
1203 
1204 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
1205 	poll_threads();
1206 	CU_ASSERT(g_bserrno == 0);
1207 	/* Now there are only 3 clusters in use */
1208 	CU_ASSERT((free_clusters - 3) == spdk_bs_free_cluster_count(bs));
1209 
1210 	/* Resize the blob to be 10 clusters. Growth takes effect immediately. */
1211 	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
1212 	poll_threads();
1213 	CU_ASSERT(g_bserrno == 0);
1214 	CU_ASSERT((free_clusters - 10) == spdk_bs_free_cluster_count(bs));
1215 
1216 	/* Try to resize the blob to size larger than blobstore. */
1217 	spdk_blob_resize(blob, bs->total_clusters + 1, blob_op_complete, NULL);
1218 	poll_threads();
1219 	CU_ASSERT(g_bserrno == -ENOSPC);
1220 
1221 	ut_blob_close_and_delete(bs, blob);
1222 }
1223 
1224 static void
1225 blob_read_only(void)
1226 {
1227 	struct spdk_blob_store *bs;
1228 	struct spdk_bs_dev *dev;
1229 	struct spdk_blob *blob;
1230 	struct spdk_bs_opts opts;
1231 	spdk_blob_id blobid;
1232 	int rc;
1233 
1234 	dev = init_dev();
1235 	spdk_bs_opts_init(&opts, sizeof(opts));
1236 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
1237 
1238 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
1239 	poll_threads();
1240 	CU_ASSERT(g_bserrno == 0);
1241 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
1242 	bs = g_bs;
1243 
1244 	blob = ut_blob_create_and_open(bs, NULL);
1245 	blobid = spdk_blob_get_id(blob);
1246 
1247 	rc = spdk_blob_set_read_only(blob);
1248 	CU_ASSERT(rc == 0);
1249 
1250 	CU_ASSERT(blob->data_ro == false);
1251 	CU_ASSERT(blob->md_ro == false);
1252 
1253 	spdk_blob_sync_md(blob, bs_op_complete, NULL);
1254 	poll_threads();
1255 
1256 	CU_ASSERT(blob->data_ro == true);
1257 	CU_ASSERT(blob->md_ro == true);
1258 	CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);
1259 
1260 	spdk_blob_close(blob, blob_op_complete, NULL);
1261 	poll_threads();
1262 	CU_ASSERT(g_bserrno == 0);
1263 
1264 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
1265 	poll_threads();
1266 	CU_ASSERT(g_bserrno == 0);
1267 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
1268 	blob = g_blob;
1269 
1270 	CU_ASSERT(blob->data_ro == true);
1271 	CU_ASSERT(blob->md_ro == true);
1272 	CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);
1273 
1274 	spdk_blob_close(blob, blob_op_complete, NULL);
1275 	poll_threads();
1276 	CU_ASSERT(g_bserrno == 0);
1277 
1278 	ut_bs_reload(&bs, &opts);
1279 
1280 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
1281 	poll_threads();
1282 	CU_ASSERT(g_bserrno == 0);
1283 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
1284 	blob = g_blob;
1285 
1286 	CU_ASSERT(blob->data_ro == true);
1287 	CU_ASSERT(blob->md_ro == true);
1288 	CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);
1289 
1290 	ut_blob_close_and_delete(bs, blob);
1291 
1292 	spdk_bs_unload(bs, bs_op_complete, NULL);
1293 	poll_threads();
1294 	CU_ASSERT(g_bserrno == 0);
1295 }
1296 
1297 static void
1298 channel_ops(void)
1299 {
1300 	struct spdk_blob_store *bs = g_bs;
1301 	struct spdk_io_channel *channel;
1302 
1303 	channel = spdk_bs_alloc_io_channel(bs);
1304 	CU_ASSERT(channel != NULL);
1305 
1306 	spdk_bs_free_io_channel(channel);
1307 	poll_threads();
1308 }
1309 
1310 static void
1311 blob_write(void)
1312 {
1313 	struct spdk_blob_store *bs = g_bs;
1314 	struct spdk_blob *blob = g_blob;
1315 	struct spdk_io_channel *channel;
1316 	uint64_t pages_per_cluster;
1317 	uint8_t payload[10 * 4096];
1318 
1319 	pages_per_cluster = spdk_bs_get_cluster_size(bs) / spdk_bs_get_page_size(bs);
1320 
1321 	channel = spdk_bs_alloc_io_channel(bs);
1322 	CU_ASSERT(channel != NULL);
1323 
1324 	/* Write to a blob with 0 size */
1325 	spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL);
1326 	poll_threads();
1327 	CU_ASSERT(g_bserrno == -EINVAL);
1328 
1329 	/* Resize the blob */
1330 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
1331 	poll_threads();
1332 	CU_ASSERT(g_bserrno == 0);
1333 
1334 	/* Confirm that write fails if blob is marked read-only. */
1335 	blob->data_ro = true;
1336 	spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL);
1337 	poll_threads();
1338 	CU_ASSERT(g_bserrno == -EPERM);
1339 	blob->data_ro = false;
1340 
1341 	/* Write to the blob */
1342 	spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL);
1343 	poll_threads();
1344 	CU_ASSERT(g_bserrno == 0);
1345 
1346 	/* Write starting beyond the end */
1347 	spdk_blob_io_write(blob, channel, payload, 5 * pages_per_cluster, 1, blob_op_complete,
1348 			   NULL);
1349 	poll_threads();
1350 	CU_ASSERT(g_bserrno == -EINVAL);
1351 
1352 	/* Write starting at a valid location but going off the end */
1353 	spdk_blob_io_write(blob, channel, payload, 4 * pages_per_cluster, pages_per_cluster + 1,
1354 			   blob_op_complete, NULL);
1355 	poll_threads();
1356 	CU_ASSERT(g_bserrno == -EINVAL);
1357 
1358 	spdk_bs_free_io_channel(channel);
1359 	poll_threads();
1360 }
1361 
1362 static void
1363 blob_read(void)
1364 {
1365 	struct spdk_blob_store *bs = g_bs;
1366 	struct spdk_blob *blob = g_blob;
1367 	struct spdk_io_channel *channel;
1368 	uint64_t pages_per_cluster;
1369 	uint8_t payload[10 * 4096];
1370 
1371 	pages_per_cluster = spdk_bs_get_cluster_size(bs) / spdk_bs_get_page_size(bs);
1372 
1373 	channel = spdk_bs_alloc_io_channel(bs);
1374 	CU_ASSERT(channel != NULL);
1375 
1376 	/* Read from a blob with 0 size */
1377 	spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL);
1378 	poll_threads();
1379 	CU_ASSERT(g_bserrno == -EINVAL);
1380 
1381 	/* Resize the blob */
1382 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
1383 	poll_threads();
1384 	CU_ASSERT(g_bserrno == 0);
1385 
1386 	/* Confirm that read passes if blob is marked read-only. */
1387 	blob->data_ro = true;
1388 	spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL);
1389 	poll_threads();
1390 	CU_ASSERT(g_bserrno == 0);
1391 	blob->data_ro = false;
1392 
1393 	/* Read from the blob */
1394 	spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL);
1395 	poll_threads();
1396 	CU_ASSERT(g_bserrno == 0);
1397 
1398 	/* Read starting beyond the end */
1399 	spdk_blob_io_read(blob, channel, payload, 5 * pages_per_cluster, 1, blob_op_complete,
1400 			  NULL);
1401 	poll_threads();
1402 	CU_ASSERT(g_bserrno == -EINVAL);
1403 
1404 	/* Read starting at a valid location but going off the end */
1405 	spdk_blob_io_read(blob, channel, payload, 4 * pages_per_cluster, pages_per_cluster + 1,
1406 			  blob_op_complete, NULL);
1407 	poll_threads();
1408 	CU_ASSERT(g_bserrno == -EINVAL);
1409 
1410 	spdk_bs_free_io_channel(channel);
1411 	poll_threads();
1412 }
1413 
1414 static void
1415 blob_rw_verify(void)
1416 {
1417 	struct spdk_blob_store *bs = g_bs;
1418 	struct spdk_blob *blob = g_blob;
1419 	struct spdk_io_channel *channel;
1420 	uint8_t payload_read[10 * 4096];
1421 	uint8_t payload_write[10 * 4096];
1422 
1423 	channel = spdk_bs_alloc_io_channel(bs);
1424 	CU_ASSERT(channel != NULL);
1425 
1426 	spdk_blob_resize(blob, 32, blob_op_complete, NULL);
1427 	poll_threads();
1428 	CU_ASSERT(g_bserrno == 0);
1429 
1430 	memset(payload_write, 0xE5, sizeof(payload_write));
1431 	spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
1432 	poll_threads();
1433 	CU_ASSERT(g_bserrno == 0);
1434 
1435 	memset(payload_read, 0x00, sizeof(payload_read));
1436 	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
1437 	poll_threads();
1438 	CU_ASSERT(g_bserrno == 0);
1439 	CU_ASSERT(memcmp(payload_write, payload_read, 4 * 4096) == 0);
1440 
1441 	spdk_bs_free_io_channel(channel);
1442 	poll_threads();
1443 }
1444 
1445 static void
1446 blob_rw_verify_iov(void)
1447 {
1448 	struct spdk_blob_store *bs = g_bs;
1449 	struct spdk_blob *blob;
1450 	struct spdk_io_channel *channel;
1451 	uint8_t payload_read[10 * 4096];
1452 	uint8_t payload_write[10 * 4096];
1453 	struct iovec iov_read[3];
1454 	struct iovec iov_write[3];
1455 	void *buf;
1456 
1457 	channel = spdk_bs_alloc_io_channel(bs);
1458 	CU_ASSERT(channel != NULL);
1459 
1460 	blob = ut_blob_create_and_open(bs, NULL);
1461 
1462 	spdk_blob_resize(blob, 2, blob_op_complete, NULL);
1463 	poll_threads();
1464 	CU_ASSERT(g_bserrno == 0);
1465 
1466 	/*
1467 	 * Manually adjust the offset of the blob's second cluster.  This allows
1468 	 *  us to make sure that the readv/write code correctly accounts for I/O
1469 	 *  that cross cluster boundaries.  Start by asserting that the allocated
1470 	 *  clusters are where we expect before modifying the second cluster.
1471 	 */
1472 	CU_ASSERT(blob->active.clusters[0] == 1 * 256);
1473 	CU_ASSERT(blob->active.clusters[1] == 2 * 256);
1474 	blob->active.clusters[1] = 3 * 256;
1475 
1476 	memset(payload_write, 0xE5, sizeof(payload_write));
1477 	iov_write[0].iov_base = payload_write;
1478 	iov_write[0].iov_len = 1 * 4096;
1479 	iov_write[1].iov_base = payload_write + 1 * 4096;
1480 	iov_write[1].iov_len = 5 * 4096;
1481 	iov_write[2].iov_base = payload_write + 6 * 4096;
1482 	iov_write[2].iov_len = 4 * 4096;
1483 	/*
1484 	 * Choose a page offset just before the cluster boundary.  The first 6 pages of payload
1485 	 *  will get written to the first cluster, the last 4 to the second cluster.
1486 	 */
1487 	spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
1488 	poll_threads();
1489 	CU_ASSERT(g_bserrno == 0);
1490 
1491 	memset(payload_read, 0xAA, sizeof(payload_read));
1492 	iov_read[0].iov_base = payload_read;
1493 	iov_read[0].iov_len = 3 * 4096;
1494 	iov_read[1].iov_base = payload_read + 3 * 4096;
1495 	iov_read[1].iov_len = 4 * 4096;
1496 	iov_read[2].iov_base = payload_read + 7 * 4096;
1497 	iov_read[2].iov_len = 3 * 4096;
1498 	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
1499 	poll_threads();
1500 	CU_ASSERT(g_bserrno == 0);
1501 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
1502 
1503 	buf = calloc(1, 256 * 4096);
1504 	SPDK_CU_ASSERT_FATAL(buf != NULL);
1505 	/* Check that cluster 2 on "disk" was not modified. */
1506 	CU_ASSERT(memcmp(buf, &g_dev_buffer[512 * 4096], 256 * 4096) == 0);
1507 	free(buf);
1508 
1509 	spdk_blob_close(blob, blob_op_complete, NULL);
1510 	poll_threads();
1511 	CU_ASSERT(g_bserrno == 0);
1512 
1513 	spdk_bs_free_io_channel(channel);
1514 	poll_threads();
1515 }
1516 
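/*
 * Count the request sets sitting on the channel's reqs list.  Used by
 * blob_rw_verify_iov_nomem() to check that a failed writev does not leak
 * requests.
 */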
1517 static uint32_t
1518 bs_channel_get_req_count(struct spdk_io_channel *_channel)
1519 {
1520 	struct spdk_bs_channel *channel = spdk_io_channel_get_ctx(_channel);
1521 	struct spdk_bs_request_set *set;
1522 	uint32_t count = 0;
1523 
1524 	TAILQ_FOREACH(set, &channel->reqs, link) {
1525 		count++;
1526 	}
1527 
1528 	return count;
1529 }
1530 
1531 static void
1532 blob_rw_verify_iov_nomem(void)
1533 {
1534 	struct spdk_blob_store *bs = g_bs;
1535 	struct spdk_blob *blob = g_blob;
1536 	struct spdk_io_channel *channel;
1537 	uint8_t payload_write[10 * 4096];
1538 	struct iovec iov_write[3];
1539 	uint32_t req_count;
1540 
1541 	channel = spdk_bs_alloc_io_channel(bs);
1542 	CU_ASSERT(channel != NULL);
1543 
1544 	spdk_blob_resize(blob, 2, blob_op_complete, NULL);
1545 	poll_threads();
1546 	CU_ASSERT(g_bserrno == 0);
1547 
1548 	/*
1549 	 * Choose a page offset just before the cluster boundary.  The first 6 pages of payload
1550 	 *  will get written to the first cluster, the last 4 to the second cluster.
1551 	 */
1552 	iov_write[0].iov_base = payload_write;
1553 	iov_write[0].iov_len = 1 * 4096;
1554 	iov_write[1].iov_base = payload_write + 1 * 4096;
1555 	iov_write[1].iov_len = 5 * 4096;
1556 	iov_write[2].iov_base = payload_write + 6 * 4096;
1557 	iov_write[2].iov_len = 4 * 4096;
1558 	MOCK_SET(calloc, NULL);
1559 	req_count = bs_channel_get_req_count(channel);
1560 	spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
1561 	poll_threads();
1562 	CU_ASSERT(g_bserrno == -ENOMEM);
1563 	CU_ASSERT(req_count == bs_channel_get_req_count(channel));
1564 	MOCK_CLEAR(calloc);
1565 
1566 	spdk_bs_free_io_channel(channel);
1567 	poll_threads();
1568 }
1569 
1570 static void
1571 blob_rw_iov_read_only(void)
1572 {
1573 	struct spdk_blob_store *bs = g_bs;
1574 	struct spdk_blob *blob = g_blob;
1575 	struct spdk_io_channel *channel;
1576 	uint8_t payload_read[4096];
1577 	uint8_t payload_write[4096];
1578 	struct iovec iov_read;
1579 	struct iovec iov_write;
1580 
1581 	channel = spdk_bs_alloc_io_channel(bs);
1582 	CU_ASSERT(channel != NULL);
1583 
1584 	spdk_blob_resize(blob, 2, blob_op_complete, NULL);
1585 	poll_threads();
1586 	CU_ASSERT(g_bserrno == 0);
1587 
1588 	/* Verify that writev fails if the data_ro flag is set. */
1589 	blob->data_ro = true;
1590 	iov_write.iov_base = payload_write;
1591 	iov_write.iov_len = sizeof(payload_write);
1592 	spdk_blob_io_writev(blob, channel, &iov_write, 1, 0, 1, blob_op_complete, NULL);
1593 	poll_threads();
1594 	CU_ASSERT(g_bserrno == -EPERM);
1595 
1596 	/* Verify that reads pass if data_ro flag is set. */
1597 	iov_read.iov_base = payload_read;
1598 	iov_read.iov_len = sizeof(payload_read);
1599 	spdk_blob_io_readv(blob, channel, &iov_read, 1, 0, 1, blob_op_complete, NULL);
1600 	poll_threads();
1601 	CU_ASSERT(g_bserrno == 0);
1602 
1603 	spdk_bs_free_io_channel(channel);
1604 	poll_threads();
1605 }
1606 
1607 static void
1608 _blob_io_read_no_split(struct spdk_blob *blob, struct spdk_io_channel *channel,
1609 		       uint8_t *payload, uint64_t offset, uint64_t length,
1610 		       spdk_blob_op_complete cb_fn, void *cb_arg)
1611 {
1612 	uint64_t i;
1613 	uint8_t *buf;
1614 	uint64_t page_size = spdk_bs_get_page_size(blob->bs);
1615 
1616 	/* To be sure that the operation is NOT split, read one page at a time */
1617 	buf = payload;
1618 	for (i = 0; i < length; i++) {
1619 		spdk_blob_io_read(blob, channel, buf, i + offset, 1, blob_op_complete, NULL);
1620 		poll_threads();
1621 		if (g_bserrno != 0) {
1622 			/* Pass the error code up */
1623 			break;
1624 		}
1625 		buf += page_size;
1626 	}
1627 
1628 	cb_fn(cb_arg, g_bserrno);
1629 }
1630 
1631 static void
1632 _blob_io_write_no_split(struct spdk_blob *blob, struct spdk_io_channel *channel,
1633 			uint8_t *payload, uint64_t offset, uint64_t length,
1634 			spdk_blob_op_complete cb_fn, void *cb_arg)
1635 {
1636 	uint64_t i;
1637 	uint8_t *buf;
1638 	uint64_t page_size = spdk_bs_get_page_size(blob->bs);
1639 
1640 	/* To be sure that the operation is NOT split, write one page at a time */
1641 	buf = payload;
1642 	for (i = 0; i < length; i++) {
1643 		spdk_blob_io_write(blob, channel, buf, i + offset, 1, blob_op_complete, NULL);
1644 		poll_threads();
1645 		if (g_bserrno != 0) {
1646 			/* Pass the error code up */
1647 			break;
1648 		}
1649 		buf += page_size;
1650 	}
1651 
1652 	cb_fn(cb_arg, g_bserrno);
1653 }
1654 
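/*
 * Verify that reads and writes spanning multiple clusters are split
 * correctly by comparing them against single-page (never split) reference
 * I/O performed by the helpers above.
 */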
1655 static void
1656 blob_operation_split_rw(void)
1657 {
1658 	struct spdk_blob_store *bs = g_bs;
1659 	struct spdk_blob *blob;
1660 	struct spdk_io_channel *channel;
1661 	struct spdk_blob_opts opts;
1662 	uint64_t cluster_size;
1663 
1664 	uint64_t payload_size;
1665 	uint8_t *payload_read;
1666 	uint8_t *payload_write;
1667 	uint8_t *payload_pattern;
1668 
1669 	uint64_t page_size;
1670 	uint64_t pages_per_cluster;
1671 	uint64_t pages_per_payload;
1672 
1673 	uint64_t i;
1674 
1675 	cluster_size = spdk_bs_get_cluster_size(bs);
1676 	page_size = spdk_bs_get_page_size(bs);
1677 	pages_per_cluster = cluster_size / page_size;
1678 	pages_per_payload = pages_per_cluster * 5;
1679 	payload_size = cluster_size * 5;
1680 
1681 	payload_read = malloc(payload_size);
1682 	SPDK_CU_ASSERT_FATAL(payload_read != NULL);
1683 
1684 	payload_write = malloc(payload_size);
1685 	SPDK_CU_ASSERT_FATAL(payload_write != NULL);
1686 
1687 	payload_pattern = malloc(payload_size);
1688 	SPDK_CU_ASSERT_FATAL(payload_pattern != NULL);
1689 
1690 	/* Prepare random pattern to write */
1691 	memset(payload_pattern, 0xFF, payload_size);
1692 	for (i = 0; i < pages_per_payload; i++) {
1693 		*((uint64_t *)(payload_pattern + page_size * i)) = (i + 1);
1694 	}
1695 
1696 	channel = spdk_bs_alloc_io_channel(bs);
1697 	SPDK_CU_ASSERT_FATAL(channel != NULL);
1698 
1699 	/* Create blob */
1700 	ut_spdk_blob_opts_init(&opts);
1701 	opts.thin_provision = false;
1702 	opts.num_clusters = 5;
1703 
1704 	blob = ut_blob_create_and_open(bs, &opts);
1705 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
1706 
1707 	/* Initial read should return zeroed payload */
1708 	memset(payload_read, 0xFF, payload_size);
1709 	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
1710 	poll_threads();
1711 	CU_ASSERT(g_bserrno == 0);
1712 	CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size));
1713 
1714 	/* Fill whole blob except last page */
1715 	spdk_blob_io_write(blob, channel, payload_pattern, 0, pages_per_payload - 1,
1716 			   blob_op_complete, NULL);
1717 	poll_threads();
1718 	CU_ASSERT(g_bserrno == 0);
1719 
1720 	/* Write last page with a pattern */
1721 	spdk_blob_io_write(blob, channel, payload_pattern, pages_per_payload - 1, 1,
1722 			   blob_op_complete, NULL);
1723 	poll_threads();
1724 	CU_ASSERT(g_bserrno == 0);
1725 
1726 	/* Read whole blob and check consistency */
1727 	memset(payload_read, 0xFF, payload_size);
1728 	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
1729 	poll_threads();
1730 	CU_ASSERT(g_bserrno == 0);
1731 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size - page_size) == 0);
1732 	CU_ASSERT(memcmp(payload_pattern, payload_read + payload_size - page_size, page_size) == 0);
1733 
1734 	/* Fill whole blob except first page */
1735 	spdk_blob_io_write(blob, channel, payload_pattern, 1, pages_per_payload - 1,
1736 			   blob_op_complete, NULL);
1737 	poll_threads();
1738 	CU_ASSERT(g_bserrno == 0);
1739 
1740 	/* Write first page with a pattern */
1741 	spdk_blob_io_write(blob, channel, payload_pattern, 0, 1,
1742 			   blob_op_complete, NULL);
1743 	poll_threads();
1744 	CU_ASSERT(g_bserrno == 0);
1745 
1746 	/* Read whole blob and check consistency */
1747 	memset(payload_read, 0xFF, payload_size);
1748 	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
1749 	poll_threads();
1750 	CU_ASSERT(g_bserrno == 0);
1751 	CU_ASSERT(memcmp(payload_pattern, payload_read + page_size, payload_size - page_size) == 0);
1752 	CU_ASSERT(memcmp(payload_pattern, payload_read, page_size) == 0);
1753 
1754 
1755 	/* Fill whole blob with a pattern (5 clusters) */
1756 
1757 	/* 1. Read test. */
1758 	_blob_io_write_no_split(blob, channel, payload_pattern, 0, pages_per_payload,
1759 				blob_op_complete, NULL);
1760 	poll_threads();
1761 	CU_ASSERT(g_bserrno == 0);
1762 
1763 	memset(payload_read, 0xFF, payload_size);
1764 	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
1765 	poll_threads();
1766 	poll_threads();
1767 	CU_ASSERT(g_bserrno == 0);
1768 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);
1769 
1770 	/* 2. Write test. */
1771 	spdk_blob_io_write(blob, channel, payload_pattern, 0, pages_per_payload,
1772 			   blob_op_complete, NULL);
1773 	poll_threads();
1774 	CU_ASSERT(g_bserrno == 0);
1775 
1776 	memset(payload_read, 0xFF, payload_size);
1777 	_blob_io_read_no_split(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
1778 	poll_threads();
1779 	CU_ASSERT(g_bserrno == 0);
1780 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);
1781 
1782 	spdk_bs_free_io_channel(channel);
1783 	poll_threads();
1784 
1785 	g_blob = NULL;
1786 	g_blobid = 0;
1787 
1788 	free(payload_read);
1789 	free(payload_write);
1790 	free(payload_pattern);
1791 
1792 	ut_blob_close_and_delete(bs, blob);
1793 }
1794 
1795 static void
1796 blob_operation_split_rw_iov(void)
1797 {
1798 	struct spdk_blob_store *bs = g_bs;
1799 	struct spdk_blob *blob;
1800 	struct spdk_io_channel *channel;
1801 	struct spdk_blob_opts opts;
1802 	uint64_t cluster_size;
1803 
1804 	uint64_t payload_size;
1805 	uint8_t *payload_read;
1806 	uint8_t *payload_write;
1807 	uint8_t *payload_pattern;
1808 
1809 	uint64_t page_size;
1810 	uint64_t pages_per_cluster;
1811 	uint64_t pages_per_payload;
1812 
1813 	struct iovec iov_read[2];
1814 	struct iovec iov_write[2];
1815 
1816 	uint64_t i, j;
1817 
1818 	cluster_size = spdk_bs_get_cluster_size(bs);
1819 	page_size = spdk_bs_get_page_size(bs);
1820 	pages_per_cluster = cluster_size / page_size;
1821 	pages_per_payload = pages_per_cluster * 5;
1822 	payload_size = cluster_size * 5;
1823 
1824 	payload_read = malloc(payload_size);
1825 	SPDK_CU_ASSERT_FATAL(payload_read != NULL);
1826 
1827 	payload_write = malloc(payload_size);
1828 	SPDK_CU_ASSERT_FATAL(payload_write != NULL);
1829 
1830 	payload_pattern = malloc(payload_size);
1831 	SPDK_CU_ASSERT_FATAL(payload_pattern != NULL);
1832 
1833 	/* Prepare a deterministic pattern to write: fill each page with its 1-based index */
1834 	for (i = 0; i < pages_per_payload; i++) {
1835 		for (j = 0; j < page_size / sizeof(uint64_t); j++) {
1836 			uint64_t *tmp;
1837 
1838 			tmp = (uint64_t *)payload_pattern;
1839 			tmp += ((page_size * i) / sizeof(uint64_t)) + j;
1840 			*tmp = i + 1;
1841 		}
1842 	}
1843 
1844 	channel = spdk_bs_alloc_io_channel(bs);
1845 	SPDK_CU_ASSERT_FATAL(channel != NULL);
1846 
1847 	/* Create blob */
1848 	ut_spdk_blob_opts_init(&opts);
1849 	opts.thin_provision = false;
1850 	opts.num_clusters = 5;
1851 
1852 	blob = ut_blob_create_and_open(bs, &opts);
1853 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
1854 
1855 	/* Initial read should return a zeroed payload */
1856 	memset(payload_read, 0xFF, payload_size);
1857 	iov_read[0].iov_base = payload_read;
1858 	iov_read[0].iov_len = cluster_size * 3;
1859 	iov_read[1].iov_base = payload_read + cluster_size * 3;
1860 	iov_read[1].iov_len = cluster_size * 2;
1861 	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL);
1862 	poll_threads();
1863 	CU_ASSERT(g_bserrno == 0);
1864 	CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size));
1865 
1866 	/* The first iov fills the whole blob except the last page; the second iov writes
1867 	 *  the last page with a pattern. */
1868 	iov_write[0].iov_base = payload_pattern;
1869 	iov_write[0].iov_len = payload_size - page_size;
1870 	iov_write[1].iov_base = payload_pattern;
1871 	iov_write[1].iov_len = page_size;
1872 	spdk_blob_io_writev(blob, channel, iov_write, 2, 0, pages_per_payload, blob_op_complete, NULL);
1873 	poll_threads();
1874 	CU_ASSERT(g_bserrno == 0);
1875 
1876 	/* Read whole blob and check consistency */
1877 	memset(payload_read, 0xFF, payload_size);
1878 	iov_read[0].iov_base = payload_read;
1879 	iov_read[0].iov_len = cluster_size * 2;
1880 	iov_read[1].iov_base = payload_read + cluster_size * 2;
1881 	iov_read[1].iov_len = cluster_size * 3;
1882 	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL);
1883 	poll_threads();
1884 	CU_ASSERT(g_bserrno == 0);
1885 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size - page_size) == 0);
1886 	CU_ASSERT(memcmp(payload_pattern, payload_read + payload_size - page_size, page_size) == 0);
1887 
1888 	/* The first iov writes only the first page; the second iov fills the whole blob
1889 	 *  except the first page with a pattern. */
1890 	iov_write[0].iov_base = payload_pattern;
1891 	iov_write[0].iov_len = page_size;
1892 	iov_write[1].iov_base = payload_pattern;
1893 	iov_write[1].iov_len = payload_size - page_size;
1894 	spdk_blob_io_writev(blob, channel, iov_write, 2, 0, pages_per_payload, blob_op_complete, NULL);
1895 	poll_threads();
1896 	CU_ASSERT(g_bserrno == 0);
1897 
1898 	/* Read whole blob and check consistency */
1899 	memset(payload_read, 0xFF, payload_size);
1900 	iov_read[0].iov_base = payload_read;
1901 	iov_read[0].iov_len = cluster_size * 4;
1902 	iov_read[1].iov_base = payload_read + cluster_size * 4;
1903 	iov_read[1].iov_len = cluster_size;
1904 	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL);
1905 	poll_threads();
1906 	CU_ASSERT(g_bserrno == 0);
1907 	CU_ASSERT(memcmp(payload_pattern, payload_read + page_size, payload_size - page_size) == 0);
1908 	CU_ASSERT(memcmp(payload_pattern, payload_read, page_size) == 0);
1909 
1910 
1911 	/* Fill whole blob with a pattern (5 clusters) */
1912 
1913 	/* 1. Read test. */
1914 	_blob_io_write_no_split(blob, channel, payload_pattern, 0, pages_per_payload,
1915 				blob_op_complete, NULL);
1916 	poll_threads();
1917 	CU_ASSERT(g_bserrno == 0);
1918 
1919 	memset(payload_read, 0xFF, payload_size);
1920 	iov_read[0].iov_base = payload_read;
1921 	iov_read[0].iov_len = cluster_size;
1922 	iov_read[1].iov_base = payload_read + cluster_size;
1923 	iov_read[1].iov_len = cluster_size * 4;
1924 	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL);
1925 	poll_threads();
1926 	CU_ASSERT(g_bserrno == 0);
1927 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);
1928 
1929 	/* 2. Write test. */
1930 	iov_write[0].iov_base = payload_read;
1931 	iov_write[0].iov_len = cluster_size * 2;
1932 	iov_write[1].iov_base = payload_read + cluster_size * 2;
1933 	iov_write[1].iov_len = cluster_size * 3;
1934 	spdk_blob_io_writev(blob, channel, iov_write, 2, 0, pages_per_payload, blob_op_complete, NULL);
1935 	poll_threads();
1936 	CU_ASSERT(g_bserrno == 0);
1937 
1938 	memset(payload_read, 0xFF, payload_size);
1939 	_blob_io_read_no_split(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
1940 	poll_threads();
1941 	CU_ASSERT(g_bserrno == 0);
1942 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);
1943 
1944 	spdk_bs_free_io_channel(channel);
1945 	poll_threads();
1946 
1947 	g_blob = NULL;
1948 	g_blobid = 0;
1949 
1950 	free(payload_read);
1951 	free(payload_write);
1952 	free(payload_pattern);
1953 
1954 	ut_blob_close_and_delete(bs, blob);
1955 }
1956 
1957 static void
1958 blob_unmap(void)
1959 {
1960 	struct spdk_blob_store *bs = g_bs;
1961 	struct spdk_blob *blob;
1962 	struct spdk_io_channel *channel;
1963 	struct spdk_blob_opts opts;
1964 	uint8_t payload[4096];
1965 	int i;
1966 
1967 	channel = spdk_bs_alloc_io_channel(bs);
1968 	CU_ASSERT(channel != NULL);
1969 
1970 	ut_spdk_blob_opts_init(&opts);
1971 	opts.num_clusters = 10;
1972 
1973 	blob = ut_blob_create_and_open(bs, &opts);
1974 
1975 	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
1976 	poll_threads();
1977 	CU_ASSERT(g_bserrno == 0);
1978 
1979 	memset(payload, 0, sizeof(payload));
1980 	payload[0] = 0xFF;
1981 
1982 	/*
1983 	 * Set the first byte of every cluster to 0xFF directly in the device buffer.
1984 	 * The first cluster on the device is reserved, so start from cluster number 1.
1985 	 */
1986 	for (i = 1; i < 11; i++) {
1987 		g_dev_buffer[i * SPDK_BLOB_OPTS_CLUSTER_SZ] = 0xFF;
1988 	}
1989 
1990 	/* Confirm the pattern is visible through reads of the blob */
1991 	for (i = 0; i < 10; i++) {
1992 		payload[0] = 0;
1993 		spdk_blob_io_read(blob, channel, &payload, i * SPDK_BLOB_OPTS_CLUSTER_SZ / 4096, 1,
1994 				  blob_op_complete, NULL);
1995 		poll_threads();
1996 		CU_ASSERT(g_bserrno == 0);
1997 		CU_ASSERT(payload[0] == 0xFF);
1998 	}
1999 
2000 	/* Mark some clusters as unallocated */
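	/* Clearing these entries makes the blobstore treat the clusters as unallocated,
	 * so the resize to 0 below should leave them untouched and their 0xFF markers
	 * on the device should survive.
	 */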
2001 	blob->active.clusters[1] = 0;
2002 	blob->active.clusters[2] = 0;
2003 	blob->active.clusters[3] = 0;
2004 	blob->active.clusters[6] = 0;
2005 	blob->active.clusters[8] = 0;
2006 
2007 	/* Unmap clusters by resizing to 0 */
2008 	spdk_blob_resize(blob, 0, blob_op_complete, NULL);
2009 	poll_threads();
2010 	CU_ASSERT(g_bserrno == 0);
2011 
2012 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
2013 	poll_threads();
2014 	CU_ASSERT(g_bserrno == 0);
2015 
2016 	/* Confirm that only 'allocated' clusters were unmapped */
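	/* Device cluster i backs blob cluster i - 1 here (cluster 0 is reserved), so
	 * device clusters 2, 3, 4, 7 and 9 correspond to the entries cleared above.
	 */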
2017 	for (i = 1; i < 11; i++) {
2018 		switch (i) {
2019 		case 2:
2020 		case 3:
2021 		case 4:
2022 		case 7:
2023 		case 9:
2024 			CU_ASSERT(g_dev_buffer[i * SPDK_BLOB_OPTS_CLUSTER_SZ] == 0xFF);
2025 			break;
2026 		default:
2027 			CU_ASSERT(g_dev_buffer[i * SPDK_BLOB_OPTS_CLUSTER_SZ] == 0);
2028 			break;
2029 		}
2030 	}
2031 
2032 	spdk_bs_free_io_channel(channel);
2033 	poll_threads();
2034 
2035 	ut_blob_close_and_delete(bs, blob);
2036 }
2037 
2038 static void
2039 blob_iter(void)
2040 {
2041 	struct spdk_blob_store *bs = g_bs;
2042 	struct spdk_blob *blob;
2043 	spdk_blob_id blobid;
2044 	struct spdk_blob_opts blob_opts;
2045 
2046 	spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL);
2047 	poll_threads();
2048 	CU_ASSERT(g_blob == NULL);
2049 	CU_ASSERT(g_bserrno == -ENOENT);
2050 
2051 	ut_spdk_blob_opts_init(&blob_opts);
2052 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
2053 	poll_threads();
2054 	CU_ASSERT(g_bserrno == 0);
2055 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
2056 	blobid = g_blobid;
2057 
2058 	spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL);
2059 	poll_threads();
2060 	CU_ASSERT(g_blob != NULL);
2061 	CU_ASSERT(g_bserrno == 0);
2062 	blob = g_blob;
2063 	CU_ASSERT(spdk_blob_get_id(blob) == blobid);
2064 
2065 	spdk_bs_iter_next(bs, blob, blob_op_with_handle_complete, NULL);
2066 	poll_threads();
2067 	CU_ASSERT(g_blob == NULL);
2068 	CU_ASSERT(g_bserrno == -ENOENT);
2069 }
2070 
2071 static void
2072 blob_xattr(void)
2073 {
2074 	struct spdk_blob_store *bs = g_bs;
2075 	struct spdk_blob *blob = g_blob;
2076 	spdk_blob_id blobid = spdk_blob_get_id(blob);
2077 	uint64_t length;
2078 	int rc;
2079 	const char *name1, *name2;
2080 	const void *value;
2081 	size_t value_len;
2082 	struct spdk_xattr_names *names;
2083 
2084 	/* Test that set_xattr fails if md_ro flag is set. */
2085 	blob->md_ro = true;
2086 	rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
2087 	CU_ASSERT(rc == -EPERM);
2088 
2089 	blob->md_ro = false;
2090 	rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
2091 	CU_ASSERT(rc == 0);
2092 
2093 	length = 2345;
2094 	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
2095 	CU_ASSERT(rc == 0);
2096 
2097 	/* Overwrite "length" xattr. */
2098 	length = 3456;
2099 	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
2100 	CU_ASSERT(rc == 0);
2101 
2102 	/* get_xattr should still work even if md_ro flag is set. */
2103 	value = NULL;
2104 	blob->md_ro = true;
2105 	rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len);
2106 	CU_ASSERT(rc == 0);
2107 	SPDK_CU_ASSERT_FATAL(value != NULL);
2108 	CU_ASSERT(*(uint64_t *)value == length);
2109 	CU_ASSERT(value_len == 8);
2110 	blob->md_ro = false;
2111 
2112 	rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len);
2113 	CU_ASSERT(rc == -ENOENT);
2114 
2115 	names = NULL;
2116 	rc = spdk_blob_get_xattr_names(blob, &names);
2117 	CU_ASSERT(rc == 0);
2118 	SPDK_CU_ASSERT_FATAL(names != NULL);
2119 	CU_ASSERT(spdk_xattr_names_get_count(names) == 2);
2120 	name1 = spdk_xattr_names_get_name(names, 0);
2121 	SPDK_CU_ASSERT_FATAL(name1 != NULL);
2122 	CU_ASSERT(!strcmp(name1, "name") || !strcmp(name1, "length"));
2123 	name2 = spdk_xattr_names_get_name(names, 1);
2124 	SPDK_CU_ASSERT_FATAL(name2 != NULL);
2125 	CU_ASSERT(!strcmp(name2, "name") || !strcmp(name2, "length"));
2126 	CU_ASSERT(strcmp(name1, name2));
2127 	spdk_xattr_names_free(names);
2128 
2129 	/* Confirm that remove_xattr fails if md_ro is set to true. */
2130 	blob->md_ro = true;
2131 	rc = spdk_blob_remove_xattr(blob, "name");
2132 	CU_ASSERT(rc == -EPERM);
2133 
2134 	blob->md_ro = false;
2135 	rc = spdk_blob_remove_xattr(blob, "name");
2136 	CU_ASSERT(rc == 0);
2137 
2138 	rc = spdk_blob_remove_xattr(blob, "foobar");
2139 	CU_ASSERT(rc == -ENOENT);
2140 
2141 	/* Set internal xattr */
2142 	length = 7898;
2143 	rc = blob_set_xattr(blob, "internal", &length, sizeof(length), true);
2144 	CU_ASSERT(rc == 0);
2145 	rc = blob_get_xattr_value(blob, "internal", &value, &value_len, true);
2146 	CU_ASSERT(rc == 0);
2147 	CU_ASSERT(*(uint64_t *)value == length);
2148 	/* Try to get a public xattr with the same name */
2149 	rc = spdk_blob_get_xattr_value(blob, "internal", &value, &value_len);
2150 	CU_ASSERT(rc != 0);
2151 	rc = blob_get_xattr_value(blob, "internal", &value, &value_len, false);
2152 	CU_ASSERT(rc != 0);
2153 	/* Check if SPDK_BLOB_INTERNAL_XATTR is set */
2154 	CU_ASSERT((blob->invalid_flags & SPDK_BLOB_INTERNAL_XATTR) ==
2155 		  SPDK_BLOB_INTERNAL_XATTR);
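	/* Because the flag lives in invalid_flags, a blobstore implementation that does
	 * not understand internal xattrs would be expected to refuse to open this blob.
	 */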
2156 
2157 	spdk_blob_close(blob, blob_op_complete, NULL);
2158 	poll_threads();
2159 
2160 	/* Check if xattrs are persisted */
2161 	ut_bs_reload(&bs, NULL);
2162 
2163 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
2164 	poll_threads();
2165 	CU_ASSERT(g_bserrno == 0);
2166 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
2167 	blob = g_blob;
2168 
2169 	rc = blob_get_xattr_value(blob, "internal", &value, &value_len, true);
2170 	CU_ASSERT(rc == 0);
2171 	CU_ASSERT(*(uint64_t *)value == length);
2172 
2173 	/* Try to get the internal xattr through the public call */
2174 	rc = spdk_blob_get_xattr_value(blob, "internal", &value, &value_len);
2175 	CU_ASSERT(rc != 0);
2176 
2177 	rc = blob_remove_xattr(blob, "internal", true);
2178 	CU_ASSERT(rc == 0);
2179 
2180 	CU_ASSERT((blob->invalid_flags & SPDK_BLOB_INTERNAL_XATTR) == 0);
2181 }
2182 
2183 static void
2184 blob_parse_md(void)
2185 {
2186 	struct spdk_blob_store *bs = g_bs;
2187 	struct spdk_blob *blob;
2188 	int rc;
2189 	uint32_t used_pages;
2190 	size_t xattr_length;
2191 	char *xattr;
2192 
2193 	used_pages = spdk_bit_array_count_set(bs->used_md_pages);
2194 	blob = ut_blob_create_and_open(bs, NULL);
2195 
2196 	/* Set a large xattr to force more than 1 page of metadata. */
2197 	xattr_length = SPDK_BS_MAX_DESC_SIZE - sizeof(struct spdk_blob_md_descriptor_xattr) -
2198 		       strlen("large_xattr");
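	/*
	 * The xattr descriptor built from this length reaches SPDK_BS_MAX_DESC_SIZE,
	 * which should be enough to push the blob's metadata past a single page.
	 */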
2199 	xattr = calloc(xattr_length, sizeof(char));
2200 	SPDK_CU_ASSERT_FATAL(xattr != NULL);
2201 	rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
2202 	free(xattr);
2203 	SPDK_CU_ASSERT_FATAL(rc == 0);
2204 
2205 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
2206 	poll_threads();
2207 
2208 	/* Delete the blob and verify that the used page count returns to its pre-creation value. */
2209 	SPDK_CU_ASSERT_FATAL(used_pages != spdk_bit_array_count_set(bs->used_md_pages));
2210 	ut_blob_close_and_delete(bs, blob);
2211 	SPDK_CU_ASSERT_FATAL(used_pages == spdk_bit_array_count_set(bs->used_md_pages));
2212 }
2213 
2214 static void
2215 bs_load(void)
2216 {
2217 	struct spdk_blob_store *bs;
2218 	struct spdk_bs_dev *dev;
2219 	spdk_blob_id blobid;
2220 	struct spdk_blob *blob;
2221 	struct spdk_bs_super_block *super_block;
2222 	uint64_t length;
2223 	int rc;
2224 	const void *value;
2225 	size_t value_len;
2226 	struct spdk_bs_opts opts;
2227 	struct spdk_blob_opts blob_opts;
2228 
2229 	dev = init_dev();
2230 	spdk_bs_opts_init(&opts, sizeof(opts));
2231 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2232 
2233 	/* Initialize a new blob store */
2234 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2235 	poll_threads();
2236 	CU_ASSERT(g_bserrno == 0);
2237 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2238 	bs = g_bs;
2239 
2240 	/* Try to open a blobid that does not exist */
2241 	spdk_bs_open_blob(bs, 0, blob_op_with_handle_complete, NULL);
2242 	poll_threads();
2243 	CU_ASSERT(g_bserrno == -ENOENT);
2244 	CU_ASSERT(g_blob == NULL);
2245 
2246 	/* Create a blob */
2247 	blob = ut_blob_create_and_open(bs, NULL);
2248 	blobid = spdk_blob_get_id(blob);
2249 
2250 	/* Try again to open valid blob but without the upper bit set */
2251 	spdk_bs_open_blob(bs, blobid & 0xFFFFFFFF, blob_op_with_handle_complete, NULL);
2252 	poll_threads();
2253 	CU_ASSERT(g_bserrno == -ENOENT);
2254 	CU_ASSERT(g_blob == NULL);
2255 
2256 	/* Set some xattrs */
2257 	rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
2258 	CU_ASSERT(rc == 0);
2259 
2260 	length = 2345;
2261 	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
2262 	CU_ASSERT(rc == 0);
2263 
2264 	/* Resize the blob */
2265 	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
2266 	poll_threads();
2267 	CU_ASSERT(g_bserrno == 0);
2268 
2269 	spdk_blob_close(blob, blob_op_complete, NULL);
2270 	poll_threads();
2271 	CU_ASSERT(g_bserrno == 0);
2272 	blob = NULL;
2273 	g_blob = NULL;
2274 	g_blobid = SPDK_BLOBID_INVALID;
2275 
2276 	/* Unload the blob store */
2277 	spdk_bs_unload(bs, bs_op_complete, NULL);
2278 	poll_threads();
2279 	CU_ASSERT(g_bserrno == 0);
2280 	g_bs = NULL;
2281 	g_blob = NULL;
2282 	g_blobid = 0;
2283 
2284 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
2285 	CU_ASSERT(super_block->clean == 1);
2286 
2287 	/* Load should fail for device with an unsupported blocklen */
2288 	dev = init_dev();
2289 	dev->blocklen = SPDK_BS_PAGE_SIZE * 2;
2290 	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
2291 	poll_threads();
2292 	CU_ASSERT(g_bserrno == -EINVAL);
2293 
2294 	/* Load should fail when max_md_ops is set to zero */
2295 	dev = init_dev();
2296 	spdk_bs_opts_init(&opts, sizeof(opts));
2297 	opts.max_md_ops = 0;
2298 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2299 	poll_threads();
2300 	CU_ASSERT(g_bserrno == -EINVAL);
2301 
2302 	/* Load should fail when max_channel_ops is set to zero */
2303 	dev = init_dev();
2304 	spdk_bs_opts_init(&opts, sizeof(opts));
2305 	opts.max_channel_ops = 0;
2306 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2307 	poll_threads();
2308 	CU_ASSERT(g_bserrno == -EINVAL);
2309 
2310 	/* Load an existing blob store */
2311 	dev = init_dev();
2312 	spdk_bs_opts_init(&opts, sizeof(opts));
2313 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2314 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2315 	poll_threads();
2316 	CU_ASSERT(g_bserrno == 0);
2317 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2318 	bs = g_bs;
2319 
2320 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
2321 	CU_ASSERT(super_block->clean == 1);
2322 	CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen);
2323 
2324 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
2325 	poll_threads();
2326 	CU_ASSERT(g_bserrno == 0);
2327 	CU_ASSERT(g_blob != NULL);
2328 	blob = g_blob;
2329 
2330 	/* Verify that blobstore is marked dirty after first metadata sync */
2331 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
2332 	CU_ASSERT(super_block->clean == 1);
2333 
2334 	/* Get the xattrs */
2335 	value = NULL;
2336 	rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len);
2337 	CU_ASSERT(rc == 0);
2338 	SPDK_CU_ASSERT_FATAL(value != NULL);
2339 	CU_ASSERT(*(uint64_t *)value == length);
2340 	CU_ASSERT(value_len == 8);
2341 
2342 	rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len);
2343 	CU_ASSERT(rc == -ENOENT);
2344 
2345 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
2346 
2347 	spdk_blob_close(blob, blob_op_complete, NULL);
2348 	poll_threads();
2349 	CU_ASSERT(g_bserrno == 0);
2350 	blob = NULL;
2351 	g_blob = NULL;
2352 
2353 	spdk_bs_unload(bs, bs_op_complete, NULL);
2354 	poll_threads();
2355 	CU_ASSERT(g_bserrno == 0);
2356 	g_bs = NULL;
2357 
2358 	/* Load should fail: bdev size < saved size */
2359 	dev = init_dev();
2360 	dev->blockcnt /= 2;
2361 
2362 	spdk_bs_opts_init(&opts, sizeof(opts));
2363 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2364 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2365 	poll_threads();
2366 
2367 	CU_ASSERT(g_bserrno == -EILSEQ);
2368 
2369 	/* Load should succeed: bdev size > saved size */
2370 	dev = init_dev();
2371 	dev->blockcnt *= 4;
2372 
2373 	spdk_bs_opts_init(&opts, sizeof(opts));
2374 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2375 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2376 	poll_threads();
2377 	CU_ASSERT(g_bserrno == 0);
2378 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2379 	bs = g_bs;
2380 
2381 	CU_ASSERT(g_bserrno == 0);
2382 	spdk_bs_unload(bs, bs_op_complete, NULL);
2383 	poll_threads();
2384 
2385 
2386 	/* Test compatibility mode */
2387 
2388 	dev = init_dev();
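	/*
	 * A size of 0 mimics a super block written before the size field existed;
	 * the load should fall back to the device size and later persist it, which
	 * the check on super_block->size below verifies.
	 */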
2389 	super_block->size = 0;
2390 	super_block->crc = blob_md_page_calc_crc(super_block);
2391 
2392 	spdk_bs_opts_init(&opts, sizeof(opts));
2393 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2394 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2395 	poll_threads();
2396 	CU_ASSERT(g_bserrno == 0);
2397 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2398 	bs = g_bs;
2399 
2400 	/* Create a blob */
2401 	ut_spdk_blob_opts_init(&blob_opts);
2402 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
2403 	poll_threads();
2404 	CU_ASSERT(g_bserrno == 0);
2405 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
2406 
2407 	/* Blobstore should update number of blocks in super_block */
2408 	CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen);
2409 	CU_ASSERT(super_block->clean == 0);
2410 
2411 	spdk_bs_unload(bs, bs_op_complete, NULL);
2412 	poll_threads();
2413 	CU_ASSERT(g_bserrno == 0);
2414 	CU_ASSERT(super_block->clean == 1);
2415 	g_bs = NULL;
2416 
2417 }
2418 
2419 static void
2420 bs_load_pending_removal(void)
2421 {
2422 	struct spdk_blob_store *bs = g_bs;
2423 	struct spdk_blob_opts opts;
2424 	struct spdk_blob *blob, *snapshot;
2425 	spdk_blob_id blobid, snapshotid;
2426 	const void *value;
2427 	size_t value_len;
2428 	int rc;
2429 
2430 	/* Create blob */
2431 	ut_spdk_blob_opts_init(&opts);
2432 	opts.num_clusters = 10;
2433 
2434 	blob = ut_blob_create_and_open(bs, &opts);
2435 	blobid = spdk_blob_get_id(blob);
2436 
2437 	/* Create snapshot */
2438 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
2439 	poll_threads();
2440 	CU_ASSERT(g_bserrno == 0);
2441 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
2442 	snapshotid = g_blobid;
2443 
2444 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
2445 	poll_threads();
2446 	CU_ASSERT(g_bserrno == 0);
2447 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
2448 	snapshot = g_blob;
2449 
2450 	/* Set SNAPSHOT_PENDING_REMOVAL xattr */
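	/* Setting this xattr by hand imitates a snapshot deletion that was interrupted
	 * before completion; the blobstore is expected to resolve it on the next load.
	 */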
2451 	snapshot->md_ro = false;
2452 	rc = blob_set_xattr(snapshot, SNAPSHOT_PENDING_REMOVAL, &blobid, sizeof(spdk_blob_id), true);
2453 	CU_ASSERT(rc == 0);
2454 	snapshot->md_ro = true;
2455 
2456 	spdk_blob_close(snapshot, blob_op_complete, NULL);
2457 	poll_threads();
2458 	CU_ASSERT(g_bserrno == 0);
2459 
2460 	spdk_blob_close(blob, blob_op_complete, NULL);
2461 	poll_threads();
2462 	CU_ASSERT(g_bserrno == 0);
2463 
2464 	/* Reload blobstore */
2465 	ut_bs_reload(&bs, NULL);
2466 
2467 	/* Snapshot should not be removed as blob is still pointing to it */
2468 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
2469 	poll_threads();
2470 	CU_ASSERT(g_bserrno == 0);
2471 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
2472 	snapshot = g_blob;
2473 
2474 	/* SNAPSHOT_PENDING_REMOVAL xattr should be removed during load */
2475 	rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_PENDING_REMOVAL, &value, &value_len);
2476 	CU_ASSERT(rc != 0);
2477 
2478 	/* Set SNAPSHOT_PENDING_REMOVAL xattr again */
2479 	snapshot->md_ro = false;
2480 	rc = blob_set_xattr(snapshot, SNAPSHOT_PENDING_REMOVAL, &blobid, sizeof(spdk_blob_id), true);
2481 	CU_ASSERT(rc == 0);
2482 	snapshot->md_ro = true;
2483 
2484 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
2485 	poll_threads();
2486 	CU_ASSERT(g_bserrno == 0);
2487 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
2488 	blob = g_blob;
2489 
2490 	/* Remove parent_id from blob by removing BLOB_SNAPSHOT xattr */
2491 	blob_remove_xattr(blob, BLOB_SNAPSHOT, true);
2492 
2493 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
2494 	poll_threads();
2495 	CU_ASSERT(g_bserrno == 0);
2496 
2497 	spdk_blob_close(snapshot, blob_op_complete, NULL);
2498 	poll_threads();
2499 	CU_ASSERT(g_bserrno == 0);
2500 
2501 	spdk_blob_close(blob, blob_op_complete, NULL);
2502 	poll_threads();
2503 	CU_ASSERT(g_bserrno == 0);
2504 
2505 	/* Reload blobstore */
2506 	ut_bs_reload(&bs, NULL);
2507 
2508 	/* Snapshot should be removed as blob is not pointing to it anymore */
2509 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
2510 	poll_threads();
2511 	CU_ASSERT(g_bserrno != 0);
2512 }
2513 
2514 static void
2515 bs_load_custom_cluster_size(void)
2516 {
2517 	struct spdk_blob_store *bs;
2518 	struct spdk_bs_dev *dev;
2519 	struct spdk_bs_super_block *super_block;
2520 	struct spdk_bs_opts opts;
2521 	uint32_t custom_cluster_size = 4194304; /* 4MiB */
2522 	uint32_t cluster_sz;
2523 	uint64_t total_clusters;
2524 
2525 	dev = init_dev();
2526 	spdk_bs_opts_init(&opts, sizeof(opts));
2527 	opts.cluster_sz = custom_cluster_size;
2528 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2529 
2530 	/* Initialize a new blob store */
2531 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2532 	poll_threads();
2533 	CU_ASSERT(g_bserrno == 0);
2534 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2535 	bs = g_bs;
2536 	cluster_sz = bs->cluster_sz;
2537 	total_clusters = bs->total_clusters;
2538 
2539 	/* Unload the blob store */
2540 	spdk_bs_unload(bs, bs_op_complete, NULL);
2541 	poll_threads();
2542 	CU_ASSERT(g_bserrno == 0);
2543 	g_bs = NULL;
2544 	g_blob = NULL;
2545 	g_blobid = 0;
2546 
2547 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
2548 	CU_ASSERT(super_block->clean == 1);
2549 
2550 	/* Load an existing blob store */
2551 	dev = init_dev();
2552 	spdk_bs_opts_init(&opts, sizeof(opts));
2553 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2554 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2555 	poll_threads();
2556 	CU_ASSERT(g_bserrno == 0);
2557 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2558 	bs = g_bs;
2559 	/* Compare the cluster size and count with the values recorded after initialization */
2560 	CU_ASSERT(cluster_sz == bs->cluster_sz);
2561 	CU_ASSERT(total_clusters == bs->total_clusters);
2562 
2563 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
2564 	CU_ASSERT(super_block->clean == 1);
2565 	CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen);
2566 
2567 	spdk_bs_unload(bs, bs_op_complete, NULL);
2568 	poll_threads();
2569 	CU_ASSERT(g_bserrno == 0);
2570 	CU_ASSERT(super_block->clean == 1);
2571 	g_bs = NULL;
2572 }
2573 
2574 static void
2575 bs_type(void)
2576 {
2577 	struct spdk_blob_store *bs;
2578 	struct spdk_bs_dev *dev;
2579 	struct spdk_bs_opts opts;
2580 
2581 	dev = init_dev();
2582 	spdk_bs_opts_init(&opts, sizeof(opts));
2583 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2584 
2585 	/* Initialize a new blob store */
2586 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2587 	poll_threads();
2588 	CU_ASSERT(g_bserrno == 0);
2589 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2590 	bs = g_bs;
2591 
2592 	/* Unload the blob store */
2593 	spdk_bs_unload(bs, bs_op_complete, NULL);
2594 	poll_threads();
2595 	CU_ASSERT(g_bserrno == 0);
2596 	g_bs = NULL;
2597 	g_blob = NULL;
2598 	g_blobid = 0;
2599 
2600 	/* Load a non-existent blobstore type */
2601 	dev = init_dev();
2602 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "NONEXISTING");
2603 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2604 	poll_threads();
2605 	CU_ASSERT(g_bserrno != 0);
2606 
2607 	/* Load with empty blobstore type */
2608 	dev = init_dev();
2609 	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
2610 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2611 	poll_threads();
2612 	CU_ASSERT(g_bserrno == 0);
2613 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2614 	bs = g_bs;
2615 
2616 	spdk_bs_unload(bs, bs_op_complete, NULL);
2617 	poll_threads();
2618 	CU_ASSERT(g_bserrno == 0);
2619 	g_bs = NULL;
2620 
2621 	/* Initialize a new blob store with empty bstype */
2622 	dev = init_dev();
2623 	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
2624 	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
2625 	poll_threads();
2626 	CU_ASSERT(g_bserrno == 0);
2627 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2628 	bs = g_bs;
2629 
2630 	spdk_bs_unload(bs, bs_op_complete, NULL);
2631 	poll_threads();
2632 	CU_ASSERT(g_bserrno == 0);
2633 	g_bs = NULL;
2634 
2635 	/* Load a non-existent blobstore type */
2636 	dev = init_dev();
2637 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "NONEXISTING");
2638 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2639 	poll_threads();
2640 	CU_ASSERT(g_bserrno != 0);
2641 
2642 	/* Load with empty blobstore type */
2643 	dev = init_dev();
2644 	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
2645 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2646 	poll_threads();
2647 	CU_ASSERT(g_bserrno == 0);
2648 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2649 	bs = g_bs;
2650 
2651 	spdk_bs_unload(bs, bs_op_complete, NULL);
2652 	poll_threads();
2653 	CU_ASSERT(g_bserrno == 0);
2654 	g_bs = NULL;
2655 }
2656 
2657 static void
2658 bs_super_block(void)
2659 {
2660 	struct spdk_blob_store *bs;
2661 	struct spdk_bs_dev *dev;
2662 	struct spdk_bs_super_block *super_block;
2663 	struct spdk_bs_opts opts;
2664 	struct spdk_bs_super_block_ver1 super_block_v1;
2665 
2666 	dev = init_dev();
2667 	spdk_bs_opts_init(&opts, sizeof(opts));
2668 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2669 
2670 	/* Initialize a new blob store */
2671 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2672 	poll_threads();
2673 	CU_ASSERT(g_bserrno == 0);
2674 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2675 	bs = g_bs;
2676 
2677 	/* Unload the blob store */
2678 	spdk_bs_unload(bs, bs_op_complete, NULL);
2679 	poll_threads();
2680 	CU_ASSERT(g_bserrno == 0);
2681 	g_bs = NULL;
2682 	g_blob = NULL;
2683 	g_blobid = 0;
2684 
2685 	/* Load an existing blob store with version newer than supported */
2686 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
2687 	super_block->version++;
2688 
2689 	dev = init_dev();
2690 	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
2691 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2692 	poll_threads();
2693 	CU_ASSERT(g_bserrno != 0);
2694 
2695 	/* Create a new blob store with super block version 1 */
2696 	dev = init_dev();
2697 	super_block_v1.version = 1;
2698 	memcpy(super_block_v1.signature, "SPDKBLOB", sizeof(super_block_v1.signature));
2699 	super_block_v1.length = 0x1000;
2700 	super_block_v1.clean = 1;
2701 	super_block_v1.super_blob = 0xFFFFFFFFFFFFFFFF;
2702 	super_block_v1.cluster_size = 0x100000;
2703 	super_block_v1.used_page_mask_start = 0x01;
2704 	super_block_v1.used_page_mask_len = 0x01;
2705 	super_block_v1.used_cluster_mask_start = 0x02;
2706 	super_block_v1.used_cluster_mask_len = 0x01;
2707 	super_block_v1.md_start = 0x03;
2708 	super_block_v1.md_len = 0x40;
2709 	memset(super_block_v1.reserved, 0, 4036);
2710 	super_block_v1.crc = blob_md_page_calc_crc(&super_block_v1);
2711 	memcpy(g_dev_buffer, &super_block_v1, sizeof(struct spdk_bs_super_block_ver1));
2712 
2713 	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
2714 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2715 	poll_threads();
2716 	CU_ASSERT(g_bserrno == 0);
2717 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2718 	bs = g_bs;
2719 
2720 	spdk_bs_unload(bs, bs_op_complete, NULL);
2721 	poll_threads();
2722 	CU_ASSERT(g_bserrno == 0);
2723 	g_bs = NULL;
2724 }
2725 
2726 static void
2727 bs_test_recover_cluster_count(void)
2728 {
2729 	struct spdk_blob_store *bs;
2730 	struct spdk_bs_dev *dev;
2731 	struct spdk_bs_super_block super_block;
2732 	struct spdk_bs_opts opts;
2733 
2734 	dev = init_dev();
2735 	spdk_bs_opts_init(&opts, sizeof(opts));
2736 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2737 
2738 	super_block.version = 3;
2739 	memcpy(super_block.signature, "SPDKBLOB", sizeof(super_block.signature));
2740 	super_block.length = 0x1000;
2741 	super_block.clean = 0;
2742 	super_block.super_blob = 0xFFFFFFFFFFFFFFFF;
2743 	super_block.cluster_size = 4096;
2744 	super_block.used_page_mask_start = 0x01;
2745 	super_block.used_page_mask_len = 0x01;
2746 	super_block.used_cluster_mask_start = 0x02;
2747 	super_block.used_cluster_mask_len = 0x01;
2748 	super_block.used_blobid_mask_start = 0x03;
2749 	super_block.used_blobid_mask_len = 0x01;
2750 	super_block.md_start = 0x04;
2751 	super_block.md_len = 0x40;
2752 	memset(super_block.bstype.bstype, 0, sizeof(super_block.bstype.bstype));
2753 	super_block.size = dev->blockcnt * dev->blocklen;
2754 	super_block.io_unit_size = 0x1000;
2755 	memset(super_block.reserved, 0, 4000);
2756 	super_block.crc = blob_md_page_calc_crc(&super_block);
2757 	memcpy(g_dev_buffer, &super_block, sizeof(struct spdk_bs_super_block));
2758 
2759 	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
2760 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2761 	poll_threads();
2762 	CU_ASSERT(g_bserrno == 0);
2763 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2764 	bs = g_bs;
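	/*
	 * The super block above was written with clean == 0, so the load path has to
	 * recover the used-cluster count by itself.  With cluster_size equal to the
	 * 4 KiB page size, each metadata page occupies one cluster, so only the
	 * first (md_start + md_len) clusters should be in use.
	 */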
2765 	CU_ASSERT(bs->num_free_clusters == bs->total_clusters - (super_block.md_start +
2766 			super_block.md_len));
2767 
2768 	spdk_bs_unload(bs, bs_op_complete, NULL);
2769 	poll_threads();
2770 	CU_ASSERT(g_bserrno == 0);
2771 	g_bs = NULL;
2772 }
2773 
2774 /*
2775  * Create a blobstore and then unload it.
2776  */
2777 static void
2778 bs_unload(void)
2779 {
2780 	struct spdk_blob_store *bs = g_bs;
2781 	struct spdk_blob *blob;
2782 
2783 	/* Create a blob and open it. */
2784 	blob = ut_blob_create_and_open(bs, NULL);
2785 
2786 	/* Try to unload the blobstore; it should fail while a blob is still open */
2787 	g_bserrno = -1;
2788 	spdk_bs_unload(bs, bs_op_complete, NULL);
2789 	poll_threads();
2790 	CU_ASSERT(g_bserrno == -EBUSY);
2791 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2792 
2793 	/* Close the blob, then successfully unload blobstore */
2794 	g_bserrno = -1;
2795 	spdk_blob_close(blob, blob_op_complete, NULL);
2796 	poll_threads();
2797 	CU_ASSERT(g_bserrno == 0);
2798 }
2799 
2800 /*
2801  * Create a blobstore with a cluster size different than the default, and ensure it is
2802  *  persisted.
2803  */
2804 static void
2805 bs_cluster_sz(void)
2806 {
2807 	struct spdk_blob_store *bs;
2808 	struct spdk_bs_dev *dev;
2809 	struct spdk_bs_opts opts;
2810 	uint32_t cluster_sz;
2811 
2812 	/* Set cluster size to zero */
2813 	dev = init_dev();
2814 	spdk_bs_opts_init(&opts, sizeof(opts));
2815 	opts.cluster_sz = 0;
2816 
2817 	/* Initialize a new blob store */
2818 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2819 	poll_threads();
2820 	CU_ASSERT(g_bserrno == -EINVAL);
2821 	SPDK_CU_ASSERT_FATAL(g_bs == NULL);
2822 
2823 	/*
2824 	 * Set the cluster size to the blobstore page size;
2825 	 * to work, it must be at least twice the blobstore page size.
2826 	 */
2827 	dev = init_dev();
2828 	spdk_bs_opts_init(&opts, sizeof(opts));
2829 	opts.cluster_sz = SPDK_BS_PAGE_SIZE;
2830 
2831 	/* Initialize a new blob store */
2832 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2833 	poll_threads();
2834 	CU_ASSERT(g_bserrno == -ENOMEM);
2835 	SPDK_CU_ASSERT_FATAL(g_bs == NULL);
2836 
2837 	/*
2838 	 * Set the cluster size to less than the page size;
2839 	 * to work, it must be at least twice the blobstore page size.
2840 	 */
2841 	dev = init_dev();
2842 	spdk_bs_opts_init(&opts, sizeof(opts));
2843 	opts.cluster_sz = SPDK_BS_PAGE_SIZE - 1;
2844 
2845 	/* Initialize a new blob store */
2846 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2847 	poll_threads();
2848 	CU_ASSERT(g_bserrno == -EINVAL);
2849 	SPDK_CU_ASSERT_FATAL(g_bs == NULL);
2850 
2851 	/* Set cluster size to twice the default */
2852 	dev = init_dev();
2853 	spdk_bs_opts_init(&opts, sizeof(opts));
2854 	opts.cluster_sz *= 2;
2855 	cluster_sz = opts.cluster_sz;
2856 
2857 	/* Initialize a new blob store */
2858 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2859 	poll_threads();
2860 	CU_ASSERT(g_bserrno == 0);
2861 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2862 	bs = g_bs;
2863 
2864 	CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz);
2865 
2866 	ut_bs_reload(&bs, &opts);
2867 
2868 	CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz);
2869 
2870 	spdk_bs_unload(bs, bs_op_complete, NULL);
2871 	poll_threads();
2872 	CU_ASSERT(g_bserrno == 0);
2873 	g_bs = NULL;
2874 }
2875 
2876 /*
2877  * Create a blobstore, reload it and ensure total usable cluster count
2878  *  stays the same.
2879  */
2880 static void
2881 bs_usable_clusters(void)
2882 {
2883 	struct spdk_blob_store *bs = g_bs;
2884 	struct spdk_blob *blob;
2885 	uint32_t clusters;
2886 	int i;
2887 
2888 
2889 	clusters = spdk_bs_total_data_cluster_count(bs);
2890 
2891 	ut_bs_reload(&bs, NULL);
2892 
2893 	CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters);
2894 
2895 	/* Create and resize blobs to make sure that the usable cluster count won't change */
2896 	for (i = 0; i < 4; i++) {
2897 		g_bserrno = -1;
2898 		g_blobid = SPDK_BLOBID_INVALID;
2899 		blob = ut_blob_create_and_open(bs, NULL);
2900 
2901 		spdk_blob_resize(blob, 10, blob_op_complete, NULL);
2902 		poll_threads();
2903 		CU_ASSERT(g_bserrno == 0);
2904 
2905 		g_bserrno = -1;
2906 		spdk_blob_close(blob, blob_op_complete, NULL);
2907 		poll_threads();
2908 		CU_ASSERT(g_bserrno == 0);
2909 
2910 		CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters);
2911 	}
2912 
2913 	/* Reload the blob store to make sure that nothing changed */
2914 	ut_bs_reload(&bs, NULL);
2915 
2916 	CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters);
2917 }
2918 
2919 /*
2920  * Test resizing of the metadata blob.  This requires creating enough blobs
2921  *  so that one cluster is not enough to fit the metadata for those blobs.
2922  *  To induce this condition to happen more quickly, we reduce the cluster
2923  *  size to 16KB, which means only 4 4KB blob metadata pages can fit.
2924  */
2925 static void
2926 bs_resize_md(void)
2927 {
2928 	struct spdk_blob_store *bs;
2929 	const int CLUSTER_PAGE_COUNT = 4;
2930 	const int NUM_BLOBS = CLUSTER_PAGE_COUNT * 4;
2931 	struct spdk_bs_dev *dev;
2932 	struct spdk_bs_opts opts;
2933 	struct spdk_blob *blob;
2934 	struct spdk_blob_opts blob_opts;
2935 	uint32_t cluster_sz;
2936 	spdk_blob_id blobids[NUM_BLOBS];
2937 	int i;
2938 
2939 
2940 	dev = init_dev();
2941 	spdk_bs_opts_init(&opts, sizeof(opts));
2942 	opts.cluster_sz = CLUSTER_PAGE_COUNT * 4096;
2943 	cluster_sz = opts.cluster_sz;
2944 
2945 	/* Initialize a new blob store */
2946 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2947 	poll_threads();
2948 	CU_ASSERT(g_bserrno == 0);
2949 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2950 	bs = g_bs;
2951 
2952 	CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz);
2953 
2954 	ut_spdk_blob_opts_init(&blob_opts);
2955 
2956 	for (i = 0; i < NUM_BLOBS; i++) {
2957 		g_bserrno = -1;
2958 		g_blobid = SPDK_BLOBID_INVALID;
2959 		spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
2960 		poll_threads();
2961 		CU_ASSERT(g_bserrno == 0);
2962 		CU_ASSERT(g_blobid !=  SPDK_BLOBID_INVALID);
2963 		blobids[i] = g_blobid;
2964 	}
2965 
2966 	ut_bs_reload(&bs, &opts);
2967 
2968 	CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz);
2969 
2970 	for (i = 0; i < NUM_BLOBS; i++) {
2971 		g_bserrno = -1;
2972 		g_blob = NULL;
2973 		spdk_bs_open_blob(bs, blobids[i], blob_op_with_handle_complete, NULL);
2974 		poll_threads();
2975 		CU_ASSERT(g_bserrno == 0);
2976 		CU_ASSERT(g_blob !=  NULL);
2977 		blob = g_blob;
2978 		g_bserrno = -1;
2979 		spdk_blob_close(blob, blob_op_complete, NULL);
2980 		poll_threads();
2981 		CU_ASSERT(g_bserrno == 0);
2982 	}
2983 
2984 	spdk_bs_unload(bs, bs_op_complete, NULL);
2985 	poll_threads();
2986 	CU_ASSERT(g_bserrno == 0);
2987 	g_bs = NULL;
2988 }
2989 
2990 static void
2991 bs_destroy(void)
2992 {
2993 	struct spdk_blob_store *bs;
2994 	struct spdk_bs_dev *dev;
2995 
2996 	/* Initialize a new blob store */
2997 	dev = init_dev();
2998 	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
2999 	poll_threads();
3000 	CU_ASSERT(g_bserrno == 0);
3001 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3002 	bs = g_bs;
3003 
3004 	/* Destroy the blob store */
3005 	g_bserrno = -1;
3006 	spdk_bs_destroy(bs, bs_op_complete, NULL);
3007 	poll_threads();
3008 	CU_ASSERT(g_bserrno == 0);
3009 
3010 	/* Loading a non-existent blob store should fail. */
3011 	g_bs = NULL;
3012 	dev = init_dev();
3013 
3014 	g_bserrno = 0;
3015 	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
3016 	poll_threads();
3017 	CU_ASSERT(g_bserrno != 0);
3018 }
3019 
3020 /* Try to hit all of the corner cases associated with serializing
3021  * a blob to disk
3022  */
3023 static void
3024 blob_serialize_test(void)
3025 {
3026 	struct spdk_bs_dev *dev;
3027 	struct spdk_bs_opts opts;
3028 	struct spdk_blob_store *bs;
3029 	spdk_blob_id blobid[2];
3030 	struct spdk_blob *blob[2];
3031 	uint64_t i;
3032 	char *value;
3033 	int rc;
3034 
3035 	dev = init_dev();
3036 
3037 	/* Initialize a new blobstore with very small clusters */
3038 	spdk_bs_opts_init(&opts, sizeof(opts));
3039 	opts.cluster_sz = dev->blocklen * 8;
3040 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
3041 	poll_threads();
3042 	CU_ASSERT(g_bserrno == 0);
3043 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3044 	bs = g_bs;
3045 
3046 	/* Create and open two blobs */
3047 	for (i = 0; i < 2; i++) {
3048 		blob[i] = ut_blob_create_and_open(bs, NULL);
3049 		blobid[i] = spdk_blob_get_id(blob[i]);
3050 
3051 		/* Set a fairly large xattr on both blobs to eat up
3052 		 * metadata space
3053 		 */
3054 		value = calloc(dev->blocklen - 64, sizeof(char));
3055 		SPDK_CU_ASSERT_FATAL(value != NULL);
3056 		memset(value, i, dev->blocklen / 2);
3057 		rc = spdk_blob_set_xattr(blob[i], "name", value, dev->blocklen - 64);
3058 		CU_ASSERT(rc == 0);
3059 		free(value);
3060 	}
3061 
3062 	/* Resize the blobs, alternating 1 cluster at a time.
3063 	 * This thwarts run length encoding and will cause spill
3064 	 * over of the extents.
3065 	 */
3066 	for (i = 0; i < 6; i++) {
3067 		spdk_blob_resize(blob[i % 2], (i / 2) + 1, blob_op_complete, NULL);
3068 		poll_threads();
3069 		CU_ASSERT(g_bserrno == 0);
3070 	}
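	/* After the loop each blob has been resized 1 -> 2 -> 3 clusters, which is what
	 * the post-reload check below expects.
	 */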
3071 
3072 	for (i = 0; i < 2; i++) {
3073 		spdk_blob_sync_md(blob[i], blob_op_complete, NULL);
3074 		poll_threads();
3075 		CU_ASSERT(g_bserrno == 0);
3076 	}
3077 
3078 	/* Close the blobs */
3079 	for (i = 0; i < 2; i++) {
3080 		spdk_blob_close(blob[i], blob_op_complete, NULL);
3081 		poll_threads();
3082 		CU_ASSERT(g_bserrno == 0);
3083 	}
3084 
3085 	ut_bs_reload(&bs, &opts);
3086 
3087 	for (i = 0; i < 2; i++) {
3088 		blob[i] = NULL;
3089 
3090 		spdk_bs_open_blob(bs, blobid[i], blob_op_with_handle_complete, NULL);
3091 		poll_threads();
3092 		CU_ASSERT(g_bserrno == 0);
3093 		CU_ASSERT(g_blob != NULL);
3094 		blob[i] = g_blob;
3095 
3096 		CU_ASSERT(spdk_blob_get_num_clusters(blob[i]) == 3);
3097 
3098 		spdk_blob_close(blob[i], blob_op_complete, NULL);
3099 		poll_threads();
3100 		CU_ASSERT(g_bserrno == 0);
3101 	}
3102 
3103 	spdk_bs_unload(bs, bs_op_complete, NULL);
3104 	poll_threads();
3105 	CU_ASSERT(g_bserrno == 0);
3106 	g_bs = NULL;
3107 }
3108 
3109 static void
3110 blob_crc(void)
3111 {
3112 	struct spdk_blob_store *bs = g_bs;
3113 	struct spdk_blob *blob;
3114 	spdk_blob_id blobid;
3115 	uint32_t page_num;
3116 	int index;
3117 	struct spdk_blob_md_page *page;
3118 
3119 	blob = ut_blob_create_and_open(bs, NULL);
3120 	blobid = spdk_blob_get_id(blob);
3121 
3122 	spdk_blob_close(blob, blob_op_complete, NULL);
3123 	poll_threads();
3124 	CU_ASSERT(g_bserrno == 0);
3125 
3126 	page_num = bs_blobid_to_page(blobid);
3127 	index = DEV_BUFFER_BLOCKLEN * (bs->md_start + page_num);
3128 	page = (struct spdk_blob_md_page *)&g_dev_buffer[index];
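	/* Zero the stored CRC so the metadata page fails its checksum check; both the
	 * open and the delete below are then expected to fail with -EINVAL.
	 */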
3129 	page->crc = 0;
3130 
3131 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
3132 	poll_threads();
3133 	CU_ASSERT(g_bserrno == -EINVAL);
3134 	CU_ASSERT(g_blob == NULL);
3135 	g_bserrno = 0;
3136 
3137 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
3138 	poll_threads();
3139 	CU_ASSERT(g_bserrno == -EINVAL);
3140 }
3141 
3142 static void
3143 super_block_crc(void)
3144 {
3145 	struct spdk_blob_store *bs;
3146 	struct spdk_bs_dev *dev;
3147 	struct spdk_bs_super_block *super_block;
3148 
3149 	dev = init_dev();
3150 	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
3151 	poll_threads();
3152 	CU_ASSERT(g_bserrno == 0);
3153 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3154 	bs = g_bs;
3155 
3156 	spdk_bs_unload(bs, bs_op_complete, NULL);
3157 	poll_threads();
3158 	CU_ASSERT(g_bserrno == 0);
3159 	g_bs = NULL;
3160 
3161 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
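	/* Invalidate the super block CRC; the load below should detect the mismatch
	 * and fail with -EILSEQ.
	 */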
3162 	super_block->crc = 0;
3163 	dev = init_dev();
3164 
3165 	/* Load an existing blob store */
3166 	g_bserrno = 0;
3167 	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
3168 	poll_threads();
3169 	CU_ASSERT(g_bserrno == -EILSEQ);
3170 }
3171 
3172 /* For the blob dirty shutdown test case we run the following sub-test cases:
3173  * 1 Initialize a new blob store and create one super blob with some xattrs, then
3174  *   dirty-shutdown, reload the blob store and verify the xattrs.
3175  * 2 Resize the blob from 10 clusters to 20 clusters, then dirty-shutdown,
3176  *   reload the blob store and verify the cluster count.
3177  * 3 Create a second blob, then dirty-shutdown, reload the blob store
3178  *   and verify the second blob.
3179  * 4 Delete the second blob, then dirty-shutdown, reload the blob store
3180  *   and verify the second blob is invalid.
3181  * 5 Create the second blob again along with a third blob, corrupt the md of the
3182  *   second blob so that it becomes invalid, then dirty-shutdown and reload the
3183  *   blob store; the second blob should be invalid and the third blob should be
3184  *   intact.
3185  */
3186 static void
3187 blob_dirty_shutdown(void)
3188 {
3189 	int rc;
3190 	int index;
3191 	struct spdk_blob_store *bs = g_bs;
3192 	spdk_blob_id blobid1, blobid2, blobid3;
3193 	struct spdk_blob *blob = g_blob;
3194 	uint64_t length;
3195 	uint64_t free_clusters;
3196 	const void *value;
3197 	size_t value_len;
3198 	uint32_t page_num;
3199 	struct spdk_blob_md_page *page;
3200 	struct spdk_blob_opts blob_opts;
3201 
3202 	/* Create first blob */
3203 	blobid1 = spdk_blob_get_id(blob);
3204 
3205 	/* Set some xattrs */
3206 	rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
3207 	CU_ASSERT(rc == 0);
3208 
3209 	length = 2345;
3210 	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
3211 	CU_ASSERT(rc == 0);
3212 
3213 	/* Put an xattr that exactly fills a single page.
3214 	 * This results in additional pages being added to the MD:
3215 	 * the first holds the flags and the smaller xattrs, the second the large xattr,
3216 	 * and the third just the extents.
3217 	 */
3218 	size_t xattr_length = 4072 - sizeof(struct spdk_blob_md_descriptor_xattr) -
3219 			      strlen("large_xattr");
3220 	char *xattr = calloc(xattr_length, sizeof(char));
3221 	SPDK_CU_ASSERT_FATAL(xattr != NULL);
3222 	rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
3223 	free(xattr);
3224 	SPDK_CU_ASSERT_FATAL(rc == 0);
3225 
3226 	/* Resize the blob */
3227 	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
3228 	poll_threads();
3229 	CU_ASSERT(g_bserrno == 0);
3230 
3231 	/* Set the blob as the super blob */
3232 	spdk_bs_set_super(bs, blobid1, blob_op_complete, NULL);
3233 	poll_threads();
3234 	CU_ASSERT(g_bserrno == 0);
3235 
3236 	free_clusters = spdk_bs_free_cluster_count(bs);
3237 
3238 	spdk_blob_close(blob, blob_op_complete, NULL);
3239 	poll_threads();
3240 	CU_ASSERT(g_bserrno == 0);
3241 	blob = NULL;
3242 	g_blob = NULL;
3243 	g_blobid = SPDK_BLOBID_INVALID;
3244 
3245 	ut_bs_dirty_load(&bs, NULL);
3246 
3247 	/* Get the super blob */
3248 	spdk_bs_get_super(bs, blob_op_with_id_complete, NULL);
3249 	poll_threads();
3250 	CU_ASSERT(g_bserrno == 0);
3251 	CU_ASSERT(blobid1 == g_blobid);
3252 
3253 	spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL);
3254 	poll_threads();
3255 	CU_ASSERT(g_bserrno == 0);
3256 	CU_ASSERT(g_blob != NULL);
3257 	blob = g_blob;
3258 
3259 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3260 
3261 	/* Get the xattrs */
3262 	value = NULL;
3263 	rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len);
3264 	CU_ASSERT(rc == 0);
3265 	SPDK_CU_ASSERT_FATAL(value != NULL);
3266 	CU_ASSERT(*(uint64_t *)value == length);
3267 	CU_ASSERT(value_len == 8);
3268 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
3269 
3270 	/* Resize the blob */
3271 	spdk_blob_resize(blob, 20, blob_op_complete, NULL);
3272 	poll_threads();
3273 	CU_ASSERT(g_bserrno == 0);
3274 
3275 	free_clusters = spdk_bs_free_cluster_count(bs);
3276 
3277 	spdk_blob_close(blob, blob_op_complete, NULL);
3278 	poll_threads();
3279 	CU_ASSERT(g_bserrno == 0);
3280 	blob = NULL;
3281 	g_blob = NULL;
3282 	g_blobid = SPDK_BLOBID_INVALID;
3283 
3284 	ut_bs_dirty_load(&bs, NULL);
3285 
3286 	spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL);
3287 	poll_threads();
3288 	CU_ASSERT(g_bserrno == 0);
3289 	CU_ASSERT(g_blob != NULL);
3290 	blob = g_blob;
3291 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 20);
3292 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3293 
3294 	spdk_blob_close(blob, blob_op_complete, NULL);
3295 	poll_threads();
3296 	CU_ASSERT(g_bserrno == 0);
3297 	blob = NULL;
3298 	g_blob = NULL;
3299 	g_blobid = SPDK_BLOBID_INVALID;
3300 
3301 	/* Create second blob */
3302 	blob = ut_blob_create_and_open(bs, NULL);
3303 	blobid2 = spdk_blob_get_id(blob);
3304 
3305 	/* Set some xattrs */
3306 	rc = spdk_blob_set_xattr(blob, "name", "log1.txt", strlen("log1.txt") + 1);
3307 	CU_ASSERT(rc == 0);
3308 
3309 	length = 5432;
3310 	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
3311 	CU_ASSERT(rc == 0);
3312 
3313 	/* Resize the blob */
3314 	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
3315 	poll_threads();
3316 	CU_ASSERT(g_bserrno == 0);
3317 
3318 	free_clusters = spdk_bs_free_cluster_count(bs);
3319 
3320 	spdk_blob_close(blob, blob_op_complete, NULL);
3321 	poll_threads();
3322 	CU_ASSERT(g_bserrno == 0);
3323 	blob = NULL;
3324 	g_blob = NULL;
3325 	g_blobid = SPDK_BLOBID_INVALID;
3326 
3327 	ut_bs_dirty_load(&bs, NULL);
3328 
3329 	spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL);
3330 	poll_threads();
3331 	CU_ASSERT(g_bserrno == 0);
3332 	CU_ASSERT(g_blob != NULL);
3333 	blob = g_blob;
3334 
3335 	/* Get the xattrs */
3336 	value = NULL;
3337 	rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len);
3338 	CU_ASSERT(rc == 0);
3339 	SPDK_CU_ASSERT_FATAL(value != NULL);
3340 	CU_ASSERT(*(uint64_t *)value == length);
3341 	CU_ASSERT(value_len == 8);
3342 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
3343 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3344 
3345 	ut_blob_close_and_delete(bs, blob);
3346 
3347 	free_clusters = spdk_bs_free_cluster_count(bs);
3348 
3349 	ut_bs_dirty_load(&bs, NULL);
3350 
3351 	spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL);
3352 	poll_threads();
3353 	CU_ASSERT(g_bserrno != 0);
3354 	CU_ASSERT(g_blob == NULL);
3355 
3356 	spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL);
3357 	poll_threads();
3358 	CU_ASSERT(g_bserrno == 0);
3359 	CU_ASSERT(g_blob != NULL);
3360 	blob = g_blob;
3361 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3362 	spdk_blob_close(blob, blob_op_complete, NULL);
3363 	poll_threads();
3364 	CU_ASSERT(g_bserrno == 0);
3365 
3366 	ut_bs_reload(&bs, NULL);
3367 
3368 	/* Create second blob */
3369 	ut_spdk_blob_opts_init(&blob_opts);
3370 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
3371 	poll_threads();
3372 	CU_ASSERT(g_bserrno == 0);
3373 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
3374 	blobid2 = g_blobid;
3375 
3376 	/* Create third blob */
3377 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
3378 	poll_threads();
3379 	CU_ASSERT(g_bserrno == 0);
3380 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
3381 	blobid3 = g_blobid;
3382 
3383 	spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL);
3384 	poll_threads();
3385 	CU_ASSERT(g_bserrno == 0);
3386 	CU_ASSERT(g_blob != NULL);
3387 	blob = g_blob;
3388 
3389 	/* Set some xattrs for second blob */
3390 	rc = spdk_blob_set_xattr(blob, "name", "log1.txt", strlen("log1.txt") + 1);
3391 	CU_ASSERT(rc == 0);
3392 
3393 	length = 5432;
3394 	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
3395 	CU_ASSERT(rc == 0);
3396 
3397 	spdk_blob_close(blob, blob_op_complete, NULL);
3398 	poll_threads();
3399 	CU_ASSERT(g_bserrno == 0);
3400 	blob = NULL;
3401 	g_blob = NULL;
3402 	g_blobid = SPDK_BLOBID_INVALID;
3403 
3404 	spdk_bs_open_blob(bs, blobid3, blob_op_with_handle_complete, NULL);
3405 	poll_threads();
3406 	CU_ASSERT(g_bserrno == 0);
3407 	CU_ASSERT(g_blob != NULL);
3408 	blob = g_blob;
3409 
3410 	/* Set some xattrs for third blob */
3411 	rc = spdk_blob_set_xattr(blob, "name", "log2.txt", strlen("log2.txt") + 1);
3412 	CU_ASSERT(rc == 0);
3413 
3414 	length = 5432;
3415 	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
3416 	CU_ASSERT(rc == 0);
3417 
3418 	spdk_blob_close(blob, blob_op_complete, NULL);
3419 	poll_threads();
3420 	CU_ASSERT(g_bserrno == 0);
3421 	blob = NULL;
3422 	g_blob = NULL;
3423 	g_blobid = SPDK_BLOBID_INVALID;
3424 
3425 	/* Mark second blob as invalid */
3426 	page_num = bs_blobid_to_page(blobid2);
3427 
3428 	index = DEV_BUFFER_BLOCKLEN * (bs->md_start + page_num);
3429 	page = (struct spdk_blob_md_page *)&g_dev_buffer[index];
3430 	page->sequence_num = 1;
3431 	page->crc = blob_md_page_calc_crc(page);
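	/* Setting a non-zero sequence_num makes this page look like a continuation
	 * page rather than the head of blobid2's metadata chain, while the fresh CRC
	 * keeps it from being rejected as plain corruption.  The dirty load below
	 * should therefore fail to open blobid2 but still open blobid3. */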
3432 
3433 	free_clusters = spdk_bs_free_cluster_count(bs);
3434 
3435 	ut_bs_dirty_load(&bs, NULL);
3436 
3437 	spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL);
3438 	poll_threads();
3439 	CU_ASSERT(g_bserrno != 0);
3440 	CU_ASSERT(g_blob == NULL);
3441 
3442 	spdk_bs_open_blob(bs, blobid3, blob_op_with_handle_complete, NULL);
3443 	poll_threads();
3444 	CU_ASSERT(g_bserrno == 0);
3445 	CU_ASSERT(g_blob != NULL);
3446 	blob = g_blob;
3447 
3448 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3449 }
3450 
3451 static void
3452 blob_flags(void)
3453 {
3454 	struct spdk_blob_store *bs = g_bs;
3455 	spdk_blob_id blobid_invalid, blobid_data_ro, blobid_md_ro;
3456 	struct spdk_blob *blob_invalid, *blob_data_ro, *blob_md_ro;
3457 	struct spdk_blob_opts blob_opts;
3458 	int rc;
3459 
3460 	/* Create three blobs - one each for testing invalid, data_ro and md_ro flags. */
3461 	blob_invalid = ut_blob_create_and_open(bs, NULL);
3462 	blobid_invalid = spdk_blob_get_id(blob_invalid);
3463 
3464 	blob_data_ro = ut_blob_create_and_open(bs, NULL);
3465 	blobid_data_ro = spdk_blob_get_id(blob_data_ro);
3466 
3467 	ut_spdk_blob_opts_init(&blob_opts);
3468 	blob_opts.clear_method = BLOB_CLEAR_WITH_WRITE_ZEROES;
3469 	blob_md_ro = ut_blob_create_and_open(bs, &blob_opts);
3470 	blobid_md_ro = spdk_blob_get_id(blob_md_ro);
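	/* The clear_method selected at creation time is carried in the blob's md_ro
	 * flags, which is why it is visible under SPDK_BLOB_MD_RO_FLAGS_MASK here. */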
3471 	CU_ASSERT((blob_md_ro->md_ro_flags & SPDK_BLOB_MD_RO_FLAGS_MASK) == BLOB_CLEAR_WITH_WRITE_ZEROES);
3472 
3473 	/* Change the size of blob_data_ro to check that flags are serialized
3474 	 * when the blob has a non-zero number of extents */
3475 	spdk_blob_resize(blob_data_ro, 10, blob_op_complete, NULL);
3476 	poll_threads();
3477 	CU_ASSERT(g_bserrno == 0);
3478 
3479 	/* Set an xattr to check that flags are serialized
3480 	 * when the blob has a non-zero number of xattrs */
3481 	rc = spdk_blob_set_xattr(blob_md_ro, "name", "log.txt", strlen("log.txt") + 1);
3482 	CU_ASSERT(rc == 0);
3483 
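	/* Set flag bits that no current release defines and mark the blobs dirty so
	 * the syncs below persist them.  After the reload, an unknown invalid flag
	 * should make the blob fail to open, an unknown data_ro flag should force
	 * both data and metadata read-only, and an unknown md_ro flag should force
	 * metadata read-only. */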
3484 	blob_invalid->invalid_flags = (1ULL << 63);
3485 	blob_invalid->state = SPDK_BLOB_STATE_DIRTY;
3486 	blob_data_ro->data_ro_flags = (1ULL << 62);
3487 	blob_data_ro->state = SPDK_BLOB_STATE_DIRTY;
3488 	blob_md_ro->md_ro_flags = (1ULL << 61);
3489 	blob_md_ro->state = SPDK_BLOB_STATE_DIRTY;
3490 
3491 	g_bserrno = -1;
3492 	spdk_blob_sync_md(blob_invalid, blob_op_complete, NULL);
3493 	poll_threads();
3494 	CU_ASSERT(g_bserrno == 0);
3495 	g_bserrno = -1;
3496 	spdk_blob_sync_md(blob_data_ro, blob_op_complete, NULL);
3497 	poll_threads();
3498 	CU_ASSERT(g_bserrno == 0);
3499 	g_bserrno = -1;
3500 	spdk_blob_sync_md(blob_md_ro, blob_op_complete, NULL);
3501 	poll_threads();
3502 	CU_ASSERT(g_bserrno == 0);
3503 
3504 	g_bserrno = -1;
3505 	spdk_blob_close(blob_invalid, blob_op_complete, NULL);
3506 	poll_threads();
3507 	CU_ASSERT(g_bserrno == 0);
3508 	blob_invalid = NULL;
3509 	g_bserrno = -1;
3510 	spdk_blob_close(blob_data_ro, blob_op_complete, NULL);
3511 	poll_threads();
3512 	CU_ASSERT(g_bserrno == 0);
3513 	blob_data_ro = NULL;
3514 	g_bserrno = -1;
3515 	spdk_blob_close(blob_md_ro, blob_op_complete, NULL);
3516 	poll_threads();
3517 	CU_ASSERT(g_bserrno == 0);
3518 	blob_md_ro = NULL;
3519 
3520 	g_blob = NULL;
3521 	g_blobid = SPDK_BLOBID_INVALID;
3522 
3523 	ut_bs_reload(&bs, NULL);
3524 
3525 	g_blob = NULL;
3526 	g_bserrno = 0;
3527 	spdk_bs_open_blob(bs, blobid_invalid, blob_op_with_handle_complete, NULL);
3528 	poll_threads();
3529 	CU_ASSERT(g_bserrno != 0);
3530 	CU_ASSERT(g_blob == NULL);
3531 
3532 	g_blob = NULL;
3533 	g_bserrno = -1;
3534 	spdk_bs_open_blob(bs, blobid_data_ro, blob_op_with_handle_complete, NULL);
3535 	poll_threads();
3536 	CU_ASSERT(g_bserrno == 0);
3537 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
3538 	blob_data_ro = g_blob;
3539 	/* If an unknown data_ro flag was found, the blob should be marked both data and md read-only. */
3540 	CU_ASSERT(blob_data_ro->data_ro == true);
3541 	CU_ASSERT(blob_data_ro->md_ro == true);
3542 	CU_ASSERT(spdk_blob_get_num_clusters(blob_data_ro) == 10);
3543 
3544 	g_blob = NULL;
3545 	g_bserrno = -1;
3546 	spdk_bs_open_blob(bs, blobid_md_ro, blob_op_with_handle_complete, NULL);
3547 	poll_threads();
3548 	CU_ASSERT(g_bserrno == 0);
3549 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
3550 	blob_md_ro = g_blob;
3551 	CU_ASSERT(blob_md_ro->data_ro == false);
3552 	CU_ASSERT(blob_md_ro->md_ro == true);
3553 
3554 	g_bserrno = -1;
3555 	spdk_blob_sync_md(blob_md_ro, blob_op_complete, NULL);
3556 	poll_threads();
3557 	CU_ASSERT(g_bserrno == 0);
3558 
3559 	ut_blob_close_and_delete(bs, blob_data_ro);
3560 	ut_blob_close_and_delete(bs, blob_md_ro);
3561 }
3562 
3563 static void
3564 bs_version(void)
3565 {
3566 	struct spdk_bs_super_block *super;
3567 	struct spdk_blob_store *bs = g_bs;
3568 	struct spdk_bs_dev *dev;
3569 	struct spdk_blob *blob;
3570 	struct spdk_blob_opts blob_opts;
3571 	spdk_blob_id blobid;
3572 
3573 	/* Unload the blob store */
3574 	spdk_bs_unload(bs, bs_op_complete, NULL);
3575 	poll_threads();
3576 	CU_ASSERT(g_bserrno == 0);
3577 	g_bs = NULL;
3578 
3579 	/*
3580 	 * Change the bs version on disk.  This will allow us to
3581 	 *  test that the version does not get modified automatically
3582 	 *  when loading and unloading the blobstore.
3583 	 */
3584 	super = (struct spdk_bs_super_block *)&g_dev_buffer[0];
3585 	CU_ASSERT(super->version == SPDK_BS_VERSION);
3586 	CU_ASSERT(super->clean == 1);
3587 	super->version = 2;
3588 	/*
3589 	 * Version 2 metadata does not have a used blobid mask, so clear
3590 	 *  those fields in the super block and zero the corresponding
3591 	 *  region on "disk".  We will use this to ensure blob IDs are
3592 	 *  correctly reconstructed.
3593 	 */
3594 	memset(&g_dev_buffer[super->used_blobid_mask_start * SPDK_BS_PAGE_SIZE], 0,
3595 	       super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE);
3596 	super->used_blobid_mask_start = 0;
3597 	super->used_blobid_mask_len = 0;
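	/* After hand-editing the super block, recompute its CRC; otherwise the load
	 * below would likely reject the super block as corrupt. */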
3598 	super->crc = blob_md_page_calc_crc(super);
3599 
3600 	/* Load an existing blob store */
3601 	dev = init_dev();
3602 	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
3603 	poll_threads();
3604 	CU_ASSERT(g_bserrno == 0);
3605 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3606 	CU_ASSERT(super->clean == 1);
3607 	bs = g_bs;
3608 
3609 	/*
3610 	 * Create a blob - just to make sure that unloading the blobstore
3611 	 *  results in writing the super block (since metadata pages
3612 	 *  were allocated).
3613 	 */
3614 	ut_spdk_blob_opts_init(&blob_opts);
3615 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
3616 	poll_threads();
3617 	CU_ASSERT(g_bserrno == 0);
3618 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
3619 	blobid = g_blobid;
3620 
3621 	/* Unload the blob store */
3622 	spdk_bs_unload(bs, bs_op_complete, NULL);
3623 	poll_threads();
3624 	CU_ASSERT(g_bserrno == 0);
3625 	g_bs = NULL;
3626 	CU_ASSERT(super->version == 2);
3627 	CU_ASSERT(super->used_blobid_mask_start == 0);
3628 	CU_ASSERT(super->used_blobid_mask_len == 0);
3629 
3630 	dev = init_dev();
3631 	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
3632 	poll_threads();
3633 	CU_ASSERT(g_bserrno == 0);
3634 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3635 	bs = g_bs;
3636 
3637 	g_blob = NULL;
3638 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
3639 	poll_threads();
3640 	CU_ASSERT(g_bserrno == 0);
3641 	CU_ASSERT(g_blob != NULL);
3642 	blob = g_blob;
3643 
3644 	ut_blob_close_and_delete(bs, blob);
3645 
3646 	CU_ASSERT(super->version == 2);
3647 	CU_ASSERT(super->used_blobid_mask_start == 0);
3648 	CU_ASSERT(super->used_blobid_mask_len == 0);
3649 }
3650 
3651 static void
3652 blob_set_xattrs_test(void)
3653 {
3654 	struct spdk_blob_store *bs = g_bs;
3655 	struct spdk_blob *blob;
3656 	struct spdk_blob_opts opts;
3657 	const void *value;
3658 	size_t value_len;
3659 	char *xattr;
3660 	size_t xattr_length;
3661 	int rc;
3662 
3663 	/* Create blob with extra attributes */
3664 	ut_spdk_blob_opts_init(&opts);
3665 
3666 	opts.xattrs.names = g_xattr_names;
3667 	opts.xattrs.get_value = _get_xattr_value;
3668 	opts.xattrs.count = 3;
3669 	opts.xattrs.ctx = &g_ctx;
3670 
3671 	blob = ut_blob_create_and_open(bs, &opts);
3672 
3673 	/* Get the xattrs */
3674 	value = NULL;
3675 
3676 	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len);
3677 	CU_ASSERT(rc == 0);
3678 	SPDK_CU_ASSERT_FATAL(value != NULL);
3679 	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
3680 	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);
3681 
3682 	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len);
3683 	CU_ASSERT(rc == 0);
3684 	SPDK_CU_ASSERT_FATAL(value != NULL);
3685 	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
3686 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);
3687 
3688 	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len);
3689 	CU_ASSERT(rc == 0);
3690 	SPDK_CU_ASSERT_FATAL(value != NULL);
3691 	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
3692 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);
3693 
3694 	/* Try to get a non-existent attribute */
3695 
3696 	rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len);
3697 	CU_ASSERT(rc == -ENOENT);
3698 
3699 	/* Try an xattr exceeding the maximum descriptor length within a single metadata page */
3700 	xattr_length = SPDK_BS_MAX_DESC_SIZE - sizeof(struct spdk_blob_md_descriptor_xattr) -
3701 		       strlen("large_xattr") + 1;
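	/* This sizes the xattr descriptor (header + name + value) to one byte more
	 * than SPDK_BS_MAX_DESC_SIZE, so the set_xattr call below is expected to
	 * fail with -ENOMEM. */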
3702 	xattr = calloc(xattr_length, sizeof(char));
3703 	SPDK_CU_ASSERT_FATAL(xattr != NULL);
3704 	rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
3705 	free(xattr);
3706 	SPDK_CU_ASSERT_FATAL(rc == -ENOMEM);
3707 
3708 	spdk_blob_close(blob, blob_op_complete, NULL);
3709 	poll_threads();
3710 	CU_ASSERT(g_bserrno == 0);
3711 	blob = NULL;
3712 	g_blob = NULL;
3713 	g_blobid = SPDK_BLOBID_INVALID;
3714 
3715 	/* NULL callback */
3716 	ut_spdk_blob_opts_init(&opts);
3717 	opts.xattrs.names = g_xattr_names;
3718 	opts.xattrs.get_value = NULL;
3719 	opts.xattrs.count = 1;
3720 	opts.xattrs.ctx = &g_ctx;
3721 
3722 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
3723 	poll_threads();
3724 	CU_ASSERT(g_bserrno == -EINVAL);
3725 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
3726 
3727 	/* NULL values */
3728 	ut_spdk_blob_opts_init(&opts);
3729 	opts.xattrs.names = g_xattr_names;
3730 	opts.xattrs.get_value = _get_xattr_value_null;
3731 	opts.xattrs.count = 1;
3732 	opts.xattrs.ctx = NULL;
3733 
3734 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
3735 	poll_threads();
3736 	CU_ASSERT(g_bserrno == -EINVAL);
3737 }
3738 
3739 static void
3740 blob_thin_prov_alloc(void)
3741 {
3742 	struct spdk_blob_store *bs = g_bs;
3743 	struct spdk_blob *blob;
3744 	struct spdk_blob_opts opts;
3745 	spdk_blob_id blobid;
3746 	uint64_t free_clusters;
3747 
3748 	free_clusters = spdk_bs_free_cluster_count(bs);
3749 
3750 	/* Set blob as thin provisioned */
3751 	ut_spdk_blob_opts_init(&opts);
3752 	opts.thin_provision = true;
3753 
3754 	blob = ut_blob_create_and_open(bs, &opts);
3755 	blobid = spdk_blob_get_id(blob);
3756 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3757 
3758 	CU_ASSERT(blob->active.num_clusters == 0);
3759 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
3760 
3761 	/* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */
3762 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
3763 	poll_threads();
3764 	CU_ASSERT(g_bserrno == 0);
3765 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3766 	CU_ASSERT(blob->active.num_clusters == 5);
3767 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
3768 
3769 	/* Grow it to 1TB - still unallocated */
3770 	spdk_blob_resize(blob, 262144, blob_op_complete, NULL);
3771 	poll_threads();
3772 	CU_ASSERT(g_bserrno == 0);
3773 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3774 	CU_ASSERT(blob->active.num_clusters == 262144);
3775 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 262144);
3776 
3777 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
3778 	poll_threads();
3779 	CU_ASSERT(g_bserrno == 0);
3780 	/* Sync must not change anything */
3781 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3782 	CU_ASSERT(blob->active.num_clusters == 262144);
3783 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 262144);
3784 	/* Since no clusters are allocated,
3785 	 * the number of metadata pages is expected to be minimal.
3786 	 */
3787 	CU_ASSERT(blob->active.num_pages == 1);
3788 
3789 	/* Shrink the blob to 3 clusters - still unallocated */
3790 	spdk_blob_resize(blob, 3, blob_op_complete, NULL);
3791 	poll_threads();
3792 	CU_ASSERT(g_bserrno == 0);
3793 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3794 	CU_ASSERT(blob->active.num_clusters == 3);
3795 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 3);
3796 
3797 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
3798 	poll_threads();
3799 	CU_ASSERT(g_bserrno == 0);
3800 	/* Sync must not change anything */
3801 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3802 	CU_ASSERT(blob->active.num_clusters == 3);
3803 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 3);
3804 
3805 	spdk_blob_close(blob, blob_op_complete, NULL);
3806 	poll_threads();
3807 	CU_ASSERT(g_bserrno == 0);
3808 
3809 	ut_bs_reload(&bs, NULL);
3810 
3811 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
3812 	poll_threads();
3813 	CU_ASSERT(g_bserrno == 0);
3814 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
3815 	blob = g_blob;
3816 
3817 	/* Check that clusters allocation and size is still the same */
3818 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3819 	CU_ASSERT(blob->active.num_clusters == 3);
3820 
3821 	ut_blob_close_and_delete(bs, blob);
3822 }
3823 
3824 static void
3825 blob_insert_cluster_msg_test(void)
3826 {
3827 	struct spdk_blob_store *bs = g_bs;
3828 	struct spdk_blob *blob;
3829 	struct spdk_blob_opts opts;
3830 	spdk_blob_id blobid;
3831 	uint64_t free_clusters;
3832 	uint64_t new_cluster = 0;
3833 	uint32_t cluster_num = 3;
3834 	uint32_t extent_page = 0;
3835 
3836 	free_clusters = spdk_bs_free_cluster_count(bs);
3837 
3838 	/* Set blob as thin provisioned */
3839 	ut_spdk_blob_opts_init(&opts);
3840 	opts.thin_provision = true;
3841 	opts.num_clusters = 4;
3842 
3843 	blob = ut_blob_create_and_open(bs, &opts);
3844 	blobid = spdk_blob_get_id(blob);
3845 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3846 
3847 	CU_ASSERT(blob->active.num_clusters == 4);
3848 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 4);
3849 	CU_ASSERT(blob->active.clusters[cluster_num] == 0);
3850 
3851 	/* Specify cluster_num to allocate; new_cluster is returned so it can be inserted on the md_thread.
3852 	 * This simulates the behaviour when a cluster is allocated after blob creation,
3853 	 * such as in _spdk_bs_allocate_and_copy_cluster(). */
3854 	bs_allocate_cluster(blob, cluster_num, &new_cluster, &extent_page, false);
3855 	CU_ASSERT(blob->active.clusters[cluster_num] == 0);
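	/* The cluster has been claimed in the blobstore's allocation bitmap, but it
	 * is not attached to the blob until the insert message is handled on the
	 * md_thread below. */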
3856 
3857 	blob_insert_cluster_on_md_thread(blob, cluster_num, new_cluster, extent_page,
3858 					 blob_op_complete, NULL);
3859 	poll_threads();
3860 
3861 	CU_ASSERT(blob->active.clusters[cluster_num] != 0);
3862 
3863 	spdk_blob_close(blob, blob_op_complete, NULL);
3864 	poll_threads();
3865 	CU_ASSERT(g_bserrno == 0);
3866 
3867 	ut_bs_reload(&bs, NULL);
3868 
3869 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
3870 	poll_threads();
3871 	CU_ASSERT(g_bserrno == 0);
3872 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
3873 	blob = g_blob;
3874 
3875 	CU_ASSERT(blob->active.clusters[cluster_num] != 0);
3876 
3877 	ut_blob_close_and_delete(bs, blob);
3878 }
3879 
3880 static void
3881 blob_thin_prov_rw(void)
3882 {
3883 	static const uint8_t zero[10 * 4096] = { 0 };
3884 	struct spdk_blob_store *bs = g_bs;
3885 	struct spdk_blob *blob, *blob_id0;
3886 	struct spdk_io_channel *channel, *channel_thread1;
3887 	struct spdk_blob_opts opts;
3888 	uint64_t free_clusters;
3889 	uint64_t page_size;
3890 	uint8_t payload_read[10 * 4096];
3891 	uint8_t payload_write[10 * 4096];
3892 	uint64_t write_bytes;
3893 	uint64_t read_bytes;
3894 
3895 	free_clusters = spdk_bs_free_cluster_count(bs);
3896 	page_size = spdk_bs_get_page_size(bs);
3897 
3898 	channel = spdk_bs_alloc_io_channel(bs);
3899 	CU_ASSERT(channel != NULL);
3900 
3901 	ut_spdk_blob_opts_init(&opts);
3902 	opts.thin_provision = true;
3903 
3904 	/* Create and delete a blob at md page 0, so that the next md page allocation
3905 	 * for an extent will reuse that page. */
3906 	blob_id0 = ut_blob_create_and_open(bs, &opts);
3907 	blob = ut_blob_create_and_open(bs, &opts);
3908 	ut_blob_close_and_delete(bs, blob_id0);
3909 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3910 
3911 	CU_ASSERT(blob->active.num_clusters == 0);
3912 
3913 	/* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */
3914 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
3915 	poll_threads();
3916 	CU_ASSERT(g_bserrno == 0);
3917 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3918 	CU_ASSERT(blob->active.num_clusters == 5);
3919 
3920 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
3921 	poll_threads();
3922 	CU_ASSERT(g_bserrno == 0);
3923 	/* Sync must not change anything */
3924 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3925 	CU_ASSERT(blob->active.num_clusters == 5);
3926 
3927 	/* Payload should be all zeros from unallocated clusters */
3928 	memset(payload_read, 0xFF, sizeof(payload_read));
3929 	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
3930 	poll_threads();
3931 	CU_ASSERT(g_bserrno == 0);
3932 	CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0);
3933 
3934 	write_bytes = g_dev_write_bytes;
3935 	read_bytes = g_dev_read_bytes;
3936 
3937 	/* Perform a write on thread 1. That will allocate a cluster on thread 0 via send_msg. */
3938 	set_thread(1);
3939 	channel_thread1 = spdk_bs_alloc_io_channel(bs);
3940 	CU_ASSERT(channel_thread1 != NULL);
3941 	memset(payload_write, 0xE5, sizeof(payload_write));
3942 	spdk_blob_io_write(blob, channel_thread1, payload_write, 4, 10, blob_op_complete, NULL);
3943 	CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs));
3944 	/* Perform a write on thread 0. That will try to allocate a cluster,
3945 	 * but fail because the other thread issued the cluster allocation first. */
3946 	set_thread(0);
3947 	memset(payload_write, 0xE5, sizeof(payload_write));
3948 	spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
3949 	CU_ASSERT(free_clusters - 2 == spdk_bs_free_cluster_count(bs));
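	/* Before polling, both threads hold a tentative cluster claim.  Once the
	 * messages are processed, the loser of the allocation race releases its
	 * cluster, so the count should settle at free_clusters - 1 below. */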
3950 	poll_threads();
3951 	CU_ASSERT(g_bserrno == 0);
3952 	CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs));
3953 	/* For a thin-provisioned blob we need to write 20 pages of payload plus one page of
3954 	 * metadata and read 0 bytes */
3955 	if (g_use_extent_table) {
3956 		/* Add one more page for EXTENT_PAGE write */
3957 		CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 22);
3958 	} else {
3959 		CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 21);
3960 	}
3961 	CU_ASSERT(g_dev_read_bytes - read_bytes == 0);
3962 
3963 	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
3964 	poll_threads();
3965 	CU_ASSERT(g_bserrno == 0);
3966 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
3967 
3968 	ut_blob_close_and_delete(bs, blob);
3969 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3970 
3971 	set_thread(1);
3972 	spdk_bs_free_io_channel(channel_thread1);
3973 	set_thread(0);
3974 	spdk_bs_free_io_channel(channel);
3975 	poll_threads();
3976 	g_blob = NULL;
3977 	g_blobid = 0;
3978 }
3979 
3980 static void
3981 blob_thin_prov_write_count_io(void)
3982 {
3983 	struct spdk_blob_store *bs;
3984 	struct spdk_blob *blob;
3985 	struct spdk_io_channel *ch;
3986 	struct spdk_bs_dev *dev;
3987 	struct spdk_bs_opts bs_opts;
3988 	struct spdk_blob_opts opts;
3989 	uint64_t free_clusters;
3990 	uint64_t page_size;
3991 	uint8_t payload_write[4096];
3992 	uint64_t write_bytes;
3993 	uint64_t read_bytes;
3994 	const uint32_t CLUSTER_SZ = 16384;
3995 	uint32_t pages_per_cluster;
3996 	uint32_t pages_per_extent_page;
3997 	uint32_t i;
3998 
3999 	/* Use a very small cluster size for this test.  This ensures we need multiple
4000 	 * extent pages to hold all of the clusters even for the relatively small blobs
4001 	 * we are restricted to in the unit tests (i.e. we don't want to allocate multi-GB
4002 	 * buffers).
4003 	 */
4004 	dev = init_dev();
4005 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
4006 	bs_opts.cluster_sz = CLUSTER_SZ;
4007 
4008 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
4009 	poll_threads();
4010 	CU_ASSERT(g_bserrno == 0);
4011 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
4012 	bs = g_bs;
4013 
4014 	free_clusters = spdk_bs_free_cluster_count(bs);
4015 	page_size = spdk_bs_get_page_size(bs);
4016 	pages_per_cluster = CLUSTER_SZ / page_size;
4017 	pages_per_extent_page = SPDK_EXTENTS_PER_EP * pages_per_cluster;
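	/* Each extent page describes SPDK_EXTENTS_PER_EP clusters, so this is the
	 * number of blob data pages covered by a single extent page. */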
4018 
4019 	ch = spdk_bs_alloc_io_channel(bs);
4020 	SPDK_CU_ASSERT_FATAL(ch != NULL);
4021 
4022 	ut_spdk_blob_opts_init(&opts);
4023 	opts.thin_provision = true;
4024 
4025 	blob = ut_blob_create_and_open(bs, &opts);
4026 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4027 
4028 	/* Resize the blob so that it will require 8 extent pages to hold all of
4029 	 * the clusters.
4030 	 */
4031 	g_bserrno = -1;
4032 	spdk_blob_resize(blob, SPDK_EXTENTS_PER_EP * 8, blob_op_complete, NULL);
4033 	poll_threads();
4034 	CU_ASSERT(g_bserrno == 0);
4035 
4036 	g_bserrno = -1;
4037 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
4038 	poll_threads();
4039 	CU_ASSERT(g_bserrno == 0);
4040 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4041 	CU_ASSERT(blob->active.num_clusters == SPDK_EXTENTS_PER_EP * 8);
4042 
4043 	memset(payload_write, 0, sizeof(payload_write));
4044 	for (i = 0; i < 8; i++) {
4045 		write_bytes = g_dev_write_bytes;
4046 		read_bytes = g_dev_read_bytes;
4047 
4048 		g_bserrno = -1;
4049 		spdk_blob_io_write(blob, ch, payload_write, pages_per_extent_page * i, 1, blob_op_complete, NULL);
4050 		poll_threads();
4051 		CU_ASSERT(g_bserrno == 0);
4052 		CU_ASSERT(free_clusters - (2 * i + 1) == spdk_bs_free_cluster_count(bs));
4053 
4054 		CU_ASSERT(g_dev_read_bytes == read_bytes);
4055 		if (!g_use_extent_table) {
4056 			/* For legacy metadata, we should have written two pages - one for the
4057 			 * write I/O itself, another for the blob's primary metadata.
4058 			 */
4059 			CU_ASSERT((g_dev_write_bytes - write_bytes) / page_size == 2);
4060 		} else {
4061 			/* For extent table metadata, we should have written three pages - one
4062 			 * for the write I/O, one for the extent page, one for the blob's primary
4063 			 * metadata.
4064 			 */
4065 			CU_ASSERT((g_dev_write_bytes - write_bytes) / page_size == 3);
4066 		}
4067 
4068 		/* The write should have synced the metadata already.  Do another sync here
4069 		 * just to confirm.
4070 		 */
4071 		write_bytes = g_dev_write_bytes;
4072 		read_bytes = g_dev_read_bytes;
4073 
4074 		g_bserrno = -1;
4075 		spdk_blob_sync_md(blob, blob_op_complete, NULL);
4076 		poll_threads();
4077 		CU_ASSERT(g_bserrno == 0);
4078 		CU_ASSERT(free_clusters - (2 * i + 1) == spdk_bs_free_cluster_count(bs));
4079 
4080 		CU_ASSERT(g_dev_read_bytes == read_bytes);
4081 		CU_ASSERT(g_dev_write_bytes == write_bytes);
4082 
4083 		/* Now write to another unallocated cluster that is part of the same extent page. */
4084 		g_bserrno = -1;
4085 		spdk_blob_io_write(blob, ch, payload_write, pages_per_extent_page * i + pages_per_cluster,
4086 				   1, blob_op_complete, NULL);
4087 		poll_threads();
4088 		CU_ASSERT(g_bserrno == 0);
4089 		CU_ASSERT(free_clusters - (2 * i + 2) == spdk_bs_free_cluster_count(bs));
4090 
4091 		CU_ASSERT(g_dev_read_bytes == read_bytes);
4092 		/*
4093 		 * For legacy metadata, we should have written the I/O and the primary metadata page.
4094 		 * For extent table metadata, we should have written the I/O and the extent metadata page.
4095 		 */
4096 		CU_ASSERT((g_dev_write_bytes - write_bytes) / page_size == 2);
4097 	}
4098 
4099 	ut_blob_close_and_delete(bs, blob);
4100 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4101 
4102 	spdk_bs_free_io_channel(ch);
4103 	poll_threads();
4104 	g_blob = NULL;
4105 	g_blobid = 0;
4106 
4107 	spdk_bs_unload(bs, bs_op_complete, NULL);
4108 	poll_threads();
4109 	CU_ASSERT(g_bserrno == 0);
4110 	g_bs = NULL;
4111 }
4112 
4113 static void
4114 blob_thin_prov_rle(void)
4115 {
4116 	static const uint8_t zero[10 * 4096] = { 0 };
4117 	struct spdk_blob_store *bs = g_bs;
4118 	struct spdk_blob *blob;
4119 	struct spdk_io_channel *channel;
4120 	struct spdk_blob_opts opts;
4121 	spdk_blob_id blobid;
4122 	uint64_t free_clusters;
4123 	uint64_t page_size;
4124 	uint8_t payload_read[10 * 4096];
4125 	uint8_t payload_write[10 * 4096];
4126 	uint64_t write_bytes;
4127 	uint64_t read_bytes;
4128 	uint64_t io_unit;
4129 
4130 	free_clusters = spdk_bs_free_cluster_count(bs);
4131 	page_size = spdk_bs_get_page_size(bs);
4132 
4133 	ut_spdk_blob_opts_init(&opts);
4134 	opts.thin_provision = true;
4135 	opts.num_clusters = 5;
4136 
4137 	blob = ut_blob_create_and_open(bs, &opts);
4138 	blobid = spdk_blob_get_id(blob);
4139 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4140 
4141 	channel = spdk_bs_alloc_io_channel(bs);
4142 	CU_ASSERT(channel != NULL);
4143 
4144 	/* Specifically target the second cluster in the blob as the first allocation */
4145 	io_unit = bs_cluster_to_page(bs, 1) * bs_io_unit_per_page(bs);
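	/* bs_cluster_to_page() converts the cluster index to a page index; scaling
	 * by io units per page gives the io_unit offset used by the I/O calls below. */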
4146 
4147 	/* Payload should be all zeros from unallocated clusters */
4148 	memset(payload_read, 0xFF, sizeof(payload_read));
4149 	spdk_blob_io_read(blob, channel, payload_read, io_unit, 10, blob_op_complete, NULL);
4150 	poll_threads();
4151 	CU_ASSERT(g_bserrno == 0);
4152 	CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0);
4153 
4154 	write_bytes = g_dev_write_bytes;
4155 	read_bytes = g_dev_read_bytes;
4156 
4157 	/* Issue write to second cluster in a blob */
4158 	memset(payload_write, 0xE5, sizeof(payload_write));
4159 	spdk_blob_io_write(blob, channel, payload_write, io_unit, 10, blob_op_complete, NULL);
4160 	poll_threads();
4161 	CU_ASSERT(g_bserrno == 0);
4162 	CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs));
4163 	/* For a thin-provisioned blob we need to write 10 pages of payload plus one page of
4164 	 * metadata and read 0 bytes */
4165 	if (g_use_extent_table) {
4166 		/* Add one more page for EXTENT_PAGE write */
4167 		CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 12);
4168 	} else {
4169 		CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 11);
4170 	}
4171 	CU_ASSERT(g_dev_read_bytes - read_bytes == 0);
4172 
4173 	spdk_blob_io_read(blob, channel, payload_read, io_unit, 10, blob_op_complete, NULL);
4174 	poll_threads();
4175 	CU_ASSERT(g_bserrno == 0);
4176 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
4177 
4178 	spdk_bs_free_io_channel(channel);
4179 	poll_threads();
4180 
4181 	spdk_blob_close(blob, blob_op_complete, NULL);
4182 	poll_threads();
4183 	CU_ASSERT(g_bserrno == 0);
4184 
4185 	ut_bs_reload(&bs, NULL);
4186 
4187 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
4188 	poll_threads();
4189 	CU_ASSERT(g_bserrno == 0);
4190 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4191 	blob = g_blob;
4192 
4193 	channel = spdk_bs_alloc_io_channel(bs);
4194 	CU_ASSERT(channel != NULL);
4195 
4196 	/* Read second cluster after blob reload to confirm data written */
4197 	spdk_blob_io_read(blob, channel, payload_read, io_unit, 10, blob_op_complete, NULL);
4198 	poll_threads();
4199 	CU_ASSERT(g_bserrno == 0);
4200 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
4201 
4202 	spdk_bs_free_io_channel(channel);
4203 	poll_threads();
4204 
4205 	ut_blob_close_and_delete(bs, blob);
4206 }
4207 
4208 static void
4209 blob_thin_prov_rw_iov(void)
4210 {
4211 	static const uint8_t zero[10 * 4096] = { 0 };
4212 	struct spdk_blob_store *bs = g_bs;
4213 	struct spdk_blob *blob;
4214 	struct spdk_io_channel *channel;
4215 	struct spdk_blob_opts opts;
4216 	uint64_t free_clusters;
4217 	uint8_t payload_read[10 * 4096];
4218 	uint8_t payload_write[10 * 4096];
4219 	struct iovec iov_read[3];
4220 	struct iovec iov_write[3];
4221 
4222 	free_clusters = spdk_bs_free_cluster_count(bs);
4223 
4224 	channel = spdk_bs_alloc_io_channel(bs);
4225 	CU_ASSERT(channel != NULL);
4226 
4227 	ut_spdk_blob_opts_init(&opts);
4228 	opts.thin_provision = true;
4229 
4230 	blob = ut_blob_create_and_open(bs, &opts);
4231 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4232 
4233 	CU_ASSERT(blob->active.num_clusters == 0);
4234 
4235 	/* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */
4236 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
4237 	poll_threads();
4238 	CU_ASSERT(g_bserrno == 0);
4239 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4240 	CU_ASSERT(blob->active.num_clusters == 5);
4241 
4242 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
4243 	poll_threads();
4244 	CU_ASSERT(g_bserrno == 0);
4245 	/* Sync must not change anything */
4246 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4247 	CU_ASSERT(blob->active.num_clusters == 5);
4248 
4249 	/* Payload should be all zeros from unallocated clusters */
4250 	memset(payload_read, 0xAA, sizeof(payload_read));
4251 	iov_read[0].iov_base = payload_read;
4252 	iov_read[0].iov_len = 3 * 4096;
4253 	iov_read[1].iov_base = payload_read + 3 * 4096;
4254 	iov_read[1].iov_len = 4 * 4096;
4255 	iov_read[2].iov_base = payload_read + 7 * 4096;
4256 	iov_read[2].iov_len = 3 * 4096;
4257 	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
4258 	poll_threads();
4259 	CU_ASSERT(g_bserrno == 0);
4260 	CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0);
4261 
4262 	memset(payload_write, 0xE5, sizeof(payload_write));
4263 	iov_write[0].iov_base = payload_write;
4264 	iov_write[0].iov_len = 1 * 4096;
4265 	iov_write[1].iov_base = payload_write + 1 * 4096;
4266 	iov_write[1].iov_len = 5 * 4096;
4267 	iov_write[2].iov_base = payload_write + 6 * 4096;
4268 	iov_write[2].iov_len = 4 * 4096;
4269 
4270 	spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
4271 	poll_threads();
4272 	CU_ASSERT(g_bserrno == 0);
4273 
4274 	memset(payload_read, 0xAA, sizeof(payload_read));
4275 	iov_read[0].iov_base = payload_read;
4276 	iov_read[0].iov_len = 3 * 4096;
4277 	iov_read[1].iov_base = payload_read + 3 * 4096;
4278 	iov_read[1].iov_len = 4 * 4096;
4279 	iov_read[2].iov_base = payload_read + 7 * 4096;
4280 	iov_read[2].iov_len = 3 * 4096;
4281 	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
4282 	poll_threads();
4283 	CU_ASSERT(g_bserrno == 0);
4284 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
4285 
4286 	spdk_bs_free_io_channel(channel);
4287 	poll_threads();
4288 
4289 	ut_blob_close_and_delete(bs, blob);
4290 }
4291 
4292 struct iter_ctx {
4293 	int		current_iter;
4294 	spdk_blob_id	blobid[4];
4295 };
4296 
4297 static void
4298 test_iter(void *arg, struct spdk_blob *blob, int bserrno)
4299 {
4300 	struct iter_ctx *iter_ctx = arg;
4301 	spdk_blob_id blobid;
4302 
4303 	CU_ASSERT(bserrno == 0);
4304 	blobid = spdk_blob_get_id(blob);
4305 	CU_ASSERT(blobid == iter_ctx->blobid[iter_ctx->current_iter++]);
4306 }
4307 
4308 static void
4309 bs_load_iter_test(void)
4310 {
4311 	struct spdk_blob_store *bs;
4312 	struct spdk_bs_dev *dev;
4313 	struct iter_ctx iter_ctx = { 0 };
4314 	struct spdk_blob *blob;
4315 	int i, rc;
4316 	struct spdk_bs_opts opts;
4317 
4318 	dev = init_dev();
4319 	spdk_bs_opts_init(&opts, sizeof(opts));
4320 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
4321 
4322 	/* Initialize a new blob store */
4323 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
4324 	poll_threads();
4325 	CU_ASSERT(g_bserrno == 0);
4326 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
4327 	bs = g_bs;
4328 
4329 	for (i = 0; i < 4; i++) {
4330 		blob = ut_blob_create_and_open(bs, NULL);
4331 		iter_ctx.blobid[i] = spdk_blob_get_id(blob);
4332 
4333 		/* Just save the blobid as an xattr for testing purposes. */
4334 		rc = spdk_blob_set_xattr(blob, "blobid", &iter_ctx.blobid[i], sizeof(spdk_blob_id));
4335 		CU_ASSERT(rc == 0);
4336 
4337 		/* Resize the blob */
4338 		spdk_blob_resize(blob, i, blob_op_complete, NULL);
4339 		poll_threads();
4340 		CU_ASSERT(g_bserrno == 0);
4341 
4342 		spdk_blob_close(blob, blob_op_complete, NULL);
4343 		poll_threads();
4344 		CU_ASSERT(g_bserrno == 0);
4345 	}
4346 
4347 	g_bserrno = -1;
4348 	spdk_bs_unload(bs, bs_op_complete, NULL);
4349 	poll_threads();
4350 	CU_ASSERT(g_bserrno == 0);
4351 
4352 	dev = init_dev();
4353 	spdk_bs_opts_init(&opts, sizeof(opts));
4354 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
4355 	opts.iter_cb_fn = test_iter;
4356 	opts.iter_cb_arg = &iter_ctx;
4357 
4358 	/* Test blob iteration during load after a clean shutdown. */
4359 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
4360 	poll_threads();
4361 	CU_ASSERT(g_bserrno == 0);
4362 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
4363 	bs = g_bs;
4364 
4365 	/* Dirty shutdown */
4366 	bs_free(bs);
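	/* bs_free() drops the in-memory blobstore without a clean unload, so the
	 * super block's clean flag stays 0 and the next load should take the
	 * dirty-shutdown recovery path. */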
4367 
4368 	dev = init_dev();
4369 	spdk_bs_opts_init(&opts, sizeof(opts));
4370 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
4371 	opts.iter_cb_fn = test_iter;
4372 	iter_ctx.current_iter = 0;
4373 	opts.iter_cb_arg = &iter_ctx;
4374 
4375 	/* Test blob iteration during load after a dirty shutdown. */
4376 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
4377 	poll_threads();
4378 	CU_ASSERT(g_bserrno == 0);
4379 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
4380 	bs = g_bs;
4381 
4382 	spdk_bs_unload(bs, bs_op_complete, NULL);
4383 	poll_threads();
4384 	CU_ASSERT(g_bserrno == 0);
4385 	g_bs = NULL;
4386 }
4387 
4388 static void
4389 blob_snapshot_rw(void)
4390 {
4391 	static const uint8_t zero[10 * 4096] = { 0 };
4392 	struct spdk_blob_store *bs = g_bs;
4393 	struct spdk_blob *blob, *snapshot;
4394 	struct spdk_io_channel *channel;
4395 	struct spdk_blob_opts opts;
4396 	spdk_blob_id blobid, snapshotid;
4397 	uint64_t free_clusters;
4398 	uint64_t cluster_size;
4399 	uint64_t page_size;
4400 	uint8_t payload_read[10 * 4096];
4401 	uint8_t payload_write[10 * 4096];
4402 	uint64_t write_bytes;
4403 	uint64_t read_bytes;
4404 
4405 	free_clusters = spdk_bs_free_cluster_count(bs);
4406 	cluster_size = spdk_bs_get_cluster_size(bs);
4407 	page_size = spdk_bs_get_page_size(bs);
4408 
4409 	channel = spdk_bs_alloc_io_channel(bs);
4410 	CU_ASSERT(channel != NULL);
4411 
4412 	ut_spdk_blob_opts_init(&opts);
4413 	opts.thin_provision = true;
4414 	opts.num_clusters = 5;
4415 
4416 	blob = ut_blob_create_and_open(bs, &opts);
4417 	blobid = spdk_blob_get_id(blob);
4418 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4419 
4420 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
4421 
4422 	memset(payload_read, 0xFF, sizeof(payload_read));
4423 	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
4424 	poll_threads();
4425 	CU_ASSERT(g_bserrno == 0);
4426 	CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0);
4427 
4428 	memset(payload_write, 0xE5, sizeof(payload_write));
4429 	spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
4430 	poll_threads();
4431 	CU_ASSERT(g_bserrno == 0);
4432 	CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs));
4433 
4434 	/* Create snapshot from blob */
4435 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
4436 	poll_threads();
4437 	CU_ASSERT(g_bserrno == 0);
4438 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
4439 	snapshotid = g_blobid;
4440 
4441 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
4442 	poll_threads();
4443 	CU_ASSERT(g_bserrno == 0);
4444 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4445 	snapshot = g_blob;
4446 	CU_ASSERT(snapshot->data_ro == true);
4447 	CU_ASSERT(snapshot->md_ro == true);
4448 
4449 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5);
4450 
4451 	write_bytes = g_dev_write_bytes;
4452 	read_bytes = g_dev_read_bytes;
4453 
4454 	memset(payload_write, 0xAA, sizeof(payload_write));
4455 	spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
4456 	poll_threads();
4457 	CU_ASSERT(g_bserrno == 0);
4458 	CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs));
4459 
4460 	/* For a clone we need to allocate and copy one cluster, update one page of metadata
4461 	 * and then write 10 pages of payload.
4462 	 */
4463 	if (g_use_extent_table) {
4464 		/* Add one more page for EXTENT_PAGE write */
4465 		CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 12 + cluster_size);
4466 	} else {
4467 		CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 11 + cluster_size);
4468 	}
4469 	CU_ASSERT(g_dev_read_bytes - read_bytes == cluster_size);
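	/* The cluster_size of reads is the copy-on-write step: the backing cluster
	 * is read from the snapshot before the clone's updated payload is written. */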
4470 
4471 	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
4472 	poll_threads();
4473 	CU_ASSERT(g_bserrno == 0);
4474 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
4475 
4476 	/* Data on snapshot should not change after write to clone */
4477 	memset(payload_write, 0xE5, sizeof(payload_write));
4478 	spdk_blob_io_read(snapshot, channel, payload_read, 4, 10, blob_op_complete, NULL);
4479 	poll_threads();
4480 	CU_ASSERT(g_bserrno == 0);
4481 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
4482 
4483 	ut_blob_close_and_delete(bs, blob);
4484 	ut_blob_close_and_delete(bs, snapshot);
4485 
4486 	spdk_bs_free_io_channel(channel);
4487 	poll_threads();
4488 	g_blob = NULL;
4489 	g_blobid = 0;
4490 }
4491 
4492 static void
4493 blob_snapshot_rw_iov(void)
4494 {
4495 	static const uint8_t zero[10 * 4096] = { 0 };
4496 	struct spdk_blob_store *bs = g_bs;
4497 	struct spdk_blob *blob, *snapshot;
4498 	struct spdk_io_channel *channel;
4499 	struct spdk_blob_opts opts;
4500 	spdk_blob_id blobid, snapshotid;
4501 	uint64_t free_clusters;
4502 	uint8_t payload_read[10 * 4096];
4503 	uint8_t payload_write[10 * 4096];
4504 	struct iovec iov_read[3];
4505 	struct iovec iov_write[3];
4506 
4507 	free_clusters = spdk_bs_free_cluster_count(bs);
4508 
4509 	channel = spdk_bs_alloc_io_channel(bs);
4510 	CU_ASSERT(channel != NULL);
4511 
4512 	ut_spdk_blob_opts_init(&opts);
4513 	opts.thin_provision = true;
4514 	opts.num_clusters = 5;
4515 
4516 	blob = ut_blob_create_and_open(bs, &opts);
4517 	blobid = spdk_blob_get_id(blob);
4518 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4519 
4520 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
4521 
4522 	/* Create snapshot from blob */
4523 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
4524 	poll_threads();
4525 	CU_ASSERT(g_bserrno == 0);
4526 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
4527 	snapshotid = g_blobid;
4528 
4529 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
4530 	poll_threads();
4531 	CU_ASSERT(g_bserrno == 0);
4532 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4533 	snapshot = g_blob;
4534 	CU_ASSERT(snapshot->data_ro == true);
4535 	CU_ASSERT(snapshot->md_ro == true);
4536 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5);
4537 
4538 	/* Payload should be all zeros from unallocated clusters */
4539 	memset(payload_read, 0xAA, sizeof(payload_read));
4540 	iov_read[0].iov_base = payload_read;
4541 	iov_read[0].iov_len = 3 * 4096;
4542 	iov_read[1].iov_base = payload_read + 3 * 4096;
4543 	iov_read[1].iov_len = 4 * 4096;
4544 	iov_read[2].iov_base = payload_read + 7 * 4096;
4545 	iov_read[2].iov_len = 3 * 4096;
4546 	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
4547 	poll_threads();
4548 	CU_ASSERT(g_bserrno == 0);
4549 	CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0);
4550 
4551 	memset(payload_write, 0xE5, sizeof(payload_write));
4552 	iov_write[0].iov_base = payload_write;
4553 	iov_write[0].iov_len = 1 * 4096;
4554 	iov_write[1].iov_base = payload_write + 1 * 4096;
4555 	iov_write[1].iov_len = 5 * 4096;
4556 	iov_write[2].iov_base = payload_write + 6 * 4096;
4557 	iov_write[2].iov_len = 4 * 4096;
4558 
4559 	spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
4560 	poll_threads();
4561 	CU_ASSERT(g_bserrno == 0);
4562 
4563 	memset(payload_read, 0xAA, sizeof(payload_read));
4564 	iov_read[0].iov_base = payload_read;
4565 	iov_read[0].iov_len = 3 * 4096;
4566 	iov_read[1].iov_base = payload_read + 3 * 4096;
4567 	iov_read[1].iov_len = 4 * 4096;
4568 	iov_read[2].iov_base = payload_read + 7 * 4096;
4569 	iov_read[2].iov_len = 3 * 4096;
4570 	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
4571 	poll_threads();
4572 	CU_ASSERT(g_bserrno == 0);
4573 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
4574 
4575 	spdk_bs_free_io_channel(channel);
4576 	poll_threads();
4577 
4578 	ut_blob_close_and_delete(bs, blob);
4579 	ut_blob_close_and_delete(bs, snapshot);
4580 }
4581 
4582 /**
4583  * Inflate / decouple parent rw unit tests.
4584  *
4585  * --------------
4586  * original blob:         0         1         2         3         4
4587  *                   ,---------+---------+---------+---------+---------.
4588  *         snapshot  |xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|    -    |
4589  *                   +---------+---------+---------+---------+---------+
4590  *         snapshot2 |    -    |yyyyyyyyy|    -    |yyyyyyyyy|    -    |
4591  *                   +---------+---------+---------+---------+---------+
4592  *         blob      |    -    |zzzzzzzzz|    -    |    -    |    -    |
4593  *                   '---------+---------+---------+---------+---------'
4594  *                   .         .         .         .         .         .
4595  * --------          .         .         .         .         .         .
4596  * inflate:          .         .         .         .         .         .
4597  *                   ,---------+---------+---------+---------+---------.
4598  *         blob      |xxxxxxxxx|zzzzzzzzz|xxxxxxxxx|yyyyyyyyy|000000000|
4599  *                   '---------+---------+---------+---------+---------'
4600  *
4601  *         NOTE: needs to allocate 4 clusters, thin provisioning removed, dependency
4602  *               on snapshot2 and snapshot removed .         .         .
4603  *                   .         .         .         .         .         .
4604  * ----------------  .         .         .         .         .         .
4605  * decouple parent:  .         .         .         .         .         .
4606  *                   ,---------+---------+---------+---------+---------.
4607  *         snapshot  |xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|    -    |
4608  *                   +---------+---------+---------+---------+---------+
4609  *         blob      |    -    |zzzzzzzzz|    -    |yyyyyyyyy|    -    |
4610  *                   '---------+---------+---------+---------+---------'
4611  *
4612  *         NOTE: needs to allocate 1 cluster, 3 clusters unallocated, dependency
4613  *               on snapshot2 removed and on snapshot still exists. Snapshot2
4614  *               should remain a clone of snapshot.
4615  */
4616 static void
4617 _blob_inflate_rw(bool decouple_parent)
4618 {
4619 	struct spdk_blob_store *bs = g_bs;
4620 	struct spdk_blob *blob, *snapshot, *snapshot2;
4621 	struct spdk_io_channel *channel;
4622 	struct spdk_blob_opts opts;
4623 	spdk_blob_id blobid, snapshotid, snapshot2id;
4624 	uint64_t free_clusters;
4625 	uint64_t cluster_size;
4626 
4627 	uint64_t payload_size;
4628 	uint8_t *payload_read;
4629 	uint8_t *payload_write;
4630 	uint8_t *payload_clone;
4631 
4632 	uint64_t pages_per_cluster;
4633 	uint64_t pages_per_payload;
4634 
4635 	int i;
4636 	spdk_blob_id ids[2];
4637 	size_t count;
4638 
4639 	free_clusters = spdk_bs_free_cluster_count(bs);
4640 	cluster_size = spdk_bs_get_cluster_size(bs);
4641 	pages_per_cluster = cluster_size / spdk_bs_get_page_size(bs);
4642 	pages_per_payload = pages_per_cluster * 5;
4643 
4644 	payload_size = cluster_size * 5;
4645 
4646 	payload_read = malloc(payload_size);
4647 	SPDK_CU_ASSERT_FATAL(payload_read != NULL);
4648 
4649 	payload_write = malloc(payload_size);
4650 	SPDK_CU_ASSERT_FATAL(payload_write != NULL);
4651 
4652 	payload_clone = malloc(payload_size);
4653 	SPDK_CU_ASSERT_FATAL(payload_clone != NULL);
4654 
4655 	channel = spdk_bs_alloc_io_channel(bs);
4656 	SPDK_CU_ASSERT_FATAL(channel != NULL);
4657 
4658 	/* Create blob */
4659 	ut_spdk_blob_opts_init(&opts);
4660 	opts.thin_provision = true;
4661 	opts.num_clusters = 5;
4662 
4663 	blob = ut_blob_create_and_open(bs, &opts);
4664 	blobid = spdk_blob_get_id(blob);
4665 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4666 
4667 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
4668 
4669 	/* 1) Initial read should return zeroed payload */
4670 	memset(payload_read, 0xFF, payload_size);
4671 	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload,
4672 			  blob_op_complete, NULL);
4673 	poll_threads();
4674 	CU_ASSERT(g_bserrno == 0);
4675 	CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size));
4676 
4677 	/* Fill the whole blob with a pattern, except the last cluster (to be sure it
4678 	 * isn't allocated) */
4679 	memset(payload_write, 0xE5, payload_size - cluster_size);
4680 	spdk_blob_io_write(blob, channel, payload_write, 0, pages_per_payload -
4681 			   pages_per_cluster, blob_op_complete, NULL);
4682 	poll_threads();
4683 	CU_ASSERT(g_bserrno == 0);
4684 	CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs));
4685 
4686 	/* 2) Create snapshot from blob (first level) */
4687 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
4688 	poll_threads();
4689 	CU_ASSERT(g_bserrno == 0);
4690 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
4691 	snapshotid = g_blobid;
4692 
4693 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
4694 	poll_threads();
4695 	CU_ASSERT(g_bserrno == 0);
4696 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4697 	snapshot = g_blob;
4698 	CU_ASSERT(snapshot->data_ro == true);
4699 	CU_ASSERT(snapshot->md_ro == true);
4700 
4701 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5);
4702 
4703 	/* Write every second cluster with a pattern.
4704 	 *
4705 	 * The last cluster shouldn't be written, to be sure that neither the
4706 	 * snapshot nor the clone allocates it.
4707 	 *
4708 	 * payload_clone stores the expected result of a "blob" read at this point
4709 	 * and is used only to check data consistency on the clone before and after
4710 	 * inflation. Initially we fill it with the backing snapshot's pattern
4711 	 * written earlier.
4712 	 */
4713 	memset(payload_clone, 0xE5, payload_size - cluster_size);
4714 	memset(payload_clone + payload_size - cluster_size, 0x00, cluster_size);
4715 	memset(payload_write, 0xAA, payload_size);
4716 	for (i = 1; i < 5; i += 2) {
4717 		spdk_blob_io_write(blob, channel, payload_write, i * pages_per_cluster,
4718 				   pages_per_cluster, blob_op_complete, NULL);
4719 		poll_threads();
4720 		CU_ASSERT(g_bserrno == 0);
4721 
4722 		/* Update expected result */
4723 		memcpy(payload_clone + (cluster_size * i), payload_write,
4724 		       cluster_size);
4725 	}
4726 	CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs));
4727 
4728 	/* Check data consistency on clone */
4729 	memset(payload_read, 0xFF, payload_size);
4730 	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload,
4731 			  blob_op_complete, NULL);
4732 	poll_threads();
4733 	CU_ASSERT(g_bserrno == 0);
4734 	CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0);
4735 
4736 	/* 3) Create second levels snapshot from blob */
4737 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
4738 	poll_threads();
4739 	CU_ASSERT(g_bserrno == 0);
4740 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
4741 	snapshot2id = g_blobid;
4742 
4743 	spdk_bs_open_blob(bs, snapshot2id, blob_op_with_handle_complete, NULL);
4744 	poll_threads();
4745 	CU_ASSERT(g_bserrno == 0);
4746 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4747 	snapshot2 = g_blob;
4748 	CU_ASSERT(snapshot2->data_ro == true);
4749 	CU_ASSERT(snapshot2->md_ro == true);
4750 
4751 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot2) == 5);
4752 
4753 	CU_ASSERT(snapshot2->parent_id == snapshotid);
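	/* The chain is now blob -> snapshot2 -> snapshot, matching the diagram at
	 * the top of this test: snapshot2 sits between the writable blob and the
	 * original snapshot. */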
4754 
4755 	/* Write one cluster on the top level blob. This cluster (1) covers an
4756 	 * already allocated cluster in snapshot2, so it shouldn't be inflated
4757 	 * at all */
4758 	spdk_blob_io_write(blob, channel, payload_write, pages_per_cluster,
4759 			   pages_per_cluster, blob_op_complete, NULL);
4760 	poll_threads();
4761 	CU_ASSERT(g_bserrno == 0);
4762 
4763 	/* Update expected result */
4764 	memcpy(payload_clone + cluster_size, payload_write, cluster_size);
4765 
4766 	/* Check data consistency on clone */
4767 	memset(payload_read, 0xFF, payload_size);
4768 	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload,
4769 			  blob_op_complete, NULL);
4770 	poll_threads();
4771 	CU_ASSERT(g_bserrno == 0);
4772 	CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0);
4773 
4774 
4775 	/* Close all blobs */
4776 	spdk_blob_close(blob, blob_op_complete, NULL);
4777 	poll_threads();
4778 	CU_ASSERT(g_bserrno == 0);
4779 
4780 	spdk_blob_close(snapshot2, blob_op_complete, NULL);
4781 	poll_threads();
4782 	CU_ASSERT(g_bserrno == 0);
4783 
4784 	spdk_blob_close(snapshot, blob_op_complete, NULL);
4785 	poll_threads();
4786 	CU_ASSERT(g_bserrno == 0);
4787 
4788 	/* Check snapshot-clone relations */
4789 	count = 2;
4790 	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);
4791 	CU_ASSERT(count == 1);
4792 	CU_ASSERT(ids[0] == snapshot2id);
4793 
4794 	count = 2;
4795 	CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0);
4796 	CU_ASSERT(count == 1);
4797 	CU_ASSERT(ids[0] == blobid);
4798 
4799 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshot2id);
4800 
4801 	free_clusters = spdk_bs_free_cluster_count(bs);
4802 	if (!decouple_parent) {
4803 		/* Do full blob inflation */
4804 		spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
4805 		poll_threads();
4806 		CU_ASSERT(g_bserrno == 0);
4807 
4808 		/* All clusters should be inflated (except the one already allocated
4809 		 * in the top level blob) */
4810 		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 4);
4811 
4812 		/* Check if relation tree updated correctly */
4813 		count = 2;
4814 		CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);
4815 
4816 		/* snapshotid has one clone */
4817 		CU_ASSERT(count == 1);
4818 		CU_ASSERT(ids[0] == snapshot2id);
4819 
4820 		/* snapshot2id has no clones */
4821 		count = 2;
4822 		CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0);
4823 		CU_ASSERT(count == 0);
4824 
4825 		CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID);
4826 	} else {
4827 		/* Decouple parent of blob */
4828 		spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
4829 		poll_threads();
4830 		CU_ASSERT(g_bserrno == 0);
4831 
4832 		/* Only one cluster from the parent should be inflated (the second one
4833 		 * is covered by a cluster written on the top level blob and
4834 		 * already allocated) */
4835 		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 1);
4836 
4837 		/* Check if relation tree updated correctly */
4838 		count = 2;
4839 		CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);
4840 
4841 		/* snapshotid has two clones now */
4842 		CU_ASSERT(count == 2);
4843 		CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
4844 		CU_ASSERT(ids[0] == snapshot2id || ids[1] == snapshot2id);
4845 
4846 		/* snapshot2id has no clones */
4847 		count = 2;
4848 		CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0);
4849 		CU_ASSERT(count == 0);
4850 
4851 		CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
4852 	}
4853 
4854 	/* Try to delete snapshot2 (should pass) */
4855 	spdk_bs_delete_blob(bs, snapshot2id, blob_op_complete, NULL);
4856 	poll_threads();
4857 	CU_ASSERT(g_bserrno == 0);
4858 
4859 	/* Try to delete base snapshot */
4860 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
4861 	poll_threads();
4862 	CU_ASSERT(g_bserrno == 0);
4863 
4864 	/* Reopen blob after snapshot deletion */
4865 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
4866 	poll_threads();
4867 	CU_ASSERT(g_bserrno == 0);
4868 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4869 	blob = g_blob;
4870 
4871 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
4872 
4873 	/* Check data consistency on inflated blob */
4874 	memset(payload_read, 0xFF, payload_size);
4875 	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload,
4876 			  blob_op_complete, NULL);
4877 	poll_threads();
4878 	CU_ASSERT(g_bserrno == 0);
4879 	CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0);
4880 
4881 	spdk_bs_free_io_channel(channel);
4882 	poll_threads();
4883 
4884 	free(payload_read);
4885 	free(payload_write);
4886 	free(payload_clone);
4887 
4888 	ut_blob_close_and_delete(bs, blob);
4889 }
4890 
4891 static void
4892 blob_inflate_rw(void)
4893 {
4894 	_blob_inflate_rw(false);
4895 	_blob_inflate_rw(true);
4896 }
4897 
4898 /**
4899  * Snapshot-clones relation test
4900  *
4901  *         snapshot
4902  *            |
4903  *      +-----+-----+
4904  *      |           |
4905  *   blob(ro)   snapshot2
4906  *      |           |
4907  *   clone2      clone
4908  */
4909 static void
4910 blob_relations(void)
4911 {
4912 	struct spdk_blob_store *bs;
4913 	struct spdk_bs_dev *dev;
4914 	struct spdk_bs_opts bs_opts;
4915 	struct spdk_blob_opts opts;
4916 	struct spdk_blob *blob, *snapshot, *snapshot2, *clone, *clone2;
4917 	spdk_blob_id blobid, cloneid, snapshotid, cloneid2, snapshotid2;
4918 	int rc;
4919 	size_t count;
4920 	spdk_blob_id ids[10] = {};
4921 
4922 	dev = init_dev();
4923 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
4924 	snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");
4925 
4926 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
4927 	poll_threads();
4928 	CU_ASSERT(g_bserrno == 0);
4929 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
4930 	bs = g_bs;
4931 
4932 	/* 1. Create blob with 10 clusters */
4933 
4934 	ut_spdk_blob_opts_init(&opts);
4935 	opts.num_clusters = 10;
4936 
4937 	blob = ut_blob_create_and_open(bs, &opts);
4938 	blobid = spdk_blob_get_id(blob);
4939 
4940 	CU_ASSERT(!spdk_blob_is_read_only(blob));
4941 	CU_ASSERT(!spdk_blob_is_snapshot(blob));
4942 	CU_ASSERT(!spdk_blob_is_clone(blob));
4943 	CU_ASSERT(!spdk_blob_is_thin_provisioned(blob));
4944 
4945 	/* blob should not have underlying snapshot nor clones */
4946 	CU_ASSERT(blob->parent_id == SPDK_BLOBID_INVALID);
4947 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID);
4948 	count = SPDK_COUNTOF(ids);
4949 	rc = spdk_blob_get_clones(bs, blobid, ids, &count);
4950 	CU_ASSERT(rc == 0);
4951 	CU_ASSERT(count == 0);
4952 
4953 
4954 	/* 2. Create snapshot */
4955 
4956 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
4957 	poll_threads();
4958 	CU_ASSERT(g_bserrno == 0);
4959 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
4960 	snapshotid = g_blobid;
4961 
4962 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
4963 	poll_threads();
4964 	CU_ASSERT(g_bserrno == 0);
4965 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4966 	snapshot = g_blob;
4967 
4968 	CU_ASSERT(spdk_blob_is_read_only(snapshot));
4969 	CU_ASSERT(spdk_blob_is_snapshot(snapshot));
4970 	CU_ASSERT(!spdk_blob_is_clone(snapshot));
4971 	CU_ASSERT(snapshot->parent_id == SPDK_BLOBID_INVALID);
4972 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid) == SPDK_BLOBID_INVALID);
4973 
4974 	/* Check that the original blob was converted into a clone of the snapshot */
4975 	CU_ASSERT(!spdk_blob_is_read_only(blob));
4976 	CU_ASSERT(!spdk_blob_is_snapshot(blob));
4977 	CU_ASSERT(spdk_blob_is_clone(blob));
4978 	CU_ASSERT(spdk_blob_is_thin_provisioned(blob));
4979 	CU_ASSERT(blob->parent_id == snapshotid);
4980 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
4981 
4982 	count = SPDK_COUNTOF(ids);
4983 	rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
4984 	CU_ASSERT(rc == 0);
4985 	CU_ASSERT(count == 1);
4986 	CU_ASSERT(ids[0] == blobid);
4987 
4988 
4989 	/* 3. Create clone from snapshot */
4990 
4991 	spdk_bs_create_clone(bs, snapshotid, NULL, blob_op_with_id_complete, NULL);
4992 	poll_threads();
4993 	CU_ASSERT(g_bserrno == 0);
4994 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
4995 	cloneid = g_blobid;
4996 
4997 	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
4998 	poll_threads();
4999 	CU_ASSERT(g_bserrno == 0);
5000 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5001 	clone = g_blob;
5002 
5003 	CU_ASSERT(!spdk_blob_is_read_only(clone));
5004 	CU_ASSERT(!spdk_blob_is_snapshot(clone));
5005 	CU_ASSERT(spdk_blob_is_clone(clone));
5006 	CU_ASSERT(spdk_blob_is_thin_provisioned(clone));
5007 	CU_ASSERT(clone->parent_id == snapshotid);
5008 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid);
5009 
5010 	count = SPDK_COUNTOF(ids);
5011 	rc = spdk_blob_get_clones(bs, cloneid, ids, &count);
5012 	CU_ASSERT(rc == 0);
5013 	CU_ASSERT(count == 0);
5014 
5015 	/* Check if clone is on the snapshot's list */
5016 	count = SPDK_COUNTOF(ids);
5017 	rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
5018 	CU_ASSERT(rc == 0);
	CU_ASSERT(count == 2);
5019 	CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
5020 	CU_ASSERT(ids[0] == cloneid || ids[1] == cloneid);
5021 
5022 
5023 	/* 4. Create snapshot of the clone */
5024 
5025 	spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL);
5026 	poll_threads();
5027 	CU_ASSERT(g_bserrno == 0);
5028 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5029 	snapshotid2 = g_blobid;
5030 
5031 	spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL);
5032 	poll_threads();
5033 	CU_ASSERT(g_bserrno == 0);
5034 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5035 	snapshot2 = g_blob;
5036 
5037 	CU_ASSERT(spdk_blob_is_read_only(snapshot2));
5038 	CU_ASSERT(spdk_blob_is_snapshot(snapshot2));
5039 	CU_ASSERT(spdk_blob_is_clone(snapshot2));
5040 	CU_ASSERT(snapshot2->parent_id == snapshotid);
5041 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid);
5042 
5043 	/* Check that clone is now a clone of snapshot2 and that snapshot2
5044 	 * is a child of snapshot */
5045 	CU_ASSERT(!spdk_blob_is_read_only(clone));
5046 	CU_ASSERT(!spdk_blob_is_snapshot(clone));
5047 	CU_ASSERT(spdk_blob_is_clone(clone));
5048 	CU_ASSERT(spdk_blob_is_thin_provisioned(clone));
5049 	CU_ASSERT(clone->parent_id == snapshotid2);
5050 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2);
5051 
5052 	count = SPDK_COUNTOF(ids);
5053 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
5054 	CU_ASSERT(rc == 0);
5055 	CU_ASSERT(count == 1);
5056 	CU_ASSERT(ids[0] == cloneid);
5057 
5058 
5059 	/* 5. Try to create clone from read only blob */
5060 
5061 	/* Mark blob as read only */
5062 	spdk_blob_set_read_only(blob);
5063 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
5064 	poll_threads();
5065 	CU_ASSERT(g_bserrno == 0);
5066 
5067 	/* Check that the previously created blob is now a read-only clone */
5068 	CU_ASSERT(spdk_blob_is_read_only(blob));
5069 	CU_ASSERT(!spdk_blob_is_snapshot(blob));
5070 	CU_ASSERT(spdk_blob_is_clone(blob));
5071 	CU_ASSERT(spdk_blob_is_thin_provisioned(blob));
5072 
5073 	/* Create clone from read only blob */
5074 	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5075 	poll_threads();
5076 	CU_ASSERT(g_bserrno == 0);
5077 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5078 	cloneid2 = g_blobid;
5079 
5080 	spdk_bs_open_blob(bs, cloneid2, blob_op_with_handle_complete, NULL);
5081 	poll_threads();
5082 	CU_ASSERT(g_bserrno == 0);
5083 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5084 	clone2 = g_blob;
5085 
5086 	CU_ASSERT(!spdk_blob_is_read_only(clone2));
5087 	CU_ASSERT(!spdk_blob_is_snapshot(clone2));
5088 	CU_ASSERT(spdk_blob_is_clone(clone2));
5089 	CU_ASSERT(spdk_blob_is_thin_provisioned(clone2));
5090 
5091 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid);
5092 
5093 	count = SPDK_COUNTOF(ids);
5094 	rc = spdk_blob_get_clones(bs, blobid, ids, &count);
5095 	CU_ASSERT(rc == 0);
5097 	CU_ASSERT(count == 1);
5098 	CU_ASSERT(ids[0] == cloneid2);
5099 
5100 	/* Close blobs */
5101 
5102 	spdk_blob_close(clone2, blob_op_complete, NULL);
5103 	poll_threads();
5104 	CU_ASSERT(g_bserrno == 0);
5105 
5106 	spdk_blob_close(blob, blob_op_complete, NULL);
5107 	poll_threads();
5108 	CU_ASSERT(g_bserrno == 0);
5109 
5110 	spdk_blob_close(clone, blob_op_complete, NULL);
5111 	poll_threads();
5112 	CU_ASSERT(g_bserrno == 0);
5113 
5114 	spdk_blob_close(snapshot, blob_op_complete, NULL);
5115 	poll_threads();
5116 	CU_ASSERT(g_bserrno == 0);
5117 
5118 	spdk_blob_close(snapshot2, blob_op_complete, NULL);
5119 	poll_threads();
5120 	CU_ASSERT(g_bserrno == 0);
5121 
5122 	/* Try to delete snapshot with more than 1 clone */
5123 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
5124 	poll_threads();
5125 	CU_ASSERT(g_bserrno != 0);
5126 
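	/* Reload the blob store to verify that the snapshot/clone relations are
	 * rebuilt correctly from the on-disk metadata. */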
5127 	ut_bs_reload(&bs, &bs_opts);
5128 
5129 	/* Passing a NULL ids array returns -ENOMEM but still reports the number of clones in count */
5130 	count = SPDK_COUNTOF(ids);
5131 	rc = spdk_blob_get_clones(bs, snapshotid, NULL, &count);
5132 	CU_ASSERT(rc == -ENOMEM);
5133 	CU_ASSERT(count == 2);
5134 
5135 	/* Array too small: returns -ENOMEM and sets count to the required size */
5136 	count = 1;
5137 	rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
5138 	CU_ASSERT(rc == -ENOMEM);
5139 	CU_ASSERT(count == 2);
5140 
5141 
5142 	/* Verify structure of loaded blob store */
5143 
5144 	/* snapshot */
5145 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid) == SPDK_BLOBID_INVALID);
5146 
5147 	count = SPDK_COUNTOF(ids);
5148 	rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
5149 	CU_ASSERT(rc == 0);
5150 	CU_ASSERT(count == 2);
5151 	CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
5152 	CU_ASSERT(ids[0] == snapshotid2 || ids[1] == snapshotid2);
5153 
5154 	/* blob */
5155 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
5156 	count = SPDK_COUNTOF(ids);
5157 	rc = spdk_blob_get_clones(bs, blobid, ids, &count);
5158 	CU_ASSERT(rc == 0);
5159 	CU_ASSERT(count == 1);
5160 	CU_ASSERT(ids[0] == cloneid2);
5161 
5162 	/* clone */
5163 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2);
5164 	count = SPDK_COUNTOF(ids);
5165 	rc = spdk_blob_get_clones(bs, cloneid, ids, &count);
5166 	CU_ASSERT(rc == 0);
5167 	CU_ASSERT(count == 0);
5168 
5169 	/* snapshot2 */
5170 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid);
5171 	count = SPDK_COUNTOF(ids);
5172 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
5173 	CU_ASSERT(rc == 0);
5174 	CU_ASSERT(count == 1);
5175 	CU_ASSERT(ids[0] == cloneid);
5176 
5177 	/* clone2 */
5178 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid);
5179 	count = SPDK_COUNTOF(ids);
5180 	rc = spdk_blob_get_clones(bs, cloneid2, ids, &count);
5181 	CU_ASSERT(rc == 0);
5182 	CU_ASSERT(count == 0);
5183 
5184 	/* Try to delete a blob that the user should not be able to remove (the snapshot still has clones) */
5185 
5186 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
5187 	poll_threads();
5188 	CU_ASSERT(g_bserrno != 0);
5189 
5190 	/* Remove all blobs */
5191 
5192 	spdk_bs_delete_blob(bs, cloneid, blob_op_complete, NULL);
5193 	poll_threads();
5194 	CU_ASSERT(g_bserrno == 0);
5195 
5196 	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
5197 	poll_threads();
5198 	CU_ASSERT(g_bserrno == 0);
5199 
5200 	spdk_bs_delete_blob(bs, cloneid2, blob_op_complete, NULL);
5201 	poll_threads();
5202 	CU_ASSERT(g_bserrno == 0);
5203 
5204 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
5205 	poll_threads();
5206 	CU_ASSERT(g_bserrno == 0);
5207 
5208 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
5209 	poll_threads();
5210 	CU_ASSERT(g_bserrno == 0);
5211 
5212 	spdk_bs_unload(bs, bs_op_complete, NULL);
5213 	poll_threads();
5214 	CU_ASSERT(g_bserrno == 0);
5215 
5216 	g_bs = NULL;
5217 }
5218 
5219 /**
5220  * Snapshot-clones relation test 2
5221  *
5222  *         snapshot1
5223  *            |
5224  *         snapshot2
5225  *            |
5226  *      +-----+-----+
5227  *      |           |
5228  *   blob(ro)   snapshot3
5229  *      |           |
5230  *      |       snapshot4
5231  *      |        |     |
5232  *   clone2   clone  clone3
5233  */
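/* Besides building the hierarchy above, this test removes snapshots from the middle
 * and from the top of the chain and verifies that their clones are re-parented to
 * the deleted snapshot's parent. */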
5234 static void
5235 blob_relations2(void)
5236 {
5237 	struct spdk_blob_store *bs;
5238 	struct spdk_bs_dev *dev;
5239 	struct spdk_bs_opts bs_opts;
5240 	struct spdk_blob_opts opts;
5241 	struct spdk_blob *blob, *snapshot1, *snapshot2, *snapshot3, *snapshot4, *clone, *clone2;
5242 	spdk_blob_id blobid, snapshotid1, snapshotid2, snapshotid3, snapshotid4, cloneid, cloneid2,
5243 		     cloneid3;
5244 	int rc;
5245 	size_t count;
5246 	spdk_blob_id ids[10] = {};
5247 
5248 	dev = init_dev();
5249 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
5250 	snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");
5251 
5252 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
5253 	poll_threads();
5254 	CU_ASSERT(g_bserrno == 0);
5255 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
5256 	bs = g_bs;
5257 
5258 	/* 1. Create blob with 10 clusters */
5259 
5260 	ut_spdk_blob_opts_init(&opts);
5261 	opts.num_clusters = 10;
5262 
5263 	blob = ut_blob_create_and_open(bs, &opts);
5264 	blobid = spdk_blob_get_id(blob);
5265 
5266 	/* 2. Create snapshot1 */
5267 
5268 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5269 	poll_threads();
5270 	CU_ASSERT(g_bserrno == 0);
5271 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5272 	snapshotid1 = g_blobid;
5273 
5274 	spdk_bs_open_blob(bs, snapshotid1, blob_op_with_handle_complete, NULL);
5275 	poll_threads();
5276 	CU_ASSERT(g_bserrno == 0);
5277 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5278 	snapshot1 = g_blob;
5279 
5280 	CU_ASSERT(snapshot1->parent_id == SPDK_BLOBID_INVALID);
5281 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid1) == SPDK_BLOBID_INVALID);
5282 
5286 	/* Check if blob is the clone of snapshot1 */
5287 	CU_ASSERT(blob->parent_id == snapshotid1);
5288 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid1);
5289 
5290 	count = SPDK_COUNTOF(ids);
5291 	rc = spdk_blob_get_clones(bs, snapshotid1, ids, &count);
5292 	CU_ASSERT(rc == 0);
5293 	CU_ASSERT(count == 1);
5294 	CU_ASSERT(ids[0] == blobid);
5295 
5296 	/* 3. Create another snapshot */
5297 
5298 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5299 	poll_threads();
5300 	CU_ASSERT(g_bserrno == 0);
5301 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5302 	snapshotid2 = g_blobid;
5303 
5304 	spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL);
5305 	poll_threads();
5306 	CU_ASSERT(g_bserrno == 0);
5307 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5308 	snapshot2 = g_blob;
5309 
5310 	CU_ASSERT(spdk_blob_is_clone(snapshot2));
5311 	CU_ASSERT(snapshot2->parent_id == snapshotid1);
5312 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid1);
5313 
5314 	/* Check if snapshot2 is the clone of snapshot1 and blob
5315 	 * is a child of snapshot2 */
5316 	CU_ASSERT(blob->parent_id == snapshotid2);
5317 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid2);
5318 
5319 	count = SPDK_COUNTOF(ids);
5320 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
5321 	CU_ASSERT(rc == 0);
5322 	CU_ASSERT(count == 1);
5323 	CU_ASSERT(ids[0] == blobid);
5324 
5325 	/* 4. Create clone from snapshot */
5326 
5327 	spdk_bs_create_clone(bs, snapshotid2, NULL, blob_op_with_id_complete, NULL);
5328 	poll_threads();
5329 	CU_ASSERT(g_bserrno == 0);
5330 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5331 	cloneid = g_blobid;
5332 
5333 	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
5334 	poll_threads();
5335 	CU_ASSERT(g_bserrno == 0);
5336 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5337 	clone = g_blob;
5338 
5339 	CU_ASSERT(clone->parent_id == snapshotid2);
5340 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2);
5341 
5342 	/* Check if clone is on the snapshot's list */
5343 	count = SPDK_COUNTOF(ids);
5344 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
5345 	CU_ASSERT(rc == 0);
5346 	CU_ASSERT(count == 2);
5347 	CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
5348 	CU_ASSERT(ids[0] == cloneid || ids[1] == cloneid);
5349 
5350 	/* 5. Create snapshot of the clone */
5351 
5352 	spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL);
5353 	poll_threads();
5354 	CU_ASSERT(g_bserrno == 0);
5355 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5356 	snapshotid3 = g_blobid;
5357 
5358 	spdk_bs_open_blob(bs, snapshotid3, blob_op_with_handle_complete, NULL);
5359 	poll_threads();
5360 	CU_ASSERT(g_bserrno == 0);
5361 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5362 	snapshot3 = g_blob;
5363 
5364 	CU_ASSERT(snapshot3->parent_id == snapshotid2);
5365 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid3) == snapshotid2);
5366 
5367 	/* Check that clone is now a clone of snapshot3 and that snapshot3
5368 	 * is a child of snapshot2 */
5369 	CU_ASSERT(clone->parent_id == snapshotid3);
5370 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3);
5371 
5372 	count = SPDK_COUNTOF(ids);
5373 	rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count);
5374 	CU_ASSERT(rc == 0);
5375 	CU_ASSERT(count == 1);
5376 	CU_ASSERT(ids[0] == cloneid);
5377 
5378 	/* 6. Create another snapshot of the clone */
5379 
5380 	spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL);
5381 	poll_threads();
5382 	CU_ASSERT(g_bserrno == 0);
5383 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5384 	snapshotid4 = g_blobid;
5385 
5386 	spdk_bs_open_blob(bs, snapshotid4, blob_op_with_handle_complete, NULL);
5387 	poll_threads();
5388 	CU_ASSERT(g_bserrno == 0);
5389 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5390 	snapshot4 = g_blob;
5391 
5392 	CU_ASSERT(snapshot4->parent_id == snapshotid3);
5393 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid4) == snapshotid3);
5394 
5395 	/* Check that clone is now a clone of snapshot4 and that snapshot4
5396 	 * is a child of snapshot3 */
5397 	CU_ASSERT(clone->parent_id == snapshotid4);
5398 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid4);
5399 
5400 	count = SPDK_COUNTOF(ids);
5401 	rc = spdk_blob_get_clones(bs, snapshotid4, ids, &count);
5402 	CU_ASSERT(rc == 0);
5403 	CU_ASSERT(count == 1);
5404 	CU_ASSERT(ids[0] == cloneid);
5405 
5406 	/* 7. Remove snapshot 4 */
5407 
5408 	ut_blob_close_and_delete(bs, snapshot4);
5409 
5410 	/* Check if relations are back to state from before creating snapshot 4 */
5411 	CU_ASSERT(clone->parent_id == snapshotid3);
5412 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3);
5413 
5414 	count = SPDK_COUNTOF(ids);
5415 	rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count);
5416 	CU_ASSERT(rc == 0);
5417 	CU_ASSERT(count == 1);
5418 	CU_ASSERT(ids[0] == cloneid);
5419 
5420 	/* 8. Create second clone of snapshot 3 and try to remove snapshot 3 */
5421 
5422 	spdk_bs_create_clone(bs, snapshotid3, NULL, blob_op_with_id_complete, NULL);
5423 	poll_threads();
5424 	CU_ASSERT(g_bserrno == 0);
5425 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5426 	cloneid3 = g_blobid;
5427 
5428 	spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL);
5429 	poll_threads();
5430 	CU_ASSERT(g_bserrno != 0);
5431 
5432 	/* 9. Open snapshot 3 again and try to remove it while clone 3 is closed */
5433 
5434 	spdk_bs_open_blob(bs, snapshotid3, blob_op_with_handle_complete, NULL);
5435 	poll_threads();
5436 	CU_ASSERT(g_bserrno == 0);
5437 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5438 	snapshot3 = g_blob;
5439 
5440 	spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL);
5441 	poll_threads();
5442 	CU_ASSERT(g_bserrno != 0);
5443 
5444 	spdk_blob_close(snapshot3, blob_op_complete, NULL);
5445 	poll_threads();
5446 	CU_ASSERT(g_bserrno == 0);
5447 
5448 	spdk_bs_delete_blob(bs, cloneid3, blob_op_complete, NULL);
5449 	poll_threads();
5450 	CU_ASSERT(g_bserrno == 0);
5451 
5452 	/* 10. Remove snapshot 1 */
5453 
5454 	ut_blob_close_and_delete(bs, snapshot1);
5455 
5456 	/* Check that after removing snapshot1, snapshot2 no longer has a parent and keeps both of its clones */
5457 	CU_ASSERT(snapshot2->parent_id == SPDK_BLOBID_INVALID);
5458 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == SPDK_BLOBID_INVALID);
5459 
5460 	count = SPDK_COUNTOF(ids);
5461 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
5462 	CU_ASSERT(rc == 0);
5463 	CU_ASSERT(count == 2);
5464 	CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
5465 	CU_ASSERT(ids[0] == snapshotid3 || ids[1] == snapshotid3);
5466 
5467 	/* 11. Try to create clone from read only blob */
5468 
5469 	/* Mark blob as read only */
5470 	spdk_blob_set_read_only(blob);
5471 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
5472 	poll_threads();
5473 	CU_ASSERT(g_bserrno == 0);
5474 
5475 	/* Create clone from read only blob */
5476 	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5477 	poll_threads();
5478 	CU_ASSERT(g_bserrno == 0);
5479 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5480 	cloneid2 = g_blobid;
5481 
5482 	spdk_bs_open_blob(bs, cloneid2, blob_op_with_handle_complete, NULL);
5483 	poll_threads();
5484 	CU_ASSERT(g_bserrno == 0);
5485 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5486 	clone2 = g_blob;
5487 
5488 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid);
5489 
5490 	count = SPDK_COUNTOF(ids);
5491 	rc = spdk_blob_get_clones(bs, blobid, ids, &count);
5492 	CU_ASSERT(rc == 0);
5493 	CU_ASSERT(count == 1);
5494 	CU_ASSERT(ids[0] == cloneid2);
5495 
5496 	/* Close blobs */
5497 
5498 	spdk_blob_close(clone2, blob_op_complete, NULL);
5499 	poll_threads();
5500 	CU_ASSERT(g_bserrno == 0);
5501 
5502 	spdk_blob_close(blob, blob_op_complete, NULL);
5503 	poll_threads();
5504 	CU_ASSERT(g_bserrno == 0);
5505 
5506 	spdk_blob_close(clone, blob_op_complete, NULL);
5507 	poll_threads();
5508 	CU_ASSERT(g_bserrno == 0);
5509 
5510 	spdk_blob_close(snapshot2, blob_op_complete, NULL);
5511 	poll_threads();
5512 	CU_ASSERT(g_bserrno == 0);
5513 
5514 	spdk_blob_close(snapshot3, blob_op_complete, NULL);
5515 	poll_threads();
5516 	CU_ASSERT(g_bserrno == 0);
5517 
5518 	ut_bs_reload(&bs, &bs_opts);
5519 
5520 	/* Verify structure of loaded blob store */
5521 
5522 	/* snapshot2 */
5523 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == SPDK_BLOBID_INVALID);
5524 
5525 	count = SPDK_COUNTOF(ids);
5526 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
5527 	CU_ASSERT(rc == 0);
5528 	CU_ASSERT(count == 2);
5529 	CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
5530 	CU_ASSERT(ids[0] == snapshotid3 || ids[1] == snapshotid3);
5531 
5532 	/* blob */
5533 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid2);
5534 	count = SPDK_COUNTOF(ids);
5535 	rc = spdk_blob_get_clones(bs, blobid, ids, &count);
5536 	CU_ASSERT(rc == 0);
5537 	CU_ASSERT(count == 1);
5538 	CU_ASSERT(ids[0] == cloneid2);
5539 
5540 	/* clone */
5541 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3);
5542 	count = SPDK_COUNTOF(ids);
5543 	rc = spdk_blob_get_clones(bs, cloneid, ids, &count);
5544 	CU_ASSERT(rc == 0);
5545 	CU_ASSERT(count == 0);
5546 
5547 	/* snapshot3 */
5548 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid3) == snapshotid2);
5549 	count = SPDK_COUNTOF(ids);
5550 	rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count);
5551 	CU_ASSERT(rc == 0);
5552 	CU_ASSERT(count == 1);
5553 	CU_ASSERT(ids[0] == cloneid);
5554 
5555 	/* clone2 */
5556 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid);
5557 	count = SPDK_COUNTOF(ids);
5558 	rc = spdk_blob_get_clones(bs, cloneid2, ids, &count);
5559 	CU_ASSERT(rc == 0);
5560 	CU_ASSERT(count == 0);
5561 
5562 	/* Try to delete all blobs in the worst possible order */
5563 
5564 	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
5565 	poll_threads();
5566 	CU_ASSERT(g_bserrno != 0);
5567 
5568 	spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL);
5569 	poll_threads();
5570 	CU_ASSERT(g_bserrno == 0);
5571 
5572 	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
5573 	poll_threads();
5574 	CU_ASSERT(g_bserrno != 0);
5575 
5576 	spdk_bs_delete_blob(bs, cloneid, blob_op_complete, NULL);
5577 	poll_threads();
5578 	CU_ASSERT(g_bserrno == 0);
5579 
5580 	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
5581 	poll_threads();
5582 	CU_ASSERT(g_bserrno == 0);
5583 
5584 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
5585 	poll_threads();
5586 	CU_ASSERT(g_bserrno == 0);
5587 
5588 	spdk_bs_delete_blob(bs, cloneid2, blob_op_complete, NULL);
5589 	poll_threads();
5590 	CU_ASSERT(g_bserrno == 0);
5591 
5592 	spdk_bs_unload(bs, bs_op_complete, NULL);
5593 	poll_threads();
5594 	CU_ASSERT(g_bserrno == 0);
5595 
5596 	g_bs = NULL;
5597 }
5598 
5599 /**
5600  * Snapshot-clones relation test 3
5601  *
5602  *         snapshot0
5603  *            |
5604  *         snapshot1
5605  *            |
5606  *         snapshot2
5607  *            |
5608  *           blob
5609  */
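/* This test exercises spdk_bs_blob_decouple_parent() on both the leaf blob and a
 * mid-chain snapshot, and verifies that the updated metadata allows the decoupled
 * blobs to be deleted afterwards. */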
5610 static void
5611 blob_relations3(void)
5612 {
5613 	struct spdk_blob_store *bs;
5614 	struct spdk_bs_dev *dev;
5615 	struct spdk_io_channel *channel;
5616 	struct spdk_bs_opts bs_opts;
5617 	struct spdk_blob_opts opts;
5618 	struct spdk_blob *blob;
5619 	spdk_blob_id blobid, snapshotid0, snapshotid1, snapshotid2;
5620 
5621 	dev = init_dev();
5622 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
5623 	snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");
5624 
5625 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
5626 	poll_threads();
5627 	CU_ASSERT(g_bserrno == 0);
5628 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
5629 	bs = g_bs;
5630 
5631 	channel = spdk_bs_alloc_io_channel(bs);
5632 	SPDK_CU_ASSERT_FATAL(channel != NULL);
5633 
5634 	/* 1. Create blob with 10 clusters */
5635 	ut_spdk_blob_opts_init(&opts);
5636 	opts.num_clusters = 10;
5637 
5638 	blob = ut_blob_create_and_open(bs, &opts);
5639 	blobid = spdk_blob_get_id(blob);
5640 
5641 	/* 2. Create snapshot0 */
5642 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5643 	poll_threads();
5644 	CU_ASSERT(g_bserrno == 0);
5645 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5646 	snapshotid0 = g_blobid;
5647 
5648 	/* 3. Create snapshot1 */
5649 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5650 	poll_threads();
5651 	CU_ASSERT(g_bserrno == 0);
5652 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5653 	snapshotid1 = g_blobid;
5654 
5655 	/* 4. Create snapshot2 */
5656 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5657 	poll_threads();
5658 	CU_ASSERT(g_bserrno == 0);
5659 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5660 	snapshotid2 = g_blobid;
5661 
5662 	/* 5. Decouple blob */
5663 	spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
5664 	poll_threads();
5665 	CU_ASSERT(g_bserrno == 0);
5666 
5667 	/* 6. Decouple snapshot2. Make sure updating md of snapshot2 is possible */
5668 	spdk_bs_blob_decouple_parent(bs, channel, snapshotid2, blob_op_complete, NULL);
5669 	poll_threads();
5670 	CU_ASSERT(g_bserrno == 0);
5671 
5672 	/* 7. Delete blob */
5673 	spdk_blob_close(blob, blob_op_complete, NULL);
5674 	poll_threads();
5675 	CU_ASSERT(g_bserrno == 0);
5676 
5677 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
5678 	poll_threads();
5679 	CU_ASSERT(g_bserrno == 0);
5680 
5681 	/* 8. Delete snapshot2.
5682 	 * If the md of snapshot2 was updated correctly in step 6, it should now be possible to delete it */
5683 	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
5684 	poll_threads();
5685 	CU_ASSERT(g_bserrno == 0);
5686 
5687 	/* Remove remaining blobs and unload bs */
5688 	spdk_bs_delete_blob(bs, snapshotid1, blob_op_complete, NULL);
5689 	poll_threads();
5690 	CU_ASSERT(g_bserrno == 0);
5691 
5692 	spdk_bs_delete_blob(bs, snapshotid0, blob_op_complete, NULL);
5693 	poll_threads();
5694 	CU_ASSERT(g_bserrno == 0);
5695 
5696 	spdk_bs_free_io_channel(channel);
5697 	poll_threads();
5698 
5699 	spdk_bs_unload(bs, bs_op_complete, NULL);
5700 	poll_threads();
5701 	CU_ASSERT(g_bserrno == 0);
5702 
5703 	g_bs = NULL;
5704 }
5705 
5706 static void
5707 blobstore_clean_power_failure(void)
5708 {
5709 	struct spdk_blob_store *bs;
5710 	struct spdk_blob *blob;
5711 	struct spdk_power_failure_thresholds thresholds = {};
5712 	bool clean = false;
5713 	struct spdk_bs_super_block *super = (struct spdk_bs_super_block *)&g_dev_buffer[0];
5714 	struct spdk_bs_super_block super_copy = {};
5715 
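	/* Each iteration allows one more device operation before the simulated power
	 * failure (general_threshold), until the md sync finally completes and the
	 * clean/dirty flags can be verified. */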
5716 	thresholds.general_threshold = 1;
5717 	while (!clean) {
5718 		/* Create bs and blob */
5719 		suite_blob_setup();
5720 		SPDK_CU_ASSERT_FATAL(g_bs != NULL);
5721 		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5722 		bs = g_bs;
5723 		blob = g_blob;
5724 
5725 		/* The super block should not change for the rest of the UT;
5726 		 * save it and compare against it later. */
5727 		memcpy(&super_copy, super, sizeof(struct spdk_bs_super_block));
5728 		SPDK_CU_ASSERT_FATAL(super->clean == 0);
5729 		SPDK_CU_ASSERT_FATAL(bs->clean == 0);
5730 
5731 		/* Force the bs/super block into a clean state and mark the blob dirty,
5732 		 * so that the next md sync triggers a blob persist. */
5733 		blob->state = SPDK_BLOB_STATE_DIRTY;
5734 		bs->clean = 1;
5735 		super->clean = 1;
5736 		super->crc = blob_md_page_calc_crc(super);
5737 
5738 		g_bserrno = -1;
5739 		dev_set_power_failure_thresholds(thresholds);
5740 		spdk_blob_sync_md(blob, blob_op_complete, NULL);
5741 		poll_threads();
5742 		dev_reset_power_failure_event();
5743 
5744 		if (g_bserrno == 0) {
5745 			/* After successful md sync, both bs and super block
5746 			 * should be marked as not clean. */
5747 			SPDK_CU_ASSERT_FATAL(bs->clean == 0);
5748 			SPDK_CU_ASSERT_FATAL(super->clean == 0);
5749 			clean = true;
5750 		}
5751 
5752 		/* Depending on the point of failure, super block was either updated or not. */
5753 		super_copy.clean = super->clean;
5754 		super_copy.crc = blob_md_page_calc_crc(&super_copy);
5755 		/* Verify that the rest of the super block remained unchanged. */
5756 		SPDK_CU_ASSERT_FATAL(!memcmp(&super_copy, super, sizeof(struct spdk_bs_super_block)));
5757 
5758 		/* Delete blob and unload bs */
5759 		suite_blob_cleanup();
5760 
5761 		thresholds.general_threshold++;
5762 	}
5763 }
5764 
5765 static void
5766 blob_delete_snapshot_power_failure(void)
5767 {
5768 	struct spdk_bs_dev *dev;
5769 	struct spdk_blob_store *bs;
5770 	struct spdk_blob_opts opts;
5771 	struct spdk_blob *blob, *snapshot;
5772 	struct spdk_power_failure_thresholds thresholds = {};
5773 	spdk_blob_id blobid, snapshotid;
5774 	const void *value;
5775 	size_t value_len;
5776 	size_t count;
5777 	spdk_blob_id ids[3] = {};
5778 	int rc;
5779 	bool deleted = false;
5780 	int delete_snapshot_bserrno = -1;
5781 
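	/* Retry the snapshot deletion with an increasing power-failure threshold.
	 * After each simulated failure the blob store is dirty-loaded and the
	 * snapshot/clone state is checked for consistency, until the delete succeeds. */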
5782 	thresholds.general_threshold = 1;
5783 	while (!deleted) {
5784 		dev = init_dev();
5785 
5786 		spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
5787 		poll_threads();
5788 		CU_ASSERT(g_bserrno == 0);
5789 		SPDK_CU_ASSERT_FATAL(g_bs != NULL);
5790 		bs = g_bs;
5791 
5792 		/* Create blob */
5793 		ut_spdk_blob_opts_init(&opts);
5794 		opts.num_clusters = 10;
5795 
5796 		spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
5797 		poll_threads();
5798 		CU_ASSERT(g_bserrno == 0);
5799 		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5800 		blobid = g_blobid;
5801 
5802 		/* Create snapshot */
5803 		spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5804 		poll_threads();
5805 		CU_ASSERT(g_bserrno == 0);
5806 		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5807 		snapshotid = g_blobid;
5808 		SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1));
5809 		SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11));
5810 
5811 		dev_set_power_failure_thresholds(thresholds);
5812 
5813 		spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
5814 		poll_threads();
5815 		delete_snapshot_bserrno = g_bserrno;
5816 
5817 		/* Do not shut down cleanly. The assumption is that once snapshot deletion
5818 		 * reports success, the changes to both blobs are already persisted. */
5819 		dev_reset_power_failure_event();
5820 		ut_bs_dirty_load(&bs, NULL);
5821 
5822 		SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1));
5823 		SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11));
5824 
5825 		spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
5826 		poll_threads();
5827 		CU_ASSERT(g_bserrno == 0);
5828 		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5829 		blob = g_blob;
5830 		SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == true);
5831 
5832 		spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
5833 		poll_threads();
5834 
5835 		if (g_bserrno == 0) {
5836 			SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5837 			snapshot = g_blob;
5838 			CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
5839 			count = SPDK_COUNTOF(ids);
5840 			rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
5841 			CU_ASSERT(rc == 0);
5842 			CU_ASSERT(count == 1);
5843 			CU_ASSERT(ids[0] == blobid);
5844 			rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_PENDING_REMOVAL, &value, &value_len);
5845 			CU_ASSERT(rc != 0);
5846 			SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(snapshot) == false);
5847 
5848 			spdk_blob_close(snapshot, blob_op_complete, NULL);
5849 			poll_threads();
5850 			CU_ASSERT(g_bserrno == 0);
5851 		} else {
5852 			CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID);
5853 			/* The snapshot may have been left in an unrecoverable state, so it fails to open.
5854 			 * The delete may still perform further changes to the clone after that point,
5855 			 * so this UT keeps iterating until the snapshot is deleted and the delete call succeeds. */
5856 			if (delete_snapshot_bserrno == 0) {
5857 				deleted = true;
5858 			}
5859 		}
5860 
5861 		spdk_blob_close(blob, blob_op_complete, NULL);
5862 		poll_threads();
5863 		CU_ASSERT(g_bserrno == 0);
5864 
5865 		spdk_bs_unload(bs, bs_op_complete, NULL);
5866 		poll_threads();
5867 		CU_ASSERT(g_bserrno == 0);
5868 
5869 		thresholds.general_threshold++;
5870 	}
5871 }
5872 
5873 static void
5874 blob_create_snapshot_power_failure(void)
5875 {
5876 	struct spdk_blob_store *bs = g_bs;
5877 	struct spdk_bs_dev *dev;
5878 	struct spdk_blob_opts opts;
5879 	struct spdk_blob *blob, *snapshot;
5880 	struct spdk_power_failure_thresholds thresholds = {};
5881 	spdk_blob_id blobid, snapshotid;
5882 	const void *value;
5883 	size_t value_len;
5884 	size_t count;
5885 	spdk_blob_id ids[3] = {};
5886 	int rc;
5887 	bool created = false;
5888 	int create_snapshot_bserrno = -1;
5889 
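	/* Same approach as above: retry snapshot creation with an increasing
	 * power-failure threshold and verify consistency after each dirty load,
	 * until the creation succeeds. */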
5890 	thresholds.general_threshold = 1;
5891 	while (!created) {
5892 		dev = init_dev();
5893 
5894 		spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
5895 		poll_threads();
5896 		CU_ASSERT(g_bserrno == 0);
5897 		SPDK_CU_ASSERT_FATAL(g_bs != NULL);
5898 		bs = g_bs;
5899 
5900 		/* Create blob */
5901 		ut_spdk_blob_opts_init(&opts);
5902 		opts.num_clusters = 10;
5903 
5904 		spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
5905 		poll_threads();
5906 		CU_ASSERT(g_bserrno == 0);
5907 		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5908 		blobid = g_blobid;
5909 		SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1));
5910 		SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11));
5911 
5912 		dev_set_power_failure_thresholds(thresholds);
5913 
5914 		/* Create snapshot */
5915 		spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5916 		poll_threads();
5917 		create_snapshot_bserrno = g_bserrno;
5918 		snapshotid = g_blobid;
5919 		SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1));
5920 		SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11));
5921 
5922 		/* Do not shut down cleanly. The assumption is that once snapshot creation
5923 		 * reports success, both blobs are power-fail safe. */
5924 		dev_reset_power_failure_event();
5925 		ut_bs_dirty_load(&bs, NULL);
5926 
5927 		SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1));
5928 		SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11));
5929 
5930 		spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
5931 		poll_threads();
5932 		CU_ASSERT(g_bserrno == 0);
5933 		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5934 		blob = g_blob;
5935 
5936 		if (snapshotid != SPDK_BLOBID_INVALID) {
5937 			spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
5938 			poll_threads();
5939 		}
5940 
5941 		if ((snapshotid != SPDK_BLOBID_INVALID) && (g_bserrno == 0)) {
5942 			SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5943 			snapshot = g_blob;
5944 			SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == true);
5945 			SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(snapshot) == false);
5946 			CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
5947 			count = SPDK_COUNTOF(ids);
5948 			rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
5949 			CU_ASSERT(rc == 0);
5950 			CU_ASSERT(count == 1);
5951 			CU_ASSERT(ids[0] == blobid);
5952 			rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_IN_PROGRESS, &value, &value_len);
5953 			CU_ASSERT(rc != 0);
5954 
5955 			spdk_blob_close(snapshot, blob_op_complete, NULL);
5956 			poll_threads();
5957 			CU_ASSERT(g_bserrno == 0);
5958 			if (create_snapshot_bserrno == 0) {
5959 				created = true;
5960 			}
5961 		} else {
5962 			CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID);
5963 			SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == false);
5964 		}
5965 
5966 		spdk_blob_close(blob, blob_op_complete, NULL);
5967 		poll_threads();
5968 		CU_ASSERT(g_bserrno == 0);
5969 
5970 		spdk_bs_unload(bs, bs_op_complete, NULL);
5971 		poll_threads();
5972 		CU_ASSERT(g_bserrno == 0);
5973 
5974 		thresholds.general_threshold++;
5975 	}
5976 }
5977 
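/* The I/O helpers below write known byte patterns to a two-cluster blob and verify
 * the result both through the blob API and directly in the UT backing buffer
 * (g_dev_buffer). */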
5978 static void
5979 test_io_write(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
5980 {
5981 	uint8_t payload_ff[64 * 512];
5982 	uint8_t payload_aa[64 * 512];
5983 	uint8_t payload_00[64 * 512];
5984 	uint8_t *cluster0, *cluster1;
5985 
5986 	memset(payload_ff, 0xFF, sizeof(payload_ff));
5987 	memset(payload_aa, 0xAA, sizeof(payload_aa));
5988 	memset(payload_00, 0x00, sizeof(payload_00));
5989 
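	/* Layout assumed by the diagrams below: 512-byte io_units, 8 io_units per page
	 * and 4 pages (32 io_units) per cluster. */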
5990 	/* Try to perform I/O with io unit = 512 */
5991 	spdk_blob_io_write(blob, channel, payload_ff, 0, 1, blob_op_complete, NULL);
5992 	poll_threads();
5993 	CU_ASSERT(g_bserrno == 0);
5994 
5995 	/* If the blob is thin provisioned, the first cluster should be allocated now */
5996 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[0] != 0);
5997 	cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen];
5998 
5999 	/* Each character 0-F represents a single 512-byte io_unit filled with that value.
6000 	 * Pages are separated by '|'; the whole bracketed block [...] is one cluster (4 pages). */
6001 	/* cluster0: [ F000 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
6002 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6003 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 31 * 512) == 0);
6004 
6005 	/* Verify write with offset on first page */
6006 	spdk_blob_io_write(blob, channel, payload_ff, 2, 1, blob_op_complete, NULL);
6007 	poll_threads();
6008 	CU_ASSERT(g_bserrno == 0);
6009 
6010 	/* cluster0: [ F0F0 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
6011 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6012 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6013 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6014 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6015 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_00, 28 * 512) == 0);
6016 
6017 	/* Verify a write of four io_units, still within the first page */
6018 	spdk_blob_io_write(blob, channel, payload_ff, 4, 4, blob_op_complete, NULL);
6019 	poll_threads();
6020 
6021 	/* cluster0: [ F0F0 FFFF | 0000 0000 | 0000 0000 | 0000 0000 ] */
6022 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6023 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6024 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6025 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6026 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 4 * 512) == 0);
6027 	CU_ASSERT(memcmp(cluster0 + 8 * 512, payload_00, 24 * 512) == 0);
6028 
6029 	/* Verify write with offset on second page */
6030 	spdk_blob_io_write(blob, channel, payload_ff, 8, 4, blob_op_complete, NULL);
6031 	poll_threads();
6032 
6033 	/* cluster0: [ F0F0 FFFF | FFFF 0000 | 0000 0000 | 0000 0000 ] */
6034 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6035 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6036 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6037 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6038 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 8 * 512) == 0);
6039 	CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0);
6040 
6041 	/* Verify write across multiple pages */
6042 	spdk_blob_io_write(blob, channel, payload_aa, 4, 8, blob_op_complete, NULL);
6043 	poll_threads();
6044 
6045 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 0000 ] */
6046 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6047 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6048 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6049 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6050 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
6051 	CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0);
6052 
6053 	/* Verify write across multiple clusters */
6054 	spdk_blob_io_write(blob, channel, payload_ff, 28, 8, blob_op_complete, NULL);
6055 	poll_threads();
6056 
6057 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0);
6058 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
6059 
6060 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6061 	 * cluster1: [ FFFF 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
6062 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6063 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6064 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6065 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6066 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
6067 	CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0);
6068 
6069 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0);
6070 	CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 28 * 512) == 0);
6071 
6072 	/* Verify write to second cluster */
6073 	spdk_blob_io_write(blob, channel, payload_ff, 32 + 12, 2, blob_op_complete, NULL);
6074 	poll_threads();
6075 
6076 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0);
6077 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
6078 
6079 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6080 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] */
6081 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6082 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6083 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6084 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6085 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
6086 	CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0);
6087 
6088 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0);
6089 	CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 8 * 512) == 0);
6090 	CU_ASSERT(memcmp(cluster1 + 12 * 512, payload_ff, 2 * 512) == 0);
6091 	CU_ASSERT(memcmp(cluster1 + 14 * 512, payload_00, 18 * 512) == 0);
6092 }
6093 
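/* Read back the pattern left by test_io_write() and verify it io_unit by io_unit,
 * including reads that span page and cluster boundaries. */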
6094 static void
6095 test_io_read(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
6096 {
6097 	uint8_t payload_read[64 * 512];
6098 	uint8_t payload_ff[64 * 512];
6099 	uint8_t payload_aa[64 * 512];
6100 	uint8_t payload_00[64 * 512];
6101 
6102 	memset(payload_ff, 0xFF, sizeof(payload_ff));
6103 	memset(payload_aa, 0xAA, sizeof(payload_aa));
6104 	memset(payload_00, 0x00, sizeof(payload_00));
6105 
6106 	/* Read only first io unit */
6107 	/* cluster0: [ (F)0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6108 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
6109 	 * payload_read: F000 0000 | 0000 0000 ... */
6110 	memset(payload_read, 0x00, sizeof(payload_read));
6111 	spdk_blob_io_read(blob, channel, payload_read, 0, 1, blob_op_complete, NULL);
6112 	poll_threads();
6113 	CU_ASSERT(g_bserrno == 0);
6114 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
6115 	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 31 * 512) == 0);
6116 
6117 	/* Read four io_units starting from offset = 2
6118 	 * cluster0: [ F0(F0 AA)AA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6119 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
6120 	 * payload_read: F0AA 0000 | 0000 0000 ... */
6121 
6122 	memset(payload_read, 0x00, sizeof(payload_read));
6123 	spdk_blob_io_read(blob, channel, payload_read, 2, 4, blob_op_complete, NULL);
6124 	poll_threads();
6125 	CU_ASSERT(g_bserrno == 0);
6126 
6127 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
6128 	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0);
6129 	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_aa, 512) == 0);
6130 	CU_ASSERT(memcmp(payload_read + 3 * 512, payload_aa, 512) == 0);
6131 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0);
6132 
6133 	/* Read eight io_units across multiple pages
6134 	 * cluster0: [ F0F0 (AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ]
6135 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
6136 	 * payload_read: AAAA AAAA | 0000 0000 ... */
6137 	memset(payload_read, 0x00, sizeof(payload_read));
6138 	spdk_blob_io_read(blob, channel, payload_read, 4, 8, blob_op_complete, NULL);
6139 	poll_threads();
6140 	CU_ASSERT(g_bserrno == 0);
6141 
6142 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_aa, 8 * 512) == 0);
6143 	CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0);
6144 
6145 	/* Read eight io_units across multiple clusters
6146 	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 (FFFF ]
6147 	 * cluster1: [ FFFF) 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
6148 	 * payload_read: FFFF FFFF | 0000 0000 ... */
6149 	memset(payload_read, 0x00, sizeof(payload_read));
6150 	spdk_blob_io_read(blob, channel, payload_read, 28, 8, blob_op_complete, NULL);
6151 	poll_threads();
6152 	CU_ASSERT(g_bserrno == 0);
6153 
6154 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 8 * 512) == 0);
6155 	CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0);
6156 
6157 	/* Read four io_units from second cluster
6158 	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6159 	 * cluster1: [ FFFF 0000 | 00(00 FF)00 | 0000 0000 | 0000 0000 ]
6160 	 * payload_read: 00FF 0000 | 0000 0000 ... */
6161 	memset(payload_read, 0x00, sizeof(payload_read));
6162 	spdk_blob_io_read(blob, channel, payload_read, 32 + 10, 4, blob_op_complete, NULL);
6163 	poll_threads();
6164 	CU_ASSERT(g_bserrno == 0);
6165 
6166 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_00, 2 * 512) == 0);
6167 	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 2 * 512) == 0);
6168 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0);
6169 
6170 	/* Read second cluster
6171 	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6172 	 * cluster1: [ (FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ]
6173 	 * payload_read: FFFF 0000 | 0000 FF00 ... */
6174 	memset(payload_read, 0x00, sizeof(payload_read));
6175 	spdk_blob_io_read(blob, channel, payload_read, 32, 32, blob_op_complete, NULL);
6176 	poll_threads();
6177 	CU_ASSERT(g_bserrno == 0);
6178 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 4 * 512) == 0);
6179 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 8 * 512) == 0);
6180 	CU_ASSERT(memcmp(payload_read + 12 * 512, payload_ff, 2 * 512) == 0);
6181 	CU_ASSERT(memcmp(payload_read + 14 * 512, payload_00, 18 * 512) == 0);
6182 
6183 	/* Read whole two clusters
6184 	 * cluster0: [ (F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6185 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ] */
6186 	memset(payload_read, 0x00, sizeof(payload_read));
6187 	spdk_blob_io_read(blob, channel, payload_read, 0, 64, blob_op_complete, NULL);
6188 	poll_threads();
6189 	CU_ASSERT(g_bserrno == 0);
6190 
6191 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
6192 	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0);
6193 	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 512) == 0);
6194 	CU_ASSERT(memcmp(payload_read + 3 * 512, payload_00, 512) == 0);
6195 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_aa, 8 * 512) == 0);
6196 	CU_ASSERT(memcmp(payload_read + 28 * 512, payload_ff, 4 * 512) == 0);
6197 
6198 	CU_ASSERT(memcmp(payload_read + (32 + 0) * 512, payload_ff, 4 * 512) == 0);
6199 	CU_ASSERT(memcmp(payload_read + (32 + 4) * 512, payload_00, 8 * 512) == 0);
6200 	CU_ASSERT(memcmp(payload_read + (32 + 12) * 512, payload_ff, 2 * 512) == 0);
6201 	CU_ASSERT(memcmp(payload_read + (32 + 14) * 512, payload_00, 18 * 512) == 0);
6202 }
6203 
6204 
6205 static void
6206 test_io_unmap(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
6207 {
6208 	uint8_t payload_ff[64 * 512];
6209 	uint8_t payload_aa[64 * 512];
6210 	uint8_t payload_00[64 * 512];
6211 	uint8_t *cluster0, *cluster1;
6212 
6213 	memset(payload_ff, 0xFF, sizeof(payload_ff));
6214 	memset(payload_aa, 0xAA, sizeof(payload_aa));
6215 	memset(payload_00, 0x00, sizeof(payload_00));
6216 
6217 	cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen];
6218 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
6219 
6220 	/* Unmap the whole blob; both clusters' backing buffers should be zeroed afterwards */
6221 	spdk_blob_io_unmap(blob, channel, 0, 64, blob_op_complete, NULL);
6222 	poll_threads();
6223 
6224 	CU_ASSERT(g_bserrno == 0);
6225 
6226 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_00, 32 * 512) == 0);
6227 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_00, 32 * 512) == 0);
6228 }
6229 
6230 static void
6231 test_io_zeroes(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
6232 {
6233 	uint8_t payload_ff[64 * 512];
6234 	uint8_t payload_aa[64 * 512];
6235 	uint8_t payload_00[64 * 512];
6236 	uint8_t *cluster0, *cluster1;
6237 
6238 	memset(payload_ff, 0xFF, sizeof(payload_ff));
6239 	memset(payload_aa, 0xAA, sizeof(payload_aa));
6240 	memset(payload_00, 0x00, sizeof(payload_00));
6241 
6242 	cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen];
6243 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
6244 
6245 	/* Write zeroes across both clusters */
6246 	spdk_blob_io_write_zeroes(blob, channel, 0, 64, blob_op_complete, NULL);
6247 	poll_threads();
6248 
6249 	CU_ASSERT(g_bserrno == 0);
6250 
6251 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_00, 32 * 512) == 0);
6252 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_00, 32 * 512) == 0);
6253 }
6254 
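/* Thin wrapper around the writev APIs: when io_opts is provided it uses the _ext
 * variant and checks that the options were passed down to the bs_dev (recorded by
 * the UT dev in g_dev_writev_ext_called and g_blob_ext_io_opts). */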
6255 static inline void
6256 test_blob_io_writev(struct spdk_blob *blob, struct spdk_io_channel *channel,
6257 		    struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
6258 		    spdk_blob_op_complete cb_fn, void *cb_arg, struct spdk_blob_ext_io_opts *io_opts)
6259 {
6260 	if (io_opts) {
6261 		g_dev_writev_ext_called = false;
6262 		memset(&g_blob_ext_io_opts, 0, sizeof(g_blob_ext_io_opts));
6263 		spdk_blob_io_writev_ext(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg,
6264 					io_opts);
6265 	} else {
6266 		spdk_blob_io_writev(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg);
6267 	}
6268 	poll_threads();
6269 	CU_ASSERT(g_bserrno == 0);
6270 	if (io_opts) {
6271 		CU_ASSERT(g_dev_writev_ext_called);
6272 		CU_ASSERT(memcmp(io_opts, &g_blob_ext_io_opts, sizeof(g_blob_ext_io_opts)) == 0);
6273 	}
6274 }
6275 
6276 static void
6277 test_iov_write(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel,
6278 	       bool ext_api)
6279 {
6280 	uint8_t payload_ff[64 * 512];
6281 	uint8_t payload_aa[64 * 512];
6282 	uint8_t payload_00[64 * 512];
6283 	uint8_t *cluster0, *cluster1;
6284 	struct iovec iov[4];
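	/* Dummy, never-dereferenced values; the UT dev only records them so the opts
	 * can be compared in test_blob_io_writev(). */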
6285 	struct spdk_blob_ext_io_opts ext_opts = {
6286 		.memory_domain = (struct spdk_memory_domain *)0xfeedbeef,
6287 		.memory_domain_ctx = (void *)0xf00df00d,
6288 		.size = sizeof(struct spdk_blob_ext_io_opts),
6289 		.user_ctx = (void *)123,
6290 	};
6291 
6292 	memset(payload_ff, 0xFF, sizeof(payload_ff));
6293 	memset(payload_aa, 0xAA, sizeof(payload_aa));
6294 	memset(payload_00, 0x00, sizeof(payload_00));
6295 
6296 	/* Try to perform I/O with io unit = 512 */
6297 	iov[0].iov_base = payload_ff;
6298 	iov[0].iov_len = 1 * 512;
6299 
6300 	test_blob_io_writev(blob, channel, iov, 1, 0, 1, blob_op_complete, NULL,
6301 			    ext_api ? &ext_opts : NULL);
6302 
6303 	/* If the blob is thin provisioned, the first cluster should be allocated now */
6304 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[0] != 0);
6305 	cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen];
6306 
6307 	/* Each character 0-F represents a single 512-byte io_unit filled with that value.
6308 	 * Pages are separated by '|'; the whole bracketed block [...] is one cluster (4 pages). */
6309 	/* cluster0: [ F000 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
6310 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6311 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 31 * 512) == 0);
6312 
6313 	/* Verify write with offset on first page */
6314 	iov[0].iov_base = payload_ff;
6315 	iov[0].iov_len = 1 * 512;
6316 
6317 	test_blob_io_writev(blob, channel, iov, 1, 2, 1, blob_op_complete, NULL,
6318 			    ext_api ? &ext_opts : NULL);
6319 
6320 	/* cluster0: [ F0F0 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
6321 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6322 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6323 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6324 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6325 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_00, 28 * 512) == 0);
6326 
6327 	/* Verify a write of four io_units, still within the first page */
6328 	iov[0].iov_base = payload_ff;
6329 	iov[0].iov_len = 4 * 512;
6330 	spdk_blob_io_writev(blob, channel, iov, 1, 4, 4, blob_op_complete, NULL);
6331 	poll_threads();
6332 
6333 	/* cluster0: [ F0F0 FFFF | 0000 0000 | 0000 0000 | 0000 0000 ] */
6334 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6335 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6336 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6337 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6338 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 4 * 512) == 0);
6339 	CU_ASSERT(memcmp(cluster0 + 8 * 512, payload_00, 24 * 512) == 0);
6340 
6341 	/* Verify write with offset on second page */
6342 	iov[0].iov_base = payload_ff;
6343 	iov[0].iov_len = 4 * 512;
6344 	spdk_blob_io_writev(blob, channel, iov, 1, 8, 4, blob_op_complete, NULL);
6345 	poll_threads();
6346 
6347 	/* cluster0: [ F0F0 FFFF | FFFF 0000 | 0000 0000 | 0000 0000 ] */
6348 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6349 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6350 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6351 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6352 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 8 * 512) == 0);
6353 	CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0);
6354 
6355 	/* Verify write across multiple pages */
6356 	iov[0].iov_base = payload_aa;
6357 	iov[0].iov_len = 8 * 512;
6358 
6359 	test_blob_io_writev(blob, channel, iov, 1, 4, 8, blob_op_complete, NULL,
6360 			    ext_api ? &ext_opts : NULL);
6361 
6362 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 0000 ] */
6363 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6364 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6365 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6366 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6367 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
6368 	CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0);
6369 
6370 	/* Verify write across multiple clusters */
6371 
6372 	iov[0].iov_base = payload_ff;
6373 	iov[0].iov_len = 8 * 512;
6374 
6375 	test_blob_io_writev(blob, channel, iov, 1, 28, 8, blob_op_complete, NULL,
6376 			    ext_api ? &ext_opts : NULL);
6377 
6378 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0);
6379 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
6380 
6381 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6382 	 * cluster1: [ FFFF 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
6383 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6384 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6385 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6386 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6387 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
6388 	CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 16 * 512) == 0);
6389 	CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0);
6390 
6391 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0);
6392 	CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 28 * 512) == 0);
6393 
6394 	/* Verify write to second cluster */
6395 
6396 	iov[0].iov_base = payload_ff;
6397 	iov[0].iov_len = 2 * 512;
6398 
6399 	test_blob_io_writev(blob, channel, iov, 1, 32 + 12, 2, blob_op_complete, NULL,
6400 			    ext_api ? &ext_opts : NULL);
6401 
6402 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0);
6403 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
6404 
6405 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6406 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] */
6407 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6408 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6409 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6410 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6411 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
6412 	CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0);
6413 
6414 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0);
6415 	CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 8 * 512) == 0);
6416 	CU_ASSERT(memcmp(cluster1 + 12 * 512, payload_ff, 2 * 512) == 0);
6417 	CU_ASSERT(memcmp(cluster1 + 14 * 512, payload_00, 18 * 512) == 0);
6418 }
6419 
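/* Readv counterpart of the helper above: optionally exercises the _ext API and
 * verifies that io_opts reached the bs_dev. */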
6420 static inline void
6421 test_blob_io_readv(struct spdk_blob *blob, struct spdk_io_channel *channel,
6422 		   struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
6423 		   spdk_blob_op_complete cb_fn, void *cb_arg, struct spdk_blob_ext_io_opts *io_opts)
6424 {
6425 	if (io_opts) {
6426 		g_dev_readv_ext_called = false;
6427 		memset(&g_blob_ext_io_opts, 0, sizeof(g_blob_ext_io_opts));
6428 		spdk_blob_io_readv_ext(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg, io_opts);
6429 	} else {
6430 		spdk_blob_io_readv(blob, channel, iov, iovcnt, offset, length, cb_fn, cb_arg);
6431 	}
6432 	poll_threads();
6433 	CU_ASSERT(g_bserrno == 0);
6434 	if (io_opts) {
6435 		CU_ASSERT(g_dev_readv_ext_called);
6436 		CU_ASSERT(memcmp(io_opts, &g_blob_ext_io_opts, sizeof(g_blob_ext_io_opts)) == 0);
6437 	}
6438 }
6439 
6440 static void
6441 test_iov_read(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel,
6442 	      bool ext_api)
6443 {
6444 	uint8_t payload_read[64 * 512];
6445 	uint8_t payload_ff[64 * 512];
6446 	uint8_t payload_aa[64 * 512];
6447 	uint8_t payload_00[64 * 512];
6448 	struct iovec iov[4];
6449 	struct spdk_blob_ext_io_opts ext_opts = {
6450 		.memory_domain = (struct spdk_memory_domain *)0xfeedbeef,
6451 		.memory_domain_ctx = (void *)0xf00df00d,
6452 		.size = sizeof(struct spdk_blob_ext_io_opts),
6453 		.user_ctx = (void *)123,
6454 	};
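	/* The memory_domain and memory_domain_ctx values above are bogus pointers used purely as
	 * sentinels: the UT bs_dev stub records the ext opts it receives in g_blob_ext_io_opts for
	 * comparison and is not expected to dereference them. */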
6455 
6456 	memset(payload_ff, 0xFF, sizeof(payload_ff));
6457 	memset(payload_aa, 0xAA, sizeof(payload_aa));
6458 	memset(payload_00, 0x00, sizeof(payload_00));
6459 
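	/* Cluster-map notation used below: each character is one 512-byte io unit (F = 0xFF,
	 * A = 0xAA, 0 = 0x00), '|' separates 4 KiB pages of 8 io units each, and parentheses
	 * mark the io units covered by the read. */
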
6460 	/* Read only first io unit */
6461 	/* cluster0: [ (F)0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6462 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
6463 	 * payload_read: F000 0000 | 0000 0000 ... */
6464 	memset(payload_read, 0x00, sizeof(payload_read));
6465 	iov[0].iov_base = payload_read;
6466 	iov[0].iov_len = 1 * 512;
6467 
6468 	test_blob_io_readv(blob, channel, iov, 1, 0, 1, blob_op_complete, NULL, ext_api ? &ext_opts : NULL);
6469 
6470 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
6471 	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 31 * 512) == 0);
6472 
6473 	/* Read four io_units starting from offset = 2
6474 	 * cluster0: [ F0(F0 AA)AA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6475 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
6476 	 * payload_read: F0AA 0000 | 0000 0000 ... */
6477 
6478 	memset(payload_read, 0x00, sizeof(payload_read));
6479 	iov[0].iov_base = payload_read;
6480 	iov[0].iov_len = 4 * 512;
6481 
6482 	test_blob_io_readv(blob, channel, iov, 1, 2, 4, blob_op_complete, NULL, ext_api ? &ext_opts : NULL);
6483 
6484 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
6485 	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0);
6486 	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_aa, 512) == 0);
6487 	CU_ASSERT(memcmp(payload_read + 3 * 512, payload_aa, 512) == 0);
6488 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0);
6489 
6490 	/* Read eight io_units across multiple pages
6491 	 * cluster0: [ F0F0 (AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ]
6492 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
6493 	 * payload_read: AAAA AAAA | 0000 0000 ... */
6494 	memset(payload_read, 0x00, sizeof(payload_read));
6495 	iov[0].iov_base = payload_read;
6496 	iov[0].iov_len = 4 * 512;
6497 	iov[1].iov_base = payload_read + 4 * 512;
6498 	iov[1].iov_len = 4 * 512;
6499 
6500 	test_blob_io_readv(blob, channel, iov, 2, 4, 8, blob_op_complete, NULL, ext_api ? &ext_opts : NULL);
6501 
6502 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_aa, 8 * 512) == 0);
6503 	CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0);
6504 
6505 	/* Read eight io_units across multiple clusters
6506 	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 (FFFF ]
6507 	 * cluster1: [ FFFF) 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
6508 	 * payload_read: FFFF FFFF | 0000 0000 ... */
6509 	memset(payload_read, 0x00, sizeof(payload_read));
6510 	iov[0].iov_base = payload_read;
6511 	iov[0].iov_len = 2 * 512;
6512 	iov[1].iov_base = payload_read + 2 * 512;
6513 	iov[1].iov_len = 2 * 512;
6514 	iov[2].iov_base = payload_read + 4 * 512;
6515 	iov[2].iov_len = 2 * 512;
6516 	iov[3].iov_base = payload_read + 6 * 512;
6517 	iov[3].iov_len = 2 * 512;
6518 
6519 	test_blob_io_readv(blob, channel, iov, 4, 28, 8, blob_op_complete, NULL,
6520 			   ext_api ? &ext_opts : NULL);
6521 
6522 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 8 * 512) == 0);
6523 	CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0);
6524 
6525 	/* Read four io_units from second cluster
6526 	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6527 	 * cluster1: [ FFFF 0000 | 00(00 FF)00 | 0000 0000 | 0000 0000 ]
6528 	 * payload_read: 00FF 0000 | 0000 0000 ... */
6529 	memset(payload_read, 0x00, sizeof(payload_read));
6530 	iov[0].iov_base = payload_read;
6531 	iov[0].iov_len = 1 * 512;
6532 	iov[1].iov_base = payload_read + 1 * 512;
6533 	iov[1].iov_len = 3 * 512;
6534 
6535 	test_blob_io_readv(blob, channel, iov, 2, 32 + 10, 4, blob_op_complete, NULL,
6536 			   ext_api ? &ext_opts : NULL);
6537 
6538 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_00, 2 * 512) == 0);
6539 	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 2 * 512) == 0);
6540 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0);
6541 
6542 	/* Read second cluster
6543 	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6544 	 * cluster1: [ (FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ]
6545 	 * payload_read: FFFF 0000 | 0000 FF00 ... */
6546 	memset(payload_read, 0x00, sizeof(payload_read));
6547 	iov[0].iov_base = payload_read;
6548 	iov[0].iov_len = 1 * 512;
6549 	iov[1].iov_base = payload_read + 1 * 512;
6550 	iov[1].iov_len = 2 * 512;
6551 	iov[2].iov_base = payload_read + 3 * 512;
6552 	iov[2].iov_len = 4 * 512;
6553 	iov[3].iov_base = payload_read + 7 * 512;
6554 	iov[3].iov_len = 25 * 512;
6555 
6556 	test_blob_io_readv(blob, channel, iov, 4, 32, 32, blob_op_complete, NULL,
6557 			   ext_api ? &ext_opts : NULL);
6558 
6559 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 4 * 512) == 0);
6560 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 8 * 512) == 0);
6561 	CU_ASSERT(memcmp(payload_read + 12 * 512, payload_ff, 2 * 512) == 0);
6562 	CU_ASSERT(memcmp(payload_read + 14 * 512, payload_00, 18 * 512) == 0);
6563 
6564 	/* Read whole two clusters
6565 	 * cluster0: [ (F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6566 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ] */
6567 	memset(payload_read, 0x00, sizeof(payload_read));
6568 	iov[0].iov_base = payload_read;
6569 	iov[0].iov_len = 1 * 512;
6570 	iov[1].iov_base = payload_read + 1 * 512;
6571 	iov[1].iov_len = 8 * 512;
6572 	iov[2].iov_base = payload_read + 9 * 512;
6573 	iov[2].iov_len = 16 * 512;
6574 	iov[3].iov_base = payload_read + 25 * 512;
6575 	iov[3].iov_len = 39 * 512;
6576 
6577 	test_blob_io_readv(blob, channel, iov, 4, 0, 64, blob_op_complete, NULL,
6578 			   ext_api ? &ext_opts : NULL);
6579 
6580 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
6581 	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0);
6582 	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 512) == 0);
6583 	CU_ASSERT(memcmp(payload_read + 3 * 512, payload_00, 512) == 0);
6584 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_aa, 8 * 512) == 0);
6585 	CU_ASSERT(memcmp(payload_read + 28 * 512, payload_ff, 4 * 512) == 0);
6586 
6587 	CU_ASSERT(memcmp(payload_read + (32 + 0) * 512, payload_ff, 4 * 512) == 0);
6588 	CU_ASSERT(memcmp(payload_read + (32 + 4) * 512, payload_00, 8 * 512) == 0);
6589 	CU_ASSERT(memcmp(payload_read + (32 + 12) * 512, payload_ff, 2 * 512) == 0);
6590 	CU_ASSERT(memcmp(payload_read + (32 + 14) * 512, payload_00, 18 * 512) == 0);
6591 }
6592 
6593 static void
6594 blob_io_unit(void)
6595 {
6596 	struct spdk_bs_opts bsopts;
6597 	struct spdk_blob_opts opts;
6598 	struct spdk_blob_store *bs;
6599 	struct spdk_bs_dev *dev;
6600 	struct spdk_blob *blob, *snapshot, *clone;
6601 	spdk_blob_id blobid;
6602 	struct spdk_io_channel *channel;
6603 
6604 	/* Create a dev with a 512-byte io unit size */
6605 
6606 	spdk_bs_opts_init(&bsopts, sizeof(bsopts));
6607 	bsopts.cluster_sz = SPDK_BS_PAGE_SIZE * 4;	/* 4 pages * 8 io_units per page = 32 io_units */
6608 	snprintf(bsopts.bstype.bstype, sizeof(bsopts.bstype.bstype), "TESTTYPE");
6609 
6610 	/* Create the backing dev with a 512-byte block size */
6611 	dev = init_dev();
6612 	dev->blocklen = 512;
6613 	dev->blockcnt =  DEV_BUFFER_SIZE / dev->blocklen;
6614 
6615 	/* Initialize a new blob store */
6616 	spdk_bs_init(dev, &bsopts, bs_op_with_handle_complete, NULL);
6617 	poll_threads();
6618 	CU_ASSERT(g_bserrno == 0);
6619 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
6620 	bs = g_bs;
6621 
6622 	CU_ASSERT(spdk_bs_get_io_unit_size(bs) == 512);
6623 	channel = spdk_bs_alloc_io_channel(bs);
6624 
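	/* With a 512-byte io unit and a 4-page (16 KiB) cluster, each cluster spans 32 io units,
	 * so the test_io_*/test_iov_* helpers address cluster 0 at offsets 0-31 and cluster 1
	 * at offsets 32-63. */
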
6625 	/* Create thick provisioned blob */
6626 	ut_spdk_blob_opts_init(&opts);
6627 	opts.thin_provision = false;
6628 	opts.num_clusters = 32;
6629 
6630 	blob = ut_blob_create_and_open(bs, &opts);
6631 	blobid = spdk_blob_get_id(blob);
6632 
6633 	test_io_write(dev, blob, channel);
6634 	test_io_read(dev, blob, channel);
6635 	test_io_zeroes(dev, blob, channel);
6636 
6637 	test_iov_write(dev, blob, channel, false);
6638 	test_iov_read(dev, blob, channel, false);
6639 	test_io_zeroes(dev, blob, channel);
6640 
6641 	test_iov_write(dev, blob, channel, true);
6642 	test_iov_read(dev, blob, channel, true);
6643 
6644 	test_io_unmap(dev, blob, channel);
6645 
6646 	spdk_blob_close(blob, blob_op_complete, NULL);
6647 	poll_threads();
6648 	CU_ASSERT(g_bserrno == 0);
6649 	blob = NULL;
6650 	g_blob = NULL;
6651 
6652 	/* Create thin provisioned blob */
6653 
6654 	ut_spdk_blob_opts_init(&opts);
6655 	opts.thin_provision = true;
6656 	opts.num_clusters = 32;
6657 
6658 	blob = ut_blob_create_and_open(bs, &opts);
6659 	blobid = spdk_blob_get_id(blob);
6660 
6661 	test_io_write(dev, blob, channel);
6662 	test_io_read(dev, blob, channel);
6663 	test_io_zeroes(dev, blob, channel);
6664 
6665 	test_iov_write(dev, blob, channel, false);
6666 	test_iov_read(dev, blob, channel, false);
6667 	test_io_zeroes(dev, blob, channel);
6668 
6669 	test_iov_write(dev, blob, channel, true);
6670 	test_iov_read(dev, blob, channel, true);
6671 
6672 	/* Create snapshot */
6673 
6674 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
6675 	poll_threads();
6676 	CU_ASSERT(g_bserrno == 0);
6677 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
6678 	blobid = g_blobid;
6679 
6680 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
6681 	poll_threads();
6682 	CU_ASSERT(g_bserrno == 0);
6683 	CU_ASSERT(g_blob != NULL);
6684 	snapshot = g_blob;
6685 
6686 	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
6687 	poll_threads();
6688 	CU_ASSERT(g_bserrno == 0);
6689 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
6690 	blobid = g_blobid;
6691 
6692 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
6693 	poll_threads();
6694 	CU_ASSERT(g_bserrno == 0);
6695 	CU_ASSERT(g_blob != NULL);
6696 	clone = g_blob;
6697 
6698 	test_io_read(dev, blob, channel);
6699 	test_io_read(dev, snapshot, channel);
6700 	test_io_read(dev, clone, channel);
6701 
6702 	test_iov_read(dev, blob, channel, false);
6703 	test_iov_read(dev, snapshot, channel, false);
6704 	test_iov_read(dev, clone, channel, false);
6705 
6706 	test_iov_read(dev, blob, channel, true);
6707 	test_iov_read(dev, snapshot, channel, true);
6708 	test_iov_read(dev, clone, channel, true);
6709 
6710 	/* Inflate clone */
6711 
6712 	spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
6713 	poll_threads();
6714 
6715 	CU_ASSERT(g_bserrno == 0);
6716 
6717 	test_io_read(dev, clone, channel);
6718 
6719 	test_io_unmap(dev, clone, channel);
6720 
6721 	test_iov_write(dev, clone, channel, false);
6722 	test_iov_read(dev, clone, channel, false);
6723 	test_io_unmap(dev, clone, channel);
6724 
6725 	test_iov_write(dev, clone, channel, true);
6726 	test_iov_read(dev, clone, channel, true);
6727 
6728 	spdk_blob_close(blob, blob_op_complete, NULL);
6729 	spdk_blob_close(snapshot, blob_op_complete, NULL);
6730 	spdk_blob_close(clone, blob_op_complete, NULL);
6731 	poll_threads();
6732 	CU_ASSERT(g_bserrno == 0);
6733 	blob = NULL;
6734 	g_blob = NULL;
6735 
6736 	spdk_bs_free_io_channel(channel);
6737 	poll_threads();
6738 
6739 	/* Unload the blob store */
6740 	spdk_bs_unload(bs, bs_op_complete, NULL);
6741 	poll_threads();
6742 	CU_ASSERT(g_bserrno == 0);
6743 	g_bs = NULL;
6744 	g_blob = NULL;
6745 	g_blobid = 0;
6746 }
6747 
6748 static void
6749 blob_io_unit_compatibility(void)
6750 {
6751 	struct spdk_bs_opts bsopts;
6752 	struct spdk_blob_store *bs;
6753 	struct spdk_bs_dev *dev;
6754 	struct spdk_bs_super_block *super;
6755 
6756 	/* Create a dev with a 512-byte io unit size */
6757 
6758 	spdk_bs_opts_init(&bsopts, sizeof(bsopts));
6759 	bsopts.cluster_sz = SPDK_BS_PAGE_SIZE * 4;	/* 4 pages * 8 io_units per page = 32 io_units */
6760 	snprintf(bsopts.bstype.bstype, sizeof(bsopts.bstype.bstype), "TESTTYPE");
6761 
6762 	/* Use a dev with a 512-byte block size, i.e. an io_unit smaller than a metadata page */
6763 	dev = init_dev();
6764 	dev->blocklen = 512;
6765 	dev->blockcnt =  DEV_BUFFER_SIZE / dev->blocklen;
6766 
6767 	/* Initialize a new blob store */
6768 	spdk_bs_init(dev, &bsopts, bs_op_with_handle_complete, NULL);
6769 	poll_threads();
6770 	CU_ASSERT(g_bserrno == 0);
6771 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
6772 	bs = g_bs;
6773 
6774 	CU_ASSERT(spdk_bs_get_io_unit_size(bs) == 512);
6775 
6776 	/* Unload the blob store */
6777 	spdk_bs_unload(bs, bs_op_complete, NULL);
6778 	poll_threads();
6779 	CU_ASSERT(g_bserrno == 0);
6780 
6781 	/* Modify the super block to look like one written by an older version
6782 	 * and check that the loaded io unit size falls back to SPDK_BS_PAGE_SIZE. */
6783 	super = (struct spdk_bs_super_block *)&g_dev_buffer[0];
6784 	super->io_unit_size = 0;
6785 	super->crc = blob_md_page_calc_crc(super);
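	/* io_unit_size == 0 mimics a super block written by an older blobstore version; the CRC is
	 * recomputed so the doctored super block still passes validation on load. */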
6786 
6787 	dev = init_dev();
6788 	dev->blocklen = 512;
6789 	dev->blockcnt =  DEV_BUFFER_SIZE / dev->blocklen;
6790 
6791 	spdk_bs_load(dev, &bsopts, bs_op_with_handle_complete, NULL);
6792 	poll_threads();
6793 	CU_ASSERT(g_bserrno == 0);
6794 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
6795 	bs = g_bs;
6796 
6797 	CU_ASSERT(spdk_bs_get_io_unit_size(bs) == SPDK_BS_PAGE_SIZE);
6798 
6799 	/* Unload the blob store */
6800 	spdk_bs_unload(bs, bs_op_complete, NULL);
6801 	poll_threads();
6802 	CU_ASSERT(g_bserrno == 0);
6803 
6804 	g_bs = NULL;
6805 	g_blob = NULL;
6806 	g_blobid = 0;
6807 }
6808 
6809 static void
6810 first_sync_complete(void *cb_arg, int bserrno)
6811 {
6812 	struct spdk_blob *blob = cb_arg;
6813 	int rc;
6814 
6815 	CU_ASSERT(bserrno == 0);
6816 	rc = spdk_blob_set_xattr(blob, "sync", "second", strlen("second") + 1);
6817 	CU_ASSERT(rc == 0);
6818 	CU_ASSERT(g_bserrno == -1);
6819 
6820 	/* Keep g_bserrno at -1; only the
6821 	 * second sync completion should set it to 0. */
6822 }
6823 
6824 static void
6825 second_sync_complete(void *cb_arg, int bserrno)
6826 {
6827 	struct spdk_blob *blob = cb_arg;
6828 	const void *value;
6829 	size_t value_len;
6830 	int rc;
6831 
6832 	CU_ASSERT(bserrno == 0);
6833 
6834 	/* Verify that the first sync completion had a chance to execute */
6835 	rc = spdk_blob_get_xattr_value(blob, "sync", &value, &value_len);
6836 	CU_ASSERT(rc == 0);
6837 	SPDK_CU_ASSERT_FATAL(value != NULL);
6838 	CU_ASSERT(value_len == strlen("second") + 1);
6839 	CU_ASSERT_NSTRING_EQUAL_FATAL(value, "second", value_len);
6840 
6841 	CU_ASSERT(g_bserrno == -1);
6842 	g_bserrno = bserrno;
6843 }
6844 
6845 static void
6846 blob_simultaneous_operations(void)
6847 {
6848 	struct spdk_blob_store *bs = g_bs;
6849 	struct spdk_blob_opts opts;
6850 	struct spdk_blob *blob, *snapshot;
6851 	spdk_blob_id blobid, snapshotid;
6852 	struct spdk_io_channel *channel;
6853 	int rc;
6854 
6855 	channel = spdk_bs_alloc_io_channel(bs);
6856 	SPDK_CU_ASSERT_FATAL(channel != NULL);
6857 
6858 	ut_spdk_blob_opts_init(&opts);
6859 	opts.num_clusters = 10;
6860 
6861 	blob = ut_blob_create_and_open(bs, &opts);
6862 	blobid = spdk_blob_get_id(blob);
6863 
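	/* In each scenario below the delete is rejected synchronously with -EBUSY because the first
	 * operation already holds the blob's locked_operation_in_progress flag; poll_threads() then
	 * lets that first operation run to completion. */
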
6864 	/* Create a snapshot and try to remove the blob at the same time:
6865 	 * - snapshot should be created successfully
6866 	 * - delete operation should fail with -EBUSY */
6867 	CU_ASSERT(blob->locked_operation_in_progress == false);
6868 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
6869 	CU_ASSERT(blob->locked_operation_in_progress == true);
6870 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
6871 	CU_ASSERT(blob->locked_operation_in_progress == true);
6872 	/* Deletion failure */
6873 	CU_ASSERT(g_bserrno == -EBUSY);
6874 	poll_threads();
6875 	CU_ASSERT(blob->locked_operation_in_progress == false);
6876 	/* Snapshot creation success */
6877 	CU_ASSERT(g_bserrno == 0);
6878 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
6879 
6880 	snapshotid = g_blobid;
6881 
6882 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
6883 	poll_threads();
6884 	CU_ASSERT(g_bserrno == 0);
6885 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
6886 	snapshot = g_blob;
6887 
6888 	/* Inflate the blob and try to remove it at the same time:
6889 	 * - blob should be inflated successfully
6890 	 * - delete operation should fail with -EBUSY */
6891 	CU_ASSERT(blob->locked_operation_in_progress == false);
6892 	spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
6893 	CU_ASSERT(blob->locked_operation_in_progress == true);
6894 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
6895 	CU_ASSERT(blob->locked_operation_in_progress == true);
6896 	/* Deletion failure */
6897 	CU_ASSERT(g_bserrno == -EBUSY);
6898 	poll_threads();
6899 	CU_ASSERT(blob->locked_operation_in_progress == false);
6900 	/* Inflation success */
6901 	CU_ASSERT(g_bserrno == 0);
6902 
6903 	/* Clone the snapshot and try to remove the snapshot at the same time:
6904 	 * - snapshot should be cloned successfully
6905 	 * - delete operation should fail with -EBUSY */
6906 	CU_ASSERT(blob->locked_operation_in_progress == false);
6907 	spdk_bs_create_clone(bs, snapshotid, NULL, blob_op_with_id_complete, NULL);
6908 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
6909 	/* Deletion failure */
6910 	CU_ASSERT(g_bserrno == -EBUSY);
6911 	poll_threads();
6912 	CU_ASSERT(blob->locked_operation_in_progress == false);
6913 	/* Clone created */
6914 	CU_ASSERT(g_bserrno == 0);
6915 
6916 	/* Resize the blob and try to remove it at the same time:
6917 	 * - blob should be resized successfully
6918 	 * - delete operation should fail with -EBUSY */
6919 	CU_ASSERT(blob->locked_operation_in_progress == false);
6920 	spdk_blob_resize(blob, 50, blob_op_complete, NULL);
6921 	CU_ASSERT(blob->locked_operation_in_progress == true);
6922 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
6923 	CU_ASSERT(blob->locked_operation_in_progress == true);
6924 	/* Deletion failure */
6925 	CU_ASSERT(g_bserrno == -EBUSY);
6926 	poll_threads();
6927 	CU_ASSERT(blob->locked_operation_in_progress == false);
6928 	/* Blob resized successfully */
6929 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
6930 	poll_threads();
6931 	CU_ASSERT(g_bserrno == 0);
6932 
6933 	/* Issue two consecutive blob syncs; neither should fail.
6934 	 * Force each sync to actually occur by marking the blob dirty first.
6935 	 * Merely issuing the sync is not enough to complete the operation,
6936 	 * since disk I/O is required to complete it. */
6937 	g_bserrno = -1;
6938 
6939 	rc = spdk_blob_set_xattr(blob, "sync", "first", strlen("first") + 1);
6940 	CU_ASSERT(rc == 0);
6941 	spdk_blob_sync_md(blob, first_sync_complete, blob);
6942 	CU_ASSERT(g_bserrno == -1);
6943 
6944 	spdk_blob_sync_md(blob, second_sync_complete, blob);
6945 	CU_ASSERT(g_bserrno == -1);
6946 
6947 	poll_threads();
6948 	CU_ASSERT(g_bserrno == 0);
6949 
6950 	spdk_bs_free_io_channel(channel);
6951 	poll_threads();
6952 
6953 	ut_blob_close_and_delete(bs, snapshot);
6954 	ut_blob_close_and_delete(bs, blob);
6955 }
6956 
6957 static void
6958 blob_persist_test(void)
6959 {
6960 	struct spdk_blob_store *bs = g_bs;
6961 	struct spdk_blob_opts opts;
6962 	struct spdk_blob *blob;
6963 	spdk_blob_id blobid;
6964 	struct spdk_io_channel *channel;
6965 	char *xattr;
6966 	size_t xattr_length;
6967 	int rc;
6968 	uint32_t page_count_clear, page_count_xattr;
6969 	uint64_t poller_iterations;
6970 	bool run_poller;
6971 
6972 	channel = spdk_bs_alloc_io_channel(bs);
6973 	SPDK_CU_ASSERT_FATAL(channel != NULL);
6974 
6975 	ut_spdk_blob_opts_init(&opts);
6976 	opts.num_clusters = 10;
6977 
6978 	blob = ut_blob_create_and_open(bs, &opts);
6979 	blobid = spdk_blob_get_id(blob);
6980 
6981 	/* Save the number of md pages used after creation of the blob.
6982 	 * The same count should be seen again after the xattr is removed. */
6983 	page_count_clear = spdk_bit_array_count_set(bs->used_md_pages);
6984 	SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_clear);
6985 	SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_clear);
6986 
6987 	/* Add an xattr with the maximum descriptor length so that the metadata exceeds a single page. */
6988 	xattr_length = SPDK_BS_MAX_DESC_SIZE - sizeof(struct spdk_blob_md_descriptor_xattr) -
6989 		       strlen("large_xattr");
6990 	xattr = calloc(xattr_length, sizeof(char));
6991 	SPDK_CU_ASSERT_FATAL(xattr != NULL);
6992 
6993 	rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
6994 	SPDK_CU_ASSERT_FATAL(rc == 0);
6995 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
6996 	poll_threads();
6997 	SPDK_CU_ASSERT_FATAL(g_bserrno == 0);
6998 
6999 	/* Save the number of md pages used after adding the large xattr */
7000 	page_count_xattr = spdk_bit_array_count_set(bs->used_md_pages);
7001 	SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_xattr);
7002 	SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_xattr);
7003 
7004 	/* Add an xattr to the blob and sync it. While that sync is in flight, remove the xattr and sync again.
7005 	 * Interrupt the first sync after an increasing number of poller iterations, until it eventually completes.
7006 	 * The expectation is that after the second sync completes, no xattr is saved in the metadata. */
7007 	poller_iterations = 1;
7008 	run_poller = true;
7009 	while (run_poller) {
7010 		rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
7011 		SPDK_CU_ASSERT_FATAL(rc == 0);
7012 		g_bserrno = -1;
7013 		spdk_blob_sync_md(blob, blob_op_complete, NULL);
7014 		poll_thread_times(0, poller_iterations);
7015 		if (g_bserrno == 0) {
7016 			/* The poller iteration count was high enough for the first sync to complete.
7017 			 * Verify that the blob occupies enough md pages to store the xattr. */
7018 			SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_xattr);
7019 			SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_xattr);
7020 			SPDK_CU_ASSERT_FATAL(spdk_bit_array_count_set(bs->used_md_pages) == page_count_xattr);
7021 			run_poller = false;
7022 		}
7023 		rc = spdk_blob_remove_xattr(blob, "large_xattr");
7024 		SPDK_CU_ASSERT_FATAL(rc == 0);
7025 		spdk_blob_sync_md(blob, blob_op_complete, NULL);
7026 		poll_threads();
7027 		SPDK_CU_ASSERT_FATAL(g_bserrno == 0);
7028 		SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_clear);
7029 		SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_clear);
7030 		SPDK_CU_ASSERT_FATAL(spdk_bit_array_count_set(bs->used_md_pages) == page_count_clear);
7031 
7032 		/* Reload bs and re-open blob to verify that xattr was not persisted. */
7033 		spdk_blob_close(blob, blob_op_complete, NULL);
7034 		poll_threads();
7035 		CU_ASSERT(g_bserrno == 0);
7036 
7037 		ut_bs_reload(&bs, NULL);
7038 
7039 		spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
7040 		poll_threads();
7041 		CU_ASSERT(g_bserrno == 0);
7042 		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
7043 		blob = g_blob;
7044 
7045 		rc = spdk_blob_get_xattr_value(blob, "large_xattr", (const void **)&xattr, &xattr_length);
7046 		SPDK_CU_ASSERT_FATAL(rc == -ENOENT);
7047 
7048 		poller_iterations++;
7049 		/* Stop at a high iteration count to prevent an infinite loop.
7050 		 * This value should be more than enough for the first md sync to complete. */
7051 		SPDK_CU_ASSERT_FATAL(poller_iterations < 50);
7052 	}
7053 
7054 	free(xattr);
7055 
7056 	ut_blob_close_and_delete(bs, blob);
7057 
7058 	spdk_bs_free_io_channel(channel);
7059 	poll_threads();
7060 }
7061 
7062 static void
7063 blob_decouple_snapshot(void)
7064 {
7065 	struct spdk_blob_store *bs = g_bs;
7066 	struct spdk_blob_opts opts;
7067 	struct spdk_blob *blob, *snapshot1, *snapshot2;
7068 	struct spdk_io_channel *channel;
7069 	spdk_blob_id blobid, snapshotid;
7070 	uint64_t cluster;
7071 
7072 	channel = spdk_bs_alloc_io_channel(bs);
7073 	SPDK_CU_ASSERT_FATAL(channel != NULL);
7074 
7075 	ut_spdk_blob_opts_init(&opts);
7076 	opts.num_clusters = 10;
7077 	opts.thin_provision = false;
7078 
7079 	blob = ut_blob_create_and_open(bs, &opts);
7080 	blobid = spdk_blob_get_id(blob);
7081 
7082 	/* Create first snapshot */
7083 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 0);
7084 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
7085 	poll_threads();
7086 	CU_ASSERT(g_bserrno == 0);
7087 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
7088 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
7089 	snapshotid = g_blobid;
7090 
7091 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
7092 	poll_threads();
7093 	CU_ASSERT(g_bserrno == 0);
7094 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
7095 	snapshot1 = g_blob;
7096 
7097 	/* Create the second snapshot */
7098 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
7099 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
7100 	poll_threads();
7101 	CU_ASSERT(g_bserrno == 0);
7102 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
7103 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 2);
7104 	snapshotid = g_blobid;
7105 
7106 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
7107 	poll_threads();
7108 	CU_ASSERT(g_bserrno == 0);
7109 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
7110 	snapshot2 = g_blob;
7111 	CU_ASSERT_EQUAL(spdk_blob_get_parent_snapshot(bs, snapshot2->id), snapshot1->id);
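	/* snapshot2 was taken on top of snapshot1, so snapshot1 is its parent until it is decoupled. */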
7112 
7113 	/* Now decouple the second snapshot, forcing it to copy the written clusters */
7114 	spdk_bs_blob_decouple_parent(bs, channel, snapshot2->id, blob_op_complete, NULL);
7115 	poll_threads();
7116 	CU_ASSERT(g_bserrno == 0);
7117 
7118 	/* Verify that the snapshot has been decoupled and that the clusters have been copied */
7119 	CU_ASSERT_EQUAL(spdk_blob_get_parent_snapshot(bs, snapshot2->id), SPDK_BLOBID_INVALID);
7120 	for (cluster = 0; cluster < snapshot2->active.num_clusters; ++cluster) {
7121 		CU_ASSERT_NOT_EQUAL(snapshot2->active.clusters[cluster], 0);
7122 		CU_ASSERT_NOT_EQUAL(snapshot2->active.clusters[cluster],
7123 				    snapshot1->active.clusters[cluster]);
7124 	}
7125 
7126 	spdk_bs_free_io_channel(channel);
7127 
7128 	ut_blob_close_and_delete(bs, snapshot2);
7129 	ut_blob_close_and_delete(bs, snapshot1);
7130 	ut_blob_close_and_delete(bs, blob);
7131 	poll_threads();
7132 }
7133 
7134 static void
7135 suite_bs_setup(void)
7136 {
7137 	struct spdk_bs_dev *dev;
7138 
7139 	dev = init_dev();
7140 	memset(g_dev_buffer, 0, DEV_BUFFER_SIZE);
7141 	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
7142 	poll_threads();
7143 	CU_ASSERT(g_bserrno == 0);
7144 	CU_ASSERT(g_bs != NULL);
7145 }
7146 
7147 static void
7148 suite_bs_cleanup(void)
7149 {
7150 	spdk_bs_unload(g_bs, bs_op_complete, NULL);
7151 	poll_threads();
7152 	CU_ASSERT(g_bserrno == 0);
7153 	g_bs = NULL;
7154 	memset(g_dev_buffer, 0, DEV_BUFFER_SIZE);
7155 }
7156 
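/* Create a blob using the given opts (or UT defaults when blob_opts == NULL), open it and return
 * the open blob; asserts success at every step and resets the completion globals. */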
7157 static struct spdk_blob *
7158 ut_blob_create_and_open(struct spdk_blob_store *bs, struct spdk_blob_opts *blob_opts)
7159 {
7160 	struct spdk_blob *blob;
7161 	struct spdk_blob_opts create_blob_opts;
7162 	spdk_blob_id blobid;
7163 
7164 	if (blob_opts == NULL) {
7165 		ut_spdk_blob_opts_init(&create_blob_opts);
7166 		blob_opts = &create_blob_opts;
7167 	}
7168 
7169 	spdk_bs_create_blob_ext(bs, blob_opts, blob_op_with_id_complete, NULL);
7170 	poll_threads();
7171 	CU_ASSERT(g_bserrno == 0);
7172 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
7173 	blobid = g_blobid;
7174 	g_blobid = -1;
7175 
7176 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
7177 	poll_threads();
7178 	CU_ASSERT(g_bserrno == 0);
7179 	CU_ASSERT(g_blob != NULL);
7180 	blob = g_blob;
7181 
7182 	g_blob = NULL;
7183 	g_bserrno = -1;
7184 
7185 	return blob;
7186 }
7187 
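/* Close the given blob and then delete it from the blob store, asserting success for both steps. */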
7188 static void
7189 ut_blob_close_and_delete(struct spdk_blob_store *bs, struct spdk_blob *blob)
7190 {
7191 	spdk_blob_id blobid = spdk_blob_get_id(blob);
7192 
7193 	spdk_blob_close(blob, blob_op_complete, NULL);
7194 	poll_threads();
7195 	CU_ASSERT(g_bserrno == 0);
7196 	g_blob = NULL;
7197 
7198 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
7199 	poll_threads();
7200 	CU_ASSERT(g_bserrno == 0);
7201 	g_bserrno = -1;
7202 }
7203 
7204 static void
7205 suite_blob_setup(void)
7206 {
7207 	suite_bs_setup();
7208 	CU_ASSERT(g_bs != NULL);
7209 
7210 	g_blob = ut_blob_create_and_open(g_bs, NULL);
7211 	CU_ASSERT(g_blob != NULL);
7212 }
7213 
7214 static void
7215 suite_blob_cleanup(void)
7216 {
7217 	ut_blob_close_and_delete(g_bs, g_blob);
7218 	CU_ASSERT(g_blob == NULL);
7219 
7220 	suite_bs_cleanup();
7221 	CU_ASSERT(g_bs == NULL);
7222 }
7223 
7224 int main(int argc, char **argv)
7225 {
7226 	CU_pSuite	suite, suite_bs, suite_blob;
7227 	unsigned int	num_failures;
7228 
7229 	CU_set_error_action(CUEA_ABORT);
7230 	CU_initialize_registry();
7231 
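	/*
	 * Three suites: "blob" tests create and tear down their own blob store, "blob_bs" tests run
	 * against a blob store created in suite setup (g_bs), and "blob_blob" tests additionally get
	 * a pre-created open blob (g_blob).
	 */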
7232 	suite = CU_add_suite("blob", NULL, NULL);
7233 	suite_bs = CU_add_suite_with_setup_and_teardown("blob_bs", NULL, NULL,
7234 			suite_bs_setup, suite_bs_cleanup);
7235 	suite_blob = CU_add_suite_with_setup_and_teardown("blob_blob", NULL, NULL,
7236 			suite_blob_setup, suite_blob_cleanup);
7237 
7238 	CU_ADD_TEST(suite, blob_init);
7239 	CU_ADD_TEST(suite_bs, blob_open);
7240 	CU_ADD_TEST(suite_bs, blob_create);
7241 	CU_ADD_TEST(suite_bs, blob_create_loop);
7242 	CU_ADD_TEST(suite_bs, blob_create_fail);
7243 	CU_ADD_TEST(suite_bs, blob_create_internal);
7244 	CU_ADD_TEST(suite_bs, blob_create_zero_extent);
7245 	CU_ADD_TEST(suite, blob_thin_provision);
7246 	CU_ADD_TEST(suite_bs, blob_snapshot);
7247 	CU_ADD_TEST(suite_bs, blob_clone);
7248 	CU_ADD_TEST(suite_bs, blob_inflate);
7249 	CU_ADD_TEST(suite_bs, blob_delete);
7250 	CU_ADD_TEST(suite_bs, blob_resize_test);
7251 	CU_ADD_TEST(suite, blob_read_only);
7252 	CU_ADD_TEST(suite_bs, channel_ops);
7253 	CU_ADD_TEST(suite_bs, blob_super);
7254 	CU_ADD_TEST(suite_blob, blob_write);
7255 	CU_ADD_TEST(suite_blob, blob_read);
7256 	CU_ADD_TEST(suite_blob, blob_rw_verify);
7257 	CU_ADD_TEST(suite_bs, blob_rw_verify_iov);
7258 	CU_ADD_TEST(suite_blob, blob_rw_verify_iov_nomem);
7259 	CU_ADD_TEST(suite_blob, blob_rw_iov_read_only);
7260 	CU_ADD_TEST(suite_bs, blob_unmap);
7261 	CU_ADD_TEST(suite_bs, blob_iter);
7262 	CU_ADD_TEST(suite_blob, blob_xattr);
7263 	CU_ADD_TEST(suite_bs, blob_parse_md);
7264 	CU_ADD_TEST(suite, bs_load);
7265 	CU_ADD_TEST(suite_bs, bs_load_pending_removal);
7266 	CU_ADD_TEST(suite, bs_load_custom_cluster_size);
7267 	CU_ADD_TEST(suite_bs, bs_unload);
7268 	CU_ADD_TEST(suite, bs_cluster_sz);
7269 	CU_ADD_TEST(suite_bs, bs_usable_clusters);
7270 	CU_ADD_TEST(suite, bs_resize_md);
7271 	CU_ADD_TEST(suite, bs_destroy);
7272 	CU_ADD_TEST(suite, bs_type);
7273 	CU_ADD_TEST(suite, bs_super_block);
7274 	CU_ADD_TEST(suite, bs_test_recover_cluster_count);
7275 	CU_ADD_TEST(suite, blob_serialize_test);
7276 	CU_ADD_TEST(suite_bs, blob_crc);
7277 	CU_ADD_TEST(suite, super_block_crc);
7278 	CU_ADD_TEST(suite_blob, blob_dirty_shutdown);
7279 	CU_ADD_TEST(suite_bs, blob_flags);
7280 	CU_ADD_TEST(suite_bs, bs_version);
7281 	CU_ADD_TEST(suite_bs, blob_set_xattrs_test);
7282 	CU_ADD_TEST(suite_bs, blob_thin_prov_alloc);
7283 	CU_ADD_TEST(suite_bs, blob_insert_cluster_msg_test);
7284 	CU_ADD_TEST(suite_bs, blob_thin_prov_rw);
7285 	CU_ADD_TEST(suite, blob_thin_prov_write_count_io);
7286 	CU_ADD_TEST(suite_bs, blob_thin_prov_rle);
7287 	CU_ADD_TEST(suite_bs, blob_thin_prov_rw_iov);
7288 	CU_ADD_TEST(suite, bs_load_iter_test);
7289 	CU_ADD_TEST(suite_bs, blob_snapshot_rw);
7290 	CU_ADD_TEST(suite_bs, blob_snapshot_rw_iov);
7291 	CU_ADD_TEST(suite, blob_relations);
7292 	CU_ADD_TEST(suite, blob_relations2);
7293 	CU_ADD_TEST(suite, blob_relations3);
7294 	CU_ADD_TEST(suite, blobstore_clean_power_failure);
7295 	CU_ADD_TEST(suite, blob_delete_snapshot_power_failure);
7296 	CU_ADD_TEST(suite, blob_create_snapshot_power_failure);
7297 	CU_ADD_TEST(suite_bs, blob_inflate_rw);
7298 	CU_ADD_TEST(suite_bs, blob_snapshot_freeze_io);
7299 	CU_ADD_TEST(suite_bs, blob_operation_split_rw);
7300 	CU_ADD_TEST(suite_bs, blob_operation_split_rw_iov);
7301 	CU_ADD_TEST(suite, blob_io_unit);
7302 	CU_ADD_TEST(suite, blob_io_unit_compatibility);
7303 	CU_ADD_TEST(suite_bs, blob_simultaneous_operations);
7304 	CU_ADD_TEST(suite_bs, blob_persist_test);
7305 	CU_ADD_TEST(suite_bs, blob_decouple_snapshot);
7306 
7307 	allocate_threads(2);
7308 	set_thread(0);
7309 
7310 	g_dev_buffer = calloc(1, DEV_BUFFER_SIZE);
7311 
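	/* Run the full registry twice so every test is exercised both without and with the
	 * extent-table metadata format (g_use_extent_table). */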
7312 	CU_basic_set_mode(CU_BRM_VERBOSE);
7313 	g_use_extent_table = false;
7314 	CU_basic_run_tests();
7315 	num_failures = CU_get_number_of_failures();
7316 	g_use_extent_table = true;
7317 	CU_basic_run_tests();
7318 	num_failures += CU_get_number_of_failures();
7319 	CU_cleanup_registry();
7320 
7321 	free(g_dev_buffer);
7322 
7323 	free_threads();
7324 
7325 	return num_failures;
7326 }
7327