1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation.
5  *   All rights reserved.
6  *   Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
7  *
8  *   Redistribution and use in source and binary forms, with or without
9  *   modification, are permitted provided that the following conditions
10  *   are met:
11  *
12  *     * Redistributions of source code must retain the above copyright
13  *       notice, this list of conditions and the following disclaimer.
14  *     * Redistributions in binary form must reproduce the above copyright
15  *       notice, this list of conditions and the following disclaimer in
16  *       the documentation and/or other materials provided with the
17  *       distribution.
18  *     * Neither the name of Intel Corporation nor the names of its
19  *       contributors may be used to endorse or promote products derived
20  *       from this software without specific prior written permission.
21  *
22  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
25  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
26  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
27  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
28  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
29  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
30  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
32  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33  */
34 
35 #include "spdk/stdinc.h"
36 
37 #include "spdk_cunit.h"
38 #include "spdk/blob.h"
39 #include "spdk/string.h"
40 
41 #include "common/lib/ut_multithread.c"
42 #include "../bs_dev_common.c"
43 #include "blob/blobstore.c"
44 #include "blob/request.c"
45 #include "blob/zeroes.c"
46 #include "blob/blob_bs_dev.c"
47 
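/*
 * Globals filled in by the completion callbacks below; tests start an async
 * operation, call poll_threads(), and then inspect these for the result.
 */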
48 struct spdk_blob_store *g_bs;
49 spdk_blob_id g_blobid;
50 struct spdk_blob *g_blob, *g_blob2;
51 int g_bserrno, g_bserrno2;
52 struct spdk_xattr_names *g_names;
53 int g_done;
54 char *g_xattr_names[] = {"first", "second", "third"};
55 char *g_xattr_values[] = {"one", "two", "three"};
56 uint64_t g_ctx = 1729;
57 bool g_use_extent_table = false;
58 
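/*
 * Layout of the version 1 on-disk super block (one 4 KiB metadata page);
 * see the SPDK_STATIC_ASSERT below the definition.
 */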
59 struct spdk_bs_super_block_ver1 {
60 	uint8_t		signature[8];
61 	uint32_t        version;
62 	uint32_t        length;
63 	uint32_t	clean; /* If there was a clean shutdown, this is 1. */
64 	spdk_blob_id	super_blob;
65 
66 	uint32_t	cluster_size; /* In bytes */
67 
68 	uint32_t	used_page_mask_start; /* Offset from beginning of disk, in pages */
69 	uint32_t	used_page_mask_len; /* Count, in pages */
70 
71 	uint32_t	used_cluster_mask_start; /* Offset from beginning of disk, in pages */
72 	uint32_t	used_cluster_mask_len; /* Count, in pages */
73 
74 	uint32_t	md_start; /* Offset from beginning of disk, in pages */
75 	uint32_t	md_len; /* Count, in pages */
76 
77 	uint8_t		reserved[4036];
78 	uint32_t	crc;
79 } __attribute__((packed));
80 SPDK_STATIC_ASSERT(sizeof(struct spdk_bs_super_block_ver1) == 0x1000, "Invalid super block size");
81 
82 static struct spdk_blob *ut_blob_create_and_open(struct spdk_blob_store *bs,
83 		struct spdk_blob_opts *blob_opts);
84 static void ut_blob_close_and_delete(struct spdk_blob_store *bs, struct spdk_blob *blob);
85 static void suite_blob_setup(void);
86 static void suite_blob_cleanup(void);
87 
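/* Stub spdk_memory_domain_memzero() so that it always reports success. */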
88 DEFINE_STUB(spdk_memory_domain_memzero, int, (struct spdk_memory_domain *src_domain,
89 		void *src_domain_ctx, struct iovec *iov, uint32_t iovcnt, void (*cpl_cb)(void *, int),
90 		void *cpl_cb_arg), 0);
91 
92 static void
93 _get_xattr_value(void *arg, const char *name,
94 		 const void **value, size_t *value_len)
95 {
96 	uint64_t i;
97 
98 	SPDK_CU_ASSERT_FATAL(value_len != NULL);
99 	SPDK_CU_ASSERT_FATAL(value != NULL);
100 	CU_ASSERT(arg == &g_ctx);
101 
102 	for (i = 0; i < sizeof(g_xattr_names) / sizeof(g_xattr_names[0]); i++) {
103 		if (!strcmp(name, g_xattr_names[i])) {
104 			*value_len = strlen(g_xattr_values[i]);
105 			*value = g_xattr_values[i];
106 			break;
107 		}
108 	}
109 }
110 
111 static void
112 _get_xattr_value_null(void *arg, const char *name,
113 		      const void **value, size_t *value_len)
114 {
115 	SPDK_CU_ASSERT_FATAL(value_len != NULL);
116 	SPDK_CU_ASSERT_FATAL(value != NULL);
117 	CU_ASSERT(arg == NULL);
118 
119 	*value_len = 0;
120 	*value = NULL;
121 }
122 
123 static int
124 _get_snapshots_count(struct spdk_blob_store *bs)
125 {
126 	struct spdk_blob_list *snapshot = NULL;
127 	int count = 0;
128 
129 	TAILQ_FOREACH(snapshot, &bs->snapshots, link) {
130 		count += 1;
131 	}
132 
133 	return count;
134 }
135 
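/*
 * Initialize blob options with defaults, then apply the suite-wide
 * g_use_extent_table setting.
 */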
136 static void
137 ut_spdk_blob_opts_init(struct spdk_blob_opts *opts)
138 {
139 	spdk_blob_opts_init(opts, sizeof(*opts));
140 	opts->use_extent_table = g_use_extent_table;
141 }
142 
143 static void
144 bs_op_complete(void *cb_arg, int bserrno)
145 {
146 	g_bserrno = bserrno;
147 }
148 
149 static void
150 bs_op_with_handle_complete(void *cb_arg, struct spdk_blob_store *bs,
151 			   int bserrno)
152 {
153 	g_bs = bs;
154 	g_bserrno = bserrno;
155 }
156 
157 static void
158 blob_op_complete(void *cb_arg, int bserrno)
159 {
160 	g_bserrno = bserrno;
161 }
162 
163 static void
164 blob_op_with_id_complete(void *cb_arg, spdk_blob_id blobid, int bserrno)
165 {
166 	g_blobid = blobid;
167 	g_bserrno = bserrno;
168 }
169 
170 static void
171 blob_op_with_handle_complete(void *cb_arg, struct spdk_blob *blb, int bserrno)
172 {
173 	g_blob = blb;
174 	g_bserrno = bserrno;
175 }
176 
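/*
 * Completion callback for tests that open the same blob twice: the first
 * completion is stored in g_blob/g_bserrno, the second in g_blob2/g_bserrno2.
 */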
177 static void
178 blob_op_with_handle_complete2(void *cb_arg, struct spdk_blob *blob, int bserrno)
179 {
180 	if (g_blob == NULL) {
181 		g_blob = blob;
182 		g_bserrno = bserrno;
183 	} else {
184 		g_blob2 = blob;
185 		g_bserrno2 = bserrno;
186 	}
187 }
188 
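/* Cleanly unload the blob store, then load it back from the same device. */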
189 static void
190 ut_bs_reload(struct spdk_blob_store **bs, struct spdk_bs_opts *opts)
191 {
192 	struct spdk_bs_dev *dev;
193 
194 	/* Unload the blob store */
195 	spdk_bs_unload(*bs, bs_op_complete, NULL);
196 	poll_threads();
197 	CU_ASSERT(g_bserrno == 0);
198 
199 	dev = init_dev();
200 	/* Load an existing blob store */
201 	spdk_bs_load(dev, opts, bs_op_with_handle_complete, NULL);
202 	poll_threads();
203 	CU_ASSERT(g_bserrno == 0);
204 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
205 	*bs = g_bs;
206 
207 	g_bserrno = -1;
208 }
209 
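/*
 * Simulate a dirty shutdown: free the in-memory blob store without unloading
 * it, then load it again so that the recovery path is exercised.
 */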
210 static void
211 ut_bs_dirty_load(struct spdk_blob_store **bs, struct spdk_bs_opts *opts)
212 {
213 	struct spdk_bs_dev *dev;
214 
215 	/* Dirty shutdown */
216 	bs_free(*bs);
217 
218 	dev = init_dev();
219 	/* Load an existing blob store */
220 	spdk_bs_load(dev, opts, bs_op_with_handle_complete, NULL);
221 	poll_threads();
222 	CU_ASSERT(g_bserrno == 0);
223 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
224 	*bs = g_bs;
225 
226 	g_bserrno = -1;
227 }
228 
229 static void
230 blob_init(void)
231 {
232 	struct spdk_blob_store *bs;
233 	struct spdk_bs_dev *dev;
234 
235 	dev = init_dev();
236 
237 	/* should fail for an unsupported blocklen */
238 	dev->blocklen = 500;
239 	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
240 	poll_threads();
241 	CU_ASSERT(g_bserrno == -EINVAL);
242 
243 	dev = init_dev();
244 	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
245 	poll_threads();
246 	CU_ASSERT(g_bserrno == 0);
247 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
248 	bs = g_bs;
249 
250 	spdk_bs_unload(bs, bs_op_complete, NULL);
251 	poll_threads();
252 	CU_ASSERT(g_bserrno == 0);
253 	g_bs = NULL;
254 }
255 
256 static void
257 blob_super(void)
258 {
259 	struct spdk_blob_store *bs = g_bs;
260 	spdk_blob_id blobid;
261 	struct spdk_blob_opts blob_opts;
262 
263 	/* Get the super blob without having set one */
264 	spdk_bs_get_super(bs, blob_op_with_id_complete, NULL);
265 	poll_threads();
266 	CU_ASSERT(g_bserrno == -ENOENT);
267 	CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID);
268 
269 	/* Create a blob */
270 	ut_spdk_blob_opts_init(&blob_opts);
271 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
272 	poll_threads();
273 	CU_ASSERT(g_bserrno == 0);
274 	CU_ASSERT(g_blobid !=  SPDK_BLOBID_INVALID);
275 	blobid = g_blobid;
276 
277 	/* Set the blob as the super blob */
278 	spdk_bs_set_super(bs, blobid, blob_op_complete, NULL);
279 	poll_threads();
280 	CU_ASSERT(g_bserrno == 0);
281 
282 	/* Get the super blob */
283 	spdk_bs_get_super(bs, blob_op_with_id_complete, NULL);
284 	poll_threads();
285 	CU_ASSERT(g_bserrno == 0);
286 	CU_ASSERT(blobid == g_blobid);
287 }
288 
289 static void
290 blob_open(void)
291 {
292 	struct spdk_blob_store *bs = g_bs;
293 	struct spdk_blob *blob;
294 	struct spdk_blob_opts blob_opts;
295 	spdk_blob_id blobid, blobid2;
296 
297 	ut_spdk_blob_opts_init(&blob_opts);
298 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
299 	poll_threads();
300 	CU_ASSERT(g_bserrno == 0);
301 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
302 	blobid = g_blobid;
303 
304 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
305 	poll_threads();
306 	CU_ASSERT(g_bserrno == 0);
307 	CU_ASSERT(g_blob != NULL);
308 	blob = g_blob;
309 
310 	blobid2 = spdk_blob_get_id(blob);
311 	CU_ASSERT(blobid == blobid2);
312 
313 	/* Try to open the blob again.  It should succeed. */
314 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
315 	poll_threads();
316 	CU_ASSERT(g_bserrno == 0);
317 	CU_ASSERT(blob == g_blob);
318 
319 	spdk_blob_close(blob, blob_op_complete, NULL);
320 	poll_threads();
321 	CU_ASSERT(g_bserrno == 0);
322 
323 	/*
324 	 * Close the blob a second time, releasing the second reference.  This
325 	 *  should succeed.
326 	 */
327 	blob = g_blob;
328 	spdk_blob_close(blob, blob_op_complete, NULL);
329 	poll_threads();
330 	CU_ASSERT(g_bserrno == 0);
331 
332 	/*
333 	 * Try to open the blob again.  It should succeed.  This tests the case
334 	 *  where the blob is opened, closed, then re-opened.
335 	 */
336 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
337 	poll_threads();
338 	CU_ASSERT(g_bserrno == 0);
339 	CU_ASSERT(g_blob != NULL);
340 	blob = g_blob;
341 	spdk_blob_close(blob, blob_op_complete, NULL);
342 	poll_threads();
343 	CU_ASSERT(g_bserrno == 0);
344 
345 	/* Try to open the blob twice in succession.  This should return the same
346 	 * blob object.
347 	 */
348 	g_blob = NULL;
349 	g_blob2 = NULL;
350 	g_bserrno = -1;
351 	g_bserrno2 = -1;
352 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete2, NULL);
353 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete2, NULL);
354 	poll_threads();
355 	CU_ASSERT(g_bserrno == 0);
356 	CU_ASSERT(g_bserrno2 == 0);
357 	CU_ASSERT(g_blob != NULL);
358 	CU_ASSERT(g_blob2 != NULL);
359 	CU_ASSERT(g_blob == g_blob2);
360 
361 	g_bserrno = -1;
362 	spdk_blob_close(g_blob, blob_op_complete, NULL);
363 	poll_threads();
364 	CU_ASSERT(g_bserrno == 0);
365 
366 	ut_blob_close_and_delete(bs, g_blob);
367 }
368 
369 static void
370 blob_create(void)
371 {
372 	struct spdk_blob_store *bs = g_bs;
373 	struct spdk_blob *blob;
374 	struct spdk_blob_opts opts;
375 	spdk_blob_id blobid;
376 
377 	/* Create blob with 10 clusters */
378 
379 	ut_spdk_blob_opts_init(&opts);
380 	opts.num_clusters = 10;
381 
382 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
383 	poll_threads();
384 	CU_ASSERT(g_bserrno == 0);
385 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
386 	blobid = g_blobid;
387 
388 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
389 	poll_threads();
390 	CU_ASSERT(g_bserrno == 0);
391 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
392 	blob = g_blob;
393 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
394 
395 	spdk_blob_close(blob, blob_op_complete, NULL);
396 	poll_threads();
397 	CU_ASSERT(g_bserrno == 0);
398 
399 	/* Create blob with 0 clusters */
400 
401 	ut_spdk_blob_opts_init(&opts);
402 	opts.num_clusters = 0;
403 
404 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
405 	poll_threads();
406 	CU_ASSERT(g_bserrno == 0);
407 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
408 	blobid = g_blobid;
409 
410 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
411 	poll_threads();
412 	CU_ASSERT(g_bserrno == 0);
413 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
414 	blob = g_blob;
415 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
416 
417 	spdk_blob_close(blob, blob_op_complete, NULL);
418 	poll_threads();
419 	CU_ASSERT(g_bserrno == 0);
420 
421 	/* Create blob with default options (opts == NULL) */
422 
423 	spdk_bs_create_blob_ext(bs, NULL, blob_op_with_id_complete, NULL);
424 	poll_threads();
425 	CU_ASSERT(g_bserrno == 0);
426 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
427 	blobid = g_blobid;
428 
429 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
430 	poll_threads();
431 	CU_ASSERT(g_bserrno == 0);
432 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
433 	blob = g_blob;
434 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
435 
436 	spdk_blob_close(blob, blob_op_complete, NULL);
437 	poll_threads();
438 	CU_ASSERT(g_bserrno == 0);
439 
440 	/* Try to create blob with size larger than blobstore */
441 
442 	ut_spdk_blob_opts_init(&opts);
443 	opts.num_clusters = bs->total_clusters + 1;
444 
445 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
446 	poll_threads();
447 	CU_ASSERT(g_bserrno == -ENOSPC);
448 }
449 
450 static void
451 blob_create_zero_extent(void)
452 {
453 	struct spdk_blob_store *bs = g_bs;
454 	struct spdk_blob *blob;
455 	spdk_blob_id blobid;
456 
457 	/* Create blob with default options (opts == NULL) */
458 	spdk_bs_create_blob_ext(bs, NULL, blob_op_with_id_complete, NULL);
459 	poll_threads();
460 	CU_ASSERT(g_bserrno == 0);
461 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
462 	blobid = g_blobid;
463 
464 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
465 	poll_threads();
466 	CU_ASSERT(g_bserrno == 0);
467 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
468 	blob = g_blob;
469 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
470 	CU_ASSERT(blob->extent_table_found == true);
471 	CU_ASSERT(blob->active.extent_pages_array_size == 0);
472 	CU_ASSERT(blob->active.extent_pages == NULL);
473 
474 	spdk_blob_close(blob, blob_op_complete, NULL);
475 	poll_threads();
476 	CU_ASSERT(g_bserrno == 0);
477 
478 	/* Create blob with NULL internal options  */
479 	bs_create_blob(bs, NULL, NULL, blob_op_with_id_complete, NULL);
480 	poll_threads();
481 	CU_ASSERT(g_bserrno == 0);
482 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
483 	blobid = g_blobid;
484 
485 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
486 	poll_threads();
487 	CU_ASSERT(g_bserrno == 0);
488 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
489 	blob = g_blob;
490 	CU_ASSERT(TAILQ_FIRST(&blob->xattrs_internal) == NULL);
491 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
492 	CU_ASSERT(blob->extent_table_found == true);
493 	CU_ASSERT(blob->active.extent_pages_array_size == 0);
494 	CU_ASSERT(blob->active.extent_pages == NULL);
495 
496 	spdk_blob_close(blob, blob_op_complete, NULL);
497 	poll_threads();
498 	CU_ASSERT(g_bserrno == 0);
499 }
500 
501 /*
502  * Create and delete one blob in a loop over and over again.  This helps ensure
503  * that the internal bit masks tracking used clusters and md_pages are being
504  * tracked correctly.
505  */
506 static void
507 blob_create_loop(void)
508 {
509 	struct spdk_blob_store *bs = g_bs;
510 	struct spdk_blob_opts opts;
511 	uint32_t i, loop_count;
512 
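	/*
	 * Run several times more create/delete cycles than there are md pages
	 * or clusters, so that any bits leaked in the used_md_pages or
	 * used_clusters masks would eventually exhaust them.
	 */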
513 	loop_count = 4 * spdk_max(spdk_bit_array_capacity(bs->used_md_pages),
514 				  spdk_bit_pool_capacity(bs->used_clusters));
515 
516 	for (i = 0; i < loop_count; i++) {
517 		ut_spdk_blob_opts_init(&opts);
518 		opts.num_clusters = 1;
519 		g_bserrno = -1;
520 		g_blobid = SPDK_BLOBID_INVALID;
521 		spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
522 		poll_threads();
523 		CU_ASSERT(g_bserrno == 0);
524 		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
525 		spdk_bs_delete_blob(bs, g_blobid, blob_op_complete, NULL);
526 		poll_threads();
527 		CU_ASSERT(g_bserrno == 0);
528 	}
529 }
530 
531 static void
532 blob_create_fail(void)
533 {
534 	struct spdk_blob_store *bs = g_bs;
535 	struct spdk_blob_opts opts;
536 	spdk_blob_id blobid;
537 	uint32_t used_blobids_count = spdk_bit_array_count_set(bs->used_blobids);
538 	uint32_t used_md_pages_count = spdk_bit_array_count_set(bs->used_md_pages);
539 
540 	/* NULL callback */
541 	ut_spdk_blob_opts_init(&opts);
542 	opts.xattrs.names = g_xattr_names;
543 	opts.xattrs.get_value = NULL;
544 	opts.xattrs.count = 1;
545 	opts.xattrs.ctx = &g_ctx;
546 
547 	blobid = spdk_bit_array_find_first_clear(bs->used_md_pages, 0);
548 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
549 	poll_threads();
550 	CU_ASSERT(g_bserrno == -EINVAL);
551 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
552 	CU_ASSERT(spdk_bit_array_count_set(bs->used_blobids) == used_blobids_count);
553 	CU_ASSERT(spdk_bit_array_count_set(bs->used_md_pages) == used_md_pages_count);
554 
555 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
556 	poll_threads();
557 	CU_ASSERT(g_bserrno == -ENOENT);
558 	SPDK_CU_ASSERT_FATAL(g_blob == NULL);
559 
560 	ut_bs_reload(&bs, NULL);
561 	CU_ASSERT(spdk_bit_array_count_set(bs->used_blobids) == used_blobids_count);
562 	CU_ASSERT(spdk_bit_array_count_set(bs->used_md_pages) == used_md_pages_count);
563 
564 	spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL);
565 	poll_threads();
566 	CU_ASSERT(g_blob == NULL);
567 	CU_ASSERT(g_bserrno == -ENOENT);
568 }
569 
570 static void
571 blob_create_internal(void)
572 {
573 	struct spdk_blob_store *bs = g_bs;
574 	struct spdk_blob *blob;
575 	struct spdk_blob_opts opts;
576 	struct spdk_blob_xattr_opts internal_xattrs;
577 	const void *value;
578 	size_t value_len;
579 	spdk_blob_id blobid;
580 	int rc;
581 
582 	/* Create blob with custom xattrs */
583 
584 	ut_spdk_blob_opts_init(&opts);
585 	blob_xattrs_init(&internal_xattrs);
586 	internal_xattrs.count = 3;
587 	internal_xattrs.names = g_xattr_names;
588 	internal_xattrs.get_value = _get_xattr_value;
589 	internal_xattrs.ctx = &g_ctx;
590 
591 	bs_create_blob(bs, &opts, &internal_xattrs, blob_op_with_id_complete, NULL);
592 	poll_threads();
593 	CU_ASSERT(g_bserrno == 0);
594 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
595 	blobid = g_blobid;
596 
597 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
598 	poll_threads();
599 	CU_ASSERT(g_bserrno == 0);
600 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
601 	blob = g_blob;
602 
603 	rc = blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len, true);
604 	CU_ASSERT(rc == 0);
605 	SPDK_CU_ASSERT_FATAL(value != NULL);
606 	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
607 	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);
608 
609 	rc = blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len, true);
610 	CU_ASSERT(rc == 0);
611 	SPDK_CU_ASSERT_FATAL(value != NULL);
612 	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
613 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);
614 
615 	rc = blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len, true);
616 	CU_ASSERT(rc == 0);
617 	SPDK_CU_ASSERT_FATAL(value != NULL);
618 	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
619 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);
620 
621 	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len);
622 	CU_ASSERT(rc != 0);
623 
624 	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len);
625 	CU_ASSERT(rc != 0);
626 
627 	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len);
628 	CU_ASSERT(rc != 0);
629 
630 	spdk_blob_close(blob, blob_op_complete, NULL);
631 	poll_threads();
632 	CU_ASSERT(g_bserrno == 0);
633 
634 	/* Create blob with NULL internal options  */
635 
636 	bs_create_blob(bs, NULL, NULL, blob_op_with_id_complete, NULL);
637 	poll_threads();
638 	CU_ASSERT(g_bserrno == 0);
639 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
640 	blobid = g_blobid;
641 
642 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
643 	poll_threads();
644 	CU_ASSERT(g_bserrno == 0);
645 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
646 	CU_ASSERT(TAILQ_FIRST(&g_blob->xattrs_internal) == NULL);
647 	CU_ASSERT(spdk_blob_get_num_clusters(g_blob) == 0);
648 
649 	blob = g_blob;
650 
651 	spdk_blob_close(blob, blob_op_complete, NULL);
652 	poll_threads();
653 	CU_ASSERT(g_bserrno == 0);
654 }
655 
656 static void
657 blob_thin_provision(void)
658 {
659 	struct spdk_blob_store *bs;
660 	struct spdk_bs_dev *dev;
661 	struct spdk_blob *blob;
662 	struct spdk_blob_opts opts;
663 	struct spdk_bs_opts bs_opts;
664 	spdk_blob_id blobid;
665 
666 	dev = init_dev();
667 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
668 	snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");
669 
670 	/* Initialize a new blob store */
671 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
672 	poll_threads();
673 	CU_ASSERT(g_bserrno == 0);
674 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
675 
676 	bs = g_bs;
677 
678 	/* Create blob with thin provisioning enabled */
679 
680 	ut_spdk_blob_opts_init(&opts);
681 	opts.thin_provision = true;
682 	opts.num_clusters = 10;
683 
684 	blob = ut_blob_create_and_open(bs, &opts);
685 	blobid = spdk_blob_get_id(blob);
686 	CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
687 	/* When a thin-provisioned blob is created with num_clusters set and the
688 	 * extent table is not used, nothing is allocated up front. If the extent
689 	 * table is used, its extent-page bookkeeping is allocated. */
690 	if (blob->extent_table_found == true) {
691 		CU_ASSERT(blob->active.extent_pages_array_size > 0);
692 		CU_ASSERT(blob->active.extent_pages != NULL);
693 	} else {
694 		CU_ASSERT(blob->active.extent_pages_array_size == 0);
695 		CU_ASSERT(blob->active.extent_pages == NULL);
696 	}
697 
698 	spdk_blob_close(blob, blob_op_complete, NULL);
699 	CU_ASSERT(g_bserrno == 0);
700 
701 	/* Do not shut down cleanly.  This makes sure that when we load again
702 	 *  and try to recover a valid used_cluster map, the blobstore will
703 	 *  ignore clusters with index 0 since these are unallocated clusters.
704 	 */
705 	ut_bs_dirty_load(&bs, &bs_opts);
706 
707 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
708 	poll_threads();
709 	CU_ASSERT(g_bserrno == 0);
710 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
711 	blob = g_blob;
712 	CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
713 
714 	ut_blob_close_and_delete(bs, blob);
715 
716 	spdk_bs_unload(bs, bs_op_complete, NULL);
717 	poll_threads();
718 	CU_ASSERT(g_bserrno == 0);
719 	g_bs = NULL;
720 }
721 
722 static void
723 blob_snapshot(void)
724 {
725 	struct spdk_blob_store *bs = g_bs;
726 	struct spdk_blob *blob;
727 	struct spdk_blob *snapshot, *snapshot2;
728 	struct spdk_blob_bs_dev *blob_bs_dev;
729 	struct spdk_blob_opts opts;
730 	struct spdk_blob_xattr_opts xattrs;
731 	spdk_blob_id blobid;
732 	spdk_blob_id snapshotid;
733 	spdk_blob_id snapshotid2;
734 	const void *value;
735 	size_t value_len;
736 	int rc;
737 	spdk_blob_id ids[2];
738 	size_t count;
739 
740 	/* Create blob with 10 clusters */
741 	ut_spdk_blob_opts_init(&opts);
742 	opts.num_clusters = 10;
743 
744 	blob = ut_blob_create_and_open(bs, &opts);
745 	blobid = spdk_blob_get_id(blob);
746 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
747 
748 	/* Create snapshot from blob */
749 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 0);
750 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
751 	poll_threads();
752 	CU_ASSERT(g_bserrno == 0);
753 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
754 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
755 	snapshotid = g_blobid;
756 
757 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
758 	poll_threads();
759 	CU_ASSERT(g_bserrno == 0);
760 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
761 	snapshot = g_blob;
762 	CU_ASSERT(snapshot->data_ro == true);
763 	CU_ASSERT(snapshot->md_ro == true);
764 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10);
765 
766 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
767 	CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
768 	CU_ASSERT(spdk_mem_all_zero(blob->active.clusters,
769 				    blob->active.num_clusters * sizeof(blob->active.clusters[0])));
770 
771 	/* Try to create snapshot from clone with xattrs */
772 	xattrs.names = g_xattr_names;
773 	xattrs.get_value = _get_xattr_value;
774 	xattrs.count = 3;
775 	xattrs.ctx = &g_ctx;
776 	spdk_bs_create_snapshot(bs, blobid, &xattrs, blob_op_with_id_complete, NULL);
777 	poll_threads();
778 	CU_ASSERT(g_bserrno == 0);
779 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
780 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 2);
781 	snapshotid2 = g_blobid;
782 
783 	spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL);
784 	CU_ASSERT(g_bserrno == 0);
785 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
786 	snapshot2 = g_blob;
787 	CU_ASSERT(snapshot2->data_ro == true);
788 	CU_ASSERT(snapshot2->md_ro == true);
789 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot2) == 10);
790 
791 	/* Confirm that blob is backed by snapshot2 and snapshot2 is backed by snapshot */
792 	CU_ASSERT(snapshot->back_bs_dev == NULL);
793 	SPDK_CU_ASSERT_FATAL(blob->back_bs_dev != NULL);
794 	SPDK_CU_ASSERT_FATAL(snapshot2->back_bs_dev != NULL);
795 
796 	blob_bs_dev = (struct spdk_blob_bs_dev *)blob->back_bs_dev;
797 	CU_ASSERT(blob_bs_dev->blob == snapshot2);
798 
799 	blob_bs_dev = (struct spdk_blob_bs_dev *)snapshot2->back_bs_dev;
800 	CU_ASSERT(blob_bs_dev->blob == snapshot);
801 
802 	rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[0], &value, &value_len);
803 	CU_ASSERT(rc == 0);
804 	SPDK_CU_ASSERT_FATAL(value != NULL);
805 	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
806 	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);
807 
808 	rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[1], &value, &value_len);
809 	CU_ASSERT(rc == 0);
810 	SPDK_CU_ASSERT_FATAL(value != NULL);
811 	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
812 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);
813 
814 	rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[2], &value, &value_len);
815 	CU_ASSERT(rc == 0);
816 	SPDK_CU_ASSERT_FATAL(value != NULL);
817 	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
818 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);
819 
820 	/* Confirm that blob is clone of snapshot2, and snapshot2 is clone of snapshot */
821 	count = 2;
822 	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0);
823 	CU_ASSERT(count == 1);
824 	CU_ASSERT(ids[0] == blobid);
825 
826 	count = 2;
827 	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);
828 	CU_ASSERT(count == 1);
829 	CU_ASSERT(ids[0] == snapshotid2);
830 
831 	/* Try to create snapshot from snapshot */
832 	spdk_bs_create_snapshot(bs, snapshotid, NULL, blob_op_with_id_complete, NULL);
833 	poll_threads();
834 	CU_ASSERT(g_bserrno == -EINVAL);
835 	CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID);
836 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 2);
837 
838 	/* Delete blob and confirm that it is no longer on snapshot2 clone list */
839 	ut_blob_close_and_delete(bs, blob);
840 	count = 2;
841 	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0);
842 	CU_ASSERT(count == 0);
843 
844 	/* Delete snapshot2 and confirm that it is no longer on snapshot clone list */
845 	ut_blob_close_and_delete(bs, snapshot2);
846 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
847 	count = 2;
848 	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0);
849 	CU_ASSERT(count == 0);
850 
851 	ut_blob_close_and_delete(bs, snapshot);
852 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 0);
853 }
854 
855 static void
856 blob_snapshot_freeze_io(void)
857 {
858 	struct spdk_io_channel *channel;
859 	struct spdk_bs_channel *bs_channel;
860 	struct spdk_blob_store *bs = g_bs;
861 	struct spdk_blob *blob;
862 	struct spdk_blob_opts opts;
863 	spdk_blob_id blobid;
864 	uint32_t num_of_pages = 10;
865 	uint8_t payload_read[num_of_pages * SPDK_BS_PAGE_SIZE];
866 	uint8_t payload_write[num_of_pages * SPDK_BS_PAGE_SIZE];
867 	uint8_t payload_zero[num_of_pages * SPDK_BS_PAGE_SIZE];
868 
869 	memset(payload_write, 0xE5, sizeof(payload_write));
870 	memset(payload_read, 0x00, sizeof(payload_read));
871 	memset(payload_zero, 0x00, sizeof(payload_zero));
872 
873 	/* Test freeze I/O during snapshot */
874 	channel = spdk_bs_alloc_io_channel(bs);
875 	bs_channel = spdk_io_channel_get_ctx(channel);
876 
877 	/* Create blob with 10 clusters */
878 	ut_spdk_blob_opts_init(&opts);
879 	opts.num_clusters = 10;
880 	opts.thin_provision = false;
881 
882 	blob = ut_blob_create_and_open(bs, &opts);
883 	blobid = spdk_blob_get_id(blob);
884 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
885 
886 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
887 
888 	/* This is implementation specific.
889 	 * Flag 'frozen_io' is set in _spdk_bs_snapshot_freeze_cpl callback.
890 	 * Four async I/O operations happen before that. */
891 	poll_thread_times(0, 5);
892 
893 	CU_ASSERT(TAILQ_EMPTY(&bs_channel->queued_io));
894 
895 	/* Blob I/O should be frozen here */
896 	CU_ASSERT(blob->frozen_refcnt == 1);
897 
898 	/* Write to the blob */
899 	spdk_blob_io_write(blob, channel, payload_write, 0, num_of_pages, blob_op_complete, NULL);
900 
901 	/* Verify that I/O is queued */
902 	CU_ASSERT(!TAILQ_EMPTY(&bs_channel->queued_io));
903 	/* Verify that the payload is not yet written to disk; at this point the blob and snapshot have already swapped cluster maps */
904 	CU_ASSERT(blob->active.clusters[0] == 0);
905 
906 	/* Finish all operations including spdk_bs_create_snapshot */
907 	poll_threads();
908 
909 	/* Verify snapshot */
910 	CU_ASSERT(g_bserrno == 0);
911 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
912 
913 	/* Verify that blob has unset frozen_io */
914 	CU_ASSERT(blob->frozen_refcnt == 0);
915 
916 	/* Verify that postponed I/O completed successfully by comparing payload */
917 	spdk_blob_io_read(blob, channel, payload_read, 0, num_of_pages, blob_op_complete, NULL);
918 	poll_threads();
919 	CU_ASSERT(g_bserrno == 0);
920 	CU_ASSERT(memcmp(payload_write, payload_read, num_of_pages * SPDK_BS_PAGE_SIZE) == 0);
921 
922 	spdk_bs_free_io_channel(channel);
923 	poll_threads();
924 
925 	ut_blob_close_and_delete(bs, blob);
926 }
927 
928 static void
929 blob_clone(void)
930 {
931 	struct spdk_blob_store *bs = g_bs;
932 	struct spdk_blob_opts opts;
933 	struct spdk_blob *blob, *snapshot, *clone;
934 	spdk_blob_id blobid, cloneid, snapshotid;
935 	struct spdk_blob_xattr_opts xattrs;
936 	const void *value;
937 	size_t value_len;
938 	int rc;
939 
940 	/* Create blob with 10 clusters */
941 
942 	ut_spdk_blob_opts_init(&opts);
943 	opts.num_clusters = 10;
944 
945 	blob = ut_blob_create_and_open(bs, &opts);
946 	blobid = spdk_blob_get_id(blob);
947 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
948 
949 	/* Create snapshot */
950 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
951 	poll_threads();
952 	CU_ASSERT(g_bserrno == 0);
953 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
954 	snapshotid = g_blobid;
955 
956 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
957 	poll_threads();
958 	CU_ASSERT(g_bserrno == 0);
959 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
960 	snapshot = g_blob;
961 	CU_ASSERT(snapshot->data_ro == true);
962 	CU_ASSERT(snapshot->md_ro == true);
963 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10);
964 
965 	spdk_blob_close(snapshot, blob_op_complete, NULL);
966 	poll_threads();
967 	CU_ASSERT(g_bserrno == 0);
968 
969 	/* Create clone from snapshot with xattrs */
970 	xattrs.names = g_xattr_names;
971 	xattrs.get_value = _get_xattr_value;
972 	xattrs.count = 3;
973 	xattrs.ctx = &g_ctx;
974 
975 	spdk_bs_create_clone(bs, snapshotid, &xattrs, blob_op_with_id_complete, NULL);
976 	poll_threads();
977 	CU_ASSERT(g_bserrno == 0);
978 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
979 	cloneid = g_blobid;
980 
981 	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
982 	poll_threads();
983 	CU_ASSERT(g_bserrno == 0);
984 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
985 	clone = g_blob;
986 	CU_ASSERT(clone->data_ro == false);
987 	CU_ASSERT(clone->md_ro == false);
988 	CU_ASSERT(spdk_blob_get_num_clusters(clone) == 10);
989 
990 	rc = spdk_blob_get_xattr_value(clone, g_xattr_names[0], &value, &value_len);
991 	CU_ASSERT(rc == 0);
992 	SPDK_CU_ASSERT_FATAL(value != NULL);
993 	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
994 	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);
995 
996 	rc = spdk_blob_get_xattr_value(clone, g_xattr_names[1], &value, &value_len);
997 	CU_ASSERT(rc == 0);
998 	SPDK_CU_ASSERT_FATAL(value != NULL);
999 	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
1000 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);
1001 
1002 	rc = spdk_blob_get_xattr_value(clone, g_xattr_names[2], &value, &value_len);
1003 	CU_ASSERT(rc == 0);
1004 	SPDK_CU_ASSERT_FATAL(value != NULL);
1005 	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
1006 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);
1007 
1008 
1009 	spdk_blob_close(clone, blob_op_complete, NULL);
1010 	poll_threads();
1011 	CU_ASSERT(g_bserrno == 0);
1012 
1013 	/* Try to create a clone from a blob that is not read-only */
1014 	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
1015 	poll_threads();
1016 	CU_ASSERT(g_bserrno == -EINVAL);
1017 	CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID);
1018 
1019 	/* Mark blob as read only */
1020 	spdk_blob_set_read_only(blob);
1021 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
1022 	poll_threads();
1023 	CU_ASSERT(g_bserrno == 0);
1024 
1025 	/* Create clone from read only blob */
1026 	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
1027 	poll_threads();
1028 	CU_ASSERT(g_bserrno == 0);
1029 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
1030 	cloneid = g_blobid;
1031 
1032 	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
1033 	poll_threads();
1034 	CU_ASSERT(g_bserrno == 0);
1035 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
1036 	clone = g_blob;
1037 	CU_ASSERT(clone->data_ro == false);
1038 	CU_ASSERT(clone->md_ro == false);
1039 	CU_ASSERT(spdk_blob_get_num_clusters(clone) == 10);
1040 
1041 	ut_blob_close_and_delete(bs, clone);
1042 	ut_blob_close_and_delete(bs, blob);
1043 }
1044 
1045 static void
1046 _blob_inflate(bool decouple_parent)
1047 {
1048 	struct spdk_blob_store *bs = g_bs;
1049 	struct spdk_blob_opts opts;
1050 	struct spdk_blob *blob, *snapshot;
1051 	spdk_blob_id blobid, snapshotid;
1052 	struct spdk_io_channel *channel;
1053 	uint64_t free_clusters;
1054 
1055 	channel = spdk_bs_alloc_io_channel(bs);
1056 	SPDK_CU_ASSERT_FATAL(channel != NULL);
1057 
1058 	/* Create blob with 10 clusters */
1059 
1060 	ut_spdk_blob_opts_init(&opts);
1061 	opts.num_clusters = 10;
1062 	opts.thin_provision = true;
1063 
1064 	blob = ut_blob_create_and_open(bs, &opts);
1065 	blobid = spdk_blob_get_id(blob);
1066 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
1067 	CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == true);
1068 
1069 	/* 1) Blob with no parent */
1070 	if (decouple_parent) {
1071 		/* Decouple parent of blob with no parent (should fail) */
1072 		spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
1073 		poll_threads();
1074 		CU_ASSERT(g_bserrno != 0);
1075 	} else {
1076 		/* Inflating a thin blob with no parent should make it thick */
1077 		spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
1078 		poll_threads();
1079 		CU_ASSERT(g_bserrno == 0);
1080 		CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == false);
1081 	}
1082 
1083 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
1084 	poll_threads();
1085 	CU_ASSERT(g_bserrno == 0);
1086 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
1087 	snapshotid = g_blobid;
1088 
1089 	CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == true);
1090 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
1091 
1092 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
1093 	poll_threads();
1094 	CU_ASSERT(g_bserrno == 0);
1095 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
1096 	snapshot = g_blob;
1097 	CU_ASSERT(snapshot->data_ro == true);
1098 	CU_ASSERT(snapshot->md_ro == true);
1099 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10);
1100 
1101 	spdk_blob_close(snapshot, blob_op_complete, NULL);
1102 	poll_threads();
1103 	CU_ASSERT(g_bserrno == 0);
1104 
1105 	free_clusters = spdk_bs_free_cluster_count(bs);
1106 
1107 	/* 2) Blob with parent */
1108 	if (!decouple_parent) {
1109 		/* Do full blob inflation */
1110 		spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
1111 		poll_threads();
1112 		CU_ASSERT(g_bserrno == 0);
1113 		/* all 10 clusters should be allocated */
1114 		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 10);
1115 	} else {
1116 		/* Decouple parent of blob */
1117 		spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
1118 		poll_threads();
1119 		CU_ASSERT(g_bserrno == 0);
1120 		/* when only the parent is removed, none of the clusters should be allocated */
1121 		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters);
1122 	}
1123 
1124 	/* Now it should be possible to delete the snapshot */
1125 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
1126 	poll_threads();
1127 	CU_ASSERT(g_bserrno == 0);
1128 
1129 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
1130 	CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == decouple_parent);
1131 
1132 	spdk_bs_free_io_channel(channel);
1133 	poll_threads();
1134 
1135 	ut_blob_close_and_delete(bs, blob);
1136 }
1137 
1138 static void
1139 blob_inflate(void)
1140 {
1141 	_blob_inflate(false);
1142 	_blob_inflate(true);
1143 }
1144 
1145 static void
1146 blob_delete(void)
1147 {
1148 	struct spdk_blob_store *bs = g_bs;
1149 	struct spdk_blob_opts blob_opts;
1150 	spdk_blob_id blobid;
1151 
1152 	/* Create a blob and then delete it. */
1153 	ut_spdk_blob_opts_init(&blob_opts);
1154 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
1155 	poll_threads();
1156 	CU_ASSERT(g_bserrno == 0);
1157 	CU_ASSERT(g_blobid > 0);
1158 	blobid = g_blobid;
1159 
1160 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
1161 	poll_threads();
1162 	CU_ASSERT(g_bserrno == 0);
1163 
1164 	/* Try to open the blob */
1165 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
1166 	poll_threads();
1167 	CU_ASSERT(g_bserrno == -ENOENT);
1168 }
1169 
1170 static void
1171 blob_resize_test(void)
1172 {
1173 	struct spdk_blob_store *bs = g_bs;
1174 	struct spdk_blob *blob;
1175 	uint64_t free_clusters;
1176 
1177 	free_clusters = spdk_bs_free_cluster_count(bs);
1178 
1179 	blob = ut_blob_create_and_open(bs, NULL);
1180 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
1181 
1182 	/* Confirm that resize fails if blob is marked read-only. */
1183 	blob->md_ro = true;
1184 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
1185 	poll_threads();
1186 	CU_ASSERT(g_bserrno == -EPERM);
1187 	blob->md_ro = false;
1188 
1189 	/* The blob started at 0 clusters. Resize it to be 5. */
1190 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
1191 	poll_threads();
1192 	CU_ASSERT(g_bserrno == 0);
1193 	CU_ASSERT((free_clusters - 5) == spdk_bs_free_cluster_count(bs));
1194 
1195 	/* Shrink the blob to 3 clusters. This will not actually release
1196 	 * the old clusters until the blob is synced.
1197 	 */
1198 	spdk_blob_resize(blob, 3, blob_op_complete, NULL);
1199 	poll_threads();
1200 	CU_ASSERT(g_bserrno == 0);
1201 	/* Verify there are still 5 clusters in use */
1202 	CU_ASSERT((free_clusters - 5) == spdk_bs_free_cluster_count(bs));
1203 
1204 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
1205 	poll_threads();
1206 	CU_ASSERT(g_bserrno == 0);
1207 	/* Now there are only 3 clusters in use */
1208 	CU_ASSERT((free_clusters - 3) == spdk_bs_free_cluster_count(bs));
1209 
1210 	/* Resize the blob to be 10 clusters. Growth takes effect immediately. */
1211 	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
1212 	poll_threads();
1213 	CU_ASSERT(g_bserrno == 0);
1214 	CU_ASSERT((free_clusters - 10) == spdk_bs_free_cluster_count(bs));
1215 
1216 	/* Try to resize the blob to size larger than blobstore. */
1217 	spdk_blob_resize(blob, bs->total_clusters + 1, blob_op_complete, NULL);
1218 	poll_threads();
1219 	CU_ASSERT(g_bserrno == -ENOSPC);
1220 
1221 	ut_blob_close_and_delete(bs, blob);
1222 }
1223 
1224 static void
1225 blob_read_only(void)
1226 {
1227 	struct spdk_blob_store *bs;
1228 	struct spdk_bs_dev *dev;
1229 	struct spdk_blob *blob;
1230 	struct spdk_bs_opts opts;
1231 	spdk_blob_id blobid;
1232 	int rc;
1233 
1234 	dev = init_dev();
1235 	spdk_bs_opts_init(&opts, sizeof(opts));
1236 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
1237 
1238 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
1239 	poll_threads();
1240 	CU_ASSERT(g_bserrno == 0);
1241 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
1242 	bs = g_bs;
1243 
1244 	blob = ut_blob_create_and_open(bs, NULL);
1245 	blobid = spdk_blob_get_id(blob);
1246 
1247 	rc = spdk_blob_set_read_only(blob);
1248 	CU_ASSERT(rc == 0);
1249 
1250 	CU_ASSERT(blob->data_ro == false);
1251 	CU_ASSERT(blob->md_ro == false);
1252 
1253 	spdk_blob_sync_md(blob, bs_op_complete, NULL);
1254 	poll_threads();
1255 
1256 	CU_ASSERT(blob->data_ro == true);
1257 	CU_ASSERT(blob->md_ro == true);
1258 	CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);
1259 
1260 	spdk_blob_close(blob, blob_op_complete, NULL);
1261 	poll_threads();
1262 	CU_ASSERT(g_bserrno == 0);
1263 
1264 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
1265 	poll_threads();
1266 	CU_ASSERT(g_bserrno == 0);
1267 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
1268 	blob = g_blob;
1269 
1270 	CU_ASSERT(blob->data_ro == true);
1271 	CU_ASSERT(blob->md_ro == true);
1272 	CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);
1273 
1274 	spdk_blob_close(blob, blob_op_complete, NULL);
1275 	poll_threads();
1276 	CU_ASSERT(g_bserrno == 0);
1277 
1278 	ut_bs_reload(&bs, &opts);
1279 
1280 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
1281 	poll_threads();
1282 	CU_ASSERT(g_bserrno == 0);
1283 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
1284 	blob = g_blob;
1285 
1286 	CU_ASSERT(blob->data_ro == true);
1287 	CU_ASSERT(blob->md_ro == true);
1288 	CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);
1289 
1290 	ut_blob_close_and_delete(bs, blob);
1291 
1292 	spdk_bs_unload(bs, bs_op_complete, NULL);
1293 	poll_threads();
1294 	CU_ASSERT(g_bserrno == 0);
1295 }
1296 
1297 static void
1298 channel_ops(void)
1299 {
1300 	struct spdk_blob_store *bs = g_bs;
1301 	struct spdk_io_channel *channel;
1302 
1303 	channel = spdk_bs_alloc_io_channel(bs);
1304 	CU_ASSERT(channel != NULL);
1305 
1306 	spdk_bs_free_io_channel(channel);
1307 	poll_threads();
1308 }
1309 
1310 static void
1311 blob_write(void)
1312 {
1313 	struct spdk_blob_store *bs = g_bs;
1314 	struct spdk_blob *blob = g_blob;
1315 	struct spdk_io_channel *channel;
1316 	uint64_t pages_per_cluster;
1317 	uint8_t payload[10 * 4096];
1318 
1319 	pages_per_cluster = spdk_bs_get_cluster_size(bs) / spdk_bs_get_page_size(bs);
1320 
1321 	channel = spdk_bs_alloc_io_channel(bs);
1322 	CU_ASSERT(channel != NULL);
1323 
1324 	/* Write to a blob with 0 size */
1325 	spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL);
1326 	poll_threads();
1327 	CU_ASSERT(g_bserrno == -EINVAL);
1328 
1329 	/* Resize the blob */
1330 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
1331 	poll_threads();
1332 	CU_ASSERT(g_bserrno == 0);
1333 
1334 	/* Confirm that write fails if blob is marked read-only. */
1335 	blob->data_ro = true;
1336 	spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL);
1337 	poll_threads();
1338 	CU_ASSERT(g_bserrno == -EPERM);
1339 	blob->data_ro = false;
1340 
1341 	/* Write to the blob */
1342 	spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL);
1343 	poll_threads();
1344 	CU_ASSERT(g_bserrno == 0);
1345 
1346 	/* Write starting beyond the end */
1347 	spdk_blob_io_write(blob, channel, payload, 5 * pages_per_cluster, 1, blob_op_complete,
1348 			   NULL);
1349 	poll_threads();
1350 	CU_ASSERT(g_bserrno == -EINVAL);
1351 
1352 	/* Write starting at a valid location but going off the end */
1353 	spdk_blob_io_write(blob, channel, payload, 4 * pages_per_cluster, pages_per_cluster + 1,
1354 			   blob_op_complete, NULL);
1355 	poll_threads();
1356 	CU_ASSERT(g_bserrno == -EINVAL);
1357 
1358 	spdk_bs_free_io_channel(channel);
1359 	poll_threads();
1360 }
1361 
1362 static void
1363 blob_read(void)
1364 {
1365 	struct spdk_blob_store *bs = g_bs;
1366 	struct spdk_blob *blob = g_blob;
1367 	struct spdk_io_channel *channel;
1368 	uint64_t pages_per_cluster;
1369 	uint8_t payload[10 * 4096];
1370 
1371 	pages_per_cluster = spdk_bs_get_cluster_size(bs) / spdk_bs_get_page_size(bs);
1372 
1373 	channel = spdk_bs_alloc_io_channel(bs);
1374 	CU_ASSERT(channel != NULL);
1375 
1376 	/* Read from a blob with 0 size */
1377 	spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL);
1378 	poll_threads();
1379 	CU_ASSERT(g_bserrno == -EINVAL);
1380 
1381 	/* Resize the blob */
1382 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
1383 	poll_threads();
1384 	CU_ASSERT(g_bserrno == 0);
1385 
1386 	/* Confirm that read passes if blob is marked read-only. */
1387 	blob->data_ro = true;
1388 	spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL);
1389 	poll_threads();
1390 	CU_ASSERT(g_bserrno == 0);
1391 	blob->data_ro = false;
1392 
1393 	/* Read from the blob */
1394 	spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL);
1395 	poll_threads();
1396 	CU_ASSERT(g_bserrno == 0);
1397 
1398 	/* Read starting beyond the end */
1399 	spdk_blob_io_read(blob, channel, payload, 5 * pages_per_cluster, 1, blob_op_complete,
1400 			  NULL);
1401 	poll_threads();
1402 	CU_ASSERT(g_bserrno == -EINVAL);
1403 
1404 	/* Read starting at a valid location but going off the end */
1405 	spdk_blob_io_read(blob, channel, payload, 4 * pages_per_cluster, pages_per_cluster + 1,
1406 			  blob_op_complete, NULL);
1407 	poll_threads();
1408 	CU_ASSERT(g_bserrno == -EINVAL);
1409 
1410 	spdk_bs_free_io_channel(channel);
1411 	poll_threads();
1412 }
1413 
1414 static void
1415 blob_rw_verify(void)
1416 {
1417 	struct spdk_blob_store *bs = g_bs;
1418 	struct spdk_blob *blob = g_blob;
1419 	struct spdk_io_channel *channel;
1420 	uint8_t payload_read[10 * 4096];
1421 	uint8_t payload_write[10 * 4096];
1422 
1423 	channel = spdk_bs_alloc_io_channel(bs);
1424 	CU_ASSERT(channel != NULL);
1425 
1426 	spdk_blob_resize(blob, 32, blob_op_complete, NULL);
1427 	poll_threads();
1428 	CU_ASSERT(g_bserrno == 0);
1429 
1430 	memset(payload_write, 0xE5, sizeof(payload_write));
1431 	spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
1432 	poll_threads();
1433 	CU_ASSERT(g_bserrno == 0);
1434 
1435 	memset(payload_read, 0x00, sizeof(payload_read));
1436 	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
1437 	poll_threads();
1438 	CU_ASSERT(g_bserrno == 0);
1439 	CU_ASSERT(memcmp(payload_write, payload_read, 4 * 4096) == 0);
1440 
1441 	spdk_bs_free_io_channel(channel);
1442 	poll_threads();
1443 }
1444 
1445 static void
1446 blob_rw_verify_iov(void)
1447 {
1448 	struct spdk_blob_store *bs = g_bs;
1449 	struct spdk_blob *blob;
1450 	struct spdk_io_channel *channel;
1451 	uint8_t payload_read[10 * 4096];
1452 	uint8_t payload_write[10 * 4096];
1453 	struct iovec iov_read[3];
1454 	struct iovec iov_write[3];
1455 	void *buf;
1456 
1457 	channel = spdk_bs_alloc_io_channel(bs);
1458 	CU_ASSERT(channel != NULL);
1459 
1460 	blob = ut_blob_create_and_open(bs, NULL);
1461 
1462 	spdk_blob_resize(blob, 2, blob_op_complete, NULL);
1463 	poll_threads();
1464 	CU_ASSERT(g_bserrno == 0);
1465 
1466 	/*
1467 	 * Manually adjust the offset of the blob's second cluster.  This allows
1468 	 *  us to make sure that the readv/write code correctly accounts for I/O
1469 	 *  that cross cluster boundaries.  Start by asserting that the allocated
1470 	 *  clusters are where we expect before modifying the second cluster.
1471 	 */
1472 	CU_ASSERT(blob->active.clusters[0] == 1 * 256);
1473 	CU_ASSERT(blob->active.clusters[1] == 2 * 256);
1474 	blob->active.clusters[1] = 3 * 256;
1475 
1476 	memset(payload_write, 0xE5, sizeof(payload_write));
1477 	iov_write[0].iov_base = payload_write;
1478 	iov_write[0].iov_len = 1 * 4096;
1479 	iov_write[1].iov_base = payload_write + 1 * 4096;
1480 	iov_write[1].iov_len = 5 * 4096;
1481 	iov_write[2].iov_base = payload_write + 6 * 4096;
1482 	iov_write[2].iov_len = 4 * 4096;
1483 	/*
1484 	 * Choose a page offset just before the cluster boundary.  The first 6 pages of payload
1485 	 *  will get written to the first cluster, the last 4 to the second cluster.
1486 	 */
1487 	spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
1488 	poll_threads();
1489 	CU_ASSERT(g_bserrno == 0);
1490 
1491 	memset(payload_read, 0xAA, sizeof(payload_read));
1492 	iov_read[0].iov_base = payload_read;
1493 	iov_read[0].iov_len = 3 * 4096;
1494 	iov_read[1].iov_base = payload_read + 3 * 4096;
1495 	iov_read[1].iov_len = 4 * 4096;
1496 	iov_read[2].iov_base = payload_read + 7 * 4096;
1497 	iov_read[2].iov_len = 3 * 4096;
1498 	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
1499 	poll_threads();
1500 	CU_ASSERT(g_bserrno == 0);
1501 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
1502 
1503 	buf = calloc(1, 256 * 4096);
1504 	SPDK_CU_ASSERT_FATAL(buf != NULL);
1505 	/* Check that cluster 2 on "disk" was not modified. */
1506 	CU_ASSERT(memcmp(buf, &g_dev_buffer[512 * 4096], 256 * 4096) == 0);
1507 	free(buf);
1508 
1509 	spdk_blob_close(blob, blob_op_complete, NULL);
1510 	poll_threads();
1511 	CU_ASSERT(g_bserrno == 0);
1512 
1513 	spdk_bs_free_io_channel(channel);
1514 	poll_threads();
1515 }
1516 
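/*
 * Count the request sets currently on the channel's reqs queue; used to
 * verify that a failed I/O leaves the count unchanged.
 */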
1517 static uint32_t
1518 bs_channel_get_req_count(struct spdk_io_channel *_channel)
1519 {
1520 	struct spdk_bs_channel *channel = spdk_io_channel_get_ctx(_channel);
1521 	struct spdk_bs_request_set *set;
1522 	uint32_t count = 0;
1523 
1524 	TAILQ_FOREACH(set, &channel->reqs, link) {
1525 		count++;
1526 	}
1527 
1528 	return count;
1529 }
1530 
1531 static void
1532 blob_rw_verify_iov_nomem(void)
1533 {
1534 	struct spdk_blob_store *bs = g_bs;
1535 	struct spdk_blob *blob = g_blob;
1536 	struct spdk_io_channel *channel;
1537 	uint8_t payload_write[10 * 4096];
1538 	struct iovec iov_write[3];
1539 	uint32_t req_count;
1540 
1541 	channel = spdk_bs_alloc_io_channel(bs);
1542 	CU_ASSERT(channel != NULL);
1543 
1544 	spdk_blob_resize(blob, 2, blob_op_complete, NULL);
1545 	poll_threads();
1546 	CU_ASSERT(g_bserrno == 0);
1547 
1548 	/*
1549 	 * Choose a page offset just before the cluster boundary.  The first 6 pages of payload
1550 	 *  will get written to the first cluster, the last 4 to the second cluster.
1551 	 */
1552 	iov_write[0].iov_base = payload_write;
1553 	iov_write[0].iov_len = 1 * 4096;
1554 	iov_write[1].iov_base = payload_write + 1 * 4096;
1555 	iov_write[1].iov_len = 5 * 4096;
1556 	iov_write[2].iov_base = payload_write + 6 * 4096;
1557 	iov_write[2].iov_len = 4 * 4096;
1558 	MOCK_SET(calloc, NULL);
1559 	req_count = bs_channel_get_req_count(channel);
1560 	spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
1561 	poll_threads();
1562 	CU_ASSERT(g_bserrno == -ENOMEM);
1563 	CU_ASSERT(req_count == bs_channel_get_req_count(channel));
1564 	MOCK_CLEAR(calloc);
1565 
1566 	spdk_bs_free_io_channel(channel);
1567 	poll_threads();
1568 }
1569 
1570 static void
1571 blob_rw_iov_read_only(void)
1572 {
1573 	struct spdk_blob_store *bs = g_bs;
1574 	struct spdk_blob *blob = g_blob;
1575 	struct spdk_io_channel *channel;
1576 	uint8_t payload_read[4096];
1577 	uint8_t payload_write[4096];
1578 	struct iovec iov_read;
1579 	struct iovec iov_write;
1580 
1581 	channel = spdk_bs_alloc_io_channel(bs);
1582 	CU_ASSERT(channel != NULL);
1583 
1584 	spdk_blob_resize(blob, 2, blob_op_complete, NULL);
1585 	poll_threads();
1586 	CU_ASSERT(g_bserrno == 0);
1587 
1588 	/* Verify that writev fails when the read-only flag is set. */
1589 	blob->data_ro = true;
1590 	iov_write.iov_base = payload_write;
1591 	iov_write.iov_len = sizeof(payload_write);
1592 	spdk_blob_io_writev(blob, channel, &iov_write, 1, 0, 1, blob_op_complete, NULL);
1593 	poll_threads();
1594 	CU_ASSERT(g_bserrno == -EPERM);
1595 
1596 	/* Verify that reads pass if data_ro flag is set. */
1597 	iov_read.iov_base = payload_read;
1598 	iov_read.iov_len = sizeof(payload_read);
1599 	spdk_blob_io_readv(blob, channel, &iov_read, 1, 0, 1, blob_op_complete, NULL);
1600 	poll_threads();
1601 	CU_ASSERT(g_bserrno == 0);
1602 
1603 	spdk_bs_free_io_channel(channel);
1604 	poll_threads();
1605 }
1606 
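/*
 * Reference read path: issue one-page reads so that no request ever has to
 * be split across a cluster boundary.
 */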
1607 static void
1608 _blob_io_read_no_split(struct spdk_blob *blob, struct spdk_io_channel *channel,
1609 		       uint8_t *payload, uint64_t offset, uint64_t length,
1610 		       spdk_blob_op_complete cb_fn, void *cb_arg)
1611 {
1612 	uint64_t i;
1613 	uint8_t *buf;
1614 	uint64_t page_size = spdk_bs_get_page_size(blob->bs);
1615 
1616 	/* To be sure that the operation is NOT split, read one page at a time */
1617 	buf = payload;
1618 	for (i = 0; i < length; i++) {
1619 		spdk_blob_io_read(blob, channel, buf, i + offset, 1, blob_op_complete, NULL);
1620 		poll_threads();
1621 		if (g_bserrno != 0) {
1622 			/* Pass the error code up */
1623 			break;
1624 		}
1625 		buf += page_size;
1626 	}
1627 
1628 	cb_fn(cb_arg, g_bserrno);
1629 }
1630 
1631 static void
1632 _blob_io_write_no_split(struct spdk_blob *blob, struct spdk_io_channel *channel,
1633 			uint8_t *payload, uint64_t offset, uint64_t length,
1634 			spdk_blob_op_complete cb_fn, void *cb_arg)
1635 {
1636 	uint64_t i;
1637 	uint8_t *buf;
1638 	uint64_t page_size = spdk_bs_get_page_size(blob->bs);
1639 
1640 	/* To be sure that the operation is NOT split, write one page at a time */
1641 	buf = payload;
1642 	for (i = 0; i < length; i++) {
1643 		spdk_blob_io_write(blob, channel, buf, i + offset, 1, blob_op_complete, NULL);
1644 		poll_threads();
1645 		if (g_bserrno != 0) {
1646 			/* Pass the error code up */
1647 			break;
1648 		}
1649 		buf += page_size;
1650 	}
1651 
1652 	cb_fn(cb_arg, g_bserrno);
1653 }
1654 
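/*
 * Exercise reads and writes that span several clusters and therefore must be
 * split into per-cluster requests, cross-checking the results against the
 * no-split helpers above.
 */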
1655 static void
1656 blob_operation_split_rw(void)
1657 {
1658 	struct spdk_blob_store *bs = g_bs;
1659 	struct spdk_blob *blob;
1660 	struct spdk_io_channel *channel;
1661 	struct spdk_blob_opts opts;
1662 	uint64_t cluster_size;
1663 
1664 	uint64_t payload_size;
1665 	uint8_t *payload_read;
1666 	uint8_t *payload_write;
1667 	uint8_t *payload_pattern;
1668 
1669 	uint64_t page_size;
1670 	uint64_t pages_per_cluster;
1671 	uint64_t pages_per_payload;
1672 
1673 	uint64_t i;
1674 
1675 	cluster_size = spdk_bs_get_cluster_size(bs);
1676 	page_size = spdk_bs_get_page_size(bs);
1677 	pages_per_cluster = cluster_size / page_size;
1678 	pages_per_payload = pages_per_cluster * 5;
1679 	payload_size = cluster_size * 5;
1680 
1681 	payload_read = malloc(payload_size);
1682 	SPDK_CU_ASSERT_FATAL(payload_read != NULL);
1683 
1684 	payload_write = malloc(payload_size);
1685 	SPDK_CU_ASSERT_FATAL(payload_write != NULL);
1686 
1687 	payload_pattern = malloc(payload_size);
1688 	SPDK_CU_ASSERT_FATAL(payload_pattern != NULL);
1689 
1690 	/* Prepare random pattern to write */
1691 	memset(payload_pattern, 0xFF, payload_size);
1692 	for (i = 0; i < pages_per_payload; i++) {
1693 		*((uint64_t *)(payload_pattern + page_size * i)) = (i + 1);
1694 	}
1695 
1696 	channel = spdk_bs_alloc_io_channel(bs);
1697 	SPDK_CU_ASSERT_FATAL(channel != NULL);
1698 
1699 	/* Create blob */
1700 	ut_spdk_blob_opts_init(&opts);
1701 	opts.thin_provision = false;
1702 	opts.num_clusters = 5;
1703 
1704 	blob = ut_blob_create_and_open(bs, &opts);
1705 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
1706 
1707 	/* Initial read should return zeroed payload */
1708 	memset(payload_read, 0xFF, payload_size);
1709 	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
1710 	poll_threads();
1711 	CU_ASSERT(g_bserrno == 0);
1712 	CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size));
1713 
1714 	/* Fill whole blob except last page */
1715 	spdk_blob_io_write(blob, channel, payload_pattern, 0, pages_per_payload - 1,
1716 			   blob_op_complete, NULL);
1717 	poll_threads();
1718 	CU_ASSERT(g_bserrno == 0);
1719 
1720 	/* Write last page with a pattern */
1721 	spdk_blob_io_write(blob, channel, payload_pattern, pages_per_payload - 1, 1,
1722 			   blob_op_complete, NULL);
1723 	poll_threads();
1724 	CU_ASSERT(g_bserrno == 0);
1725 
1726 	/* Read whole blob and check consistency */
1727 	memset(payload_read, 0xFF, payload_size);
1728 	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
1729 	poll_threads();
1730 	CU_ASSERT(g_bserrno == 0);
1731 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size - page_size) == 0);
1732 	CU_ASSERT(memcmp(payload_pattern, payload_read + payload_size - page_size, page_size) == 0);
1733 
1734 	/* Fill whole blob except first page */
1735 	spdk_blob_io_write(blob, channel, payload_pattern, 1, pages_per_payload - 1,
1736 			   blob_op_complete, NULL);
1737 	poll_threads();
1738 	CU_ASSERT(g_bserrno == 0);
1739 
1740 	/* Write first page with a pattern */
1741 	spdk_blob_io_write(blob, channel, payload_pattern, 0, 1,
1742 			   blob_op_complete, NULL);
1743 	poll_threads();
1744 	CU_ASSERT(g_bserrno == 0);
1745 
1746 	/* Read whole blob and check consistency */
1747 	memset(payload_read, 0xFF, payload_size);
1748 	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
1749 	poll_threads();
1750 	CU_ASSERT(g_bserrno == 0);
1751 	CU_ASSERT(memcmp(payload_pattern, payload_read + page_size, payload_size - page_size) == 0);
1752 	CU_ASSERT(memcmp(payload_pattern, payload_read, page_size) == 0);
1753 
1754 
1755 	/* Fill whole blob with a pattern (5 clusters) */
1756 
1757 	/* 1. Read test. */
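	/* Write the pattern page by page (no split on the write side), then issue a single
	 *  read spanning all 5 clusters; that read has to be split at cluster boundaries. */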
1758 	_blob_io_write_no_split(blob, channel, payload_pattern, 0, pages_per_payload,
1759 				blob_op_complete, NULL);
1760 	poll_threads();
1761 	CU_ASSERT(g_bserrno == 0);
1762 
1763 	memset(payload_read, 0xFF, payload_size);
1764 	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
1765 	poll_threads();
1766 	poll_threads();
1767 	CU_ASSERT(g_bserrno == 0);
1768 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);
1769 
1770 	/* 2. Write test. */
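	/* Issue a single write spanning all 5 clusters (split at cluster boundaries),
	 *  then read the data back page by page with no split and verify it. */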
1771 	spdk_blob_io_write(blob, channel, payload_pattern, 0, pages_per_payload,
1772 			   blob_op_complete, NULL);
1773 	poll_threads();
1774 	CU_ASSERT(g_bserrno == 0);
1775 
1776 	memset(payload_read, 0xFF, payload_size);
1777 	_blob_io_read_no_split(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
1778 	poll_threads();
1779 	CU_ASSERT(g_bserrno == 0);
1780 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);
1781 
1782 	spdk_bs_free_io_channel(channel);
1783 	poll_threads();
1784 
1785 	g_blob = NULL;
1786 	g_blobid = 0;
1787 
1788 	free(payload_read);
1789 	free(payload_write);
1790 	free(payload_pattern);
1791 
1792 	ut_blob_close_and_delete(bs, blob);
1793 }
1794 
1795 static void
1796 blob_operation_split_rw_iov(void)
1797 {
1798 	struct spdk_blob_store *bs = g_bs;
1799 	struct spdk_blob *blob;
1800 	struct spdk_io_channel *channel;
1801 	struct spdk_blob_opts opts;
1802 	uint64_t cluster_size;
1803 
1804 	uint64_t payload_size;
1805 	uint8_t *payload_read;
1806 	uint8_t *payload_write;
1807 	uint8_t *payload_pattern;
1808 
1809 	uint64_t page_size;
1810 	uint64_t pages_per_cluster;
1811 	uint64_t pages_per_payload;
1812 
1813 	struct iovec iov_read[2];
1814 	struct iovec iov_write[2];
1815 
1816 	uint64_t i, j;
1817 
1818 	cluster_size = spdk_bs_get_cluster_size(bs);
1819 	page_size = spdk_bs_get_page_size(bs);
1820 	pages_per_cluster = cluster_size / page_size;
1821 	pages_per_payload = pages_per_cluster * 5;
1822 	payload_size = cluster_size * 5;
1823 
1824 	payload_read = malloc(payload_size);
1825 	SPDK_CU_ASSERT_FATAL(payload_read != NULL);
1826 
1827 	payload_write = malloc(payload_size);
1828 	SPDK_CU_ASSERT_FATAL(payload_write != NULL);
1829 
1830 	payload_pattern = malloc(payload_size);
1831 	SPDK_CU_ASSERT_FATAL(payload_pattern != NULL);
1832 
	/* Prepare a pattern to write: fill every 64-bit word of page i with i + 1 */
1834 	for (i = 0; i < pages_per_payload; i++) {
1835 		for (j = 0; j < page_size / sizeof(uint64_t); j++) {
1836 			uint64_t *tmp;
1837 
1838 			tmp = (uint64_t *)payload_pattern;
1839 			tmp += ((page_size * i) / sizeof(uint64_t)) + j;
1840 			*tmp = i + 1;
1841 		}
1842 	}
1843 
1844 	channel = spdk_bs_alloc_io_channel(bs);
1845 	SPDK_CU_ASSERT_FATAL(channel != NULL);
1846 
1847 	/* Create blob */
1848 	ut_spdk_blob_opts_init(&opts);
1849 	opts.thin_provision = false;
1850 	opts.num_clusters = 5;
1851 
1852 	blob = ut_blob_create_and_open(bs, &opts);
1853 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
1854 
	/* Initial read should return zeroed payload */
1856 	memset(payload_read, 0xFF, payload_size);
1857 	iov_read[0].iov_base = payload_read;
1858 	iov_read[0].iov_len = cluster_size * 3;
1859 	iov_read[1].iov_base = payload_read + cluster_size * 3;
1860 	iov_read[1].iov_len = cluster_size * 2;
1861 	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL);
1862 	poll_threads();
1863 	CU_ASSERT(g_bserrno == 0);
1864 	CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size));
1865 
	/* The first iov fills the whole blob except the last page and the second iov
	 *  writes the last page with a pattern. */
1868 	iov_write[0].iov_base = payload_pattern;
1869 	iov_write[0].iov_len = payload_size - page_size;
1870 	iov_write[1].iov_base = payload_pattern;
1871 	iov_write[1].iov_len = page_size;
1872 	spdk_blob_io_writev(blob, channel, iov_write, 2, 0, pages_per_payload, blob_op_complete, NULL);
1873 	poll_threads();
1874 	CU_ASSERT(g_bserrno == 0);
1875 
1876 	/* Read whole blob and check consistency */
1877 	memset(payload_read, 0xFF, payload_size);
1878 	iov_read[0].iov_base = payload_read;
1879 	iov_read[0].iov_len = cluster_size * 2;
1880 	iov_read[1].iov_base = payload_read + cluster_size * 2;
1881 	iov_read[1].iov_len = cluster_size * 3;
1882 	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL);
1883 	poll_threads();
1884 	CU_ASSERT(g_bserrno == 0);
1885 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size - page_size) == 0);
1886 	CU_ASSERT(memcmp(payload_pattern, payload_read + payload_size - page_size, page_size) == 0);
1887 
	/* The first iov fills only the first page and the second iov writes the whole
	 *  blob except the first page with a pattern. */
1890 	iov_write[0].iov_base = payload_pattern;
1891 	iov_write[0].iov_len = page_size;
1892 	iov_write[1].iov_base = payload_pattern;
1893 	iov_write[1].iov_len = payload_size - page_size;
1894 	spdk_blob_io_writev(blob, channel, iov_write, 2, 0, pages_per_payload, blob_op_complete, NULL);
1895 	poll_threads();
1896 	CU_ASSERT(g_bserrno == 0);
1897 
1898 	/* Read whole blob and check consistency */
1899 	memset(payload_read, 0xFF, payload_size);
1900 	iov_read[0].iov_base = payload_read;
1901 	iov_read[0].iov_len = cluster_size * 4;
1902 	iov_read[1].iov_base = payload_read + cluster_size * 4;
1903 	iov_read[1].iov_len = cluster_size;
1904 	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL);
1905 	poll_threads();
1906 	CU_ASSERT(g_bserrno == 0);
1907 	CU_ASSERT(memcmp(payload_pattern, payload_read + page_size, payload_size - page_size) == 0);
1908 	CU_ASSERT(memcmp(payload_pattern, payload_read, page_size) == 0);
1909 
1910 
1911 	/* Fill whole blob with a pattern (5 clusters) */
1912 
1913 	/* 1. Read test. */
1914 	_blob_io_write_no_split(blob, channel, payload_pattern, 0, pages_per_payload,
1915 				blob_op_complete, NULL);
1916 	poll_threads();
1917 	CU_ASSERT(g_bserrno == 0);
1918 
1919 	memset(payload_read, 0xFF, payload_size);
1920 	iov_read[0].iov_base = payload_read;
1921 	iov_read[0].iov_len = cluster_size;
1922 	iov_read[1].iov_base = payload_read + cluster_size;
1923 	iov_read[1].iov_len = cluster_size * 4;
1924 	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL);
1925 	poll_threads();
1926 	CU_ASSERT(g_bserrno == 0);
1927 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);
1928 
1929 	/* 2. Write test. */
1930 	iov_write[0].iov_base = payload_read;
1931 	iov_write[0].iov_len = cluster_size * 2;
1932 	iov_write[1].iov_base = payload_read + cluster_size * 2;
1933 	iov_write[1].iov_len = cluster_size * 3;
1934 	spdk_blob_io_writev(blob, channel, iov_write, 2, 0, pages_per_payload, blob_op_complete, NULL);
1935 	poll_threads();
1936 	CU_ASSERT(g_bserrno == 0);
1937 
1938 	memset(payload_read, 0xFF, payload_size);
1939 	_blob_io_read_no_split(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
1940 	poll_threads();
1941 	CU_ASSERT(g_bserrno == 0);
1942 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);
1943 
1944 	spdk_bs_free_io_channel(channel);
1945 	poll_threads();
1946 
1947 	g_blob = NULL;
1948 	g_blobid = 0;
1949 
1950 	free(payload_read);
1951 	free(payload_write);
1952 	free(payload_pattern);
1953 
1954 	ut_blob_close_and_delete(bs, blob);
1955 }
1956 
1957 static void
1958 blob_unmap(void)
1959 {
1960 	struct spdk_blob_store *bs = g_bs;
1961 	struct spdk_blob *blob;
1962 	struct spdk_io_channel *channel;
1963 	struct spdk_blob_opts opts;
1964 	uint8_t payload[4096];
1965 	int i;
1966 
1967 	channel = spdk_bs_alloc_io_channel(bs);
1968 	CU_ASSERT(channel != NULL);
1969 
1970 	ut_spdk_blob_opts_init(&opts);
1971 	opts.num_clusters = 10;
1972 
1973 	blob = ut_blob_create_and_open(bs, &opts);
1974 
1975 	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
1976 	poll_threads();
1977 	CU_ASSERT(g_bserrno == 0);
1978 
1979 	memset(payload, 0, sizeof(payload));
1980 	payload[0] = 0xFF;
1981 
1982 	/*
1983 	 * Set first byte of every cluster to 0xFF.
1984 	 * First cluster on device is reserved so let's start from cluster number 1
1985 	 */
1986 	for (i = 1; i < 11; i++) {
1987 		g_dev_buffer[i * SPDK_BLOB_OPTS_CLUSTER_SZ] = 0xFF;
1988 	}
1989 
	/* Confirm that the markers written directly to the device are visible through blob reads */
1991 	for (i = 0; i < 10; i++) {
1992 		payload[0] = 0;
1993 		spdk_blob_io_read(blob, channel, &payload, i * SPDK_BLOB_OPTS_CLUSTER_SZ / 4096, 1,
1994 				  blob_op_complete, NULL);
1995 		poll_threads();
1996 		CU_ASSERT(g_bserrno == 0);
1997 		CU_ASSERT(payload[0] == 0xFF);
1998 	}
1999 
2000 	/* Mark some clusters as unallocated */
2001 	blob->active.clusters[1] = 0;
2002 	blob->active.clusters[2] = 0;
2003 	blob->active.clusters[3] = 0;
2004 	blob->active.clusters[6] = 0;
2005 	blob->active.clusters[8] = 0;
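	/* Blob cluster i maps to device cluster i + 1 here, so after the resize to 0 the
	 *  device clusters 2, 3, 4, 7 and 9 (whose blob clusters were marked unallocated)
	 *  should keep their 0xFF marker while the rest get unmapped. */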
2006 
2007 	/* Unmap clusters by resizing to 0 */
2008 	spdk_blob_resize(blob, 0, blob_op_complete, NULL);
2009 	poll_threads();
2010 	CU_ASSERT(g_bserrno == 0);
2011 
2012 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
2013 	poll_threads();
2014 	CU_ASSERT(g_bserrno == 0);
2015 
2016 	/* Confirm that only 'allocated' clusters were unmapped */
2017 	for (i = 1; i < 11; i++) {
2018 		switch (i) {
2019 		case 2:
2020 		case 3:
2021 		case 4:
2022 		case 7:
2023 		case 9:
2024 			CU_ASSERT(g_dev_buffer[i * SPDK_BLOB_OPTS_CLUSTER_SZ] == 0xFF);
2025 			break;
2026 		default:
2027 			CU_ASSERT(g_dev_buffer[i * SPDK_BLOB_OPTS_CLUSTER_SZ] == 0);
2028 			break;
2029 		}
2030 	}
2031 
2032 	spdk_bs_free_io_channel(channel);
2033 	poll_threads();
2034 
2035 	ut_blob_close_and_delete(bs, blob);
2036 }
2037 
2038 static void
2039 blob_iter(void)
2040 {
2041 	struct spdk_blob_store *bs = g_bs;
2042 	struct spdk_blob *blob;
2043 	spdk_blob_id blobid;
2044 	struct spdk_blob_opts blob_opts;
2045 
2046 	spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL);
2047 	poll_threads();
2048 	CU_ASSERT(g_blob == NULL);
2049 	CU_ASSERT(g_bserrno == -ENOENT);
2050 
2051 	ut_spdk_blob_opts_init(&blob_opts);
2052 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
2053 	poll_threads();
2054 	CU_ASSERT(g_bserrno == 0);
2055 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
2056 	blobid = g_blobid;
2057 
2058 	spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL);
2059 	poll_threads();
2060 	CU_ASSERT(g_blob != NULL);
2061 	CU_ASSERT(g_bserrno == 0);
2062 	blob = g_blob;
2063 	CU_ASSERT(spdk_blob_get_id(blob) == blobid);
2064 
2065 	spdk_bs_iter_next(bs, blob, blob_op_with_handle_complete, NULL);
2066 	poll_threads();
2067 	CU_ASSERT(g_blob == NULL);
2068 	CU_ASSERT(g_bserrno == -ENOENT);
2069 }
2070 
2071 static void
2072 blob_xattr(void)
2073 {
2074 	struct spdk_blob_store *bs = g_bs;
2075 	struct spdk_blob *blob = g_blob;
2076 	spdk_blob_id blobid = spdk_blob_get_id(blob);
2077 	uint64_t length;
2078 	int rc;
2079 	const char *name1, *name2;
2080 	const void *value;
2081 	size_t value_len;
2082 	struct spdk_xattr_names *names;
2083 
2084 	/* Test that set_xattr fails if md_ro flag is set. */
2085 	blob->md_ro = true;
2086 	rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
2087 	CU_ASSERT(rc == -EPERM);
2088 
2089 	blob->md_ro = false;
2090 	rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
2091 	CU_ASSERT(rc == 0);
2092 
2093 	length = 2345;
2094 	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
2095 	CU_ASSERT(rc == 0);
2096 
2097 	/* Overwrite "length" xattr. */
2098 	length = 3456;
2099 	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
2100 	CU_ASSERT(rc == 0);
2101 
2102 	/* get_xattr should still work even if md_ro flag is set. */
2103 	value = NULL;
2104 	blob->md_ro = true;
2105 	rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len);
2106 	CU_ASSERT(rc == 0);
2107 	SPDK_CU_ASSERT_FATAL(value != NULL);
2108 	CU_ASSERT(*(uint64_t *)value == length);
2109 	CU_ASSERT(value_len == 8);
2110 	blob->md_ro = false;
2111 
2112 	rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len);
2113 	CU_ASSERT(rc == -ENOENT);
2114 
2115 	names = NULL;
2116 	rc = spdk_blob_get_xattr_names(blob, &names);
2117 	CU_ASSERT(rc == 0);
2118 	SPDK_CU_ASSERT_FATAL(names != NULL);
2119 	CU_ASSERT(spdk_xattr_names_get_count(names) == 2);
2120 	name1 = spdk_xattr_names_get_name(names, 0);
2121 	SPDK_CU_ASSERT_FATAL(name1 != NULL);
2122 	CU_ASSERT(!strcmp(name1, "name") || !strcmp(name1, "length"));
2123 	name2 = spdk_xattr_names_get_name(names, 1);
2124 	SPDK_CU_ASSERT_FATAL(name2 != NULL);
2125 	CU_ASSERT(!strcmp(name2, "name") || !strcmp(name2, "length"));
2126 	CU_ASSERT(strcmp(name1, name2));
2127 	spdk_xattr_names_free(names);
2128 
2129 	/* Confirm that remove_xattr fails if md_ro is set to true. */
2130 	blob->md_ro = true;
2131 	rc = spdk_blob_remove_xattr(blob, "name");
2132 	CU_ASSERT(rc == -EPERM);
2133 
2134 	blob->md_ro = false;
2135 	rc = spdk_blob_remove_xattr(blob, "name");
2136 	CU_ASSERT(rc == 0);
2137 
2138 	rc = spdk_blob_remove_xattr(blob, "foobar");
2139 	CU_ASSERT(rc == -ENOENT);
2140 
2141 	/* Set internal xattr */
2142 	length = 7898;
2143 	rc = blob_set_xattr(blob, "internal", &length, sizeof(length), true);
2144 	CU_ASSERT(rc == 0);
2145 	rc = blob_get_xattr_value(blob, "internal", &value, &value_len, true);
2146 	CU_ASSERT(rc == 0);
2147 	CU_ASSERT(*(uint64_t *)value == length);
	/* Try to get a public xattr with the same name */
2149 	rc = spdk_blob_get_xattr_value(blob, "internal", &value, &value_len);
2150 	CU_ASSERT(rc != 0);
2151 	rc = blob_get_xattr_value(blob, "internal", &value, &value_len, false);
2152 	CU_ASSERT(rc != 0);
2153 	/* Check if SPDK_BLOB_INTERNAL_XATTR is set */
2154 	CU_ASSERT((blob->invalid_flags & SPDK_BLOB_INTERNAL_XATTR) ==
2155 		  SPDK_BLOB_INTERNAL_XATTR);
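	/* The internal xattr travels with the blob's metadata (verified across the reload
	 *  below) and the flag is expected to be cleared again once the xattr is removed. */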
2156 
2157 	spdk_blob_close(blob, blob_op_complete, NULL);
2158 	poll_threads();
2159 
2160 	/* Check if xattrs are persisted */
2161 	ut_bs_reload(&bs, NULL);
2162 
2163 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
2164 	poll_threads();
2165 	CU_ASSERT(g_bserrno == 0);
2166 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
2167 	blob = g_blob;
2168 
2169 	rc = blob_get_xattr_value(blob, "internal", &value, &value_len, true);
2170 	CU_ASSERT(rc == 0);
2171 	CU_ASSERT(*(uint64_t *)value == length);
2172 
	/* Try to get the internal xattr through the public call */
2174 	rc = spdk_blob_get_xattr_value(blob, "internal", &value, &value_len);
2175 	CU_ASSERT(rc != 0);
2176 
2177 	rc = blob_remove_xattr(blob, "internal", true);
2178 	CU_ASSERT(rc == 0);
2179 
2180 	CU_ASSERT((blob->invalid_flags & SPDK_BLOB_INTERNAL_XATTR) == 0);
2181 }
2182 
2183 static void
2184 blob_parse_md(void)
2185 {
2186 	struct spdk_blob_store *bs = g_bs;
2187 	struct spdk_blob *blob;
2188 	int rc;
2189 	uint32_t used_pages;
2190 	size_t xattr_length;
2191 	char *xattr;
2192 
2193 	used_pages = spdk_bit_array_count_set(bs->used_md_pages);
2194 	blob = ut_blob_create_and_open(bs, NULL);
2195 
	/* Set a large xattr to force more than 1 page of metadata. */
2197 	xattr_length = SPDK_BS_MAX_DESC_SIZE - sizeof(struct spdk_blob_md_descriptor_xattr) -
2198 		       strlen("large_xattr");
2199 	xattr = calloc(xattr_length, sizeof(char));
2200 	SPDK_CU_ASSERT_FATAL(xattr != NULL);
2201 	rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
2202 	free(xattr);
2203 	SPDK_CU_ASSERT_FATAL(rc == 0);
2204 
2205 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
2206 	poll_threads();
2207 
	/* Delete the blob and verify that the number of used metadata pages
	 * returns to what it was before the blob was created. */
2209 	SPDK_CU_ASSERT_FATAL(used_pages != spdk_bit_array_count_set(bs->used_md_pages));
2210 	ut_blob_close_and_delete(bs, blob);
2211 	SPDK_CU_ASSERT_FATAL(used_pages == spdk_bit_array_count_set(bs->used_md_pages));
2212 }
2213 
2214 static void
2215 bs_load(void)
2216 {
2217 	struct spdk_blob_store *bs;
2218 	struct spdk_bs_dev *dev;
2219 	spdk_blob_id blobid;
2220 	struct spdk_blob *blob;
2221 	struct spdk_bs_super_block *super_block;
2222 	uint64_t length;
2223 	int rc;
2224 	const void *value;
2225 	size_t value_len;
2226 	struct spdk_bs_opts opts;
2227 	struct spdk_blob_opts blob_opts;
2228 
2229 	dev = init_dev();
2230 	spdk_bs_opts_init(&opts, sizeof(opts));
2231 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2232 
2233 	/* Initialize a new blob store */
2234 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2235 	poll_threads();
2236 	CU_ASSERT(g_bserrno == 0);
2237 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2238 	bs = g_bs;
2239 
2240 	/* Try to open a blobid that does not exist */
2241 	spdk_bs_open_blob(bs, 0, blob_op_with_handle_complete, NULL);
2242 	poll_threads();
2243 	CU_ASSERT(g_bserrno == -ENOENT);
2244 	CU_ASSERT(g_blob == NULL);
2245 
2246 	/* Create a blob */
2247 	blob = ut_blob_create_and_open(bs, NULL);
2248 	blobid = spdk_blob_get_id(blob);
2249 
	/* Try again to open the valid blob, but without the blob ID's upper bit set */
2251 	spdk_bs_open_blob(bs, blobid & 0xFFFFFFFF, blob_op_with_handle_complete, NULL);
2252 	poll_threads();
2253 	CU_ASSERT(g_bserrno == -ENOENT);
2254 	CU_ASSERT(g_blob == NULL);
2255 
2256 	/* Set some xattrs */
2257 	rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
2258 	CU_ASSERT(rc == 0);
2259 
2260 	length = 2345;
2261 	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
2262 	CU_ASSERT(rc == 0);
2263 
2264 	/* Resize the blob */
2265 	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
2266 	poll_threads();
2267 	CU_ASSERT(g_bserrno == 0);
2268 
2269 	spdk_blob_close(blob, blob_op_complete, NULL);
2270 	poll_threads();
2271 	CU_ASSERT(g_bserrno == 0);
2272 	blob = NULL;
2273 	g_blob = NULL;
2274 	g_blobid = SPDK_BLOBID_INVALID;
2275 
2276 	/* Unload the blob store */
2277 	spdk_bs_unload(bs, bs_op_complete, NULL);
2278 	poll_threads();
2279 	CU_ASSERT(g_bserrno == 0);
2280 	g_bs = NULL;
2281 	g_blob = NULL;
2282 	g_blobid = 0;
2283 
2284 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
2285 	CU_ASSERT(super_block->clean == 1);
2286 
2287 	/* Load should fail for device with an unsupported blocklen */
2288 	dev = init_dev();
2289 	dev->blocklen = SPDK_BS_PAGE_SIZE * 2;
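	/* A block size larger than the blobstore page size is not supported, so this load
	 *  is expected to fail with -EINVAL. */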
2290 	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
2291 	poll_threads();
2292 	CU_ASSERT(g_bserrno == -EINVAL);
2293 
	/* Load should fail when max_md_ops is set to zero */
2295 	dev = init_dev();
2296 	spdk_bs_opts_init(&opts, sizeof(opts));
2297 	opts.max_md_ops = 0;
2298 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2299 	poll_threads();
2300 	CU_ASSERT(g_bserrno == -EINVAL);
2301 
	/* Load should fail when max_channel_ops is set to zero */
2303 	dev = init_dev();
2304 	spdk_bs_opts_init(&opts, sizeof(opts));
2305 	opts.max_channel_ops = 0;
2306 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2307 	poll_threads();
2308 	CU_ASSERT(g_bserrno == -EINVAL);
2309 
2310 	/* Load an existing blob store */
2311 	dev = init_dev();
2312 	spdk_bs_opts_init(&opts, sizeof(opts));
2313 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2314 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2315 	poll_threads();
2316 	CU_ASSERT(g_bserrno == 0);
2317 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2318 	bs = g_bs;
2319 
2320 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
2321 	CU_ASSERT(super_block->clean == 1);
2322 	CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen);
2323 
2324 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
2325 	poll_threads();
2326 	CU_ASSERT(g_bserrno == 0);
2327 	CU_ASSERT(g_blob != NULL);
2328 	blob = g_blob;
2329 
2330 	/* Verify that blobstore is marked dirty after first metadata sync */
2331 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
2332 	CU_ASSERT(super_block->clean == 1);
2333 
2334 	/* Get the xattrs */
2335 	value = NULL;
2336 	rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len);
2337 	CU_ASSERT(rc == 0);
2338 	SPDK_CU_ASSERT_FATAL(value != NULL);
2339 	CU_ASSERT(*(uint64_t *)value == length);
2340 	CU_ASSERT(value_len == 8);
2341 
2342 	rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len);
2343 	CU_ASSERT(rc == -ENOENT);
2344 
2345 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
2346 
2347 	spdk_blob_close(blob, blob_op_complete, NULL);
2348 	poll_threads();
2349 	CU_ASSERT(g_bserrno == 0);
2350 	blob = NULL;
2351 	g_blob = NULL;
2352 
2353 	spdk_bs_unload(bs, bs_op_complete, NULL);
2354 	poll_threads();
2355 	CU_ASSERT(g_bserrno == 0);
2356 	g_bs = NULL;
2357 
2358 	/* Load should fail: bdev size < saved size */
2359 	dev = init_dev();
2360 	dev->blockcnt /= 2;
2361 
2362 	spdk_bs_opts_init(&opts, sizeof(opts));
2363 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2364 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2365 	poll_threads();
2366 
2367 	CU_ASSERT(g_bserrno == -EILSEQ);
2368 
2369 	/* Load should succeed: bdev size > saved size */
2370 	dev = init_dev();
2371 	dev->blockcnt *= 4;
2372 
2373 	spdk_bs_opts_init(&opts, sizeof(opts));
2374 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2375 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2376 	poll_threads();
2377 	CU_ASSERT(g_bserrno == 0);
2378 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2379 	bs = g_bs;
2380 
2381 	CU_ASSERT(g_bserrno == 0);
2382 	spdk_bs_unload(bs, bs_op_complete, NULL);
2383 	poll_threads();
2384 
2385 
2386 	/* Test compatibility mode */
2387 
2388 	dev = init_dev();
2389 	super_block->size = 0;
2390 	super_block->crc = blob_md_page_calc_crc(super_block);
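	/* size == 0 emulates a blobstore written before the size field was added; on load
	 *  the size should be derived from the device and written back to the super block
	 *  (checked below). */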
2391 
2392 	spdk_bs_opts_init(&opts, sizeof(opts));
2393 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2394 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2395 	poll_threads();
2396 	CU_ASSERT(g_bserrno == 0);
2397 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2398 	bs = g_bs;
2399 
2400 	/* Create a blob */
2401 	ut_spdk_blob_opts_init(&blob_opts);
2402 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
2403 	poll_threads();
2404 	CU_ASSERT(g_bserrno == 0);
2405 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
2406 
	/* The blobstore should update the size recorded in the super block */
2408 	CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen);
2409 	CU_ASSERT(super_block->clean == 0);
2410 
2411 	spdk_bs_unload(bs, bs_op_complete, NULL);
2412 	poll_threads();
2413 	CU_ASSERT(g_bserrno == 0);
2414 	CU_ASSERT(super_block->clean == 1);
2415 	g_bs = NULL;
2416 
2417 }
2418 
2419 static void
2420 bs_load_pending_removal(void)
2421 {
2422 	struct spdk_blob_store *bs = g_bs;
2423 	struct spdk_blob_opts opts;
2424 	struct spdk_blob *blob, *snapshot;
2425 	spdk_blob_id blobid, snapshotid;
2426 	const void *value;
2427 	size_t value_len;
2428 	int rc;
2429 
2430 	/* Create blob */
2431 	ut_spdk_blob_opts_init(&opts);
2432 	opts.num_clusters = 10;
2433 
2434 	blob = ut_blob_create_and_open(bs, &opts);
2435 	blobid = spdk_blob_get_id(blob);
2436 
2437 	/* Create snapshot */
2438 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
2439 	poll_threads();
2440 	CU_ASSERT(g_bserrno == 0);
2441 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
2442 	snapshotid = g_blobid;
2443 
2444 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
2445 	poll_threads();
2446 	CU_ASSERT(g_bserrno == 0);
2447 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
2448 	snapshot = g_blob;
2449 
2450 	/* Set SNAPSHOT_PENDING_REMOVAL xattr */
2451 	snapshot->md_ro = false;
2452 	rc = blob_set_xattr(snapshot, SNAPSHOT_PENDING_REMOVAL, &blobid, sizeof(spdk_blob_id), true);
2453 	CU_ASSERT(rc == 0);
2454 	snapshot->md_ro = true;
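	/* SNAPSHOT_PENDING_REMOVAL marks a snapshot whose deletion was interrupted.  On load
	 *  the blobstore should only finish the deletion if no blob references the snapshot
	 *  any more; otherwise it should just drop the xattr. */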
2455 
2456 	spdk_blob_close(snapshot, blob_op_complete, NULL);
2457 	poll_threads();
2458 	CU_ASSERT(g_bserrno == 0);
2459 
2460 	spdk_blob_close(blob, blob_op_complete, NULL);
2461 	poll_threads();
2462 	CU_ASSERT(g_bserrno == 0);
2463 
2464 	/* Reload blobstore */
2465 	ut_bs_reload(&bs, NULL);
2466 
2467 	/* Snapshot should not be removed as blob is still pointing to it */
2468 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
2469 	poll_threads();
2470 	CU_ASSERT(g_bserrno == 0);
2471 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
2472 	snapshot = g_blob;
2473 
2474 	/* SNAPSHOT_PENDING_REMOVAL xattr should be removed during load */
2475 	rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_PENDING_REMOVAL, &value, &value_len);
2476 	CU_ASSERT(rc != 0);
2477 
2478 	/* Set SNAPSHOT_PENDING_REMOVAL xattr again */
2479 	snapshot->md_ro = false;
2480 	rc = blob_set_xattr(snapshot, SNAPSHOT_PENDING_REMOVAL, &blobid, sizeof(spdk_blob_id), true);
2481 	CU_ASSERT(rc == 0);
2482 	snapshot->md_ro = true;
2483 
2484 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
2485 	poll_threads();
2486 	CU_ASSERT(g_bserrno == 0);
2487 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
2488 	blob = g_blob;
2489 
2490 	/* Remove parent_id from blob by removing BLOB_SNAPSHOT xattr */
2491 	blob_remove_xattr(blob, BLOB_SNAPSHOT, true);
2492 
2493 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
2494 	poll_threads();
2495 	CU_ASSERT(g_bserrno == 0);
2496 
2497 	spdk_blob_close(snapshot, blob_op_complete, NULL);
2498 	poll_threads();
2499 	CU_ASSERT(g_bserrno == 0);
2500 
2501 	spdk_blob_close(blob, blob_op_complete, NULL);
2502 	poll_threads();
2503 	CU_ASSERT(g_bserrno == 0);
2504 
2505 	/* Reload blobstore */
2506 	ut_bs_reload(&bs, NULL);
2507 
2508 	/* Snapshot should be removed as blob is not pointing to it anymore */
2509 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
2510 	poll_threads();
2511 	CU_ASSERT(g_bserrno != 0);
2512 }
2513 
2514 static void
2515 bs_load_custom_cluster_size(void)
2516 {
2517 	struct spdk_blob_store *bs;
2518 	struct spdk_bs_dev *dev;
2519 	struct spdk_bs_super_block *super_block;
2520 	struct spdk_bs_opts opts;
2521 	uint32_t custom_cluster_size = 4194304; /* 4MiB */
2522 	uint32_t cluster_sz;
2523 	uint64_t total_clusters;
2524 
2525 	dev = init_dev();
2526 	spdk_bs_opts_init(&opts, sizeof(opts));
2527 	opts.cluster_sz = custom_cluster_size;
2528 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2529 
2530 	/* Initialize a new blob store */
2531 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2532 	poll_threads();
2533 	CU_ASSERT(g_bserrno == 0);
2534 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2535 	bs = g_bs;
2536 	cluster_sz = bs->cluster_sz;
2537 	total_clusters = bs->total_clusters;
2538 
2539 	/* Unload the blob store */
2540 	spdk_bs_unload(bs, bs_op_complete, NULL);
2541 	poll_threads();
2542 	CU_ASSERT(g_bserrno == 0);
2543 	g_bs = NULL;
2544 	g_blob = NULL;
2545 	g_blobid = 0;
2546 
2547 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
2548 	CU_ASSERT(super_block->clean == 1);
2549 
2550 	/* Load an existing blob store */
2551 	dev = init_dev();
2552 	spdk_bs_opts_init(&opts, sizeof(opts));
2553 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2554 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2555 	poll_threads();
2556 	CU_ASSERT(g_bserrno == 0);
2557 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2558 	bs = g_bs;
	/* Compare the cluster size and count with the values recorded after initialization */
2560 	CU_ASSERT(cluster_sz == bs->cluster_sz);
2561 	CU_ASSERT(total_clusters == bs->total_clusters);
2562 
2563 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
2564 	CU_ASSERT(super_block->clean == 1);
2565 	CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen);
2566 
2567 	spdk_bs_unload(bs, bs_op_complete, NULL);
2568 	poll_threads();
2569 	CU_ASSERT(g_bserrno == 0);
2570 	CU_ASSERT(super_block->clean == 1);
2571 	g_bs = NULL;
2572 }
2573 
2574 static void
2575 bs_type(void)
2576 {
2577 	struct spdk_blob_store *bs;
2578 	struct spdk_bs_dev *dev;
2579 	struct spdk_bs_opts opts;
2580 
2581 	dev = init_dev();
2582 	spdk_bs_opts_init(&opts, sizeof(opts));
2583 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2584 
2585 	/* Initialize a new blob store */
2586 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2587 	poll_threads();
2588 	CU_ASSERT(g_bserrno == 0);
2589 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2590 	bs = g_bs;
2591 
2592 	/* Unload the blob store */
2593 	spdk_bs_unload(bs, bs_op_complete, NULL);
2594 	poll_threads();
2595 	CU_ASSERT(g_bserrno == 0);
2596 	g_bs = NULL;
2597 	g_blob = NULL;
2598 	g_blobid = 0;
2599 
	/* Load a non-existent blobstore type */
2601 	dev = init_dev();
2602 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "NONEXISTING");
2603 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2604 	poll_threads();
2605 	CU_ASSERT(g_bserrno != 0);
2606 
2607 	/* Load with empty blobstore type */
2608 	dev = init_dev();
2609 	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
2610 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2611 	poll_threads();
2612 	CU_ASSERT(g_bserrno == 0);
2613 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2614 	bs = g_bs;
2615 
2616 	spdk_bs_unload(bs, bs_op_complete, NULL);
2617 	poll_threads();
2618 	CU_ASSERT(g_bserrno == 0);
2619 	g_bs = NULL;
2620 
2621 	/* Initialize a new blob store with empty bstype */
2622 	dev = init_dev();
2623 	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
2624 	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
2625 	poll_threads();
2626 	CU_ASSERT(g_bserrno == 0);
2627 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2628 	bs = g_bs;
2629 
2630 	spdk_bs_unload(bs, bs_op_complete, NULL);
2631 	poll_threads();
2632 	CU_ASSERT(g_bserrno == 0);
2633 	g_bs = NULL;
2634 
	/* Load a non-existent blobstore type */
2636 	dev = init_dev();
2637 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "NONEXISTING");
2638 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2639 	poll_threads();
2640 	CU_ASSERT(g_bserrno != 0);
2641 
2642 	/* Load with empty blobstore type */
2643 	dev = init_dev();
2644 	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
2645 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2646 	poll_threads();
2647 	CU_ASSERT(g_bserrno == 0);
2648 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2649 	bs = g_bs;
2650 
2651 	spdk_bs_unload(bs, bs_op_complete, NULL);
2652 	poll_threads();
2653 	CU_ASSERT(g_bserrno == 0);
2654 	g_bs = NULL;
2655 }
2656 
2657 static void
2658 bs_super_block(void)
2659 {
2660 	struct spdk_blob_store *bs;
2661 	struct spdk_bs_dev *dev;
2662 	struct spdk_bs_super_block *super_block;
2663 	struct spdk_bs_opts opts;
2664 	struct spdk_bs_super_block_ver1 super_block_v1;
2665 
2666 	dev = init_dev();
2667 	spdk_bs_opts_init(&opts, sizeof(opts));
2668 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2669 
2670 	/* Initialize a new blob store */
2671 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2672 	poll_threads();
2673 	CU_ASSERT(g_bserrno == 0);
2674 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2675 	bs = g_bs;
2676 
2677 	/* Unload the blob store */
2678 	spdk_bs_unload(bs, bs_op_complete, NULL);
2679 	poll_threads();
2680 	CU_ASSERT(g_bserrno == 0);
2681 	g_bs = NULL;
2682 	g_blob = NULL;
2683 	g_blobid = 0;
2684 
2685 	/* Load an existing blob store with version newer than supported */
2686 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
2687 	super_block->version++;
2688 
2689 	dev = init_dev();
2690 	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
2691 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2692 	poll_threads();
2693 	CU_ASSERT(g_bserrno != 0);
2694 
2695 	/* Create a new blob store with super block version 1 */
2696 	dev = init_dev();
2697 	super_block_v1.version = 1;
2698 	memcpy(super_block_v1.signature, "SPDKBLOB", sizeof(super_block_v1.signature));
2699 	super_block_v1.length = 0x1000;
2700 	super_block_v1.clean = 1;
2701 	super_block_v1.super_blob = 0xFFFFFFFFFFFFFFFF;
2702 	super_block_v1.cluster_size = 0x100000;
2703 	super_block_v1.used_page_mask_start = 0x01;
2704 	super_block_v1.used_page_mask_len = 0x01;
2705 	super_block_v1.used_cluster_mask_start = 0x02;
2706 	super_block_v1.used_cluster_mask_len = 0x01;
2707 	super_block_v1.md_start = 0x03;
2708 	super_block_v1.md_len = 0x40;
2709 	memset(super_block_v1.reserved, 0, 4036);
2710 	super_block_v1.crc = blob_md_page_calc_crc(&super_block_v1);
2711 	memcpy(g_dev_buffer, &super_block_v1, sizeof(struct spdk_bs_super_block_ver1));
2712 
2713 	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
2714 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2715 	poll_threads();
2716 	CU_ASSERT(g_bserrno == 0);
2717 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2718 	bs = g_bs;
2719 
2720 	spdk_bs_unload(bs, bs_op_complete, NULL);
2721 	poll_threads();
2722 	CU_ASSERT(g_bserrno == 0);
2723 	g_bs = NULL;
2724 }
2725 
2726 static void
2727 bs_test_recover_cluster_count(void)
2728 {
2729 	struct spdk_blob_store *bs;
2730 	struct spdk_bs_dev *dev;
2731 	struct spdk_bs_super_block super_block;
2732 	struct spdk_bs_opts opts;
2733 
2734 	dev = init_dev();
2735 	spdk_bs_opts_init(&opts, sizeof(opts));
2736 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2737 
2738 	super_block.version = 3;
2739 	memcpy(super_block.signature, "SPDKBLOB", sizeof(super_block.signature));
2740 	super_block.length = 0x1000;
2741 	super_block.clean = 0;
2742 	super_block.super_blob = 0xFFFFFFFFFFFFFFFF;
2743 	super_block.cluster_size = 4096;
2744 	super_block.used_page_mask_start = 0x01;
2745 	super_block.used_page_mask_len = 0x01;
2746 	super_block.used_cluster_mask_start = 0x02;
2747 	super_block.used_cluster_mask_len = 0x01;
2748 	super_block.used_blobid_mask_start = 0x03;
2749 	super_block.used_blobid_mask_len = 0x01;
2750 	super_block.md_start = 0x04;
2751 	super_block.md_len = 0x40;
2752 	memset(super_block.bstype.bstype, 0, sizeof(super_block.bstype.bstype));
2753 	super_block.size = dev->blockcnt * dev->blocklen;
2754 	super_block.io_unit_size = 0x1000;
2755 	memset(super_block.reserved, 0, 4000);
2756 	super_block.crc = blob_md_page_calc_crc(&super_block);
2757 	memcpy(g_dev_buffer, &super_block, sizeof(struct spdk_bs_super_block));
2758 
2759 	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
2760 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2761 	poll_threads();
2762 	CU_ASSERT(g_bserrno == 0);
2763 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2764 	bs = g_bs;
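	/* clean == 0 forces recovery on load, so the used clusters are recounted from the
	 *  metadata.  With cluster_size equal to the 4KiB page size, each metadata page
	 *  occupies one cluster, leaving only the first md_start + md_len clusters in use. */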
2765 	CU_ASSERT(bs->num_free_clusters == bs->total_clusters - (super_block.md_start +
2766 			super_block.md_len));
2767 
2768 	spdk_bs_unload(bs, bs_op_complete, NULL);
2769 	poll_threads();
2770 	CU_ASSERT(g_bserrno == 0);
2771 	g_bs = NULL;
2772 }
2773 
2774 /*
2775  * Create a blobstore and then unload it.
2776  */
2777 static void
2778 bs_unload(void)
2779 {
2780 	struct spdk_blob_store *bs = g_bs;
2781 	struct spdk_blob *blob;
2782 
2783 	/* Create a blob and open it. */
2784 	blob = ut_blob_create_and_open(bs, NULL);
2785 
	/* Try to unload the blobstore; it should fail while a blob is still open */
2787 	g_bserrno = -1;
2788 	spdk_bs_unload(bs, bs_op_complete, NULL);
2789 	poll_threads();
2790 	CU_ASSERT(g_bserrno == -EBUSY);
2791 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2792 
2793 	/* Close the blob, then successfully unload blobstore */
2794 	g_bserrno = -1;
2795 	spdk_blob_close(blob, blob_op_complete, NULL);
2796 	poll_threads();
2797 	CU_ASSERT(g_bserrno == 0);
2798 }
2799 
2800 /*
2801  * Create a blobstore with a cluster size different than the default, and ensure it is
2802  *  persisted.
2803  */
2804 static void
2805 bs_cluster_sz(void)
2806 {
2807 	struct spdk_blob_store *bs;
2808 	struct spdk_bs_dev *dev;
2809 	struct spdk_bs_opts opts;
2810 	uint32_t cluster_sz;
2811 
2812 	/* Set cluster size to zero */
2813 	dev = init_dev();
2814 	spdk_bs_opts_init(&opts, sizeof(opts));
2815 	opts.cluster_sz = 0;
2816 
2817 	/* Initialize a new blob store */
2818 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2819 	poll_threads();
2820 	CU_ASSERT(g_bserrno == -EINVAL);
2821 	SPDK_CU_ASSERT_FATAL(g_bs == NULL);
2822 
2823 	/*
2824 	 * Set cluster size to blobstore page size,
2825 	 * to work it is required to be at least twice the blobstore page size.
2826 	 */
2827 	dev = init_dev();
2828 	spdk_bs_opts_init(&opts, sizeof(opts));
2829 	opts.cluster_sz = SPDK_BS_PAGE_SIZE;
2830 
2831 	/* Initialize a new blob store */
2832 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2833 	poll_threads();
2834 	CU_ASSERT(g_bserrno == -ENOMEM);
2835 	SPDK_CU_ASSERT_FATAL(g_bs == NULL);
2836 
2837 	/*
2838 	 * Set cluster size to lower than page size,
2839 	 * to work it is required to be at least twice the blobstore page size.
2840 	 */
2841 	dev = init_dev();
2842 	spdk_bs_opts_init(&opts, sizeof(opts));
2843 	opts.cluster_sz = SPDK_BS_PAGE_SIZE - 1;
2844 
2845 	/* Initialize a new blob store */
2846 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2847 	poll_threads();
2848 	CU_ASSERT(g_bserrno == -EINVAL);
2849 	SPDK_CU_ASSERT_FATAL(g_bs == NULL);
2850 
2851 	/* Set cluster size to twice the default */
2852 	dev = init_dev();
2853 	spdk_bs_opts_init(&opts, sizeof(opts));
2854 	opts.cluster_sz *= 2;
2855 	cluster_sz = opts.cluster_sz;
2856 
2857 	/* Initialize a new blob store */
2858 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2859 	poll_threads();
2860 	CU_ASSERT(g_bserrno == 0);
2861 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2862 	bs = g_bs;
2863 
2864 	CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz);
2865 
2866 	ut_bs_reload(&bs, &opts);
2867 
2868 	CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz);
2869 
2870 	spdk_bs_unload(bs, bs_op_complete, NULL);
2871 	poll_threads();
2872 	CU_ASSERT(g_bserrno == 0);
2873 	g_bs = NULL;
2874 }
2875 
2876 /*
2877  * Create a blobstore, reload it and ensure total usable cluster count
2878  *  stays the same.
2879  */
2880 static void
2881 bs_usable_clusters(void)
2882 {
2883 	struct spdk_blob_store *bs = g_bs;
2884 	struct spdk_blob *blob;
2885 	uint32_t clusters;
2886 	int i;
2887 
2888 
2889 	clusters = spdk_bs_total_data_cluster_count(bs);
2890 
2891 	ut_bs_reload(&bs, NULL);
2892 
2893 	CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters);
2894 
	/* Create and resize blobs to make sure that the usable cluster count won't change */
2896 	for (i = 0; i < 4; i++) {
2897 		g_bserrno = -1;
2898 		g_blobid = SPDK_BLOBID_INVALID;
2899 		blob = ut_blob_create_and_open(bs, NULL);
2900 
2901 		spdk_blob_resize(blob, 10, blob_op_complete, NULL);
2902 		poll_threads();
2903 		CU_ASSERT(g_bserrno == 0);
2904 
2905 		g_bserrno = -1;
2906 		spdk_blob_close(blob, blob_op_complete, NULL);
2907 		poll_threads();
2908 		CU_ASSERT(g_bserrno == 0);
2909 
2910 		CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters);
2911 	}
2912 
2913 	/* Reload the blob store to make sure that nothing changed */
2914 	ut_bs_reload(&bs, NULL);
2915 
2916 	CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters);
2917 }
2918 
2919 /*
2920  * Test resizing of the metadata blob.  This requires creating enough blobs
2921  *  so that one cluster is not enough to fit the metadata for those blobs.
2922  *  To induce this condition to happen more quickly, we reduce the cluster
2923  *  size to 16KB, which means only 4 4KB blob metadata pages can fit.
2924  */
2925 static void
2926 bs_resize_md(void)
2927 {
2928 	struct spdk_blob_store *bs;
2929 	const int CLUSTER_PAGE_COUNT = 4;
2930 	const int NUM_BLOBS = CLUSTER_PAGE_COUNT * 4;
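	/* Each blob needs at least one 4KB metadata page, so 16 blobs require at least
	 *  4 clusters of metadata and force the metadata blob to grow. */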
2931 	struct spdk_bs_dev *dev;
2932 	struct spdk_bs_opts opts;
2933 	struct spdk_blob *blob;
2934 	struct spdk_blob_opts blob_opts;
2935 	uint32_t cluster_sz;
2936 	spdk_blob_id blobids[NUM_BLOBS];
2937 	int i;
2938 
2939 
2940 	dev = init_dev();
2941 	spdk_bs_opts_init(&opts, sizeof(opts));
2942 	opts.cluster_sz = CLUSTER_PAGE_COUNT * 4096;
2943 	cluster_sz = opts.cluster_sz;
2944 
2945 	/* Initialize a new blob store */
2946 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2947 	poll_threads();
2948 	CU_ASSERT(g_bserrno == 0);
2949 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2950 	bs = g_bs;
2951 
2952 	CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz);
2953 
2954 	ut_spdk_blob_opts_init(&blob_opts);
2955 
2956 	for (i = 0; i < NUM_BLOBS; i++) {
2957 		g_bserrno = -1;
2958 		g_blobid = SPDK_BLOBID_INVALID;
2959 		spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
2960 		poll_threads();
2961 		CU_ASSERT(g_bserrno == 0);
2962 		CU_ASSERT(g_blobid !=  SPDK_BLOBID_INVALID);
2963 		blobids[i] = g_blobid;
2964 	}
2965 
2966 	ut_bs_reload(&bs, &opts);
2967 
2968 	CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz);
2969 
2970 	for (i = 0; i < NUM_BLOBS; i++) {
2971 		g_bserrno = -1;
2972 		g_blob = NULL;
2973 		spdk_bs_open_blob(bs, blobids[i], blob_op_with_handle_complete, NULL);
2974 		poll_threads();
2975 		CU_ASSERT(g_bserrno == 0);
2976 		CU_ASSERT(g_blob !=  NULL);
2977 		blob = g_blob;
2978 		g_bserrno = -1;
2979 		spdk_blob_close(blob, blob_op_complete, NULL);
2980 		poll_threads();
2981 		CU_ASSERT(g_bserrno == 0);
2982 	}
2983 
2984 	spdk_bs_unload(bs, bs_op_complete, NULL);
2985 	poll_threads();
2986 	CU_ASSERT(g_bserrno == 0);
2987 	g_bs = NULL;
2988 }
2989 
2990 static void
2991 bs_destroy(void)
2992 {
2993 	struct spdk_blob_store *bs;
2994 	struct spdk_bs_dev *dev;
2995 
2996 	/* Initialize a new blob store */
2997 	dev = init_dev();
2998 	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
2999 	poll_threads();
3000 	CU_ASSERT(g_bserrno == 0);
3001 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3002 	bs = g_bs;
3003 
3004 	/* Destroy the blob store */
3005 	g_bserrno = -1;
3006 	spdk_bs_destroy(bs, bs_op_complete, NULL);
3007 	poll_threads();
3008 	CU_ASSERT(g_bserrno == 0);
3009 
	/* Loading a non-existent blob store should fail. */
3011 	g_bs = NULL;
3012 	dev = init_dev();
3013 
3014 	g_bserrno = 0;
3015 	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
3016 	poll_threads();
3017 	CU_ASSERT(g_bserrno != 0);
3018 }
3019 
3020 /* Try to hit all of the corner cases associated with serializing
3021  * a blob to disk
3022  */
3023 static void
3024 blob_serialize_test(void)
3025 {
3026 	struct spdk_bs_dev *dev;
3027 	struct spdk_bs_opts opts;
3028 	struct spdk_blob_store *bs;
3029 	spdk_blob_id blobid[2];
3030 	struct spdk_blob *blob[2];
3031 	uint64_t i;
3032 	char *value;
3033 	int rc;
3034 
3035 	dev = init_dev();
3036 
3037 	/* Initialize a new blobstore with very small clusters */
3038 	spdk_bs_opts_init(&opts, sizeof(opts));
3039 	opts.cluster_sz = dev->blocklen * 8;
3040 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
3041 	poll_threads();
3042 	CU_ASSERT(g_bserrno == 0);
3043 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3044 	bs = g_bs;
3045 
3046 	/* Create and open two blobs */
3047 	for (i = 0; i < 2; i++) {
3048 		blob[i] = ut_blob_create_and_open(bs, NULL);
3049 		blobid[i] = spdk_blob_get_id(blob[i]);
3050 
3051 		/* Set a fairly large xattr on both blobs to eat up
3052 		 * metadata space
3053 		 */
3054 		value = calloc(dev->blocklen - 64, sizeof(char));
3055 		SPDK_CU_ASSERT_FATAL(value != NULL);
3056 		memset(value, i, dev->blocklen / 2);
3057 		rc = spdk_blob_set_xattr(blob[i], "name", value, dev->blocklen - 64);
3058 		CU_ASSERT(rc == 0);
3059 		free(value);
3060 	}
3061 
3062 	/* Resize the blobs, alternating 1 cluster at a time.
3063 	 * This thwarts run length encoding and will cause spill
3064 	 * over of the extents.
3065 	 */
3066 	for (i = 0; i < 6; i++) {
3067 		spdk_blob_resize(blob[i % 2], (i / 2) + 1, blob_op_complete, NULL);
3068 		poll_threads();
3069 		CU_ASSERT(g_bserrno == 0);
3070 	}
3071 
3072 	for (i = 0; i < 2; i++) {
3073 		spdk_blob_sync_md(blob[i], blob_op_complete, NULL);
3074 		poll_threads();
3075 		CU_ASSERT(g_bserrno == 0);
3076 	}
3077 
3078 	/* Close the blobs */
3079 	for (i = 0; i < 2; i++) {
3080 		spdk_blob_close(blob[i], blob_op_complete, NULL);
3081 		poll_threads();
3082 		CU_ASSERT(g_bserrno == 0);
3083 	}
3084 
3085 	ut_bs_reload(&bs, &opts);
3086 
3087 	for (i = 0; i < 2; i++) {
3088 		blob[i] = NULL;
3089 
3090 		spdk_bs_open_blob(bs, blobid[i], blob_op_with_handle_complete, NULL);
3091 		poll_threads();
3092 		CU_ASSERT(g_bserrno == 0);
3093 		CU_ASSERT(g_blob != NULL);
3094 		blob[i] = g_blob;
3095 
3096 		CU_ASSERT(spdk_blob_get_num_clusters(blob[i]) == 3);
3097 
3098 		spdk_blob_close(blob[i], blob_op_complete, NULL);
3099 		poll_threads();
3100 		CU_ASSERT(g_bserrno == 0);
3101 	}
3102 
3103 	spdk_bs_unload(bs, bs_op_complete, NULL);
3104 	poll_threads();
3105 	CU_ASSERT(g_bserrno == 0);
3106 	g_bs = NULL;
3107 }
3108 
3109 static void
3110 blob_crc(void)
3111 {
3112 	struct spdk_blob_store *bs = g_bs;
3113 	struct spdk_blob *blob;
3114 	spdk_blob_id blobid;
3115 	uint32_t page_num;
3116 	int index;
3117 	struct spdk_blob_md_page *page;
3118 
3119 	blob = ut_blob_create_and_open(bs, NULL);
3120 	blobid = spdk_blob_get_id(blob);
3121 
3122 	spdk_blob_close(blob, blob_op_complete, NULL);
3123 	poll_threads();
3124 	CU_ASSERT(g_bserrno == 0);
3125 
3126 	page_num = bs_blobid_to_page(blobid);
3127 	index = DEV_BUFFER_BLOCKLEN * (bs->md_start + page_num);
3128 	page = (struct spdk_blob_md_page *)&g_dev_buffer[index];
3129 	page->crc = 0;
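	/* With the stored CRC zeroed the page no longer matches its computed CRC, so both
	 *  opening and deleting the blob should fail with -EINVAL. */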
3130 
3131 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
3132 	poll_threads();
3133 	CU_ASSERT(g_bserrno == -EINVAL);
3134 	CU_ASSERT(g_blob == NULL);
3135 	g_bserrno = 0;
3136 
3137 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
3138 	poll_threads();
3139 	CU_ASSERT(g_bserrno == -EINVAL);
3140 }
3141 
3142 static void
3143 super_block_crc(void)
3144 {
3145 	struct spdk_blob_store *bs;
3146 	struct spdk_bs_dev *dev;
3147 	struct spdk_bs_super_block *super_block;
3148 
3149 	dev = init_dev();
3150 	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
3151 	poll_threads();
3152 	CU_ASSERT(g_bserrno == 0);
3153 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3154 	bs = g_bs;
3155 
3156 	spdk_bs_unload(bs, bs_op_complete, NULL);
3157 	poll_threads();
3158 	CU_ASSERT(g_bserrno == 0);
3159 	g_bs = NULL;
3160 
3161 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
3162 	super_block->crc = 0;
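	/* A zeroed super block CRC should make the subsequent load fail with -EILSEQ. */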
3163 	dev = init_dev();
3164 
3165 	/* Load an existing blob store */
3166 	g_bserrno = 0;
3167 	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
3168 	poll_threads();
3169 	CU_ASSERT(g_bserrno == -EILSEQ);
3170 }
3171 
/* For the blob dirty shutdown test case we run the following sub-test cases:
 * 1 Initialize a new blob store and create 1 super blob with some xattrs, then
 *   dirty shutdown, reload the blob store and verify the xattrs.
 * 2 Resize the blob from 10 clusters to 20 clusters, then dirty shutdown,
 *   reload the blob store and verify the cluster count.
 * 3 Create a second blob, then dirty shutdown, reload the blob store
 *   and verify the second blob.
 * 4 Delete the second blob, then dirty shutdown, reload the blob store
 *   and verify that the second blob is invalid.
 * 5 Create the second blob again and also create a third blob, modify the
 *   md of the second blob so that it becomes invalid, then dirty shutdown and
 *   reload the blob store; verify that the second blob is invalid and that the
 *   third blob is intact.
 */
3186 static void
3187 blob_dirty_shutdown(void)
3188 {
3189 	int rc;
3190 	int index;
3191 	struct spdk_blob_store *bs = g_bs;
3192 	spdk_blob_id blobid1, blobid2, blobid3;
3193 	struct spdk_blob *blob = g_blob;
3194 	uint64_t length;
3195 	uint64_t free_clusters;
3196 	const void *value;
3197 	size_t value_len;
3198 	uint32_t page_num;
3199 	struct spdk_blob_md_page *page;
3200 	struct spdk_blob_opts blob_opts;
3201 
3202 	/* Create first blob */
3203 	blobid1 = spdk_blob_get_id(blob);
3204 
3205 	/* Set some xattrs */
3206 	rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
3207 	CU_ASSERT(rc == 0);
3208 
3209 	length = 2345;
3210 	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
3211 	CU_ASSERT(rc == 0);
3212 
	/* Put an xattr that fits in exactly a single page.
	 * This results in additional metadata pages:
	 * the first holds the flags and the smaller xattrs, the second the large xattr,
	 * and the third just the extents.
	 */
3218 	size_t xattr_length = 4072 - sizeof(struct spdk_blob_md_descriptor_xattr) -
3219 			      strlen("large_xattr");
3220 	char *xattr = calloc(xattr_length, sizeof(char));
3221 	SPDK_CU_ASSERT_FATAL(xattr != NULL);
3222 	rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
3223 	free(xattr);
3224 	SPDK_CU_ASSERT_FATAL(rc == 0);
3225 
3226 	/* Resize the blob */
3227 	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
3228 	poll_threads();
3229 	CU_ASSERT(g_bserrno == 0);
3230 
3231 	/* Set the blob as the super blob */
3232 	spdk_bs_set_super(bs, blobid1, blob_op_complete, NULL);
3233 	poll_threads();
3234 	CU_ASSERT(g_bserrno == 0);
3235 
3236 	free_clusters = spdk_bs_free_cluster_count(bs);
3237 
3238 	spdk_blob_close(blob, blob_op_complete, NULL);
3239 	poll_threads();
3240 	CU_ASSERT(g_bserrno == 0);
3241 	blob = NULL;
3242 	g_blob = NULL;
3243 	g_blobid = SPDK_BLOBID_INVALID;
3244 
3245 	ut_bs_dirty_load(&bs, NULL);
3246 
3247 	/* Get the super blob */
3248 	spdk_bs_get_super(bs, blob_op_with_id_complete, NULL);
3249 	poll_threads();
3250 	CU_ASSERT(g_bserrno == 0);
3251 	CU_ASSERT(blobid1 == g_blobid);
3252 
3253 	spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL);
3254 	poll_threads();
3255 	CU_ASSERT(g_bserrno == 0);
3256 	CU_ASSERT(g_blob != NULL);
3257 	blob = g_blob;
3258 
3259 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3260 
3261 	/* Get the xattrs */
3262 	value = NULL;
3263 	rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len);
3264 	CU_ASSERT(rc == 0);
3265 	SPDK_CU_ASSERT_FATAL(value != NULL);
3266 	CU_ASSERT(*(uint64_t *)value == length);
3267 	CU_ASSERT(value_len == 8);
3268 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
3269 
3270 	/* Resize the blob */
3271 	spdk_blob_resize(blob, 20, blob_op_complete, NULL);
3272 	poll_threads();
3273 	CU_ASSERT(g_bserrno == 0);
3274 
3275 	free_clusters = spdk_bs_free_cluster_count(bs);
3276 
3277 	spdk_blob_close(blob, blob_op_complete, NULL);
3278 	poll_threads();
3279 	CU_ASSERT(g_bserrno == 0);
3280 	blob = NULL;
3281 	g_blob = NULL;
3282 	g_blobid = SPDK_BLOBID_INVALID;
3283 
3284 	ut_bs_dirty_load(&bs, NULL);
3285 
3286 	spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL);
3287 	poll_threads();
3288 	CU_ASSERT(g_bserrno == 0);
3289 	CU_ASSERT(g_blob != NULL);
3290 	blob = g_blob;
3291 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 20);
3292 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3293 
3294 	spdk_blob_close(blob, blob_op_complete, NULL);
3295 	poll_threads();
3296 	CU_ASSERT(g_bserrno == 0);
3297 	blob = NULL;
3298 	g_blob = NULL;
3299 	g_blobid = SPDK_BLOBID_INVALID;
3300 
3301 	/* Create second blob */
3302 	blob = ut_blob_create_and_open(bs, NULL);
3303 	blobid2 = spdk_blob_get_id(blob);
3304 
3305 	/* Set some xattrs */
3306 	rc = spdk_blob_set_xattr(blob, "name", "log1.txt", strlen("log1.txt") + 1);
3307 	CU_ASSERT(rc == 0);
3308 
3309 	length = 5432;
3310 	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
3311 	CU_ASSERT(rc == 0);
3312 
3313 	/* Resize the blob */
3314 	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
3315 	poll_threads();
3316 	CU_ASSERT(g_bserrno == 0);
3317 
3318 	free_clusters = spdk_bs_free_cluster_count(bs);
3319 
3320 	spdk_blob_close(blob, blob_op_complete, NULL);
3321 	poll_threads();
3322 	CU_ASSERT(g_bserrno == 0);
3323 	blob = NULL;
3324 	g_blob = NULL;
3325 	g_blobid = SPDK_BLOBID_INVALID;
3326 
3327 	ut_bs_dirty_load(&bs, NULL);
3328 
3329 	spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL);
3330 	poll_threads();
3331 	CU_ASSERT(g_bserrno == 0);
3332 	CU_ASSERT(g_blob != NULL);
3333 	blob = g_blob;
3334 
3335 	/* Get the xattrs */
3336 	value = NULL;
3337 	rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len);
3338 	CU_ASSERT(rc == 0);
3339 	SPDK_CU_ASSERT_FATAL(value != NULL);
3340 	CU_ASSERT(*(uint64_t *)value == length);
3341 	CU_ASSERT(value_len == 8);
3342 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
3343 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3344 
3345 	ut_blob_close_and_delete(bs, blob);
3346 
3347 	free_clusters = spdk_bs_free_cluster_count(bs);
3348 
3349 	ut_bs_dirty_load(&bs, NULL);
3350 
3351 	spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL);
3352 	poll_threads();
3353 	CU_ASSERT(g_bserrno != 0);
3354 	CU_ASSERT(g_blob == NULL);
3355 
3356 	spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL);
3357 	poll_threads();
3358 	CU_ASSERT(g_bserrno == 0);
3359 	CU_ASSERT(g_blob != NULL);
3360 	blob = g_blob;
3361 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3362 	spdk_blob_close(blob, blob_op_complete, NULL);
3363 	poll_threads();
3364 	CU_ASSERT(g_bserrno == 0);
3365 
3366 	ut_bs_reload(&bs, NULL);
3367 
3368 	/* Create second blob */
3369 	ut_spdk_blob_opts_init(&blob_opts);
3370 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
3371 	poll_threads();
3372 	CU_ASSERT(g_bserrno == 0);
3373 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
3374 	blobid2 = g_blobid;
3375 
3376 	/* Create third blob */
3377 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
3378 	poll_threads();
3379 	CU_ASSERT(g_bserrno == 0);
3380 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
3381 	blobid3 = g_blobid;
3382 
3383 	spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL);
3384 	poll_threads();
3385 	CU_ASSERT(g_bserrno == 0);
3386 	CU_ASSERT(g_blob != NULL);
3387 	blob = g_blob;
3388 
3389 	/* Set some xattrs for second blob */
3390 	rc = spdk_blob_set_xattr(blob, "name", "log1.txt", strlen("log1.txt") + 1);
3391 	CU_ASSERT(rc == 0);
3392 
3393 	length = 5432;
3394 	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
3395 	CU_ASSERT(rc == 0);
3396 
3397 	spdk_blob_close(blob, blob_op_complete, NULL);
3398 	poll_threads();
3399 	CU_ASSERT(g_bserrno == 0);
3400 	blob = NULL;
3401 	g_blob = NULL;
3402 	g_blobid = SPDK_BLOBID_INVALID;
3403 
3404 	spdk_bs_open_blob(bs, blobid3, blob_op_with_handle_complete, NULL);
3405 	poll_threads();
3406 	CU_ASSERT(g_bserrno == 0);
3407 	CU_ASSERT(g_blob != NULL);
3408 	blob = g_blob;
3409 
3410 	/* Set some xattrs for third blob */
3411 	rc = spdk_blob_set_xattr(blob, "name", "log2.txt", strlen("log2.txt") + 1);
3412 	CU_ASSERT(rc == 0);
3413 
3414 	length = 5432;
3415 	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
3416 	CU_ASSERT(rc == 0);
3417 
3418 	spdk_blob_close(blob, blob_op_complete, NULL);
3419 	poll_threads();
3420 	CU_ASSERT(g_bserrno == 0);
3421 	blob = NULL;
3422 	g_blob = NULL;
3423 	g_blobid = SPDK_BLOBID_INVALID;
3424 
3425 	/* Mark second blob as invalid */
3426 	page_num = bs_blobid_to_page(blobid2);
3427 
3428 	index = DEV_BUFFER_BLOCKLEN * (bs->md_start + page_num);
3429 	page = (struct spdk_blob_md_page *)&g_dev_buffer[index];
3430 	page->sequence_num = 1;
3431 	page->crc = blob_md_page_calc_crc(page);
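	/* The first metadata page of a blob has sequence_num == 0; bumping it to 1
	 * (and recomputing the CRC so the page itself still validates) makes it look
	 * like a continuation page, so the dirty load below should fail to open blobid2. */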
3432 
3433 	free_clusters = spdk_bs_free_cluster_count(bs);
3434 
3435 	ut_bs_dirty_load(&bs, NULL);
3436 
3437 	spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL);
3438 	poll_threads();
3439 	CU_ASSERT(g_bserrno != 0);
3440 	CU_ASSERT(g_blob == NULL);
3441 
3442 	spdk_bs_open_blob(bs, blobid3, blob_op_with_handle_complete, NULL);
3443 	poll_threads();
3444 	CU_ASSERT(g_bserrno == 0);
3445 	CU_ASSERT(g_blob != NULL);
3446 	blob = g_blob;
3447 
3448 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3449 }
3450 
3451 static void
3452 blob_flags(void)
3453 {
3454 	struct spdk_blob_store *bs = g_bs;
3455 	spdk_blob_id blobid_invalid, blobid_data_ro, blobid_md_ro;
3456 	struct spdk_blob *blob_invalid, *blob_data_ro, *blob_md_ro;
3457 	struct spdk_blob_opts blob_opts;
3458 	int rc;
3459 
3460 	/* Create three blobs - one each for testing invalid, data_ro and md_ro flags. */
3461 	blob_invalid = ut_blob_create_and_open(bs, NULL);
3462 	blobid_invalid = spdk_blob_get_id(blob_invalid);
3463 
3464 	blob_data_ro = ut_blob_create_and_open(bs, NULL);
3465 	blobid_data_ro = spdk_blob_get_id(blob_data_ro);
3466 
3467 	ut_spdk_blob_opts_init(&blob_opts);
3468 	blob_opts.clear_method = BLOB_CLEAR_WITH_WRITE_ZEROES;
3469 	blob_md_ro = ut_blob_create_and_open(bs, &blob_opts);
3470 	blobid_md_ro = spdk_blob_get_id(blob_md_ro);
3471 	CU_ASSERT((blob_md_ro->md_ro_flags & SPDK_BLOB_MD_RO_FLAGS_MASK) == BLOB_CLEAR_WITH_WRITE_ZEROES);
3472 
3473 	/* Change the size of blob_data_ro to check if flags are serialized
3474 	 * when the blob has a non-zero number of extents */
3475 	spdk_blob_resize(blob_data_ro, 10, blob_op_complete, NULL);
3476 	poll_threads();
3477 	CU_ASSERT(g_bserrno == 0);
3478 
3479 	/* Set the xattr to check if flags are serialized
3480 	 * when the blob has a non-zero number of xattrs */
3481 	rc = spdk_blob_set_xattr(blob_md_ro, "name", "log.txt", strlen("log.txt") + 1);
3482 	CU_ASSERT(rc == 0);
3483 
3484 	blob_invalid->invalid_flags = (1ULL << 63);
3485 	blob_invalid->state = SPDK_BLOB_STATE_DIRTY;
3486 	blob_data_ro->data_ro_flags = (1ULL << 62);
3487 	blob_data_ro->state = SPDK_BLOB_STATE_DIRTY;
3488 	blob_md_ro->md_ro_flags = (1ULL << 61);
3489 	blob_md_ro->state = SPDK_BLOB_STATE_DIRTY;
3490 
3491 	g_bserrno = -1;
3492 	spdk_blob_sync_md(blob_invalid, blob_op_complete, NULL);
3493 	poll_threads();
3494 	CU_ASSERT(g_bserrno == 0);
3495 	g_bserrno = -1;
3496 	spdk_blob_sync_md(blob_data_ro, blob_op_complete, NULL);
3497 	poll_threads();
3498 	CU_ASSERT(g_bserrno == 0);
3499 	g_bserrno = -1;
3500 	spdk_blob_sync_md(blob_md_ro, blob_op_complete, NULL);
3501 	poll_threads();
3502 	CU_ASSERT(g_bserrno == 0);
3503 
3504 	g_bserrno = -1;
3505 	spdk_blob_close(blob_invalid, blob_op_complete, NULL);
3506 	poll_threads();
3507 	CU_ASSERT(g_bserrno == 0);
3508 	blob_invalid = NULL;
3509 	g_bserrno = -1;
3510 	spdk_blob_close(blob_data_ro, blob_op_complete, NULL);
3511 	poll_threads();
3512 	CU_ASSERT(g_bserrno == 0);
3513 	blob_data_ro = NULL;
3514 	g_bserrno = -1;
3515 	spdk_blob_close(blob_md_ro, blob_op_complete, NULL);
3516 	poll_threads();
3517 	CU_ASSERT(g_bserrno == 0);
3518 	blob_md_ro = NULL;
3519 
3520 	g_blob = NULL;
3521 	g_blobid = SPDK_BLOBID_INVALID;
3522 
3523 	ut_bs_reload(&bs, NULL);
3524 
3525 	g_blob = NULL;
3526 	g_bserrno = 0;
3527 	spdk_bs_open_blob(bs, blobid_invalid, blob_op_with_handle_complete, NULL);
3528 	poll_threads();
3529 	CU_ASSERT(g_bserrno != 0);
3530 	CU_ASSERT(g_blob == NULL);
3531 
3532 	g_blob = NULL;
3533 	g_bserrno = -1;
3534 	spdk_bs_open_blob(bs, blobid_data_ro, blob_op_with_handle_complete, NULL);
3535 	poll_threads();
3536 	CU_ASSERT(g_bserrno == 0);
3537 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
3538 	blob_data_ro = g_blob;
3539 	/* If an unknown data_ro flag was found, the blob should be marked both data and md read-only. */
3540 	CU_ASSERT(blob_data_ro->data_ro == true);
3541 	CU_ASSERT(blob_data_ro->md_ro == true);
3542 	CU_ASSERT(spdk_blob_get_num_clusters(blob_data_ro) == 10);
3543 
3544 	g_blob = NULL;
3545 	g_bserrno = -1;
3546 	spdk_bs_open_blob(bs, blobid_md_ro, blob_op_with_handle_complete, NULL);
3547 	poll_threads();
3548 	CU_ASSERT(g_bserrno == 0);
3549 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
3550 	blob_md_ro = g_blob;
3551 	CU_ASSERT(blob_md_ro->data_ro == false);
3552 	CU_ASSERT(blob_md_ro->md_ro == true);
3553 
3554 	g_bserrno = -1;
3555 	spdk_blob_sync_md(blob_md_ro, blob_op_complete, NULL);
3556 	poll_threads();
3557 	CU_ASSERT(g_bserrno == 0);
3558 
3559 	ut_blob_close_and_delete(bs, blob_data_ro);
3560 	ut_blob_close_and_delete(bs, blob_md_ro);
3561 }
3562 
3563 static void
3564 bs_version(void)
3565 {
3566 	struct spdk_bs_super_block *super;
3567 	struct spdk_blob_store *bs = g_bs;
3568 	struct spdk_bs_dev *dev;
3569 	struct spdk_blob *blob;
3570 	struct spdk_blob_opts blob_opts;
3571 	spdk_blob_id blobid;
3572 
3573 	/* Unload the blob store */
3574 	spdk_bs_unload(bs, bs_op_complete, NULL);
3575 	poll_threads();
3576 	CU_ASSERT(g_bserrno == 0);
3577 	g_bs = NULL;
3578 
3579 	/*
3580 	 * Change the bs version on disk.  This will allow us to
3581 	 *  test that the version does not get modified automatically
3582 	 *  when loading and unloading the blobstore.
3583 	 */
3584 	super = (struct spdk_bs_super_block *)&g_dev_buffer[0];
3585 	CU_ASSERT(super->version == SPDK_BS_VERSION);
3586 	CU_ASSERT(super->clean == 1);
3587 	super->version = 2;
3588 	/*
3589 	 * Version 2 metadata does not have a used blobid mask, so clear
3590 	 *  those fields in the super block and zero the corresponding
3591 	 *  region on "disk".  We will use this to ensure blob IDs are
3592 	 *  correctly reconstructed.
3593 	 */
3594 	memset(&g_dev_buffer[super->used_blobid_mask_start * SPDK_BS_PAGE_SIZE], 0,
3595 	       super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE);
3596 	super->used_blobid_mask_start = 0;
3597 	super->used_blobid_mask_len = 0;
3598 	super->crc = blob_md_page_calc_crc(super);
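	/* The super block CRC is verified on load, so it must be recomputed after the
	 * edits above; otherwise the spdk_bs_load() below would reject the super block. */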
3599 
3600 	/* Load an existing blob store */
3601 	dev = init_dev();
3602 	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
3603 	poll_threads();
3604 	CU_ASSERT(g_bserrno == 0);
3605 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3606 	CU_ASSERT(super->clean == 1);
3607 	bs = g_bs;
3608 
3609 	/*
3610 	 * Create a blob - just to make sure that when we unload it
3611 	 * Create a blob - just to make sure that unloading it
3612 	 *  results in writing the super block (since metadata pages
3613 	 *  were allocated).
3614 	ut_spdk_blob_opts_init(&blob_opts);
3615 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
3616 	poll_threads();
3617 	CU_ASSERT(g_bserrno == 0);
3618 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
3619 	blobid = g_blobid;
3620 
3621 	/* Unload the blob store */
3622 	spdk_bs_unload(bs, bs_op_complete, NULL);
3623 	poll_threads();
3624 	CU_ASSERT(g_bserrno == 0);
3625 	g_bs = NULL;
3626 	CU_ASSERT(super->version == 2);
3627 	CU_ASSERT(super->used_blobid_mask_start == 0);
3628 	CU_ASSERT(super->used_blobid_mask_len == 0);
3629 
3630 	dev = init_dev();
3631 	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
3632 	poll_threads();
3633 	CU_ASSERT(g_bserrno == 0);
3634 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3635 	bs = g_bs;
3636 
3637 	g_blob = NULL;
3638 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
3639 	poll_threads();
3640 	CU_ASSERT(g_bserrno == 0);
3641 	CU_ASSERT(g_blob != NULL);
3642 	blob = g_blob;
3643 
3644 	ut_blob_close_and_delete(bs, blob);
3645 
3646 	CU_ASSERT(super->version == 2);
3647 	CU_ASSERT(super->used_blobid_mask_start == 0);
3648 	CU_ASSERT(super->used_blobid_mask_len == 0);
3649 }
3650 
3651 static void
3652 blob_set_xattrs_test(void)
3653 {
3654 	struct spdk_blob_store *bs = g_bs;
3655 	struct spdk_blob *blob;
3656 	struct spdk_blob_opts opts;
3657 	const void *value;
3658 	size_t value_len;
3659 	char *xattr;
3660 	size_t xattr_length;
3661 	int rc;
3662 
3663 	/* Create blob with extra attributes */
3664 	ut_spdk_blob_opts_init(&opts);
3665 
3666 	opts.xattrs.names = g_xattr_names;
3667 	opts.xattrs.get_value = _get_xattr_value;
3668 	opts.xattrs.count = 3;
3669 	opts.xattrs.ctx = &g_ctx;
3670 
3671 	blob = ut_blob_create_and_open(bs, &opts);
3672 
3673 	/* Get the xattrs */
3674 	value = NULL;
3675 
3676 	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len);
3677 	CU_ASSERT(rc == 0);
3678 	SPDK_CU_ASSERT_FATAL(value != NULL);
3679 	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
3680 	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);
3681 
3682 	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len);
3683 	CU_ASSERT(rc == 0);
3684 	SPDK_CU_ASSERT_FATAL(value != NULL);
3685 	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
3686 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);
3687 
3688 	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len);
3689 	CU_ASSERT(rc == 0);
3690 	SPDK_CU_ASSERT_FATAL(value != NULL);
3691 	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
3692 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);
3693 
3694 	/* Try to get non existing attribute */
3695 
3696 	rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len);
3697 	CU_ASSERT(rc == -ENOENT);
3698 
3699 	/* Try xattr exceeding maximum length of descriptor in single page */
3700 	xattr_length = SPDK_BS_MAX_DESC_SIZE - sizeof(struct spdk_blob_md_descriptor_xattr) -
3701 		       strlen("large_xattr") + 1;
3702 	xattr = calloc(xattr_length, sizeof(char));
3703 	SPDK_CU_ASSERT_FATAL(xattr != NULL);
3704 	rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
3705 	free(xattr);
3706 	SPDK_CU_ASSERT_FATAL(rc == -ENOMEM);
3707 
3708 	spdk_blob_close(blob, blob_op_complete, NULL);
3709 	poll_threads();
3710 	CU_ASSERT(g_bserrno == 0);
3711 	blob = NULL;
3712 	g_blob = NULL;
3713 	g_blobid = SPDK_BLOBID_INVALID;
3714 
3715 	/* NULL callback */
3716 	ut_spdk_blob_opts_init(&opts);
3717 	opts.xattrs.names = g_xattr_names;
3718 	opts.xattrs.get_value = NULL;
3719 	opts.xattrs.count = 1;
3720 	opts.xattrs.ctx = &g_ctx;
3721 
3722 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
3723 	poll_threads();
3724 	CU_ASSERT(g_bserrno == -EINVAL);
3725 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
3726 
3727 	/* NULL values */
3728 	ut_spdk_blob_opts_init(&opts);
3729 	opts.xattrs.names = g_xattr_names;
3730 	opts.xattrs.get_value = _get_xattr_value_null;
3731 	opts.xattrs.count = 1;
3732 	opts.xattrs.ctx = NULL;
3733 
3734 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
3735 	poll_threads();
3736 	CU_ASSERT(g_bserrno == -EINVAL);
3737 }
3738 
3739 static void
3740 blob_thin_prov_alloc(void)
3741 {
3742 	struct spdk_blob_store *bs = g_bs;
3743 	struct spdk_blob *blob;
3744 	struct spdk_blob_opts opts;
3745 	spdk_blob_id blobid;
3746 	uint64_t free_clusters;
3747 
3748 	free_clusters = spdk_bs_free_cluster_count(bs);
3749 
3750 	/* Set blob as thin provisioned */
3751 	ut_spdk_blob_opts_init(&opts);
3752 	opts.thin_provision = true;
3753 
3754 	blob = ut_blob_create_and_open(bs, &opts);
3755 	blobid = spdk_blob_get_id(blob);
3756 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3757 
3758 	CU_ASSERT(blob->active.num_clusters == 0);
3759 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
3760 
3761 	/* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */
3762 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
3763 	poll_threads();
3764 	CU_ASSERT(g_bserrno == 0);
3765 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3766 	CU_ASSERT(blob->active.num_clusters == 5);
3767 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
3768 
3769 	/* Grow it to 1TB - still unallocated */
3770 	spdk_blob_resize(blob, 262144, blob_op_complete, NULL);
3771 	poll_threads();
3772 	CU_ASSERT(g_bserrno == 0);
3773 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3774 	CU_ASSERT(blob->active.num_clusters == 262144);
3775 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 262144);
3776 
3777 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
3778 	poll_threads();
3779 	CU_ASSERT(g_bserrno == 0);
3780 	/* Sync must not change anything */
3781 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3782 	CU_ASSERT(blob->active.num_clusters == 262144);
3783 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 262144);
3784 	/* Since no clusters are allocated,
3785 	 * the number of metadata pages is expected to be minimal.
3786 	 */
3787 	CU_ASSERT(blob->active.num_pages == 1);
3788 
3789 	/* Shrink the blob to 3 clusters - still unallocated */
3790 	spdk_blob_resize(blob, 3, blob_op_complete, NULL);
3791 	poll_threads();
3792 	CU_ASSERT(g_bserrno == 0);
3793 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3794 	CU_ASSERT(blob->active.num_clusters == 3);
3795 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 3);
3796 
3797 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
3798 	poll_threads();
3799 	CU_ASSERT(g_bserrno == 0);
3800 	/* Sync must not change anything */
3801 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3802 	CU_ASSERT(blob->active.num_clusters == 3);
3803 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 3);
3804 
3805 	spdk_blob_close(blob, blob_op_complete, NULL);
3806 	poll_threads();
3807 	CU_ASSERT(g_bserrno == 0);
3808 
3809 	ut_bs_reload(&bs, NULL);
3810 
3811 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
3812 	poll_threads();
3813 	CU_ASSERT(g_bserrno == 0);
3814 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
3815 	blob = g_blob;
3816 
3817 	/* Check that clusters allocation and size is still the same */
3818 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3819 	CU_ASSERT(blob->active.num_clusters == 3);
3820 
3821 	ut_blob_close_and_delete(bs, blob);
3822 }
3823 
3824 static void
3825 blob_insert_cluster_msg_test(void)
3826 {
3827 	struct spdk_blob_store *bs = g_bs;
3828 	struct spdk_blob *blob;
3829 	struct spdk_blob_opts opts;
3830 	struct spdk_blob_md_page page = {};
3831 	spdk_blob_id blobid;
3832 	uint64_t free_clusters;
3833 	uint64_t new_cluster = 0;
3834 	uint32_t cluster_num = 3;
3835 	uint32_t extent_page = 0;
3836 
3837 	free_clusters = spdk_bs_free_cluster_count(bs);
3838 
3839 	/* Set blob as thin provisioned */
3840 	ut_spdk_blob_opts_init(&opts);
3841 	opts.thin_provision = true;
3842 	opts.num_clusters = 4;
3843 
3844 	blob = ut_blob_create_and_open(bs, &opts);
3845 	blobid = spdk_blob_get_id(blob);
3846 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3847 
3848 	CU_ASSERT(blob->active.num_clusters == 4);
3849 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 4);
3850 	CU_ASSERT(blob->active.clusters[cluster_num] == 0);
3851 
3852 	/* Specify the cluster_num to allocate; new_cluster is returned so it can be inserted on the md_thread.
3853 	 * This simulates the case where a cluster is allocated after blob creation,
3854 	 * such as in _spdk_bs_allocate_and_copy_cluster(). */
3855 	bs_allocate_cluster(blob, cluster_num, &new_cluster, &extent_page, false);
3856 	CU_ASSERT(blob->active.clusters[cluster_num] == 0);
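	/* The cluster was claimed from the free pool, but the blob's cluster map is
	 * only updated once the insert runs on the md thread below, hence the slot
	 * is still 0 here. */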
3857 
3858 	blob_insert_cluster_on_md_thread(blob, cluster_num, new_cluster, extent_page, &page,
3859 					 blob_op_complete, NULL);
3860 	poll_threads();
3861 
3862 	CU_ASSERT(blob->active.clusters[cluster_num] != 0);
3863 
3864 	spdk_blob_close(blob, blob_op_complete, NULL);
3865 	poll_threads();
3866 	CU_ASSERT(g_bserrno == 0);
3867 
3868 	ut_bs_reload(&bs, NULL);
3869 
3870 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
3871 	poll_threads();
3872 	CU_ASSERT(g_bserrno == 0);
3873 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
3874 	blob = g_blob;
3875 
3876 	CU_ASSERT(blob->active.clusters[cluster_num] != 0);
3877 
3878 	ut_blob_close_and_delete(bs, blob);
3879 }
3880 
3881 static void
3882 blob_thin_prov_rw(void)
3883 {
3884 	static const uint8_t zero[10 * 4096] = { 0 };
3885 	struct spdk_blob_store *bs = g_bs;
3886 	struct spdk_blob *blob, *blob_id0;
3887 	struct spdk_io_channel *channel, *channel_thread1;
3888 	struct spdk_blob_opts opts;
3889 	uint64_t free_clusters;
3890 	uint64_t page_size;
3891 	uint8_t payload_read[10 * 4096];
3892 	uint8_t payload_write[10 * 4096];
3893 	uint64_t write_bytes;
3894 	uint64_t read_bytes;
3895 
3896 	free_clusters = spdk_bs_free_cluster_count(bs);
3897 	page_size = spdk_bs_get_page_size(bs);
3898 
3899 	channel = spdk_bs_alloc_io_channel(bs);
3900 	CU_ASSERT(channel != NULL);
3901 
3902 	ut_spdk_blob_opts_init(&opts);
3903 	opts.thin_provision = true;
3904 
3905 	/* Create and delete a blob at md page 0, so that the next md page allocation
3906 	 * for an extent will reuse that page. */
3907 	blob_id0 = ut_blob_create_and_open(bs, &opts);
3908 	blob = ut_blob_create_and_open(bs, &opts);
3909 	ut_blob_close_and_delete(bs, blob_id0);
3910 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3911 
3912 	CU_ASSERT(blob->active.num_clusters == 0);
3913 
3914 	/* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */
3915 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
3916 	poll_threads();
3917 	CU_ASSERT(g_bserrno == 0);
3918 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3919 	CU_ASSERT(blob->active.num_clusters == 5);
3920 
3921 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
3922 	poll_threads();
3923 	CU_ASSERT(g_bserrno == 0);
3924 	/* Sync must not change anything */
3925 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3926 	CU_ASSERT(blob->active.num_clusters == 5);
3927 
3928 	/* Payload should be all zeros from unallocated clusters */
3929 	memset(payload_read, 0xFF, sizeof(payload_read));
3930 	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
3931 	poll_threads();
3932 	CU_ASSERT(g_bserrno == 0);
3933 	CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0);
3934 
3935 	write_bytes = g_dev_write_bytes;
3936 	read_bytes = g_dev_read_bytes;
3937 
3938 	/* Perform write on thread 1. That will allocate a cluster on thread 0 via send_msg */
3939 	set_thread(1);
3940 	channel_thread1 = spdk_bs_alloc_io_channel(bs);
3941 	CU_ASSERT(channel_thread1 != NULL);
3942 	memset(payload_write, 0xE5, sizeof(payload_write));
3943 	spdk_blob_io_write(blob, channel_thread1, payload_write, 4, 10, blob_op_complete, NULL);
3944 	CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs));
3945 	/* Perform write on thread 0. That will try to allocate a cluster,
3946 	 * but fail because another thread issued the cluster allocation first. */
3947 	set_thread(0);
3948 	memset(payload_write, 0xE5, sizeof(payload_write));
3949 	spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
3950 	CU_ASSERT(free_clusters - 2 == spdk_bs_free_cluster_count(bs));
3951 	poll_threads();
3952 	CU_ASSERT(g_bserrno == 0);
3953 	CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs));
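	/* The extra cluster claimed by thread 0 is released once its insert reaches the
	 * md thread and finds the cluster already allocated, which is why the free count
	 * settles back at free_clusters - 1. */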
3954 	/* For a thin-provisioned blob we need to write 20 pages plus one metadata page and
3955 	 * read 0 bytes */
3956 	if (g_use_extent_table) {
3957 		/* Add one more page for EXTENT_PAGE write */
3958 		CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 22);
3959 	} else {
3960 		CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 21);
3961 	}
3962 	CU_ASSERT(g_dev_read_bytes - read_bytes == 0);
3963 
3964 	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
3965 	poll_threads();
3966 	CU_ASSERT(g_bserrno == 0);
3967 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
3968 
3969 	ut_blob_close_and_delete(bs, blob);
3970 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3971 
3972 	set_thread(1);
3973 	spdk_bs_free_io_channel(channel_thread1);
3974 	set_thread(0);
3975 	spdk_bs_free_io_channel(channel);
3976 	poll_threads();
3977 	g_blob = NULL;
3978 	g_blobid = 0;
3979 }
3980 
3981 static void
3982 blob_thin_prov_write_count_io(void)
3983 {
3984 	struct spdk_blob_store *bs;
3985 	struct spdk_blob *blob;
3986 	struct spdk_io_channel *ch;
3987 	struct spdk_bs_dev *dev;
3988 	struct spdk_bs_opts bs_opts;
3989 	struct spdk_blob_opts opts;
3990 	uint64_t free_clusters;
3991 	uint64_t page_size;
3992 	uint8_t payload_write[4096];
3993 	uint64_t write_bytes;
3994 	uint64_t read_bytes;
3995 	const uint32_t CLUSTER_SZ = 16384;
3996 	uint32_t pages_per_cluster;
3997 	uint32_t pages_per_extent_page;
3998 	uint32_t i;
3999 
4000 	/* Use a very small cluster size for this test.  This ensures we need multiple
4001 	 * extent pages to hold all of the clusters even for the relatively small blobs
4002 	 * that the unit tests are restricted to (i.e. we don't want to allocate multi-GB
4003 	 * buffers).
4004 	 */
4005 	dev = init_dev();
4006 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
4007 	bs_opts.cluster_sz = CLUSTER_SZ;
4008 
4009 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
4010 	poll_threads();
4011 	CU_ASSERT(g_bserrno == 0);
4012 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
4013 	bs = g_bs;
4014 
4015 	free_clusters = spdk_bs_free_cluster_count(bs);
4016 	page_size = spdk_bs_get_page_size(bs);
4017 	pages_per_cluster = CLUSTER_SZ / page_size;
4018 	pages_per_extent_page = SPDK_EXTENTS_PER_EP * pages_per_cluster;
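	/* Number of blob pages covered by a single extent page; the writes below are
	 * spaced by this amount so each loop iteration lands in a different extent page. */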
4019 
4020 	ch = spdk_bs_alloc_io_channel(bs);
4021 	SPDK_CU_ASSERT_FATAL(ch != NULL);
4022 
4023 	ut_spdk_blob_opts_init(&opts);
4024 	opts.thin_provision = true;
4025 
4026 	blob = ut_blob_create_and_open(bs, &opts);
4027 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4028 
4029 	/* Resize the blob so that it will require 8 extent pages to hold all of
4030 	 * the clusters.
4031 	 */
4032 	g_bserrno = -1;
4033 	spdk_blob_resize(blob, SPDK_EXTENTS_PER_EP * 8, blob_op_complete, NULL);
4034 	poll_threads();
4035 	CU_ASSERT(g_bserrno == 0);
4036 
4037 	g_bserrno = -1;
4038 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
4039 	poll_threads();
4040 	CU_ASSERT(g_bserrno == 0);
4041 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4042 	CU_ASSERT(blob->active.num_clusters == SPDK_EXTENTS_PER_EP * 8);
4043 
4044 	memset(payload_write, 0, sizeof(payload_write));
4045 	for (i = 0; i < 8; i++) {
4046 		write_bytes = g_dev_write_bytes;
4047 		read_bytes = g_dev_read_bytes;
4048 
4049 		g_bserrno = -1;
4050 		spdk_blob_io_write(blob, ch, payload_write, pages_per_extent_page * i, 1, blob_op_complete, NULL);
4051 		poll_threads();
4052 		CU_ASSERT(g_bserrno == 0);
4053 		CU_ASSERT(free_clusters - (2 * i + 1) == spdk_bs_free_cluster_count(bs));
4054 
4055 		CU_ASSERT(g_dev_read_bytes == read_bytes);
4056 		if (!g_use_extent_table) {
4057 			/* For legacy metadata, we should have written two pages - one for the
4058 			 * write I/O itself, another for the blob's primary metadata.
4059 			 */
4060 			CU_ASSERT((g_dev_write_bytes - write_bytes) / page_size == 2);
4061 		} else {
4062 			/* For extent table metadata, we should have written three pages - one
4063 			 * for the write I/O, one for the extent page, one for the blob's primary
4064 			 * metadata.
4065 			 */
4066 			CU_ASSERT((g_dev_write_bytes - write_bytes) / page_size == 3);
4067 		}
4068 
4069 		/* The write should have synced the metadata already.  Do another sync here
4070 		 * just to confirm.
4071 		 */
4072 		write_bytes = g_dev_write_bytes;
4073 		read_bytes = g_dev_read_bytes;
4074 
4075 		g_bserrno = -1;
4076 		spdk_blob_sync_md(blob, blob_op_complete, NULL);
4077 		poll_threads();
4078 		CU_ASSERT(g_bserrno == 0);
4079 		CU_ASSERT(free_clusters - (2 * i + 1) == spdk_bs_free_cluster_count(bs));
4080 
4081 		CU_ASSERT(g_dev_read_bytes == read_bytes);
4082 		CU_ASSERT(g_dev_write_bytes == write_bytes);
4083 
4084 		/* Now write to another unallocated cluster that is part of the same extent page. */
4085 		g_bserrno = -1;
4086 		spdk_blob_io_write(blob, ch, payload_write, pages_per_extent_page * i + pages_per_cluster,
4087 				   1, blob_op_complete, NULL);
4088 		poll_threads();
4089 		CU_ASSERT(g_bserrno == 0);
4090 		CU_ASSERT(free_clusters - (2 * i + 2) == spdk_bs_free_cluster_count(bs));
4091 
4092 		CU_ASSERT(g_dev_read_bytes == read_bytes);
4093 		/*
4094 		 * For legacy metadata, we should have written the I/O and the primary metadata page.
4095 		 * For extent table metadata, we should have written the I/O and the extent metadata page.
4096 		 */
4097 		CU_ASSERT((g_dev_write_bytes - write_bytes) / page_size == 2);
4098 	}
4099 
4100 	ut_blob_close_and_delete(bs, blob);
4101 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4102 
4103 	spdk_bs_free_io_channel(ch);
4104 	poll_threads();
4105 	g_blob = NULL;
4106 	g_blobid = 0;
4107 
4108 	spdk_bs_unload(bs, bs_op_complete, NULL);
4109 	poll_threads();
4110 	CU_ASSERT(g_bserrno == 0);
4111 	g_bs = NULL;
4112 }
4113 
4114 static void
4115 blob_thin_prov_rle(void)
4116 {
4117 	static const uint8_t zero[10 * 4096] = { 0 };
4118 	struct spdk_blob_store *bs = g_bs;
4119 	struct spdk_blob *blob;
4120 	struct spdk_io_channel *channel;
4121 	struct spdk_blob_opts opts;
4122 	spdk_blob_id blobid;
4123 	uint64_t free_clusters;
4124 	uint64_t page_size;
4125 	uint8_t payload_read[10 * 4096];
4126 	uint8_t payload_write[10 * 4096];
4127 	uint64_t write_bytes;
4128 	uint64_t read_bytes;
4129 	uint64_t io_unit;
4130 
4131 	free_clusters = spdk_bs_free_cluster_count(bs);
4132 	page_size = spdk_bs_get_page_size(bs);
4133 
4134 	ut_spdk_blob_opts_init(&opts);
4135 	opts.thin_provision = true;
4136 	opts.num_clusters = 5;
4137 
4138 	blob = ut_blob_create_and_open(bs, &opts);
4139 	blobid = spdk_blob_get_id(blob);
4140 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4141 
4142 	channel = spdk_bs_alloc_io_channel(bs);
4143 	CU_ASSERT(channel != NULL);
4144 
4145 	/* Specifically target the second cluster in the blob as the first allocation */
4146 	io_unit = bs_cluster_to_page(bs, 1) * bs_io_unit_per_page(bs);
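	/* bs_cluster_to_page() gives the first page of cluster 1; multiplying by the
	 * io units per page converts that to the io_unit offset used by the I/O calls. */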
4147 
4148 	/* Payload should be all zeros from unallocated clusters */
4149 	memset(payload_read, 0xFF, sizeof(payload_read));
4150 	spdk_blob_io_read(blob, channel, payload_read, io_unit, 10, blob_op_complete, NULL);
4151 	poll_threads();
4152 	CU_ASSERT(g_bserrno == 0);
4153 	CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0);
4154 
4155 	write_bytes = g_dev_write_bytes;
4156 	read_bytes = g_dev_read_bytes;
4157 
4158 	/* Issue write to second cluster in a blob */
4159 	memset(payload_write, 0xE5, sizeof(payload_write));
4160 	spdk_blob_io_write(blob, channel, payload_write, io_unit, 10, blob_op_complete, NULL);
4161 	poll_threads();
4162 	CU_ASSERT(g_bserrno == 0);
4163 	CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs));
4164 	/* For a thin-provisioned blob we need to write 10 pages plus one metadata page and
4165 	 * read 0 bytes */
4166 	if (g_use_extent_table) {
4167 		/* Add one more page for EXTENT_PAGE write */
4168 		CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 12);
4169 	} else {
4170 		CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 11);
4171 	}
4172 	CU_ASSERT(g_dev_read_bytes - read_bytes == 0);
4173 
4174 	spdk_blob_io_read(blob, channel, payload_read, io_unit, 10, blob_op_complete, NULL);
4175 	poll_threads();
4176 	CU_ASSERT(g_bserrno == 0);
4177 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
4178 
4179 	spdk_bs_free_io_channel(channel);
4180 	poll_threads();
4181 
4182 	spdk_blob_close(blob, blob_op_complete, NULL);
4183 	poll_threads();
4184 	CU_ASSERT(g_bserrno == 0);
4185 
4186 	ut_bs_reload(&bs, NULL);
4187 
4188 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
4189 	poll_threads();
4190 	CU_ASSERT(g_bserrno == 0);
4191 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4192 	blob = g_blob;
4193 
4194 	channel = spdk_bs_alloc_io_channel(bs);
4195 	CU_ASSERT(channel != NULL);
4196 
4197 	/* Read second cluster after blob reload to confirm data written */
4198 	spdk_blob_io_read(blob, channel, payload_read, io_unit, 10, blob_op_complete, NULL);
4199 	poll_threads();
4200 	CU_ASSERT(g_bserrno == 0);
4201 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
4202 
4203 	spdk_bs_free_io_channel(channel);
4204 	poll_threads();
4205 
4206 	ut_blob_close_and_delete(bs, blob);
4207 }
4208 
4209 static void
4210 blob_thin_prov_rw_iov(void)
4211 {
4212 	static const uint8_t zero[10 * 4096] = { 0 };
4213 	struct spdk_blob_store *bs = g_bs;
4214 	struct spdk_blob *blob;
4215 	struct spdk_io_channel *channel;
4216 	struct spdk_blob_opts opts;
4217 	uint64_t free_clusters;
4218 	uint8_t payload_read[10 * 4096];
4219 	uint8_t payload_write[10 * 4096];
4220 	struct iovec iov_read[3];
4221 	struct iovec iov_write[3];
4222 
4223 	free_clusters = spdk_bs_free_cluster_count(bs);
4224 
4225 	channel = spdk_bs_alloc_io_channel(bs);
4226 	CU_ASSERT(channel != NULL);
4227 
4228 	ut_spdk_blob_opts_init(&opts);
4229 	opts.thin_provision = true;
4230 
4231 	blob = ut_blob_create_and_open(bs, &opts);
4232 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4233 
4234 	CU_ASSERT(blob->active.num_clusters == 0);
4235 
4236 	/* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */
4237 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
4238 	poll_threads();
4239 	CU_ASSERT(g_bserrno == 0);
4240 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4241 	CU_ASSERT(blob->active.num_clusters == 5);
4242 
4243 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
4244 	poll_threads();
4245 	CU_ASSERT(g_bserrno == 0);
4246 	/* Sync must not change anything */
4247 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4248 	CU_ASSERT(blob->active.num_clusters == 5);
4249 
4250 	/* Payload should be all zeros from unallocated clusters */
4251 	memset(payload_read, 0xAA, sizeof(payload_read));
4252 	iov_read[0].iov_base = payload_read;
4253 	iov_read[0].iov_len = 3 * 4096;
4254 	iov_read[1].iov_base = payload_read + 3 * 4096;
4255 	iov_read[1].iov_len = 4 * 4096;
4256 	iov_read[2].iov_base = payload_read + 7 * 4096;
4257 	iov_read[2].iov_len = 3 * 4096;
4258 	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
4259 	poll_threads();
4260 	CU_ASSERT(g_bserrno == 0);
4261 	CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0);
4262 
4263 	memset(payload_write, 0xE5, sizeof(payload_write));
4264 	iov_write[0].iov_base = payload_write;
4265 	iov_write[0].iov_len = 1 * 4096;
4266 	iov_write[1].iov_base = payload_write + 1 * 4096;
4267 	iov_write[1].iov_len = 5 * 4096;
4268 	iov_write[2].iov_base = payload_write + 6 * 4096;
4269 	iov_write[2].iov_len = 4 * 4096;
4270 
4271 	spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
4272 	poll_threads();
4273 	CU_ASSERT(g_bserrno == 0);
4274 
4275 	memset(payload_read, 0xAA, sizeof(payload_read));
4276 	iov_read[0].iov_base = payload_read;
4277 	iov_read[0].iov_len = 3 * 4096;
4278 	iov_read[1].iov_base = payload_read + 3 * 4096;
4279 	iov_read[1].iov_len = 4 * 4096;
4280 	iov_read[2].iov_base = payload_read + 7 * 4096;
4281 	iov_read[2].iov_len = 3 * 4096;
4282 	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
4283 	poll_threads();
4284 	CU_ASSERT(g_bserrno == 0);
4285 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
4286 
4287 	spdk_bs_free_io_channel(channel);
4288 	poll_threads();
4289 
4290 	ut_blob_close_and_delete(bs, blob);
4291 }
4292 
4293 struct iter_ctx {
4294 	int		current_iter;
4295 	spdk_blob_id	blobid[4];
4296 };
4297 
4298 static void
4299 test_iter(void *arg, struct spdk_blob *blob, int bserrno)
4300 {
4301 	struct iter_ctx *iter_ctx = arg;
4302 	spdk_blob_id blobid;
4303 
4304 	CU_ASSERT(bserrno == 0);
4305 	blobid = spdk_blob_get_id(blob);
4306 	CU_ASSERT(blobid == iter_ctx->blobid[iter_ctx->current_iter++]);
4307 }
4308 
4309 static void
4310 bs_load_iter_test(void)
4311 {
4312 	struct spdk_blob_store *bs;
4313 	struct spdk_bs_dev *dev;
4314 	struct iter_ctx iter_ctx = { 0 };
4315 	struct spdk_blob *blob;
4316 	int i, rc;
4317 	struct spdk_bs_opts opts;
4318 
4319 	dev = init_dev();
4320 	spdk_bs_opts_init(&opts, sizeof(opts));
4321 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
4322 
4323 	/* Initialize a new blob store */
4324 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
4325 	poll_threads();
4326 	CU_ASSERT(g_bserrno == 0);
4327 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
4328 	bs = g_bs;
4329 
4330 	for (i = 0; i < 4; i++) {
4331 		blob = ut_blob_create_and_open(bs, NULL);
4332 		iter_ctx.blobid[i] = spdk_blob_get_id(blob);
4333 
4334 		/* Just save the blobid as an xattr for testing purposes. */
4335 		rc = spdk_blob_set_xattr(blob, "blobid", &iter_ctx.blobid[i], sizeof(spdk_blob_id));
4336 		CU_ASSERT(rc == 0);
4337 
4338 		/* Resize the blob */
4339 		spdk_blob_resize(blob, i, blob_op_complete, NULL);
4340 		poll_threads();
4341 		CU_ASSERT(g_bserrno == 0);
4342 
4343 		spdk_blob_close(blob, blob_op_complete, NULL);
4344 		poll_threads();
4345 		CU_ASSERT(g_bserrno == 0);
4346 	}
4347 
4348 	g_bserrno = -1;
4349 	spdk_bs_unload(bs, bs_op_complete, NULL);
4350 	poll_threads();
4351 	CU_ASSERT(g_bserrno == 0);
4352 
4353 	dev = init_dev();
4354 	spdk_bs_opts_init(&opts, sizeof(opts));
4355 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
4356 	opts.iter_cb_fn = test_iter;
4357 	opts.iter_cb_arg = &iter_ctx;
4358 
4359 	/* Test blob iteration during load after a clean shutdown. */
4360 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
4361 	poll_threads();
4362 	CU_ASSERT(g_bserrno == 0);
4363 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
4364 	bs = g_bs;
4365 
4366 	/* Dirty shutdown */
4367 	bs_free(bs);
4368 
4369 	dev = init_dev();
4370 	spdk_bs_opts_init(&opts, sizeof(opts));
4371 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
4372 	opts.iter_cb_fn = test_iter;
4373 	iter_ctx.current_iter = 0;
4374 	opts.iter_cb_arg = &iter_ctx;
4375 
4376 	/* Test blob iteration during load after a dirty shutdown. */
4377 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
4378 	poll_threads();
4379 	CU_ASSERT(g_bserrno == 0);
4380 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
4381 	bs = g_bs;
4382 
4383 	spdk_bs_unload(bs, bs_op_complete, NULL);
4384 	poll_threads();
4385 	CU_ASSERT(g_bserrno == 0);
4386 	g_bs = NULL;
4387 }
4388 
4389 static void
4390 blob_snapshot_rw(void)
4391 {
4392 	static const uint8_t zero[10 * 4096] = { 0 };
4393 	struct spdk_blob_store *bs = g_bs;
4394 	struct spdk_blob *blob, *snapshot;
4395 	struct spdk_io_channel *channel;
4396 	struct spdk_blob_opts opts;
4397 	spdk_blob_id blobid, snapshotid;
4398 	uint64_t free_clusters;
4399 	uint64_t cluster_size;
4400 	uint64_t page_size;
4401 	uint8_t payload_read[10 * 4096];
4402 	uint8_t payload_write[10 * 4096];
4403 	uint64_t write_bytes;
4404 	uint64_t read_bytes;
4405 
4406 	free_clusters = spdk_bs_free_cluster_count(bs);
4407 	cluster_size = spdk_bs_get_cluster_size(bs);
4408 	page_size = spdk_bs_get_page_size(bs);
4409 
4410 	channel = spdk_bs_alloc_io_channel(bs);
4411 	CU_ASSERT(channel != NULL);
4412 
4413 	ut_spdk_blob_opts_init(&opts);
4414 	opts.thin_provision = true;
4415 	opts.num_clusters = 5;
4416 
4417 	blob = ut_blob_create_and_open(bs, &opts);
4418 	blobid = spdk_blob_get_id(blob);
4419 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4420 
4421 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
4422 
4423 	memset(payload_read, 0xFF, sizeof(payload_read));
4424 	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
4425 	poll_threads();
4426 	CU_ASSERT(g_bserrno == 0);
4427 	CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0);
4428 
4429 	memset(payload_write, 0xE5, sizeof(payload_write));
4430 	spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
4431 	poll_threads();
4432 	CU_ASSERT(g_bserrno == 0);
4433 	CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs));
4434 
4435 	/* Create snapshot from blob */
4436 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
4437 	poll_threads();
4438 	CU_ASSERT(g_bserrno == 0);
4439 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
4440 	snapshotid = g_blobid;
4441 
4442 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
4443 	poll_threads();
4444 	CU_ASSERT(g_bserrno == 0);
4445 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4446 	snapshot = g_blob;
4447 	CU_ASSERT(snapshot->data_ro == true);
4448 	CU_ASSERT(snapshot->md_ro == true);
4449 
4450 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5);
4451 
4452 	write_bytes = g_dev_write_bytes;
4453 	read_bytes = g_dev_read_bytes;
4454 
4455 	memset(payload_write, 0xAA, sizeof(payload_write));
4456 	spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
4457 	poll_threads();
4458 	CU_ASSERT(g_bserrno == 0);
4459 	CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs));
4460 
4461 	/* For a clone we need to allocate and copy one cluster, update one page of metadata
4462 	 * and then write 10 pages of payload.
4463 	 */
4464 	if (g_use_extent_table) {
4465 		/* Add one more page for EXTENT_PAGE write */
4466 		CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 12 + cluster_size);
4467 	} else {
4468 		CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 11 + cluster_size);
4469 	}
4470 	CU_ASSERT(g_dev_read_bytes - read_bytes == cluster_size);
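	/* The cluster_size of reads accounts for the copy-on-write read of the original
	 * cluster from the snapshot before the new payload is written. */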
4471 
4472 	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
4473 	poll_threads();
4474 	CU_ASSERT(g_bserrno == 0);
4475 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
4476 
4477 	/* Data on snapshot should not change after write to clone */
4478 	memset(payload_write, 0xE5, sizeof(payload_write));
4479 	spdk_blob_io_read(snapshot, channel, payload_read, 4, 10, blob_op_complete, NULL);
4480 	poll_threads();
4481 	CU_ASSERT(g_bserrno == 0);
4482 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
4483 
4484 	ut_blob_close_and_delete(bs, blob);
4485 	ut_blob_close_and_delete(bs, snapshot);
4486 
4487 	spdk_bs_free_io_channel(channel);
4488 	poll_threads();
4489 	g_blob = NULL;
4490 	g_blobid = 0;
4491 }
4492 
4493 static void
4494 blob_snapshot_rw_iov(void)
4495 {
4496 	static const uint8_t zero[10 * 4096] = { 0 };
4497 	struct spdk_blob_store *bs = g_bs;
4498 	struct spdk_blob *blob, *snapshot;
4499 	struct spdk_io_channel *channel;
4500 	struct spdk_blob_opts opts;
4501 	spdk_blob_id blobid, snapshotid;
4502 	uint64_t free_clusters;
4503 	uint8_t payload_read[10 * 4096];
4504 	uint8_t payload_write[10 * 4096];
4505 	struct iovec iov_read[3];
4506 	struct iovec iov_write[3];
4507 
4508 	free_clusters = spdk_bs_free_cluster_count(bs);
4509 
4510 	channel = spdk_bs_alloc_io_channel(bs);
4511 	CU_ASSERT(channel != NULL);
4512 
4513 	ut_spdk_blob_opts_init(&opts);
4514 	opts.thin_provision = true;
4515 	opts.num_clusters = 5;
4516 
4517 	blob = ut_blob_create_and_open(bs, &opts);
4518 	blobid = spdk_blob_get_id(blob);
4519 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4520 
4521 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
4522 
4523 	/* Create snapshot from blob */
4524 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
4525 	poll_threads();
4526 	CU_ASSERT(g_bserrno == 0);
4527 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
4528 	snapshotid = g_blobid;
4529 
4530 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
4531 	poll_threads();
4532 	CU_ASSERT(g_bserrno == 0);
4533 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4534 	snapshot = g_blob;
4535 	CU_ASSERT(snapshot->data_ro == true);
4536 	CU_ASSERT(snapshot->md_ro == true);
4537 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5);
4538 
4539 	/* Payload should be all zeros from unallocated clusters */
4540 	memset(payload_read, 0xAA, sizeof(payload_read));
4541 	iov_read[0].iov_base = payload_read;
4542 	iov_read[0].iov_len = 3 * 4096;
4543 	iov_read[1].iov_base = payload_read + 3 * 4096;
4544 	iov_read[1].iov_len = 4 * 4096;
4545 	iov_read[2].iov_base = payload_read + 7 * 4096;
4546 	iov_read[2].iov_len = 3 * 4096;
4547 	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
4548 	poll_threads();
4549 	CU_ASSERT(g_bserrno == 0);
4550 	CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0);
4551 
4552 	memset(payload_write, 0xE5, sizeof(payload_write));
4553 	iov_write[0].iov_base = payload_write;
4554 	iov_write[0].iov_len = 1 * 4096;
4555 	iov_write[1].iov_base = payload_write + 1 * 4096;
4556 	iov_write[1].iov_len = 5 * 4096;
4557 	iov_write[2].iov_base = payload_write + 6 * 4096;
4558 	iov_write[2].iov_len = 4 * 4096;
4559 
4560 	spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
4561 	poll_threads();
4562 	CU_ASSERT(g_bserrno == 0);
4563 
4564 	memset(payload_read, 0xAA, sizeof(payload_read));
4565 	iov_read[0].iov_base = payload_read;
4566 	iov_read[0].iov_len = 3 * 4096;
4567 	iov_read[1].iov_base = payload_read + 3 * 4096;
4568 	iov_read[1].iov_len = 4 * 4096;
4569 	iov_read[2].iov_base = payload_read + 7 * 4096;
4570 	iov_read[2].iov_len = 3 * 4096;
4571 	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
4572 	poll_threads();
4573 	CU_ASSERT(g_bserrno == 0);
4574 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
4575 
4576 	spdk_bs_free_io_channel(channel);
4577 	poll_threads();
4578 
4579 	ut_blob_close_and_delete(bs, blob);
4580 	ut_blob_close_and_delete(bs, snapshot);
4581 }
4582 
4583 /**
4584  * Inflate / decouple parent rw unit tests.
4585  *
4586  * --------------
4587  * original blob:         0         1         2         3         4
4588  *                   ,---------+---------+---------+---------+---------.
4589  *         snapshot  |xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|    -    |
4590  *                   +---------+---------+---------+---------+---------+
4591  *         snapshot2 |    -    |yyyyyyyyy|    -    |yyyyyyyyy|    -    |
4592  *                   +---------+---------+---------+---------+---------+
4593  *         blob      |    -    |zzzzzzzzz|    -    |    -    |    -    |
4594  *                   '---------+---------+---------+---------+---------'
4595  *                   .         .         .         .         .         .
4596  * --------          .         .         .         .         .         .
4597  * inflate:          .         .         .         .         .         .
4598  *                   ,---------+---------+---------+---------+---------.
4599  *         blob      |xxxxxxxxx|zzzzzzzzz|xxxxxxxxx|yyyyyyyyy|000000000|
4600  *                   '---------+---------+---------+---------+---------'
4601  *
4602  *         NOTE: needs to allocate 4 clusters, thin provisioning removed, dependency
4603  *               on snapshot2 and snapshot removed .         .         .
4604  *                   .         .         .         .         .         .
4605  * ----------------  .         .         .         .         .         .
4606  * decouple parent:  .         .         .         .         .         .
4607  *                   ,---------+---------+---------+---------+---------.
4608  *         snapshot  |xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|    -    |
4609  *                   +---------+---------+---------+---------+---------+
4610  *         blob      |    -    |zzzzzzzzz|    -    |yyyyyyyyy|    -    |
4611  *                   '---------+---------+---------+---------+---------'
4612  *
4613  *         NOTE: needs to allocate 1 cluster, 3 clusters stay unallocated; the dependency
4614  *               on snapshot2 is removed while the one on snapshot still exists. Snapshot2
4615  *               should remain a clone of snapshot.
4616  */
4617 static void
4618 _blob_inflate_rw(bool decouple_parent)
4619 {
4620 	struct spdk_blob_store *bs = g_bs;
4621 	struct spdk_blob *blob, *snapshot, *snapshot2;
4622 	struct spdk_io_channel *channel;
4623 	struct spdk_blob_opts opts;
4624 	spdk_blob_id blobid, snapshotid, snapshot2id;
4625 	uint64_t free_clusters;
4626 	uint64_t cluster_size;
4627 
4628 	uint64_t payload_size;
4629 	uint8_t *payload_read;
4630 	uint8_t *payload_write;
4631 	uint8_t *payload_clone;
4632 
4633 	uint64_t pages_per_cluster;
4634 	uint64_t pages_per_payload;
4635 
4636 	int i;
4637 	spdk_blob_id ids[2];
4638 	size_t count;
4639 
4640 	free_clusters = spdk_bs_free_cluster_count(bs);
4641 	cluster_size = spdk_bs_get_cluster_size(bs);
4642 	pages_per_cluster = cluster_size / spdk_bs_get_page_size(bs);
4643 	pages_per_payload = pages_per_cluster * 5;
4644 
4645 	payload_size = cluster_size * 5;
4646 
4647 	payload_read = malloc(payload_size);
4648 	SPDK_CU_ASSERT_FATAL(payload_read != NULL);
4649 
4650 	payload_write = malloc(payload_size);
4651 	SPDK_CU_ASSERT_FATAL(payload_write != NULL);
4652 
4653 	payload_clone = malloc(payload_size);
4654 	SPDK_CU_ASSERT_FATAL(payload_clone != NULL);
4655 
4656 	channel = spdk_bs_alloc_io_channel(bs);
4657 	SPDK_CU_ASSERT_FATAL(channel != NULL);
4658 
4659 	/* Create blob */
4660 	ut_spdk_blob_opts_init(&opts);
4661 	opts.thin_provision = true;
4662 	opts.num_clusters = 5;
4663 
4664 	blob = ut_blob_create_and_open(bs, &opts);
4665 	blobid = spdk_blob_get_id(blob);
4666 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4667 
4668 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
4669 
4670 	/* 1) Initial read should return zeroed payload */
4671 	memset(payload_read, 0xFF, payload_size);
4672 	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload,
4673 			  blob_op_complete, NULL);
4674 	poll_threads();
4675 	CU_ASSERT(g_bserrno == 0);
4676 	CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size));
4677 
4678 	/* Fill whole blob with a pattern, except last cluster (to be sure it
4679 	 * isn't allocated) */
4680 	memset(payload_write, 0xE5, payload_size - cluster_size);
4681 	spdk_blob_io_write(blob, channel, payload_write, 0, pages_per_payload -
4682 			   pages_per_cluster, blob_op_complete, NULL);
4683 	poll_threads();
4684 	CU_ASSERT(g_bserrno == 0);
4685 	CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs));
4686 
4687 	/* 2) Create snapshot from blob (first level) */
4688 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
4689 	poll_threads();
4690 	CU_ASSERT(g_bserrno == 0);
4691 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
4692 	snapshotid = g_blobid;
4693 
4694 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
4695 	poll_threads();
4696 	CU_ASSERT(g_bserrno == 0);
4697 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4698 	snapshot = g_blob;
4699 	CU_ASSERT(snapshot->data_ro == true);
4700 	CU_ASSERT(snapshot->md_ro == true);
4701 
4702 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5);
4703 
4704 	/* Write every second cluster with a pattern.
4705 	 *
4706 	 * The last cluster shouldn't be written, to be sure that neither the snapshot
4707 	 * nor the clone allocates it.
4708 	 *
4709 	 * payload_clone stores the expected result of reading "blob" at that point and
4710 	 * is used only to check data consistency on the clone before and after
4711 	 * inflation. Initially we fill it with the backing snapshot's pattern
4712 	 * used before.
4713 	 */
4714 	memset(payload_clone, 0xE5, payload_size - cluster_size);
4715 	memset(payload_clone + payload_size - cluster_size, 0x00, cluster_size);
4716 	memset(payload_write, 0xAA, payload_size);
4717 	for (i = 1; i < 5; i += 2) {
4718 		spdk_blob_io_write(blob, channel, payload_write, i * pages_per_cluster,
4719 				   pages_per_cluster, blob_op_complete, NULL);
4720 		poll_threads();
4721 		CU_ASSERT(g_bserrno == 0);
4722 
4723 		/* Update expected result */
4724 		memcpy(payload_clone + (cluster_size * i), payload_write,
4725 		       cluster_size);
4726 	}
4727 	CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs));
4728 
4729 	/* Check data consistency on clone */
4730 	memset(payload_read, 0xFF, payload_size);
4731 	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload,
4732 			  blob_op_complete, NULL);
4733 	poll_threads();
4734 	CU_ASSERT(g_bserrno == 0);
4735 	CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0);
4736 
4737 	/* 3) Create a second-level snapshot from the blob */
4738 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
4739 	poll_threads();
4740 	CU_ASSERT(g_bserrno == 0);
4741 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
4742 	snapshot2id = g_blobid;
4743 
4744 	spdk_bs_open_blob(bs, snapshot2id, blob_op_with_handle_complete, NULL);
4745 	poll_threads();
4746 	CU_ASSERT(g_bserrno == 0);
4747 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4748 	snapshot2 = g_blob;
4749 	CU_ASSERT(snapshot2->data_ro == true);
4750 	CU_ASSERT(snapshot2->md_ro == true);
4751 
4752 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot2) == 5);
4753 
4754 	CU_ASSERT(snapshot2->parent_id == snapshotid);
4755 
4756 	/* Write one cluster on the top level blob. This cluster (1) covers
4757 	 * already allocated cluster in the snapshot2, so shouldn't be inflated
4758 	 * at all */
4759 	spdk_blob_io_write(blob, channel, payload_write, pages_per_cluster,
4760 			   pages_per_cluster, blob_op_complete, NULL);
4761 	poll_threads();
4762 	CU_ASSERT(g_bserrno == 0);
4763 
4764 	/* Update expected result */
4765 	memcpy(payload_clone + cluster_size, payload_write, cluster_size);
4766 
4767 	/* Check data consistency on clone */
4768 	memset(payload_read, 0xFF, payload_size);
4769 	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload,
4770 			  blob_op_complete, NULL);
4771 	poll_threads();
4772 	CU_ASSERT(g_bserrno == 0);
4773 	CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0);
4774 
4775 
4776 	/* Close all blobs */
4777 	spdk_blob_close(blob, blob_op_complete, NULL);
4778 	poll_threads();
4779 	CU_ASSERT(g_bserrno == 0);
4780 
4781 	spdk_blob_close(snapshot2, blob_op_complete, NULL);
4782 	poll_threads();
4783 	CU_ASSERT(g_bserrno == 0);
4784 
4785 	spdk_blob_close(snapshot, blob_op_complete, NULL);
4786 	poll_threads();
4787 	CU_ASSERT(g_bserrno == 0);
4788 
4789 	/* Check snapshot-clone relations */
4790 	count = 2;
4791 	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);
4792 	CU_ASSERT(count == 1);
4793 	CU_ASSERT(ids[0] == snapshot2id);
4794 
4795 	count = 2;
4796 	CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0);
4797 	CU_ASSERT(count == 1);
4798 	CU_ASSERT(ids[0] == blobid);
4799 
4800 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshot2id);
4801 
4802 	free_clusters = spdk_bs_free_cluster_count(bs);
4803 	if (!decouple_parent) {
4804 		/* Do full blob inflation */
4805 		spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
4806 		poll_threads();
4807 		CU_ASSERT(g_bserrno == 0);
4808 
4809 		/* All clusters should be inflated (except one already allocated
4810 		 * in a top level blob) */
4811 		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 4);
4812 
4813 		/* Check if relation tree updated correctly */
4814 		count = 2;
4815 		CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);
4816 
4817 		/* snapshotid has one clone */
4818 		CU_ASSERT(count == 1);
4819 		CU_ASSERT(ids[0] == snapshot2id);
4820 
4821 		/* snapshot2id has no clones */
4822 		count = 2;
4823 		CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0);
4824 		CU_ASSERT(count == 0);
4825 
4826 		CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID);
4827 	} else {
4828 		/* Decouple parent of blob */
4829 		spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
4830 		poll_threads();
4831 		CU_ASSERT(g_bserrno == 0);
4832 
4833 		/* Only one cluster from the parent should be inflated (the second one
4834 		 * is covered by a cluster already written and allocated on the
4835 		 * top-level blob) */
4836 		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 1);
4837 
4838 		/* Check if relation tree updated correctly */
4839 		count = 2;
4840 		CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);
4841 
4842 		/* snapshotid has two clones now */
4843 		CU_ASSERT(count == 2);
4844 		CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
4845 		CU_ASSERT(ids[0] == snapshot2id || ids[1] == snapshot2id);
4846 
4847 		/* snapshot2id has no clones */
4848 		count = 2;
4849 		CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0);
4850 		CU_ASSERT(count == 0);
4851 
4852 		CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
4853 	}
4854 
4855 	/* Try to delete snapshot2 (should pass) */
4856 	spdk_bs_delete_blob(bs, snapshot2id, blob_op_complete, NULL);
4857 	poll_threads();
4858 	CU_ASSERT(g_bserrno == 0);
4859 
4860 	/* Try to delete base snapshot */
4861 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
4862 	poll_threads();
4863 	CU_ASSERT(g_bserrno == 0);
4864 
4865 	/* Reopen blob after snapshot deletion */
4866 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
4867 	poll_threads();
4868 	CU_ASSERT(g_bserrno == 0);
4869 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4870 	blob = g_blob;
4871 
4872 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
4873 
4874 	/* Check data consistency on inflated blob */
4875 	memset(payload_read, 0xFF, payload_size);
4876 	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload,
4877 			  blob_op_complete, NULL);
4878 	poll_threads();
4879 	CU_ASSERT(g_bserrno == 0);
4880 	CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0);
4881 
4882 	spdk_bs_free_io_channel(channel);
4883 	poll_threads();
4884 
4885 	free(payload_read);
4886 	free(payload_write);
4887 	free(payload_clone);
4888 
4889 	ut_blob_close_and_delete(bs, blob);
4890 }
4891 
4892 static void
4893 blob_inflate_rw(void)
4894 {
4895 	_blob_inflate_rw(false);
4896 	_blob_inflate_rw(true);
4897 }
4898 
4899 /**
4900  * Snapshot-clones relation test
4901  *
4902  *         snapshot
4903  *            |
4904  *      +-----+-----+
4905  *      |           |
4906  *   blob(ro)   snapshot2
4907  *      |           |
4908  *   clone2      clone
4909  */
4910 static void
4911 blob_relations(void)
4912 {
4913 	struct spdk_blob_store *bs;
4914 	struct spdk_bs_dev *dev;
4915 	struct spdk_bs_opts bs_opts;
4916 	struct spdk_blob_opts opts;
4917 	struct spdk_blob *blob, *snapshot, *snapshot2, *clone, *clone2;
4918 	spdk_blob_id blobid, cloneid, snapshotid, cloneid2, snapshotid2;
4919 	int rc;
4920 	size_t count;
4921 	spdk_blob_id ids[10] = {};
4922 
4923 	dev = init_dev();
4924 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
4925 	snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");
4926 
4927 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
4928 	poll_threads();
4929 	CU_ASSERT(g_bserrno == 0);
4930 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
4931 	bs = g_bs;
4932 
4933 	/* 1. Create blob with 10 clusters */
4934 
4935 	ut_spdk_blob_opts_init(&opts);
4936 	opts.num_clusters = 10;
4937 
4938 	blob = ut_blob_create_and_open(bs, &opts);
4939 	blobid = spdk_blob_get_id(blob);
4940 
4941 	CU_ASSERT(!spdk_blob_is_read_only(blob));
4942 	CU_ASSERT(!spdk_blob_is_snapshot(blob));
4943 	CU_ASSERT(!spdk_blob_is_clone(blob));
4944 	CU_ASSERT(!spdk_blob_is_thin_provisioned(blob));
4945 
4946 	/* blob should not have underlying snapshot nor clones */
4947 	CU_ASSERT(blob->parent_id == SPDK_BLOBID_INVALID);
4948 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID);
4949 	count = SPDK_COUNTOF(ids);
4950 	rc = spdk_blob_get_clones(bs, blobid, ids, &count);
4951 	CU_ASSERT(rc == 0);
4952 	CU_ASSERT(count == 0);
4953 
4954 
4955 	/* 2. Create snapshot */
4956 
4957 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
4958 	poll_threads();
4959 	CU_ASSERT(g_bserrno == 0);
4960 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
4961 	snapshotid = g_blobid;
4962 
4963 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
4964 	poll_threads();
4965 	CU_ASSERT(g_bserrno == 0);
4966 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4967 	snapshot = g_blob;
4968 
4969 	CU_ASSERT(spdk_blob_is_read_only(snapshot));
4970 	CU_ASSERT(spdk_blob_is_snapshot(snapshot));
4971 	CU_ASSERT(!spdk_blob_is_clone(snapshot));
4972 	CU_ASSERT(snapshot->parent_id == SPDK_BLOBID_INVALID);
4973 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid) == SPDK_BLOBID_INVALID);
4974 
4975 	/* Check if original blob is converted to the clone of snapshot */
4976 	CU_ASSERT(!spdk_blob_is_read_only(blob));
4977 	CU_ASSERT(!spdk_blob_is_snapshot(blob));
4978 	CU_ASSERT(spdk_blob_is_clone(blob));
4979 	CU_ASSERT(spdk_blob_is_thin_provisioned(blob));
4980 	CU_ASSERT(blob->parent_id == snapshotid);
4981 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
4982 
4983 	count = SPDK_COUNTOF(ids);
4984 	rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
4985 	CU_ASSERT(rc == 0);
4986 	CU_ASSERT(count == 1);
4987 	CU_ASSERT(ids[0] == blobid);
4988 
4989 
4990 	/* 3. Create clone from snapshot */
4991 
4992 	spdk_bs_create_clone(bs, snapshotid, NULL, blob_op_with_id_complete, NULL);
4993 	poll_threads();
4994 	CU_ASSERT(g_bserrno == 0);
4995 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
4996 	cloneid = g_blobid;
4997 
4998 	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
4999 	poll_threads();
5000 	CU_ASSERT(g_bserrno == 0);
5001 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5002 	clone = g_blob;
5003 
5004 	CU_ASSERT(!spdk_blob_is_read_only(clone));
5005 	CU_ASSERT(!spdk_blob_is_snapshot(clone));
5006 	CU_ASSERT(spdk_blob_is_clone(clone));
5007 	CU_ASSERT(spdk_blob_is_thin_provisioned(clone));
5008 	CU_ASSERT(clone->parent_id == snapshotid);
5009 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid);
5010 
5011 	count = SPDK_COUNTOF(ids);
5012 	rc = spdk_blob_get_clones(bs, cloneid, ids, &count);
5013 	CU_ASSERT(rc == 0);
5014 	CU_ASSERT(count == 0);
5015 
5016 	/* Check if clone is on the snapshot's list */
5017 	count = SPDK_COUNTOF(ids);
5018 	rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
5019 	CU_ASSERT(rc == 0);
5020 	CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
5021 	CU_ASSERT(ids[0] == cloneid || ids[1] == cloneid);
5022 
5023 
5024 	/* 4. Create snapshot of the clone */
5025 
5026 	spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL);
5027 	poll_threads();
5028 	CU_ASSERT(g_bserrno == 0);
5029 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5030 	snapshotid2 = g_blobid;
5031 
5032 	spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL);
5033 	poll_threads();
5034 	CU_ASSERT(g_bserrno == 0);
5035 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5036 	snapshot2 = g_blob;
5037 
5038 	CU_ASSERT(spdk_blob_is_read_only(snapshot2));
5039 	CU_ASSERT(spdk_blob_is_snapshot(snapshot2));
5040 	CU_ASSERT(spdk_blob_is_clone(snapshot2));
5041 	CU_ASSERT(snapshot2->parent_id == snapshotid);
5042 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid);
5043 
5044 	/* Check if clone is converted to the clone of snapshot2 and snapshot2
5045 	 * is a child of snapshot */
5046 	CU_ASSERT(!spdk_blob_is_read_only(clone));
5047 	CU_ASSERT(!spdk_blob_is_snapshot(clone));
5048 	CU_ASSERT(spdk_blob_is_clone(clone));
5049 	CU_ASSERT(spdk_blob_is_thin_provisioned(clone));
5050 	CU_ASSERT(clone->parent_id == snapshotid2);
5051 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2);
5052 
5053 	count = SPDK_COUNTOF(ids);
5054 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
5055 	CU_ASSERT(rc == 0);
5056 	CU_ASSERT(count == 1);
5057 	CU_ASSERT(ids[0] == cloneid);
5058 
5059 
5060 	/* 5. Try to create clone from read only blob */
5061 
5062 	/* Mark blob as read only */
5063 	spdk_blob_set_read_only(blob);
5064 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
5065 	poll_threads();
5066 	CU_ASSERT(g_bserrno == 0);
5067 
5068 	/* Check if previously created blob is read only clone */
5069 	CU_ASSERT(spdk_blob_is_read_only(blob));
5070 	CU_ASSERT(!spdk_blob_is_snapshot(blob));
5071 	CU_ASSERT(spdk_blob_is_clone(blob));
5072 	CU_ASSERT(spdk_blob_is_thin_provisioned(blob));
5073 
5074 	/* Create clone from read only blob */
5075 	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5076 	poll_threads();
5077 	CU_ASSERT(g_bserrno == 0);
5078 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5079 	cloneid2 = g_blobid;
5080 
5081 	spdk_bs_open_blob(bs, cloneid2, blob_op_with_handle_complete, NULL);
5082 	poll_threads();
5083 	CU_ASSERT(g_bserrno == 0);
5084 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5085 	clone2 = g_blob;
5086 
5087 	CU_ASSERT(!spdk_blob_is_read_only(clone2));
5088 	CU_ASSERT(!spdk_blob_is_snapshot(clone2));
5089 	CU_ASSERT(spdk_blob_is_clone(clone2));
5090 	CU_ASSERT(spdk_blob_is_thin_provisioned(clone2));
5091 
5092 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid);
5093 
5094 	count = SPDK_COUNTOF(ids);
5095 	rc = spdk_blob_get_clones(bs, blobid, ids, &count);
5096 	CU_ASSERT(rc == 0);
5097 
5098 	CU_ASSERT(count == 1);
5099 	CU_ASSERT(ids[0] == cloneid2);
5100 
5101 	/* Close blobs */
5102 
5103 	spdk_blob_close(clone2, blob_op_complete, NULL);
5104 	poll_threads();
5105 	CU_ASSERT(g_bserrno == 0);
5106 
5107 	spdk_blob_close(blob, blob_op_complete, NULL);
5108 	poll_threads();
5109 	CU_ASSERT(g_bserrno == 0);
5110 
5111 	spdk_blob_close(clone, blob_op_complete, NULL);
5112 	poll_threads();
5113 	CU_ASSERT(g_bserrno == 0);
5114 
5115 	spdk_blob_close(snapshot, blob_op_complete, NULL);
5116 	poll_threads();
5117 	CU_ASSERT(g_bserrno == 0);
5118 
5119 	spdk_blob_close(snapshot2, blob_op_complete, NULL);
5120 	poll_threads();
5121 	CU_ASSERT(g_bserrno == 0);
5122 
5123 	/* Try to delete snapshot with more than 1 clone */
5124 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
5125 	poll_threads();
5126 	CU_ASSERT(g_bserrno != 0);
5127 
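	/* Reload the blob store so the snapshot/clone relations are rebuilt from on-disk metadata */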
5128 	ut_bs_reload(&bs, &bs_opts);
5129 
5130 	/* A NULL ids array should fail with -ENOMEM and return the number of clones in count */
5131 	count = SPDK_COUNTOF(ids);
5132 	rc = spdk_blob_get_clones(bs, snapshotid, NULL, &count);
5133 	CU_ASSERT(rc == -ENOMEM);
5134 	CU_ASSERT(count == 2);
5135 
5136 	/* An ids array that is too small should also fail with -ENOMEM and report the required count */
5137 	count = 1;
5138 	rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
5139 	CU_ASSERT(rc == -ENOMEM);
5140 	CU_ASSERT(count == 2);
5141 
5142 
5143 	/* Verify structure of loaded blob store */
5144 
5145 	/* snapshot */
5146 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid) == SPDK_BLOBID_INVALID);
5147 
5148 	count = SPDK_COUNTOF(ids);
5149 	rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
5150 	CU_ASSERT(rc == 0);
5151 	CU_ASSERT(count == 2);
5152 	CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
5153 	CU_ASSERT(ids[0] == snapshotid2 || ids[1] == snapshotid2);
5154 
5155 	/* blob */
5156 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
5157 	count = SPDK_COUNTOF(ids);
5158 	rc = spdk_blob_get_clones(bs, blobid, ids, &count);
5159 	CU_ASSERT(rc == 0);
5160 	CU_ASSERT(count == 1);
5161 	CU_ASSERT(ids[0] == cloneid2);
5162 
5163 	/* clone */
5164 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2);
5165 	count = SPDK_COUNTOF(ids);
5166 	rc = spdk_blob_get_clones(bs, cloneid, ids, &count);
5167 	CU_ASSERT(rc == 0);
5168 	CU_ASSERT(count == 0);
5169 
5170 	/* snapshot2 */
5171 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid);
5172 	count = SPDK_COUNTOF(ids);
5173 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
5174 	CU_ASSERT(rc == 0);
5175 	CU_ASSERT(count == 1);
5176 	CU_ASSERT(ids[0] == cloneid);
5177 
5178 	/* clone2 */
5179 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid);
5180 	count = SPDK_COUNTOF(ids);
5181 	rc = spdk_blob_get_clones(bs, cloneid2, ids, &count);
5182 	CU_ASSERT(rc == 0);
5183 	CU_ASSERT(count == 0);
5184 
5185 	/* Try to delete a blob that the user should not be able to remove (a snapshot with clones) */
5186 
5187 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
5188 	poll_threads();
5189 	CU_ASSERT(g_bserrno != 0);
5190 
5191 	/* Remove all blobs */
5192 
5193 	spdk_bs_delete_blob(bs, cloneid, blob_op_complete, NULL);
5194 	poll_threads();
5195 	CU_ASSERT(g_bserrno == 0);
5196 
5197 	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
5198 	poll_threads();
5199 	CU_ASSERT(g_bserrno == 0);
5200 
5201 	spdk_bs_delete_blob(bs, cloneid2, blob_op_complete, NULL);
5202 	poll_threads();
5203 	CU_ASSERT(g_bserrno == 0);
5204 
5205 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
5206 	poll_threads();
5207 	CU_ASSERT(g_bserrno == 0);
5208 
5209 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
5210 	poll_threads();
5211 	CU_ASSERT(g_bserrno == 0);
5212 
5213 	spdk_bs_unload(bs, bs_op_complete, NULL);
5214 	poll_threads();
5215 	CU_ASSERT(g_bserrno == 0);
5216 
5217 	g_bs = NULL;
5218 }
5219 
5220 /**
5221  * Snapshot-clones relation test 2
5222  *
5223  *         snapshot1
5224  *            |
5225  *         snapshot2
5226  *            |
5227  *      +-----+-----+
5228  *      |           |
5229  *   blob(ro)   snapshot3
5230  *      |           |
5231  *      |       snapshot4
5232  *      |        |     |
5233  *   clone2   clone  clone3
5234  */
5235 static void
5236 blob_relations2(void)
5237 {
5238 	struct spdk_blob_store *bs;
5239 	struct spdk_bs_dev *dev;
5240 	struct spdk_bs_opts bs_opts;
5241 	struct spdk_blob_opts opts;
5242 	struct spdk_blob *blob, *snapshot1, *snapshot2, *snapshot3, *snapshot4, *clone, *clone2;
5243 	spdk_blob_id blobid, snapshotid1, snapshotid2, snapshotid3, snapshotid4, cloneid, cloneid2,
5244 		     cloneid3;
5245 	int rc;
5246 	size_t count;
5247 	spdk_blob_id ids[10] = {};
5248 
5249 	dev = init_dev();
5250 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
5251 	snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");
5252 
5253 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
5254 	poll_threads();
5255 	CU_ASSERT(g_bserrno == 0);
5256 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
5257 	bs = g_bs;
5258 
5259 	/* 1. Create blob with 10 clusters */
5260 
5261 	ut_spdk_blob_opts_init(&opts);
5262 	opts.num_clusters = 10;
5263 
5264 	blob = ut_blob_create_and_open(bs, &opts);
5265 	blobid = spdk_blob_get_id(blob);
5266 
5267 	/* 2. Create snapshot1 */
5268 
5269 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5270 	poll_threads();
5271 	CU_ASSERT(g_bserrno == 0);
5272 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5273 	snapshotid1 = g_blobid;
5274 
5275 	spdk_bs_open_blob(bs, snapshotid1, blob_op_with_handle_complete, NULL);
5276 	poll_threads();
5277 	CU_ASSERT(g_bserrno == 0);
5278 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5279 	snapshot1 = g_blob;
5280 
5281 	CU_ASSERT(snapshot1->parent_id == SPDK_BLOBID_INVALID);
5282 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid1) == SPDK_BLOBID_INVALID);
5283 
5287 	/* Check if blob is the clone of snapshot1 */
5288 	CU_ASSERT(blob->parent_id == snapshotid1);
5289 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid1);
5290 
5291 	count = SPDK_COUNTOF(ids);
5292 	rc = spdk_blob_get_clones(bs, snapshotid1, ids, &count);
5293 	CU_ASSERT(rc == 0);
5294 	CU_ASSERT(count == 1);
5295 	CU_ASSERT(ids[0] == blobid);
5296 
5297 	/* 3. Create another snapshot */
5298 
5299 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5300 	poll_threads();
5301 	CU_ASSERT(g_bserrno == 0);
5302 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5303 	snapshotid2 = g_blobid;
5304 
5305 	spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL);
5306 	poll_threads();
5307 	CU_ASSERT(g_bserrno == 0);
5308 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5309 	snapshot2 = g_blob;
5310 
5311 	CU_ASSERT(spdk_blob_is_clone(snapshot2));
5312 	CU_ASSERT(snapshot2->parent_id == snapshotid1);
5313 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid1);
5314 
5315 	/* Check if snapshot2 is the clone of snapshot1 and blob
5316 	 * is a child of snapshot2 */
5317 	CU_ASSERT(blob->parent_id == snapshotid2);
5318 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid2);
5319 
5320 	count = SPDK_COUNTOF(ids);
5321 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
5322 	CU_ASSERT(rc == 0);
5323 	CU_ASSERT(count == 1);
5324 	CU_ASSERT(ids[0] == blobid);
5325 
5326 	/* 4. Create clone from snapshot */
5327 
5328 	spdk_bs_create_clone(bs, snapshotid2, NULL, blob_op_with_id_complete, NULL);
5329 	poll_threads();
5330 	CU_ASSERT(g_bserrno == 0);
5331 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5332 	cloneid = g_blobid;
5333 
5334 	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
5335 	poll_threads();
5336 	CU_ASSERT(g_bserrno == 0);
5337 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5338 	clone = g_blob;
5339 
5340 	CU_ASSERT(clone->parent_id == snapshotid2);
5341 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2);
5342 
5343 	/* Check if clone is on the snapshot's list */
5344 	count = SPDK_COUNTOF(ids);
5345 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
5346 	CU_ASSERT(rc == 0);
5347 	CU_ASSERT(count == 2);
5348 	CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
5349 	CU_ASSERT(ids[0] == cloneid || ids[1] == cloneid);
5350 
5351 	/* 5. Create snapshot of the clone */
5352 
5353 	spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL);
5354 	poll_threads();
5355 	CU_ASSERT(g_bserrno == 0);
5356 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5357 	snapshotid3 = g_blobid;
5358 
5359 	spdk_bs_open_blob(bs, snapshotid3, blob_op_with_handle_complete, NULL);
5360 	poll_threads();
5361 	CU_ASSERT(g_bserrno == 0);
5362 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5363 	snapshot3 = g_blob;
5364 
5365 	CU_ASSERT(snapshot3->parent_id == snapshotid2);
5366 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid3) == snapshotid2);
5367 
5368 	/* Check if clone is converted to the clone of snapshot3 and snapshot3
5369 	 * is a child of snapshot2 */
5370 	CU_ASSERT(clone->parent_id == snapshotid3);
5371 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3);
5372 
5373 	count = SPDK_COUNTOF(ids);
5374 	rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count);
5375 	CU_ASSERT(rc == 0);
5376 	CU_ASSERT(count == 1);
5377 	CU_ASSERT(ids[0] == cloneid);
5378 
5379 	/* 6. Create another snapshot of the clone */
5380 
5381 	spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL);
5382 	poll_threads();
5383 	CU_ASSERT(g_bserrno == 0);
5384 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5385 	snapshotid4 = g_blobid;
5386 
5387 	spdk_bs_open_blob(bs, snapshotid4, blob_op_with_handle_complete, NULL);
5388 	poll_threads();
5389 	CU_ASSERT(g_bserrno == 0);
5390 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5391 	snapshot4 = g_blob;
5392 
5393 	CU_ASSERT(snapshot4->parent_id == snapshotid3);
5394 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid4) == snapshotid3);
5395 
5396 	/* Check if clone is converted to the clone of snapshot4 and snapshot4
5397 	 * is a child of snapshot3 */
5398 	CU_ASSERT(clone->parent_id == snapshotid4);
5399 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid4);
5400 
5401 	count = SPDK_COUNTOF(ids);
5402 	rc = spdk_blob_get_clones(bs, snapshotid4, ids, &count);
5403 	CU_ASSERT(rc == 0);
5404 	CU_ASSERT(count == 1);
5405 	CU_ASSERT(ids[0] == cloneid);
5406 
5407 	/* 7. Remove snapshot 4 */
5408 
5409 	ut_blob_close_and_delete(bs, snapshot4);
5410 
5411 	/* Check if relations are back to the state from before snapshot 4 was created */
5412 	CU_ASSERT(clone->parent_id == snapshotid3);
5413 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3);
5414 
5415 	count = SPDK_COUNTOF(ids);
5416 	rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count);
5417 	CU_ASSERT(rc == 0);
5418 	CU_ASSERT(count == 1);
5419 	CU_ASSERT(ids[0] == cloneid);
5420 
5421 	/* 8. Create second clone of snapshot 3 and try to remove snapshot 3 */
5422 
5423 	spdk_bs_create_clone(bs, snapshotid3, NULL, blob_op_with_id_complete, NULL);
5424 	poll_threads();
5425 	CU_ASSERT(g_bserrno == 0);
5426 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5427 	cloneid3 = g_blobid;
5428 
5429 	spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL);
5430 	poll_threads();
5431 	CU_ASSERT(g_bserrno != 0);
5432 
5433 	/* 9. Open snapshot 3 again and try to remove it while clone 3 is closed */
5434 
5435 	spdk_bs_open_blob(bs, snapshotid3, blob_op_with_handle_complete, NULL);
5436 	poll_threads();
5437 	CU_ASSERT(g_bserrno == 0);
5438 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5439 	snapshot3 = g_blob;
5440 
5441 	spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL);
5442 	poll_threads();
5443 	CU_ASSERT(g_bserrno != 0);
5444 
5445 	spdk_blob_close(snapshot3, blob_op_complete, NULL);
5446 	poll_threads();
5447 	CU_ASSERT(g_bserrno == 0);
5448 
5449 	spdk_bs_delete_blob(bs, cloneid3, blob_op_complete, NULL);
5450 	poll_threads();
5451 	CU_ASSERT(g_bserrno == 0);
5452 
5453 	/* 10. Remove snapshot 1 */
5454 
5455 	ut_blob_close_and_delete(bs, snapshot1);
5456 
5457 	/* Check relations after removing snapshot 1: snapshot2 should have no parent and keep both of its clones */
5458 	CU_ASSERT(snapshot2->parent_id == SPDK_BLOBID_INVALID);
5459 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == SPDK_BLOBID_INVALID);
5460 
5461 	count = SPDK_COUNTOF(ids);
5462 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
5463 	CU_ASSERT(rc == 0);
5464 	CU_ASSERT(count == 2);
5465 	CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
5466 	CU_ASSERT(ids[0] == snapshotid3 || ids[1] == snapshotid3);
5467 
5468 	/* 11. Try to create clone from read only blob */
5469 
5470 	/* Mark blob as read only */
5471 	spdk_blob_set_read_only(blob);
5472 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
5473 	poll_threads();
5474 	CU_ASSERT(g_bserrno == 0);
5475 
5476 	/* Create clone from read only blob */
5477 	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5478 	poll_threads();
5479 	CU_ASSERT(g_bserrno == 0);
5480 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5481 	cloneid2 = g_blobid;
5482 
5483 	spdk_bs_open_blob(bs, cloneid2, blob_op_with_handle_complete, NULL);
5484 	poll_threads();
5485 	CU_ASSERT(g_bserrno == 0);
5486 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5487 	clone2 = g_blob;
5488 
5489 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid);
5490 
5491 	count = SPDK_COUNTOF(ids);
5492 	rc = spdk_blob_get_clones(bs, blobid, ids, &count);
5493 	CU_ASSERT(rc == 0);
5494 	CU_ASSERT(count == 1);
5495 	CU_ASSERT(ids[0] == cloneid2);
5496 
5497 	/* Close blobs */
5498 
5499 	spdk_blob_close(clone2, blob_op_complete, NULL);
5500 	poll_threads();
5501 	CU_ASSERT(g_bserrno == 0);
5502 
5503 	spdk_blob_close(blob, blob_op_complete, NULL);
5504 	poll_threads();
5505 	CU_ASSERT(g_bserrno == 0);
5506 
5507 	spdk_blob_close(clone, blob_op_complete, NULL);
5508 	poll_threads();
5509 	CU_ASSERT(g_bserrno == 0);
5510 
5511 	spdk_blob_close(snapshot2, blob_op_complete, NULL);
5512 	poll_threads();
5513 	CU_ASSERT(g_bserrno == 0);
5514 
5515 	spdk_blob_close(snapshot3, blob_op_complete, NULL);
5516 	poll_threads();
5517 	CU_ASSERT(g_bserrno == 0);
5518 
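	/* Reload the blob store and re-verify the whole snapshot chain from on-disk metadata */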
5519 	ut_bs_reload(&bs, &bs_opts);
5520 
5521 	/* Verify structure of loaded blob store */
5522 
5523 	/* snapshot2 */
5524 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == SPDK_BLOBID_INVALID);
5525 
5526 	count = SPDK_COUNTOF(ids);
5527 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
5528 	CU_ASSERT(rc == 0);
5529 	CU_ASSERT(count == 2);
5530 	CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
5531 	CU_ASSERT(ids[0] == snapshotid3 || ids[1] == snapshotid3);
5532 
5533 	/* blob */
5534 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid2);
5535 	count = SPDK_COUNTOF(ids);
5536 	rc = spdk_blob_get_clones(bs, blobid, ids, &count);
5537 	CU_ASSERT(rc == 0);
5538 	CU_ASSERT(count == 1);
5539 	CU_ASSERT(ids[0] == cloneid2);
5540 
5541 	/* clone */
5542 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3);
5543 	count = SPDK_COUNTOF(ids);
5544 	rc = spdk_blob_get_clones(bs, cloneid, ids, &count);
5545 	CU_ASSERT(rc == 0);
5546 	CU_ASSERT(count == 0);
5547 
5548 	/* snapshot3 */
5549 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid3) == snapshotid2);
5550 	count = SPDK_COUNTOF(ids);
5551 	rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count);
5552 	CU_ASSERT(rc == 0);
5553 	CU_ASSERT(count == 1);
5554 	CU_ASSERT(ids[0] == cloneid);
5555 
5556 	/* clone2 */
5557 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid);
5558 	count = SPDK_COUNTOF(ids);
5559 	rc = spdk_blob_get_clones(bs, cloneid2, ids, &count);
5560 	CU_ASSERT(rc == 0);
5561 	CU_ASSERT(count == 0);
5562 
5563 	/* Try to delete all blobs in the worst possible order */
5564 
5565 	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
5566 	poll_threads();
5567 	CU_ASSERT(g_bserrno != 0);
5568 
5569 	spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL);
5570 	poll_threads();
5571 	CU_ASSERT(g_bserrno == 0);
5572 
5573 	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
5574 	poll_threads();
5575 	CU_ASSERT(g_bserrno != 0);
5576 
5577 	spdk_bs_delete_blob(bs, cloneid, blob_op_complete, NULL);
5578 	poll_threads();
5579 	CU_ASSERT(g_bserrno == 0);
5580 
5581 	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
5582 	poll_threads();
5583 	CU_ASSERT(g_bserrno == 0);
5584 
5585 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
5586 	poll_threads();
5587 	CU_ASSERT(g_bserrno == 0);
5588 
5589 	spdk_bs_delete_blob(bs, cloneid2, blob_op_complete, NULL);
5590 	poll_threads();
5591 	CU_ASSERT(g_bserrno == 0);
5592 
5593 	spdk_bs_unload(bs, bs_op_complete, NULL);
5594 	poll_threads();
5595 	CU_ASSERT(g_bserrno == 0);
5596 
5597 	g_bs = NULL;
5598 }
5599 
5600 /**
5601  * Snapshot-clones relation test 3
5602  *
5603  *         snapshot0
5604  *            |
5605  *         snapshot1
5606  *            |
5607  *         snapshot2
5608  *            |
5609  *           blob
5610  */
5611 static void
5612 blob_relations3(void)
5613 {
5614 	struct spdk_blob_store *bs;
5615 	struct spdk_bs_dev *dev;
5616 	struct spdk_io_channel *channel;
5617 	struct spdk_bs_opts bs_opts;
5618 	struct spdk_blob_opts opts;
5619 	struct spdk_blob *blob;
5620 	spdk_blob_id blobid, snapshotid0, snapshotid1, snapshotid2;
5621 
5622 	dev = init_dev();
5623 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
5624 	snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");
5625 
5626 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
5627 	poll_threads();
5628 	CU_ASSERT(g_bserrno == 0);
5629 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
5630 	bs = g_bs;
5631 
5632 	channel = spdk_bs_alloc_io_channel(bs);
5633 	SPDK_CU_ASSERT_FATAL(channel != NULL);
5634 
5635 	/* 1. Create blob with 10 clusters */
5636 	ut_spdk_blob_opts_init(&opts);
5637 	opts.num_clusters = 10;
5638 
5639 	blob = ut_blob_create_and_open(bs, &opts);
5640 	blobid = spdk_blob_get_id(blob);
5641 
5642 	/* 2. Create snapshot0 */
5643 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5644 	poll_threads();
5645 	CU_ASSERT(g_bserrno == 0);
5646 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5647 	snapshotid0 = g_blobid;
5648 
5649 	/* 3. Create snapshot1 */
5650 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5651 	poll_threads();
5652 	CU_ASSERT(g_bserrno == 0);
5653 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5654 	snapshotid1 = g_blobid;
5655 
5656 	/* 4. Create snapshot2 */
5657 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5658 	poll_threads();
5659 	CU_ASSERT(g_bserrno == 0);
5660 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5661 	snapshotid2 = g_blobid;
5662 
5663 	/* 5. Decouple blob */
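	/* Note: decoupling is expected to re-parent the blob from its immediate parent (snapshot2)
	 * to the grandparent (snapshot1), removing the dependency on snapshot2. */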
5664 	spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
5665 	poll_threads();
5666 	CU_ASSERT(g_bserrno == 0);
5667 
5668 	/* 6. Decouple snapshot2. Make sure updating md of snapshot2 is possible */
5669 	spdk_bs_blob_decouple_parent(bs, channel, snapshotid2, blob_op_complete, NULL);
5670 	poll_threads();
5671 	CU_ASSERT(g_bserrno == 0);
5672 
5673 	/* 7. Delete blob */
5674 	spdk_blob_close(blob, blob_op_complete, NULL);
5675 	poll_threads();
5676 	CU_ASSERT(g_bserrno == 0);
5677 
5678 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
5679 	poll_threads();
5680 	CU_ASSERT(g_bserrno == 0);
5681 
5682 	/* 8. Delete snapshot2.
5683 	 * If md of snapshot 2 was updated, it should be possible to delete it */
5684 	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
5685 	poll_threads();
5686 	CU_ASSERT(g_bserrno == 0);
5687 
5688 	/* Remove remaining blobs and unload bs */
5689 	spdk_bs_delete_blob(bs, snapshotid1, blob_op_complete, NULL);
5690 	poll_threads();
5691 	CU_ASSERT(g_bserrno == 0);
5692 
5693 	spdk_bs_delete_blob(bs, snapshotid0, blob_op_complete, NULL);
5694 	poll_threads();
5695 	CU_ASSERT(g_bserrno == 0);
5696 
5697 	spdk_bs_free_io_channel(channel);
5698 	poll_threads();
5699 
5700 	spdk_bs_unload(bs, bs_op_complete, NULL);
5701 	poll_threads();
5702 	CU_ASSERT(g_bserrno == 0);
5703 
5704 	g_bs = NULL;
5705 }
5706 
5707 static void
5708 blobstore_clean_power_failure(void)
5709 {
5710 	struct spdk_blob_store *bs;
5711 	struct spdk_blob *blob;
5712 	struct spdk_power_failure_thresholds thresholds = {};
5713 	bool clean = false;
5714 	struct spdk_bs_super_block *super = (struct spdk_bs_super_block *)&g_dev_buffer[0];
5715 	struct spdk_bs_super_block super_copy = {};
5716 
5717 	thresholds.general_threshold = 1;
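	/* Inject a power failure after a progressively larger number of dev operations;
	 * the loop ends once spdk_blob_sync_md() completes without hitting the failure point. */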
5718 	while (!clean) {
5719 		/* Create bs and blob */
5720 		suite_blob_setup();
5721 		SPDK_CU_ASSERT_FATAL(g_bs != NULL);
5722 		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5723 		bs = g_bs;
5724 		blob = g_blob;
5725 
5726 		/* The super block should not change for the rest of the UT;
5727 		 * save it now and compare it later. */
5728 		memcpy(&super_copy, super, sizeof(struct spdk_bs_super_block));
5729 		SPDK_CU_ASSERT_FATAL(super->clean == 0);
5730 		SPDK_CU_ASSERT_FATAL(bs->clean == 0);
5731 
5732 		/* Force the bs/super block into a clean state and mark the blob dirty,
5733 		 * so that the next md sync triggers a blob persist. */
5734 		blob->state = SPDK_BLOB_STATE_DIRTY;
5735 		bs->clean = 1;
5736 		super->clean = 1;
5737 		super->crc = blob_md_page_calc_crc(super);
5738 
5739 		g_bserrno = -1;
5740 		dev_set_power_failure_thresholds(thresholds);
5741 		spdk_blob_sync_md(blob, blob_op_complete, NULL);
5742 		poll_threads();
5743 		dev_reset_power_failure_event();
5744 
5745 		if (g_bserrno == 0) {
5746 			/* After successful md sync, both bs and super block
5747 			 * should be marked as not clean. */
5748 			SPDK_CU_ASSERT_FATAL(bs->clean == 0);
5749 			SPDK_CU_ASSERT_FATAL(super->clean == 0);
5750 			clean = true;
5751 		}
5752 
5753 		/* Depending on the point of failure, the super block may or may not have been updated. */
5754 		super_copy.clean = super->clean;
5755 		super_copy.crc = blob_md_page_calc_crc(&super_copy);
5756 		/* Verify that all other values in the super block remained unchanged. */
5757 		SPDK_CU_ASSERT_FATAL(!memcmp(&super_copy, super, sizeof(struct spdk_bs_super_block)));
5758 
5759 		/* Delete blob and unload bs */
5760 		suite_blob_cleanup();
5761 
5762 		thresholds.general_threshold++;
5763 	}
5764 }
5765 
5766 static void
5767 blob_delete_snapshot_power_failure(void)
5768 {
5769 	struct spdk_bs_dev *dev;
5770 	struct spdk_blob_store *bs;
5771 	struct spdk_blob_opts opts;
5772 	struct spdk_blob *blob, *snapshot;
5773 	struct spdk_power_failure_thresholds thresholds = {};
5774 	spdk_blob_id blobid, snapshotid;
5775 	const void *value;
5776 	size_t value_len;
5777 	size_t count;
5778 	spdk_blob_id ids[3] = {};
5779 	int rc;
5780 	bool deleted = false;
5781 	int delete_snapshot_bserrno = -1;
5782 
5783 	thresholds.general_threshold = 1;
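	/* Each iteration injects a power failure at a later point of the snapshot deletion,
	 * then reloads the dirty blob store and checks that blob and snapshot state stay consistent. */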
5784 	while (!deleted) {
5785 		dev = init_dev();
5786 
5787 		spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
5788 		poll_threads();
5789 		CU_ASSERT(g_bserrno == 0);
5790 		SPDK_CU_ASSERT_FATAL(g_bs != NULL);
5791 		bs = g_bs;
5792 
5793 		/* Create blob */
5794 		ut_spdk_blob_opts_init(&opts);
5795 		opts.num_clusters = 10;
5796 
5797 		spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
5798 		poll_threads();
5799 		CU_ASSERT(g_bserrno == 0);
5800 		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5801 		blobid = g_blobid;
5802 
5803 		/* Create snapshot */
5804 		spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5805 		poll_threads();
5806 		CU_ASSERT(g_bserrno == 0);
5807 		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5808 		snapshotid = g_blobid;
5809 		SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1));
5810 		SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11));
5811 
5812 		dev_set_power_failure_thresholds(thresholds);
5813 
5814 		spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
5815 		poll_threads();
5816 		delete_snapshot_bserrno = g_bserrno;
5817 
5818 		/* Do not shut down cleanly. The assumption is that once snapshot deletion
5819 		 * reports success, the changes to both blobs are already persisted. */
5820 		dev_reset_power_failure_event();
5821 		ut_bs_dirty_load(&bs, NULL);
5822 
5823 		SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1));
5824 		SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11));
5825 
5826 		spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
5827 		poll_threads();
5828 		CU_ASSERT(g_bserrno == 0);
5829 		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5830 		blob = g_blob;
5831 		SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == true);
5832 
5833 		spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
5834 		poll_threads();
5835 
5836 		if (g_bserrno == 0) {
5837 			SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5838 			snapshot = g_blob;
5839 			CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
5840 			count = SPDK_COUNTOF(ids);
5841 			rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
5842 			CU_ASSERT(rc == 0);
5843 			CU_ASSERT(count == 1);
5844 			CU_ASSERT(ids[0] == blobid);
5845 			rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_PENDING_REMOVAL, &value, &value_len);
5846 			CU_ASSERT(rc != 0);
5847 			SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(snapshot) == false);
5848 
5849 			spdk_blob_close(snapshot, blob_op_complete, NULL);
5850 			poll_threads();
5851 			CU_ASSERT(g_bserrno == 0);
5852 		} else {
5853 			CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID);
5854 			/* The snapshot might have been left in an unrecoverable state, so it does not open.
5855 			 * Deletion may still perform further changes to the clone after that point,
5856 			 * so keep iterating until the snapshot delete call reports success. */
5857 			if (delete_snapshot_bserrno == 0) {
5858 				deleted = true;
5859 			}
5860 		}
5861 
5862 		spdk_blob_close(blob, blob_op_complete, NULL);
5863 		poll_threads();
5864 		CU_ASSERT(g_bserrno == 0);
5865 
5866 		spdk_bs_unload(bs, bs_op_complete, NULL);
5867 		poll_threads();
5868 		CU_ASSERT(g_bserrno == 0);
5869 
5870 		thresholds.general_threshold++;
5871 	}
5872 }
5873 
5874 static void
5875 blob_create_snapshot_power_failure(void)
5876 {
5877 	struct spdk_blob_store *bs = g_bs;
5878 	struct spdk_bs_dev *dev;
5879 	struct spdk_blob_opts opts;
5880 	struct spdk_blob *blob, *snapshot;
5881 	struct spdk_power_failure_thresholds thresholds = {};
5882 	spdk_blob_id blobid, snapshotid;
5883 	const void *value;
5884 	size_t value_len;
5885 	size_t count;
5886 	spdk_blob_id ids[3] = {};
5887 	int rc;
5888 	bool created = false;
5889 	int create_snapshot_bserrno = -1;
5890 
5891 	thresholds.general_threshold = 1;
5892 	while (!created) {
5893 		dev = init_dev();
5894 
5895 		spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
5896 		poll_threads();
5897 		CU_ASSERT(g_bserrno == 0);
5898 		SPDK_CU_ASSERT_FATAL(g_bs != NULL);
5899 		bs = g_bs;
5900 
5901 		/* Create blob */
5902 		ut_spdk_blob_opts_init(&opts);
5903 		opts.num_clusters = 10;
5904 
5905 		spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
5906 		poll_threads();
5907 		CU_ASSERT(g_bserrno == 0);
5908 		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5909 		blobid = g_blobid;
5910 		SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1));
5911 		SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11));
5912 
5913 		dev_set_power_failure_thresholds(thresholds);
5914 
5915 		/* Create snapshot */
5916 		spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5917 		poll_threads();
5918 		create_snapshot_bserrno = g_bserrno;
5919 		snapshotid = g_blobid;
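		/* On a failed snapshot creation the callback reports SPDK_BLOBID_INVALID,
		 * so snapshotid may be invalid here. */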
5920 		SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1));
5921 		SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11));
5922 
5923 		/* Do not shut down cleanly. The assumption is that once snapshot creation
5924 		 * reports success, both blobs are power-fail safe. */
5925 		dev_reset_power_failure_event();
5926 		ut_bs_dirty_load(&bs, NULL);
5927 
5928 		SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1));
5929 		SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11));
5930 
5931 		spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
5932 		poll_threads();
5933 		CU_ASSERT(g_bserrno == 0);
5934 		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5935 		blob = g_blob;
5936 
5937 		if (snapshotid != SPDK_BLOBID_INVALID) {
5938 			spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
5939 			poll_threads();
5940 		}
5941 
5942 		if ((snapshotid != SPDK_BLOBID_INVALID) && (g_bserrno == 0)) {
5943 			SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5944 			snapshot = g_blob;
5945 			SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == true);
5946 			SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(snapshot) == false);
5947 			CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
5948 			count = SPDK_COUNTOF(ids);
5949 			rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
5950 			CU_ASSERT(rc == 0);
5951 			CU_ASSERT(count == 1);
5952 			CU_ASSERT(ids[0] == blobid);
5953 			rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_IN_PROGRESS, &value, &value_len);
5954 			CU_ASSERT(rc != 0);
5955 
5956 			spdk_blob_close(snapshot, blob_op_complete, NULL);
5957 			poll_threads();
5958 			CU_ASSERT(g_bserrno == 0);
5959 			if (create_snapshot_bserrno == 0) {
5960 				created = true;
5961 			}
5962 		} else {
5963 			CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID);
5964 			SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == false);
5965 		}
5966 
5967 		spdk_blob_close(blob, blob_op_complete, NULL);
5968 		poll_threads();
5969 		CU_ASSERT(g_bserrno == 0);
5970 
5971 		spdk_bs_unload(bs, bs_op_complete, NULL);
5972 		poll_threads();
5973 		CU_ASSERT(g_bserrno == 0);
5974 
5975 		thresholds.general_threshold++;
5976 	}
5977 }
5978 
5979 static void
5980 test_io_write(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
5981 {
5982 	uint8_t payload_ff[64 * 512];
5983 	uint8_t payload_aa[64 * 512];
5984 	uint8_t payload_00[64 * 512];
5985 	uint8_t *cluster0, *cluster1;
5986 
5987 	memset(payload_ff, 0xFF, sizeof(payload_ff));
5988 	memset(payload_aa, 0xAA, sizeof(payload_aa));
5989 	memset(payload_00, 0x00, sizeof(payload_00));
5990 
5991 	/* Try to perform I/O with io unit = 512 */
5992 	spdk_blob_io_write(blob, channel, payload_ff, 0, 1, blob_op_complete, NULL);
5993 	poll_threads();
5994 	CU_ASSERT(g_bserrno == 0);
5995 
5996 	/* If the blob is thin provisioned, the first cluster should be allocated by now */
5997 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[0] != 0);
5998 	cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen];
5999 
6000 	/* Each character 0-F symbolizes a single 512-byte io_unit filled with that character.
6001 	 * Pages are separated by |. The whole block [...] symbolizes one cluster (containing 4 pages). */
6002 	/* cluster0: [ F000 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
6003 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6004 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 31 * 512) == 0);
6005 
6006 	/* Verify write with offset on first page */
6007 	spdk_blob_io_write(blob, channel, payload_ff, 2, 1, blob_op_complete, NULL);
6008 	poll_threads();
6009 	CU_ASSERT(g_bserrno == 0);
6010 
6011 	/* cluster0: [ F0F0 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
6012 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6013 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6014 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6015 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6016 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_00, 28 * 512) == 0);
6017 
6018 	/* Verify a multi-io_unit write still within the first page */
6019 	spdk_blob_io_write(blob, channel, payload_ff, 4, 4, blob_op_complete, NULL);
6020 	poll_threads();
6021 
6022 	/* cluster0: [ F0F0 FFFF | 0000 0000 | 0000 0000 | 0000 0000 ] */
6023 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6024 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6025 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6026 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6027 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 4 * 512) == 0);
6028 	CU_ASSERT(memcmp(cluster0 + 8 * 512, payload_00, 24 * 512) == 0);
6029 
6030 	/* Verify write with offset on second page */
6031 	spdk_blob_io_write(blob, channel, payload_ff, 8, 4, blob_op_complete, NULL);
6032 	poll_threads();
6033 
6034 	/* cluster0: [ F0F0 FFFF | FFFF 0000 | 0000 0000 | 0000 0000 ] */
6035 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6036 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6037 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6038 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6039 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 8 * 512) == 0);
6040 	CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0);
6041 
6042 	/* Verify write across multiple pages */
6043 	spdk_blob_io_write(blob, channel, payload_aa, 4, 8, blob_op_complete, NULL);
6044 	poll_threads();
6045 
6046 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 0000 ] */
6047 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6048 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6049 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6050 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6051 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
6052 	CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0);
6053 
6054 	/* Verify write across multiple clusters */
6055 	spdk_blob_io_write(blob, channel, payload_ff, 28, 8, blob_op_complete, NULL);
6056 	poll_threads();
6057 
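	/* The write crossing the cluster boundary should have allocated the second cluster */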
6058 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0);
6059 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
6060 
6061 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6062 	 * cluster1: [ FFFF 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
6063 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6064 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6065 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6066 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6067 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
6068 	CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0);
6069 
6070 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0);
6071 	CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 28 * 512) == 0);
6072 
6073 	/* Verify write to second cluster */
6074 	spdk_blob_io_write(blob, channel, payload_ff, 32 + 12, 2, blob_op_complete, NULL);
6075 	poll_threads();
6076 
6077 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0);
6078 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
6079 
6080 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6081 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] */
6082 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6083 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6084 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6085 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6086 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
6087 	CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0);
6088 
6089 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0);
6090 	CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 8 * 512) == 0);
6091 	CU_ASSERT(memcmp(cluster1 + 12 * 512, payload_ff, 2 * 512) == 0);
6092 	CU_ASSERT(memcmp(cluster1 + 14 * 512, payload_00, 18 * 512) == 0);
6093 }
6094 
6095 static void
6096 test_io_read(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
6097 {
6098 	uint8_t payload_read[64 * 512];
6099 	uint8_t payload_ff[64 * 512];
6100 	uint8_t payload_aa[64 * 512];
6101 	uint8_t payload_00[64 * 512];
6102 
6103 	memset(payload_ff, 0xFF, sizeof(payload_ff));
6104 	memset(payload_aa, 0xAA, sizeof(payload_aa));
6105 	memset(payload_00, 0x00, sizeof(payload_00));
6106 
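	/* The expected contents below correspond to the pattern left by test_io_write() */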
6107 	/* Read only first io unit */
6108 	/* cluster0: [ (F)0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6109 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
6110 	 * payload_read: F000 0000 | 0000 0000 ... */
6111 	memset(payload_read, 0x00, sizeof(payload_read));
6112 	spdk_blob_io_read(blob, channel, payload_read, 0, 1, blob_op_complete, NULL);
6113 	poll_threads();
6114 	CU_ASSERT(g_bserrno == 0);
6115 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
6116 	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 31 * 512) == 0);
6117 
6118 	/* Read four io_units starting from offset = 2
6119 	 * cluster0: [ F0(F0 AA)AA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6120 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
6121 	 * payload_read: F0AA 0000 | 0000 0000 ... */
6122 
6123 	memset(payload_read, 0x00, sizeof(payload_read));
6124 	spdk_blob_io_read(blob, channel, payload_read, 2, 4, blob_op_complete, NULL);
6125 	poll_threads();
6126 	CU_ASSERT(g_bserrno == 0);
6127 
6128 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
6129 	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0);
6130 	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_aa, 512) == 0);
6131 	CU_ASSERT(memcmp(payload_read + 3 * 512, payload_aa, 512) == 0);
6132 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0);
6133 
6134 	/* Read eight io_units across multiple pages
6135 	 * cluster0: [ F0F0 (AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ]
6136 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
6137 	 * payload_read: AAAA AAAA | 0000 0000 ... */
6138 	memset(payload_read, 0x00, sizeof(payload_read));
6139 	spdk_blob_io_read(blob, channel, payload_read, 4, 8, blob_op_complete, NULL);
6140 	poll_threads();
6141 	CU_ASSERT(g_bserrno == 0);
6142 
6143 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_aa, 8 * 512) == 0);
6144 	CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0);
6145 
6146 	/* Read eight io_units across multiple clusters
6147 	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 (FFFF ]
6148 	 * cluster1: [ FFFF) 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
6149 	 * payload_read: FFFF FFFF | 0000 0000 ... */
6150 	memset(payload_read, 0x00, sizeof(payload_read));
6151 	spdk_blob_io_read(blob, channel, payload_read, 28, 8, blob_op_complete, NULL);
6152 	poll_threads();
6153 	CU_ASSERT(g_bserrno == 0);
6154 
6155 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 8 * 512) == 0);
6156 	CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0);
6157 
6158 	/* Read four io_units from second cluster
6159 	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6160 	 * cluster1: [ FFFF 0000 | 00(00 FF)00 | 0000 0000 | 0000 0000 ]
6161 	 * payload_read: 00FF 0000 | 0000 0000 ... */
6162 	memset(payload_read, 0x00, sizeof(payload_read));
6163 	spdk_blob_io_read(blob, channel, payload_read, 32 + 10, 4, blob_op_complete, NULL);
6164 	poll_threads();
6165 	CU_ASSERT(g_bserrno == 0);
6166 
6167 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_00, 2 * 512) == 0);
6168 	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 2 * 512) == 0);
6169 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0);
6170 
6171 	/* Read second cluster
6172 	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6173 	 * cluster1: [ (FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ]
6174 	 * payload_read: FFFF 0000 | 0000 FF00 ... */
6175 	memset(payload_read, 0x00, sizeof(payload_read));
6176 	spdk_blob_io_read(blob, channel, payload_read, 32, 32, blob_op_complete, NULL);
6177 	poll_threads();
6178 	CU_ASSERT(g_bserrno == 0);
6179 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 4 * 512) == 0);
6180 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 8 * 512) == 0);
6181 	CU_ASSERT(memcmp(payload_read + 12 * 512, payload_ff, 2 * 512) == 0);
6182 	CU_ASSERT(memcmp(payload_read + 14 * 512, payload_00, 18 * 512) == 0);
6183 
6184 	/* Read whole two clusters
6185 	 * cluster0: [ (F0F0 AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ]
6186 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ] */
6187 	memset(payload_read, 0x00, sizeof(payload_read));
6188 	spdk_blob_io_read(blob, channel, payload_read, 0, 64, blob_op_complete, NULL);
6189 	poll_threads();
6190 	CU_ASSERT(g_bserrno == 0);
6191 
6192 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
6193 	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0);
6194 	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 512) == 0);
6195 	CU_ASSERT(memcmp(payload_read + 3 * 512, payload_00, 512) == 0);
6196 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_aa, 8 * 512) == 0);
6197 	CU_ASSERT(memcmp(payload_read + 28 * 512, payload_ff, 4 * 512) == 0);
6198 
6199 	CU_ASSERT(memcmp(payload_read + (32 + 0) * 512, payload_ff, 4 * 512) == 0);
6200 	CU_ASSERT(memcmp(payload_read + (32 + 4) * 512, payload_00, 8 * 512) == 0);
6201 	CU_ASSERT(memcmp(payload_read + (32 + 12) * 512, payload_ff, 2 * 512) == 0);
6202 	CU_ASSERT(memcmp(payload_read + (32 + 14) * 512, payload_00, 18 * 512) == 0);
6203 }
6204 
6205 
6206 static void
6207 test_io_unmap(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
6208 {
6209 	uint8_t payload_ff[64 * 512];
6210 	uint8_t payload_aa[64 * 512];
6211 	uint8_t payload_00[64 * 512];
6212 	uint8_t *cluster0, *cluster1;
6213 
6214 	memset(payload_ff, 0xFF, sizeof(payload_ff));
6215 	memset(payload_aa, 0xAA, sizeof(payload_aa));
6216 	memset(payload_00, 0x00, sizeof(payload_00));
6217 
6218 	cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen];
6219 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
6220 
6221 	/* Unmap the whole blob; both clusters should read back as zeroes */
6222 	spdk_blob_io_unmap(blob, channel, 0, 64, blob_op_complete, NULL);
6223 	poll_threads();
6224 
6225 	CU_ASSERT(g_bserrno == 0);
6226 
6227 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_00, 32 * 512) == 0);
6228 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_00, 32 * 512) == 0);
6229 }
6230 
6231 static void
6232 test_io_zeroes(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
6233 {
6234 	uint8_t payload_ff[64 * 512];
6235 	uint8_t payload_aa[64 * 512];
6236 	uint8_t payload_00[64 * 512];
6237 	uint8_t *cluster0, *cluster1;
6238 
6239 	memset(payload_ff, 0xFF, sizeof(payload_ff));
6240 	memset(payload_aa, 0xAA, sizeof(payload_aa));
6241 	memset(payload_00, 0x00, sizeof(payload_00));
6242 
6243 	cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen];
6244 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
6245 
6246 	/* Write zeroes over the whole blob; both clusters should read back as zeroes */
6247 	spdk_blob_io_write_zeroes(blob, channel, 0, 64, blob_op_complete, NULL);
6248 	poll_threads();
6249 
6250 	CU_ASSERT(g_bserrno == 0);
6251 
6252 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_00, 32 * 512) == 0);
6253 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_00, 32 * 512) == 0);
6254 }
6255 
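/* Helper that issues a writev either through the regular API or, when io_opts is given,
 * through spdk_blob_io_writev_ext(), and verifies the ext options reached the backing dev. */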
6256 static inline void
6257 test_blob_io_writev(struct spdk_blob *blob, struct spdk_io_channel *channel,
6258 		    struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
6259 		    spdk_blob_op_complete cb_fn, void *cb_arg, struct spdk_blob_ext_io_opts *io_opts)
6260 {
6261 	if (io_opts) {
6262 		g_dev_writev_ext_called = false;
6263 		memset(&g_blob_ext_io_opts, 0, sizeof(g_blob_ext_io_opts));
6264 		spdk_blob_io_writev_ext(blob, channel, iov, iovcnt, offset, length, blob_op_complete, NULL,
6265 					io_opts);
6266 	} else {
6267 		spdk_blob_io_writev(blob, channel, iov, iovcnt, offset, length, blob_op_complete, NULL);
6268 	}
6269 	poll_threads();
6270 	CU_ASSERT(g_bserrno == 0);
6271 	if (io_opts) {
6272 		CU_ASSERT(g_dev_writev_ext_called);
6273 		CU_ASSERT(memcmp(io_opts, &g_blob_ext_io_opts, sizeof(g_blob_ext_io_opts)) == 0);
6274 	}
6275 }
6276 
6277 static void
6278 test_iov_write(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel,
6279 	       bool ext_api)
6280 {
6281 	uint8_t payload_ff[64 * 512];
6282 	uint8_t payload_aa[64 * 512];
6283 	uint8_t payload_00[64 * 512];
6284 	uint8_t *cluster0, *cluster1;
6285 	struct iovec iov[4];
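	/* Dummy ext opts: the pointer values are sentinels that are only compared against
	 * g_blob_ext_io_opts to confirm they were passed through to the bs_dev. */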
6286 	struct spdk_blob_ext_io_opts ext_opts = {
6287 		.memory_domain = (struct spdk_memory_domain *)0xfeedbeef,
6288 		.memory_domain_ctx = (void *)0xf00df00d,
6289 		.size = sizeof(struct spdk_blob_ext_io_opts),
6290 		.user_ctx = (void *)123,
6291 	};
6292 
6293 	memset(payload_ff, 0xFF, sizeof(payload_ff));
6294 	memset(payload_aa, 0xAA, sizeof(payload_aa));
6295 	memset(payload_00, 0x00, sizeof(payload_00));
6296 
6297 	/* Try to perform I/O with io unit = 512 */
6298 	iov[0].iov_base = payload_ff;
6299 	iov[0].iov_len = 1 * 512;
6300 
6301 	test_blob_io_writev(blob, channel, iov, 1, 0, 1, blob_op_complete, NULL,
6302 			    ext_api ? &ext_opts : NULL);
6303 
6304 	/* If the blob is thin provisioned, the first cluster should be allocated by now */
6305 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[0] != 0);
6306 	cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen];
6307 
6308 	/* Each character 0-F symbolizes a single 512-byte io_unit filled with that character.
6309 	 * Pages are separated by |. The whole block [...] symbolizes one cluster (containing 4 pages). */
6310 	/* cluster0: [ F000 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
6311 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6312 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 31 * 512) == 0);
6313 
6314 	/* Verify write with offset on first page */
6315 	iov[0].iov_base = payload_ff;
6316 	iov[0].iov_len = 1 * 512;
6317 
6318 	test_blob_io_writev(blob, channel, iov, 1, 2, 1, blob_op_complete, NULL,
6319 			    ext_api ? &ext_opts : NULL);
6320 
6321 	/* cluster0: [ F0F0 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
6322 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6323 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6324 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6325 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6326 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_00, 28 * 512) == 0);
6327 
6328 	/* Verify a multi-io_unit write still within the first page */
6329 	iov[0].iov_base = payload_ff;
6330 	iov[0].iov_len = 4 * 512;
6331 	spdk_blob_io_writev(blob, channel, iov, 1, 4, 4, blob_op_complete, NULL);
6332 	poll_threads();
6333 
6334 	/* cluster0: [ F0F0 FFFF | 0000 0000 | 0000 0000 | 0000 0000 ] */
6335 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6336 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6337 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6338 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6339 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 4 * 512) == 0);
6340 	CU_ASSERT(memcmp(cluster0 + 8 * 512, payload_00, 24 * 512) == 0);
6341 
6342 	/* Verify write with offset on second page */
6343 	iov[0].iov_base = payload_ff;
6344 	iov[0].iov_len = 4 * 512;
6345 	spdk_blob_io_writev(blob, channel, iov, 1, 8, 4, blob_op_complete, NULL);
6346 	poll_threads();
6347 
6348 	/* cluster0: [ F0F0 FFFF | FFFF 0000 | 0000 0000 | 0000 0000 ] */
6349 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6350 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6351 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6352 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6353 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 8 * 512) == 0);
6354 	CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0);
6355 
6356 	/* Verify write across multiple pages */
6357 	iov[0].iov_base = payload_aa;
6358 	iov[0].iov_len = 8 * 512;
6359 
6360 	test_blob_io_writev(blob, channel, iov, 1, 4, 8, blob_op_complete, NULL,
6361 			    ext_api ? &ext_opts : NULL);
6362 
6363 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 0000 ] */
6364 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6365 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6366 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6367 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6368 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
6369 	CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0);
6370 
6371 	/* Verify write across multiple clusters */
6372 
6373 	iov[0].iov_base = payload_ff;
6374 	iov[0].iov_len = 8 * 512;
6375 
6376 	test_blob_io_writev(blob, channel, iov, 1, 28, 8, blob_op_complete, NULL,
6377 			    ext_api ? &ext_opts : NULL);
6378 
6379 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0);
6380 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
6381 
6382 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6383 	 * cluster1: [ FFFF 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
6384 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6385 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6386 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6387 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6388 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
6389 	CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 16 * 512) == 0);
6390 	CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0);
6391 
6392 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0);
6393 	CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 28 * 512) == 0);
6394 
6395 	/* Verify write to second cluster */
6396 
6397 	iov[0].iov_base = payload_ff;
6398 	iov[0].iov_len = 2 * 512;
6399 
6400 	test_blob_io_writev(blob, channel, iov, 1, 32 + 12, 2, blob_op_complete, NULL,
6401 			    ext_api ? &ext_opts : NULL);
6402 
6403 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0);
6404 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
6405 
6406 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6407 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] */
6408 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6409 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6410 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6411 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6412 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
6413 	CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0);
6414 
6415 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0);
6416 	CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 8 * 512) == 0);
6417 	CU_ASSERT(memcmp(cluster1 + 12 * 512, payload_ff, 2 * 512) == 0);
6418 	CU_ASSERT(memcmp(cluster1 + 14 * 512, payload_00, 18 * 512) == 0);
6419 }
6420 
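/* Helper that issues a readv either through the regular API or, when io_opts is given,
 * through spdk_blob_io_readv_ext(), and verifies the ext options reached the backing dev. */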
6421 static inline void
6422 test_blob_io_readv(struct spdk_blob *blob, struct spdk_io_channel *channel,
6423 		   struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
6424 		   spdk_blob_op_complete cb_fn, void *cb_arg, struct spdk_blob_ext_io_opts *io_opts)
6425 {
6426 	if (io_opts) {
6427 		g_dev_readv_ext_called = false;
6428 		memset(&g_blob_ext_io_opts, 0, sizeof(g_blob_ext_io_opts));
6429 		spdk_blob_io_readv_ext(blob, channel, iov, iovcnt, offset, length, blob_op_complete, NULL, io_opts);
6430 	} else {
6431 		spdk_blob_io_readv(blob, channel, iov, iovcnt, offset, length, blob_op_complete, NULL);
6432 	}
6433 	poll_threads();
6434 	CU_ASSERT(g_bserrno == 0);
6435 	if (io_opts) {
6436 		CU_ASSERT(g_dev_readv_ext_called);
6437 		CU_ASSERT(memcmp(io_opts, &g_blob_ext_io_opts, sizeof(g_blob_ext_io_opts)) == 0);
6438 	}
6439 }
6440 
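/*
 * Reads back, through iovecs, the layout produced by test_iov_write and verifies every region:
 * a single io_unit, a span crossing pages, a span crossing the cluster boundary, reads limited
 * to the second cluster, and finally both clusters in full.  With ext_api == true each read goes
 * through the readv_ext path using the ext_opts defined below.
 */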
6441 static void
6442 test_iov_read(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel,
6443 	      bool ext_api)
6444 {
6445 	uint8_t payload_read[64 * 512];
6446 	uint8_t payload_ff[64 * 512];
6447 	uint8_t payload_aa[64 * 512];
6448 	uint8_t payload_00[64 * 512];
6449 	struct iovec iov[4];
6450 	struct spdk_blob_ext_io_opts ext_opts = {
6451 		.memory_domain = (struct spdk_memory_domain *)0xfeedbeef,
6452 		.memory_domain_ctx = (void *)0xf00df00d,
6453 		.size = sizeof(struct spdk_blob_ext_io_opts),
6454 		.user_ctx = (void *)123,
6455 	};
6456 
6457 	memset(payload_ff, 0xFF, sizeof(payload_ff));
6458 	memset(payload_aa, 0xAA, sizeof(payload_aa));
6459 	memset(payload_00, 0x00, sizeof(payload_00));
6460 
6461 	/* Read only first io unit */
6462 	/* cluster0: [ (F)0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6463 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
6464 	 * payload_read: F000 0000 | 0000 0000 ... */
6465 	memset(payload_read, 0x00, sizeof(payload_read));
6466 	iov[0].iov_base = payload_read;
6467 	iov[0].iov_len = 1 * 512;
6468 
6469 	test_blob_io_readv(blob, channel, iov, 1, 0, 1, blob_op_complete, NULL, ext_api ? &ext_opts : NULL);
6470 
6471 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
6472 	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 31 * 512) == 0);
6473 
6474 	/* Read four io_units starting from offset = 2
6475 	 * cluster0: [ F0(F0 AA)AA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6476 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
6477 	 * payload_read: F0AA 0000 | 0000 0000 ... */
6478 
6479 	memset(payload_read, 0x00, sizeof(payload_read));
6480 	iov[0].iov_base = payload_read;
6481 	iov[0].iov_len = 4 * 512;
6482 
6483 	test_blob_io_readv(blob, channel, iov, 1, 2, 4, blob_op_complete, NULL, ext_api ? &ext_opts : NULL);
6484 
6485 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
6486 	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0);
6487 	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_aa, 512) == 0);
6488 	CU_ASSERT(memcmp(payload_read + 3 * 512, payload_aa, 512) == 0);
6489 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0);
6490 
6491 	/* Read eight io_units across multiple pages
6492 	 * cluster0: [ F0F0 (AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ]
6493 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
6494 	 * payload_read: AAAA AAAA | 0000 0000 ... */
6495 	memset(payload_read, 0x00, sizeof(payload_read));
6496 	iov[0].iov_base = payload_read;
6497 	iov[0].iov_len = 4 * 512;
6498 	iov[1].iov_base = payload_read + 4 * 512;
6499 	iov[1].iov_len = 4 * 512;
6500 
6501 	test_blob_io_readv(blob, channel, iov, 2, 4, 8, blob_op_complete, NULL, ext_api ? &ext_opts : NULL);
6502 
6503 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_aa, 8 * 512) == 0);
6504 	CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0);
6505 
6506 	/* Read eight io_units across multiple clusters
6507 	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 (FFFF ]
6508 	 * cluster1: [ FFFF) 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
6509 	 * payload_read: FFFF FFFF | 0000 0000 ... */
6510 	memset(payload_read, 0x00, sizeof(payload_read));
6511 	iov[0].iov_base = payload_read;
6512 	iov[0].iov_len = 2 * 512;
6513 	iov[1].iov_base = payload_read + 2 * 512;
6514 	iov[1].iov_len = 2 * 512;
6515 	iov[2].iov_base = payload_read + 4 * 512;
6516 	iov[2].iov_len = 2 * 512;
6517 	iov[3].iov_base = payload_read + 6 * 512;
6518 	iov[3].iov_len = 2 * 512;
6519 
6520 	test_blob_io_readv(blob, channel, iov, 4, 28, 8, blob_op_complete, NULL,
6521 			   ext_api ? &ext_opts : NULL);
6522 
6523 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 8 * 512) == 0);
6524 	CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0);
6525 
6526 	/* Read four io_units from second cluster
6527 	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6528 	 * cluster1: [ FFFF 0000 | 00(00 FF)00 | 0000 0000 | 0000 0000 ]
6529 	 * payload_read: 00FF 0000 | 0000 0000 ... */
6530 	memset(payload_read, 0x00, sizeof(payload_read));
6531 	iov[0].iov_base = payload_read;
6532 	iov[0].iov_len = 1 * 512;
6533 	iov[1].iov_base = payload_read + 1 * 512;
6534 	iov[1].iov_len = 3 * 512;
6535 
6536 	test_blob_io_readv(blob, channel, iov, 2, 32 + 10, 4, blob_op_complete, NULL,
6537 			   ext_api ? &ext_opts : NULL);
6538 
6539 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_00, 2 * 512) == 0);
6540 	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 2 * 512) == 0);
6541 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0);
6542 
6543 	/* Read second cluster
6544 	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6545 	 * cluster1: [ (FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ]
6546 	 * payload_read: FFFF 0000 | 0000 FF00 ... */
6547 	memset(payload_read, 0x00, sizeof(payload_read));
6548 	iov[0].iov_base = payload_read;
6549 	iov[0].iov_len = 1 * 512;
6550 	iov[1].iov_base = payload_read + 1 * 512;
6551 	iov[1].iov_len = 2 * 512;
6552 	iov[2].iov_base = payload_read + 3 * 512;
6553 	iov[2].iov_len = 4 * 512;
6554 	iov[3].iov_base = payload_read + 7 * 512;
6555 	iov[3].iov_len = 25 * 512;
6556 
6557 	test_blob_io_readv(blob, channel, iov, 4, 32, 32, blob_op_complete, NULL,
6558 			   ext_api ? &ext_opts : NULL);
6559 
6560 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 4 * 512) == 0);
6561 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 8 * 512) == 0);
6562 	CU_ASSERT(memcmp(payload_read + 12 * 512, payload_ff, 2 * 512) == 0);
6563 	CU_ASSERT(memcmp(payload_read + 14 * 512, payload_00, 18 * 512) == 0);
6564 
6565 	/* Read the whole two clusters
6566 	 * cluster0: [ (F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6567 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ] */
6568 	memset(payload_read, 0x00, sizeof(payload_read));
6569 	iov[0].iov_base = payload_read;
6570 	iov[0].iov_len = 1 * 512;
6571 	iov[1].iov_base = payload_read + 1 * 512;
6572 	iov[1].iov_len = 8 * 512;
6573 	iov[2].iov_base = payload_read + 9 * 512;
6574 	iov[2].iov_len = 16 * 512;
6575 	iov[3].iov_base = payload_read + 25 * 512;
6576 	iov[3].iov_len = 39 * 512;
6577 
6578 	test_blob_io_readv(blob, channel, iov, 4, 0, 64, blob_op_complete, NULL,
6579 			   ext_api ? &ext_opts : NULL);
6580 
6581 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
6582 	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0);
6583 	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 512) == 0);
6584 	CU_ASSERT(memcmp(payload_read + 3 * 512, payload_00, 512) == 0);
6585 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_aa, 8 * 512) == 0);
6586 	CU_ASSERT(memcmp(payload_read + 28 * 512, payload_ff, 4 * 512) == 0);
6587 
6588 	CU_ASSERT(memcmp(payload_read + (32 + 0) * 512, payload_ff, 4 * 512) == 0);
6589 	CU_ASSERT(memcmp(payload_read + (32 + 4) * 512, payload_00, 8 * 512) == 0);
6590 	CU_ASSERT(memcmp(payload_read + (32 + 12) * 512, payload_ff, 2 * 512) == 0);
6591 	CU_ASSERT(memcmp(payload_read + (32 + 14) * 512, payload_00, 18 * 512) == 0);
6592 }
6593 
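/*
 * End-to-end test for a blob store whose io_unit (512 bytes) is smaller than the metadata page
 * size.  Exercises reads, writes, zeroes and unmap on a thick-provisioned and a thin-provisioned
 * blob, then repeats the reads on a snapshot and a clone, and again after inflating the clone.
 */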
6594 static void
6595 blob_io_unit(void)
6596 {
6597 	struct spdk_bs_opts bsopts;
6598 	struct spdk_blob_opts opts;
6599 	struct spdk_blob_store *bs;
6600 	struct spdk_bs_dev *dev;
6601 	struct spdk_blob *blob, *snapshot, *clone;
6602 	spdk_blob_id blobid;
6603 	struct spdk_io_channel *channel;
6604 
6605 	/* Create dev with 512 bytes io unit size */
6606 
6607 	spdk_bs_opts_init(&bsopts, sizeof(bsopts));
6608 	bsopts.cluster_sz = SPDK_BS_PAGE_SIZE * 4;	/* 4 pages * 8 io_units/page = 32 io_units per cluster */
6609 	snprintf(bsopts.bstype.bstype, sizeof(bsopts.bstype.bstype), "TESTTYPE");
6610 
6611 	/* Back the blob store with a device whose block size (512) is smaller than a metadata page */
6612 	dev = init_dev();
6613 	dev->blocklen = 512;
6614 	dev->blockcnt = DEV_BUFFER_SIZE / dev->blocklen;
6615 
6616 	/* Initialize a new blob store */
6617 	spdk_bs_init(dev, &bsopts, bs_op_with_handle_complete, NULL);
6618 	poll_threads();
6619 	CU_ASSERT(g_bserrno == 0);
6620 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
6621 	bs = g_bs;
6622 
6623 	CU_ASSERT(spdk_bs_get_io_unit_size(bs) == 512);
6624 	channel = spdk_bs_alloc_io_channel(bs);
6625 
6626 	/* Create thick provisioned blob */
6627 	ut_spdk_blob_opts_init(&opts);
6628 	opts.thin_provision = false;
6629 	opts.num_clusters = 32;
6630 
6631 	blob = ut_blob_create_and_open(bs, &opts);
6632 	blobid = spdk_blob_get_id(blob);
6633 
6634 	test_io_write(dev, blob, channel);
6635 	test_io_read(dev, blob, channel);
6636 	test_io_zeroes(dev, blob, channel);
6637 
6638 	test_iov_write(dev, blob, channel, false);
6639 	test_iov_read(dev, blob, channel, false);
6640 	test_io_zeroes(dev, blob, channel);
6641 
6642 	test_iov_write(dev, blob, channel, true);
6643 	test_iov_read(dev, blob, channel, true);
6644 
6645 	test_io_unmap(dev, blob, channel);
6646 
6647 	spdk_blob_close(blob, blob_op_complete, NULL);
6648 	poll_threads();
6649 	CU_ASSERT(g_bserrno == 0);
6650 	blob = NULL;
6651 	g_blob = NULL;
6652 
6653 	/* Create thin provisioned blob */
6654 
6655 	ut_spdk_blob_opts_init(&opts);
6656 	opts.thin_provision = true;
6657 	opts.num_clusters = 32;
6658 
6659 	blob = ut_blob_create_and_open(bs, &opts);
6660 	blobid = spdk_blob_get_id(blob);
6661 
6662 	test_io_write(dev, blob, channel);
6663 	test_io_read(dev, blob, channel);
6664 	test_io_zeroes(dev, blob, channel);
6665 
6666 	test_iov_write(dev, blob, channel, false);
6667 	test_iov_read(dev, blob, channel, false);
6668 	test_io_zeroes(dev, blob, channel);
6669 
6670 	test_iov_write(dev, blob, channel, true);
6671 	test_iov_read(dev, blob, channel, true);
6672 
6673 	/* Create snapshot */
6674 
6675 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
6676 	poll_threads();
6677 	CU_ASSERT(g_bserrno == 0);
6678 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
6679 	blobid = g_blobid;
6680 
6681 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
6682 	poll_threads();
6683 	CU_ASSERT(g_bserrno == 0);
6684 	CU_ASSERT(g_blob != NULL);
6685 	snapshot = g_blob;
6686 
6687 	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
6688 	poll_threads();
6689 	CU_ASSERT(g_bserrno == 0);
6690 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
6691 	blobid = g_blobid;
6692 
6693 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
6694 	poll_threads();
6695 	CU_ASSERT(g_bserrno == 0);
6696 	CU_ASSERT(g_blob != NULL);
6697 	clone = g_blob;
6698 
6699 	test_io_read(dev, blob, channel);
6700 	test_io_read(dev, snapshot, channel);
6701 	test_io_read(dev, clone, channel);
6702 
6703 	test_iov_read(dev, blob, channel, false);
6704 	test_iov_read(dev, snapshot, channel, false);
6705 	test_iov_read(dev, clone, channel, false);
6706 
6707 	test_iov_read(dev, blob, channel, true);
6708 	test_iov_read(dev, snapshot, channel, true);
6709 	test_iov_read(dev, clone, channel, true);
6710 
6711 	/* Inflate clone */
6712 
6713 	spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
6714 	poll_threads();
6715 
6716 	CU_ASSERT(g_bserrno == 0);
6717 
6718 	test_io_read(dev, clone, channel);
6719 
6720 	test_io_unmap(dev, clone, channel);
6721 
6722 	test_iov_write(dev, clone, channel, false);
6723 	test_iov_read(dev, clone, channel, false);
6724 	test_io_unmap(dev, clone, channel);
6725 
6726 	test_iov_write(dev, clone, channel, true);
6727 	test_iov_read(dev, clone, channel, true);
6728 
6729 	spdk_blob_close(blob, blob_op_complete, NULL);
6730 	spdk_blob_close(snapshot, blob_op_complete, NULL);
6731 	spdk_blob_close(clone, blob_op_complete, NULL);
6732 	poll_threads();
6733 	CU_ASSERT(g_bserrno == 0);
6734 	blob = NULL;
6735 	g_blob = NULL;
6736 
6737 	spdk_bs_free_io_channel(channel);
6738 	poll_threads();
6739 
6740 	/* Unload the blob store */
6741 	spdk_bs_unload(bs, bs_op_complete, NULL);
6742 	poll_threads();
6743 	CU_ASSERT(g_bserrno == 0);
6744 	g_bs = NULL;
6745 	g_blob = NULL;
6746 	g_blobid = 0;
6747 }
6748 
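/*
 * Backward-compatibility check: a super block written before io_unit_size existed is simulated by
 * clearing that field, and the blob store is expected to fall back to an io unit size equal to
 * SPDK_BS_PAGE_SIZE on load.
 */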
6749 static void
6750 blob_io_unit_compatibility(void)
6751 {
6752 	struct spdk_bs_opts bsopts;
6753 	struct spdk_blob_store *bs;
6754 	struct spdk_bs_dev *dev;
6755 	struct spdk_bs_super_block *super;
6756 
6757 	/* Create dev with 512 bytes io unit size */
6758 
6759 	spdk_bs_opts_init(&bsopts, sizeof(bsopts));
6760 	bsopts.cluster_sz = SPDK_BS_PAGE_SIZE * 4;	/* 4 pages * 8 io_units/page = 32 io_units per cluster */
6761 	snprintf(bsopts.bstype.bstype, sizeof(bsopts.bstype.bstype), "TESTTYPE");
6762 
6763 	/* Back the blob store with a device whose block size (512) is smaller than a metadata page */
6764 	dev = init_dev();
6765 	dev->blocklen = 512;
6766 	dev->blockcnt = DEV_BUFFER_SIZE / dev->blocklen;
6767 
6768 	/* Initialize a new blob store */
6769 	spdk_bs_init(dev, &bsopts, bs_op_with_handle_complete, NULL);
6770 	poll_threads();
6771 	CU_ASSERT(g_bserrno == 0);
6772 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
6773 	bs = g_bs;
6774 
6775 	CU_ASSERT(spdk_bs_get_io_unit_size(bs) == 512);
6776 
6777 	/* Unload the blob store */
6778 	spdk_bs_unload(bs, bs_op_complete, NULL);
6779 	poll_threads();
6780 	CU_ASSERT(g_bserrno == 0);
6781 
6782 	/* Clear io_unit_size in the super block to mimic an older on-disk version.
6783 	 * The loaded io unit size should then fall back to SPDK_BS_PAGE_SIZE. */
6784 	super = (struct spdk_bs_super_block *)&g_dev_buffer[0];
6785 	super->io_unit_size = 0;
6786 	super->crc = blob_md_page_calc_crc(super);
6787 
6788 	dev = init_dev();
6789 	dev->blocklen = 512;
6790 	dev->blockcnt = DEV_BUFFER_SIZE / dev->blocklen;
6791 
6792 	spdk_bs_load(dev, &bsopts, bs_op_with_handle_complete, NULL);
6793 	poll_threads();
6794 	CU_ASSERT(g_bserrno == 0);
6795 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
6796 	bs = g_bs;
6797 
6798 	CU_ASSERT(spdk_bs_get_io_unit_size(bs) == SPDK_BS_PAGE_SIZE);
6799 
6800 	/* Unload the blob store */
6801 	spdk_bs_unload(bs, bs_op_complete, NULL);
6802 	poll_threads();
6803 	CU_ASSERT(g_bserrno == 0);
6804 
6805 	g_bs = NULL;
6806 	g_blob = NULL;
6807 	g_blobid = 0;
6808 }
6809 
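/*
 * Completion callbacks for the double-sync case in blob_simultaneous_operations().  The first
 * completion dirties the blob again by updating the "sync" xattr; the second completion verifies
 * that the first one already ran and is the only one allowed to move g_bserrno off -1.
 */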
6810 static void
6811 first_sync_complete(void *cb_arg, int bserrno)
6812 {
6813 	struct spdk_blob *blob = cb_arg;
6814 	int rc;
6815 
6816 	CU_ASSERT(bserrno == 0);
6817 	rc = spdk_blob_set_xattr(blob, "sync", "second", strlen("second") + 1);
6818 	CU_ASSERT(rc == 0);
6819 	CU_ASSERT(g_bserrno == -1);
6820 
6821 	/* Keep g_bserrno at -1; only the
6822 	 * second sync completion should set it to 0. */
6823 }
6824 
6825 static void
6826 second_sync_complete(void *cb_arg, int bserrno)
6827 {
6828 	struct spdk_blob *blob = cb_arg;
6829 	const void *value;
6830 	size_t value_len;
6831 	int rc;
6832 
6833 	CU_ASSERT(bserrno == 0);
6834 
6835 	/* Verify that the first sync completion had a chance to execute */
6836 	rc = spdk_blob_get_xattr_value(blob, "sync", &value, &value_len);
6837 	CU_ASSERT(rc == 0);
6838 	SPDK_CU_ASSERT_FATAL(value != NULL);
6839 	CU_ASSERT(value_len == strlen("second") + 1);
6840 	CU_ASSERT_NSTRING_EQUAL_FATAL(value, "second", value_len);
6841 
6842 	CU_ASSERT(g_bserrno == -1);
6843 	g_bserrno = bserrno;
6844 }
6845 
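/*
 * Verifies that a blob with a locked operation in progress (snapshot, inflate, clone, resize)
 * rejects a concurrent delete with -EBUSY while the original operation still completes, and that
 * two back-to-back metadata syncs on the same blob both finish cleanly.
 */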
6846 static void
6847 blob_simultaneous_operations(void)
6848 {
6849 	struct spdk_blob_store *bs = g_bs;
6850 	struct spdk_blob_opts opts;
6851 	struct spdk_blob *blob, *snapshot;
6852 	spdk_blob_id blobid, snapshotid;
6853 	struct spdk_io_channel *channel;
6854 	int rc;
6855 
6856 	channel = spdk_bs_alloc_io_channel(bs);
6857 	SPDK_CU_ASSERT_FATAL(channel != NULL);
6858 
6859 	ut_spdk_blob_opts_init(&opts);
6860 	opts.num_clusters = 10;
6861 
6862 	blob = ut_blob_create_and_open(bs, &opts);
6863 	blobid = spdk_blob_get_id(blob);
6864 
6865 	/* Create a snapshot and try to delete the blob at the same time:
6866 	 * - snapshot should be created successfully
6867 	 * - delete operation should fail with -EBUSY */
6868 	CU_ASSERT(blob->locked_operation_in_progress == false);
6869 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
6870 	CU_ASSERT(blob->locked_operation_in_progress == true);
6871 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
6872 	CU_ASSERT(blob->locked_operation_in_progress == true);
6873 	/* Deletion failure */
6874 	CU_ASSERT(g_bserrno == -EBUSY);
6875 	poll_threads();
6876 	CU_ASSERT(blob->locked_operation_in_progress == false);
6877 	/* Snapshot creation success */
6878 	CU_ASSERT(g_bserrno == 0);
6879 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
6880 
6881 	snapshotid = g_blobid;
6882 
6883 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
6884 	poll_threads();
6885 	CU_ASSERT(g_bserrno == 0);
6886 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
6887 	snapshot = g_blob;
6888 
6889 	/* Inflate the blob and try to delete it at the same time:
6890 	 * - blob should be inflated successfully
6891 	 * - delete operation should fail with -EBUSY */
6892 	CU_ASSERT(blob->locked_operation_in_progress == false);
6893 	spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
6894 	CU_ASSERT(blob->locked_operation_in_progress == true);
6895 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
6896 	CU_ASSERT(blob->locked_operation_in_progress == true);
6897 	/* Deletion failure */
6898 	CU_ASSERT(g_bserrno == -EBUSY);
6899 	poll_threads();
6900 	CU_ASSERT(blob->locked_operation_in_progress == false);
6901 	/* Inflation success */
6902 	CU_ASSERT(g_bserrno == 0);
6903 
6904 	/* Clone the snapshot and try to delete the snapshot at the same time:
6905 	 * - snapshot should be cloned successfully
6906 	 * - delete operation should fail with -EBUSY */
6907 	CU_ASSERT(blob->locked_operation_in_progress == false);
6908 	spdk_bs_create_clone(bs, snapshotid, NULL, blob_op_with_id_complete, NULL);
6909 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
6910 	/* Deletion failure */
6911 	CU_ASSERT(g_bserrno == -EBUSY);
6912 	poll_threads();
6913 	CU_ASSERT(blob->locked_operation_in_progress == false);
6914 	/* Clone created */
6915 	CU_ASSERT(g_bserrno == 0);
6916 
6917 	/* Resize the blob and try to delete it at the same time:
6918 	 * - blob should be resized successfully
6919 	 * - delete operation should fail with -EBUSY */
6920 	CU_ASSERT(blob->locked_operation_in_progress == false);
6921 	spdk_blob_resize(blob, 50, blob_op_complete, NULL);
6922 	CU_ASSERT(blob->locked_operation_in_progress == true);
6923 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
6924 	CU_ASSERT(blob->locked_operation_in_progress == true);
6925 	/* Deletion failure */
6926 	CU_ASSERT(g_bserrno == -EBUSY);
6927 	poll_threads();
6928 	CU_ASSERT(blob->locked_operation_in_progress == false);
6929 	/* Blob resized successfully */
6930 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
6931 	poll_threads();
6932 	CU_ASSERT(g_bserrno == 0);
6933 
6934 	/* Issue two consecutive blob syncs; neither should fail.
6935 	 * Force each sync to actually occur by marking the blob dirty each time (via an xattr update).
6936 	 * Merely starting the sync is not enough to complete the operation,
6937 	 * since it requires disk I/O that only finishes while polling. */
6938 	g_bserrno = -1;
6939 
6940 	rc = spdk_blob_set_xattr(blob, "sync", "first", strlen("first") + 1);
6941 	CU_ASSERT(rc == 0);
6942 	spdk_blob_sync_md(blob, first_sync_complete, blob);
6943 	CU_ASSERT(g_bserrno == -1);
6944 
6945 	spdk_blob_sync_md(blob, second_sync_complete, blob);
6946 	CU_ASSERT(g_bserrno == -1);
6947 
6948 	poll_threads();
6949 	CU_ASSERT(g_bserrno == 0);
6950 
6951 	spdk_bs_free_io_channel(channel);
6952 	poll_threads();
6953 
6954 	ut_blob_close_and_delete(bs, snapshot);
6955 	ut_blob_close_and_delete(bs, blob);
6956 }
6957 
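/*
 * Persistence test for blob metadata spanning multiple pages.  A large xattr is repeatedly added
 * and removed around md syncs, with the first sync of each pass interrupted after a growing number
 * of poller iterations.  After removing the xattr the used md page count must return to the clean
 * value, and after a blobstore reload the xattr must not be present.
 */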
6958 static void
6959 blob_persist_test(void)
6960 {
6961 	struct spdk_blob_store *bs = g_bs;
6962 	struct spdk_blob_opts opts;
6963 	struct spdk_blob *blob;
6964 	spdk_blob_id blobid;
6965 	struct spdk_io_channel *channel;
6966 	char *xattr;
6967 	size_t xattr_length;
6968 	int rc;
6969 	uint32_t page_count_clear, page_count_xattr;
6970 	uint64_t poller_iterations;
6971 	bool run_poller;
6972 
6973 	channel = spdk_bs_alloc_io_channel(bs);
6974 	SPDK_CU_ASSERT_FATAL(channel != NULL);
6975 
6976 	ut_spdk_blob_opts_init(&opts);
6977 	opts.num_clusters = 10;
6978 
6979 	blob = ut_blob_create_and_open(bs, &opts);
6980 	blobid = spdk_blob_get_id(blob);
6981 
6982 	/* Save the number of md pages used right after blob creation.
6983 	 * The count should return to this value once the xattr is removed. */
6984 	page_count_clear = spdk_bit_array_count_set(bs->used_md_pages);
6985 	SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_clear);
6986 	SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_clear);
6987 
6988 	/* Add an xattr with the maximum descriptor length, so the metadata no longer fits in a single page. */
6989 	xattr_length = SPDK_BS_MAX_DESC_SIZE - sizeof(struct spdk_blob_md_descriptor_xattr) -
6990 		       strlen("large_xattr");
6991 	xattr = calloc(xattr_length, sizeof(char));
6992 	SPDK_CU_ASSERT_FATAL(xattr != NULL);
6993 
6994 	rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
6995 	SPDK_CU_ASSERT_FATAL(rc == 0);
6996 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
6997 	poll_threads();
6998 	SPDK_CU_ASSERT_FATAL(g_bserrno == 0);
6999 
7000 	/* Save the number of md pages used after adding the large xattr */
7001 	page_count_xattr = spdk_bit_array_count_set(bs->used_md_pages);
7002 	SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_xattr);
7003 	SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_xattr);
7004 
7005 	/* Add the xattr to the blob and sync it. While the sync is in flight, remove the xattr and sync again.
7006 	 * Interrupt the first sync after a growing number of poller iterations on each pass, until it eventually completes.
7007 	 * The expectation is that after the second sync completes, no xattr is saved in the metadata. */
7008 	poller_iterations = 1;
7009 	run_poller = true;
7010 	while (run_poller) {
7011 		rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
7012 		SPDK_CU_ASSERT_FATAL(rc == 0);
7013 		g_bserrno = -1;
7014 		spdk_blob_sync_md(blob, blob_op_complete, NULL);
7015 		poll_thread_times(0, poller_iterations);
7016 		if (g_bserrno == 0) {
7017 			/* The poller iteration count was high enough for the first sync to complete.
7018 			 * Verify that the blob occupies enough md pages to store the xattr. */
7019 			SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_xattr);
7020 			SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_xattr);
7021 			SPDK_CU_ASSERT_FATAL(spdk_bit_array_count_set(bs->used_md_pages) == page_count_xattr);
7022 			run_poller = false;
7023 		}
7024 		rc = spdk_blob_remove_xattr(blob, "large_xattr");
7025 		SPDK_CU_ASSERT_FATAL(rc == 0);
7026 		spdk_blob_sync_md(blob, blob_op_complete, NULL);
7027 		poll_threads();
7028 		SPDK_CU_ASSERT_FATAL(g_bserrno == 0);
7029 		SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_clear);
7030 		SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_clear);
7031 		SPDK_CU_ASSERT_FATAL(spdk_bit_array_count_set(bs->used_md_pages) == page_count_clear);
7032 
7033 		/* Reload bs and re-open blob to verify that xattr was not persisted. */
7034 		spdk_blob_close(blob, blob_op_complete, NULL);
7035 		poll_threads();
7036 		CU_ASSERT(g_bserrno == 0);
7037 
7038 		ut_bs_reload(&bs, NULL);
7039 
7040 		spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
7041 		poll_threads();
7042 		CU_ASSERT(g_bserrno == 0);
7043 		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
7044 		blob = g_blob;
7045 
7046 		rc = spdk_blob_get_xattr_value(blob, "large_xattr", (const void **)&xattr, &xattr_length);
7047 		SPDK_CU_ASSERT_FATAL(rc == -ENOENT);
7048 
7049 		poller_iterations++;
7050 		/* Stop at a high iteration count to prevent an infinite loop.
7051 		 * This value should be more than enough for the first md sync to complete in any case. */
7052 		SPDK_CU_ASSERT_FATAL(poller_iterations < 50);
7053 	}
7054 
7055 	free(xattr);
7056 
7057 	ut_blob_close_and_delete(bs, blob);
7058 
7059 	spdk_bs_free_io_channel(channel);
7060 	poll_threads();
7061 }
7062 
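/*
 * Creates two snapshots of a thick-provisioned blob and then decouples the newer one from its
 * parent.  After decoupling, the snapshot must report no parent and each of its clusters must be
 * allocated separately from the corresponding cluster of the first snapshot.
 */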
7063 static void
7064 blob_decouple_snapshot(void)
7065 {
7066 	struct spdk_blob_store *bs = g_bs;
7067 	struct spdk_blob_opts opts;
7068 	struct spdk_blob *blob, *snapshot1, *snapshot2;
7069 	struct spdk_io_channel *channel;
7070 	spdk_blob_id blobid, snapshotid;
7071 	uint64_t cluster;
7072 
7073 	channel = spdk_bs_alloc_io_channel(bs);
7074 	SPDK_CU_ASSERT_FATAL(channel != NULL);
7075 
7076 	ut_spdk_blob_opts_init(&opts);
7077 	opts.num_clusters = 10;
7078 	opts.thin_provision = false;
7079 
7080 	blob = ut_blob_create_and_open(bs, &opts);
7081 	blobid = spdk_blob_get_id(blob);
7082 
7083 	/* Create first snapshot */
7084 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 0);
7085 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
7086 	poll_threads();
7087 	CU_ASSERT(g_bserrno == 0);
7088 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
7089 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
7090 	snapshotid = g_blobid;
7091 
7092 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
7093 	poll_threads();
7094 	CU_ASSERT(g_bserrno == 0);
7095 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
7096 	snapshot1 = g_blob;
7097 
7098 	/* Create the second one */
7099 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
7100 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
7101 	poll_threads();
7102 	CU_ASSERT(g_bserrno == 0);
7103 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
7104 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 2);
7105 	snapshotid = g_blobid;
7106 
7107 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
7108 	poll_threads();
7109 	CU_ASSERT(g_bserrno == 0);
7110 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
7111 	snapshot2 = g_blob;
7112 	CU_ASSERT_EQUAL(spdk_blob_get_parent_snapshot(bs, snapshot2->id), snapshot1->id);
7113 
7114 	/* Now decouple the second snapshot, forcing it to copy the written clusters */
7115 	spdk_bs_blob_decouple_parent(bs, channel, snapshot2->id, blob_op_complete, NULL);
7116 	poll_threads();
7117 	CU_ASSERT(g_bserrno == 0);
7118 
7119 	/* Verify that the snapshot has been decoupled and that the clusters have been copied */
7120 	CU_ASSERT_EQUAL(spdk_blob_get_parent_snapshot(bs, snapshot2->id), SPDK_BLOBID_INVALID);
7121 	for (cluster = 0; cluster < snapshot2->active.num_clusters; ++cluster) {
7122 		CU_ASSERT_NOT_EQUAL(snapshot2->active.clusters[cluster], 0);
7123 		CU_ASSERT_NOT_EQUAL(snapshot2->active.clusters[cluster],
7124 				    snapshot1->active.clusters[cluster]);
7125 	}
7126 
7127 	spdk_bs_free_io_channel(channel);
7128 
7129 	ut_blob_close_and_delete(bs, snapshot2);
7130 	ut_blob_close_and_delete(bs, snapshot1);
7131 	ut_blob_close_and_delete(bs, blob);
7132 	poll_threads();
7133 }
7134 
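/*
 * Fixtures for the "blob_bs" suite: initialize a fresh blob store on a zeroed device, and unload
 * it again afterwards, re-zeroing the device.
 */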
7135 static void
7136 suite_bs_setup(void)
7137 {
7138 	struct spdk_bs_dev *dev;
7139 
7140 	dev = init_dev();
7141 	memset(g_dev_buffer, 0, DEV_BUFFER_SIZE);
7142 	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
7143 	poll_threads();
7144 	CU_ASSERT(g_bserrno == 0);
7145 	CU_ASSERT(g_bs != NULL);
7146 }
7147 
7148 static void
7149 suite_bs_cleanup(void)
7150 {
7151 	spdk_bs_unload(g_bs, bs_op_complete, NULL);
7152 	poll_threads();
7153 	CU_ASSERT(g_bserrno == 0);
7154 	g_bs = NULL;
7155 	memset(g_dev_buffer, 0, DEV_BUFFER_SIZE);
7156 }
7157 
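/*
 * Helper used throughout the file: creates a blob (with default ut opts when blob_opts is NULL),
 * opens it and returns the open handle, asserting that every step completes without error.
 */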
7158 static struct spdk_blob *
7159 ut_blob_create_and_open(struct spdk_blob_store *bs, struct spdk_blob_opts *blob_opts)
7160 {
7161 	struct spdk_blob *blob;
7162 	struct spdk_blob_opts create_blob_opts;
7163 	spdk_blob_id blobid;
7164 
7165 	if (blob_opts == NULL) {
7166 		ut_spdk_blob_opts_init(&create_blob_opts);
7167 		blob_opts = &create_blob_opts;
7168 	}
7169 
7170 	spdk_bs_create_blob_ext(bs, blob_opts, blob_op_with_id_complete, NULL);
7171 	poll_threads();
7172 	CU_ASSERT(g_bserrno == 0);
7173 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
7174 	blobid = g_blobid;
7175 	g_blobid = -1;
7176 
7177 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
7178 	poll_threads();
7179 	CU_ASSERT(g_bserrno == 0);
7180 	CU_ASSERT(g_blob != NULL);
7181 	blob = g_blob;
7182 
7183 	g_blob = NULL;
7184 	g_bserrno = -1;
7185 
7186 	return blob;
7187 }
7188 
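/*
 * Counterpart of ut_blob_create_and_open(): closes the blob and deletes it from the blob store,
 * asserting that both operations succeed.
 */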
7189 static void
7190 ut_blob_close_and_delete(struct spdk_blob_store *bs, struct spdk_blob *blob)
7191 {
7192 	spdk_blob_id blobid = spdk_blob_get_id(blob);
7193 
7194 	spdk_blob_close(blob, blob_op_complete, NULL);
7195 	poll_threads();
7196 	CU_ASSERT(g_bserrno == 0);
7197 	g_blob = NULL;
7198 
7199 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
7200 	poll_threads();
7201 	CU_ASSERT(g_bserrno == 0);
7202 	g_bserrno = -1;
7203 }
7204 
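/*
 * Fixtures for the "blob_blob" suite: on top of the blob store fixtures, create and open a blob
 * in g_blob, and close and delete it again during cleanup.
 */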
7205 static void
7206 suite_blob_setup(void)
7207 {
7208 	suite_bs_setup();
7209 	CU_ASSERT(g_bs != NULL);
7210 
7211 	g_blob = ut_blob_create_and_open(g_bs, NULL);
7212 	CU_ASSERT(g_blob != NULL);
7213 }
7214 
7215 static void
7216 suite_blob_cleanup(void)
7217 {
7218 	ut_blob_close_and_delete(g_bs, g_blob);
7219 	CU_ASSERT(g_blob == NULL);
7220 
7221 	suite_bs_cleanup();
7222 	CU_ASSERT(g_bs == NULL);
7223 }
7224 
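/*
 * Registers the tests in three suites: "blob" with no shared fixtures, "blob_bs" with a blob store
 * provided by suite fixtures, and "blob_blob" with a blob store plus an already open blob.  All
 * tests are run twice, first with the extent table feature disabled and then enabled, and the
 * failure counts of both passes are summed for the process exit status.
 */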
7225 int main(int argc, char **argv)
7226 {
7227 	CU_pSuite	suite, suite_bs, suite_blob;
7228 	unsigned int	num_failures;
7229 
7230 	CU_set_error_action(CUEA_ABORT);
7231 	CU_initialize_registry();
7232 
7233 	suite = CU_add_suite("blob", NULL, NULL);
7234 	suite_bs = CU_add_suite_with_setup_and_teardown("blob_bs", NULL, NULL,
7235 			suite_bs_setup, suite_bs_cleanup);
7236 	suite_blob = CU_add_suite_with_setup_and_teardown("blob_blob", NULL, NULL,
7237 			suite_blob_setup, suite_blob_cleanup);
7238 
7239 	CU_ADD_TEST(suite, blob_init);
7240 	CU_ADD_TEST(suite_bs, blob_open);
7241 	CU_ADD_TEST(suite_bs, blob_create);
7242 	CU_ADD_TEST(suite_bs, blob_create_loop);
7243 	CU_ADD_TEST(suite_bs, blob_create_fail);
7244 	CU_ADD_TEST(suite_bs, blob_create_internal);
7245 	CU_ADD_TEST(suite_bs, blob_create_zero_extent);
7246 	CU_ADD_TEST(suite, blob_thin_provision);
7247 	CU_ADD_TEST(suite_bs, blob_snapshot);
7248 	CU_ADD_TEST(suite_bs, blob_clone);
7249 	CU_ADD_TEST(suite_bs, blob_inflate);
7250 	CU_ADD_TEST(suite_bs, blob_delete);
7251 	CU_ADD_TEST(suite_bs, blob_resize_test);
7252 	CU_ADD_TEST(suite, blob_read_only);
7253 	CU_ADD_TEST(suite_bs, channel_ops);
7254 	CU_ADD_TEST(suite_bs, blob_super);
7255 	CU_ADD_TEST(suite_blob, blob_write);
7256 	CU_ADD_TEST(suite_blob, blob_read);
7257 	CU_ADD_TEST(suite_blob, blob_rw_verify);
7258 	CU_ADD_TEST(suite_bs, blob_rw_verify_iov);
7259 	CU_ADD_TEST(suite_blob, blob_rw_verify_iov_nomem);
7260 	CU_ADD_TEST(suite_blob, blob_rw_iov_read_only);
7261 	CU_ADD_TEST(suite_bs, blob_unmap);
7262 	CU_ADD_TEST(suite_bs, blob_iter);
7263 	CU_ADD_TEST(suite_blob, blob_xattr);
7264 	CU_ADD_TEST(suite_bs, blob_parse_md);
7265 	CU_ADD_TEST(suite, bs_load);
7266 	CU_ADD_TEST(suite_bs, bs_load_pending_removal);
7267 	CU_ADD_TEST(suite, bs_load_custom_cluster_size);
7268 	CU_ADD_TEST(suite_bs, bs_unload);
7269 	CU_ADD_TEST(suite, bs_cluster_sz);
7270 	CU_ADD_TEST(suite_bs, bs_usable_clusters);
7271 	CU_ADD_TEST(suite, bs_resize_md);
7272 	CU_ADD_TEST(suite, bs_destroy);
7273 	CU_ADD_TEST(suite, bs_type);
7274 	CU_ADD_TEST(suite, bs_super_block);
7275 	CU_ADD_TEST(suite, bs_test_recover_cluster_count);
7276 	CU_ADD_TEST(suite, blob_serialize_test);
7277 	CU_ADD_TEST(suite_bs, blob_crc);
7278 	CU_ADD_TEST(suite, super_block_crc);
7279 	CU_ADD_TEST(suite_blob, blob_dirty_shutdown);
7280 	CU_ADD_TEST(suite_bs, blob_flags);
7281 	CU_ADD_TEST(suite_bs, bs_version);
7282 	CU_ADD_TEST(suite_bs, blob_set_xattrs_test);
7283 	CU_ADD_TEST(suite_bs, blob_thin_prov_alloc);
7284 	CU_ADD_TEST(suite_bs, blob_insert_cluster_msg_test);
7285 	CU_ADD_TEST(suite_bs, blob_thin_prov_rw);
7286 	CU_ADD_TEST(suite, blob_thin_prov_write_count_io);
7287 	CU_ADD_TEST(suite_bs, blob_thin_prov_rle);
7288 	CU_ADD_TEST(suite_bs, blob_thin_prov_rw_iov);
7289 	CU_ADD_TEST(suite, bs_load_iter_test);
7290 	CU_ADD_TEST(suite_bs, blob_snapshot_rw);
7291 	CU_ADD_TEST(suite_bs, blob_snapshot_rw_iov);
7292 	CU_ADD_TEST(suite, blob_relations);
7293 	CU_ADD_TEST(suite, blob_relations2);
7294 	CU_ADD_TEST(suite, blob_relations3);
7295 	CU_ADD_TEST(suite, blobstore_clean_power_failure);
7296 	CU_ADD_TEST(suite, blob_delete_snapshot_power_failure);
7297 	CU_ADD_TEST(suite, blob_create_snapshot_power_failure);
7298 	CU_ADD_TEST(suite_bs, blob_inflate_rw);
7299 	CU_ADD_TEST(suite_bs, blob_snapshot_freeze_io);
7300 	CU_ADD_TEST(suite_bs, blob_operation_split_rw);
7301 	CU_ADD_TEST(suite_bs, blob_operation_split_rw_iov);
7302 	CU_ADD_TEST(suite, blob_io_unit);
7303 	CU_ADD_TEST(suite, blob_io_unit_compatibility);
7304 	CU_ADD_TEST(suite_bs, blob_simultaneous_operations);
7305 	CU_ADD_TEST(suite_bs, blob_persist_test);
7306 	CU_ADD_TEST(suite_bs, blob_decouple_snapshot);
7307 
7308 	allocate_threads(2);
7309 	set_thread(0);
7310 
7311 	g_dev_buffer = calloc(1, DEV_BUFFER_SIZE);
7312 
7313 	CU_basic_set_mode(CU_BRM_VERBOSE);
7314 	g_use_extent_table = false;
7315 	CU_basic_run_tests();
7316 	num_failures = CU_get_number_of_failures();
7317 	g_use_extent_table = true;
7318 	CU_basic_run_tests();
7319 	num_failures += CU_get_number_of_failures();
7320 	CU_cleanup_registry();
7321 
7322 	free(g_dev_buffer);
7323 
7324 	free_threads();
7325 
7326 	return num_failures;
7327 }
7328