1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2017 Intel Corporation.
3  *   All rights reserved.
4  *   Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
5  */
6 
7 #include "spdk/stdinc.h"
8 
9 #include "spdk_internal/cunit.h"
10 #include "spdk/blob.h"
11 #include "spdk/string.h"
12 
13 #include "common/lib/ut_multithread.c"
14 #include "../bs_dev_common.c"
15 #include "thread/thread.c"
16 #include "ext_dev.c"
17 #include "blob/blobstore.c"
18 #include "blob/request.c"
19 #include "blob/zeroes.c"
20 #include "blob/blob_bs_dev.c"
21 #include "esnap_dev.c"
22 #define BLOCKLEN DEV_BUFFER_BLOCKLEN
23 
24 struct spdk_blob_store *g_bs;
25 spdk_blob_id g_blobid;
26 struct spdk_blob *g_blob, *g_blob2;
27 int g_bserrno, g_bserrno2;
28 struct spdk_xattr_names *g_names;
29 int g_done;
30 char *g_xattr_names[] = {"first", "second", "third"};
31 char *g_xattr_values[] = {"one", "two", "three"};
32 uint64_t g_ctx = 1729;
33 bool g_use_extent_table = false;
34 uint64_t g_copied_clusters_count = 0;
35 
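/*
 * Local copy of the on-disk super block layout as it existed in blobstore
 * metadata version 1; the static assert below checks that it is exactly one
 * 4KiB metadata page.
 */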
36 struct spdk_bs_super_block_ver1 {
37 	uint8_t		signature[8];
38 	uint32_t        version;
39 	uint32_t        length;
40 	uint32_t	clean; /* If there was a clean shutdown, this is 1. */
41 	spdk_blob_id	super_blob;
42 
43 	uint32_t	cluster_size; /* In bytes */
44 
45 	uint32_t	used_page_mask_start; /* Offset from beginning of disk, in pages */
46 	uint32_t	used_page_mask_len; /* Count, in pages */
47 
48 	uint32_t	used_cluster_mask_start; /* Offset from beginning of disk, in pages */
49 	uint32_t	used_cluster_mask_len; /* Count, in pages */
50 
51 	uint32_t	md_start; /* Offset from beginning of disk, in pages */
52 	uint32_t	md_len; /* Count, in pages */
53 
54 	uint8_t		reserved[4036];
55 	uint32_t	crc;
56 } __attribute__((packed));
57 SPDK_STATIC_ASSERT(sizeof(struct spdk_bs_super_block_ver1) == 0x1000, "Invalid super block size");
58 
59 static struct spdk_blob *ut_blob_create_and_open(struct spdk_blob_store *bs,
60 		struct spdk_blob_opts *blob_opts);
61 static void ut_blob_close_and_delete(struct spdk_blob_store *bs, struct spdk_blob *blob);
62 static void suite_blob_setup(void);
63 static void suite_blob_cleanup(void);
64 
65 DEFINE_STUB(spdk_memory_domain_memzero, int, (struct spdk_memory_domain *src_domain,
66 		void *src_domain_ctx, struct iovec *iov, uint32_t iovcnt, void (*cpl_cb)(void *, int),
67 		void *cpl_cb_arg), 0);
68 
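/*
 * Returns true only if _blob has every marker of an esnap clone: the internal
 * BLOB_EXTERNAL_SNAPSHOT_ID xattr matches the given id, the
 * SPDK_BLOB_EXTERNAL_SNAPSHOT invalid flag is set, and parent_id is
 * SPDK_BLOBID_EXTERNAL_SNAPSHOT.
 */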
69 static bool
70 is_esnap_clone(struct spdk_blob *_blob, const void *id, size_t id_len)
71 {
72 	const void *val = NULL;
73 	size_t len = 0;
74 	bool c0, c1, c2, c3;
75 
76 	CU_ASSERT(blob_get_xattr_value(_blob, BLOB_EXTERNAL_SNAPSHOT_ID, &val, &len,
77 				       true) == 0);
78 	CU_ASSERT((c0 = (len == id_len)));
79 	CU_ASSERT((c1 = (val != NULL && memcmp(val, id, len) == 0)));
80 	CU_ASSERT((c2 = !!(_blob->invalid_flags & SPDK_BLOB_EXTERNAL_SNAPSHOT)));
81 	CU_ASSERT((c3 = (_blob->parent_id == SPDK_BLOBID_EXTERNAL_SNAPSHOT)));
82 
83 	return c0 && c1 && c2 && c3;
84 }
85 
86 static bool
87 is_not_esnap_clone(struct spdk_blob *_blob)
88 {
89 	const void *val = NULL;
90 	size_t len = 0;
91 	bool c1, c2, c3, c4;
92 
93 	CU_ASSERT((c1 = (blob_get_xattr_value(_blob, BLOB_EXTERNAL_SNAPSHOT_ID, &val, &len,
94 					      true) == -ENOENT)));
95 	CU_ASSERT((c2 = (val == NULL)));
96 	CU_ASSERT((c3 = ((_blob->invalid_flags & SPDK_BLOB_EXTERNAL_SNAPSHOT) == 0)));
97 	CU_ASSERT((c4 = (_blob->parent_id != SPDK_BLOBID_EXTERNAL_SNAPSHOT)));
98 
99 	return c1 && c2 && c3 && c4;
100 }
101 
102 #define UT_ASSERT_IS_ESNAP_CLONE(_blob, _id, _len) CU_ASSERT(is_esnap_clone(_blob, _id, _len))
103 #define UT_ASSERT_IS_NOT_ESNAP_CLONE(_blob) CU_ASSERT(is_not_esnap_clone(_blob))
104 
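/*
 * get_value callback passed in struct spdk_blob_xattr_opts: looks the
 * requested name up in g_xattr_names and returns the matching entry from
 * g_xattr_values.
 */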
105 static void
106 _get_xattr_value(void *arg, const char *name,
107 		 const void **value, size_t *value_len)
108 {
109 	uint64_t i;
110 
111 	SPDK_CU_ASSERT_FATAL(value_len != NULL);
112 	SPDK_CU_ASSERT_FATAL(value != NULL);
113 	CU_ASSERT(arg == &g_ctx);
114 
115 	for (i = 0; i < sizeof(g_xattr_names) / sizeof(g_xattr_names[0]); i++) {
116 		if (!strcmp(name, g_xattr_names[i])) {
117 			*value_len = strlen(g_xattr_values[i]);
118 			*value = g_xattr_values[i];
119 			break;
120 		}
121 	}
122 }
123 
124 static void
125 _get_xattr_value_null(void *arg, const char *name,
126 		      const void **value, size_t *value_len)
127 {
128 	SPDK_CU_ASSERT_FATAL(value_len != NULL);
129 	SPDK_CU_ASSERT_FATAL(value != NULL);
130 	CU_ASSERT(arg == NULL);
131 
132 	*value_len = 0;
133 	*value = NULL;
134 }
135 
136 static int
137 _get_snapshots_count(struct spdk_blob_store *bs)
138 {
139 	struct spdk_blob_list *snapshot = NULL;
140 	int count = 0;
141 
142 	TAILQ_FOREACH(snapshot, &bs->snapshots, link) {
143 		count += 1;
144 	}
145 
146 	return count;
147 }
148 
149 static void
150 ut_spdk_blob_opts_init(struct spdk_blob_opts *opts)
151 {
152 	spdk_blob_opts_init(opts, sizeof(*opts));
153 	opts->use_extent_table = g_use_extent_table;
154 }
155 
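/*
 * Completion callbacks for the asynchronous blobstore API.  They only stash
 * the results in the g_* globals; tests call poll_threads() to drive the
 * operation to completion and then assert on g_bserrno / g_blobid / g_blob.
 */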
156 static void
157 bs_op_complete(void *cb_arg, int bserrno)
158 {
159 	g_bserrno = bserrno;
160 }
161 
162 static void
163 bs_op_with_handle_complete(void *cb_arg, struct spdk_blob_store *bs,
164 			   int bserrno)
165 {
166 	g_bs = bs;
167 	g_bserrno = bserrno;
168 }
169 
170 static void
171 blob_op_complete(void *cb_arg, int bserrno)
172 {
173 	if (cb_arg != NULL) {
174 		int *errp = cb_arg;
175 
176 		*errp = bserrno;
177 	}
178 	g_bserrno = bserrno;
179 }
180 
181 static void
182 blob_op_with_id_complete(void *cb_arg, spdk_blob_id blobid, int bserrno)
183 {
184 	g_blobid = blobid;
185 	g_bserrno = bserrno;
186 }
187 
188 static void
189 blob_op_with_handle_complete(void *cb_arg, struct spdk_blob *blb, int bserrno)
190 {
191 	g_blob = blb;
192 	g_bserrno = bserrno;
193 }
194 
195 static void
196 blob_op_with_handle_complete2(void *cb_arg, struct spdk_blob *blob, int bserrno)
197 {
198 	if (g_blob == NULL) {
199 		g_blob = blob;
200 		g_bserrno = bserrno;
201 	} else {
202 		g_blob2 = blob;
203 		g_bserrno2 = bserrno;
204 	}
205 }
206 
207 static void
208 blob_shallow_copy_status_cb(uint64_t copied_clusters, void *cb_arg)
209 {
210 	g_copied_clusters_count = copied_clusters;
211 }
212 
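/*
 * Cleanly unload *bs and load it again from the same backing device, forcing
 * the on-disk metadata to be re-parsed.  opts may be NULL for defaults.
 */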
213 static void
214 ut_bs_reload(struct spdk_blob_store **bs, struct spdk_bs_opts *opts)
215 {
216 	struct spdk_bs_dev *dev;
217 
218 	/* Unload the blob store */
219 	spdk_bs_unload(*bs, bs_op_complete, NULL);
220 	poll_threads();
221 	CU_ASSERT(g_bserrno == 0);
222 
223 	dev = init_dev();
224 	/* Load an existing blob store */
225 	spdk_bs_load(dev, opts, bs_op_with_handle_complete, NULL);
226 	poll_threads();
227 	CU_ASSERT(g_bserrno == 0);
228 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
229 	*bs = g_bs;
230 
231 	g_bserrno = -1;
232 }
233 
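/*
 * Simulate a crash: free the in-memory blobstore with bs_free() instead of
 * unloading it, so the clean-shutdown flag is never written, then load the
 * device again to exercise the recovery path.
 */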
234 static void
235 ut_bs_dirty_load(struct spdk_blob_store **bs, struct spdk_bs_opts *opts)
236 {
237 	struct spdk_bs_dev *dev;
238 
239 	/* Dirty shutdown */
240 	bs_free(*bs);
241 
242 	dev = init_dev();
243 	/* Load an existing blob store */
244 	spdk_bs_load(dev, opts, bs_op_with_handle_complete, NULL);
245 	poll_threads();
246 	CU_ASSERT(g_bserrno == 0);
247 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
248 	*bs = g_bs;
249 
250 	g_bserrno = -1;
251 }
252 
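/* spdk_bs_init() must reject an unsupported 500-byte blocklen with -EINVAL
 * and then succeed with the default device. */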
253 static void
254 blob_init(void)
255 {
256 	struct spdk_blob_store *bs;
257 	struct spdk_bs_dev *dev;
258 
259 	dev = init_dev();
260 
261 	/* should fail for an unsupported blocklen */
262 	dev->blocklen = 500;
263 	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
264 	poll_threads();
265 	CU_ASSERT(g_bserrno == -EINVAL);
266 
267 	dev = init_dev();
268 	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
269 	poll_threads();
270 	CU_ASSERT(g_bserrno == 0);
271 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
272 	bs = g_bs;
273 
274 	spdk_bs_unload(bs, bs_op_complete, NULL);
275 	poll_threads();
276 	CU_ASSERT(g_bserrno == 0);
277 	g_bs = NULL;
278 }
279 
280 static void
281 blob_super(void)
282 {
283 	struct spdk_blob_store *bs = g_bs;
284 	spdk_blob_id blobid;
285 	struct spdk_blob_opts blob_opts;
286 
287 	/* Get the super blob without having set one */
288 	spdk_bs_get_super(bs, blob_op_with_id_complete, NULL);
289 	poll_threads();
290 	CU_ASSERT(g_bserrno == -ENOENT);
291 	CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID);
292 
293 	/* Create a blob */
294 	ut_spdk_blob_opts_init(&blob_opts);
295 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
296 	poll_threads();
297 	CU_ASSERT(g_bserrno == 0);
298 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
299 	blobid = g_blobid;
300 
301 	/* Set the blob as the super blob */
302 	spdk_bs_set_super(bs, blobid, blob_op_complete, NULL);
303 	poll_threads();
304 	CU_ASSERT(g_bserrno == 0);
305 
306 	/* Get the super blob */
307 	spdk_bs_get_super(bs, blob_op_with_id_complete, NULL);
308 	poll_threads();
309 	CU_ASSERT(g_bserrno == 0);
310 	CU_ASSERT(blobid == g_blobid);
311 }
312 
313 static void
314 blob_open(void)
315 {
316 	struct spdk_blob_store *bs = g_bs;
317 	struct spdk_blob *blob;
318 	struct spdk_blob_opts blob_opts;
319 	spdk_blob_id blobid, blobid2;
320 
321 	ut_spdk_blob_opts_init(&blob_opts);
322 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
323 	poll_threads();
324 	CU_ASSERT(g_bserrno == 0);
325 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
326 	blobid = g_blobid;
327 
328 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
329 	poll_threads();
330 	CU_ASSERT(g_bserrno == 0);
331 	CU_ASSERT(g_blob != NULL);
332 	blob = g_blob;
333 
334 	blobid2 = spdk_blob_get_id(blob);
335 	CU_ASSERT(blobid == blobid2);
336 
337 	/* Try to open the blob again.  It should succeed. */
338 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
339 	poll_threads();
340 	CU_ASSERT(g_bserrno == 0);
341 	CU_ASSERT(blob == g_blob);
342 
343 	spdk_blob_close(blob, blob_op_complete, NULL);
344 	poll_threads();
345 	CU_ASSERT(g_bserrno == 0);
346 
347 	/*
348 	 * Close the blob a second time, releasing the second reference.  This
349 	 *  should succeed.
350 	 */
351 	blob = g_blob;
352 	spdk_blob_close(blob, blob_op_complete, NULL);
353 	poll_threads();
354 	CU_ASSERT(g_bserrno == 0);
355 
356 	/*
357 	 * Try to open the blob again.  It should succeed.  This tests the case
358 	 *  where the blob is opened, closed, then re-opened.
359 	 */
360 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
361 	poll_threads();
362 	CU_ASSERT(g_bserrno == 0);
363 	CU_ASSERT(g_blob != NULL);
364 	blob = g_blob;
365 	spdk_blob_close(blob, blob_op_complete, NULL);
366 	poll_threads();
367 	CU_ASSERT(g_bserrno == 0);
368 
369 	/* Try to open the blob twice in succession.  This should return the same
370 	 * blob object.
371 	 */
372 	g_blob = NULL;
373 	g_blob2 = NULL;
374 	g_bserrno = -1;
375 	g_bserrno2 = -1;
376 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete2, NULL);
377 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete2, NULL);
378 	poll_threads();
379 	CU_ASSERT(g_bserrno == 0);
380 	CU_ASSERT(g_bserrno2 == 0);
381 	CU_ASSERT(g_blob != NULL);
382 	CU_ASSERT(g_blob2 != NULL);
383 	CU_ASSERT(g_blob == g_blob2);
384 
385 	g_bserrno = -1;
386 	spdk_blob_close(g_blob, blob_op_complete, NULL);
387 	poll_threads();
388 	CU_ASSERT(g_bserrno == 0);
389 
390 	ut_blob_close_and_delete(bs, g_blob);
391 }
392 
393 static void
394 blob_create(void)
395 {
396 	struct spdk_blob_store *bs = g_bs;
397 	struct spdk_blob *blob;
398 	struct spdk_blob_opts opts;
399 	spdk_blob_id blobid;
400 
401 	/* Create blob with 10 clusters */
402 
403 	ut_spdk_blob_opts_init(&opts);
404 	opts.num_clusters = 10;
405 
406 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
407 	poll_threads();
408 	CU_ASSERT(g_bserrno == 0);
409 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
410 	blobid = g_blobid;
411 
412 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
413 	poll_threads();
414 	CU_ASSERT(g_bserrno == 0);
415 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
416 	blob = g_blob;
417 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
418 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 10);
419 
420 	spdk_blob_close(blob, blob_op_complete, NULL);
421 	poll_threads();
422 	CU_ASSERT(g_bserrno == 0);
423 
424 	/* Create blob with 0 clusters */
425 
426 	ut_spdk_blob_opts_init(&opts);
427 	opts.num_clusters = 0;
428 
429 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
430 	poll_threads();
431 	CU_ASSERT(g_bserrno == 0);
432 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
433 	blobid = g_blobid;
434 
435 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
436 	poll_threads();
437 	CU_ASSERT(g_bserrno == 0);
438 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
439 	blob = g_blob;
440 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
441 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);
442 
443 	spdk_blob_close(blob, blob_op_complete, NULL);
444 	poll_threads();
445 	CU_ASSERT(g_bserrno == 0);
446 
447 	/* Create blob with default options (opts == NULL) */
448 
449 	spdk_bs_create_blob_ext(bs, NULL, blob_op_with_id_complete, NULL);
450 	poll_threads();
451 	CU_ASSERT(g_bserrno == 0);
452 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
453 	blobid = g_blobid;
454 
455 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
456 	poll_threads();
457 	CU_ASSERT(g_bserrno == 0);
458 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
459 	blob = g_blob;
460 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
461 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);
462 
463 	spdk_blob_close(blob, blob_op_complete, NULL);
464 	poll_threads();
465 	CU_ASSERT(g_bserrno == 0);
466 
467 	/* Try to create blob with size larger than blobstore */
468 
469 	ut_spdk_blob_opts_init(&opts);
470 	opts.num_clusters = bs->total_clusters + 1;
471 
472 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
473 	poll_threads();
474 	CU_ASSERT(g_bserrno == -ENOSPC);
475 }
476 
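/*
 * A blob created with zero clusters should still be recognized as using the
 * extent table (extent_table_found) while allocating no extent pages, both
 * via the public create path and via bs_create_blob() with NULL internal
 * options.
 */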
477 static void
478 blob_create_zero_extent(void)
479 {
480 	struct spdk_blob_store *bs = g_bs;
481 	struct spdk_blob *blob;
482 	spdk_blob_id blobid;
483 
484 	/* Create blob with default options (opts == NULL) */
485 	spdk_bs_create_blob_ext(bs, NULL, blob_op_with_id_complete, NULL);
486 	poll_threads();
487 	CU_ASSERT(g_bserrno == 0);
488 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
489 	blobid = g_blobid;
490 
491 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
492 	poll_threads();
493 	CU_ASSERT(g_bserrno == 0);
494 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
495 	blob = g_blob;
496 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
497 	CU_ASSERT(blob->extent_table_found == true);
498 	CU_ASSERT(blob->active.extent_pages_array_size == 0);
499 	CU_ASSERT(blob->active.extent_pages == NULL);
500 
501 	spdk_blob_close(blob, blob_op_complete, NULL);
502 	poll_threads();
503 	CU_ASSERT(g_bserrno == 0);
504 
505 	/* Create blob with NULL internal options  */
506 	bs_create_blob(bs, NULL, NULL, blob_op_with_id_complete, NULL);
507 	poll_threads();
508 	CU_ASSERT(g_bserrno == 0);
509 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
510 	blobid = g_blobid;
511 
512 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
513 	poll_threads();
514 	CU_ASSERT(g_bserrno == 0);
515 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
516 	blob = g_blob;
517 	CU_ASSERT(TAILQ_FIRST(&blob->xattrs_internal) == NULL);
518 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
519 	CU_ASSERT(blob->extent_table_found == true);
520 	CU_ASSERT(blob->active.extent_pages_array_size == 0);
521 	CU_ASSERT(blob->active.extent_pages == NULL);
522 
523 	spdk_blob_close(blob, blob_op_complete, NULL);
524 	poll_threads();
525 	CU_ASSERT(g_bserrno == 0);
526 }
527 
528 /*
529  * Create and delete one blob in a loop, over and over again.  This helps ensure
530  * that the internal bit masks tracking used clusters and md_pages are
531  * maintained correctly.
532  */
533 static void
534 blob_create_loop(void)
535 {
536 	struct spdk_blob_store *bs = g_bs;
537 	struct spdk_blob_opts opts;
538 	uint32_t i, loop_count;
539 
540 	loop_count = 4 * spdk_max(spdk_bit_array_capacity(bs->used_md_pages),
541 				  spdk_bit_pool_capacity(bs->used_clusters));
542 
543 	for (i = 0; i < loop_count; i++) {
544 		ut_spdk_blob_opts_init(&opts);
545 		opts.num_clusters = 1;
546 		g_bserrno = -1;
547 		g_blobid = SPDK_BLOBID_INVALID;
548 		spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
549 		poll_threads();
550 		CU_ASSERT(g_bserrno == 0);
551 		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
552 		spdk_bs_delete_blob(bs, g_blobid, blob_op_complete, NULL);
553 		poll_threads();
554 		CU_ASSERT(g_bserrno == 0);
555 	}
556 }
557 
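/*
 * Creating a blob with xattr names but a NULL get_value callback must fail
 * with -EINVAL and must not leak used_blobids or used_md_pages bits, both
 * immediately and after the blobstore is reloaded.
 */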
558 static void
559 blob_create_fail(void)
560 {
561 	struct spdk_blob_store *bs = g_bs;
562 	struct spdk_blob_opts opts;
563 	spdk_blob_id blobid;
564 	uint32_t used_blobids_count = spdk_bit_array_count_set(bs->used_blobids);
565 	uint32_t used_md_pages_count = spdk_bit_array_count_set(bs->used_md_pages);
566 
567 	/* NULL callback */
568 	ut_spdk_blob_opts_init(&opts);
569 	opts.xattrs.names = g_xattr_names;
570 	opts.xattrs.get_value = NULL;
571 	opts.xattrs.count = 1;
572 	opts.xattrs.ctx = &g_ctx;
573 
574 	blobid = spdk_bit_array_find_first_clear(bs->used_md_pages, 0);
575 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
576 	poll_threads();
577 	CU_ASSERT(g_bserrno == -EINVAL);
578 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
579 	CU_ASSERT(spdk_bit_array_count_set(bs->used_blobids) == used_blobids_count);
580 	CU_ASSERT(spdk_bit_array_count_set(bs->used_md_pages) == used_md_pages_count);
581 
582 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
583 	poll_threads();
584 	CU_ASSERT(g_bserrno == -ENOENT);
585 	SPDK_CU_ASSERT_FATAL(g_blob == NULL);
586 
587 	ut_bs_reload(&bs, NULL);
588 	CU_ASSERT(spdk_bit_array_count_set(bs->used_blobids) == used_blobids_count);
589 	CU_ASSERT(spdk_bit_array_count_set(bs->used_md_pages) == used_md_pages_count);
590 
591 	spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL);
592 	poll_threads();
593 	CU_ASSERT(g_blob == NULL);
594 	CU_ASSERT(g_bserrno == -ENOENT);
595 }
596 
597 static void
598 blob_create_internal(void)
599 {
600 	struct spdk_blob_store *bs = g_bs;
601 	struct spdk_blob *blob;
602 	struct spdk_blob_opts opts;
603 	struct spdk_blob_xattr_opts internal_xattrs;
604 	const void *value;
605 	size_t value_len;
606 	spdk_blob_id blobid;
607 	int rc;
608 
609 	/* Create blob with custom xattrs */
610 
611 	ut_spdk_blob_opts_init(&opts);
612 	blob_xattrs_init(&internal_xattrs);
613 	internal_xattrs.count = 3;
614 	internal_xattrs.names = g_xattr_names;
615 	internal_xattrs.get_value = _get_xattr_value;
616 	internal_xattrs.ctx = &g_ctx;
617 
618 	bs_create_blob(bs, &opts, &internal_xattrs, blob_op_with_id_complete, NULL);
619 	poll_threads();
620 	CU_ASSERT(g_bserrno == 0);
621 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
622 	blobid = g_blobid;
623 
624 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
625 	poll_threads();
626 	CU_ASSERT(g_bserrno == 0);
627 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
628 	blob = g_blob;
629 
630 	rc = blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len, true);
631 	CU_ASSERT(rc == 0);
632 	SPDK_CU_ASSERT_FATAL(value != NULL);
633 	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
634 	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);
635 
636 	rc = blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len, true);
637 	CU_ASSERT(rc == 0);
638 	SPDK_CU_ASSERT_FATAL(value != NULL);
639 	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
640 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);
641 
642 	rc = blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len, true);
643 	CU_ASSERT(rc == 0);
644 	SPDK_CU_ASSERT_FATAL(value != NULL);
645 	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
646 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);
647 
648 	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len);
649 	CU_ASSERT(rc != 0);
650 
651 	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len);
652 	CU_ASSERT(rc != 0);
653 
654 	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len);
655 	CU_ASSERT(rc != 0);
656 
657 	spdk_blob_close(blob, blob_op_complete, NULL);
658 	poll_threads();
659 	CU_ASSERT(g_bserrno == 0);
660 
661 	/* Create blob with NULL internal options  */
662 
663 	bs_create_blob(bs, NULL, NULL, blob_op_with_id_complete, NULL);
664 	poll_threads();
665 	CU_ASSERT(g_bserrno == 0);
666 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
667 	blobid = g_blobid;
668 
669 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
670 	poll_threads();
671 	CU_ASSERT(g_bserrno == 0);
672 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
673 	CU_ASSERT(TAILQ_FIRST(&g_blob->xattrs_internal) == NULL);
674 	CU_ASSERT(spdk_blob_get_num_clusters(g_blob) == 0);
675 
676 	blob = g_blob;
677 
678 	spdk_blob_close(blob, blob_op_complete, NULL);
679 	poll_threads();
680 	CU_ASSERT(g_bserrno == 0);
681 }
682 
683 static void
684 blob_thin_provision(void)
685 {
686 	struct spdk_blob_store *bs;
687 	struct spdk_bs_dev *dev;
688 	struct spdk_blob *blob;
689 	struct spdk_blob_opts opts;
690 	struct spdk_bs_opts bs_opts;
691 	spdk_blob_id blobid;
692 
693 	dev = init_dev();
694 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
695 	snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");
696 
697 	/* Initialize a new blob store */
698 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
699 	poll_threads();
700 	CU_ASSERT(g_bserrno == 0);
701 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
702 
703 	bs = g_bs;
704 
705 	/* Create blob with thin provisioning enabled */
706 
707 	ut_spdk_blob_opts_init(&opts);
708 	opts.thin_provision = true;
709 	opts.num_clusters = 10;
710 
711 	blob = ut_blob_create_and_open(bs, &opts);
712 	blobid = spdk_blob_get_id(blob);
713 	CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
714 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);
715 	/* When a thin-provisioned blob is created with num_clusters set, nothing
716 	 * is allocated unless the extent table is used.  With the extent table,
717 	 * the extent page array is allocated up front. */
718 	if (blob->extent_table_found == true) {
719 		CU_ASSERT(blob->active.extent_pages_array_size > 0);
720 		CU_ASSERT(blob->active.extent_pages != NULL);
721 	} else {
722 		CU_ASSERT(blob->active.extent_pages_array_size == 0);
723 		CU_ASSERT(blob->active.extent_pages == NULL);
724 	}
725 
726 	spdk_blob_close(blob, blob_op_complete, NULL);
727 	CU_ASSERT(g_bserrno == 0);
728 
729 	/* Do not shut down cleanly.  This makes sure that when we load again
730 	 *  and try to recover a valid used_cluster map, the blobstore will
731 	 *  ignore clusters with index 0 since these are unallocated clusters.
732 	 */
733 	ut_bs_dirty_load(&bs, &bs_opts);
734 
735 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
736 	poll_threads();
737 	CU_ASSERT(g_bserrno == 0);
738 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
739 	blob = g_blob;
740 	CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
741 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);
742 
743 	ut_blob_close_and_delete(bs, blob);
744 
745 	spdk_bs_unload(bs, bs_op_complete, NULL);
746 	poll_threads();
747 	CU_ASSERT(g_bserrno == 0);
748 	g_bs = NULL;
749 }
750 
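/*
 * Build the chain blob -> snapshot2 -> snapshot and verify the back_bs_dev
 * links, the xattrs passed at snapshot creation, the per-snapshot clone
 * lists, and that deleting a clone removes it from its parent's clone list.
 */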
751 static void
752 blob_snapshot(void)
753 {
754 	struct spdk_blob_store *bs = g_bs;
755 	struct spdk_blob *blob;
756 	struct spdk_blob *snapshot, *snapshot2;
757 	struct spdk_blob_bs_dev *blob_bs_dev;
758 	struct spdk_blob_opts opts;
759 	struct spdk_blob_xattr_opts xattrs;
760 	spdk_blob_id blobid;
761 	spdk_blob_id snapshotid;
762 	spdk_blob_id snapshotid2;
763 	const void *value;
764 	size_t value_len;
765 	int rc;
766 	spdk_blob_id ids[2];
767 	size_t count;
768 
769 	/* Create blob with 10 clusters */
770 	ut_spdk_blob_opts_init(&opts);
771 	opts.num_clusters = 10;
772 
773 	blob = ut_blob_create_and_open(bs, &opts);
774 	blobid = spdk_blob_get_id(blob);
775 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
776 
777 	/* Create snapshot from blob */
778 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 0);
779 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
780 	poll_threads();
781 	CU_ASSERT(g_bserrno == 0);
782 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
783 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
784 	snapshotid = g_blobid;
785 
786 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
787 	poll_threads();
788 	CU_ASSERT(g_bserrno == 0);
789 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
790 	snapshot = g_blob;
791 	CU_ASSERT(snapshot->data_ro == true);
792 	CU_ASSERT(snapshot->md_ro == true);
793 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10);
794 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(snapshot) == 10);
795 
796 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
797 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);
798 	CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
799 	CU_ASSERT(spdk_mem_all_zero(blob->active.clusters,
800 				    blob->active.num_clusters * sizeof(blob->active.clusters[0])));
801 
802 	/* Try to create snapshot from clone with xattrs */
803 	xattrs.names = g_xattr_names;
804 	xattrs.get_value = _get_xattr_value;
805 	xattrs.count = 3;
806 	xattrs.ctx = &g_ctx;
807 	spdk_bs_create_snapshot(bs, blobid, &xattrs, blob_op_with_id_complete, NULL);
808 	poll_threads();
809 	CU_ASSERT(g_bserrno == 0);
810 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
811 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 2);
812 	snapshotid2 = g_blobid;
813 
814 	spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL);
815 	CU_ASSERT(g_bserrno == 0);
816 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
817 	snapshot2 = g_blob;
818 	CU_ASSERT(snapshot2->data_ro == true);
819 	CU_ASSERT(snapshot2->md_ro == true);
820 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot2) == 10);
821 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(snapshot2) == 0);
822 
823 	/* Confirm that blob is backed by snapshot2 and snapshot2 is backed by snapshot */
824 	CU_ASSERT(snapshot->back_bs_dev == NULL);
825 	SPDK_CU_ASSERT_FATAL(blob->back_bs_dev != NULL);
826 	SPDK_CU_ASSERT_FATAL(snapshot2->back_bs_dev != NULL);
827 
828 	blob_bs_dev = (struct spdk_blob_bs_dev *)blob->back_bs_dev;
829 	CU_ASSERT(blob_bs_dev->blob == snapshot2);
830 
831 	blob_bs_dev = (struct spdk_blob_bs_dev *)snapshot2->back_bs_dev;
832 	CU_ASSERT(blob_bs_dev->blob == snapshot);
833 
834 	rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[0], &value, &value_len);
835 	CU_ASSERT(rc == 0);
836 	SPDK_CU_ASSERT_FATAL(value != NULL);
837 	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
838 	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);
839 
840 	rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[1], &value, &value_len);
841 	CU_ASSERT(rc == 0);
842 	SPDK_CU_ASSERT_FATAL(value != NULL);
843 	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
844 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);
845 
846 	rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[2], &value, &value_len);
847 	CU_ASSERT(rc == 0);
848 	SPDK_CU_ASSERT_FATAL(value != NULL);
849 	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
850 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);
851 
852 	/* Confirm that blob is clone of snapshot2, and snapshot2 is clone of snapshot */
853 	count = 2;
854 	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0);
855 	CU_ASSERT(count == 1);
856 	CU_ASSERT(ids[0] == blobid);
857 
858 	count = 2;
859 	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);
860 	CU_ASSERT(count == 1);
861 	CU_ASSERT(ids[0] == snapshotid2);
862 
863 	/* Try to create snapshot from snapshot */
864 	spdk_bs_create_snapshot(bs, snapshotid, NULL, blob_op_with_id_complete, NULL);
865 	poll_threads();
866 	CU_ASSERT(g_bserrno == -EINVAL);
867 	CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID);
868 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 2);
869 
870 	/* Delete blob and confirm that it is no longer on snapshot2 clone list */
871 	ut_blob_close_and_delete(bs, blob);
872 	count = 2;
873 	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0);
874 	CU_ASSERT(count == 0);
875 
876 	/* Delete snapshot2 and confirm that it is no longer on snapshot clone list */
877 	ut_blob_close_and_delete(bs, snapshot2);
878 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
879 	count = 2;
880 	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0);
881 	CU_ASSERT(count == 0);
882 
883 	ut_blob_close_and_delete(bs, snapshot);
884 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 0);
885 }
886 
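/*
 * While a snapshot is being taken the origin blob's I/O is frozen
 * (frozen_refcnt > 0) and new writes are parked on the channel's queued_io
 * list; after the snapshot completes, the queued write must still reach the
 * blob and be readable back.
 */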
887 static void
888 blob_snapshot_freeze_io(void)
889 {
890 	struct spdk_io_channel *channel;
891 	struct spdk_bs_channel *bs_channel;
892 	struct spdk_blob_store *bs = g_bs;
893 	struct spdk_blob *blob;
894 	struct spdk_blob_opts opts;
895 	spdk_blob_id blobid;
896 	uint32_t num_of_pages = 10;
897 	uint8_t payload_read[num_of_pages * BLOCKLEN];
898 	uint8_t payload_write[num_of_pages * BLOCKLEN];
899 	uint8_t payload_zero[num_of_pages * BLOCKLEN];
900 
901 	memset(payload_write, 0xE5, sizeof(payload_write));
902 	memset(payload_read, 0x00, sizeof(payload_read));
903 	memset(payload_zero, 0x00, sizeof(payload_zero));
904 
905 	/* Test freeze I/O during snapshot */
906 	channel = spdk_bs_alloc_io_channel(bs);
907 	bs_channel = spdk_io_channel_get_ctx(channel);
908 
909 	/* Create blob with 10 clusters */
910 	ut_spdk_blob_opts_init(&opts);
911 	opts.num_clusters = 10;
912 	opts.thin_provision = false;
913 
914 	blob = ut_blob_create_and_open(bs, &opts);
915 	blobid = spdk_blob_get_id(blob);
916 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
917 
918 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
919 
920 	/* This is implementation specific.
921 	 * I/O is frozen (frozen_refcnt is raised) in the _spdk_bs_snapshot_freeze_cpl callback.
922 	 * Four async I/O operations happen before that. */
923 	poll_thread_times(0, 5);
924 
925 	CU_ASSERT(TAILQ_EMPTY(&bs_channel->queued_io));
926 
927 	/* Blob I/O should be frozen here */
928 	CU_ASSERT(blob->frozen_refcnt == 1);
929 
930 	/* Write to the blob */
931 	spdk_blob_io_write(blob, channel, payload_write, 0, num_of_pages, blob_op_complete, NULL);
932 
933 	/* Verify that I/O is queued */
934 	CU_ASSERT(!TAILQ_EMPTY(&bs_channel->queued_io));
935 	/* Verify that the payload is not written to disk; at this point the blobs have already been switched */
936 	CU_ASSERT(blob->active.clusters[0] == 0);
937 
938 	/* Finish all operations including spdk_bs_create_snapshot */
939 	poll_threads();
940 
941 	/* Verify snapshot */
942 	CU_ASSERT(g_bserrno == 0);
943 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
944 
945 	/* Verify that blob has unset frozen_io */
946 	CU_ASSERT(blob->frozen_refcnt == 0);
947 
948 	/* Verify that postponed I/O completed successfully by comparing payload */
949 	spdk_blob_io_read(blob, channel, payload_read, 0, num_of_pages, blob_op_complete, NULL);
950 	poll_threads();
951 	CU_ASSERT(g_bserrno == 0);
952 	CU_ASSERT(memcmp(payload_write, payload_read, num_of_pages * BLOCKLEN) == 0);
953 
954 	spdk_bs_free_io_channel(channel);
955 	poll_threads();
956 
957 	ut_blob_close_and_delete(bs, blob);
958 }
959 
960 static void
961 blob_clone(void)
962 {
963 	struct spdk_blob_store *bs = g_bs;
964 	struct spdk_blob_opts opts;
965 	struct spdk_blob *blob, *snapshot, *clone;
966 	spdk_blob_id blobid, cloneid, snapshotid;
967 	struct spdk_blob_xattr_opts xattrs;
968 	const void *value;
969 	size_t value_len;
970 	int rc;
971 
972 	/* Create blob with 10 clusters */
973 
974 	ut_spdk_blob_opts_init(&opts);
975 	opts.num_clusters = 10;
976 
977 	blob = ut_blob_create_and_open(bs, &opts);
978 	blobid = spdk_blob_get_id(blob);
979 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
980 
981 	/* Create snapshot */
982 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
983 	poll_threads();
984 	CU_ASSERT(g_bserrno == 0);
985 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
986 	snapshotid = g_blobid;
987 
988 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
989 	poll_threads();
990 	CU_ASSERT(g_bserrno == 0);
991 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
992 	snapshot = g_blob;
993 	CU_ASSERT(snapshot->data_ro == true);
994 	CU_ASSERT(snapshot->md_ro == true);
995 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10);
996 
997 	spdk_blob_close(snapshot, blob_op_complete, NULL);
998 	poll_threads();
999 	CU_ASSERT(g_bserrno == 0);
1000 
1001 	/* Create clone from snapshot with xattrs */
1002 	xattrs.names = g_xattr_names;
1003 	xattrs.get_value = _get_xattr_value;
1004 	xattrs.count = 3;
1005 	xattrs.ctx = &g_ctx;
1006 
1007 	spdk_bs_create_clone(bs, snapshotid, &xattrs, blob_op_with_id_complete, NULL);
1008 	poll_threads();
1009 	CU_ASSERT(g_bserrno == 0);
1010 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
1011 	cloneid = g_blobid;
1012 
1013 	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
1014 	poll_threads();
1015 	CU_ASSERT(g_bserrno == 0);
1016 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
1017 	clone = g_blob;
1018 	CU_ASSERT(clone->data_ro == false);
1019 	CU_ASSERT(clone->md_ro == false);
1020 	CU_ASSERT(spdk_blob_get_num_clusters(clone) == 10);
1021 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(clone) == 0);
1022 
1023 	rc = spdk_blob_get_xattr_value(clone, g_xattr_names[0], &value, &value_len);
1024 	CU_ASSERT(rc == 0);
1025 	SPDK_CU_ASSERT_FATAL(value != NULL);
1026 	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
1027 	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);
1028 
1029 	rc = spdk_blob_get_xattr_value(clone, g_xattr_names[1], &value, &value_len);
1030 	CU_ASSERT(rc == 0);
1031 	SPDK_CU_ASSERT_FATAL(value != NULL);
1032 	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
1033 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);
1034 
1035 	rc = spdk_blob_get_xattr_value(clone, g_xattr_names[2], &value, &value_len);
1036 	CU_ASSERT(rc == 0);
1037 	SPDK_CU_ASSERT_FATAL(value != NULL);
1038 	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
1039 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);
1040 
1041 
1042 	spdk_blob_close(clone, blob_op_complete, NULL);
1043 	poll_threads();
1044 	CU_ASSERT(g_bserrno == 0);
1045 
1046 	/* Try to create a clone from a blob that is not read-only */
1047 	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
1048 	poll_threads();
1049 	CU_ASSERT(g_bserrno == -EINVAL);
1050 	CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID);
1051 
1052 	/* Mark blob as read only */
1053 	spdk_blob_set_read_only(blob);
1054 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
1055 	poll_threads();
1056 	CU_ASSERT(g_bserrno == 0);
1057 
1058 	/* Create clone from read only blob */
1059 	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
1060 	poll_threads();
1061 	CU_ASSERT(g_bserrno == 0);
1062 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
1063 	cloneid = g_blobid;
1064 
1065 	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
1066 	poll_threads();
1067 	CU_ASSERT(g_bserrno == 0);
1068 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
1069 	clone = g_blob;
1070 	CU_ASSERT(clone->data_ro == false);
1071 	CU_ASSERT(clone->md_ro == false);
1072 	CU_ASSERT(spdk_blob_get_num_clusters(clone) == 10);
1073 
1074 	ut_blob_close_and_delete(bs, clone);
1075 	ut_blob_close_and_delete(bs, blob);
1076 }
1077 
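/*
 * Shared body for blob_inflate(): with decouple_parent == false a full
 * inflate allocates every cluster and leaves the blob thick; with
 * decouple_parent == true only the dependency on the parent is removed and
 * no new clusters are allocated.
 */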
1078 static void
1079 _blob_inflate(bool decouple_parent)
1080 {
1081 	struct spdk_blob_store *bs = g_bs;
1082 	struct spdk_blob_opts opts;
1083 	struct spdk_blob *blob, *snapshot;
1084 	spdk_blob_id blobid, snapshotid;
1085 	struct spdk_io_channel *channel;
1086 	uint64_t free_clusters;
1087 
1088 	channel = spdk_bs_alloc_io_channel(bs);
1089 	SPDK_CU_ASSERT_FATAL(channel != NULL);
1090 
1091 	/* Create blob with 10 clusters */
1092 
1093 	ut_spdk_blob_opts_init(&opts);
1094 	opts.num_clusters = 10;
1095 	opts.thin_provision = true;
1096 
1097 	blob = ut_blob_create_and_open(bs, &opts);
1098 	blobid = spdk_blob_get_id(blob);
1099 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
1100 	CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == true);
1101 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);
1102 
1103 	/* 1) Blob with no parent */
1104 	if (decouple_parent) {
1105 		/* Decouple parent of blob with no parent (should fail) */
1106 		spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
1107 		poll_threads();
1108 		CU_ASSERT(g_bserrno != 0);
1109 	} else {
1110 		/* Inflating a thin blob with no parent should make it thick */
1111 		spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
1112 		poll_threads();
1113 		CU_ASSERT(g_bserrno == 0);
1114 		CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == false);
1115 		CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 10);
1116 	}
1117 
1118 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
1119 	poll_threads();
1120 	CU_ASSERT(g_bserrno == 0);
1121 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
1122 	snapshotid = g_blobid;
1123 
1124 	CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == true);
1125 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
1126 
1127 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
1128 	poll_threads();
1129 	CU_ASSERT(g_bserrno == 0);
1130 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
1131 	snapshot = g_blob;
1132 	CU_ASSERT(snapshot->data_ro == true);
1133 	CU_ASSERT(snapshot->md_ro == true);
1134 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10);
1135 
1136 	spdk_blob_close(snapshot, blob_op_complete, NULL);
1137 	poll_threads();
1138 	CU_ASSERT(g_bserrno == 0);
1139 
1140 	free_clusters = spdk_bs_free_cluster_count(bs);
1141 
1142 	/* 2) Blob with parent */
1143 	if (!decouple_parent) {
1144 		/* Do full blob inflation */
1145 		spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
1146 		poll_threads();
1147 		CU_ASSERT(g_bserrno == 0);
1148 		/* all 10 clusters should be allocated */
1149 		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 10);
1150 		CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 10);
1151 	} else {
1152 		/* Decouple parent of blob */
1153 		spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
1154 		poll_threads();
1155 		CU_ASSERT(g_bserrno == 0);
1156 		/* when only parent is removed, none of the clusters should be allocated */
1157 		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters);
1158 		CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);
1159 	}
1160 
1161 	/* Now, it should be possible to delete snapshot */
1162 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
1163 	poll_threads();
1164 	CU_ASSERT(g_bserrno == 0);
1165 
1166 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
1167 	CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == decouple_parent);
1168 
1169 	spdk_bs_free_io_channel(channel);
1170 	poll_threads();
1171 
1172 	ut_blob_close_and_delete(bs, blob);
1173 }
1174 
1175 static void
1176 blob_inflate(void)
1177 {
1178 	_blob_inflate(false);
1179 	_blob_inflate(true);
1180 }
1181 
1182 static void
1183 blob_delete(void)
1184 {
1185 	struct spdk_blob_store *bs = g_bs;
1186 	struct spdk_blob_opts blob_opts;
1187 	spdk_blob_id blobid;
1188 
1189 	/* Create a blob and then delete it. */
1190 	ut_spdk_blob_opts_init(&blob_opts);
1191 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
1192 	poll_threads();
1193 	CU_ASSERT(g_bserrno == 0);
1194 	CU_ASSERT(g_blobid > 0);
1195 	blobid = g_blobid;
1196 
1197 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
1198 	poll_threads();
1199 	CU_ASSERT(g_bserrno == 0);
1200 
1201 	/* Try to open the blob */
1202 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
1203 	poll_threads();
1204 	CU_ASSERT(g_bserrno == -ENOENT);
1205 }
1206 
1207 static void
1208 blob_resize_test(void)
1209 {
1210 	struct spdk_blob_store *bs = g_bs;
1211 	struct spdk_blob *blob;
1212 	uint64_t free_clusters;
1213 
1214 	free_clusters = spdk_bs_free_cluster_count(bs);
1215 
1216 	blob = ut_blob_create_and_open(bs, NULL);
1217 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
1218 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);
1219 
1220 	/* Confirm that resize fails if blob is marked read-only. */
1221 	blob->md_ro = true;
1222 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
1223 	poll_threads();
1224 	CU_ASSERT(g_bserrno == -EPERM);
1225 	blob->md_ro = false;
1226 
1227 	/* The blob started at 0 clusters. Resize it to be 5. */
1228 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
1229 	poll_threads();
1230 	CU_ASSERT(g_bserrno == 0);
1231 	CU_ASSERT((free_clusters - 5) == spdk_bs_free_cluster_count(bs));
1232 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 5);
1233 
1234 	/* Shrink the blob to 3 clusters. This will not actually release
1235 	 * the old clusters until the blob is synced.
1236 	 */
1237 	spdk_blob_resize(blob, 3, blob_op_complete, NULL);
1238 	poll_threads();
1239 	CU_ASSERT(g_bserrno == 0);
1240 	/* Verify there are still 5 clusters in use */
1241 	CU_ASSERT((free_clusters - 5) == spdk_bs_free_cluster_count(bs));
1242 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 3);
1243 
1244 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
1245 	poll_threads();
1246 	CU_ASSERT(g_bserrno == 0);
1247 	/* Now there are only 3 clusters in use */
1248 	CU_ASSERT((free_clusters - 3) == spdk_bs_free_cluster_count(bs));
1249 
1250 	/* Resize the blob to be 10 clusters. Growth takes effect immediately. */
1251 	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
1252 	poll_threads();
1253 	CU_ASSERT(g_bserrno == 0);
1254 	CU_ASSERT((free_clusters - 10) == spdk_bs_free_cluster_count(bs));
1255 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 10);
1256 
1257 	/* Try to resize the blob to size larger than blobstore. */
1258 	spdk_blob_resize(blob, bs->total_clusters + 1, blob_op_complete, NULL);
1259 	poll_threads();
1260 	CU_ASSERT(g_bserrno == -ENOSPC);
1261 
1262 	ut_blob_close_and_delete(bs, blob);
1263 }
1264 
1265 static void
1266 blob_resize_thin_test(void)
1267 {
1268 	struct spdk_blob_store *bs = g_bs;
1269 	struct spdk_blob *blob;
1270 	struct spdk_blob_opts opts;
1271 	struct spdk_io_channel *blob_ch;
1272 	uint64_t free_clusters;
1273 	uint64_t io_units_per_cluster;
1274 	uint64_t offset;
1275 	uint8_t buf1[DEV_BUFFER_BLOCKLEN];
1276 
1277 	free_clusters = spdk_bs_free_cluster_count(bs);
1278 
1279 	blob_ch = spdk_bs_alloc_io_channel(bs);
1280 	SPDK_CU_ASSERT_FATAL(blob_ch != NULL);
1281 
1282 	/* Create blob with thin provisioning enabled */
1283 	ut_spdk_blob_opts_init(&opts);
1284 	opts.thin_provision = true;
1285 	opts.num_clusters = 0;
1286 
1287 	blob = ut_blob_create_and_open(bs, &opts);
1288 	CU_ASSERT((free_clusters) == spdk_bs_free_cluster_count(bs));
1289 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);
1290 	io_units_per_cluster = bs_io_units_per_cluster(blob);
1291 
1292 	/* The blob started at 0 clusters. Resize it to be 6. */
1293 	spdk_blob_resize(blob, 6, blob_op_complete, NULL);
1294 	poll_threads();
1295 	CU_ASSERT(g_bserrno == 0);
1296 	CU_ASSERT((free_clusters) == spdk_bs_free_cluster_count(bs));
1297 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);
1298 
1299 	/* Write to clusters 0, 2, 4 and 5 of the blob */
1300 	for (offset = 0; offset < io_units_per_cluster; offset++) {
1301 		spdk_blob_io_write(blob, blob_ch, buf1, offset, 1, blob_op_complete, NULL);
1302 		poll_threads();
1303 		CU_ASSERT(g_bserrno == 0);
1304 	}
1305 	for (offset = 2 * io_units_per_cluster; offset < 3 * io_units_per_cluster; offset++) {
1306 		spdk_blob_io_write(blob, blob_ch, buf1, offset, 1, blob_op_complete, NULL);
1307 		poll_threads();
1308 		CU_ASSERT(g_bserrno == 0);
1309 	}
1310 	for (offset = 4 * io_units_per_cluster; offset < 5 * io_units_per_cluster; offset++) {
1311 		spdk_blob_io_write(blob, blob_ch, buf1, offset, 1, blob_op_complete, NULL);
1312 		poll_threads();
1313 		CU_ASSERT(g_bserrno == 0);
1314 	}
1315 	for (offset = 5 * io_units_per_cluster; offset < 6 * io_units_per_cluster; offset++) {
1316 		spdk_blob_io_write(blob, blob_ch, buf1, offset, 1, blob_op_complete, NULL);
1317 		poll_threads();
1318 		CU_ASSERT(g_bserrno == 0);
1319 	}
1320 
1321 	/* Check allocated clusters after write */
1322 	CU_ASSERT((free_clusters - 4) == spdk_bs_free_cluster_count(bs));
1323 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 4);
1324 
1325 	/* Shrink the blob to 2 clusters. This will not actually release
1326 	 * the old clusters until the blob is synced.
1327 	 */
1328 	spdk_blob_resize(blob, 2, blob_op_complete, NULL);
1329 	poll_threads();
1330 	CU_ASSERT(g_bserrno == 0);
1331 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 2);
1332 	CU_ASSERT((free_clusters - 4) == spdk_bs_free_cluster_count(bs));
1333 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 1);
1334 
1335 	/* Sync blob: 4 clusters were truncated but only 3 of them were allocated */
1336 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
1337 	poll_threads();
1338 	CU_ASSERT(g_bserrno == 0);
1339 	CU_ASSERT((free_clusters - 1) == spdk_bs_free_cluster_count(bs));
1340 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 2);
1341 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 1);
1342 
1343 	spdk_bs_free_io_channel(blob_ch);
1344 	ut_blob_close_and_delete(bs, blob);
1345 }
1346 
1347 static void
1348 blob_read_only(void)
1349 {
1350 	struct spdk_blob_store *bs;
1351 	struct spdk_bs_dev *dev;
1352 	struct spdk_blob *blob;
1353 	struct spdk_bs_opts opts;
1354 	spdk_blob_id blobid;
1355 	int rc;
1356 
1357 	dev = init_dev();
1358 	spdk_bs_opts_init(&opts, sizeof(opts));
1359 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
1360 
1361 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
1362 	poll_threads();
1363 	CU_ASSERT(g_bserrno == 0);
1364 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
1365 	bs = g_bs;
1366 
1367 	blob = ut_blob_create_and_open(bs, NULL);
1368 	blobid = spdk_blob_get_id(blob);
1369 
1370 	rc = spdk_blob_set_read_only(blob);
1371 	CU_ASSERT(rc == 0);
1372 
1373 	CU_ASSERT(blob->data_ro == false);
1374 	CU_ASSERT(blob->md_ro == false);
1375 
1376 	spdk_blob_sync_md(blob, bs_op_complete, NULL);
1377 	poll_threads();
1378 
1379 	CU_ASSERT(blob->data_ro == true);
1380 	CU_ASSERT(blob->md_ro == true);
1381 	CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);
1382 
1383 	spdk_blob_close(blob, blob_op_complete, NULL);
1384 	poll_threads();
1385 	CU_ASSERT(g_bserrno == 0);
1386 
1387 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
1388 	poll_threads();
1389 	CU_ASSERT(g_bserrno == 0);
1390 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
1391 	blob = g_blob;
1392 
1393 	CU_ASSERT(blob->data_ro == true);
1394 	CU_ASSERT(blob->md_ro == true);
1395 	CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);
1396 
1397 	spdk_blob_close(blob, blob_op_complete, NULL);
1398 	poll_threads();
1399 	CU_ASSERT(g_bserrno == 0);
1400 
1401 	ut_bs_reload(&bs, &opts);
1402 
1403 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
1404 	poll_threads();
1405 	CU_ASSERT(g_bserrno == 0);
1406 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
1407 	blob = g_blob;
1408 
1409 	CU_ASSERT(blob->data_ro == true);
1410 	CU_ASSERT(blob->md_ro == true);
1411 	CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);
1412 
1413 	ut_blob_close_and_delete(bs, blob);
1414 
1415 	spdk_bs_unload(bs, bs_op_complete, NULL);
1416 	poll_threads();
1417 	CU_ASSERT(g_bserrno == 0);
1418 }
1419 
1420 static void
1421 channel_ops(void)
1422 {
1423 	struct spdk_blob_store *bs = g_bs;
1424 	struct spdk_io_channel *channel;
1425 
1426 	channel = spdk_bs_alloc_io_channel(bs);
1427 	CU_ASSERT(channel != NULL);
1428 
1429 	spdk_bs_free_io_channel(channel);
1430 	poll_threads();
1431 }
1432 
1433 static void
1434 blob_write(void)
1435 {
1436 	struct spdk_blob_store *bs = g_bs;
1437 	struct spdk_blob *blob = g_blob;
1438 	struct spdk_io_channel *channel;
1439 	uint64_t io_units_per_cluster;
1440 	uint8_t payload[10 * BLOCKLEN];
1441 
1442 	io_units_per_cluster = spdk_bs_get_cluster_size(bs) / spdk_bs_get_io_unit_size(bs);
1443 
1444 	channel = spdk_bs_alloc_io_channel(bs);
1445 	CU_ASSERT(channel != NULL);
1446 
1447 	/* Write to a blob with 0 size */
1448 	spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL);
1449 	poll_threads();
1450 	CU_ASSERT(g_bserrno == -EINVAL);
1451 
1452 	/* Resize the blob */
1453 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
1454 	poll_threads();
1455 	CU_ASSERT(g_bserrno == 0);
1456 
1457 	/* Confirm that write fails if blob is marked read-only. */
1458 	blob->data_ro = true;
1459 	spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL);
1460 	poll_threads();
1461 	CU_ASSERT(g_bserrno == -EPERM);
1462 	blob->data_ro = false;
1463 
1464 	/* Write to the blob */
1465 	spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL);
1466 	poll_threads();
1467 	CU_ASSERT(g_bserrno == 0);
1468 
1469 	/* Write starting beyond the end */
1470 	spdk_blob_io_write(blob, channel, payload, 5 * io_units_per_cluster, 1, blob_op_complete,
1471 			   NULL);
1472 	poll_threads();
1473 	CU_ASSERT(g_bserrno == -EINVAL);
1474 
1475 	/* Write starting at a valid location but going off the end */
1476 	spdk_blob_io_write(blob, channel, payload, 4 * io_units_per_cluster, io_units_per_cluster + 1,
1477 			   blob_op_complete, NULL);
1478 	poll_threads();
1479 	CU_ASSERT(g_bserrno == -EINVAL);
1480 
1481 	spdk_bs_free_io_channel(channel);
1482 	poll_threads();
1483 }
1484 
1485 static void
1486 blob_read(void)
1487 {
1488 	struct spdk_blob_store *bs = g_bs;
1489 	struct spdk_blob *blob = g_blob;
1490 	struct spdk_io_channel *channel;
1491 	uint64_t io_units_per_cluster;
1492 	uint8_t payload[10 * BLOCKLEN];
1493 
1494 	io_units_per_cluster = spdk_bs_get_cluster_size(bs) / spdk_bs_get_io_unit_size(bs);
1495 
1496 	channel = spdk_bs_alloc_io_channel(bs);
1497 	CU_ASSERT(channel != NULL);
1498 
1499 	/* Read from a blob with 0 size */
1500 	spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL);
1501 	poll_threads();
1502 	CU_ASSERT(g_bserrno == -EINVAL);
1503 
1504 	/* Resize the blob */
1505 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
1506 	poll_threads();
1507 	CU_ASSERT(g_bserrno == 0);
1508 
1509 	/* Confirm that read passes if blob is marked read-only. */
1510 	blob->data_ro = true;
1511 	spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL);
1512 	poll_threads();
1513 	CU_ASSERT(g_bserrno == 0);
1514 	blob->data_ro = false;
1515 
1516 	/* Read from the blob */
1517 	spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL);
1518 	poll_threads();
1519 	CU_ASSERT(g_bserrno == 0);
1520 
1521 	/* Read starting beyond the end */
1522 	spdk_blob_io_read(blob, channel, payload, 5 * io_units_per_cluster, 1, blob_op_complete,
1523 			  NULL);
1524 	poll_threads();
1525 	CU_ASSERT(g_bserrno == -EINVAL);
1526 
1527 	/* Read starting at a valid location but going off the end */
1528 	spdk_blob_io_read(blob, channel, payload, 4 * io_units_per_cluster, io_units_per_cluster + 1,
1529 			  blob_op_complete, NULL);
1530 	poll_threads();
1531 	CU_ASSERT(g_bserrno == -EINVAL);
1532 
1533 	spdk_bs_free_io_channel(channel);
1534 	poll_threads();
1535 }
1536 
1537 static void
1538 blob_rw_verify(void)
1539 {
1540 	struct spdk_blob_store *bs = g_bs;
1541 	struct spdk_blob *blob = g_blob;
1542 	struct spdk_io_channel *channel;
1543 	uint8_t payload_read[10 * BLOCKLEN];
1544 	uint8_t payload_write[10 * BLOCKLEN];
1545 
1546 	channel = spdk_bs_alloc_io_channel(bs);
1547 	CU_ASSERT(channel != NULL);
1548 
1549 	spdk_blob_resize(blob, 32, blob_op_complete, NULL);
1550 	poll_threads();
1551 	CU_ASSERT(g_bserrno == 0);
1552 
1553 	memset(payload_write, 0xE5, sizeof(payload_write));
1554 	spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
1555 	poll_threads();
1556 	CU_ASSERT(g_bserrno == 0);
1557 
1558 	memset(payload_read, 0x00, sizeof(payload_read));
1559 	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
1560 	poll_threads();
1561 	CU_ASSERT(g_bserrno == 0);
1562 	CU_ASSERT(memcmp(payload_write, payload_read, 4 * BLOCKLEN) == 0);
1563 
1564 	spdk_bs_free_io_channel(channel);
1565 	poll_threads();
1566 }
1567 
1568 static void
1569 blob_rw_verify_iov(void)
1570 {
1571 	struct spdk_blob_store *bs = g_bs;
1572 	struct spdk_blob *blob;
1573 	struct spdk_io_channel *channel;
1574 	uint8_t payload_read[10 * BLOCKLEN];
1575 	uint8_t payload_write[10 * BLOCKLEN];
1576 	struct iovec iov_read[3];
1577 	struct iovec iov_write[3];
1578 	void *buf;
1579 	uint32_t first_data_cluster = FIRST_DATA_CLUSTER(bs);
1580 
1581 	channel = spdk_bs_alloc_io_channel(bs);
1582 	CU_ASSERT(channel != NULL);
1583 
1584 	blob = ut_blob_create_and_open(bs, NULL);
1585 
1586 	spdk_blob_resize(blob, 2, blob_op_complete, NULL);
1587 	poll_threads();
1588 	CU_ASSERT(g_bserrno == 0);
1589 
1590 	/*
1591 	 * Manually adjust the offset of the blob's second cluster.  This allows
1592 	 *  us to make sure that the readv/write code correctly accounts for I/O
1593 	 *  that crosses cluster boundaries.  Start by asserting that the allocated
1594 	 *  clusters are where we expect before modifying the second cluster.
1595 	 */
1596 	CU_ASSERT(blob->active.clusters[0] == first_data_cluster * 256);
1597 	CU_ASSERT(blob->active.clusters[1] == (first_data_cluster + 1) * 256);
1598 	blob->active.clusters[1] = (first_data_cluster + 2) * 256;
1599 
1600 	memset(payload_write, 0xE5, sizeof(payload_write));
1601 	iov_write[0].iov_base = payload_write;
1602 	iov_write[0].iov_len = 1 * BLOCKLEN;
1603 	iov_write[1].iov_base = payload_write + 1 * BLOCKLEN;
1604 	iov_write[1].iov_len = 5 * BLOCKLEN;
1605 	iov_write[2].iov_base = payload_write + 6 * BLOCKLEN;
1606 	iov_write[2].iov_len = 4 * BLOCKLEN;
1607 	/*
1608 	 * Choose a page offset just before the cluster boundary.  The first 6 pages of payload
1609 	 *  will get written to the first cluster, the last 4 to the second cluster.
1610 	 */
1611 	spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
1612 	poll_threads();
1613 	CU_ASSERT(g_bserrno == 0);
1614 
1615 	memset(payload_read, 0xAA, sizeof(payload_read));
1616 	iov_read[0].iov_base = payload_read;
1617 	iov_read[0].iov_len = 3 * BLOCKLEN;
1618 	iov_read[1].iov_base = payload_read + 3 * BLOCKLEN;
1619 	iov_read[1].iov_len = 4 * BLOCKLEN;
1620 	iov_read[2].iov_base = payload_read + 7 * BLOCKLEN;
1621 	iov_read[2].iov_len = 3 * BLOCKLEN;
1622 	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
1623 	poll_threads();
1624 	CU_ASSERT(g_bserrno == 0);
1625 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * BLOCKLEN) == 0);
1626 
1627 	buf = calloc(1, 256 * BLOCKLEN);
1628 	SPDK_CU_ASSERT_FATAL(buf != NULL);
1629 	/* Check that cluster 2 on "disk" was not modified. */
1630 	CU_ASSERT(memcmp(buf, &g_dev_buffer[(first_data_cluster + 1) * 256 * BLOCKLEN],
1631 			 256 * BLOCKLEN) == 0);
1632 	free(buf);
1633 
1634 	spdk_blob_close(blob, blob_op_complete, NULL);
1635 	poll_threads();
1636 	CU_ASSERT(g_bserrno == 0);
1637 
1638 	spdk_bs_free_io_channel(channel);
1639 	poll_threads();
1640 }
1641 
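/*
 * Count the request sets currently available on the bs channel's reqs list.
 * blob_rw_verify_iov_nomem() uses this to confirm that a writev failing with
 * -ENOMEM returns all of its requests to the channel.
 */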
1642 static uint32_t
1643 bs_channel_get_req_count(struct spdk_io_channel *_channel)
1644 {
1645 	struct spdk_bs_channel *channel = spdk_io_channel_get_ctx(_channel);
1646 	struct spdk_bs_request_set *set;
1647 	uint32_t count = 0;
1648 
1649 	TAILQ_FOREACH(set, &channel->reqs, link) {
1650 		count++;
1651 	}
1652 
1653 	return count;
1654 }
1655 
1656 static void
1657 blob_rw_verify_iov_nomem(void)
1658 {
1659 	struct spdk_blob_store *bs = g_bs;
1660 	struct spdk_blob *blob = g_blob;
1661 	struct spdk_io_channel *channel;
1662 	uint8_t payload_write[10 * BLOCKLEN];
1663 	struct iovec iov_write[3];
1664 	uint32_t req_count;
1665 
1666 	channel = spdk_bs_alloc_io_channel(bs);
1667 	CU_ASSERT(channel != NULL);
1668 
1669 	spdk_blob_resize(blob, 2, blob_op_complete, NULL);
1670 	poll_threads();
1671 	CU_ASSERT(g_bserrno == 0);
1672 
1673 	/*
1674 	 * Choose a page offset just before the cluster boundary.  The first 6 pages of payload
1675 	 *  will get written to the first cluster, the last 4 to the second cluster.
1676 	 */
1677 	iov_write[0].iov_base = payload_write;
1678 	iov_write[0].iov_len = 1 * BLOCKLEN;
1679 	iov_write[1].iov_base = payload_write + 1 * BLOCKLEN;
1680 	iov_write[1].iov_len = 5 * BLOCKLEN;
1681 	iov_write[2].iov_base = payload_write + 6 * BLOCKLEN;
1682 	iov_write[2].iov_len = 4 * BLOCKLEN;
1683 	MOCK_SET(calloc, NULL);
1684 	req_count = bs_channel_get_req_count(channel);
1685 	spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
1686 	poll_threads();
1687 	CU_ASSERT(g_bserrno == -ENOMEM);
1688 	CU_ASSERT(req_count == bs_channel_get_req_count(channel));
1689 	MOCK_CLEAR(calloc);
1690 
1691 	spdk_bs_free_io_channel(channel);
1692 	poll_threads();
1693 }
1694 
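/* Verify that writev on a data-read-only blob fails with -EPERM while readv still succeeds. */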
1695 static void
1696 blob_rw_iov_read_only(void)
1697 {
1698 	struct spdk_blob_store *bs = g_bs;
1699 	struct spdk_blob *blob = g_blob;
1700 	struct spdk_io_channel *channel;
1701 	uint8_t payload_read[BLOCKLEN];
1702 	uint8_t payload_write[BLOCKLEN];
1703 	struct iovec iov_read;
1704 	struct iovec iov_write;
1705 
1706 	channel = spdk_bs_alloc_io_channel(bs);
1707 	CU_ASSERT(channel != NULL);
1708 
1709 	spdk_blob_resize(blob, 2, blob_op_complete, NULL);
1710 	poll_threads();
1711 	CU_ASSERT(g_bserrno == 0);
1712 
1713 	/* Verify that writev fails if the data_ro flag is set. */
1714 	blob->data_ro = true;
1715 	iov_write.iov_base = payload_write;
1716 	iov_write.iov_len = sizeof(payload_write);
1717 	spdk_blob_io_writev(blob, channel, &iov_write, 1, 0, 1, blob_op_complete, NULL);
1718 	poll_threads();
1719 	CU_ASSERT(g_bserrno == -EPERM);
1720 
1721 	/* Verify that reads pass if data_ro flag is set. */
1722 	iov_read.iov_base = payload_read;
1723 	iov_read.iov_len = sizeof(payload_read);
1724 	spdk_blob_io_readv(blob, channel, &iov_read, 1, 0, 1, blob_op_complete, NULL);
1725 	poll_threads();
1726 	CU_ASSERT(g_bserrno == 0);
1727 
1728 	spdk_bs_free_io_channel(channel);
1729 	poll_threads();
1730 }
1731 
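/*
 * Helper: read 'length' io_units one at a time so that no single request spans a cluster
 * boundary and therefore never needs to be split.
 */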
1732 static void
1733 _blob_io_read_no_split(struct spdk_blob *blob, struct spdk_io_channel *channel,
1734 		       uint8_t *payload, uint64_t offset, uint64_t length,
1735 		       spdk_blob_op_complete cb_fn, void *cb_arg)
1736 {
1737 	uint64_t i;
1738 	uint8_t *buf;
1739 	uint64_t io_unit_size = spdk_bs_get_io_unit_size(blob->bs);
1740 
1741 	/* To be sure that the operation is NOT split, read one io_unit at a time */
1742 	buf = payload;
1743 	for (i = 0; i < length; i++) {
1744 		spdk_blob_io_read(blob, channel, buf, i + offset, 1, blob_op_complete, NULL);
1745 		poll_threads();
1746 		if (g_bserrno != 0) {
1747 			/* Pass the error code up */
1748 			break;
1749 		}
1750 		buf += io_unit_size;
1751 	}
1752 
1753 	cb_fn(cb_arg, g_bserrno);
1754 }
1755 
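/* Helper: write 'length' io_units one at a time so that no single request is ever split. */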
1756 static void
1757 _blob_io_write_no_split(struct spdk_blob *blob, struct spdk_io_channel *channel,
1758 			uint8_t *payload, uint64_t offset, uint64_t length,
1759 			spdk_blob_op_complete cb_fn, void *cb_arg)
1760 {
1761 	uint64_t i;
1762 	uint8_t *buf;
1763 	uint64_t io_unit_size = spdk_bs_get_io_unit_size(blob->bs);
1764 
1765 	/* To be sure that the operation is NOT split, write one io_unit at a time */
1766 	buf = payload;
1767 	for (i = 0; i < length; i++) {
1768 		spdk_blob_io_write(blob, channel, buf, i + offset, 1, blob_op_complete, NULL);
1769 		poll_threads();
1770 		if (g_bserrno != 0) {
1771 			/* Pass the error code up */
1772 			break;
1773 		}
1774 		buf += io_unit_size;
1775 	}
1776 
1777 	cb_fn(cb_arg, g_bserrno);
1778 }
1779 
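/*
 * Exercise reads and writes spanning several clusters, which the blobstore must split into
 * per-cluster operations, and compare the results against unsplit single-io_unit I/O.
 */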
1780 static void
1781 blob_operation_split_rw(void)
1782 {
1783 	struct spdk_blob_store *bs = g_bs;
1784 	struct spdk_blob *blob;
1785 	struct spdk_io_channel *channel;
1786 	struct spdk_blob_opts opts;
1787 	uint64_t cluster_size;
1788 
1789 	uint64_t payload_size;
1790 	uint8_t *payload_read;
1791 	uint8_t *payload_write;
1792 	uint8_t *payload_pattern;
1793 
1794 	uint64_t io_unit_size;
1795 	uint64_t io_units_per_cluster;
1796 	uint64_t io_units_per_payload;
1797 
1798 	uint64_t i;
1799 
1800 	cluster_size = spdk_bs_get_cluster_size(bs);
1801 	io_unit_size = spdk_bs_get_io_unit_size(bs);
1802 	io_units_per_cluster = cluster_size / io_unit_size;
1803 	io_units_per_payload = io_units_per_cluster * 5;
1804 	payload_size = cluster_size * 5;
1805 
1806 	payload_read = malloc(payload_size);
1807 	SPDK_CU_ASSERT_FATAL(payload_read != NULL);
1808 
1809 	payload_write = malloc(payload_size);
1810 	SPDK_CU_ASSERT_FATAL(payload_write != NULL);
1811 
1812 	payload_pattern = malloc(payload_size);
1813 	SPDK_CU_ASSERT_FATAL(payload_pattern != NULL);
1814 
1815 	/* Prepare a pattern to write: 0xFF filler with a unique counter at the start of each io_unit */
1816 	memset(payload_pattern, 0xFF, payload_size);
1817 	for (i = 0; i < io_units_per_payload; i++) {
1818 		*((uint64_t *)(payload_pattern + io_unit_size * i)) = (i + 1);
1819 	}
1820 
1821 	channel = spdk_bs_alloc_io_channel(bs);
1822 	SPDK_CU_ASSERT_FATAL(channel != NULL);
1823 
1824 	/* Create blob */
1825 	ut_spdk_blob_opts_init(&opts);
1826 	opts.thin_provision = false;
1827 	opts.num_clusters = 5;
1828 
1829 	blob = ut_blob_create_and_open(bs, &opts);
1830 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
1831 
1832 	/* Initial read should return zeroed payload */
1833 	memset(payload_read, 0xFF, payload_size);
1834 	spdk_blob_io_read(blob, channel, payload_read, 0, io_units_per_payload, blob_op_complete, NULL);
1835 	poll_threads();
1836 	CU_ASSERT(g_bserrno == 0);
1837 	CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size));
1838 
1839 	/* Fill whole blob except the last io_unit */
1840 	spdk_blob_io_write(blob, channel, payload_pattern, 0, io_units_per_payload - 1,
1841 			   blob_op_complete, NULL);
1842 	poll_threads();
1843 	CU_ASSERT(g_bserrno == 0);
1844 
1845 	/* Write the last io_unit with the pattern */
1846 	spdk_blob_io_write(blob, channel, payload_pattern, io_units_per_payload - 1, 1,
1847 			   blob_op_complete, NULL);
1848 	poll_threads();
1849 	CU_ASSERT(g_bserrno == 0);
1850 
1851 	/* Read whole blob and check consistency */
1852 	memset(payload_read, 0xFF, payload_size);
1853 	spdk_blob_io_read(blob, channel, payload_read, 0, io_units_per_payload, blob_op_complete, NULL);
1854 	poll_threads();
1855 	CU_ASSERT(g_bserrno == 0);
1856 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size - io_unit_size) == 0);
1857 	CU_ASSERT(memcmp(payload_pattern, payload_read + payload_size - io_unit_size, io_unit_size) == 0);
1858 
1859 	/* Fill whole blob except the first io_unit */
1860 	spdk_blob_io_write(blob, channel, payload_pattern, 1, io_units_per_payload - 1,
1861 			   blob_op_complete, NULL);
1862 	poll_threads();
1863 	CU_ASSERT(g_bserrno == 0);
1864 
1865 	/* Write the first io_unit with the pattern */
1866 	spdk_blob_io_write(blob, channel, payload_pattern, 0, 1,
1867 			   blob_op_complete, NULL);
1868 	poll_threads();
1869 	CU_ASSERT(g_bserrno == 0);
1870 
1871 	/* Read whole blob and check consistency */
1872 	memset(payload_read, 0xFF, payload_size);
1873 	spdk_blob_io_read(blob, channel, payload_read, 0, io_units_per_payload, blob_op_complete, NULL);
1874 	poll_threads();
1875 	CU_ASSERT(g_bserrno == 0);
1876 	CU_ASSERT(memcmp(payload_pattern, payload_read + io_unit_size, payload_size - io_unit_size) == 0);
1877 	CU_ASSERT(memcmp(payload_pattern, payload_read, io_unit_size) == 0);
1878 
1879 
1880 	/* Fill whole blob with a pattern (5 clusters) */
1881 
1882 	/* 1. Read test: write without splitting, then read back in one call that must be split. */
1883 	_blob_io_write_no_split(blob, channel, payload_pattern, 0, io_units_per_payload,
1884 				blob_op_complete, NULL);
1885 	poll_threads();
1886 	CU_ASSERT(g_bserrno == 0);
1887 
1888 	memset(payload_read, 0xFF, payload_size);
1889 	spdk_blob_io_read(blob, channel, payload_read, 0, io_units_per_payload, blob_op_complete, NULL);
1890 	poll_threads();
1891 	poll_threads();
1892 	CU_ASSERT(g_bserrno == 0);
1893 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);
1894 
1895 	/* 2. Write test: write in one call that must be split, then read back without splitting. */
1896 	spdk_blob_io_write(blob, channel, payload_pattern, 0, io_units_per_payload,
1897 			   blob_op_complete, NULL);
1898 	poll_threads();
1899 	CU_ASSERT(g_bserrno == 0);
1900 
1901 	memset(payload_read, 0xFF, payload_size);
1902 	_blob_io_read_no_split(blob, channel, payload_read, 0, io_units_per_payload, blob_op_complete,
1903 			       NULL);
1904 	poll_threads();
1905 	CU_ASSERT(g_bserrno == 0);
1906 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);
1907 
1908 	spdk_bs_free_io_channel(channel);
1909 	poll_threads();
1910 
1911 	g_blob = NULL;
1912 	g_blobid = 0;
1913 
1914 	free(payload_read);
1915 	free(payload_write);
1916 	free(payload_pattern);
1917 
1918 	ut_blob_close_and_delete(bs, blob);
1919 }
1920 
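/* Same as blob_operation_split_rw, but using the vectored I/O paths (readv/writev). */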
1921 static void
1922 blob_operation_split_rw_iov(void)
1923 {
1924 	struct spdk_blob_store *bs = g_bs;
1925 	struct spdk_blob *blob;
1926 	struct spdk_io_channel *channel;
1927 	struct spdk_blob_opts opts;
1928 	uint64_t cluster_size;
1929 
1930 	uint64_t payload_size;
1931 	uint8_t *payload_read;
1932 	uint8_t *payload_write;
1933 	uint8_t *payload_pattern;
1934 
1935 	uint64_t io_unit_size;
1936 	uint64_t io_units_per_cluster;
1937 	uint64_t io_units_per_payload;
1938 
1939 	struct iovec iov_read[2];
1940 	struct iovec iov_write[2];
1941 
1942 	uint64_t i, j;
1943 
1944 	cluster_size = spdk_bs_get_cluster_size(bs);
1945 	io_unit_size = spdk_bs_get_io_unit_size(bs);
1946 	io_units_per_cluster = cluster_size / io_unit_size;
1947 	io_units_per_payload = io_units_per_cluster * 5;
1948 	payload_size = cluster_size * 5;
1949 
1950 	payload_read = malloc(payload_size);
1951 	SPDK_CU_ASSERT_FATAL(payload_read != NULL);
1952 
1953 	payload_write = malloc(payload_size);
1954 	SPDK_CU_ASSERT_FATAL(payload_write != NULL);
1955 
1956 	payload_pattern = malloc(payload_size);
1957 	SPDK_CU_ASSERT_FATAL(payload_pattern != NULL);
1958 
1959 	/* Prepare a pattern to write: every uint64_t of io_unit i is set to i + 1 */
1960 	for (i = 0; i < io_units_per_payload; i++) {
1961 		for (j = 0; j < io_unit_size / sizeof(uint64_t); j++) {
1962 			uint64_t *tmp;
1963 
1964 			tmp = (uint64_t *)payload_pattern;
1965 			tmp += ((io_unit_size * i) / sizeof(uint64_t)) + j;
1966 			*tmp = i + 1;
1967 		}
1968 	}
1969 
1970 	channel = spdk_bs_alloc_io_channel(bs);
1971 	SPDK_CU_ASSERT_FATAL(channel != NULL);
1972 
1973 	/* Create blob */
1974 	ut_spdk_blob_opts_init(&opts);
1975 	opts.thin_provision = false;
1976 	opts.num_clusters = 5;
1977 
1978 	blob = ut_blob_create_and_open(bs, &opts);
1979 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
1980 
1981 	/* Initial read should return a zeroed payload */
1982 	memset(payload_read, 0xFF, payload_size);
1983 	iov_read[0].iov_base = payload_read;
1984 	iov_read[0].iov_len = cluster_size * 3;
1985 	iov_read[1].iov_base = payload_read + cluster_size * 3;
1986 	iov_read[1].iov_len = cluster_size * 2;
1987 	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, io_units_per_payload, blob_op_complete, NULL);
1988 	poll_threads();
1989 	CU_ASSERT(g_bserrno == 0);
1990 	CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size));
1991 
1992 	/* The first iov fills the whole blob except the last io_unit; the second iov writes the
1993 	 *  last io_unit with the pattern. */
1994 	iov_write[0].iov_base = payload_pattern;
1995 	iov_write[0].iov_len = payload_size - io_unit_size;
1996 	iov_write[1].iov_base = payload_pattern;
1997 	iov_write[1].iov_len = io_unit_size;
1998 	spdk_blob_io_writev(blob, channel, iov_write, 2, 0, io_units_per_payload, blob_op_complete, NULL);
1999 	poll_threads();
2000 	CU_ASSERT(g_bserrno == 0);
2001 
2002 	/* Read whole blob and check consistency */
2003 	memset(payload_read, 0xFF, payload_size);
2004 	iov_read[0].iov_base = payload_read;
2005 	iov_read[0].iov_len = cluster_size * 2;
2006 	iov_read[1].iov_base = payload_read + cluster_size * 2;
2007 	iov_read[1].iov_len = cluster_size * 3;
2008 	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, io_units_per_payload, blob_op_complete, NULL);
2009 	poll_threads();
2010 	CU_ASSERT(g_bserrno == 0);
2011 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size - io_unit_size) == 0);
2012 	CU_ASSERT(memcmp(payload_pattern, payload_read + payload_size - io_unit_size, io_unit_size) == 0);
2013 
2014 	/* The first iov fills only the first io_unit; the second iov writes the rest of the blob
2015 	 *  with the pattern. */
2016 	iov_write[0].iov_base = payload_pattern;
2017 	iov_write[0].iov_len = io_unit_size;
2018 	iov_write[1].iov_base = payload_pattern;
2019 	iov_write[1].iov_len = payload_size - io_unit_size;
2020 	spdk_blob_io_writev(blob, channel, iov_write, 2, 0, io_units_per_payload, blob_op_complete, NULL);
2021 	poll_threads();
2022 	CU_ASSERT(g_bserrno == 0);
2023 
2024 	/* Read whole blob and check consistency */
2025 	memset(payload_read, 0xFF, payload_size);
2026 	iov_read[0].iov_base = payload_read;
2027 	iov_read[0].iov_len = cluster_size * 4;
2028 	iov_read[1].iov_base = payload_read + cluster_size * 4;
2029 	iov_read[1].iov_len = cluster_size;
2030 	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, io_units_per_payload, blob_op_complete, NULL);
2031 	poll_threads();
2032 	CU_ASSERT(g_bserrno == 0);
2033 	CU_ASSERT(memcmp(payload_pattern, payload_read + io_unit_size, payload_size - io_unit_size) == 0);
2034 	CU_ASSERT(memcmp(payload_pattern, payload_read, io_unit_size) == 0);
2035 
2036 
2037 	/* Fill whole blob with a pattern (5 clusters) */
2038 
2039 	/* 1. Read test: write without splitting, then readv in one call that must be split. */
2040 	_blob_io_write_no_split(blob, channel, payload_pattern, 0, io_units_per_payload,
2041 				blob_op_complete, NULL);
2042 	poll_threads();
2043 	CU_ASSERT(g_bserrno == 0);
2044 
2045 	memset(payload_read, 0xFF, payload_size);
2046 	iov_read[0].iov_base = payload_read;
2047 	iov_read[0].iov_len = cluster_size;
2048 	iov_read[1].iov_base = payload_read + cluster_size;
2049 	iov_read[1].iov_len = cluster_size * 4;
2050 	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, io_units_per_payload, blob_op_complete, NULL);
2051 	poll_threads();
2052 	CU_ASSERT(g_bserrno == 0);
2053 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);
2054 
2055 	/* 2. Write test: writev in one call that must be split, then read back without splitting. */
2056 	iov_write[0].iov_base = payload_read;
2057 	iov_write[0].iov_len = cluster_size * 2;
2058 	iov_write[1].iov_base = payload_read + cluster_size * 2;
2059 	iov_write[1].iov_len = cluster_size * 3;
2060 	spdk_blob_io_writev(blob, channel, iov_write, 2, 0, io_units_per_payload, blob_op_complete, NULL);
2061 	poll_threads();
2062 	CU_ASSERT(g_bserrno == 0);
2063 
2064 	memset(payload_read, 0xFF, payload_size);
2065 	_blob_io_read_no_split(blob, channel, payload_read, 0, io_units_per_payload, blob_op_complete,
2066 			       NULL);
2067 	poll_threads();
2068 	CU_ASSERT(g_bserrno == 0);
2069 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);
2070 
2071 	spdk_bs_free_io_channel(channel);
2072 	poll_threads();
2073 
2074 	g_blob = NULL;
2075 	g_blobid = 0;
2076 
2077 	free(payload_read);
2078 	free(payload_write);
2079 	free(payload_pattern);
2080 
2081 	ut_blob_close_and_delete(bs, blob);
2082 }
2083 
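/*
 * Verify that resizing a blob down to 0 clusters unmaps (zeroes on the device) only the
 * clusters that are still marked as allocated in the blob's cluster table.
 */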
2084 static void
2085 blob_unmap(void)
2086 {
2087 	struct spdk_blob_store *bs = g_bs;
2088 	struct spdk_blob *blob;
2089 	struct spdk_io_channel *channel;
2090 	struct spdk_blob_opts opts;
2091 	uint8_t payload[BLOCKLEN];
2092 	uint32_t first_data_cluster = FIRST_DATA_CLUSTER(bs);
2093 	int i;
2094 
2095 	channel = spdk_bs_alloc_io_channel(bs);
2096 	CU_ASSERT(channel != NULL);
2097 
2098 	ut_spdk_blob_opts_init(&opts);
2099 	opts.num_clusters = 10;
2100 
2101 	blob = ut_blob_create_and_open(bs, &opts);
2102 
2103 	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
2104 	poll_threads();
2105 	CU_ASSERT(g_bserrno == 0);
2106 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 10);
2107 
2108 	memset(payload, 0, sizeof(payload));
2109 	payload[0] = 0xFF;
2110 
2111 	/*
2112 	 * Set the first byte of every cluster to 0xFF directly in the backing device buffer.
2113 	 */
2114 	for (i = 0; i < 10; i++) {
2115 		g_dev_buffer[(first_data_cluster + i) * SPDK_BLOB_OPTS_CLUSTER_SZ] = 0xFF;
2116 	}
2117 
2118 	/* Confirm via blob reads that the first byte of every cluster reads back as 0xFF */
2119 	for (i = 0; i < 10; i++) {
2120 		payload[0] = 0;
2121 		spdk_blob_io_read(blob, channel, &payload, i * SPDK_BLOB_OPTS_CLUSTER_SZ / BLOCKLEN, 1,
2122 				  blob_op_complete, NULL);
2123 		poll_threads();
2124 		CU_ASSERT(g_bserrno == 0);
2125 		CU_ASSERT(payload[0] == 0xFF);
2126 	}
2127 
2128 	/* Mark some clusters as unallocated */
2129 	blob->active.clusters[1] = 0;
2130 	blob->active.clusters[2] = 0;
2131 	blob->active.clusters[3] = 0;
2132 	blob->active.clusters[6] = 0;
2133 	blob->active.clusters[8] = 0;
2134 	blob->active.num_allocated_clusters -= 5;
2135 
2136 	/* Unmap clusters by resizing to 0 */
2137 	spdk_blob_resize(blob, 0, blob_op_complete, NULL);
2138 	poll_threads();
2139 	CU_ASSERT(g_bserrno == 0);
2140 
2141 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
2142 	poll_threads();
2143 	CU_ASSERT(g_bserrno == 0);
2144 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);
2145 
2146 	/* Confirm that only 'allocated' clusters were unmapped */
2147 	for (i = 0; i < 10; i++) {
2148 		switch (i) {
2149 		case 1:
2150 		case 2:
2151 		case 3:
2152 		case 6:
2153 		case 8:
2154 			CU_ASSERT(g_dev_buffer[(first_data_cluster + i) * SPDK_BLOB_OPTS_CLUSTER_SZ] == 0xFF);
2155 			break;
2156 		default:
2157 			CU_ASSERT(g_dev_buffer[(first_data_cluster + i) * SPDK_BLOB_OPTS_CLUSTER_SZ] == 0);
2158 			break;
2159 		}
2160 	}
2161 
2162 	spdk_bs_free_io_channel(channel);
2163 	poll_threads();
2164 
2165 	ut_blob_close_and_delete(bs, blob);
2166 }
2167 
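/* Iterate over blobs: an empty blobstore yields -ENOENT, then a single created blob is returned. */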
2168 static void
2169 blob_iter(void)
2170 {
2171 	struct spdk_blob_store *bs = g_bs;
2172 	struct spdk_blob *blob;
2173 	spdk_blob_id blobid;
2174 	struct spdk_blob_opts blob_opts;
2175 
2176 	spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL);
2177 	poll_threads();
2178 	CU_ASSERT(g_blob == NULL);
2179 	CU_ASSERT(g_bserrno == -ENOENT);
2180 
2181 	ut_spdk_blob_opts_init(&blob_opts);
2182 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
2183 	poll_threads();
2184 	CU_ASSERT(g_bserrno == 0);
2185 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
2186 	blobid = g_blobid;
2187 
2188 	spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL);
2189 	poll_threads();
2190 	CU_ASSERT(g_blob != NULL);
2191 	CU_ASSERT(g_bserrno == 0);
2192 	blob = g_blob;
2193 	CU_ASSERT(spdk_blob_get_id(blob) == blobid);
2194 
2195 	spdk_bs_iter_next(bs, blob, blob_op_with_handle_complete, NULL);
2196 	poll_threads();
2197 	CU_ASSERT(g_blob == NULL);
2198 	CU_ASSERT(g_bserrno == -ENOENT);
2199 }
2200 
2201 static void
2202 blob_xattr(void)
2203 {
2204 	struct spdk_blob_store *bs = g_bs;
2205 	struct spdk_blob *blob = g_blob;
2206 	spdk_blob_id blobid = spdk_blob_get_id(blob);
2207 	uint64_t length;
2208 	int rc;
2209 	const char *name1, *name2;
2210 	const void *value;
2211 	size_t value_len;
2212 	struct spdk_xattr_names *names;
2213 
2214 	/* Test that set_xattr fails if md_ro flag is set. */
2215 	blob->md_ro = true;
2216 	rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
2217 	CU_ASSERT(rc == -EPERM);
2218 
2219 	blob->md_ro = false;
2220 	rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
2221 	CU_ASSERT(rc == 0);
2222 
2223 	length = 2345;
2224 	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
2225 	CU_ASSERT(rc == 0);
2226 
2227 	/* Overwrite "length" xattr. */
2228 	length = 3456;
2229 	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
2230 	CU_ASSERT(rc == 0);
2231 
2232 	/* get_xattr should still work even if md_ro flag is set. */
2233 	value = NULL;
2234 	blob->md_ro = true;
2235 	rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len);
2236 	CU_ASSERT(rc == 0);
2237 	SPDK_CU_ASSERT_FATAL(value != NULL);
2238 	CU_ASSERT(*(uint64_t *)value == length);
2239 	CU_ASSERT(value_len == 8);
2240 	blob->md_ro = false;
2241 
2242 	rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len);
2243 	CU_ASSERT(rc == -ENOENT);
2244 
2245 	names = NULL;
2246 	rc = spdk_blob_get_xattr_names(blob, &names);
2247 	CU_ASSERT(rc == 0);
2248 	SPDK_CU_ASSERT_FATAL(names != NULL);
2249 	CU_ASSERT(spdk_xattr_names_get_count(names) == 2);
2250 	name1 = spdk_xattr_names_get_name(names, 0);
2251 	SPDK_CU_ASSERT_FATAL(name1 != NULL);
2252 	CU_ASSERT(!strcmp(name1, "name") || !strcmp(name1, "length"));
2253 	name2 = spdk_xattr_names_get_name(names, 1);
2254 	SPDK_CU_ASSERT_FATAL(name2 != NULL);
2255 	CU_ASSERT(!strcmp(name2, "name") || !strcmp(name2, "length"));
2256 	CU_ASSERT(strcmp(name1, name2));
2257 	spdk_xattr_names_free(names);
2258 
2259 	/* Confirm that remove_xattr fails if md_ro is set to true. */
2260 	blob->md_ro = true;
2261 	rc = spdk_blob_remove_xattr(blob, "name");
2262 	CU_ASSERT(rc == -EPERM);
2263 
2264 	blob->md_ro = false;
2265 	rc = spdk_blob_remove_xattr(blob, "name");
2266 	CU_ASSERT(rc == 0);
2267 
2268 	rc = spdk_blob_remove_xattr(blob, "foobar");
2269 	CU_ASSERT(rc == -ENOENT);
2270 
2271 	/* Set internal xattr */
2272 	length = 7898;
2273 	rc = blob_set_xattr(blob, "internal", &length, sizeof(length), true);
2274 	CU_ASSERT(rc == 0);
2275 	rc = blob_get_xattr_value(blob, "internal", &value, &value_len, true);
2276 	CU_ASSERT(rc == 0);
2277 	CU_ASSERT(*(uint64_t *)value == length);
2278 	/* Try to get a public xattr with the same name; it must not return the internal one */
2279 	rc = spdk_blob_get_xattr_value(blob, "internal", &value, &value_len);
2280 	CU_ASSERT(rc != 0);
2281 	rc = blob_get_xattr_value(blob, "internal", &value, &value_len, false);
2282 	CU_ASSERT(rc != 0);
2283 	/* Check if SPDK_BLOB_INTERNAL_XATTR is set */
2284 	CU_ASSERT((blob->invalid_flags & SPDK_BLOB_INTERNAL_XATTR) ==
2285 		  SPDK_BLOB_INTERNAL_XATTR);
2286 
2287 	spdk_blob_close(blob, blob_op_complete, NULL);
2288 	poll_threads();
2289 
2290 	/* Check if xattrs are persisted */
2291 	ut_bs_reload(&bs, NULL);
2292 
2293 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
2294 	poll_threads();
2295 	CU_ASSERT(g_bserrno == 0);
2296 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
2297 	blob = g_blob;
2298 
2299 	rc = blob_get_xattr_value(blob, "internal", &value, &value_len, true);
2300 	CU_ASSERT(rc == 0);
2301 	CU_ASSERT(*(uint64_t *)value == length);
2302 
2303 	/* Try to get the internal xattr through the public call; it should fail */
2304 	rc = spdk_blob_get_xattr_value(blob, "internal", &value, &value_len);
2305 	CU_ASSERT(rc != 0);
2306 
2307 	rc = blob_remove_xattr(blob, "internal", true);
2308 	CU_ASSERT(rc == 0);
2309 
2310 	CU_ASSERT((blob->invalid_flags & SPDK_BLOB_INTERNAL_XATTR) == 0);
2311 }
2312 
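/*
 * Force blob metadata to span more than one md page via a large xattr and verify that all
 * md pages are released again when the blob is deleted.
 */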
2313 static void
2314 blob_parse_md(void)
2315 {
2316 	struct spdk_blob_store *bs = g_bs;
2317 	struct spdk_blob *blob;
2318 	int rc;
2319 	uint32_t used_pages;
2320 	size_t xattr_length;
2321 	char *xattr;
2322 
2323 	used_pages = spdk_bit_array_count_set(bs->used_md_pages);
2324 	blob = ut_blob_create_and_open(bs, NULL);
2325 
2326 	/* Create a large xattr to force more than 1 page of metadata. */
2327 	xattr_length = SPDK_BS_MAX_DESC_SIZE - sizeof(struct spdk_blob_md_descriptor_xattr) -
2328 		       strlen("large_xattr");
2329 	xattr = calloc(xattr_length, sizeof(char));
2330 	SPDK_CU_ASSERT_FATAL(xattr != NULL);
2331 	rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
2332 	free(xattr);
2333 	SPDK_CU_ASSERT_FATAL(rc == 0);
2334 
2335 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
2336 	poll_threads();
2337 
2338 	/* Delete the blob and verify that the number of used md pages returns to its pre-creation value. */
2339 	SPDK_CU_ASSERT_FATAL(used_pages != spdk_bit_array_count_set(bs->used_md_pages));
2340 	ut_blob_close_and_delete(bs, blob);
2341 	SPDK_CU_ASSERT_FATAL(used_pages == spdk_bit_array_count_set(bs->used_md_pages));
2342 }
2343 
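/*
 * Initialize a blobstore, create a blob with xattrs, then exercise spdk_bs_load() with
 * various invalid options and device sizes before reloading and verifying the blob.
 */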
2344 static void
2345 bs_load(void)
2346 {
2347 	struct spdk_blob_store *bs;
2348 	struct spdk_bs_dev *dev;
2349 	spdk_blob_id blobid;
2350 	struct spdk_blob *blob;
2351 	struct spdk_bs_super_block *super_block;
2352 	uint64_t length;
2353 	int rc;
2354 	const void *value;
2355 	size_t value_len;
2356 	struct spdk_bs_opts opts;
2357 	struct spdk_blob_opts blob_opts;
2358 
2359 	dev = init_dev();
2360 	spdk_bs_opts_init(&opts, sizeof(opts));
2361 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2362 
2363 	/* Initialize a new blob store */
2364 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2365 	poll_threads();
2366 	CU_ASSERT(g_bserrno == 0);
2367 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2368 	bs = g_bs;
2369 
2370 	/* Try to open a blobid that does not exist */
2371 	spdk_bs_open_blob(bs, 0, blob_op_with_handle_complete, NULL);
2372 	poll_threads();
2373 	CU_ASSERT(g_bserrno == -ENOENT);
2374 	CU_ASSERT(g_blob == NULL);
2375 
2376 	/* Create a blob */
2377 	blob = ut_blob_create_and_open(bs, NULL);
2378 	blobid = spdk_blob_get_id(blob);
2379 
2380 	/* Try again to open a valid blob, but with the upper 32 bits of the id cleared */
2381 	spdk_bs_open_blob(bs, blobid & 0xFFFFFFFF, blob_op_with_handle_complete, NULL);
2382 	poll_threads();
2383 	CU_ASSERT(g_bserrno == -ENOENT);
2384 	CU_ASSERT(g_blob == NULL);
2385 
2386 	/* Set some xattrs */
2387 	rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
2388 	CU_ASSERT(rc == 0);
2389 
2390 	length = 2345;
2391 	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
2392 	CU_ASSERT(rc == 0);
2393 
2394 	/* Resize the blob */
2395 	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
2396 	poll_threads();
2397 	CU_ASSERT(g_bserrno == 0);
2398 
2399 	spdk_blob_close(blob, blob_op_complete, NULL);
2400 	poll_threads();
2401 	CU_ASSERT(g_bserrno == 0);
2402 	blob = NULL;
2403 	g_blob = NULL;
2404 	g_blobid = SPDK_BLOBID_INVALID;
2405 
2406 	/* Unload the blob store */
2407 	spdk_bs_unload(bs, bs_op_complete, NULL);
2408 	poll_threads();
2409 	CU_ASSERT(g_bserrno == 0);
2410 	g_bs = NULL;
2411 	g_blob = NULL;
2412 	g_blobid = 0;
2413 
2414 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
2415 	CU_ASSERT(super_block->clean == 1);
2416 
2417 	/* Load should fail for device with an unsupported blocklen */
2418 	dev = init_dev();
2419 	dev->blocklen = g_phys_blocklen * 2;
2420 	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
2421 	poll_threads();
2422 	CU_ASSERT(g_bserrno == -EINVAL);
2423 
2424 	/* Load should fail when max_md_ops is set to zero */
2425 	dev = init_dev();
2426 	spdk_bs_opts_init(&opts, sizeof(opts));
2427 	opts.max_md_ops = 0;
2428 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2429 	poll_threads();
2430 	CU_ASSERT(g_bserrno == -EINVAL);
2431 
2432 	/* Load should fail when max_channel_ops is set to zero */
2433 	dev = init_dev();
2434 	spdk_bs_opts_init(&opts, sizeof(opts));
2435 	opts.max_channel_ops = 0;
2436 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2437 	poll_threads();
2438 	CU_ASSERT(g_bserrno == -EINVAL);
2439 
2440 	/* Load an existing blob store */
2441 	dev = init_dev();
2442 	spdk_bs_opts_init(&opts, sizeof(opts));
2443 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2444 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2445 	poll_threads();
2446 	CU_ASSERT(g_bserrno == 0);
2447 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2448 	bs = g_bs;
2449 
2450 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
2451 	CU_ASSERT(super_block->clean == 1);
2452 	CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen);
2453 
2454 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
2455 	poll_threads();
2456 	CU_ASSERT(g_bserrno == 0);
2457 	CU_ASSERT(g_blob != NULL);
2458 	blob = g_blob;
2459 
2460 	/* Verify that blobstore is marked dirty after first metadata sync */
2461 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
2462 	CU_ASSERT(super_block->clean == 1);
2463 
2464 	/* Get the xattrs */
2465 	value = NULL;
2466 	rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len);
2467 	CU_ASSERT(rc == 0);
2468 	SPDK_CU_ASSERT_FATAL(value != NULL);
2469 	CU_ASSERT(*(uint64_t *)value == length);
2470 	CU_ASSERT(value_len == 8);
2471 
2472 	rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len);
2473 	CU_ASSERT(rc == -ENOENT);
2474 
2475 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
2476 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 10);
2477 
2478 	spdk_blob_close(blob, blob_op_complete, NULL);
2479 	poll_threads();
2480 	CU_ASSERT(g_bserrno == 0);
2481 	blob = NULL;
2482 	g_blob = NULL;
2483 
2484 	spdk_bs_unload(bs, bs_op_complete, NULL);
2485 	poll_threads();
2486 	CU_ASSERT(g_bserrno == 0);
2487 	g_bs = NULL;
2488 
2489 	/* Load should fail: bdev size < saved size */
2490 	dev = init_dev();
2491 	dev->blockcnt /= 2;
2492 
2493 	spdk_bs_opts_init(&opts, sizeof(opts));
2494 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2495 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2496 	poll_threads();
2497 
2498 	CU_ASSERT(g_bserrno == -EILSEQ);
2499 
2500 	/* Load should succeed: bdev size > saved size */
2501 	dev = init_dev();
2502 	dev->blockcnt *= 4;
2503 
2504 	spdk_bs_opts_init(&opts, sizeof(opts));
2505 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2506 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2507 	poll_threads();
2508 	CU_ASSERT(g_bserrno == 0);
2509 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2510 	bs = g_bs;
2511 
2512 	CU_ASSERT(g_bserrno == 0);
2513 	spdk_bs_unload(bs, bs_op_complete, NULL);
2514 	poll_threads();
2515 
2516 
2517 	/* Test compatibility mode */
2518 
2519 	dev = init_dev();
2520 	super_block->size = 0;
2521 	super_block->crc = blob_md_page_calc_crc(super_block);
2522 
2523 	spdk_bs_opts_init(&opts, sizeof(opts));
2524 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2525 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2526 	poll_threads();
2527 	CU_ASSERT(g_bserrno == 0);
2528 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2529 	bs = g_bs;
2530 
2531 	/* Create a blob */
2532 	ut_spdk_blob_opts_init(&blob_opts);
2533 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
2534 	poll_threads();
2535 	CU_ASSERT(g_bserrno == 0);
2536 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
2537 
2538 	/* Blobstore should update the size recorded in the super_block */
2539 	CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen);
2540 	CU_ASSERT(super_block->clean == 0);
2541 
2542 	spdk_bs_unload(bs, bs_op_complete, NULL);
2543 	poll_threads();
2544 	CU_ASSERT(g_bserrno == 0);
2545 	CU_ASSERT(super_block->clean == 1);
2546 	g_bs = NULL;
2547 
2548 }
2549 
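/*
 * Verify handling of the SNAPSHOT_PENDING_REMOVAL xattr across blobstore reloads: the
 * snapshot is kept while a blob still references it and removed once it is orphaned.
 */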
2550 static void
2551 bs_load_pending_removal(void)
2552 {
2553 	struct spdk_blob_store *bs = g_bs;
2554 	struct spdk_blob_opts opts;
2555 	struct spdk_blob *blob, *snapshot;
2556 	spdk_blob_id blobid, snapshotid;
2557 	const void *value;
2558 	size_t value_len;
2559 	int rc;
2560 
2561 	/* Create blob */
2562 	ut_spdk_blob_opts_init(&opts);
2563 	opts.num_clusters = 10;
2564 
2565 	blob = ut_blob_create_and_open(bs, &opts);
2566 	blobid = spdk_blob_get_id(blob);
2567 
2568 	/* Create snapshot */
2569 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
2570 	poll_threads();
2571 	CU_ASSERT(g_bserrno == 0);
2572 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
2573 	snapshotid = g_blobid;
2574 
2575 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
2576 	poll_threads();
2577 	CU_ASSERT(g_bserrno == 0);
2578 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
2579 	snapshot = g_blob;
2580 
2581 	/* Set SNAPSHOT_PENDING_REMOVAL xattr */
2582 	snapshot->md_ro = false;
2583 	rc = blob_set_xattr(snapshot, SNAPSHOT_PENDING_REMOVAL, &blobid, sizeof(spdk_blob_id), true);
2584 	CU_ASSERT(rc == 0);
2585 	snapshot->md_ro = true;
2586 
2587 	spdk_blob_close(snapshot, blob_op_complete, NULL);
2588 	poll_threads();
2589 	CU_ASSERT(g_bserrno == 0);
2590 
2591 	spdk_blob_close(blob, blob_op_complete, NULL);
2592 	poll_threads();
2593 	CU_ASSERT(g_bserrno == 0);
2594 
2595 	/* Reload blobstore */
2596 	ut_bs_reload(&bs, NULL);
2597 
2598 	/* Snapshot should not be removed as blob is still pointing to it */
2599 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
2600 	poll_threads();
2601 	CU_ASSERT(g_bserrno == 0);
2602 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
2603 	snapshot = g_blob;
2604 
2605 	/* SNAPSHOT_PENDING_REMOVAL xattr should be removed during load */
2606 	rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_PENDING_REMOVAL, &value, &value_len);
2607 	CU_ASSERT(rc != 0);
2608 
2609 	/* Set SNAPSHOT_PENDING_REMOVAL xattr again */
2610 	snapshot->md_ro = false;
2611 	rc = blob_set_xattr(snapshot, SNAPSHOT_PENDING_REMOVAL, &blobid, sizeof(spdk_blob_id), true);
2612 	CU_ASSERT(rc == 0);
2613 	snapshot->md_ro = true;
2614 
2615 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
2616 	poll_threads();
2617 	CU_ASSERT(g_bserrno == 0);
2618 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
2619 	blob = g_blob;
2620 
2621 	/* Remove parent_id from blob by removing BLOB_SNAPSHOT xattr */
2622 	blob_remove_xattr(blob, BLOB_SNAPSHOT, true);
2623 
2624 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
2625 	poll_threads();
2626 	CU_ASSERT(g_bserrno == 0);
2627 
2628 	spdk_blob_close(snapshot, blob_op_complete, NULL);
2629 	poll_threads();
2630 	CU_ASSERT(g_bserrno == 0);
2631 
2632 	spdk_blob_close(blob, blob_op_complete, NULL);
2633 	poll_threads();
2634 	CU_ASSERT(g_bserrno == 0);
2635 
2636 	/* Reload blobstore */
2637 	ut_bs_reload(&bs, NULL);
2638 
2639 	/* Snapshot should be removed as blob is not pointing to it anymore */
2640 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
2641 	poll_threads();
2642 	CU_ASSERT(g_bserrno != 0);
2643 }
2644 
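/* Verify that a non-default cluster size is persisted and restored across a blobstore reload. */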
2645 static void
2646 bs_load_custom_cluster_size(void)
2647 {
2648 	struct spdk_blob_store *bs;
2649 	struct spdk_bs_dev *dev;
2650 	struct spdk_bs_super_block *super_block;
2651 	struct spdk_bs_opts opts;
2652 	uint32_t custom_cluster_size = 4194304; /* 4MiB */
2653 	uint32_t cluster_sz;
2654 	uint64_t total_clusters;
2655 
2656 	dev = init_dev();
2657 	spdk_bs_opts_init(&opts, sizeof(opts));
2658 	opts.cluster_sz = custom_cluster_size;
2659 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2660 
2661 	/* Initialize a new blob store */
2662 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2663 	poll_threads();
2664 	CU_ASSERT(g_bserrno == 0);
2665 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2666 	bs = g_bs;
2667 	cluster_sz = bs->cluster_sz;
2668 	total_clusters = bs->total_clusters;
2669 
2670 	/* Unload the blob store */
2671 	spdk_bs_unload(bs, bs_op_complete, NULL);
2672 	poll_threads();
2673 	CU_ASSERT(g_bserrno == 0);
2674 	g_bs = NULL;
2675 	g_blob = NULL;
2676 	g_blobid = 0;
2677 
2678 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
2679 	CU_ASSERT(super_block->clean == 1);
2680 
2681 	/* Load an existing blob store */
2682 	dev = init_dev();
2683 	spdk_bs_opts_init(&opts, sizeof(opts));
2684 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2685 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2686 	poll_threads();
2687 	CU_ASSERT(g_bserrno == 0);
2688 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2689 	bs = g_bs;
2690 	/* Compare cluster size and number to one after initialization */
2691 	CU_ASSERT(cluster_sz == bs->cluster_sz);
2692 	CU_ASSERT(total_clusters == bs->total_clusters);
2693 
2694 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
2695 	CU_ASSERT(super_block->clean == 1);
2696 	CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen);
2697 
2698 	spdk_bs_unload(bs, bs_op_complete, NULL);
2699 	poll_threads();
2700 	CU_ASSERT(g_bserrno == 0);
2701 	CU_ASSERT(super_block->clean == 1);
2702 	g_bs = NULL;
2703 }
2704 
2705 static void
2706 bs_load_after_failed_grow(void)
2707 {
2708 	struct spdk_blob_store *bs;
2709 	struct spdk_bs_dev *dev;
2710 	struct spdk_bs_super_block *super_block;
2711 	struct spdk_bs_opts opts;
2712 	struct spdk_bs_md_mask *mask;
2713 	struct spdk_blob_opts blob_opts;
2714 	struct spdk_blob *blob, *snapshot;
2715 	spdk_blob_id blobid, snapshotid;
2716 	uint64_t total_data_clusters;
2717 
2718 	dev = init_dev();
2719 	spdk_bs_opts_init(&opts, sizeof(opts));
2720 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2721 	/*
2722 	 * The bdev_size is 64M, cluster_sz is 1M, so there are 64 clusters. The
2723 	 * blobstore will create 64 md pages by default. We set num_md_pages to 128,
2724 	 * thus the blobstore can grow to double the size.
2725 	 */
2726 	opts.num_md_pages = 128;
2727 
2728 	/* Initialize a new blob store */
2729 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2730 	poll_threads();
2731 	CU_ASSERT(g_bserrno == 0);
2732 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2733 	bs = g_bs;
2734 
2735 	/* Create blob */
2736 	ut_spdk_blob_opts_init(&blob_opts);
2737 	blob_opts.num_clusters = 10;
2738 
2739 	blob = ut_blob_create_and_open(bs, &blob_opts);
2740 	blobid = spdk_blob_get_id(blob);
2741 
2742 	/* Create snapshot */
2743 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
2744 	poll_threads();
2745 	CU_ASSERT(g_bserrno == 0);
2746 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
2747 	snapshotid = g_blobid;
2748 
2749 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
2750 	poll_threads();
2751 	CU_ASSERT(g_bserrno == 0);
2752 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
2753 	snapshot = g_blob;
2754 
2755 	spdk_blob_close(snapshot, blob_op_complete, NULL);
2756 	poll_threads();
2757 	CU_ASSERT(g_bserrno == 0);
2758 
2759 	spdk_blob_close(blob, blob_op_complete, NULL);
2760 	poll_threads();
2761 	CU_ASSERT(g_bserrno == 0);
2762 
2763 	total_data_clusters = bs->total_data_clusters;
2764 	CU_ASSERT(bs->num_free_clusters + 10 == total_data_clusters);
2765 
2766 	/* Unload the blob store */
2767 	spdk_bs_unload(bs, bs_op_complete, NULL);
2768 	poll_threads();
2769 	CU_ASSERT(g_bserrno == 0);
2770 	g_bs = NULL;
2771 	g_blob = NULL;
2772 	g_blobid = 0;
2773 
2774 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
2775 	CU_ASSERT(super_block->clean == 1);
2776 
2777 	mask = (struct spdk_bs_md_mask *)(g_dev_buffer + super_block->used_cluster_mask_start *
2778 					  g_phys_blocklen);
2779 	CU_ASSERT(mask->type == SPDK_MD_MASK_TYPE_USED_CLUSTERS);
2780 	CU_ASSERT(mask->length == super_block->size / super_block->cluster_size);
2781 
2782 	/*
2783 	 * We change the mask->length to emulate this scenario: A spdk_bs_grow failed after it changed
2784 	 * the used_cluster bitmap length, but it didn't change the super block yet.
2785 	 */
2786 	mask->length *= 2;
2787 
2788 	/* Load an existing blob store */
2789 	dev = init_dev();
2790 	dev->blockcnt *= 2;
2791 	spdk_bs_opts_init(&opts, sizeof(opts));
2792 	opts.clear_method = BS_CLEAR_WITH_NONE;
2793 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2794 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2795 	poll_threads();
2796 	CU_ASSERT(g_bserrno == 0);
2797 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2798 	bs = g_bs;
2799 
2800 	/* Check the capacity is the same as before */
2801 	CU_ASSERT(bs->total_data_clusters == total_data_clusters);
2802 	CU_ASSERT(bs->num_free_clusters + 10 == total_data_clusters);
2803 
2804 	/* Check the blob and the snapshot are still available */
2805 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
2806 	poll_threads();
2807 	CU_ASSERT(g_bserrno == 0);
2808 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
2809 	blob = g_blob;
2810 
2811 	spdk_blob_close(blob, blob_op_complete, NULL);
2812 	poll_threads();
2813 	CU_ASSERT(g_bserrno == 0);
2814 
2815 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
2816 	poll_threads();
2817 	CU_ASSERT(g_bserrno == 0);
2818 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
2819 	snapshot = g_blob;
2820 
2821 	spdk_blob_close(snapshot, blob_op_complete, NULL);
2822 	poll_threads();
2823 	CU_ASSERT(g_bserrno == 0);
2824 
2825 	spdk_bs_unload(bs, bs_op_complete, NULL);
2826 	poll_threads();
2827 	CU_ASSERT(g_bserrno == 0);
2828 	CU_ASSERT(super_block->clean == 1);
2829 	g_bs = NULL;
2830 }
2831 
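/* Exercise spdk_bs_load() failure paths: an injected device I/O error and an allocation failure. */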
2832 static void
2833 bs_load_error(void)
2834 {
2835 	struct spdk_blob_store *bs;
2836 	struct spdk_bs_dev *dev;
2837 	struct spdk_bs_opts opts;
2838 	struct spdk_power_failure_thresholds thresholds = {};
2839 
2840 	dev = init_dev();
2841 	spdk_bs_opts_init(&opts, sizeof(opts));
2842 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2843 
2844 	/* Initialize a new blob store */
2845 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2846 	poll_threads();
2847 	CU_ASSERT(g_bserrno == 0);
2848 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2849 	bs = g_bs;
2850 
2851 	/* Unload the blob store */
2852 	spdk_bs_unload(bs, bs_op_complete, NULL);
2853 	poll_threads();
2854 	CU_ASSERT(g_bserrno == 0);
2855 
2856 	/* Load fails with I/O error */
2857 	thresholds.general_threshold = 2;
2858 	dev_set_power_failure_thresholds(thresholds);
2859 	g_bserrno = -1;
2860 	dev = init_dev();
2861 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2862 	poll_threads();
2863 	CU_ASSERT(g_bserrno == -EIO);
2864 	CU_ASSERT(g_bs == NULL);
2865 	dev_reset_power_failure_event();
2866 
2867 	/* Load fails with NOMEM error */
2868 	g_bserrno = -1;
2869 	dev = init_dev();
2870 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2871 	MOCK_SET(spdk_zmalloc, NULL);
2872 	poll_threads();
2873 	CU_ASSERT(g_bserrno == -ENOMEM);
2874 	CU_ASSERT(g_bs == NULL);
2875 	MOCK_CLEAR(spdk_zmalloc);
2876 }
2877 
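/*
 * Verify bstype matching on load: a mismatched type is rejected, while an empty type in
 * the load options is accepted.
 */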
2878 static void
2879 bs_type(void)
2880 {
2881 	struct spdk_blob_store *bs;
2882 	struct spdk_bs_dev *dev;
2883 	struct spdk_bs_opts opts;
2884 
2885 	dev = init_dev();
2886 	spdk_bs_opts_init(&opts, sizeof(opts));
2887 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2888 
2889 	/* Initialize a new blob store */
2890 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2891 	poll_threads();
2892 	CU_ASSERT(g_bserrno == 0);
2893 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2894 	bs = g_bs;
2895 
2896 	/* Unload the blob store */
2897 	spdk_bs_unload(bs, bs_op_complete, NULL);
2898 	poll_threads();
2899 	CU_ASSERT(g_bserrno == 0);
2900 	g_bs = NULL;
2901 	g_blob = NULL;
2902 	g_blobid = 0;
2903 
2904 	/* Load a non-existing blobstore type */
2905 	dev = init_dev();
2906 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "NONEXISTING");
2907 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2908 	poll_threads();
2909 	CU_ASSERT(g_bserrno != 0);
2910 
2911 	/* Load with empty blobstore type */
2912 	dev = init_dev();
2913 	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
2914 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2915 	poll_threads();
2916 	CU_ASSERT(g_bserrno == 0);
2917 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2918 	bs = g_bs;
2919 
2920 	spdk_bs_unload(bs, bs_op_complete, NULL);
2921 	poll_threads();
2922 	CU_ASSERT(g_bserrno == 0);
2923 	g_bs = NULL;
2924 
2925 	/* Initialize a new blob store with empty bstype */
2926 	dev = init_dev();
2927 	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
2928 	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
2929 	poll_threads();
2930 	CU_ASSERT(g_bserrno == 0);
2931 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2932 	bs = g_bs;
2933 
2934 	spdk_bs_unload(bs, bs_op_complete, NULL);
2935 	poll_threads();
2936 	CU_ASSERT(g_bserrno == 0);
2937 	g_bs = NULL;
2938 
2939 	/* Load a non-existing blobstore type */
2940 	dev = init_dev();
2941 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "NONEXISTING");
2942 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2943 	poll_threads();
2944 	CU_ASSERT(g_bserrno != 0);
2945 
2946 	/* Load with empty blobstore type */
2947 	dev = init_dev();
2948 	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
2949 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2950 	poll_threads();
2951 	CU_ASSERT(g_bserrno == 0);
2952 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2953 	bs = g_bs;
2954 
2955 	spdk_bs_unload(bs, bs_op_complete, NULL);
2956 	poll_threads();
2957 	CU_ASSERT(g_bserrno == 0);
2958 	g_bs = NULL;
2959 }
2960 
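/*
 * Verify super block version handling: a version newer than supported is rejected, while a
 * version 1 super block can still be loaded.
 */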
2961 static void
2962 bs_super_block(void)
2963 {
2964 	struct spdk_blob_store *bs;
2965 	struct spdk_bs_dev *dev;
2966 	struct spdk_bs_super_block *super_block;
2967 	struct spdk_bs_opts opts;
2968 	struct spdk_bs_super_block_ver1 super_block_v1;
2969 
2970 	dev = init_dev();
2971 	spdk_bs_opts_init(&opts, sizeof(opts));
2972 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2973 
2974 	/* Initialize a new blob store */
2975 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2976 	poll_threads();
2977 	CU_ASSERT(g_bserrno == 0);
2978 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2979 	bs = g_bs;
2980 
2981 	/* Unload the blob store */
2982 	spdk_bs_unload(bs, bs_op_complete, NULL);
2983 	poll_threads();
2984 	CU_ASSERT(g_bserrno == 0);
2985 	g_bs = NULL;
2986 	g_blob = NULL;
2987 	g_blobid = 0;
2988 
2989 	/* Load an existing blob store with version newer than supported */
2990 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
2991 	super_block->version++;
2992 
2993 	dev = init_dev();
2994 	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
2995 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2996 	poll_threads();
2997 	CU_ASSERT(g_bserrno != 0);
2998 
2999 	/* Create a new blob store with super block version 1 */
3000 	dev = init_dev();
3001 	super_block_v1.version = 1;
3002 	memcpy(super_block_v1.signature, "SPDKBLOB", sizeof(super_block_v1.signature));
3003 	super_block_v1.length = 0x1000;
3004 	super_block_v1.clean = 1;
3005 	super_block_v1.super_blob = 0xFFFFFFFFFFFFFFFF;
3006 	super_block_v1.cluster_size = 0x100000;
3007 	super_block_v1.used_page_mask_start = 0x01;
3008 	super_block_v1.used_page_mask_len = 0x01;
3009 	super_block_v1.used_cluster_mask_start = 0x02;
3010 	super_block_v1.used_cluster_mask_len = 0x01;
3011 	super_block_v1.md_start = 0x03;
3012 	super_block_v1.md_len = 0x40;
3013 	memset(super_block_v1.reserved, 0, 4036);
3014 	super_block_v1.crc = blob_md_page_calc_crc(&super_block_v1);
3015 	memcpy(g_dev_buffer, &super_block_v1, sizeof(struct spdk_bs_super_block_ver1));
3016 
3017 	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
3018 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
3019 	poll_threads();
3020 	CU_ASSERT(g_bserrno == 0);
3021 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3022 	bs = g_bs;
3023 
3024 	spdk_bs_unload(bs, bs_op_complete, NULL);
3025 	poll_threads();
3026 	CU_ASSERT(g_bserrno == 0);
3027 	g_bs = NULL;
3028 }
3029 
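/*
 * Load a hand-crafted super block marked unclean (clean == 0) and verify that the recovered
 * free-cluster count accounts only for the metadata region.
 */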
3030 static void
3031 bs_test_recover_cluster_count(void)
3032 {
3033 	struct spdk_blob_store *bs;
3034 	struct spdk_bs_dev *dev;
3035 	struct spdk_bs_super_block super_block;
3036 	struct spdk_bs_opts opts;
3037 
3038 	dev = init_dev();
3039 	spdk_bs_opts_init(&opts, sizeof(opts));
3040 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
3041 
3042 	super_block.version = 3;
3043 	memcpy(super_block.signature, "SPDKBLOB", sizeof(super_block.signature));
3044 	super_block.length = 0x1000;
3045 	super_block.clean = 0;
3046 	super_block.super_blob = 0xFFFFFFFFFFFFFFFF;
3047 	super_block.cluster_size = g_phys_blocklen;
3048 	super_block.used_page_mask_start = 0x01;
3049 	super_block.used_page_mask_len = 0x01;
3050 	super_block.used_cluster_mask_start = 0x02;
3051 	super_block.used_cluster_mask_len = 0x01;
3052 	super_block.used_blobid_mask_start = 0x03;
3053 	super_block.used_blobid_mask_len = 0x01;
3054 	super_block.md_page_size = g_phys_blocklen;
3055 	super_block.md_start = 0x04;
3056 	super_block.md_len = 0x40;
3057 	memset(super_block.bstype.bstype, 0, sizeof(super_block.bstype.bstype));
3058 	super_block.size = dev->blockcnt * dev->blocklen;
3059 	super_block.io_unit_size = 0x1000;
3060 	memset(super_block.reserved, 0, SPDK_SIZEOF_MEMBER(struct spdk_bs_super_block, reserved));
3061 	super_block.crc = blob_md_page_calc_crc(&super_block);
3062 	memcpy(g_dev_buffer, &super_block, sizeof(struct spdk_bs_super_block));
3063 
3064 	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
3065 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
3066 	poll_threads();
3067 	CU_ASSERT(g_bserrno == 0);
3068 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3069 	bs = g_bs;
3070 	CU_ASSERT(bs->num_free_clusters == bs->total_clusters - (super_block.md_start +
3071 			super_block.md_len));
3072 
3073 	spdk_bs_unload(bs, bs_op_complete, NULL);
3074 	poll_threads();
3075 	CU_ASSERT(g_bserrno == 0);
3076 	g_bs = NULL;
3077 }
3078 
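/*
 * Grow a live blobstore to new_blockcnt and verify that the super block and used_cluster
 * mask on the device reflect the new size after unload and reload.
 */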
3079 static void
3080 bs_grow_live_size(uint64_t new_blockcnt)
3081 {
3082 	struct spdk_blob_store *bs;
3083 	struct spdk_bs_dev *dev;
3084 	struct spdk_bs_super_block super_block;
3085 	struct spdk_bs_opts opts;
3086 	struct spdk_bs_md_mask mask;
3087 	uint64_t bdev_size;
3088 	uint64_t total_data_clusters;
3089 
3090 	/*
3091 	 * Further down the test the dev size will be larger than the g_dev_buffer size,
3092 	 * so we set clear_method to NONE, or the blobstore will try to clear the dev and
3093 	 * will write beyond the end of g_dev_buffer.
3094 	 */
3095 	dev = init_dev();
3096 	spdk_bs_opts_init(&opts, sizeof(opts));
3097 	opts.clear_method = BS_CLEAR_WITH_NONE;
3098 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
3099 	poll_threads();
3100 	CU_ASSERT(g_bserrno == 0);
3101 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3102 	bs = g_bs;
3103 
3104 	/*
3105 	 * Set the dev size according to the new_blockcnt,
3106 	 * then the blobstore will adjust the metadata according to the new size.
3107 	 */
3108 	dev->blockcnt = new_blockcnt;
3109 	bdev_size = dev->blockcnt * dev->blocklen;
3110 	spdk_bs_grow_live(bs, bs_op_complete, NULL);
3111 	poll_threads();
3112 	CU_ASSERT(g_bserrno == 0);
3113 	total_data_clusters = spdk_bs_total_data_cluster_count(bs);
3114 
3115 	/* Make sure the super block is updated. */
3116 	memcpy(&super_block, g_dev_buffer, sizeof(struct spdk_bs_super_block));
3117 	CU_ASSERT(super_block.size == bdev_size);
3118 	CU_ASSERT(super_block.clean == 0);
3119 	/* The used_cluster mask is not written out until first spdk_bs_unload. */
3120 	memcpy(&mask, g_dev_buffer + super_block.used_cluster_mask_start * g_phys_blocklen,
3121 	       sizeof(struct spdk_bs_md_mask));
3122 	CU_ASSERT(mask.type == 0);
3123 	CU_ASSERT(mask.length == 0);
3124 
3125 	spdk_bs_unload(bs, bs_op_complete, NULL);
3126 	poll_threads();
3127 	CU_ASSERT(g_bserrno == 0);
3128 	g_bs = NULL;
3129 
3130 	/* Make sure all metadata is correct, super block and used_cluster mask. */
3131 	memcpy(&super_block, g_dev_buffer, sizeof(struct spdk_bs_super_block));
3132 	CU_ASSERT(super_block.size == bdev_size);
3133 	CU_ASSERT(super_block.clean == 1);
3134 	memcpy(&mask, g_dev_buffer + super_block.used_cluster_mask_start * g_phys_blocklen,
3135 	       sizeof(struct spdk_bs_md_mask));
3136 	CU_ASSERT(mask.type == SPDK_MD_MASK_TYPE_USED_CLUSTERS);
3137 	CU_ASSERT(mask.length == bdev_size / (1 * 1024 * 1024));
3138 
3139 	/* Load blobstore and check the cluster counts again. */
3140 	dev = init_dev();
3141 	dev->blockcnt = new_blockcnt;
3142 	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
3143 	poll_threads();
3144 	CU_ASSERT(g_bserrno == 0);
3145 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3146 	CU_ASSERT(super_block.clean == 1);
3147 	bs = g_bs;
3148 	CU_ASSERT(total_data_clusters == spdk_bs_total_data_cluster_count(bs));
3149 
3150 	/* Perform grow without a change in size; this is expected to pass. */
3151 	spdk_bs_grow_live(bs, bs_op_complete, NULL);
3152 	poll_threads();
3153 	CU_ASSERT(g_bserrno == 0);
3154 	CU_ASSERT(total_data_clusters == spdk_bs_total_data_cluster_count(bs));
3155 	memcpy(&super_block, g_dev_buffer, sizeof(struct spdk_bs_super_block));
3156 	CU_ASSERT(super_block.size == bdev_size);
3157 	CU_ASSERT(super_block.clean == 1);
3158 
3159 	spdk_bs_unload(bs, bs_op_complete, NULL);
3160 	poll_threads();
3161 	CU_ASSERT(g_bserrno == 0);
3162 	g_bs = NULL;
3163 }
3164 
3165 static void
3166 bs_grow_live(void)
3167 {
3168 	/* No change expected */
3169 	bs_grow_live_size(DEV_BUFFER_BLOCKCNT);
3170 
3171 	/* Size slightly increased, but not enough to increase cluster count */
3172 	bs_grow_live_size(DEV_BUFFER_BLOCKCNT + 1);
3173 
3174 	/* Size doubled, increasing the cluster count */
3175 	bs_grow_live_size(DEV_BUFFER_BLOCKCNT * 2);
3176 }
3177 
3178 static void
3179 bs_grow_live_no_space(void)
3180 {
3181 	struct spdk_blob_store *bs;
3182 	struct spdk_bs_dev *dev;
3183 	struct spdk_bs_super_block super_block;
3184 	struct spdk_bs_opts opts;
3185 	struct spdk_bs_md_mask mask;
3186 	uint64_t bdev_size_init;
3187 	uint64_t total_data_clusters, max_clusters;
3188 
3189 	/*
3190 	 * Further down the test the dev size will be larger than the g_dev_buffer size,
3191 	 * so we set clear_method to NONE, or the blobstore will try to clear the dev and
3192 	 * will write beyond the end of g_dev_buffer.
3193 	 */
3194 	dev = init_dev();
3195 	bdev_size_init = dev->blockcnt * dev->blocklen;
3196 	spdk_bs_opts_init(&opts, sizeof(opts));
3197 	opts.clear_method = BS_CLEAR_WITH_NONE;
3198 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
3199 	poll_threads();
3200 	CU_ASSERT(g_bserrno == 0);
3201 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3202 	bs = g_bs;
3203 	total_data_clusters = spdk_bs_total_data_cluster_count(bs);
3204 
3205 	/*
3206 	 * The default dev size is 64M; here we set the dev size to 32M,
3207 	 * expecting -EILSEQ from super_block validation and no change to the blobstore.
3208 	 */
3209 	dev->blockcnt = (32L * 1024L * 1024L) / dev->blocklen;
3210 	spdk_bs_grow_live(bs, bs_op_complete, NULL);
3211 	poll_threads();
3212 	/* This error code comes from bs_super_validate() */
3213 	CU_ASSERT(g_bserrno == -EILSEQ);
3214 	CU_ASSERT(total_data_clusters == spdk_bs_total_data_cluster_count(bs));
3215 	memcpy(&super_block, g_dev_buffer, sizeof(struct spdk_bs_super_block));
3216 	CU_ASSERT(super_block.size == bdev_size_init);
3217 
3218 	/*
3219 	 * The blobstore in this test has space for only a single md_page for the used_clusters
3220 	 * mask, which holds 1 bit per cluster minus the md header.
3221 	 *
3222 	 * The dev size is increased to exceed the space reserved for the used_cluster_mask
3223 	 * in the metadata, expecting -ENOSPC and no change to the blobstore.
3224 	 */
3225 	max_clusters = (spdk_bs_get_page_size(bs) - sizeof(struct spdk_bs_md_mask)) * 8;
3226 	max_clusters += 1;
3227 	dev->blockcnt = (max_clusters * spdk_bs_get_cluster_size(bs)) / dev->blocklen;
3228 	spdk_bs_grow_live(bs, bs_op_complete, NULL);
3229 	poll_threads();
3230 	CU_ASSERT(g_bserrno == -ENOSPC);
3231 	CU_ASSERT(total_data_clusters == spdk_bs_total_data_cluster_count(bs));
3232 	memcpy(&super_block, g_dev_buffer, sizeof(struct spdk_bs_super_block));
3233 	CU_ASSERT(super_block.size == bdev_size_init);
3234 
3235 	/*
3236 	 * No change should have occurred for the duration of the test;
3237 	 * unload the blobstore and check the metadata.
3238 	 */
3239 	spdk_bs_unload(bs, bs_op_complete, NULL);
3240 	poll_threads();
3241 	CU_ASSERT(g_bserrno == 0);
3242 	g_bs = NULL;
3243 
3244 	/* Make sure all metadata is correct, super block and used_cluster mask. */
3245 	memcpy(&super_block, g_dev_buffer, sizeof(struct spdk_bs_super_block));
3246 	CU_ASSERT(super_block.size == bdev_size_init);
3247 	CU_ASSERT(super_block.clean == 1);
3248 	memcpy(&mask, g_dev_buffer + super_block.used_cluster_mask_start * g_phys_blocklen,
3249 	       sizeof(struct spdk_bs_md_mask));
3250 	CU_ASSERT(mask.type == SPDK_MD_MASK_TYPE_USED_CLUSTERS);
3251 	CU_ASSERT(mask.length == bdev_size_init / (1 * 1024 * 1024));
3252 
3253 	/* Load blobstore and check the cluster counts again. */
3254 	dev = init_dev();
3255 	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
3256 	poll_threads();
3257 	CU_ASSERT(g_bserrno == 0);
3258 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3259 	bs = g_bs;
3260 	CU_ASSERT(total_data_clusters == spdk_bs_total_data_cluster_count(bs));
3261 
3262 	spdk_bs_unload(bs, bs_op_complete, NULL);
3263 	poll_threads();
3264 	CU_ASSERT(g_bserrno == 0);
3265 	g_bs = NULL;
3266 }
3267 
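/*
 * Grow an unloaded blobstore with spdk_bs_grow() and verify that the on-disk super block
 * and used_cluster mask are updated for the larger device.
 */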
3268 static void
3269 bs_test_grow(void)
3270 {
3271 	struct spdk_blob_store *bs;
3272 	struct spdk_bs_dev *dev;
3273 	struct spdk_bs_super_block super_block;
3274 	struct spdk_bs_opts opts;
3275 	struct spdk_bs_md_mask mask;
3276 	uint64_t bdev_size;
3277 
3278 	dev = init_dev();
3279 	bdev_size = dev->blockcnt * dev->blocklen;
3280 	spdk_bs_opts_init(&opts, sizeof(opts));
3281 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
3282 	poll_threads();
3283 	CU_ASSERT(g_bserrno == 0);
3284 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3285 	bs = g_bs;
3286 
3287 	spdk_bs_unload(bs, bs_op_complete, NULL);
3288 	poll_threads();
3289 	CU_ASSERT(g_bserrno == 0);
3290 	g_bs = NULL;
3291 
3292 	/*
3293 	 * To make sure all metadata has been written to the disk,
3294 	 * we check g_dev_buffer after spdk_bs_unload.
3295 	 */
3296 	memcpy(&super_block, g_dev_buffer, sizeof(struct spdk_bs_super_block));
3297 	CU_ASSERT(super_block.size == bdev_size);
3298 
3299 	/*
3300 	 * Make sure the used_cluster mask is correct.
3301 	 */
3302 	memcpy(&mask, g_dev_buffer + super_block.used_cluster_mask_start * g_phys_blocklen,
3303 	       sizeof(struct spdk_bs_md_mask));
3304 	CU_ASSERT(mask.type == SPDK_MD_MASK_TYPE_USED_CLUSTERS);
3305 	CU_ASSERT(mask.length == bdev_size / (1 * 1024 * 1024));
3306 
	/*
	 * The default dev size is 64M; here we set the dev size to 128M, so the
	 * blobstore will adjust the metadata according to the new size.
	 * The new dev size is larger than the g_dev_buffer size, so we set clear_method
	 * to NONE, otherwise the blobstore would try to clear the dev and write beyond
	 * the end of g_dev_buffer.
	 */
3314 	dev = init_dev();
3315 	dev->blockcnt = (128L * 1024L * 1024L) / dev->blocklen;
3316 	bdev_size = dev->blockcnt * dev->blocklen;
3317 	spdk_bs_opts_init(&opts, sizeof(opts));
3318 	opts.clear_method = BS_CLEAR_WITH_NONE;
3319 	spdk_bs_grow(dev, &opts, bs_op_with_handle_complete, NULL);
3320 	poll_threads();
3321 	CU_ASSERT(g_bserrno == 0);
3322 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3323 	bs = g_bs;
3324 
	/*
	 * After spdk_bs_grow, all of the metadata has been written to the disk,
	 * so g_dev_buffer can be checked now.
	 */
3329 	memcpy(&super_block, g_dev_buffer, sizeof(struct spdk_bs_super_block));
3330 	CU_ASSERT(super_block.size == bdev_size);
3331 
3332 	/*
3333 	 * Make sure the used_cluster mask has been updated according to the bdev size
3334 	 */
3335 	memcpy(&mask, g_dev_buffer + super_block.used_cluster_mask_start * g_phys_blocklen,
3336 	       sizeof(struct spdk_bs_md_mask));
3337 	CU_ASSERT(mask.type == SPDK_MD_MASK_TYPE_USED_CLUSTERS);
3338 	CU_ASSERT(mask.length == bdev_size / (1 * 1024 * 1024));
3339 
3340 	spdk_bs_unload(bs, bs_op_complete, NULL);
3341 	poll_threads();
3342 	CU_ASSERT(g_bserrno == 0);
3343 	g_bs = NULL;
3344 }
3345 
3346 /*
3347  * Create a blobstore and then unload it.
3348  */
3349 static void
3350 bs_unload(void)
3351 {
3352 	struct spdk_blob_store *bs = g_bs;
3353 	struct spdk_blob *blob;
3354 
3355 	/* Create a blob and open it. */
3356 	blob = ut_blob_create_and_open(bs, NULL);
3357 
3358 	/* Try to unload blobstore, should fail with open blob */
3359 	g_bserrno = -1;
3360 	spdk_bs_unload(bs, bs_op_complete, NULL);
3361 	poll_threads();
3362 	CU_ASSERT(g_bserrno == -EBUSY);
3363 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3364 
3365 	/* Close the blob, then successfully unload blobstore */
3366 	g_bserrno = -1;
3367 	spdk_blob_close(blob, blob_op_complete, NULL);
3368 	poll_threads();
3369 	CU_ASSERT(g_bserrno == 0);
3370 }
3371 
3372 /*
3373  * Create a blobstore with a cluster size different than the default, and ensure it is
3374  *  persisted.
3375  */
3376 static void
3377 bs_cluster_sz(void)
3378 {
3379 	struct spdk_blob_store *bs;
3380 	struct spdk_bs_dev *dev;
3381 	struct spdk_bs_opts opts;
3382 	uint32_t cluster_sz;
3383 
3384 	/* Set cluster size to zero */
3385 	dev = init_dev();
3386 	spdk_bs_opts_init(&opts, sizeof(opts));
3387 	opts.cluster_sz = 0;
3388 
3389 	/* Initialize a new blob store */
3390 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
3391 	poll_threads();
3392 	CU_ASSERT(g_bserrno == -EINVAL);
3393 	SPDK_CU_ASSERT_FATAL(g_bs == NULL);
3394 
	/*
	 * Set the cluster size equal to the blobstore page size;
	 * to be valid it must be at least twice the blobstore page size.
	 */
3399 	dev = init_dev();
3400 	spdk_bs_opts_init(&opts, sizeof(opts));
3401 	opts.cluster_sz = g_phys_blocklen;
3402 
3403 	/* Initialize a new blob store */
3404 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
3405 	poll_threads();
3406 	CU_ASSERT(g_bserrno == -ENOMEM);
3407 	SPDK_CU_ASSERT_FATAL(g_bs == NULL);
3408 
	/*
	 * Set the cluster size lower than the page size;
	 * to be valid it must be at least twice the blobstore page size.
	 */
3413 	dev = init_dev();
3414 	spdk_bs_opts_init(&opts, sizeof(opts));
3415 	opts.cluster_sz = g_phys_blocklen - 1;
3416 
3417 	/* Initialize a new blob store */
3418 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
3419 	poll_threads();
3420 	CU_ASSERT(g_bserrno == -EINVAL);
3421 	SPDK_CU_ASSERT_FATAL(g_bs == NULL);
3422 
3423 	/* Set cluster size to twice the default */
3424 	dev = init_dev();
3425 	spdk_bs_opts_init(&opts, sizeof(opts));
3426 	opts.cluster_sz *= 2;
3427 	cluster_sz = opts.cluster_sz;
3428 
3429 	/* Initialize a new blob store */
3430 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
3431 	poll_threads();
3432 	CU_ASSERT(g_bserrno == 0);
3433 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3434 	bs = g_bs;
3435 
3436 	CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz);
3437 
3438 	ut_bs_reload(&bs, &opts);
3439 
3440 	CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz);
3441 
3442 	spdk_bs_unload(bs, bs_op_complete, NULL);
3443 	poll_threads();
3444 	CU_ASSERT(g_bserrno == 0);
3445 	g_bs = NULL;
3446 }
3447 
3448 /*
3449  * Create a blobstore, reload it and ensure total usable cluster count
3450  *  stays the same.
3451  */
3452 static void
3453 bs_usable_clusters(void)
3454 {
3455 	struct spdk_blob_store *bs = g_bs;
3456 	struct spdk_blob *blob;
3457 	uint32_t clusters;
3458 	int i;
3459 
3460 
3461 	clusters = spdk_bs_total_data_cluster_count(bs);
3462 
3463 	ut_bs_reload(&bs, NULL);
3464 
3465 	CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters);
3466 
3467 	/* Create and resize blobs to make sure that usable cluster count won't change */
3468 	for (i = 0; i < 4; i++) {
3469 		g_bserrno = -1;
3470 		g_blobid = SPDK_BLOBID_INVALID;
3471 		blob = ut_blob_create_and_open(bs, NULL);
3472 
3473 		spdk_blob_resize(blob, 10, blob_op_complete, NULL);
3474 		poll_threads();
3475 		CU_ASSERT(g_bserrno == 0);
3476 
3477 		g_bserrno = -1;
3478 		spdk_blob_close(blob, blob_op_complete, NULL);
3479 		poll_threads();
3480 		CU_ASSERT(g_bserrno == 0);
3481 
3482 		CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters);
3483 	}
3484 
3485 	/* Reload the blob store to make sure that nothing changed */
3486 	ut_bs_reload(&bs, NULL);
3487 
3488 	CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters);
3489 }
3490 
/*
 * Test resizing of the metadata blob.  This requires creating enough blobs
 *  so that one cluster is not enough to fit the metadata for those blobs.
 *  To induce this condition more quickly, we reduce the cluster size to
 *  four metadata pages (16KB with the default 4KB page size), so only four
 *  blob metadata pages fit per cluster.
 */
3497 static void
3498 bs_resize_md(void)
3499 {
3500 	struct spdk_blob_store *bs;
3501 	const int CLUSTER_PAGE_COUNT = 4;
3502 	const int NUM_BLOBS = CLUSTER_PAGE_COUNT * 4;
3503 	struct spdk_bs_dev *dev;
3504 	struct spdk_bs_opts opts;
3505 	struct spdk_blob *blob;
3506 	struct spdk_blob_opts blob_opts;
3507 	uint32_t cluster_sz;
3508 	spdk_blob_id blobids[NUM_BLOBS];
3509 	int i;
3510 
3511 
3512 	dev = init_dev();
3513 	spdk_bs_opts_init(&opts, sizeof(opts));
3514 	opts.cluster_sz = CLUSTER_PAGE_COUNT * g_phys_blocklen;
3515 	cluster_sz = opts.cluster_sz;
3516 
3517 	/* Initialize a new blob store */
3518 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
3519 	poll_threads();
3520 	CU_ASSERT(g_bserrno == 0);
3521 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3522 	bs = g_bs;
3523 
3524 	CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz);
3525 
3526 	ut_spdk_blob_opts_init(&blob_opts);
3527 
3528 	for (i = 0; i < NUM_BLOBS; i++) {
3529 		g_bserrno = -1;
3530 		g_blobid = SPDK_BLOBID_INVALID;
3531 		spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
3532 		poll_threads();
3533 		CU_ASSERT(g_bserrno == 0);
3534 		CU_ASSERT(g_blobid !=  SPDK_BLOBID_INVALID);
3535 		blobids[i] = g_blobid;
3536 	}
3537 
3538 	ut_bs_reload(&bs, &opts);
3539 
3540 	CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz);
3541 
3542 	for (i = 0; i < NUM_BLOBS; i++) {
3543 		g_bserrno = -1;
3544 		g_blob = NULL;
3545 		spdk_bs_open_blob(bs, blobids[i], blob_op_with_handle_complete, NULL);
3546 		poll_threads();
3547 		CU_ASSERT(g_bserrno == 0);
3548 		CU_ASSERT(g_blob !=  NULL);
3549 		blob = g_blob;
3550 		g_bserrno = -1;
3551 		spdk_blob_close(blob, blob_op_complete, NULL);
3552 		poll_threads();
3553 		CU_ASSERT(g_bserrno == 0);
3554 	}
3555 
3556 	spdk_bs_unload(bs, bs_op_complete, NULL);
3557 	poll_threads();
3558 	CU_ASSERT(g_bserrno == 0);
3559 	g_bs = NULL;
3560 }
3561 
3562 static void
3563 bs_destroy(void)
3564 {
3565 	struct spdk_blob_store *bs;
3566 	struct spdk_bs_dev *dev;
3567 
3568 	/* Initialize a new blob store */
3569 	dev = init_dev();
3570 	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
3571 	poll_threads();
3572 	CU_ASSERT(g_bserrno == 0);
3573 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3574 	bs = g_bs;
3575 
3576 	/* Destroy the blob store */
3577 	g_bserrno = -1;
3578 	spdk_bs_destroy(bs, bs_op_complete, NULL);
3579 	poll_threads();
3580 	CU_ASSERT(g_bserrno == 0);
3581 
	/* Loading a non-existent blob store should fail. */
3583 	g_bs = NULL;
3584 	dev = init_dev();
3585 
3586 	g_bserrno = 0;
3587 	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
3588 	poll_threads();
3589 	CU_ASSERT(g_bserrno != 0);
3590 }
3591 
3592 /* Try to hit all of the corner cases associated with serializing
3593  * a blob to disk
3594  */
3595 static void
3596 blob_serialize_test(void)
3597 {
3598 	struct spdk_bs_dev *dev;
3599 	struct spdk_bs_opts opts;
3600 	struct spdk_blob_store *bs;
3601 	spdk_blob_id blobid[2];
3602 	struct spdk_blob *blob[2];
3603 	uint64_t i;
3604 	char *value;
3605 	int rc;
3606 
3607 	dev = init_dev();
3608 
3609 	/* Initialize a new blobstore with very small clusters */
3610 	spdk_bs_opts_init(&opts, sizeof(opts));
3611 	opts.cluster_sz = dev->blocklen * 8;
3612 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
3613 	poll_threads();
3614 	CU_ASSERT(g_bserrno == 0);
3615 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3616 	bs = g_bs;
3617 
3618 	/* Create and open two blobs */
3619 	for (i = 0; i < 2; i++) {
3620 		blob[i] = ut_blob_create_and_open(bs, NULL);
3621 		blobid[i] = spdk_blob_get_id(blob[i]);
3622 
3623 		/* Set a fairly large xattr on both blobs to eat up
3624 		 * metadata space
3625 		 */
3626 		value = calloc(dev->blocklen - 64, sizeof(char));
3627 		SPDK_CU_ASSERT_FATAL(value != NULL);
3628 		memset(value, i, dev->blocklen / 2);
3629 		rc = spdk_blob_set_xattr(blob[i], "name", value, dev->blocklen - 64);
3630 		CU_ASSERT(rc == 0);
3631 		free(value);
3632 	}
3633 
	/* Resize the blobs, alternating between them 1 cluster at a time.
	 * This thwarts run-length encoding and causes the extents to spill
	 * over into additional metadata pages.
	 */
3638 	for (i = 0; i < 6; i++) {
3639 		spdk_blob_resize(blob[i % 2], (i / 2) + 1, blob_op_complete, NULL);
3640 		poll_threads();
3641 		CU_ASSERT(g_bserrno == 0);
3642 	}
3643 
3644 	for (i = 0; i < 2; i++) {
3645 		spdk_blob_sync_md(blob[i], blob_op_complete, NULL);
3646 		poll_threads();
3647 		CU_ASSERT(g_bserrno == 0);
3648 	}
3649 
3650 	/* Close the blobs */
3651 	for (i = 0; i < 2; i++) {
3652 		spdk_blob_close(blob[i], blob_op_complete, NULL);
3653 		poll_threads();
3654 		CU_ASSERT(g_bserrno == 0);
3655 	}
3656 
3657 	ut_bs_reload(&bs, &opts);
3658 
3659 	for (i = 0; i < 2; i++) {
3660 		blob[i] = NULL;
3661 
3662 		spdk_bs_open_blob(bs, blobid[i], blob_op_with_handle_complete, NULL);
3663 		poll_threads();
3664 		CU_ASSERT(g_bserrno == 0);
3665 		CU_ASSERT(g_blob != NULL);
3666 		blob[i] = g_blob;
3667 
3668 		CU_ASSERT(spdk_blob_get_num_clusters(blob[i]) == 3);
3669 
3670 		spdk_blob_close(blob[i], blob_op_complete, NULL);
3671 		poll_threads();
3672 		CU_ASSERT(g_bserrno == 0);
3673 	}
3674 
3675 	spdk_bs_unload(bs, bs_op_complete, NULL);
3676 	poll_threads();
3677 	CU_ASSERT(g_bserrno == 0);
3678 	g_bs = NULL;
3679 }
3680 
3681 static void
3682 blob_crc(void)
3683 {
3684 	struct spdk_blob_store *bs = g_bs;
3685 	struct spdk_blob *blob;
3686 	spdk_blob_id blobid;
3687 	uint32_t page_num;
3688 	int index;
3689 	struct spdk_blob_md_page *page;
3690 
3691 	blob = ut_blob_create_and_open(bs, NULL);
3692 	blobid = spdk_blob_get_id(blob);
3693 
3694 	spdk_blob_close(blob, blob_op_complete, NULL);
3695 	poll_threads();
3696 	CU_ASSERT(g_bserrno == 0);
3697 
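	/* Locate the blob's metadata page on the backing device and zero its CRC,
	 * so that validation fails on the subsequent open and delete. */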
3698 	page_num = bs_blobid_to_page(blobid);
3699 	index = g_phys_blocklen * (bs->md_start + page_num);
3700 	page = (struct spdk_blob_md_page *)&g_dev_buffer[index];
3701 	page->crc = 0;
3702 
3703 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
3704 	poll_threads();
3705 	CU_ASSERT(g_bserrno == -EINVAL);
3706 	CU_ASSERT(g_blob == NULL);
3707 	g_bserrno = 0;
3708 
3709 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
3710 	poll_threads();
3711 	CU_ASSERT(g_bserrno == -EINVAL);
3712 }
3713 
3714 static void
3715 super_block_crc(void)
3716 {
3717 	struct spdk_blob_store *bs;
3718 	struct spdk_bs_dev *dev;
3719 	struct spdk_bs_super_block *super_block;
3720 
3721 	dev = init_dev();
3722 	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
3723 	poll_threads();
3724 	CU_ASSERT(g_bserrno == 0);
3725 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3726 	bs = g_bs;
3727 
3728 	spdk_bs_unload(bs, bs_op_complete, NULL);
3729 	poll_threads();
3730 	CU_ASSERT(g_bserrno == 0);
3731 	g_bs = NULL;
3732 
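	/* Corrupt the super block CRC directly in the device buffer; the subsequent load must fail CRC validation with -EILSEQ. */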
3733 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
3734 	super_block->crc = 0;
3735 	dev = init_dev();
3736 
3737 	/* Load an existing blob store */
3738 	g_bserrno = 0;
3739 	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
3740 	poll_threads();
3741 	CU_ASSERT(g_bserrno == -EILSEQ);
3742 }
3743 
/* For the blob dirty shutdown test case we run the following sub-test cases:
 * 1 Initialize a new blob store and create 1 super blob with some xattrs, then
 *   dirty shutdown and reload the blob store, and verify the xattrs.
 * 2 Resize the blob from 10 clusters to 20 clusters, then dirty shutdown,
 *   reload the blob store and verify the cluster count.
 * 3 Create a second blob, then dirty shutdown, reload the blob store
 *   and verify the second blob.
 * 4 Delete the second blob, then dirty shutdown, reload the blob store
 *   and verify that the second blob is invalid.
 * 5 Create the second blob again and also create a third blob, modify the
 *   md of the second blob to make it invalid, then dirty shutdown and
 *   reload the blob store; verify that the second blob is invalid and that
 *   the third blob is correct.
 */
3758 static void
3759 blob_dirty_shutdown(void)
3760 {
3761 	int rc;
3762 	int index;
3763 	struct spdk_blob_store *bs = g_bs;
3764 	spdk_blob_id blobid1, blobid2, blobid3;
3765 	struct spdk_blob *blob = g_blob;
3766 	uint64_t length;
3767 	uint64_t free_clusters;
3768 	const void *value;
3769 	size_t value_len;
3770 	uint32_t page_num;
3771 	struct spdk_blob_md_page *page;
3772 	struct spdk_blob_opts blob_opts;
3773 
3774 	/* Create first blob */
3775 	blobid1 = spdk_blob_get_id(blob);
3776 
3777 	/* Set some xattrs */
3778 	rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
3779 	CU_ASSERT(rc == 0);
3780 
3781 	length = 2345;
3782 	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
3783 	CU_ASSERT(rc == 0);
3784 
	/* Put an xattr that exactly fills a single metadata page.
	 * This results in additional pages being added to the MD:
	 * the first holds the flags and the smaller xattrs, the second
	 * holds the large xattr, and the third holds just the extents.
	 */
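	/* 4072 bytes corresponds to the descriptor space in a 4KiB metadata page;
	 * subtracting the xattr descriptor header and the name length makes this xattr fill the page exactly. */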
3790 	size_t xattr_length = 4072 - sizeof(struct spdk_blob_md_descriptor_xattr) -
3791 			      strlen("large_xattr");
3792 	char *xattr = calloc(xattr_length, sizeof(char));
3793 	SPDK_CU_ASSERT_FATAL(xattr != NULL);
3794 	rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
3795 	free(xattr);
3796 	SPDK_CU_ASSERT_FATAL(rc == 0);
3797 
3798 	/* Resize the blob */
3799 	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
3800 	poll_threads();
3801 	CU_ASSERT(g_bserrno == 0);
3802 
3803 	/* Set the blob as the super blob */
3804 	spdk_bs_set_super(bs, blobid1, blob_op_complete, NULL);
3805 	poll_threads();
3806 	CU_ASSERT(g_bserrno == 0);
3807 
3808 	free_clusters = spdk_bs_free_cluster_count(bs);
3809 
3810 	spdk_blob_close(blob, blob_op_complete, NULL);
3811 	poll_threads();
3812 	CU_ASSERT(g_bserrno == 0);
3813 	blob = NULL;
3814 	g_blob = NULL;
3815 	g_blobid = SPDK_BLOBID_INVALID;
3816 
3817 	ut_bs_dirty_load(&bs, NULL);
3818 
3819 	/* Get the super blob */
3820 	spdk_bs_get_super(bs, blob_op_with_id_complete, NULL);
3821 	poll_threads();
3822 	CU_ASSERT(g_bserrno == 0);
3823 	CU_ASSERT(blobid1 == g_blobid);
3824 
3825 	spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL);
3826 	poll_threads();
3827 	CU_ASSERT(g_bserrno == 0);
3828 	CU_ASSERT(g_blob != NULL);
3829 	blob = g_blob;
3830 
3831 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3832 
3833 	/* Get the xattrs */
3834 	value = NULL;
3835 	rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len);
3836 	CU_ASSERT(rc == 0);
3837 	SPDK_CU_ASSERT_FATAL(value != NULL);
3838 	CU_ASSERT(*(uint64_t *)value == length);
3839 	CU_ASSERT(value_len == 8);
3840 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
3841 
3842 	/* Resize the blob */
3843 	spdk_blob_resize(blob, 20, blob_op_complete, NULL);
3844 	poll_threads();
3845 	CU_ASSERT(g_bserrno == 0);
3846 
3847 	free_clusters = spdk_bs_free_cluster_count(bs);
3848 
3849 	spdk_blob_close(blob, blob_op_complete, NULL);
3850 	poll_threads();
3851 	CU_ASSERT(g_bserrno == 0);
3852 	blob = NULL;
3853 	g_blob = NULL;
3854 	g_blobid = SPDK_BLOBID_INVALID;
3855 
3856 	ut_bs_dirty_load(&bs, NULL);
3857 
3858 	spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL);
3859 	poll_threads();
3860 	CU_ASSERT(g_bserrno == 0);
3861 	CU_ASSERT(g_blob != NULL);
3862 	blob = g_blob;
3863 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 20);
3864 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3865 
3866 	spdk_blob_close(blob, blob_op_complete, NULL);
3867 	poll_threads();
3868 	CU_ASSERT(g_bserrno == 0);
3869 	blob = NULL;
3870 	g_blob = NULL;
3871 	g_blobid = SPDK_BLOBID_INVALID;
3872 
3873 	/* Create second blob */
3874 	blob = ut_blob_create_and_open(bs, NULL);
3875 	blobid2 = spdk_blob_get_id(blob);
3876 
3877 	/* Set some xattrs */
3878 	rc = spdk_blob_set_xattr(blob, "name", "log1.txt", strlen("log1.txt") + 1);
3879 	CU_ASSERT(rc == 0);
3880 
3881 	length = 5432;
3882 	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
3883 	CU_ASSERT(rc == 0);
3884 
3885 	/* Resize the blob */
3886 	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
3887 	poll_threads();
3888 	CU_ASSERT(g_bserrno == 0);
3889 
3890 	free_clusters = spdk_bs_free_cluster_count(bs);
3891 
3892 	spdk_blob_close(blob, blob_op_complete, NULL);
3893 	poll_threads();
3894 	CU_ASSERT(g_bserrno == 0);
3895 	blob = NULL;
3896 	g_blob = NULL;
3897 	g_blobid = SPDK_BLOBID_INVALID;
3898 
3899 	ut_bs_dirty_load(&bs, NULL);
3900 
3901 	spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL);
3902 	poll_threads();
3903 	CU_ASSERT(g_bserrno == 0);
3904 	CU_ASSERT(g_blob != NULL);
3905 	blob = g_blob;
3906 
3907 	/* Get the xattrs */
3908 	value = NULL;
3909 	rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len);
3910 	CU_ASSERT(rc == 0);
3911 	SPDK_CU_ASSERT_FATAL(value != NULL);
3912 	CU_ASSERT(*(uint64_t *)value == length);
3913 	CU_ASSERT(value_len == 8);
3914 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
3915 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3916 
3917 	ut_blob_close_and_delete(bs, blob);
3918 
3919 	free_clusters = spdk_bs_free_cluster_count(bs);
3920 
3921 	ut_bs_dirty_load(&bs, NULL);
3922 
3923 	spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL);
3924 	poll_threads();
3925 	CU_ASSERT(g_bserrno != 0);
3926 	CU_ASSERT(g_blob == NULL);
3927 
3928 	spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL);
3929 	poll_threads();
3930 	CU_ASSERT(g_bserrno == 0);
3931 	CU_ASSERT(g_blob != NULL);
3932 	blob = g_blob;
3933 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3934 	spdk_blob_close(blob, blob_op_complete, NULL);
3935 	poll_threads();
3936 	CU_ASSERT(g_bserrno == 0);
3937 
3938 	ut_bs_reload(&bs, NULL);
3939 
3940 	/* Create second blob */
3941 	ut_spdk_blob_opts_init(&blob_opts);
3942 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
3943 	poll_threads();
3944 	CU_ASSERT(g_bserrno == 0);
3945 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
3946 	blobid2 = g_blobid;
3947 
3948 	/* Create third blob */
3949 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
3950 	poll_threads();
3951 	CU_ASSERT(g_bserrno == 0);
3952 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
3953 	blobid3 = g_blobid;
3954 
3955 	spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL);
3956 	poll_threads();
3957 	CU_ASSERT(g_bserrno == 0);
3958 	CU_ASSERT(g_blob != NULL);
3959 	blob = g_blob;
3960 
3961 	/* Set some xattrs for second blob */
3962 	rc = spdk_blob_set_xattr(blob, "name", "log1.txt", strlen("log1.txt") + 1);
3963 	CU_ASSERT(rc == 0);
3964 
3965 	length = 5432;
3966 	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
3967 	CU_ASSERT(rc == 0);
3968 
3969 	spdk_blob_close(blob, blob_op_complete, NULL);
3970 	poll_threads();
3971 	CU_ASSERT(g_bserrno == 0);
3972 	blob = NULL;
3973 	g_blob = NULL;
3974 	g_blobid = SPDK_BLOBID_INVALID;
3975 
3976 	spdk_bs_open_blob(bs, blobid3, blob_op_with_handle_complete, NULL);
3977 	poll_threads();
3978 	CU_ASSERT(g_bserrno == 0);
3979 	CU_ASSERT(g_blob != NULL);
3980 	blob = g_blob;
3981 
3982 	/* Set some xattrs for third blob */
3983 	rc = spdk_blob_set_xattr(blob, "name", "log2.txt", strlen("log2.txt") + 1);
3984 	CU_ASSERT(rc == 0);
3985 
3986 	length = 5432;
3987 	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
3988 	CU_ASSERT(rc == 0);
3989 
3990 	spdk_blob_close(blob, blob_op_complete, NULL);
3991 	poll_threads();
3992 	CU_ASSERT(g_bserrno == 0);
3993 	blob = NULL;
3994 	g_blob = NULL;
3995 	g_blobid = SPDK_BLOBID_INVALID;
3996 
3997 	/* Mark second blob as invalid */
3998 	page_num = bs_blobid_to_page(blobid2);
3999 
4000 	index = g_phys_blocklen * (bs->md_start + page_num);
4001 	page = (struct spdk_blob_md_page *)&g_dev_buffer[index];
4002 	page->sequence_num = 1;
4003 	page->crc = blob_md_page_calc_crc(page);
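	/* The first metadata page of a blob must have sequence_num == 0, so even with a
	 * valid CRC this page no longer parses as the start of the blob's metadata chain. */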
4004 
4005 	free_clusters = spdk_bs_free_cluster_count(bs);
4006 
4007 	ut_bs_dirty_load(&bs, NULL);
4008 
4009 	spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL);
4010 	poll_threads();
4011 	CU_ASSERT(g_bserrno != 0);
4012 	CU_ASSERT(g_blob == NULL);
4013 
4014 	spdk_bs_open_blob(bs, blobid3, blob_op_with_handle_complete, NULL);
4015 	poll_threads();
4016 	CU_ASSERT(g_bserrno == 0);
4017 	CU_ASSERT(g_blob != NULL);
4018 	blob = g_blob;
4019 
4020 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4021 }
4022 
4023 static void
4024 blob_flags(void)
4025 {
4026 	struct spdk_blob_store *bs = g_bs;
4027 	spdk_blob_id blobid_invalid, blobid_data_ro, blobid_md_ro;
4028 	struct spdk_blob *blob_invalid, *blob_data_ro, *blob_md_ro;
4029 	struct spdk_blob_opts blob_opts;
4030 	int rc;
4031 
4032 	/* Create three blobs - one each for testing invalid, data_ro and md_ro flags. */
4033 	blob_invalid = ut_blob_create_and_open(bs, NULL);
4034 	blobid_invalid = spdk_blob_get_id(blob_invalid);
4035 
4036 	blob_data_ro = ut_blob_create_and_open(bs, NULL);
4037 	blobid_data_ro = spdk_blob_get_id(blob_data_ro);
4038 
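	/* The clear method is persisted in the blob's md_ro_flags, so it should be visible there after creation. */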
4039 	ut_spdk_blob_opts_init(&blob_opts);
4040 	blob_opts.clear_method = BLOB_CLEAR_WITH_WRITE_ZEROES;
4041 	blob_md_ro = ut_blob_create_and_open(bs, &blob_opts);
4042 	blobid_md_ro = spdk_blob_get_id(blob_md_ro);
4043 	CU_ASSERT((blob_md_ro->md_ro_flags & SPDK_BLOB_MD_RO_FLAGS_MASK) == BLOB_CLEAR_WITH_WRITE_ZEROES);
4044 
	/* Change the size of blob_data_ro to check that flags are serialized
	 * when the blob has a non-zero number of extents */
4047 	spdk_blob_resize(blob_data_ro, 10, blob_op_complete, NULL);
4048 	poll_threads();
4049 	CU_ASSERT(g_bserrno == 0);
4050 
	/* Set the xattr to check that flags are serialized
	 * when the blob has a non-zero number of xattrs */
4053 	rc = spdk_blob_set_xattr(blob_md_ro, "name", "log.txt", strlen("log.txt") + 1);
4054 	CU_ASSERT(rc == 0);
4055 
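	/* Set an unknown (unsupported) bit in each flags field.  On reload, a blob with an
	 * unknown invalid flag must fail to open, an unknown data_ro flag must force both data
	 * and md read-only, and an unknown md_ro flag must force md read-only. */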
4056 	blob_invalid->invalid_flags = (1ULL << 63);
4057 	blob_invalid->state = SPDK_BLOB_STATE_DIRTY;
4058 	blob_data_ro->data_ro_flags = (1ULL << 62);
4059 	blob_data_ro->state = SPDK_BLOB_STATE_DIRTY;
4060 	blob_md_ro->md_ro_flags = (1ULL << 61);
4061 	blob_md_ro->state = SPDK_BLOB_STATE_DIRTY;
4062 
4063 	g_bserrno = -1;
4064 	spdk_blob_sync_md(blob_invalid, blob_op_complete, NULL);
4065 	poll_threads();
4066 	CU_ASSERT(g_bserrno == 0);
4067 	g_bserrno = -1;
4068 	spdk_blob_sync_md(blob_data_ro, blob_op_complete, NULL);
4069 	poll_threads();
4070 	CU_ASSERT(g_bserrno == 0);
4071 	g_bserrno = -1;
4072 	spdk_blob_sync_md(blob_md_ro, blob_op_complete, NULL);
4073 	poll_threads();
4074 	CU_ASSERT(g_bserrno == 0);
4075 
4076 	g_bserrno = -1;
4077 	spdk_blob_close(blob_invalid, blob_op_complete, NULL);
4078 	poll_threads();
4079 	CU_ASSERT(g_bserrno == 0);
4080 	blob_invalid = NULL;
4081 	g_bserrno = -1;
4082 	spdk_blob_close(blob_data_ro, blob_op_complete, NULL);
4083 	poll_threads();
4084 	CU_ASSERT(g_bserrno == 0);
4085 	blob_data_ro = NULL;
4086 	g_bserrno = -1;
4087 	spdk_blob_close(blob_md_ro, blob_op_complete, NULL);
4088 	poll_threads();
4089 	CU_ASSERT(g_bserrno == 0);
4090 	blob_md_ro = NULL;
4091 
4092 	g_blob = NULL;
4093 	g_blobid = SPDK_BLOBID_INVALID;
4094 
4095 	ut_bs_reload(&bs, NULL);
4096 
4097 	g_blob = NULL;
4098 	g_bserrno = 0;
4099 	spdk_bs_open_blob(bs, blobid_invalid, blob_op_with_handle_complete, NULL);
4100 	poll_threads();
4101 	CU_ASSERT(g_bserrno != 0);
4102 	CU_ASSERT(g_blob == NULL);
4103 
4104 	g_blob = NULL;
4105 	g_bserrno = -1;
4106 	spdk_bs_open_blob(bs, blobid_data_ro, blob_op_with_handle_complete, NULL);
4107 	poll_threads();
4108 	CU_ASSERT(g_bserrno == 0);
4109 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4110 	blob_data_ro = g_blob;
4111 	/* If an unknown data_ro flag was found, the blob should be marked both data and md read-only. */
4112 	CU_ASSERT(blob_data_ro->data_ro == true);
4113 	CU_ASSERT(blob_data_ro->md_ro == true);
4114 	CU_ASSERT(spdk_blob_get_num_clusters(blob_data_ro) == 10);
4115 
4116 	g_blob = NULL;
4117 	g_bserrno = -1;
4118 	spdk_bs_open_blob(bs, blobid_md_ro, blob_op_with_handle_complete, NULL);
4119 	poll_threads();
4120 	CU_ASSERT(g_bserrno == 0);
4121 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4122 	blob_md_ro = g_blob;
4123 	CU_ASSERT(blob_md_ro->data_ro == false);
4124 	CU_ASSERT(blob_md_ro->md_ro == true);
4125 
4126 	g_bserrno = -1;
4127 	spdk_blob_sync_md(blob_md_ro, blob_op_complete, NULL);
4128 	poll_threads();
4129 	CU_ASSERT(g_bserrno == 0);
4130 
4131 	ut_blob_close_and_delete(bs, blob_data_ro);
4132 	ut_blob_close_and_delete(bs, blob_md_ro);
4133 }
4134 
4135 static void
4136 bs_version(void)
4137 {
4138 	struct spdk_bs_super_block *super;
4139 	struct spdk_blob_store *bs = g_bs;
4140 	struct spdk_bs_dev *dev;
4141 	struct spdk_blob *blob;
4142 	struct spdk_blob_opts blob_opts;
4143 	spdk_blob_id blobid;
4144 
4145 	/* Unload the blob store */
4146 	spdk_bs_unload(bs, bs_op_complete, NULL);
4147 	poll_threads();
4148 	CU_ASSERT(g_bserrno == 0);
4149 	g_bs = NULL;
4150 
4151 	/*
4152 	 * Change the bs version on disk.  This will allow us to
4153 	 *  test that the version does not get modified automatically
4154 	 *  when loading and unloading the blobstore.
4155 	 */
4156 	super = (struct spdk_bs_super_block *)&g_dev_buffer[0];
4157 	CU_ASSERT(super->version == SPDK_BS_VERSION);
4158 	CU_ASSERT(super->clean == 1);
4159 	super->version = 2;
4160 	/*
4161 	 * Version 2 metadata does not have a used blobid mask, so clear
4162 	 *  those fields in the super block and zero the corresponding
4163 	 *  region on "disk".  We will use this to ensure blob IDs are
4164 	 *  correctly reconstructed.
4165 	 */
4166 	memset(&g_dev_buffer[super->used_blobid_mask_start * SPDK_BS_PAGE_SIZE], 0,
4167 	       super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE);
4168 	super->used_blobid_mask_start = 0;
4169 	super->used_blobid_mask_len = 0;
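	/* Recompute the CRC so that the modified super block still passes validation on the next load. */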
4170 	super->crc = blob_md_page_calc_crc(super);
4171 
4172 	/* Load an existing blob store */
4173 	dev = init_dev();
4174 	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
4175 	poll_threads();
4176 	CU_ASSERT(g_bserrno == 0);
4177 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
4178 	CU_ASSERT(super->clean == 1);
4179 	bs = g_bs;
4180 
	/*
	 * Create a blob - just to make sure that unloading the blobstore
	 *  results in writing the super block (since metadata pages
	 *  were allocated).
	 */
4186 	ut_spdk_blob_opts_init(&blob_opts);
4187 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
4188 	poll_threads();
4189 	CU_ASSERT(g_bserrno == 0);
4190 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
4191 	blobid = g_blobid;
4192 
4193 	/* Unload the blob store */
4194 	spdk_bs_unload(bs, bs_op_complete, NULL);
4195 	poll_threads();
4196 	CU_ASSERT(g_bserrno == 0);
4197 	g_bs = NULL;
4198 	CU_ASSERT(super->version == 2);
4199 	CU_ASSERT(super->used_blobid_mask_start == 0);
4200 	CU_ASSERT(super->used_blobid_mask_len == 0);
4201 
4202 	dev = init_dev();
4203 	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
4204 	poll_threads();
4205 	CU_ASSERT(g_bserrno == 0);
4206 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
4207 	bs = g_bs;
4208 
4209 	g_blob = NULL;
4210 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
4211 	poll_threads();
4212 	CU_ASSERT(g_bserrno == 0);
4213 	CU_ASSERT(g_blob != NULL);
4214 	blob = g_blob;
4215 
4216 	ut_blob_close_and_delete(bs, blob);
4217 
4218 	CU_ASSERT(super->version == 2);
4219 	CU_ASSERT(super->used_blobid_mask_start == 0);
4220 	CU_ASSERT(super->used_blobid_mask_len == 0);
4221 }
4222 
4223 static void
4224 blob_set_xattrs_test(void)
4225 {
4226 	struct spdk_blob_store *bs = g_bs;
4227 	struct spdk_blob *blob;
4228 	struct spdk_blob_opts opts;
4229 	const void *value;
4230 	size_t value_len;
4231 	char *xattr;
4232 	size_t xattr_length;
4233 	int rc;
4234 
4235 	/* Create blob with extra attributes */
4236 	ut_spdk_blob_opts_init(&opts);
4237 
4238 	opts.xattrs.names = g_xattr_names;
4239 	opts.xattrs.get_value = _get_xattr_value;
4240 	opts.xattrs.count = 3;
4241 	opts.xattrs.ctx = &g_ctx;
4242 
4243 	blob = ut_blob_create_and_open(bs, &opts);
4244 
4245 	/* Get the xattrs */
4246 	value = NULL;
4247 
4248 	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len);
4249 	CU_ASSERT(rc == 0);
4250 	SPDK_CU_ASSERT_FATAL(value != NULL);
4251 	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
4252 	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);
4253 
4254 	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len);
4255 	CU_ASSERT(rc == 0);
4256 	SPDK_CU_ASSERT_FATAL(value != NULL);
4257 	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
4258 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);
4259 
4260 	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len);
4261 	CU_ASSERT(rc == 0);
4262 	SPDK_CU_ASSERT_FATAL(value != NULL);
4263 	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
4264 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);
4265 
	/* Try to get a non-existent attribute */
4267 
4268 	rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len);
4269 	CU_ASSERT(rc == -ENOENT);
4270 
	/* Try an xattr exceeding the maximum descriptor length that fits in a single page */
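	/* The extra +1 byte makes the descriptor one byte too large to fit in a page,
	 * so spdk_blob_set_xattr is expected to fail with -ENOMEM. */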
4272 	xattr_length = SPDK_BS_MAX_DESC_SIZE - sizeof(struct spdk_blob_md_descriptor_xattr) -
4273 		       strlen("large_xattr") + 1;
4274 	xattr = calloc(xattr_length, sizeof(char));
4275 	SPDK_CU_ASSERT_FATAL(xattr != NULL);
4276 	rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
4277 	free(xattr);
4278 	SPDK_CU_ASSERT_FATAL(rc == -ENOMEM);
4279 
4280 	spdk_blob_close(blob, blob_op_complete, NULL);
4281 	poll_threads();
4282 	CU_ASSERT(g_bserrno == 0);
4283 	blob = NULL;
4284 	g_blob = NULL;
4285 	g_blobid = SPDK_BLOBID_INVALID;
4286 
4287 	/* NULL callback */
4288 	ut_spdk_blob_opts_init(&opts);
4289 	opts.xattrs.names = g_xattr_names;
4290 	opts.xattrs.get_value = NULL;
4291 	opts.xattrs.count = 1;
4292 	opts.xattrs.ctx = &g_ctx;
4293 
4294 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
4295 	poll_threads();
4296 	CU_ASSERT(g_bserrno == -EINVAL);
4297 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
4298 
4299 	/* NULL values */
4300 	ut_spdk_blob_opts_init(&opts);
4301 	opts.xattrs.names = g_xattr_names;
4302 	opts.xattrs.get_value = _get_xattr_value_null;
4303 	opts.xattrs.count = 1;
4304 	opts.xattrs.ctx = NULL;
4305 
4306 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
4307 	poll_threads();
4308 	CU_ASSERT(g_bserrno == -EINVAL);
4309 }
4310 
4311 static void
4312 blob_thin_prov_alloc(void)
4313 {
4314 	struct spdk_blob_store *bs = g_bs;
4315 	struct spdk_blob *blob;
4316 	struct spdk_blob_opts opts;
4317 	spdk_blob_id blobid;
4318 	uint64_t free_clusters;
4319 
4320 	free_clusters = spdk_bs_free_cluster_count(bs);
4321 
4322 	/* Set blob as thin provisioned */
4323 	ut_spdk_blob_opts_init(&opts);
4324 	opts.thin_provision = true;
4325 
4326 	blob = ut_blob_create_and_open(bs, &opts);
4327 	blobid = spdk_blob_get_id(blob);
4328 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4329 
4330 	CU_ASSERT(blob->active.num_clusters == 0);
4331 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
4332 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);
4333 
4334 	/* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */
4335 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
4336 	poll_threads();
4337 	CU_ASSERT(g_bserrno == 0);
4338 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4339 	CU_ASSERT(blob->active.num_clusters == 5);
4340 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
4341 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);
4342 
4343 	/* Grow it to 1TB - still unallocated */
4344 	spdk_blob_resize(blob, 262144, blob_op_complete, NULL);
4345 	poll_threads();
4346 	CU_ASSERT(g_bserrno == 0);
4347 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4348 	CU_ASSERT(blob->active.num_clusters == 262144);
4349 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 262144);
4350 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);
4351 
4352 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
4353 	poll_threads();
4354 	CU_ASSERT(g_bserrno == 0);
4355 	/* Sync must not change anything */
4356 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4357 	CU_ASSERT(blob->active.num_clusters == 262144);
4358 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 262144);
4359 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);
	/* Since no clusters are allocated,
	 * the number of metadata pages is expected to be minimal.
	 */
4363 	CU_ASSERT(blob->active.num_pages == 1);
4364 
4365 	/* Shrink the blob to 3 clusters - still unallocated */
4366 	spdk_blob_resize(blob, 3, blob_op_complete, NULL);
4367 	poll_threads();
4368 	CU_ASSERT(g_bserrno == 0);
4369 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4370 	CU_ASSERT(blob->active.num_clusters == 3);
4371 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 3);
4372 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);
4373 
4374 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
4375 	poll_threads();
4376 	CU_ASSERT(g_bserrno == 0);
4377 	/* Sync must not change anything */
4378 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4379 	CU_ASSERT(blob->active.num_clusters == 3);
4380 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 3);
4381 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);
4382 
4383 	spdk_blob_close(blob, blob_op_complete, NULL);
4384 	poll_threads();
4385 	CU_ASSERT(g_bserrno == 0);
4386 
4387 	ut_bs_reload(&bs, NULL);
4388 
4389 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
4390 	poll_threads();
4391 	CU_ASSERT(g_bserrno == 0);
4392 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4393 	blob = g_blob;
4394 
4395 	/* Check that clusters allocation and size is still the same */
4396 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4397 	CU_ASSERT(blob->active.num_clusters == 3);
4398 
4399 	ut_blob_close_and_delete(bs, blob);
4400 }
4401 
4402 static void
4403 blob_insert_cluster_msg_test(void)
4404 {
4405 	struct spdk_blob_store *bs = g_bs;
4406 	struct spdk_blob *blob;
4407 	struct spdk_blob_opts opts;
4408 	/* For now, even if md_page_size is > 4KB, we still only use the first
4409 	 * 4KB of it. The rest is left unused. Future changes may allow using the
4410 	 * rest of the md_page, but that will require more extensive changes since
4411 	 * then the struct spdk_blob_md_page cannot be used directly (since some
4412 	 * fields such as crc would have variable placement in the struct).
4413 	 */
4414 	struct {
4415 		struct spdk_blob_md_page page;
4416 		uint8_t pad[DEV_MAX_PHYS_BLOCKLEN - sizeof(struct spdk_blob_md_page)];
4417 	} md = {};
4418 	spdk_blob_id blobid;
4419 	uint64_t free_clusters;
4420 	uint64_t new_cluster = 0;
4421 	uint32_t cluster_num = 3;
4422 	uint32_t extent_page = 0;
4423 
4424 	free_clusters = spdk_bs_free_cluster_count(bs);
4425 
4426 	/* Set blob as thin provisioned */
4427 	ut_spdk_blob_opts_init(&opts);
4428 	opts.thin_provision = true;
4429 	opts.num_clusters = 4;
4430 
4431 	blob = ut_blob_create_and_open(bs, &opts);
4432 	blobid = spdk_blob_get_id(blob);
4433 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4434 
4435 	CU_ASSERT(blob->active.num_clusters == 4);
4436 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 4);
4437 	CU_ASSERT(blob->active.clusters[cluster_num] == 0);
4438 
	/* Specify the cluster_num to allocate; new_cluster is returned so it can be inserted on the md_thread.
	 * This simulates the behaviour when a cluster is allocated after blob creation,
	 * such as in _spdk_bs_allocate_and_copy_cluster(). */
4442 	spdk_spin_lock(&bs->used_lock);
4443 	bs_allocate_cluster(blob, cluster_num, &new_cluster, &extent_page, false);
4444 	CU_ASSERT(blob->active.clusters[cluster_num] == 0);
4445 	spdk_spin_unlock(&bs->used_lock);
4446 
4447 	blob_insert_cluster_on_md_thread(blob, cluster_num, new_cluster, extent_page, &md.page,
4448 					 blob_op_complete, NULL);
4449 	poll_threads();
4450 
4451 	CU_ASSERT(blob->active.clusters[cluster_num] != 0);
4452 
4453 	spdk_blob_close(blob, blob_op_complete, NULL);
4454 	poll_threads();
4455 	CU_ASSERT(g_bserrno == 0);
4456 
4457 	ut_bs_reload(&bs, NULL);
4458 
4459 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
4460 	poll_threads();
4461 	CU_ASSERT(g_bserrno == 0);
4462 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4463 	blob = g_blob;
4464 
4465 	CU_ASSERT(blob->active.clusters[cluster_num] != 0);
4466 
4467 	ut_blob_close_and_delete(bs, blob);
4468 }
4469 
4470 static void
4471 blob_thin_prov_rw(void)
4472 {
4473 	static const uint8_t zero[10 * BLOCKLEN] = { 0 };
4474 	struct spdk_blob_store *bs = g_bs;
4475 	struct spdk_blob *blob, *blob_id0;
4476 	struct spdk_io_channel *channel, *channel_thread1;
4477 	struct spdk_blob_opts opts;
4478 	uint64_t free_clusters;
4479 	uint64_t io_unit_size;
4480 	uint8_t payload_read[10 * BLOCKLEN];
4481 	uint8_t payload_write[10 * BLOCKLEN];
4482 	uint64_t write_bytes;
4483 	uint64_t read_bytes;
4484 	uint64_t expected_bytes;
4485 
4486 	free_clusters = spdk_bs_free_cluster_count(bs);
4487 	io_unit_size = spdk_bs_get_io_unit_size(bs);
4488 
4489 	channel = spdk_bs_alloc_io_channel(bs);
4490 	CU_ASSERT(channel != NULL);
4491 
4492 	ut_spdk_blob_opts_init(&opts);
4493 	opts.thin_provision = true;
4494 
	/* Create and delete a blob at md page 0, so that the next md page allocation
	 * for an extent will reuse it. */
4497 	blob_id0 = ut_blob_create_and_open(bs, &opts);
4498 	blob = ut_blob_create_and_open(bs, &opts);
4499 	ut_blob_close_and_delete(bs, blob_id0);
4500 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4501 
4502 	CU_ASSERT(blob->active.num_clusters == 0);
4503 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);
4504 
4505 	/* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */
4506 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
4507 	poll_threads();
4508 	CU_ASSERT(g_bserrno == 0);
4509 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4510 	CU_ASSERT(blob->active.num_clusters == 5);
4511 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);
4512 
4513 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
4514 	poll_threads();
4515 	CU_ASSERT(g_bserrno == 0);
4516 	/* Sync must not change anything */
4517 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4518 	CU_ASSERT(blob->active.num_clusters == 5);
4519 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);
4520 
4521 	/* Payload should be all zeros from unallocated clusters */
4522 	memset(payload_read, 0xFF, sizeof(payload_read));
4523 	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
4524 	poll_threads();
4525 	CU_ASSERT(g_bserrno == 0);
4526 	CU_ASSERT(memcmp(zero, payload_read, 10 * BLOCKLEN) == 0);
4527 
4528 	write_bytes = g_dev_write_bytes;
4529 	read_bytes = g_dev_read_bytes;
4530 
4531 	/* Perform write on thread 1. That will allocate cluster on thread 0 via send_msg */
4532 	set_thread(1);
4533 	channel_thread1 = spdk_bs_alloc_io_channel(bs);
4534 	CU_ASSERT(channel_thread1 != NULL);
4535 	memset(payload_write, 0xE5, sizeof(payload_write));
4536 	spdk_blob_io_write(blob, channel_thread1, payload_write, 4, 10, blob_op_complete, NULL);
4537 	CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs));
4538 	/* Perform write on thread 0. That will try to allocate cluster,
4539 	 * but fail due to another thread issuing the cluster allocation first. */
4540 	set_thread(0);
4541 	memset(payload_write, 0xE5, sizeof(payload_write));
4542 	spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
4543 	CU_ASSERT(free_clusters - 2 == spdk_bs_free_cluster_count(bs));
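	/* Both writes reserved a cluster up front; after polling, the allocation that
	 * lost the race is released, leaving only one cluster claimed. */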
4544 	poll_threads();
4545 	CU_ASSERT(g_bserrno == 0);
4546 	CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs));
4547 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 1);
	/* For a thin-provisioned blob we expect to write 20 io_units plus one metadata page,
	 * and to read 0 bytes */
4550 	expected_bytes = 20 * io_unit_size + spdk_bs_get_page_size(bs);
4551 	if (g_use_extent_table) {
4552 		/* Add one more page for EXTENT_PAGE write */
4553 		expected_bytes += spdk_bs_get_page_size(bs);
4554 	}
4555 	CU_ASSERT(g_dev_write_bytes - write_bytes == expected_bytes);
4556 	CU_ASSERT(g_dev_read_bytes - read_bytes == 0);
4557 
4558 	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
4559 	poll_threads();
4560 	CU_ASSERT(g_bserrno == 0);
4561 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * BLOCKLEN) == 0);
4562 
4563 	ut_blob_close_and_delete(bs, blob);
4564 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4565 
4566 	set_thread(1);
4567 	spdk_bs_free_io_channel(channel_thread1);
4568 	set_thread(0);
4569 	spdk_bs_free_io_channel(channel);
4570 	poll_threads();
4571 	g_blob = NULL;
4572 	g_blobid = 0;
4573 }
4574 
4575 static void
4576 blob_thin_prov_write_count_io(void)
4577 {
4578 	struct spdk_blob_store *bs;
4579 	struct spdk_blob *blob;
4580 	struct spdk_io_channel *ch;
4581 	struct spdk_bs_dev *dev;
4582 	struct spdk_bs_opts bs_opts;
4583 	struct spdk_blob_opts opts;
4584 	uint64_t free_clusters;
4585 	uint64_t io_unit_size;
4586 	uint8_t payload_write[BLOCKLEN];
4587 	uint64_t write_bytes;
4588 	uint64_t read_bytes;
4589 	uint64_t expected_bytes;
4590 	const uint32_t CLUSTER_SZ = g_phys_blocklen * 4;
4591 	uint32_t io_units_per_cluster;
4592 	uint32_t io_units_per_extent_page;
4593 	uint32_t i;
4594 
	/* Use a very small cluster size for this test.  This ensures that multiple
	 * extent pages are needed to hold all of the clusters, even for the relatively
	 * small blobs that the unit tests are restricted to (i.e. we don't want to
	 * allocate multi-GB buffers).
	 */
4600 	dev = init_dev();
4601 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
4602 	bs_opts.cluster_sz = CLUSTER_SZ;
4603 
4604 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
4605 	poll_threads();
4606 	CU_ASSERT(g_bserrno == 0);
4607 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
4608 	bs = g_bs;
4609 
4610 	free_clusters = spdk_bs_free_cluster_count(bs);
4611 	io_unit_size = spdk_bs_get_io_unit_size(bs);
4612 	io_units_per_cluster = CLUSTER_SZ / io_unit_size;
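	/* Number of io_units covered by all of the clusters that a single extent page describes */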
4613 	io_units_per_extent_page = SPDK_EXTENTS_PER_EP * io_units_per_cluster;
4614 
4615 	ch = spdk_bs_alloc_io_channel(bs);
4616 	SPDK_CU_ASSERT_FATAL(ch != NULL);
4617 
4618 	ut_spdk_blob_opts_init(&opts);
4619 	opts.thin_provision = true;
4620 
4621 	blob = ut_blob_create_and_open(bs, &opts);
4622 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4623 
4624 	/* Resize the blob so that it will require 8 extent pages to hold all of
4625 	 * the clusters.
4626 	 */
4627 	g_bserrno = -1;
4628 	spdk_blob_resize(blob, SPDK_EXTENTS_PER_EP * 8, blob_op_complete, NULL);
4629 	poll_threads();
4630 	CU_ASSERT(g_bserrno == 0);
4631 
4632 	g_bserrno = -1;
4633 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
4634 	poll_threads();
4635 	CU_ASSERT(g_bserrno == 0);
4636 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4637 	CU_ASSERT(blob->active.num_clusters == SPDK_EXTENTS_PER_EP * 8);
4638 
4639 	memset(payload_write, 0, sizeof(payload_write));
4640 	for (i = 0; i < 8; i++) {
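		/* Each iteration moves to a new extent page: the first write allocates one cluster
		 * (2 * i + 1 in use), a second write in the same extent page allocates another,
		 * then an unmap frees one cluster and a final write re-allocates it,
		 * ending with 2 * i + 2 clusters in use. */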
4641 		write_bytes = g_dev_write_bytes;
4642 		read_bytes = g_dev_read_bytes;
4643 
4644 		g_bserrno = -1;
4645 		spdk_blob_io_write(blob, ch, payload_write, io_units_per_extent_page * i, 1, blob_op_complete,
4646 				   NULL);
4647 		poll_threads();
4648 		CU_ASSERT(g_bserrno == 0);
4649 		CU_ASSERT(free_clusters - (2 * i + 1) == spdk_bs_free_cluster_count(bs));
4650 
4651 		CU_ASSERT(g_dev_read_bytes == read_bytes);
4652 		if (!g_use_extent_table) {
4653 			/* For legacy metadata, we should have written the io_unit for
4654 			 * the write I/O, plus the blob's primary metadata page
4655 			 */
4656 			expected_bytes = io_unit_size + spdk_bs_get_page_size(bs);
4657 		} else {
4658 			/* For extent table metadata, we should have written the io_unit for
4659 			 * the write I/O, plus 2 metadata pages - the extent page and the
4660 			 * blob's primary metadata page
4661 			 */
4662 			expected_bytes = io_unit_size + 2 * spdk_bs_get_page_size(bs);
4663 		}
4664 		CU_ASSERT((g_dev_write_bytes - write_bytes) == expected_bytes);
4665 
4666 		/* The write should have synced the metadata already.  Do another sync here
4667 		 * just to confirm.
4668 		 */
4669 		write_bytes = g_dev_write_bytes;
4670 		read_bytes = g_dev_read_bytes;
4671 
4672 		g_bserrno = -1;
4673 		spdk_blob_sync_md(blob, blob_op_complete, NULL);
4674 		poll_threads();
4675 		CU_ASSERT(g_bserrno == 0);
4676 		CU_ASSERT(free_clusters - (2 * i + 1) == spdk_bs_free_cluster_count(bs));
4677 		CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 2 * i + 1);
4678 
4679 		CU_ASSERT(g_dev_read_bytes == read_bytes);
4680 		CU_ASSERT(g_dev_write_bytes == write_bytes);
4681 
4682 		/* Now write to another unallocated cluster that is part of the same extent page. */
4683 		g_bserrno = -1;
4684 		spdk_blob_io_write(blob, ch, payload_write, io_units_per_extent_page * i + io_units_per_cluster,
4685 				   1, blob_op_complete, NULL);
4686 		poll_threads();
4687 		CU_ASSERT(g_bserrno == 0);
4688 		CU_ASSERT(free_clusters - (2 * i + 2) == spdk_bs_free_cluster_count(bs));
4689 		CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 2 * i + 2);
4690 
4691 		CU_ASSERT(g_dev_read_bytes == read_bytes);
4692 		/*
4693 		 * For legacy metadata, we should have written the I/O and the primary metadata page.
4694 		 * For extent table metadata, we should have written the I/O and the extent metadata page.
4695 		 */
4696 		expected_bytes = io_unit_size + spdk_bs_get_page_size(bs);
4697 		CU_ASSERT((g_dev_write_bytes - write_bytes) == expected_bytes);
4698 
4699 		/* Send unmap aligned to the whole cluster - should free it up */
4700 		g_bserrno = -1;
4701 		spdk_blob_io_unmap(blob, ch, io_units_per_extent_page * i, io_units_per_cluster, blob_op_complete,
4702 				   NULL);
4703 		poll_threads();
4704 		CU_ASSERT(g_bserrno == 0);
4705 		CU_ASSERT(free_clusters - (2 * i + 1) == spdk_bs_free_cluster_count(bs));
4706 
4707 		/* Write back to the freed cluster */
4708 		g_bserrno = -1;
4709 		spdk_blob_io_write(blob, ch, payload_write, io_units_per_extent_page * i, 1, blob_op_complete,
4710 				   NULL);
4711 		poll_threads();
4712 		CU_ASSERT(g_bserrno == 0);
4713 		CU_ASSERT(free_clusters - (2 * i + 2) == spdk_bs_free_cluster_count(bs));
4714 	}
4715 
4716 	ut_blob_close_and_delete(bs, blob);
4717 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4718 
4719 	spdk_bs_free_io_channel(ch);
4720 	poll_threads();
4721 	g_blob = NULL;
4722 	g_blobid = 0;
4723 
4724 	spdk_bs_unload(bs, bs_op_complete, NULL);
4725 	poll_threads();
4726 	CU_ASSERT(g_bserrno == 0);
4727 	g_bs = NULL;
4728 }
4729 
4730 static void
4731 blob_thin_prov_unmap_cluster(void)
4732 {
4733 	struct spdk_blob_store *bs;
4734 	struct spdk_blob *blob, *snapshot;
4735 	struct spdk_io_channel *ch;
4736 	struct spdk_bs_dev *dev;
4737 	struct spdk_bs_opts bs_opts;
4738 	struct spdk_blob_opts opts;
4739 	uint64_t free_clusters;
4740 	uint64_t io_unit_size;
4741 	uint8_t payload_write[BLOCKLEN];
4742 	uint8_t payload_read[BLOCKLEN];
4743 	const uint32_t CLUSTER_COUNT = 3;
4744 	uint32_t io_units_per_cluster;
4745 	spdk_blob_id blobid, snapshotid;
4746 	uint32_t i;
4747 	int err;
4748 
4749 	/* Use a very large cluster size for this test. Check how the unmap/release cluster code path behaves when
4750 	 * clusters are fully used.
4751 	 */
4752 	dev = init_dev();
4753 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
4754 	bs_opts.cluster_sz = dev->blocklen * dev->blockcnt / (CLUSTER_COUNT + 1);
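	/* Dividing by CLUSTER_COUNT + 1 leaves roughly one cluster for blobstore metadata,
	 * so exactly CLUSTER_COUNT data clusters remain (verified below). */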
4755 
4756 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
4757 	poll_threads();
4758 	CU_ASSERT(g_bserrno == 0);
4759 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
4760 	bs = g_bs;
4761 
4762 	free_clusters = spdk_bs_free_cluster_count(bs);
4763 	io_unit_size = spdk_bs_get_io_unit_size(bs);
4764 	io_units_per_cluster = bs_opts.cluster_sz / io_unit_size;
4765 
4766 	ch = spdk_bs_alloc_io_channel(bs);
4767 	SPDK_CU_ASSERT_FATAL(ch != NULL);
4768 
4769 	ut_spdk_blob_opts_init(&opts);
4770 	opts.thin_provision = true;
4771 
4772 	blob = ut_blob_create_and_open(bs, &opts);
4773 	CU_ASSERT(free_clusters == CLUSTER_COUNT);
4774 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4775 	blobid = spdk_blob_get_id(blob);
4776 
4777 	g_bserrno = -1;
4778 	spdk_blob_resize(blob, CLUSTER_COUNT, blob_op_complete, NULL);
4779 	poll_threads();
4780 	CU_ASSERT(g_bserrno == 0);
4781 
4782 	g_bserrno = -1;
4783 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
4784 	poll_threads();
4785 	CU_ASSERT(g_bserrno == 0);
4786 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4787 	CU_ASSERT(blob->active.num_clusters == CLUSTER_COUNT);
4788 
4789 	/* Fill all clusters */
4790 	for (i = 0; i < CLUSTER_COUNT; i++) {
4791 		memset(payload_write, i + 1, sizeof(payload_write));
4792 		g_bserrno = -1;
4793 		spdk_blob_io_write(blob, ch, payload_write, io_units_per_cluster * i, 1, blob_op_complete, NULL);
4794 		poll_threads();
4795 		CU_ASSERT(g_bserrno == 0);
4796 		CU_ASSERT(free_clusters - (i + 1) == spdk_bs_free_cluster_count(bs));
4797 	}
4798 	CU_ASSERT(0 == spdk_bs_free_cluster_count(bs));
4799 
4800 	/* Unmap one whole cluster */
4801 	g_bserrno = -1;
4802 	spdk_blob_io_unmap(blob, ch, io_units_per_cluster, io_units_per_cluster, blob_op_complete, NULL);
4803 	poll_threads();
4804 	CU_ASSERT(g_bserrno == 0);
4805 	CU_ASSERT(1 == spdk_bs_free_cluster_count(bs));
4806 
4807 	/* Verify the data read from the cluster is zeroed out */
4808 	memset(payload_write, 0, sizeof(payload_write));
4809 	spdk_blob_io_read(blob, ch, payload_read, io_units_per_cluster, 1, blob_op_complete, NULL);
4810 	poll_threads();
4811 	CU_ASSERT(g_bserrno == 0);
4812 	CU_ASSERT(memcmp(payload_write, payload_read, BLOCKLEN) == 0);
4813 
4814 	/* Fill the same cluster with data */
4815 	memset(payload_write, 3, sizeof(payload_write));
4816 	g_bserrno = -1;
4817 	spdk_blob_io_write(blob, ch, payload_write, io_units_per_cluster, 1, blob_op_complete, NULL);
4818 	poll_threads();
4819 	CU_ASSERT(g_bserrno == 0);
4820 	CU_ASSERT(0 == spdk_bs_free_cluster_count(bs));
4821 
4822 	/* Verify the data read from the cluster has the expected data */
4823 	spdk_blob_io_read(blob, ch, payload_read, io_units_per_cluster, 1, blob_op_complete, NULL);
4824 	poll_threads();
4825 	CU_ASSERT(g_bserrno == 0);
4826 	CU_ASSERT(memcmp(payload_write, payload_read, BLOCKLEN) == 0);
4827 
	/* Send an unaligned unmap that encompasses one whole cluster */
4829 	g_bserrno = -1;
4830 	spdk_blob_io_unmap(blob, ch, io_units_per_cluster - 1, io_units_per_cluster + 2, blob_op_complete,
4831 			   NULL);
4832 	poll_threads();
4833 	CU_ASSERT(g_bserrno == 0);
4834 	CU_ASSERT(1 == spdk_bs_free_cluster_count(bs));
4835 
4836 	/* Verify the data read from the cluster is zeroed out */
4837 	g_bserrno = -1;
4838 	memset(payload_write, 0, sizeof(payload_write));
4839 	spdk_blob_io_read(blob, ch, payload_read, io_units_per_cluster, 1, blob_op_complete, NULL);
4840 	poll_threads();
4841 	CU_ASSERT(g_bserrno == 0);
4842 	CU_ASSERT(memcmp(payload_write, payload_read, BLOCKLEN) == 0);
4843 
4844 	/* Send a simultaneous unmap with a write to an unallocated area -
4845 	 * check that writes don't claim the currently unmapped cluster */
4846 	g_bserrno = -1;
4847 	memset(payload_write, 7, sizeof(payload_write));
4848 	spdk_blob_io_unmap(blob, ch, 0, io_units_per_cluster, blob_op_complete, NULL);
4849 	spdk_blob_io_write(blob, ch, payload_write, io_units_per_cluster, 1, blob_op_complete, NULL);
4850 	poll_threads();
4851 	CU_ASSERT(g_bserrno == 0);
4852 	CU_ASSERT(1 == spdk_bs_free_cluster_count(bs));
4853 
4854 	/* Verify the contents of written sector */
4855 	g_bserrno = -1;
4856 	spdk_blob_io_read(blob, ch, payload_read, io_units_per_cluster, 1, blob_op_complete, NULL);
4857 	poll_threads();
4858 	CU_ASSERT(g_bserrno == 0);
4859 	CU_ASSERT(memcmp(payload_write, payload_read, BLOCKLEN) == 0);
4860 
4861 	/* Verify the contents of unmapped sector */
4862 	g_bserrno = -1;
4863 	memset(payload_write, 0, sizeof(payload_write));
4864 	spdk_blob_io_read(blob, ch, payload_read, 0, 1, blob_op_complete, NULL);
4865 	poll_threads();
4866 	CU_ASSERT(g_bserrno == 0);
4867 	CU_ASSERT(memcmp(payload_write, payload_read, BLOCKLEN) == 0);
4868 
4869 	/* Make sure clusters are not freed until the unmap to the drive is done */
4870 	g_bserrno = -1;
4871 	memset(payload_write, 7, sizeof(payload_write));
4872 	spdk_blob_io_write(blob, ch, payload_write, 0, 1, blob_op_complete, NULL);
4873 	poll_threads();
4874 	CU_ASSERT(g_bserrno == 0);
4875 	CU_ASSERT(0 == spdk_bs_free_cluster_count(bs));
4876 
4877 	g_bserrno = -1;
4878 	spdk_blob_io_unmap(blob, ch, 0, io_units_per_cluster, blob_op_complete, NULL);
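	/* Poll one event at a time: while the old data is still visible on the device, the unmap
	 * has not yet reached the backing device, so the cluster must not have been released. */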
4879 	while (memcmp(payload_write, &g_dev_buffer[BLOCKLEN * io_units_per_cluster], BLOCKLEN) == 0) {
4880 		CU_ASSERT(0 == spdk_bs_free_cluster_count(bs));
4881 		poll_thread_times(0, 1);
4882 	}
4883 	poll_threads();
4884 	CU_ASSERT(g_bserrno == 0);
4885 	CU_ASSERT(1 == spdk_bs_free_cluster_count(bs));
4886 
	/* Issue #3358 was a bug where concurrent trims to the same cluster caused an assert; check for regressions.
	 * Send three concurrent unmaps to the same cluster.
	 */
4890 	g_bserrno = -1;
4891 	memset(payload_write, 7, sizeof(payload_write));
4892 	spdk_blob_io_write(blob, ch, payload_write, 0, 1, blob_op_complete, NULL);
4893 	poll_threads();
4894 	CU_ASSERT(g_bserrno == 0);
4895 	CU_ASSERT(0 == spdk_bs_free_cluster_count(bs));
4896 
4897 	g_bserrno = -1;
4898 	err = -1;
4899 	spdk_blob_io_unmap(blob, ch, 0, io_units_per_cluster, blob_op_complete, NULL);
4900 	spdk_blob_io_unmap(blob, ch, 0, io_units_per_cluster, blob_op_complete, NULL);
4901 	spdk_blob_io_unmap(blob, ch, 0, io_units_per_cluster, blob_op_complete, &err);
4902 	poll_threads();
4903 	CU_ASSERT(g_bserrno == 0);
4904 	CU_ASSERT(err == 0);
4905 	CU_ASSERT(1 == spdk_bs_free_cluster_count(bs));
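	/* All three unmaps complete successfully and exactly one cluster is returned to the free
	 * pool; the overlapping unmaps must tolerate the cluster already being released instead
	 * of hitting the assert from issue #3358. */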
4906 
4907 	/* Test unmap on a thin-provisioned blob that is backed by a snapshot */
4908 	spdk_blob_resize(blob, 1, blob_op_complete, NULL);
4909 	poll_threads();
4910 	CU_ASSERT(g_bserrno == 0);
4911 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
4912 	poll_threads();
4913 	CU_ASSERT(g_bserrno == 0);
4914 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4915 
4916 	g_bserrno = -1;
4917 	memset(payload_write, 1, sizeof(payload_write));
4918 	spdk_blob_io_write(blob, ch, payload_write, 0, 1, blob_op_complete, NULL);
4919 	poll_threads();
4920 	CU_ASSERT(g_bserrno == 0);
4921 	CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs));
4922 
4923 	/* Create a snapshot */
4924 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 0);
4925 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
4926 	poll_threads();
4927 	CU_ASSERT(g_bserrno == 0);
4928 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
4929 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
4930 	snapshotid = g_blobid;
4931 	CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs));
4932 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
4933 	poll_threads();
4934 	CU_ASSERT(g_bserrno == 0);
4935 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4936 	snapshot = g_blob;
4937 
4938 	/* Write data to the blob; this will allocate a new cluster */
4939 	g_bserrno = -1;
4940 	memset(payload_write, 2, sizeof(payload_write));
4941 	spdk_blob_io_write(blob, ch, payload_write, 0, 1, blob_op_complete, NULL);
4942 	poll_threads();
4943 	CU_ASSERT(g_bserrno == 0);
4944 	CU_ASSERT(free_clusters - 2 == spdk_bs_free_cluster_count(bs));
4945 
4946 	/* Unmap one whole cluster, but do not release this cluster */
4947 	g_bserrno = -1;
4948 	spdk_blob_io_unmap(blob, ch, 0, io_units_per_cluster, blob_op_complete, NULL);
4949 	poll_threads();
4950 	CU_ASSERT(g_bserrno == 0);
4951 	CU_ASSERT(free_clusters - 2 == spdk_bs_free_cluster_count(bs));
4952 
4953 	/* Verify the data read from the cluster is zeroed out */
4954 	g_bserrno = -1;
4955 	memset(payload_write, 0, sizeof(payload_write));
4956 	spdk_blob_io_read(blob, ch, payload_read, 0, 1, blob_op_complete, NULL);
4957 	poll_threads();
4958 	CU_ASSERT(g_bserrno == 0);
4959 	CU_ASSERT(memcmp(payload_write, payload_read, BLOCKLEN) == 0);
4960 
4961 	ut_blob_close_and_delete(bs, blob);
4962 	ut_blob_close_and_delete(bs, snapshot);
4963 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4964 
4965 	spdk_bs_free_io_channel(ch);
4966 	poll_threads();
4967 	g_blob = NULL;
4968 	g_blobid = 0;
4969 
4970 	spdk_bs_unload(bs, bs_op_complete, NULL);
4971 	poll_threads();
4972 	CU_ASSERT(g_bserrno == 0);
4973 	g_bs = NULL;
4974 }
4975 
4976 static void
4977 blob_thin_prov_rle(void)
4978 {
4979 	static const uint8_t zero[10 * BLOCKLEN] = { 0 };
4980 	struct spdk_blob_store *bs = g_bs;
4981 	struct spdk_blob *blob;
4982 	struct spdk_io_channel *channel;
4983 	struct spdk_blob_opts opts;
4984 	spdk_blob_id blobid;
4985 	uint64_t free_clusters;
4986 	uint64_t io_unit_size;
4987 	uint8_t payload_read[10 * BLOCKLEN];
4988 	uint8_t payload_write[10 * BLOCKLEN];
4989 	uint64_t write_bytes;
4990 	uint64_t read_bytes;
4991 	uint64_t expected_bytes;
4992 	uint64_t io_unit;
4993 
4994 	/* Assert that the io unit size matches BLOCKLEN, so the payload buffers above are sized correctly */
4995 	CU_ASSERT(spdk_bs_get_io_unit_size(bs) == BLOCKLEN);
4996 
4997 	free_clusters = spdk_bs_free_cluster_count(bs);
4998 	io_unit_size = spdk_bs_get_io_unit_size(bs);
4999 
5000 	ut_spdk_blob_opts_init(&opts);
5001 	opts.thin_provision = true;
5002 	opts.num_clusters = 5;
5003 
5004 	blob = ut_blob_create_and_open(bs, &opts);
5005 	blobid = spdk_blob_get_id(blob);
5006 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
5007 
5008 	channel = spdk_bs_alloc_io_channel(bs);
5009 	CU_ASSERT(channel != NULL);
5010 
5011 	/* Specifically target the second cluster in the blob as the first allocation */
5012 	io_unit = bs_cluster_to_io_unit(bs, 1);
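	/* bs_cluster_to_io_unit() converts a cluster index into the blob-relative offset of that
	 * cluster's first io unit, so io_unit now points at the start of cluster 1. */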
5013 
5014 	/* Payload should be all zeros from unallocated clusters */
5015 	memset(payload_read, 0xFF, sizeof(payload_read));
5016 	spdk_blob_io_read(blob, channel, payload_read, io_unit, 10, blob_op_complete, NULL);
5017 	poll_threads();
5018 	CU_ASSERT(g_bserrno == 0);
5019 	CU_ASSERT(memcmp(zero, payload_read, 10 * BLOCKLEN) == 0);
5020 
5021 	write_bytes = g_dev_write_bytes;
5022 	read_bytes = g_dev_read_bytes;
5023 
5024 	/* Issue write to second cluster in a blob */
5025 	memset(payload_write, 0xE5, sizeof(payload_write));
5026 	spdk_blob_io_write(blob, channel, payload_write, io_unit, 10, blob_op_complete, NULL);
5027 	poll_threads();
5028 	CU_ASSERT(g_bserrno == 0);
5029 	CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs));
5030 	/* For a thin-provisioned blob we need to write 10 io units of data plus one metadata page
5031 	 * and read 0 bytes */
5032 	expected_bytes = 10 * io_unit_size + spdk_bs_get_page_size(bs);
5033 	if (g_use_extent_table) {
5034 		/* Add one more page for EXTENT_PAGE write */
5035 		expected_bytes += spdk_bs_get_page_size(bs);
5036 	}
5037 	CU_ASSERT(g_dev_write_bytes - write_bytes == expected_bytes);
5038 	CU_ASSERT(g_dev_read_bytes - read_bytes == 0);
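	/* For illustration only (the actual sizes depend on the unit-test device): with a
	 * hypothetical 512-byte io unit and a 4 KiB metadata page, this is
	 * 10 * 512 + 4096 = 9216 bytes, plus another 4096-byte EXTENT_PAGE write when the
	 * extent table is enabled. */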
5039 
5040 	spdk_blob_io_read(blob, channel, payload_read, io_unit, 10, blob_op_complete, NULL);
5041 	poll_threads();
5042 	CU_ASSERT(g_bserrno == 0);
5043 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * BLOCKLEN) == 0);
5044 
5045 	spdk_bs_free_io_channel(channel);
5046 	poll_threads();
5047 
5048 	spdk_blob_close(blob, blob_op_complete, NULL);
5049 	poll_threads();
5050 	CU_ASSERT(g_bserrno == 0);
5051 
5052 	ut_bs_reload(&bs, NULL);
5053 
5054 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
5055 	poll_threads();
5056 	CU_ASSERT(g_bserrno == 0);
5057 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5058 	blob = g_blob;
5059 
5060 	channel = spdk_bs_alloc_io_channel(bs);
5061 	CU_ASSERT(channel != NULL);
5062 
5063 	/* Read second cluster after blob reload to confirm data written */
5064 	spdk_blob_io_read(blob, channel, payload_read, io_unit, 10, blob_op_complete, NULL);
5065 	poll_threads();
5066 	CU_ASSERT(g_bserrno == 0);
5067 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * BLOCKLEN) == 0);
5068 
5069 	spdk_bs_free_io_channel(channel);
5070 	poll_threads();
5071 
5072 	ut_blob_close_and_delete(bs, blob);
5073 }
5074 
5075 static void
5076 blob_thin_prov_rw_iov(void)
5077 {
5078 	static const uint8_t zero[10 * BLOCKLEN] = { 0 };
5079 	struct spdk_blob_store *bs = g_bs;
5080 	struct spdk_blob *blob;
5081 	struct spdk_io_channel *channel;
5082 	struct spdk_blob_opts opts;
5083 	uint64_t free_clusters;
5084 	uint8_t payload_read[10 * BLOCKLEN];
5085 	uint8_t payload_write[10 * BLOCKLEN];
5086 	struct iovec iov_read[3];
5087 	struct iovec iov_write[3];
5088 
5089 	free_clusters = spdk_bs_free_cluster_count(bs);
5090 
5091 	channel = spdk_bs_alloc_io_channel(bs);
5092 	CU_ASSERT(channel != NULL);
5093 
5094 	ut_spdk_blob_opts_init(&opts);
5095 	opts.thin_provision = true;
5096 
5097 	blob = ut_blob_create_and_open(bs, &opts);
5098 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
5099 
5100 	CU_ASSERT(blob->active.num_clusters == 0);
5101 
5102 	/* The blob started with 0 clusters. Resize it to 5 clusters; they remain unallocated. */
5103 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
5104 	poll_threads();
5105 	CU_ASSERT(g_bserrno == 0);
5106 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
5107 	CU_ASSERT(blob->active.num_clusters == 5);
5108 
5109 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
5110 	poll_threads();
5111 	CU_ASSERT(g_bserrno == 0);
5112 	/* Sync must not change anything */
5113 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
5114 	CU_ASSERT(blob->active.num_clusters == 5);
5115 
5116 	/* Payload should be all zeros from unallocated clusters */
5117 	memset(payload_read, 0xAA, sizeof(payload_read));
5118 	iov_read[0].iov_base = payload_read;
5119 	iov_read[0].iov_len = 3 * BLOCKLEN;
5120 	iov_read[1].iov_base = payload_read + 3 * BLOCKLEN;
5121 	iov_read[1].iov_len = 4 * BLOCKLEN;
5122 	iov_read[2].iov_base = payload_read + 7 * BLOCKLEN;
5123 	iov_read[2].iov_len = 3 * BLOCKLEN;
5124 	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
5125 	poll_threads();
5126 	CU_ASSERT(g_bserrno == 0);
5127 	CU_ASSERT(memcmp(zero, payload_read, 10 * BLOCKLEN) == 0);
5128 
5129 	memset(payload_write, 0xE5, sizeof(payload_write));
5130 	iov_write[0].iov_base = payload_write;
5131 	iov_write[0].iov_len = 1 * BLOCKLEN;
5132 	iov_write[1].iov_base = payload_write + 1 * BLOCKLEN;
5133 	iov_write[1].iov_len = 5 * BLOCKLEN;
5134 	iov_write[2].iov_base = payload_write + 6 * BLOCKLEN;
5135 	iov_write[2].iov_len = 4 * BLOCKLEN;
5136 
5137 	spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
5138 	poll_threads();
5139 	CU_ASSERT(g_bserrno == 0);
5140 
5141 	memset(payload_read, 0xAA, sizeof(payload_read));
5142 	iov_read[0].iov_base = payload_read;
5143 	iov_read[0].iov_len = 3 * BLOCKLEN;
5144 	iov_read[1].iov_base = payload_read + 3 * BLOCKLEN;
5145 	iov_read[1].iov_len = 4 * BLOCKLEN;
5146 	iov_read[2].iov_base = payload_read + 7 * BLOCKLEN;
5147 	iov_read[2].iov_len = 3 * BLOCKLEN;
5148 	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
5149 	poll_threads();
5150 	CU_ASSERT(g_bserrno == 0);
5151 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * BLOCKLEN) == 0);
5152 
5153 	spdk_bs_free_io_channel(channel);
5154 	poll_threads();
5155 
5156 	ut_blob_close_and_delete(bs, blob);
5157 }
5158 
5159 struct iter_ctx {
5160 	int		current_iter;
5161 	spdk_blob_id	blobid[4];
5162 };
5163 
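/* Iteration callback used by bs_load_iter_test(): verifies that blobs are visited in creation
 * order by comparing each blobid against the list recorded in struct iter_ctx. */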
5164 static void
5165 test_iter(void *arg, struct spdk_blob *blob, int bserrno)
5166 {
5167 	struct iter_ctx *iter_ctx = arg;
5168 	spdk_blob_id blobid;
5169 
5170 	CU_ASSERT(bserrno == 0);
5171 	blobid = spdk_blob_get_id(blob);
5172 	CU_ASSERT(blobid == iter_ctx->blobid[iter_ctx->current_iter++]);
5173 }
5174 
5175 static void
5176 bs_load_iter_test(void)
5177 {
5178 	struct spdk_blob_store *bs;
5179 	struct spdk_bs_dev *dev;
5180 	struct iter_ctx iter_ctx = { 0 };
5181 	struct spdk_blob *blob;
5182 	int i, rc;
5183 	struct spdk_bs_opts opts;
5184 
5185 	dev = init_dev();
5186 	spdk_bs_opts_init(&opts, sizeof(opts));
5187 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
5188 
5189 	/* Initialize a new blob store */
5190 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
5191 	poll_threads();
5192 	CU_ASSERT(g_bserrno == 0);
5193 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
5194 	bs = g_bs;
5195 
5196 	for (i = 0; i < 4; i++) {
5197 		blob = ut_blob_create_and_open(bs, NULL);
5198 		iter_ctx.blobid[i] = spdk_blob_get_id(blob);
5199 
5200 		/* Just save the blobid as an xattr for testing purposes. */
5201 		rc = spdk_blob_set_xattr(blob, "blobid", &iter_ctx.blobid[i], sizeof(spdk_blob_id));
5202 		CU_ASSERT(rc == 0);
5203 
5204 		/* Resize the blob */
5205 		spdk_blob_resize(blob, i, blob_op_complete, NULL);
5206 		poll_threads();
5207 		CU_ASSERT(g_bserrno == 0);
5208 
5209 		spdk_blob_close(blob, blob_op_complete, NULL);
5210 		poll_threads();
5211 		CU_ASSERT(g_bserrno == 0);
5212 	}
5213 
5214 	g_bserrno = -1;
5215 	spdk_bs_unload(bs, bs_op_complete, NULL);
5216 	poll_threads();
5217 	CU_ASSERT(g_bserrno == 0);
5218 
5219 	dev = init_dev();
5220 	spdk_bs_opts_init(&opts, sizeof(opts));
5221 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
5222 	opts.iter_cb_fn = test_iter;
5223 	opts.iter_cb_arg = &iter_ctx;
5224 
5225 	/* Test blob iteration during load after a clean shutdown. */
5226 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
5227 	poll_threads();
5228 	CU_ASSERT(g_bserrno == 0);
5229 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
5230 	bs = g_bs;
5231 
5232 	/* Dirty shutdown */
5233 	bs_free(bs);
5234 
5235 	dev = init_dev();
5236 	spdk_bs_opts_init(&opts, sizeof(opts));
5237 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
5238 	opts.iter_cb_fn = test_iter;
5239 	iter_ctx.current_iter = 0;
5240 	opts.iter_cb_arg = &iter_ctx;
5241 
5242 	/* Test blob iteration during load after a dirty shutdown. */
5243 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
5244 	poll_threads();
5245 	CU_ASSERT(g_bserrno == 0);
5246 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
5247 	bs = g_bs;
5248 
5249 	spdk_bs_unload(bs, bs_op_complete, NULL);
5250 	poll_threads();
5251 	CU_ASSERT(g_bserrno == 0);
5252 	g_bs = NULL;
5253 }
5254 
5255 static void
5256 blob_snapshot_rw(void)
5257 {
5258 	static const uint8_t zero[10 * BLOCKLEN] = { 0 };
5259 	struct spdk_blob_store *bs = g_bs;
5260 	struct spdk_blob *blob, *snapshot;
5261 	struct spdk_io_channel *channel;
5262 	struct spdk_blob_opts opts;
5263 	spdk_blob_id blobid, snapshotid;
5264 	uint64_t free_clusters;
5265 	uint64_t cluster_size;
5266 	uint64_t io_unit_size;
5267 	uint8_t payload_read[10 * BLOCKLEN];
5268 	uint8_t payload_write[10 * BLOCKLEN];
5269 	uint64_t write_bytes_start;
5270 	uint64_t read_bytes_start;
5271 	uint64_t copy_bytes_start;
5272 	uint64_t write_bytes;
5273 	uint64_t read_bytes;
5274 	uint64_t copy_bytes;
5275 	uint64_t expected_bytes;
5276 
5277 	free_clusters = spdk_bs_free_cluster_count(bs);
5278 	cluster_size = spdk_bs_get_cluster_size(bs);
5279 	io_unit_size = spdk_bs_get_io_unit_size(bs);
5280 
5281 	channel = spdk_bs_alloc_io_channel(bs);
5282 	CU_ASSERT(channel != NULL);
5283 
5284 	ut_spdk_blob_opts_init(&opts);
5285 	opts.thin_provision = true;
5286 	opts.num_clusters = 5;
5287 
5288 	blob = ut_blob_create_and_open(bs, &opts);
5289 	blobid = spdk_blob_get_id(blob);
5290 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
5291 
5292 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
5293 
5294 	memset(payload_read, 0xFF, sizeof(payload_read));
5295 	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
5296 	poll_threads();
5297 	CU_ASSERT(g_bserrno == 0);
5298 	CU_ASSERT(memcmp(zero, payload_read, 10 * BLOCKLEN) == 0);
5299 
5300 	memset(payload_write, 0xE5, sizeof(payload_write));
5301 	spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
5302 	poll_threads();
5303 	CU_ASSERT(g_bserrno == 0);
5304 	CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs));
5305 
5306 	/* Create snapshot from blob */
5307 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5308 	poll_threads();
5309 	CU_ASSERT(g_bserrno == 0);
5310 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5311 	snapshotid = g_blobid;
5312 
5313 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
5314 	poll_threads();
5315 	CU_ASSERT(g_bserrno == 0);
5316 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5317 	snapshot = g_blob;
5318 	CU_ASSERT(snapshot->data_ro == true);
5319 	CU_ASSERT(snapshot->md_ro == true);
5320 
5321 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5);
5322 
5323 	write_bytes_start = g_dev_write_bytes;
5324 	read_bytes_start = g_dev_read_bytes;
5325 	copy_bytes_start = g_dev_copy_bytes;
5326 
5327 	memset(payload_write, 0xAA, sizeof(payload_write));
5328 	spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
5329 	poll_threads();
5330 	CU_ASSERT(g_bserrno == 0);
5331 	CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs));
5332 
5333 	/* For a clone we need to allocate and copy one cluster, update one page of metadata
5334 	 * and then write 10 io units of payload.
5335 	 */
5336 	write_bytes = g_dev_write_bytes - write_bytes_start;
5337 	read_bytes = g_dev_read_bytes - read_bytes_start;
5338 	copy_bytes = g_dev_copy_bytes - copy_bytes_start;
5339 	if (g_dev_copy_enabled) {
5340 		CU_ASSERT(copy_bytes == cluster_size);
5341 	} else {
5342 		CU_ASSERT(copy_bytes == 0);
5343 	}
5344 	expected_bytes = 10 * io_unit_size + cluster_size + spdk_bs_get_page_size(bs);
5345 	if (g_use_extent_table) {
5346 		/* Add one more page for EXTENT_PAGE write */
5347 		expected_bytes += spdk_bs_get_page_size(bs);
5348 	}
5349 	CU_ASSERT(write_bytes + copy_bytes == expected_bytes);
5350 	CU_ASSERT(read_bytes + copy_bytes == cluster_size);
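	/* The accounting above: 10 io units of user payload, one whole cluster brought over from
	 * the snapshot (via read+write, or via the copy offload when g_dev_copy_enabled is set),
	 * and one metadata page, plus an extra EXTENT_PAGE write when the extent table is
	 * enabled. */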
5351 
5352 	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
5353 	poll_threads();
5354 	CU_ASSERT(g_bserrno == 0);
5355 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * BLOCKLEN) == 0);
5356 
5357 	/* Data on snapshot should not change after write to clone */
5358 	memset(payload_write, 0xE5, sizeof(payload_write));
5359 	spdk_blob_io_read(snapshot, channel, payload_read, 4, 10, blob_op_complete, NULL);
5360 	poll_threads();
5361 	CU_ASSERT(g_bserrno == 0);
5362 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * BLOCKLEN) == 0);
5363 
5364 	ut_blob_close_and_delete(bs, blob);
5365 	ut_blob_close_and_delete(bs, snapshot);
5366 
5367 	spdk_bs_free_io_channel(channel);
5368 	poll_threads();
5369 	g_blob = NULL;
5370 	g_blobid = 0;
5371 }
5372 
5373 static void
5374 blob_snapshot_rw_iov(void)
5375 {
5376 	static const uint8_t zero[10 * BLOCKLEN] = { 0 };
5377 	struct spdk_blob_store *bs = g_bs;
5378 	struct spdk_blob *blob, *snapshot;
5379 	struct spdk_io_channel *channel;
5380 	struct spdk_blob_opts opts;
5381 	spdk_blob_id blobid, snapshotid;
5382 	uint64_t free_clusters;
5383 	uint8_t payload_read[10 * BLOCKLEN];
5384 	uint8_t payload_write[10 * BLOCKLEN];
5385 	struct iovec iov_read[3];
5386 	struct iovec iov_write[3];
5387 
5388 	free_clusters = spdk_bs_free_cluster_count(bs);
5389 
5390 	channel = spdk_bs_alloc_io_channel(bs);
5391 	CU_ASSERT(channel != NULL);
5392 
5393 	ut_spdk_blob_opts_init(&opts);
5394 	opts.thin_provision = true;
5395 	opts.num_clusters = 5;
5396 
5397 	blob = ut_blob_create_and_open(bs, &opts);
5398 	blobid = spdk_blob_get_id(blob);
5399 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
5400 
5401 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
5402 
5403 	/* Create snapshot from blob */
5404 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5405 	poll_threads();
5406 	CU_ASSERT(g_bserrno == 0);
5407 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5408 	snapshotid = g_blobid;
5409 
5410 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
5411 	poll_threads();
5412 	CU_ASSERT(g_bserrno == 0);
5413 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5414 	snapshot = g_blob;
5415 	CU_ASSERT(snapshot->data_ro == true);
5416 	CU_ASSERT(snapshot->md_ro == true);
5417 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5);
5418 
5419 	/* Payload should be all zeros from unallocated clusters */
5420 	memset(payload_read, 0xAA, sizeof(payload_read));
5421 	iov_read[0].iov_base = payload_read;
5422 	iov_read[0].iov_len = 3 * BLOCKLEN;
5423 	iov_read[1].iov_base = payload_read + 3 * BLOCKLEN;
5424 	iov_read[1].iov_len = 4 * BLOCKLEN;
5425 	iov_read[2].iov_base = payload_read + 7 * BLOCKLEN;
5426 	iov_read[2].iov_len = 3 * BLOCKLEN;
5427 	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
5428 	poll_threads();
5429 	CU_ASSERT(g_bserrno == 0);
5430 	CU_ASSERT(memcmp(zero, payload_read, 10 * BLOCKLEN) == 0);
5431 
5432 	memset(payload_write, 0xE5, sizeof(payload_write));
5433 	iov_write[0].iov_base = payload_write;
5434 	iov_write[0].iov_len = 1 * BLOCKLEN;
5435 	iov_write[1].iov_base = payload_write + 1 * BLOCKLEN;
5436 	iov_write[1].iov_len = 5 * BLOCKLEN;
5437 	iov_write[2].iov_base = payload_write + 6 * BLOCKLEN;
5438 	iov_write[2].iov_len = 4 * BLOCKLEN;
5439 
5440 	spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
5441 	poll_threads();
5442 	CU_ASSERT(g_bserrno == 0);
5443 
5444 	memset(payload_read, 0xAA, sizeof(payload_read));
5445 	iov_read[0].iov_base = payload_read;
5446 	iov_read[0].iov_len = 3 * BLOCKLEN;
5447 	iov_read[1].iov_base = payload_read + 3 * BLOCKLEN;
5448 	iov_read[1].iov_len = 4 * BLOCKLEN;
5449 	iov_read[2].iov_base = payload_read + 7 * BLOCKLEN;
5450 	iov_read[2].iov_len = 3 * BLOCKLEN;
5451 	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
5452 	poll_threads();
5453 	CU_ASSERT(g_bserrno == 0);
5454 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * BLOCKLEN) == 0);
5455 
5456 	spdk_bs_free_io_channel(channel);
5457 	poll_threads();
5458 
5459 	ut_blob_close_and_delete(bs, blob);
5460 	ut_blob_close_and_delete(bs, snapshot);
5461 }
5462 
5463 /**
5464  * Inflate / decouple parent rw unit tests.
5465  *
5466  * --------------
5467  * original blob:         0         1         2         3         4
5468  *                   ,---------+---------+---------+---------+---------.
5469  *         snapshot  |xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|    -    |
5470  *                   +---------+---------+---------+---------+---------+
5471  *         snapshot2 |    -    |yyyyyyyyy|    -    |yyyyyyyyy|    -    |
5472  *                   +---------+---------+---------+---------+---------+
5473  *         blob      |    -    |zzzzzzzzz|    -    |    -    |    -    |
5474  *                   '---------+---------+---------+---------+---------'
5475  *                   .         .         .         .         .         .
5476  * --------          .         .         .         .         .         .
5477  * inflate:          .         .         .         .         .         .
5478  *                   ,---------+---------+---------+---------+---------.
5479  *         blob      |xxxxxxxxx|zzzzzzzzz|xxxxxxxxx|yyyyyyyyy|000000000|
5480  *                   '---------+---------+---------+---------+---------'
5481  *
5482  *         NOTE: needs to allocate 4 clusters, thin provisioning removed, dependency
5483  *               on snapshot2 and snapshot removed .         .         .
5484  *                   .         .         .         .         .         .
5485  * ----------------  .         .         .         .         .         .
5486  * decouple parent:  .         .         .         .         .         .
5487  *                   ,---------+---------+---------+---------+---------.
5488  *         snapshot  |xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|    -    |
5489  *                   +---------+---------+---------+---------+---------+
5490  *         blob      |    -    |zzzzzzzzz|    -    |yyyyyyyyy|    -    |
5491  *                   '---------+---------+---------+---------+---------'
5492  *
5493  *         NOTE: needs to allocate 1 cluster, 3 clusters unallocated, dependency
5494  *               on snapshot2 removed and on snapshot still exists. Snapshot2
5495  *               should remain a clone of snapshot.
5496  */
5497 static void
5498 _blob_inflate_rw(bool decouple_parent)
5499 {
5500 	struct spdk_blob_store *bs = g_bs;
5501 	struct spdk_blob *blob, *snapshot, *snapshot2;
5502 	struct spdk_io_channel *channel;
5503 	struct spdk_blob_opts opts;
5504 	spdk_blob_id blobid, snapshotid, snapshot2id;
5505 	uint64_t free_clusters;
5506 	uint64_t cluster_size;
5507 
5508 	uint64_t payload_size;
5509 	uint8_t *payload_read;
5510 	uint8_t *payload_write;
5511 	uint8_t *payload_clone;
5512 
5513 	uint64_t io_units_per_cluster;
5514 	uint64_t io_units_per_payload;
5515 
5516 	int i;
5517 	spdk_blob_id ids[2];
5518 	size_t count;
5519 
5520 	free_clusters = spdk_bs_free_cluster_count(bs);
5521 	cluster_size = spdk_bs_get_cluster_size(bs);
5522 	io_units_per_cluster = cluster_size / spdk_bs_get_io_unit_size(bs);
5523 	io_units_per_payload = io_units_per_cluster * 5;
5524 
5525 	payload_size = cluster_size * 5;
5526 
5527 	payload_read = malloc(payload_size);
5528 	SPDK_CU_ASSERT_FATAL(payload_read != NULL);
5529 
5530 	payload_write = malloc(payload_size);
5531 	SPDK_CU_ASSERT_FATAL(payload_write != NULL);
5532 
5533 	payload_clone = malloc(payload_size);
5534 	SPDK_CU_ASSERT_FATAL(payload_clone != NULL);
5535 
5536 	channel = spdk_bs_alloc_io_channel(bs);
5537 	SPDK_CU_ASSERT_FATAL(channel != NULL);
5538 
5539 	/* Create blob */
5540 	ut_spdk_blob_opts_init(&opts);
5541 	opts.thin_provision = true;
5542 	opts.num_clusters = 5;
5543 
5544 	blob = ut_blob_create_and_open(bs, &opts);
5545 	blobid = spdk_blob_get_id(blob);
5546 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
5547 
5548 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
5549 
5550 	/* 1) Initial read should return zeroed payload */
5551 	memset(payload_read, 0xFF, payload_size);
5552 	spdk_blob_io_read(blob, channel, payload_read, 0, io_units_per_payload,
5553 			  blob_op_complete, NULL);
5554 	poll_threads();
5555 	CU_ASSERT(g_bserrno == 0);
5556 	CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size));
5557 
5558 	/* Fill whole blob with a pattern, except last cluster (to be sure it
5559 	 * isn't allocated) */
5560 	memset(payload_write, 0xE5, payload_size - cluster_size);
5561 	spdk_blob_io_write(blob, channel, payload_write, 0, io_units_per_payload -
5562 			   io_units_per_cluster, blob_op_complete, NULL);
5563 	poll_threads();
5564 	CU_ASSERT(g_bserrno == 0);
5565 	CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs));
5566 
5567 	/* 2) Create snapshot from blob (first level) */
5568 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5569 	poll_threads();
5570 	CU_ASSERT(g_bserrno == 0);
5571 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5572 	snapshotid = g_blobid;
5573 
5574 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
5575 	poll_threads();
5576 	CU_ASSERT(g_bserrno == 0);
5577 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5578 	snapshot = g_blob;
5579 	CU_ASSERT(snapshot->data_ro == true);
5580 	CU_ASSERT(snapshot->md_ro == true);
5581 
5582 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5);
5583 
5584 	/* Write every second cluster with a pattern.
5585 	 *
5586 	 * The last cluster shouldn't be written, to be sure that neither the
5587 	 * snapshot nor the clone allocates it.
5588 	 *
5589 	 * payload_clone stores the expected result of reading "blob" at this point
5590 	 * and is used only to check data consistency on the clone before and after
5591 	 * inflation. Initially we fill it with the backing snapshot's pattern
5592 	 * written earlier.
5593 	 */
5594 	memset(payload_clone, 0xE5, payload_size - cluster_size);
5595 	memset(payload_clone + payload_size - cluster_size, 0x00, cluster_size);
5596 	memset(payload_write, 0xAA, payload_size);
5597 	for (i = 1; i < 5; i += 2) {
5598 		spdk_blob_io_write(blob, channel, payload_write, i * io_units_per_cluster,
5599 				   io_units_per_cluster, blob_op_complete, NULL);
5600 		poll_threads();
5601 		CU_ASSERT(g_bserrno == 0);
5602 
5603 		/* Update expected result */
5604 		memcpy(payload_clone + (cluster_size * i), payload_write,
5605 		       cluster_size);
5606 	}
5607 	CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs));
5608 
5609 	/* Check data consistency on clone */
5610 	memset(payload_read, 0xFF, payload_size);
5611 	spdk_blob_io_read(blob, channel, payload_read, 0, io_units_per_payload,
5612 			  blob_op_complete, NULL);
5613 	poll_threads();
5614 	CU_ASSERT(g_bserrno == 0);
5615 	CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0);
5616 
5617 	/* 3) Create second levels snapshot from blob */
5618 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5619 	poll_threads();
5620 	CU_ASSERT(g_bserrno == 0);
5621 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5622 	snapshot2id = g_blobid;
5623 
5624 	spdk_bs_open_blob(bs, snapshot2id, blob_op_with_handle_complete, NULL);
5625 	poll_threads();
5626 	CU_ASSERT(g_bserrno == 0);
5627 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5628 	snapshot2 = g_blob;
5629 	CU_ASSERT(snapshot2->data_ro == true);
5630 	CU_ASSERT(snapshot2->md_ro == true);
5631 
5632 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot2) == 5);
5633 
5634 	CU_ASSERT(snapshot2->parent_id == snapshotid);
5635 
5636 	/* Write one cluster on the top-level blob. This cluster (1) covers an
5637 	 * already allocated cluster in snapshot2, so it shouldn't be inflated
5638 	 * at all */
5639 	spdk_blob_io_write(blob, channel, payload_write, io_units_per_cluster,
5640 			   io_units_per_cluster, blob_op_complete, NULL);
5641 	poll_threads();
5642 	CU_ASSERT(g_bserrno == 0);
5643 
5644 	/* Update expected result */
5645 	memcpy(payload_clone + cluster_size, payload_write, cluster_size);
5646 
5647 	/* Check data consistency on clone */
5648 	memset(payload_read, 0xFF, payload_size);
5649 	spdk_blob_io_read(blob, channel, payload_read, 0, io_units_per_payload,
5650 			  blob_op_complete, NULL);
5651 	poll_threads();
5652 	CU_ASSERT(g_bserrno == 0);
5653 	CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0);
5654 
5655 
5656 	/* Close all blobs */
5657 	spdk_blob_close(blob, blob_op_complete, NULL);
5658 	poll_threads();
5659 	CU_ASSERT(g_bserrno == 0);
5660 
5661 	spdk_blob_close(snapshot2, blob_op_complete, NULL);
5662 	poll_threads();
5663 	CU_ASSERT(g_bserrno == 0);
5664 
5665 	spdk_blob_close(snapshot, blob_op_complete, NULL);
5666 	poll_threads();
5667 	CU_ASSERT(g_bserrno == 0);
5668 
5669 	/* Check snapshot-clone relations */
5670 	count = 2;
5671 	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);
5672 	CU_ASSERT(count == 1);
5673 	CU_ASSERT(ids[0] == snapshot2id);
5674 
5675 	count = 2;
5676 	CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0);
5677 	CU_ASSERT(count == 1);
5678 	CU_ASSERT(ids[0] == blobid);
5679 
5680 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshot2id);
5681 
5682 	free_clusters = spdk_bs_free_cluster_count(bs);
5683 	if (!decouple_parent) {
5684 		/* Do full blob inflation */
5685 		spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
5686 		poll_threads();
5687 		CU_ASSERT(g_bserrno == 0);
5688 
5689 		/* All clusters should be inflated (except one already allocated
5690 		 * in a top level blob) */
5691 		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 4);
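		/* Four new allocations: clusters 0, 2 and 3 are copied up from the snapshots and
		 * cluster 4 is allocated and zeroed; cluster 1 was already owned by the blob. */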
5692 
5693 		/* Check if relation tree updated correctly */
5694 		count = 2;
5695 		CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);
5696 
5697 		/* snapshotid has one clone */
5698 		CU_ASSERT(count == 1);
5699 		CU_ASSERT(ids[0] == snapshot2id);
5700 
5701 		/* snapshot2id has no clones */
5702 		count = 2;
5703 		CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0);
5704 		CU_ASSERT(count == 0);
5705 
5706 		CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID);
5707 	} else {
5708 		/* Decouple parent of blob */
5709 		spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
5710 		poll_threads();
5711 		CU_ASSERT(g_bserrno == 0);
5712 
5713 		/* Only one cluster from the parent should be inflated (the second one
5714 		 * is covered by a cluster already written and allocated on the top-level
5715 		 * blob) */
5716 		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 1);
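		/* Only cluster 3 (owned by snapshot2) is copied up; clusters 0 and 2 stay backed by
		 * the base snapshot, cluster 1 is already owned by the blob and cluster 4 stays
		 * unallocated. */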
5717 
5718 		/* Check if relation tree updated correctly */
5719 		count = 2;
5720 		CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);
5721 
5722 		/* snapshotid has two clones now */
5723 		CU_ASSERT(count == 2);
5724 		CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
5725 		CU_ASSERT(ids[0] == snapshot2id || ids[1] == snapshot2id);
5726 
5727 		/* snapshot2id has no clones */
5728 		count = 2;
5729 		CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0);
5730 		CU_ASSERT(count == 0);
5731 
5732 		CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
5733 	}
5734 
5735 	/* Try to delete snapshot2 (should pass) */
5736 	spdk_bs_delete_blob(bs, snapshot2id, blob_op_complete, NULL);
5737 	poll_threads();
5738 	CU_ASSERT(g_bserrno == 0);
5739 
5740 	/* Try to delete base snapshot */
5741 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
5742 	poll_threads();
5743 	CU_ASSERT(g_bserrno == 0);
5744 
5745 	/* Reopen blob after snapshot deletion */
5746 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
5747 	poll_threads();
5748 	CU_ASSERT(g_bserrno == 0);
5749 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5750 	blob = g_blob;
5751 
5752 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
5753 
5754 	/* Check data consistency on inflated blob */
5755 	memset(payload_read, 0xFF, payload_size);
5756 	spdk_blob_io_read(blob, channel, payload_read, 0, io_units_per_payload,
5757 			  blob_op_complete, NULL);
5758 	poll_threads();
5759 	CU_ASSERT(g_bserrno == 0);
5760 	CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0);
5761 
5762 	spdk_bs_free_io_channel(channel);
5763 	poll_threads();
5764 
5765 	free(payload_read);
5766 	free(payload_write);
5767 	free(payload_clone);
5768 
5769 	ut_blob_close_and_delete(bs, blob);
5770 }
5771 
5772 static void
5773 blob_inflate_rw(void)
5774 {
5775 	_blob_inflate_rw(false);
5776 	_blob_inflate_rw(true);
5777 }
5778 
5779 /**
5780  * Snapshot-clones relation test
5781  *
5782  *         snapshot
5783  *            |
5784  *      +-----+-----+
5785  *      |           |
5786  *   blob(ro)   snapshot2
5787  *      |           |
5788  *   clone2      clone
5789  */
5790 static void
5791 blob_relations(void)
5792 {
5793 	struct spdk_blob_store *bs;
5794 	struct spdk_bs_dev *dev;
5795 	struct spdk_bs_opts bs_opts;
5796 	struct spdk_blob_opts opts;
5797 	struct spdk_blob *blob, *snapshot, *snapshot2, *clone, *clone2;
5798 	spdk_blob_id blobid, cloneid, snapshotid, cloneid2, snapshotid2;
5799 	int rc;
5800 	size_t count;
5801 	spdk_blob_id ids[10] = {};
5802 
5803 	dev = init_dev();
5804 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
5805 	snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");
5806 
5807 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
5808 	poll_threads();
5809 	CU_ASSERT(g_bserrno == 0);
5810 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
5811 	bs = g_bs;
5812 
5813 	/* 1. Create blob with 10 clusters */
5814 
5815 	ut_spdk_blob_opts_init(&opts);
5816 	opts.num_clusters = 10;
5817 
5818 	blob = ut_blob_create_and_open(bs, &opts);
5819 	blobid = spdk_blob_get_id(blob);
5820 
5821 	CU_ASSERT(!spdk_blob_is_read_only(blob));
5822 	CU_ASSERT(!spdk_blob_is_snapshot(blob));
5823 	CU_ASSERT(!spdk_blob_is_clone(blob));
5824 	CU_ASSERT(!spdk_blob_is_thin_provisioned(blob));
5825 
5826 	/* blob should have neither an underlying snapshot nor any clones */
5827 	CU_ASSERT(blob->parent_id == SPDK_BLOBID_INVALID);
5828 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID);
5829 	count = SPDK_COUNTOF(ids);
5830 	rc = spdk_blob_get_clones(bs, blobid, ids, &count);
5831 	CU_ASSERT(rc == 0);
5832 	CU_ASSERT(count == 0);
5833 
5834 
5835 	/* 2. Create snapshot */
5836 
5837 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5838 	poll_threads();
5839 	CU_ASSERT(g_bserrno == 0);
5840 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5841 	snapshotid = g_blobid;
5842 
5843 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
5844 	poll_threads();
5845 	CU_ASSERT(g_bserrno == 0);
5846 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5847 	snapshot = g_blob;
5848 
5849 	CU_ASSERT(spdk_blob_is_read_only(snapshot));
5850 	CU_ASSERT(spdk_blob_is_snapshot(snapshot));
5851 	CU_ASSERT(!spdk_blob_is_clone(snapshot));
5852 	CU_ASSERT(snapshot->parent_id == SPDK_BLOBID_INVALID);
5853 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid) == SPDK_BLOBID_INVALID);
5854 
5855 	/* Check if original blob is converted to the clone of snapshot */
5856 	CU_ASSERT(!spdk_blob_is_read_only(blob));
5857 	CU_ASSERT(!spdk_blob_is_snapshot(blob));
5858 	CU_ASSERT(spdk_blob_is_clone(blob));
5859 	CU_ASSERT(spdk_blob_is_thin_provisioned(blob));
5860 	CU_ASSERT(blob->parent_id == snapshotid);
5861 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
5862 
5863 	count = SPDK_COUNTOF(ids);
5864 	rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
5865 	CU_ASSERT(rc == 0);
5866 	CU_ASSERT(count == 1);
5867 	CU_ASSERT(ids[0] == blobid);
5868 
5869 
5870 	/* 3. Create clone from snapshot */
5871 
5872 	spdk_bs_create_clone(bs, snapshotid, NULL, blob_op_with_id_complete, NULL);
5873 	poll_threads();
5874 	CU_ASSERT(g_bserrno == 0);
5875 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5876 	cloneid = g_blobid;
5877 
5878 	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
5879 	poll_threads();
5880 	CU_ASSERT(g_bserrno == 0);
5881 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5882 	clone = g_blob;
5883 
5884 	CU_ASSERT(!spdk_blob_is_read_only(clone));
5885 	CU_ASSERT(!spdk_blob_is_snapshot(clone));
5886 	CU_ASSERT(spdk_blob_is_clone(clone));
5887 	CU_ASSERT(spdk_blob_is_thin_provisioned(clone));
5888 	CU_ASSERT(clone->parent_id == snapshotid);
5889 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid);
5890 
5891 	count = SPDK_COUNTOF(ids);
5892 	rc = spdk_blob_get_clones(bs, cloneid, ids, &count);
5893 	CU_ASSERT(rc == 0);
5894 	CU_ASSERT(count == 0);
5895 
5896 	/* Check if clone is on the snapshot's list */
5897 	count = SPDK_COUNTOF(ids);
5898 	rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
5899 	CU_ASSERT(rc == 0);
5900 	CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
5901 	CU_ASSERT(ids[0] == cloneid || ids[1] == cloneid);
5902 
5903 
5904 	/* 4. Create snapshot of the clone */
5905 
5906 	spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL);
5907 	poll_threads();
5908 	CU_ASSERT(g_bserrno == 0);
5909 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5910 	snapshotid2 = g_blobid;
5911 
5912 	spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL);
5913 	poll_threads();
5914 	CU_ASSERT(g_bserrno == 0);
5915 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5916 	snapshot2 = g_blob;
5917 
5918 	CU_ASSERT(spdk_blob_is_read_only(snapshot2));
5919 	CU_ASSERT(spdk_blob_is_snapshot(snapshot2));
5920 	CU_ASSERT(spdk_blob_is_clone(snapshot2));
5921 	CU_ASSERT(snapshot2->parent_id == snapshotid);
5922 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid);
5923 
5924 	/* Check if clone is converted to the clone of snapshot2 and snapshot2
5925 	 * is a child of snapshot */
5926 	CU_ASSERT(!spdk_blob_is_read_only(clone));
5927 	CU_ASSERT(!spdk_blob_is_snapshot(clone));
5928 	CU_ASSERT(spdk_blob_is_clone(clone));
5929 	CU_ASSERT(spdk_blob_is_thin_provisioned(clone));
5930 	CU_ASSERT(clone->parent_id == snapshotid2);
5931 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2);
5932 
5933 	count = SPDK_COUNTOF(ids);
5934 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
5935 	CU_ASSERT(rc == 0);
5936 	CU_ASSERT(count == 1);
5937 	CU_ASSERT(ids[0] == cloneid);
5938 
5939 
5940 	/* 5. Try to create clone from read only blob */
5941 
5942 	/* Mark blob as read only */
5943 	spdk_blob_set_read_only(blob);
5944 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
5945 	poll_threads();
5946 	CU_ASSERT(g_bserrno == 0);
5947 
5948 	/* Check if previously created blob is read only clone */
5949 	CU_ASSERT(spdk_blob_is_read_only(blob));
5950 	CU_ASSERT(!spdk_blob_is_snapshot(blob));
5951 	CU_ASSERT(spdk_blob_is_clone(blob));
5952 	CU_ASSERT(spdk_blob_is_thin_provisioned(blob));
5953 
5954 	/* Create clone from read only blob */
5955 	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5956 	poll_threads();
5957 	CU_ASSERT(g_bserrno == 0);
5958 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5959 	cloneid2 = g_blobid;
5960 
5961 	spdk_bs_open_blob(bs, cloneid2, blob_op_with_handle_complete, NULL);
5962 	poll_threads();
5963 	CU_ASSERT(g_bserrno == 0);
5964 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5965 	clone2 = g_blob;
5966 
5967 	CU_ASSERT(!spdk_blob_is_read_only(clone2));
5968 	CU_ASSERT(!spdk_blob_is_snapshot(clone2));
5969 	CU_ASSERT(spdk_blob_is_clone(clone2));
5970 	CU_ASSERT(spdk_blob_is_thin_provisioned(clone2));
5971 
5972 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid);
5973 
5974 	count = SPDK_COUNTOF(ids);
5975 	rc = spdk_blob_get_clones(bs, blobid, ids, &count);
5976 	CU_ASSERT(rc == 0);
5977 
5978 	CU_ASSERT(count == 1);
5979 	CU_ASSERT(ids[0] == cloneid2);
5980 
5981 	/* Close blobs */
5982 
5983 	spdk_blob_close(clone2, blob_op_complete, NULL);
5984 	poll_threads();
5985 	CU_ASSERT(g_bserrno == 0);
5986 
5987 	spdk_blob_close(blob, blob_op_complete, NULL);
5988 	poll_threads();
5989 	CU_ASSERT(g_bserrno == 0);
5990 
5991 	spdk_blob_close(clone, blob_op_complete, NULL);
5992 	poll_threads();
5993 	CU_ASSERT(g_bserrno == 0);
5994 
5995 	spdk_blob_close(snapshot, blob_op_complete, NULL);
5996 	poll_threads();
5997 	CU_ASSERT(g_bserrno == 0);
5998 
5999 	spdk_blob_close(snapshot2, blob_op_complete, NULL);
6000 	poll_threads();
6001 	CU_ASSERT(g_bserrno == 0);
6002 
6003 	/* Try to delete snapshot with more than 1 clone */
6004 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
6005 	poll_threads();
6006 	CU_ASSERT(g_bserrno != 0);
6007 
6008 	ut_bs_reload(&bs, &bs_opts);
6009 
6010 	/* NULL ids array should return number of clones in count */
6011 	count = SPDK_COUNTOF(ids);
6012 	rc = spdk_blob_get_clones(bs, snapshotid, NULL, &count);
6013 	CU_ASSERT(rc == -ENOMEM);
6014 	CU_ASSERT(count == 2);
6015 
6016 	/* incorrect array size */
6017 	count = 1;
6018 	rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
6019 	CU_ASSERT(rc == -ENOMEM);
6020 	CU_ASSERT(count == 2);
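	/* In both cases spdk_blob_get_clones() fails with -ENOMEM because the array is missing or
	 * too small, but it still reports the required number of clones through count. */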
6021 
6022 
6023 	/* Verify structure of loaded blob store */
6024 
6025 	/* snapshot */
6026 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid) == SPDK_BLOBID_INVALID);
6027 
6028 	count = SPDK_COUNTOF(ids);
6029 	rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
6030 	CU_ASSERT(rc == 0);
6031 	CU_ASSERT(count == 2);
6032 	CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
6033 	CU_ASSERT(ids[0] == snapshotid2 || ids[1] == snapshotid2);
6034 
6035 	/* blob */
6036 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
6037 	count = SPDK_COUNTOF(ids);
6038 	rc = spdk_blob_get_clones(bs, blobid, ids, &count);
6039 	CU_ASSERT(rc == 0);
6040 	CU_ASSERT(count == 1);
6041 	CU_ASSERT(ids[0] == cloneid2);
6042 
6043 	/* clone */
6044 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2);
6045 	count = SPDK_COUNTOF(ids);
6046 	rc = spdk_blob_get_clones(bs, cloneid, ids, &count);
6047 	CU_ASSERT(rc == 0);
6048 	CU_ASSERT(count == 0);
6049 
6050 	/* snapshot2 */
6051 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid);
6052 	count = SPDK_COUNTOF(ids);
6053 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
6054 	CU_ASSERT(rc == 0);
6055 	CU_ASSERT(count == 1);
6056 	CU_ASSERT(ids[0] == cloneid);
6057 
6058 	/* clone2 */
6059 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid);
6060 	count = SPDK_COUNTOF(ids);
6061 	rc = spdk_blob_get_clones(bs, cloneid2, ids, &count);
6062 	CU_ASSERT(rc == 0);
6063 	CU_ASSERT(count == 0);
6064 
6065 	/* Try to delete blob that user should not be able to remove */
6066 
6067 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
6068 	poll_threads();
6069 	CU_ASSERT(g_bserrno != 0);
6070 
6071 	/* Remove all blobs */
6072 
6073 	spdk_bs_delete_blob(bs, cloneid, blob_op_complete, NULL);
6074 	poll_threads();
6075 	CU_ASSERT(g_bserrno == 0);
6076 
6077 	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
6078 	poll_threads();
6079 	CU_ASSERT(g_bserrno == 0);
6080 
6081 	spdk_bs_delete_blob(bs, cloneid2, blob_op_complete, NULL);
6082 	poll_threads();
6083 	CU_ASSERT(g_bserrno == 0);
6084 
6085 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
6086 	poll_threads();
6087 	CU_ASSERT(g_bserrno == 0);
6088 
6089 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
6090 	poll_threads();
6091 	CU_ASSERT(g_bserrno == 0);
6092 
6093 	spdk_bs_unload(bs, bs_op_complete, NULL);
6094 	poll_threads();
6095 	CU_ASSERT(g_bserrno == 0);
6096 
6097 	g_bs = NULL;
6098 }
6099 
6100 /**
6101  * Snapshot-clones relation test 2
6102  *
6103  *         snapshot1
6104  *            |
6105  *         snapshot2
6106  *            |
6107  *      +-----+-----+
6108  *      |           |
6109  *   blob(ro)   snapshot3
6110  *      |           |
6111  *      |       snapshot4
6112  *      |        |     |
6113  *   clone2   clone  clone3
6114  */
6115 static void
6116 blob_relations2(void)
6117 {
6118 	struct spdk_blob_store *bs;
6119 	struct spdk_bs_dev *dev;
6120 	struct spdk_bs_opts bs_opts;
6121 	struct spdk_blob_opts opts;
6122 	struct spdk_blob *blob, *snapshot1, *snapshot2, *snapshot3, *snapshot4, *clone, *clone2;
6123 	spdk_blob_id blobid, snapshotid1, snapshotid2, snapshotid3, snapshotid4, cloneid, cloneid2,
6124 		     cloneid3;
6125 	int rc;
6126 	size_t count;
6127 	spdk_blob_id ids[10] = {};
6128 
6129 	dev = init_dev();
6130 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
6131 	snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");
6132 
6133 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
6134 	poll_threads();
6135 	CU_ASSERT(g_bserrno == 0);
6136 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
6137 	bs = g_bs;
6138 
6139 	/* 1. Create blob with 10 clusters */
6140 
6141 	ut_spdk_blob_opts_init(&opts);
6142 	opts.num_clusters = 10;
6143 
6144 	blob = ut_blob_create_and_open(bs, &opts);
6145 	blobid = spdk_blob_get_id(blob);
6146 
6147 	/* 2. Create snapshot1 */
6148 
6149 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
6150 	poll_threads();
6151 	CU_ASSERT(g_bserrno == 0);
6152 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
6153 	snapshotid1 = g_blobid;
6154 
6155 	spdk_bs_open_blob(bs, snapshotid1, blob_op_with_handle_complete, NULL);
6156 	poll_threads();
6157 	CU_ASSERT(g_bserrno == 0);
6158 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
6159 	snapshot1 = g_blob;
6160 
6161 	CU_ASSERT(snapshot1->parent_id == SPDK_BLOBID_INVALID);
6162 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid1) == SPDK_BLOBID_INVALID);
6163 
6164 	CU_ASSERT(blob->parent_id == snapshotid1);
6165 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid1);
6166 
6167 	/* Check if blob is the clone of snapshot1 */
6168 	CU_ASSERT(blob->parent_id == snapshotid1);
6169 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid1);
6170 
6171 	count = SPDK_COUNTOF(ids);
6172 	rc = spdk_blob_get_clones(bs, snapshotid1, ids, &count);
6173 	CU_ASSERT(rc == 0);
6174 	CU_ASSERT(count == 1);
6175 	CU_ASSERT(ids[0] == blobid);
6176 
6177 	/* 3. Create another snapshot */
6178 
6179 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
6180 	poll_threads();
6181 	CU_ASSERT(g_bserrno == 0);
6182 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
6183 	snapshotid2 = g_blobid;
6184 
6185 	spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL);
6186 	poll_threads();
6187 	CU_ASSERT(g_bserrno == 0);
6188 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
6189 	snapshot2 = g_blob;
6190 
6191 	CU_ASSERT(spdk_blob_is_clone(snapshot2));
6192 	CU_ASSERT(snapshot2->parent_id == snapshotid1);
6193 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid1);
6194 
6195 	/* Check if snapshot2 is the clone of snapshot1 and blob
6196 	 * is a child of snapshot2 */
6197 	CU_ASSERT(blob->parent_id == snapshotid2);
6198 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid2);
6199 
6200 	count = SPDK_COUNTOF(ids);
6201 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
6202 	CU_ASSERT(rc == 0);
6203 	CU_ASSERT(count == 1);
6204 	CU_ASSERT(ids[0] == blobid);
6205 
6206 	/* 4. Create clone from snapshot */
6207 
6208 	spdk_bs_create_clone(bs, snapshotid2, NULL, blob_op_with_id_complete, NULL);
6209 	poll_threads();
6210 	CU_ASSERT(g_bserrno == 0);
6211 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
6212 	cloneid = g_blobid;
6213 
6214 	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
6215 	poll_threads();
6216 	CU_ASSERT(g_bserrno == 0);
6217 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
6218 	clone = g_blob;
6219 
6220 	CU_ASSERT(clone->parent_id == snapshotid2);
6221 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2);
6222 
6223 	/* Check if clone is on the snapshot's list */
6224 	count = SPDK_COUNTOF(ids);
6225 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
6226 	CU_ASSERT(rc == 0);
6227 	CU_ASSERT(count == 2);
6228 	CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
6229 	CU_ASSERT(ids[0] == cloneid || ids[1] == cloneid);
6230 
6231 	/* 5. Create snapshot of the clone */
6232 
6233 	spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL);
6234 	poll_threads();
6235 	CU_ASSERT(g_bserrno == 0);
6236 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
6237 	snapshotid3 = g_blobid;
6238 
6239 	spdk_bs_open_blob(bs, snapshotid3, blob_op_with_handle_complete, NULL);
6240 	poll_threads();
6241 	CU_ASSERT(g_bserrno == 0);
6242 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
6243 	snapshot3 = g_blob;
6244 
6245 	CU_ASSERT(snapshot3->parent_id == snapshotid2);
6246 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid3) == snapshotid2);
6247 
6248 	/* Check if clone is converted to the clone of snapshot3 and snapshot3
6249 	 * is a child of snapshot2 */
6250 	CU_ASSERT(clone->parent_id == snapshotid3);
6251 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3);
6252 
6253 	count = SPDK_COUNTOF(ids);
6254 	rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count);
6255 	CU_ASSERT(rc == 0);
6256 	CU_ASSERT(count == 1);
6257 	CU_ASSERT(ids[0] == cloneid);
6258 
6259 	/* 6. Create another snapshot of the clone */
6260 
6261 	spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL);
6262 	poll_threads();
6263 	CU_ASSERT(g_bserrno == 0);
6264 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
6265 	snapshotid4 = g_blobid;
6266 
6267 	spdk_bs_open_blob(bs, snapshotid4, blob_op_with_handle_complete, NULL);
6268 	poll_threads();
6269 	CU_ASSERT(g_bserrno == 0);
6270 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
6271 	snapshot4 = g_blob;
6272 
6273 	CU_ASSERT(snapshot4->parent_id == snapshotid3);
6274 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid4) == snapshotid3);
6275 
6276 	/* Check if clone is converted to the clone of snapshot4 and snapshot4
6277 	 * is a child of snapshot3 */
6278 	CU_ASSERT(clone->parent_id == snapshotid4);
6279 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid4);
6280 
6281 	count = SPDK_COUNTOF(ids);
6282 	rc = spdk_blob_get_clones(bs, snapshotid4, ids, &count);
6283 	CU_ASSERT(rc == 0);
6284 	CU_ASSERT(count == 1);
6285 	CU_ASSERT(ids[0] == cloneid);
6286 
6287 	/* 7. Remove snapshot 4 */
6288 
6289 	ut_blob_close_and_delete(bs, snapshot4);
6290 
6291 	/* Check if relations are back to state from before creating snapshot 4 */
6292 	CU_ASSERT(clone->parent_id == snapshotid3);
6293 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3);
6294 
6295 	count = SPDK_COUNTOF(ids);
6296 	rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count);
6297 	CU_ASSERT(rc == 0);
6298 	CU_ASSERT(count == 1);
6299 	CU_ASSERT(ids[0] == cloneid);
6300 
6301 	/* 8. Create second clone of snapshot 3 and try to remove snapshot 3 */
6302 
6303 	spdk_bs_create_clone(bs, snapshotid3, NULL, blob_op_with_id_complete, NULL);
6304 	poll_threads();
6305 	CU_ASSERT(g_bserrno == 0);
6306 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
6307 	cloneid3 = g_blobid;
6308 
6309 	spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL);
6310 	poll_threads();
6311 	CU_ASSERT(g_bserrno != 0);
6312 
6313 	/* 9. Open snapshot 3 again and try to remove it while clone 3 is closed */
6314 
6315 	spdk_bs_open_blob(bs, snapshotid3, blob_op_with_handle_complete, NULL);
6316 	poll_threads();
6317 	CU_ASSERT(g_bserrno == 0);
6318 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
6319 	snapshot3 = g_blob;
6320 
6321 	spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL);
6322 	poll_threads();
6323 	CU_ASSERT(g_bserrno != 0);
6324 
6325 	spdk_blob_close(snapshot3, blob_op_complete, NULL);
6326 	poll_threads();
6327 	CU_ASSERT(g_bserrno == 0);
6328 
6329 	spdk_bs_delete_blob(bs, cloneid3, blob_op_complete, NULL);
6330 	poll_threads();
6331 	CU_ASSERT(g_bserrno == 0);
6332 
6333 	/* 10. Remove snapshot 1 */
6334 
6335 	/* Check snapshot 1 and snapshot 2 allocated clusters */
6336 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(snapshot1) == 10);
6337 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(snapshot2) == 0);
6338 
6339 	ut_blob_close_and_delete(bs, snapshot1);
6340 
6341 	/* Check that snapshot 2 took over as the root of the tree after snapshot 1 was removed */
6342 	CU_ASSERT(snapshot2->parent_id == SPDK_BLOBID_INVALID);
6343 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == SPDK_BLOBID_INVALID);
6344 
6345 	/* Check that snapshot 2 has the clusters that were allocated to snapshot 1 */
6346 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(snapshot2) == 10);
6347 
6348 	count = SPDK_COUNTOF(ids);
6349 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
6350 	CU_ASSERT(rc == 0);
6351 	CU_ASSERT(count == 2);
6352 	CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
6353 	CU_ASSERT(ids[0] == snapshotid3 || ids[1] == snapshotid3);
6354 
6355 	/* 11. Try to create clone from read only blob */
6356 
6357 	/* Mark blob as read only */
6358 	spdk_blob_set_read_only(blob);
6359 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
6360 	poll_threads();
6361 	CU_ASSERT(g_bserrno == 0);
6362 
6363 	/* Create clone from read only blob */
6364 	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
6365 	poll_threads();
6366 	CU_ASSERT(g_bserrno == 0);
6367 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
6368 	cloneid2 = g_blobid;
6369 
6370 	spdk_bs_open_blob(bs, cloneid2, blob_op_with_handle_complete, NULL);
6371 	poll_threads();
6372 	CU_ASSERT(g_bserrno == 0);
6373 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
6374 	clone2 = g_blob;
6375 
6376 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid);
6377 
6378 	count = SPDK_COUNTOF(ids);
6379 	rc = spdk_blob_get_clones(bs, blobid, ids, &count);
6380 	CU_ASSERT(rc == 0);
6381 	CU_ASSERT(count == 1);
6382 	CU_ASSERT(ids[0] == cloneid2);
6383 
6384 	/* Close blobs */
6385 
6386 	spdk_blob_close(clone2, blob_op_complete, NULL);
6387 	poll_threads();
6388 	CU_ASSERT(g_bserrno == 0);
6389 
6390 	spdk_blob_close(blob, blob_op_complete, NULL);
6391 	poll_threads();
6392 	CU_ASSERT(g_bserrno == 0);
6393 
6394 	spdk_blob_close(clone, blob_op_complete, NULL);
6395 	poll_threads();
6396 	CU_ASSERT(g_bserrno == 0);
6397 
6398 	spdk_blob_close(snapshot2, blob_op_complete, NULL);
6399 	poll_threads();
6400 	CU_ASSERT(g_bserrno == 0);
6401 
6402 	spdk_blob_close(snapshot3, blob_op_complete, NULL);
6403 	poll_threads();
6404 	CU_ASSERT(g_bserrno == 0);
6405 
6406 	ut_bs_reload(&bs, &bs_opts);
6407 
6408 	/* Verify structure of loaded blob store */
6409 
6410 	/* snapshot2 */
6411 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == SPDK_BLOBID_INVALID);
6412 
6413 	count = SPDK_COUNTOF(ids);
6414 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
6415 	CU_ASSERT(rc == 0);
6416 	CU_ASSERT(count == 2);
6417 	CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
6418 	CU_ASSERT(ids[0] == snapshotid3 || ids[1] == snapshotid3);
6419 
6420 	/* blob */
6421 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid2);
6422 	count = SPDK_COUNTOF(ids);
6423 	rc = spdk_blob_get_clones(bs, blobid, ids, &count);
6424 	CU_ASSERT(rc == 0);
6425 	CU_ASSERT(count == 1);
6426 	CU_ASSERT(ids[0] == cloneid2);
6427 
6428 	/* clone */
6429 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3);
6430 	count = SPDK_COUNTOF(ids);
6431 	rc = spdk_blob_get_clones(bs, cloneid, ids, &count);
6432 	CU_ASSERT(rc == 0);
6433 	CU_ASSERT(count == 0);
6434 
6435 	/* snapshot3 */
6436 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid3) == snapshotid2);
6437 	count = SPDK_COUNTOF(ids);
6438 	rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count);
6439 	CU_ASSERT(rc == 0);
6440 	CU_ASSERT(count == 1);
6441 	CU_ASSERT(ids[0] == cloneid);
6442 
6443 	/* clone2 */
6444 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid);
6445 	count = SPDK_COUNTOF(ids);
6446 	rc = spdk_blob_get_clones(bs, cloneid2, ids, &count);
6447 	CU_ASSERT(rc == 0);
6448 	CU_ASSERT(count == 0);
6449 
6450 	/* Try to delete all blobs in the worst possible order */
6451 
6452 	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
6453 	poll_threads();
6454 	CU_ASSERT(g_bserrno != 0);
6455 
6456 	spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL);
6457 	poll_threads();
6458 	CU_ASSERT(g_bserrno == 0);
6459 
6460 	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
6461 	poll_threads();
6462 	CU_ASSERT(g_bserrno != 0);
6463 
6464 	spdk_bs_delete_blob(bs, cloneid, blob_op_complete, NULL);
6465 	poll_threads();
6466 	CU_ASSERT(g_bserrno == 0);
6467 
6468 	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
6469 	poll_threads();
6470 	CU_ASSERT(g_bserrno == 0);
6471 
6472 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
6473 	poll_threads();
6474 	CU_ASSERT(g_bserrno == 0);
6475 
6476 	spdk_bs_delete_blob(bs, cloneid2, blob_op_complete, NULL);
6477 	poll_threads();
6478 	CU_ASSERT(g_bserrno == 0);
6479 
6480 	spdk_bs_unload(bs, bs_op_complete, NULL);
6481 	poll_threads();
6482 	CU_ASSERT(g_bserrno == 0);
6483 
6484 	g_bs = NULL;
6485 }
6486 
6487 /**
6488  * Snapshot-clones relation test 3
6489  *
6490  *         snapshot0
6491  *            |
6492  *         snapshot1
6493  *            |
6494  *         snapshot2
6495  *            |
6496  *           blob
6497  */
6498 static void
6499 blob_relations3(void)
6500 {
6501 	struct spdk_blob_store *bs;
6502 	struct spdk_bs_dev *dev;
6503 	struct spdk_io_channel *channel;
6504 	struct spdk_bs_opts bs_opts;
6505 	struct spdk_blob_opts opts;
6506 	struct spdk_blob *blob;
6507 	spdk_blob_id blobid, snapshotid0, snapshotid1, snapshotid2;
6508 
6509 	dev = init_dev();
6510 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
6511 	snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");
6512 
6513 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
6514 	poll_threads();
6515 	CU_ASSERT(g_bserrno == 0);
6516 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
6517 	bs = g_bs;
6518 
6519 	channel = spdk_bs_alloc_io_channel(bs);
6520 	SPDK_CU_ASSERT_FATAL(channel != NULL);
6521 
6522 	/* 1. Create blob with 10 clusters */
6523 	ut_spdk_blob_opts_init(&opts);
6524 	opts.num_clusters = 10;
6525 
6526 	blob = ut_blob_create_and_open(bs, &opts);
6527 	blobid = spdk_blob_get_id(blob);
6528 
6529 	/* 2. Create snapshot0 */
6530 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
6531 	poll_threads();
6532 	CU_ASSERT(g_bserrno == 0);
6533 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
6534 	snapshotid0 = g_blobid;
6535 
6536 	/* 3. Create snapshot1 */
6537 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
6538 	poll_threads();
6539 	CU_ASSERT(g_bserrno == 0);
6540 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
6541 	snapshotid1 = g_blobid;
6542 
6543 	/* 4. Create snapshot2 */
6544 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
6545 	poll_threads();
6546 	CU_ASSERT(g_bserrno == 0);
6547 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
6548 	snapshotid2 = g_blobid;
6549 
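	/* Decoupling is expected to drop the blob's direct dependency on its immediate
	 * parent (here snapshot2) so that it relies on the next ancestor in the chain
	 * instead. The exact cluster handling is an implementation detail; this test only
	 * checks that the metadata of both the blob and snapshot2 can still be updated
	 * and deleted afterwards. */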
6550 	/* 5. Decouple blob */
6551 	spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
6552 	poll_threads();
6553 	CU_ASSERT(g_bserrno == 0);
6554 
6555 	/* 6. Decouple snapshot2. Make sure updating md of snapshot2 is possible */
6556 	spdk_bs_blob_decouple_parent(bs, channel, snapshotid2, blob_op_complete, NULL);
6557 	poll_threads();
6558 	CU_ASSERT(g_bserrno == 0);
6559 
6560 	/* 7. Delete blob */
6561 	spdk_blob_close(blob, blob_op_complete, NULL);
6562 	poll_threads();
6563 	CU_ASSERT(g_bserrno == 0);
6564 
6565 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
6566 	poll_threads();
6567 	CU_ASSERT(g_bserrno == 0);
6568 
6569 	/* 8. Delete snapshot2.
6570 	 * If the md of snapshot2 was updated, it should now be possible to delete it */
6571 	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
6572 	poll_threads();
6573 	CU_ASSERT(g_bserrno == 0);
6574 
6575 	/* Remove remaining blobs and unload bs */
6576 	spdk_bs_delete_blob(bs, snapshotid1, blob_op_complete, NULL);
6577 	poll_threads();
6578 	CU_ASSERT(g_bserrno == 0);
6579 
6580 	spdk_bs_delete_blob(bs, snapshotid0, blob_op_complete, NULL);
6581 	poll_threads();
6582 	CU_ASSERT(g_bserrno == 0);
6583 
6584 	spdk_bs_free_io_channel(channel);
6585 	poll_threads();
6586 
6587 	spdk_bs_unload(bs, bs_op_complete, NULL);
6588 	poll_threads();
6589 	CU_ASSERT(g_bserrno == 0);
6590 
6591 	g_bs = NULL;
6592 }
6593 
6594 static void
6595 blobstore_clean_power_failure(void)
6596 {
6597 	struct spdk_blob_store *bs;
6598 	struct spdk_blob *blob;
6599 	struct spdk_power_failure_thresholds thresholds = {};
6600 	bool clean = false;
6601 	struct spdk_bs_super_block *super = (struct spdk_bs_super_block *)&g_dev_buffer[0];
6602 	struct spdk_bs_super_block super_copy = {};
6603 
6604 	thresholds.general_threshold = 1;
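	/* The UT dev's power-failure threshold appears to count backing-dev operations
	 * before a failure is injected. Starting at 1 and incrementing each pass pushes
	 * the injected failure one step later every iteration, until the md sync finally
	 * completes with g_bserrno == 0 and the loop ends. */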
6605 	while (!clean) {
6606 		/* Create bs and blob */
6607 		suite_blob_setup();
6608 		SPDK_CU_ASSERT_FATAL(g_bs != NULL);
6609 		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
6610 		bs = g_bs;
6611 		blob = g_blob;
6612 
6613 		/* Super block should not change for the rest of the UT;
6614 		 * save it now and compare later. */
6615 		memcpy(&super_copy, super, sizeof(struct spdk_bs_super_block));
6616 		SPDK_CU_ASSERT_FATAL(super->clean == 0);
6617 		SPDK_CU_ASSERT_FATAL(bs->clean == 0);
6618 
6619 		/* Force the bs/super block into a clean state,
6620 		 * and mark the blob dirty so that the md sync triggers a blob persist. */
6621 		blob->state = SPDK_BLOB_STATE_DIRTY;
6622 		bs->clean = 1;
6623 		super->clean = 1;
6624 		super->crc = blob_md_page_calc_crc(super);
6625 
6626 		g_bserrno = -1;
6627 		dev_set_power_failure_thresholds(thresholds);
6628 		spdk_blob_sync_md(blob, blob_op_complete, NULL);
6629 		poll_threads();
6630 		dev_reset_power_failure_event();
6631 
6632 		if (g_bserrno == 0) {
6633 			/* After successful md sync, both bs and super block
6634 			 * should be marked as not clean. */
6635 			SPDK_CU_ASSERT_FATAL(bs->clean == 0);
6636 			SPDK_CU_ASSERT_FATAL(super->clean == 0);
6637 			clean = true;
6638 		}
6639 
6640 		/* Depending on the point of failure, super block was either updated or not. */
6641 		super_copy.clean = super->clean;
6642 		super_copy.crc = blob_md_page_calc_crc(&super_copy);
6643 		/* Compare that the values in super block remained unchanged. */
6644 		SPDK_CU_ASSERT_FATAL(!memcmp(&super_copy, super, sizeof(struct spdk_bs_super_block)));
6645 
6646 		/* Delete blob and unload bs */
6647 		suite_blob_cleanup();
6648 
6649 		thresholds.general_threshold++;
6650 	}
6651 }
6652 
6653 static void
6654 blob_delete_snapshot_power_failure(void)
6655 {
6656 	struct spdk_bs_dev *dev;
6657 	struct spdk_blob_store *bs;
6658 	struct spdk_blob_opts opts;
6659 	struct spdk_blob *blob, *snapshot;
6660 	struct spdk_power_failure_thresholds thresholds = {};
6661 	spdk_blob_id blobid, snapshotid;
6662 	const void *value;
6663 	size_t value_len;
6664 	size_t count;
6665 	spdk_blob_id ids[3] = {};
6666 	int rc;
6667 	bool deleted = false;
6668 	int delete_snapshot_bserrno = -1;
6669 	uint32_t first_data_cluster;
6670 
6671 	thresholds.general_threshold = 1;
6672 	while (!deleted) {
6673 		dev = init_dev();
6674 
6675 		spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
6676 		poll_threads();
6677 		CU_ASSERT(g_bserrno == 0);
6678 		SPDK_CU_ASSERT_FATAL(g_bs != NULL);
6679 		bs = g_bs;
6680 
6681 		first_data_cluster = FIRST_DATA_CLUSTER(bs);
6682 
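		/* The blob created below occupies 10 clusters starting at the first data
		 * cluster. The bit-pool assertions through this test check that exactly
		 * those clusters stay accounted for across snapshot deletion, the simulated
		 * power failure and the dirty reload. */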
6683 		/* Create blob */
6684 		ut_spdk_blob_opts_init(&opts);
6685 		opts.num_clusters = 10;
6686 
6687 		spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
6688 		poll_threads();
6689 		CU_ASSERT(g_bserrno == 0);
6690 		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
6691 		blobid = g_blobid;
6692 
6693 		/* Create snapshot */
6694 		spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
6695 		poll_threads();
6696 		CU_ASSERT(g_bserrno == 0);
6697 		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
6698 		snapshotid = g_blobid;
6699 		SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, first_data_cluster));
6700 		SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, first_data_cluster + 10));
6701 
6702 		dev_set_power_failure_thresholds(thresholds);
6703 
6704 		spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
6705 		poll_threads();
6706 		delete_snapshot_bserrno = g_bserrno;
6707 
6708 		/* Do not shut down cleanly. Assumption is that after snapshot deletion
6709 		 * reports success, changes to both blobs should already be persisted. */
6710 		dev_reset_power_failure_event();
6711 		ut_bs_dirty_load(&bs, NULL);
6712 
6713 		SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, first_data_cluster));
6714 		SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, first_data_cluster + 10));
6715 
6716 		spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
6717 		poll_threads();
6718 		CU_ASSERT(g_bserrno == 0);
6719 		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
6720 		blob = g_blob;
6721 		SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == true);
6722 
6723 		spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
6724 		poll_threads();
6725 
6726 		if (g_bserrno == 0) {
6727 			SPDK_CU_ASSERT_FATAL(g_blob != NULL);
6728 			snapshot = g_blob;
6729 			CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
6730 			count = SPDK_COUNTOF(ids);
6731 			rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
6732 			CU_ASSERT(rc == 0);
6733 			CU_ASSERT(count == 1);
6734 			CU_ASSERT(ids[0] == blobid);
6735 			rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_PENDING_REMOVAL, &value, &value_len);
6736 			CU_ASSERT(rc != 0);
6737 			SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(snapshot) == false);
6738 			CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);
6739 			CU_ASSERT(spdk_blob_get_num_allocated_clusters(snapshot) == 10);
6740 
6741 			spdk_blob_close(snapshot, blob_op_complete, NULL);
6742 			poll_threads();
6743 			CU_ASSERT(g_bserrno == 0);
6744 		} else {
6745 			CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID);
6746 			CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 10);
6747 			/* The snapshot might have been left in an unrecoverable state, so it does not open.
6748 			 * Deletion may still perform further changes to the clone after that point.
6749 			 * This UT keeps iterating until the snapshot is gone and the delete call succeeds. */
6750 			if (delete_snapshot_bserrno == 0) {
6751 				deleted = true;
6752 			}
6753 		}
6754 
6755 		spdk_blob_close(blob, blob_op_complete, NULL);
6756 		poll_threads();
6757 		CU_ASSERT(g_bserrno == 0);
6758 
6759 		spdk_bs_unload(bs, bs_op_complete, NULL);
6760 		poll_threads();
6761 		CU_ASSERT(g_bserrno == 0);
6762 
6763 		thresholds.general_threshold++;
6764 	}
6765 }
6766 
6767 static void
6768 blob_create_snapshot_power_failure(void)
6769 {
6770 	struct spdk_blob_store *bs = g_bs;
6771 	struct spdk_bs_dev *dev;
6772 	struct spdk_blob_opts opts;
6773 	struct spdk_blob *blob, *snapshot;
6774 	struct spdk_power_failure_thresholds thresholds = {};
6775 	spdk_blob_id blobid, snapshotid;
6776 	const void *value;
6777 	size_t value_len;
6778 	size_t count;
6779 	spdk_blob_id ids[3] = {};
6780 	int rc;
6781 	bool created = false;
6782 	int create_snapshot_bserrno = -1;
6783 	uint32_t first_data_cluster;
6784 
6785 	thresholds.general_threshold = 1;
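	/* Same power-failure injection scheme as in the delete-snapshot test above: bump
	 * the threshold every pass so the failure lands one step later, and keep looping
	 * until a snapshot creation both reports success and survives the dirty reload. */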
6786 	while (!created) {
6787 		dev = init_dev();
6788 
6789 		spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
6790 		poll_threads();
6791 		CU_ASSERT(g_bserrno == 0);
6792 		SPDK_CU_ASSERT_FATAL(g_bs != NULL);
6793 		bs = g_bs;
6794 
6795 		first_data_cluster = FIRST_DATA_CLUSTER(bs);
6796 
6797 		/* Create blob */
6798 		ut_spdk_blob_opts_init(&opts);
6799 		opts.num_clusters = 10;
6800 
6801 		spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
6802 		poll_threads();
6803 		CU_ASSERT(g_bserrno == 0);
6804 		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
6805 		blobid = g_blobid;
6806 		SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, first_data_cluster));
6807 		SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, first_data_cluster + 10));
6808 
6809 		dev_set_power_failure_thresholds(thresholds);
6810 
6811 		/* Create snapshot */
6812 		spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
6813 		poll_threads();
6814 		create_snapshot_bserrno = g_bserrno;
6815 		snapshotid = g_blobid;
6816 		SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, first_data_cluster));
6817 		SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, first_data_cluster + 10));
6818 
6819 		/* Do not shut down cleanly. Assumption is that after snapshot creation
6820 		 * reports success, both blobs should be power-fail safe. */
6821 		dev_reset_power_failure_event();
6822 		ut_bs_dirty_load(&bs, NULL);
6823 
6824 		SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, first_data_cluster));
6825 		SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, first_data_cluster + 10));
6826 
6827 		spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
6828 		poll_threads();
6829 		CU_ASSERT(g_bserrno == 0);
6830 		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
6831 		blob = g_blob;
6832 
6833 		if (snapshotid != SPDK_BLOBID_INVALID) {
6834 			spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
6835 			poll_threads();
6836 		}
6837 
6838 		if ((snapshotid != SPDK_BLOBID_INVALID) && (g_bserrno == 0)) {
6839 			SPDK_CU_ASSERT_FATAL(g_blob != NULL);
6840 			snapshot = g_blob;
6841 			SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == true);
6842 			SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(snapshot) == false);
6843 			CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);
6844 			CU_ASSERT(spdk_blob_get_num_allocated_clusters(snapshot) == 10);
6845 			CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
6846 			count = SPDK_COUNTOF(ids);
6847 			rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
6848 			CU_ASSERT(rc == 0);
6849 			CU_ASSERT(count == 1);
6850 			CU_ASSERT(ids[0] == blobid);
6851 			rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_IN_PROGRESS, &value, &value_len);
6852 			CU_ASSERT(rc != 0);
6853 
6854 			spdk_blob_close(snapshot, blob_op_complete, NULL);
6855 			poll_threads();
6856 			CU_ASSERT(g_bserrno == 0);
6857 			if (create_snapshot_bserrno == 0) {
6858 				created = true;
6859 			}
6860 		} else {
6861 			CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID);
6862 			SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == false);
6863 			CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 10);
6864 		}
6865 
6866 		spdk_blob_close(blob, blob_op_complete, NULL);
6867 		poll_threads();
6868 		CU_ASSERT(g_bserrno == 0);
6869 
6870 		spdk_bs_unload(bs, bs_op_complete, NULL);
6871 		poll_threads();
6872 		CU_ASSERT(g_bserrno == 0);
6873 
6874 		thresholds.general_threshold++;
6875 	}
6876 }
6877 
6878 #define IO_UT_BLOCKS_PER_CLUSTER 64
6879 
6880 static void
6881 test_io_write(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
6882 {
6883 	const uint32_t SZ = IO_UT_BLOCKS_PER_CLUSTER;
6884 	uint8_t payload_ff[SZ * 512];
6885 	uint8_t payload_aa[SZ * 512];
6886 	uint8_t payload_00[SZ * 512];
6887 	uint8_t *cluster0, *cluster1;
6888 
6889 	memset(payload_ff, 0xFF, sizeof(payload_ff));
6890 	memset(payload_aa, 0xAA, sizeof(payload_aa));
6891 	memset(payload_00, 0x00, sizeof(payload_00));
6892 
6893 	/* Try to perform I/O with io unit = 512 */
6894 	spdk_blob_io_write(blob, channel, payload_ff, 0, 1, blob_op_complete, NULL);
6895 	poll_threads();
6896 	CU_ASSERT(g_bserrno == 0);
6897 
6898 	/* If thin provisioning is set, the cluster should be allocated now */
6899 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[0] != 0);
6900 	cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen];
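	/* active.clusters[] holds the cluster's starting LBA on the backing dev, so
	 * multiplying by blocklen gives the byte offset of that cluster inside the UT
	 * dev buffer (g_dev_buffer). */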
6901 
6902 	/* Each character 0-F symbolizes a single 512-byte io_unit filled with that value.
6903 	 * Pages are separated by '|'. The whole block [...] symbolizes one cluster (containing 4 pages). */
6904 	/* cluster0: [ F000 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
6905 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6906 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, (SZ - 1) * 512) == 0);
6907 
6908 	/* Verify write with offset on first page */
6909 	spdk_blob_io_write(blob, channel, payload_ff, 2, 1, blob_op_complete, NULL);
6910 	poll_threads();
6911 	CU_ASSERT(g_bserrno == 0);
6912 
6913 	/* cluster0: [ F0F0 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
6914 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6915 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6916 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6917 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6918 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_00, (SZ - 4) * 512) == 0);
6919 
6920 	/* Verify write with offset on first page */
6921 	spdk_blob_io_write(blob, channel, payload_ff, 4, 4, blob_op_complete, NULL);
6922 	poll_threads();
6923 
6924 	/* cluster0: [ F0F0 FFFF | 0000 0000 | 0000 0000 | 0000 0000 ] */
6925 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6926 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6927 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6928 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6929 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 4 * 512) == 0);
6930 	CU_ASSERT(memcmp(cluster0 + 8 * 512, payload_00, (SZ - 8) * 512) == 0);
6931 
6932 	/* Verify write with offset on second page */
6933 	spdk_blob_io_write(blob, channel, payload_ff, 8, 4, blob_op_complete, NULL);
6934 	poll_threads();
6935 
6936 	/* cluster0: [ F0F0 FFFF | FFFF 0000 | 0000 0000 | 0000 0000 ] */
6937 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6938 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6939 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6940 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6941 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 8 * 512) == 0);
6942 	CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, (SZ - 12) * 512) == 0);
6943 
6944 	/* Verify write across multiple pages */
6945 	spdk_blob_io_write(blob, channel, payload_aa, 4, 8, blob_op_complete, NULL);
6946 	poll_threads();
6947 
6948 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 0000 ] */
6949 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6950 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6951 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6952 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6953 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
6954 	CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, (SZ - 12) * 512) == 0);
6955 
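	/* The 8-unit write starting at SZ - 4 straddles the cluster boundary: the last
	 * 4 io_units land in cluster 1, which gets allocated on demand for a thin blob
	 * (checked by the assertion on active.clusters[1] below). */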
6956 	/* Verify write across multiple clusters */
6957 	spdk_blob_io_write(blob, channel, payload_ff, SZ - 4, 8, blob_op_complete, NULL);
6958 	poll_threads();
6959 
6960 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0);
6961 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
6962 
6963 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6964 	 * cluster1: [ FFFF 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
6965 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6966 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6967 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6968 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6969 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
6970 	CU_ASSERT(memcmp(cluster0 + (SZ - 4) * 512, payload_ff, 4 * 512) == 0);
6971 
6972 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0);
6973 	CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, (SZ - 4) * 512) == 0);
6974 
6975 	/* Verify write to second cluster */
6976 	spdk_blob_io_write(blob, channel, payload_ff, SZ + 12, 2, blob_op_complete, NULL);
6977 	poll_threads();
6978 
6979 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0);
6980 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
6981 
6982 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6983 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] */
6984 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6985 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6986 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6987 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6988 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
6989 	CU_ASSERT(memcmp(cluster0 + (SZ - 4) * 512, payload_ff, 4 * 512) == 0);
6990 
6991 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0);
6992 	CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 8 * 512) == 0);
6993 	CU_ASSERT(memcmp(cluster1 + 12 * 512, payload_ff, 2 * 512) == 0);
6994 	CU_ASSERT(memcmp(cluster1 + 14 * 512, payload_00, (SZ - 14) * 512) == 0);
6995 }
6996 
6997 static void
6998 test_io_read(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
6999 {
7000 	const uint32_t SZ = IO_UT_BLOCKS_PER_CLUSTER;
7001 	uint8_t payload_read[2 * SZ * 512];
7002 	uint8_t payload_ff[SZ * 512];
7003 	uint8_t payload_aa[SZ * 512];
7004 	uint8_t payload_00[SZ * 512];
7005 
7006 	memset(payload_ff, 0xFF, sizeof(payload_ff));
7007 	memset(payload_aa, 0xAA, sizeof(payload_aa));
7008 	memset(payload_00, 0x00, sizeof(payload_00));
7009 
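	/* The reads below verify the data pattern left behind by test_io_write(), so that
	 * helper is expected to have run on this blob first. */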
7010 	/* Read only first io unit */
7011 	/* cluster0: [ (F)0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
7012 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
7013 	 * payload_read: F000 0000 | 0000 0000 ... */
7014 	memset(payload_read, 0x00, sizeof(payload_read));
7015 	spdk_blob_io_read(blob, channel, payload_read, 0, 1, blob_op_complete, NULL);
7016 	poll_threads();
7017 	CU_ASSERT(g_bserrno == 0);
7018 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
7019 	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, (SZ - 1) * 512) == 0);
7020 
7021 	/* Read four io_units starting from offset = 2
7022 	 * cluster0: [ F0(F0 AA)AA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
7023 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
7024 	 * payload_read: F0AA 0000 | 0000 0000 ... */
7025 
7026 	memset(payload_read, 0x00, sizeof(payload_read));
7027 	spdk_blob_io_read(blob, channel, payload_read, 2, 4, blob_op_complete, NULL);
7028 	poll_threads();
7029 	CU_ASSERT(g_bserrno == 0);
7030 
7031 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
7032 	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0);
7033 	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_aa, 512) == 0);
7034 	CU_ASSERT(memcmp(payload_read + 3 * 512, payload_aa, 512) == 0);
7035 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, (SZ - 4) * 512) == 0);
7036 
7037 	/* Read eight io_units across multiple pages
7038 	 * cluster0: [ F0F0 (AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ]
7039 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
7040 	 * payload_read: AAAA AAAA | 0000 0000 ... */
7041 	memset(payload_read, 0x00, sizeof(payload_read));
7042 	spdk_blob_io_read(blob, channel, payload_read, 4, 8, blob_op_complete, NULL);
7043 	poll_threads();
7044 	CU_ASSERT(g_bserrno == 0);
7045 
7046 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_aa, 8 * 512) == 0);
7047 	CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, (SZ - 8) * 512) == 0);
7048 
7049 	/* Read eight io_units across multiple clusters
7050 	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 (FFFF ]
7051 	 * cluster1: [ FFFF) 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
7052 	 * payload_read: FFFF FFFF | 0000 0000 ... */
7053 	memset(payload_read, 0x00, sizeof(payload_read));
7054 	spdk_blob_io_read(blob, channel, payload_read, SZ - 4, 8, blob_op_complete, NULL);
7055 	poll_threads();
7056 	CU_ASSERT(g_bserrno == 0);
7057 
7058 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 8 * 512) == 0);
7059 	CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, (SZ - 8) * 512) == 0);
7060 
7061 	/* Read four io_units from second cluster
7062 	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
7063 	 * cluster1: [ FFFF 0000 | 00(00 FF)00 | 0000 0000 | 0000 0000 ]
7064 	 * payload_read: 00FF 0000 | 0000 0000 ... */
7065 	memset(payload_read, 0x00, sizeof(payload_read));
7066 	spdk_blob_io_read(blob, channel, payload_read, SZ + 10, 4, blob_op_complete, NULL);
7067 	poll_threads();
7068 	CU_ASSERT(g_bserrno == 0);
7069 
7070 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_00, 2 * 512) == 0);
7071 	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 2 * 512) == 0);
7072 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, (SZ - 4) * 512) == 0);
7073 
7074 	/* Read second cluster
7075 	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
7076 	 * cluster1: [ (FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ]
7077 	 * payload_read: FFFF 0000 | 0000 FF00 ... */
7078 	memset(payload_read, 0x00, sizeof(payload_read));
7079 	spdk_blob_io_read(blob, channel, payload_read, SZ, SZ, blob_op_complete, NULL);
7080 	poll_threads();
7081 	CU_ASSERT(g_bserrno == 0);
7082 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 4 * 512) == 0);
7083 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 8 * 512) == 0);
7084 	CU_ASSERT(memcmp(payload_read + 12 * 512, payload_ff, 2 * 512) == 0);
7085 	CU_ASSERT(memcmp(payload_read + 14 * 512, payload_00, (SZ - 14) * 512) == 0);
7086 
7087 	/* Read whole two clusters
7088 	 * cluster0: [ (F0F0 AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ]
7089 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ] */
7090 	memset(payload_read, 0x00, sizeof(payload_read));
7091 	spdk_blob_io_read(blob, channel, payload_read, 0, SZ * 2, blob_op_complete, NULL);
7092 	poll_threads();
7093 	CU_ASSERT(g_bserrno == 0);
7094 
7095 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
7096 	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0);
7097 	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 512) == 0);
7098 	CU_ASSERT(memcmp(payload_read + 3 * 512, payload_00, 512) == 0);
7099 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_aa, 8 * 512) == 0);
7100 	CU_ASSERT(memcmp(payload_read + (SZ - 4) * 512, payload_ff, 4 * 512) == 0);
7101 
7102 	CU_ASSERT(memcmp(payload_read + (SZ + 0) * 512, payload_ff, 4 * 512) == 0);
7103 	CU_ASSERT(memcmp(payload_read + (SZ + 4) * 512, payload_00, 8 * 512) == 0);
7104 	CU_ASSERT(memcmp(payload_read + (SZ + 12) * 512, payload_ff, 2 * 512) == 0);
7105 	CU_ASSERT(memcmp(payload_read + (SZ + 14) * 512, payload_00, (SZ - 14) * 512) == 0);
7106 }
7107 
7108 
7109 static void
7110 test_io_unmap(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
7111 {
7112 	const uint32_t SZ = IO_UT_BLOCKS_PER_CLUSTER;
7113 	uint8_t payload_ff[SZ * 512];
7114 	uint8_t payload_aa[SZ * 512];
7115 	uint8_t payload_00[SZ * 512];
7116 	uint8_t *cluster0, *cluster1;
7117 
7118 	memset(payload_ff, 0xFF, sizeof(payload_ff));
7119 	memset(payload_aa, 0xAA, sizeof(payload_aa));
7120 	memset(payload_00, 0x00, sizeof(payload_00));
7121 
7122 	cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen];
7123 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
7124 
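	/* In the UT backing dev the unmapped range is simply zero-filled, so the same
	 * memcmp-against-zeros check works for both unmap and write_zeroes (an assumption
	 * about the bs_dev_common test device, not about real devices). */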
7125 	/* Unmap */
7126 	spdk_blob_io_unmap(blob, channel, 0, SZ * 2, blob_op_complete, NULL);
7127 	poll_threads();
7128 
7129 	CU_ASSERT(g_bserrno == 0);
7130 
7131 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_00, SZ * 512) == 0);
7132 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_00, SZ * 512) == 0);
7133 }
7134 
7135 static void
7136 test_io_zeroes(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
7137 {
7138 	const uint32_t SZ = IO_UT_BLOCKS_PER_CLUSTER;
7139 	uint8_t payload_ff[SZ * 512];
7140 	uint8_t payload_aa[SZ * 512];
7141 	uint8_t payload_00[SZ * 512];
7142 	uint8_t *cluster0, *cluster1;
7143 
7144 	memset(payload_ff, 0xFF, sizeof(payload_ff));
7145 	memset(payload_aa, 0xAA, sizeof(payload_aa));
7146 	memset(payload_00, 0x00, sizeof(payload_00));
7147 
7148 	cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen];
7149 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
7150 
7151 	/* Write zeroes */
7152 	spdk_blob_io_write_zeroes(blob, channel, 0, SZ * 2, blob_op_complete, NULL);
7153 	poll_threads();
7154 
7155 	CU_ASSERT(g_bserrno == 0);
7156 
7157 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_00, SZ * 512) == 0);
7158 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_00, SZ * 512) == 0);
7159 }
7160 
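/*
 * Thin wrapper used by the iov tests: it issues either the _ext or the plain writev API
 * depending on whether ext io opts were supplied, and for the ext path verifies that the
 * UT dev saw the writev_ext call and received the opts unchanged.
 */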
7161 static inline void
7162 test_blob_io_writev(struct spdk_blob *blob, struct spdk_io_channel *channel,
7163 		    struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
7164 		    spdk_blob_op_complete cb_fn, void *cb_arg, struct spdk_blob_ext_io_opts *io_opts)
7165 {
7166 	if (io_opts) {
7167 		g_dev_writev_ext_called = false;
7168 		memset(&g_blob_ext_io_opts, 0, sizeof(g_blob_ext_io_opts));
7169 		spdk_blob_io_writev_ext(blob, channel, iov, iovcnt, offset, length, blob_op_complete, NULL,
7170 					io_opts);
7171 	} else {
7172 		spdk_blob_io_writev(blob, channel, iov, iovcnt, offset, length, blob_op_complete, NULL);
7173 	}
7174 	poll_threads();
7175 	CU_ASSERT(g_bserrno == 0);
7176 	if (io_opts) {
7177 		CU_ASSERT(g_dev_writev_ext_called);
7178 		CU_ASSERT(memcmp(io_opts, &g_blob_ext_io_opts, sizeof(g_blob_ext_io_opts)) == 0);
7179 	}
7180 }
7181 
7182 static void
7183 test_iov_write(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel,
7184 	       bool ext_api)
7185 {
7186 	const uint32_t SZ = IO_UT_BLOCKS_PER_CLUSTER;
7187 	uint8_t payload_ff[SZ * 512];
7188 	uint8_t payload_aa[SZ * 512];
7189 	uint8_t payload_00[SZ * 512];
7190 	uint8_t *cluster0, *cluster1;
7191 	struct iovec iov[4];
7192 	struct spdk_blob_ext_io_opts ext_opts = {
7193 		.memory_domain = (struct spdk_memory_domain *)0xfeedbeef,
7194 		.memory_domain_ctx = (void *)0xf00df00d,
7195 		.size = sizeof(struct spdk_blob_ext_io_opts),
7196 		.user_ctx = (void *)123,
7197 	};
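	/* The domain/ctx pointers above are opaque sentinel values; they are only compared
	 * for pass-through by the UT dev and are never dereferenced (assumption based on
	 * how g_blob_ext_io_opts is checked in the wrappers). */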
7198 
7199 	memset(payload_ff, 0xFF, sizeof(payload_ff));
7200 	memset(payload_aa, 0xAA, sizeof(payload_aa));
7201 	memset(payload_00, 0x00, sizeof(payload_00));
7202 
7203 	/* Try to perform I/O with io unit = 512 */
7204 	iov[0].iov_base = payload_ff;
7205 	iov[0].iov_len = 1 * 512;
7206 
7207 	test_blob_io_writev(blob, channel, iov, 1, 0, 1, blob_op_complete, NULL,
7208 			    ext_api ? &ext_opts : NULL);
7209 
7210 	/* If thin provisioning is set, the cluster should be allocated now */
7211 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[0] != 0);
7212 	cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen];
7213 
7214 	/* Each character 0-F symbolizes a single 512-byte io_unit filled with that value.
7215 	 * Pages are separated by '|'. The whole block [...] symbolizes one cluster (containing 4 pages). */
7216 	/* cluster0: [ F000 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
7217 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
7218 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, (SZ - 1) * 512) == 0);
7219 
7220 	/* Verify write with offset on first page */
7221 	iov[0].iov_base = payload_ff;
7222 	iov[0].iov_len = 1 * 512;
7223 
7224 	test_blob_io_writev(blob, channel, iov, 1, 2, 1, blob_op_complete, NULL,
7225 			    ext_api ? &ext_opts : NULL);
7226 
7227 	/* cluster0: [ F0F0 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
7228 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
7229 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
7230 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
7231 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
7232 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_00, (SZ - 4) * 512) == 0);
7233 
7234 	/* Verify write with offset on first page */
7235 	iov[0].iov_base = payload_ff;
7236 	iov[0].iov_len = 4 * 512;
7237 	spdk_blob_io_writev(blob, channel, iov, 1, 4, 4, blob_op_complete, NULL);
7238 	poll_threads();
7239 
7240 	/* cluster0: [ F0F0 FFFF | 0000 0000 | 0000 0000 | 0000 0000 ] */
7241 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
7242 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
7243 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
7244 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
7245 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 4 * 512) == 0);
7246 	CU_ASSERT(memcmp(cluster0 + 8 * 512, payload_00, (SZ - 8) * 512) == 0);
7247 
7248 	/* Verify write with offset on second page */
7249 	iov[0].iov_base = payload_ff;
7250 	iov[0].iov_len = 4 * 512;
7251 	spdk_blob_io_writev(blob, channel, iov, 1, 8, 4, blob_op_complete, NULL);
7252 	poll_threads();
7253 
7254 	/* cluster0: [ F0F0 FFFF | FFFF 0000 | 0000 0000 | 0000 0000 ] */
7255 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
7256 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
7257 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
7258 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
7259 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 8 * 512) == 0);
7260 	CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, (SZ - 12) * 512) == 0);
7261 
7262 	/* Verify write across multiple pages */
7263 	iov[0].iov_base = payload_aa;
7264 	iov[0].iov_len = 8 * 512;
7265 
7266 	test_blob_io_writev(blob, channel, iov, 1, 4, 8, blob_op_complete, NULL,
7267 			    ext_api ? &ext_opts : NULL);
7268 
7269 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 0000 ] */
7270 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
7271 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
7272 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
7273 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
7274 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
7275 	CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, (SZ - 12) * 512) == 0);
7276 
7277 	/* Verify write across multiple clusters */
7278 
7279 	iov[0].iov_base = payload_ff;
7280 	iov[0].iov_len = 8 * 512;
7281 
7282 	test_blob_io_writev(blob, channel, iov, 1, (SZ - 4), 8, blob_op_complete, NULL,
7283 			    ext_api ? &ext_opts : NULL);
7284 
7285 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0);
7286 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
7287 
7288 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
7289 	 * cluster1: [ FFFF 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
7290 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
7291 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
7292 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
7293 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
7294 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
7295 	CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, (SZ - 16) * 512) == 0);
7296 	CU_ASSERT(memcmp(cluster0 + (SZ - 4) * 512, payload_ff, 4 * 512) == 0);
7297 
7298 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0);
7299 	CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, (SZ - 4) * 512) == 0);
7300 
7301 	/* Verify write to second cluster */
7302 
7303 	iov[0].iov_base = payload_ff;
7304 	iov[0].iov_len = 2 * 512;
7305 
7306 	test_blob_io_writev(blob, channel, iov, 1, SZ + 12, 2, blob_op_complete, NULL,
7307 			    ext_api ? &ext_opts : NULL);
7308 
7309 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0);
7310 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
7311 
7312 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
7313 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] */
7314 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
7315 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
7316 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
7317 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
7318 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
7319 	CU_ASSERT(memcmp(cluster0 + (SZ - 4) * 512, payload_ff, 4 * 512) == 0);
7320 
7321 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0);
7322 	CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 8 * 512) == 0);
7323 	CU_ASSERT(memcmp(cluster1 + 12 * 512, payload_ff, 2 * 512) == 0);
7324 	CU_ASSERT(memcmp(cluster1 + 14 * 512, payload_00, (SZ - 14) * 512) == 0);
7325 }
7326 
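/* Read-side counterpart of test_blob_io_writev(); see the comment above that helper. */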
7327 static inline void
7328 test_blob_io_readv(struct spdk_blob *blob, struct spdk_io_channel *channel,
7329 		   struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
7330 		   spdk_blob_op_complete cb_fn, void *cb_arg, struct spdk_blob_ext_io_opts *io_opts)
7331 {
7332 	if (io_opts) {
7333 		g_dev_readv_ext_called = false;
7334 		memset(&g_blob_ext_io_opts, 0, sizeof(g_blob_ext_io_opts));
7335 		spdk_blob_io_readv_ext(blob, channel, iov, iovcnt, offset, length, blob_op_complete, NULL, io_opts);
7336 	} else {
7337 		spdk_blob_io_readv(blob, channel, iov, iovcnt, offset, length, blob_op_complete, NULL);
7338 	}
7339 	poll_threads();
7340 	CU_ASSERT(g_bserrno == 0);
7341 	if (io_opts) {
7342 		CU_ASSERT(g_dev_readv_ext_called);
7343 		CU_ASSERT(memcmp(io_opts, &g_blob_ext_io_opts, sizeof(g_blob_ext_io_opts)) == 0);
7344 	}
7345 }
7346 
7347 static void
7348 test_iov_read(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel,
7349 	      bool ext_api)
7350 {
7351 	const uint32_t SZ = IO_UT_BLOCKS_PER_CLUSTER;
7352 	uint8_t payload_read[2 * SZ * 512];
7353 	uint8_t payload_ff[SZ * 512];
7354 	uint8_t payload_aa[SZ * 512];
7355 	uint8_t payload_00[SZ * 512];
7356 	struct iovec iov[4];
7357 	struct spdk_blob_ext_io_opts ext_opts = {
7358 		.memory_domain = (struct spdk_memory_domain *)0xfeedbeef,
7359 		.memory_domain_ctx = (void *)0xf00df00d,
7360 		.size = sizeof(struct spdk_blob_ext_io_opts),
7361 		.user_ctx = (void *)123,
7362 	};
7363 
7364 	memset(payload_ff, 0xFF, sizeof(payload_ff));
7365 	memset(payload_aa, 0xAA, sizeof(payload_aa));
7366 	memset(payload_00, 0x00, sizeof(payload_00));
7367 
7368 	/* Read only first io unit */
7369 	/* cluster0: [ (F)0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
7370 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
7371 	 * payload_read: F000 0000 | 0000 0000 ... */
7372 	memset(payload_read, 0x00, sizeof(payload_read));
7373 	iov[0].iov_base = payload_read;
7374 	iov[0].iov_len = 1 * 512;
7375 
7376 	test_blob_io_readv(blob, channel, iov, 1, 0, 1, blob_op_complete, NULL, ext_api ? &ext_opts : NULL);
7377 
7378 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
7379 	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, (SZ - 1) * 512) == 0);
7380 
7381 	/* Read four io_units starting from offset = 2
7382 	 * cluster0: [ F0(F0 AA)AA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
7383 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
7384 	 * payload_read: F0AA 0000 | 0000 0000 ... */
7385 
7386 	memset(payload_read, 0x00, sizeof(payload_read));
7387 	iov[0].iov_base = payload_read;
7388 	iov[0].iov_len = 4 * 512;
7389 
7390 	test_blob_io_readv(blob, channel, iov, 1, 2, 4, blob_op_complete, NULL, ext_api ? &ext_opts : NULL);
7391 
7392 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
7393 	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0);
7394 	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_aa, 512) == 0);
7395 	CU_ASSERT(memcmp(payload_read + 3 * 512, payload_aa, 512) == 0);
7396 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, (SZ - 4) * 512) == 0);
7397 
7398 	/* Read eight io_units across multiple pages
7399 	 * cluster0: [ F0F0 (AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ]
7400 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
7401 	 * payload_read: AAAA AAAA | 0000 0000 ... */
7402 	memset(payload_read, 0x00, sizeof(payload_read));
7403 	iov[0].iov_base = payload_read;
7404 	iov[0].iov_len = 4 * 512;
7405 	iov[1].iov_base = payload_read + 4 * 512;
7406 	iov[1].iov_len = 4 * 512;
7407 
7408 	test_blob_io_readv(blob, channel, iov, 2, 4, 8, blob_op_complete, NULL, ext_api ? &ext_opts : NULL);
7409 
7410 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_aa, 8 * 512) == 0);
7411 	CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, (SZ - 8) * 512) == 0);
7412 
7413 	/* Read eight io_units across multiple clusters
7414 	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 (FFFF ]
7415 	 * cluster1: [ FFFF) 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
7416 	 * payload_read: FFFF FFFF | 0000 0000 ... */
7417 	memset(payload_read, 0x00, sizeof(payload_read));
7418 	iov[0].iov_base = payload_read;
7419 	iov[0].iov_len = 2 * 512;
7420 	iov[1].iov_base = payload_read + 2 * 512;
7421 	iov[1].iov_len = 2 * 512;
7422 	iov[2].iov_base = payload_read + 4 * 512;
7423 	iov[2].iov_len = 2 * 512;
7424 	iov[3].iov_base = payload_read + 6 * 512;
7425 	iov[3].iov_len = 2 * 512;
7426 
7427 	test_blob_io_readv(blob, channel, iov, 4, SZ - 4, 8, blob_op_complete, NULL,
7428 			   ext_api ? &ext_opts : NULL);
7429 
7430 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 8 * 512) == 0);
7431 	CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, (SZ - 8) * 512) == 0);
7432 
7433 	/* Read four io_units from second cluster
7434 	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
7435 	 * cluster1: [ FFFF 0000 | 00(00 FF)00 | 0000 0000 | 0000 0000 ]
7436 	 * payload_read: 00FF 0000 | 0000 0000 ... */
7437 	memset(payload_read, 0x00, sizeof(payload_read));
7438 	iov[0].iov_base = payload_read;
7439 	iov[0].iov_len = 1 * 512;
7440 	iov[1].iov_base = payload_read + 1 * 512;
7441 	iov[1].iov_len = 3 * 512;
7442 
7443 	test_blob_io_readv(blob, channel, iov, 2, SZ + 10, 4, blob_op_complete, NULL,
7444 			   ext_api ? &ext_opts : NULL);
7445 
7446 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_00, 2 * 512) == 0);
7447 	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 2 * 512) == 0);
7448 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, (SZ - 4) * 512) == 0);
7449 
7450 	/* Read second cluster
7451 	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
7452 	 * cluster1: [ (FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ]
7453 	 * payload_read: FFFF 0000 | 0000 FF00 ... */
7454 	memset(payload_read, 0x00, sizeof(payload_read));
7455 	iov[0].iov_base = payload_read;
7456 	iov[0].iov_len = 1 * 512;
7457 	iov[1].iov_base = payload_read + 1 * 512;
7458 	iov[1].iov_len = 2 * 512;
7459 	iov[2].iov_base = payload_read + 3 * 512;
7460 	iov[2].iov_len = 4 * 512;
7461 	iov[3].iov_base = payload_read + 7 * 512;
7462 	iov[3].iov_len = (SZ - 7) * 512;
7463 
7464 	test_blob_io_readv(blob, channel, iov, 4, SZ, SZ, blob_op_complete, NULL,
7465 			   ext_api ? &ext_opts : NULL);
7466 
7467 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 4 * 512) == 0);
7468 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 8 * 512) == 0);
7469 	CU_ASSERT(memcmp(payload_read + 12 * 512, payload_ff, 2 * 512) == 0);
7470 	CU_ASSERT(memcmp(payload_read + 14 * 512, payload_00, (SZ - 14) * 512) == 0);
7471 
7472 	/* Read whole two clusters
7473 	 * cluster0: [ (F0F0 AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ]
7474 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ] */
7475 	memset(payload_read, 0x00, sizeof(payload_read));
7476 	iov[0].iov_base = payload_read;
7477 	iov[0].iov_len = 1 * 512;
7478 	iov[1].iov_base = payload_read + 1 * 512;
7479 	iov[1].iov_len = 8 * 512;
7480 	iov[2].iov_base = payload_read + 9 * 512;
7481 	iov[2].iov_len = 16 * 512;
7482 	iov[3].iov_base = payload_read + 25 * 512;
7483 	iov[3].iov_len = (2 * SZ - 25) * 512;
7484 
7485 	test_blob_io_readv(blob, channel, iov, 4, 0, SZ * 2, blob_op_complete, NULL,
7486 			   ext_api ? &ext_opts : NULL);
7487 
7488 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
7489 	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0);
7490 	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 512) == 0);
7491 	CU_ASSERT(memcmp(payload_read + 3 * 512, payload_00, 512) == 0);
7492 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_aa, 8 * 512) == 0);
7493 	CU_ASSERT(memcmp(payload_read + (SZ - 4) * 512, payload_ff, 4 * 512) == 0);
7494 
7495 	CU_ASSERT(memcmp(payload_read + (SZ + 0) * 512, payload_ff, 4 * 512) == 0);
7496 	CU_ASSERT(memcmp(payload_read + (SZ + 4) * 512, payload_00, 8 * 512) == 0);
7497 	CU_ASSERT(memcmp(payload_read + (SZ + 12) * 512, payload_ff, 2 * 512) == 0);
7498 	CU_ASSERT(memcmp(payload_read + (SZ + 14) * 512, payload_00, (SZ - 14) * 512) == 0);
7499 }
7500 
7501 static void
7502 blob_io_unit(void)
7503 {
7504 	struct spdk_bs_opts bsopts;
7505 	struct spdk_blob_opts opts;
7506 	struct spdk_blob_store *bs;
7507 	struct spdk_bs_dev *dev;
7508 	struct spdk_blob *blob, *snapshot, *clone;
7509 	spdk_blob_id blobid;
7510 	struct spdk_io_channel *channel;
7511 
7512 	/* Create dev with 512 bytes io unit size */
7513 
7514 	spdk_bs_opts_init(&bsopts, sizeof(bsopts));
7515 	bsopts.cluster_sz = IO_UT_BLOCKS_PER_CLUSTER * 512;
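	/* Quick sanity check of the geometry used here (not executed):
	 *   cluster_sz = 64 io_units * 512 B = 32 KiB,
	 *   so the 32-cluster blobs created below each cover 1 MiB. */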
7516 	snprintf(bsopts.bstype.bstype, sizeof(bsopts.bstype.bstype), "TESTTYPE");
7517 
7518 	/* Use a dev with a 512-byte block size; the resulting 512-byte io_unit must be accepted */
7519 	dev = init_dev();
7520 	dev->blocklen = 512;
7521 	dev->blockcnt =  DEV_BUFFER_SIZE / dev->blocklen;
7522 
7523 	/* Initialize a new blob store */
7524 	spdk_bs_init(dev, &bsopts, bs_op_with_handle_complete, NULL);
7525 	poll_threads();
7526 	CU_ASSERT(g_bserrno == 0);
7527 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
7528 	bs = g_bs;
7529 
7530 	CU_ASSERT(spdk_bs_get_io_unit_size(bs) == 512);
7531 	channel = spdk_bs_alloc_io_channel(bs);
7532 
7533 	/* Create thick provisioned blob */
7534 	ut_spdk_blob_opts_init(&opts);
7535 	opts.thin_provision = false;
7536 	opts.num_clusters = 32;
7537 
7538 	blob = ut_blob_create_and_open(bs, &opts);
7539 	blobid = spdk_blob_get_id(blob);
7540 
7541 	test_io_write(dev, blob, channel);
7542 	test_io_read(dev, blob, channel);
7543 	test_io_zeroes(dev, blob, channel);
7544 
7545 	test_iov_write(dev, blob, channel, false);
7546 	test_iov_read(dev, blob, channel, false);
7547 	test_io_zeroes(dev, blob, channel);
7548 
7549 	test_iov_write(dev, blob, channel, true);
7550 	test_iov_read(dev, blob, channel, true);
7551 
7552 	test_io_unmap(dev, blob, channel);
7553 
7554 	spdk_blob_close(blob, blob_op_complete, NULL);
7555 	poll_threads();
7556 	CU_ASSERT(g_bserrno == 0);
7557 	blob = NULL;
7558 	g_blob = NULL;
7559 
7560 	/* Create thin provisioned blob */
7561 
7562 	ut_spdk_blob_opts_init(&opts);
7563 	opts.thin_provision = true;
7564 	opts.num_clusters = 32;
7565 
7566 	blob = ut_blob_create_and_open(bs, &opts);
7567 	blobid = spdk_blob_get_id(blob);
7568 
7569 	test_io_write(dev, blob, channel);
7570 	test_io_read(dev, blob, channel);
7571 	test_io_zeroes(dev, blob, channel);
7572 
7573 	test_iov_write(dev, blob, channel, false);
7574 	test_iov_read(dev, blob, channel, false);
7575 	test_io_zeroes(dev, blob, channel);
7576 
7577 	test_iov_write(dev, blob, channel, true);
7578 	test_iov_read(dev, blob, channel, true);
7579 
7580 	/* Create snapshot */
7581 
7582 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
7583 	poll_threads();
7584 	CU_ASSERT(g_bserrno == 0);
7585 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
7586 	blobid = g_blobid;
7587 
7588 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
7589 	poll_threads();
7590 	CU_ASSERT(g_bserrno == 0);
7591 	CU_ASSERT(g_blob != NULL);
7592 	snapshot = g_blob;
7593 
7594 	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
7595 	poll_threads();
7596 	CU_ASSERT(g_bserrno == 0);
7597 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
7598 	blobid = g_blobid;
7599 
7600 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
7601 	poll_threads();
7602 	CU_ASSERT(g_bserrno == 0);
7603 	CU_ASSERT(g_blob != NULL);
7604 	clone = g_blob;
7605 
7606 	test_io_read(dev, blob, channel);
7607 	test_io_read(dev, snapshot, channel);
7608 	test_io_read(dev, clone, channel);
7609 
7610 	test_iov_read(dev, blob, channel, false);
7611 	test_iov_read(dev, snapshot, channel, false);
7612 	test_iov_read(dev, clone, channel, false);
7613 
7614 	test_iov_read(dev, blob, channel, true);
7615 	test_iov_read(dev, snapshot, channel, true);
7616 	test_iov_read(dev, clone, channel, true);
7617 
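	/* Inflating the clone is expected to allocate its own copy of every cluster and
	 * drop the dependency on the snapshot, so the unmaps and writes that follow touch
	 * only the clone's clusters. */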
7618 	/* Inflate clone */
7619 
7620 	spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
7621 	poll_threads();
7622 
7623 	CU_ASSERT(g_bserrno == 0);
7624 
7625 	test_io_read(dev, clone, channel);
7626 
7627 	test_io_unmap(dev, clone, channel);
7628 
7629 	test_iov_write(dev, clone, channel, false);
7630 	test_iov_read(dev, clone, channel, false);
7631 	test_io_unmap(dev, clone, channel);
7632 
7633 	test_iov_write(dev, clone, channel, true);
7634 	test_iov_read(dev, clone, channel, true);
7635 
7636 	spdk_blob_close(blob, blob_op_complete, NULL);
7637 	spdk_blob_close(snapshot, blob_op_complete, NULL);
7638 	spdk_blob_close(clone, blob_op_complete, NULL);
7639 	poll_threads();
7640 	CU_ASSERT(g_bserrno == 0);
7641 	blob = NULL;
7642 	g_blob = NULL;
7643 
7644 	spdk_bs_free_io_channel(channel);
7645 	poll_threads();
7646 
7647 	/* Unload the blob store */
7648 	spdk_bs_unload(bs, bs_op_complete, NULL);
7649 	poll_threads();
7650 	CU_ASSERT(g_bserrno == 0);
7651 	g_bs = NULL;
7652 	g_blob = NULL;
7653 	g_blobid = 0;
7654 }
7655 
7656 static void
7657 blob_io_unit_compatibility(void)
7658 {
7659 	struct spdk_bs_opts bsopts;
7660 	struct spdk_blob_store *bs;
7661 	struct spdk_bs_dev *dev;
7662 	struct spdk_bs_super_block *super;
7663 
7664 	/* Create dev with 512 bytes io unit size */
7665 
7666 	spdk_bs_opts_init(&bsopts, sizeof(bsopts));
7667 	bsopts.cluster_sz = g_phys_blocklen * 4;
7668 	snprintf(bsopts.bstype.bstype, sizeof(bsopts.bstype.bstype), "TESTTYPE");
7669 
7670 	/* Use a dev with a 512-byte block size; the resulting 512-byte io_unit must be accepted */
7671 	dev = init_dev();
7672 	dev->blocklen = 512;
7673 	dev->blockcnt =  DEV_BUFFER_SIZE / dev->blocklen;
7674 
7675 	/* Initialize a new blob store */
7676 	spdk_bs_init(dev, &bsopts, bs_op_with_handle_complete, NULL);
7677 	poll_threads();
7678 	CU_ASSERT(g_bserrno == 0);
7679 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
7680 	bs = g_bs;
7681 
7682 	CU_ASSERT(spdk_bs_get_io_unit_size(bs) == 512);
7683 
7684 	/* Unload the blob store */
7685 	spdk_bs_unload(bs, bs_op_complete, NULL);
7686 	poll_threads();
7687 	CU_ASSERT(g_bserrno == 0);
7688 
7689 	/* Modify super block to behave like older version.
7690 	 * Check if loaded io unit size equals SPDK_BS_PAGE_SIZE */
7691 	super = (struct spdk_bs_super_block *)&g_dev_buffer[0];
7692 	super->io_unit_size = 0;
7693 	super->crc = blob_md_page_calc_crc(super);
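	/* io_unit_size == 0 is how a pre-io_unit super block looks on disk. The CRC has to
	 * be recomputed above, otherwise the load below would presumably reject the super
	 * block as corrupted rather than fall back to SPDK_BS_PAGE_SIZE. */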
7694 
7695 	dev = init_dev();
7696 	dev->blocklen = 512;
7697 	dev->blockcnt =  DEV_BUFFER_SIZE / dev->blocklen;
7698 
7699 	spdk_bs_load(dev, &bsopts, bs_op_with_handle_complete, NULL);
7700 	poll_threads();
7701 	CU_ASSERT(g_bserrno == 0);
7702 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
7703 	bs = g_bs;
7704 
7705 	CU_ASSERT(spdk_bs_get_io_unit_size(bs) == SPDK_BS_PAGE_SIZE);
7706 
7707 	/* Unload the blob store */
7708 	spdk_bs_unload(bs, bs_op_complete, NULL);
7709 	poll_threads();
7710 	CU_ASSERT(g_bserrno == 0);
7711 
7712 	g_bs = NULL;
7713 	g_blob = NULL;
7714 	g_blobid = 0;
7715 }
7716 
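/*
 * Callbacks for the double-sync case in blob_simultaneous_operations(): the first
 * completion sets another xattr while leaving g_bserrno at -1, and the second completion
 * verifies that it ran after the first one (the xattr is visible) before finally setting
 * g_bserrno.
 */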
7717 static void
7718 first_sync_complete(void *cb_arg, int bserrno)
7719 {
7720 	struct spdk_blob *blob = cb_arg;
7721 	int rc;
7722 
7723 	CU_ASSERT(bserrno == 0);
7724 	rc = spdk_blob_set_xattr(blob, "sync", "second", strlen("second") + 1);
7725 	CU_ASSERT(rc == 0);
7726 	CU_ASSERT(g_bserrno == -1);
7727 
7728 	/* Keep g_bserrno at -1; only the
7729 	 * second sync completion should set it to 0. */
7730 }
7731 
7732 static void
7733 second_sync_complete(void *cb_arg, int bserrno)
7734 {
7735 	struct spdk_blob *blob = cb_arg;
7736 	const void *value;
7737 	size_t value_len;
7738 	int rc;
7739 
7740 	CU_ASSERT(bserrno == 0);
7741 
7742 	/* Verify that the first sync completion had a chance to execute */
7743 	rc = spdk_blob_get_xattr_value(blob, "sync", &value, &value_len);
7744 	CU_ASSERT(rc == 0);
7745 	SPDK_CU_ASSERT_FATAL(value != NULL);
7746 	CU_ASSERT(value_len == strlen("second") + 1);
7747 	CU_ASSERT_NSTRING_EQUAL_FATAL(value, "second", value_len);
7748 
7749 	CU_ASSERT(g_bserrno == -1);
7750 	g_bserrno = bserrno;
7751 }
7752 
7753 static void
7754 blob_simultaneous_operations(void)
7755 {
7756 	struct spdk_blob_store *bs = g_bs;
7757 	struct spdk_blob_opts opts;
7758 	struct spdk_blob *blob, *snapshot;
7759 	spdk_blob_id blobid, snapshotid;
7760 	struct spdk_io_channel *channel;
7761 	int rc;
7762 
7763 	channel = spdk_bs_alloc_io_channel(bs);
7764 	SPDK_CU_ASSERT_FATAL(channel != NULL);
7765 
7766 	ut_spdk_blob_opts_init(&opts);
7767 	opts.num_clusters = 10;
7768 
7769 	blob = ut_blob_create_and_open(bs, &opts);
7770 	blobid = spdk_blob_get_id(blob);
7771 
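	/* Each locked operation flips blob->locked_operation_in_progress synchronously when
	 * it is submitted, so a competing delete issued before polling is rejected right
	 * away with -EBUSY while the original operation completes once the threads are
	 * polled. */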
7772 	/* Create a snapshot and try to remove the blob at the same time:
7773 	 * - the snapshot should be created successfully
7774 	 * - the delete operation should fail with -EBUSY */
7775 	CU_ASSERT(blob->locked_operation_in_progress == false);
7776 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
7777 	CU_ASSERT(blob->locked_operation_in_progress == true);
7778 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
7779 	CU_ASSERT(blob->locked_operation_in_progress == true);
7780 	/* Deletion failure */
7781 	CU_ASSERT(g_bserrno == -EBUSY);
7782 	poll_threads();
7783 	CU_ASSERT(blob->locked_operation_in_progress == false);
7784 	/* Snapshot creation success */
7785 	CU_ASSERT(g_bserrno == 0);
7786 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
7787 
7788 	snapshotid = g_blobid;
7789 
7790 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
7791 	poll_threads();
7792 	CU_ASSERT(g_bserrno == 0);
7793 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
7794 	snapshot = g_blob;
7795 
7796 	/* Inflate the blob and try to remove it at the same time:
7797 	 * - the blob should be inflated successfully
7798 	 * - the delete operation should fail with -EBUSY */
7799 	CU_ASSERT(blob->locked_operation_in_progress == false);
7800 	spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
7801 	CU_ASSERT(blob->locked_operation_in_progress == true);
7802 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
7803 	CU_ASSERT(blob->locked_operation_in_progress == true);
7804 	/* Deletion failure */
7805 	CU_ASSERT(g_bserrno == -EBUSY);
7806 	poll_threads();
7807 	CU_ASSERT(blob->locked_operation_in_progress == false);
7808 	/* Inflation success */
7809 	CU_ASSERT(g_bserrno == 0);
7810 
7811 	/* Clone the snapshot and try to remove the snapshot at the same time:
7812 	 * - the snapshot should be cloned successfully
7813 	 * - the delete operation should fail with -EBUSY */
7814 	CU_ASSERT(blob->locked_operation_in_progress == false);
7815 	spdk_bs_create_clone(bs, snapshotid, NULL, blob_op_with_id_complete, NULL);
7816 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
7817 	/* Deletion failure */
7818 	CU_ASSERT(g_bserrno == -EBUSY);
7819 	poll_threads();
7820 	CU_ASSERT(blob->locked_operation_in_progress == false);
7821 	/* Clone created */
7822 	CU_ASSERT(g_bserrno == 0);
7823 
7824 	/* Resize the blob and try to remove it at the same time:
7825 	 * - the blob should be resized successfully
7826 	 * - the delete operation should fail with -EBUSY */
7827 	CU_ASSERT(blob->locked_operation_in_progress == false);
7828 	spdk_blob_resize(blob, 50, blob_op_complete, NULL);
7829 	CU_ASSERT(blob->locked_operation_in_progress == true);
7830 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
7831 	CU_ASSERT(blob->locked_operation_in_progress == true);
7832 	/* Deletion failure */
7833 	CU_ASSERT(g_bserrno == -EBUSY);
7834 	poll_threads();
7835 	CU_ASSERT(blob->locked_operation_in_progress == false);
7836 	/* Blob resized successfully */
7837 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
7838 	poll_threads();
7839 	CU_ASSERT(g_bserrno == 0);
7840 
7841 	/* Issue two consecutive blob syncs; neither should fail.
7842 	 * Force each sync to actually occur by marking the blob dirty each time.
7843 	 * Merely issuing the sync should not be enough to complete the operation,
7844 	 * since disk I/O is required to finish it. */
7845 	g_bserrno = -1;
7846 
7847 	rc = spdk_blob_set_xattr(blob, "sync", "first", strlen("first") + 1);
7848 	CU_ASSERT(rc == 0);
7849 	spdk_blob_sync_md(blob, first_sync_complete, blob);
7850 	CU_ASSERT(g_bserrno == -1);
7851 
7852 	spdk_blob_sync_md(blob, second_sync_complete, blob);
7853 	CU_ASSERT(g_bserrno == -1);
7854 
7855 	poll_threads();
7856 	CU_ASSERT(g_bserrno == 0);
7857 
7858 	spdk_bs_free_io_channel(channel);
7859 	poll_threads();
7860 
7861 	ut_blob_close_and_delete(bs, snapshot);
7862 	ut_blob_close_and_delete(bs, blob);
7863 }
7864 
7865 static void
7866 blob_persist_test(void)
7867 {
7868 	struct spdk_blob_store *bs = g_bs;
7869 	struct spdk_blob_opts opts;
7870 	struct spdk_blob *blob;
7871 	spdk_blob_id blobid;
7872 	struct spdk_io_channel *channel;
7873 	char *xattr;
7874 	size_t xattr_length;
7875 	int rc;
7876 	uint32_t page_count_clear, page_count_xattr;
7877 	uint64_t poller_iterations;
7878 	bool run_poller;
7879 
7880 	channel = spdk_bs_alloc_io_channel(bs);
7881 	SPDK_CU_ASSERT_FATAL(channel != NULL);
7882 
7883 	ut_spdk_blob_opts_init(&opts);
7884 	opts.num_clusters = 10;
7885 
7886 	blob = ut_blob_create_and_open(bs, &opts);
7887 	blobid = spdk_blob_get_id(blob);
7888 
7889 	/* Save the number of md pages used right after creation of the blob.
7890 	 * This count should be restored once the xattr is removed again. */
7891 	page_count_clear = spdk_bit_array_count_set(bs->used_md_pages);
7892 	SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_clear);
7893 	SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_clear);
7894 
7895 	/* Add an xattr whose descriptor has the maximum length, so the blob metadata exceeds a single page. */
7896 	xattr_length = SPDK_BS_MAX_DESC_SIZE - sizeof(struct spdk_blob_md_descriptor_xattr) -
7897 		       strlen("large_xattr");
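	/* This length makes the xattr descriptor (header + name + value) fill SPDK_BS_MAX_DESC_SIZE exactly. */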
7898 	xattr = calloc(xattr_length, sizeof(char));
7899 	SPDK_CU_ASSERT_FATAL(xattr != NULL);
7900 
7901 	rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
7902 	SPDK_CU_ASSERT_FATAL(rc == 0);
7903 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
7904 	poll_threads();
7905 	SPDK_CU_ASSERT_FATAL(g_bserrno == 0);
7906 
7907 	/* Save the number of md pages used after adding the large xattr */
7908 	page_count_xattr = spdk_bit_array_count_set(bs->used_md_pages);
7909 	SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_xattr);
7910 	SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_xattr);
7911 
7912 	/* Add an xattr to the blob and sync it. While that sync is in progress, remove the xattr and sync again.
7913 	 * Interrupt the first sync after an increasing number of poller iterations until it eventually completes.
7914 	 * The expectation is that after the second sync completes, no xattr is saved in the metadata. */
7915 	poller_iterations = 1;
7916 	run_poller = true;
7917 	while (run_poller) {
7918 		rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
7919 		SPDK_CU_ASSERT_FATAL(rc == 0);
7920 		g_bserrno = -1;
7921 		spdk_blob_sync_md(blob, blob_op_complete, NULL);
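		/* Run only a limited number of poller iterations so the sync may be left incomplete. */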
7922 		poll_thread_times(0, poller_iterations);
7923 		if (g_bserrno == 0) {
7924 			/* The poller iteration count was high enough for the first sync to complete.
7925 			 * Verify that the blob occupies enough md pages to store the xattr. */
7926 			SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_xattr);
7927 			SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_xattr);
7928 			SPDK_CU_ASSERT_FATAL(spdk_bit_array_count_set(bs->used_md_pages) == page_count_xattr);
7929 			run_poller = false;
7930 		}
7931 		rc = spdk_blob_remove_xattr(blob, "large_xattr");
7932 		SPDK_CU_ASSERT_FATAL(rc == 0);
7933 		spdk_blob_sync_md(blob, blob_op_complete, NULL);
7934 		poll_threads();
7935 		SPDK_CU_ASSERT_FATAL(g_bserrno == 0);
7936 		SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_clear);
7937 		SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_clear);
7938 		SPDK_CU_ASSERT_FATAL(spdk_bit_array_count_set(bs->used_md_pages) == page_count_clear);
7939 
7940 		/* Reload bs and re-open blob to verify that xattr was not persisted. */
7941 		spdk_blob_close(blob, blob_op_complete, NULL);
7942 		poll_threads();
7943 		CU_ASSERT(g_bserrno == 0);
7944 
7945 		ut_bs_reload(&bs, NULL);
7946 
7947 		spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
7948 		poll_threads();
7949 		CU_ASSERT(g_bserrno == 0);
7950 		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
7951 		blob = g_blob;
7952 
7953 		rc = spdk_blob_get_xattr_value(blob, "large_xattr", (const void **)&xattr, &xattr_length);
7954 		SPDK_CU_ASSERT_FATAL(rc == -ENOENT);
7955 
7956 		poller_iterations++;
7957 		/* Stop at a high iteration count to prevent an infinite loop.
7958 		 * This value should be enough for the first md sync to complete in any case. */
7959 		SPDK_CU_ASSERT_FATAL(poller_iterations < 50);
7960 	}
7961 
7962 	free(xattr);
7963 
7964 	ut_blob_close_and_delete(bs, blob);
7965 
7966 	spdk_bs_free_io_channel(channel);
7967 	poll_threads();
7968 }
7969 
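/* Create a blob with two snapshots, decouple the newer snapshot from its parent and verify that
 * its clusters become independently allocated copies. Run twice to cover both deletion orders. */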
7970 static void
7971 blob_decouple_snapshot(void)
7972 {
7973 	struct spdk_blob_store *bs = g_bs;
7974 	struct spdk_blob_opts opts;
7975 	struct spdk_blob *blob, *snapshot1, *snapshot2;
7976 	struct spdk_io_channel *channel;
7977 	spdk_blob_id blobid, snapshotid;
7978 	uint64_t cluster;
7979 
7980 	for (int delete_snapshot_first = 0; delete_snapshot_first <= 1; delete_snapshot_first++) {
7981 		channel = spdk_bs_alloc_io_channel(bs);
7982 		SPDK_CU_ASSERT_FATAL(channel != NULL);
7983 
7984 		ut_spdk_blob_opts_init(&opts);
7985 		opts.num_clusters = 10;
7986 		opts.thin_provision = false;
7987 
7988 		blob = ut_blob_create_and_open(bs, &opts);
7989 		blobid = spdk_blob_get_id(blob);
7990 
7991 		/* Create first snapshot */
7992 		CU_ASSERT_EQUAL(_get_snapshots_count(bs), 0);
7993 		spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
7994 		poll_threads();
7995 		CU_ASSERT(g_bserrno == 0);
7996 		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
7997 		CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
7998 		snapshotid = g_blobid;
7999 
8000 		spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
8001 		poll_threads();
8002 		CU_ASSERT(g_bserrno == 0);
8003 		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
8004 		snapshot1 = g_blob;
8005 
8006 		/* Create the second one */
8007 		CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
8008 		spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
8009 		poll_threads();
8010 		CU_ASSERT(g_bserrno == 0);
8011 		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
8012 		CU_ASSERT_EQUAL(_get_snapshots_count(bs), 2);
8013 		snapshotid = g_blobid;
8014 
8015 		spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
8016 		poll_threads();
8017 		CU_ASSERT(g_bserrno == 0);
8018 		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
8019 		snapshot2 = g_blob;
8020 		CU_ASSERT_EQUAL(spdk_blob_get_parent_snapshot(bs, snapshot2->id), snapshot1->id);
8021 
8022 		/* Now decouple the second snapshot, forcing it to copy the written clusters */
8023 		spdk_bs_blob_decouple_parent(bs, channel, snapshot2->id, blob_op_complete, NULL);
8024 		poll_threads();
8025 		CU_ASSERT(g_bserrno == 0);
8026 
8027 		/* Verify that the snapshot has been decoupled and that the clusters have been copied */
8028 		CU_ASSERT_EQUAL(spdk_blob_get_parent_snapshot(bs, snapshot2->id), SPDK_BLOBID_INVALID);
8029 		for (cluster = 0; cluster < snapshot2->active.num_clusters; ++cluster) {
8030 			CU_ASSERT_NOT_EQUAL(snapshot2->active.clusters[cluster], 0);
8031 			CU_ASSERT_NOT_EQUAL(snapshot2->active.clusters[cluster],
8032 					    snapshot1->active.clusters[cluster]);
8033 		}
8034 
8035 		spdk_bs_free_io_channel(channel);
8036 
8037 		if (delete_snapshot_first) {
8038 			ut_blob_close_and_delete(bs, snapshot2);
8039 			ut_blob_close_and_delete(bs, snapshot1);
8040 			ut_blob_close_and_delete(bs, blob);
8041 		} else {
8042 			ut_blob_close_and_delete(bs, blob);
8043 			ut_blob_close_and_delete(bs, snapshot2);
8044 			ut_blob_close_and_delete(bs, snapshot1);
8045 		}
8046 		poll_threads();
8047 	}
8048 }
8049 
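/* Exercise spdk_blob_get_next_allocated_io_unit() and spdk_blob_get_next_unallocated_io_unit()
 * on a thin-provisioned blob with a sparse cluster allocation pattern. */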
8050 static void
8051 blob_seek_io_unit(void)
8052 {
8053 	struct spdk_blob_store *bs = g_bs;
8054 	struct spdk_blob *blob;
8055 	struct spdk_io_channel *channel;
8056 	struct spdk_blob_opts opts;
8057 	uint64_t free_clusters;
8058 	uint8_t payload[10 * BLOCKLEN];
8059 	uint64_t offset;
8060 	uint64_t io_unit, io_units_per_cluster;
8061 
8062 	free_clusters = spdk_bs_free_cluster_count(bs);
8063 
8064 	channel = spdk_bs_alloc_io_channel(bs);
8065 	CU_ASSERT(channel != NULL);
8066 
8067 	/* Set blob as thin provisioned */
8068 	ut_spdk_blob_opts_init(&opts);
8069 	opts.thin_provision = true;
8070 
8071 	/* Create a blob */
8072 	blob = ut_blob_create_and_open(bs, &opts);
8073 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
8074 
8075 	io_units_per_cluster = bs_io_units_per_cluster(blob);
8076 
8077 	/* The blob started at 0 clusters. Resize it to 5 clusters; they remain unallocated since the blob is thin provisioned. */
8078 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
8079 	poll_threads();
8080 	CU_ASSERT(g_bserrno == 0);
8081 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
8082 	CU_ASSERT(blob->active.num_clusters == 5);
8083 
8084 	/* Write at the beginning of first cluster */
8085 	offset = 0;
8086 	spdk_blob_io_write(blob, channel, payload, offset, 1, blob_op_complete, NULL);
8087 	poll_threads();
8088 	CU_ASSERT(g_bserrno == 0);
8089 
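	/* Only the first cluster is allocated: searching from io unit 0 finds the written io unit,
	 * and the first unallocated io unit is the start of the second cluster. */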
8090 	io_unit = spdk_blob_get_next_allocated_io_unit(blob, 0);
8091 	CU_ASSERT(io_unit == offset);
8092 
8093 	io_unit = spdk_blob_get_next_unallocated_io_unit(blob, 0);
8094 	CU_ASSERT(io_unit == io_units_per_cluster);
8095 
8096 	/* Write in the middle of third cluster */
8097 	offset = 2 * io_units_per_cluster + io_units_per_cluster / 2;
8098 	spdk_blob_io_write(blob, channel, payload, offset, 1, blob_op_complete, NULL);
8099 	poll_threads();
8100 	CU_ASSERT(g_bserrno == 0);
8101 
8102 	io_unit = spdk_blob_get_next_allocated_io_unit(blob, io_units_per_cluster);
8103 	CU_ASSERT(io_unit == 2 * io_units_per_cluster);
8104 
8105 	io_unit = spdk_blob_get_next_unallocated_io_unit(blob, 2 * io_units_per_cluster);
8106 	CU_ASSERT(io_unit == 3 * io_units_per_cluster);
8107 
8108 	/* Write at the end of last cluster */
8109 	offset = 5 * io_units_per_cluster - 1;
8110 	spdk_blob_io_write(blob, channel, payload, offset, 1, blob_op_complete, NULL);
8111 	poll_threads();
8112 	CU_ASSERT(g_bserrno == 0);
8113 
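	/* Clusters 0, 2 and 4 are now allocated, so searching from cluster 3 lands on cluster 4,
	 * and there are no unallocated io units at or beyond cluster 4. */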
8114 	io_unit = spdk_blob_get_next_allocated_io_unit(blob, 3 * io_units_per_cluster);
8115 	CU_ASSERT(io_unit == 4 * io_units_per_cluster);
8116 
8117 	io_unit = spdk_blob_get_next_unallocated_io_unit(blob, 4 * io_units_per_cluster);
8118 	CU_ASSERT(io_unit == UINT64_MAX);
8119 
8120 	spdk_bs_free_io_channel(channel);
8121 	poll_threads();
8122 
8123 	ut_blob_close_and_delete(bs, blob);
8124 }
8125 
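/* Cover creation of esnap clones: with and without an explicit size, resizing, and reloading the
 * blobstore with and without an esnap_bs_dev_create callback and with esnap context pointers. */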
8126 static void
8127 blob_esnap_create(void)
8128 {
8129 	struct spdk_blob_store	*bs = g_bs;
8130 	struct spdk_bs_opts	bs_opts;
8131 	struct ut_esnap_opts	esnap_opts;
8132 	struct spdk_blob_opts	opts;
8133 	struct spdk_blob_open_opts open_opts;
8134 	struct spdk_blob	*blob;
8135 	uint32_t		cluster_sz, block_sz;
8136 	const uint32_t		esnap_num_clusters = 4;
8137 	uint64_t		esnap_num_blocks;
8138 	uint32_t		sz;
8139 	spdk_blob_id		blobid;
8140 	uint32_t		bs_ctx_count, blob_ctx_count;
8141 
8142 	cluster_sz = spdk_bs_get_cluster_size(bs);
8143 	block_sz = spdk_bs_get_io_unit_size(bs);
8144 	esnap_num_blocks = cluster_sz * esnap_num_clusters / block_sz;
8145 
8146 	/* Create a normal blob and verify it is not an esnap clone. */
8147 	ut_spdk_blob_opts_init(&opts);
8148 	blob = ut_blob_create_and_open(bs, &opts);
8149 	CU_ASSERT(!spdk_blob_is_esnap_clone(blob));
8150 	ut_blob_close_and_delete(bs, blob);
8151 
8152 	/* Create an esnap clone blob then verify it is an esnap clone and has the right size */
8153 	ut_spdk_blob_opts_init(&opts);
8154 	ut_esnap_opts_init(block_sz, esnap_num_blocks, __func__, NULL, &esnap_opts);
8155 	opts.esnap_id = &esnap_opts;
8156 	opts.esnap_id_len = sizeof(esnap_opts);
8157 	opts.num_clusters = esnap_num_clusters;
8158 	blob = ut_blob_create_and_open(bs, &opts);
8159 	SPDK_CU_ASSERT_FATAL(blob != NULL);
8160 	SPDK_CU_ASSERT_FATAL(spdk_blob_is_esnap_clone(blob));
8161 	SPDK_CU_ASSERT_FATAL(blob_is_esnap_clone(blob));
8162 	SPDK_CU_ASSERT_FATAL(!spdk_blob_is_clone(blob));
8163 	sz = spdk_blob_get_num_clusters(blob);
8164 	CU_ASSERT(sz == esnap_num_clusters);
8165 	ut_blob_close_and_delete(bs, blob);
8166 
8167 	/* Create an esnap clone without specifying a size and verify it can be grown */
8168 	ut_spdk_blob_opts_init(&opts);
8169 	ut_esnap_opts_init(block_sz, esnap_num_blocks, __func__, NULL, &esnap_opts);
8170 	opts.esnap_id = &esnap_opts;
8171 	opts.esnap_id_len = sizeof(esnap_opts);
8172 	blob = ut_blob_create_and_open(bs, &opts);
8173 	SPDK_CU_ASSERT_FATAL(spdk_blob_is_esnap_clone(blob));
8174 	sz = spdk_blob_get_num_clusters(blob);
8175 	CU_ASSERT(sz == 0);
8176 	spdk_blob_resize(blob, 1, blob_op_complete, NULL);
8177 	poll_threads();
8178 	CU_ASSERT(g_bserrno == 0);
8179 	sz = spdk_blob_get_num_clusters(blob);
8180 	CU_ASSERT(sz == 1);
8181 	spdk_blob_resize(blob, esnap_num_clusters, blob_op_complete, NULL);
8182 	poll_threads();
8183 	CU_ASSERT(g_bserrno == 0);
8184 	sz = spdk_blob_get_num_clusters(blob);
8185 	CU_ASSERT(sz == esnap_num_clusters);
8186 	spdk_blob_resize(blob, esnap_num_clusters + 1, blob_op_complete, NULL);
8187 	poll_threads();
8188 	CU_ASSERT(g_bserrno == 0);
8189 	sz = spdk_blob_get_num_clusters(blob);
8190 	CU_ASSERT(sz == esnap_num_clusters + 1);
8191 
8192 	/* Reload the blobstore and be sure that the blob can be opened. */
8193 	blobid = spdk_blob_get_id(blob);
8194 	spdk_blob_close(blob, blob_op_complete, NULL);
8195 	poll_threads();
8196 	CU_ASSERT(g_bserrno == 0);
8197 	g_blob = NULL;
8198 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
8199 	bs_opts.esnap_bs_dev_create = ut_esnap_create;
8200 	ut_bs_reload(&bs, &bs_opts);
8201 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
8202 	poll_threads();
8203 	CU_ASSERT(g_bserrno == 0);
8204 	CU_ASSERT(g_blob != NULL);
8205 	blob = g_blob;
8206 	SPDK_CU_ASSERT_FATAL(spdk_blob_is_esnap_clone(blob));
8207 	sz = spdk_blob_get_num_clusters(blob);
8208 	CU_ASSERT(sz == esnap_num_clusters + 1);
8209 
8210 	/* Reload the blobstore without esnap_bs_dev_create: should fail to open blob. */
8211 	spdk_blob_close(blob, blob_op_complete, NULL);
8212 	poll_threads();
8213 	CU_ASSERT(g_bserrno == 0);
8214 	g_blob = NULL;
8215 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
8216 	ut_bs_reload(&bs, &bs_opts);
8217 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
8218 	poll_threads();
8219 	CU_ASSERT(g_bserrno != 0);
8220 	CU_ASSERT(g_blob == NULL);
8221 
8222 	/* Reload the blobstore with ctx set and verify it is passed to the esnap create callback */
8223 	bs_ctx_count = 0;
8224 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
8225 	bs_opts.esnap_bs_dev_create = ut_esnap_create_with_count;
8226 	bs_opts.esnap_ctx = &bs_ctx_count;
8227 	ut_bs_reload(&bs, &bs_opts);
8228 	/* Loading the blobstore triggers the esnap to be loaded */
8229 	CU_ASSERT(bs_ctx_count == 1);
8230 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
8231 	poll_threads();
8232 	CU_ASSERT(g_bserrno == 0);
8233 	CU_ASSERT(g_blob != NULL);
8234 	/* Opening the blob also triggers the esnap to be loaded */
8235 	CU_ASSERT(bs_ctx_count == 2);
8236 	blob = g_blob;
8237 	SPDK_CU_ASSERT_FATAL(spdk_blob_is_esnap_clone(blob));
8238 	sz = spdk_blob_get_num_clusters(blob);
8239 	CU_ASSERT(sz == esnap_num_clusters + 1);
8240 	spdk_blob_close(blob, blob_op_complete, NULL);
8241 	poll_threads();
8242 	CU_ASSERT(g_bserrno == 0);
8243 	g_blob = NULL;
8244 	/* If open_opts.esnap_ctx is set, it is passed to the esnap create callback */
8245 	blob_ctx_count = 0;
8246 	spdk_blob_open_opts_init(&open_opts, sizeof(open_opts));
8247 	open_opts.esnap_ctx = &blob_ctx_count;
8248 	spdk_bs_open_blob_ext(bs, blobid, &open_opts, blob_op_with_handle_complete, NULL);
8249 	poll_threads();
8250 	blob = g_blob;
8251 	CU_ASSERT(bs_ctx_count == 3);
8252 	CU_ASSERT(blob_ctx_count == 1);
8253 	spdk_blob_close(blob, blob_op_complete, NULL);
8254 	poll_threads();
8255 	CU_ASSERT(g_bserrno == 0);
8256 	g_blob = NULL;
8257 }
8258 
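/* Build a chain of an esnap clone, a snapshot of it, and a regular clone of that snapshot; reload
 * the blobstore, do I/O on each blob, then unload the blobstore the way lvstore does in order to
 * exercise the deferred unload path. */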
8259 static void
8260 blob_esnap_clone_reload(void)
8261 {
8262 	struct spdk_blob_store	*bs = g_bs;
8263 	struct spdk_bs_opts	bs_opts;
8264 	struct ut_esnap_opts	esnap_opts;
8265 	struct spdk_blob_opts	opts;
8266 	struct spdk_blob	*eclone1, *snap1, *clone1;
8267 	uint32_t		cluster_sz = spdk_bs_get_cluster_size(bs);
8268 	uint32_t		block_sz = spdk_bs_get_io_unit_size(bs);
8269 	const uint32_t		esnap_num_clusters = 4;
8270 	uint64_t		esnap_num_blocks = cluster_sz * esnap_num_clusters / block_sz;
8271 	spdk_blob_id		eclone1_id, snap1_id, clone1_id;
8272 	struct spdk_io_channel	*bs_ch;
8273 	char			buf[block_sz];
8274 	int			bserr1, bserr2, bserr3, bserr4;
8275 	struct spdk_bs_dev	*dev;
8276 
8277 	/* Create and open an esnap clone blob */
8278 	ut_spdk_blob_opts_init(&opts);
8279 	ut_esnap_opts_init(block_sz, esnap_num_blocks, __func__, NULL, &esnap_opts);
8280 	opts.esnap_id = &esnap_opts;
8281 	opts.esnap_id_len = sizeof(esnap_opts);
8282 	opts.num_clusters = esnap_num_clusters;
8283 	eclone1 = ut_blob_create_and_open(bs, &opts);
8284 	CU_ASSERT(eclone1 != NULL);
8285 	CU_ASSERT(spdk_blob_is_esnap_clone(eclone1));
8286 	eclone1_id = eclone1->id;
8287 
8288 	/* Create and open a snapshot of eclone1 */
8289 	spdk_bs_create_snapshot(bs, eclone1_id, NULL, blob_op_with_id_complete, NULL);
8290 	poll_threads();
8291 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
8292 	CU_ASSERT(g_bserrno == 0);
8293 	snap1_id = g_blobid;
8294 	spdk_bs_open_blob(bs, snap1_id, blob_op_with_handle_complete, NULL);
8295 	poll_threads();
8296 	CU_ASSERT(g_bserrno == 0);
8297 	CU_ASSERT(g_blob != NULL);
8298 	snap1 = g_blob;
8299 
8300 	/* Create and open regular clone of snap1 */
8301 	spdk_bs_create_clone(bs, snap1_id, NULL, blob_op_with_id_complete, NULL);
8302 	poll_threads();
8303 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
8304 	SPDK_CU_ASSERT_FATAL(g_bserrno == 0);
8305 	clone1_id = g_blobid;
8306 	spdk_bs_open_blob(bs, clone1_id, blob_op_with_handle_complete, NULL);
8307 	poll_threads();
8308 	CU_ASSERT(g_bserrno == 0);
8309 	CU_ASSERT(g_blob != NULL);
8310 	clone1 = g_blob;
8311 
8312 	/* Close the blobs in preparation for reloading the blobstore */
8313 	spdk_blob_close(clone1, blob_op_complete, NULL);
8314 	poll_threads();
8315 	CU_ASSERT(g_bserrno == 0);
8316 	spdk_blob_close(snap1, blob_op_complete, NULL);
8317 	poll_threads();
8318 	CU_ASSERT(g_bserrno == 0);
8319 	spdk_blob_close(eclone1, blob_op_complete, NULL);
8320 	poll_threads();
8321 	CU_ASSERT(g_bserrno == 0);
8322 	g_blob = NULL;
8323 
8324 	/* Reload the blobstore */
8325 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
8326 	bs_opts.esnap_bs_dev_create = ut_esnap_create;
8327 	ut_bs_reload(&bs, &bs_opts);
8328 
8329 	/* Be sure each of the blobs can be opened */
8330 	spdk_bs_open_blob(bs, eclone1_id, blob_op_with_handle_complete, NULL);
8331 	poll_threads();
8332 	CU_ASSERT(g_bserrno == 0);
8333 	CU_ASSERT(g_blob != NULL);
8334 	eclone1 = g_blob;
8335 	spdk_bs_open_blob(bs, snap1_id, blob_op_with_handle_complete, NULL);
8336 	poll_threads();
8337 	CU_ASSERT(g_bserrno == 0);
8338 	CU_ASSERT(g_blob != NULL);
8339 	snap1 = g_blob;
8340 	spdk_bs_open_blob(bs, clone1_id, blob_op_with_handle_complete, NULL);
8341 	poll_threads();
8342 	CU_ASSERT(g_bserrno == 0);
8343 	CU_ASSERT(g_blob != NULL);
8344 	clone1 = g_blob;
8345 
8346 	/* Perform some reads on each of them to cause channels to be allocated */
8347 	bs_ch = spdk_bs_alloc_io_channel(bs);
8348 	CU_ASSERT(bs_ch != NULL);
8349 	spdk_blob_io_read(eclone1, bs_ch, buf, 0, 1, bs_op_complete, NULL);
8350 	poll_threads();
8351 	CU_ASSERT(g_bserrno == 0);
8352 	spdk_blob_io_read(snap1, bs_ch, buf, 0, 1, bs_op_complete, NULL);
8353 	poll_threads();
8354 	CU_ASSERT(g_bserrno == 0);
8355 	spdk_blob_io_read(clone1, bs_ch, buf, 0, 1, bs_op_complete, NULL);
8356 	poll_threads();
8357 	CU_ASSERT(g_bserrno == 0);
8358 
8359 	/*
8360 	 * Unload the blobstore in a way similar to how lvstore unloads it.  This should exercise
8361 	 * the deferred unload path in spdk_bs_unload().
8362 	 */
8363 	bserr1 = 0xbad;
8364 	bserr2 = 0xbad;
8365 	bserr3 = 0xbad;
8366 	bserr4 = 0xbad;
8367 	spdk_blob_close(eclone1, blob_op_complete, &bserr1);
8368 	spdk_blob_close(snap1, blob_op_complete, &bserr2);
8369 	spdk_blob_close(clone1, blob_op_complete, &bserr3);
8370 	spdk_bs_unload(bs, blob_op_complete, &bserr4);
8371 	spdk_bs_free_io_channel(bs_ch);
8372 	poll_threads();
8373 	CU_ASSERT(bserr1 == 0);
8374 	CU_ASSERT(bserr2 == 0);
8375 	CU_ASSERT(bserr3 == 0);
8376 	CU_ASSERT(bserr4 == 0);
8377 	g_blob = NULL;
8378 
8379 	/* Reload the blobstore */
8380 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
8381 	bs_opts.esnap_bs_dev_create = ut_esnap_create;
8382 	dev = init_dev();
8383 	spdk_bs_load(dev, &bs_opts, bs_op_with_handle_complete, NULL);
8384 	poll_threads();
8385 	CU_ASSERT(g_bserrno == 0);
8386 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
8387 }
8388 
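/* Read back [offset, offset + size) from the blob in readsize-sized chunks using the requested
 * read variant ("read", "readv" or "readv_ext") and verify that every block carries the content
 * generated by the ut_esnap device. Returns true only if all reads and content checks pass. */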
8389 static bool
8390 blob_esnap_verify_contents(struct spdk_blob *blob, struct spdk_io_channel *ch,
8391 			   uint64_t offset, uint64_t size, uint32_t readsize, const char *how)
8392 {
8393 	const uint32_t	bs_blksz = blob->bs->io_unit_size;
8394 	const uint32_t	esnap_blksz = blob->back_bs_dev ? blob->back_bs_dev->blocklen : bs_blksz;
8395 	const uint32_t	start_blk = offset / bs_blksz;
8396 	const uint32_t	num_blocks = spdk_max(size, readsize) / bs_blksz;
8397 	const uint32_t	blocks_per_read = spdk_min(size, readsize) / bs_blksz;
8398 	uint32_t	blob_block;
8399 	struct iovec	iov;
8400 	uint8_t		buf[spdk_min(size, readsize)];
8401 	bool		block_ok;
8402 
8403 	SPDK_CU_ASSERT_FATAL(offset % bs_blksz == 0);
8404 	SPDK_CU_ASSERT_FATAL(size % bs_blksz == 0);
8405 	SPDK_CU_ASSERT_FATAL(readsize % bs_blksz == 0);
8406 
8407 	memset(buf, 0, readsize);
8408 	iov.iov_base = buf;
8409 	iov.iov_len = readsize;
8410 	for (blob_block = start_blk; blob_block < num_blocks; blob_block += blocks_per_read) {
8411 		if (strcmp(how, "read") == 0) {
8412 			spdk_blob_io_read(blob, ch, buf, blob_block, blocks_per_read,
8413 					  bs_op_complete, NULL);
8414 		} else if (strcmp(how, "readv") == 0) {
8415 			spdk_blob_io_readv(blob, ch, &iov, 1, blob_block, blocks_per_read,
8416 					   bs_op_complete, NULL);
8417 		} else if (strcmp(how, "readv_ext") == 0) {
8418 			/*
8419 			 * This is currently pointless. NULL ext_opts leads to dev->readv(), not
8420 			 * dev->readv_ext().
8421 			 */
8422 			spdk_blob_io_readv_ext(blob, ch, &iov, 1, blob_block, blocks_per_read,
8423 					       bs_op_complete, NULL, NULL);
8424 		} else {
8425 			abort();
8426 		}
8427 		poll_threads();
8428 		CU_ASSERT(g_bserrno == 0);
8429 		if (g_bserrno != 0) {
8430 			return false;
8431 		}
8432 		block_ok = ut_esnap_content_is_correct(buf, blocks_per_read * bs_blksz, blob->id,
8433 						       blob_block * bs_blksz, esnap_blksz);
8434 		CU_ASSERT(block_ok);
8435 		if (!block_ok) {
8436 			return false;
8437 		}
8438 	}
8439 
8440 	return true;
8441 }
8442 
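/* Create a blobstore with the given io unit size and an esnap clone backed by a device with the
 * given block size, then verify reads and writes for that block size combination. */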
8443 static void
8444 blob_esnap_io_size(uint32_t bs_blksz, uint32_t esnap_blksz)
8445 {
8446 	struct spdk_bs_dev	*dev;
8447 	struct spdk_blob_store	*bs;
8448 	struct spdk_bs_opts	bsopts;
8449 	struct spdk_blob_opts	opts;
8450 	struct ut_esnap_opts	esnap_opts;
8451 	struct spdk_blob	*blob;
8452 	const uint32_t		cluster_sz = 4 * g_phys_blocklen;
8453 	const uint64_t		esnap_num_clusters = 4;
8454 	const uint32_t		esnap_sz = cluster_sz * esnap_num_clusters;
8455 	const uint64_t		esnap_num_blocks = esnap_sz / esnap_blksz;
8456 	const uint64_t		blob_num_blocks = esnap_sz / bs_blksz;
8457 	uint32_t		block;
8458 	struct spdk_io_channel	*bs_ch;
8459 
8460 	spdk_bs_opts_init(&bsopts, sizeof(bsopts));
8461 	bsopts.cluster_sz = cluster_sz;
8462 	bsopts.esnap_bs_dev_create = ut_esnap_create;
8463 
8464 	/* Create device with desired block size */
8465 	dev = init_dev();
8466 	dev->blocklen = bs_blksz;
8467 	dev->blockcnt = DEV_BUFFER_SIZE / dev->blocklen;
8468 
8469 	/* Initialize a new blob store */
8470 	spdk_bs_init(dev, &bsopts, bs_op_with_handle_complete, NULL);
8471 	poll_threads();
8472 	CU_ASSERT(g_bserrno == 0);
8473 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
8474 	SPDK_CU_ASSERT_FATAL(g_bs->io_unit_size == bs_blksz);
8475 	bs = g_bs;
8476 
8477 	bs_ch = spdk_bs_alloc_io_channel(bs);
8478 	SPDK_CU_ASSERT_FATAL(bs_ch != NULL);
8479 
8480 	/* Create and open the esnap clone  */
8481 	ut_spdk_blob_opts_init(&opts);
8482 	ut_esnap_opts_init(esnap_blksz, esnap_num_blocks, __func__, NULL, &esnap_opts);
8483 	opts.esnap_id = &esnap_opts;
8484 	opts.esnap_id_len = sizeof(esnap_opts);
8485 	opts.num_clusters = esnap_num_clusters;
8486 	blob = ut_blob_create_and_open(bs, &opts);
8487 	SPDK_CU_ASSERT_FATAL(blob != NULL);
8488 
8489 	/* Verify that large reads return the content of the esnap device */
8490 	CU_ASSERT(blob_esnap_verify_contents(blob, bs_ch, 0, esnap_sz, esnap_sz, "read"));
8491 	CU_ASSERT(blob_esnap_verify_contents(blob, bs_ch, 0, esnap_sz, esnap_sz, "readv"));
8492 	CU_ASSERT(blob_esnap_verify_contents(blob, bs_ch, 0, esnap_sz, esnap_sz, "readv_ext"));
8493 	/* Verify that small reads return the content of the esnap device */
8494 	CU_ASSERT(blob_esnap_verify_contents(blob, bs_ch, 0, esnap_sz, bs_blksz, "read"));
8495 	CU_ASSERT(blob_esnap_verify_contents(blob, bs_ch, 0, esnap_sz, bs_blksz, "readv"));
8496 	CU_ASSERT(blob_esnap_verify_contents(blob, bs_ch, 0, esnap_sz, bs_blksz, "readv_ext"));
8497 
8498 	/* Write one blob block at a time; verify that the surrounding blocks are OK */
8499 	for (block = 0; block < blob_num_blocks; block++) {
8500 		char		buf[bs_blksz];
8501 		union ut_word	word;
8502 
8503 		word.f.blob_id = 0xfedcba90;
8504 		word.f.lba = block;
8505 		ut_memset8(buf, word.num, bs_blksz);
8506 
8507 		spdk_blob_io_write(blob, bs_ch, buf, block, 1, bs_op_complete, NULL);
8508 		poll_threads();
8509 		CU_ASSERT(g_bserrno == 0);
8510 		if (g_bserrno != 0) {
8511 			break;
8512 		}
8513 
8514 		/* Read and verify the block before the current block */
8515 		if (block != 0) {
8516 			spdk_blob_io_read(blob, bs_ch, buf, block - 1, 1, bs_op_complete, NULL);
8517 			poll_threads();
8518 			CU_ASSERT(g_bserrno == 0);
8519 			if (g_bserrno != 0) {
8520 				break;
8521 			}
8522 			CU_ASSERT(ut_esnap_content_is_correct(buf, bs_blksz, word.f.blob_id,
8523 							      (block - 1) * bs_blksz, bs_blksz));
8524 		}
8525 
8526 		/* Read and verify the current block */
8527 		spdk_blob_io_read(blob, bs_ch, buf, block, 1, bs_op_complete, NULL);
8528 		poll_threads();
8529 		CU_ASSERT(g_bserrno == 0);
8530 		if (g_bserrno != 0) {
8531 			break;
8532 		}
8533 		CU_ASSERT(ut_esnap_content_is_correct(buf, bs_blksz, word.f.blob_id,
8534 						      block * bs_blksz, bs_blksz));
8535 
8536 		/* Check the block that follows */
8537 		if (block + 1 < blob_num_blocks) {
8538 			g_bserrno = 0xbad;
8539 			spdk_blob_io_read(blob, bs_ch, buf, block + 1, 1, bs_op_complete, NULL);
8540 			poll_threads();
8541 			CU_ASSERT(g_bserrno == 0);
8542 			if (g_bserrno != 0) {
8543 				break;
8544 			}
8545 			CU_ASSERT(ut_esnap_content_is_correct(buf, bs_blksz, blob->id,
8546 							      (block + 1) * bs_blksz,
8547 							      esnap_blksz));
8548 		}
8549 	}
8550 
8551 	/* Clean up */
8552 	spdk_bs_free_io_channel(bs_ch);
8553 	g_bserrno = 0xbad;
8554 	spdk_blob_close(blob, blob_op_complete, NULL);
8555 	poll_threads();
8556 	CU_ASSERT(g_bserrno == 0);
8557 	spdk_bs_unload(g_bs, bs_op_complete, NULL);
8558 	poll_threads();
8559 	CU_ASSERT(g_bserrno == 0);
8560 	g_bs = NULL;
8561 	memset(g_dev_buffer, 0, DEV_BUFFER_SIZE);
8562 }
8563 
8564 static void
8565 blob_esnap_io_4096_4096(void)
8566 {
8567 	blob_esnap_io_size(4096, 4096);
8568 }
8569 
8570 static void
8571 blob_esnap_io_512_512(void)
8572 {
8573 	blob_esnap_io_size(512, 512);
8574 }
8575 
8576 static void
8577 blob_esnap_io_4096_512(void)
8578 {
8579 	blob_esnap_io_size(4096, 512);
8580 }
8581 
8582 static void
8583 blob_esnap_io_512_4096(void)
8584 {
8585 	struct spdk_bs_dev	*dev;
8586 	struct spdk_blob_store	*bs;
8587 	struct spdk_bs_opts	bs_opts;
8588 	struct spdk_blob_opts	blob_opts;
8589 	struct ut_esnap_opts	esnap_opts;
8590 	uint64_t		cluster_sz = 4 * g_phys_blocklen;
8591 	uint32_t		bs_blksz = 512;
8592 	uint32_t		esnap_blksz = BLOCKLEN;
8593 	uint64_t		esnap_num_blocks = 64;
8594 	spdk_blob_id		blobid;
8595 
8596 	/* Create device with desired block size */
8597 	dev = init_dev();
8598 	dev->blocklen = bs_blksz;
8599 	dev->blockcnt = DEV_BUFFER_SIZE / dev->blocklen;
8600 
8601 	/* Initialize a new blob store */
8602 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
8603 	bs_opts.cluster_sz = cluster_sz;
8604 	bs_opts.esnap_bs_dev_create = ut_esnap_create;
8605 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
8606 	poll_threads();
8607 	CU_ASSERT(g_bserrno == 0);
8608 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
8609 	SPDK_CU_ASSERT_FATAL(g_bs->io_unit_size == bs_blksz);
8610 	bs = g_bs;
8611 
8612 	/* Try to create and open the esnap clone. Create should succeed, open should fail. */
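	/* The open is expected to fail because the external snapshot's block size (4096) is larger
	 * than the blobstore io unit size (512), which the blobstore cannot address. */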
8613 	ut_spdk_blob_opts_init(&blob_opts);
8614 	ut_esnap_opts_init(esnap_blksz, esnap_num_blocks, __func__, NULL, &esnap_opts);
8615 	blob_opts.esnap_id = &esnap_opts;
8616 	blob_opts.esnap_id_len = sizeof(esnap_opts);
8617 	blob_opts.num_clusters = esnap_num_blocks * esnap_blksz / bs_blksz;
8618 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
8619 	poll_threads();
8620 	CU_ASSERT(g_bserrno == 0);
8621 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
8622 	blobid = g_blobid;
8623 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
8624 	poll_threads();
8625 	CU_ASSERT(g_bserrno == -EINVAL);
8626 	CU_ASSERT(g_blob == NULL);
8627 
8628 	/* Clean up */
8629 	spdk_bs_unload(bs, bs_op_complete, NULL);
8630 	poll_threads();
8631 	CU_ASSERT(g_bserrno == 0);
8632 	g_bs = NULL;
8633 	memset(g_dev_buffer, 0, DEV_BUFFER_SIZE);
8634 }
8635 
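/* Verify that per-thread esnap device channels are created lazily on the first read and are
 * released when the blobstore io channel or the blob itself is closed. */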
8636 static void
8637 blob_esnap_thread_add_remove(void)
8638 {
8639 	struct spdk_blob_store	*bs = g_bs;
8640 	struct spdk_blob_opts	opts;
8641 	struct ut_esnap_opts	ut_esnap_opts;
8642 	struct spdk_blob	*blob;
8643 	struct ut_esnap_dev	*ut_dev;
8644 	spdk_blob_id		blobid;
8645 	uint64_t		start_thread = g_ut_thread_id;
8646 	bool			destroyed = false;
8647 	struct spdk_io_channel	*ch0, *ch1;
8648 	struct ut_esnap_channel	*ut_ch0, *ut_ch1;
8649 	const uint32_t		blocklen = bs->io_unit_size;
8650 	char			buf[blocklen * 4];
8651 
8652 	SPDK_CU_ASSERT_FATAL(g_ut_num_threads > 1);
8653 	set_thread(0);
8654 
8655 	/* Create the esnap clone */
8656 	ut_esnap_opts_init(blocklen, 2048, "add_remove_1", &destroyed, &ut_esnap_opts);
8657 	ut_spdk_blob_opts_init(&opts);
8658 	opts.esnap_id = &ut_esnap_opts;
8659 	opts.esnap_id_len = sizeof(ut_esnap_opts);
8660 	opts.num_clusters = 10;
8661 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
8662 	poll_threads();
8663 	CU_ASSERT(g_bserrno == 0);
8664 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
8665 	blobid = g_blobid;
8666 
8667 	/* Open the blob. No channels should be allocated yet. */
8668 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
8669 	poll_threads();
8670 	CU_ASSERT(g_bserrno == 0);
8671 	CU_ASSERT(g_blob != NULL);
8672 	blob = g_blob;
8673 	ut_dev = (struct ut_esnap_dev *)blob->back_bs_dev;
8674 	CU_ASSERT(ut_dev != NULL);
8675 	CU_ASSERT(ut_dev->num_channels == 0);
8676 
8677 	/* Allocate a blobstore channel on thread 0. The esnap channel is created lazily on the first read. */
8678 	ch0 = spdk_bs_alloc_io_channel(bs);
8679 	CU_ASSERT(ch0 != NULL);
8680 	ut_ch0 = ut_esnap_get_io_channel(ch0, blobid);
8681 	CU_ASSERT(ut_ch0 == NULL);
8682 	CU_ASSERT(ut_dev->num_channels == 0);
8683 	spdk_blob_io_read(blob, ch0, buf, 0, 1, bs_op_complete, NULL);
8684 	poll_threads();
8685 	CU_ASSERT(g_bserrno == 0);
8686 	CU_ASSERT(ut_dev->num_channels == 1);
8687 	ut_ch0 = ut_esnap_get_io_channel(ch0, blobid);
8688 	CU_ASSERT(ut_ch0 != NULL);
8689 	CU_ASSERT(ut_ch0->blocks_read == 1);
8690 
8691 	/* Allocate a blobstore channel on thread 1 and verify that its esnap channel is also created lazily. */
8692 	set_thread(1);
8693 	ch1 = spdk_bs_alloc_io_channel(bs);
8694 	CU_ASSERT(ch1 != NULL);
8695 	ut_ch1 = ut_esnap_get_io_channel(ch1, blobid);
8696 	CU_ASSERT(ut_ch1 == NULL);
8697 	CU_ASSERT(ut_dev->num_channels == 1);
8698 	spdk_blob_io_read(blob, ch1, buf, 0, 4, bs_op_complete, NULL);
8699 	poll_threads();
8700 	CU_ASSERT(g_bserrno == 0);
8701 	CU_ASSERT(ut_dev->num_channels == 2);
8702 	ut_ch1 = ut_esnap_get_io_channel(ch1, blobid);
8703 	CU_ASSERT(ut_ch1 != NULL);
8704 	CU_ASSERT(ut_ch1->blocks_read == 4);
8705 
8706 	/* Close the channel on thread 0 and verify the bs_dev channel is also gone. */
8707 	set_thread(0);
8708 	spdk_bs_free_io_channel(ch0);
8709 	poll_threads();
8710 	CU_ASSERT(ut_dev->num_channels == 1);
8711 
8712 	/* Close the blob. There is no outstanding IO so it should close right away. */
8713 	g_bserrno = 0xbad;
8714 	spdk_blob_close(blob, blob_op_complete, NULL);
8715 	poll_threads();
8716 	CU_ASSERT(g_bserrno == 0);
8717 	CU_ASSERT(destroyed);
8718 
8719 	/* The esnap channel for the blob should be gone now too. */
8720 	ut_ch1 = ut_esnap_get_io_channel(ch1, blobid);
8721 	CU_ASSERT(ut_ch1 == NULL);
8722 
8723 	/* Clean up */
8724 	set_thread(1);
8725 	spdk_bs_free_io_channel(ch1);
8726 	set_thread(start_thread);
8727 }
8728 
8729 static void
8730 freeze_done(void *cb_arg, int bserrno)
8731 {
8732 	uint32_t *freeze_cnt = cb_arg;
8733 
8734 	CU_ASSERT(bserrno == 0);
8735 	(*freeze_cnt)++;
8736 }
8737 
8738 static void
8739 unfreeze_done(void *cb_arg, int bserrno)
8740 {
8741 	uint32_t *unfreeze_cnt = cb_arg;
8742 
8743 	CU_ASSERT(bserrno == 0);
8744 	(*unfreeze_cnt)++;
8745 }
8746 
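/* Verify blob_freeze_io()/blob_unfreeze_io() reference counting with nested calls; each caller
 * gets its completion only after the threads have been polled and for_each_channel() has run. */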
8747 static void
8748 blob_nested_freezes(void)
8749 {
8750 	struct spdk_blob_store *bs = g_bs;
8751 	struct spdk_blob *blob;
8752 	struct spdk_io_channel *channel[2];
8753 	struct spdk_blob_opts opts;
8754 	uint32_t freeze_cnt, unfreeze_cnt;
8755 	int i;
8756 
8757 	for (i = 0; i < 2; i++) {
8758 		set_thread(i);
8759 		channel[i] = spdk_bs_alloc_io_channel(bs);
8760 		SPDK_CU_ASSERT_FATAL(channel[i] != NULL);
8761 	}
8762 
8763 	set_thread(0);
8764 
8765 	ut_spdk_blob_opts_init(&opts);
8766 	blob = ut_blob_create_and_open(bs, &opts);
8767 
8768 	/* First just test a single freeze/unfreeze. */
8769 	freeze_cnt = 0;
8770 	unfreeze_cnt = 0;
8771 	CU_ASSERT(blob->frozen_refcnt == 0);
8772 	blob_freeze_io(blob, freeze_done, &freeze_cnt);
8773 	CU_ASSERT(blob->frozen_refcnt == 1);
8774 	CU_ASSERT(freeze_cnt == 0);
8775 	poll_threads();
8776 	CU_ASSERT(freeze_cnt == 1);
8777 	blob_unfreeze_io(blob, unfreeze_done, &unfreeze_cnt);
8778 	CU_ASSERT(blob->frozen_refcnt == 0);
8779 	CU_ASSERT(unfreeze_cnt == 0);
8780 	poll_threads();
8781 	CU_ASSERT(unfreeze_cnt == 1);
8782 
8783 	/* Now nest multiple freeze/unfreeze operations.  We should
8784 	 * expect a callback for each operation, but only after
8785 	 * the threads have been polled to ensure a for_each_channel()
8786 	 * was executed.
8787 	 */
8788 	freeze_cnt = 0;
8789 	unfreeze_cnt = 0;
8790 	CU_ASSERT(blob->frozen_refcnt == 0);
8791 	blob_freeze_io(blob, freeze_done, &freeze_cnt);
8792 	CU_ASSERT(blob->frozen_refcnt == 1);
8793 	CU_ASSERT(freeze_cnt == 0);
8794 	blob_freeze_io(blob, freeze_done, &freeze_cnt);
8795 	CU_ASSERT(blob->frozen_refcnt == 2);
8796 	CU_ASSERT(freeze_cnt == 0);
8797 	poll_threads();
8798 	CU_ASSERT(freeze_cnt == 2);
8799 	blob_unfreeze_io(blob, unfreeze_done, &unfreeze_cnt);
8800 	CU_ASSERT(blob->frozen_refcnt == 1);
8801 	CU_ASSERT(unfreeze_cnt == 0);
8802 	blob_unfreeze_io(blob, unfreeze_done, &unfreeze_cnt);
8803 	CU_ASSERT(blob->frozen_refcnt == 0);
8804 	CU_ASSERT(unfreeze_cnt == 0);
8805 	poll_threads();
8806 	CU_ASSERT(unfreeze_cnt == 2);
8807 
8808 	for (i = 0; i < 2; i++) {
8809 		set_thread(i);
8810 		spdk_bs_free_io_channel(channel[i]);
8811 	}
8812 	set_thread(0);
8813 	ut_blob_close_and_delete(bs, blob);
8814 
8815 	poll_threads();
8816 	g_blob = NULL;
8817 	g_blobid = 0;
8818 }
8819 
8820 static void
8821 blob_ext_md_pages(void)
8822 {
8823 	struct spdk_blob_store *bs;
8824 	struct spdk_bs_dev *dev;
8825 	struct spdk_blob *blob;
8826 	struct spdk_blob_opts opts;
8827 	struct spdk_bs_opts bs_opts;
8828 	uint64_t free_clusters;
8829 
8830 	dev = init_dev();
8831 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
8832 	snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");
8833 	/* Issue #2932 was a bug in how we use bs_allocate_cluster() during resize.
8834 	 * Reproducing it requires num_md_pages to be much smaller than the number of clusters.
8835 	 * Make sure we can create a blob that uses all of the free clusters.
8836 	 */
8837 	bs_opts.cluster_sz = 65536;
8838 	bs_opts.num_md_pages = 16;
8839 
8840 	/* Initialize a new blob store */
8841 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
8842 	poll_threads();
8843 	CU_ASSERT(g_bserrno == 0);
8844 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
8845 	bs = g_bs;
8846 
8847 	free_clusters = spdk_bs_free_cluster_count(bs);
8848 
8849 	ut_spdk_blob_opts_init(&opts);
8850 	opts.num_clusters = free_clusters;
8851 
8852 	blob = ut_blob_create_and_open(bs, &opts);
8853 	spdk_blob_close(blob, blob_op_complete, NULL);
8854 	CU_ASSERT(g_bserrno == 0);
8855 
8856 	spdk_bs_unload(bs, bs_op_complete, NULL);
8857 	poll_threads();
8858 	CU_ASSERT(g_bserrno == 0);
8859 	g_bs = NULL;
8860 }
8861 
8862 static void
8863 blob_esnap_clone_snapshot(void)
8864 {
8865 	/*
8866 	 * When a snapshot is created, the blob that is being snapped becomes
8867 	 * the leaf node (a clone of the snapshot) and the newly created
8868 	 * snapshot sits between the snapped blob and the external snapshot.
8869 	 *
8870 	 * Before creating snap1
8871 	 *
8872 	 *   ,--------.     ,----------.
8873 	 *   |  blob  |     |  vbdev   |
8874 	 *   | blob1  |<----| nvme1n42 |
8875 	 *   |  (rw)  |     |   (ro)   |
8876 	 *   `--------'     `----------'
8877 	 *       Figure 1
8878 	 *
8879 	 * After creating snap1
8880 	 *
8881 	 *   ,--------.     ,--------.     ,----------.
8882 	 *   |  blob  |     |  blob  |     |  vbdev   |
8883 	 *   | blob1  |<----| snap1  |<----| nvme1n42 |
8884 	 *   |  (rw)  |     |  (ro)  |     |   (ro)   |
8885 	 *   `--------'     `--------'     `----------'
8886 	 *       Figure 2
8887 	 *
8888 	 * Starting from Figure 2, if snap1 is removed, the chain reverts to
8889 	 * what it looks like in Figure 1.
8890 	 *
8891 	 * Starting from Figure 2, if blob1 is removed, the chain becomes:
8892 	 *
8893 	 *   ,--------.     ,----------.
8894 	 *   |  blob  |     |  vbdev   |
8895 	 *   | snap1  |<----| nvme1n42 |
8896 	 *   |  (ro)  |     |   (ro)   |
8897 	 *   `--------'     `----------'
8898 	 *       Figure 3
8899 	 *
8900 	 * In each case, the blob pointed to by the nvme vbdev is considered
8901 	 * the "esnap clone".  The esnap clone must have:
8902 	 *
8903 	 *   - XATTR_INTERNAL for BLOB_EXTERNAL_SNAPSHOT_ID (e.g. name or UUID)
8904 	 *   - blob->invalid_flags must contain SPDK_BLOB_EXTERNAL_SNAPSHOT
8905 	 *   - blob->parent_id must be SPDK_BLOBID_EXTERNAL_SNAPSHOT.
8906 	 *
8907 	 * No other blob that descends from the esnap clone may have any of
8908 	 * those set.
8909 	 */
8910 	struct spdk_blob_store	*bs = g_bs;
8911 	const uint32_t		blocklen = bs->io_unit_size;
8912 	struct spdk_blob_opts	opts;
8913 	struct ut_esnap_opts	esnap_opts;
8914 	struct spdk_blob	*blob, *snap_blob;
8915 	spdk_blob_id		blobid, snap_blobid;
8916 	bool			destroyed = false;
8917 
8918 	/* Create the esnap clone */
8919 	ut_esnap_opts_init(blocklen, 2048, __func__, &destroyed, &esnap_opts);
8920 	ut_spdk_blob_opts_init(&opts);
8921 	opts.esnap_id = &esnap_opts;
8922 	opts.esnap_id_len = sizeof(esnap_opts);
8923 	opts.num_clusters = 10;
8924 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
8925 	poll_threads();
8926 	CU_ASSERT(g_bserrno == 0);
8927 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
8928 	blobid = g_blobid;
8929 
8930 	/* Open the blob. */
8931 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
8932 	poll_threads();
8933 	CU_ASSERT(g_bserrno == 0);
8934 	CU_ASSERT(g_blob != NULL);
8935 	blob = g_blob;
8936 	UT_ASSERT_IS_ESNAP_CLONE(blob, &esnap_opts, sizeof(esnap_opts));
8937 
8938 	/*
8939 	 * Create a snapshot of the blob. The snapshot becomes the esnap clone.
8940 	 */
8941 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
8942 	poll_threads();
8943 	CU_ASSERT(g_bserrno == 0);
8944 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
8945 	snap_blobid = g_blobid;
8946 
8947 	spdk_bs_open_blob(bs, snap_blobid, blob_op_with_handle_complete, NULL);
8948 	poll_threads();
8949 	CU_ASSERT(g_bserrno == 0);
8950 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
8951 	snap_blob = g_blob;
8952 
8953 	UT_ASSERT_IS_NOT_ESNAP_CLONE(blob);
8954 	UT_ASSERT_IS_ESNAP_CLONE(snap_blob, &esnap_opts, sizeof(esnap_opts));
8955 
8956 	/*
8957 	 * Delete the snapshot.  The original blob becomes the esnap clone.
8958 	 */
8959 	ut_blob_close_and_delete(bs, snap_blob);
8960 	snap_blob = NULL;
8961 	snap_blobid = SPDK_BLOBID_INVALID;
8962 	UT_ASSERT_IS_ESNAP_CLONE(blob, &esnap_opts, sizeof(esnap_opts));
8963 
8964 	/*
8965 	 * Create the snapshot again, then delete the original blob.  The
8966 	 * snapshot should survive as the esnap clone.
8967 	 */
8968 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
8969 	poll_threads();
8970 	CU_ASSERT(g_bserrno == 0);
8971 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
8972 	snap_blobid = g_blobid;
8973 
8974 	spdk_bs_open_blob(bs, snap_blobid, blob_op_with_handle_complete, NULL);
8975 	poll_threads();
8976 	CU_ASSERT(g_bserrno == 0);
8977 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
8978 	snap_blob = g_blob;
8979 
8980 	UT_ASSERT_IS_NOT_ESNAP_CLONE(blob);
8981 	UT_ASSERT_IS_ESNAP_CLONE(snap_blob, &esnap_opts, sizeof(esnap_opts));
8982 
8983 	ut_blob_close_and_delete(bs, blob);
8984 	blob = NULL;
8985 	blobid = SPDK_BLOBID_INVALID;
8986 	UT_ASSERT_IS_ESNAP_CLONE(snap_blob, &esnap_opts, sizeof(esnap_opts));
8987 
8988 	/*
8989 	 * Clone the snapshot.  The snapshot continues to be the esnap clone.
8990 	 */
8991 	spdk_bs_create_clone(bs, snap_blobid, NULL, blob_op_with_id_complete, NULL);
8992 	poll_threads();
8993 	CU_ASSERT(g_bserrno == 0);
8994 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
8995 	blobid = g_blobid;
8996 
8997 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
8998 	poll_threads();
8999 	CU_ASSERT(g_bserrno == 0);
9000 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
9001 	blob = g_blob;
9002 
9003 	UT_ASSERT_IS_NOT_ESNAP_CLONE(blob);
9004 	UT_ASSERT_IS_ESNAP_CLONE(snap_blob, &esnap_opts, sizeof(esnap_opts));
9005 
9006 	/*
9007 	 * Delete the snapshot. The clone becomes the esnap clone.
9008 	 */
9009 	ut_blob_close_and_delete(bs, snap_blob);
9010 	snap_blob = NULL;
9011 	snap_blobid = SPDK_BLOBID_INVALID;
9012 	UT_ASSERT_IS_ESNAP_CLONE(blob, &esnap_opts, sizeof(esnap_opts));
9013 
9014 	/*
9015 	 * Clean up
9016 	 */
9017 	ut_blob_close_and_delete(bs, blob);
9018 }
9019 
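/* Shared body for the inflate and decouple-parent tests: hydrate an esnap clone and verify that it
 * is no longer an esnap clone yet still reads back the original esnap content. Returns the number
 * of new CUnit assertion failures recorded during the run. */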
9020 static uint64_t
9021 _blob_esnap_clone_hydrate(bool inflate)
9022 {
9023 	struct spdk_blob_store	*bs = g_bs;
9024 	struct spdk_blob_opts	opts;
9025 	struct ut_esnap_opts	esnap_opts;
9026 	struct spdk_blob	*blob;
9027 	spdk_blob_id		blobid;
9028 	struct spdk_io_channel *channel;
9029 	bool			destroyed = false;
9030 	const uint32_t		blocklen = spdk_bs_get_io_unit_size(bs);
9031 	const uint32_t		cluster_sz = spdk_bs_get_cluster_size(bs);
9032 	const uint64_t		esnap_num_clusters = 4;
9033 	const uint32_t		esnap_sz = cluster_sz * esnap_num_clusters;
9034 	const uint64_t		esnap_num_blocks = esnap_sz / blocklen;
9035 	uint64_t		num_failures = CU_get_number_of_failures();
9036 
9037 	channel = spdk_bs_alloc_io_channel(bs);
9038 	SPDK_CU_ASSERT_FATAL(channel != NULL);
9039 
9040 	/* Create the esnap clone */
9041 	ut_spdk_blob_opts_init(&opts);
9042 	ut_esnap_opts_init(blocklen, esnap_num_blocks, __func__, &destroyed, &esnap_opts);
9043 	opts.esnap_id = &esnap_opts;
9044 	opts.esnap_id_len = sizeof(esnap_opts);
9045 	opts.num_clusters = esnap_num_clusters;
9046 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
9047 	poll_threads();
9048 	CU_ASSERT(g_bserrno == 0);
9049 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
9050 	blobid = g_blobid;
9051 
9052 	/* Open the esnap clone */
9053 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
9054 	poll_threads();
9055 	CU_ASSERT(g_bserrno == 0);
9056 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
9057 	blob = g_blob;
9058 	UT_ASSERT_IS_ESNAP_CLONE(blob, &esnap_opts, sizeof(esnap_opts));
9059 
9060 	/*
9061 	 * Inflate or decouple the blob, then verify that it is no longer an esnap clone and has
9062 	 * the right content.
9063 	 */
9064 	if (inflate) {
9065 		spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
9066 	} else {
9067 		spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
9068 	}
9069 	poll_threads();
9070 	CU_ASSERT(g_bserrno == 0);
9071 	UT_ASSERT_IS_NOT_ESNAP_CLONE(blob);
9072 	CU_ASSERT(blob_esnap_verify_contents(blob, channel, 0, esnap_sz, esnap_sz, "read"));
9073 	ut_blob_close_and_delete(bs, blob);
9074 
9075 	/*
9076 	 * Clean up
9077 	 */
9078 	spdk_bs_free_io_channel(channel);
9079 	poll_threads();
9080 
9081 	/* Return number of new failures */
9082 	return CU_get_number_of_failures() - num_failures;
9083 }
9084 
9085 static void
9086 blob_esnap_clone_inflate(void)
9087 {
9088 	_blob_esnap_clone_hydrate(true);
9089 }
9090 
9091 static void
9092 blob_esnap_clone_decouple(void)
9093 {
9094 	_blob_esnap_clone_hydrate(false);
9095 }
9096 
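/* Verify spdk_blob_set_esnap_bs_dev(): replacing the external snapshot device destroys the old
 * bs_dev, including while io channels are active, and leaves the blob pointing at the new device. */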
9097 static void
9098 blob_esnap_hotplug(void)
9099 {
9100 	struct spdk_blob_store	*bs = g_bs;
9101 	struct ut_esnap_opts	esnap1_opts, esnap2_opts;
9102 	struct spdk_blob_opts	opts;
9103 	struct spdk_blob	*blob;
9104 	struct spdk_bs_dev	*bs_dev;
9105 	struct ut_esnap_dev	*esnap_dev;
9106 	uint32_t		cluster_sz = spdk_bs_get_cluster_size(bs);
9107 	uint32_t		block_sz = spdk_bs_get_io_unit_size(bs);
9108 	const uint32_t		esnap_num_clusters = 4;
9109 	uint64_t		esnap_num_blocks = cluster_sz * esnap_num_clusters / block_sz;
9110 	bool			destroyed1 = false, destroyed2 = false;
9111 	uint64_t		start_thread = g_ut_thread_id;
9112 	struct spdk_io_channel	*ch0, *ch1;
9113 	char			buf[block_sz];
9114 
9115 	/* Create and open an esnap clone blob */
9116 	ut_spdk_blob_opts_init(&opts);
9117 	ut_esnap_opts_init(block_sz, esnap_num_blocks, "esnap1", &destroyed1, &esnap1_opts);
9118 	opts.esnap_id = &esnap1_opts;
9119 	opts.esnap_id_len = sizeof(esnap1_opts);
9120 	opts.num_clusters = esnap_num_clusters;
9121 	blob = ut_blob_create_and_open(bs, &opts);
9122 	CU_ASSERT(blob != NULL);
9123 	CU_ASSERT(spdk_blob_is_esnap_clone(blob));
9124 	SPDK_CU_ASSERT_FATAL(blob->back_bs_dev != NULL);
9125 	esnap_dev = (struct ut_esnap_dev *)blob->back_bs_dev;
9126 	CU_ASSERT(strcmp(esnap_dev->ut_opts.name, "esnap1") == 0);
9127 
9128 	/* Replace the external snapshot */
9129 	ut_esnap_opts_init(block_sz, esnap_num_blocks, "esnap2", &destroyed2, &esnap2_opts);
9130 	bs_dev = ut_esnap_dev_alloc(&esnap2_opts);
9131 	CU_ASSERT(!destroyed1);
9132 	CU_ASSERT(!destroyed2);
9133 	g_bserrno = 0xbad;
9134 	spdk_blob_set_esnap_bs_dev(blob, bs_dev, bs_op_complete, NULL);
9135 	poll_threads();
9136 	CU_ASSERT(g_bserrno == 0);
9137 	CU_ASSERT(destroyed1);
9138 	CU_ASSERT(!destroyed2);
9139 	SPDK_CU_ASSERT_FATAL(bs_dev == blob->back_bs_dev);
9140 	SPDK_CU_ASSERT_FATAL(bs_dev == spdk_blob_get_esnap_bs_dev(blob));
9141 	esnap_dev = (struct ut_esnap_dev *)blob->back_bs_dev;
9142 	CU_ASSERT(strcmp(esnap_dev->ut_opts.name, "esnap2") == 0);
9143 
9144 	/* Create a couple channels */
9145 	set_thread(0);
9146 	ch0 = spdk_bs_alloc_io_channel(bs);
9147 	CU_ASSERT(ch0 != NULL);
9148 	spdk_blob_io_read(blob, ch0, buf, 0, 1, bs_op_complete, NULL);
9149 	set_thread(1);
9150 	ch1 = spdk_bs_alloc_io_channel(bs);
9151 	CU_ASSERT(ch1 != NULL);
9152 	spdk_blob_io_read(blob, ch1, buf, 0, 1, bs_op_complete, NULL);
9153 	set_thread(start_thread);
9154 	poll_threads();
9155 	CU_ASSERT(esnap_dev->num_channels == 2);
9156 
9157 	/* Replace the external snapshot */
9158 	ut_esnap_opts_init(block_sz, esnap_num_blocks, "esnap1a", &destroyed1, &esnap1_opts);
9159 	bs_dev = ut_esnap_dev_alloc(&esnap1_opts);
9160 	destroyed1 = destroyed2 = false;
9161 	g_bserrno = 0xbad;
9162 	spdk_blob_set_esnap_bs_dev(blob, bs_dev, bs_op_complete, NULL);
9163 	poll_threads();
9164 	CU_ASSERT(g_bserrno == 0);
9165 	CU_ASSERT(!destroyed1);
9166 	CU_ASSERT(destroyed2);
9167 	SPDK_CU_ASSERT_FATAL(blob->back_bs_dev != NULL);
9168 	esnap_dev = (struct ut_esnap_dev *)blob->back_bs_dev;
9169 	CU_ASSERT(strcmp(esnap_dev->ut_opts.name, "esnap1a") == 0);
9170 
9171 	/* Clean up */
9172 	set_thread(0);
9173 	spdk_bs_free_io_channel(ch0);
9174 	set_thread(1);
9175 	spdk_bs_free_io_channel(ch1);
9176 	set_thread(start_thread);
9177 	g_bserrno = 0xbad;
9178 	spdk_blob_close(blob, bs_op_complete, NULL);
9179 	poll_threads();
9180 	CU_ASSERT(g_bserrno == 0);
9181 }
9182 
9183 static bool g_blob_is_degraded;
9184 static int g_blob_is_degraded_called;
9185 
9186 static bool
9187 _blob_is_degraded(struct spdk_bs_dev *dev)
9188 {
9189 	g_blob_is_degraded_called++;
9190 	return g_blob_is_degraded;
9191 }
9192 
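/* Walk through the combinations of bs->dev->is_degraded and back_bs_dev->is_degraded being unset,
 * false or true, and confirm spdk_blob_is_degraded() consults exactly the callbacks that exist. */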
9193 static void
9194 blob_is_degraded(void)
9195 {
9196 	struct spdk_bs_dev bs_is_degraded_null = { 0 };
9197 	struct spdk_bs_dev bs_is_degraded = { .is_degraded = _blob_is_degraded };
9198 
9199 	/* No back_bs_dev, no bs->dev->is_degraded */
9200 	g_blob_is_degraded_called = 0;
9201 	CU_ASSERT(!spdk_blob_is_degraded(g_blob));
9202 	CU_ASSERT(g_blob_is_degraded_called == 0);
9203 
9204 	/* No back_bs_dev, blobstore device degraded */
9205 	g_bs->dev->is_degraded = _blob_is_degraded;
9206 	g_blob_is_degraded_called = 0;
9207 	g_blob_is_degraded = true;
9208 	CU_ASSERT(spdk_blob_is_degraded(g_blob));
9209 	CU_ASSERT(g_blob_is_degraded_called == 1);
9210 
9211 	/* No back_bs_dev, blobstore device not degraded */
9212 	g_bs->dev->is_degraded = _blob_is_degraded;
9213 	g_blob_is_degraded_called = 0;
9214 	g_blob_is_degraded = false;
9215 	CU_ASSERT(!spdk_blob_is_degraded(g_blob));
9216 	CU_ASSERT(g_blob_is_degraded_called == 1);
9217 
9218 	/* back_bs_dev does not define is_degraded, no bs->dev->is_degraded */
9219 	g_bs->dev->is_degraded = NULL;
9220 	g_blob->back_bs_dev = &bs_is_degraded_null;
9221 	g_blob_is_degraded_called = 0;
9222 	g_blob_is_degraded = false;
9223 	CU_ASSERT(!spdk_blob_is_degraded(g_blob));
9224 	CU_ASSERT(g_blob_is_degraded_called == 0);
9225 
9226 	/* back_bs_dev is not degraded, no bs->dev->is_degraded */
9227 	g_bs->dev->is_degraded = NULL;
9228 	g_blob->back_bs_dev = &bs_is_degraded;
9229 	g_blob_is_degraded_called = 0;
9230 	g_blob_is_degraded = false;
9231 	CU_ASSERT(!spdk_blob_is_degraded(g_blob));
9232 	CU_ASSERT(g_blob_is_degraded_called == 1);
9233 
9234 	/* back_bs_dev is degraded, no bs->dev->is_degraded */
9235 	g_bs->dev->is_degraded = NULL;
9236 	g_blob->back_bs_dev = &bs_is_degraded;
9237 	g_blob_is_degraded_called = 0;
9238 	g_blob_is_degraded = true;
9239 	CU_ASSERT(spdk_blob_is_degraded(g_blob));
9240 	CU_ASSERT(g_blob_is_degraded_called == 1);
9241 
9242 	/* back_bs_dev is not degraded, blobstore device is not degraded */
9243 	g_bs->dev->is_degraded = _blob_is_degraded;
9244 	g_blob->back_bs_dev = &bs_is_degraded;
9245 	g_blob_is_degraded_called = 0;
9246 	g_blob_is_degraded = false;
9247 	CU_ASSERT(!spdk_blob_is_degraded(g_blob));
9248 	CU_ASSERT(g_blob_is_degraded_called == 2);
9249 
9250 	g_blob->back_bs_dev = NULL;
9251 }
9252 
9253 /* Resize a blob that is a clone created from a snapshot. Verify reads and writes to
9254  * the expanded clone blob. Then inflate the clone blob. */
9255 static void
9256 blob_clone_resize(void)
9257 {
9258 	struct spdk_blob_store *bs = g_bs;
9259 	struct spdk_blob_opts opts;
9260 	struct spdk_blob *blob, *clone, *snap_blob, *snap_blob_rsz;
9261 	spdk_blob_id blobid, cloneid, snapid1, snapid2;
9262 	uint64_t pages_per_cluster;
9263 	uint8_t payload_read[bs->dev->blocklen];
9264 	uint8_t payload_write[bs->dev->blocklen];
9265 	struct spdk_io_channel *channel;
9266 	uint64_t free_clusters;
9267 
9268 	channel = spdk_bs_alloc_io_channel(bs);
9269 	SPDK_CU_ASSERT_FATAL(channel != NULL);
9270 
9271 	pages_per_cluster = spdk_bs_get_cluster_size(bs) / spdk_bs_get_page_size(bs);
9272 
9273 	/* Create blob with 10 clusters */
9274 	ut_spdk_blob_opts_init(&opts);
9275 	opts.num_clusters = 10;
9276 
9277 	blob = ut_blob_create_and_open(bs, &opts);
9278 	blobid = spdk_blob_get_id(blob);
9279 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
9280 
9281 	/* Create snapshot */
9282 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
9283 	poll_threads();
9284 	CU_ASSERT(g_bserrno == 0);
9285 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
9286 	snapid1 = g_blobid;
9287 
9288 	spdk_bs_create_clone(bs, snapid1, NULL, blob_op_with_id_complete, NULL);
9289 	poll_threads();
9290 	CU_ASSERT(g_bserrno == 0);
9291 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
9292 	cloneid = g_blobid;
9293 
9294 	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
9295 	poll_threads();
9296 	CU_ASSERT(g_bserrno == 0);
9297 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
9298 	clone = g_blob;
9299 	CU_ASSERT(spdk_blob_get_num_clusters(clone) == 10);
9300 
9301 	g_bserrno = -1;
9302 	spdk_blob_resize(clone, 20, blob_op_complete, NULL);
9303 	poll_threads();
9304 	CU_ASSERT(g_bserrno == 0);
9305 	CU_ASSERT(spdk_blob_get_num_clusters(clone) == 20);
9306 
9307 	/* Create another snapshot after resizing the clone */
9308 	spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL);
9309 	poll_threads();
9310 	CU_ASSERT(g_bserrno == 0);
9311 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
9312 	snapid2 = g_blobid;
9313 
9314 	/* Open the snapshot blobs */
9315 	spdk_bs_open_blob(bs, snapid1, blob_op_with_handle_complete, NULL);
9316 	CU_ASSERT(g_bserrno == 0);
9317 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
9318 	snap_blob = g_blob;
9319 	CU_ASSERT(snap_blob->data_ro == true);
9320 	CU_ASSERT(snap_blob->md_ro == true);
9321 	CU_ASSERT(spdk_blob_get_num_clusters(snap_blob) == 10);
9322 
9323 	spdk_bs_open_blob(bs, snapid2, blob_op_with_handle_complete, NULL);
9324 	CU_ASSERT(g_bserrno == 0);
9325 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
9326 	snap_blob_rsz = g_blob;
9327 	CU_ASSERT(snap_blob_rsz->data_ro == true);
9328 	CU_ASSERT(snap_blob_rsz->md_ro == true);
9329 	CU_ASSERT(spdk_blob_get_num_clusters(snap_blob_rsz) == 20);
9330 
9331 	/* Confirm that clone is backed by snap_blob_rsz, and snap_blob_rsz is backed by snap_blob */
9332 	SPDK_CU_ASSERT_FATAL(snap_blob->back_bs_dev == NULL);
9333 	SPDK_CU_ASSERT_FATAL(blob->back_bs_dev != NULL);
9334 	SPDK_CU_ASSERT_FATAL(snap_blob_rsz->back_bs_dev != NULL);
9335 
9336 	/* Write and read from pre-resize ranges */
9337 	g_bserrno = -1;
9338 	memset(payload_write, 0xE5, sizeof(payload_write));
9339 	spdk_blob_io_write(clone, channel, payload_write, 5 * pages_per_cluster, 1, blob_op_complete, NULL);
9340 	poll_threads();
9341 	CU_ASSERT(g_bserrno == 0);
9342 
9343 	g_bserrno = -1;
9344 	memset(payload_read, 0x00, sizeof(payload_read));
9345 	spdk_blob_io_read(clone, channel, payload_read, 5 * pages_per_cluster, 1, blob_op_complete, NULL);
9346 	poll_threads();
9347 	CU_ASSERT(g_bserrno == 0);
9348 	CU_ASSERT(memcmp(payload_write, payload_read, BLOCKLEN) == 0);
9349 
9350 	/* Write and read from post-resize ranges */
9351 	g_bserrno = -1;
9352 	memset(payload_write, 0xE5, sizeof(payload_write));
9353 	spdk_blob_io_write(clone, channel, payload_write, 15 * pages_per_cluster, 1, blob_op_complete,
9354 			   NULL);
9355 	poll_threads();
9356 	CU_ASSERT(g_bserrno == 0);
9357 
9358 	g_bserrno = -1;
9359 	memset(payload_read, 0x00, sizeof(payload_read));
9360 	spdk_blob_io_read(clone, channel, payload_read, 15 * pages_per_cluster, 1, blob_op_complete, NULL);
9361 	poll_threads();
9362 	CU_ASSERT(g_bserrno == 0);
9363 	CU_ASSERT(memcmp(payload_write, payload_read, bs->dev->blocklen) == 0);
9364 
9365 	/* Now do full blob inflation of the resized blob/clone. */
9366 	free_clusters = spdk_bs_free_cluster_count(bs);
9367 	spdk_bs_inflate_blob(bs, channel, cloneid, blob_op_complete, NULL);
9368 	poll_threads();
9369 	CU_ASSERT(g_bserrno == 0);
9370 	/* We wrote to 2 clusters earlier; the remaining 18 clusters in the
9371 	 * blob should get allocated after inflation. */
9372 	CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 18);
9373 
9374 	spdk_blob_close(clone, blob_op_complete, NULL);
9375 	poll_threads();
9376 	CU_ASSERT(g_bserrno == 0);
9377 
9378 	spdk_blob_close(snap_blob, blob_op_complete, NULL);
9379 	poll_threads();
9380 	CU_ASSERT(g_bserrno == 0);
9381 
9382 	spdk_blob_close(snap_blob_rsz, blob_op_complete, NULL);
9383 	poll_threads();
9384 	CU_ASSERT(g_bserrno == 0);
9385 
9386 	ut_blob_close_and_delete(bs, blob);
9387 
9388 	spdk_bs_free_io_channel(channel);
9389 }
9390 
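/*
 * Resize an esnap clone to twice the size of its external snapshot, then write
 * every block in turn and verify that the neighboring blocks still read back
 * with the expected contents.
 */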
9392 static void
9393 blob_esnap_clone_resize(void)
9394 {
9395 	struct spdk_bs_dev *dev;
9396 	struct spdk_blob_store *bs;
9397 	struct spdk_bs_opts bsopts;
9398 	struct spdk_blob_opts opts;
9399 	struct ut_esnap_opts esnap_opts;
9400 	struct spdk_blob *blob;
9401 	uint32_t block, esnap_blksz = 512, bs_blksz = 512;
9402 	const uint32_t cluster_sz = 4 * g_phys_blocklen;
9403 	const uint64_t esnap_num_clusters = 4;
9404 	const uint32_t esnap_sz = cluster_sz * esnap_num_clusters;
9405 	const uint64_t esnap_num_blocks = esnap_sz / esnap_blksz;
9406 	uint64_t blob_num_blocks = esnap_sz / bs_blksz;
9407 	struct spdk_io_channel *bs_ch;
9408 
9409 	spdk_bs_opts_init(&bsopts, sizeof(bsopts));
9410 	bsopts.cluster_sz = cluster_sz;
9411 	bsopts.esnap_bs_dev_create = ut_esnap_create;
9412 	/* Create device with desired block size */
9413 	dev = init_dev();
9414 	dev->blocklen = bs_blksz;
9415 	dev->blockcnt = DEV_BUFFER_SIZE / dev->blocklen;
9416 	/* Initialize a new blob store */
9417 	spdk_bs_init(dev, &bsopts, bs_op_with_handle_complete, NULL);
9418 	poll_threads();
9419 	CU_ASSERT(g_bserrno == 0);
9420 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
9421 	SPDK_CU_ASSERT_FATAL(g_bs->io_unit_size == bs_blksz);
9422 	bs = g_bs;
9423 
9424 	bs_ch = spdk_bs_alloc_io_channel(bs);
9425 	SPDK_CU_ASSERT_FATAL(bs_ch != NULL);
9426 
	/* Create and open the esnap clone */
9428 	ut_spdk_blob_opts_init(&opts);
9429 	ut_esnap_opts_init(esnap_blksz, esnap_num_blocks, __func__, NULL, &esnap_opts);
9430 	opts.esnap_id = &esnap_opts;
9431 	opts.esnap_id_len = sizeof(esnap_opts);
9432 	opts.num_clusters = esnap_num_clusters;
9433 	blob = ut_blob_create_and_open(bs, &opts);
9434 	SPDK_CU_ASSERT_FATAL(blob != NULL);
9435 
9436 	g_bserrno = -1;
9437 	spdk_blob_resize(blob, esnap_num_clusters * 2, blob_op_complete, NULL);
9438 	poll_threads();
9439 	CU_ASSERT(g_bserrno == 0);
9440 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == esnap_num_clusters * 2);
9441 
9442 	/* Write one blob block at a time; verify that the surrounding blocks are OK */
9443 	blob_num_blocks = (spdk_blob_get_num_clusters(blob) * cluster_sz) / bs_blksz;
9444 	for (block = 0; block < blob_num_blocks; block++) {
9445 		char buf[bs_blksz];
9446 		union ut_word word;
9447 		word.f.blob_id = 0xfedcba90;
9448 		word.f.lba = block;
9449 		ut_memset8(buf, word.num, bs_blksz);
9450 		spdk_blob_io_write(blob, bs_ch, buf, block, 1, bs_op_complete, NULL);
9451 		poll_threads();
9452 		CU_ASSERT(g_bserrno == 0);
9453 		if (g_bserrno != 0) {
9454 			break;
9455 		}
9456 		/* Read and verify the block before the current block */
9457 		if (block != 0) {
9458 			spdk_blob_io_read(blob, bs_ch, buf, block - 1, 1, bs_op_complete, NULL);
9459 			poll_threads();
9460 			CU_ASSERT(g_bserrno == 0);
9461 			if (g_bserrno != 0) {
9462 				break;
9463 			}
9464 			CU_ASSERT(ut_esnap_content_is_correct(buf, bs_blksz, word.f.blob_id,
9465 							      (block - 1) * bs_blksz, bs_blksz));
9466 		}
9467 		/* Read and verify the current block */
9468 		spdk_blob_io_read(blob, bs_ch, buf, block, 1, bs_op_complete, NULL);
9469 		poll_threads();
9470 		CU_ASSERT(g_bserrno == 0);
9471 		if (g_bserrno != 0) {
9472 			break;
9473 		}
9474 		CU_ASSERT(ut_esnap_content_is_correct(buf, bs_blksz, word.f.blob_id,
9475 						      block * bs_blksz, bs_blksz));
9476 		/* Check the block that follows */
9477 		if (block + 1 < blob_num_blocks) {
9478 			g_bserrno = 0xbad;
9479 			spdk_blob_io_read(blob, bs_ch, buf, block + 1, 1, bs_op_complete, NULL);
9480 			poll_threads();
9481 			CU_ASSERT(g_bserrno == 0);
9482 			if (g_bserrno != 0) {
9483 				break;
9484 			}
9485 			CU_ASSERT(ut_esnap_content_is_correct(buf, bs_blksz, blob->id,
9486 							      (block + 1) * bs_blksz,
9487 							      esnap_blksz));
9488 		}
9489 	}
9490 	/* Clean up */
9491 	spdk_bs_free_io_channel(bs_ch);
9492 	g_bserrno = 0xbad;
9493 	spdk_blob_close(blob, blob_op_complete, NULL);
9494 	poll_threads();
9495 	CU_ASSERT(g_bserrno == 0);
9496 	spdk_bs_unload(g_bs, bs_op_complete, NULL);
9497 	poll_threads();
9498 	CU_ASSERT(g_bserrno == 0);
9499 	g_bs = NULL;
9500 	memset(g_dev_buffer, 0, DEV_BUFFER_SIZE);
9501 }
9502 
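/* Completion callback used for direct I/O on the external bs_dev in the shallow copy test */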
9503 static void
9504 bs_dev_io_complete_cb(struct spdk_io_channel *channel, void *cb_arg, int bserrno)
9505 {
9506 	g_bserrno = bserrno;
9507 }
9508 
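/*
 * Verify that a shallow copy writes only the clusters allocated directly in the
 * blob to the external device, leaving the regions owned by its snapshot untouched.
 */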
9509 static void
9510 blob_shallow_copy(void)
9511 {
9512 	struct spdk_blob_store *bs = g_bs;
9513 	struct spdk_blob_opts blob_opts;
9514 	struct spdk_blob *blob;
9515 	spdk_blob_id blobid;
9516 	uint64_t num_clusters = 4;
9517 	struct spdk_bs_dev *ext_dev;
9518 	struct spdk_bs_dev_cb_args ext_args;
9519 	struct spdk_io_channel *bdev_ch, *blob_ch;
9520 	uint8_t buf1[DEV_BUFFER_BLOCKLEN];
9521 	uint8_t buf2[DEV_BUFFER_BLOCKLEN];
9522 	uint64_t io_units_per_cluster;
9523 	uint64_t offset;
9524 	int rc;
9525 
9526 	blob_ch = spdk_bs_alloc_io_channel(bs);
9527 	SPDK_CU_ASSERT_FATAL(blob_ch != NULL);
9528 
	/* Set the blob size and mark it thin provisioned */
9530 	ut_spdk_blob_opts_init(&blob_opts);
9531 	blob_opts.thin_provision = true;
9532 	blob_opts.num_clusters = num_clusters;
9533 
9534 	/* Create a blob */
9535 	blob = ut_blob_create_and_open(bs, &blob_opts);
9536 	SPDK_CU_ASSERT_FATAL(blob != NULL);
9537 	blobid = spdk_blob_get_id(blob);
9538 	io_units_per_cluster = bs_io_units_per_cluster(blob);
9539 
	/* Write to clusters 2 and 4 of the blob */
9541 	for (offset = io_units_per_cluster; offset < 2 * io_units_per_cluster; offset++) {
9542 		memset(buf1, offset, DEV_BUFFER_BLOCKLEN);
9543 		spdk_blob_io_write(blob, blob_ch, buf1, offset, 1, blob_op_complete, NULL);
9544 		poll_threads();
9545 		CU_ASSERT(g_bserrno == 0);
9546 	}
9547 	for (offset = 3 * io_units_per_cluster; offset < 4 * io_units_per_cluster; offset++) {
9548 		memset(buf1, offset, DEV_BUFFER_BLOCKLEN);
9549 		spdk_blob_io_write(blob, blob_ch, buf1, offset, 1, blob_op_complete, NULL);
9550 		poll_threads();
9551 		CU_ASSERT(g_bserrno == 0);
9552 	}
9553 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 2);
9554 
9555 	/* Make a snapshot over blob */
9556 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
9557 	poll_threads();
9558 	CU_ASSERT(g_bserrno == 0);
9559 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);
9560 
	/* Write to clusters 1 and 3 of the blob */
9562 	for (offset = 0; offset < io_units_per_cluster; offset++) {
9563 		memset(buf1, offset, DEV_BUFFER_BLOCKLEN);
9564 		spdk_blob_io_write(blob, blob_ch, buf1, offset, 1, blob_op_complete, NULL);
9565 		poll_threads();
9566 		CU_ASSERT(g_bserrno == 0);
9567 	}
9568 	for (offset = 2 * io_units_per_cluster; offset < 3 * io_units_per_cluster; offset++) {
9569 		memset(buf1, offset, DEV_BUFFER_BLOCKLEN);
9570 		spdk_blob_io_write(blob, blob_ch, buf1, offset, 1, blob_op_complete, NULL);
9571 		poll_threads();
9572 		CU_ASSERT(g_bserrno == 0);
9573 	}
9574 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 2);
9575 
	/* Attempt a shallow copy while the blob is not read-only; it must fail with -EPERM */
9577 	ext_dev = init_ext_dev(num_clusters * 1024 * 1024, DEV_BUFFER_BLOCKLEN);
9578 	rc = spdk_bs_blob_shallow_copy(bs, blob_ch, blobid, ext_dev,
9579 				       blob_shallow_copy_status_cb, NULL,
9580 				       blob_op_complete, NULL);
9581 	CU_ASSERT(rc == 0);
9582 	poll_threads();
9583 	CU_ASSERT(g_bserrno == -EPERM);
9584 	ext_dev->destroy(ext_dev);
9585 
9586 	/* Set blob read only */
9587 	spdk_blob_set_read_only(blob);
9588 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
9589 	poll_threads();
9590 	CU_ASSERT(g_bserrno == 0);
9591 
	/* Shallow copy to a spdk_bs_dev that is too small */
9593 	ext_dev = init_ext_dev(1, DEV_BUFFER_BLOCKLEN);
9594 	rc = spdk_bs_blob_shallow_copy(bs, blob_ch, blobid, ext_dev,
9595 				       blob_shallow_copy_status_cb, NULL,
9596 				       blob_op_complete, NULL);
9597 	CU_ASSERT(rc == 0);
9598 	poll_threads();
9599 	CU_ASSERT(g_bserrno == -EINVAL);
9600 	ext_dev->destroy(ext_dev);
9601 
	/* Shallow copy to a spdk_bs_dev with a mismatched block length */
9603 	ext_dev = init_ext_dev(num_clusters * 1024 * 1024, DEV_BUFFER_BLOCKLEN * 2);
9604 	rc = spdk_bs_blob_shallow_copy(bs, blob_ch, blobid, ext_dev,
9605 				       blob_shallow_copy_status_cb, NULL,
9606 				       blob_op_complete, NULL);
9607 	CU_ASSERT(rc == 0);
9608 	poll_threads();
9609 	CU_ASSERT(g_bserrno == -EINVAL);
9610 	ext_dev->destroy(ext_dev);
9611 
	/* Initialize ext_dev for the successful shallow copy */
9613 	ext_dev = init_ext_dev(num_clusters * 1024 * 1024, DEV_BUFFER_BLOCKLEN);
9614 	bdev_ch = ext_dev->create_channel(ext_dev);
9615 	SPDK_CU_ASSERT_FATAL(bdev_ch != NULL);
9616 	ext_args.cb_fn = bs_dev_io_complete_cb;
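	/* Pre-fill the external device with 0xff so clusters skipped by the copy can be detected */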
9617 	for (offset = 0; offset < 4 * io_units_per_cluster; offset++) {
9618 		memset(buf2, 0xff, DEV_BUFFER_BLOCKLEN);
9619 		ext_dev->write(ext_dev, bdev_ch, buf2, offset, 1, &ext_args);
9620 		poll_threads();
9621 		CU_ASSERT(g_bserrno == 0);
9622 	}
9623 
	/* Perform a correct shallow copy of the blob to the external device */
9625 	rc = spdk_bs_blob_shallow_copy(bs, blob_ch, blobid, ext_dev,
9626 				       blob_shallow_copy_status_cb, NULL,
9627 				       blob_op_complete, NULL);
9628 	CU_ASSERT(rc == 0);
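	/*
	 * Poll in small steps so the per-cluster status callback can be observed:
	 * one allocated cluster is copied after the first poll, the second after two more.
	 */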
9629 	poll_thread_times(0, 1);
9630 	CU_ASSERT(g_copied_clusters_count == 1);
9631 	poll_thread_times(0, 2);
9632 	CU_ASSERT(g_bserrno == 0);
9633 	CU_ASSERT(g_copied_clusters_count == 2);
9634 
	/* Read back from the external device */
	/* Only clusters 1 and 3 must be filled */
	/* Clusters 2 and 4 should not have been touched */
9638 	for (offset = 0; offset < io_units_per_cluster; offset++) {
9639 		memset(buf1, offset, DEV_BUFFER_BLOCKLEN);
9640 		ext_dev->read(ext_dev, bdev_ch, buf2, offset, 1, &ext_args);
9641 		poll_threads();
9642 		CU_ASSERT(g_bserrno == 0);
9643 		CU_ASSERT(memcmp(buf1, buf2, DEV_BUFFER_BLOCKLEN) == 0);
9644 	}
9645 	for (offset = io_units_per_cluster; offset < 2 * io_units_per_cluster; offset++) {
9646 		memset(buf1, 0xff, DEV_BUFFER_BLOCKLEN);
9647 		ext_dev->read(ext_dev, bdev_ch, buf2, offset, 1, &ext_args);
9648 		poll_threads();
9649 		CU_ASSERT(g_bserrno == 0);
9650 		CU_ASSERT(memcmp(buf1, buf2, DEV_BUFFER_BLOCKLEN) == 0);
9651 	}
9652 	for (offset = 2 * io_units_per_cluster; offset < 3 * io_units_per_cluster; offset++) {
9653 		memset(buf1, offset, DEV_BUFFER_BLOCKLEN);
9654 		ext_dev->read(ext_dev, bdev_ch, buf2, offset, 1, &ext_args);
9655 		poll_threads();
9656 		CU_ASSERT(g_bserrno == 0);
9657 		CU_ASSERT(memcmp(buf1, buf2, DEV_BUFFER_BLOCKLEN) == 0);
9658 	}
9659 	for (offset = 3 * io_units_per_cluster; offset < 4 * io_units_per_cluster; offset++) {
9660 		memset(buf1, 0xff, DEV_BUFFER_BLOCKLEN);
9661 		ext_dev->read(ext_dev, bdev_ch, buf2, offset, 1, &ext_args);
9662 		poll_threads();
9663 		CU_ASSERT(g_bserrno == 0);
9664 		CU_ASSERT(memcmp(buf1, buf2, DEV_BUFFER_BLOCKLEN) == 0);
9665 	}
9666 
9667 	/* Clean up */
9668 	ext_dev->destroy_channel(ext_dev, bdev_ch);
9669 	ext_dev->destroy(ext_dev);
9670 	spdk_bs_free_io_channel(blob_ch);
9671 	ut_blob_close_and_delete(bs, blob);
9672 	poll_threads();
9673 }
9674 
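/*
 * Exercise spdk_bs_blob_set_parent(): invalid arguments first, then valid calls
 * on a snapshot's clone, an esnap clone and a thin-provisioned blob with no parent.
 */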
9675 static void
9676 blob_set_parent(void)
9677 {
9678 	struct spdk_blob_store *bs = g_bs;
9679 	struct spdk_blob_opts opts;
9680 	struct ut_esnap_opts esnap_opts;
9681 	struct spdk_blob *blob1, *blob2, *blob3, *blob4, *blob5;
9682 	spdk_blob_id blobid1, blobid2, blobid3, blobid4, blobid5,
9683 		     snapshotid1, snapshotid2, snapshotid3;
9684 	uint32_t cluster_sz, block_sz;
9685 	const uint32_t esnap_num_clusters = 4;
9686 	uint64_t esnap_num_blocks;
9687 	spdk_blob_id ids[2];
9688 	size_t clone_count = 2;
9689 
9690 	cluster_sz = spdk_bs_get_cluster_size(bs);
9691 	block_sz = spdk_bs_get_io_unit_size(bs);
9692 	esnap_num_blocks = cluster_sz * esnap_num_clusters / block_sz;
9693 
9694 	/* Create a normal blob and make a couple of snapshots */
9695 	ut_spdk_blob_opts_init(&opts);
9696 	blob1 = ut_blob_create_and_open(bs, &opts);
9697 	SPDK_CU_ASSERT_FATAL(blob1 != NULL);
9698 	blobid1 = spdk_blob_get_id(blob1);
9699 	spdk_bs_create_snapshot(bs, blobid1, NULL, blob_op_with_id_complete, NULL);
9700 	poll_threads();
9701 	SPDK_CU_ASSERT_FATAL(g_bserrno == 0);
9702 	SPDK_CU_ASSERT_FATAL(g_blobid != SPDK_BLOBID_INVALID);
9703 	snapshotid1 = g_blobid;
9704 	spdk_bs_create_snapshot(bs, blobid1, NULL, blob_op_with_id_complete, NULL);
9705 	poll_threads();
9706 	SPDK_CU_ASSERT_FATAL(g_bserrno == 0);
9707 	SPDK_CU_ASSERT_FATAL(g_blobid != SPDK_BLOBID_INVALID);
9708 	snapshotid2 = g_blobid;
9709 
9710 	/* Call set_parent with an invalid snapshotid */
9711 	spdk_bs_blob_set_parent(bs, blobid1, SPDK_BLOBID_INVALID, blob_op_complete, NULL);
9712 	poll_threads();
9713 	CU_ASSERT(g_bserrno == -EINVAL);
9714 
9715 	/* Call set_parent with blobid and snapshotid the same */
9716 	spdk_bs_blob_set_parent(bs, blobid1, blobid1, blob_op_complete, NULL);
9717 	poll_threads();
9718 	CU_ASSERT(g_bserrno == -EINVAL);
9719 
9720 	/* Call set_parent with a blob and its parent snapshot */
9721 	spdk_bs_blob_set_parent(bs, blobid1, snapshotid2, blob_op_complete, NULL);
9722 	poll_threads();
9723 	CU_ASSERT(g_bserrno == -EEXIST);
9724 
9725 	/* Create an esnap clone blob */
9726 	ut_spdk_blob_opts_init(&opts);
9727 	ut_esnap_opts_init(block_sz, esnap_num_blocks, __func__, NULL, &esnap_opts);
9728 	opts.esnap_id = &esnap_opts;
9729 	opts.esnap_id_len = sizeof(esnap_opts);
9730 	opts.num_clusters = esnap_num_clusters;
9731 	blob2 = ut_blob_create_and_open(bs, &opts);
9732 	SPDK_CU_ASSERT_FATAL(blob2 != NULL);
9733 	blobid2 = spdk_blob_get_id(blob2);
9734 	CU_ASSERT(spdk_blob_is_esnap_clone(blob2));
9735 
9736 	/* Call set_parent with a non snapshot parent */
9737 	spdk_bs_blob_set_parent(bs, blobid2, blobid1, blob_op_complete, NULL);
9738 	poll_threads();
9739 	CU_ASSERT(g_bserrno == -EINVAL);
9740 
9741 	/* Call set_parent with blob and snapshot of different size */
9742 	spdk_bs_blob_set_parent(bs, blobid2, snapshotid1, blob_op_complete, NULL);
9743 	poll_threads();
9744 	CU_ASSERT(g_bserrno == -EINVAL);
9745 
9746 	/* Call set_parent correctly with a snapshot's clone blob */
9747 	spdk_bs_blob_set_parent(bs, blobid1, snapshotid1, blob_op_complete, NULL);
9748 	poll_threads();
9749 	CU_ASSERT(g_bserrno == 0);
9750 
9751 	/* Check relations */
9752 	CU_ASSERT(spdk_blob_is_clone(blob1));
9753 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid1) == snapshotid1);
9754 	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid1, ids, &clone_count) == 0);
9755 	CU_ASSERT(clone_count == 2);
9756 	CU_ASSERT(ids[1] == blobid1);
9757 
9758 	/* Create another normal blob with size equal to esnap size and make a snapshot */
9759 	ut_spdk_blob_opts_init(&opts);
9760 	opts.num_clusters = esnap_num_clusters;
9761 	opts.thin_provision = true;
9762 	blob3 = ut_blob_create_and_open(bs, &opts);
9763 	SPDK_CU_ASSERT_FATAL(blob3 != NULL);
9764 	blobid3 = spdk_blob_get_id(blob3);
9765 	spdk_bs_create_snapshot(bs, blobid3, NULL, blob_op_with_id_complete, NULL);
9766 	poll_threads();
9767 	SPDK_CU_ASSERT_FATAL(g_bserrno == 0);
9768 	SPDK_CU_ASSERT_FATAL(g_blobid != SPDK_BLOBID_INVALID);
9769 	snapshotid3 = g_blobid;
9770 
9771 	/* Call set_parent correctly with an esnap's clone blob */
9772 	spdk_bs_blob_set_parent(bs, blobid2, snapshotid3, blob_op_complete, NULL);
9773 	poll_threads();
9774 	CU_ASSERT(g_bserrno == 0);
9775 
9776 	/* Check relations */
9777 	CU_ASSERT(!spdk_blob_is_esnap_clone(blob2));
9778 	CU_ASSERT(spdk_blob_is_clone(blob2));
9779 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid2) == snapshotid3);
9780 	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid3, ids, &clone_count) == 0);
9781 	CU_ASSERT(clone_count == 2);
9782 	CU_ASSERT(ids[1] == blobid2);
9783 
9784 	/* Create a not thin-provisioned blob that is not a clone */
9785 	ut_spdk_blob_opts_init(&opts);
9786 	opts.thin_provision = false;
9787 	blob4 = ut_blob_create_and_open(bs, &opts);
9788 	SPDK_CU_ASSERT_FATAL(blob4 != NULL);
9789 	blobid4 = spdk_blob_get_id(blob4);
9790 
9791 	/* Call set_parent with a blob that isn't a clone and that isn't thin-provisioned */
9792 	spdk_bs_blob_set_parent(bs, blobid4, snapshotid2, blob_op_complete, NULL);
9793 	poll_threads();
9794 	CU_ASSERT(g_bserrno == -EINVAL);
9795 
9796 	/* Create a thin-provisioned blob that is not a clone */
9797 	ut_spdk_blob_opts_init(&opts);
9798 	opts.thin_provision = true;
9799 	blob5 = ut_blob_create_and_open(bs, &opts);
9800 	SPDK_CU_ASSERT_FATAL(blob5 != NULL);
9801 	blobid5 = spdk_blob_get_id(blob5);
9802 
9803 	/* Call set_parent correctly with a blob that isn't a clone */
9804 	spdk_bs_blob_set_parent(bs, blobid5, snapshotid2, blob_op_complete, NULL);
9805 	poll_threads();
9806 	CU_ASSERT(g_bserrno == 0);
9807 
9808 	/* Check relations */
9809 	CU_ASSERT(spdk_blob_is_clone(blob5));
9810 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid5) == snapshotid2);
9811 	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &clone_count) == 0);
9812 	CU_ASSERT(clone_count == 1);
9813 	CU_ASSERT(ids[0] == blobid5);
9814 
9815 	/* Clean up */
9816 	ut_blob_close_and_delete(bs, blob5);
9817 	ut_blob_close_and_delete(bs, blob4);
9818 	ut_blob_close_and_delete(bs, blob3);
9819 	ut_blob_close_and_delete(bs, blob2);
9820 	ut_blob_close_and_delete(bs, blob1);
9821 	spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL);
9822 	poll_threads();
9823 	CU_ASSERT(g_bserrno == 0);
9824 	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
9825 	poll_threads();
9826 	CU_ASSERT(g_bserrno == 0);
9827 	spdk_bs_delete_blob(bs, snapshotid1, blob_op_complete, NULL);
9828 	poll_threads();
9829 	CU_ASSERT(g_bserrno == 0);
9830 }
9831 
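/*
 * Exercise spdk_bs_blob_set_external_parent(): invalid arguments first, then valid
 * calls on a snapshot's clone and on a thin-provisioned blob with no parent.
 */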
9832 static void
9833 blob_set_external_parent(void)
9834 {
9835 	struct spdk_blob_store *bs = g_bs;
9836 	struct spdk_blob_opts opts;
9837 	struct ut_esnap_opts esnap_opts, esnap_opts2;
9838 	struct spdk_blob *blob1, *blob2, *blob3, *blob4;
9839 	spdk_blob_id blobid1, blobid2, blobid3, blobid4, snapshotid;
9840 	uint32_t cluster_sz, block_sz;
9841 	const uint32_t esnap_num_clusters = 4;
9842 	uint64_t esnap_num_blocks;
9843 	struct spdk_bs_dev *esnap_dev1, *esnap_dev2, *esnap_dev3;
9844 	const void *esnap_id;
9845 	size_t esnap_id_len;
9846 	int rc;
9847 
9848 	cluster_sz = spdk_bs_get_cluster_size(bs);
9849 	block_sz = spdk_bs_get_io_unit_size(bs);
9850 	esnap_num_blocks = cluster_sz * esnap_num_clusters / block_sz;
9851 	esnap_dev1 = init_dev();
9852 	esnap_dev2 = init_dev();
9853 	esnap_dev3 = init_dev();
9854 
9855 	/* Create an esnap clone blob */
9856 	ut_spdk_blob_opts_init(&opts);
9857 	ut_esnap_opts_init(block_sz, esnap_num_blocks, __func__, NULL, &esnap_opts);
9858 	opts.esnap_id = &esnap_opts;
9859 	opts.esnap_id_len = sizeof(esnap_opts);
9860 	opts.num_clusters = esnap_num_clusters;
9861 	blob1 = ut_blob_create_and_open(bs, &opts);
9862 	SPDK_CU_ASSERT_FATAL(blob1 != NULL);
9863 	blobid1 = spdk_blob_get_id(blob1);
9864 	CU_ASSERT(spdk_blob_is_esnap_clone(blob1));
9865 
	/* Call set_external_parent with blobid and esnapid the same */
9867 	spdk_bs_blob_set_external_parent(bs, blobid1, esnap_dev1, &blobid1, sizeof(blobid1),
9868 					 blob_op_complete, NULL);
9869 	CU_ASSERT(g_bserrno == -EINVAL);
9870 
9871 	/* Call set_external_parent with esnap of incompatible size */
9872 	esnap_dev1->blockcnt = esnap_num_blocks - 1;
9873 	spdk_bs_blob_set_external_parent(bs, blobid1, esnap_dev1, opts.esnap_id, opts.esnap_id_len,
9874 					 blob_op_complete, NULL);
9875 	CU_ASSERT(g_bserrno == -EINVAL);
9876 
9877 	/* Call set_external_parent with a blob and its parent esnap */
9878 	esnap_dev1->blocklen = block_sz;
9879 	esnap_dev1->blockcnt = esnap_num_blocks;
9880 	spdk_bs_blob_set_external_parent(bs, blobid1, esnap_dev1, opts.esnap_id, opts.esnap_id_len,
9881 					 blob_op_complete, NULL);
9882 	poll_threads();
9883 	CU_ASSERT(g_bserrno == -EEXIST);
9884 
	/* Create a blob that is a clone of a snapshot */
9886 	ut_spdk_blob_opts_init(&opts);
9887 	blob2 = ut_blob_create_and_open(bs, &opts);
9888 	SPDK_CU_ASSERT_FATAL(blob2 != NULL);
9889 	blobid2 = spdk_blob_get_id(blob2);
9890 	spdk_bs_create_snapshot(bs, blobid2, NULL, blob_op_with_id_complete, NULL);
9891 	poll_threads();
9892 	SPDK_CU_ASSERT_FATAL(g_bserrno == 0);
9893 	SPDK_CU_ASSERT_FATAL(g_blobid != SPDK_BLOBID_INVALID);
9894 	snapshotid = g_blobid;
9895 
	/* Call set_external_parent correctly with a snapshot's clone blob */
9897 	esnap_dev2->blocklen = block_sz;
9898 	esnap_dev2->blockcnt = esnap_num_blocks;
9899 	ut_esnap_opts_init(block_sz, esnap_num_blocks, __func__, NULL, &esnap_opts2);
9900 	spdk_bs_blob_set_external_parent(bs, blobid2, esnap_dev2, &esnap_opts2, sizeof(esnap_opts2),
9901 					 blob_op_complete, NULL);
9902 	poll_threads();
9903 	CU_ASSERT(g_bserrno == 0);
9904 
9905 	/* Check relations */
9906 	rc = spdk_blob_get_esnap_id(blob2, &esnap_id, &esnap_id_len);
9907 	CU_ASSERT(spdk_blob_is_esnap_clone(blob2));
9908 	CU_ASSERT(!spdk_blob_is_clone(blob2));
9909 	CU_ASSERT(rc == 0 && esnap_id_len == sizeof(esnap_opts2) &&
9910 		  memcmp(esnap_id, &esnap_opts2, esnap_id_len) == 0);
9911 	CU_ASSERT(blob2->parent_id == SPDK_BLOBID_EXTERNAL_SNAPSHOT);
9912 
9913 	/* Create a not thin-provisioned blob that is not a clone */
9914 	ut_spdk_blob_opts_init(&opts);
9915 	opts.thin_provision = false;
9916 	blob3 = ut_blob_create_and_open(bs, &opts);
9917 	SPDK_CU_ASSERT_FATAL(blob3 != NULL);
9918 	blobid3 = spdk_blob_get_id(blob3);
9919 
9920 	/* Call set_external_parent with a blob that isn't a clone and that isn't thin-provisioned */
9921 	spdk_bs_blob_set_external_parent(bs, blobid3, esnap_dev1, &esnap_opts, sizeof(esnap_opts),
9922 					 blob_op_complete, NULL);
9923 	poll_threads();
9924 	CU_ASSERT(g_bserrno == -EINVAL);
9925 
9926 	/* Create a thin-provisioned blob that is not a clone */
9927 	ut_spdk_blob_opts_init(&opts);
9928 	opts.thin_provision = true;
9929 	blob4 = ut_blob_create_and_open(bs, &opts);
9930 	SPDK_CU_ASSERT_FATAL(blob4 != NULL);
9931 	blobid4 = spdk_blob_get_id(blob4);
9932 
9933 	/* Call set_external_parent correctly with a blob that isn't a clone */
9934 	esnap_dev3->blocklen = block_sz;
9935 	esnap_dev3->blockcnt = esnap_num_blocks;
9936 	ut_esnap_opts_init(block_sz, esnap_num_blocks, __func__, NULL, &esnap_opts);
9937 	spdk_bs_blob_set_external_parent(bs, blobid4, esnap_dev3, &esnap_opts, sizeof(esnap_opts),
9938 					 blob_op_complete, NULL);
9939 	poll_threads();
9940 	CU_ASSERT(g_bserrno == 0);
9941 
9942 	/* Check relations */
9943 	rc = spdk_blob_get_esnap_id(blob4, &esnap_id, &esnap_id_len);
9944 	CU_ASSERT(spdk_blob_is_esnap_clone(blob4));
9945 	CU_ASSERT(!spdk_blob_is_clone(blob4));
9946 	CU_ASSERT(rc == 0 && esnap_id_len == sizeof(esnap_opts) &&
9947 		  memcmp(esnap_id, &esnap_opts, esnap_id_len) == 0);
9948 	CU_ASSERT(blob4->parent_id == SPDK_BLOBID_EXTERNAL_SNAPSHOT);
9949 
9950 	ut_blob_close_and_delete(bs, blob4);
9951 	ut_blob_close_and_delete(bs, blob3);
9952 	ut_blob_close_and_delete(bs, blob2);
9953 	ut_blob_close_and_delete(bs, blob1);
9954 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
9955 	dev_destroy(esnap_dev1);
9956 	poll_threads();
9957 	CU_ASSERT(g_bserrno == 0);
9958 }
9959 
9960 static void
9961 suite_bs_setup(void)
9962 {
9963 	struct spdk_bs_dev *dev;
9964 
9965 	dev = init_dev();
9966 	memset(g_dev_buffer, 0, DEV_BUFFER_SIZE);
9967 	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
9968 	poll_threads();
9969 	CU_ASSERT(g_bserrno == 0);
9970 	CU_ASSERT(g_bs != NULL);
9971 }
9972 
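/* Like suite_bs_setup(), but registers the esnap device creation callback and uses a small cluster size */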
9973 static void
9974 suite_esnap_bs_setup(void)
9975 {
9976 	struct spdk_bs_dev	*dev;
9977 	struct spdk_bs_opts	bs_opts;
9978 
9979 	dev = init_dev();
9980 	memset(g_dev_buffer, 0, DEV_BUFFER_SIZE);
9981 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
9982 	bs_opts.cluster_sz = 4 * g_phys_blocklen;
9983 	bs_opts.esnap_bs_dev_create = ut_esnap_create;
9984 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
9985 	poll_threads();
9986 	CU_ASSERT(g_bserrno == 0);
9987 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
9988 }
9989 
9990 static void
9991 suite_bs_cleanup(void)
9992 {
9993 	if (g_bs != NULL) {
9994 		spdk_bs_unload(g_bs, bs_op_complete, NULL);
9995 		poll_threads();
9996 		CU_ASSERT(g_bserrno == 0);
9997 		g_bs = NULL;
9998 	}
9999 	memset(g_dev_buffer, 0, DEV_BUFFER_SIZE);
10000 }
10001 
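/* Create a blob with the given options (or defaults when blob_opts is NULL) and return it opened */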
10002 static struct spdk_blob *
10003 ut_blob_create_and_open(struct spdk_blob_store *bs, struct spdk_blob_opts *blob_opts)
10004 {
10005 	struct spdk_blob *blob;
10006 	struct spdk_blob_opts create_blob_opts;
10007 	spdk_blob_id blobid;
10008 
10009 	if (blob_opts == NULL) {
10010 		ut_spdk_blob_opts_init(&create_blob_opts);
10011 		blob_opts = &create_blob_opts;
10012 	}
10013 
10014 	spdk_bs_create_blob_ext(bs, blob_opts, blob_op_with_id_complete, NULL);
10015 	poll_threads();
10016 	CU_ASSERT(g_bserrno == 0);
10017 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
10018 	blobid = g_blobid;
10019 	g_blobid = -1;
10020 
10021 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
10022 	poll_threads();
10023 	CU_ASSERT(g_bserrno == 0);
10024 	CU_ASSERT(g_blob != NULL);
10025 	blob = g_blob;
10026 
10027 	g_blob = NULL;
10028 	g_bserrno = -1;
10029 
10030 	return blob;
10031 }
10032 
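/* Close the given blob and then delete it from the blobstore, asserting both steps succeed */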
10033 static void
10034 ut_blob_close_and_delete(struct spdk_blob_store *bs, struct spdk_blob *blob)
10035 {
10036 	spdk_blob_id blobid = spdk_blob_get_id(blob);
10037 
10038 	spdk_blob_close(blob, blob_op_complete, NULL);
10039 	poll_threads();
10040 	CU_ASSERT(g_bserrno == 0);
10041 	g_blob = NULL;
10042 
10043 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
10044 	poll_threads();
10045 	CU_ASSERT(g_bserrno == 0);
10046 	g_bserrno = -1;
10047 }
10048 
10049 static void
10050 suite_blob_setup(void)
10051 {
10052 	suite_bs_setup();
10053 	CU_ASSERT(g_bs != NULL);
10054 
10055 	g_blob = ut_blob_create_and_open(g_bs, NULL);
10056 	CU_ASSERT(g_blob != NULL);
10057 }
10058 
10059 static void
10060 suite_blob_cleanup(void)
10061 {
10062 	ut_blob_close_and_delete(g_bs, g_blob);
10063 	CU_ASSERT(g_blob == NULL);
10064 
10065 	suite_bs_cleanup();
10066 	CU_ASSERT(g_bs == NULL);
10067 }
10068 
10069 static int
10070 ut_setup_config_nocopy_noextent(void)
10071 {
10072 	g_dev_copy_enabled = false;
10073 	g_use_extent_table = false;
10074 	g_phys_blocklen = 4096;
10075 
10076 	return 0;
10077 }
10078 
10079 static int
10080 ut_setup_config_nocopy_extent(void)
10081 {
10082 	g_dev_copy_enabled = false;
10083 	g_use_extent_table = true;
10084 	g_phys_blocklen = 4096;
10085 
10086 	return 0;
10087 }
10088 
10089 static int
10090 ut_setup_config_nocopy_extent_16k_phys(void)
10091 {
10092 	g_dev_copy_enabled = false;
10093 	g_use_extent_table = true;
10094 	g_phys_blocklen = 16384;
10095 
10096 	return 0;
10097 }
10098 
10100 static int
10101 ut_setup_config_copy_noextent(void)
10102 {
10103 	g_dev_copy_enabled = true;
10104 	g_use_extent_table = false;
10105 	g_phys_blocklen = 4096;
10106 
10107 	return 0;
10108 }
10109 
10110 static int
10111 ut_setup_config_copy_extent(void)
10112 {
10113 	g_dev_copy_enabled = true;
10114 	g_use_extent_table = true;
10115 	g_phys_blocklen = 4096;
10116 
10117 	return 0;
10118 }
10119 
10120 struct ut_config {
10121 	const char *suffix;
10122 	CU_InitializeFunc setup_cb;
10123 };
10124 
10125 int
10126 main(int argc, char **argv)
10127 {
10128 	CU_pSuite		suite, suite_bs, suite_blob, suite_esnap_bs;
10129 	unsigned int		i, num_failures;
10130 	char			suite_name[4096];
10131 	struct ut_config	*config;
10132 	struct ut_config	configs[] = {
10133 		{"nocopy_noextent", ut_setup_config_nocopy_noextent},
10134 		{"nocopy_extent", ut_setup_config_nocopy_extent},
10135 		{"nocopy_extent_16k_phys", ut_setup_config_nocopy_extent_16k_phys},
10136 		{"copy_noextent", ut_setup_config_copy_noextent},
10137 		{"copy_extent", ut_setup_config_copy_extent},
10138 	};
10139 
10140 	CU_initialize_registry();
10141 
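	/*
	 * Register each suite once per configuration so every test runs with and
	 * without extent tables, with and without dev copy offload, and with a
	 * 16KiB physical block size variant.
	 */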
10142 	for (i = 0; i < SPDK_COUNTOF(configs); ++i) {
10143 		config = &configs[i];
10144 
10145 		snprintf(suite_name, sizeof(suite_name), "blob_%s", config->suffix);
10146 		suite = CU_add_suite(suite_name, config->setup_cb, NULL);
10147 
10148 		snprintf(suite_name, sizeof(suite_name), "blob_bs_%s", config->suffix);
10149 		suite_bs = CU_add_suite_with_setup_and_teardown(suite_name, config->setup_cb, NULL,
10150 				suite_bs_setup, suite_bs_cleanup);
10151 
10152 		snprintf(suite_name, sizeof(suite_name), "blob_blob_%s", config->suffix);
10153 		suite_blob = CU_add_suite_with_setup_and_teardown(suite_name, config->setup_cb, NULL,
10154 				suite_blob_setup, suite_blob_cleanup);
10155 
10156 		snprintf(suite_name, sizeof(suite_name), "blob_esnap_bs_%s", config->suffix);
10157 		suite_esnap_bs = CU_add_suite_with_setup_and_teardown(suite_name, config->setup_cb, NULL,
10158 				 suite_esnap_bs_setup,
10159 				 suite_bs_cleanup);
10160 
10161 		CU_ADD_TEST(suite, blob_init);
10162 		CU_ADD_TEST(suite_bs, blob_open);
10163 		CU_ADD_TEST(suite_bs, blob_create);
10164 		CU_ADD_TEST(suite_bs, blob_create_loop);
10165 		CU_ADD_TEST(suite_bs, blob_create_fail);
10166 		CU_ADD_TEST(suite_bs, blob_create_internal);
10167 		CU_ADD_TEST(suite_bs, blob_create_zero_extent);
10168 		CU_ADD_TEST(suite, blob_thin_provision);
10169 		CU_ADD_TEST(suite_bs, blob_snapshot);
10170 		CU_ADD_TEST(suite_bs, blob_clone);
10171 		CU_ADD_TEST(suite_bs, blob_inflate);
10172 		CU_ADD_TEST(suite_bs, blob_delete);
10173 		CU_ADD_TEST(suite_bs, blob_resize_test);
10174 		CU_ADD_TEST(suite_bs, blob_resize_thin_test);
10175 		CU_ADD_TEST(suite, blob_read_only);
10176 		CU_ADD_TEST(suite_bs, channel_ops);
10177 		CU_ADD_TEST(suite_bs, blob_super);
10178 		CU_ADD_TEST(suite_blob, blob_write);
10179 		CU_ADD_TEST(suite_blob, blob_read);
10180 		CU_ADD_TEST(suite_blob, blob_rw_verify);
10181 		CU_ADD_TEST(suite_bs, blob_rw_verify_iov);
10182 		CU_ADD_TEST(suite_blob, blob_rw_verify_iov_nomem);
10183 		CU_ADD_TEST(suite_blob, blob_rw_iov_read_only);
10184 		CU_ADD_TEST(suite_bs, blob_unmap);
10185 		CU_ADD_TEST(suite_bs, blob_iter);
10186 		CU_ADD_TEST(suite_blob, blob_xattr);
10187 		CU_ADD_TEST(suite_bs, blob_parse_md);
10188 		CU_ADD_TEST(suite, bs_load);
10189 		CU_ADD_TEST(suite_bs, bs_load_pending_removal);
10190 		CU_ADD_TEST(suite, bs_load_custom_cluster_size);
10191 		CU_ADD_TEST(suite, bs_load_after_failed_grow);
10192 		CU_ADD_TEST(suite, bs_load_error);
10193 		CU_ADD_TEST(suite_bs, bs_unload);
10194 		CU_ADD_TEST(suite, bs_cluster_sz);
10195 		CU_ADD_TEST(suite_bs, bs_usable_clusters);
10196 		CU_ADD_TEST(suite, bs_resize_md);
10197 		CU_ADD_TEST(suite, bs_destroy);
10198 		CU_ADD_TEST(suite, bs_type);
10199 		CU_ADD_TEST(suite, bs_super_block);
10200 		CU_ADD_TEST(suite, bs_test_recover_cluster_count);
10201 		CU_ADD_TEST(suite, bs_grow_live);
10202 		CU_ADD_TEST(suite, bs_grow_live_no_space);
10203 		CU_ADD_TEST(suite, bs_test_grow);
10204 		CU_ADD_TEST(suite, blob_serialize_test);
10205 		CU_ADD_TEST(suite_bs, blob_crc);
10206 		CU_ADD_TEST(suite, super_block_crc);
10207 		CU_ADD_TEST(suite_blob, blob_dirty_shutdown);
10208 		CU_ADD_TEST(suite_bs, blob_flags);
10209 		CU_ADD_TEST(suite_bs, bs_version);
10210 		CU_ADD_TEST(suite_bs, blob_set_xattrs_test);
10211 		CU_ADD_TEST(suite_bs, blob_thin_prov_alloc);
10212 		CU_ADD_TEST(suite_bs, blob_insert_cluster_msg_test);
10213 		CU_ADD_TEST(suite_bs, blob_thin_prov_rw);
10214 		CU_ADD_TEST(suite, blob_thin_prov_write_count_io);
10215 		CU_ADD_TEST(suite, blob_thin_prov_unmap_cluster);
10216 		CU_ADD_TEST(suite_bs, blob_thin_prov_rle);
10217 		CU_ADD_TEST(suite_bs, blob_thin_prov_rw_iov);
10218 		CU_ADD_TEST(suite, bs_load_iter_test);
10219 		CU_ADD_TEST(suite_bs, blob_snapshot_rw);
10220 		CU_ADD_TEST(suite_bs, blob_snapshot_rw_iov);
10221 		CU_ADD_TEST(suite, blob_relations);
10222 		CU_ADD_TEST(suite, blob_relations2);
10223 		CU_ADD_TEST(suite, blob_relations3);
10224 		CU_ADD_TEST(suite, blobstore_clean_power_failure);
10225 		CU_ADD_TEST(suite, blob_delete_snapshot_power_failure);
10226 		CU_ADD_TEST(suite, blob_create_snapshot_power_failure);
10227 		CU_ADD_TEST(suite_bs, blob_inflate_rw);
10228 		CU_ADD_TEST(suite_bs, blob_snapshot_freeze_io);
10229 		CU_ADD_TEST(suite_bs, blob_operation_split_rw);
10230 		CU_ADD_TEST(suite_bs, blob_operation_split_rw_iov);
10231 		CU_ADD_TEST(suite, blob_io_unit);
10232 		CU_ADD_TEST(suite, blob_io_unit_compatibility);
10233 		CU_ADD_TEST(suite_bs, blob_simultaneous_operations);
10234 		CU_ADD_TEST(suite_bs, blob_persist_test);
10235 		CU_ADD_TEST(suite_bs, blob_decouple_snapshot);
10236 		CU_ADD_TEST(suite_bs, blob_seek_io_unit);
10237 		CU_ADD_TEST(suite_esnap_bs, blob_esnap_create);
10238 		CU_ADD_TEST(suite_bs, blob_nested_freezes);
10239 		CU_ADD_TEST(suite, blob_ext_md_pages);
10240 		CU_ADD_TEST(suite, blob_esnap_io_4096_4096);
10241 		CU_ADD_TEST(suite, blob_esnap_io_512_512);
10242 		CU_ADD_TEST(suite, blob_esnap_io_4096_512);
10243 		CU_ADD_TEST(suite, blob_esnap_io_512_4096);
10244 		CU_ADD_TEST(suite_esnap_bs, blob_esnap_thread_add_remove);
10245 		CU_ADD_TEST(suite_esnap_bs, blob_esnap_clone_snapshot);
10246 		CU_ADD_TEST(suite_esnap_bs, blob_esnap_clone_inflate);
10247 		CU_ADD_TEST(suite_esnap_bs, blob_esnap_clone_decouple);
10248 		CU_ADD_TEST(suite_esnap_bs, blob_esnap_clone_reload);
10249 		CU_ADD_TEST(suite_esnap_bs, blob_esnap_hotplug);
10250 		CU_ADD_TEST(suite_blob, blob_is_degraded);
10251 		CU_ADD_TEST(suite_bs, blob_clone_resize);
10252 		CU_ADD_TEST(suite, blob_esnap_clone_resize);
10253 		CU_ADD_TEST(suite_bs, blob_shallow_copy);
10254 		CU_ADD_TEST(suite_esnap_bs, blob_set_parent);
10255 		CU_ADD_TEST(suite_esnap_bs, blob_set_external_parent);
10256 	}
10257 
10258 	allocate_threads(2);
10259 	set_thread(0);
10260 
10261 	g_dev_buffer = calloc(1, DEV_BUFFER_SIZE);
10262 
10263 	num_failures = spdk_ut_run_tests(argc, argv, NULL);
10264 
10265 	free(g_dev_buffer);
10266 
10267 	free_threads();
10268 
10269 	return num_failures;
10270 }
10271