xref: /spdk/test/unit/lib/blob/blob.c/blob_ut.c (revision a2f5e1c2d535934bced849d8b079523bc74c98f1)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2017 Intel Corporation.
3  *   All rights reserved.
4  *   Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
5  */
6 
7 #include "spdk/stdinc.h"
8 
9 #include "spdk_internal/cunit.h"
10 #include "spdk/blob.h"
11 #include "spdk/string.h"
12 
13 #include "common/lib/ut_multithread.c"
14 #include "../bs_dev_common.c"
15 #include "thread/thread.c"
16 #include "ext_dev.c"
17 #include "blob/blobstore.c"
18 #include "blob/request.c"
19 #include "blob/zeroes.c"
20 #include "blob/blob_bs_dev.c"
21 #include "esnap_dev.c"
22 #define BLOCKLEN DEV_BUFFER_BLOCKLEN
23 
/* Shared state between the test cases and the async completion callbacks
 * below.  Each test issues an async blobstore call, polls the reactor
 * threads, then inspects these globals for the result. */
struct spdk_blob_store *g_bs;
spdk_blob_id g_blobid;
struct spdk_blob *g_blob, *g_blob2;
int g_bserrno, g_bserrno2;
struct spdk_xattr_names *g_names;
int g_done;
/* Parallel name/value tables consumed by the xattr test callbacks. */
char *g_xattr_names[] = {"first", "second", "third"};
char *g_xattr_values[] = {"one", "two", "three"};
uint64_t g_ctx = 1729;
/* Selects the extent-table metadata format; the runner flips this to
 * execute the whole suite in both modes. */
bool g_use_extent_table = false;
uint64_t g_copied_clusters_count = 0;
35 
/* On-disk layout of the version 1 super block, duplicated here so the
 * version-upgrade tests can hand-craft a v1 super block.  The layout must
 * remain exactly one 4 KiB page; the static assert below enforces that. */
struct spdk_bs_super_block_ver1 {
	uint8_t		signature[8];
	uint32_t        version;
	uint32_t        length;
	uint32_t	clean; /* If there was a clean shutdown, this is 1. */
	spdk_blob_id	super_blob;

	uint32_t	cluster_size; /* In bytes */

	uint32_t	used_page_mask_start; /* Offset from beginning of disk, in pages */
	uint32_t	used_page_mask_len; /* Count, in pages */

	uint32_t	used_cluster_mask_start; /* Offset from beginning of disk, in pages */
	uint32_t	used_cluster_mask_len; /* Count, in pages */

	uint32_t	md_start; /* Offset from beginning of disk, in pages */
	uint32_t	md_len; /* Count, in pages */

	uint8_t		reserved[4036]; /* Pads the structure out to 4096 bytes */
	uint32_t	crc;
} __attribute__((packed));
SPDK_STATIC_ASSERT(sizeof(struct spdk_bs_super_block_ver1) == 0x1000, "Invalid super block size");
58 
59 static struct spdk_blob *ut_blob_create_and_open(struct spdk_blob_store *bs,
60 		struct spdk_blob_opts *blob_opts);
61 static void ut_blob_close_and_delete(struct spdk_blob_store *bs, struct spdk_blob *blob);
62 static void suite_blob_setup(void);
63 static void suite_blob_cleanup(void);
64 
65 DEFINE_STUB(spdk_memory_domain_memzero, int, (struct spdk_memory_domain *src_domain,
66 		void *src_domain_ctx, struct iovec *iov, uint32_t iovcnt, void (*cpl_cb)(void *, int),
67 		void *cpl_cb_arg), 0);
68 
69 static bool
70 is_esnap_clone(struct spdk_blob *_blob, const void *id, size_t id_len)
71 {
72 	const void *val = NULL;
73 	size_t len = 0;
74 	bool c0, c1, c2, c3;
75 
76 	CU_ASSERT(blob_get_xattr_value(_blob, BLOB_EXTERNAL_SNAPSHOT_ID, &val, &len,
77 				       true) == 0);
78 	CU_ASSERT((c0 = (len == id_len)));
79 	CU_ASSERT((c1 = (val != NULL && memcmp(val, id, len) == 0)));
80 	CU_ASSERT((c2 = !!(_blob->invalid_flags & SPDK_BLOB_EXTERNAL_SNAPSHOT)));
81 	CU_ASSERT((c3 = (_blob->parent_id == SPDK_BLOBID_EXTERNAL_SNAPSHOT)));
82 
83 	return c0 && c1 && c2 && c3;
84 }
85 
86 static bool
87 is_not_esnap_clone(struct spdk_blob *_blob)
88 {
89 	const void *val = NULL;
90 	size_t len = 0;
91 	bool c1, c2, c3, c4;
92 
93 	CU_ASSERT((c1 = (blob_get_xattr_value(_blob, BLOB_EXTERNAL_SNAPSHOT_ID, &val, &len,
94 					      true) == -ENOENT)));
95 	CU_ASSERT((c2 = (val == NULL)));
96 	CU_ASSERT((c3 = ((_blob->invalid_flags & SPDK_BLOB_EXTERNAL_SNAPSHOT) == 0)));
97 	CU_ASSERT((c4 = (_blob->parent_id != SPDK_BLOBID_EXTERNAL_SNAPSHOT)));
98 
99 	return c1 && c2 && c3 && c4;
100 }
101 
102 #define UT_ASSERT_IS_ESNAP_CLONE(_blob, _id, _len) CU_ASSERT(is_esnap_clone(_blob, _id, _len))
103 #define UT_ASSERT_IS_NOT_ESNAP_CLONE(_blob) CU_ASSERT(is_not_esnap_clone(_blob))
104 
105 static void
106 _get_xattr_value(void *arg, const char *name,
107 		 const void **value, size_t *value_len)
108 {
109 	uint64_t i;
110 
111 	SPDK_CU_ASSERT_FATAL(value_len != NULL);
112 	SPDK_CU_ASSERT_FATAL(value != NULL);
113 	CU_ASSERT(arg == &g_ctx);
114 
115 	for (i = 0; i < sizeof(g_xattr_names); i++) {
116 		if (!strcmp(name, g_xattr_names[i])) {
117 			*value_len = strlen(g_xattr_values[i]);
118 			*value = g_xattr_values[i];
119 			break;
120 		}
121 	}
122 }
123 
/* xattr get_value callback used by the "NULL xattr value" tests: always
 * reports an empty (NULL, length 0) value regardless of the name. */
static void
_get_xattr_value_null(void *arg, const char *name,
		      const void **value, size_t *value_len)
{
	SPDK_CU_ASSERT_FATAL(value_len != NULL);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(arg == NULL);

	*value = NULL;
	*value_len = 0;
}
135 
136 static int
137 _get_snapshots_count(struct spdk_blob_store *bs)
138 {
139 	struct spdk_blob_list *snapshot = NULL;
140 	int count = 0;
141 
142 	TAILQ_FOREACH(snapshot, &bs->snapshots, link) {
143 		count += 1;
144 	}
145 
146 	return count;
147 }
148 
/* Initialize blob opts to defaults, then apply the extent-table mode the
 * current suite pass is configured for (see g_use_extent_table). */
static void
ut_spdk_blob_opts_init(struct spdk_blob_opts *opts)
{
	spdk_blob_opts_init(opts, sizeof(*opts));
	opts->use_extent_table = g_use_extent_table;
}
155 
/* Completion callback for blobstore-level operations: record the status. */
static void
bs_op_complete(void *cb_arg, int bserrno)
{
	g_bserrno = bserrno;
}
161 
/* Completion callback for init/load: record the blobstore handle and status. */
static void
bs_op_with_handle_complete(void *cb_arg, struct spdk_blob_store *bs,
			   int bserrno)
{
	g_bs = bs;
	g_bserrno = bserrno;
}
169 
170 static void
171 blob_op_complete(void *cb_arg, int bserrno)
172 {
173 	if (cb_arg != NULL) {
174 		int *errp = cb_arg;
175 
176 		*errp = bserrno;
177 	}
178 	g_bserrno = bserrno;
179 }
180 
/* Completion callback for create/snapshot/clone: record the new blob id
 * and the status. */
static void
blob_op_with_id_complete(void *cb_arg, spdk_blob_id blobid, int bserrno)
{
	g_blobid = blobid;
	g_bserrno = bserrno;
}
187 
/* Completion callback for blob open: record the blob handle and status. */
static void
blob_op_with_handle_complete(void *cb_arg, struct spdk_blob *blb, int bserrno)
{
	g_blob = blb;
	g_bserrno = bserrno;
}
194 
195 static void
196 blob_op_with_handle_complete2(void *cb_arg, struct spdk_blob *blob, int bserrno)
197 {
198 	if (g_blob == NULL) {
199 		g_blob = blob;
200 		g_bserrno = bserrno;
201 	} else {
202 		g_blob2 = blob;
203 		g_bserrno2 = bserrno;
204 	}
205 }
206 
/* Progress callback for shallow copy: remember how many clusters have been
 * copied so far so tests can verify the progress reporting. */
static void
blob_shallow_copy_status_cb(uint64_t copied_clusters, void *cb_arg)
{
	g_copied_clusters_count = copied_clusters;
}
212 
/* Cleanly unload *bs and load it again from the same backing device,
 * returning the freshly loaded blobstore through *bs.  Simulates a clean
 * shutdown/restart cycle. */
static void
ut_bs_reload(struct spdk_blob_store **bs, struct spdk_bs_opts *opts)
{
	struct spdk_bs_dev *dev;

	/* Unload the blob store */
	spdk_bs_unload(*bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	dev = init_dev();
	/* Load an existing blob store */
	spdk_bs_load(dev, opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	*bs = g_bs;

	/* Poison the status so the next assertion can't pass on stale state. */
	g_bserrno = -1;
}
233 
/* Free *bs without unloading (simulating a dirty shutdown — the on-disk
 * "clean" flag is never written) and then load it again, exercising the
 * recovery path.  Returns the reloaded blobstore through *bs. */
static void
ut_bs_dirty_load(struct spdk_blob_store **bs, struct spdk_bs_opts *opts)
{
	struct spdk_bs_dev *dev;

	/* Dirty shutdown */
	bs_free(*bs);

	dev = init_dev();
	/* Load an existing blob store */
	spdk_bs_load(dev, opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	*bs = g_bs;

	/* Poison the status so the next assertion can't pass on stale state. */
	g_bserrno = -1;
}
252 
/* Test blobstore initialization: an unsupported device block length must
 * fail with -EINVAL, and a normal init/unload cycle must succeed. */
static void
blob_init(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;

	dev = init_dev();

	/* should fail for an unsupported blocklen */
	dev->blocklen = 500;
	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	dev = init_dev();
	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
}
279 
/* Test super-blob bookkeeping: reading the super blob before one is set
 * must fail with -ENOENT; after spdk_bs_set_super() the same query must
 * return the blob that was registered. */
static void
blob_super(void)
{
	struct spdk_blob_store *bs = g_bs;
	spdk_blob_id blobid;
	struct spdk_blob_opts blob_opts;

	/* Get the super blob without having set one */
	spdk_bs_get_super(bs, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOENT);
	CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID);

	/* Create a blob */
	ut_spdk_blob_opts_init(&blob_opts);
	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid !=  SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	/* Set the blob as the super blob */
	spdk_bs_set_super(bs, blobid, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Get the super blob */
	spdk_bs_get_super(bs, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(blobid == g_blobid);
}
312 
/* Test blob open/close reference counting: re-opening an open blob returns
 * the same handle, each open needs a matching close, and two opens issued
 * before any polling resolve to the same blob object. */
static void
blob_open(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_blob_opts blob_opts;
	spdk_blob_id blobid, blobid2;

	ut_spdk_blob_opts_init(&blob_opts);
	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;

	blobid2 = spdk_blob_get_id(blob);
	CU_ASSERT(blobid == blobid2);

	/* Try to open file again.  It should return success. */
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(blob == g_blob);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/*
	 * Close the file a second time, releasing the second reference.  This
	 *  should succeed.
	 */
	blob = g_blob;
	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/*
	 * Try to open file again.  It should succeed.  This tests the case
	 *  where the file is opened, closed, then re-opened again.
	 */
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;
	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Try to open file twice in succession.  This should return the same
	 * blob object.
	 */
	g_blob = NULL;
	g_blob2 = NULL;
	g_bserrno = -1;
	g_bserrno2 = -1;
	/* Both opens are queued before any polling; the second completion goes
	 * through blob_op_with_handle_complete2's g_blob2 path. */
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete2, NULL);
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete2, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_bserrno2 == 0);
	CU_ASSERT(g_blob != NULL);
	CU_ASSERT(g_blob2 != NULL);
	CU_ASSERT(g_blob == g_blob2);

	/* Drop one of the two references, then close-and-delete the other. */
	g_bserrno = -1;
	spdk_blob_close(g_blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	ut_blob_close_and_delete(bs, g_blob);
}
392 
/* Test blob creation: with an explicit cluster count, with zero clusters,
 * with default (NULL) options, and the -ENOSPC failure when the request
 * exceeds the blobstore's capacity. */
static void
blob_create(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_blob_opts opts;
	spdk_blob_id blobid;

	/* Create blob with 10 clusters */

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;

	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;
	/* Not thin provisioned, so all 10 clusters are allocated up front. */
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 10);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Create blob with 0 clusters */

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 0;

	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Create blob with default options (opts == NULL) */

	spdk_bs_create_blob_ext(bs, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;
	/* Default options mean zero clusters. */
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Try to create blob with size larger than blobstore */

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = bs->total_clusters + 1;

	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOSPC);
}
476 
/* Test that a zero-cluster blob (created via the public API and via the
 * internal bs_create_blob with NULL internal options) carries an extent
 * table marker but allocates no extent pages. */
static void
blob_create_zero_extent(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	spdk_blob_id blobid;

	/* Create blob with default options (opts == NULL) */
	spdk_bs_create_blob_ext(bs, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
	/* Extent table descriptor is present but empty for a 0-cluster blob. */
	CU_ASSERT(blob->extent_table_found == true);
	CU_ASSERT(blob->active.extent_pages_array_size == 0);
	CU_ASSERT(blob->active.extent_pages == NULL);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Create blob with NULL internal options  */
	bs_create_blob(bs, NULL, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;
	/* No internal xattrs should have been stored. */
	CU_ASSERT(TAILQ_FIRST(&blob->xattrs_internal) == NULL);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
	CU_ASSERT(blob->extent_table_found == true);
	CU_ASSERT(blob->active.extent_pages_array_size == 0);
	CU_ASSERT(blob->active.extent_pages == NULL);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
}
527 
528 /*
529  * Create and delete one blob in a loop over and over again.  This helps ensure
530  * that the internal bit masks tracking used clusters and md_pages are being
531  * tracked correctly.
532  */
static void
blob_create_loop(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts opts;
	uint32_t i, loop_count;

	/* Loop enough times to cycle through the full md-page and cluster
	 * bitmasks several times, so any leaked bit would eventually
	 * exhaust the pool and fail a create below. */
	loop_count = 4 * spdk_max(spdk_bit_array_capacity(bs->used_md_pages),
				  spdk_bit_pool_capacity(bs->used_clusters));

	for (i = 0; i < loop_count; i++) {
		ut_spdk_blob_opts_init(&opts);
		opts.num_clusters = 1;
		g_bserrno = -1;
		g_blobid = SPDK_BLOBID_INVALID;
		spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
		spdk_bs_delete_blob(bs, g_blobid, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
	}
}
557 
/* Test that a failed blob creation (xattr names given without a get_value
 * callback) leaves no trace: no used blobid or md page, nothing to open,
 * and nothing on disk after a clean reload. */
static void
blob_create_fail(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts opts;
	spdk_blob_id blobid;
	uint32_t used_blobids_count = spdk_bit_array_count_set(bs->used_blobids);
	uint32_t used_md_pages_count = spdk_bit_array_count_set(bs->used_md_pages);

	/* NULL callback */
	ut_spdk_blob_opts_init(&opts);
	opts.xattrs.names = g_xattr_names;
	opts.xattrs.get_value = NULL;
	opts.xattrs.count = 1;
	opts.xattrs.ctx = &g_ctx;

	/* Predict the md page the failed create would have consumed, so the
	 * open below can verify that no blob exists at that id. */
	blobid = spdk_bit_array_find_first_clear(bs->used_md_pages, 0);
	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);
	/* NOTE(review): the create failed, so asserting the id is NOT the
	 * invalid sentinel looks suspicious — confirm whether this should be
	 * `== SPDK_BLOBID_INVALID` against the create-failure callback
	 * contract. */
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	CU_ASSERT(spdk_bit_array_count_set(bs->used_blobids) == used_blobids_count);
	CU_ASSERT(spdk_bit_array_count_set(bs->used_md_pages) == used_md_pages_count);

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOENT);
	SPDK_CU_ASSERT_FATAL(g_blob == NULL);

	/* After a clean reload the masks must be unchanged and iteration must
	 * find no blobs. */
	ut_bs_reload(&bs, NULL);
	CU_ASSERT(spdk_bit_array_count_set(bs->used_blobids) == used_blobids_count);
	CU_ASSERT(spdk_bit_array_count_set(bs->used_md_pages) == used_md_pages_count);

	spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_blob == NULL);
	CU_ASSERT(g_bserrno == -ENOENT);
}
596 
/* Test internal xattrs: values set through bs_create_blob's internal xattr
 * options must be readable only via the internal getter, never through the
 * public spdk_blob_get_xattr_value(). */
static void
blob_create_internal(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_blob_opts opts;
	struct spdk_blob_xattr_opts internal_xattrs;
	const void *value;
	size_t value_len;
	spdk_blob_id blobid;
	int rc;

	/* Create blob with custom xattrs */

	ut_spdk_blob_opts_init(&opts);
	blob_xattrs_init(&internal_xattrs);
	internal_xattrs.count = 3;
	internal_xattrs.names = g_xattr_names;
	internal_xattrs.get_value = _get_xattr_value;
	internal_xattrs.ctx = &g_ctx;

	bs_create_blob(bs, &opts, &internal_xattrs, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;

	/* All three values must be visible through the internal getter. */
	rc = blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len, true);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);

	rc = blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len, true);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);

	rc = blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len, true);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);

	/* The public getter must not expose internal xattrs. */
	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len);
	CU_ASSERT(rc != 0);

	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len);
	CU_ASSERT(rc != 0);

	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len);
	CU_ASSERT(rc != 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Create blob with NULL internal options  */

	bs_create_blob(bs, NULL, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	CU_ASSERT(TAILQ_FIRST(&g_blob->xattrs_internal) == NULL);
	CU_ASSERT(spdk_blob_get_num_clusters(g_blob) == 0);

	blob = g_blob;

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
}
682 
/* Test thin provisioning: a thin blob allocates no clusters up front, and
 * after a dirty shutdown the recovery path must still report it thin with
 * zero allocated clusters (index-0 entries are unallocated markers). */
static void
blob_thin_provision(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct spdk_blob *blob;
	struct spdk_blob_opts opts;
	struct spdk_bs_opts bs_opts;
	spdk_blob_id blobid;

	dev = init_dev();
	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
	snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");

	/* Initialize a new blob store */
	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);

	bs = g_bs;

	/* Create blob with thin provisioning enabled */

	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = true;
	opts.num_clusters = 10;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);
	CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);
	/* In thin provisioning with num_clusters is set, if not using the
	 * extent table, there is no allocation. If extent table is used,
	 * there is related allocation happened. */
	if (blob->extent_table_found == true) {
		CU_ASSERT(blob->active.extent_pages_array_size > 0);
		CU_ASSERT(blob->active.extent_pages != NULL);
	} else {
		CU_ASSERT(blob->active.extent_pages_array_size == 0);
		CU_ASSERT(blob->active.extent_pages == NULL);
	}

	/* NOTE(review): no poll_threads() between close and the assert — the
	 * assert may be checking the previous operation's status; confirm
	 * this is intentional (the dirty load below frees the bs anyway). */
	spdk_blob_close(blob, blob_op_complete, NULL);
	CU_ASSERT(g_bserrno == 0);

	/* Do not shut down cleanly.  This makes sure that when we load again
	 *  and try to recover a valid used_cluster map, that blobstore will
	 *  ignore clusters with index 0 since these are unallocated clusters.
	 */
	ut_bs_dirty_load(&bs, &bs_opts);

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;
	CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);

	ut_blob_close_and_delete(bs, blob);

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
}
750 
751 static void
752 blob_snapshot(void)
753 {
754 	struct spdk_blob_store *bs = g_bs;
755 	struct spdk_blob *blob;
756 	struct spdk_blob *snapshot, *snapshot2;
757 	struct spdk_blob_bs_dev *blob_bs_dev;
758 	struct spdk_blob_opts opts;
759 	struct spdk_blob_xattr_opts xattrs;
760 	spdk_blob_id blobid;
761 	spdk_blob_id snapshotid;
762 	spdk_blob_id snapshotid2;
763 	const void *value;
764 	size_t value_len;
765 	int rc;
766 	spdk_blob_id ids[2];
767 	size_t count;
768 
769 	/* Create blob with 10 clusters */
770 	ut_spdk_blob_opts_init(&opts);
771 	opts.num_clusters = 10;
772 
773 	blob = ut_blob_create_and_open(bs, &opts);
774 	blobid = spdk_blob_get_id(blob);
775 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
776 
777 	/* Create snapshot from blob */
778 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 0);
779 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
780 	poll_threads();
781 	CU_ASSERT(g_bserrno == 0);
782 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
783 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
784 	snapshotid = g_blobid;
785 
786 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
787 	poll_threads();
788 	CU_ASSERT(g_bserrno == 0);
789 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
790 	snapshot = g_blob;
791 	CU_ASSERT(snapshot->data_ro == true);
792 	CU_ASSERT(snapshot->md_ro == true);
793 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10);
794 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(snapshot) == 10);
795 
796 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
797 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);
798 	CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
799 	CU_ASSERT(spdk_mem_all_zero(blob->active.clusters,
800 				    blob->active.num_clusters * sizeof(blob->active.clusters[0])));
801 
802 	/* Try to create snapshot from clone with xattrs */
803 	xattrs.names = g_xattr_names;
804 	xattrs.get_value = _get_xattr_value;
805 	xattrs.count = 3;
806 	xattrs.ctx = &g_ctx;
807 	spdk_bs_create_snapshot(bs, blobid, &xattrs, blob_op_with_id_complete, NULL);
808 	poll_threads();
809 	CU_ASSERT(g_bserrno == 0);
810 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
811 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 2);
812 	snapshotid2 = g_blobid;
813 
814 	spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL);
815 	CU_ASSERT(g_bserrno == 0);
816 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
817 	snapshot2 = g_blob;
818 	CU_ASSERT(snapshot2->data_ro == true);
819 	CU_ASSERT(snapshot2->md_ro == true);
820 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot2) == 10);
821 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(snapshot2) == 0);
822 
823 	/* Confirm that blob is backed by snapshot2 and snapshot2 is backed by snapshot */
824 	CU_ASSERT(snapshot->back_bs_dev == NULL);
825 	SPDK_CU_ASSERT_FATAL(blob->back_bs_dev != NULL);
826 	SPDK_CU_ASSERT_FATAL(snapshot2->back_bs_dev != NULL);
827 
828 	blob_bs_dev = (struct spdk_blob_bs_dev *)blob->back_bs_dev;
829 	CU_ASSERT(blob_bs_dev->blob == snapshot2);
830 
831 	blob_bs_dev = (struct spdk_blob_bs_dev *)snapshot2->back_bs_dev;
832 	CU_ASSERT(blob_bs_dev->blob == snapshot);
833 
834 	rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[0], &value, &value_len);
835 	CU_ASSERT(rc == 0);
836 	SPDK_CU_ASSERT_FATAL(value != NULL);
837 	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
838 	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);
839 
840 	rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[1], &value, &value_len);
841 	CU_ASSERT(rc == 0);
842 	SPDK_CU_ASSERT_FATAL(value != NULL);
843 	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
844 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);
845 
846 	rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[2], &value, &value_len);
847 	CU_ASSERT(rc == 0);
848 	SPDK_CU_ASSERT_FATAL(value != NULL);
849 	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
850 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);
851 
852 	/* Confirm that blob is clone of snapshot2, and snapshot2 is clone of snapshot */
853 	count = 2;
854 	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0);
855 	CU_ASSERT(count == 1);
856 	CU_ASSERT(ids[0] == blobid);
857 
858 	count = 2;
859 	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);
860 	CU_ASSERT(count == 1);
861 	CU_ASSERT(ids[0] == snapshotid2);
862 
863 	/* Try to create snapshot from snapshot */
864 	spdk_bs_create_snapshot(bs, snapshotid, NULL, blob_op_with_id_complete, NULL);
865 	poll_threads();
866 	CU_ASSERT(g_bserrno == -EINVAL);
867 	CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID);
868 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 2);
869 
870 	/* Delete blob and confirm that it is no longer on snapshot2 clone list */
871 	ut_blob_close_and_delete(bs, blob);
872 	count = 2;
873 	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0);
874 	CU_ASSERT(count == 0);
875 
876 	/* Delete snapshot2 and confirm that it is no longer on snapshot clone list */
877 	ut_blob_close_and_delete(bs, snapshot2);
878 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
879 	count = 2;
880 	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0);
881 	CU_ASSERT(count == 0);
882 
883 	ut_blob_close_and_delete(bs, snapshot);
884 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 0);
885 }
886 
/* Test that blob I/O is frozen while a snapshot is being taken: a write
 * issued mid-snapshot must be queued, then completed correctly once the
 * snapshot finishes and I/O is unfrozen. */
static void
blob_snapshot_freeze_io(void)
{
	struct spdk_io_channel *channel;
	struct spdk_bs_channel *bs_channel;
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_blob_opts opts;
	spdk_blob_id blobid;
	uint32_t num_of_pages = 10;
	uint8_t payload_read[num_of_pages * BLOCKLEN];
	uint8_t payload_write[num_of_pages * BLOCKLEN];
	uint8_t payload_zero[num_of_pages * BLOCKLEN];

	memset(payload_write, 0xE5, sizeof(payload_write));
	memset(payload_read, 0x00, sizeof(payload_read));
	memset(payload_zero, 0x00, sizeof(payload_zero));

	/* Test freeze I/O during snapshot */
	channel = spdk_bs_alloc_io_channel(bs);
	bs_channel = spdk_io_channel_get_ctx(channel);

	/* Create blob with 10 clusters */
	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;
	opts.thin_provision = false;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);

	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);

	/* This is implementation specific.
	 * Flag 'frozen_io' is set in _spdk_bs_snapshot_freeze_cpl callback.
	 * Four async I/O operations happen before that. */
	poll_thread_times(0, 5);

	CU_ASSERT(TAILQ_EMPTY(&bs_channel->queued_io));

	/* Blob I/O should be frozen here */
	CU_ASSERT(blob->frozen_refcnt == 1);

	/* Write to the blob */
	spdk_blob_io_write(blob, channel, payload_write, 0, num_of_pages, blob_op_complete, NULL);

	/* Verify that I/O is queued */
	CU_ASSERT(!TAILQ_EMPTY(&bs_channel->queued_io));
	/* Verify that payload is not written to disk, at this point the blobs already switched */
	CU_ASSERT(blob->active.clusters[0] == 0);

	/* Finish all operations including spdk_bs_create_snapshot */
	poll_threads();

	/* Verify snapshot */
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);

	/* Verify that blob has unset frozen_io */
	CU_ASSERT(blob->frozen_refcnt == 0);

	/* Verify that postponed I/O completed successfully by comparing payload */
	spdk_blob_io_read(blob, channel, payload_read, 0, num_of_pages, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_write, payload_read, num_of_pages * BLOCKLEN) == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();

	ut_blob_close_and_delete(bs, blob);
}
959 
/*
 * Exercise snapshot/clone relationships: create a snapshot of a 10-cluster
 * blob, clone the snapshot (passing xattrs through), verify the clone's
 * properties and xattr values, then confirm that only read-only blobs can
 * be cloned directly.
 */
static void
blob_clone(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts opts;
	struct spdk_blob *blob, *snapshot, *clone;
	spdk_blob_id blobid, cloneid, snapshotid;
	struct spdk_blob_xattr_opts xattrs;
	const void *value;
	size_t value_len;
	int rc;

	/* Create blob with 10 clusters */

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);

	/* Create snapshot */
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snapshotid = g_blobid;

	/* A snapshot must be read-only for both data and metadata */
	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot = g_blob;
	CU_ASSERT(snapshot->data_ro == true);
	CU_ASSERT(snapshot->md_ro == true);
	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10);

	spdk_blob_close(snapshot, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Create clone from snapshot with xattrs */
	xattrs.names = g_xattr_names;
	xattrs.get_value = _get_xattr_value;
	xattrs.count = 3;
	xattrs.ctx = &g_ctx;

	spdk_bs_create_clone(bs, snapshotid, &xattrs, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	cloneid = g_blobid;

	/* The clone is writable and initially shares all clusters with the
	 * snapshot, so it should report zero allocated clusters of its own. */
	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	clone = g_blob;
	CU_ASSERT(clone->data_ro == false);
	CU_ASSERT(clone->md_ro == false);
	CU_ASSERT(spdk_blob_get_num_clusters(clone) == 10);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(clone) == 0);

	/* All three xattrs requested at clone creation must be present */
	rc = spdk_blob_get_xattr_value(clone, g_xattr_names[0], &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);

	rc = spdk_blob_get_xattr_value(clone, g_xattr_names[1], &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);

	rc = spdk_blob_get_xattr_value(clone, g_xattr_names[2], &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);


	spdk_blob_close(clone, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Try to create clone from not read only blob (should fail) */
	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);
	CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID);

	/* Mark blob as read only */
	spdk_blob_set_read_only(blob);
	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Create clone from read only blob */
	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	cloneid = g_blobid;

	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	clone = g_blob;
	CU_ASSERT(clone->data_ro == false);
	CU_ASSERT(clone->md_ro == false);
	CU_ASSERT(spdk_blob_get_num_clusters(clone) == 10);

	ut_blob_close_and_delete(bs, clone);
	ut_blob_close_and_delete(bs, blob);
}
1077 
/*
 * Shared body for blob_inflate(). With decouple_parent == false the blob is
 * fully inflated (every cluster gets its own allocation); with true only the
 * dependency on the parent snapshot is removed while unwritten clusters stay
 * unallocated. Either way, the parent snapshot must be deletable afterwards.
 */
static void
_blob_inflate(bool decouple_parent)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts opts;
	struct spdk_blob *blob, *snapshot;
	spdk_blob_id blobid, snapshotid;
	struct spdk_io_channel *channel;
	uint64_t free_clusters;

	channel = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(channel != NULL);

	/* Create thin-provisioned blob with 10 clusters */

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;
	opts.thin_provision = true;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
	CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == true);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);

	/* 1) Blob with no parent */
	if (decouple_parent) {
		/* Decouple parent of blob with no parent (should fail) */
		spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno != 0);
	} else {
		/* Inflating a thin blob with no parent should make it thick */
		spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == false);
		CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 10);
	}

	/* Taking a snapshot turns the blob back into a thin clone of it */
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snapshotid = g_blobid;

	CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == true);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);

	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot = g_blob;
	CU_ASSERT(snapshot->data_ro == true);
	CU_ASSERT(snapshot->md_ro == true);
	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10);

	spdk_blob_close(snapshot, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	free_clusters = spdk_bs_free_cluster_count(bs);

	/* 2) Blob with parent */
	if (!decouple_parent) {
		/* Do full blob inflation */
		spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		/* all 10 clusters should be allocated */
		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 10);
		CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 10);
	} else {
		/* Decouple parent of blob */
		spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		/* when only parent is removed, none of the clusters should be allocated */
		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters);
		CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);
	}

	/* Now, it should be possible to delete snapshot */
	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
	CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == decouple_parent);

	spdk_bs_free_io_channel(channel);
	poll_threads();

	ut_blob_close_and_delete(bs, blob);
}
1174 
/* Run the inflate scenario twice: full inflation first, then parent
 * decoupling only. */
static void
blob_inflate(void)
{
	int pass;

	for (pass = 0; pass < 2; pass++) {
		_blob_inflate(pass == 1);
	}
}
1181 
/*
 * Create a blob, delete it, and verify that a subsequent open of the same
 * blob ID fails with -ENOENT.
 */
static void
blob_delete(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts blob_opts;
	spdk_blob_id blobid;

	/* Create a blob and then delete it. */
	ut_spdk_blob_opts_init(&blob_opts);
	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid > 0);
	blobid = g_blobid;

	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Try to open the blob; it was deleted, so the open must fail */
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOENT);
}
1206 
/*
 * Verify spdk_blob_resize() semantics on a thick blob: resize is rejected on
 * md_ro blobs, growth allocates clusters immediately, and shrinking only
 * releases clusters once metadata is synced.
 */
static void
blob_resize_test(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	uint64_t free_clusters;

	free_clusters = spdk_bs_free_cluster_count(bs);

	blob = ut_blob_create_and_open(bs, NULL);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);

	/* Confirm that resize fails with -EPERM if blob is marked read-only. */
	blob->md_ro = true;
	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EPERM);
	blob->md_ro = false;

	/* The blob started at 0 clusters. Resize it to be 5. */
	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT((free_clusters - 5) == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 5);

	/* Shrink the blob to 3 clusters. This will not actually release
	 * the old clusters until the blob is synced.
	 */
	spdk_blob_resize(blob, 3, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	/* Verify there are still 5 clusters in use */
	CU_ASSERT((free_clusters - 5) == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 3);

	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	/* Now there are only 3 clusters in use */
	CU_ASSERT((free_clusters - 3) == spdk_bs_free_cluster_count(bs));

	/* Resize the blob to be 10 clusters. Growth takes effect immediately. */
	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT((free_clusters - 10) == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 10);

	/* Try to resize the blob to size larger than blobstore. */
	spdk_blob_resize(blob, bs->total_clusters + 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOSPC);

	ut_blob_close_and_delete(bs, blob);
}
1264 
1265 static void
1266 blob_resize_thin_test(void)
1267 {
1268 	struct spdk_blob_store *bs = g_bs;
1269 	struct spdk_blob *blob;
1270 	struct spdk_blob_opts opts;
1271 	struct spdk_io_channel *blob_ch;
1272 	uint64_t free_clusters;
1273 	uint64_t io_units_per_cluster;
1274 	uint64_t offset;
1275 	uint8_t buf1[DEV_BUFFER_BLOCKLEN];
1276 
1277 	free_clusters = spdk_bs_free_cluster_count(bs);
1278 
1279 	blob_ch = spdk_bs_alloc_io_channel(bs);
1280 	SPDK_CU_ASSERT_FATAL(blob_ch != NULL);
1281 
1282 	/* Create blob with thin provisioning enabled */
1283 	ut_spdk_blob_opts_init(&opts);
1284 	opts.thin_provision = true;
1285 	opts.num_clusters = 0;
1286 
1287 	blob = ut_blob_create_and_open(bs, &opts);
1288 	CU_ASSERT((free_clusters) == spdk_bs_free_cluster_count(bs));
1289 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);
1290 	io_units_per_cluster = bs_io_units_per_cluster(blob);
1291 
1292 	/* The blob started at 0 clusters. Resize it to be 6. */
1293 	spdk_blob_resize(blob, 6, blob_op_complete, NULL);
1294 	poll_threads();
1295 	CU_ASSERT(g_bserrno == 0);
1296 	CU_ASSERT((free_clusters) == spdk_bs_free_cluster_count(bs));
1297 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);
1298 
1299 	/* Write on cluster 0,2,4 and 5 of blob */
1300 	for (offset = 0; offset < io_units_per_cluster; offset++) {
1301 		spdk_blob_io_write(blob, blob_ch, buf1, offset, 1, blob_op_complete, NULL);
1302 		poll_threads();
1303 		CU_ASSERT(g_bserrno == 0);
1304 	}
1305 	for (offset = 2 * io_units_per_cluster; offset < 3 * io_units_per_cluster; offset++) {
1306 		spdk_blob_io_write(blob, blob_ch, buf1, offset, 1, blob_op_complete, NULL);
1307 		poll_threads();
1308 		CU_ASSERT(g_bserrno == 0);
1309 	}
1310 	for (offset = 4 * io_units_per_cluster; offset < 5 * io_units_per_cluster; offset++) {
1311 		spdk_blob_io_write(blob, blob_ch, buf1, offset, 1, blob_op_complete, NULL);
1312 		poll_threads();
1313 		CU_ASSERT(g_bserrno == 0);
1314 	}
1315 	for (offset = 5 * io_units_per_cluster; offset < 6 * io_units_per_cluster; offset++) {
1316 		spdk_blob_io_write(blob, blob_ch, buf1, offset, 1, blob_op_complete, NULL);
1317 		poll_threads();
1318 		CU_ASSERT(g_bserrno == 0);
1319 	}
1320 
1321 	/* Check allocated clusters after write */
1322 	CU_ASSERT((free_clusters - 4) == spdk_bs_free_cluster_count(bs));
1323 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 4);
1324 
1325 	/* Shrink the blob to 2 clusters. This will not actually release
1326 	 * the old clusters until the blob is synced.
1327 	 */
1328 	spdk_blob_resize(blob, 2, blob_op_complete, NULL);
1329 	poll_threads();
1330 	CU_ASSERT(g_bserrno == 0);
1331 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 2);
1332 	CU_ASSERT((free_clusters - 4) == spdk_bs_free_cluster_count(bs));
1333 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 1);
1334 
1335 	/* Sync blob: 4 clusters were truncated but only 3 of them was allocated */
1336 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
1337 	poll_threads();
1338 	CU_ASSERT(g_bserrno == 0);
1339 	CU_ASSERT((free_clusters - 1) == spdk_bs_free_cluster_count(bs));
1340 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 2);
1341 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 1);
1342 
1343 	spdk_bs_free_io_channel(blob_ch);
1344 	ut_blob_close_and_delete(bs, blob);
1345 }
1346 
1347 static void
1348 blob_read_only(void)
1349 {
1350 	struct spdk_blob_store *bs;
1351 	struct spdk_bs_dev *dev;
1352 	struct spdk_blob *blob;
1353 	struct spdk_bs_opts opts;
1354 	spdk_blob_id blobid;
1355 	int rc;
1356 
1357 	dev = init_dev();
1358 	spdk_bs_opts_init(&opts, sizeof(opts));
1359 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
1360 
1361 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
1362 	poll_threads();
1363 	CU_ASSERT(g_bserrno == 0);
1364 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
1365 	bs = g_bs;
1366 
1367 	blob = ut_blob_create_and_open(bs, NULL);
1368 	blobid = spdk_blob_get_id(blob);
1369 
1370 	rc = spdk_blob_set_read_only(blob);
1371 	CU_ASSERT(rc == 0);
1372 
1373 	CU_ASSERT(blob->data_ro == false);
1374 	CU_ASSERT(blob->md_ro == false);
1375 
1376 	spdk_blob_sync_md(blob, bs_op_complete, NULL);
1377 	poll_threads();
1378 
1379 	CU_ASSERT(blob->data_ro == true);
1380 	CU_ASSERT(blob->md_ro == true);
1381 	CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);
1382 
1383 	spdk_blob_close(blob, blob_op_complete, NULL);
1384 	poll_threads();
1385 	CU_ASSERT(g_bserrno == 0);
1386 
1387 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
1388 	poll_threads();
1389 	CU_ASSERT(g_bserrno == 0);
1390 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
1391 	blob = g_blob;
1392 
1393 	CU_ASSERT(blob->data_ro == true);
1394 	CU_ASSERT(blob->md_ro == true);
1395 	CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);
1396 
1397 	spdk_blob_close(blob, blob_op_complete, NULL);
1398 	poll_threads();
1399 	CU_ASSERT(g_bserrno == 0);
1400 
1401 	ut_bs_reload(&bs, &opts);
1402 
1403 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
1404 	poll_threads();
1405 	CU_ASSERT(g_bserrno == 0);
1406 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
1407 	blob = g_blob;
1408 
1409 	CU_ASSERT(blob->data_ro == true);
1410 	CU_ASSERT(blob->md_ro == true);
1411 	CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);
1412 
1413 	ut_blob_close_and_delete(bs, blob);
1414 
1415 	spdk_bs_unload(bs, bs_op_complete, NULL);
1416 	poll_threads();
1417 	CU_ASSERT(g_bserrno == 0);
1418 }
1419 
1420 static void
1421 channel_ops(void)
1422 {
1423 	struct spdk_blob_store *bs = g_bs;
1424 	struct spdk_io_channel *channel;
1425 
1426 	channel = spdk_bs_alloc_io_channel(bs);
1427 	CU_ASSERT(channel != NULL);
1428 
1429 	spdk_bs_free_io_channel(channel);
1430 	poll_threads();
1431 }
1432 
1433 static void
1434 blob_write(void)
1435 {
1436 	struct spdk_blob_store *bs = g_bs;
1437 	struct spdk_blob *blob = g_blob;
1438 	struct spdk_io_channel *channel;
1439 	uint64_t io_units_per_cluster;
1440 	uint8_t payload[10 * BLOCKLEN];
1441 
1442 	io_units_per_cluster = spdk_bs_get_cluster_size(bs) / spdk_bs_get_io_unit_size(bs);
1443 
1444 	channel = spdk_bs_alloc_io_channel(bs);
1445 	CU_ASSERT(channel != NULL);
1446 
1447 	/* Write to a blob with 0 size */
1448 	spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL);
1449 	poll_threads();
1450 	CU_ASSERT(g_bserrno == -EINVAL);
1451 
1452 	/* Resize the blob */
1453 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
1454 	poll_threads();
1455 	CU_ASSERT(g_bserrno == 0);
1456 
1457 	/* Confirm that write fails if blob is marked read-only. */
1458 	blob->data_ro = true;
1459 	spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL);
1460 	poll_threads();
1461 	CU_ASSERT(g_bserrno == -EPERM);
1462 	blob->data_ro = false;
1463 
1464 	/* Write to the blob */
1465 	spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL);
1466 	poll_threads();
1467 	CU_ASSERT(g_bserrno == 0);
1468 
1469 	/* Write starting beyond the end */
1470 	spdk_blob_io_write(blob, channel, payload, 5 * io_units_per_cluster, 1, blob_op_complete,
1471 			   NULL);
1472 	poll_threads();
1473 	CU_ASSERT(g_bserrno == -EINVAL);
1474 
1475 	/* Write starting at a valid location but going off the end */
1476 	spdk_blob_io_write(blob, channel, payload, 4 * io_units_per_cluster, io_units_per_cluster + 1,
1477 			   blob_op_complete, NULL);
1478 	poll_threads();
1479 	CU_ASSERT(g_bserrno == -EINVAL);
1480 
1481 	spdk_bs_free_io_channel(channel);
1482 	poll_threads();
1483 }
1484 
1485 static void
1486 blob_read(void)
1487 {
1488 	struct spdk_blob_store *bs = g_bs;
1489 	struct spdk_blob *blob = g_blob;
1490 	struct spdk_io_channel *channel;
1491 	uint64_t io_units_per_cluster;
1492 	uint8_t payload[10 * BLOCKLEN];
1493 
1494 	io_units_per_cluster = spdk_bs_get_cluster_size(bs) / spdk_bs_get_io_unit_size(bs);
1495 
1496 	channel = spdk_bs_alloc_io_channel(bs);
1497 	CU_ASSERT(channel != NULL);
1498 
1499 	/* Read from a blob with 0 size */
1500 	spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL);
1501 	poll_threads();
1502 	CU_ASSERT(g_bserrno == -EINVAL);
1503 
1504 	/* Resize the blob */
1505 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
1506 	poll_threads();
1507 	CU_ASSERT(g_bserrno == 0);
1508 
1509 	/* Confirm that read passes if blob is marked read-only. */
1510 	blob->data_ro = true;
1511 	spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL);
1512 	poll_threads();
1513 	CU_ASSERT(g_bserrno == 0);
1514 	blob->data_ro = false;
1515 
1516 	/* Read from the blob */
1517 	spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL);
1518 	poll_threads();
1519 	CU_ASSERT(g_bserrno == 0);
1520 
1521 	/* Read starting beyond the end */
1522 	spdk_blob_io_read(blob, channel, payload, 5 * io_units_per_cluster, 1, blob_op_complete,
1523 			  NULL);
1524 	poll_threads();
1525 	CU_ASSERT(g_bserrno == -EINVAL);
1526 
1527 	/* Read starting at a valid location but going off the end */
1528 	spdk_blob_io_read(blob, channel, payload, 4 * io_units_per_cluster, io_units_per_cluster + 1,
1529 			  blob_op_complete, NULL);
1530 	poll_threads();
1531 	CU_ASSERT(g_bserrno == -EINVAL);
1532 
1533 	spdk_bs_free_io_channel(channel);
1534 	poll_threads();
1535 }
1536 
1537 static void
1538 blob_rw_verify(void)
1539 {
1540 	struct spdk_blob_store *bs = g_bs;
1541 	struct spdk_blob *blob = g_blob;
1542 	struct spdk_io_channel *channel;
1543 	uint8_t payload_read[10 * BLOCKLEN];
1544 	uint8_t payload_write[10 * BLOCKLEN];
1545 
1546 	channel = spdk_bs_alloc_io_channel(bs);
1547 	CU_ASSERT(channel != NULL);
1548 
1549 	spdk_blob_resize(blob, 32, blob_op_complete, NULL);
1550 	poll_threads();
1551 	CU_ASSERT(g_bserrno == 0);
1552 
1553 	memset(payload_write, 0xE5, sizeof(payload_write));
1554 	spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
1555 	poll_threads();
1556 	CU_ASSERT(g_bserrno == 0);
1557 
1558 	memset(payload_read, 0x00, sizeof(payload_read));
1559 	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
1560 	poll_threads();
1561 	CU_ASSERT(g_bserrno == 0);
1562 	CU_ASSERT(memcmp(payload_write, payload_read, 4 * BLOCKLEN) == 0);
1563 
1564 	spdk_bs_free_io_channel(channel);
1565 	poll_threads();
1566 }
1567 
1568 static void
1569 blob_rw_verify_iov(void)
1570 {
1571 	struct spdk_blob_store *bs = g_bs;
1572 	struct spdk_blob *blob;
1573 	struct spdk_io_channel *channel;
1574 	uint8_t payload_read[10 * BLOCKLEN];
1575 	uint8_t payload_write[10 * BLOCKLEN];
1576 	struct iovec iov_read[3];
1577 	struct iovec iov_write[3];
1578 	void *buf;
1579 	uint32_t first_data_cluster = FIRST_DATA_CLUSTER(bs);
1580 
1581 	channel = spdk_bs_alloc_io_channel(bs);
1582 	CU_ASSERT(channel != NULL);
1583 
1584 	blob = ut_blob_create_and_open(bs, NULL);
1585 
1586 	spdk_blob_resize(blob, 2, blob_op_complete, NULL);
1587 	poll_threads();
1588 	CU_ASSERT(g_bserrno == 0);
1589 
1590 	/*
1591 	 * Manually adjust the offset of the blob's second cluster.  This allows
1592 	 *  us to make sure that the readv/write code correctly accounts for I/O
1593 	 *  that cross cluster boundaries.  Start by asserting that the allocated
1594 	 *  clusters are where we expect before modifying the second cluster.
1595 	 */
1596 	CU_ASSERT(blob->active.clusters[0] == first_data_cluster * 256);
1597 	CU_ASSERT(blob->active.clusters[1] == (first_data_cluster + 1) * 256);
1598 	blob->active.clusters[1] = (first_data_cluster + 2) * 256;
1599 
1600 	memset(payload_write, 0xE5, sizeof(payload_write));
1601 	iov_write[0].iov_base = payload_write;
1602 	iov_write[0].iov_len = 1 * BLOCKLEN;
1603 	iov_write[1].iov_base = payload_write + 1 * BLOCKLEN;
1604 	iov_write[1].iov_len = 5 * BLOCKLEN;
1605 	iov_write[2].iov_base = payload_write + 6 * BLOCKLEN;
1606 	iov_write[2].iov_len = 4 * BLOCKLEN;
1607 	/*
1608 	 * Choose a page offset just before the cluster boundary.  The first 6 pages of payload
1609 	 *  will get written to the first cluster, the last 4 to the second cluster.
1610 	 */
1611 	spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
1612 	poll_threads();
1613 	CU_ASSERT(g_bserrno == 0);
1614 
1615 	memset(payload_read, 0xAA, sizeof(payload_read));
1616 	iov_read[0].iov_base = payload_read;
1617 	iov_read[0].iov_len = 3 * BLOCKLEN;
1618 	iov_read[1].iov_base = payload_read + 3 * BLOCKLEN;
1619 	iov_read[1].iov_len = 4 * BLOCKLEN;
1620 	iov_read[2].iov_base = payload_read + 7 * BLOCKLEN;
1621 	iov_read[2].iov_len = 3 * BLOCKLEN;
1622 	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
1623 	poll_threads();
1624 	CU_ASSERT(g_bserrno == 0);
1625 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * BLOCKLEN) == 0);
1626 
1627 	buf = calloc(1, 256 * BLOCKLEN);
1628 	SPDK_CU_ASSERT_FATAL(buf != NULL);
1629 	/* Check that cluster 2 on "disk" was not modified. */
1630 	CU_ASSERT(memcmp(buf, &g_dev_buffer[(first_data_cluster + 1) * 256 * BLOCKLEN],
1631 			 256 * BLOCKLEN) == 0);
1632 	free(buf);
1633 
1634 	spdk_blob_close(blob, blob_op_complete, NULL);
1635 	poll_threads();
1636 	CU_ASSERT(g_bserrno == 0);
1637 
1638 	spdk_bs_free_io_channel(channel);
1639 	poll_threads();
1640 }
1641 
1642 static uint32_t
1643 bs_channel_get_req_count(struct spdk_io_channel *_channel)
1644 {
1645 	struct spdk_bs_channel *channel = spdk_io_channel_get_ctx(_channel);
1646 	struct spdk_bs_request_set *set;
1647 	uint32_t count = 0;
1648 
1649 	TAILQ_FOREACH(set, &channel->reqs, link) {
1650 		count++;
1651 	}
1652 
1653 	return count;
1654 }
1655 
1656 static void
1657 blob_rw_verify_iov_nomem(void)
1658 {
1659 	struct spdk_blob_store *bs = g_bs;
1660 	struct spdk_blob *blob = g_blob;
1661 	struct spdk_io_channel *channel;
1662 	uint8_t payload_write[10 * BLOCKLEN];
1663 	struct iovec iov_write[3];
1664 	uint32_t req_count;
1665 
1666 	channel = spdk_bs_alloc_io_channel(bs);
1667 	CU_ASSERT(channel != NULL);
1668 
1669 	spdk_blob_resize(blob, 2, blob_op_complete, NULL);
1670 	poll_threads();
1671 	CU_ASSERT(g_bserrno == 0);
1672 
1673 	/*
1674 	 * Choose a page offset just before the cluster boundary.  The first 6 pages of payload
1675 	 *  will get written to the first cluster, the last 4 to the second cluster.
1676 	 */
1677 	iov_write[0].iov_base = payload_write;
1678 	iov_write[0].iov_len = 1 * BLOCKLEN;
1679 	iov_write[1].iov_base = payload_write + 1 * BLOCKLEN;
1680 	iov_write[1].iov_len = 5 * BLOCKLEN;
1681 	iov_write[2].iov_base = payload_write + 6 * BLOCKLEN;
1682 	iov_write[2].iov_len = 4 * BLOCKLEN;
1683 	MOCK_SET(calloc, NULL);
1684 	req_count = bs_channel_get_req_count(channel);
1685 	spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
1686 	poll_threads();
1687 	CU_ASSERT(g_bserrno == -ENOMEM);
1688 	CU_ASSERT(req_count == bs_channel_get_req_count(channel));
1689 	MOCK_CLEAR(calloc);
1690 
1691 	spdk_bs_free_io_channel(channel);
1692 	poll_threads();
1693 }
1694 
1695 static void
1696 blob_rw_iov_read_only(void)
1697 {
1698 	struct spdk_blob_store *bs = g_bs;
1699 	struct spdk_blob *blob = g_blob;
1700 	struct spdk_io_channel *channel;
1701 	uint8_t payload_read[BLOCKLEN];
1702 	uint8_t payload_write[BLOCKLEN];
1703 	struct iovec iov_read;
1704 	struct iovec iov_write;
1705 
1706 	channel = spdk_bs_alloc_io_channel(bs);
1707 	CU_ASSERT(channel != NULL);
1708 
1709 	spdk_blob_resize(blob, 2, blob_op_complete, NULL);
1710 	poll_threads();
1711 	CU_ASSERT(g_bserrno == 0);
1712 
1713 	/* Verify that writev failed if read_only flag is set. */
1714 	blob->data_ro = true;
1715 	iov_write.iov_base = payload_write;
1716 	iov_write.iov_len = sizeof(payload_write);
1717 	spdk_blob_io_writev(blob, channel, &iov_write, 1, 0, 1, blob_op_complete, NULL);
1718 	poll_threads();
1719 	CU_ASSERT(g_bserrno == -EPERM);
1720 
1721 	/* Verify that reads pass if data_ro flag is set. */
1722 	iov_read.iov_base = payload_read;
1723 	iov_read.iov_len = sizeof(payload_read);
1724 	spdk_blob_io_readv(blob, channel, &iov_read, 1, 0, 1, blob_op_complete, NULL);
1725 	poll_threads();
1726 	CU_ASSERT(g_bserrno == 0);
1727 
1728 	spdk_bs_free_io_channel(channel);
1729 	poll_threads();
1730 }
1731 
1732 static void
1733 _blob_io_read_no_split(struct spdk_blob *blob, struct spdk_io_channel *channel,
1734 		       uint8_t *payload, uint64_t offset, uint64_t length,
1735 		       spdk_blob_op_complete cb_fn, void *cb_arg)
1736 {
1737 	uint64_t i;
1738 	uint8_t *buf;
1739 	uint64_t io_unit_size = spdk_bs_get_io_unit_size(blob->bs);
1740 
1741 	/* To be sure that operation is NOT split, read one io_unit at the time */
1742 	buf = payload;
1743 	for (i = 0; i < length; i++) {
1744 		spdk_blob_io_read(blob, channel, buf, i + offset, 1, blob_op_complete, NULL);
1745 		poll_threads();
1746 		if (g_bserrno != 0) {
1747 			/* Pass the error code up */
1748 			break;
1749 		}
1750 		buf += io_unit_size;
1751 	}
1752 
1753 	cb_fn(cb_arg, g_bserrno);
1754 }
1755 
1756 static void
1757 _blob_io_write_no_split(struct spdk_blob *blob, struct spdk_io_channel *channel,
1758 			uint8_t *payload, uint64_t offset, uint64_t length,
1759 			spdk_blob_op_complete cb_fn, void *cb_arg)
1760 {
1761 	uint64_t i;
1762 	uint8_t *buf;
1763 	uint64_t io_unit_size = spdk_bs_get_io_unit_size(blob->bs);
1764 
1765 	/* To be sure that operation is NOT split, write one io_unit at the time */
1766 	buf = payload;
1767 	for (i = 0; i < length; i++) {
1768 		spdk_blob_io_write(blob, channel, buf, i + offset, 1, blob_op_complete, NULL);
1769 		poll_threads();
1770 		if (g_bserrno != 0) {
1771 			/* Pass the error code up */
1772 			break;
1773 		}
1774 		buf += io_unit_size;
1775 	}
1776 
1777 	cb_fn(cb_arg, g_bserrno);
1778 }
1779 
1780 static void
1781 blob_operation_split_rw(void)
1782 {
1783 	struct spdk_blob_store *bs = g_bs;
1784 	struct spdk_blob *blob;
1785 	struct spdk_io_channel *channel;
1786 	struct spdk_blob_opts opts;
1787 	uint64_t cluster_size;
1788 
1789 	uint64_t payload_size;
1790 	uint8_t *payload_read;
1791 	uint8_t *payload_write;
1792 	uint8_t *payload_pattern;
1793 
1794 	uint64_t io_unit_size;
1795 	uint64_t io_units_per_cluster;
1796 	uint64_t io_units_per_payload;
1797 
1798 	uint64_t i;
1799 
1800 	cluster_size = spdk_bs_get_cluster_size(bs);
1801 	io_unit_size = spdk_bs_get_io_unit_size(bs);
1802 	io_units_per_cluster = cluster_size / io_unit_size;
1803 	io_units_per_payload = io_units_per_cluster * 5;
1804 	payload_size = cluster_size * 5;
1805 
1806 	payload_read = malloc(payload_size);
1807 	SPDK_CU_ASSERT_FATAL(payload_read != NULL);
1808 
1809 	payload_write = malloc(payload_size);
1810 	SPDK_CU_ASSERT_FATAL(payload_write != NULL);
1811 
1812 	payload_pattern = malloc(payload_size);
1813 	SPDK_CU_ASSERT_FATAL(payload_pattern != NULL);
1814 
1815 	/* Prepare random pattern to write */
1816 	memset(payload_pattern, 0xFF, payload_size);
1817 	for (i = 0; i < io_units_per_payload; i++) {
1818 		*((uint64_t *)(payload_pattern + io_unit_size * i)) = (i + 1);
1819 	}
1820 
1821 	channel = spdk_bs_alloc_io_channel(bs);
1822 	SPDK_CU_ASSERT_FATAL(channel != NULL);
1823 
1824 	/* Create blob */
1825 	ut_spdk_blob_opts_init(&opts);
1826 	opts.thin_provision = false;
1827 	opts.num_clusters = 5;
1828 
1829 	blob = ut_blob_create_and_open(bs, &opts);
1830 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
1831 
1832 	/* Initial read should return zeroed payload */
1833 	memset(payload_read, 0xFF, payload_size);
1834 	spdk_blob_io_read(blob, channel, payload_read, 0, io_units_per_payload, blob_op_complete, NULL);
1835 	poll_threads();
1836 	CU_ASSERT(g_bserrno == 0);
1837 	CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size));
1838 
1839 	/* Fill whole blob except last page */
1840 	spdk_blob_io_write(blob, channel, payload_pattern, 0, io_units_per_payload - 1,
1841 			   blob_op_complete, NULL);
1842 	poll_threads();
1843 	CU_ASSERT(g_bserrno == 0);
1844 
1845 	/* Write last page with a pattern */
1846 	spdk_blob_io_write(blob, channel, payload_pattern, io_units_per_payload - 1, 1,
1847 			   blob_op_complete, NULL);
1848 	poll_threads();
1849 	CU_ASSERT(g_bserrno == 0);
1850 
1851 	/* Read whole blob and check consistency */
1852 	memset(payload_read, 0xFF, payload_size);
1853 	spdk_blob_io_read(blob, channel, payload_read, 0, io_units_per_payload, blob_op_complete, NULL);
1854 	poll_threads();
1855 	CU_ASSERT(g_bserrno == 0);
1856 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size - io_unit_size) == 0);
1857 	CU_ASSERT(memcmp(payload_pattern, payload_read + payload_size - io_unit_size, io_unit_size) == 0);
1858 
1859 	/* Fill whole blob except first page */
1860 	spdk_blob_io_write(blob, channel, payload_pattern, 1, io_units_per_payload - 1,
1861 			   blob_op_complete, NULL);
1862 	poll_threads();
1863 	CU_ASSERT(g_bserrno == 0);
1864 
1865 	/* Write first page with a pattern */
1866 	spdk_blob_io_write(blob, channel, payload_pattern, 0, 1,
1867 			   blob_op_complete, NULL);
1868 	poll_threads();
1869 	CU_ASSERT(g_bserrno == 0);
1870 
1871 	/* Read whole blob and check consistency */
1872 	memset(payload_read, 0xFF, payload_size);
1873 	spdk_blob_io_read(blob, channel, payload_read, 0, io_units_per_payload, blob_op_complete, NULL);
1874 	poll_threads();
1875 	CU_ASSERT(g_bserrno == 0);
1876 	CU_ASSERT(memcmp(payload_pattern, payload_read + io_unit_size, payload_size - io_unit_size) == 0);
1877 	CU_ASSERT(memcmp(payload_pattern, payload_read, io_unit_size) == 0);
1878 
1879 
1880 	/* Fill whole blob with a pattern (5 clusters) */
1881 
1882 	/* 1. Read test. */
1883 	_blob_io_write_no_split(blob, channel, payload_pattern, 0, io_units_per_payload,
1884 				blob_op_complete, NULL);
1885 	poll_threads();
1886 	CU_ASSERT(g_bserrno == 0);
1887 
1888 	memset(payload_read, 0xFF, payload_size);
1889 	spdk_blob_io_read(blob, channel, payload_read, 0, io_units_per_payload, blob_op_complete, NULL);
1890 	poll_threads();
1891 	poll_threads();
1892 	CU_ASSERT(g_bserrno == 0);
1893 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);
1894 
1895 	/* 2. Write test. */
1896 	spdk_blob_io_write(blob, channel, payload_pattern, 0, io_units_per_payload,
1897 			   blob_op_complete, NULL);
1898 	poll_threads();
1899 	CU_ASSERT(g_bserrno == 0);
1900 
1901 	memset(payload_read, 0xFF, payload_size);
1902 	_blob_io_read_no_split(blob, channel, payload_read, 0, io_units_per_payload, blob_op_complete,
1903 			       NULL);
1904 	poll_threads();
1905 	CU_ASSERT(g_bserrno == 0);
1906 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);
1907 
1908 	spdk_bs_free_io_channel(channel);
1909 	poll_threads();
1910 
1911 	g_blob = NULL;
1912 	g_blobid = 0;
1913 
1914 	free(payload_read);
1915 	free(payload_write);
1916 	free(payload_pattern);
1917 
1918 	ut_blob_close_and_delete(bs, blob);
1919 }
1920 
1921 static void
1922 blob_operation_split_rw_iov(void)
1923 {
1924 	struct spdk_blob_store *bs = g_bs;
1925 	struct spdk_blob *blob;
1926 	struct spdk_io_channel *channel;
1927 	struct spdk_blob_opts opts;
1928 	uint64_t cluster_size;
1929 
1930 	uint64_t payload_size;
1931 	uint8_t *payload_read;
1932 	uint8_t *payload_write;
1933 	uint8_t *payload_pattern;
1934 
1935 	uint64_t io_unit_size;
1936 	uint64_t io_units_per_cluster;
1937 	uint64_t io_units_per_payload;
1938 
1939 	struct iovec iov_read[2];
1940 	struct iovec iov_write[2];
1941 
1942 	uint64_t i, j;
1943 
1944 	cluster_size = spdk_bs_get_cluster_size(bs);
1945 	io_unit_size = spdk_bs_get_io_unit_size(bs);
1946 	io_units_per_cluster = cluster_size / io_unit_size;
1947 	io_units_per_payload = io_units_per_cluster * 5;
1948 	payload_size = cluster_size * 5;
1949 
1950 	payload_read = malloc(payload_size);
1951 	SPDK_CU_ASSERT_FATAL(payload_read != NULL);
1952 
1953 	payload_write = malloc(payload_size);
1954 	SPDK_CU_ASSERT_FATAL(payload_write != NULL);
1955 
1956 	payload_pattern = malloc(payload_size);
1957 	SPDK_CU_ASSERT_FATAL(payload_pattern != NULL);
1958 
1959 	/* Prepare random pattern to write */
1960 	for (i = 0; i < io_units_per_payload; i++) {
1961 		for (j = 0; j < io_unit_size / sizeof(uint64_t); j++) {
1962 			uint64_t *tmp;
1963 
1964 			tmp = (uint64_t *)payload_pattern;
1965 			tmp += ((io_unit_size * i) / sizeof(uint64_t)) + j;
1966 			*tmp = i + 1;
1967 		}
1968 	}
1969 
1970 	channel = spdk_bs_alloc_io_channel(bs);
1971 	SPDK_CU_ASSERT_FATAL(channel != NULL);
1972 
1973 	/* Create blob */
1974 	ut_spdk_blob_opts_init(&opts);
1975 	opts.thin_provision = false;
1976 	opts.num_clusters = 5;
1977 
1978 	blob = ut_blob_create_and_open(bs, &opts);
1979 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
1980 
1981 	/* Initial read should return zeroes payload */
1982 	memset(payload_read, 0xFF, payload_size);
1983 	iov_read[0].iov_base = payload_read;
1984 	iov_read[0].iov_len = cluster_size * 3;
1985 	iov_read[1].iov_base = payload_read + cluster_size * 3;
1986 	iov_read[1].iov_len = cluster_size * 2;
1987 	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, io_units_per_payload, blob_op_complete, NULL);
1988 	poll_threads();
1989 	CU_ASSERT(g_bserrno == 0);
1990 	CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size));
1991 
1992 	/* First of iovs fills whole blob except last io_unit and second of iovs writes last io_unit
1993 	 *  with a pattern. */
1994 	iov_write[0].iov_base = payload_pattern;
1995 	iov_write[0].iov_len = payload_size - io_unit_size;
1996 	iov_write[1].iov_base = payload_pattern;
1997 	iov_write[1].iov_len = io_unit_size;
1998 	spdk_blob_io_writev(blob, channel, iov_write, 2, 0, io_units_per_payload, blob_op_complete, NULL);
1999 	poll_threads();
2000 	CU_ASSERT(g_bserrno == 0);
2001 
2002 	/* Read whole blob and check consistency */
2003 	memset(payload_read, 0xFF, payload_size);
2004 	iov_read[0].iov_base = payload_read;
2005 	iov_read[0].iov_len = cluster_size * 2;
2006 	iov_read[1].iov_base = payload_read + cluster_size * 2;
2007 	iov_read[1].iov_len = cluster_size * 3;
2008 	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, io_units_per_payload, blob_op_complete, NULL);
2009 	poll_threads();
2010 	CU_ASSERT(g_bserrno == 0);
2011 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size - io_unit_size) == 0);
2012 	CU_ASSERT(memcmp(payload_pattern, payload_read + payload_size - io_unit_size, io_unit_size) == 0);
2013 
2014 	/* First of iovs fills only first io_unit and second of iovs writes whole blob except
2015 	 *  first io_unit with a pattern. */
2016 	iov_write[0].iov_base = payload_pattern;
2017 	iov_write[0].iov_len = io_unit_size;
2018 	iov_write[1].iov_base = payload_pattern;
2019 	iov_write[1].iov_len = payload_size - io_unit_size;
2020 	spdk_blob_io_writev(blob, channel, iov_write, 2, 0, io_units_per_payload, blob_op_complete, NULL);
2021 	poll_threads();
2022 	CU_ASSERT(g_bserrno == 0);
2023 
2024 	/* Read whole blob and check consistency */
2025 	memset(payload_read, 0xFF, payload_size);
2026 	iov_read[0].iov_base = payload_read;
2027 	iov_read[0].iov_len = cluster_size * 4;
2028 	iov_read[1].iov_base = payload_read + cluster_size * 4;
2029 	iov_read[1].iov_len = cluster_size;
2030 	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, io_units_per_payload, blob_op_complete, NULL);
2031 	poll_threads();
2032 	CU_ASSERT(g_bserrno == 0);
2033 	CU_ASSERT(memcmp(payload_pattern, payload_read + io_unit_size, payload_size - io_unit_size) == 0);
2034 	CU_ASSERT(memcmp(payload_pattern, payload_read, io_unit_size) == 0);
2035 
2036 
2037 	/* Fill whole blob with a pattern (5 clusters) */
2038 
2039 	/* 1. Read test. */
2040 	_blob_io_write_no_split(blob, channel, payload_pattern, 0, io_units_per_payload,
2041 				blob_op_complete, NULL);
2042 	poll_threads();
2043 	CU_ASSERT(g_bserrno == 0);
2044 
2045 	memset(payload_read, 0xFF, payload_size);
2046 	iov_read[0].iov_base = payload_read;
2047 	iov_read[0].iov_len = cluster_size;
2048 	iov_read[1].iov_base = payload_read + cluster_size;
2049 	iov_read[1].iov_len = cluster_size * 4;
2050 	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, io_units_per_payload, blob_op_complete, NULL);
2051 	poll_threads();
2052 	CU_ASSERT(g_bserrno == 0);
2053 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);
2054 
2055 	/* 2. Write test. */
2056 	iov_write[0].iov_base = payload_read;
2057 	iov_write[0].iov_len = cluster_size * 2;
2058 	iov_write[1].iov_base = payload_read + cluster_size * 2;
2059 	iov_write[1].iov_len = cluster_size * 3;
2060 	spdk_blob_io_writev(blob, channel, iov_write, 2, 0, io_units_per_payload, blob_op_complete, NULL);
2061 	poll_threads();
2062 	CU_ASSERT(g_bserrno == 0);
2063 
2064 	memset(payload_read, 0xFF, payload_size);
2065 	_blob_io_read_no_split(blob, channel, payload_read, 0, io_units_per_payload, blob_op_complete,
2066 			       NULL);
2067 	poll_threads();
2068 	CU_ASSERT(g_bserrno == 0);
2069 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);
2070 
2071 	spdk_bs_free_io_channel(channel);
2072 	poll_threads();
2073 
2074 	g_blob = NULL;
2075 	g_blobid = 0;
2076 
2077 	free(payload_read);
2078 	free(payload_write);
2079 	free(payload_pattern);
2080 
2081 	ut_blob_close_and_delete(bs, blob);
2082 }
2083 
static void
blob_unmap(void)
{
	/*
	 * Verify that resizing a blob down to 0 clusters unmaps (zeroes on the
	 * backing device) only the clusters the blob actually owns, and leaves
	 * clusters it does not own untouched.
	 */
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_io_channel *channel;
	struct spdk_blob_opts opts;
	uint8_t payload[BLOCKLEN];
	uint32_t first_data_cluster = FIRST_DATA_CLUSTER(bs);
	int i;

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;

	blob = ut_blob_create_and_open(bs, &opts);

	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 10);

	memset(payload, 0, sizeof(payload));
	payload[0] = 0xFF;

	/*
	 * Set first byte of every cluster to 0xFF. The markers are written
	 * directly into the backing device buffer, bypassing blob I/O.
	 */
	for (i = 0; i < 10; i++) {
		g_dev_buffer[(first_data_cluster + i) * SPDK_BLOB_OPTS_CLUSTER_SZ] = 0xFF;
	}

	/* Confirm writes: the first block of each cluster reads back the marker
	 * through the blob I/O path. */
	for (i = 0; i < 10; i++) {
		payload[0] = 0;
		spdk_blob_io_read(blob, channel, &payload, i * SPDK_BLOB_OPTS_CLUSTER_SZ / BLOCKLEN, 1,
				  blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(payload[0] == 0xFF);
	}

	/* Mark some clusters as unallocated by poking the blob's internal
	 * cluster map directly; the resize below must skip these entries. */
	blob->active.clusters[1] = 0;
	blob->active.clusters[2] = 0;
	blob->active.clusters[3] = 0;
	blob->active.clusters[6] = 0;
	blob->active.clusters[8] = 0;
	blob->active.num_allocated_clusters -= 5;

	/* Unmap clusters by resizing to 0 */
	spdk_blob_resize(blob, 0, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);

	/* Confirm that only 'allocated' clusters were unmapped: the entries
	 * cleared from the cluster map above (1, 2, 3, 6, 8) keep their 0xFF
	 * marker; all clusters the blob still owned were zeroed. */
	for (i = 0; i < 10; i++) {
		switch (i) {
		case 1:
		case 2:
		case 3:
		case 6:
		case 8:
			CU_ASSERT(g_dev_buffer[(first_data_cluster + i) * SPDK_BLOB_OPTS_CLUSTER_SZ] == 0xFF);
			break;
		default:
			CU_ASSERT(g_dev_buffer[(first_data_cluster + i) * SPDK_BLOB_OPTS_CLUSTER_SZ] == 0);
			break;
		}
	}

	spdk_bs_free_io_channel(channel);
	poll_threads();

	ut_blob_close_and_delete(bs, blob);
}
2167 
2168 static void
2169 blob_iter(void)
2170 {
2171 	struct spdk_blob_store *bs = g_bs;
2172 	struct spdk_blob *blob;
2173 	spdk_blob_id blobid;
2174 	struct spdk_blob_opts blob_opts;
2175 
2176 	spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL);
2177 	poll_threads();
2178 	CU_ASSERT(g_blob == NULL);
2179 	CU_ASSERT(g_bserrno == -ENOENT);
2180 
2181 	ut_spdk_blob_opts_init(&blob_opts);
2182 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
2183 	poll_threads();
2184 	CU_ASSERT(g_bserrno == 0);
2185 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
2186 	blobid = g_blobid;
2187 
2188 	spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL);
2189 	poll_threads();
2190 	CU_ASSERT(g_blob != NULL);
2191 	CU_ASSERT(g_bserrno == 0);
2192 	blob = g_blob;
2193 	CU_ASSERT(spdk_blob_get_id(blob) == blobid);
2194 
2195 	spdk_bs_iter_next(bs, blob, blob_op_with_handle_complete, NULL);
2196 	poll_threads();
2197 	CU_ASSERT(g_blob == NULL);
2198 	CU_ASSERT(g_bserrno == -ENOENT);
2199 }
2200 
static void
blob_xattr(void)
{
	/*
	 * Exercise the public and internal xattr APIs: set/get/remove, the
	 * md_ro protection on mutating calls, name enumeration, and
	 * persistence of an internal xattr across a blobstore reload.
	 */
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob = g_blob;
	spdk_blob_id blobid = spdk_blob_get_id(blob);
	uint64_t length;
	int rc;
	const char *name1, *name2;
	const void *value;
	size_t value_len;
	struct spdk_xattr_names *names;

	/* Test that set_xattr fails if md_ro flag is set. */
	blob->md_ro = true;
	rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
	CU_ASSERT(rc == -EPERM);

	blob->md_ro = false;
	rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
	CU_ASSERT(rc == 0);

	length = 2345;
	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
	CU_ASSERT(rc == 0);

	/* Overwrite "length" xattr. */
	length = 3456;
	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
	CU_ASSERT(rc == 0);

	/* get_xattr should still work even if md_ro flag is set. */
	value = NULL;
	blob->md_ro = true;
	rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(*(uint64_t *)value == length);
	CU_ASSERT(value_len == 8);
	blob->md_ro = false;

	/* Lookup of a name that was never set must fail. */
	rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len);
	CU_ASSERT(rc == -ENOENT);

	/* Enumerate names: exactly "name" and "length" should be present,
	 * in unspecified order. */
	names = NULL;
	rc = spdk_blob_get_xattr_names(blob, &names);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(names != NULL);
	CU_ASSERT(spdk_xattr_names_get_count(names) == 2);
	name1 = spdk_xattr_names_get_name(names, 0);
	SPDK_CU_ASSERT_FATAL(name1 != NULL);
	CU_ASSERT(!strcmp(name1, "name") || !strcmp(name1, "length"));
	name2 = spdk_xattr_names_get_name(names, 1);
	SPDK_CU_ASSERT_FATAL(name2 != NULL);
	CU_ASSERT(!strcmp(name2, "name") || !strcmp(name2, "length"));
	CU_ASSERT(strcmp(name1, name2));
	spdk_xattr_names_free(names);

	/* Confirm that remove_xattr fails if md_ro is set to true. */
	blob->md_ro = true;
	rc = spdk_blob_remove_xattr(blob, "name");
	CU_ASSERT(rc == -EPERM);

	blob->md_ro = false;
	rc = spdk_blob_remove_xattr(blob, "name");
	CU_ASSERT(rc == 0);

	rc = spdk_blob_remove_xattr(blob, "foobar");
	CU_ASSERT(rc == -ENOENT);

	/* Set internal xattr; the final bool argument of blob_set_xattr()
	 * selects the internal namespace, separate from the public one. */
	length = 7898;
	rc = blob_set_xattr(blob, "internal", &length, sizeof(length), true);
	CU_ASSERT(rc == 0);
	rc = blob_get_xattr_value(blob, "internal", &value, &value_len, true);
	CU_ASSERT(rc == 0);
	CU_ASSERT(*(uint64_t *)value == length);
	/* try to get public xattr with same name - must not be visible */
	rc = spdk_blob_get_xattr_value(blob, "internal", &value, &value_len);
	CU_ASSERT(rc != 0);
	rc = blob_get_xattr_value(blob, "internal", &value, &value_len, false);
	CU_ASSERT(rc != 0);
	/* Check if SPDK_BLOB_INTERNAL_XATTR is set */
	CU_ASSERT((blob->invalid_flags & SPDK_BLOB_INTERNAL_XATTR) ==
		  SPDK_BLOB_INTERNAL_XATTR);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();

	/* Check if xattrs are persisted across an unload/load cycle */
	ut_bs_reload(&bs, NULL);

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;

	rc = blob_get_xattr_value(blob, "internal", &value, &value_len, true);
	CU_ASSERT(rc == 0);
	CU_ASSERT(*(uint64_t *)value == length);

	/* try to get internal xattr through public call - must stay hidden */
	rc = spdk_blob_get_xattr_value(blob, "internal", &value, &value_len);
	CU_ASSERT(rc != 0);

	/* Removing the internal xattr should clear the flag again. */
	rc = blob_remove_xattr(blob, "internal", true);
	CU_ASSERT(rc == 0);

	CU_ASSERT((blob->invalid_flags & SPDK_BLOB_INTERNAL_XATTR) == 0);
}
2312 
2313 static void
2314 blob_parse_md(void)
2315 {
2316 	struct spdk_blob_store *bs = g_bs;
2317 	struct spdk_blob *blob;
2318 	int rc;
2319 	uint32_t used_pages;
2320 	size_t xattr_length;
2321 	char *xattr;
2322 
2323 	used_pages = spdk_bit_array_count_set(bs->used_md_pages);
2324 	blob = ut_blob_create_and_open(bs, NULL);
2325 
2326 	/* Create large extent to force more than 1 page of metadata. */
2327 	xattr_length = SPDK_BS_MAX_DESC_SIZE - sizeof(struct spdk_blob_md_descriptor_xattr) -
2328 		       strlen("large_xattr");
2329 	xattr = calloc(xattr_length, sizeof(char));
2330 	SPDK_CU_ASSERT_FATAL(xattr != NULL);
2331 	rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
2332 	free(xattr);
2333 	SPDK_CU_ASSERT_FATAL(rc == 0);
2334 
2335 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
2336 	poll_threads();
2337 
2338 	/* Delete the blob and verify that number of pages returned to before its creation. */
2339 	SPDK_CU_ASSERT_FATAL(used_pages != spdk_bit_array_count_set(bs->used_md_pages));
2340 	ut_blob_close_and_delete(bs, blob);
2341 	SPDK_CU_ASSERT_FATAL(used_pages == spdk_bit_array_count_set(bs->used_md_pages));
2342 }
2343 
static void
bs_load(void)
{
	/*
	 * Cover the main spdk_bs_load() paths: load of a cleanly unloaded
	 * store, rejection of invalid devices/options, device-size checks
	 * against the saved super block, and the size==0 compatibility mode.
	 */
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	spdk_blob_id blobid;
	struct spdk_blob *blob;
	struct spdk_bs_super_block *super_block;
	uint64_t length;
	int rc;
	const void *value;
	size_t value_len;
	struct spdk_bs_opts opts;
	struct spdk_blob_opts blob_opts;

	dev = init_dev();
	spdk_bs_opts_init(&opts, sizeof(opts));
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");

	/* Initialize a new blob store */
	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	/* Try to open a blobid that does not exist */
	spdk_bs_open_blob(bs, 0, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOENT);
	CU_ASSERT(g_blob == NULL);

	/* Create a blob */
	blob = ut_blob_create_and_open(bs, NULL);
	blobid = spdk_blob_get_id(blob);

	/* Try again to open valid blob but without the upper bit set */
	spdk_bs_open_blob(bs, blobid & 0xFFFFFFFF, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOENT);
	CU_ASSERT(g_blob == NULL);

	/* Set some xattrs */
	rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
	CU_ASSERT(rc == 0);

	length = 2345;
	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
	CU_ASSERT(rc == 0);

	/* Resize the blob */
	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	blob = NULL;
	g_blob = NULL;
	g_blobid = SPDK_BLOBID_INVALID;

	/* Unload the blob store */
	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
	g_blob = NULL;
	g_blobid = 0;

	/* A clean unload must leave the on-disk super block marked clean. */
	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
	CU_ASSERT(super_block->clean == 1);

	/* Load should fail for device with an unsupported blocklen */
	dev = init_dev();
	dev->blocklen = g_phys_blocklen * 2;
	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Load should fail when max_md_ops is set to zero */
	dev = init_dev();
	spdk_bs_opts_init(&opts, sizeof(opts));
	opts.max_md_ops = 0;
	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Load should fail when max_channel_ops is set to zero */
	dev = init_dev();
	spdk_bs_opts_init(&opts, sizeof(opts));
	opts.max_channel_ops = 0;
	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Load an existing blob store */
	dev = init_dev();
	spdk_bs_opts_init(&opts, sizeof(opts));
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
	CU_ASSERT(super_block->clean == 1);
	CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen);

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;

	/* Verify that blobstore is marked dirty after first metadata sync.
	 * NOTE(review): poll_threads() is intentionally not called here, so
	 * the sync has not completed and the super block still reads clean
	 * at this point — confirm this is the intended assertion. */
	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	CU_ASSERT(super_block->clean == 1);

	/* Get the xattrs */
	value = NULL;
	rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(*(uint64_t *)value == length);
	CU_ASSERT(value_len == 8);

	rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len);
	CU_ASSERT(rc == -ENOENT);

	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 10);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	blob = NULL;
	g_blob = NULL;

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;

	/* Load should fail: bdev size < saved size */
	dev = init_dev();
	dev->blockcnt /= 2;

	spdk_bs_opts_init(&opts, sizeof(opts));
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();

	CU_ASSERT(g_bserrno == -EILSEQ);

	/* Load should succeed: bdev size > saved size */
	dev = init_dev();
	dev->blockcnt *= 4;

	spdk_bs_opts_init(&opts, sizeof(opts));
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	CU_ASSERT(g_bserrno == 0);
	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();


	/* Test compatibility mode: a super block with size == 0 should still
	 * load, with the size filled in from the device. */

	dev = init_dev();
	super_block->size = 0;
	super_block->crc = blob_md_page_calc_crc(super_block);

	spdk_bs_opts_init(&opts, sizeof(opts));
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	/* Create a blob */
	ut_spdk_blob_opts_init(&blob_opts);
	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);

	/* Blobstore should update number of blocks in super_block */
	CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen);
	CU_ASSERT(super_block->clean == 0);

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(super_block->clean == 1);
	g_bs = NULL;

}
2549 
static void
bs_load_pending_removal(void)
{
	/*
	 * Verify load-time handling of the SNAPSHOT_PENDING_REMOVAL xattr:
	 * - if a blob still references the snapshot, the xattr is dropped on
	 *   load and the snapshot survives;
	 * - if nothing references it anymore, the snapshot is removed on load.
	 */
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts opts;
	struct spdk_blob *blob, *snapshot;
	spdk_blob_id blobid, snapshotid;
	const void *value;
	size_t value_len;
	int rc;

	/* Create blob */
	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);

	/* Create snapshot */
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snapshotid = g_blobid;

	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot = g_blob;

	/* Set SNAPSHOT_PENDING_REMOVAL xattr. md_ro is temporarily cleared
	 * because snapshots are metadata read-only. */
	snapshot->md_ro = false;
	rc = blob_set_xattr(snapshot, SNAPSHOT_PENDING_REMOVAL, &blobid, sizeof(spdk_blob_id), true);
	CU_ASSERT(rc == 0);
	snapshot->md_ro = true;

	spdk_blob_close(snapshot, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Reload blobstore */
	ut_bs_reload(&bs, NULL);

	/* Snapshot should not be removed as blob is still pointing to it */
	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot = g_blob;

	/* SNAPSHOT_PENDING_REMOVAL xattr should be removed during load */
	rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_PENDING_REMOVAL, &value, &value_len);
	CU_ASSERT(rc != 0);

	/* Set SNAPSHOT_PENDING_REMOVAL xattr again */
	snapshot->md_ro = false;
	rc = blob_set_xattr(snapshot, SNAPSHOT_PENDING_REMOVAL, &blobid, sizeof(spdk_blob_id), true);
	CU_ASSERT(rc == 0);
	snapshot->md_ro = true;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;

	/* Remove parent_id from blob by removing BLOB_SNAPSHOT xattr, so the
	 * snapshot becomes unreferenced. */
	blob_remove_xattr(blob, BLOB_SNAPSHOT, true);

	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_blob_close(snapshot, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Reload blobstore */
	ut_bs_reload(&bs, NULL);

	/* Snapshot should be removed as blob is not pointing to it anymore */
	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno != 0);
}
2644 
2645 static void
2646 bs_load_custom_cluster_size(void)
2647 {
2648 	struct spdk_blob_store *bs;
2649 	struct spdk_bs_dev *dev;
2650 	struct spdk_bs_super_block *super_block;
2651 	struct spdk_bs_opts opts;
2652 	uint32_t custom_cluster_size = 4194304; /* 4MiB */
2653 	uint32_t cluster_sz;
2654 	uint64_t total_clusters;
2655 
2656 	dev = init_dev();
2657 	spdk_bs_opts_init(&opts, sizeof(opts));
2658 	opts.cluster_sz = custom_cluster_size;
2659 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2660 
2661 	/* Initialize a new blob store */
2662 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2663 	poll_threads();
2664 	CU_ASSERT(g_bserrno == 0);
2665 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2666 	bs = g_bs;
2667 	cluster_sz = bs->cluster_sz;
2668 	total_clusters = bs->total_clusters;
2669 
2670 	/* Unload the blob store */
2671 	spdk_bs_unload(bs, bs_op_complete, NULL);
2672 	poll_threads();
2673 	CU_ASSERT(g_bserrno == 0);
2674 	g_bs = NULL;
2675 	g_blob = NULL;
2676 	g_blobid = 0;
2677 
2678 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
2679 	CU_ASSERT(super_block->clean == 1);
2680 
2681 	/* Load an existing blob store */
2682 	dev = init_dev();
2683 	spdk_bs_opts_init(&opts, sizeof(opts));
2684 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2685 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2686 	poll_threads();
2687 	CU_ASSERT(g_bserrno == 0);
2688 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2689 	bs = g_bs;
2690 	/* Compare cluster size and number to one after initialization */
2691 	CU_ASSERT(cluster_sz == bs->cluster_sz);
2692 	CU_ASSERT(total_clusters == bs->total_clusters);
2693 
2694 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
2695 	CU_ASSERT(super_block->clean == 1);
2696 	CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen);
2697 
2698 	spdk_bs_unload(bs, bs_op_complete, NULL);
2699 	poll_threads();
2700 	CU_ASSERT(g_bserrno == 0);
2701 	CU_ASSERT(super_block->clean == 1);
2702 	g_bs = NULL;
2703 }
2704 
static void
bs_load_after_failed_grow(void)
{
	/*
	 * Simulate a spdk_bs_grow that crashed mid-way (used-cluster mask
	 * already extended on disk, super block not yet updated) and verify
	 * that a subsequent load recovers: capacity is unchanged and both the
	 * blob and its snapshot remain accessible.
	 */
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct spdk_bs_super_block *super_block;
	struct spdk_bs_opts opts;
	struct spdk_bs_md_mask *mask;
	struct spdk_blob_opts blob_opts;
	struct spdk_blob *blob, *snapshot;
	spdk_blob_id blobid, snapshotid;
	uint64_t total_data_clusters;

	dev = init_dev();
	spdk_bs_opts_init(&opts, sizeof(opts));
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
	/*
	 * The bdev_size is 64M, cluster_sz is 1M, so there are 64 clusters. The
	 * blobstore will create 64 md pages by default. We set num_md_pages to 128,
	 * thus the blobstore could grow to the double size.
	 */
	opts.num_md_pages = 128;

	/* Initialize a new blob store */
	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	/* Create blob */
	ut_spdk_blob_opts_init(&blob_opts);
	blob_opts.num_clusters = 10;

	blob = ut_blob_create_and_open(bs, &blob_opts);
	blobid = spdk_blob_get_id(blob);

	/* Create snapshot */
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snapshotid = g_blobid;

	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot = g_blob;

	spdk_blob_close(snapshot, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* The blob's 10 clusters are the only allocation at this point. */
	total_data_clusters = bs->total_data_clusters;
	CU_ASSERT(bs->num_free_clusters + 10 == total_data_clusters);

	/* Unload the blob store */
	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
	g_blob = NULL;
	g_blobid = 0;

	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
	CU_ASSERT(super_block->clean == 1);

	/* Locate the on-disk used-cluster mask via the super block. */
	mask = (struct spdk_bs_md_mask *)(g_dev_buffer + super_block->used_cluster_mask_start *
					  g_phys_blocklen);
	CU_ASSERT(mask->type == SPDK_MD_MASK_TYPE_USED_CLUSTERS);
	CU_ASSERT(mask->length == super_block->size / super_block->cluster_size);

	/*
	 * We change the mask->length to emulate this scenario: A spdk_bs_grow failed after it changed
	 * the used_cluster bitmap length, but it didn't change the super block yet.
	 */
	mask->length *= 2;

	/* Load an existing blob store; the device is doubled so the oversized
	 * mask fits within it. */
	dev = init_dev();
	dev->blockcnt *= 2;
	spdk_bs_opts_init(&opts, sizeof(opts));
	opts.clear_method = BS_CLEAR_WITH_NONE;
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	/* Check the capacity is the same as before the failed grow */
	CU_ASSERT(bs->total_data_clusters == total_data_clusters);
	CU_ASSERT(bs->num_free_clusters + 10 == total_data_clusters);

	/* Check the blob and the snapshot are still available */
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot = g_blob;

	spdk_blob_close(snapshot, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(super_block->clean == 1);
	g_bs = NULL;
}
2831 
/*
 * Exercise spdk_bs_load() failure paths: an I/O error injected via the
 * power-failure test hooks, and an allocation failure injected by mocking
 * spdk_zmalloc() to return NULL.  In both cases the load must fail with
 * the corresponding errno and leave g_bs unset.
 */
static void
bs_load_error(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct spdk_bs_opts opts;
	struct spdk_power_failure_thresholds thresholds = {};

	dev = init_dev();
	spdk_bs_opts_init(&opts, sizeof(opts));
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");

	/* Initialize a new blob store */
	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	/* Unload the blob store */
	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Load fails with I/O error: fail the second device I/O issued by the load */
	thresholds.general_threshold = 2;
	dev_set_power_failure_thresholds(thresholds);
	g_bserrno = -1;
	dev = init_dev();
	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EIO);
	CU_ASSERT(g_bs == NULL);
	dev_reset_power_failure_event();

	/* Load fails with NOMEM error */
	g_bserrno = -1;
	dev = init_dev();
	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
	/* The mock is installed after the load is started but before it is polled
	 * to completion, so the failing allocation happens inside the load path. */
	MOCK_SET(spdk_zmalloc, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOMEM);
	CU_ASSERT(g_bs == NULL);
	MOCK_CLEAR(spdk_zmalloc);
}
2877 
/*
 * Verify blobstore type (bstype) matching on load.  A load must fail when
 * the expected bstype does not match the one recorded in the super block,
 * while an all-zero expected bstype matches any on-disk type.  Both a
 * blobstore initialized with an explicit "TESTTYPE" and one initialized
 * with default (empty) options are covered.
 */
static void
bs_type(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct spdk_bs_opts opts;

	dev = init_dev();
	spdk_bs_opts_init(&opts, sizeof(opts));
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");

	/* Initialize a new blob store */
	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	/* Unload the blob store */
	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
	g_blob = NULL;
	g_blobid = 0;

	/* Load non existing blobstore type: expected bstype mismatches on-disk "TESTTYPE" */
	dev = init_dev();
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "NONEXISTING");
	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno != 0);

	/* Load with empty blobstore type: matches any on-disk type */
	dev = init_dev();
	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;

	/* Initialize a new blob store with empty bstype (NULL opts -> defaults) */
	dev = init_dev();
	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;

	/* Load non existing blobstore type against the default-initialized store */
	dev = init_dev();
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "NONEXISTING");
	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno != 0);

	/* Load with empty blobstore type */
	dev = init_dev();
	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
}
2960 
/*
 * Verify super block version handling: a load must fail when the on-disk
 * version is newer than supported, while a hand-crafted version-1 super
 * block must still load successfully (backward compatibility).
 */
static void
bs_super_block(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct spdk_bs_super_block *super_block;
	struct spdk_bs_opts opts;
	struct spdk_bs_super_block_ver1 super_block_v1;

	dev = init_dev();
	spdk_bs_opts_init(&opts, sizeof(opts));
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");

	/* Initialize a new blob store */
	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	/* Unload the blob store */
	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
	g_blob = NULL;
	g_blobid = 0;

	/* Load an existing blob store with version newer than supported */
	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
	super_block->version++;

	dev = init_dev();
	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno != 0);

	/* Create a new blob store with super block version 1, written
	 * directly into the device buffer with a valid CRC. */
	dev = init_dev();
	super_block_v1.version = 1;
	memcpy(super_block_v1.signature, "SPDKBLOB", sizeof(super_block_v1.signature));
	super_block_v1.length = 0x1000;
	super_block_v1.clean = 1;
	super_block_v1.super_blob = 0xFFFFFFFFFFFFFFFF;
	super_block_v1.cluster_size = 0x100000;
	super_block_v1.used_page_mask_start = 0x01;
	super_block_v1.used_page_mask_len = 0x01;
	super_block_v1.used_cluster_mask_start = 0x02;
	super_block_v1.used_cluster_mask_len = 0x01;
	super_block_v1.md_start = 0x03;
	super_block_v1.md_len = 0x40;
	memset(super_block_v1.reserved, 0, 4036);
	super_block_v1.crc = blob_md_page_calc_crc(&super_block_v1);
	memcpy(g_dev_buffer, &super_block_v1, sizeof(struct spdk_bs_super_block_ver1));

	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
}
3029 
/*
 * Craft a super block marked dirty (clean == 0) directly in the device
 * buffer and load it, forcing the load path to recover the used cluster
 * count instead of trusting the on-disk masks.  With cluster_size equal
 * to the metadata page size, the expected number of used clusters is the
 * extent of the metadata region (md_start + md_len pages), which the
 * final assertion checks.
 */
static void
bs_test_recover_cluster_count(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct spdk_bs_super_block super_block;
	struct spdk_bs_opts opts;

	dev = init_dev();
	spdk_bs_opts_init(&opts, sizeof(opts));
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");

	super_block.version = 3;
	memcpy(super_block.signature, "SPDKBLOB", sizeof(super_block.signature));
	super_block.length = 0x1000;
	super_block.clean = 0;	/* emulate a dirty shutdown */
	super_block.super_blob = 0xFFFFFFFFFFFFFFFF;
	super_block.cluster_size = g_phys_blocklen;
	super_block.used_page_mask_start = 0x01;
	super_block.used_page_mask_len = 0x01;
	super_block.used_cluster_mask_start = 0x02;
	super_block.used_cluster_mask_len = 0x01;
	super_block.used_blobid_mask_start = 0x03;
	super_block.used_blobid_mask_len = 0x01;
	super_block.md_page_size = g_phys_blocklen;
	super_block.md_start = 0x04;
	super_block.md_len = 0x40;
	memset(super_block.bstype.bstype, 0, sizeof(super_block.bstype.bstype));
	super_block.size = dev->blockcnt * dev->blocklen;
	super_block.io_unit_size = 0x1000;
	memset(super_block.reserved, 0, SPDK_SIZEOF_MEMBER(struct spdk_bs_super_block, reserved));
	super_block.crc = blob_md_page_calc_crc(&super_block);
	memcpy(g_dev_buffer, &super_block, sizeof(struct spdk_bs_super_block));

	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;
	CU_ASSERT(bs->num_free_clusters == bs->total_clusters - (super_block.md_start +
			super_block.md_len));

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
}
3078 
/*
 * Helper for bs_grow_live(): initialize a blobstore, grow it live to
 * new_blockcnt blocks, and verify the on-disk super block and
 * used_cluster mask across unload and reload.
 */
static void
bs_grow_live_size(uint64_t new_blockcnt)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct spdk_bs_super_block super_block;
	struct spdk_bs_opts opts;
	struct spdk_bs_md_mask mask;
	uint64_t bdev_size;
	uint64_t total_data_clusters;

	/*
	 * Further down the test the dev size will be larger than the g_dev_buffer size,
	 * so we set clear_method to NONE, or the blobstore will try to clear the dev and
	 * will write beyond the end of g_dev_buffer.
	 */
	dev = init_dev();
	spdk_bs_opts_init(&opts, sizeof(opts));
	opts.clear_method = BS_CLEAR_WITH_NONE;
	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	/*
	 * Set the dev size according to the new_blockcnt,
	 * then the blobstore will adjust the metadata according to the new size.
	 */
	dev->blockcnt = new_blockcnt;
	bdev_size = dev->blockcnt * dev->blocklen;
	spdk_bs_grow_live(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	total_data_clusters = spdk_bs_total_data_cluster_count(bs);

	/* Make sure the super block is updated. */
	memcpy(&super_block, g_dev_buffer, sizeof(struct spdk_bs_super_block));
	CU_ASSERT(super_block.size == bdev_size);
	CU_ASSERT(super_block.clean == 0);
	/* The used_cluster mask is not written out until first spdk_bs_unload. */
	memcpy(&mask, g_dev_buffer + super_block.used_cluster_mask_start * g_phys_blocklen,
	       sizeof(struct spdk_bs_md_mask));
	CU_ASSERT(mask.type == 0);
	CU_ASSERT(mask.length == 0);

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;

	/* Make sure all metadata is correct, super block and used_cluster mask. */
	memcpy(&super_block, g_dev_buffer, sizeof(struct spdk_bs_super_block));
	CU_ASSERT(super_block.size == bdev_size);
	CU_ASSERT(super_block.clean == 1);
	memcpy(&mask, g_dev_buffer + super_block.used_cluster_mask_start * g_phys_blocklen,
	       sizeof(struct spdk_bs_md_mask));
	CU_ASSERT(mask.type == SPDK_MD_MASK_TYPE_USED_CLUSTERS);
	/* One mask bit per cluster; default cluster size is 1 MiB here. */
	CU_ASSERT(mask.length == bdev_size / (1 * 1024 * 1024));

	/* Load blobstore and check the cluster counts again. */
	dev = init_dev();
	dev->blockcnt = new_blockcnt;
	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	CU_ASSERT(super_block.clean == 1);
	bs = g_bs;
	CU_ASSERT(total_data_clusters == spdk_bs_total_data_cluster_count(bs));

	/* Perform grow without change in size, expected pass. */
	spdk_bs_grow_live(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(total_data_clusters == spdk_bs_total_data_cluster_count(bs));
	memcpy(&super_block, g_dev_buffer, sizeof(struct spdk_bs_super_block));
	CU_ASSERT(super_block.size == bdev_size);
	CU_ASSERT(super_block.clean == 1);

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
}
3164 
3165 static void
3166 bs_grow_live(void)
3167 {
3168 	/* No change expected */
3169 	bs_grow_live_size(DEV_BUFFER_BLOCKCNT);
3170 
3171 	/* Size slightly increased, but not enough to increase cluster count */
3172 	bs_grow_live_size(DEV_BUFFER_BLOCKCNT + 1);
3173 
3174 	/* Size doubled, increasing the cluster count */
3175 	bs_grow_live_size(DEV_BUFFER_BLOCKCNT * 2);
3176 }
3177 
/*
 * Verify spdk_bs_grow_live() failure paths: shrinking the device fails
 * super block validation (-EILSEQ), and growing beyond the metadata space
 * reserved for the used_cluster mask fails with -ENOSPC.  Neither failed
 * attempt may change the blobstore or its on-disk metadata.
 */
static void
bs_grow_live_no_space(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct spdk_bs_super_block super_block;
	struct spdk_bs_opts opts;
	struct spdk_bs_md_mask mask;
	uint64_t bdev_size_init;
	uint64_t total_data_clusters, max_clusters;

	/*
	 * Further down the test the dev size will be larger than the g_dev_buffer size,
	 * so we set clear_method to NONE, or the blobstore will try to clear the dev and
	 * will write beyond the end of g_dev_buffer.
	 */
	dev = init_dev();
	bdev_size_init = dev->blockcnt * dev->blocklen;
	spdk_bs_opts_init(&opts, sizeof(opts));
	opts.clear_method = BS_CLEAR_WITH_NONE;
	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;
	total_data_clusters = spdk_bs_total_data_cluster_count(bs);

	/*
	 * The default dev size is 64M, here we set the dev size to 32M,
	 * expecting EILSEQ due to super_block validation and no change in blobstore.
	 */
	dev->blockcnt = (32L * 1024L * 1024L) / dev->blocklen;
	spdk_bs_grow_live(bs, bs_op_complete, NULL);
	poll_threads();
	/* This error code comes from bs_super_validate() */
	CU_ASSERT(g_bserrno == -EILSEQ);
	CU_ASSERT(total_data_clusters == spdk_bs_total_data_cluster_count(bs));
	memcpy(&super_block, g_dev_buffer, sizeof(struct spdk_bs_super_block));
	CU_ASSERT(super_block.size == bdev_size_init);

	/*
	 * Blobstore in this test has only space for single md_page for used_clusters,
	 * which fits 1 bit per cluster minus the md header.
	 *
	 * Dev size is increased to exceed the reserved space for the used_cluster_mask
	 * in the metadata, expecting ENOSPC and no change in blobstore.
	 */
	max_clusters = (spdk_bs_get_page_size(bs) - sizeof(struct spdk_bs_md_mask)) * 8;
	max_clusters += 1;	/* one cluster past what a single mask page can track */
	dev->blockcnt = (max_clusters * spdk_bs_get_cluster_size(bs)) / dev->blocklen;
	spdk_bs_grow_live(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOSPC);
	CU_ASSERT(total_data_clusters == spdk_bs_total_data_cluster_count(bs));
	memcpy(&super_block, g_dev_buffer, sizeof(struct spdk_bs_super_block));
	CU_ASSERT(super_block.size == bdev_size_init);

	/*
	 * No change should have occurred for the duration of the test,
	 * unload blobstore and check metadata.
	 */
	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;

	/* Make sure all metadata is correct, super block and used_cluster mask. */
	memcpy(&super_block, g_dev_buffer, sizeof(struct spdk_bs_super_block));
	CU_ASSERT(super_block.size == bdev_size_init);
	CU_ASSERT(super_block.clean == 1);
	memcpy(&mask, g_dev_buffer + super_block.used_cluster_mask_start * g_phys_blocklen,
	       sizeof(struct spdk_bs_md_mask));
	CU_ASSERT(mask.type == SPDK_MD_MASK_TYPE_USED_CLUSTERS);
	CU_ASSERT(mask.length == bdev_size_init / (1 * 1024 * 1024));

	/* Load blobstore and check the cluster counts again. */
	dev = init_dev();
	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;
	CU_ASSERT(total_data_clusters == spdk_bs_total_data_cluster_count(bs));

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
}
3267 
/*
 * Verify offline grow via spdk_bs_grow(): after doubling the device size,
 * the super block size and the used_cluster mask written to disk must
 * reflect the new capacity.
 */
static void
bs_test_grow(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct spdk_bs_super_block super_block;
	struct spdk_bs_opts opts;
	struct spdk_bs_md_mask mask;
	uint64_t bdev_size;

	dev = init_dev();
	bdev_size = dev->blockcnt * dev->blocklen;
	spdk_bs_opts_init(&opts, sizeof(opts));
	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;

	/*
	 * To make sure all the metadata are updated to the disk,
	 * we check the g_dev_buffer after spdk_bs_unload.
	 */
	memcpy(&super_block, g_dev_buffer, sizeof(struct spdk_bs_super_block));
	CU_ASSERT(super_block.size == bdev_size);

	/*
	 * Make sure the used_cluster mask is correct.
	 */
	memcpy(&mask, g_dev_buffer + super_block.used_cluster_mask_start * g_phys_blocklen,
	       sizeof(struct spdk_bs_md_mask));
	CU_ASSERT(mask.type == SPDK_MD_MASK_TYPE_USED_CLUSTERS);
	/* One mask bit per cluster; default cluster size is 1 MiB here. */
	CU_ASSERT(mask.length == bdev_size / (1 * 1024 * 1024));

	/*
	 * The default dev size is 64M, here we set the dev size to 128M,
	 * then the blobstore will adjust the metadata according to the new size.
	 * The dev size is larger than the g_dev_buffer size, so we set clear_method
	 * to NONE, or the blobstore will try to clear the dev and will write beyond
	 * the end of g_dev_buffer.
	 */
	dev = init_dev();
	dev->blockcnt = (128L * 1024L * 1024L) / dev->blocklen;
	bdev_size = dev->blockcnt * dev->blocklen;
	spdk_bs_opts_init(&opts, sizeof(opts));
	opts.clear_method = BS_CLEAR_WITH_NONE;
	spdk_bs_grow(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	/*
	 * After spdk_bs_grow, all metadata are updated to the disk.
	 * So we can check g_dev_buffer now.
	 */
	memcpy(&super_block, g_dev_buffer, sizeof(struct spdk_bs_super_block));
	CU_ASSERT(super_block.size == bdev_size);

	/*
	 * Make sure the used_cluster mask has been updated according to the bdev size
	 */
	memcpy(&mask, g_dev_buffer + super_block.used_cluster_mask_start * g_phys_blocklen,
	       sizeof(struct spdk_bs_md_mask));
	CU_ASSERT(mask.type == SPDK_MD_MASK_TYPE_USED_CLUSTERS);
	CU_ASSERT(mask.length == bdev_size / (1 * 1024 * 1024));

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
}
3345 
3346 /*
3347  * Create a blobstore and then unload it.
3348  */
3349 static void
3350 bs_unload(void)
3351 {
3352 	struct spdk_blob_store *bs = g_bs;
3353 	struct spdk_blob *blob;
3354 	struct spdk_power_failure_thresholds thresholds = {};
3355 
3356 	/* Create a blob and open it. */
3357 	blob = ut_blob_create_and_open(bs, NULL);
3358 
3359 	/* Try to unload blobstore, should fail with open blob */
3360 	g_bserrno = -1;
3361 	spdk_bs_unload(bs, bs_op_complete, NULL);
3362 	poll_threads();
3363 	CU_ASSERT(g_bserrno == -EBUSY);
3364 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3365 
3366 	/* Close the blob, then successfully unload blobstore */
3367 	g_bserrno = -1;
3368 	spdk_blob_close(blob, blob_op_complete, NULL);
3369 	poll_threads();
3370 	CU_ASSERT(g_bserrno == 0);
3371 
3372 	/* Try to unload blobstore, should fail due to I/O error */
3373 	thresholds.general_threshold = 2;
3374 	dev_set_power_failure_thresholds(thresholds);
3375 	g_bserrno = -1;
3376 	spdk_bs_unload(bs, bs_op_complete, NULL);
3377 	poll_threads();
3378 	CU_ASSERT(g_bserrno == -EIO);
3379 	dev_reset_power_failure_event();
3380 
3381 	/* Try to unload blobstore, should fail with spdk_zmalloc returning NULL */
3382 	g_bserrno = -1;
3383 	spdk_bs_unload(bs, bs_op_complete, NULL);
3384 	MOCK_SET(spdk_zmalloc, NULL);
3385 	poll_threads();
3386 	CU_ASSERT(g_bserrno == -ENOMEM);
3387 	MOCK_CLEAR(spdk_zmalloc);
3388 }
3389 
3390 /*
3391  * Create a blobstore with a cluster size different than the default, and ensure it is
3392  *  persisted.
3393  */
3394 static void
3395 bs_cluster_sz(void)
3396 {
3397 	struct spdk_blob_store *bs;
3398 	struct spdk_bs_dev *dev;
3399 	struct spdk_bs_opts opts;
3400 	uint32_t cluster_sz;
3401 
3402 	/* Set cluster size to zero */
3403 	dev = init_dev();
3404 	spdk_bs_opts_init(&opts, sizeof(opts));
3405 	opts.cluster_sz = 0;
3406 
3407 	/* Initialize a new blob store */
3408 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
3409 	poll_threads();
3410 	CU_ASSERT(g_bserrno == -EINVAL);
3411 	SPDK_CU_ASSERT_FATAL(g_bs == NULL);
3412 
3413 	/*
3414 	 * Set cluster size to blobstore page size,
3415 	 * to work it is required to be at least twice the blobstore page size.
3416 	 */
3417 	dev = init_dev();
3418 	spdk_bs_opts_init(&opts, sizeof(opts));
3419 	opts.cluster_sz = g_phys_blocklen;
3420 
3421 	/* Initialize a new blob store */
3422 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
3423 	poll_threads();
3424 	CU_ASSERT(g_bserrno == -ENOMEM);
3425 	SPDK_CU_ASSERT_FATAL(g_bs == NULL);
3426 
3427 	/*
3428 	 * Set cluster size to lower than page size,
3429 	 * to work it is required to be at least twice the blobstore page size.
3430 	 */
3431 	dev = init_dev();
3432 	spdk_bs_opts_init(&opts, sizeof(opts));
3433 	opts.cluster_sz = g_phys_blocklen - 1;
3434 
3435 	/* Initialize a new blob store */
3436 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
3437 	poll_threads();
3438 	CU_ASSERT(g_bserrno == -EINVAL);
3439 	SPDK_CU_ASSERT_FATAL(g_bs == NULL);
3440 
3441 	/* Set cluster size to twice the default */
3442 	dev = init_dev();
3443 	spdk_bs_opts_init(&opts, sizeof(opts));
3444 	opts.cluster_sz *= 2;
3445 	cluster_sz = opts.cluster_sz;
3446 
3447 	/* Initialize a new blob store */
3448 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
3449 	poll_threads();
3450 	CU_ASSERT(g_bserrno == 0);
3451 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3452 	bs = g_bs;
3453 
3454 	CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz);
3455 
3456 	ut_bs_reload(&bs, &opts);
3457 
3458 	CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz);
3459 
3460 	spdk_bs_unload(bs, bs_op_complete, NULL);
3461 	poll_threads();
3462 	CU_ASSERT(g_bserrno == 0);
3463 	g_bs = NULL;
3464 }
3465 
3466 /*
3467  * Create a blobstore, reload it and ensure total usable cluster count
3468  *  stays the same.
3469  */
3470 static void
3471 bs_usable_clusters(void)
3472 {
3473 	struct spdk_blob_store *bs = g_bs;
3474 	struct spdk_blob *blob;
3475 	uint32_t clusters;
3476 	int i;
3477 
3478 
3479 	clusters = spdk_bs_total_data_cluster_count(bs);
3480 
3481 	ut_bs_reload(&bs, NULL);
3482 
3483 	CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters);
3484 
3485 	/* Create and resize blobs to make sure that usable cluster count won't change */
3486 	for (i = 0; i < 4; i++) {
3487 		g_bserrno = -1;
3488 		g_blobid = SPDK_BLOBID_INVALID;
3489 		blob = ut_blob_create_and_open(bs, NULL);
3490 
3491 		spdk_blob_resize(blob, 10, blob_op_complete, NULL);
3492 		poll_threads();
3493 		CU_ASSERT(g_bserrno == 0);
3494 
3495 		g_bserrno = -1;
3496 		spdk_blob_close(blob, blob_op_complete, NULL);
3497 		poll_threads();
3498 		CU_ASSERT(g_bserrno == 0);
3499 
3500 		CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters);
3501 	}
3502 
3503 	/* Reload the blob store to make sure that nothing changed */
3504 	ut_bs_reload(&bs, NULL);
3505 
3506 	CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters);
3507 }
3508 
3509 /*
3510  * Test resizing of the metadata blob.  This requires creating enough blobs
3511  *  so that one cluster is not enough to fit the metadata for those blobs.
3512  *  To induce this condition to happen more quickly, we reduce the cluster
3513  *  size to 16KB, which means only 4 4KB blob metadata pages can fit.
3514  */
3515 static void
3516 bs_resize_md(void)
3517 {
3518 	struct spdk_blob_store *bs;
3519 	const int CLUSTER_PAGE_COUNT = 4;
3520 	const int NUM_BLOBS = CLUSTER_PAGE_COUNT * 4;
3521 	struct spdk_bs_dev *dev;
3522 	struct spdk_bs_opts opts;
3523 	struct spdk_blob *blob;
3524 	struct spdk_blob_opts blob_opts;
3525 	uint32_t cluster_sz;
3526 	spdk_blob_id blobids[NUM_BLOBS];
3527 	int i;
3528 
3529 
3530 	dev = init_dev();
3531 	spdk_bs_opts_init(&opts, sizeof(opts));
3532 	opts.cluster_sz = CLUSTER_PAGE_COUNT * g_phys_blocklen;
3533 	cluster_sz = opts.cluster_sz;
3534 
3535 	/* Initialize a new blob store */
3536 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
3537 	poll_threads();
3538 	CU_ASSERT(g_bserrno == 0);
3539 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3540 	bs = g_bs;
3541 
3542 	CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz);
3543 
3544 	ut_spdk_blob_opts_init(&blob_opts);
3545 
3546 	for (i = 0; i < NUM_BLOBS; i++) {
3547 		g_bserrno = -1;
3548 		g_blobid = SPDK_BLOBID_INVALID;
3549 		spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
3550 		poll_threads();
3551 		CU_ASSERT(g_bserrno == 0);
3552 		CU_ASSERT(g_blobid !=  SPDK_BLOBID_INVALID);
3553 		blobids[i] = g_blobid;
3554 	}
3555 
3556 	ut_bs_reload(&bs, &opts);
3557 
3558 	CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz);
3559 
3560 	for (i = 0; i < NUM_BLOBS; i++) {
3561 		g_bserrno = -1;
3562 		g_blob = NULL;
3563 		spdk_bs_open_blob(bs, blobids[i], blob_op_with_handle_complete, NULL);
3564 		poll_threads();
3565 		CU_ASSERT(g_bserrno == 0);
3566 		CU_ASSERT(g_blob !=  NULL);
3567 		blob = g_blob;
3568 		g_bserrno = -1;
3569 		spdk_blob_close(blob, blob_op_complete, NULL);
3570 		poll_threads();
3571 		CU_ASSERT(g_bserrno == 0);
3572 	}
3573 
3574 	spdk_bs_unload(bs, bs_op_complete, NULL);
3575 	poll_threads();
3576 	CU_ASSERT(g_bserrno == 0);
3577 	g_bs = NULL;
3578 }
3579 
/*
 * Verify spdk_bs_destroy(): a destroy attempt with an injected I/O error
 * fails with -EIO; a subsequent destroy succeeds, after which loading the
 * device must fail since the blobstore no longer exists.
 */
static void
bs_destroy(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct spdk_power_failure_thresholds thresholds = {};

	/* Initialize a new blob store */
	dev = init_dev();
	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	/* Destroy the blobstore, should fail due to I/O error on the very first I/O */
	thresholds.general_threshold = 1;
	dev_set_power_failure_thresholds(thresholds);
	g_bserrno = -1;
	spdk_bs_destroy(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EIO);
	dev_reset_power_failure_event();

	/* Destroy the blob store */
	g_bserrno = -1;
	spdk_bs_destroy(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Loading an non-existent blob store should fail. */
	g_bs = NULL;
	dev = init_dev();

	g_bserrno = 0;
	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno != 0);
}
3619 
/* Try to hit all of the corner cases associated with serializing
 * a blob to disk
 */
static void
blob_serialize_test(void)
{
	struct spdk_bs_dev *dev;
	struct spdk_bs_opts opts;
	struct spdk_blob_store *bs;
	spdk_blob_id blobid[2];
	struct spdk_blob *blob[2];
	uint64_t i;
	char *value;
	int rc;

	dev = init_dev();

	/* Initialize a new blobstore with very small clusters */
	spdk_bs_opts_init(&opts, sizeof(opts));
	opts.cluster_sz = dev->blocklen * 8;
	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	/* Create and open two blobs */
	for (i = 0; i < 2; i++) {
		blob[i] = ut_blob_create_and_open(bs, NULL);
		blobid[i] = spdk_blob_get_id(blob[i]);

		/* Set a fairly large xattr on both blobs to eat up
		 * metadata space.  Only half the buffer is filled with
		 * the blob index; the rest stays zero from calloc.
		 */
		value = calloc(dev->blocklen - 64, sizeof(char));
		SPDK_CU_ASSERT_FATAL(value != NULL);
		memset(value, i, dev->blocklen / 2);
		rc = spdk_blob_set_xattr(blob[i], "name", value, dev->blocklen - 64);
		CU_ASSERT(rc == 0);
		free(value);
	}

	/* Resize the blobs, alternating 1 cluster at a time.
	 * This thwarts run length encoding and will cause spill
	 * over of the extents.
	 */
	for (i = 0; i < 6; i++) {
		spdk_blob_resize(blob[i % 2], (i / 2) + 1, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
	}

	for (i = 0; i < 2; i++) {
		spdk_blob_sync_md(blob[i], blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
	}

	/* Close the blobs */
	for (i = 0; i < 2; i++) {
		spdk_blob_close(blob[i], blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
	}

	ut_bs_reload(&bs, &opts);

	for (i = 0; i < 2; i++) {
		blob[i] = NULL;

		spdk_bs_open_blob(bs, blobid[i], blob_op_with_handle_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(g_blob != NULL);
		blob[i] = g_blob;

		/* Each blob was resized to 3 clusters in the alternating loop above */
		CU_ASSERT(spdk_blob_get_num_clusters(blob[i]) == 3);

		spdk_blob_close(blob[i], blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
	}

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
}
3708 
/*
 * Corrupt the CRC of a blob's metadata page directly in the device buffer
 * and verify that both opening and deleting the blob fail with -EINVAL.
 */
static void
blob_crc(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	spdk_blob_id blobid;
	uint32_t page_num;
	int index;
	struct spdk_blob_md_page *page;

	blob = ut_blob_create_and_open(bs, NULL);
	blobid = spdk_blob_get_id(blob);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Locate the blob's metadata page on the device and zero its CRC */
	page_num = bs_blobid_to_page(blobid);
	index = g_phys_blocklen * (bs->md_start + page_num);
	page = (struct spdk_blob_md_page *)&g_dev_buffer[index];
	page->crc = 0;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);
	CU_ASSERT(g_blob == NULL);
	g_bserrno = 0;

	/* Delete also has to read the metadata, so it fails the same way */
	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);
}
3741 
/*
 * Zero the super block CRC on the device and verify that loading the
 * blobstore fails with -EILSEQ.
 */
static void
super_block_crc(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct spdk_bs_super_block *super_block;

	dev = init_dev();
	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;

	/* Corrupt the CRC field of the on-disk super block */
	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
	super_block->crc = 0;
	dev = init_dev();

	/* Load an existing blob store */
	g_bserrno = 0;
	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EILSEQ);
}
3771 
3772 /* For blob dirty shutdown test case we do the following sub-test cases:
3773  * 1 Initialize new blob store and create 1 super blob with some xattrs, then we
3774  *   dirty shutdown and reload the blob store and verify the xattrs.
3775  * 2 Resize the blob from 10 clusters to 20 clusters and then dirty shutdown,
3776  *   reload the blob store and verify the clusters number.
3777  * 3 Create the second blob and then dirty shutdown, reload the blob store
3778  *   and verify the second blob.
3779  * 4 Delete the second blob and then dirty shutdown, reload the blob store
3780  *   and verify the second blob is invalid.
 * 5 Create the second blob again and also create the third blob, modify the
 *   md of the second blob to make that md invalid, then dirty shutdown and
 *   reload the blob store; verify the second blob, it should be invalid, and
 *   also verify the third blob, it should be correct.
3785  */
static void
blob_dirty_shutdown(void)
{
	int rc;
	int index;
	struct spdk_blob_store *bs = g_bs;
	spdk_blob_id blobid1, blobid2, blobid3;
	struct spdk_blob *blob = g_blob;
	uint64_t length;
	uint64_t free_clusters;
	const void *value;
	size_t value_len;
	uint32_t page_num;
	struct spdk_blob_md_page *page;
	struct spdk_blob_opts blob_opts;

	/* Sub-test 1: give the first blob xattrs (including one spanning its
	 * own md page), make it the super blob, dirty shutdown, then verify
	 * the xattrs and super-blob setting survive recovery. */
	/* Create first blob */
	blobid1 = spdk_blob_get_id(blob);

	/* Set some xattrs */
	rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
	CU_ASSERT(rc == 0);

	length = 2345;
	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
	CU_ASSERT(rc == 0);

	/* Put xattr that fits exactly single page.
	 * This results in adding additional pages to MD.
	 * First is flags and smaller xattr, second the large xattr,
	 * third are just the extents.
	 */
	size_t xattr_length = 4072 - sizeof(struct spdk_blob_md_descriptor_xattr) -
			      strlen("large_xattr");
	char *xattr = calloc(xattr_length, sizeof(char));
	SPDK_CU_ASSERT_FATAL(xattr != NULL);
	rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
	free(xattr);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	/* Resize the blob */
	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Set the blob as the super blob */
	spdk_bs_set_super(bs, blobid1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Snapshot the free-cluster count so recovery can be checked against it. */
	free_clusters = spdk_bs_free_cluster_count(bs);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	blob = NULL;
	g_blob = NULL;
	g_blobid = SPDK_BLOBID_INVALID;

	/* Simulate a dirty shutdown (no clean unload) and recover. */
	ut_bs_dirty_load(&bs, NULL);

	/* Get the super blob */
	spdk_bs_get_super(bs, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(blobid1 == g_blobid);

	spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;

	/* Recovery must reconstruct the same cluster allocation. */
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));

	/* Get the xattrs */
	value = NULL;
	rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(*(uint64_t *)value == length);
	CU_ASSERT(value_len == 8);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);

	/* Sub-test 2: grow the blob to 20 clusters, dirty shutdown again and
	 * verify the new size is recovered. */
	/* Resize the blob */
	spdk_blob_resize(blob, 20, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	free_clusters = spdk_bs_free_cluster_count(bs);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	blob = NULL;
	g_blob = NULL;
	g_blobid = SPDK_BLOBID_INVALID;

	ut_bs_dirty_load(&bs, NULL);

	spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 20);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	blob = NULL;
	g_blob = NULL;
	g_blobid = SPDK_BLOBID_INVALID;

	/* Sub-test 3: create a second blob with xattrs, dirty shutdown, and
	 * verify the second blob is fully recovered. */
	/* Create second blob */
	blob = ut_blob_create_and_open(bs, NULL);
	blobid2 = spdk_blob_get_id(blob);

	/* Set some xattrs */
	rc = spdk_blob_set_xattr(blob, "name", "log1.txt", strlen("log1.txt") + 1);
	CU_ASSERT(rc == 0);

	length = 5432;
	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
	CU_ASSERT(rc == 0);

	/* Resize the blob */
	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	free_clusters = spdk_bs_free_cluster_count(bs);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	blob = NULL;
	g_blob = NULL;
	g_blobid = SPDK_BLOBID_INVALID;

	ut_bs_dirty_load(&bs, NULL);

	spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;

	/* Get the xattrs */
	value = NULL;
	rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(*(uint64_t *)value == length);
	CU_ASSERT(value_len == 8);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));

	/* Sub-test 4: delete the second blob, dirty shutdown, and verify the
	 * deletion persists - opening blobid2 must fail while blobid1 still opens. */
	ut_blob_close_and_delete(bs, blob);

	free_clusters = spdk_bs_free_cluster_count(bs);

	ut_bs_dirty_load(&bs, NULL);

	spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno != 0);
	CU_ASSERT(g_blob == NULL);

	spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Sub-test 5: recreate the second blob plus a third one, corrupt the
	 * second blob's md, dirty shutdown, and verify only the third blob
	 * survives recovery. Start from a clean reload. */
	ut_bs_reload(&bs, NULL);

	/* Create second blob */
	ut_spdk_blob_opts_init(&blob_opts);
	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid2 = g_blobid;

	/* Create third blob */
	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid3 = g_blobid;

	spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;

	/* Set some xattrs for second blob */
	rc = spdk_blob_set_xattr(blob, "name", "log1.txt", strlen("log1.txt") + 1);
	CU_ASSERT(rc == 0);

	length = 5432;
	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
	CU_ASSERT(rc == 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	blob = NULL;
	g_blob = NULL;
	g_blobid = SPDK_BLOBID_INVALID;

	spdk_bs_open_blob(bs, blobid3, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;

	/* Set some xattrs for third blob */
	rc = spdk_blob_set_xattr(blob, "name", "log2.txt", strlen("log2.txt") + 1);
	CU_ASSERT(rc == 0);

	length = 5432;
	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
	CU_ASSERT(rc == 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	blob = NULL;
	g_blob = NULL;
	g_blobid = SPDK_BLOBID_INVALID;

	/* Mark second blob as invalid */
	page_num = bs_blobid_to_page(blobid2);

	/* Bump sequence_num on the blob's first md page and re-CRC it: a
	 * first page with a nonzero sequence number is not a valid md chain
	 * start, so recovery must treat the blob as corrupt. */
	index = g_phys_blocklen * (bs->md_start + page_num);
	page = (struct spdk_blob_md_page *)&g_dev_buffer[index];
	page->sequence_num = 1;
	page->crc = blob_md_page_calc_crc(page);

	free_clusters = spdk_bs_free_cluster_count(bs);

	ut_bs_dirty_load(&bs, NULL);

	/* The corrupted second blob must not open... */
	spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno != 0);
	CU_ASSERT(g_blob == NULL);

	/* ...while the intact third blob opens normally. */
	spdk_bs_open_blob(bs, blobid3, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;

	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
}
4050 
/* Verify how unknown (future) flag bits in blob metadata are handled on
 * load: an unknown invalid flag prevents opening, an unknown data_ro flag
 * makes the blob data- and md-read-only, and an unknown md_ro flag makes
 * only the metadata read-only.
 */
static void
blob_flags(void)
{
	struct spdk_blob_store *bs = g_bs;
	spdk_blob_id blobid_invalid, blobid_data_ro, blobid_md_ro;
	struct spdk_blob *blob_invalid, *blob_data_ro, *blob_md_ro;
	struct spdk_blob_opts blob_opts;
	int rc;

	/* Create three blobs - one each for testing invalid, data_ro and md_ro flags. */
	blob_invalid = ut_blob_create_and_open(bs, NULL);
	blobid_invalid = spdk_blob_get_id(blob_invalid);

	blob_data_ro = ut_blob_create_and_open(bs, NULL);
	blobid_data_ro = spdk_blob_get_id(blob_data_ro);

	ut_spdk_blob_opts_init(&blob_opts);
	blob_opts.clear_method = BLOB_CLEAR_WITH_WRITE_ZEROES;
	blob_md_ro = ut_blob_create_and_open(bs, &blob_opts);
	blobid_md_ro = spdk_blob_get_id(blob_md_ro);
	/* The chosen clear method is recorded in the known md_ro flag bits. */
	CU_ASSERT((blob_md_ro->md_ro_flags & SPDK_BLOB_MD_RO_FLAGS_MASK) == BLOB_CLEAR_WITH_WRITE_ZEROES);

	/* Change the size of blob_data_ro to check if flags are serialized
	 * when blob has non zero number of extents */
	spdk_blob_resize(blob_data_ro, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Set the xattr to check if flags are serialized
	 * when blob has non zero number of xattrs */
	rc = spdk_blob_set_xattr(blob_md_ro, "name", "log.txt", strlen("log.txt") + 1);
	CU_ASSERT(rc == 0);

	/* Set a single high, undefined bit in each flag category and mark the
	 * blobs dirty so the flags are written out on the next md sync. */
	blob_invalid->invalid_flags = (1ULL << 63);
	blob_invalid->state = SPDK_BLOB_STATE_DIRTY;
	blob_data_ro->data_ro_flags = (1ULL << 62);
	blob_data_ro->state = SPDK_BLOB_STATE_DIRTY;
	blob_md_ro->md_ro_flags = (1ULL << 61);
	blob_md_ro->state = SPDK_BLOB_STATE_DIRTY;

	/* Persist all three blobs' metadata. */
	g_bserrno = -1;
	spdk_blob_sync_md(blob_invalid, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bserrno = -1;
	spdk_blob_sync_md(blob_data_ro, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bserrno = -1;
	spdk_blob_sync_md(blob_md_ro, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	g_bserrno = -1;
	spdk_blob_close(blob_invalid, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	blob_invalid = NULL;
	g_bserrno = -1;
	spdk_blob_close(blob_data_ro, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	blob_data_ro = NULL;
	g_bserrno = -1;
	spdk_blob_close(blob_md_ro, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	blob_md_ro = NULL;

	g_blob = NULL;
	g_blobid = SPDK_BLOBID_INVALID;

	ut_bs_reload(&bs, NULL);

	/* A blob carrying an unknown invalid flag must refuse to open. */
	g_blob = NULL;
	g_bserrno = 0;
	spdk_bs_open_blob(bs, blobid_invalid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno != 0);
	CU_ASSERT(g_blob == NULL);

	g_blob = NULL;
	g_bserrno = -1;
	spdk_bs_open_blob(bs, blobid_data_ro, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob_data_ro = g_blob;
	/* If an unknown data_ro flag was found, the blob should be marked both data and md read-only. */
	CU_ASSERT(blob_data_ro->data_ro == true);
	CU_ASSERT(blob_data_ro->md_ro == true);
	CU_ASSERT(spdk_blob_get_num_clusters(blob_data_ro) == 10);

	/* An unknown md_ro flag makes only the metadata read-only. */
	g_blob = NULL;
	g_bserrno = -1;
	spdk_bs_open_blob(bs, blobid_md_ro, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob_md_ro = g_blob;
	CU_ASSERT(blob_md_ro->data_ro == false);
	CU_ASSERT(blob_md_ro->md_ro == true);

	/* Syncing md of the md_ro blob is still expected to complete with 0. */
	g_bserrno = -1;
	spdk_blob_sync_md(blob_md_ro, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	ut_blob_close_and_delete(bs, blob_data_ro);
	ut_blob_close_and_delete(bs, blob_md_ro);
}
4162 
/* Verify that a blobstore with an older on-disk version (2, which predates
 * the used_blobid mask) can be loaded, that blob IDs are reconstructed
 * without the mask, and that load/unload never silently upgrades the
 * on-disk version.
 */
static void
bs_version(void)
{
	struct spdk_bs_super_block *super;
	struct spdk_blob_store *bs = g_bs;
	struct spdk_bs_dev *dev;
	struct spdk_blob *blob;
	struct spdk_blob_opts blob_opts;
	spdk_blob_id blobid;

	/* Unload the blob store */
	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;

	/*
	 * Change the bs version on disk.  This will allow us to
	 *  test that the version does not get modified automatically
	 *  when loading and unloading the blobstore.
	 */
	super = (struct spdk_bs_super_block *)&g_dev_buffer[0];
	CU_ASSERT(super->version == SPDK_BS_VERSION);
	CU_ASSERT(super->clean == 1);
	super->version = 2;
	/*
	 * Version 2 metadata does not have a used blobid mask, so clear
	 *  those fields in the super block and zero the corresponding
	 *  region on "disk".  We will use this to ensure blob IDs are
	 *  correctly reconstructed.
	 */
	memset(&g_dev_buffer[super->used_blobid_mask_start * SPDK_BS_PAGE_SIZE], 0,
	       super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE);
	super->used_blobid_mask_start = 0;
	super->used_blobid_mask_len = 0;
	/* Re-CRC the super block so the load does not fail the checksum. */
	super->crc = blob_md_page_calc_crc(super);

	/* Load an existing blob store */
	dev = init_dev();
	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	CU_ASSERT(super->clean == 1);
	bs = g_bs;

	/*
	 * Create a blob - just to make sure that when we unload it
	 *  results in writing the super block (since metadata pages
	 *  were allocated.
	 */
	ut_spdk_blob_opts_init(&blob_opts);
	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	/* Unload the blob store */
	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
	/* The unload must have preserved version 2 and left the blobid mask absent. */
	CU_ASSERT(super->version == 2);
	CU_ASSERT(super->used_blobid_mask_start == 0);
	CU_ASSERT(super->used_blobid_mask_len == 0);

	dev = init_dev();
	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	/* The blob created above must be reconstructable without the mask. */
	g_blob = NULL;
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;

	ut_blob_close_and_delete(bs, blob);

	CU_ASSERT(super->version == 2);
	CU_ASSERT(super->used_blobid_mask_start == 0);
	CU_ASSERT(super->used_blobid_mask_len == 0);
}
4250 
4251 static void
4252 blob_set_xattrs_test(void)
4253 {
4254 	struct spdk_blob_store *bs = g_bs;
4255 	struct spdk_blob *blob;
4256 	struct spdk_blob_opts opts;
4257 	const void *value;
4258 	size_t value_len;
4259 	char *xattr;
4260 	size_t xattr_length;
4261 	int rc;
4262 
4263 	/* Create blob with extra attributes */
4264 	ut_spdk_blob_opts_init(&opts);
4265 
4266 	opts.xattrs.names = g_xattr_names;
4267 	opts.xattrs.get_value = _get_xattr_value;
4268 	opts.xattrs.count = 3;
4269 	opts.xattrs.ctx = &g_ctx;
4270 
4271 	blob = ut_blob_create_and_open(bs, &opts);
4272 
4273 	/* Get the xattrs */
4274 	value = NULL;
4275 
4276 	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len);
4277 	CU_ASSERT(rc == 0);
4278 	SPDK_CU_ASSERT_FATAL(value != NULL);
4279 	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
4280 	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);
4281 
4282 	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len);
4283 	CU_ASSERT(rc == 0);
4284 	SPDK_CU_ASSERT_FATAL(value != NULL);
4285 	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
4286 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);
4287 
4288 	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len);
4289 	CU_ASSERT(rc == 0);
4290 	SPDK_CU_ASSERT_FATAL(value != NULL);
4291 	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
4292 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);
4293 
4294 	/* Try to get non existing attribute */
4295 
4296 	rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len);
4297 	CU_ASSERT(rc == -ENOENT);
4298 
4299 	/* Try xattr exceeding maximum length of descriptor in single page */
4300 	xattr_length = SPDK_BS_MAX_DESC_SIZE - sizeof(struct spdk_blob_md_descriptor_xattr) -
4301 		       strlen("large_xattr") + 1;
4302 	xattr = calloc(xattr_length, sizeof(char));
4303 	SPDK_CU_ASSERT_FATAL(xattr != NULL);
4304 	rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
4305 	free(xattr);
4306 	SPDK_CU_ASSERT_FATAL(rc == -ENOMEM);
4307 
4308 	spdk_blob_close(blob, blob_op_complete, NULL);
4309 	poll_threads();
4310 	CU_ASSERT(g_bserrno == 0);
4311 	blob = NULL;
4312 	g_blob = NULL;
4313 	g_blobid = SPDK_BLOBID_INVALID;
4314 
4315 	/* NULL callback */
4316 	ut_spdk_blob_opts_init(&opts);
4317 	opts.xattrs.names = g_xattr_names;
4318 	opts.xattrs.get_value = NULL;
4319 	opts.xattrs.count = 1;
4320 	opts.xattrs.ctx = &g_ctx;
4321 
4322 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
4323 	poll_threads();
4324 	CU_ASSERT(g_bserrno == -EINVAL);
4325 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
4326 
4327 	/* NULL values */
4328 	ut_spdk_blob_opts_init(&opts);
4329 	opts.xattrs.names = g_xattr_names;
4330 	opts.xattrs.get_value = _get_xattr_value_null;
4331 	opts.xattrs.count = 1;
4332 	opts.xattrs.ctx = NULL;
4333 
4334 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
4335 	poll_threads();
4336 	CU_ASSERT(g_bserrno == -EINVAL);
4337 }
4338 
/* Verify that resizing a thin-provisioned blob (grow, shrink, md sync,
 * reload) never allocates any clusters and keeps the logical size intact.
 */
static void
blob_thin_prov_alloc(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_blob_opts opts;
	spdk_blob_id blobid;
	uint64_t free_clusters;

	free_clusters = spdk_bs_free_cluster_count(bs);

	/* Set blob as thin provisioned */
	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = true;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);
	/* Creation of a thin blob must not consume any clusters. */
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));

	CU_ASSERT(blob->active.num_clusters == 0);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);

	/* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */
	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(blob->active.num_clusters == 5);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);

	/* Grow it to 1TB - still unallocated */
	spdk_blob_resize(blob, 262144, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(blob->active.num_clusters == 262144);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 262144);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);

	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	/* Sync must not change anything */
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(blob->active.num_clusters == 262144);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 262144);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);
	/* Since clusters are not allocated,
	 * number of metadata pages is expected to be minimal.
	 */
	CU_ASSERT(blob->active.num_pages == 1);

	/* Shrink the blob to 3 clusters - still unallocated */
	spdk_blob_resize(blob, 3, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(blob->active.num_clusters == 3);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 3);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);

	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	/* Sync must not change anything */
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(blob->active.num_clusters == 3);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 3);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Full unload/load cycle; the persisted size must survive it. */
	ut_bs_reload(&bs, NULL);

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;

	/* Check that clusters allocation and size is still the same */
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(blob->active.num_clusters == 3);

	ut_blob_close_and_delete(bs, blob);
}
4429 
/* Verify that a cluster allocated outside of blob creation (as the I/O path
 * does) is correctly inserted into the blob's metadata on the md thread and
 * persists across a blobstore reload.
 */
static void
blob_insert_cluster_msg_test(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_blob_opts opts;
	/* For now, even if md_page_size is > 4KB, we still only use the first
	 * 4KB of it. The rest is left unused. Future changes may allow using the
	 * rest of the md_page, but that will require more extensive changes since
	 * then the struct spdk_blob_md_page cannot be used directly (since some
	 * fields such as crc would have variable placement in the struct).
	 */
	struct {
		struct spdk_blob_md_page page;
		uint8_t pad[DEV_MAX_PHYS_BLOCKLEN - sizeof(struct spdk_blob_md_page)];
	} md = {};
	spdk_blob_id blobid;
	uint64_t free_clusters;
	uint64_t new_cluster = 0;
	uint32_t cluster_num = 3;
	uint32_t extent_page = 0;

	free_clusters = spdk_bs_free_cluster_count(bs);

	/* Set blob as thin provisioned */
	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = true;
	opts.num_clusters = 4;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));

	CU_ASSERT(blob->active.num_clusters == 4);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 4);
	/* Thin blob: the target cluster slot starts out unallocated. */
	CU_ASSERT(blob->active.clusters[cluster_num] == 0);

	/* Specify cluster_num to allocate and new_cluster will be returned to insert on md_thread.
	 * This is to simulate behaviour when cluster is allocated after blob creation.
	 * Such as _spdk_bs_allocate_and_copy_cluster(). */
	spdk_spin_lock(&bs->used_lock);
	bs_allocate_cluster(blob, cluster_num, &new_cluster, &extent_page, false);
	/* Allocation alone must not have touched the blob's cluster table yet. */
	CU_ASSERT(blob->active.clusters[cluster_num] == 0);
	spdk_spin_unlock(&bs->used_lock);

	/* Insert the allocated cluster into blob metadata on the md thread. */
	blob_insert_cluster_on_md_thread(blob, cluster_num, new_cluster, extent_page, &md.page,
					 blob_op_complete, NULL);
	poll_threads();

	CU_ASSERT(blob->active.clusters[cluster_num] != 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* The insertion must survive a full unload/load cycle. */
	ut_bs_reload(&bs, NULL);

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;

	CU_ASSERT(blob->active.clusters[cluster_num] != 0);

	ut_blob_close_and_delete(bs, blob);
}
4497 
/* Verify reads/writes on a thin-provisioned blob: unallocated clusters read
 * back as zeroes, a write triggers cluster allocation, and concurrent writes
 * from two threads to the same cluster resolve to a single allocation.
 */
static void
blob_thin_prov_rw(void)
{
	static const uint8_t zero[10 * BLOCKLEN] = { 0 };
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob, *blob_id0;
	struct spdk_io_channel *channel, *channel_thread1;
	struct spdk_blob_opts opts;
	uint64_t free_clusters;
	uint64_t io_unit_size;
	uint8_t payload_read[10 * BLOCKLEN];
	uint8_t payload_write[10 * BLOCKLEN];
	uint64_t write_bytes;
	uint64_t read_bytes;
	uint64_t expected_bytes;

	free_clusters = spdk_bs_free_cluster_count(bs);
	io_unit_size = spdk_bs_get_io_unit_size(bs);

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = true;

	/* Create and delete blob at md page 0, so that next md page allocation
	 * for extent will use that. */
	blob_id0 = ut_blob_create_and_open(bs, &opts);
	blob = ut_blob_create_and_open(bs, &opts);
	ut_blob_close_and_delete(bs, blob_id0);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));

	CU_ASSERT(blob->active.num_clusters == 0);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);

	/* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */
	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(blob->active.num_clusters == 5);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);

	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	/* Sync must not change anything */
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(blob->active.num_clusters == 5);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);

	/* Payload should be all zeros from unallocated clusters */
	memset(payload_read, 0xFF, sizeof(payload_read));
	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(zero, payload_read, 10 * BLOCKLEN) == 0);

	/* Snapshot device byte counters to measure the cost of the writes below. */
	write_bytes = g_dev_write_bytes;
	read_bytes = g_dev_read_bytes;

	/* Perform write on thread 1. That will allocate cluster on thread 0 via send_msg */
	set_thread(1);
	channel_thread1 = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel_thread1 != NULL);
	memset(payload_write, 0xE5, sizeof(payload_write));
	spdk_blob_io_write(blob, channel_thread1, payload_write, 4, 10, blob_op_complete, NULL);
	/* One cluster is claimed immediately for the in-flight write. */
	CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs));
	/* Perform write on thread 0. That will try to allocate cluster,
	 * but fail due to another thread issuing the cluster allocation first. */
	set_thread(0);
	memset(payload_write, 0xE5, sizeof(payload_write));
	spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
	/* Both in-flight writes hold a claimed cluster until the race resolves. */
	CU_ASSERT(free_clusters - 2 == spdk_bs_free_cluster_count(bs));
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	/* The losing allocation was released: only one cluster stays allocated. */
	CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 1);
	/* For thin-provisioned blob we need to write 20 io_units plus one page metadata and
	 * read 0 bytes */
	expected_bytes = 20 * io_unit_size + spdk_bs_get_page_size(bs);
	if (g_use_extent_table) {
		/* Add one more page for EXTENT_PAGE write */
		expected_bytes += spdk_bs_get_page_size(bs);
	}
	CU_ASSERT(g_dev_write_bytes - write_bytes == expected_bytes);
	CU_ASSERT(g_dev_read_bytes - read_bytes == 0);

	/* The written payload must read back intact. */
	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_write, payload_read, 10 * BLOCKLEN) == 0);

	ut_blob_close_and_delete(bs, blob);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));

	set_thread(1);
	spdk_bs_free_io_channel(channel_thread1);
	set_thread(0);
	spdk_bs_free_io_channel(channel);
	poll_threads();
	g_blob = NULL;
	g_blobid = 0;
}
4602 
/* Verify the exact number of bytes written to / read from the backing device
 * when writing to a thin-provisioned blob: each cluster allocation must be
 * accompanied only by the payload io_unit plus the expected metadata pages
 * (primary md page, and the extent page when the extent table is in use).
 */
static void
blob_thin_prov_write_count_io(void)
{
	struct spdk_blob_store *bs;
	struct spdk_blob *blob;
	struct spdk_io_channel *ch;
	struct spdk_bs_dev *dev;
	struct spdk_bs_opts bs_opts;
	struct spdk_blob_opts opts;
	uint64_t free_clusters;
	uint64_t io_unit_size;
	uint8_t payload_write[BLOCKLEN];
	uint64_t write_bytes;
	uint64_t read_bytes;
	uint64_t expected_bytes;
	const uint32_t CLUSTER_SZ = g_phys_blocklen * 4;
	uint32_t io_units_per_cluster;
	uint32_t io_units_per_extent_page;
	uint32_t i;

	/* Use a very small cluster size for this test.  This ensures we need multiple
	 * extent pages to hold all of the clusters even for relatively small blobs like
	 * we are restricted to for the unit tests (i.e. we don't want to allocate multi-GB
	 * buffers).
	 */
	dev = init_dev();
	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
	bs_opts.cluster_sz = CLUSTER_SZ;

	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	free_clusters = spdk_bs_free_cluster_count(bs);
	io_unit_size = spdk_bs_get_io_unit_size(bs);
	io_units_per_cluster = CLUSTER_SZ / io_unit_size;
	io_units_per_extent_page = SPDK_EXTENTS_PER_EP * io_units_per_cluster;

	ch = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = true;

	blob = ut_blob_create_and_open(bs, &opts);
	/* Thin-provisioned create must not consume any clusters. */
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));

	/* Resize the blob so that it will require 8 extent pages to hold all of
	 * the clusters.
	 */
	g_bserrno = -1;
	spdk_blob_resize(blob, SPDK_EXTENTS_PER_EP * 8, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	g_bserrno = -1;
	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(blob->active.num_clusters == SPDK_EXTENTS_PER_EP * 8);

	memset(payload_write, 0, sizeof(payload_write));
	/* Each iteration targets a different extent page (index i).  Every
	 * iteration ends with 2 more clusters allocated than the previous one
	 * (one cluster is unmapped and re-allocated along the way), hence the
	 * "2 * i + ..." terms in the free-cluster assertions below.
	 */
	for (i = 0; i < 8; i++) {
		/* Snapshot the device byte counters so the deltas can be checked. */
		write_bytes = g_dev_write_bytes;
		read_bytes = g_dev_read_bytes;

		/* Write one io_unit to the first cluster covered by extent page i,
		 * which triggers allocation of that cluster.
		 */
		g_bserrno = -1;
		spdk_blob_io_write(blob, ch, payload_write, io_units_per_extent_page * i, 1, blob_op_complete,
				   NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(free_clusters - (2 * i + 1) == spdk_bs_free_cluster_count(bs));

		/* Cluster allocation must not require any device reads. */
		CU_ASSERT(g_dev_read_bytes == read_bytes);
		if (!g_use_extent_table) {
			/* For legacy metadata, we should have written the io_unit for
			 * the write I/O, plus the blob's primary metadata page
			 */
			expected_bytes = io_unit_size + spdk_bs_get_page_size(bs);
		} else {
			/* For extent table metadata, we should have written the io_unit for
			 * the write I/O, plus 2 metadata pages - the extent page and the
			 * blob's primary metadata page
			 */
			expected_bytes = io_unit_size + 2 * spdk_bs_get_page_size(bs);
		}
		CU_ASSERT((g_dev_write_bytes - write_bytes) == expected_bytes);

		/* The write should have synced the metadata already.  Do another sync here
		 * just to confirm.
		 */
		write_bytes = g_dev_write_bytes;
		read_bytes = g_dev_read_bytes;

		g_bserrno = -1;
		spdk_blob_sync_md(blob, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(free_clusters - (2 * i + 1) == spdk_bs_free_cluster_count(bs));
		CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 2 * i + 1);

		/* The redundant sync must not touch the device at all. */
		CU_ASSERT(g_dev_read_bytes == read_bytes);
		CU_ASSERT(g_dev_write_bytes == write_bytes);

		/* Now write to another unallocated cluster that is part of the same extent page. */
		g_bserrno = -1;
		spdk_blob_io_write(blob, ch, payload_write, io_units_per_extent_page * i + io_units_per_cluster,
				   1, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(free_clusters - (2 * i + 2) == spdk_bs_free_cluster_count(bs));
		CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 2 * i + 2);

		CU_ASSERT(g_dev_read_bytes == read_bytes);
		/*
		 * For legacy metadata, we should have written the I/O and the primary metadata page.
		 * For extent table metadata, we should have written the I/O and the extent metadata page.
		 */
		expected_bytes = io_unit_size + spdk_bs_get_page_size(bs);
		CU_ASSERT((g_dev_write_bytes - write_bytes) == expected_bytes);

		/* Send unmap aligned to the whole cluster - should free it up */
		g_bserrno = -1;
		spdk_blob_io_unmap(blob, ch, io_units_per_extent_page * i, io_units_per_cluster, blob_op_complete,
				   NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(free_clusters - (2 * i + 1) == spdk_bs_free_cluster_count(bs));

		/* Write back to the freed cluster */
		g_bserrno = -1;
		spdk_blob_io_write(blob, ch, payload_write, io_units_per_extent_page * i, 1, blob_op_complete,
				   NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(free_clusters - (2 * i + 2) == spdk_bs_free_cluster_count(bs));
	}

	/* Deleting the blob must return every allocated cluster to the pool. */
	ut_blob_close_and_delete(bs, blob);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));

	spdk_bs_free_io_channel(ch);
	poll_threads();
	g_blob = NULL;
	g_blobid = 0;

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
}
4757 
/* Exercise the unmap/release-cluster paths of a thin-provisioned blob:
 * whole-cluster and unaligned unmaps, unmap racing with a concurrent write,
 * concurrent unmaps of the same cluster (issue #3358 regression), and unmap
 * of a cluster belonging to a clone (cluster must not be released because
 * the snapshot still references the backing data).
 */
static void
blob_thin_prov_unmap_cluster(void)
{
	struct spdk_blob_store *bs;
	struct spdk_blob *blob, *snapshot;
	struct spdk_io_channel *ch;
	struct spdk_bs_dev *dev;
	struct spdk_bs_opts bs_opts;
	struct spdk_blob_opts opts;
	uint64_t free_clusters;
	uint64_t io_unit_size;
	uint8_t payload_write[BLOCKLEN];
	uint8_t payload_read[BLOCKLEN];
	const uint32_t CLUSTER_COUNT = 3;
	uint32_t io_units_per_cluster;
	spdk_blob_id blobid, snapshotid;
	uint32_t i;
	int err;

	/* Use a very large cluster size for this test. Check how the unmap/release cluster code path behaves when
	 * clusters are fully used.
	 */
	dev = init_dev();
	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
	bs_opts.cluster_sz = dev->blocklen * dev->blockcnt / (CLUSTER_COUNT + 1);

	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	free_clusters = spdk_bs_free_cluster_count(bs);
	io_unit_size = spdk_bs_get_io_unit_size(bs);
	io_units_per_cluster = bs_opts.cluster_sz / io_unit_size;

	ch = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = true;

	blob = ut_blob_create_and_open(bs, &opts);
	CU_ASSERT(free_clusters == CLUSTER_COUNT);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
	blobid = spdk_blob_get_id(blob);

	g_bserrno = -1;
	spdk_blob_resize(blob, CLUSTER_COUNT, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	g_bserrno = -1;
	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(blob->active.num_clusters == CLUSTER_COUNT);

	/* Fill all clusters */
	for (i = 0; i < CLUSTER_COUNT; i++) {
		memset(payload_write, i + 1, sizeof(payload_write));
		g_bserrno = -1;
		spdk_blob_io_write(blob, ch, payload_write, io_units_per_cluster * i, 1, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(free_clusters - (i + 1) == spdk_bs_free_cluster_count(bs));
	}
	CU_ASSERT(0 == spdk_bs_free_cluster_count(bs));

	/* Unmap one whole cluster */
	g_bserrno = -1;
	spdk_blob_io_unmap(blob, ch, io_units_per_cluster, io_units_per_cluster, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(1 == spdk_bs_free_cluster_count(bs));

	/* Verify the data read from the cluster is zeroed out */
	memset(payload_write, 0, sizeof(payload_write));
	spdk_blob_io_read(blob, ch, payload_read, io_units_per_cluster, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_write, payload_read, BLOCKLEN) == 0);

	/* Fill the same cluster with data */
	memset(payload_write, 3, sizeof(payload_write));
	g_bserrno = -1;
	spdk_blob_io_write(blob, ch, payload_write, io_units_per_cluster, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(0 == spdk_bs_free_cluster_count(bs));

	/* Verify the data read from the cluster has the expected data */
	spdk_blob_io_read(blob, ch, payload_read, io_units_per_cluster, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_write, payload_read, BLOCKLEN) == 0);

	/* Send an unaligned unmap that encompasses one whole cluster */
	g_bserrno = -1;
	spdk_blob_io_unmap(blob, ch, io_units_per_cluster - 1, io_units_per_cluster + 2, blob_op_complete,
			   NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(1 == spdk_bs_free_cluster_count(bs));

	/* Verify the data read from the cluster is zeroed out */
	g_bserrno = -1;
	memset(payload_write, 0, sizeof(payload_write));
	spdk_blob_io_read(blob, ch, payload_read, io_units_per_cluster, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_write, payload_read, BLOCKLEN) == 0);

	/* Send a simultaneous unmap with a write to an unallocated area -
	 * check that writes don't claim the currently unmapped cluster */
	g_bserrno = -1;
	memset(payload_write, 7, sizeof(payload_write));
	spdk_blob_io_unmap(blob, ch, 0, io_units_per_cluster, blob_op_complete, NULL);
	spdk_blob_io_write(blob, ch, payload_write, io_units_per_cluster, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(1 == spdk_bs_free_cluster_count(bs));

	/* Verify the contents of written sector */
	g_bserrno = -1;
	spdk_blob_io_read(blob, ch, payload_read, io_units_per_cluster, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_write, payload_read, BLOCKLEN) == 0);

	/* Verify the contents of unmapped sector */
	g_bserrno = -1;
	memset(payload_write, 0, sizeof(payload_write));
	spdk_blob_io_read(blob, ch, payload_read, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_write, payload_read, BLOCKLEN) == 0);

	/* Make sure clusters are not freed until the unmap to the drive is done */
	g_bserrno = -1;
	memset(payload_write, 7, sizeof(payload_write));
	spdk_blob_io_write(blob, ch, payload_write, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(0 == spdk_bs_free_cluster_count(bs));

	/* Single-step the unmap: while the written pattern is still visible in
	 * the device buffer the unmap has not reached the drive yet, so the
	 * cluster must not have been returned to the free pool.
	 */
	g_bserrno = -1;
	spdk_blob_io_unmap(blob, ch, 0, io_units_per_cluster, blob_op_complete, NULL);
	while (memcmp(payload_write, &g_dev_buffer[BLOCKLEN * io_units_per_cluster], BLOCKLEN) == 0) {
		CU_ASSERT(0 == spdk_bs_free_cluster_count(bs));
		poll_thread_times(0, 1);
	}
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(1 == spdk_bs_free_cluster_count(bs));

	/* Issue #3358 had a bug with concurrent trims to the same cluster causing an assert, check for regressions.
	 * Send three concurrent unmaps to the same cluster.
	 */
	g_bserrno = -1;
	memset(payload_write, 7, sizeof(payload_write));
	spdk_blob_io_write(blob, ch, payload_write, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(0 == spdk_bs_free_cluster_count(bs));

	/* &err captures the completion status of the third unmap separately
	 * from g_bserrno, so both the last and an earlier unmap are checked. */
	g_bserrno = -1;
	err = -1;
	spdk_blob_io_unmap(blob, ch, 0, io_units_per_cluster, blob_op_complete, NULL);
	spdk_blob_io_unmap(blob, ch, 0, io_units_per_cluster, blob_op_complete, NULL);
	spdk_blob_io_unmap(blob, ch, 0, io_units_per_cluster, blob_op_complete, &err);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(err == 0);
	CU_ASSERT(1 == spdk_bs_free_cluster_count(bs));

	/* Test thin-provisioned blob that is backed */
	spdk_blob_resize(blob, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));

	g_bserrno = -1;
	memset(payload_write, 1, sizeof(payload_write));
	spdk_blob_io_write(blob, ch, payload_write, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs));

	/* Create a snapshot */
	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 0);
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
	snapshotid = g_blobid;
	CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs));
	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot = g_blob;

	/* Write data to blob, it will alloc new cluster */
	g_bserrno = -1;
	memset(payload_write, 2, sizeof(payload_write));
	spdk_blob_io_write(blob, ch, payload_write, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(free_clusters - 2 == spdk_bs_free_cluster_count(bs));

	/* Unmap one whole cluster, but do not release this cluster */
	g_bserrno = -1;
	spdk_blob_io_unmap(blob, ch, 0, io_units_per_cluster, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(free_clusters - 2 == spdk_bs_free_cluster_count(bs));

	/* Verify the data read from the cluster is zeroed out */
	g_bserrno = -1;
	memset(payload_write, 0, sizeof(payload_write));
	spdk_blob_io_read(blob, ch, payload_read, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_write, payload_read, BLOCKLEN) == 0);

	ut_blob_close_and_delete(bs, blob);
	ut_blob_close_and_delete(bs, snapshot);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));

	spdk_bs_free_io_channel(ch);
	poll_threads();
	g_blob = NULL;
	g_blobid = 0;

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
}
5003 
/* Write to the second cluster of a thin-provisioned blob (so the first
 * allocated cluster does not sit at blob offset 0), check the exact device
 * write byte count for the allocation, and verify the data survives a
 * blobstore reload.
 */
static void
blob_thin_prov_rle(void)
{
	static const uint8_t zero[10 * BLOCKLEN] = { 0 };
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_io_channel *channel;
	struct spdk_blob_opts opts;
	spdk_blob_id blobid;
	uint64_t free_clusters;
	uint64_t io_unit_size;
	uint8_t payload_read[10 * BLOCKLEN];
	uint8_t payload_write[10 * BLOCKLEN];
	uint64_t write_bytes;
	uint64_t read_bytes;
	uint64_t expected_bytes;
	uint64_t io_unit;

	/* assert that the stack variables above are of correct size */
	CU_ASSERT(spdk_bs_get_io_unit_size(bs) == BLOCKLEN);

	free_clusters = spdk_bs_free_cluster_count(bs);
	io_unit_size = spdk_bs_get_io_unit_size(bs);

	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = true;
	opts.num_clusters = 5;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);
	/* Thin-provisioned create must not consume any clusters. */
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	/* Target specifically second cluster in a blob as first allocation */
	io_unit = bs_cluster_to_io_unit(bs, 1);

	/* Payload should be all zeros from unallocated clusters */
	memset(payload_read, 0xFF, sizeof(payload_read));
	spdk_blob_io_read(blob, channel, payload_read, io_unit, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(zero, payload_read, 10 * BLOCKLEN) == 0);

	/* Snapshot the device byte counters so the deltas can be checked. */
	write_bytes = g_dev_write_bytes;
	read_bytes = g_dev_read_bytes;

	/* Issue write to second cluster in a blob */
	memset(payload_write, 0xE5, sizeof(payload_write));
	spdk_blob_io_write(blob, channel, payload_write, io_unit, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs));
	/* For thin-provisioned blob we need to write 10 pages plus one page metadata and
	 * read 0 bytes */
	expected_bytes = 10 * io_unit_size + spdk_bs_get_page_size(bs);
	if (g_use_extent_table) {
		/* Add one more page for EXTENT_PAGE write */
		expected_bytes += spdk_bs_get_page_size(bs);
	}
	CU_ASSERT(g_dev_write_bytes - write_bytes == expected_bytes);
	CU_ASSERT(g_dev_read_bytes - read_bytes == 0);

	spdk_blob_io_read(blob, channel, payload_read, io_unit, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_write, payload_read, 10 * BLOCKLEN) == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Reload the blobstore to make sure the cluster map was persisted. */
	ut_bs_reload(&bs, NULL);

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	/* Read second cluster after blob reload to confirm data written */
	spdk_blob_io_read(blob, channel, payload_read, io_unit, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_write, payload_read, 10 * BLOCKLEN) == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();

	ut_blob_close_and_delete(bs, blob);
}
5102 
/* Verify vectored (readv/writev) I/O on a thin-provisioned blob: reads from
 * unallocated clusters return zeroes, and data written via a 3-element iovec
 * reads back intact via a differently-shaped iovec.
 */
static void
blob_thin_prov_rw_iov(void)
{
	static const uint8_t zero[10 * BLOCKLEN] = { 0 };
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_io_channel *channel;
	struct spdk_blob_opts opts;
	uint64_t free_clusters;
	uint8_t payload_read[10 * BLOCKLEN];
	uint8_t payload_write[10 * BLOCKLEN];
	struct iovec iov_read[3];
	struct iovec iov_write[3];

	free_clusters = spdk_bs_free_cluster_count(bs);

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = true;

	blob = ut_blob_create_and_open(bs, &opts);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));

	CU_ASSERT(blob->active.num_clusters == 0);

	/* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */
	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(blob->active.num_clusters == 5);

	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	/* Sync must not change anything */
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(blob->active.num_clusters == 5);

	/* Payload should be all zeros from unallocated clusters */
	memset(payload_read, 0xAA, sizeof(payload_read));
	iov_read[0].iov_base = payload_read;
	iov_read[0].iov_len = 3 * BLOCKLEN;
	iov_read[1].iov_base = payload_read + 3 * BLOCKLEN;
	iov_read[1].iov_len = 4 * BLOCKLEN;
	iov_read[2].iov_base = payload_read + 7 * BLOCKLEN;
	iov_read[2].iov_len = 3 * BLOCKLEN;
	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(zero, payload_read, 10 * BLOCKLEN) == 0);

	/* Write 10 io_units at offset 250 through an iovec split 1/5/4. */
	memset(payload_write, 0xE5, sizeof(payload_write));
	iov_write[0].iov_base = payload_write;
	iov_write[0].iov_len = 1 * BLOCKLEN;
	iov_write[1].iov_base = payload_write + 1 * BLOCKLEN;
	iov_write[1].iov_len = 5 * BLOCKLEN;
	iov_write[2].iov_base = payload_write + 6 * BLOCKLEN;
	iov_write[2].iov_len = 4 * BLOCKLEN;

	spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Read the same range back through a 3/4/3 iovec and compare. */
	memset(payload_read, 0xAA, sizeof(payload_read));
	iov_read[0].iov_base = payload_read;
	iov_read[0].iov_len = 3 * BLOCKLEN;
	iov_read[1].iov_base = payload_read + 3 * BLOCKLEN;
	iov_read[1].iov_len = 4 * BLOCKLEN;
	iov_read[2].iov_base = payload_read + 7 * BLOCKLEN;
	iov_read[2].iov_len = 3 * BLOCKLEN;
	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_write, payload_read, 10 * BLOCKLEN) == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();

	ut_blob_close_and_delete(bs, blob);
}
5186 
/* Context shared with test_iter(): records the blob IDs in the order they
 * are expected to be visited during blobstore-load iteration.
 */
struct iter_ctx {
	int		current_iter;	/* index of the blob expected on the next callback */
	spdk_blob_id	blobid[4];	/* expected blob IDs, in creation order */
};
5191 
5192 static void
5193 test_iter(void *arg, struct spdk_blob *blob, int bserrno)
5194 {
5195 	struct iter_ctx *iter_ctx = arg;
5196 	spdk_blob_id blobid;
5197 
5198 	CU_ASSERT(bserrno == 0);
5199 	blobid = spdk_blob_get_id(blob);
5200 	CU_ASSERT(blobid == iter_ctx->blobid[iter_ctx->current_iter++]);
5201 }
5202 
/* Verify the iter_cb_fn/iter_cb_arg load options: create 4 blobs tagged with
 * their own IDs, then reload the blobstore after both a clean and a dirty
 * shutdown and check that test_iter() visits the blobs in order each time.
 */
static void
bs_load_iter_test(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct iter_ctx iter_ctx = { 0 };
	struct spdk_blob *blob;
	int i, rc;
	struct spdk_bs_opts opts;

	dev = init_dev();
	spdk_bs_opts_init(&opts, sizeof(opts));
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");

	/* Initialize a new blob store */
	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	for (i = 0; i < 4; i++) {
		blob = ut_blob_create_and_open(bs, NULL);
		iter_ctx.blobid[i] = spdk_blob_get_id(blob);

		/* Just save the blobid as an xattr for testing purposes. */
		rc = spdk_blob_set_xattr(blob, "blobid", &iter_ctx.blobid[i], sizeof(spdk_blob_id));
		CU_ASSERT(rc == 0);

		/* Resize the blob */
		spdk_blob_resize(blob, i, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);

		spdk_blob_close(blob, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
	}

	g_bserrno = -1;
	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	dev = init_dev();
	spdk_bs_opts_init(&opts, sizeof(opts));
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
	opts.iter_cb_fn = test_iter;
	opts.iter_cb_arg = &iter_ctx;

	/* Test blob iteration during load after a clean shutdown. */
	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	/* Dirty shutdown */
	bs_free(bs);

	dev = init_dev();
	spdk_bs_opts_init(&opts, sizeof(opts));
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
	opts.iter_cb_fn = test_iter;
	/* Reset the expected index for the second iteration pass. */
	iter_ctx.current_iter = 0;
	opts.iter_cb_arg = &iter_ctx;

	/* Test blob iteration during load after a dirty shutdown. */
	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
}
5282 
/* Verify copy-on-write behavior after taking a snapshot of a thin-provisioned
 * blob: a write to the clone must allocate and copy one cluster, account for
 * the exact device read/write/copy byte counts, and leave the snapshot's data
 * unchanged.
 */
static void
blob_snapshot_rw(void)
{
	static const uint8_t zero[10 * BLOCKLEN] = { 0 };
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob, *snapshot;
	struct spdk_io_channel *channel;
	struct spdk_blob_opts opts;
	spdk_blob_id blobid, snapshotid;
	uint64_t free_clusters;
	uint64_t cluster_size;
	uint64_t io_unit_size;
	uint8_t payload_read[10 * BLOCKLEN];
	uint8_t payload_write[10 * BLOCKLEN];
	uint64_t write_bytes_start;
	uint64_t read_bytes_start;
	uint64_t copy_bytes_start;
	uint64_t write_bytes;
	uint64_t read_bytes;
	uint64_t copy_bytes;
	uint64_t expected_bytes;

	free_clusters = spdk_bs_free_cluster_count(bs);
	cluster_size = spdk_bs_get_cluster_size(bs);
	io_unit_size = spdk_bs_get_io_unit_size(bs);

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = true;
	opts.num_clusters = 5;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));

	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);

	/* Unallocated clusters must read back as zeroes. */
	memset(payload_read, 0xFF, sizeof(payload_read));
	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(zero, payload_read, 10 * BLOCKLEN) == 0);

	/* Write a pattern, allocating at least one cluster. */
	memset(payload_write, 0xE5, sizeof(payload_write));
	spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs));

	/* Create snapshot from blob */
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snapshotid = g_blobid;

	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot = g_blob;
	/* Snapshots must be read-only in both data and metadata. */
	CU_ASSERT(snapshot->data_ro == true);
	CU_ASSERT(snapshot->md_ro == true);

	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5);

	/* Snapshot the device byte counters before the COW-triggering write. */
	write_bytes_start = g_dev_write_bytes;
	read_bytes_start = g_dev_read_bytes;
	copy_bytes_start = g_dev_copy_bytes;

	memset(payload_write, 0xAA, sizeof(payload_write));
	spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs));

	/* For a clone we need to allocate and copy one cluster, update one page of metadata
	 * and then write 10 io units of payload.
	 */
	write_bytes = g_dev_write_bytes - write_bytes_start;
	read_bytes = g_dev_read_bytes - read_bytes_start;
	copy_bytes = g_dev_copy_bytes - copy_bytes_start;
	if (g_dev_copy_enabled) {
		/* With device offload, the cluster moves via a copy command. */
		CU_ASSERT(copy_bytes == cluster_size);
	} else {
		CU_ASSERT(copy_bytes == 0);
	}
	expected_bytes = 10 * io_unit_size + cluster_size + spdk_bs_get_page_size(bs);
	if (g_use_extent_table) {
		/* Add one more page for EXTENT_PAGE write */
		expected_bytes += spdk_bs_get_page_size(bs);
	}
	/* Without copy offload, the COW shows up as cluster_size of reads plus
	 * cluster_size of writes; the sums below hold either way. */
	CU_ASSERT(write_bytes + copy_bytes == expected_bytes);
	CU_ASSERT(read_bytes + copy_bytes == cluster_size);

	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_write, payload_read, 10 * BLOCKLEN) == 0);

	/* Data on snapshot should not change after write to clone */
	memset(payload_write, 0xE5, sizeof(payload_write));
	spdk_blob_io_read(snapshot, channel, payload_read, 4, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_write, payload_read, 10 * BLOCKLEN) == 0);

	ut_blob_close_and_delete(bs, blob);
	ut_blob_close_and_delete(bs, snapshot);

	spdk_bs_free_io_channel(channel);
	poll_threads();
	g_blob = NULL;
	g_blobid = 0;
}
5400 
/* Verify vectored I/O against a clone of an all-unallocated snapshot:
 * readv of the untouched clone returns zeroes, and data written via writev
 * reads back intact through a differently-shaped iovec.
 */
static void
blob_snapshot_rw_iov(void)
{
	static const uint8_t zero[10 * BLOCKLEN] = { 0 };
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob, *snapshot;
	struct spdk_io_channel *channel;
	struct spdk_blob_opts opts;
	spdk_blob_id blobid, snapshotid;
	uint64_t free_clusters;
	uint8_t payload_read[10 * BLOCKLEN];
	uint8_t payload_write[10 * BLOCKLEN];
	struct iovec iov_read[3];
	struct iovec iov_write[3];

	free_clusters = spdk_bs_free_cluster_count(bs);

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = true;
	opts.num_clusters = 5;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));

	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);

	/* Create snapshot from blob */
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snapshotid = g_blobid;

	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot = g_blob;
	/* Snapshots must be read-only in both data and metadata. */
	CU_ASSERT(snapshot->data_ro == true);
	CU_ASSERT(snapshot->md_ro == true);
	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5);

	/* Payload should be all zeros from unallocated clusters */
	memset(payload_read, 0xAA, sizeof(payload_read));
	iov_read[0].iov_base = payload_read;
	iov_read[0].iov_len = 3 * BLOCKLEN;
	iov_read[1].iov_base = payload_read + 3 * BLOCKLEN;
	iov_read[1].iov_len = 4 * BLOCKLEN;
	iov_read[2].iov_base = payload_read + 7 * BLOCKLEN;
	iov_read[2].iov_len = 3 * BLOCKLEN;
	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(zero, payload_read, 10 * BLOCKLEN) == 0);

	/* Write 10 io_units at offset 250 through an iovec split 1/5/4. */
	memset(payload_write, 0xE5, sizeof(payload_write));
	iov_write[0].iov_base = payload_write;
	iov_write[0].iov_len = 1 * BLOCKLEN;
	iov_write[1].iov_base = payload_write + 1 * BLOCKLEN;
	iov_write[1].iov_len = 5 * BLOCKLEN;
	iov_write[2].iov_base = payload_write + 6 * BLOCKLEN;
	iov_write[2].iov_len = 4 * BLOCKLEN;

	spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Read the same range back through a 3/4/3 iovec and compare. */
	memset(payload_read, 0xAA, sizeof(payload_read));
	iov_read[0].iov_base = payload_read;
	iov_read[0].iov_len = 3 * BLOCKLEN;
	iov_read[1].iov_base = payload_read + 3 * BLOCKLEN;
	iov_read[1].iov_len = 4 * BLOCKLEN;
	iov_read[2].iov_base = payload_read + 7 * BLOCKLEN;
	iov_read[2].iov_len = 3 * BLOCKLEN;
	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_write, payload_read, 10 * BLOCKLEN) == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();

	ut_blob_close_and_delete(bs, blob);
	ut_blob_close_and_delete(bs, snapshot);
}
5490 
5491 /**
5492  * Inflate / decouple parent rw unit tests.
5493  *
5494  * --------------
5495  * original blob:         0         1         2         3         4
5496  *                   ,---------+---------+---------+---------+---------.
5497  *         snapshot  |xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|    -    |
5498  *                   +---------+---------+---------+---------+---------+
5499  *         snapshot2 |    -    |yyyyyyyyy|    -    |yyyyyyyyy|    -    |
5500  *                   +---------+---------+---------+---------+---------+
5501  *         blob      |    -    |zzzzzzzzz|    -    |    -    |    -    |
5502  *                   '---------+---------+---------+---------+---------'
5503  *                   .         .         .         .         .         .
5504  * --------          .         .         .         .         .         .
5505  * inflate:          .         .         .         .         .         .
5506  *                   ,---------+---------+---------+---------+---------.
5507  *         blob      |xxxxxxxxx|zzzzzzzzz|xxxxxxxxx|yyyyyyyyy|000000000|
5508  *                   '---------+---------+---------+---------+---------'
5509  *
5510  *         NOTE: needs to allocate 4 clusters, thin provisioning removed, dependency
5511  *               on snapshot2 and snapshot removed .         .         .
5512  *                   .         .         .         .         .         .
5513  * ----------------  .         .         .         .         .         .
5514  * decouple parent:  .         .         .         .         .         .
5515  *                   ,---------+---------+---------+---------+---------.
5516  *         snapshot  |xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|    -    |
5517  *                   +---------+---------+---------+---------+---------+
5518  *         blob      |    -    |zzzzzzzzz|    -    |yyyyyyyyy|    -    |
5519  *                   '---------+---------+---------+---------+---------'
5520  *
5521  *         NOTE: needs to allocate 1 cluster, 3 clusters unallocated, dependency
5522  *               on snapshot2 removed and on snapshot still exists. Snapshot2
5523  *               should remain a clone of snapshot.
5524  */
/*
 * Shared body for the blob_inflate_rw test (see diagram above).
 *
 * Builds a two-level snapshot chain (snapshot <- snapshot2 <- blob) with
 * read/write I/O between the steps, then either fully inflates "blob"
 * (decouple_parent == false) or only decouples it from its immediate
 * parent snapshot2 (decouple_parent == true).  Afterwards it verifies
 * cluster accounting, the snapshot/clone relation tree, and that the
 * blob's readable payload is unchanged by the operation.
 */
static void
_blob_inflate_rw(bool decouple_parent)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob, *snapshot, *snapshot2;
	struct spdk_io_channel *channel;
	struct spdk_blob_opts opts;
	spdk_blob_id blobid, snapshotid, snapshot2id;
	uint64_t free_clusters;
	uint64_t cluster_size;

	uint64_t payload_size;
	uint8_t *payload_read;
	uint8_t *payload_write;
	uint8_t *payload_clone;	/* expected content of "blob" at any point in time */

	uint64_t io_units_per_cluster;
	uint64_t io_units_per_payload;

	int i;
	spdk_blob_id ids[2];
	size_t count;

	free_clusters = spdk_bs_free_cluster_count(bs);
	cluster_size = spdk_bs_get_cluster_size(bs);
	io_units_per_cluster = cluster_size / spdk_bs_get_io_unit_size(bs);
	io_units_per_payload = io_units_per_cluster * 5;

	/* All payload buffers span the blob's full 5 clusters */
	payload_size = cluster_size * 5;

	payload_read = malloc(payload_size);
	SPDK_CU_ASSERT_FATAL(payload_read != NULL);

	payload_write = malloc(payload_size);
	SPDK_CU_ASSERT_FATAL(payload_write != NULL);

	payload_clone = malloc(payload_size);
	SPDK_CU_ASSERT_FATAL(payload_clone != NULL);

	channel = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(channel != NULL);

	/* Create blob */
	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = true;
	opts.num_clusters = 5;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);
	/* Thin-provisioned blob must not consume clusters up front */
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));

	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);

	/* 1) Initial read should return zeroed payload */
	memset(payload_read, 0xFF, payload_size);
	spdk_blob_io_read(blob, channel, payload_read, 0, io_units_per_payload,
			  blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size));

	/* Fill whole blob with a pattern, except last cluster (to be sure it
	 * isn't allocated) */
	memset(payload_write, 0xE5, payload_size - cluster_size);
	spdk_blob_io_write(blob, channel, payload_write, 0, io_units_per_payload -
			   io_units_per_cluster, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	/* Write must have allocated clusters from the pool */
	CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs));

	/* 2) Create snapshot from blob (first level) */
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snapshotid = g_blobid;

	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot = g_blob;
	CU_ASSERT(snapshot->data_ro == true);
	CU_ASSERT(snapshot->md_ro == true);

	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5);

	/* Write every second cluster with a pattern.
	 *
	 * Last cluster shouldn't be written, to be sure that snapshot nor clone
	 * doesn't allocate it.
	 *
	 * payload_clone stores expected result on "blob" read at the time and
	 * is used only to check data consistency on clone before and after
	 * inflation. Initially we fill it with the backing snapshot's pattern
	 * used before.
	 */
	memset(payload_clone, 0xE5, payload_size - cluster_size);
	memset(payload_clone + payload_size - cluster_size, 0x00, cluster_size);
	memset(payload_write, 0xAA, payload_size);
	for (i = 1; i < 5; i += 2) {
		spdk_blob_io_write(blob, channel, payload_write, i * io_units_per_cluster,
				   io_units_per_cluster, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);

		/* Update expected result */
		memcpy(payload_clone + (cluster_size * i), payload_write,
		       cluster_size);
	}
	CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs));

	/* Check data consistency on clone */
	memset(payload_read, 0xFF, payload_size);
	spdk_blob_io_read(blob, channel, payload_read, 0, io_units_per_payload,
			  blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0);

	/* 3) Create second levels snapshot from blob */
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snapshot2id = g_blobid;

	spdk_bs_open_blob(bs, snapshot2id, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot2 = g_blob;
	CU_ASSERT(snapshot2->data_ro == true);
	CU_ASSERT(snapshot2->md_ro == true);

	CU_ASSERT(spdk_blob_get_num_clusters(snapshot2) == 5);

	/* snapshot2 takes blob's place as the child of snapshot */
	CU_ASSERT(snapshot2->parent_id == snapshotid);

	/* Write one cluster on the top level blob. This cluster (1) covers
	 * already allocated cluster in the snapshot2, so shouldn't be inflated
	 * at all */
	spdk_blob_io_write(blob, channel, payload_write, io_units_per_cluster,
			   io_units_per_cluster, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Update expected result */
	memcpy(payload_clone + cluster_size, payload_write, cluster_size);

	/* Check data consistency on clone */
	memset(payload_read, 0xFF, payload_size);
	spdk_blob_io_read(blob, channel, payload_read, 0, io_units_per_payload,
			  blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0);


	/* Close all blobs */
	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_blob_close(snapshot2, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_blob_close(snapshot, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Check snapshot-clone relations */
	count = 2;
	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);
	CU_ASSERT(count == 1);
	CU_ASSERT(ids[0] == snapshot2id);

	count = 2;
	CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0);
	CU_ASSERT(count == 1);
	CU_ASSERT(ids[0] == blobid);

	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshot2id);

	/* Baseline for the cluster-accounting checks below */
	free_clusters = spdk_bs_free_cluster_count(bs);
	if (!decouple_parent) {
		/* Do full blob inflation */
		spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);

		/* All clusters should be inflated (except one already allocated
		 * in a top level blob) */
		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 4);

		/* Check if relation tree updated correctly */
		count = 2;
		CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);

		/* snapshotid have one clone */
		CU_ASSERT(count == 1);
		CU_ASSERT(ids[0] == snapshot2id);

		/* snapshot2id have no clones */
		count = 2;
		CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0);
		CU_ASSERT(count == 0);

		/* Inflation makes the blob fully independent */
		CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID);
	} else {
		/* Decouple parent of blob */
		spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);

		/* Only one cluster from a parent should be inflated (second one
		 * is covered by a cluster written on a top level blob, and
		 * already allocated) */
		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 1);

		/* Check if relation tree updated correctly */
		count = 2;
		CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);

		/* snapshotid have two clones now */
		CU_ASSERT(count == 2);
		CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
		CU_ASSERT(ids[0] == snapshot2id || ids[1] == snapshot2id);

		/* snapshot2id have no clones */
		count = 2;
		CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0);
		CU_ASSERT(count == 0);

		/* blob is re-parented directly onto the base snapshot */
		CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
	}

	/* Try to delete snapshot2 (should pass) */
	spdk_bs_delete_blob(bs, snapshot2id, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Try to delete base snapshot */
	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Reopen blob after snapshot deletion */
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;

	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);

	/* Check data consistency on inflated blob */
	memset(payload_read, 0xFF, payload_size);
	spdk_blob_io_read(blob, channel, payload_read, 0, io_units_per_payload,
			  blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();

	free(payload_read);
	free(payload_write);
	free(payload_clone);

	ut_blob_close_and_delete(bs, blob);
}
5799 
/* Run the inflate r/w scenario twice: full inflation first, then
 * decouple-parent only. */
static void
blob_inflate_rw(void)
{
	int pass;

	for (pass = 0; pass < 2; pass++) {
		_blob_inflate_rw(pass == 1);
	}
}
5806 
5807 /**
5808  * Snapshot-clones relation test
5809  *
5810  *         snapshot
5811  *            |
5812  *      +-----+-----+
5813  *      |           |
5814  *   blob(ro)   snapshot2
5815  *      |           |
5816  *   clone2      clone
5817  */
5818 static void
5819 blob_relations(void)
5820 {
5821 	struct spdk_blob_store *bs;
5822 	struct spdk_bs_dev *dev;
5823 	struct spdk_bs_opts bs_opts;
5824 	struct spdk_blob_opts opts;
5825 	struct spdk_blob *blob, *snapshot, *snapshot2, *clone, *clone2;
5826 	spdk_blob_id blobid, cloneid, snapshotid, cloneid2, snapshotid2;
5827 	int rc;
5828 	size_t count;
5829 	spdk_blob_id ids[10] = {};
5830 
5831 	dev = init_dev();
5832 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
5833 	snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");
5834 
5835 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
5836 	poll_threads();
5837 	CU_ASSERT(g_bserrno == 0);
5838 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
5839 	bs = g_bs;
5840 
5841 	/* 1. Create blob with 10 clusters */
5842 
5843 	ut_spdk_blob_opts_init(&opts);
5844 	opts.num_clusters = 10;
5845 
5846 	blob = ut_blob_create_and_open(bs, &opts);
5847 	blobid = spdk_blob_get_id(blob);
5848 
5849 	CU_ASSERT(!spdk_blob_is_read_only(blob));
5850 	CU_ASSERT(!spdk_blob_is_snapshot(blob));
5851 	CU_ASSERT(!spdk_blob_is_clone(blob));
5852 	CU_ASSERT(!spdk_blob_is_thin_provisioned(blob));
5853 
5854 	/* blob should not have underlying snapshot nor clones */
5855 	CU_ASSERT(blob->parent_id == SPDK_BLOBID_INVALID);
5856 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID);
5857 	count = SPDK_COUNTOF(ids);
5858 	rc = spdk_blob_get_clones(bs, blobid, ids, &count);
5859 	CU_ASSERT(rc == 0);
5860 	CU_ASSERT(count == 0);
5861 
5862 
5863 	/* 2. Create snapshot */
5864 
5865 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5866 	poll_threads();
5867 	CU_ASSERT(g_bserrno == 0);
5868 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5869 	snapshotid = g_blobid;
5870 
5871 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
5872 	poll_threads();
5873 	CU_ASSERT(g_bserrno == 0);
5874 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5875 	snapshot = g_blob;
5876 
5877 	CU_ASSERT(spdk_blob_is_read_only(snapshot));
5878 	CU_ASSERT(spdk_blob_is_snapshot(snapshot));
5879 	CU_ASSERT(!spdk_blob_is_clone(snapshot));
5880 	CU_ASSERT(snapshot->parent_id == SPDK_BLOBID_INVALID);
5881 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid) == SPDK_BLOBID_INVALID);
5882 
5883 	/* Check if original blob is converted to the clone of snapshot */
5884 	CU_ASSERT(!spdk_blob_is_read_only(blob));
5885 	CU_ASSERT(!spdk_blob_is_snapshot(blob));
5886 	CU_ASSERT(spdk_blob_is_clone(blob));
5887 	CU_ASSERT(spdk_blob_is_thin_provisioned(blob));
5888 	CU_ASSERT(blob->parent_id == snapshotid);
5889 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
5890 
5891 	count = SPDK_COUNTOF(ids);
5892 	rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
5893 	CU_ASSERT(rc == 0);
5894 	CU_ASSERT(count == 1);
5895 	CU_ASSERT(ids[0] == blobid);
5896 
5897 
5898 	/* 3. Create clone from snapshot */
5899 
5900 	spdk_bs_create_clone(bs, snapshotid, NULL, blob_op_with_id_complete, NULL);
5901 	poll_threads();
5902 	CU_ASSERT(g_bserrno == 0);
5903 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5904 	cloneid = g_blobid;
5905 
5906 	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
5907 	poll_threads();
5908 	CU_ASSERT(g_bserrno == 0);
5909 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5910 	clone = g_blob;
5911 
5912 	CU_ASSERT(!spdk_blob_is_read_only(clone));
5913 	CU_ASSERT(!spdk_blob_is_snapshot(clone));
5914 	CU_ASSERT(spdk_blob_is_clone(clone));
5915 	CU_ASSERT(spdk_blob_is_thin_provisioned(clone));
5916 	CU_ASSERT(clone->parent_id == snapshotid);
5917 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid);
5918 
5919 	count = SPDK_COUNTOF(ids);
5920 	rc = spdk_blob_get_clones(bs, cloneid, ids, &count);
5921 	CU_ASSERT(rc == 0);
5922 	CU_ASSERT(count == 0);
5923 
5924 	/* Check if clone is on the snapshot's list */
5925 	count = SPDK_COUNTOF(ids);
5926 	rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
5927 	CU_ASSERT(rc == 0);
5928 	CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
5929 	CU_ASSERT(ids[0] == cloneid || ids[1] == cloneid);
5930 
5931 
5932 	/* 4. Create snapshot of the clone */
5933 
5934 	spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL);
5935 	poll_threads();
5936 	CU_ASSERT(g_bserrno == 0);
5937 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5938 	snapshotid2 = g_blobid;
5939 
5940 	spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL);
5941 	poll_threads();
5942 	CU_ASSERT(g_bserrno == 0);
5943 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5944 	snapshot2 = g_blob;
5945 
5946 	CU_ASSERT(spdk_blob_is_read_only(snapshot2));
5947 	CU_ASSERT(spdk_blob_is_snapshot(snapshot2));
5948 	CU_ASSERT(spdk_blob_is_clone(snapshot2));
5949 	CU_ASSERT(snapshot2->parent_id == snapshotid);
5950 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid);
5951 
5952 	/* Check if clone is converted to the clone of snapshot2 and snapshot2
5953 	 * is a child of snapshot */
5954 	CU_ASSERT(!spdk_blob_is_read_only(clone));
5955 	CU_ASSERT(!spdk_blob_is_snapshot(clone));
5956 	CU_ASSERT(spdk_blob_is_clone(clone));
5957 	CU_ASSERT(spdk_blob_is_thin_provisioned(clone));
5958 	CU_ASSERT(clone->parent_id == snapshotid2);
5959 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2);
5960 
5961 	count = SPDK_COUNTOF(ids);
5962 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
5963 	CU_ASSERT(rc == 0);
5964 	CU_ASSERT(count == 1);
5965 	CU_ASSERT(ids[0] == cloneid);
5966 
5967 
5968 	/* 5. Try to create clone from read only blob */
5969 
5970 	/* Mark blob as read only */
5971 	spdk_blob_set_read_only(blob);
5972 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
5973 	poll_threads();
5974 	CU_ASSERT(g_bserrno == 0);
5975 
5976 	/* Check if previously created blob is read only clone */
5977 	CU_ASSERT(spdk_blob_is_read_only(blob));
5978 	CU_ASSERT(!spdk_blob_is_snapshot(blob));
5979 	CU_ASSERT(spdk_blob_is_clone(blob));
5980 	CU_ASSERT(spdk_blob_is_thin_provisioned(blob));
5981 
5982 	/* Create clone from read only blob */
5983 	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5984 	poll_threads();
5985 	CU_ASSERT(g_bserrno == 0);
5986 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5987 	cloneid2 = g_blobid;
5988 
5989 	spdk_bs_open_blob(bs, cloneid2, blob_op_with_handle_complete, NULL);
5990 	poll_threads();
5991 	CU_ASSERT(g_bserrno == 0);
5992 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5993 	clone2 = g_blob;
5994 
5995 	CU_ASSERT(!spdk_blob_is_read_only(clone2));
5996 	CU_ASSERT(!spdk_blob_is_snapshot(clone2));
5997 	CU_ASSERT(spdk_blob_is_clone(clone2));
5998 	CU_ASSERT(spdk_blob_is_thin_provisioned(clone2));
5999 
6000 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid);
6001 
6002 	count = SPDK_COUNTOF(ids);
6003 	rc = spdk_blob_get_clones(bs, blobid, ids, &count);
6004 	CU_ASSERT(rc == 0);
6005 
6006 	CU_ASSERT(count == 1);
6007 	CU_ASSERT(ids[0] == cloneid2);
6008 
6009 	/* Close blobs */
6010 
6011 	spdk_blob_close(clone2, blob_op_complete, NULL);
6012 	poll_threads();
6013 	CU_ASSERT(g_bserrno == 0);
6014 
6015 	spdk_blob_close(blob, blob_op_complete, NULL);
6016 	poll_threads();
6017 	CU_ASSERT(g_bserrno == 0);
6018 
6019 	spdk_blob_close(clone, blob_op_complete, NULL);
6020 	poll_threads();
6021 	CU_ASSERT(g_bserrno == 0);
6022 
6023 	spdk_blob_close(snapshot, blob_op_complete, NULL);
6024 	poll_threads();
6025 	CU_ASSERT(g_bserrno == 0);
6026 
6027 	spdk_blob_close(snapshot2, blob_op_complete, NULL);
6028 	poll_threads();
6029 	CU_ASSERT(g_bserrno == 0);
6030 
6031 	/* Try to delete snapshot with more than 1 clone */
6032 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
6033 	poll_threads();
6034 	CU_ASSERT(g_bserrno != 0);
6035 
6036 	ut_bs_reload(&bs, &bs_opts);
6037 
6038 	/* NULL ids array should return number of clones in count */
6039 	count = SPDK_COUNTOF(ids);
6040 	rc = spdk_blob_get_clones(bs, snapshotid, NULL, &count);
6041 	CU_ASSERT(rc == -ENOMEM);
6042 	CU_ASSERT(count == 2);
6043 
6044 	/* incorrect array size */
6045 	count = 1;
6046 	rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
6047 	CU_ASSERT(rc == -ENOMEM);
6048 	CU_ASSERT(count == 2);
6049 
6050 
6051 	/* Verify structure of loaded blob store */
6052 
6053 	/* snapshot */
6054 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid) == SPDK_BLOBID_INVALID);
6055 
6056 	count = SPDK_COUNTOF(ids);
6057 	rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
6058 	CU_ASSERT(rc == 0);
6059 	CU_ASSERT(count == 2);
6060 	CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
6061 	CU_ASSERT(ids[0] == snapshotid2 || ids[1] == snapshotid2);
6062 
6063 	/* blob */
6064 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
6065 	count = SPDK_COUNTOF(ids);
6066 	rc = spdk_blob_get_clones(bs, blobid, ids, &count);
6067 	CU_ASSERT(rc == 0);
6068 	CU_ASSERT(count == 1);
6069 	CU_ASSERT(ids[0] == cloneid2);
6070 
6071 	/* clone */
6072 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2);
6073 	count = SPDK_COUNTOF(ids);
6074 	rc = spdk_blob_get_clones(bs, cloneid, ids, &count);
6075 	CU_ASSERT(rc == 0);
6076 	CU_ASSERT(count == 0);
6077 
6078 	/* snapshot2 */
6079 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid);
6080 	count = SPDK_COUNTOF(ids);
6081 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
6082 	CU_ASSERT(rc == 0);
6083 	CU_ASSERT(count == 1);
6084 	CU_ASSERT(ids[0] == cloneid);
6085 
6086 	/* clone2 */
6087 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid);
6088 	count = SPDK_COUNTOF(ids);
6089 	rc = spdk_blob_get_clones(bs, cloneid2, ids, &count);
6090 	CU_ASSERT(rc == 0);
6091 	CU_ASSERT(count == 0);
6092 
6093 	/* Try to delete blob that user should not be able to remove */
6094 
6095 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
6096 	poll_threads();
6097 	CU_ASSERT(g_bserrno != 0);
6098 
6099 	/* Remove all blobs */
6100 
6101 	spdk_bs_delete_blob(bs, cloneid, blob_op_complete, NULL);
6102 	poll_threads();
6103 	CU_ASSERT(g_bserrno == 0);
6104 
6105 	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
6106 	poll_threads();
6107 	CU_ASSERT(g_bserrno == 0);
6108 
6109 	spdk_bs_delete_blob(bs, cloneid2, blob_op_complete, NULL);
6110 	poll_threads();
6111 	CU_ASSERT(g_bserrno == 0);
6112 
6113 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
6114 	poll_threads();
6115 	CU_ASSERT(g_bserrno == 0);
6116 
6117 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
6118 	poll_threads();
6119 	CU_ASSERT(g_bserrno == 0);
6120 
6121 	spdk_bs_unload(bs, bs_op_complete, NULL);
6122 	poll_threads();
6123 	CU_ASSERT(g_bserrno == 0);
6124 
6125 	g_bs = NULL;
6126 }
6127 
6128 /**
6129  * Snapshot-clones relation test 2
6130  *
6131  *         snapshot1
6132  *            |
6133  *         snapshot2
6134  *            |
6135  *      +-----+-----+
6136  *      |           |
6137  *   blob(ro)   snapshot3
6138  *      |           |
6139  *      |       snapshot4
6140  *      |        |     |
6141  *   clone2   clone  clone3
6142  */
6143 static void
6144 blob_relations2(void)
6145 {
6146 	struct spdk_blob_store *bs;
6147 	struct spdk_bs_dev *dev;
6148 	struct spdk_bs_opts bs_opts;
6149 	struct spdk_blob_opts opts;
6150 	struct spdk_blob *blob, *snapshot1, *snapshot2, *snapshot3, *snapshot4, *clone, *clone2;
6151 	spdk_blob_id blobid, snapshotid1, snapshotid2, snapshotid3, snapshotid4, cloneid, cloneid2,
6152 		     cloneid3;
6153 	int rc;
6154 	size_t count;
6155 	spdk_blob_id ids[10] = {};
6156 
6157 	dev = init_dev();
6158 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
6159 	snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");
6160 
6161 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
6162 	poll_threads();
6163 	CU_ASSERT(g_bserrno == 0);
6164 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
6165 	bs = g_bs;
6166 
6167 	/* 1. Create blob with 10 clusters */
6168 
6169 	ut_spdk_blob_opts_init(&opts);
6170 	opts.num_clusters = 10;
6171 
6172 	blob = ut_blob_create_and_open(bs, &opts);
6173 	blobid = spdk_blob_get_id(blob);
6174 
6175 	/* 2. Create snapshot1 */
6176 
6177 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
6178 	poll_threads();
6179 	CU_ASSERT(g_bserrno == 0);
6180 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
6181 	snapshotid1 = g_blobid;
6182 
6183 	spdk_bs_open_blob(bs, snapshotid1, blob_op_with_handle_complete, NULL);
6184 	poll_threads();
6185 	CU_ASSERT(g_bserrno == 0);
6186 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
6187 	snapshot1 = g_blob;
6188 
6189 	CU_ASSERT(snapshot1->parent_id == SPDK_BLOBID_INVALID);
6190 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid1) == SPDK_BLOBID_INVALID);
6191 
6192 	CU_ASSERT(blob->parent_id == snapshotid1);
6193 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid1);
6194 
6195 	/* Check if blob is the clone of snapshot1 */
6196 	CU_ASSERT(blob->parent_id == snapshotid1);
6197 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid1);
6198 
6199 	count = SPDK_COUNTOF(ids);
6200 	rc = spdk_blob_get_clones(bs, snapshotid1, ids, &count);
6201 	CU_ASSERT(rc == 0);
6202 	CU_ASSERT(count == 1);
6203 	CU_ASSERT(ids[0] == blobid);
6204 
6205 	/* 3. Create another snapshot */
6206 
6207 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
6208 	poll_threads();
6209 	CU_ASSERT(g_bserrno == 0);
6210 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
6211 	snapshotid2 = g_blobid;
6212 
6213 	spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL);
6214 	poll_threads();
6215 	CU_ASSERT(g_bserrno == 0);
6216 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
6217 	snapshot2 = g_blob;
6218 
6219 	CU_ASSERT(spdk_blob_is_clone(snapshot2));
6220 	CU_ASSERT(snapshot2->parent_id == snapshotid1);
6221 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid1);
6222 
6223 	/* Check if snapshot2 is the clone of snapshot1 and blob
6224 	 * is a child of snapshot2 */
6225 	CU_ASSERT(blob->parent_id == snapshotid2);
6226 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid2);
6227 
6228 	count = SPDK_COUNTOF(ids);
6229 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
6230 	CU_ASSERT(rc == 0);
6231 	CU_ASSERT(count == 1);
6232 	CU_ASSERT(ids[0] == blobid);
6233 
6234 	/* 4. Create clone from snapshot */
6235 
6236 	spdk_bs_create_clone(bs, snapshotid2, NULL, blob_op_with_id_complete, NULL);
6237 	poll_threads();
6238 	CU_ASSERT(g_bserrno == 0);
6239 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
6240 	cloneid = g_blobid;
6241 
6242 	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
6243 	poll_threads();
6244 	CU_ASSERT(g_bserrno == 0);
6245 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
6246 	clone = g_blob;
6247 
6248 	CU_ASSERT(clone->parent_id == snapshotid2);
6249 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2);
6250 
6251 	/* Check if clone is on the snapshot's list */
6252 	count = SPDK_COUNTOF(ids);
6253 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
6254 	CU_ASSERT(rc == 0);
6255 	CU_ASSERT(count == 2);
6256 	CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
6257 	CU_ASSERT(ids[0] == cloneid || ids[1] == cloneid);
6258 
6259 	/* 5. Create snapshot of the clone */
6260 
6261 	spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL);
6262 	poll_threads();
6263 	CU_ASSERT(g_bserrno == 0);
6264 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
6265 	snapshotid3 = g_blobid;
6266 
6267 	spdk_bs_open_blob(bs, snapshotid3, blob_op_with_handle_complete, NULL);
6268 	poll_threads();
6269 	CU_ASSERT(g_bserrno == 0);
6270 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
6271 	snapshot3 = g_blob;
6272 
6273 	CU_ASSERT(snapshot3->parent_id == snapshotid2);
6274 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid3) == snapshotid2);
6275 
6276 	/* Check if clone is converted to the clone of snapshot3 and snapshot3
6277 	 * is a child of snapshot2 */
6278 	CU_ASSERT(clone->parent_id == snapshotid3);
6279 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3);
6280 
6281 	count = SPDK_COUNTOF(ids);
6282 	rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count);
6283 	CU_ASSERT(rc == 0);
6284 	CU_ASSERT(count == 1);
6285 	CU_ASSERT(ids[0] == cloneid);
6286 
6287 	/* 6. Create another snapshot of the clone */
6288 
6289 	spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL);
6290 	poll_threads();
6291 	CU_ASSERT(g_bserrno == 0);
6292 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
6293 	snapshotid4 = g_blobid;
6294 
6295 	spdk_bs_open_blob(bs, snapshotid4, blob_op_with_handle_complete, NULL);
6296 	poll_threads();
6297 	CU_ASSERT(g_bserrno == 0);
6298 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
6299 	snapshot4 = g_blob;
6300 
6301 	CU_ASSERT(snapshot4->parent_id == snapshotid3);
6302 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid4) == snapshotid3);
6303 
6304 	/* Check if clone is converted to the clone of snapshot4 and snapshot4
6305 	 * is a child of snapshot3 */
6306 	CU_ASSERT(clone->parent_id == snapshotid4);
6307 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid4);
6308 
6309 	count = SPDK_COUNTOF(ids);
6310 	rc = spdk_blob_get_clones(bs, snapshotid4, ids, &count);
6311 	CU_ASSERT(rc == 0);
6312 	CU_ASSERT(count == 1);
6313 	CU_ASSERT(ids[0] == cloneid);
6314 
6315 	/* 7. Remove snapshot 4 */
6316 
6317 	ut_blob_close_and_delete(bs, snapshot4);
6318 
6319 	/* Check if relations are back to state from before creating snapshot 4 */
6320 	CU_ASSERT(clone->parent_id == snapshotid3);
6321 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3);
6322 
6323 	count = SPDK_COUNTOF(ids);
6324 	rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count);
6325 	CU_ASSERT(rc == 0);
6326 	CU_ASSERT(count == 1);
6327 	CU_ASSERT(ids[0] == cloneid);
6328 
6329 	/* 8. Create second clone of snapshot 3 and try to remove snapshot 3 */
6330 
6331 	spdk_bs_create_clone(bs, snapshotid3, NULL, blob_op_with_id_complete, NULL);
6332 	poll_threads();
6333 	CU_ASSERT(g_bserrno == 0);
6334 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
6335 	cloneid3 = g_blobid;
6336 
6337 	spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL);
6338 	poll_threads();
6339 	CU_ASSERT(g_bserrno != 0);
6340 
6341 	/* 9. Open snapshot 3 again and try to remove it while clone 3 is closed */
6342 
6343 	spdk_bs_open_blob(bs, snapshotid3, blob_op_with_handle_complete, NULL);
6344 	poll_threads();
6345 	CU_ASSERT(g_bserrno == 0);
6346 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
6347 	snapshot3 = g_blob;
6348 
6349 	spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL);
6350 	poll_threads();
6351 	CU_ASSERT(g_bserrno != 0);
6352 
6353 	spdk_blob_close(snapshot3, blob_op_complete, NULL);
6354 	poll_threads();
6355 	CU_ASSERT(g_bserrno == 0);
6356 
6357 	spdk_bs_delete_blob(bs, cloneid3, blob_op_complete, NULL);
6358 	poll_threads();
6359 	CU_ASSERT(g_bserrno == 0);
6360 
6361 	/* 10. Remove snapshot 1 */
6362 
6363 	/* Check snapshot 1 and snapshot 2 allocated clusters */
6364 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(snapshot1) == 10);
6365 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(snapshot2) == 0);
6366 
6367 	ut_blob_close_and_delete(bs, snapshot1);
6368 
6369 	/* Check if relations are back to state from before creating snapshot 4 (before step 6) */
6370 	CU_ASSERT(snapshot2->parent_id == SPDK_BLOBID_INVALID);
6371 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == SPDK_BLOBID_INVALID);
6372 
6373 	/* Check that snapshot 2 has the clusters that were allocated to snapshot 1 */
6374 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(snapshot2) == 10);
6375 
6376 	count = SPDK_COUNTOF(ids);
6377 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
6378 	CU_ASSERT(rc == 0);
6379 	CU_ASSERT(count == 2);
6380 	CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
6381 	CU_ASSERT(ids[0] == snapshotid3 || ids[1] == snapshotid3);
6382 
6383 	/* 11. Try to create clone from read only blob */
6384 
6385 	/* Mark blob as read only */
6386 	spdk_blob_set_read_only(blob);
6387 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
6388 	poll_threads();
6389 	CU_ASSERT(g_bserrno == 0);
6390 
6391 	/* Create clone from read only blob */
6392 	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
6393 	poll_threads();
6394 	CU_ASSERT(g_bserrno == 0);
6395 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
6396 	cloneid2 = g_blobid;
6397 
6398 	spdk_bs_open_blob(bs, cloneid2, blob_op_with_handle_complete, NULL);
6399 	poll_threads();
6400 	CU_ASSERT(g_bserrno == 0);
6401 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
6402 	clone2 = g_blob;
6403 
6404 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid);
6405 
6406 	count = SPDK_COUNTOF(ids);
6407 	rc = spdk_blob_get_clones(bs, blobid, ids, &count);
6408 	CU_ASSERT(rc == 0);
6409 	CU_ASSERT(count == 1);
6410 	CU_ASSERT(ids[0] == cloneid2);
6411 
6412 	/* Close blobs */
6413 
6414 	spdk_blob_close(clone2, blob_op_complete, NULL);
6415 	poll_threads();
6416 	CU_ASSERT(g_bserrno == 0);
6417 
6418 	spdk_blob_close(blob, blob_op_complete, NULL);
6419 	poll_threads();
6420 	CU_ASSERT(g_bserrno == 0);
6421 
6422 	spdk_blob_close(clone, blob_op_complete, NULL);
6423 	poll_threads();
6424 	CU_ASSERT(g_bserrno == 0);
6425 
6426 	spdk_blob_close(snapshot2, blob_op_complete, NULL);
6427 	poll_threads();
6428 	CU_ASSERT(g_bserrno == 0);
6429 
6430 	spdk_blob_close(snapshot3, blob_op_complete, NULL);
6431 	poll_threads();
6432 	CU_ASSERT(g_bserrno == 0);
6433 
6434 	ut_bs_reload(&bs, &bs_opts);
6435 
6436 	/* Verify structure of loaded blob store */
6437 
6438 	/* snapshot2 */
6439 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == SPDK_BLOBID_INVALID);
6440 
6441 	count = SPDK_COUNTOF(ids);
6442 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
6443 	CU_ASSERT(rc == 0);
6444 	CU_ASSERT(count == 2);
6445 	CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
6446 	CU_ASSERT(ids[0] == snapshotid3 || ids[1] == snapshotid3);
6447 
6448 	/* blob */
6449 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid2);
6450 	count = SPDK_COUNTOF(ids);
6451 	rc = spdk_blob_get_clones(bs, blobid, ids, &count);
6452 	CU_ASSERT(rc == 0);
6453 	CU_ASSERT(count == 1);
6454 	CU_ASSERT(ids[0] == cloneid2);
6455 
6456 	/* clone */
6457 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3);
6458 	count = SPDK_COUNTOF(ids);
6459 	rc = spdk_blob_get_clones(bs, cloneid, ids, &count);
6460 	CU_ASSERT(rc == 0);
6461 	CU_ASSERT(count == 0);
6462 
6463 	/* snapshot3 */
6464 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid3) == snapshotid2);
6465 	count = SPDK_COUNTOF(ids);
6466 	rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count);
6467 	CU_ASSERT(rc == 0);
6468 	CU_ASSERT(count == 1);
6469 	CU_ASSERT(ids[0] == cloneid);
6470 
6471 	/* clone2 */
6472 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid);
6473 	count = SPDK_COUNTOF(ids);
6474 	rc = spdk_blob_get_clones(bs, cloneid2, ids, &count);
6475 	CU_ASSERT(rc == 0);
6476 	CU_ASSERT(count == 0);
6477 
6478 	/* Try to delete all blobs in the worse possible order */
6479 
6480 	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
6481 	poll_threads();
6482 	CU_ASSERT(g_bserrno != 0);
6483 
6484 	spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL);
6485 	poll_threads();
6486 	CU_ASSERT(g_bserrno == 0);
6487 
6488 	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
6489 	poll_threads();
6490 	CU_ASSERT(g_bserrno != 0);
6491 
6492 	spdk_bs_delete_blob(bs, cloneid, blob_op_complete, NULL);
6493 	poll_threads();
6494 	CU_ASSERT(g_bserrno == 0);
6495 
6496 	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
6497 	poll_threads();
6498 	CU_ASSERT(g_bserrno == 0);
6499 
6500 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
6501 	poll_threads();
6502 	CU_ASSERT(g_bserrno == 0);
6503 
6504 	spdk_bs_delete_blob(bs, cloneid2, blob_op_complete, NULL);
6505 	poll_threads();
6506 	CU_ASSERT(g_bserrno == 0);
6507 
6508 	spdk_bs_unload(bs, bs_op_complete, NULL);
6509 	poll_threads();
6510 	CU_ASSERT(g_bserrno == 0);
6511 
6512 	g_bs = NULL;
6513 }
6514 
6515 /**
6516  * Snapshot-clones relation test 3
6517  *
6518  *         snapshot0
6519  *            |
6520  *         snapshot1
6521  *            |
6522  *         snapshot2
6523  *            |
6524  *           blob
6525  */
6526 static void
6527 blob_relations3(void)
6528 {
6529 	struct spdk_blob_store *bs;
6530 	struct spdk_bs_dev *dev;
6531 	struct spdk_io_channel *channel;
6532 	struct spdk_bs_opts bs_opts;
6533 	struct spdk_blob_opts opts;
6534 	struct spdk_blob *blob;
6535 	spdk_blob_id blobid, snapshotid0, snapshotid1, snapshotid2;
6536 
6537 	dev = init_dev();
6538 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
6539 	snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");
6540 
6541 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
6542 	poll_threads();
6543 	CU_ASSERT(g_bserrno == 0);
6544 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
6545 	bs = g_bs;
6546 
6547 	channel = spdk_bs_alloc_io_channel(bs);
6548 	SPDK_CU_ASSERT_FATAL(channel != NULL);
6549 
6550 	/* 1. Create blob with 10 clusters */
6551 	ut_spdk_blob_opts_init(&opts);
6552 	opts.num_clusters = 10;
6553 
6554 	blob = ut_blob_create_and_open(bs, &opts);
6555 	blobid = spdk_blob_get_id(blob);
6556 
6557 	/* 2. Create snapshot0 */
6558 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
6559 	poll_threads();
6560 	CU_ASSERT(g_bserrno == 0);
6561 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
6562 	snapshotid0 = g_blobid;
6563 
6564 	/* 3. Create snapshot1 */
6565 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
6566 	poll_threads();
6567 	CU_ASSERT(g_bserrno == 0);
6568 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
6569 	snapshotid1 = g_blobid;
6570 
6571 	/* 4. Create snapshot2 */
6572 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
6573 	poll_threads();
6574 	CU_ASSERT(g_bserrno == 0);
6575 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
6576 	snapshotid2 = g_blobid;
6577 
6578 	/* 5. Decouple blob */
6579 	spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
6580 	poll_threads();
6581 	CU_ASSERT(g_bserrno == 0);
6582 
6583 	/* 6. Decouple snapshot2. Make sure updating md of snapshot2 is possible */
6584 	spdk_bs_blob_decouple_parent(bs, channel, snapshotid2, blob_op_complete, NULL);
6585 	poll_threads();
6586 	CU_ASSERT(g_bserrno == 0);
6587 
6588 	/* 7. Delete blob */
6589 	spdk_blob_close(blob, blob_op_complete, NULL);
6590 	poll_threads();
6591 	CU_ASSERT(g_bserrno == 0);
6592 
6593 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
6594 	poll_threads();
6595 	CU_ASSERT(g_bserrno == 0);
6596 
6597 	/* 8. Delete snapshot2.
6598 	 * If md of snapshot 2 was updated, it should be possible to delete it */
6599 	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
6600 	poll_threads();
6601 	CU_ASSERT(g_bserrno == 0);
6602 
6603 	/* Remove remaining blobs and unload bs */
6604 	spdk_bs_delete_blob(bs, snapshotid1, blob_op_complete, NULL);
6605 	poll_threads();
6606 	CU_ASSERT(g_bserrno == 0);
6607 
6608 	spdk_bs_delete_blob(bs, snapshotid0, blob_op_complete, NULL);
6609 	poll_threads();
6610 	CU_ASSERT(g_bserrno == 0);
6611 
6612 	spdk_bs_free_io_channel(channel);
6613 	poll_threads();
6614 
6615 	spdk_bs_unload(bs, bs_op_complete, NULL);
6616 	poll_threads();
6617 	CU_ASSERT(g_bserrno == 0);
6618 
6619 	g_bs = NULL;
6620 }
6621 
6622 static void
6623 blobstore_clean_power_failure(void)
6624 {
6625 	struct spdk_blob_store *bs;
6626 	struct spdk_blob *blob;
6627 	struct spdk_power_failure_thresholds thresholds = {};
6628 	bool clean = false;
6629 	struct spdk_bs_super_block *super = (struct spdk_bs_super_block *)&g_dev_buffer[0];
6630 	struct spdk_bs_super_block super_copy = {};
6631 
6632 	thresholds.general_threshold = 1;
6633 	while (!clean) {
6634 		/* Create bs and blob */
6635 		suite_blob_setup();
6636 		SPDK_CU_ASSERT_FATAL(g_bs != NULL);
6637 		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
6638 		bs = g_bs;
6639 		blob = g_blob;
6640 
6641 		/* Super block should not change for rest of the UT,
6642 		 * save it and compare later. */
6643 		memcpy(&super_copy, super, sizeof(struct spdk_bs_super_block));
6644 		SPDK_CU_ASSERT_FATAL(super->clean == 0);
6645 		SPDK_CU_ASSERT_FATAL(bs->clean == 0);
6646 
6647 		/* Force bs/super block in a clean state.
6648 		 * Along with marking blob dirty, to cause blob persist. */
6649 		blob->state = SPDK_BLOB_STATE_DIRTY;
6650 		bs->clean = 1;
6651 		super->clean = 1;
6652 		super->crc = blob_md_page_calc_crc(super);
6653 
6654 		g_bserrno = -1;
6655 		dev_set_power_failure_thresholds(thresholds);
6656 		spdk_blob_sync_md(blob, blob_op_complete, NULL);
6657 		poll_threads();
6658 		dev_reset_power_failure_event();
6659 
6660 		if (g_bserrno == 0) {
6661 			/* After successful md sync, both bs and super block
6662 			 * should be marked as not clean. */
6663 			SPDK_CU_ASSERT_FATAL(bs->clean == 0);
6664 			SPDK_CU_ASSERT_FATAL(super->clean == 0);
6665 			clean = true;
6666 		}
6667 
6668 		/* Depending on the point of failure, super block was either updated or not. */
6669 		super_copy.clean = super->clean;
6670 		super_copy.crc = blob_md_page_calc_crc(&super_copy);
6671 		/* Compare that the values in super block remained unchanged. */
6672 		SPDK_CU_ASSERT_FATAL(!memcmp(&super_copy, super, sizeof(struct spdk_bs_super_block)));
6673 
6674 		/* Delete blob and unload bs */
6675 		suite_blob_cleanup();
6676 
6677 		thresholds.general_threshold++;
6678 	}
6679 }
6680 
/*
 * Repeatedly inject a simulated power failure at progressively later points
 * while deleting a snapshot, then dirty-load the blobstore and verify the
 * on-disk state is always consistent: either the snapshot is still intact and
 * linked as the blob's parent, or the deletion took full effect and the blob
 * reclaimed the clusters. Loops until an iteration survives long enough for
 * the delete call to report success.
 */
static void
blob_delete_snapshot_power_failure(void)
{
	struct spdk_bs_dev *dev;
	struct spdk_blob_store *bs;
	struct spdk_blob_opts opts;
	struct spdk_blob *blob, *snapshot;
	struct spdk_power_failure_thresholds thresholds = {};
	spdk_blob_id blobid, snapshotid;
	const void *value;
	size_t value_len;
	size_t count;
	spdk_blob_id ids[3] = {};
	int rc;
	bool deleted = false;
	int delete_snapshot_bserrno = -1;
	uint32_t first_data_cluster;

	/* Start by failing on the very first device I/O */
	thresholds.general_threshold = 1;
	while (!deleted) {
		dev = init_dev();

		spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		SPDK_CU_ASSERT_FATAL(g_bs != NULL);
		bs = g_bs;

		first_data_cluster = FIRST_DATA_CLUSTER(bs);

		/* Create blob */
		ut_spdk_blob_opts_init(&opts);
		opts.num_clusters = 10;

		spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
		blobid = g_blobid;

		/* Create snapshot */
		spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
		snapshotid = g_blobid;
		/* Exactly the blob's 10 clusters are in use - nothing extra allocated */
		SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, first_data_cluster));
		SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, first_data_cluster + 10));

		dev_set_power_failure_thresholds(thresholds);

		/* Attempt the delete; it may be cut short by the injected failure */
		spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
		poll_threads();
		delete_snapshot_bserrno = g_bserrno;

		/* Do not shut down cleanly. Assumption is that after snapshot deletion
		 * reports success, changes to both blobs should already persisted. */
		dev_reset_power_failure_event();
		ut_bs_dirty_load(&bs, NULL);

		/* Cluster accounting must be intact no matter where the failure hit */
		SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, first_data_cluster));
		SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, first_data_cluster + 10));

		spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
		blob = g_blob;
		SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == true);

		spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
		poll_threads();

		if (g_bserrno == 0) {
			/* Snapshot survived: it must still own the 10 clusters, still be
			 * the blob's parent, and carry no pending-removal xattr */
			SPDK_CU_ASSERT_FATAL(g_blob != NULL);
			snapshot = g_blob;
			CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
			count = SPDK_COUNTOF(ids);
			rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
			CU_ASSERT(rc == 0);
			CU_ASSERT(count == 1);
			CU_ASSERT(ids[0] == blobid);
			rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_PENDING_REMOVAL, &value, &value_len);
			CU_ASSERT(rc != 0);
			SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(snapshot) == false);
			CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);
			CU_ASSERT(spdk_blob_get_num_allocated_clusters(snapshot) == 10);

			spdk_blob_close(snapshot, blob_op_complete, NULL);
			poll_threads();
			CU_ASSERT(g_bserrno == 0);
		} else {
			/* Snapshot is gone: the blob must hold the clusters again */
			CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID);
			CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 10);
			/* Snapshot might have been left in unrecoverable state, so it does not open.
			 * Yet delete might perform further changes to the clone after that.
			 * This UT should test until snapshot is deleted and delete call succeeds. */
			if (delete_snapshot_bserrno == 0) {
				deleted = true;
			}
		}

		spdk_blob_close(blob, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);

		spdk_bs_unload(bs, bs_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);

		/* Move the failure point one I/O further for the next attempt */
		thresholds.general_threshold++;
	}
}
6794 
/*
 * Repeatedly inject a simulated power failure at progressively later points
 * while creating a snapshot, then dirty-load the blobstore and verify the
 * result is all-or-nothing: either the snapshot exists with the blob as its
 * thin-provisioned clone, or the blob is untouched and still thick
 * provisioned. Loops until an iteration completes the snapshot successfully.
 */
static void
blob_create_snapshot_power_failure(void)
{
	/* NOTE(review): bs is reassigned from g_bs inside the loop before use */
	struct spdk_blob_store *bs = g_bs;
	struct spdk_bs_dev *dev;
	struct spdk_blob_opts opts;
	struct spdk_blob *blob, *snapshot;
	struct spdk_power_failure_thresholds thresholds = {};
	spdk_blob_id blobid, snapshotid;
	const void *value;
	size_t value_len;
	size_t count;
	spdk_blob_id ids[3] = {};
	int rc;
	bool created = false;
	int create_snapshot_bserrno = -1;
	uint32_t first_data_cluster;

	/* Start by failing on the very first device I/O */
	thresholds.general_threshold = 1;
	while (!created) {
		dev = init_dev();

		spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		SPDK_CU_ASSERT_FATAL(g_bs != NULL);
		bs = g_bs;

		first_data_cluster = FIRST_DATA_CLUSTER(bs);

		/* Create blob */
		ut_spdk_blob_opts_init(&opts);
		opts.num_clusters = 10;

		spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
		blobid = g_blobid;
		SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, first_data_cluster));
		SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, first_data_cluster + 10));

		dev_set_power_failure_thresholds(thresholds);

		/* Create snapshot - may be cut short by the injected failure */
		spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
		poll_threads();
		create_snapshot_bserrno = g_bserrno;
		snapshotid = g_blobid;
		/* Snapshot creation must not allocate additional data clusters */
		SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, first_data_cluster));
		SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, first_data_cluster + 10));

		/* Do not shut down cleanly. Assumption is that after create snapshot
		 * reports success, both blobs should be power-fail safe. */
		dev_reset_power_failure_event();
		ut_bs_dirty_load(&bs, NULL);

		SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, first_data_cluster));
		SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, first_data_cluster + 10));

		spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
		blob = g_blob;

		if (snapshotid != SPDK_BLOBID_INVALID) {
			spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
			poll_threads();
		}

		if ((snapshotid != SPDK_BLOBID_INVALID) && (g_bserrno == 0)) {
			/* Snapshot made it to disk: it owns the clusters, the blob is its
			 * thin-provisioned clone, and the in-progress xattr is gone */
			SPDK_CU_ASSERT_FATAL(g_blob != NULL);
			snapshot = g_blob;
			SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == true);
			SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(snapshot) == false);
			CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);
			CU_ASSERT(spdk_blob_get_num_allocated_clusters(snapshot) == 10);
			CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
			count = SPDK_COUNTOF(ids);
			rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
			CU_ASSERT(rc == 0);
			CU_ASSERT(count == 1);
			CU_ASSERT(ids[0] == blobid);
			rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_IN_PROGRESS, &value, &value_len);
			CU_ASSERT(rc != 0);

			spdk_blob_close(snapshot, blob_op_complete, NULL);
			poll_threads();
			CU_ASSERT(g_bserrno == 0);
			if (create_snapshot_bserrno == 0) {
				created = true;
			}
		} else {
			/* Snapshot never materialized: the blob must be unchanged */
			CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID);
			SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == false);
			CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 10);
		}

		spdk_blob_close(blob, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);

		spdk_bs_unload(bs, bs_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);

		/* Move the failure point one I/O further for the next attempt */
		thresholds.general_threshold++;
	}
}
6905 
6906 #define IO_UT_BLOCKS_PER_CLUSTER 64
6907 
6908 static void
6909 test_io_write(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
6910 {
6911 	const uint32_t SZ = IO_UT_BLOCKS_PER_CLUSTER;
6912 	uint8_t payload_ff[SZ * 512];
6913 	uint8_t payload_aa[SZ * 512];
6914 	uint8_t payload_00[SZ * 512];
6915 	uint8_t *cluster0, *cluster1;
6916 
6917 	memset(payload_ff, 0xFF, sizeof(payload_ff));
6918 	memset(payload_aa, 0xAA, sizeof(payload_aa));
6919 	memset(payload_00, 0x00, sizeof(payload_00));
6920 
6921 	/* Try to perform I/O with io unit = 512 */
6922 	spdk_blob_io_write(blob, channel, payload_ff, 0, 1, blob_op_complete, NULL);
6923 	poll_threads();
6924 	CU_ASSERT(g_bserrno == 0);
6925 
6926 	/* If thin provisioned is set cluster should be allocated now */
6927 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[0] != 0);
6928 	cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen];
6929 
6930 	/* Each character 0-F symbolizes single io_unit containing 512 bytes block filled with that character.
6931 	* Each page is separated by |. Whole block [...] symbolizes one cluster (containing 4 pages). */
6932 	/* cluster0: [ F000 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
6933 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6934 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, (SZ - 1) * 512) == 0);
6935 
6936 	/* Verify write with offset on first page */
6937 	spdk_blob_io_write(blob, channel, payload_ff, 2, 1, blob_op_complete, NULL);
6938 	poll_threads();
6939 	CU_ASSERT(g_bserrno == 0);
6940 
6941 	/* cluster0: [ F0F0 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
6942 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6943 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6944 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6945 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6946 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_00, (SZ - 4) * 512) == 0);
6947 
6948 	/* Verify write with offset on first page */
6949 	spdk_blob_io_write(blob, channel, payload_ff, 4, 4, blob_op_complete, NULL);
6950 	poll_threads();
6951 
6952 	/* cluster0: [ F0F0 FFFF | 0000 0000 | 0000 0000 | 0000 0000 ] */
6953 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6954 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6955 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6956 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6957 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 4 * 512) == 0);
6958 	CU_ASSERT(memcmp(cluster0 + 8 * 512, payload_00, (SZ - 8) * 512) == 0);
6959 
6960 	/* Verify write with offset on second page */
6961 	spdk_blob_io_write(blob, channel, payload_ff, 8, 4, blob_op_complete, NULL);
6962 	poll_threads();
6963 
6964 	/* cluster0: [ F0F0 FFFF | FFFF 0000 | 0000 0000 | 0000 0000 ] */
6965 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6966 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6967 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6968 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6969 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 8 * 512) == 0);
6970 	CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, (SZ - 12) * 512) == 0);
6971 
6972 	/* Verify write across multiple pages */
6973 	spdk_blob_io_write(blob, channel, payload_aa, 4, 8, blob_op_complete, NULL);
6974 	poll_threads();
6975 
6976 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 0000 ] */
6977 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6978 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6979 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6980 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6981 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
6982 	CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, (SZ - 12) * 512) == 0);
6983 
6984 	/* Verify write across multiple clusters */
6985 	spdk_blob_io_write(blob, channel, payload_ff, SZ - 4, 8, blob_op_complete, NULL);
6986 	poll_threads();
6987 
6988 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0);
6989 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
6990 
6991 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6992 	 * cluster1: [ FFFF 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
6993 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6994 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6995 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6996 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6997 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
6998 	CU_ASSERT(memcmp(cluster0 + (SZ - 4) * 512, payload_ff, 4 * 512) == 0);
6999 
7000 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0);
7001 	CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, (SZ - 4) * 512) == 0);
7002 
7003 	/* Verify write to second cluster */
7004 	spdk_blob_io_write(blob, channel, payload_ff, SZ + 12, 2, blob_op_complete, NULL);
7005 	poll_threads();
7006 
7007 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0);
7008 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
7009 
7010 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
7011 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] */
7012 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
7013 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
7014 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
7015 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
7016 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
7017 	CU_ASSERT(memcmp(cluster0 + (SZ - 4) * 512, payload_ff, 4 * 512) == 0);
7018 
7019 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0);
7020 	CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 8 * 512) == 0);
7021 	CU_ASSERT(memcmp(cluster1 + 12 * 512, payload_ff, 2 * 512) == 0);
7022 	CU_ASSERT(memcmp(cluster1 + 14 * 512, payload_00, (SZ - 14) * 512) == 0);
7023 }
7024 
/*
 * Read back, at various offsets and lengths, the two-cluster pattern laid
 * down by test_io_write() and verify the data matches the layout shown in the
 * per-case diagrams (each hex digit = one 512-byte io_unit, '|' = page
 * boundary, [...] = one cluster, (...) = the region being read).
 */
static void
test_io_read(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
{
	const uint32_t SZ = IO_UT_BLOCKS_PER_CLUSTER;
	uint8_t payload_read[2 * SZ * 512];
	uint8_t payload_ff[SZ * 512];
	uint8_t payload_aa[SZ * 512];
	uint8_t payload_00[SZ * 512];

	/* Reference patterns to compare read results against */
	memset(payload_ff, 0xFF, sizeof(payload_ff));
	memset(payload_aa, 0xAA, sizeof(payload_aa));
	memset(payload_00, 0x00, sizeof(payload_00));

	/* Read only first io unit */
	/* cluster0: [ (F)0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
	 * payload_read: F000 0000 | 0000 0000 ... */
	memset(payload_read, 0x00, sizeof(payload_read));
	spdk_blob_io_read(blob, channel, payload_read, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, (SZ - 1) * 512) == 0);

	/* Read four io_units starting from offset = 2
	 * cluster0: [ F0(F0 AA)AA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
	 * payload_read: F0AA 0000 | 0000 0000 ... */

	memset(payload_read, 0x00, sizeof(payload_read));
	spdk_blob_io_read(blob, channel, payload_read, 2, 4, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0);
	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_aa, 512) == 0);
	CU_ASSERT(memcmp(payload_read + 3 * 512, payload_aa, 512) == 0);
	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, (SZ - 4) * 512) == 0);

	/* Read eight io_units across multiple pages
	 * cluster0: [ F0F0 (AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ]
	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
	 * payload_read: AAAA AAAA | 0000 0000 ... */
	memset(payload_read, 0x00, sizeof(payload_read));
	spdk_blob_io_read(blob, channel, payload_read, 4, 8, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_aa, 8 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, (SZ - 8) * 512) == 0);

	/* Read eight io_units across multiple clusters
	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 (FFFF ]
	 * cluster1: [ FFFF) 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
	 * payload_read: FFFF FFFF | 0000 0000 ... */
	memset(payload_read, 0x00, sizeof(payload_read));
	spdk_blob_io_read(blob, channel, payload_read, SZ - 4, 8, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 8 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, (SZ - 8) * 512) == 0);

	/* Read four io_units from second cluster
	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
	 * cluster1: [ FFFF 0000 | 00(00 FF)00 | 0000 0000 | 0000 0000 ]
	 * payload_read: 00FF 0000 | 0000 0000 ... */
	memset(payload_read, 0x00, sizeof(payload_read));
	spdk_blob_io_read(blob, channel, payload_read, SZ + 10, 4, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_00, 2 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 2 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, (SZ - 4) * 512) == 0);

	/* Read second cluster
	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
	 * cluster1: [ (FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ]
	 * payload_read: FFFF 0000 | 0000 FF00 ... */
	memset(payload_read, 0x00, sizeof(payload_read));
	spdk_blob_io_read(blob, channel, payload_read, SZ, SZ, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 4 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 8 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + 12 * 512, payload_ff, 2 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + 14 * 512, payload_00, (SZ - 14) * 512) == 0);

	/* Read whole two clusters
	 * cluster0: [ (F0F0 AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ]
	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ] */
	memset(payload_read, 0x00, sizeof(payload_read));
	spdk_blob_io_read(blob, channel, payload_read, 0, SZ * 2, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0);
	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 512) == 0);
	CU_ASSERT(memcmp(payload_read + 3 * 512, payload_00, 512) == 0);
	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_aa, 8 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + (SZ - 4) * 512, payload_ff, 4 * 512) == 0);

	CU_ASSERT(memcmp(payload_read + (SZ + 0) * 512, payload_ff, 4 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + (SZ + 4) * 512, payload_00, 8 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + (SZ + 12) * 512, payload_ff, 2 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + (SZ + 14) * 512, payload_00, (SZ - 14) * 512) == 0);
}
7135 
7136 
7137 static void
7138 test_io_unmap(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
7139 {
7140 	const uint32_t SZ = IO_UT_BLOCKS_PER_CLUSTER;
7141 	uint8_t payload_ff[SZ * 512];
7142 	uint8_t payload_aa[SZ * 512];
7143 	uint8_t payload_00[SZ * 512];
7144 	uint8_t *cluster0, *cluster1;
7145 
7146 	memset(payload_ff, 0xFF, sizeof(payload_ff));
7147 	memset(payload_aa, 0xAA, sizeof(payload_aa));
7148 	memset(payload_00, 0x00, sizeof(payload_00));
7149 
7150 	cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen];
7151 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
7152 
7153 	/* Unmap */
7154 	spdk_blob_io_unmap(blob, channel, 0, SZ * 2, blob_op_complete, NULL);
7155 	poll_threads();
7156 
7157 	CU_ASSERT(g_bserrno == 0);
7158 
7159 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_00, SZ * 512) == 0);
7160 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_00, SZ * 512) == 0);
7161 }
7162 
7163 static void
7164 test_io_zeroes(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
7165 {
7166 	const uint32_t SZ = IO_UT_BLOCKS_PER_CLUSTER;
7167 	uint8_t payload_ff[SZ * 512];
7168 	uint8_t payload_aa[SZ * 512];
7169 	uint8_t payload_00[SZ * 512];
7170 	uint8_t *cluster0, *cluster1;
7171 
7172 	memset(payload_ff, 0xFF, sizeof(payload_ff));
7173 	memset(payload_aa, 0xAA, sizeof(payload_aa));
7174 	memset(payload_00, 0x00, sizeof(payload_00));
7175 
7176 	cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen];
7177 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
7178 
7179 	/* Write zeroes  */
7180 	spdk_blob_io_write_zeroes(blob, channel, 0, SZ * 2, blob_op_complete, NULL);
7181 	poll_threads();
7182 
7183 	CU_ASSERT(g_bserrno == 0);
7184 
7185 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_00, SZ * 512) == 0);
7186 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_00, SZ * 512) == 0);
7187 }
7188 
7189 static inline void
7190 test_blob_io_writev(struct spdk_blob *blob, struct spdk_io_channel *channel,
7191 		    struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
7192 		    spdk_blob_op_complete cb_fn, void *cb_arg, struct spdk_blob_ext_io_opts *io_opts)
7193 {
7194 	if (io_opts) {
7195 		g_dev_writev_ext_called = false;
7196 		memset(&g_blob_ext_io_opts, 0, sizeof(g_blob_ext_io_opts));
7197 		spdk_blob_io_writev_ext(blob, channel, iov, iovcnt, offset, length, blob_op_complete, NULL,
7198 					io_opts);
7199 	} else {
7200 		spdk_blob_io_writev(blob, channel, iov, iovcnt, offset, length, blob_op_complete, NULL);
7201 	}
7202 	poll_threads();
7203 	CU_ASSERT(g_bserrno == 0);
7204 	if (io_opts) {
7205 		CU_ASSERT(g_dev_writev_ext_called);
7206 		CU_ASSERT(memcmp(io_opts, &g_blob_ext_io_opts, sizeof(g_blob_ext_io_opts)) == 0);
7207 	}
7208 }
7209 
7210 static void
7211 test_iov_write(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel,
7212 	       bool ext_api)
7213 {
7214 	const uint32_t SZ = IO_UT_BLOCKS_PER_CLUSTER;
7215 	uint8_t payload_ff[SZ * 512];
7216 	uint8_t payload_aa[SZ * 512];
7217 	uint8_t payload_00[SZ * 512];
7218 	uint8_t *cluster0, *cluster1;
7219 	struct iovec iov[4];
7220 	struct spdk_blob_ext_io_opts ext_opts = {
7221 		.memory_domain = (struct spdk_memory_domain *)0xfeedbeef,
7222 		.memory_domain_ctx = (void *)0xf00df00d,
7223 		.size = sizeof(struct spdk_blob_ext_io_opts),
7224 		.user_ctx = (void *)123,
7225 	};
7226 
7227 	memset(payload_ff, 0xFF, sizeof(payload_ff));
7228 	memset(payload_aa, 0xAA, sizeof(payload_aa));
7229 	memset(payload_00, 0x00, sizeof(payload_00));
7230 
7231 	/* Try to perform I/O with io unit = 512 */
7232 	iov[0].iov_base = payload_ff;
7233 	iov[0].iov_len = 1 * 512;
7234 
7235 	test_blob_io_writev(blob, channel, iov, 1, 0, 1, blob_op_complete, NULL,
7236 			    ext_api ? &ext_opts : NULL);
7237 
7238 	/* If thin provisioned is set cluster should be allocated now */
7239 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[0] != 0);
7240 	cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen];
7241 
7242 	/* Each character 0-F symbolizes single io_unit containing 512 bytes block filled with that character.
7243 	* Each page is separated by |. Whole block [...] symbolizes one cluster (containing 4 pages). */
7244 	/* cluster0: [ F000 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
7245 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
7246 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, (SZ - 1) * 512) == 0);
7247 
7248 	/* Verify write with offset on first page */
7249 	iov[0].iov_base = payload_ff;
7250 	iov[0].iov_len = 1 * 512;
7251 
7252 	test_blob_io_writev(blob, channel, iov, 1, 2, 1, blob_op_complete, NULL,
7253 			    ext_api ? &ext_opts : NULL);
7254 
7255 	/* cluster0: [ F0F0 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
7256 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
7257 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
7258 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
7259 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
7260 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_00, (SZ - 4) * 512) == 0);
7261 
7262 	/* Verify write with offset on first page */
7263 	iov[0].iov_base = payload_ff;
7264 	iov[0].iov_len = 4 * 512;
7265 	spdk_blob_io_writev(blob, channel, iov, 1, 4, 4, blob_op_complete, NULL);
7266 	poll_threads();
7267 
7268 	/* cluster0: [ F0F0 FFFF | 0000 0000 | 0000 0000 | 0000 0000 ] */
7269 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
7270 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
7271 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
7272 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
7273 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 4 * 512) == 0);
7274 	CU_ASSERT(memcmp(cluster0 + 8 * 512, payload_00, (SZ - 8) * 512) == 0);
7275 
7276 	/* Verify write with offset on second page */
7277 	iov[0].iov_base = payload_ff;
7278 	iov[0].iov_len = 4 * 512;
7279 	spdk_blob_io_writev(blob, channel, iov, 1, 8, 4, blob_op_complete, NULL);
7280 	poll_threads();
7281 
7282 	/* cluster0: [ F0F0 FFFF | FFFF 0000 | 0000 0000 | 0000 0000 ] */
7283 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
7284 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
7285 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
7286 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
7287 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 8 * 512) == 0);
7288 	CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, (SZ - 12) * 512) == 0);
7289 
7290 	/* Verify write across multiple pages */
7291 	iov[0].iov_base = payload_aa;
7292 	iov[0].iov_len = 8 * 512;
7293 
7294 	test_blob_io_writev(blob, channel, iov, 1, 4, 8, blob_op_complete, NULL,
7295 			    ext_api ? &ext_opts : NULL);
7296 
7297 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 0000 ] */
7298 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
7299 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
7300 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
7301 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
7302 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
7303 	CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, (SZ - 12) * 512) == 0);
7304 
7305 	/* Verify write across multiple clusters */
7306 
7307 	iov[0].iov_base = payload_ff;
7308 	iov[0].iov_len = 8 * 512;
7309 
7310 	test_blob_io_writev(blob, channel, iov, 1, (SZ - 4), 8, blob_op_complete, NULL,
7311 			    ext_api ? &ext_opts : NULL);
7312 
7313 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0);
7314 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
7315 
7316 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
7317 	 * cluster1: [ FFFF 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
7318 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
7319 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
7320 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
7321 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
7322 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
7323 	CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, (SZ - 16) * 512) == 0);
7324 	CU_ASSERT(memcmp(cluster0 + (SZ - 4) * 512, payload_ff, 4 * 512) == 0);
7325 
7326 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0);
7327 	CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, (SZ - 4) * 512) == 0);
7328 
7329 	/* Verify write to second cluster */
7330 
7331 	iov[0].iov_base = payload_ff;
7332 	iov[0].iov_len = 2 * 512;
7333 
7334 	test_blob_io_writev(blob, channel, iov, 1, SZ + 12, 2, blob_op_complete, NULL,
7335 			    ext_api ? &ext_opts : NULL);
7336 
7337 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0);
7338 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
7339 
7340 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
7341 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] */
7342 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
7343 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
7344 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
7345 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
7346 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
7347 	CU_ASSERT(memcmp(cluster0 + (SZ - 4) * 512, payload_ff, 4 * 512) == 0);
7348 
7349 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0);
7350 	CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 8 * 512) == 0);
7351 	CU_ASSERT(memcmp(cluster1 + 12 * 512, payload_ff, 2 * 512) == 0);
7352 	CU_ASSERT(memcmp(cluster1 + 14 * 512, payload_00, (SZ - 14) * 512) == 0);
7353 }
7354 
7355 static inline void
7356 test_blob_io_readv(struct spdk_blob *blob, struct spdk_io_channel *channel,
7357 		   struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
7358 		   spdk_blob_op_complete cb_fn, void *cb_arg, struct spdk_blob_ext_io_opts *io_opts)
7359 {
7360 	if (io_opts) {
7361 		g_dev_readv_ext_called = false;
7362 		memset(&g_blob_ext_io_opts, 0, sizeof(g_blob_ext_io_opts));
7363 		spdk_blob_io_readv_ext(blob, channel, iov, iovcnt, offset, length, blob_op_complete, NULL, io_opts);
7364 	} else {
7365 		spdk_blob_io_readv(blob, channel, iov, iovcnt, offset, length, blob_op_complete, NULL);
7366 	}
7367 	poll_threads();
7368 	CU_ASSERT(g_bserrno == 0);
7369 	if (io_opts) {
7370 		CU_ASSERT(g_dev_readv_ext_called);
7371 		CU_ASSERT(memcmp(io_opts, &g_blob_ext_io_opts, sizeof(g_blob_ext_io_opts)) == 0);
7372 	}
7373 }
7374 
/*
 * Exercise vectored reads at 512-byte io_unit granularity against the data
 * pattern left behind by test_iov_write():
 *   cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
 *   cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
 * Covers single-unit reads, offset reads, multi-iov reads, cross-cluster
 * reads, and a full two-cluster read. When ext_api is true, reads go through
 * spdk_blob_io_readv_ext() and the helper verifies ext opt propagation.
 *
 * NOTE(review): must be called after test_iov_write() on the same blob, or
 * the expected patterns below will not match.
 */
static void
test_iov_read(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel,
	      bool ext_api)
{
	const uint32_t SZ = IO_UT_BLOCKS_PER_CLUSTER;
	uint8_t payload_read[2 * SZ * 512];
	uint8_t payload_ff[SZ * 512];
	uint8_t payload_aa[SZ * 512];
	uint8_t payload_00[SZ * 512];
	struct iovec iov[4];
	/* Sentinel values; the dev hook only records them, it never dereferences */
	struct spdk_blob_ext_io_opts ext_opts = {
		.memory_domain = (struct spdk_memory_domain *)0xfeedbeef,
		.memory_domain_ctx = (void *)0xf00df00d,
		.size = sizeof(struct spdk_blob_ext_io_opts),
		.user_ctx = (void *)123,
	};

	memset(payload_ff, 0xFF, sizeof(payload_ff));
	memset(payload_aa, 0xAA, sizeof(payload_aa));
	memset(payload_00, 0x00, sizeof(payload_00));

	/* Read only first io unit */
	/* cluster0: [ (F)0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
	 * payload_read: F000 0000 | 0000 0000 ... */
	memset(payload_read, 0x00, sizeof(payload_read));
	iov[0].iov_base = payload_read;
	iov[0].iov_len = 1 * 512;

	test_blob_io_readv(blob, channel, iov, 1, 0, 1, blob_op_complete, NULL, ext_api ? &ext_opts : NULL);

	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, (SZ - 1) * 512) == 0);

	/* Read four io_units starting from offset = 2
	 * cluster0: [ F0(F0 AA)AA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
	 * payload_read: F0AA 0000 | 0000 0000 ... */

	memset(payload_read, 0x00, sizeof(payload_read));
	iov[0].iov_base = payload_read;
	iov[0].iov_len = 4 * 512;

	test_blob_io_readv(blob, channel, iov, 1, 2, 4, blob_op_complete, NULL, ext_api ? &ext_opts : NULL);

	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0);
	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_aa, 512) == 0);
	CU_ASSERT(memcmp(payload_read + 3 * 512, payload_aa, 512) == 0);
	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, (SZ - 4) * 512) == 0);

	/* Read eight io_units across multiple pages, split over two iovecs
	 * cluster0: [ F0F0 (AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ]
	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
	 * payload_read: AAAA AAAA | 0000 0000 ... */
	memset(payload_read, 0x00, sizeof(payload_read));
	iov[0].iov_base = payload_read;
	iov[0].iov_len = 4 * 512;
	iov[1].iov_base = payload_read + 4 * 512;
	iov[1].iov_len = 4 * 512;

	test_blob_io_readv(blob, channel, iov, 2, 4, 8, blob_op_complete, NULL, ext_api ? &ext_opts : NULL);

	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_aa, 8 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, (SZ - 8) * 512) == 0);

	/* Read eight io_units across multiple clusters, split over four iovecs
	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 (FFFF ]
	 * cluster1: [ FFFF) 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
	 * payload_read: FFFF FFFF | 0000 0000 ... */
	memset(payload_read, 0x00, sizeof(payload_read));
	iov[0].iov_base = payload_read;
	iov[0].iov_len = 2 * 512;
	iov[1].iov_base = payload_read + 2 * 512;
	iov[1].iov_len = 2 * 512;
	iov[2].iov_base = payload_read + 4 * 512;
	iov[2].iov_len = 2 * 512;
	iov[3].iov_base = payload_read + 6 * 512;
	iov[3].iov_len = 2 * 512;

	test_blob_io_readv(blob, channel, iov, 4, SZ - 4, 8, blob_op_complete, NULL,
			   ext_api ? &ext_opts : NULL);

	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 8 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, (SZ - 8) * 512) == 0);

	/* Read four io_units from second cluster
	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
	 * cluster1: [ FFFF 0000 | 00(00 FF)00 | 0000 0000 | 0000 0000 ]
	 * payload_read: 00FF 0000 | 0000 0000 ... */
	memset(payload_read, 0x00, sizeof(payload_read));
	iov[0].iov_base = payload_read;
	iov[0].iov_len = 1 * 512;
	iov[1].iov_base = payload_read + 1 * 512;
	iov[1].iov_len = 3 * 512;

	test_blob_io_readv(blob, channel, iov, 2, SZ + 10, 4, blob_op_complete, NULL,
			   ext_api ? &ext_opts : NULL);

	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_00, 2 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 2 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, (SZ - 4) * 512) == 0);

	/* Read second cluster
	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
	 * cluster1: [ (FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ]
	 * payload_read: FFFF 0000 | 0000 FF00 ... */
	memset(payload_read, 0x00, sizeof(payload_read));
	iov[0].iov_base = payload_read;
	iov[0].iov_len = 1 * 512;
	iov[1].iov_base = payload_read + 1 * 512;
	iov[1].iov_len = 2 * 512;
	iov[2].iov_base = payload_read + 3 * 512;
	iov[2].iov_len = 4 * 512;
	iov[3].iov_base = payload_read + 7 * 512;
	iov[3].iov_len = (SZ - 7) * 512;

	test_blob_io_readv(blob, channel, iov, 4, SZ, SZ, blob_op_complete, NULL,
			   ext_api ? &ext_opts : NULL);

	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 4 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 8 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + 12 * 512, payload_ff, 2 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + 14 * 512, payload_00, (SZ - 14) * 512) == 0);

	/* Read whole two clusters with unevenly-sized iovecs
	 * cluster0: [ (F0F0 AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ]
	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ] */
	memset(payload_read, 0x00, sizeof(payload_read));
	iov[0].iov_base = payload_read;
	iov[0].iov_len = 1 * 512;
	iov[1].iov_base = payload_read + 1 * 512;
	iov[1].iov_len = 8 * 512;
	iov[2].iov_base = payload_read + 9 * 512;
	iov[2].iov_len = 16 * 512;
	iov[3].iov_base = payload_read + 25 * 512;
	iov[3].iov_len = (2 * SZ - 25) * 512;

	test_blob_io_readv(blob, channel, iov, 4, 0, SZ * 2, blob_op_complete, NULL,
			   ext_api ? &ext_opts : NULL);

	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0);
	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 512) == 0);
	CU_ASSERT(memcmp(payload_read + 3 * 512, payload_00, 512) == 0);
	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_aa, 8 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + (SZ - 4) * 512, payload_ff, 4 * 512) == 0);

	CU_ASSERT(memcmp(payload_read + (SZ + 0) * 512, payload_ff, 4 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + (SZ + 4) * 512, payload_00, 8 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + (SZ + 12) * 512, payload_ff, 2 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + (SZ + 14) * 512, payload_00, (SZ - 14) * 512) == 0);
}
7528 
/*
 * End-to-end test of blob I/O with a 512-byte io_unit (dev blocklen 512):
 *  - thick-provisioned blob: write/read/zeroes/unmap via both flat and
 *    vectored (plain + ext) APIs
 *  - thin-provisioned blob: same I/O, then snapshot + clone and read
 *    verification through all three blobs
 *  - inflate the clone and re-verify I/O on it
 */
static void
blob_io_unit(void)
{
	struct spdk_bs_opts bsopts;
	struct spdk_blob_opts opts;
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct spdk_blob *blob, *snapshot, *clone;
	spdk_blob_id blobid;
	struct spdk_io_channel *channel;

	/* Create dev with 512 bytes io unit size */

	spdk_bs_opts_init(&bsopts, sizeof(bsopts));
	bsopts.cluster_sz = IO_UT_BLOCKS_PER_CLUSTER * 512;
	snprintf(bsopts.bstype.bstype, sizeof(bsopts.bstype.bstype), "TESTTYPE");

	/* Back the blobstore with a 512-byte-block device */
	dev = init_dev();
	dev->blocklen = 512;
	dev->blockcnt =  DEV_BUFFER_SIZE / dev->blocklen;

	/* Initialize a new blob store */
	spdk_bs_init(dev, &bsopts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	CU_ASSERT(spdk_bs_get_io_unit_size(bs) == 512);
	channel = spdk_bs_alloc_io_channel(bs);

	/* Create thick provisioned blob */
	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = false;
	opts.num_clusters = 32;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);

	test_io_write(dev, blob, channel);
	test_io_read(dev, blob, channel);
	test_io_zeroes(dev, blob, channel);

	/* Re-run through the vectored API; zero out between passes so each
	 * write test starts from a clean pattern */
	test_iov_write(dev, blob, channel, false);
	test_iov_read(dev, blob, channel, false);
	test_io_zeroes(dev, blob, channel);

	test_iov_write(dev, blob, channel, true);
	test_iov_read(dev, blob, channel, true);

	test_io_unmap(dev, blob, channel);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	blob = NULL;
	g_blob = NULL;

	/* Create thin provisioned blob */

	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = true;
	opts.num_clusters = 32;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);

	test_io_write(dev, blob, channel);
	test_io_read(dev, blob, channel);
	test_io_zeroes(dev, blob, channel);

	test_iov_write(dev, blob, channel, false);
	test_iov_read(dev, blob, channel, false);
	test_io_zeroes(dev, blob, channel);

	test_iov_write(dev, blob, channel, true);
	test_iov_read(dev, blob, channel, true);

	/* Create snapshot */

	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	snapshot = g_blob;

	/* Clone the snapshot; 'blobid' now tracks the clone */
	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	clone = g_blob;

	/* All three blobs must present the same data written above */
	test_io_read(dev, blob, channel);
	test_io_read(dev, snapshot, channel);
	test_io_read(dev, clone, channel);

	test_iov_read(dev, blob, channel, false);
	test_iov_read(dev, snapshot, channel, false);
	test_iov_read(dev, clone, channel, false);

	test_iov_read(dev, blob, channel, true);
	test_iov_read(dev, snapshot, channel, true);
	test_iov_read(dev, clone, channel, true);

	/* Inflate clone */

	spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
	poll_threads();

	CU_ASSERT(g_bserrno == 0);

	/* Inflation must preserve contents and leave the clone fully writable */
	test_io_read(dev, clone, channel);

	test_io_unmap(dev, clone, channel);

	test_iov_write(dev, clone, channel, false);
	test_iov_read(dev, clone, channel, false);
	test_io_unmap(dev, clone, channel);

	test_iov_write(dev, clone, channel, true);
	test_iov_read(dev, clone, channel, true);

	spdk_blob_close(blob, blob_op_complete, NULL);
	spdk_blob_close(snapshot, blob_op_complete, NULL);
	spdk_blob_close(clone, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	blob = NULL;
	g_blob = NULL;

	spdk_bs_free_io_channel(channel);
	poll_threads();

	/* Unload the blob store */
	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
	g_blob = NULL;
	g_blobid = 0;
}
7683 
/*
 * Verify backward compatibility with pre-io_unit_size on-disk format:
 * a super block with io_unit_size == 0 (older SPDK versions) must load
 * successfully and fall back to SPDK_BS_PAGE_SIZE as the io unit size.
 */
static void
blob_io_unit_compatibility(void)
{
	struct spdk_bs_opts bsopts;
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct spdk_bs_super_block *super;

	/* Create dev with 512 bytes io unit size */

	spdk_bs_opts_init(&bsopts, sizeof(bsopts));
	bsopts.cluster_sz = g_phys_blocklen * 4;
	snprintf(bsopts.bstype.bstype, sizeof(bsopts.bstype.bstype), "TESTTYPE");

	/* Back the blobstore with a 512-byte-block device */
	dev = init_dev();
	dev->blocklen = 512;
	dev->blockcnt =  DEV_BUFFER_SIZE / dev->blocklen;

	/* Initialize a new blob store */
	spdk_bs_init(dev, &bsopts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	CU_ASSERT(spdk_bs_get_io_unit_size(bs) == 512);

	/* Unload the blob store */
	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Modify super block to behave like older version.
	 * Check if loaded io unit size equals SPDK_BS_PAGE_SIZE */
	super = (struct spdk_bs_super_block *)&g_dev_buffer[0];
	super->io_unit_size = 0;
	/* Recompute CRC so the doctored super block still passes validation */
	super->crc = blob_md_page_calc_crc(super);

	dev = init_dev();
	dev->blocklen = 512;
	dev->blockcnt =  DEV_BUFFER_SIZE / dev->blocklen;

	spdk_bs_load(dev, &bsopts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	/* Legacy format must fall back to the page size as io unit */
	CU_ASSERT(spdk_bs_get_io_unit_size(bs) == SPDK_BS_PAGE_SIZE);

	/* Unload the blob store */
	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	g_bs = NULL;
	g_blob = NULL;
	g_blobid = 0;
}
7744 
7745 static void
7746 first_sync_complete(void *cb_arg, int bserrno)
7747 {
7748 	struct spdk_blob *blob = cb_arg;
7749 	int rc;
7750 
7751 	CU_ASSERT(bserrno == 0);
7752 	rc = spdk_blob_set_xattr(blob, "sync", "second", strlen("second") + 1);
7753 	CU_ASSERT(rc == 0);
7754 	CU_ASSERT(g_bserrno == -1);
7755 
7756 	/* Keep g_bserrno at -1, only the
7757 	 * second sync completion should set it at 0. */
7758 }
7759 
7760 static void
7761 second_sync_complete(void *cb_arg, int bserrno)
7762 {
7763 	struct spdk_blob *blob = cb_arg;
7764 	const void *value;
7765 	size_t value_len;
7766 	int rc;
7767 
7768 	CU_ASSERT(bserrno == 0);
7769 
7770 	/* Verify that the first sync completion had a chance to execute */
7771 	rc = spdk_blob_get_xattr_value(blob, "sync", &value, &value_len);
7772 	CU_ASSERT(rc == 0);
7773 	SPDK_CU_ASSERT_FATAL(value != NULL);
7774 	CU_ASSERT(value_len == strlen("second") + 1);
7775 	CU_ASSERT_NSTRING_EQUAL_FATAL(value, "second", value_len);
7776 
7777 	CU_ASSERT(g_bserrno == -1);
7778 	g_bserrno = bserrno;
7779 }
7780 
/*
 * Verify behavior when two operations race on the same blob:
 * snapshot/inflate/clone/resize each started while a delete is issued
 * before polling — the locked operation must win and the delete must fail
 * with -EBUSY. Finishes with two overlapping md syncs, both of which must
 * complete (see first_sync_complete/second_sync_complete).
 *
 * The g_bserrno checks between async calls rely on callbacks firing only
 * inside poll_threads(); do not reorder the calls relative to the polls.
 */
static void
blob_simultaneous_operations(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts opts;
	struct spdk_blob *blob, *snapshot;
	spdk_blob_id blobid, snapshotid;
	struct spdk_io_channel *channel;
	int rc;

	channel = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(channel != NULL);

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);

	/* Create snapshot and try to remove blob in the same time:
	 * - snapshot should be created successfully
	 * - delete operation should fail w -EBUSY */
	CU_ASSERT(blob->locked_operation_in_progress == false);
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	CU_ASSERT(blob->locked_operation_in_progress == true);
	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
	CU_ASSERT(blob->locked_operation_in_progress == true);
	/* Deletion failure */
	CU_ASSERT(g_bserrno == -EBUSY);
	poll_threads();
	CU_ASSERT(blob->locked_operation_in_progress == false);
	/* Snapshot creation success */
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);

	snapshotid = g_blobid;

	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot = g_blob;

	/* Inflate blob and try to remove blob in the same time:
	 * - blob should be inflated successfully
	 * - delete operation should fail w -EBUSY */
	CU_ASSERT(blob->locked_operation_in_progress == false);
	spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
	CU_ASSERT(blob->locked_operation_in_progress == true);
	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
	CU_ASSERT(blob->locked_operation_in_progress == true);
	/* Deletion failure */
	CU_ASSERT(g_bserrno == -EBUSY);
	poll_threads();
	CU_ASSERT(blob->locked_operation_in_progress == false);
	/* Inflation success */
	CU_ASSERT(g_bserrno == 0);

	/* Clone snapshot and try to remove snapshot in the same time:
	 * - snapshot should be cloned successfully
	 * - delete operation should fail w -EBUSY */
	CU_ASSERT(blob->locked_operation_in_progress == false);
	spdk_bs_create_clone(bs, snapshotid, NULL, blob_op_with_id_complete, NULL);
	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
	/* Deletion failure */
	CU_ASSERT(g_bserrno == -EBUSY);
	poll_threads();
	CU_ASSERT(blob->locked_operation_in_progress == false);
	/* Clone created */
	CU_ASSERT(g_bserrno == 0);

	/* Resize blob and try to remove blob in the same time:
	 * - blob should be resized successfully
	 * - delete operation should fail w -EBUSY */
	CU_ASSERT(blob->locked_operation_in_progress == false);
	spdk_blob_resize(blob, 50, blob_op_complete, NULL);
	CU_ASSERT(blob->locked_operation_in_progress == true);
	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
	CU_ASSERT(blob->locked_operation_in_progress == true);
	/* Deletion failure */
	CU_ASSERT(g_bserrno == -EBUSY);
	poll_threads();
	CU_ASSERT(blob->locked_operation_in_progress == false);
	/* Blob resized successfully */
	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Issue two consecutive blob syncs, neither should fail.
	 * Force sync to actually occur by marking blob dirty each time.
	 * Execution of sync should not be enough to complete the operation,
	 * since disk I/O is required to complete it. */
	g_bserrno = -1;

	rc = spdk_blob_set_xattr(blob, "sync", "first", strlen("first") + 1);
	CU_ASSERT(rc == 0);
	spdk_blob_sync_md(blob, first_sync_complete, blob);
	CU_ASSERT(g_bserrno == -1);

	spdk_blob_sync_md(blob, second_sync_complete, blob);
	CU_ASSERT(g_bserrno == -1);

	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();

	ut_blob_close_and_delete(bs, snapshot);
	ut_blob_close_and_delete(bs, blob);
}
7892 
/*
 * Stress metadata persistence: add a large xattr (spilling past one md page),
 * interrupt the first md sync after a growing number of poller iterations,
 * remove the xattr and sync again — then reload the blobstore and verify
 * the xattr was never persisted and md page accounting returned to the
 * pre-xattr state. The loop increases the interruption point each pass
 * until the first sync manages to complete uninterrupted.
 */
static void
blob_persist_test(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts opts;
	struct spdk_blob *blob;
	spdk_blob_id blobid;
	struct spdk_io_channel *channel;
	char *xattr;
	size_t xattr_length;
	int rc;
	uint32_t page_count_clear, page_count_xattr;
	uint64_t poller_iterations;
	bool run_poller;

	channel = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(channel != NULL);

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);

	/* Save the amount of md pages used after creation of a blob.
	 * This should be consistent after removing xattr. */
	page_count_clear = spdk_bit_array_count_set(bs->used_md_pages);
	SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_clear);
	SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_clear);

	/* Add xattr with maximum length of descriptor to exceed single metadata page. */
	xattr_length = SPDK_BS_MAX_DESC_SIZE - sizeof(struct spdk_blob_md_descriptor_xattr) -
		       strlen("large_xattr");
	xattr = calloc(xattr_length, sizeof(char));
	SPDK_CU_ASSERT_FATAL(xattr != NULL);

	rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(g_bserrno == 0);

	/* Save the amount of md pages used after adding the large xattr */
	page_count_xattr = spdk_bit_array_count_set(bs->used_md_pages);
	SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_xattr);
	SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_xattr);

	/* Add xattr to a blob and sync it. While sync is occurring, remove the xattr and sync again.
	 * Interrupt the first sync after increasing number of poller iterations, until it succeeds.
	 * Expectation is that after second sync completes no xattr is saved in metadata. */
	poller_iterations = 1;
	run_poller = true;
	while (run_poller) {
		rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
		SPDK_CU_ASSERT_FATAL(rc == 0);
		g_bserrno = -1;
		spdk_blob_sync_md(blob, blob_op_complete, NULL);
		/* Run only a limited number of poller iterations so the sync
		 * may be left incomplete mid-flight */
		poll_thread_times(0, poller_iterations);
		if (g_bserrno == 0) {
			/* Poller iteration count was high enough for first sync to complete.
			 * Verify that blob takes up enough of md_pages to store the xattr. */
			SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_xattr);
			SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_xattr);
			SPDK_CU_ASSERT_FATAL(spdk_bit_array_count_set(bs->used_md_pages) == page_count_xattr);
			run_poller = false;
		}
		rc = spdk_blob_remove_xattr(blob, "large_xattr");
		SPDK_CU_ASSERT_FATAL(rc == 0);
		spdk_blob_sync_md(blob, blob_op_complete, NULL);
		poll_threads();
		SPDK_CU_ASSERT_FATAL(g_bserrno == 0);
		SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_clear);
		SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_clear);
		SPDK_CU_ASSERT_FATAL(spdk_bit_array_count_set(bs->used_md_pages) == page_count_clear);

		/* Reload bs and re-open blob to verify that xattr was not persisted. */
		spdk_blob_close(blob, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);

		ut_bs_reload(&bs, NULL);

		spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
		blob = g_blob;

		/* NOTE: xattr pointer is reused as the lookup output here; it no
		 * longer points at the calloc'd buffer on a successful lookup,
		 * but -ENOENT is required so it stays intact for free() below. */
		rc = spdk_blob_get_xattr_value(blob, "large_xattr", (const void **)&xattr, &xattr_length);
		SPDK_CU_ASSERT_FATAL(rc == -ENOENT);

		poller_iterations++;
		/* Stop at high iteration count to prevent infinite loop.
		 * This value should be enough for first md sync to complete in any case. */
		SPDK_CU_ASSERT_FATAL(poller_iterations < 50);
	}

	free(xattr);

	ut_blob_close_and_delete(bs, blob);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}
7997 
/*
 * Verify that spdk_bs_blob_decouple_parent() severs a snapshot from its
 * parent by copying the parent's clusters into the child.  The scenario
 * runs twice: once deleting the snapshots before the blob and once after,
 * to exercise both teardown orders.
 */
static void
blob_decouple_snapshot(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts opts;
	struct spdk_blob *blob, *snapshot1, *snapshot2;
	struct spdk_io_channel *channel;
	spdk_blob_id blobid, snapshotid;
	uint64_t cluster;

	for (int delete_snapshot_first = 0; delete_snapshot_first <= 1; delete_snapshot_first++) {
		channel = spdk_bs_alloc_io_channel(bs);
		SPDK_CU_ASSERT_FATAL(channel != NULL);

		/* Thick provisioning so the blob's 10 clusters are allocated up front. */
		ut_spdk_blob_opts_init(&opts);
		opts.num_clusters = 10;
		opts.thin_provision = false;

		blob = ut_blob_create_and_open(bs, &opts);
		blobid = spdk_blob_get_id(blob);

		/* Create first snapshot */
		CU_ASSERT_EQUAL(_get_snapshots_count(bs), 0);
		spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
		CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
		snapshotid = g_blobid;

		spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
		snapshot1 = g_blob;

		/* Create the second one; snapshot2 becomes a child of snapshot1. */
		CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
		spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
		CU_ASSERT_EQUAL(_get_snapshots_count(bs), 2);
		snapshotid = g_blobid;

		spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
		snapshot2 = g_blob;
		CU_ASSERT_EQUAL(spdk_blob_get_parent_snapshot(bs, snapshot2->id), snapshot1->id);

		/* Now decouple the second snapshot forcing it to copy the written clusters */
		spdk_bs_blob_decouple_parent(bs, channel, snapshot2->id, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);

		/* Verify that the snapshot has been decoupled and that the clusters have been copied:
		 * every cluster must now be allocated and distinct from snapshot1's. */
		CU_ASSERT_EQUAL(spdk_blob_get_parent_snapshot(bs, snapshot2->id), SPDK_BLOBID_INVALID);
		for (cluster = 0; cluster < snapshot2->active.num_clusters; ++cluster) {
			CU_ASSERT_NOT_EQUAL(snapshot2->active.clusters[cluster], 0);
			CU_ASSERT_NOT_EQUAL(snapshot2->active.clusters[cluster],
					    snapshot1->active.clusters[cluster]);
		}

		spdk_bs_free_io_channel(channel);

		/* Delete order alternates per outer-loop iteration. */
		if (delete_snapshot_first) {
			ut_blob_close_and_delete(bs, snapshot2);
			ut_blob_close_and_delete(bs, snapshot1);
			ut_blob_close_and_delete(bs, blob);
		} else {
			ut_blob_close_and_delete(bs, blob);
			ut_blob_close_and_delete(bs, snapshot2);
			ut_blob_close_and_delete(bs, snapshot1);
		}
		poll_threads();
	}
}
8077 
8078 static void
8079 blob_seek_io_unit(void)
8080 {
8081 	struct spdk_blob_store *bs = g_bs;
8082 	struct spdk_blob *blob;
8083 	struct spdk_io_channel *channel;
8084 	struct spdk_blob_opts opts;
8085 	uint64_t free_clusters;
8086 	uint8_t payload[10 * BLOCKLEN];
8087 	uint64_t offset;
8088 	uint64_t io_unit, io_units_per_cluster;
8089 
8090 	free_clusters = spdk_bs_free_cluster_count(bs);
8091 
8092 	channel = spdk_bs_alloc_io_channel(bs);
8093 	CU_ASSERT(channel != NULL);
8094 
8095 	/* Set blob as thin provisioned */
8096 	ut_spdk_blob_opts_init(&opts);
8097 	opts.thin_provision = true;
8098 
8099 	/* Create a blob */
8100 	blob = ut_blob_create_and_open(bs, &opts);
8101 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
8102 
8103 	io_units_per_cluster = bs_io_units_per_cluster(blob);
8104 
8105 	/* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */
8106 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
8107 	poll_threads();
8108 	CU_ASSERT(g_bserrno == 0);
8109 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
8110 	CU_ASSERT(blob->active.num_clusters == 5);
8111 
8112 	/* Write at the beginning of first cluster */
8113 	offset = 0;
8114 	spdk_blob_io_write(blob, channel, payload, offset, 1, blob_op_complete, NULL);
8115 	poll_threads();
8116 	CU_ASSERT(g_bserrno == 0);
8117 
8118 	io_unit = spdk_blob_get_next_allocated_io_unit(blob, 0);
8119 	CU_ASSERT(io_unit == offset);
8120 
8121 	io_unit = spdk_blob_get_next_unallocated_io_unit(blob, 0);
8122 	CU_ASSERT(io_unit == io_units_per_cluster);
8123 
8124 	/* Write in the middle of third cluster */
8125 	offset = 2 * io_units_per_cluster + io_units_per_cluster / 2;
8126 	spdk_blob_io_write(blob, channel, payload, offset, 1, blob_op_complete, NULL);
8127 	poll_threads();
8128 	CU_ASSERT(g_bserrno == 0);
8129 
8130 	io_unit = spdk_blob_get_next_allocated_io_unit(blob, io_units_per_cluster);
8131 	CU_ASSERT(io_unit == 2 * io_units_per_cluster);
8132 
8133 	io_unit = spdk_blob_get_next_unallocated_io_unit(blob, 2 * io_units_per_cluster);
8134 	CU_ASSERT(io_unit == 3 * io_units_per_cluster);
8135 
8136 	/* Write at the end of last cluster */
8137 	offset = 5 * io_units_per_cluster - 1;
8138 	spdk_blob_io_write(blob, channel, payload, offset, 1, blob_op_complete, NULL);
8139 	poll_threads();
8140 	CU_ASSERT(g_bserrno == 0);
8141 
8142 	io_unit = spdk_blob_get_next_allocated_io_unit(blob, 3 * io_units_per_cluster);
8143 	CU_ASSERT(io_unit == 4 * io_units_per_cluster);
8144 
8145 	io_unit = spdk_blob_get_next_unallocated_io_unit(blob, 4 * io_units_per_cluster);
8146 	CU_ASSERT(io_unit == UINT64_MAX);
8147 
8148 	spdk_bs_free_io_channel(channel);
8149 	poll_threads();
8150 
8151 	ut_blob_close_and_delete(bs, blob);
8152 }
8153 
/*
 * Verify creation and re-opening of esnap (external snapshot) clones:
 * a normal blob is not an esnap clone, an esnap clone reports its configured
 * size, a size-less esnap clone can be grown via resize, reloading requires
 * bs_opts.esnap_bs_dev_create, and esnap_ctx (blobstore- or open-level) is
 * passed through to the create callback.
 */
static void
blob_esnap_create(void)
{
	struct spdk_blob_store	*bs = g_bs;
	struct spdk_bs_opts	bs_opts;
	struct ut_esnap_opts	esnap_opts;
	struct spdk_blob_opts	opts;
	struct spdk_blob_open_opts open_opts;
	struct spdk_blob	*blob;
	uint32_t		cluster_sz, block_sz;
	const uint32_t		esnap_num_clusters = 4;
	uint64_t		esnap_num_blocks;
	uint32_t		sz;
	spdk_blob_id		blobid;
	uint32_t		bs_ctx_count, blob_ctx_count;

	cluster_sz = spdk_bs_get_cluster_size(bs);
	block_sz = spdk_bs_get_io_unit_size(bs);
	/* Size the esnap device to exactly esnap_num_clusters worth of blocks. */
	esnap_num_blocks = cluster_sz * esnap_num_clusters / block_sz;

	/* Create a normal blob and verify it is not an esnap clone. */
	ut_spdk_blob_opts_init(&opts);
	blob = ut_blob_create_and_open(bs, &opts);
	CU_ASSERT(!spdk_blob_is_esnap_clone(blob));
	ut_blob_close_and_delete(bs, blob);

	/* Create an esnap clone blob then verify it is an esnap clone and has the right size */
	ut_spdk_blob_opts_init(&opts);
	ut_esnap_opts_init(block_sz, esnap_num_blocks, __func__, NULL, &esnap_opts);
	opts.esnap_id = &esnap_opts;
	opts.esnap_id_len = sizeof(esnap_opts);
	opts.num_clusters = esnap_num_clusters;
	blob = ut_blob_create_and_open(bs, &opts);
	SPDK_CU_ASSERT_FATAL(blob != NULL);
	SPDK_CU_ASSERT_FATAL(spdk_blob_is_esnap_clone(blob));
	SPDK_CU_ASSERT_FATAL(blob_is_esnap_clone(blob));
	/* An esnap clone is not a regular (blob-backed) clone. */
	SPDK_CU_ASSERT_FATAL(!spdk_blob_is_clone(blob));
	sz = spdk_blob_get_num_clusters(blob);
	CU_ASSERT(sz == esnap_num_clusters);
	ut_blob_close_and_delete(bs, blob);

	/* Create an esnap clone without the size and verify it can be grown */
	ut_spdk_blob_opts_init(&opts);
	ut_esnap_opts_init(block_sz, esnap_num_blocks, __func__, NULL, &esnap_opts);
	opts.esnap_id = &esnap_opts;
	opts.esnap_id_len = sizeof(esnap_opts);
	blob = ut_blob_create_and_open(bs, &opts);
	SPDK_CU_ASSERT_FATAL(spdk_blob_is_esnap_clone(blob));
	sz = spdk_blob_get_num_clusters(blob);
	CU_ASSERT(sz == 0);
	spdk_blob_resize(blob, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	sz = spdk_blob_get_num_clusters(blob);
	CU_ASSERT(sz == 1);
	spdk_blob_resize(blob, esnap_num_clusters, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	sz = spdk_blob_get_num_clusters(blob);
	CU_ASSERT(sz == esnap_num_clusters);
	/* Growing past the esnap device size is also allowed. */
	spdk_blob_resize(blob, esnap_num_clusters + 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	sz = spdk_blob_get_num_clusters(blob);
	CU_ASSERT(sz == esnap_num_clusters + 1);

	/* Reload the blobstore and be sure that the blob can be opened. */
	blobid = spdk_blob_get_id(blob);
	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_blob = NULL;
	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
	bs_opts.esnap_bs_dev_create = ut_esnap_create;
	ut_bs_reload(&bs, &bs_opts);
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;
	SPDK_CU_ASSERT_FATAL(spdk_blob_is_esnap_clone(blob));
	sz = spdk_blob_get_num_clusters(blob);
	CU_ASSERT(sz == esnap_num_clusters + 1);

	/* Reload the blobstore without esnap_bs_dev_create: should fail to open blob. */
	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_blob = NULL;
	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
	ut_bs_reload(&bs, &bs_opts);
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno != 0);
	CU_ASSERT(g_blob == NULL);

	/* Reload the blobstore with ctx set and verify it is passed to the esnap create callback */
	bs_ctx_count = 0;
	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
	bs_opts.esnap_bs_dev_create = ut_esnap_create_with_count;
	bs_opts.esnap_ctx = &bs_ctx_count;
	ut_bs_reload(&bs, &bs_opts);
	/* Loading the blobstore triggers the esnap to be loaded */
	CU_ASSERT(bs_ctx_count == 1);
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	/* Opening the blob also triggers the esnap to be loaded */
	CU_ASSERT(bs_ctx_count == 2);
	blob = g_blob;
	SPDK_CU_ASSERT_FATAL(spdk_blob_is_esnap_clone(blob));
	sz = spdk_blob_get_num_clusters(blob);
	CU_ASSERT(sz == esnap_num_clusters + 1);
	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_blob = NULL;
	/* If open_opts.esnap_ctx is set it is passed to the esnap create callback */
	blob_ctx_count = 0;
	spdk_blob_open_opts_init(&open_opts, sizeof(open_opts));
	open_opts.esnap_ctx = &blob_ctx_count;
	spdk_bs_open_blob_ext(bs, blobid, &open_opts, blob_op_with_handle_complete, NULL);
	poll_threads();
	blob = g_blob;
	/* The bs-level counter still advances; the open-level ctx is counted separately. */
	CU_ASSERT(bs_ctx_count == 3);
	CU_ASSERT(blob_ctx_count == 1);
	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_blob = NULL;
}
8286 
/*
 * Build an esnap clone -> snapshot -> regular clone chain, reload the
 * blobstore, read through each blob to force per-blob channel allocation,
 * then unload the blobstore lvstore-style (closes and unload all queued
 * before polling) to exercise the deferred unload path in spdk_bs_unload().
 */
static void
blob_esnap_clone_reload(void)
{
	struct spdk_blob_store	*bs = g_bs;
	struct spdk_bs_opts	bs_opts;
	struct ut_esnap_opts	esnap_opts;
	struct spdk_blob_opts	opts;
	struct spdk_blob	*eclone1, *snap1, *clone1;
	uint32_t		cluster_sz = spdk_bs_get_cluster_size(bs);
	uint32_t		block_sz = spdk_bs_get_io_unit_size(bs);
	const uint32_t		esnap_num_clusters = 4;
	uint64_t		esnap_num_blocks = cluster_sz * esnap_num_clusters / block_sz;
	spdk_blob_id		eclone1_id, snap1_id, clone1_id;
	struct spdk_io_channel	*bs_ch;
	char			buf[block_sz];
	/* One status word per queued close/unload, to see each complete. */
	int			bserr1, bserr2, bserr3, bserr4;
	struct spdk_bs_dev	*dev;

	/* Create and open an esnap clone blob */
	ut_spdk_blob_opts_init(&opts);
	ut_esnap_opts_init(block_sz, esnap_num_blocks, __func__, NULL, &esnap_opts);
	opts.esnap_id = &esnap_opts;
	opts.esnap_id_len = sizeof(esnap_opts);
	opts.num_clusters = esnap_num_clusters;
	eclone1 = ut_blob_create_and_open(bs, &opts);
	CU_ASSERT(eclone1 != NULL);
	CU_ASSERT(spdk_blob_is_esnap_clone(eclone1));
	eclone1_id = eclone1->id;

	/* Create and open a snapshot of eclone1 */
	spdk_bs_create_snapshot(bs, eclone1_id, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	CU_ASSERT(g_bserrno == 0);
	snap1_id = g_blobid;
	spdk_bs_open_blob(bs, snap1_id, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	snap1 = g_blob;

	/* Create and open regular clone of snap1 */
	spdk_bs_create_clone(bs, snap1_id, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	SPDK_CU_ASSERT_FATAL(g_bserrno == 0);
	clone1_id = g_blobid;
	spdk_bs_open_blob(bs, clone1_id, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	clone1 = g_blob;

	/* Close the blobs in preparation for reloading the blobstore */
	spdk_blob_close(clone1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	spdk_blob_close(snap1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	spdk_blob_close(eclone1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_blob = NULL;

	/* Reload the blobstore */
	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
	bs_opts.esnap_bs_dev_create = ut_esnap_create;
	ut_bs_reload(&bs, &bs_opts);

	/* Be sure each of the blobs can be opened */
	spdk_bs_open_blob(bs, eclone1_id, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	eclone1 = g_blob;
	spdk_bs_open_blob(bs, snap1_id, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	snap1 = g_blob;
	spdk_bs_open_blob(bs, clone1_id, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	clone1 = g_blob;

	/* Perform some reads on each of them to cause channels to be allocated */
	bs_ch = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(bs_ch != NULL);
	spdk_blob_io_read(eclone1, bs_ch, buf, 0, 1, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	spdk_blob_io_read(snap1, bs_ch, buf, 0, 1, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	spdk_blob_io_read(clone1, bs_ch, buf, 0, 1, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/*
	 * Unload the blobstore in a way similar to how lvstore unloads it.  This should exercise
	 * the deferred unload path in spdk_bs_unload().
	 */
	bserr1 = 0xbad;
	bserr2 = 0xbad;
	bserr3 = 0xbad;
	bserr4 = 0xbad;
	/* Queue everything before polling so unload is requested with blobs still open. */
	spdk_blob_close(eclone1, blob_op_complete, &bserr1);
	spdk_blob_close(snap1, blob_op_complete, &bserr2);
	spdk_blob_close(clone1, blob_op_complete, &bserr3);
	spdk_bs_unload(bs, blob_op_complete, &bserr4);
	spdk_bs_free_io_channel(bs_ch);
	poll_threads();
	CU_ASSERT(bserr1 == 0);
	CU_ASSERT(bserr2 == 0);
	CU_ASSERT(bserr3 == 0);
	CU_ASSERT(bserr4 == 0);
	g_blob = NULL;

	/* Reload the blobstore */
	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
	bs_opts.esnap_bs_dev_create = ut_esnap_create;
	dev = init_dev();
	spdk_bs_load(dev, &bs_opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
}
8416 
8417 static bool
8418 blob_esnap_verify_contents(struct spdk_blob *blob, struct spdk_io_channel *ch,
8419 			   uint64_t offset, uint64_t size, uint32_t readsize, const char *how)
8420 {
8421 	const uint32_t	bs_blksz = blob->bs->io_unit_size;
8422 	const uint32_t	esnap_blksz = blob->back_bs_dev ? blob->back_bs_dev->blocklen : bs_blksz;
8423 	const uint32_t	start_blk = offset / bs_blksz;
8424 	const uint32_t	num_blocks = spdk_max(size, readsize) / bs_blksz;
8425 	const uint32_t	blocks_per_read = spdk_min(size, readsize) / bs_blksz;
8426 	uint32_t	blob_block;
8427 	struct iovec	iov;
8428 	uint8_t		buf[spdk_min(size, readsize)];
8429 	bool		block_ok;
8430 
8431 	SPDK_CU_ASSERT_FATAL(offset % bs_blksz == 0);
8432 	SPDK_CU_ASSERT_FATAL(size % bs_blksz == 0);
8433 	SPDK_CU_ASSERT_FATAL(readsize % bs_blksz == 0);
8434 
8435 	memset(buf, 0, readsize);
8436 	iov.iov_base = buf;
8437 	iov.iov_len = readsize;
8438 	for (blob_block = start_blk; blob_block < num_blocks; blob_block += blocks_per_read) {
8439 		if (strcmp(how, "read") == 0) {
8440 			spdk_blob_io_read(blob, ch, buf, blob_block, blocks_per_read,
8441 					  bs_op_complete, NULL);
8442 		} else if (strcmp(how, "readv") == 0) {
8443 			spdk_blob_io_readv(blob, ch, &iov, 1, blob_block, blocks_per_read,
8444 					   bs_op_complete, NULL);
8445 		} else if (strcmp(how, "readv_ext") == 0) {
8446 			/*
8447 			 * This is currently pointless. NULL ext_opts leads to dev->readv(), not
8448 			 * dev->readv_ext().
8449 			 */
8450 			spdk_blob_io_readv_ext(blob, ch, &iov, 1, blob_block, blocks_per_read,
8451 					       bs_op_complete, NULL, NULL);
8452 		} else {
8453 			abort();
8454 		}
8455 		poll_threads();
8456 		CU_ASSERT(g_bserrno == 0);
8457 		if (g_bserrno != 0) {
8458 			return false;
8459 		}
8460 		block_ok = ut_esnap_content_is_correct(buf, blocks_per_read * bs_blksz, blob->id,
8461 						       blob_block * bs_blksz, esnap_blksz);
8462 		CU_ASSERT(block_ok);
8463 		if (!block_ok) {
8464 			return false;
8465 		}
8466 	}
8467 
8468 	return true;
8469 }
8470 
/*
 * Create a fresh blobstore with io_unit size bs_blksz backed by an esnap
 * device with block size esnap_blksz, then verify that reads of an esnap
 * clone return esnap content, and that writing one blob block at a time
 * leaves the neighboring blocks intact (previous block = written content,
 * next block = still esnap content).
 */
static void
blob_esnap_io_size(uint32_t bs_blksz, uint32_t esnap_blksz)
{
	struct spdk_bs_dev	*dev;
	struct spdk_blob_store	*bs;
	struct spdk_bs_opts	bsopts;
	struct spdk_blob_opts	opts;
	struct ut_esnap_opts	esnap_opts;
	struct spdk_blob	*blob;
	const uint32_t		cluster_sz = 4 * g_phys_blocklen;
	const uint64_t		esnap_num_clusters = 4;
	const uint32_t		esnap_sz = cluster_sz * esnap_num_clusters;
	const uint64_t		esnap_num_blocks = esnap_sz / esnap_blksz;
	const uint64_t		blob_num_blocks = esnap_sz / bs_blksz;
	uint32_t		block;
	struct spdk_io_channel	*bs_ch;

	spdk_bs_opts_init(&bsopts, sizeof(bsopts));
	bsopts.cluster_sz = cluster_sz;
	bsopts.esnap_bs_dev_create = ut_esnap_create;

	/* Create device with desired block size */
	dev = init_dev();
	dev->blocklen = bs_blksz;
	dev->blockcnt = DEV_BUFFER_SIZE / dev->blocklen;

	/* Initialize a new blob store */
	spdk_bs_init(dev, &bsopts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	SPDK_CU_ASSERT_FATAL(g_bs->io_unit_size == bs_blksz);
	bs = g_bs;

	bs_ch = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(bs_ch != NULL);

	/* Create and open the esnap clone  */
	ut_spdk_blob_opts_init(&opts);
	ut_esnap_opts_init(esnap_blksz, esnap_num_blocks, __func__, NULL, &esnap_opts);
	opts.esnap_id = &esnap_opts;
	opts.esnap_id_len = sizeof(esnap_opts);
	opts.num_clusters = esnap_num_clusters;
	blob = ut_blob_create_and_open(bs, &opts);
	SPDK_CU_ASSERT_FATAL(blob != NULL);

	/* Verify that large reads return the content of the esnap device */
	CU_ASSERT(blob_esnap_verify_contents(blob, bs_ch, 0, esnap_sz, esnap_sz, "read"));
	CU_ASSERT(blob_esnap_verify_contents(blob, bs_ch, 0, esnap_sz, esnap_sz, "readv"));
	CU_ASSERT(blob_esnap_verify_contents(blob, bs_ch, 0, esnap_sz, esnap_sz, "readv_ext"));
	/* Verify that small reads return the content of the esnap device */
	CU_ASSERT(blob_esnap_verify_contents(blob, bs_ch, 0, esnap_sz, bs_blksz, "read"));
	CU_ASSERT(blob_esnap_verify_contents(blob, bs_ch, 0, esnap_sz, bs_blksz, "readv"));
	CU_ASSERT(blob_esnap_verify_contents(blob, bs_ch, 0, esnap_sz, bs_blksz, "readv_ext"));

	/* Write one blob block at a time; verify that the surrounding blocks are OK */
	for (block = 0; block < blob_num_blocks; block++) {
		char		buf[bs_blksz];
		union ut_word	word;

		/* Written data is tagged with a distinct blob_id so it can be
		 * told apart from the esnap's own content on read-back. */
		word.f.blob_id = 0xfedcba90;
		word.f.lba = block;
		ut_memset8(buf, word.num, bs_blksz);

		spdk_blob_io_write(blob, bs_ch, buf, block, 1, bs_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		if (g_bserrno != 0) {
			break;
		}

		/* Read and verify the block before the current block */
		if (block != 0) {
			spdk_blob_io_read(blob, bs_ch, buf, block - 1, 1, bs_op_complete, NULL);
			poll_threads();
			CU_ASSERT(g_bserrno == 0);
			if (g_bserrno != 0) {
				break;
			}
			CU_ASSERT(ut_esnap_content_is_correct(buf, bs_blksz, word.f.blob_id,
							      (block - 1) * bs_blksz, bs_blksz));
		}

		/* Read and verify the current block */
		spdk_blob_io_read(blob, bs_ch, buf, block, 1, bs_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		if (g_bserrno != 0) {
			break;
		}
		CU_ASSERT(ut_esnap_content_is_correct(buf, bs_blksz, word.f.blob_id,
						      block * bs_blksz, bs_blksz));

		/* Check the block that follows: not written yet, so it must
		 * still show the esnap device's content (blob->id). */
		if (block + 1 < blob_num_blocks) {
			g_bserrno = 0xbad;
			spdk_blob_io_read(blob, bs_ch, buf, block + 1, 1, bs_op_complete, NULL);
			poll_threads();
			CU_ASSERT(g_bserrno == 0);
			if (g_bserrno != 0) {
				break;
			}
			CU_ASSERT(ut_esnap_content_is_correct(buf, bs_blksz, blob->id,
							      (block + 1) * bs_blksz,
							      esnap_blksz));
		}
	}

	/* Clean up */
	spdk_bs_free_io_channel(bs_ch);
	g_bserrno = 0xbad;
	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	spdk_bs_unload(g_bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
	memset(g_dev_buffer, 0, DEV_BUFFER_SIZE);
}
8591 
/* Esnap I/O with matching 4 KiB blobstore and esnap block sizes. */
static void
blob_esnap_io_4096_4096(void)
{
	blob_esnap_io_size(4096, 4096);
}
8597 
/* Esnap I/O with matching 512 B blobstore and esnap block sizes. */
static void
blob_esnap_io_512_512(void)
{
	blob_esnap_io_size(512, 512);
}
8603 
/* Esnap I/O where the blobstore block (4096) is larger than the esnap block (512). */
static void
blob_esnap_io_4096_512(void)
{
	blob_esnap_io_size(4096, 512);
}
8609 
8610 static void
8611 blob_esnap_io_512_4096(void)
8612 {
8613 	struct spdk_bs_dev	*dev;
8614 	struct spdk_blob_store	*bs;
8615 	struct spdk_bs_opts	bs_opts;
8616 	struct spdk_blob_opts	blob_opts;
8617 	struct ut_esnap_opts	esnap_opts;
8618 	uint64_t		cluster_sz = 4 * g_phys_blocklen;
8619 	uint32_t		bs_blksz = 512;
8620 	uint32_t		esnap_blksz = BLOCKLEN;
8621 	uint64_t		esnap_num_blocks = 64;
8622 	spdk_blob_id		blobid;
8623 
8624 	/* Create device with desired block size */
8625 	dev = init_dev();
8626 	dev->blocklen = bs_blksz;
8627 	dev->blockcnt = DEV_BUFFER_SIZE / dev->blocklen;
8628 
8629 	/* Initialize a new blob store */
8630 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
8631 	bs_opts.cluster_sz = cluster_sz;
8632 	bs_opts.esnap_bs_dev_create = ut_esnap_create;
8633 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
8634 	poll_threads();
8635 	CU_ASSERT(g_bserrno == 0);
8636 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
8637 	SPDK_CU_ASSERT_FATAL(g_bs->io_unit_size == bs_blksz);
8638 	bs = g_bs;
8639 
8640 	/* Try to create and open the esnap clone. Create should succeed, open should fail. */
8641 	ut_spdk_blob_opts_init(&blob_opts);
8642 	ut_esnap_opts_init(esnap_blksz, esnap_num_blocks, __func__, NULL, &esnap_opts);
8643 	blob_opts.esnap_id = &esnap_opts;
8644 	blob_opts.esnap_id_len = sizeof(esnap_opts);
8645 	blob_opts.num_clusters = esnap_num_blocks * esnap_blksz / bs_blksz;
8646 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
8647 	poll_threads();
8648 	CU_ASSERT(g_bserrno == 0);
8649 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
8650 	blobid = g_blobid;
8651 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
8652 	poll_threads();
8653 	CU_ASSERT(g_bserrno == -EINVAL);
8654 	CU_ASSERT(g_blob == NULL);
8655 
8656 	/* Clean up */
8657 	spdk_bs_unload(bs, bs_op_complete, NULL);
8658 	poll_threads();
8659 	CU_ASSERT(g_bserrno == 0);
8660 	g_bs = NULL;
8661 	memset(g_dev_buffer, 0, DEV_BUFFER_SIZE);
8662 }
8663 
/*
 * Verify per-thread lifecycle of esnap bs_dev channels: channels are created
 * lazily on first read, one per thread, destroyed when the owning thread's
 * bs channel is freed, and all destroyed when the blob closes.
 */
static void
blob_esnap_thread_add_remove(void)
{
	struct spdk_blob_store	*bs = g_bs;
	struct spdk_blob_opts	opts;
	struct ut_esnap_opts	ut_esnap_opts;
	struct spdk_blob	*blob;
	struct ut_esnap_dev	*ut_dev;
	spdk_blob_id		blobid;
	uint64_t		start_thread = g_ut_thread_id;
	/* Set by the ut esnap device's destroy hook when the dev is torn down. */
	bool			destroyed = false;
	struct spdk_io_channel	*ch0, *ch1;
	struct ut_esnap_channel	*ut_ch0, *ut_ch1;
	const uint32_t		blocklen = bs->io_unit_size;
	char			buf[blocklen * 4];

	SPDK_CU_ASSERT_FATAL(g_ut_num_threads > 1);
	set_thread(0);

	/* Create the esnap clone */
	ut_esnap_opts_init(blocklen, 2048, "add_remove_1", &destroyed, &ut_esnap_opts);
	ut_spdk_blob_opts_init(&opts);
	opts.esnap_id = &ut_esnap_opts;
	opts.esnap_id_len = sizeof(ut_esnap_opts);
	opts.num_clusters = 10;
	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	/* Open the blob. No channels should be allocated yet. */
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;
	ut_dev = (struct ut_esnap_dev *)blob->back_bs_dev;
	CU_ASSERT(ut_dev != NULL);
	CU_ASSERT(ut_dev->num_channels == 0);

	/* Create a channel on thread 0. It is lazily created on the first read. */
	ch0 = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(ch0 != NULL);
	ut_ch0 = ut_esnap_get_io_channel(ch0, blobid);
	CU_ASSERT(ut_ch0 == NULL);
	CU_ASSERT(ut_dev->num_channels == 0);
	spdk_blob_io_read(blob, ch0, buf, 0, 1, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(ut_dev->num_channels == 1);
	ut_ch0 = ut_esnap_get_io_channel(ch0, blobid);
	CU_ASSERT(ut_ch0 != NULL);
	CU_ASSERT(ut_ch0->blocks_read == 1);

	/* Create a channel on thread 1 and verify its lazy creation too. */
	set_thread(1);
	ch1 = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(ch1 != NULL);
	ut_ch1 = ut_esnap_get_io_channel(ch1, blobid);
	CU_ASSERT(ut_ch1 == NULL);
	CU_ASSERT(ut_dev->num_channels == 1);
	spdk_blob_io_read(blob, ch1, buf, 0, 4, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(ut_dev->num_channels == 2);
	ut_ch1 = ut_esnap_get_io_channel(ch1, blobid);
	CU_ASSERT(ut_ch1 != NULL);
	CU_ASSERT(ut_ch1->blocks_read == 4);

	/* Close the channel on thread 0 and verify the bs_dev channel is also gone. */
	set_thread(0);
	spdk_bs_free_io_channel(ch0);
	poll_threads();
	CU_ASSERT(ut_dev->num_channels == 1);

	/* Close the blob. There is no outstanding IO so it should close right away. */
	g_bserrno = 0xbad;
	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(destroyed);

	/* The esnap channel for the blob should be gone now too. */
	ut_ch1 = ut_esnap_get_io_channel(ch1, blobid);
	CU_ASSERT(ut_ch1 == NULL);

	/* Clean up */
	set_thread(1);
	spdk_bs_free_io_channel(ch1);
	set_thread(start_thread);
}
8756 
/* Completion callback for blob_freeze_io(): cb_arg points at a counter of
 * successfully completed freeze operations. */
static void
freeze_done(void *cb_arg, int bserrno)
{
	uint32_t *count = cb_arg;

	CU_ASSERT(bserrno == 0);
	*count += 1;
}
8765 
/* Completion callback for blob_unfreeze_io(): cb_arg points at a counter of
 * successfully completed unfreeze operations. */
static void
unfreeze_done(void *cb_arg, int bserrno)
{
	uint32_t *count = cb_arg;

	CU_ASSERT(bserrno == 0);
	*count += 1;
}
8774 
/*
 * Verify blob_freeze_io()/blob_unfreeze_io() reference counting: frozen_refcnt
 * is bumped/dropped synchronously, but completion callbacks only fire after
 * poll_threads() drives the underlying for_each_channel() across both threads.
 */
static void
blob_nested_freezes(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_io_channel *channel[2];
	struct spdk_blob_opts opts;
	uint32_t freeze_cnt, unfreeze_cnt;
	int i;

	/* One bs channel per thread so the freeze must visit both. */
	for (i = 0; i < 2; i++) {
		set_thread(i);
		channel[i] = spdk_bs_alloc_io_channel(bs);
		SPDK_CU_ASSERT_FATAL(channel[i] != NULL);
	}

	set_thread(0);

	ut_spdk_blob_opts_init(&opts);
	blob = ut_blob_create_and_open(bs, &opts);

	/* First just test a single freeze/unfreeze. */
	freeze_cnt = 0;
	unfreeze_cnt = 0;
	CU_ASSERT(blob->frozen_refcnt == 0);
	blob_freeze_io(blob, freeze_done, &freeze_cnt);
	/* Refcount changes immediately; the callback waits for the poll. */
	CU_ASSERT(blob->frozen_refcnt == 1);
	CU_ASSERT(freeze_cnt == 0);
	poll_threads();
	CU_ASSERT(freeze_cnt == 1);
	blob_unfreeze_io(blob, unfreeze_done, &unfreeze_cnt);
	CU_ASSERT(blob->frozen_refcnt == 0);
	CU_ASSERT(unfreeze_cnt == 0);
	poll_threads();
	CU_ASSERT(unfreeze_cnt == 1);

	/* Now nest multiple freeze/unfreeze operations.  We should
	 * expect a callback for each operation, but only after
	 * the threads have been polled to ensure a for_each_channel()
	 * was executed.
	 */
	freeze_cnt = 0;
	unfreeze_cnt = 0;
	CU_ASSERT(blob->frozen_refcnt == 0);
	blob_freeze_io(blob, freeze_done, &freeze_cnt);
	CU_ASSERT(blob->frozen_refcnt == 1);
	CU_ASSERT(freeze_cnt == 0);
	blob_freeze_io(blob, freeze_done, &freeze_cnt);
	CU_ASSERT(blob->frozen_refcnt == 2);
	CU_ASSERT(freeze_cnt == 0);
	poll_threads();
	/* Both queued freezes complete in the same poll. */
	CU_ASSERT(freeze_cnt == 2);
	blob_unfreeze_io(blob, unfreeze_done, &unfreeze_cnt);
	CU_ASSERT(blob->frozen_refcnt == 1);
	CU_ASSERT(unfreeze_cnt == 0);
	blob_unfreeze_io(blob, unfreeze_done, &unfreeze_cnt);
	CU_ASSERT(blob->frozen_refcnt == 0);
	CU_ASSERT(unfreeze_cnt == 0);
	poll_threads();
	CU_ASSERT(unfreeze_cnt == 2);

	for (i = 0; i < 2; i++) {
		set_thread(i);
		spdk_bs_free_io_channel(channel[i]);
	}
	set_thread(0);
	ut_blob_close_and_delete(bs, blob);

	poll_threads();
	g_blob = NULL;
	g_blobid = 0;
}
8847 
8848 static void
8849 blob_ext_md_pages(void)
8850 {
8851 	struct spdk_blob_store *bs;
8852 	struct spdk_bs_dev *dev;
8853 	struct spdk_blob *blob;
8854 	struct spdk_blob_opts opts;
8855 	struct spdk_bs_opts bs_opts;
8856 	uint64_t free_clusters;
8857 
8858 	dev = init_dev();
8859 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
8860 	snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");
8861 	/* Issue #2932 was a bug in how we use bs_allocate_cluster() during resize.
8862 	 * It requires num_md_pages that is much smaller than the number of clusters.
8863 	 * Make sure we can create a blob that uses all of the free clusters.
8864 	 */
8865 	bs_opts.cluster_sz = 65536;
8866 	bs_opts.num_md_pages = 16;
8867 
8868 	/* Initialize a new blob store */
8869 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
8870 	poll_threads();
8871 	CU_ASSERT(g_bserrno == 0);
8872 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
8873 	bs = g_bs;
8874 
8875 	free_clusters = spdk_bs_free_cluster_count(bs);
8876 
8877 	ut_spdk_blob_opts_init(&opts);
8878 	opts.num_clusters = free_clusters;
8879 
8880 	blob = ut_blob_create_and_open(bs, &opts);
8881 	spdk_blob_close(blob, blob_op_complete, NULL);
8882 	CU_ASSERT(g_bserrno == 0);
8883 
8884 	spdk_bs_unload(bs, bs_op_complete, NULL);
8885 	poll_threads();
8886 	CU_ASSERT(g_bserrno == 0);
8887 	g_bs = NULL;
8888 }
8889 
static void
blob_esnap_clone_snapshot(void)
{
	/*
	 * When a snapshot is created, the blob that is being snapped becomes
	 * the leaf node (a clone of the snapshot) and the newly created
	 * snapshot sits between the snapped blob and the external snapshot.
	 *
	 * Before creating snap1
	 *
	 *   ,--------.     ,----------.
	 *   |  blob  |     |  vbdev   |
	 *   | blob1  |<----| nvme1n42 |
	 *   |  (rw)  |     |   (ro)   |
	 *   `--------'     `----------'
	 *       Figure 1
	 *
	 * After creating snap1
	 *
	 *   ,--------.     ,--------.     ,----------.
	 *   |  blob  |     |  blob  |     |  vbdev   |
	 *   | blob1  |<----| snap1  |<----| nvme1n42 |
	 *   |  (rw)  |     |  (ro)  |     |   (ro)   |
	 *   `--------'     `--------'     `----------'
	 *       Figure 2
	 *
	 * Starting from Figure 2, if snap1 is removed, the chain reverts to
	 * what it looks like in Figure 1.
	 *
	 * Starting from Figure 2, if blob1 is removed, the chain becomes:
	 *
	 *   ,--------.     ,----------.
	 *   |  blob  |     |  vbdev   |
	 *   | snap1  |<----| nvme1n42 |
	 *   |  (ro)  |     |   (ro)   |
	 *   `--------'     `----------'
	 *       Figure 3
	 *
	 * In each case, the blob pointed to by the nvme vbdev is considered
	 * the "esnap clone".  The esnap clone must have:
	 *
	 *   - XATTR_INTERNAL for BLOB_EXTERNAL_SNAPSHOT_ID (e.g. name or UUID)
	 *   - blob->invalid_flags must contain SPDK_BLOB_EXTERNAL_SNAPSHOT
	 *   - blob->parent_id must be SPDK_BLOBID_EXTERNAL_SNAPSHOT.
	 *
	 * No other blob that descends from the esnap clone may have any of
	 * those set.
	 */
	struct spdk_blob_store	*bs = g_bs;
	const uint32_t		blocklen = bs->io_unit_size;
	struct spdk_blob_opts	opts;
	struct ut_esnap_opts	esnap_opts;
	struct spdk_blob	*blob, *snap_blob;
	spdk_blob_id		blobid, snap_blobid;
	bool			destroyed = false;

	/* Create the esnap clone (Figure 1: blob1 backed by the esnap device) */
	ut_esnap_opts_init(blocklen, 2048, __func__, &destroyed, &esnap_opts);
	ut_spdk_blob_opts_init(&opts);
	opts.esnap_id = &esnap_opts;
	opts.esnap_id_len = sizeof(esnap_opts);
	opts.num_clusters = 10;
	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	/* Open the blob. */
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;
	UT_ASSERT_IS_ESNAP_CLONE(blob, &esnap_opts, sizeof(esnap_opts));

	/*
	 * Create a snapshot of the blob. The snapshot becomes the esnap clone.
	 * (Transition from Figure 1 to Figure 2.)
	 */
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snap_blobid = g_blobid;

	spdk_bs_open_blob(bs, snap_blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snap_blob = g_blob;

	/* The esnap-clone markers must have moved from blob to snap_blob. */
	UT_ASSERT_IS_NOT_ESNAP_CLONE(blob);
	UT_ASSERT_IS_ESNAP_CLONE(snap_blob, &esnap_opts, sizeof(esnap_opts));

	/*
	 * Delete the snapshot.  The original blob becomes the esnap clone.
	 * (Transition back from Figure 2 to Figure 1.)
	 */
	ut_blob_close_and_delete(bs, snap_blob);
	snap_blob = NULL;
	snap_blobid = SPDK_BLOBID_INVALID;
	UT_ASSERT_IS_ESNAP_CLONE(blob, &esnap_opts, sizeof(esnap_opts));

	/*
	 * Create the snapshot again, then delete the original blob.  The
	 * snapshot should survive as the esnap clone. (Figure 2 to Figure 3.)
	 */
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snap_blobid = g_blobid;

	spdk_bs_open_blob(bs, snap_blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snap_blob = g_blob;

	UT_ASSERT_IS_NOT_ESNAP_CLONE(blob);
	UT_ASSERT_IS_ESNAP_CLONE(snap_blob, &esnap_opts, sizeof(esnap_opts));

	ut_blob_close_and_delete(bs, blob);
	blob = NULL;
	blobid = SPDK_BLOBID_INVALID;
	UT_ASSERT_IS_ESNAP_CLONE(snap_blob, &esnap_opts, sizeof(esnap_opts));

	/*
	 * Clone the snapshot.  The snapshot continues to be the esnap clone.
	 */
	spdk_bs_create_clone(bs, snap_blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;

	UT_ASSERT_IS_NOT_ESNAP_CLONE(blob);
	UT_ASSERT_IS_ESNAP_CLONE(snap_blob, &esnap_opts, sizeof(esnap_opts));

	/*
	 * Delete the snapshot. The clone becomes the esnap clone.
	 */
	ut_blob_close_and_delete(bs, snap_blob);
	snap_blob = NULL;
	snap_blobid = SPDK_BLOBID_INVALID;
	UT_ASSERT_IS_ESNAP_CLONE(blob, &esnap_opts, sizeof(esnap_opts));

	/*
	 * Clean up
	 */
	ut_blob_close_and_delete(bs, blob);
}
9047 
/*
 * Create an esnap clone and then detach it from its external snapshot,
 * either by full inflation (inflate == true) or by decoupling the parent
 * (inflate == false).  Verify the blob is no longer an esnap clone and its
 * content still matches what the backing device provided.
 *
 * Returns the number of CUnit assertion failures recorded during this call,
 * so a caller could tell which variant failed.
 */
static uint64_t
_blob_esnap_clone_hydrate(bool inflate)
{
	struct spdk_blob_store	*bs = g_bs;
	struct spdk_blob_opts	opts;
	struct ut_esnap_opts	esnap_opts;
	struct spdk_blob	*blob;
	spdk_blob_id		blobid;
	struct spdk_io_channel *channel;
	bool			destroyed = false;
	const uint32_t		blocklen = spdk_bs_get_io_unit_size(bs);
	const uint32_t		cluster_sz = spdk_bs_get_cluster_size(bs);
	const uint64_t		esnap_num_clusters = 4;
	const uint32_t		esnap_sz = cluster_sz * esnap_num_clusters;
	const uint64_t		esnap_num_blocks = esnap_sz / blocklen;
	uint64_t		num_failures = CU_get_number_of_failures();

	channel = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(channel != NULL);

	/* Create the esnap clone */
	ut_spdk_blob_opts_init(&opts);
	ut_esnap_opts_init(blocklen, esnap_num_blocks, __func__, &destroyed, &esnap_opts);
	opts.esnap_id = &esnap_opts;
	opts.esnap_id_len = sizeof(esnap_opts);
	opts.num_clusters = esnap_num_clusters;
	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	/* Open the esnap clone */
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;
	UT_ASSERT_IS_ESNAP_CLONE(blob, &esnap_opts, sizeof(esnap_opts));

	/*
	 * Inflate or decouple the blob, then verify that it is no longer an
	 * esnap clone and has the right content.
	 */
	if (inflate) {
		spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
	} else {
		spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
	}
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	UT_ASSERT_IS_NOT_ESNAP_CLONE(blob);
	CU_ASSERT(blob_esnap_verify_contents(blob, channel, 0, esnap_sz, esnap_sz, "read"));
	ut_blob_close_and_delete(bs, blob);

	/*
	 * Clean up
	 */
	spdk_bs_free_io_channel(channel);
	poll_threads();

	/* Return number of new failures */
	return CU_get_number_of_failures() - num_failures;
}
9112 
9113 static void
9114 blob_esnap_clone_inflate(void)
9115 {
9116 	_blob_esnap_clone_hydrate(true);
9117 }
9118 
9119 static void
9120 blob_esnap_clone_decouple(void)
9121 {
9122 	_blob_esnap_clone_hydrate(false);
9123 }
9124 
/*
 * Verify spdk_blob_set_esnap_bs_dev() hot-swaps the external snapshot device:
 * the replaced device is destroyed, the new device becomes back_bs_dev, and
 * the swap also works while IO channels on two threads hold esnap channels.
 */
static void
blob_esnap_hotplug(void)
{
	struct spdk_blob_store	*bs = g_bs;
	struct ut_esnap_opts	esnap1_opts, esnap2_opts;
	struct spdk_blob_opts	opts;
	struct spdk_blob	*blob;
	struct spdk_bs_dev	*bs_dev;
	struct ut_esnap_dev	*esnap_dev;
	uint32_t		cluster_sz = spdk_bs_get_cluster_size(bs);
	uint32_t		block_sz = spdk_bs_get_io_unit_size(bs);
	const uint32_t		esnap_num_clusters = 4;
	uint64_t		esnap_num_blocks = cluster_sz * esnap_num_clusters / block_sz;
	bool			destroyed1 = false, destroyed2 = false;
	uint64_t		start_thread = g_ut_thread_id;
	struct spdk_io_channel	*ch0, *ch1;
	char			buf[block_sz];

	/* Create and open an esnap clone blob */
	ut_spdk_blob_opts_init(&opts);
	ut_esnap_opts_init(block_sz, esnap_num_blocks, "esnap1", &destroyed1, &esnap1_opts);
	opts.esnap_id = &esnap1_opts;
	opts.esnap_id_len = sizeof(esnap1_opts);
	opts.num_clusters = esnap_num_clusters;
	blob = ut_blob_create_and_open(bs, &opts);
	CU_ASSERT(blob != NULL);
	CU_ASSERT(spdk_blob_is_esnap_clone(blob));
	SPDK_CU_ASSERT_FATAL(blob->back_bs_dev != NULL);
	esnap_dev = (struct ut_esnap_dev *)blob->back_bs_dev;
	CU_ASSERT(strcmp(esnap_dev->ut_opts.name, "esnap1") == 0);

	/* Replace the external snapshot: esnap1 must be destroyed, esnap2 not. */
	ut_esnap_opts_init(block_sz, esnap_num_blocks, "esnap2", &destroyed2, &esnap2_opts);
	bs_dev = ut_esnap_dev_alloc(&esnap2_opts);
	CU_ASSERT(!destroyed1);
	CU_ASSERT(!destroyed2);
	g_bserrno = 0xbad;
	spdk_blob_set_esnap_bs_dev(blob, bs_dev, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(destroyed1);
	CU_ASSERT(!destroyed2);
	SPDK_CU_ASSERT_FATAL(bs_dev == blob->back_bs_dev);
	SPDK_CU_ASSERT_FATAL(bs_dev == spdk_blob_get_esnap_bs_dev(blob));
	esnap_dev = (struct ut_esnap_dev *)blob->back_bs_dev;
	CU_ASSERT(strcmp(esnap_dev->ut_opts.name, "esnap2") == 0);

	/* Create a couple channels so the next swap has active esnap channels.
	 * NOTE(review): both reads target the same stack buffer `buf`; this
	 * appears safe only because the UT framework serializes the simulated
	 * threads during poll_threads() — confirm if reworking this test. */
	set_thread(0);
	ch0 = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(ch0 != NULL);
	spdk_blob_io_read(blob, ch0, buf, 0, 1, bs_op_complete, NULL);
	set_thread(1);
	ch1 = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(ch1 != NULL);
	spdk_blob_io_read(blob, ch1, buf, 0, 1, bs_op_complete, NULL);
	set_thread(start_thread);
	poll_threads();
	CU_ASSERT(esnap_dev->num_channels == 2);

	/* Replace the external snapshot again while two channels are open. */
	ut_esnap_opts_init(block_sz, esnap_num_blocks, "esnap1a", &destroyed1, &esnap1_opts);
	bs_dev = ut_esnap_dev_alloc(&esnap1_opts);
	destroyed1 = destroyed2 = false;
	g_bserrno = 0xbad;
	spdk_blob_set_esnap_bs_dev(blob, bs_dev, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(!destroyed1);
	CU_ASSERT(destroyed2);
	SPDK_CU_ASSERT_FATAL(blob->back_bs_dev != NULL);
	esnap_dev = (struct ut_esnap_dev *)blob->back_bs_dev;
	CU_ASSERT(strcmp(esnap_dev->ut_opts.name, "esnap1a") == 0);

	/* Clean up */
	set_thread(0);
	spdk_bs_free_io_channel(ch0);
	set_thread(1);
	spdk_bs_free_io_channel(ch1);
	set_thread(start_thread);
	g_bserrno = 0xbad;
	spdk_blob_close(blob, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
}
9210 
/* Controls for the _blob_is_degraded() stub: the value it returns and a
 * counter of how many times it was invoked. */
static bool g_blob_is_degraded;
static int g_blob_is_degraded_called;
9213 
9214 static bool
9215 _blob_is_degraded(struct spdk_bs_dev *dev)
9216 {
9217 	g_blob_is_degraded_called++;
9218 	return g_blob_is_degraded;
9219 }
9220 
/*
 * Verify spdk_blob_is_degraded(): the blob is degraded if either the
 * back_bs_dev or the blobstore's base device reports is_degraded, and a
 * missing is_degraded callback on either device counts as "not degraded"
 * without being called.
 */
static void
blob_is_degraded(void)
{
	struct spdk_bs_dev bs_is_degraded_null = { 0 };
	struct spdk_bs_dev bs_is_degraded = { .is_degraded = _blob_is_degraded };

	/* No back_bs_dev, no bs->dev->is_degraded */
	g_blob_is_degraded_called = 0;
	CU_ASSERT(!spdk_blob_is_degraded(g_blob));
	CU_ASSERT(g_blob_is_degraded_called == 0);

	/* No back_bs_dev, blobstore device degraded */
	g_bs->dev->is_degraded = _blob_is_degraded;
	g_blob_is_degraded_called = 0;
	g_blob_is_degraded = true;
	CU_ASSERT(spdk_blob_is_degraded(g_blob));
	CU_ASSERT(g_blob_is_degraded_called == 1);

	/* No back_bs_dev, blobstore device not degraded */
	g_bs->dev->is_degraded = _blob_is_degraded;
	g_blob_is_degraded_called = 0;
	g_blob_is_degraded = false;
	CU_ASSERT(!spdk_blob_is_degraded(g_blob));
	CU_ASSERT(g_blob_is_degraded_called == 1);

	/* back_bs_dev does not define is_degraded, no bs->dev->is_degraded */
	g_bs->dev->is_degraded = NULL;
	g_blob->back_bs_dev = &bs_is_degraded_null;
	g_blob_is_degraded_called = 0;
	g_blob_is_degraded = false;
	CU_ASSERT(!spdk_blob_is_degraded(g_blob));
	CU_ASSERT(g_blob_is_degraded_called == 0);

	/* back_bs_dev is not degraded, no bs->dev->is_degraded */
	g_bs->dev->is_degraded = NULL;
	g_blob->back_bs_dev = &bs_is_degraded;
	g_blob_is_degraded_called = 0;
	g_blob_is_degraded = false;
	CU_ASSERT(!spdk_blob_is_degraded(g_blob));
	CU_ASSERT(g_blob_is_degraded_called == 1);

	/* back_bs_dev is degraded, no bs->dev->is_degraded */
	g_bs->dev->is_degraded = NULL;
	g_blob->back_bs_dev = &bs_is_degraded;
	g_blob_is_degraded_called = 0;
	g_blob_is_degraded = true;
	CU_ASSERT(spdk_blob_is_degraded(g_blob));
	CU_ASSERT(g_blob_is_degraded_called == 1);

	/* back_bs_dev is not degraded, blobstore device is not degraded;
	 * both callbacks must be consulted (hence called == 2). */
	g_bs->dev->is_degraded = _blob_is_degraded;
	g_blob->back_bs_dev = &bs_is_degraded;
	g_blob_is_degraded_called = 0;
	g_blob_is_degraded = false;
	CU_ASSERT(!spdk_blob_is_degraded(g_blob));
	CU_ASSERT(g_blob_is_degraded_called == 2);

	/* Restore g_blob so later tests see no stale stack pointer. */
	g_blob->back_bs_dev = NULL;
}
9280 
9281 /* Resize a blob which is a clone created from snapshot. Verify read/writes to
9282  * expanded clone blob. Then inflate the clone blob. */
9283 static void
9284 blob_clone_resize(void)
9285 {
9286 	struct spdk_blob_store *bs = g_bs;
9287 	struct spdk_blob_opts opts;
9288 	struct spdk_blob *blob, *clone, *snap_blob, *snap_blob_rsz;
9289 	spdk_blob_id blobid, cloneid, snapid1, snapid2;
9290 	uint64_t pages_per_cluster;
9291 	uint8_t payload_read[bs->dev->blocklen];
9292 	uint8_t payload_write[bs->dev->blocklen];
9293 	struct spdk_io_channel *channel;
9294 	uint64_t free_clusters;
9295 
9296 	channel = spdk_bs_alloc_io_channel(bs);
9297 	SPDK_CU_ASSERT_FATAL(channel != NULL);
9298 
9299 	pages_per_cluster = spdk_bs_get_cluster_size(bs) / spdk_bs_get_page_size(bs);
9300 
9301 	/* Create blob with 10 clusters */
9302 	ut_spdk_blob_opts_init(&opts);
9303 	opts.num_clusters = 10;
9304 
9305 	blob = ut_blob_create_and_open(bs, &opts);
9306 	blobid = spdk_blob_get_id(blob);
9307 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
9308 
9309 	/* Create snapshot */
9310 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
9311 	poll_threads();
9312 	CU_ASSERT(g_bserrno == 0);
9313 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
9314 	snapid1 = g_blobid;
9315 
9316 	spdk_bs_create_clone(bs, snapid1, NULL, blob_op_with_id_complete, NULL);
9317 	poll_threads();
9318 	CU_ASSERT(g_bserrno == 0);
9319 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
9320 	cloneid = g_blobid;
9321 
9322 	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
9323 	poll_threads();
9324 	CU_ASSERT(g_bserrno == 0);
9325 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
9326 	clone = g_blob;
9327 	CU_ASSERT(spdk_blob_get_num_clusters(clone) == 10);
9328 
9329 	g_bserrno = -1;
9330 	spdk_blob_resize(clone, 20, blob_op_complete, NULL);
9331 	poll_threads();
9332 	CU_ASSERT(g_bserrno == 0);
9333 	CU_ASSERT(spdk_blob_get_num_clusters(clone) == 20);
9334 
9335 	/* Create another snapshot after resizing the clone */
9336 	spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL);
9337 	poll_threads();
9338 	CU_ASSERT(g_bserrno == 0);
9339 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
9340 	snapid2 = g_blobid;
9341 
9342 	/* Open the snapshot blobs */
9343 	spdk_bs_open_blob(bs, snapid1, blob_op_with_handle_complete, NULL);
9344 	CU_ASSERT(g_bserrno == 0);
9345 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
9346 	snap_blob = g_blob;
9347 	CU_ASSERT(snap_blob->data_ro == true);
9348 	CU_ASSERT(snap_blob->md_ro == true);
9349 	CU_ASSERT(spdk_blob_get_num_clusters(snap_blob) == 10);
9350 
9351 	spdk_bs_open_blob(bs, snapid2, blob_op_with_handle_complete, NULL);
9352 	CU_ASSERT(g_bserrno == 0);
9353 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
9354 	snap_blob_rsz = g_blob;
9355 	CU_ASSERT(snap_blob_rsz->data_ro == true);
9356 	CU_ASSERT(snap_blob_rsz->md_ro == true);
9357 	CU_ASSERT(spdk_blob_get_num_clusters(snap_blob_rsz) == 20);
9358 
9359 	/* Confirm that clone is backed by snap_blob_rsz, and snap_blob_rsz is backed by snap_blob */
9360 	SPDK_CU_ASSERT_FATAL(snap_blob->back_bs_dev == NULL);
9361 	SPDK_CU_ASSERT_FATAL(blob->back_bs_dev != NULL);
9362 	SPDK_CU_ASSERT_FATAL(snap_blob_rsz->back_bs_dev != NULL);
9363 
9364 	/* Write and read from pre-resize ranges */
9365 	g_bserrno = -1;
9366 	memset(payload_write, 0xE5, sizeof(payload_write));
9367 	spdk_blob_io_write(clone, channel, payload_write, 5 * pages_per_cluster, 1, blob_op_complete, NULL);
9368 	poll_threads();
9369 	CU_ASSERT(g_bserrno == 0);
9370 
9371 	g_bserrno = -1;
9372 	memset(payload_read, 0x00, sizeof(payload_read));
9373 	spdk_blob_io_read(clone, channel, payload_read, 5 * pages_per_cluster, 1, blob_op_complete, NULL);
9374 	poll_threads();
9375 	CU_ASSERT(g_bserrno == 0);
9376 	CU_ASSERT(memcmp(payload_write, payload_read, BLOCKLEN) == 0);
9377 
9378 	/* Write and read from post-resize ranges */
9379 	g_bserrno = -1;
9380 	memset(payload_write, 0xE5, sizeof(payload_write));
9381 	spdk_blob_io_write(clone, channel, payload_write, 15 * pages_per_cluster, 1, blob_op_complete,
9382 			   NULL);
9383 	poll_threads();
9384 	CU_ASSERT(g_bserrno == 0);
9385 
9386 	g_bserrno = -1;
9387 	memset(payload_read, 0x00, sizeof(payload_read));
9388 	spdk_blob_io_read(clone, channel, payload_read, 15 * pages_per_cluster, 1, blob_op_complete, NULL);
9389 	poll_threads();
9390 	CU_ASSERT(g_bserrno == 0);
9391 	CU_ASSERT(memcmp(payload_write, payload_read, bs->dev->blocklen) == 0);
9392 
9393 	/* Now do full blob inflation of the resized blob/clone. */
9394 	free_clusters = spdk_bs_free_cluster_count(bs);
9395 	spdk_bs_inflate_blob(bs, channel, cloneid, blob_op_complete, NULL);
9396 	poll_threads();
9397 	CU_ASSERT(g_bserrno == 0);
9398 	/* We wrote to 2 clusters earlier, all remaining 18 clusters in
9399 	 * blob should get allocated after inflation */
9400 	CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 18);
9401 
9402 	spdk_blob_close(clone, blob_op_complete, NULL);
9403 	poll_threads();
9404 	CU_ASSERT(g_bserrno == 0);
9405 
9406 	spdk_blob_close(snap_blob, blob_op_complete, NULL);
9407 	poll_threads();
9408 	CU_ASSERT(g_bserrno == 0);
9409 
9410 	spdk_blob_close(snap_blob_rsz, blob_op_complete, NULL);
9411 	poll_threads();
9412 	CU_ASSERT(g_bserrno == 0);
9413 
9414 	ut_blob_close_and_delete(bs, blob);
9415 
9416 	spdk_bs_free_io_channel(channel);
9417 }
9418 
9419 
/*
 * Resize an esnap clone beyond the size of its external snapshot, then write
 * every block while verifying the neighboring blocks (previous, current,
 * next) each time to catch cross-block corruption at cluster boundaries.
 */
static void
blob_esnap_clone_resize(void)
{
	struct spdk_bs_dev *dev;
	struct spdk_blob_store *bs;
	struct spdk_bs_opts bsopts;
	struct spdk_blob_opts opts;
	struct ut_esnap_opts esnap_opts;
	struct spdk_blob *blob;
	uint32_t block, esnap_blksz = 512, bs_blksz = 512;
	const uint32_t cluster_sz = 4 * g_phys_blocklen;
	const uint64_t esnap_num_clusters = 4;
	const uint32_t esnap_sz = cluster_sz * esnap_num_clusters;
	const uint64_t esnap_num_blocks = esnap_sz / esnap_blksz;
	uint64_t blob_num_blocks = esnap_sz / bs_blksz;
	struct spdk_io_channel *bs_ch;

	spdk_bs_opts_init(&bsopts, sizeof(bsopts));
	bsopts.cluster_sz = cluster_sz;
	bsopts.esnap_bs_dev_create = ut_esnap_create;
	/* Create device with desired block size */
	dev = init_dev();
	dev->blocklen = bs_blksz;
	dev->blockcnt = DEV_BUFFER_SIZE / dev->blocklen;
	/* Initialize a new blob store */
	spdk_bs_init(dev, &bsopts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	SPDK_CU_ASSERT_FATAL(g_bs->io_unit_size == bs_blksz);
	bs = g_bs;

	bs_ch = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(bs_ch != NULL);

	/* Create and open the esnap clone  */
	ut_spdk_blob_opts_init(&opts);
	ut_esnap_opts_init(esnap_blksz, esnap_num_blocks, __func__, NULL, &esnap_opts);
	opts.esnap_id = &esnap_opts;
	opts.esnap_id_len = sizeof(esnap_opts);
	opts.num_clusters = esnap_num_clusters;
	blob = ut_blob_create_and_open(bs, &opts);
	SPDK_CU_ASSERT_FATAL(blob != NULL);

	/* Double the clone's size; clusters beyond the esnap are thin. */
	g_bserrno = -1;
	spdk_blob_resize(blob, esnap_num_clusters * 2, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == esnap_num_clusters * 2);

	/* Write one blob block at a time; verify that the surrounding blocks are OK */
	blob_num_blocks = (spdk_blob_get_num_clusters(blob) * cluster_sz) / bs_blksz;
	for (block = 0; block < blob_num_blocks; block++) {
		char buf[bs_blksz];
		union ut_word word;
		word.f.blob_id = 0xfedcba90;
		word.f.lba = block;
		ut_memset8(buf, word.num, bs_blksz);
		spdk_blob_io_write(blob, bs_ch, buf, block, 1, bs_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		if (g_bserrno != 0) {
			break;
		}
		/* Read and verify the block before the current block */
		if (block != 0) {
			spdk_blob_io_read(blob, bs_ch, buf, block - 1, 1, bs_op_complete, NULL);
			poll_threads();
			CU_ASSERT(g_bserrno == 0);
			if (g_bserrno != 0) {
				break;
			}
			CU_ASSERT(ut_esnap_content_is_correct(buf, bs_blksz, word.f.blob_id,
							      (block - 1) * bs_blksz, bs_blksz));
		}
		/* Read and verify the current block */
		spdk_blob_io_read(blob, bs_ch, buf, block, 1, bs_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		if (g_bserrno != 0) {
			break;
		}
		CU_ASSERT(ut_esnap_content_is_correct(buf, bs_blksz, word.f.blob_id,
						      block * bs_blksz, bs_blksz));
		/* Check the block that follows.  It has not been written yet, so
		 * it should still show content generated by the UT esnap device
		 * (presumably keyed on blob->id by esnap_dev.c — see
		 * ut_esnap_content_is_correct; TODO confirm). */
		if (block + 1 < blob_num_blocks) {
			g_bserrno = 0xbad;
			spdk_blob_io_read(blob, bs_ch, buf, block + 1, 1, bs_op_complete, NULL);
			poll_threads();
			CU_ASSERT(g_bserrno == 0);
			if (g_bserrno != 0) {
				break;
			}
			CU_ASSERT(ut_esnap_content_is_correct(buf, bs_blksz, blob->id,
							      (block + 1) * bs_blksz,
							      esnap_blksz));
		}
	}
	/* Clean up */
	spdk_bs_free_io_channel(bs_ch);
	g_bserrno = 0xbad;
	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	spdk_bs_unload(g_bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
	memset(g_dev_buffer, 0, DEV_BUFFER_SIZE);
}
9530 
9531 static void
9532 bs_dev_io_complete_cb(struct spdk_io_channel *channel, void *cb_arg, int bserrno)
9533 {
9534 	g_bserrno = bserrno;
9535 }
9536 
/*
 * Exercise spdk_bs_blob_shallow_copy(): error paths (source blob not
 * read-only -> -EPERM, destination device too small or wrong block length
 * -> -EINVAL) and a successful copy in which only the clusters owned by the
 * blob itself (1 and 3) are written to the destination, leaving clusters
 * backed by the snapshot (2 and 4) untouched.
 */
static void
blob_shallow_copy(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts blob_opts;
	struct spdk_blob *blob;
	spdk_blob_id blobid;
	uint64_t num_clusters = 4;
	struct spdk_bs_dev *ext_dev;
	struct spdk_bs_dev_cb_args ext_args;
	struct spdk_io_channel *bdev_ch, *blob_ch;
	uint8_t buf1[DEV_BUFFER_BLOCKLEN];
	uint8_t buf2[DEV_BUFFER_BLOCKLEN];
	uint64_t io_units_per_cluster;
	uint64_t offset;
	int rc;

	blob_ch = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(blob_ch != NULL);

	/* Set blob dimension and as thin provisioned */
	ut_spdk_blob_opts_init(&blob_opts);
	blob_opts.thin_provision = true;
	blob_opts.num_clusters = num_clusters;

	/* Create a blob */
	blob = ut_blob_create_and_open(bs, &blob_opts);
	SPDK_CU_ASSERT_FATAL(blob != NULL);
	blobid = spdk_blob_get_id(blob);
	io_units_per_cluster = bs_io_units_per_cluster(blob);

	/* Write on cluster 2 and 4 of blob */
	for (offset = io_units_per_cluster; offset < 2 * io_units_per_cluster; offset++) {
		memset(buf1, offset, DEV_BUFFER_BLOCKLEN);
		spdk_blob_io_write(blob, blob_ch, buf1, offset, 1, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
	}
	for (offset = 3 * io_units_per_cluster; offset < 4 * io_units_per_cluster; offset++) {
		memset(buf1, offset, DEV_BUFFER_BLOCKLEN);
		spdk_blob_io_write(blob, blob_ch, buf1, offset, 1, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
	}
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 2);

	/* Make a snapshot over blob; its clusters move to the snapshot. */
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);

	/* Write on cluster 1 and 3 of blob */
	for (offset = 0; offset < io_units_per_cluster; offset++) {
		memset(buf1, offset, DEV_BUFFER_BLOCKLEN);
		spdk_blob_io_write(blob, blob_ch, buf1, offset, 1, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
	}
	for (offset = 2 * io_units_per_cluster; offset < 3 * io_units_per_cluster; offset++) {
		memset(buf1, offset, DEV_BUFFER_BLOCKLEN);
		spdk_blob_io_write(blob, blob_ch, buf1, offset, 1, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
	}
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 2);

	/* Shallow copy with a not read only blob */
	ext_dev = init_ext_dev(num_clusters * 1024 * 1024, DEV_BUFFER_BLOCKLEN);
	rc = spdk_bs_blob_shallow_copy(bs, blob_ch, blobid, ext_dev,
				       blob_shallow_copy_status_cb, NULL,
				       blob_op_complete, NULL);
	CU_ASSERT(rc == 0);
	poll_threads();
	CU_ASSERT(g_bserrno == -EPERM);
	ext_dev->destroy(ext_dev);

	/* Set blob read only */
	spdk_blob_set_read_only(blob);
	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Shallow copy over a spdk_bs_dev with incorrect size */
	ext_dev = init_ext_dev(1, DEV_BUFFER_BLOCKLEN);
	rc = spdk_bs_blob_shallow_copy(bs, blob_ch, blobid, ext_dev,
				       blob_shallow_copy_status_cb, NULL,
				       blob_op_complete, NULL);
	CU_ASSERT(rc == 0);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);
	ext_dev->destroy(ext_dev);

	/* Shallow copy over a spdk_bs_dev with incorrect block len */
	ext_dev = init_ext_dev(num_clusters * 1024 * 1024, DEV_BUFFER_BLOCKLEN * 2);
	rc = spdk_bs_blob_shallow_copy(bs, blob_ch, blobid, ext_dev,
				       blob_shallow_copy_status_cb, NULL,
				       blob_op_complete, NULL);
	CU_ASSERT(rc == 0);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);
	ext_dev->destroy(ext_dev);

	/* Initialize ext_dev for the successful shallow copy, pre-filling it
	 * with 0xff so untouched clusters can be detected afterwards. */
	ext_dev = init_ext_dev(num_clusters * 1024 * 1024, DEV_BUFFER_BLOCKLEN);
	bdev_ch = ext_dev->create_channel(ext_dev);
	SPDK_CU_ASSERT_FATAL(bdev_ch != NULL);
	ext_args.cb_fn = bs_dev_io_complete_cb;
	for (offset = 0; offset < 4 * io_units_per_cluster; offset++) {
		memset(buf2, 0xff, DEV_BUFFER_BLOCKLEN);
		ext_dev->write(ext_dev, bdev_ch, buf2, offset, 1, &ext_args);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
	}

	/* Correct shallow copy of blob over bdev; the status callback reports
	 * progress in g_copied_clusters_count, one cluster per poll step. */
	rc = spdk_bs_blob_shallow_copy(bs, blob_ch, blobid, ext_dev,
				       blob_shallow_copy_status_cb, NULL,
				       blob_op_complete, NULL);
	CU_ASSERT(rc == 0);
	poll_thread_times(0, 1);
	CU_ASSERT(g_copied_clusters_count == 1);
	poll_thread_times(0, 2);
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_copied_clusters_count == 2);

	/* Read from bdev */
	/* Only cluster 1 and 3 must be filled */
	/* Clusters 2 and 4 should not have been touched */
	for (offset = 0; offset < io_units_per_cluster; offset++) {
		memset(buf1, offset, DEV_BUFFER_BLOCKLEN);
		ext_dev->read(ext_dev, bdev_ch, buf2, offset, 1, &ext_args);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(memcmp(buf1, buf2, DEV_BUFFER_BLOCKLEN) == 0);
	}
	for (offset = io_units_per_cluster; offset < 2 * io_units_per_cluster; offset++) {
		memset(buf1, 0xff, DEV_BUFFER_BLOCKLEN);
		ext_dev->read(ext_dev, bdev_ch, buf2, offset, 1, &ext_args);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(memcmp(buf1, buf2, DEV_BUFFER_BLOCKLEN) == 0);
	}
	for (offset = 2 * io_units_per_cluster; offset < 3 * io_units_per_cluster; offset++) {
		memset(buf1, offset, DEV_BUFFER_BLOCKLEN);
		ext_dev->read(ext_dev, bdev_ch, buf2, offset, 1, &ext_args);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(memcmp(buf1, buf2, DEV_BUFFER_BLOCKLEN) == 0);
	}
	for (offset = 3 * io_units_per_cluster; offset < 4 * io_units_per_cluster; offset++) {
		memset(buf1, 0xff, DEV_BUFFER_BLOCKLEN);
		ext_dev->read(ext_dev, bdev_ch, buf2, offset, 1, &ext_args);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(memcmp(buf1, buf2, DEV_BUFFER_BLOCKLEN) == 0);
	}

	/* Clean up */
	ext_dev->destroy_channel(ext_dev, bdev_ch);
	ext_dev->destroy(ext_dev);
	spdk_bs_free_io_channel(blob_ch);
	ut_blob_close_and_delete(bs, blob);
	poll_threads();
}
9702 
/*
 * Exercise spdk_bs_blob_set_parent(): rejection of invalid arguments,
 * re-parenting a snapshot clone onto an older snapshot, converting an
 * esnap clone into a regular clone, and turning a standalone
 * thin-provisioned blob into a clone.
 */
static void
blob_set_parent(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts opts;
	struct ut_esnap_opts esnap_opts;
	struct spdk_blob *blob1, *blob2, *blob3, *blob4, *blob5;
	spdk_blob_id blobid1, blobid2, blobid3, blobid4, blobid5,
		     snapshotid1, snapshotid2, snapshotid3;
	uint32_t cluster_sz, block_sz;
	const uint32_t esnap_num_clusters = 4;
	uint64_t esnap_num_blocks;
	spdk_blob_id ids[2];
	size_t clone_count = 2;

	cluster_sz = spdk_bs_get_cluster_size(bs);
	block_sz = spdk_bs_get_io_unit_size(bs);
	esnap_num_blocks = cluster_sz * esnap_num_clusters / block_sz;

	/* Create a normal blob and make a couple of snapshots */
	ut_spdk_blob_opts_init(&opts);
	blob1 = ut_blob_create_and_open(bs, &opts);
	SPDK_CU_ASSERT_FATAL(blob1 != NULL);
	blobid1 = spdk_blob_get_id(blob1);
	spdk_bs_create_snapshot(bs, blobid1, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blobid != SPDK_BLOBID_INVALID);
	snapshotid1 = g_blobid;
	/* Second snapshot: chain is now snapshotid1 <- snapshotid2 <- blob1 */
	spdk_bs_create_snapshot(bs, blobid1, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blobid != SPDK_BLOBID_INVALID);
	snapshotid2 = g_blobid;

	/* Call set_parent with an invalid snapshotid */
	spdk_bs_blob_set_parent(bs, blobid1, SPDK_BLOBID_INVALID, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Call set_parent with blobid and snapshotid the same */
	spdk_bs_blob_set_parent(bs, blobid1, blobid1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Call set_parent with a blob and its parent snapshot */
	spdk_bs_blob_set_parent(bs, blobid1, snapshotid2, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EEXIST);

	/* Create an esnap clone blob */
	ut_spdk_blob_opts_init(&opts);
	ut_esnap_opts_init(block_sz, esnap_num_blocks, __func__, NULL, &esnap_opts);
	opts.esnap_id = &esnap_opts;
	opts.esnap_id_len = sizeof(esnap_opts);
	opts.num_clusters = esnap_num_clusters;
	blob2 = ut_blob_create_and_open(bs, &opts);
	SPDK_CU_ASSERT_FATAL(blob2 != NULL);
	blobid2 = spdk_blob_get_id(blob2);
	CU_ASSERT(spdk_blob_is_esnap_clone(blob2));

	/* Call set_parent with a non snapshot parent */
	spdk_bs_blob_set_parent(bs, blobid2, blobid1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Call set_parent with blob and snapshot of different size */
	spdk_bs_blob_set_parent(bs, blobid2, snapshotid1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Call set_parent correctly with a snapshot's clone blob */
	spdk_bs_blob_set_parent(bs, blobid1, snapshotid1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Check relations: snapshotid1 now has two clones, snapshotid2 and blob1 */
	CU_ASSERT(spdk_blob_is_clone(blob1));
	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid1) == snapshotid1);
	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid1, ids, &clone_count) == 0);
	CU_ASSERT(clone_count == 2);
	CU_ASSERT(ids[1] == blobid1);

	/* Create another normal blob with size equal to esnap size and make a snapshot */
	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = esnap_num_clusters;
	opts.thin_provision = true;
	blob3 = ut_blob_create_and_open(bs, &opts);
	SPDK_CU_ASSERT_FATAL(blob3 != NULL);
	blobid3 = spdk_blob_get_id(blob3);
	spdk_bs_create_snapshot(bs, blobid3, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blobid != SPDK_BLOBID_INVALID);
	snapshotid3 = g_blobid;

	/* Call set_parent correctly with an esnap's clone blob */
	spdk_bs_blob_set_parent(bs, blobid2, snapshotid3, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Check relations: blob2 converted from esnap clone to regular clone of snapshotid3 */
	CU_ASSERT(!spdk_blob_is_esnap_clone(blob2));
	CU_ASSERT(spdk_blob_is_clone(blob2));
	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid2) == snapshotid3);
	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid3, ids, &clone_count) == 0);
	CU_ASSERT(clone_count == 2);
	CU_ASSERT(ids[1] == blobid2);

	/* Create a not thin-provisioned blob that is not a clone */
	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = false;
	blob4 = ut_blob_create_and_open(bs, &opts);
	SPDK_CU_ASSERT_FATAL(blob4 != NULL);
	blobid4 = spdk_blob_get_id(blob4);

	/* Call set_parent with a blob that isn't a clone and that isn't thin-provisioned */
	spdk_bs_blob_set_parent(bs, blobid4, snapshotid2, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Create a thin-provisioned blob that is not a clone */
	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = true;
	blob5 = ut_blob_create_and_open(bs, &opts);
	SPDK_CU_ASSERT_FATAL(blob5 != NULL);
	blobid5 = spdk_blob_get_id(blob5);

	/* Call set_parent correctly with a blob that isn't a clone */
	spdk_bs_blob_set_parent(bs, blobid5, snapshotid2, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Check relations: blob5 is the only clone of snapshotid2 */
	CU_ASSERT(spdk_blob_is_clone(blob5));
	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid5) == snapshotid2);
	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &clone_count) == 0);
	CU_ASSERT(clone_count == 1);
	CU_ASSERT(ids[0] == blobid5);

	/* Clean up: blobs first, then snapshots (children before parents) */
	ut_blob_close_and_delete(bs, blob5);
	ut_blob_close_and_delete(bs, blob4);
	ut_blob_close_and_delete(bs, blob3);
	ut_blob_close_and_delete(bs, blob2);
	ut_blob_close_and_delete(bs, blob1);
	spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	spdk_bs_delete_blob(bs, snapshotid1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
}
9859 
9860 static void
9861 blob_set_external_parent(void)
9862 {
9863 	struct spdk_blob_store *bs = g_bs;
9864 	struct spdk_blob_opts opts;
9865 	struct ut_esnap_opts esnap_opts, esnap_opts2;
9866 	struct spdk_blob *blob1, *blob2, *blob3, *blob4;
9867 	spdk_blob_id blobid1, blobid2, blobid3, blobid4, snapshotid;
9868 	uint32_t cluster_sz, block_sz;
9869 	const uint32_t esnap_num_clusters = 4;
9870 	uint64_t esnap_num_blocks;
9871 	struct spdk_bs_dev *esnap_dev1, *esnap_dev2, *esnap_dev3;
9872 	const void *esnap_id;
9873 	size_t esnap_id_len;
9874 	int rc;
9875 
9876 	cluster_sz = spdk_bs_get_cluster_size(bs);
9877 	block_sz = spdk_bs_get_io_unit_size(bs);
9878 	esnap_num_blocks = cluster_sz * esnap_num_clusters / block_sz;
9879 	esnap_dev1 = init_dev();
9880 	esnap_dev2 = init_dev();
9881 	esnap_dev3 = init_dev();
9882 
9883 	/* Create an esnap clone blob */
9884 	ut_spdk_blob_opts_init(&opts);
9885 	ut_esnap_opts_init(block_sz, esnap_num_blocks, __func__, NULL, &esnap_opts);
9886 	opts.esnap_id = &esnap_opts;
9887 	opts.esnap_id_len = sizeof(esnap_opts);
9888 	opts.num_clusters = esnap_num_clusters;
9889 	blob1 = ut_blob_create_and_open(bs, &opts);
9890 	SPDK_CU_ASSERT_FATAL(blob1 != NULL);
9891 	blobid1 = spdk_blob_get_id(blob1);
9892 	CU_ASSERT(spdk_blob_is_esnap_clone(blob1));
9893 
9894 	/* Call set_esternal_parent with blobid and esnapid the same */
9895 	spdk_bs_blob_set_external_parent(bs, blobid1, esnap_dev1, &blobid1, sizeof(blobid1),
9896 					 blob_op_complete, NULL);
9897 	CU_ASSERT(g_bserrno == -EINVAL);
9898 
9899 	/* Call set_external_parent with esnap of incompatible size */
9900 	esnap_dev1->blockcnt = esnap_num_blocks - 1;
9901 	spdk_bs_blob_set_external_parent(bs, blobid1, esnap_dev1, opts.esnap_id, opts.esnap_id_len,
9902 					 blob_op_complete, NULL);
9903 	CU_ASSERT(g_bserrno == -EINVAL);
9904 
9905 	/* Call set_external_parent with a blob and its parent esnap */
9906 	esnap_dev1->blocklen = block_sz;
9907 	esnap_dev1->blockcnt = esnap_num_blocks;
9908 	spdk_bs_blob_set_external_parent(bs, blobid1, esnap_dev1, opts.esnap_id, opts.esnap_id_len,
9909 					 blob_op_complete, NULL);
9910 	poll_threads();
9911 	CU_ASSERT(g_bserrno == -EEXIST);
9912 
9913 	/* Create a blob that is a clone of a snapshots */
9914 	ut_spdk_blob_opts_init(&opts);
9915 	blob2 = ut_blob_create_and_open(bs, &opts);
9916 	SPDK_CU_ASSERT_FATAL(blob2 != NULL);
9917 	blobid2 = spdk_blob_get_id(blob2);
9918 	spdk_bs_create_snapshot(bs, blobid2, NULL, blob_op_with_id_complete, NULL);
9919 	poll_threads();
9920 	SPDK_CU_ASSERT_FATAL(g_bserrno == 0);
9921 	SPDK_CU_ASSERT_FATAL(g_blobid != SPDK_BLOBID_INVALID);
9922 	snapshotid = g_blobid;
9923 
9924 	/* Call set_parent correctly with a snapshot's clone blob */
9925 	esnap_dev2->blocklen = block_sz;
9926 	esnap_dev2->blockcnt = esnap_num_blocks;
9927 	ut_esnap_opts_init(block_sz, esnap_num_blocks, __func__, NULL, &esnap_opts2);
9928 	spdk_bs_blob_set_external_parent(bs, blobid2, esnap_dev2, &esnap_opts2, sizeof(esnap_opts2),
9929 					 blob_op_complete, NULL);
9930 	poll_threads();
9931 	CU_ASSERT(g_bserrno == 0);
9932 
9933 	/* Check relations */
9934 	rc = spdk_blob_get_esnap_id(blob2, &esnap_id, &esnap_id_len);
9935 	CU_ASSERT(spdk_blob_is_esnap_clone(blob2));
9936 	CU_ASSERT(!spdk_blob_is_clone(blob2));
9937 	CU_ASSERT(rc == 0 && esnap_id_len == sizeof(esnap_opts2) &&
9938 		  memcmp(esnap_id, &esnap_opts2, esnap_id_len) == 0);
9939 	CU_ASSERT(blob2->parent_id == SPDK_BLOBID_EXTERNAL_SNAPSHOT);
9940 
9941 	/* Create a not thin-provisioned blob that is not a clone */
9942 	ut_spdk_blob_opts_init(&opts);
9943 	opts.thin_provision = false;
9944 	blob3 = ut_blob_create_and_open(bs, &opts);
9945 	SPDK_CU_ASSERT_FATAL(blob3 != NULL);
9946 	blobid3 = spdk_blob_get_id(blob3);
9947 
9948 	/* Call set_external_parent with a blob that isn't a clone and that isn't thin-provisioned */
9949 	spdk_bs_blob_set_external_parent(bs, blobid3, esnap_dev1, &esnap_opts, sizeof(esnap_opts),
9950 					 blob_op_complete, NULL);
9951 	poll_threads();
9952 	CU_ASSERT(g_bserrno == -EINVAL);
9953 
9954 	/* Create a thin-provisioned blob that is not a clone */
9955 	ut_spdk_blob_opts_init(&opts);
9956 	opts.thin_provision = true;
9957 	blob4 = ut_blob_create_and_open(bs, &opts);
9958 	SPDK_CU_ASSERT_FATAL(blob4 != NULL);
9959 	blobid4 = spdk_blob_get_id(blob4);
9960 
9961 	/* Call set_external_parent correctly with a blob that isn't a clone */
9962 	esnap_dev3->blocklen = block_sz;
9963 	esnap_dev3->blockcnt = esnap_num_blocks;
9964 	ut_esnap_opts_init(block_sz, esnap_num_blocks, __func__, NULL, &esnap_opts);
9965 	spdk_bs_blob_set_external_parent(bs, blobid4, esnap_dev3, &esnap_opts, sizeof(esnap_opts),
9966 					 blob_op_complete, NULL);
9967 	poll_threads();
9968 	CU_ASSERT(g_bserrno == 0);
9969 
9970 	/* Check relations */
9971 	rc = spdk_blob_get_esnap_id(blob4, &esnap_id, &esnap_id_len);
9972 	CU_ASSERT(spdk_blob_is_esnap_clone(blob4));
9973 	CU_ASSERT(!spdk_blob_is_clone(blob4));
9974 	CU_ASSERT(rc == 0 && esnap_id_len == sizeof(esnap_opts) &&
9975 		  memcmp(esnap_id, &esnap_opts, esnap_id_len) == 0);
9976 	CU_ASSERT(blob4->parent_id == SPDK_BLOBID_EXTERNAL_SNAPSHOT);
9977 
9978 	ut_blob_close_and_delete(bs, blob4);
9979 	ut_blob_close_and_delete(bs, blob3);
9980 	ut_blob_close_and_delete(bs, blob2);
9981 	ut_blob_close_and_delete(bs, blob1);
9982 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
9983 	dev_destroy(esnap_dev1);
9984 	poll_threads();
9985 	CU_ASSERT(g_bserrno == 0);
9986 }
9987 
9988 static void
9989 suite_bs_setup(void)
9990 {
9991 	struct spdk_bs_dev *dev;
9992 
9993 	dev = init_dev();
9994 	memset(g_dev_buffer, 0, DEV_BUFFER_SIZE);
9995 	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
9996 	poll_threads();
9997 	CU_ASSERT(g_bserrno == 0);
9998 	CU_ASSERT(g_bs != NULL);
9999 }
10000 
10001 static void
10002 suite_esnap_bs_setup(void)
10003 {
10004 	struct spdk_bs_dev	*dev;
10005 	struct spdk_bs_opts	bs_opts;
10006 
10007 	dev = init_dev();
10008 	memset(g_dev_buffer, 0, DEV_BUFFER_SIZE);
10009 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
10010 	bs_opts.cluster_sz = 4 * g_phys_blocklen;
10011 	bs_opts.esnap_bs_dev_create = ut_esnap_create;
10012 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
10013 	poll_threads();
10014 	CU_ASSERT(g_bserrno == 0);
10015 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
10016 }
10017 
10018 static void
10019 suite_bs_cleanup(void)
10020 {
10021 	if (g_bs != NULL) {
10022 		spdk_bs_unload(g_bs, bs_op_complete, NULL);
10023 		poll_threads();
10024 		CU_ASSERT(g_bserrno == 0);
10025 		g_bs = NULL;
10026 	}
10027 	memset(g_dev_buffer, 0, DEV_BUFFER_SIZE);
10028 }
10029 
10030 static struct spdk_blob *
10031 ut_blob_create_and_open(struct spdk_blob_store *bs, struct spdk_blob_opts *blob_opts)
10032 {
10033 	struct spdk_blob *blob;
10034 	struct spdk_blob_opts create_blob_opts;
10035 	spdk_blob_id blobid;
10036 
10037 	if (blob_opts == NULL) {
10038 		ut_spdk_blob_opts_init(&create_blob_opts);
10039 		blob_opts = &create_blob_opts;
10040 	}
10041 
10042 	spdk_bs_create_blob_ext(bs, blob_opts, blob_op_with_id_complete, NULL);
10043 	poll_threads();
10044 	CU_ASSERT(g_bserrno == 0);
10045 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
10046 	blobid = g_blobid;
10047 	g_blobid = -1;
10048 
10049 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
10050 	poll_threads();
10051 	CU_ASSERT(g_bserrno == 0);
10052 	CU_ASSERT(g_blob != NULL);
10053 	blob = g_blob;
10054 
10055 	g_blob = NULL;
10056 	g_bserrno = -1;
10057 
10058 	return blob;
10059 }
10060 
10061 static void
10062 ut_blob_close_and_delete(struct spdk_blob_store *bs, struct spdk_blob *blob)
10063 {
10064 	spdk_blob_id blobid = spdk_blob_get_id(blob);
10065 
10066 	spdk_blob_close(blob, blob_op_complete, NULL);
10067 	poll_threads();
10068 	CU_ASSERT(g_bserrno == 0);
10069 	g_blob = NULL;
10070 
10071 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
10072 	poll_threads();
10073 	CU_ASSERT(g_bserrno == 0);
10074 	g_bserrno = -1;
10075 }
10076 
10077 static void
10078 suite_blob_setup(void)
10079 {
10080 	suite_bs_setup();
10081 	CU_ASSERT(g_bs != NULL);
10082 
10083 	g_blob = ut_blob_create_and_open(g_bs, NULL);
10084 	CU_ASSERT(g_blob != NULL);
10085 }
10086 
/* Suite teardown: delete the suite's blob, then tear down the blobstore. */
static void
suite_blob_cleanup(void)
{
	/* ut_blob_close_and_delete() sets g_blob to NULL on success. */
	ut_blob_close_and_delete(g_bs, g_blob);
	CU_ASSERT(g_blob == NULL);

	/* suite_bs_cleanup() clears g_bs after a successful unload. */
	suite_bs_cleanup();
	CU_ASSERT(g_bs == NULL);
}
10096 
10097 static int
10098 ut_setup_config_nocopy_noextent(void)
10099 {
10100 	g_dev_copy_enabled = false;
10101 	g_use_extent_table = false;
10102 	g_phys_blocklen = 4096;
10103 
10104 	return 0;
10105 }
10106 
10107 static int
10108 ut_setup_config_nocopy_extent(void)
10109 {
10110 	g_dev_copy_enabled = false;
10111 	g_use_extent_table = true;
10112 	g_phys_blocklen = 4096;
10113 
10114 	return 0;
10115 }
10116 
10117 static int
10118 ut_setup_config_nocopy_extent_16k_phys(void)
10119 {
10120 	g_dev_copy_enabled = false;
10121 	g_use_extent_table = true;
10122 	g_phys_blocklen = 16384;
10123 
10124 	return 0;
10125 }
10126 
10127 
10128 static int
10129 ut_setup_config_copy_noextent(void)
10130 {
10131 	g_dev_copy_enabled = true;
10132 	g_use_extent_table = false;
10133 	g_phys_blocklen = 4096;
10134 
10135 	return 0;
10136 }
10137 
10138 static int
10139 ut_setup_config_copy_extent(void)
10140 {
10141 	g_dev_copy_enabled = true;
10142 	g_use_extent_table = true;
10143 	g_phys_blocklen = 4096;
10144 
10145 	return 0;
10146 }
10147 
/* Pairs a suite-name suffix with the CUnit setup callback that selects the
 * corresponding global test configuration (copy offload / extent table /
 * physical block size). */
struct ut_config {
	const char *suffix;      /* appended to each suite name, e.g. "blob_<suffix>" */
	CU_InitializeFunc setup_cb;  /* one of the ut_setup_config_* functions */
};
10152 
/*
 * Test runner.  Registers the full blob test matrix once per entry in
 * configs[], giving each registration a distinct suite-name suffix so the
 * whole set runs under every global configuration.
 */
int
main(int argc, char **argv)
{
	CU_pSuite		suite, suite_bs, suite_blob, suite_esnap_bs;
	unsigned int		i, num_failures;
	char			suite_name[4096];
	struct ut_config	*config;
	struct ut_config	configs[] = {
		{"nocopy_noextent", ut_setup_config_nocopy_noextent},
		{"nocopy_extent", ut_setup_config_nocopy_extent},
		{"nocopy_extent_16k_phys", ut_setup_config_nocopy_extent_16k_phys},
		{"copy_noextent", ut_setup_config_copy_noextent},
		{"copy_extent", ut_setup_config_copy_extent},
	};

	CU_initialize_registry();

	for (i = 0; i < SPDK_COUNTOF(configs); ++i) {
		config = &configs[i];

		/* "suite": no per-test blobstore set up by the framework. */
		snprintf(suite_name, sizeof(suite_name), "blob_%s", config->suffix);
		suite = CU_add_suite(suite_name, config->setup_cb, NULL);

		/* "suite_bs": a fresh blobstore created/unloaded around the suite. */
		snprintf(suite_name, sizeof(suite_name), "blob_bs_%s", config->suffix);
		suite_bs = CU_add_suite_with_setup_and_teardown(suite_name, config->setup_cb, NULL,
				suite_bs_setup, suite_bs_cleanup);

		/* "suite_blob": blobstore plus one pre-created open blob (g_blob). */
		snprintf(suite_name, sizeof(suite_name), "blob_blob_%s", config->suffix);
		suite_blob = CU_add_suite_with_setup_and_teardown(suite_name, config->setup_cb, NULL,
				suite_blob_setup, suite_blob_cleanup);

		/* "suite_esnap_bs": blobstore configured for external-snapshot tests. */
		snprintf(suite_name, sizeof(suite_name), "blob_esnap_bs_%s", config->suffix);
		suite_esnap_bs = CU_add_suite_with_setup_and_teardown(suite_name, config->setup_cb, NULL,
				 suite_esnap_bs_setup,
				 suite_bs_cleanup);

		CU_ADD_TEST(suite, blob_init);
		CU_ADD_TEST(suite_bs, blob_open);
		CU_ADD_TEST(suite_bs, blob_create);
		CU_ADD_TEST(suite_bs, blob_create_loop);
		CU_ADD_TEST(suite_bs, blob_create_fail);
		CU_ADD_TEST(suite_bs, blob_create_internal);
		CU_ADD_TEST(suite_bs, blob_create_zero_extent);
		CU_ADD_TEST(suite, blob_thin_provision);
		CU_ADD_TEST(suite_bs, blob_snapshot);
		CU_ADD_TEST(suite_bs, blob_clone);
		CU_ADD_TEST(suite_bs, blob_inflate);
		CU_ADD_TEST(suite_bs, blob_delete);
		CU_ADD_TEST(suite_bs, blob_resize_test);
		CU_ADD_TEST(suite_bs, blob_resize_thin_test);
		CU_ADD_TEST(suite, blob_read_only);
		CU_ADD_TEST(suite_bs, channel_ops);
		CU_ADD_TEST(suite_bs, blob_super);
		CU_ADD_TEST(suite_blob, blob_write);
		CU_ADD_TEST(suite_blob, blob_read);
		CU_ADD_TEST(suite_blob, blob_rw_verify);
		CU_ADD_TEST(suite_bs, blob_rw_verify_iov);
		CU_ADD_TEST(suite_blob, blob_rw_verify_iov_nomem);
		CU_ADD_TEST(suite_blob, blob_rw_iov_read_only);
		CU_ADD_TEST(suite_bs, blob_unmap);
		CU_ADD_TEST(suite_bs, blob_iter);
		CU_ADD_TEST(suite_blob, blob_xattr);
		CU_ADD_TEST(suite_bs, blob_parse_md);
		CU_ADD_TEST(suite, bs_load);
		CU_ADD_TEST(suite_bs, bs_load_pending_removal);
		CU_ADD_TEST(suite, bs_load_custom_cluster_size);
		CU_ADD_TEST(suite, bs_load_after_failed_grow);
		CU_ADD_TEST(suite, bs_load_error);
		CU_ADD_TEST(suite_bs, bs_unload);
		CU_ADD_TEST(suite, bs_cluster_sz);
		CU_ADD_TEST(suite_bs, bs_usable_clusters);
		CU_ADD_TEST(suite, bs_resize_md);
		CU_ADD_TEST(suite, bs_destroy);
		CU_ADD_TEST(suite, bs_type);
		CU_ADD_TEST(suite, bs_super_block);
		CU_ADD_TEST(suite, bs_test_recover_cluster_count);
		CU_ADD_TEST(suite, bs_grow_live);
		CU_ADD_TEST(suite, bs_grow_live_no_space);
		CU_ADD_TEST(suite, bs_test_grow);
		CU_ADD_TEST(suite, blob_serialize_test);
		CU_ADD_TEST(suite_bs, blob_crc);
		CU_ADD_TEST(suite, super_block_crc);
		CU_ADD_TEST(suite_blob, blob_dirty_shutdown);
		CU_ADD_TEST(suite_bs, blob_flags);
		CU_ADD_TEST(suite_bs, bs_version);
		CU_ADD_TEST(suite_bs, blob_set_xattrs_test);
		CU_ADD_TEST(suite_bs, blob_thin_prov_alloc);
		CU_ADD_TEST(suite_bs, blob_insert_cluster_msg_test);
		CU_ADD_TEST(suite_bs, blob_thin_prov_rw);
		CU_ADD_TEST(suite, blob_thin_prov_write_count_io);
		CU_ADD_TEST(suite, blob_thin_prov_unmap_cluster);
		CU_ADD_TEST(suite_bs, blob_thin_prov_rle);
		CU_ADD_TEST(suite_bs, blob_thin_prov_rw_iov);
		CU_ADD_TEST(suite, bs_load_iter_test);
		CU_ADD_TEST(suite_bs, blob_snapshot_rw);
		CU_ADD_TEST(suite_bs, blob_snapshot_rw_iov);
		CU_ADD_TEST(suite, blob_relations);
		CU_ADD_TEST(suite, blob_relations2);
		CU_ADD_TEST(suite, blob_relations3);
		CU_ADD_TEST(suite, blobstore_clean_power_failure);
		CU_ADD_TEST(suite, blob_delete_snapshot_power_failure);
		CU_ADD_TEST(suite, blob_create_snapshot_power_failure);
		CU_ADD_TEST(suite_bs, blob_inflate_rw);
		CU_ADD_TEST(suite_bs, blob_snapshot_freeze_io);
		CU_ADD_TEST(suite_bs, blob_operation_split_rw);
		CU_ADD_TEST(suite_bs, blob_operation_split_rw_iov);
		CU_ADD_TEST(suite, blob_io_unit);
		CU_ADD_TEST(suite, blob_io_unit_compatibility);
		CU_ADD_TEST(suite_bs, blob_simultaneous_operations);
		CU_ADD_TEST(suite_bs, blob_persist_test);
		CU_ADD_TEST(suite_bs, blob_decouple_snapshot);
		CU_ADD_TEST(suite_bs, blob_seek_io_unit);
		CU_ADD_TEST(suite_esnap_bs, blob_esnap_create);
		CU_ADD_TEST(suite_bs, blob_nested_freezes);
		CU_ADD_TEST(suite, blob_ext_md_pages);
		CU_ADD_TEST(suite, blob_esnap_io_4096_4096);
		CU_ADD_TEST(suite, blob_esnap_io_512_512);
		CU_ADD_TEST(suite, blob_esnap_io_4096_512);
		CU_ADD_TEST(suite, blob_esnap_io_512_4096);
		CU_ADD_TEST(suite_esnap_bs, blob_esnap_thread_add_remove);
		CU_ADD_TEST(suite_esnap_bs, blob_esnap_clone_snapshot);
		CU_ADD_TEST(suite_esnap_bs, blob_esnap_clone_inflate);
		CU_ADD_TEST(suite_esnap_bs, blob_esnap_clone_decouple);
		CU_ADD_TEST(suite_esnap_bs, blob_esnap_clone_reload);
		CU_ADD_TEST(suite_esnap_bs, blob_esnap_hotplug);
		CU_ADD_TEST(suite_blob, blob_is_degraded);
		CU_ADD_TEST(suite_bs, blob_clone_resize);
		CU_ADD_TEST(suite, blob_esnap_clone_resize);
		CU_ADD_TEST(suite_bs, blob_shallow_copy);
		CU_ADD_TEST(suite_esnap_bs, blob_set_parent);
		CU_ADD_TEST(suite_esnap_bs, blob_set_external_parent);
	}

	allocate_threads(2);
	set_thread(0);

	/* NOTE(review): calloc result is not checked; a NULL here would fault
	 * inside the tests rather than fail cleanly. */
	g_dev_buffer = calloc(1, DEV_BUFFER_SIZE);

	num_failures = spdk_ut_run_tests(argc, argv, NULL);

	free(g_dev_buffer);

	free_threads();

	return num_failures;
}
10299