xref: /spdk/test/unit/lib/blob/blob.c/blob_ut.c (revision e0d7428b482257aa6999b8b4cc44159dcc292df9)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2017 Intel Corporation.
3  *   All rights reserved.
4  *   Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
5  */
6 
7 #include "spdk/stdinc.h"
8 
9 #include "spdk_internal/cunit.h"
10 #include "spdk/blob.h"
11 #include "spdk/string.h"
12 
13 #include "common/lib/ut_multithread.c"
14 #include "../bs_dev_common.c"
15 #include "thread/thread.c"
16 #include "ext_dev.c"
17 #include "blob/blobstore.c"
18 #include "blob/request.c"
19 #include "blob/zeroes.c"
20 #include "blob/blob_bs_dev.c"
21 #include "esnap_dev.c"
22 #define BLOCKLEN DEV_BUFFER_BLOCKLEN
23 
/* Shared state filled in by the async completion callbacks below; tests call
 * poll_threads() and then read these to observe operation results. */
struct spdk_blob_store *g_bs;
spdk_blob_id g_blobid;
struct spdk_blob *g_blob, *g_blob2;
int g_bserrno, g_bserrno2;
struct spdk_xattr_names *g_names;
int g_done;
/* Parallel name/value tables consumed by the xattr get_value callbacks. */
char *g_xattr_names[] = {"first", "second", "third"};
char *g_xattr_values[] = {"one", "two", "three"};
uint64_t g_ctx = 1729;
bool g_use_extent_table = false;
uint64_t g_copied_clusters_count = 0;
35 
/*
 * On-disk layout of the version 1 super block, reproduced here so tests can
 * write a v1 super block and exercise loading blobstores created by older
 * code.  Must be exactly one 4KiB metadata page (see static assert below).
 */
struct spdk_bs_super_block_ver1 {
	uint8_t		signature[8];
	uint32_t        version;
	uint32_t        length;
	uint32_t	clean; /* If there was a clean shutdown, this is 1. */
	spdk_blob_id	super_blob;

	uint32_t	cluster_size; /* In bytes */

	uint32_t	used_page_mask_start; /* Offset from beginning of disk, in pages */
	uint32_t	used_page_mask_len; /* Count, in pages */

	uint32_t	used_cluster_mask_start; /* Offset from beginning of disk, in pages */
	uint32_t	used_cluster_mask_len; /* Count, in pages */

	uint32_t	md_start; /* Offset from beginning of disk, in pages */
	uint32_t	md_len; /* Count, in pages */

	uint8_t		reserved[4036]; /* Pads the structure out to a full 4KiB page */
	uint32_t	crc;
} __attribute__((packed));
SPDK_STATIC_ASSERT(sizeof(struct spdk_bs_super_block_ver1) == 0x1000, "Invalid super block size");
58 
59 static struct spdk_blob *ut_blob_create_and_open(struct spdk_blob_store *bs,
60 		struct spdk_blob_opts *blob_opts);
61 static void ut_blob_close_and_delete(struct spdk_blob_store *bs, struct spdk_blob *blob);
62 static void suite_blob_setup(void);
63 static void suite_blob_cleanup(void);
64 
65 DEFINE_STUB(spdk_memory_domain_memzero, int, (struct spdk_memory_domain *src_domain,
66 		void *src_domain_ctx, struct iovec *iov, uint32_t iovcnt, void (*cpl_cb)(void *, int),
67 		void *cpl_cb_arg), 0);
68 
/*
 * Return true if _blob looks like an esnap (external snapshot) clone:
 * it carries the internal BLOB_EXTERNAL_SNAPSHOT_ID xattr whose value
 * matches (id, id_len), has the SPDK_BLOB_EXTERNAL_SNAPSHOT invalid flag
 * set, and uses the external-snapshot sentinel as its parent id.  Each
 * condition is also CU_ASSERTed individually so a failure report pinpoints
 * which check broke.
 */
static bool
is_esnap_clone(struct spdk_blob *_blob, const void *id, size_t id_len)
{
	const void *val = NULL;
	size_t len = 0;
	bool c0, c1, c2, c3;

	CU_ASSERT(blob_get_xattr_value(_blob, BLOB_EXTERNAL_SNAPSHOT_ID, &val, &len,
				       true) == 0);
	CU_ASSERT((c0 = (len == id_len)));
	CU_ASSERT((c1 = (val != NULL && memcmp(val, id, len) == 0)));
	CU_ASSERT((c2 = !!(_blob->invalid_flags & SPDK_BLOB_EXTERNAL_SNAPSHOT)));
	CU_ASSERT((c3 = (_blob->parent_id == SPDK_BLOBID_EXTERNAL_SNAPSHOT)));

	return c0 && c1 && c2 && c3;
}
85 
/*
 * Inverse of is_esnap_clone(): return true if _blob carries no
 * BLOB_EXTERNAL_SNAPSHOT_ID xattr, does not have the external-snapshot
 * invalid flag, and does not use the external-snapshot parent sentinel.
 * Each condition is also CU_ASSERTed individually.
 */
static bool
is_not_esnap_clone(struct spdk_blob *_blob)
{
	const void *val = NULL;
	size_t len = 0;
	bool c1, c2, c3, c4;

	CU_ASSERT((c1 = (blob_get_xattr_value(_blob, BLOB_EXTERNAL_SNAPSHOT_ID, &val, &len,
					      true) == -ENOENT)));
	CU_ASSERT((c2 = (val == NULL)));
	CU_ASSERT((c3 = ((_blob->invalid_flags & SPDK_BLOB_EXTERNAL_SNAPSHOT) == 0)));
	CU_ASSERT((c4 = (_blob->parent_id != SPDK_BLOBID_EXTERNAL_SNAPSHOT)));

	return c1 && c2 && c3 && c4;
}
101 
102 #define UT_ASSERT_IS_ESNAP_CLONE(_blob, _id, _len) CU_ASSERT(is_esnap_clone(_blob, _id, _len))
103 #define UT_ASSERT_IS_NOT_ESNAP_CLONE(_blob) CU_ASSERT(is_not_esnap_clone(_blob))
104 
105 static void
106 _get_xattr_value(void *arg, const char *name,
107 		 const void **value, size_t *value_len)
108 {
109 	uint64_t i;
110 
111 	SPDK_CU_ASSERT_FATAL(value_len != NULL);
112 	SPDK_CU_ASSERT_FATAL(value != NULL);
113 	CU_ASSERT(arg == &g_ctx);
114 
115 	for (i = 0; i < sizeof(g_xattr_names); i++) {
116 		if (!strcmp(name, g_xattr_names[i])) {
117 			*value_len = strlen(g_xattr_values[i]);
118 			*value = g_xattr_values[i];
119 			break;
120 		}
121 	}
122 }
123 
/*
 * spdk_blob_xattr_opts.get_value callback that always reports a NULL value
 * of length zero; used to exercise creating xattrs with empty values.
 */
static void
_get_xattr_value_null(void *arg, const char *name,
		      const void **value, size_t *value_len)
{
	SPDK_CU_ASSERT_FATAL(value != NULL);
	SPDK_CU_ASSERT_FATAL(value_len != NULL);
	CU_ASSERT(arg == NULL);

	*value = NULL;
	*value_len = 0;
}
135 
136 static int
137 _get_snapshots_count(struct spdk_blob_store *bs)
138 {
139 	struct spdk_blob_list *snapshot = NULL;
140 	int count = 0;
141 
142 	TAILQ_FOREACH(snapshot, &bs->snapshots, link) {
143 		count += 1;
144 	}
145 
146 	return count;
147 }
148 
/*
 * Initialize blob creation options with defaults, then force the extent
 * table setting to the suite-wide g_use_extent_table so each test suite runs
 * with a fixed serialization format.
 */
static void
ut_spdk_blob_opts_init(struct spdk_blob_opts *opts)
{
	spdk_blob_opts_init(opts, sizeof(*opts));
	opts->use_extent_table = g_use_extent_table;
}
155 
/* Blobstore operation completion callback: record the status in g_bserrno. */
static void
bs_op_complete(void *cb_arg, int bserrno)
{
	g_bserrno = bserrno;
}
161 
/* Blobstore init/load completion callback: record the new blobstore handle
 * in g_bs and the status in g_bserrno. */
static void
bs_op_with_handle_complete(void *cb_arg, struct spdk_blob_store *bs,
			   int bserrno)
{
	g_bs = bs;
	g_bserrno = bserrno;
}
169 
170 static void
171 blob_op_complete(void *cb_arg, int bserrno)
172 {
173 	if (cb_arg != NULL) {
174 		int *errp = cb_arg;
175 
176 		*errp = bserrno;
177 	}
178 	g_bserrno = bserrno;
179 }
180 
/* Blob create/snapshot/clone completion callback: record the resulting blob
 * id in g_blobid and the status in g_bserrno. */
static void
blob_op_with_id_complete(void *cb_arg, spdk_blob_id blobid, int bserrno)
{
	g_blobid = blobid;
	g_bserrno = bserrno;
}
187 
/* Blob open completion callback: record the blob handle in g_blob and the
 * status in g_bserrno. */
static void
blob_op_with_handle_complete(void *cb_arg, struct spdk_blob *blb, int bserrno)
{
	g_blob = blb;
	g_bserrno = bserrno;
}
194 
195 static void
196 blob_op_with_handle_complete2(void *cb_arg, struct spdk_blob *blob, int bserrno)
197 {
198 	if (g_blob == NULL) {
199 		g_blob = blob;
200 		g_bserrno = bserrno;
201 	} else {
202 		g_blob2 = blob;
203 		g_bserrno2 = bserrno;
204 	}
205 }
206 
/* Shallow-copy progress callback: record how many clusters have been copied
 * so far in g_copied_clusters_count. */
static void
blob_shallow_copy_status_cb(uint64_t copied_clusters, void *cb_arg)
{
	g_copied_clusters_count = copied_clusters;
}
212 
/*
 * Cleanly unload *bs, then load it back from the same backing device and
 * store the fresh blobstore handle into *bs.  opts may be NULL for default
 * load options.  Asserts on any failure.
 */
static void
ut_bs_reload(struct spdk_blob_store **bs, struct spdk_bs_opts *opts)
{
	struct spdk_bs_dev *dev;

	/* Unload the blob store */
	spdk_bs_unload(*bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	dev = init_dev();
	/* Load an existing blob store */
	spdk_bs_load(dev, opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	*bs = g_bs;

	/* Reset so a later assertion cannot pass on this stale success. */
	g_bserrno = -1;
}
233 
/*
 * Simulate a dirty shutdown: free the in-memory blobstore state directly
 * (without writing the clean-shutdown bit) and then reload from the same
 * backing device, storing the fresh handle into *bs.  Exercises the
 * blobstore recovery path.
 */
static void
ut_bs_dirty_load(struct spdk_blob_store **bs, struct spdk_bs_opts *opts)
{
	struct spdk_bs_dev *dev;

	/* Dirty shutdown */
	bs_free(*bs);

	dev = init_dev();
	/* Load an existing blob store */
	spdk_bs_load(dev, opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	*bs = g_bs;

	/* Reset so a later assertion cannot pass on this stale success. */
	g_bserrno = -1;
}
252 
/*
 * Test blobstore initialization: an unsupported device block length must be
 * rejected with -EINVAL, and init on a valid device must succeed and allow a
 * clean unload.
 */
static void
blob_init(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;

	dev = init_dev();

	/* should fail for an unsupported blocklen */
	dev->blocklen = 500;
	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	dev = init_dev();
	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
}
279 
/*
 * Test super-blob bookkeeping: getting the super blob before one is set
 * returns -ENOENT, and after spdk_bs_set_super() the same id is returned by
 * spdk_bs_get_super().
 */
static void
blob_super(void)
{
	struct spdk_blob_store *bs = g_bs;
	spdk_blob_id blobid;
	struct spdk_blob_opts blob_opts;

	/* Get the super blob without having set one */
	spdk_bs_get_super(bs, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOENT);
	CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID);

	/* Create a blob */
	ut_spdk_blob_opts_init(&blob_opts);
	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid !=  SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	/* Set the blob as the super blob */
	spdk_bs_set_super(bs, blobid, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Get the super blob */
	spdk_bs_get_super(bs, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(blobid == g_blobid);
}
312 
/*
 * Test blob open/close reference counting: opening the same blob id twice
 * returns the same handle, each open needs a matching close, a fully closed
 * blob can be re-opened, and two concurrent opens resolve to one handle.
 */
static void
blob_open(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_blob_opts blob_opts;
	spdk_blob_id blobid, blobid2;

	ut_spdk_blob_opts_init(&blob_opts);
	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;

	blobid2 = spdk_blob_get_id(blob);
	CU_ASSERT(blobid == blobid2);

	/* Try to open file again.  It should return success. */
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(blob == g_blob);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/*
	 * Close the file a second time, releasing the second reference.  This
	 *  should succeed.
	 */
	blob = g_blob;
	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/*
	 * Try to open file again.  It should succeed.  This tests the case
	 *  where the file is opened, closed, then re-opened again.
	 */
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;
	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Try to open file twice in succession.  This should return the same
	 * blob object.
	 */
	g_blob = NULL;
	g_blob2 = NULL;
	g_bserrno = -1;
	g_bserrno2 = -1;
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete2, NULL);
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete2, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_bserrno2 == 0);
	CU_ASSERT(g_blob != NULL);
	CU_ASSERT(g_blob2 != NULL);
	CU_ASSERT(g_blob == g_blob2);

	/* The double open left a refcount of 2 on the shared handle: drop one
	 * reference here, then close-and-delete drops the last one. */
	g_bserrno = -1;
	spdk_blob_close(g_blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	ut_blob_close_and_delete(bs, g_blob);
}
392 
/*
 * Test blob creation variants: 10 clusters, 0 clusters, default options
 * (opts == NULL), and a creation request larger than the blobstore, which
 * must fail with -ENOSPC.
 */
static void
blob_create(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_blob_opts opts;
	spdk_blob_id blobid;

	/* Create blob with 10 clusters */

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;

	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;
	/* Not thin provisioned, so all 10 clusters are allocated up front. */
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 10);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Create blob with 0 clusters */

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 0;

	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Create blob with default options (opts == NULL) */

	spdk_bs_create_blob_ext(bs, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;
	/* Default options create an empty (zero-cluster) blob. */
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Try to create blob with size larger than blobstore */

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = bs->total_clusters + 1;

	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOSPC);
}
476 
/*
 * Test that blobs created with zero clusters (via the public API and via the
 * internal bs_create_blob with NULL internal options) record an extent table
 * but allocate no extent pages.
 */
static void
blob_create_zero_extent(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	spdk_blob_id blobid;

	/* Create blob with default options (opts == NULL) */
	spdk_bs_create_blob_ext(bs, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
	/* Extent table exists but holds no pages for a zero-length blob. */
	CU_ASSERT(blob->extent_table_found == true);
	CU_ASSERT(blob->active.extent_pages_array_size == 0);
	CU_ASSERT(blob->active.extent_pages == NULL);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Create blob with NULL internal options  */
	bs_create_blob(bs, NULL, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;
	/* No internal xattrs should have been created. */
	CU_ASSERT(TAILQ_FIRST(&blob->xattrs_internal) == NULL);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
	CU_ASSERT(blob->extent_table_found == true);
	CU_ASSERT(blob->active.extent_pages_array_size == 0);
	CU_ASSERT(blob->active.extent_pages == NULL);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
}
527 
528 /*
529  * Create and delete one blob in a loop over and over again.  This helps ensure
530  * that the internal bit masks tracking used clusters and md_pages are being
531  * tracked correctly.
532  */
static void
blob_create_loop(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts opts;
	uint32_t i, loop_count;

	/* Cycle enough times to wrap all md-page and cluster bitmap slots
	 * several times over, so stale bits would be detected. */
	loop_count = 4 * spdk_max(spdk_bit_array_capacity(bs->used_md_pages),
				  spdk_bit_pool_capacity(bs->used_clusters));

	for (i = 0; i < loop_count; i++) {
		ut_spdk_blob_opts_init(&opts);
		opts.num_clusters = 1;
		/* Reset globals so each iteration's assertions are meaningful. */
		g_bserrno = -1;
		g_blobid = SPDK_BLOBID_INVALID;
		spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
		spdk_bs_delete_blob(bs, g_blobid, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
	}
}
557 
558 static void
559 blob_create_fail(void)
560 {
561 	struct spdk_blob_store *bs = g_bs;
562 	struct spdk_blob_opts opts;
563 	spdk_blob_id blobid;
564 	uint32_t used_blobids_count = spdk_bit_array_count_set(bs->used_blobids);
565 	uint32_t used_md_pages_count = spdk_bit_array_count_set(bs->used_md_pages);
566 
567 	/* NULL callback */
568 	ut_spdk_blob_opts_init(&opts);
569 	opts.xattrs.names = g_xattr_names;
570 	opts.xattrs.get_value = NULL;
571 	opts.xattrs.count = 1;
572 	opts.xattrs.ctx = &g_ctx;
573 
574 	blobid = spdk_bit_array_find_first_clear(bs->used_md_pages, 0);
575 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
576 	poll_threads();
577 	CU_ASSERT(g_bserrno == -EINVAL);
578 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
579 	CU_ASSERT(spdk_bit_array_count_set(bs->used_blobids) == used_blobids_count);
580 	CU_ASSERT(spdk_bit_array_count_set(bs->used_md_pages) == used_md_pages_count);
581 
582 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
583 	poll_threads();
584 	CU_ASSERT(g_bserrno == -ENOENT);
585 	SPDK_CU_ASSERT_FATAL(g_blob == NULL);
586 
587 	ut_bs_reload(&bs, NULL);
588 	CU_ASSERT(spdk_bit_array_count_set(bs->used_blobids) == used_blobids_count);
589 	CU_ASSERT(spdk_bit_array_count_set(bs->used_md_pages) == used_md_pages_count);
590 
591 	spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL);
592 	poll_threads();
593 	CU_ASSERT(g_blob == NULL);
594 	CU_ASSERT(g_bserrno == -ENOENT);
595 }
596 
/*
 * Test the internal bs_create_blob() entry point: internal xattrs set at
 * creation must be readable through the internal getter but hidden from the
 * public spdk_blob_get_xattr_value(), and NULL internal options must produce
 * a blob with no internal xattrs.
 */
static void
blob_create_internal(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_blob_opts opts;
	struct spdk_blob_xattr_opts internal_xattrs;
	const void *value;
	size_t value_len;
	spdk_blob_id blobid;
	int rc;

	/* Create blob with custom xattrs */

	ut_spdk_blob_opts_init(&opts);
	blob_xattrs_init(&internal_xattrs);
	internal_xattrs.count = 3;
	internal_xattrs.names = g_xattr_names;
	internal_xattrs.get_value = _get_xattr_value;
	internal_xattrs.ctx = &g_ctx;

	bs_create_blob(bs, &opts, &internal_xattrs, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;

	/* Internal getter (internal=true) must see all three xattrs. */
	rc = blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len, true);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);

	rc = blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len, true);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);

	rc = blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len, true);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);

	/* Public getter must NOT expose internal xattrs. */
	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len);
	CU_ASSERT(rc != 0);

	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len);
	CU_ASSERT(rc != 0);

	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len);
	CU_ASSERT(rc != 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Create blob with NULL internal options  */

	bs_create_blob(bs, NULL, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	CU_ASSERT(TAILQ_FIRST(&g_blob->xattrs_internal) == NULL);
	CU_ASSERT(spdk_blob_get_num_clusters(g_blob) == 0);

	blob = g_blob;

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
}
682 
/*
 * Test thin-provisioned blob creation: no clusters are allocated up front
 * (only extent pages when the extent table is in use), and the thin-provision
 * flag survives a dirty shutdown + recovery load.
 */
static void
blob_thin_provision(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct spdk_blob *blob;
	struct spdk_blob_opts opts;
	struct spdk_bs_opts bs_opts;
	spdk_blob_id blobid;

	dev = init_dev();
	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
	snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");

	/* Initialize a new blob store */
	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);

	bs = g_bs;

	/* Create blob with thin provisioning enabled */

	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = true;
	opts.num_clusters = 10;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);
	CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);
	/* In thin provisioning with num_clusters is set, if not using the
	 * extent table, there is no allocation. If extent table is used,
	 * there is related allocation happened. */
	if (blob->extent_table_found == true) {
		CU_ASSERT(blob->active.extent_pages_array_size > 0);
		CU_ASSERT(blob->active.extent_pages != NULL);
	} else {
		CU_ASSERT(blob->active.extent_pages_array_size == 0);
		CU_ASSERT(blob->active.extent_pages == NULL);
	}

	spdk_blob_close(blob, blob_op_complete, NULL);
	CU_ASSERT(g_bserrno == 0);

	/* Do not shut down cleanly.  This makes sure that when we load again
	 *  and try to recover a valid used_cluster map, that blobstore will
	 *  ignore clusters with index 0 since these are unallocated clusters.
	 */
	ut_bs_dirty_load(&bs, &bs_opts);

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;
	/* Thin-provision flag and zero allocation must survive recovery. */
	CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);

	ut_blob_close_and_delete(bs, blob);

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
}
750 
751 static void
752 blob_snapshot(void)
753 {
754 	struct spdk_blob_store *bs = g_bs;
755 	struct spdk_blob *blob;
756 	struct spdk_blob *snapshot, *snapshot2;
757 	struct spdk_blob_bs_dev *blob_bs_dev;
758 	struct spdk_blob_opts opts;
759 	struct spdk_blob_xattr_opts xattrs;
760 	spdk_blob_id blobid;
761 	spdk_blob_id snapshotid;
762 	spdk_blob_id snapshotid2;
763 	const void *value;
764 	size_t value_len;
765 	int rc;
766 	spdk_blob_id ids[2];
767 	size_t count;
768 
769 	/* Create blob with 10 clusters */
770 	ut_spdk_blob_opts_init(&opts);
771 	opts.num_clusters = 10;
772 
773 	blob = ut_blob_create_and_open(bs, &opts);
774 	blobid = spdk_blob_get_id(blob);
775 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
776 
777 	/* Create snapshot from blob */
778 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 0);
779 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
780 	poll_threads();
781 	CU_ASSERT(g_bserrno == 0);
782 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
783 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
784 	snapshotid = g_blobid;
785 
786 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
787 	poll_threads();
788 	CU_ASSERT(g_bserrno == 0);
789 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
790 	snapshot = g_blob;
791 	CU_ASSERT(snapshot->data_ro == true);
792 	CU_ASSERT(snapshot->md_ro == true);
793 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10);
794 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(snapshot) == 10);
795 
796 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
797 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);
798 	CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
799 	CU_ASSERT(spdk_mem_all_zero(blob->active.clusters,
800 				    blob->active.num_clusters * sizeof(blob->active.clusters[0])));
801 
802 	/* Try to create snapshot from clone with xattrs */
803 	xattrs.names = g_xattr_names;
804 	xattrs.get_value = _get_xattr_value;
805 	xattrs.count = 3;
806 	xattrs.ctx = &g_ctx;
807 	spdk_bs_create_snapshot(bs, blobid, &xattrs, blob_op_with_id_complete, NULL);
808 	poll_threads();
809 	CU_ASSERT(g_bserrno == 0);
810 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
811 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 2);
812 	snapshotid2 = g_blobid;
813 
814 	spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL);
815 	CU_ASSERT(g_bserrno == 0);
816 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
817 	snapshot2 = g_blob;
818 	CU_ASSERT(snapshot2->data_ro == true);
819 	CU_ASSERT(snapshot2->md_ro == true);
820 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot2) == 10);
821 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(snapshot2) == 0);
822 
823 	/* Confirm that blob is backed by snapshot2 and snapshot2 is backed by snapshot */
824 	CU_ASSERT(snapshot->back_bs_dev == NULL);
825 	SPDK_CU_ASSERT_FATAL(blob->back_bs_dev != NULL);
826 	SPDK_CU_ASSERT_FATAL(snapshot2->back_bs_dev != NULL);
827 
828 	blob_bs_dev = (struct spdk_blob_bs_dev *)blob->back_bs_dev;
829 	CU_ASSERT(blob_bs_dev->blob == snapshot2);
830 
831 	blob_bs_dev = (struct spdk_blob_bs_dev *)snapshot2->back_bs_dev;
832 	CU_ASSERT(blob_bs_dev->blob == snapshot);
833 
834 	rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[0], &value, &value_len);
835 	CU_ASSERT(rc == 0);
836 	SPDK_CU_ASSERT_FATAL(value != NULL);
837 	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
838 	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);
839 
840 	rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[1], &value, &value_len);
841 	CU_ASSERT(rc == 0);
842 	SPDK_CU_ASSERT_FATAL(value != NULL);
843 	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
844 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);
845 
846 	rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[2], &value, &value_len);
847 	CU_ASSERT(rc == 0);
848 	SPDK_CU_ASSERT_FATAL(value != NULL);
849 	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
850 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);
851 
852 	/* Confirm that blob is clone of snapshot2, and snapshot2 is clone of snapshot */
853 	count = 2;
854 	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0);
855 	CU_ASSERT(count == 1);
856 	CU_ASSERT(ids[0] == blobid);
857 
858 	count = 2;
859 	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);
860 	CU_ASSERT(count == 1);
861 	CU_ASSERT(ids[0] == snapshotid2);
862 
863 	/* Try to create snapshot from snapshot */
864 	spdk_bs_create_snapshot(bs, snapshotid, NULL, blob_op_with_id_complete, NULL);
865 	poll_threads();
866 	CU_ASSERT(g_bserrno == -EINVAL);
867 	CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID);
868 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 2);
869 
870 	/* Delete blob and confirm that it is no longer on snapshot2 clone list */
871 	ut_blob_close_and_delete(bs, blob);
872 	count = 2;
873 	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0);
874 	CU_ASSERT(count == 0);
875 
876 	/* Delete snapshot2 and confirm that it is no longer on snapshot clone list */
877 	ut_blob_close_and_delete(bs, snapshot2);
878 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
879 	count = 2;
880 	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0);
881 	CU_ASSERT(count == 0);
882 
883 	ut_blob_close_and_delete(bs, snapshot);
884 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 0);
885 }
886 
/*
 * Test that blob I/O submitted while a snapshot is being taken is frozen
 * (queued on the channel) and replayed successfully once the snapshot
 * completes.
 */
static void
blob_snapshot_freeze_io(void)
{
	struct spdk_io_channel *channel;
	struct spdk_bs_channel *bs_channel;
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_blob_opts opts;
	spdk_blob_id blobid;
	uint32_t num_of_pages = 10;
	uint8_t payload_read[num_of_pages * BLOCKLEN];
	uint8_t payload_write[num_of_pages * BLOCKLEN];
	uint8_t payload_zero[num_of_pages * BLOCKLEN];

	memset(payload_write, 0xE5, sizeof(payload_write));
	memset(payload_read, 0x00, sizeof(payload_read));
	memset(payload_zero, 0x00, sizeof(payload_zero));

	/* Test freeze I/O during snapshot */
	channel = spdk_bs_alloc_io_channel(bs);
	bs_channel = spdk_io_channel_get_ctx(channel);

	/* Create blob with 10 clusters */
	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;
	opts.thin_provision = false;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);

	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);

	/* This is implementation specific.
	 * Flag 'frozen_io' is set in _spdk_bs_snapshot_freeze_cpl callback.
	 * Four async I/O operations happen before that. */
	poll_thread_times(0, 5);

	CU_ASSERT(TAILQ_EMPTY(&bs_channel->queued_io));

	/* Blob I/O should be frozen here */
	CU_ASSERT(blob->frozen_refcnt == 1);

	/* Write to the blob */
	spdk_blob_io_write(blob, channel, payload_write, 0, num_of_pages, blob_op_complete, NULL);

	/* Verify that I/O is queued */
	CU_ASSERT(!TAILQ_EMPTY(&bs_channel->queued_io));
	/* Verify that payload is not written to disk, at this point the blobs already switched */
	CU_ASSERT(blob->active.clusters[0] == 0);

	/* Finish all operations including spdk_bs_create_snapshot */
	poll_threads();

	/* Verify snapshot */
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);

	/* Verify that blob has unset frozen_io */
	CU_ASSERT(blob->frozen_refcnt == 0);

	/* Verify that postponed I/O completed successfully by comparing payload */
	spdk_blob_io_read(blob, channel, payload_read, 0, num_of_pages, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_write, payload_read, num_of_pages * BLOCKLEN) == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();

	ut_blob_close_and_delete(bs, blob);
}
959 
/* Verify clone semantics: a clone can only be created from a read-only blob
 * (snapshot or explicitly read-only), inherits the parent's logical cluster
 * count with zero clusters allocated, and can carry xattrs supplied at
 * creation time.
 */
static void
blob_clone(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts opts;
	struct spdk_blob *blob, *snapshot, *clone;
	spdk_blob_id blobid, cloneid, snapshotid;
	struct spdk_blob_xattr_opts xattrs;
	const void *value;
	size_t value_len;
	int rc;

	/* Create blob with 10 clusters */

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);

	/* Create snapshot */
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snapshotid = g_blobid;

	/* A snapshot must come back read-only in both data and metadata. */
	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot = g_blob;
	CU_ASSERT(snapshot->data_ro == true);
	CU_ASSERT(snapshot->md_ro == true);
	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10);

	spdk_blob_close(snapshot, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Create clone from snapshot with xattrs */
	xattrs.names = g_xattr_names;
	xattrs.get_value = _get_xattr_value;
	xattrs.count = 3;
	xattrs.ctx = &g_ctx;

	spdk_bs_create_clone(bs, snapshotid, &xattrs, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	cloneid = g_blobid;

	/* Clone is writable, same logical size, but thin: no clusters allocated. */
	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	clone = g_blob;
	CU_ASSERT(clone->data_ro == false);
	CU_ASSERT(clone->md_ro == false);
	CU_ASSERT(spdk_blob_get_num_clusters(clone) == 10);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(clone) == 0);

	/* All three xattrs passed at clone creation must be readable back. */
	rc = spdk_blob_get_xattr_value(clone, g_xattr_names[0], &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);

	rc = spdk_blob_get_xattr_value(clone, g_xattr_names[1], &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);

	rc = spdk_blob_get_xattr_value(clone, g_xattr_names[2], &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);


	spdk_blob_close(clone, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Try to create clone from not read only blob */
	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);
	CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID);

	/* Mark blob as read only */
	spdk_blob_set_read_only(blob);
	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Create clone from read only blob */
	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	cloneid = g_blobid;

	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	clone = g_blob;
	CU_ASSERT(clone->data_ro == false);
	CU_ASSERT(clone->md_ro == false);
	CU_ASSERT(spdk_blob_get_num_clusters(clone) == 10);

	ut_blob_close_and_delete(bs, clone);
	ut_blob_close_and_delete(bs, blob);
}
1077 
/* Exercise blob inflation on a thin-provisioned blob, both with and without a
 * parent.  With decouple_parent == false, spdk_bs_inflate_blob() allocates all
 * clusters and makes the blob thick; with decouple_parent == true,
 * spdk_bs_blob_decouple_parent() only removes the parent relationship and
 * allocates no clusters.  In both cases the snapshot becomes deletable.
 */
static void
_blob_inflate(bool decouple_parent)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts opts;
	struct spdk_blob *blob, *snapshot;
	spdk_blob_id blobid, snapshotid;
	struct spdk_io_channel *channel;
	uint64_t free_clusters;

	channel = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(channel != NULL);

	/* Create blob with 10 clusters */

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;
	opts.thin_provision = true;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
	CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == true);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);

	/* 1) Blob with no parent */
	if (decouple_parent) {
		/* Decouple parent of blob with no parent (should fail) */
		spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno != 0);
	} else {
		/* Inflate of thin blob with no parent should made it thick */
		spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == false);
		CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 10);
	}

	/* Snapshotting turns the blob back into a thin clone of the snapshot. */
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snapshotid = g_blobid;

	CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == true);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);

	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot = g_blob;
	CU_ASSERT(snapshot->data_ro == true);
	CU_ASSERT(snapshot->md_ro == true);
	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10);

	spdk_blob_close(snapshot, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	free_clusters = spdk_bs_free_cluster_count(bs);

	/* 2) Blob with parent */
	if (!decouple_parent) {
		/* Do full blob inflation */
		spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		/* all 10 clusters should be allocated */
		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 10);
		CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 10);
	} else {
		/* Decouple parent of blob */
		spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		/* when only parent is removed, none of the clusters should be allocated */
		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters);
		CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);
	}

	/* Now, it should be possible to delete snapshot */
	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* After inflation the blob is thick; after decouple it stays thin. */
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
	CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == decouple_parent);

	spdk_bs_free_io_channel(channel);
	poll_threads();

	ut_blob_close_and_delete(bs, blob);
}
1174 
1175 static void
1176 blob_inflate(void)
1177 {
1178 	_blob_inflate(false);
1179 	_blob_inflate(true);
1180 }
1181 
1182 static void
1183 blob_delete(void)
1184 {
1185 	struct spdk_blob_store *bs = g_bs;
1186 	struct spdk_blob_opts blob_opts;
1187 	spdk_blob_id blobid;
1188 
1189 	/* Create a blob and then delete it. */
1190 	ut_spdk_blob_opts_init(&blob_opts);
1191 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
1192 	poll_threads();
1193 	CU_ASSERT(g_bserrno == 0);
1194 	CU_ASSERT(g_blobid > 0);
1195 	blobid = g_blobid;
1196 
1197 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
1198 	poll_threads();
1199 	CU_ASSERT(g_bserrno == 0);
1200 
1201 	/* Try to open the blob */
1202 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
1203 	poll_threads();
1204 	CU_ASSERT(g_bserrno == -ENOENT);
1205 }
1206 
/* Verify resize semantics on a thick-provisioned blob: growth allocates
 * clusters immediately, shrinking releases them only at the next md sync,
 * resize is rejected on a read-only blob (-EPERM) and when the request
 * exceeds the blobstore capacity (-ENOSPC).
 */
static void
blob_resize_test(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	uint64_t free_clusters;

	free_clusters = spdk_bs_free_cluster_count(bs);

	blob = ut_blob_create_and_open(bs, NULL);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);

	/* Confirm that resize fails if blob is marked read-only. */
	blob->md_ro = true;
	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EPERM);
	blob->md_ro = false;

	/* The blob started at 0 clusters. Resize it to be 5. */
	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT((free_clusters - 5) == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 5);

	/* Shrink the blob to 3 clusters. This will not actually release
	 * the old clusters until the blob is synced.
	 */
	spdk_blob_resize(blob, 3, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	/* Verify there are still 5 clusters in use */
	CU_ASSERT((free_clusters - 5) == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 3);

	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	/* Now there are only 3 clusters in use */
	CU_ASSERT((free_clusters - 3) == spdk_bs_free_cluster_count(bs));

	/* Resize the blob to be 10 clusters. Growth takes effect immediately. */
	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT((free_clusters - 10) == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 10);

	/* Try to resize the blob to size larger than blobstore. */
	spdk_blob_resize(blob, bs->total_clusters + 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOSPC);

	ut_blob_close_and_delete(bs, blob);
}
1264 
1265 static void
1266 blob_resize_thin_test(void)
1267 {
1268 	struct spdk_blob_store *bs = g_bs;
1269 	struct spdk_blob *blob;
1270 	struct spdk_blob_opts opts;
1271 	struct spdk_io_channel *blob_ch;
1272 	uint64_t free_clusters;
1273 	uint64_t io_units_per_cluster;
1274 	uint64_t offset;
1275 	uint8_t buf1[DEV_BUFFER_BLOCKLEN];
1276 
1277 	free_clusters = spdk_bs_free_cluster_count(bs);
1278 
1279 	blob_ch = spdk_bs_alloc_io_channel(bs);
1280 	SPDK_CU_ASSERT_FATAL(blob_ch != NULL);
1281 
1282 	/* Create blob with thin provisioning enabled */
1283 	ut_spdk_blob_opts_init(&opts);
1284 	opts.thin_provision = true;
1285 	opts.num_clusters = 0;
1286 
1287 	blob = ut_blob_create_and_open(bs, &opts);
1288 	CU_ASSERT((free_clusters) == spdk_bs_free_cluster_count(bs));
1289 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);
1290 	io_units_per_cluster = bs_io_units_per_cluster(blob);
1291 
1292 	/* The blob started at 0 clusters. Resize it to be 6. */
1293 	spdk_blob_resize(blob, 6, blob_op_complete, NULL);
1294 	poll_threads();
1295 	CU_ASSERT(g_bserrno == 0);
1296 	CU_ASSERT((free_clusters) == spdk_bs_free_cluster_count(bs));
1297 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);
1298 
1299 	/* Write on cluster 0,2,4 and 5 of blob */
1300 	for (offset = 0; offset < io_units_per_cluster; offset++) {
1301 		spdk_blob_io_write(blob, blob_ch, buf1, offset, 1, blob_op_complete, NULL);
1302 		poll_threads();
1303 		CU_ASSERT(g_bserrno == 0);
1304 	}
1305 	for (offset = 2 * io_units_per_cluster; offset < 3 * io_units_per_cluster; offset++) {
1306 		spdk_blob_io_write(blob, blob_ch, buf1, offset, 1, blob_op_complete, NULL);
1307 		poll_threads();
1308 		CU_ASSERT(g_bserrno == 0);
1309 	}
1310 	for (offset = 4 * io_units_per_cluster; offset < 5 * io_units_per_cluster; offset++) {
1311 		spdk_blob_io_write(blob, blob_ch, buf1, offset, 1, blob_op_complete, NULL);
1312 		poll_threads();
1313 		CU_ASSERT(g_bserrno == 0);
1314 	}
1315 	for (offset = 5 * io_units_per_cluster; offset < 6 * io_units_per_cluster; offset++) {
1316 		spdk_blob_io_write(blob, blob_ch, buf1, offset, 1, blob_op_complete, NULL);
1317 		poll_threads();
1318 		CU_ASSERT(g_bserrno == 0);
1319 	}
1320 
1321 	/* Check allocated clusters after write */
1322 	CU_ASSERT((free_clusters - 4) == spdk_bs_free_cluster_count(bs));
1323 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 4);
1324 
1325 	/* Shrink the blob to 2 clusters. This will not actually release
1326 	 * the old clusters until the blob is synced.
1327 	 */
1328 	spdk_blob_resize(blob, 2, blob_op_complete, NULL);
1329 	poll_threads();
1330 	CU_ASSERT(g_bserrno == 0);
1331 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 2);
1332 	CU_ASSERT((free_clusters - 4) == spdk_bs_free_cluster_count(bs));
1333 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 1);
1334 
1335 	/* Sync blob: 4 clusters were truncated but only 3 of them was allocated */
1336 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
1337 	poll_threads();
1338 	CU_ASSERT(g_bserrno == 0);
1339 	CU_ASSERT((free_clusters - 1) == spdk_bs_free_cluster_count(bs));
1340 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 2);
1341 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 1);
1342 
1343 	spdk_bs_free_io_channel(blob_ch);
1344 	ut_blob_close_and_delete(bs, blob);
1345 }
1346 
1347 static void
1348 blob_read_only(void)
1349 {
1350 	struct spdk_blob_store *bs;
1351 	struct spdk_bs_dev *dev;
1352 	struct spdk_blob *blob;
1353 	struct spdk_bs_opts opts;
1354 	spdk_blob_id blobid;
1355 	int rc;
1356 
1357 	dev = init_dev();
1358 	spdk_bs_opts_init(&opts, sizeof(opts));
1359 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
1360 
1361 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
1362 	poll_threads();
1363 	CU_ASSERT(g_bserrno == 0);
1364 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
1365 	bs = g_bs;
1366 
1367 	blob = ut_blob_create_and_open(bs, NULL);
1368 	blobid = spdk_blob_get_id(blob);
1369 
1370 	rc = spdk_blob_set_read_only(blob);
1371 	CU_ASSERT(rc == 0);
1372 
1373 	CU_ASSERT(blob->data_ro == false);
1374 	CU_ASSERT(blob->md_ro == false);
1375 
1376 	spdk_blob_sync_md(blob, bs_op_complete, NULL);
1377 	poll_threads();
1378 
1379 	CU_ASSERT(blob->data_ro == true);
1380 	CU_ASSERT(blob->md_ro == true);
1381 	CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);
1382 
1383 	spdk_blob_close(blob, blob_op_complete, NULL);
1384 	poll_threads();
1385 	CU_ASSERT(g_bserrno == 0);
1386 
1387 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
1388 	poll_threads();
1389 	CU_ASSERT(g_bserrno == 0);
1390 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
1391 	blob = g_blob;
1392 
1393 	CU_ASSERT(blob->data_ro == true);
1394 	CU_ASSERT(blob->md_ro == true);
1395 	CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);
1396 
1397 	spdk_blob_close(blob, blob_op_complete, NULL);
1398 	poll_threads();
1399 	CU_ASSERT(g_bserrno == 0);
1400 
1401 	ut_bs_reload(&bs, &opts);
1402 
1403 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
1404 	poll_threads();
1405 	CU_ASSERT(g_bserrno == 0);
1406 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
1407 	blob = g_blob;
1408 
1409 	CU_ASSERT(blob->data_ro == true);
1410 	CU_ASSERT(blob->md_ro == true);
1411 	CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);
1412 
1413 	ut_blob_close_and_delete(bs, blob);
1414 
1415 	spdk_bs_unload(bs, bs_op_complete, NULL);
1416 	poll_threads();
1417 	CU_ASSERT(g_bserrno == 0);
1418 }
1419 
1420 static void
1421 channel_ops(void)
1422 {
1423 	struct spdk_blob_store *bs = g_bs;
1424 	struct spdk_io_channel *channel;
1425 
1426 	channel = spdk_bs_alloc_io_channel(bs);
1427 	CU_ASSERT(channel != NULL);
1428 
1429 	spdk_bs_free_io_channel(channel);
1430 	poll_threads();
1431 }
1432 
/* Validate spdk_blob_io_write() argument checking and the basic write path:
 * write to a zero-sized blob (-EINVAL), to a read-only blob (-EPERM), a valid
 * write, and writes starting at or crossing past the end of the blob
 * (-EINVAL).  Uses the shared g_blob created by the suite setup.
 */
static void
blob_write(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob = g_blob;
	struct spdk_io_channel *channel;
	uint64_t io_units_per_cluster;
	uint8_t payload[10 * BLOCKLEN];

	io_units_per_cluster = spdk_bs_get_cluster_size(bs) / spdk_bs_get_io_unit_size(bs);

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	/* Write to a blob with 0 size */
	spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Resize the blob */
	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Confirm that write fails if blob is marked read-only. */
	blob->data_ro = true;
	spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EPERM);
	blob->data_ro = false;

	/* Write to the blob */
	spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Write starting beyond the end */
	spdk_blob_io_write(blob, channel, payload, 5 * io_units_per_cluster, 1, blob_op_complete,
			   NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Write starting at a valid location but going off the end */
	spdk_blob_io_write(blob, channel, payload, 4 * io_units_per_cluster, io_units_per_cluster + 1,
			   blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}
1484 
/* Validate spdk_blob_io_read() argument checking and the basic read path:
 * read from a zero-sized blob (-EINVAL), read succeeds on a read-only blob,
 * a valid read, and reads starting at or crossing past the end of the blob
 * (-EINVAL).  Mirrors blob_write() for the read direction.
 */
static void
blob_read(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob = g_blob;
	struct spdk_io_channel *channel;
	uint64_t io_units_per_cluster;
	uint8_t payload[10 * BLOCKLEN];

	io_units_per_cluster = spdk_bs_get_cluster_size(bs) / spdk_bs_get_io_unit_size(bs);

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	/* Read from a blob with 0 size */
	spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Resize the blob */
	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Confirm that read passes if blob is marked read-only. */
	blob->data_ro = true;
	spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	blob->data_ro = false;

	/* Read from the blob */
	spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Read starting beyond the end */
	spdk_blob_io_read(blob, channel, payload, 5 * io_units_per_cluster, 1, blob_op_complete,
			  NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Read starting at a valid location but going off the end */
	spdk_blob_io_read(blob, channel, payload, 4 * io_units_per_cluster, io_units_per_cluster + 1,
			  blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}
1536 
/* Round-trip a pattern through write and read at a non-zero offset and
 * verify the data matches.
 */
static void
blob_rw_verify(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob = g_blob;
	struct spdk_io_channel *channel;
	uint8_t payload_read[10 * BLOCKLEN];
	uint8_t payload_write[10 * BLOCKLEN];

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	spdk_blob_resize(blob, 32, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Write 10 io units of 0xE5 starting at io unit 4. */
	memset(payload_write, 0xE5, sizeof(payload_write));
	spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	memset(payload_read, 0x00, sizeof(payload_read));
	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	/* NOTE(review): only the first 4 of the 10 io units written are
	 * compared here - presumably a spot check; confirm whether the full
	 * 10 * BLOCKLEN comparison was intended. */
	CU_ASSERT(memcmp(payload_write, payload_read, 4 * BLOCKLEN) == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}
1567 
/* Verify vectored read/write that crosses a cluster boundary.  The blob's
 * second cluster is manually relocated so the test can also prove that the
 * original (now unreferenced) cluster on "disk" was left untouched.
 * Assumes a cluster is 256 io units - matches the bs_dev_common.c test
 * device configuration; confirm if that changes.
 */
static void
blob_rw_verify_iov(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_io_channel *channel;
	uint8_t payload_read[10 * BLOCKLEN];
	uint8_t payload_write[10 * BLOCKLEN];
	struct iovec iov_read[3];
	struct iovec iov_write[3];
	void *buf;
	uint32_t first_data_cluster = FIRST_DATA_CLUSTER(bs);

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	blob = ut_blob_create_and_open(bs, NULL);

	spdk_blob_resize(blob, 2, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/*
	 * Manually adjust the offset of the blob's second cluster.  This allows
	 *  us to make sure that the readv/write code correctly accounts for I/O
	 *  that cross cluster boundaries.  Start by asserting that the allocated
	 *  clusters are where we expect before modifying the second cluster.
	 */
	CU_ASSERT(blob->active.clusters[0] == first_data_cluster * 256);
	CU_ASSERT(blob->active.clusters[1] == (first_data_cluster + 1) * 256);
	blob->active.clusters[1] = (first_data_cluster + 2) * 256;

	memset(payload_write, 0xE5, sizeof(payload_write));
	iov_write[0].iov_base = payload_write;
	iov_write[0].iov_len = 1 * BLOCKLEN;
	iov_write[1].iov_base = payload_write + 1 * BLOCKLEN;
	iov_write[1].iov_len = 5 * BLOCKLEN;
	iov_write[2].iov_base = payload_write + 6 * BLOCKLEN;
	iov_write[2].iov_len = 4 * BLOCKLEN;
	/*
	 * Choose a page offset just before the cluster boundary.  The first 6 pages of payload
	 *  will get written to the first cluster, the last 4 to the second cluster.
	 */
	spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Read back with a different iov split to exercise iov accounting. */
	memset(payload_read, 0xAA, sizeof(payload_read));
	iov_read[0].iov_base = payload_read;
	iov_read[0].iov_len = 3 * BLOCKLEN;
	iov_read[1].iov_base = payload_read + 3 * BLOCKLEN;
	iov_read[1].iov_len = 4 * BLOCKLEN;
	iov_read[2].iov_base = payload_read + 7 * BLOCKLEN;
	iov_read[2].iov_len = 3 * BLOCKLEN;
	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_write, payload_read, 10 * BLOCKLEN) == 0);

	buf = calloc(1, 256 * BLOCKLEN);
	SPDK_CU_ASSERT_FATAL(buf != NULL);
	/* Check that cluster 2 on "disk" was not modified. */
	CU_ASSERT(memcmp(buf, &g_dev_buffer[(first_data_cluster + 1) * 256 * BLOCKLEN],
			 256 * BLOCKLEN) == 0);
	free(buf);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}
1641 
1642 static uint32_t
1643 bs_channel_get_req_count(struct spdk_io_channel *_channel)
1644 {
1645 	struct spdk_bs_channel *channel = spdk_io_channel_get_ctx(_channel);
1646 	struct spdk_bs_request_set *set;
1647 	uint32_t count = 0;
1648 
1649 	TAILQ_FOREACH(set, &channel->reqs, link) {
1650 		count++;
1651 	}
1652 
1653 	return count;
1654 }
1655 
/* Verify that a vectored write fails cleanly with -ENOMEM when allocation
 * fails (calloc mocked to return NULL), and that no request is leaked from
 * the channel's request pool.
 */
static void
blob_rw_verify_iov_nomem(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob = g_blob;
	struct spdk_io_channel *channel;
	uint8_t payload_write[10 * BLOCKLEN];
	struct iovec iov_write[3];
	uint32_t req_count;

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	spdk_blob_resize(blob, 2, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/*
	 * Choose a page offset just before the cluster boundary.  The first 6 pages of payload
	 *  will get written to the first cluster, the last 4 to the second cluster.
	 */
	iov_write[0].iov_base = payload_write;
	iov_write[0].iov_len = 1 * BLOCKLEN;
	iov_write[1].iov_base = payload_write + 1 * BLOCKLEN;
	iov_write[1].iov_len = 5 * BLOCKLEN;
	iov_write[2].iov_base = payload_write + 6 * BLOCKLEN;
	iov_write[2].iov_len = 4 * BLOCKLEN;
	/* Force every calloc inside the write path to fail. */
	MOCK_SET(calloc, NULL);
	req_count = bs_channel_get_req_count(channel);
	spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOMEM);
	/* The request pool must be unchanged - nothing leaked on failure. */
	CU_ASSERT(req_count == bs_channel_get_req_count(channel));
	MOCK_CLEAR(calloc);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}
1694 
/* Verify that on a data-read-only blob, vectored writes fail with -EPERM
 * while vectored reads still succeed.
 */
static void
blob_rw_iov_read_only(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob = g_blob;
	struct spdk_io_channel *channel;
	uint8_t payload_read[BLOCKLEN];
	uint8_t payload_write[BLOCKLEN];
	struct iovec iov_read;
	struct iovec iov_write;

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	spdk_blob_resize(blob, 2, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Verify that writev failed if read_only flag is set. */
	blob->data_ro = true;
	iov_write.iov_base = payload_write;
	iov_write.iov_len = sizeof(payload_write);
	spdk_blob_io_writev(blob, channel, &iov_write, 1, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EPERM);

	/* Verify that reads pass if data_ro flag is set. */
	iov_read.iov_base = payload_read;
	iov_read.iov_len = sizeof(payload_read);
	spdk_blob_io_readv(blob, channel, &iov_read, 1, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}
1731 
1732 static void
1733 _blob_io_read_no_split(struct spdk_blob *blob, struct spdk_io_channel *channel,
1734 		       uint8_t *payload, uint64_t offset, uint64_t length,
1735 		       spdk_blob_op_complete cb_fn, void *cb_arg)
1736 {
1737 	uint64_t i;
1738 	uint8_t *buf;
1739 	uint64_t io_unit_size = spdk_bs_get_io_unit_size(blob->bs);
1740 
1741 	/* To be sure that operation is NOT split, read one io_unit at the time */
1742 	buf = payload;
1743 	for (i = 0; i < length; i++) {
1744 		spdk_blob_io_read(blob, channel, buf, i + offset, 1, blob_op_complete, NULL);
1745 		poll_threads();
1746 		if (g_bserrno != 0) {
1747 			/* Pass the error code up */
1748 			break;
1749 		}
1750 		buf += io_unit_size;
1751 	}
1752 
1753 	cb_fn(cb_arg, g_bserrno);
1754 }
1755 
1756 static void
1757 _blob_io_write_no_split(struct spdk_blob *blob, struct spdk_io_channel *channel,
1758 			uint8_t *payload, uint64_t offset, uint64_t length,
1759 			spdk_blob_op_complete cb_fn, void *cb_arg)
1760 {
1761 	uint64_t i;
1762 	uint8_t *buf;
1763 	uint64_t io_unit_size = spdk_bs_get_io_unit_size(blob->bs);
1764 
1765 	/* To be sure that operation is NOT split, write one io_unit at the time */
1766 	buf = payload;
1767 	for (i = 0; i < length; i++) {
1768 		spdk_blob_io_write(blob, channel, buf, i + offset, 1, blob_op_complete, NULL);
1769 		poll_threads();
1770 		if (g_bserrno != 0) {
1771 			/* Pass the error code up */
1772 			break;
1773 		}
1774 		buf += io_unit_size;
1775 	}
1776 
1777 	cb_fn(cb_arg, g_bserrno);
1778 }
1779 
1780 static void
1781 blob_operation_split_rw(void)
1782 {
1783 	struct spdk_blob_store *bs = g_bs;
1784 	struct spdk_blob *blob;
1785 	struct spdk_io_channel *channel;
1786 	struct spdk_blob_opts opts;
1787 	uint64_t cluster_size;
1788 
1789 	uint64_t payload_size;
1790 	uint8_t *payload_read;
1791 	uint8_t *payload_write;
1792 	uint8_t *payload_pattern;
1793 
1794 	uint64_t io_unit_size;
1795 	uint64_t io_units_per_cluster;
1796 	uint64_t io_units_per_payload;
1797 
1798 	uint64_t i;
1799 
1800 	cluster_size = spdk_bs_get_cluster_size(bs);
1801 	io_unit_size = spdk_bs_get_io_unit_size(bs);
1802 	io_units_per_cluster = cluster_size / io_unit_size;
1803 	io_units_per_payload = io_units_per_cluster * 5;
1804 	payload_size = cluster_size * 5;
1805 
1806 	payload_read = malloc(payload_size);
1807 	SPDK_CU_ASSERT_FATAL(payload_read != NULL);
1808 
1809 	payload_write = malloc(payload_size);
1810 	SPDK_CU_ASSERT_FATAL(payload_write != NULL);
1811 
1812 	payload_pattern = malloc(payload_size);
1813 	SPDK_CU_ASSERT_FATAL(payload_pattern != NULL);
1814 
1815 	/* Prepare random pattern to write */
1816 	memset(payload_pattern, 0xFF, payload_size);
1817 	for (i = 0; i < io_units_per_payload; i++) {
1818 		*((uint64_t *)(payload_pattern + io_unit_size * i)) = (i + 1);
1819 	}
1820 
1821 	channel = spdk_bs_alloc_io_channel(bs);
1822 	SPDK_CU_ASSERT_FATAL(channel != NULL);
1823 
1824 	/* Create blob */
1825 	ut_spdk_blob_opts_init(&opts);
1826 	opts.thin_provision = false;
1827 	opts.num_clusters = 5;
1828 
1829 	blob = ut_blob_create_and_open(bs, &opts);
1830 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
1831 
1832 	/* Initial read should return zeroed payload */
1833 	memset(payload_read, 0xFF, payload_size);
1834 	spdk_blob_io_read(blob, channel, payload_read, 0, io_units_per_payload, blob_op_complete, NULL);
1835 	poll_threads();
1836 	CU_ASSERT(g_bserrno == 0);
1837 	CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size));
1838 
1839 	/* Fill whole blob except last page */
1840 	spdk_blob_io_write(blob, channel, payload_pattern, 0, io_units_per_payload - 1,
1841 			   blob_op_complete, NULL);
1842 	poll_threads();
1843 	CU_ASSERT(g_bserrno == 0);
1844 
1845 	/* Write last page with a pattern */
1846 	spdk_blob_io_write(blob, channel, payload_pattern, io_units_per_payload - 1, 1,
1847 			   blob_op_complete, NULL);
1848 	poll_threads();
1849 	CU_ASSERT(g_bserrno == 0);
1850 
1851 	/* Read whole blob and check consistency */
1852 	memset(payload_read, 0xFF, payload_size);
1853 	spdk_blob_io_read(blob, channel, payload_read, 0, io_units_per_payload, blob_op_complete, NULL);
1854 	poll_threads();
1855 	CU_ASSERT(g_bserrno == 0);
1856 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size - io_unit_size) == 0);
1857 	CU_ASSERT(memcmp(payload_pattern, payload_read + payload_size - io_unit_size, io_unit_size) == 0);
1858 
1859 	/* Fill whole blob except first page */
1860 	spdk_blob_io_write(blob, channel, payload_pattern, 1, io_units_per_payload - 1,
1861 			   blob_op_complete, NULL);
1862 	poll_threads();
1863 	CU_ASSERT(g_bserrno == 0);
1864 
1865 	/* Write first page with a pattern */
1866 	spdk_blob_io_write(blob, channel, payload_pattern, 0, 1,
1867 			   blob_op_complete, NULL);
1868 	poll_threads();
1869 	CU_ASSERT(g_bserrno == 0);
1870 
1871 	/* Read whole blob and check consistency */
1872 	memset(payload_read, 0xFF, payload_size);
1873 	spdk_blob_io_read(blob, channel, payload_read, 0, io_units_per_payload, blob_op_complete, NULL);
1874 	poll_threads();
1875 	CU_ASSERT(g_bserrno == 0);
1876 	CU_ASSERT(memcmp(payload_pattern, payload_read + io_unit_size, payload_size - io_unit_size) == 0);
1877 	CU_ASSERT(memcmp(payload_pattern, payload_read, io_unit_size) == 0);
1878 
1879 
1880 	/* Fill whole blob with a pattern (5 clusters) */
1881 
1882 	/* 1. Read test. */
1883 	_blob_io_write_no_split(blob, channel, payload_pattern, 0, io_units_per_payload,
1884 				blob_op_complete, NULL);
1885 	poll_threads();
1886 	CU_ASSERT(g_bserrno == 0);
1887 
1888 	memset(payload_read, 0xFF, payload_size);
1889 	spdk_blob_io_read(blob, channel, payload_read, 0, io_units_per_payload, blob_op_complete, NULL);
1890 	poll_threads();
1891 	poll_threads();
1892 	CU_ASSERT(g_bserrno == 0);
1893 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);
1894 
1895 	/* 2. Write test. */
1896 	spdk_blob_io_write(blob, channel, payload_pattern, 0, io_units_per_payload,
1897 			   blob_op_complete, NULL);
1898 	poll_threads();
1899 	CU_ASSERT(g_bserrno == 0);
1900 
1901 	memset(payload_read, 0xFF, payload_size);
1902 	_blob_io_read_no_split(blob, channel, payload_read, 0, io_units_per_payload, blob_op_complete,
1903 			       NULL);
1904 	poll_threads();
1905 	CU_ASSERT(g_bserrno == 0);
1906 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);
1907 
1908 	spdk_bs_free_io_channel(channel);
1909 	poll_threads();
1910 
1911 	g_blob = NULL;
1912 	g_blobid = 0;
1913 
1914 	free(payload_read);
1915 	free(payload_write);
1916 	free(payload_pattern);
1917 
1918 	ut_blob_close_and_delete(bs, blob);
1919 }
1920 
/*
 * Verify that readv/writev requests spanning multiple clusters are split
 * correctly by the blobstore.  A 5-cluster blob is exercised with 2-element
 * iovecs whose element boundaries fall at various cluster offsets, and the
 * results are cross-checked against the unsplit single-request helpers
 * (_blob_io_write_no_split / _blob_io_read_no_split).
 */
static void
blob_operation_split_rw_iov(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_io_channel *channel;
	struct spdk_blob_opts opts;
	uint64_t cluster_size;

	uint64_t payload_size;
	uint8_t *payload_read;
	uint8_t *payload_write;	/* NOTE(review): allocated and freed but never used in this test */
	uint8_t *payload_pattern;

	uint64_t io_unit_size;
	uint64_t io_units_per_cluster;
	uint64_t io_units_per_payload;

	struct iovec iov_read[2];
	struct iovec iov_write[2];

	uint64_t i, j;

	/* Payload covers exactly 5 clusters, so every request below crosses
	 * cluster boundaries and must be split internally. */
	cluster_size = spdk_bs_get_cluster_size(bs);
	io_unit_size = spdk_bs_get_io_unit_size(bs);
	io_units_per_cluster = cluster_size / io_unit_size;
	io_units_per_payload = io_units_per_cluster * 5;
	payload_size = cluster_size * 5;

	payload_read = malloc(payload_size);
	SPDK_CU_ASSERT_FATAL(payload_read != NULL);

	payload_write = malloc(payload_size);
	SPDK_CU_ASSERT_FATAL(payload_write != NULL);

	payload_pattern = malloc(payload_size);
	SPDK_CU_ASSERT_FATAL(payload_pattern != NULL);

	/* Prepare random pattern to write: every io_unit is filled with its
	 * 1-based index repeated as 64-bit words, so misplaced io_units are
	 * detectable by memcmp. */
	for (i = 0; i < io_units_per_payload; i++) {
		for (j = 0; j < io_unit_size / sizeof(uint64_t); j++) {
			uint64_t *tmp;

			tmp = (uint64_t *)payload_pattern;
			tmp += ((io_unit_size * i) / sizeof(uint64_t)) + j;
			*tmp = i + 1;
		}
	}

	channel = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(channel != NULL);

	/* Create a thick-provisioned blob with all 5 clusters allocated up front */
	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = false;
	opts.num_clusters = 5;

	blob = ut_blob_create_and_open(bs, &opts);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);

	/* Initial read should return zeroes payload */
	memset(payload_read, 0xFF, payload_size);
	iov_read[0].iov_base = payload_read;
	iov_read[0].iov_len = cluster_size * 3;
	iov_read[1].iov_base = payload_read + cluster_size * 3;
	iov_read[1].iov_len = cluster_size * 2;
	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, io_units_per_payload, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size));

	/* First of iovs fills whole blob except last io_unit and second of iovs writes last io_unit
	 *  with a pattern. */
	iov_write[0].iov_base = payload_pattern;
	iov_write[0].iov_len = payload_size - io_unit_size;
	iov_write[1].iov_base = payload_pattern;
	iov_write[1].iov_len = io_unit_size;
	spdk_blob_io_writev(blob, channel, iov_write, 2, 0, io_units_per_payload, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Read whole blob and check consistency: first part matches the pattern,
	 * last io_unit matches the start of the pattern (it was written from
	 * payload_pattern's beginning). */
	memset(payload_read, 0xFF, payload_size);
	iov_read[0].iov_base = payload_read;
	iov_read[0].iov_len = cluster_size * 2;
	iov_read[1].iov_base = payload_read + cluster_size * 2;
	iov_read[1].iov_len = cluster_size * 3;
	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, io_units_per_payload, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size - io_unit_size) == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read + payload_size - io_unit_size, io_unit_size) == 0);

	/* First of iovs fills only first io_unit and second of iovs writes whole blob except
	 *  first io_unit with a pattern. */
	iov_write[0].iov_base = payload_pattern;
	iov_write[0].iov_len = io_unit_size;
	iov_write[1].iov_base = payload_pattern;
	iov_write[1].iov_len = payload_size - io_unit_size;
	spdk_blob_io_writev(blob, channel, iov_write, 2, 0, io_units_per_payload, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Read whole blob and check consistency */
	memset(payload_read, 0xFF, payload_size);
	iov_read[0].iov_base = payload_read;
	iov_read[0].iov_len = cluster_size * 4;
	iov_read[1].iov_base = payload_read + cluster_size * 4;
	iov_read[1].iov_len = cluster_size;
	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, io_units_per_payload, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read + io_unit_size, payload_size - io_unit_size) == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read, io_unit_size) == 0);


	/* Fill whole blob with a pattern (5 clusters) */

	/* 1. Read test: write without splitting, read back via the split path. */
	_blob_io_write_no_split(blob, channel, payload_pattern, 0, io_units_per_payload,
				blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	memset(payload_read, 0xFF, payload_size);
	iov_read[0].iov_base = payload_read;
	iov_read[0].iov_len = cluster_size;
	iov_read[1].iov_base = payload_read + cluster_size;
	iov_read[1].iov_len = cluster_size * 4;
	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, io_units_per_payload, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);

	/* 2. Write test: write via the split path, read back without splitting.
	 * payload_read is used as the write source here - the memcmp above
	 * proved it now holds a copy of payload_pattern. */
	iov_write[0].iov_base = payload_read;
	iov_write[0].iov_len = cluster_size * 2;
	iov_write[1].iov_base = payload_read + cluster_size * 2;
	iov_write[1].iov_len = cluster_size * 3;
	spdk_blob_io_writev(blob, channel, iov_write, 2, 0, io_units_per_payload, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	memset(payload_read, 0xFF, payload_size);
	_blob_io_read_no_split(blob, channel, payload_read, 0, io_units_per_payload, blob_op_complete,
			       NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();

	g_blob = NULL;
	g_blobid = 0;

	free(payload_read);
	free(payload_write);
	free(payload_pattern);

	ut_blob_close_and_delete(bs, blob);
}
2083 
/*
 * Verify that resizing a blob down to 0 clusters clears, on the backing
 * device, only the clusters the blob still considers allocated.  Clusters
 * manually marked unallocated in blob->active.clusters must be left
 * untouched on the device.
 */
static void
blob_unmap(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_io_channel *channel;
	struct spdk_blob_opts opts;
	uint8_t payload[BLOCKLEN];
	uint32_t first_data_cluster = FIRST_DATA_CLUSTER(bs);
	int i;

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;

	blob = ut_blob_create_and_open(bs, &opts);

	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 10);

	memset(payload, 0, sizeof(payload));
	payload[0] = 0xFF;

	/*
	 * Set first byte of every cluster to 0xFF.  This is poked directly
	 * into the backing device buffer, bypassing the blob I/O path.
	 */
	for (i = 0; i < 10; i++) {
		g_dev_buffer[(first_data_cluster + i) * SPDK_BLOB_OPTS_CLUSTER_SZ] = 0xFF;
	}

	/* Confirm the markers are visible when read back through the blob */
	for (i = 0; i < 10; i++) {
		payload[0] = 0;
		spdk_blob_io_read(blob, channel, &payload, i * SPDK_BLOB_OPTS_CLUSTER_SZ / BLOCKLEN, 1,
				  blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(payload[0] == 0xFF);
	}

	/* Mark some clusters as unallocated directly in the in-memory blob
	 * state, so the resize below must skip them. */
	blob->active.clusters[1] = 0;
	blob->active.clusters[2] = 0;
	blob->active.clusters[3] = 0;
	blob->active.clusters[6] = 0;
	blob->active.clusters[8] = 0;
	blob->active.num_allocated_clusters -= 5;

	/* Unmap clusters by resizing to 0 */
	spdk_blob_resize(blob, 0, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);

	/* Confirm that only 'allocated' clusters were unmapped: the ones
	 * zeroed in active.clusters above still carry the 0xFF marker. */
	for (i = 0; i < 10; i++) {
		switch (i) {
		case 1:
		case 2:
		case 3:
		case 6:
		case 8:
			CU_ASSERT(g_dev_buffer[(first_data_cluster + i) * SPDK_BLOB_OPTS_CLUSTER_SZ] == 0xFF);
			break;
		default:
			CU_ASSERT(g_dev_buffer[(first_data_cluster + i) * SPDK_BLOB_OPTS_CLUSTER_SZ] == 0);
			break;
		}
	}

	spdk_bs_free_io_channel(channel);
	poll_threads();

	ut_blob_close_and_delete(bs, blob);
}
2167 
2168 static void
2169 blob_iter(void)
2170 {
2171 	struct spdk_blob_store *bs = g_bs;
2172 	struct spdk_blob *blob;
2173 	spdk_blob_id blobid;
2174 	struct spdk_blob_opts blob_opts;
2175 
2176 	spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL);
2177 	poll_threads();
2178 	CU_ASSERT(g_blob == NULL);
2179 	CU_ASSERT(g_bserrno == -ENOENT);
2180 
2181 	ut_spdk_blob_opts_init(&blob_opts);
2182 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
2183 	poll_threads();
2184 	CU_ASSERT(g_bserrno == 0);
2185 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
2186 	blobid = g_blobid;
2187 
2188 	spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL);
2189 	poll_threads();
2190 	CU_ASSERT(g_blob != NULL);
2191 	CU_ASSERT(g_bserrno == 0);
2192 	blob = g_blob;
2193 	CU_ASSERT(spdk_blob_get_id(blob) == blobid);
2194 
2195 	spdk_bs_iter_next(bs, blob, blob_op_with_handle_complete, NULL);
2196 	poll_threads();
2197 	CU_ASSERT(g_blob == NULL);
2198 	CU_ASSERT(g_bserrno == -ENOENT);
2199 }
2200 
/*
 * Exercise blob extended attributes: md_ro enforcement on set/remove,
 * overwrite, value retrieval, name enumeration, internal (hidden) xattrs
 * and their persistence across a blobstore reload.
 */
static void
blob_xattr(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob = g_blob;
	spdk_blob_id blobid = spdk_blob_get_id(blob);
	uint64_t length;
	int rc;
	const char *name1, *name2;
	const void *value;
	size_t value_len;
	struct spdk_xattr_names *names;

	/* Test that set_xattr fails if md_ro flag is set. */
	blob->md_ro = true;
	rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
	CU_ASSERT(rc == -EPERM);

	blob->md_ro = false;
	rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
	CU_ASSERT(rc == 0);

	length = 2345;
	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
	CU_ASSERT(rc == 0);

	/* Overwrite "length" xattr. */
	length = 3456;
	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
	CU_ASSERT(rc == 0);

	/* get_xattr should still work even if md_ro flag is set. */
	value = NULL;
	blob->md_ro = true;
	rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(*(uint64_t *)value == length);
	CU_ASSERT(value_len == 8);
	blob->md_ro = false;

	/* Looking up a non-existent xattr fails with -ENOENT */
	rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len);
	CU_ASSERT(rc == -ENOENT);

	/* Enumerate names: exactly "name" and "length", in unspecified order */
	names = NULL;
	rc = spdk_blob_get_xattr_names(blob, &names);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(names != NULL);
	CU_ASSERT(spdk_xattr_names_get_count(names) == 2);
	name1 = spdk_xattr_names_get_name(names, 0);
	SPDK_CU_ASSERT_FATAL(name1 != NULL);
	CU_ASSERT(!strcmp(name1, "name") || !strcmp(name1, "length"));
	name2 = spdk_xattr_names_get_name(names, 1);
	SPDK_CU_ASSERT_FATAL(name2 != NULL);
	CU_ASSERT(!strcmp(name2, "name") || !strcmp(name2, "length"));
	CU_ASSERT(strcmp(name1, name2));
	spdk_xattr_names_free(names);

	/* Confirm that remove_xattr fails if md_ro is set to true. */
	blob->md_ro = true;
	rc = spdk_blob_remove_xattr(blob, "name");
	CU_ASSERT(rc == -EPERM);

	blob->md_ro = false;
	rc = spdk_blob_remove_xattr(blob, "name");
	CU_ASSERT(rc == 0);

	rc = spdk_blob_remove_xattr(blob, "foobar");
	CU_ASSERT(rc == -ENOENT);

	/* Set internal xattr - must be invisible to the public get calls */
	length = 7898;
	rc = blob_set_xattr(blob, "internal", &length, sizeof(length), true);
	CU_ASSERT(rc == 0);
	rc = blob_get_xattr_value(blob, "internal", &value, &value_len, true);
	CU_ASSERT(rc == 0);
	CU_ASSERT(*(uint64_t *)value == length);
	/* try to get public xattr with same name */
	rc = spdk_blob_get_xattr_value(blob, "internal", &value, &value_len);
	CU_ASSERT(rc != 0);
	rc = blob_get_xattr_value(blob, "internal", &value, &value_len, false);
	CU_ASSERT(rc != 0);
	/* Check if SPDK_BLOB_INTERNAL_XATTR is set */
	CU_ASSERT((blob->invalid_flags & SPDK_BLOB_INTERNAL_XATTR) ==
		  SPDK_BLOB_INTERNAL_XATTR);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();

	/* Check if xattrs are persisted */
	ut_bs_reload(&bs, NULL);

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;

	/* Internal xattr value survived the reload */
	rc = blob_get_xattr_value(blob, "internal", &value, &value_len, true);
	CU_ASSERT(rc == 0);
	CU_ASSERT(*(uint64_t *)value == length);

	/* try to get internal xattr through public call */
	rc = spdk_blob_get_xattr_value(blob, "internal", &value, &value_len);
	CU_ASSERT(rc != 0);

	/* Removing the last internal xattr clears the flag */
	rc = blob_remove_xattr(blob, "internal", true);
	CU_ASSERT(rc == 0);

	CU_ASSERT((blob->invalid_flags & SPDK_BLOB_INTERNAL_XATTR) == 0);
}
2312 
2313 static void
2314 blob_parse_md(void)
2315 {
2316 	struct spdk_blob_store *bs = g_bs;
2317 	struct spdk_blob *blob;
2318 	int rc;
2319 	uint32_t used_pages;
2320 	size_t xattr_length;
2321 	char *xattr;
2322 
2323 	used_pages = spdk_bit_array_count_set(bs->used_md_pages);
2324 	blob = ut_blob_create_and_open(bs, NULL);
2325 
2326 	/* Create large extent to force more than 1 page of metadata. */
2327 	xattr_length = SPDK_BS_MAX_DESC_SIZE - sizeof(struct spdk_blob_md_descriptor_xattr) -
2328 		       strlen("large_xattr");
2329 	xattr = calloc(xattr_length, sizeof(char));
2330 	SPDK_CU_ASSERT_FATAL(xattr != NULL);
2331 	rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
2332 	free(xattr);
2333 	SPDK_CU_ASSERT_FATAL(rc == 0);
2334 
2335 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
2336 	poll_threads();
2337 
2338 	/* Delete the blob and verify that number of pages returned to before its creation. */
2339 	SPDK_CU_ASSERT_FATAL(used_pages != spdk_bit_array_count_set(bs->used_md_pages));
2340 	ut_blob_close_and_delete(bs, blob);
2341 	SPDK_CU_ASSERT_FATAL(used_pages == spdk_bit_array_count_set(bs->used_md_pages));
2342 }
2343 
/*
 * End-to-end blobstore load test: init a store, create a blob with xattrs,
 * unload, then exercise spdk_bs_load() against invalid configurations
 * (unsupported blocklen, zero max_md_ops / max_channel_ops, shrunk bdev),
 * a grown bdev, and the compatibility path where the super block carries
 * size == 0.
 */
static void
bs_load(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	spdk_blob_id blobid;
	struct spdk_blob *blob;
	struct spdk_bs_super_block *super_block;
	uint64_t length;
	int rc;
	const void *value;
	size_t value_len;
	struct spdk_bs_opts opts;
	struct spdk_blob_opts blob_opts;

	dev = init_dev();
	spdk_bs_opts_init(&opts, sizeof(opts));
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");

	/* Initialize a new blob store */
	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	/* Try to open a blobid that does not exist */
	spdk_bs_open_blob(bs, 0, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOENT);
	CU_ASSERT(g_blob == NULL);

	/* Create a blob */
	blob = ut_blob_create_and_open(bs, NULL);
	blobid = spdk_blob_get_id(blob);

	/* Try again to open valid blob but without the upper bit set */
	spdk_bs_open_blob(bs, blobid & 0xFFFFFFFF, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOENT);
	CU_ASSERT(g_blob == NULL);

	/* Set some xattrs */
	rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
	CU_ASSERT(rc == 0);

	length = 2345;
	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
	CU_ASSERT(rc == 0);

	/* Resize the blob */
	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	blob = NULL;
	g_blob = NULL;
	g_blobid = SPDK_BLOBID_INVALID;

	/* Unload the blob store */
	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
	g_blob = NULL;
	g_blobid = 0;

	/* A clean unload must leave clean == 1 in the on-device super block */
	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
	CU_ASSERT(super_block->clean == 1);

	/* Load should fail for device with an unsupported blocklen */
	dev = init_dev();
	dev->blocklen = g_phys_blocklen * 2;
	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Load should fail when max_md_ops is set to zero */
	dev = init_dev();
	spdk_bs_opts_init(&opts, sizeof(opts));
	opts.max_md_ops = 0;
	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Load should fail when max_channel_ops is set to zero */
	dev = init_dev();
	spdk_bs_opts_init(&opts, sizeof(opts));
	opts.max_channel_ops = 0;
	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Load an existing blob store */
	dev = init_dev();
	spdk_bs_opts_init(&opts, sizeof(opts));
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
	CU_ASSERT(super_block->clean == 1);
	CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen);

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;

	/* Start a metadata sync but do not poll its completion; the on-device
	 * super block should still read clean == 1 at this point.
	 * NOTE(review): the original comment claimed "marked dirty" - confirm
	 * intent against the sync/dirty-marking path. */
	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	CU_ASSERT(super_block->clean == 1);

	/* Get the xattrs */
	value = NULL;
	rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(*(uint64_t *)value == length);
	CU_ASSERT(value_len == 8);

	rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len);
	CU_ASSERT(rc == -ENOENT);

	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 10);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	blob = NULL;
	g_blob = NULL;

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;

	/* Load should fail: bdev size < saved size */
	dev = init_dev();
	dev->blockcnt /= 2;

	spdk_bs_opts_init(&opts, sizeof(opts));
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();

	CU_ASSERT(g_bserrno == -EILSEQ);

	/* Load should succeed: bdev size > saved size */
	dev = init_dev();
	dev->blockcnt *= 4;

	spdk_bs_opts_init(&opts, sizeof(opts));
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	CU_ASSERT(g_bserrno == 0);
	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();


	/* Test compatibility mode: a super block with size == 0 (written by
	 * older versions) must still load. */

	dev = init_dev();
	super_block->size = 0;
	super_block->crc = blob_md_page_calc_crc(super_block);

	spdk_bs_opts_init(&opts, sizeof(opts));
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	/* Create a blob */
	ut_spdk_blob_opts_init(&blob_opts);
	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);

	/* Blobstore should update number of blocks in super_block */
	CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen);
	CU_ASSERT(super_block->clean == 0);

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(super_block->clean == 1);
	g_bs = NULL;

}
2549 
/*
 * Verify handling of the SNAPSHOT_PENDING_REMOVAL internal xattr across
 * blobstore reloads: if a clone still references the snapshot, the load
 * path clears the pending-removal marker; once the clone's BLOB_SNAPSHOT
 * back-reference is removed, a subsequent load deletes the snapshot.
 */
static void
bs_load_pending_removal(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts opts;
	struct spdk_blob *blob, *snapshot;
	spdk_blob_id blobid, snapshotid;
	const void *value;
	size_t value_len;
	int rc;

	/* Create blob */
	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);

	/* Create snapshot */
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snapshotid = g_blobid;

	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot = g_blob;

	/* Set SNAPSHOT_PENDING_REMOVAL xattr (md_ro is toggled off so the
	 * internal set call is allowed, then restored). */
	snapshot->md_ro = false;
	rc = blob_set_xattr(snapshot, SNAPSHOT_PENDING_REMOVAL, &blobid, sizeof(spdk_blob_id), true);
	CU_ASSERT(rc == 0);
	snapshot->md_ro = true;

	spdk_blob_close(snapshot, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Reload blobstore */
	ut_bs_reload(&bs, NULL);

	/* Snapshot should not be removed as blob is still pointing to it */
	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot = g_blob;

	/* SNAPSHOT_PENDING_REMOVAL xattr should be removed during load */
	rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_PENDING_REMOVAL, &value, &value_len);
	CU_ASSERT(rc != 0);

	/* Set SNAPSHOT_PENDING_REMOVAL xattr again */
	snapshot->md_ro = false;
	rc = blob_set_xattr(snapshot, SNAPSHOT_PENDING_REMOVAL, &blobid, sizeof(spdk_blob_id), true);
	CU_ASSERT(rc == 0);
	snapshot->md_ro = true;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;

	/* Remove parent_id from blob by removing BLOB_SNAPSHOT xattr */
	blob_remove_xattr(blob, BLOB_SNAPSHOT, true);

	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_blob_close(snapshot, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Reload blobstore */
	ut_bs_reload(&bs, NULL);

	/* Snapshot should be removed as blob is not pointing to it anymore */
	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno != 0);
}
2644 
2645 static void
2646 bs_load_custom_cluster_size(void)
2647 {
2648 	struct spdk_blob_store *bs;
2649 	struct spdk_bs_dev *dev;
2650 	struct spdk_bs_super_block *super_block;
2651 	struct spdk_bs_opts opts;
2652 	uint32_t custom_cluster_size = 4194304; /* 4MiB */
2653 	uint32_t cluster_sz;
2654 	uint64_t total_clusters;
2655 
2656 	dev = init_dev();
2657 	spdk_bs_opts_init(&opts, sizeof(opts));
2658 	opts.cluster_sz = custom_cluster_size;
2659 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2660 
2661 	/* Initialize a new blob store */
2662 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2663 	poll_threads();
2664 	CU_ASSERT(g_bserrno == 0);
2665 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2666 	bs = g_bs;
2667 	cluster_sz = bs->cluster_sz;
2668 	total_clusters = bs->total_clusters;
2669 
2670 	/* Unload the blob store */
2671 	spdk_bs_unload(bs, bs_op_complete, NULL);
2672 	poll_threads();
2673 	CU_ASSERT(g_bserrno == 0);
2674 	g_bs = NULL;
2675 	g_blob = NULL;
2676 	g_blobid = 0;
2677 
2678 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
2679 	CU_ASSERT(super_block->clean == 1);
2680 
2681 	/* Load an existing blob store */
2682 	dev = init_dev();
2683 	spdk_bs_opts_init(&opts, sizeof(opts));
2684 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2685 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2686 	poll_threads();
2687 	CU_ASSERT(g_bserrno == 0);
2688 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2689 	bs = g_bs;
2690 	/* Compare cluster size and number to one after initialization */
2691 	CU_ASSERT(cluster_sz == bs->cluster_sz);
2692 	CU_ASSERT(total_clusters == bs->total_clusters);
2693 
2694 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
2695 	CU_ASSERT(super_block->clean == 1);
2696 	CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen);
2697 
2698 	spdk_bs_unload(bs, bs_op_complete, NULL);
2699 	poll_threads();
2700 	CU_ASSERT(g_bserrno == 0);
2701 	CU_ASSERT(super_block->clean == 1);
2702 	g_bs = NULL;
2703 }
2704 
/*
 * Emulate a spdk_bs_grow() that crashed mid-way: the on-disk used_cluster
 * mask length was doubled but the super block was not updated.  A
 * subsequent load on a larger device must recover, keep the original
 * capacity, and leave the existing blob and snapshot intact.
 */
static void
bs_load_after_failed_grow(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct spdk_bs_super_block *super_block;
	struct spdk_bs_opts opts;
	struct spdk_bs_md_mask *mask;
	struct spdk_blob_opts blob_opts;
	struct spdk_blob *blob, *snapshot;
	spdk_blob_id blobid, snapshotid;
	uint64_t total_data_clusters;

	dev = init_dev();
	spdk_bs_opts_init(&opts, sizeof(opts));
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
	/*
	 * The bdev_size is 64M, cluster_sz is 1M, so there are 64 clusters. The
	 * blobstore will create 64 md pages by default. We set num_md_pages to 128,
	 * thus the blobstore could grow to the double size.
	 */
	opts.num_md_pages = 128;

	/* Initialize a new blob store */
	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	/* Create blob */
	ut_spdk_blob_opts_init(&blob_opts);
	blob_opts.num_clusters = 10;

	blob = ut_blob_create_and_open(bs, &blob_opts);
	blobid = spdk_blob_get_id(blob);

	/* Create snapshot */
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snapshotid = g_blobid;

	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot = g_blob;

	spdk_blob_close(snapshot, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* The 10 blob clusters are the only ones consumed */
	total_data_clusters = bs->total_data_clusters;
	CU_ASSERT(bs->num_free_clusters + 10 == total_data_clusters);

	/* Unload the blob store */
	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
	g_blob = NULL;
	g_blobid = 0;

	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
	CU_ASSERT(super_block->clean == 1);

	/* Locate the used_cluster mask on the raw device buffer */
	mask = (struct spdk_bs_md_mask *)(g_dev_buffer + super_block->used_cluster_mask_start *
					  g_phys_blocklen);
	CU_ASSERT(mask->type == SPDK_MD_MASK_TYPE_USED_CLUSTERS);
	CU_ASSERT(mask->length == super_block->size / super_block->cluster_size);

	/*
	 * We change the mask->length to emulate this scenario: A spdk_bs_grow failed after it changed
	 * the used_cluster bitmap length, but it didn't change the super block yet.
	 */
	mask->length *= 2;

	/* Load an existing blob store on a device twice the size */
	dev = init_dev();
	dev->blockcnt *= 2;
	spdk_bs_opts_init(&opts, sizeof(opts));
	opts.clear_method = BS_CLEAR_WITH_NONE;
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	/* Check the capacity is the same as before */
	CU_ASSERT(bs->total_data_clusters == total_data_clusters);
	CU_ASSERT(bs->num_free_clusters + 10 == total_data_clusters);

	/* Check the blob and the snapshot are still available */
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot = g_blob;

	spdk_blob_close(snapshot, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(super_block->clean == 1);
	g_bs = NULL;
}
2831 
2832 static void
2833 bs_type(void)
2834 {
2835 	struct spdk_blob_store *bs;
2836 	struct spdk_bs_dev *dev;
2837 	struct spdk_bs_opts opts;
2838 
2839 	dev = init_dev();
2840 	spdk_bs_opts_init(&opts, sizeof(opts));
2841 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2842 
2843 	/* Initialize a new blob store */
2844 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2845 	poll_threads();
2846 	CU_ASSERT(g_bserrno == 0);
2847 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2848 	bs = g_bs;
2849 
2850 	/* Unload the blob store */
2851 	spdk_bs_unload(bs, bs_op_complete, NULL);
2852 	poll_threads();
2853 	CU_ASSERT(g_bserrno == 0);
2854 	g_bs = NULL;
2855 	g_blob = NULL;
2856 	g_blobid = 0;
2857 
2858 	/* Load non existing blobstore type */
2859 	dev = init_dev();
2860 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "NONEXISTING");
2861 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2862 	poll_threads();
2863 	CU_ASSERT(g_bserrno != 0);
2864 
2865 	/* Load with empty blobstore type */
2866 	dev = init_dev();
2867 	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
2868 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2869 	poll_threads();
2870 	CU_ASSERT(g_bserrno == 0);
2871 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2872 	bs = g_bs;
2873 
2874 	spdk_bs_unload(bs, bs_op_complete, NULL);
2875 	poll_threads();
2876 	CU_ASSERT(g_bserrno == 0);
2877 	g_bs = NULL;
2878 
2879 	/* Initialize a new blob store with empty bstype */
2880 	dev = init_dev();
2881 	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
2882 	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
2883 	poll_threads();
2884 	CU_ASSERT(g_bserrno == 0);
2885 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2886 	bs = g_bs;
2887 
2888 	spdk_bs_unload(bs, bs_op_complete, NULL);
2889 	poll_threads();
2890 	CU_ASSERT(g_bserrno == 0);
2891 	g_bs = NULL;
2892 
2893 	/* Load non existing blobstore type */
2894 	dev = init_dev();
2895 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "NONEXISTING");
2896 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2897 	poll_threads();
2898 	CU_ASSERT(g_bserrno != 0);
2899 
2900 	/* Load with empty blobstore type */
2901 	dev = init_dev();
2902 	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
2903 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2904 	poll_threads();
2905 	CU_ASSERT(g_bserrno == 0);
2906 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2907 	bs = g_bs;
2908 
2909 	spdk_bs_unload(bs, bs_op_complete, NULL);
2910 	poll_threads();
2911 	CU_ASSERT(g_bserrno == 0);
2912 	g_bs = NULL;
2913 }
2914 
/*
 * Verify super block version handling: a blobstore whose on-disk version is
 * newer than supported must fail to load, while a hand-crafted version-1
 * super block must still load successfully (backward compatibility).
 */
static void
bs_super_block(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct spdk_bs_super_block *super_block;
	struct spdk_bs_opts opts;
	struct spdk_bs_super_block_ver1 super_block_v1;

	dev = init_dev();
	spdk_bs_opts_init(&opts, sizeof(opts));
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");

	/* Initialize a new blob store */
	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	/* Unload the blob store */
	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
	g_blob = NULL;
	g_blobid = 0;

	/* Load an existing blob store with version newer than supported */
	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
	super_block->version++;

	dev = init_dev();
	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno != 0);

	/* Create a new blob store with super block version 1 */
	dev = init_dev();
	super_block_v1.version = 1;
	memcpy(super_block_v1.signature, "SPDKBLOB", sizeof(super_block_v1.signature));
	super_block_v1.length = 0x1000;
	super_block_v1.clean = 1;
	super_block_v1.super_blob = 0xFFFFFFFFFFFFFFFF;
	super_block_v1.cluster_size = 0x100000;
	super_block_v1.used_page_mask_start = 0x01;
	super_block_v1.used_page_mask_len = 0x01;
	super_block_v1.used_cluster_mask_start = 0x02;
	super_block_v1.used_cluster_mask_len = 0x01;
	super_block_v1.md_start = 0x03;
	super_block_v1.md_len = 0x40;
	/* Zero the reserved area so the CRC below covers deterministic bytes. */
	memset(super_block_v1.reserved, 0, 4036);
	super_block_v1.crc = blob_md_page_calc_crc(&super_block_v1);
	/* Write the crafted v1 super block directly onto the backing device buffer. */
	memcpy(g_dev_buffer, &super_block_v1, sizeof(struct spdk_bs_super_block_ver1));

	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
}
2983 
/*
 * Hand-craft a super block marked unclean (clean == 0) on the device buffer
 * and load it, exercising the dirty-shutdown recovery path that rebuilds the
 * used-cluster accounting from metadata.
 */
static void
bs_test_recover_cluster_count(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct spdk_bs_super_block super_block;
	struct spdk_bs_opts opts;

	dev = init_dev();
	spdk_bs_opts_init(&opts, sizeof(opts));
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");

	/* clean == 0 marks a dirty shutdown, which triggers recovery on load. */
	super_block.version = 3;
	memcpy(super_block.signature, "SPDKBLOB", sizeof(super_block.signature));
	super_block.length = 0x1000;
	super_block.clean = 0;
	super_block.super_blob = 0xFFFFFFFFFFFFFFFF;
	super_block.cluster_size = g_phys_blocklen;
	super_block.used_page_mask_start = 0x01;
	super_block.used_page_mask_len = 0x01;
	super_block.used_cluster_mask_start = 0x02;
	super_block.used_cluster_mask_len = 0x01;
	super_block.used_blobid_mask_start = 0x03;
	super_block.used_blobid_mask_len = 0x01;
	super_block.md_page_size = g_phys_blocklen;
	super_block.md_start = 0x04;
	super_block.md_len = 0x40;
	memset(super_block.bstype.bstype, 0, sizeof(super_block.bstype.bstype));
	super_block.size = dev->blockcnt * dev->blocklen;
	super_block.io_unit_size = 0x1000;
	/* Zero the reserved area so the CRC below covers deterministic bytes. */
	memset(super_block.reserved, 0, SPDK_SIZEOF_MEMBER(struct spdk_bs_super_block, reserved));
	super_block.crc = blob_md_page_calc_crc(&super_block);
	/* Place the crafted super block directly onto the backing device buffer. */
	memcpy(g_dev_buffer, &super_block, sizeof(struct spdk_bs_super_block));

	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;
	/* Recovery must mark everything up to the end of the metadata region as
	 * used; cluster_size equals the md page size here, so one page == one
	 * cluster and (md_start + md_len) clusters are consumed. */
	CU_ASSERT(bs->num_free_clusters == bs->total_clusters - (super_block.md_start +
			super_block.md_len));

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
}
3032 
/*
 * Initialize a blobstore, resize the underlying dev to new_blockcnt blocks and
 * call spdk_bs_grow_live(), then verify the super block and the used_cluster
 * mask on disk across unload/reload.  new_blockcnt may equal the original
 * device size, in which case the grow is a no-op that must still succeed.
 */
static void
bs_grow_live_size(uint64_t new_blockcnt)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct spdk_bs_super_block super_block;
	struct spdk_bs_opts opts;
	struct spdk_bs_md_mask mask;
	uint64_t bdev_size;
	uint64_t total_data_clusters;

	/*
	 * Further down the test the dev size will be larger than the g_dev_buffer size,
	 * so we set clear_method to NONE, or the blobstore will try to clear the dev and
	 * will write beyond the end of g_dev_buffer.
	 */
	dev = init_dev();
	spdk_bs_opts_init(&opts, sizeof(opts));
	opts.clear_method = BS_CLEAR_WITH_NONE;
	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	/*
	 * Set the dev size according to the new_blockcnt,
	 * then the blobstore will adjust the metadata according to the new size.
	 */
	dev->blockcnt = new_blockcnt;
	bdev_size = dev->blockcnt * dev->blocklen;
	spdk_bs_grow_live(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	total_data_clusters = spdk_bs_total_data_cluster_count(bs);

	/* Make sure the super block is updated. */
	memcpy(&super_block, g_dev_buffer, sizeof(struct spdk_bs_super_block));
	CU_ASSERT(super_block.size == bdev_size);
	CU_ASSERT(super_block.clean == 0);
	/* The used_cluster mask is not written out until first spdk_bs_unload. */
	memcpy(&mask, g_dev_buffer + super_block.used_cluster_mask_start * g_phys_blocklen,
	       sizeof(struct spdk_bs_md_mask));
	CU_ASSERT(mask.type == 0);
	CU_ASSERT(mask.length == 0);

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;

	/* Make sure all metadata is correct, super block and used_cluster mask. */
	memcpy(&super_block, g_dev_buffer, sizeof(struct spdk_bs_super_block));
	CU_ASSERT(super_block.size == bdev_size);
	CU_ASSERT(super_block.clean == 1);
	memcpy(&mask, g_dev_buffer + super_block.used_cluster_mask_start * g_phys_blocklen,
	       sizeof(struct spdk_bs_md_mask));
	CU_ASSERT(mask.type == SPDK_MD_MASK_TYPE_USED_CLUSTERS);
	/* One mask bit per cluster; assumes the default 1 MiB cluster size. */
	CU_ASSERT(mask.length == bdev_size / (1 * 1024 * 1024));

	/* Load blobstore and check the cluster counts again. */
	dev = init_dev();
	dev->blockcnt = new_blockcnt;
	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	CU_ASSERT(super_block.clean == 1);
	bs = g_bs;
	CU_ASSERT(total_data_clusters == spdk_bs_total_data_cluster_count(bs));

	/* Perform grow without change in size, expected pass. */
	spdk_bs_grow_live(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(total_data_clusters == spdk_bs_total_data_cluster_count(bs));
	memcpy(&super_block, g_dev_buffer, sizeof(struct spdk_bs_super_block));
	CU_ASSERT(super_block.size == bdev_size);
	CU_ASSERT(super_block.clean == 1);

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
}
3118 
3119 static void
3120 bs_grow_live(void)
3121 {
3122 	/* No change expected */
3123 	bs_grow_live_size(DEV_BUFFER_BLOCKCNT);
3124 
3125 	/* Size slightly increased, but not enough to increase cluster count */
3126 	bs_grow_live_size(DEV_BUFFER_BLOCKCNT + 1);
3127 
3128 	/* Size doubled, increasing the cluster count */
3129 	bs_grow_live_size(DEV_BUFFER_BLOCKCNT * 2);
3130 }
3131 
/*
 * Negative tests for spdk_bs_grow_live(): shrinking the device must fail with
 * -EILSEQ (super block validation) and growing beyond what the reserved
 * used_cluster mask pages can track must fail with -ENOSPC.  Neither failed
 * attempt may change the blobstore or its on-disk metadata.
 */
static void
bs_grow_live_no_space(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct spdk_bs_super_block super_block;
	struct spdk_bs_opts opts;
	struct spdk_bs_md_mask mask;
	uint64_t bdev_size_init;
	uint64_t total_data_clusters, max_clusters;

	/*
	 * Further down the test the dev size will be larger than the g_dev_buffer size,
	 * so we set clear_method to NONE, or the blobstore will try to clear the dev and
	 * will write beyond the end of g_dev_buffer.
	 */
	dev = init_dev();
	bdev_size_init = dev->blockcnt * dev->blocklen;
	spdk_bs_opts_init(&opts, sizeof(opts));
	opts.clear_method = BS_CLEAR_WITH_NONE;
	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;
	total_data_clusters = spdk_bs_total_data_cluster_count(bs);

	/*
	 * The default dev size is 64M, here we set the dev size to 32M,
	 * expecting EILSEQ due to super_block validation and no change in blobstore.
	 */
	dev->blockcnt = (32L * 1024L * 1024L) / dev->blocklen;
	spdk_bs_grow_live(bs, bs_op_complete, NULL);
	poll_threads();
	/* This error code comes from bs_super_validate() */
	CU_ASSERT(g_bserrno == -EILSEQ);
	CU_ASSERT(total_data_clusters == spdk_bs_total_data_cluster_count(bs));
	memcpy(&super_block, g_dev_buffer, sizeof(struct spdk_bs_super_block));
	CU_ASSERT(super_block.size == bdev_size_init);

	/*
	 * Blobstore in this test has only space for single md_page for used_clusters,
	 * which fits 1 bit per cluster minus the md header.
	 *
	 * Dev size is increased to exceed the reserved space for the used_cluster_mask
	 * in the metadata, expecting ENOSPC and no change in blobstore.
	 */
	max_clusters = (spdk_bs_get_page_size(bs) - sizeof(struct spdk_bs_md_mask)) * 8;
	max_clusters += 1;
	dev->blockcnt = (max_clusters * spdk_bs_get_cluster_size(bs)) / dev->blocklen;
	spdk_bs_grow_live(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOSPC);
	CU_ASSERT(total_data_clusters == spdk_bs_total_data_cluster_count(bs));
	memcpy(&super_block, g_dev_buffer, sizeof(struct spdk_bs_super_block));
	CU_ASSERT(super_block.size == bdev_size_init);

	/*
	 * No change should have occurred for the duration of the test,
	 * unload blobstore and check metadata.
	 */
	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;

	/* Make sure all metadata is correct, super block and used_cluster mask. */
	memcpy(&super_block, g_dev_buffer, sizeof(struct spdk_bs_super_block));
	CU_ASSERT(super_block.size == bdev_size_init);
	CU_ASSERT(super_block.clean == 1);
	memcpy(&mask, g_dev_buffer + super_block.used_cluster_mask_start * g_phys_blocklen,
	       sizeof(struct spdk_bs_md_mask));
	CU_ASSERT(mask.type == SPDK_MD_MASK_TYPE_USED_CLUSTERS);
	/* One mask bit per cluster; assumes the default 1 MiB cluster size. */
	CU_ASSERT(mask.length == bdev_size_init / (1 * 1024 * 1024));

	/* Load blobstore and check the cluster counts again. */
	dev = init_dev();
	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;
	CU_ASSERT(total_data_clusters == spdk_bs_total_data_cluster_count(bs));

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
}
3221 
/*
 * Verify offline grow via spdk_bs_grow(): after the device is doubled in
 * size, spdk_bs_grow() must update both the super block size and the
 * used_cluster mask on disk to match the new capacity.
 */
static void
bs_test_grow(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct spdk_bs_super_block super_block;
	struct spdk_bs_opts opts;
	struct spdk_bs_md_mask mask;
	uint64_t bdev_size;

	dev = init_dev();
	bdev_size = dev->blockcnt * dev->blocklen;
	spdk_bs_opts_init(&opts, sizeof(opts));
	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;

	/*
	 * To make sure all the metadata are updated to the disk,
	 * we check the g_dev_buffer after spdk_bs_unload.
	 */
	memcpy(&super_block, g_dev_buffer, sizeof(struct spdk_bs_super_block));
	CU_ASSERT(super_block.size == bdev_size);

	/*
	 * Make sure the used_cluster mask is correct.
	 * One mask bit per cluster; assumes the default 1 MiB cluster size.
	 */
	memcpy(&mask, g_dev_buffer + super_block.used_cluster_mask_start * g_phys_blocklen,
	       sizeof(struct spdk_bs_md_mask));
	CU_ASSERT(mask.type == SPDK_MD_MASK_TYPE_USED_CLUSTERS);
	CU_ASSERT(mask.length == bdev_size / (1 * 1024 * 1024));

	/*
	 * The default dev size is 64M, here we set the dev size to 128M,
	 * then the blobstore will adjust the metadata according to the new size.
	 * The dev size is larger than the g_dev_buffer size, so we set clear_method
	 * to NONE, or the blobstore will try to clear the dev and will write beyond
	 * the end of g_dev_buffer.
	 */
	dev = init_dev();
	dev->blockcnt = (128L * 1024L * 1024L) / dev->blocklen;
	bdev_size = dev->blockcnt * dev->blocklen;
	spdk_bs_opts_init(&opts, sizeof(opts));
	opts.clear_method = BS_CLEAR_WITH_NONE;
	spdk_bs_grow(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	/*
	 * After spdk_bs_grow, all metadata are updated to the disk.
	 * So we can check g_dev_buffer now.
	 */
	memcpy(&super_block, g_dev_buffer, sizeof(struct spdk_bs_super_block));
	CU_ASSERT(super_block.size == bdev_size);

	/*
	 * Make sure the used_cluster mask has been updated according to the bdev size
	 */
	memcpy(&mask, g_dev_buffer + super_block.used_cluster_mask_start * g_phys_blocklen,
	       sizeof(struct spdk_bs_md_mask));
	CU_ASSERT(mask.type == SPDK_MD_MASK_TYPE_USED_CLUSTERS);
	CU_ASSERT(mask.length == bdev_size / (1 * 1024 * 1024));

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
}
3299 
3300 /*
3301  * Create a blobstore and then unload it.
3302  */
3303 static void
3304 bs_unload(void)
3305 {
3306 	struct spdk_blob_store *bs = g_bs;
3307 	struct spdk_blob *blob;
3308 
3309 	/* Create a blob and open it. */
3310 	blob = ut_blob_create_and_open(bs, NULL);
3311 
3312 	/* Try to unload blobstore, should fail with open blob */
3313 	g_bserrno = -1;
3314 	spdk_bs_unload(bs, bs_op_complete, NULL);
3315 	poll_threads();
3316 	CU_ASSERT(g_bserrno == -EBUSY);
3317 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3318 
3319 	/* Close the blob, then successfully unload blobstore */
3320 	g_bserrno = -1;
3321 	spdk_blob_close(blob, blob_op_complete, NULL);
3322 	poll_threads();
3323 	CU_ASSERT(g_bserrno == 0);
3324 }
3325 
3326 /*
3327  * Create a blobstore with a cluster size different than the default, and ensure it is
3328  *  persisted.
3329  */
static void
bs_cluster_sz(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct spdk_bs_opts opts;
	uint32_t cluster_sz;

	/* Set cluster size to zero */
	dev = init_dev();
	spdk_bs_opts_init(&opts, sizeof(opts));
	opts.cluster_sz = 0;

	/* Initialize a new blob store: a zero cluster size must be rejected. */
	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);
	SPDK_CU_ASSERT_FATAL(g_bs == NULL);

	/*
	 * Set cluster size to blobstore page size,
	 * to work it is required to be at least twice the blobstore page size.
	 */
	dev = init_dev();
	spdk_bs_opts_init(&opts, sizeof(opts));
	opts.cluster_sz = g_phys_blocklen;

	/* Initialize a new blob store: one-page clusters fail with -ENOMEM. */
	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOMEM);
	SPDK_CU_ASSERT_FATAL(g_bs == NULL);

	/*
	 * Set cluster size to lower than page size,
	 * to work it is required to be at least twice the blobstore page size.
	 */
	dev = init_dev();
	spdk_bs_opts_init(&opts, sizeof(opts));
	opts.cluster_sz = g_phys_blocklen - 1;

	/* Initialize a new blob store: sub-page cluster size is invalid. */
	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);
	SPDK_CU_ASSERT_FATAL(g_bs == NULL);

	/* Set cluster size to twice the default */
	dev = init_dev();
	spdk_bs_opts_init(&opts, sizeof(opts));
	opts.cluster_sz *= 2;
	cluster_sz = opts.cluster_sz;

	/* Initialize a new blob store */
	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz);

	/* Reload and verify the non-default cluster size was persisted. */
	ut_bs_reload(&bs, &opts);

	CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz);

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
}
3401 
3402 /*
3403  * Create a blobstore, reload it and ensure total usable cluster count
3404  *  stays the same.
3405  */
3406 static void
3407 bs_usable_clusters(void)
3408 {
3409 	struct spdk_blob_store *bs = g_bs;
3410 	struct spdk_blob *blob;
3411 	uint32_t clusters;
3412 	int i;
3413 
3414 
3415 	clusters = spdk_bs_total_data_cluster_count(bs);
3416 
3417 	ut_bs_reload(&bs, NULL);
3418 
3419 	CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters);
3420 
3421 	/* Create and resize blobs to make sure that usable cluster count won't change */
3422 	for (i = 0; i < 4; i++) {
3423 		g_bserrno = -1;
3424 		g_blobid = SPDK_BLOBID_INVALID;
3425 		blob = ut_blob_create_and_open(bs, NULL);
3426 
3427 		spdk_blob_resize(blob, 10, blob_op_complete, NULL);
3428 		poll_threads();
3429 		CU_ASSERT(g_bserrno == 0);
3430 
3431 		g_bserrno = -1;
3432 		spdk_blob_close(blob, blob_op_complete, NULL);
3433 		poll_threads();
3434 		CU_ASSERT(g_bserrno == 0);
3435 
3436 		CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters);
3437 	}
3438 
3439 	/* Reload the blob store to make sure that nothing changed */
3440 	ut_bs_reload(&bs, NULL);
3441 
3442 	CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters);
3443 }
3444 
3445 /*
3446  * Test resizing of the metadata blob.  This requires creating enough blobs
3447  *  so that one cluster is not enough to fit the metadata for those blobs.
3448  *  To induce this condition to happen more quickly, we reduce the cluster
3449  *  size to 16KB, which means only 4 4KB blob metadata pages can fit.
3450  */
static void
bs_resize_md(void)
{
	struct spdk_blob_store *bs;
	const int CLUSTER_PAGE_COUNT = 4;
	const int NUM_BLOBS = CLUSTER_PAGE_COUNT * 4;
	struct spdk_bs_dev *dev;
	struct spdk_bs_opts opts;
	struct spdk_blob *blob;
	struct spdk_blob_opts blob_opts;
	uint32_t cluster_sz;
	spdk_blob_id blobids[NUM_BLOBS];
	int i;


	dev = init_dev();
	spdk_bs_opts_init(&opts, sizeof(opts));
	/* Small clusters (4 md pages each) so the metadata blob fills quickly. */
	opts.cluster_sz = CLUSTER_PAGE_COUNT * g_phys_blocklen;
	cluster_sz = opts.cluster_sz;

	/* Initialize a new blob store */
	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz);

	ut_spdk_blob_opts_init(&blob_opts);

	/* Create more blobs than one metadata cluster can describe, forcing the
	 * metadata blob itself to be resized. */
	for (i = 0; i < NUM_BLOBS; i++) {
		g_bserrno = -1;
		g_blobid = SPDK_BLOBID_INVALID;
		spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(g_blobid !=  SPDK_BLOBID_INVALID);
		blobids[i] = g_blobid;
	}

	ut_bs_reload(&bs, &opts);

	CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz);

	/* Every blob must still be openable after the reload. */
	for (i = 0; i < NUM_BLOBS; i++) {
		g_bserrno = -1;
		g_blob = NULL;
		spdk_bs_open_blob(bs, blobids[i], blob_op_with_handle_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(g_blob !=  NULL);
		blob = g_blob;
		g_bserrno = -1;
		spdk_blob_close(blob, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
	}

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
}
3515 
3516 static void
3517 bs_destroy(void)
3518 {
3519 	struct spdk_blob_store *bs;
3520 	struct spdk_bs_dev *dev;
3521 
3522 	/* Initialize a new blob store */
3523 	dev = init_dev();
3524 	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
3525 	poll_threads();
3526 	CU_ASSERT(g_bserrno == 0);
3527 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3528 	bs = g_bs;
3529 
3530 	/* Destroy the blob store */
3531 	g_bserrno = -1;
3532 	spdk_bs_destroy(bs, bs_op_complete, NULL);
3533 	poll_threads();
3534 	CU_ASSERT(g_bserrno == 0);
3535 
3536 	/* Loading an non-existent blob store should fail. */
3537 	g_bs = NULL;
3538 	dev = init_dev();
3539 
3540 	g_bserrno = 0;
3541 	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
3542 	poll_threads();
3543 	CU_ASSERT(g_bserrno != 0);
3544 }
3545 
3546 /* Try to hit all of the corner cases associated with serializing
3547  * a blob to disk
3548  */
static void
blob_serialize_test(void)
{
	struct spdk_bs_dev *dev;
	struct spdk_bs_opts opts;
	struct spdk_blob_store *bs;
	spdk_blob_id blobid[2];
	struct spdk_blob *blob[2];
	uint64_t i;
	char *value;
	int rc;

	dev = init_dev();

	/* Initialize a new blobstore with very small clusters */
	spdk_bs_opts_init(&opts, sizeof(opts));
	opts.cluster_sz = dev->blocklen * 8;
	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	/* Create and open two blobs */
	for (i = 0; i < 2; i++) {
		blob[i] = ut_blob_create_and_open(bs, NULL);
		blobid[i] = spdk_blob_get_id(blob[i]);

		/* Set a fairly large xattr on both blobs to eat up
		 * metadata space
		 */
		value = calloc(dev->blocklen - 64, sizeof(char));
		SPDK_CU_ASSERT_FATAL(value != NULL);
		/* Fill only half the buffer; calloc already zeroed the rest. */
		memset(value, i, dev->blocklen / 2);
		rc = spdk_blob_set_xattr(blob[i], "name", value, dev->blocklen - 64);
		CU_ASSERT(rc == 0);
		free(value);
	}

	/* Resize the blobs, alternating 1 cluster at a time.
	 * This thwarts run length encoding and will cause spill
	 * over of the extents.
	 */
	for (i = 0; i < 6; i++) {
		spdk_blob_resize(blob[i % 2], (i / 2) + 1, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
	}

	/* Persist the metadata of both blobs. */
	for (i = 0; i < 2; i++) {
		spdk_blob_sync_md(blob[i], blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
	}

	/* Close the blobs */
	for (i = 0; i < 2; i++) {
		spdk_blob_close(blob[i], blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
	}

	ut_bs_reload(&bs, &opts);

	/* After reload, each blob must have kept its 3 clusters from the
	 * alternating resizes above. */
	for (i = 0; i < 2; i++) {
		blob[i] = NULL;

		spdk_bs_open_blob(bs, blobid[i], blob_op_with_handle_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(g_blob != NULL);
		blob[i] = g_blob;

		CU_ASSERT(spdk_blob_get_num_clusters(blob[i]) == 3);

		spdk_blob_close(blob[i], blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
	}

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
}
3634 
3635 static void
3636 blob_crc(void)
3637 {
3638 	struct spdk_blob_store *bs = g_bs;
3639 	struct spdk_blob *blob;
3640 	spdk_blob_id blobid;
3641 	uint32_t page_num;
3642 	int index;
3643 	struct spdk_blob_md_page *page;
3644 
3645 	blob = ut_blob_create_and_open(bs, NULL);
3646 	blobid = spdk_blob_get_id(blob);
3647 
3648 	spdk_blob_close(blob, blob_op_complete, NULL);
3649 	poll_threads();
3650 	CU_ASSERT(g_bserrno == 0);
3651 
3652 	page_num = bs_blobid_to_page(blobid);
3653 	index = g_phys_blocklen * (bs->md_start + page_num);
3654 	page = (struct spdk_blob_md_page *)&g_dev_buffer[index];
3655 	page->crc = 0;
3656 
3657 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
3658 	poll_threads();
3659 	CU_ASSERT(g_bserrno == -EINVAL);
3660 	CU_ASSERT(g_blob == NULL);
3661 	g_bserrno = 0;
3662 
3663 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
3664 	poll_threads();
3665 	CU_ASSERT(g_bserrno == -EINVAL);
3666 }
3667 
3668 static void
3669 super_block_crc(void)
3670 {
3671 	struct spdk_blob_store *bs;
3672 	struct spdk_bs_dev *dev;
3673 	struct spdk_bs_super_block *super_block;
3674 
3675 	dev = init_dev();
3676 	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
3677 	poll_threads();
3678 	CU_ASSERT(g_bserrno == 0);
3679 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3680 	bs = g_bs;
3681 
3682 	spdk_bs_unload(bs, bs_op_complete, NULL);
3683 	poll_threads();
3684 	CU_ASSERT(g_bserrno == 0);
3685 	g_bs = NULL;
3686 
3687 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
3688 	super_block->crc = 0;
3689 	dev = init_dev();
3690 
3691 	/* Load an existing blob store */
3692 	g_bserrno = 0;
3693 	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
3694 	poll_threads();
3695 	CU_ASSERT(g_bserrno == -EILSEQ);
3696 }
3697 
3698 /* For blob dirty shutdown test case we do the following sub-test cases:
3699  * 1 Initialize new blob store and create 1 super blob with some xattrs, then we
3700  *   dirty shutdown and reload the blob store and verify the xattrs.
3701  * 2 Resize the blob from 10 clusters to 20 clusters and then dirty shutdown,
3702  *   reload the blob store and verify the clusters number.
3703  * 3 Create the second blob and then dirty shutdown, reload the blob store
3704  *   and verify the second blob.
3705  * 4 Delete the second blob and then dirty shutdown, reload the blob store
3706  *   and verify the second blob is invalid.
3707  * 5 Create the second blob again and also create the third blob, modify the
3708  *   md of second blob which makes the md invalid, and then dirty shutdown,
3709  *   reload the blob store verify the second blob, it should invalid and also
3710  *   verify the third blob, it should correct.
3711  */
/* Exercise blobstore recovery after unclean shutdown: each sub-test mutates
 * state, reloads the store via ut_bs_dirty_load() (which skips the clean
 * unload path), and verifies the recovered state.  Uses the global test
 * singletons g_bs/g_blob/g_blobid/g_bserrno set by the completion callbacks.
 */
static void
blob_dirty_shutdown(void)
{
	int rc;
	int index;	/* byte offset of a md page within g_dev_buffer */
	struct spdk_blob_store *bs = g_bs;
	spdk_blob_id blobid1, blobid2, blobid3;
	struct spdk_blob *blob = g_blob;
	uint64_t length;
	uint64_t free_clusters;
	const void *value;
	size_t value_len;
	uint32_t page_num;
	struct spdk_blob_md_page *page;
	struct spdk_blob_opts blob_opts;

	/* Sub-test 1: super blob with xattrs survives a dirty shutdown. */

	/* Create first blob */
	blobid1 = spdk_blob_get_id(blob);

	/* Set some xattrs */
	rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
	CU_ASSERT(rc == 0);

	length = 2345;
	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
	CU_ASSERT(rc == 0);

	/* Put xattr that fits exactly single page.
	 * This results in adding additional pages to MD.
	 * First is flags and smaller xattr, second the large xattr,
	 * third are just the extents.
	 */
	size_t xattr_length = 4072 - sizeof(struct spdk_blob_md_descriptor_xattr) -
			      strlen("large_xattr");
	char *xattr = calloc(xattr_length, sizeof(char));
	SPDK_CU_ASSERT_FATAL(xattr != NULL);
	rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
	free(xattr);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	/* Resize the blob */
	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Set the blob as the super blob */
	spdk_bs_set_super(bs, blobid1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Snapshot the free-cluster count so recovery accounting can be checked. */
	free_clusters = spdk_bs_free_cluster_count(bs);

	/* Closing the blob persists its md; only the store itself shuts down dirty. */
	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	blob = NULL;
	g_blob = NULL;
	g_blobid = SPDK_BLOBID_INVALID;

	ut_bs_dirty_load(&bs, NULL);

	/* Get the super blob */
	spdk_bs_get_super(bs, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(blobid1 == g_blobid);

	spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;

	/* Recovery must rebuild the used-cluster map to the same count. */
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));

	/* Get the xattrs */
	value = NULL;
	rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(*(uint64_t *)value == length);
	CU_ASSERT(value_len == 8);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);

	/* Sub-test 2: resize 10 -> 20 clusters, then dirty shutdown. */

	/* Resize the blob */
	spdk_blob_resize(blob, 20, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	free_clusters = spdk_bs_free_cluster_count(bs);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	blob = NULL;
	g_blob = NULL;
	g_blobid = SPDK_BLOBID_INVALID;

	ut_bs_dirty_load(&bs, NULL);

	spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 20);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	blob = NULL;
	g_blob = NULL;
	g_blobid = SPDK_BLOBID_INVALID;

	/* Sub-test 3: a newly created blob survives a dirty shutdown. */

	/* Create second blob */
	blob = ut_blob_create_and_open(bs, NULL);
	blobid2 = spdk_blob_get_id(blob);

	/* Set some xattrs */
	rc = spdk_blob_set_xattr(blob, "name", "log1.txt", strlen("log1.txt") + 1);
	CU_ASSERT(rc == 0);

	length = 5432;
	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
	CU_ASSERT(rc == 0);

	/* Resize the blob */
	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	free_clusters = spdk_bs_free_cluster_count(bs);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	blob = NULL;
	g_blob = NULL;
	g_blobid = SPDK_BLOBID_INVALID;

	ut_bs_dirty_load(&bs, NULL);

	spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;

	/* Get the xattrs */
	value = NULL;
	rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(*(uint64_t *)value == length);
	CU_ASSERT(value_len == 8);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));

	/* Sub-test 4: a deleted blob stays deleted across a dirty shutdown. */

	ut_blob_close_and_delete(bs, blob);

	free_clusters = spdk_bs_free_cluster_count(bs);

	ut_bs_dirty_load(&bs, NULL);

	/* Opening the deleted blob must fail after recovery. */
	spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno != 0);
	CU_ASSERT(g_blob == NULL);

	/* The first blob is untouched and must still open cleanly. */
	spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Sub-test 5: corrupt one blob's md on disk; recovery drops only that blob. */

	ut_bs_reload(&bs, NULL);

	/* Create second blob */
	ut_spdk_blob_opts_init(&blob_opts);
	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid2 = g_blobid;

	/* Create third blob */
	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid3 = g_blobid;

	spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;

	/* Set some xattrs for second blob */
	rc = spdk_blob_set_xattr(blob, "name", "log1.txt", strlen("log1.txt") + 1);
	CU_ASSERT(rc == 0);

	length = 5432;
	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
	CU_ASSERT(rc == 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	blob = NULL;
	g_blob = NULL;
	g_blobid = SPDK_BLOBID_INVALID;

	spdk_bs_open_blob(bs, blobid3, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;

	/* Set some xattrs for third blob */
	rc = spdk_blob_set_xattr(blob, "name", "log2.txt", strlen("log2.txt") + 1);
	CU_ASSERT(rc == 0);

	length = 5432;
	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
	CU_ASSERT(rc == 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	blob = NULL;
	g_blob = NULL;
	g_blobid = SPDK_BLOBID_INVALID;

	/* Mark second blob as invalid: bump the on-disk md page's sequence number
	 * (leaving its chain inconsistent) and re-seal it with a valid CRC so the
	 * corruption is caught by md parsing rather than the CRC check. */
	page_num = bs_blobid_to_page(blobid2);

	index = g_phys_blocklen * (bs->md_start + page_num);
	page = (struct spdk_blob_md_page *)&g_dev_buffer[index];
	page->sequence_num = 1;
	page->crc = blob_md_page_calc_crc(page);

	free_clusters = spdk_bs_free_cluster_count(bs);

	ut_bs_dirty_load(&bs, NULL);

	/* Corrupted second blob must fail to open... */
	spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno != 0);
	CU_ASSERT(g_blob == NULL);

	/* ...while the intact third blob opens fine. */
	spdk_bs_open_blob(bs, blobid3, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;

	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
}
3976 
/* Verify handling of unknown md flags across a reload: an unknown
 * invalid_flags bit makes the blob unopenable, an unknown data_ro_flags bit
 * forces the blob fully read-only, and an unknown md_ro_flags bit makes only
 * the metadata read-only.  The flags are injected by writing the in-memory
 * blob structs directly and syncing the md. */
static void
blob_flags(void)
{
	struct spdk_blob_store *bs = g_bs;
	spdk_blob_id blobid_invalid, blobid_data_ro, blobid_md_ro;
	struct spdk_blob *blob_invalid, *blob_data_ro, *blob_md_ro;
	struct spdk_blob_opts blob_opts;
	int rc;

	/* Create three blobs - one each for testing invalid, data_ro and md_ro flags. */
	blob_invalid = ut_blob_create_and_open(bs, NULL);
	blobid_invalid = spdk_blob_get_id(blob_invalid);

	blob_data_ro = ut_blob_create_and_open(bs, NULL);
	blobid_data_ro = spdk_blob_get_id(blob_data_ro);

	/* clear_method is stored in md_ro_flags, so this blob starts with a known bit set. */
	ut_spdk_blob_opts_init(&blob_opts);
	blob_opts.clear_method = BLOB_CLEAR_WITH_WRITE_ZEROES;
	blob_md_ro = ut_blob_create_and_open(bs, &blob_opts);
	blobid_md_ro = spdk_blob_get_id(blob_md_ro);
	CU_ASSERT((blob_md_ro->md_ro_flags & SPDK_BLOB_MD_RO_FLAGS_MASK) == BLOB_CLEAR_WITH_WRITE_ZEROES);

	/* Change the size of blob_data_ro to check if flags are serialized
	 * when blob has non zero number of extents */
	spdk_blob_resize(blob_data_ro, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Set the xattr to check if flags are serialized
	 * when blob has non zero number of xattrs */
	rc = spdk_blob_set_xattr(blob_md_ro, "name", "log.txt", strlen("log.txt") + 1);
	CU_ASSERT(rc == 0);

	/* Inject flag bits no current code understands (high bits), then mark the
	 * blobs dirty so the next sync writes the flags to disk. */
	blob_invalid->invalid_flags = (1ULL << 63);
	blob_invalid->state = SPDK_BLOB_STATE_DIRTY;
	blob_data_ro->data_ro_flags = (1ULL << 62);
	blob_data_ro->state = SPDK_BLOB_STATE_DIRTY;
	blob_md_ro->md_ro_flags = (1ULL << 61);
	blob_md_ro->state = SPDK_BLOB_STATE_DIRTY;

	g_bserrno = -1;
	spdk_blob_sync_md(blob_invalid, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bserrno = -1;
	spdk_blob_sync_md(blob_data_ro, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bserrno = -1;
	spdk_blob_sync_md(blob_md_ro, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	g_bserrno = -1;
	spdk_blob_close(blob_invalid, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	blob_invalid = NULL;
	g_bserrno = -1;
	spdk_blob_close(blob_data_ro, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	blob_data_ro = NULL;
	g_bserrno = -1;
	spdk_blob_close(blob_md_ro, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	blob_md_ro = NULL;

	g_blob = NULL;
	g_blobid = SPDK_BLOBID_INVALID;

	/* Clean reload so the flags are re-parsed from disk. */
	ut_bs_reload(&bs, NULL);

	/* Unknown invalid_flags bit: blob must refuse to open at all. */
	g_blob = NULL;
	g_bserrno = 0;
	spdk_bs_open_blob(bs, blobid_invalid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno != 0);
	CU_ASSERT(g_blob == NULL);

	g_blob = NULL;
	g_bserrno = -1;
	spdk_bs_open_blob(bs, blobid_data_ro, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob_data_ro = g_blob;
	/* If an unknown data_ro flag was found, the blob should be marked both data and md read-only. */
	CU_ASSERT(blob_data_ro->data_ro == true);
	CU_ASSERT(blob_data_ro->md_ro == true);
	CU_ASSERT(spdk_blob_get_num_clusters(blob_data_ro) == 10);

	/* Unknown md_ro flag: metadata read-only, data still writable. */
	g_blob = NULL;
	g_bserrno = -1;
	spdk_bs_open_blob(bs, blobid_md_ro, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob_md_ro = g_blob;
	CU_ASSERT(blob_md_ro->data_ro == false);
	CU_ASSERT(blob_md_ro->md_ro == true);

	/* Syncing md of an md_ro blob must succeed (no-op, nothing dirty). */
	g_bserrno = -1;
	spdk_blob_sync_md(blob_md_ro, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	ut_blob_close_and_delete(bs, blob_data_ro);
	ut_blob_close_and_delete(bs, blob_md_ro);
}
4088 
/* Verify that loading a blobstore whose superblock carries an older on-disk
 * version (2, which predates the used-blobid mask) works, that blob IDs are
 * reconstructed without the mask, and that load/unload cycles never silently
 * upgrade the on-disk version.  The superblock is edited directly through
 * g_dev_buffer, the mocked disk backing store. */
static void
bs_version(void)
{
	struct spdk_bs_super_block *super;
	struct spdk_blob_store *bs = g_bs;
	struct spdk_bs_dev *dev;
	struct spdk_blob *blob;
	struct spdk_blob_opts blob_opts;
	spdk_blob_id blobid;

	/* Unload the blob store */
	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;

	/*
	 * Change the bs version on disk.  This will allow us to
	 *  test that the version does not get modified automatically
	 *  when loading and unloading the blobstore.
	 */
	super = (struct spdk_bs_super_block *)&g_dev_buffer[0];
	CU_ASSERT(super->version == SPDK_BS_VERSION);
	CU_ASSERT(super->clean == 1);
	super->version = 2;
	/*
	 * Version 2 metadata does not have a used blobid mask, so clear
	 *  those fields in the super block and zero the corresponding
	 *  region on "disk".  We will use this to ensure blob IDs are
	 *  correctly reconstructed.
	 */
	memset(&g_dev_buffer[super->used_blobid_mask_start * SPDK_BS_PAGE_SIZE], 0,
	       super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE);
	super->used_blobid_mask_start = 0;
	super->used_blobid_mask_len = 0;
	/* Re-seal the superblock; load would reject a bad CRC. */
	super->crc = blob_md_page_calc_crc(super);

	/* Load an existing blob store */
	dev = init_dev();
	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	CU_ASSERT(super->clean == 1);
	bs = g_bs;

	/*
	 * Create a blob - just to make sure that when we unload it
	 *  results in writing the super block (since metadata pages
	 *  were allocated.
	 */
	ut_spdk_blob_opts_init(&blob_opts);
	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	/* Unload the blob store */
	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
	/* Unload rewrote the superblock but must preserve version 2 semantics. */
	CU_ASSERT(super->version == 2);
	CU_ASSERT(super->used_blobid_mask_start == 0);
	CU_ASSERT(super->used_blobid_mask_len == 0);

	dev = init_dev();
	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	/* The blob id must have been reconstructed from md pages, not the mask. */
	g_blob = NULL;
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;

	ut_blob_close_and_delete(bs, blob);

	CU_ASSERT(super->version == 2);
	CU_ASSERT(super->used_blobid_mask_start == 0);
	CU_ASSERT(super->used_blobid_mask_len == 0);
}
4176 
/* Verify xattrs supplied at blob creation time (opts.xattrs with a get_value
 * callback), lookup of a missing xattr, rejection of an xattr too large for a
 * single md page, and the -EINVAL error paths for a NULL get_value callback
 * and for a callback that returns NULL values. */
static void
blob_set_xattrs_test(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_blob_opts opts;
	const void *value;
	size_t value_len;
	char *xattr;
	size_t xattr_length;
	int rc;

	/* Create blob with extra attributes */
	ut_spdk_blob_opts_init(&opts);

	/* g_xattr_names/g_xattr_values are file-level fixtures; _get_xattr_value
	 * maps name[i] -> value[i] using g_ctx as the callback context. */
	opts.xattrs.names = g_xattr_names;
	opts.xattrs.get_value = _get_xattr_value;
	opts.xattrs.count = 3;
	opts.xattrs.ctx = &g_ctx;

	blob = ut_blob_create_and_open(bs, &opts);

	/* Get the xattrs */
	value = NULL;

	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);

	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);

	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);

	/* Try to get non existing attribute */

	rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len);
	CU_ASSERT(rc == -ENOENT);

	/* Try xattr exceeding maximum length of descriptor in single page */
	xattr_length = SPDK_BS_MAX_DESC_SIZE - sizeof(struct spdk_blob_md_descriptor_xattr) -
		       strlen("large_xattr") + 1;
	xattr = calloc(xattr_length, sizeof(char));
	SPDK_CU_ASSERT_FATAL(xattr != NULL);
	rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
	free(xattr);
	SPDK_CU_ASSERT_FATAL(rc == -ENOMEM);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	blob = NULL;
	g_blob = NULL;
	g_blobid = SPDK_BLOBID_INVALID;

	/* NULL callback */
	ut_spdk_blob_opts_init(&opts);
	opts.xattrs.names = g_xattr_names;
	opts.xattrs.get_value = NULL;
	opts.xattrs.count = 1;
	opts.xattrs.ctx = &g_ctx;

	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);
	/* NOTE(review): the failed create still invokes the completion callback,
	 * which overwrites g_blobid (previously reset to SPDK_BLOBID_INVALID
	 * above) — presumably with 0 from the error path; confirm against
	 * bs_create_blob()'s failure handling. */
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);

	/* NULL values */
	ut_spdk_blob_opts_init(&opts);
	opts.xattrs.names = g_xattr_names;
	opts.xattrs.get_value = _get_xattr_value_null;
	opts.xattrs.count = 1;
	opts.xattrs.ctx = NULL;

	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);
}
4264 
/* Verify that resizing a thin-provisioned blob (grow, grow large, shrink)
 * never allocates clusters, that md sync does not change allocation, and that
 * the logical size persists across a clean reload. */
static void
blob_thin_prov_alloc(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_blob_opts opts;
	spdk_blob_id blobid;
	uint64_t free_clusters;

	/* Baseline: no clusters consumed by anything in this test. */
	free_clusters = spdk_bs_free_cluster_count(bs);

	/* Set blob as thin provisioned */
	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = true;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));

	CU_ASSERT(blob->active.num_clusters == 0);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);

	/* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */
	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(blob->active.num_clusters == 5);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);

	/* Grow it to 1TB - still unallocated */
	spdk_blob_resize(blob, 262144, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(blob->active.num_clusters == 262144);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 262144);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);

	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	/* Sync must not change anything */
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(blob->active.num_clusters == 262144);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 262144);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);
	/* Since clusters are not allocated,
	 * number of metadata pages is expected to be minimal.
	 */
	CU_ASSERT(blob->active.num_pages == 1);

	/* Shrink the blob to 3 clusters - still unallocated */
	spdk_blob_resize(blob, 3, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(blob->active.num_clusters == 3);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 3);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);

	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	/* Sync must not change anything */
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(blob->active.num_clusters == 3);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 3);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Clean reload: the persisted size must match the last synced resize. */
	ut_bs_reload(&bs, NULL);

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;

	/* Check that clusters allocation and size is still the same */
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(blob->active.num_clusters == 3);

	ut_blob_close_and_delete(bs, blob);
}
4355 
/* Drive the internal cluster-insertion path directly: allocate a cluster for
 * a thin-provisioned blob via bs_allocate_cluster() (as the copy-on-write
 * path would), then insert it into the blob's cluster map on the md thread
 * with blob_insert_cluster_on_md_thread(), and verify the mapping persists
 * across a clean reload. */
static void
blob_insert_cluster_msg_test(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_blob_opts opts;
	/* For now, even if md_page_size is > 4KB, we still only use the first
	 * 4KB of it. The rest is left unused. Future changes may allow using the
	 * rest of the md_page, but that will require more extensive changes since
	 * then the struct spdk_blob_md_page cannot be used directly (since some
	 * fields such as crc would have variable placement in the struct).
	 */
	struct {
		struct spdk_blob_md_page page;
		uint8_t pad[DEV_MAX_PHYS_BLOCKLEN - sizeof(struct spdk_blob_md_page)];
	} md = {};
	spdk_blob_id blobid;
	uint64_t free_clusters;
	uint64_t new_cluster = 0;
	uint32_t cluster_num = 3;	/* logical cluster index to populate */
	uint32_t extent_page = 0;
	
	free_clusters = spdk_bs_free_cluster_count(bs);

	/* Set blob as thin provisioned */
	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = true;
	opts.num_clusters = 4;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));

	CU_ASSERT(blob->active.num_clusters == 4);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 4);
	CU_ASSERT(blob->active.clusters[cluster_num] == 0);

	/* Specify cluster_num to allocate and new_cluster will be returned to insert on md_thread.
	 * This is to simulate behaviour when cluster is allocated after blob creation.
	 * Such as _spdk_bs_allocate_and_copy_cluster(). */
	spdk_spin_lock(&bs->used_lock);
	bs_allocate_cluster(blob, cluster_num, &new_cluster, &extent_page, false);
	/* Allocation alone must not touch the blob's cluster map yet. */
	CU_ASSERT(blob->active.clusters[cluster_num] == 0);
	spdk_spin_unlock(&bs->used_lock);

	blob_insert_cluster_on_md_thread(blob, cluster_num, new_cluster, extent_page, &md.page,
					 blob_op_complete, NULL);
	poll_threads();

	CU_ASSERT(blob->active.clusters[cluster_num] != 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	ut_bs_reload(&bs, NULL);

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;

	/* The inserted cluster mapping must have been persisted by the insert. */
	CU_ASSERT(blob->active.clusters[cluster_num] != 0);

	ut_blob_close_and_delete(bs, blob);
}
4423 
/* Thin-provisioned read/write test: reads from unallocated clusters return
 * zeroes, a write triggers on-demand cluster allocation, and two threads
 * racing to allocate the same cluster resolve to a single allocation (the
 * loser's cluster is released).  Also checks the exact device byte counts
 * generated by the allocating write. */
static void
blob_thin_prov_rw(void)
{
	static const uint8_t zero[10 * BLOCKLEN] = { 0 };
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob, *blob_id0;
	struct spdk_io_channel *channel, *channel_thread1;
	struct spdk_blob_opts opts;
	uint64_t free_clusters;
	uint64_t io_unit_size;
	uint8_t payload_read[10 * BLOCKLEN];
	uint8_t payload_write[10 * BLOCKLEN];
	uint64_t write_bytes;	/* g_dev_write_bytes snapshot for delta checks */
	uint64_t read_bytes;	/* g_dev_read_bytes snapshot for delta checks */
	uint64_t expected_bytes;

	free_clusters = spdk_bs_free_cluster_count(bs);
	io_unit_size = spdk_bs_get_io_unit_size(bs);

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = true;

	/* Create and delete blob at md page 0, so that next md page allocation
	 * for extent will use that. */
	blob_id0 = ut_blob_create_and_open(bs, &opts);
	blob = ut_blob_create_and_open(bs, &opts);
	ut_blob_close_and_delete(bs, blob_id0);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));

	CU_ASSERT(blob->active.num_clusters == 0);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);

	/* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */
	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(blob->active.num_clusters == 5);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);

	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	/* Sync must not change anything */
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(blob->active.num_clusters == 5);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);

	/* Payload should be all zeros from unallocated clusters */
	memset(payload_read, 0xFF, sizeof(payload_read));
	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(zero, payload_read, 10 * BLOCKLEN) == 0);

	write_bytes = g_dev_write_bytes;
	read_bytes = g_dev_read_bytes;

	/* Perform write on thread 1. That will allocate cluster on thread 0 via send_msg */
	set_thread(1);
	channel_thread1 = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel_thread1 != NULL);
	memset(payload_write, 0xE5, sizeof(payload_write));
	spdk_blob_io_write(blob, channel_thread1, payload_write, 4, 10, blob_op_complete, NULL);
	/* Both in-flight writes have tentatively claimed a cluster... */
	CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs));
	/* Perform write on thread 0. That will try to allocate cluster,
	 * but fail due to another thread issuing the cluster allocation first. */
	set_thread(0);
	memset(payload_write, 0xE5, sizeof(payload_write));
	spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
	CU_ASSERT(free_clusters - 2 == spdk_bs_free_cluster_count(bs));
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	/* ...but after completion only one allocation survives; the loser's
	 * cluster is returned to the free pool. */
	CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 1);
	/* For thin-provisioned blob we need to write 20 io_units plus one page metadata and
	 * read 0 bytes */
	expected_bytes = 20 * io_unit_size + spdk_bs_get_page_size(bs);
	if (g_use_extent_table) {
		/* Add one more page for EXTENT_PAGE write */
		expected_bytes += spdk_bs_get_page_size(bs);
	}
	CU_ASSERT(g_dev_write_bytes - write_bytes == expected_bytes);
	CU_ASSERT(g_dev_read_bytes - read_bytes == 0);

	/* Read back and confirm the written pattern landed in the new cluster. */
	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_write, payload_read, 10 * BLOCKLEN) == 0);

	ut_blob_close_and_delete(bs, blob);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));

	/* Channels must be freed on the thread that allocated them. */
	set_thread(1);
	spdk_bs_free_io_channel(channel_thread1);
	set_thread(0);
	spdk_bs_free_io_channel(channel);
	poll_threads();
	g_blob = NULL;
	g_blobid = 0;
}
4528 
4529 static void
4530 blob_thin_prov_write_count_io(void)
4531 {
4532 	struct spdk_blob_store *bs;
4533 	struct spdk_blob *blob;
4534 	struct spdk_io_channel *ch;
4535 	struct spdk_bs_dev *dev;
4536 	struct spdk_bs_opts bs_opts;
4537 	struct spdk_blob_opts opts;
4538 	uint64_t free_clusters;
4539 	uint64_t io_unit_size;
4540 	uint8_t payload_write[BLOCKLEN];
4541 	uint64_t write_bytes;
4542 	uint64_t read_bytes;
4543 	uint64_t expected_bytes;
4544 	const uint32_t CLUSTER_SZ = g_phys_blocklen * 4;
4545 	uint32_t io_units_per_cluster;
4546 	uint32_t io_units_per_extent_page;
4547 	uint32_t i;
4548 
4549 	/* Use a very small cluster size for this test.  This ensures we need multiple
4550 	 * extent pages to hold all of the clusters even for relatively small blobs like
4551 	 * we are restricted to for the unit tests (i.e. we don't want to allocate multi-GB
4552 	 * buffers).
4553 	 */
4554 	dev = init_dev();
4555 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
4556 	bs_opts.cluster_sz = CLUSTER_SZ;
4557 
4558 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
4559 	poll_threads();
4560 	CU_ASSERT(g_bserrno == 0);
4561 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
4562 	bs = g_bs;
4563 
4564 	free_clusters = spdk_bs_free_cluster_count(bs);
4565 	io_unit_size = spdk_bs_get_io_unit_size(bs);
4566 	io_units_per_cluster = CLUSTER_SZ / io_unit_size;
4567 	io_units_per_extent_page = SPDK_EXTENTS_PER_EP * io_units_per_cluster;
4568 
4569 	ch = spdk_bs_alloc_io_channel(bs);
4570 	SPDK_CU_ASSERT_FATAL(ch != NULL);
4571 
4572 	ut_spdk_blob_opts_init(&opts);
4573 	opts.thin_provision = true;
4574 
4575 	blob = ut_blob_create_and_open(bs, &opts);
4576 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4577 
4578 	/* Resize the blob so that it will require 8 extent pages to hold all of
4579 	 * the clusters.
4580 	 */
4581 	g_bserrno = -1;
4582 	spdk_blob_resize(blob, SPDK_EXTENTS_PER_EP * 8, blob_op_complete, NULL);
4583 	poll_threads();
4584 	CU_ASSERT(g_bserrno == 0);
4585 
4586 	g_bserrno = -1;
4587 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
4588 	poll_threads();
4589 	CU_ASSERT(g_bserrno == 0);
4590 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4591 	CU_ASSERT(blob->active.num_clusters == SPDK_EXTENTS_PER_EP * 8);
4592 
4593 	memset(payload_write, 0, sizeof(payload_write));
4594 	for (i = 0; i < 8; i++) {
4595 		write_bytes = g_dev_write_bytes;
4596 		read_bytes = g_dev_read_bytes;
4597 
4598 		g_bserrno = -1;
4599 		spdk_blob_io_write(blob, ch, payload_write, io_units_per_extent_page * i, 1, blob_op_complete,
4600 				   NULL);
4601 		poll_threads();
4602 		CU_ASSERT(g_bserrno == 0);
4603 		CU_ASSERT(free_clusters - (2 * i + 1) == spdk_bs_free_cluster_count(bs));
4604 
4605 		CU_ASSERT(g_dev_read_bytes == read_bytes);
4606 		if (!g_use_extent_table) {
4607 			/* For legacy metadata, we should have written the io_unit for
4608 			 * the write I/O, plus the blob's primary metadata page
4609 			 */
4610 			expected_bytes = io_unit_size + spdk_bs_get_page_size(bs);
4611 		} else {
4612 			/* For extent table metadata, we should have written the io_unit for
4613 			 * the write I/O, plus 2 metadata pages - the extent page and the
4614 			 * blob's primary metadata page
4615 			 */
4616 			expected_bytes = io_unit_size + 2 * spdk_bs_get_page_size(bs);
4617 		}
4618 		CU_ASSERT((g_dev_write_bytes - write_bytes) == expected_bytes);
4619 
4620 		/* The write should have synced the metadata already.  Do another sync here
4621 		 * just to confirm.
4622 		 */
4623 		write_bytes = g_dev_write_bytes;
4624 		read_bytes = g_dev_read_bytes;
4625 
4626 		g_bserrno = -1;
4627 		spdk_blob_sync_md(blob, blob_op_complete, NULL);
4628 		poll_threads();
4629 		CU_ASSERT(g_bserrno == 0);
4630 		CU_ASSERT(free_clusters - (2 * i + 1) == spdk_bs_free_cluster_count(bs));
4631 		CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 2 * i + 1);
4632 
4633 		CU_ASSERT(g_dev_read_bytes == read_bytes);
4634 		CU_ASSERT(g_dev_write_bytes == write_bytes);
4635 
4636 		/* Now write to another unallocated cluster that is part of the same extent page. */
4637 		g_bserrno = -1;
4638 		spdk_blob_io_write(blob, ch, payload_write, io_units_per_extent_page * i + io_units_per_cluster,
4639 				   1, blob_op_complete, NULL);
4640 		poll_threads();
4641 		CU_ASSERT(g_bserrno == 0);
4642 		CU_ASSERT(free_clusters - (2 * i + 2) == spdk_bs_free_cluster_count(bs));
4643 		CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 2 * i + 2);
4644 
4645 		CU_ASSERT(g_dev_read_bytes == read_bytes);
4646 		/*
4647 		 * For legacy metadata, we should have written the I/O and the primary metadata page.
4648 		 * For extent table metadata, we should have written the I/O and the extent metadata page.
4649 		 */
4650 		expected_bytes = io_unit_size + spdk_bs_get_page_size(bs);
4651 		CU_ASSERT((g_dev_write_bytes - write_bytes) == expected_bytes);
4652 
4653 		/* Send unmap aligned to the whole cluster - should free it up */
4654 		g_bserrno = -1;
4655 		spdk_blob_io_unmap(blob, ch, io_units_per_extent_page * i, io_units_per_cluster, blob_op_complete,
4656 				   NULL);
4657 		poll_threads();
4658 		CU_ASSERT(g_bserrno == 0);
4659 		CU_ASSERT(free_clusters - (2 * i + 1) == spdk_bs_free_cluster_count(bs));
4660 
4661 		/* Write back to the freed cluster */
4662 		g_bserrno = -1;
4663 		spdk_blob_io_write(blob, ch, payload_write, io_units_per_extent_page * i, 1, blob_op_complete,
4664 				   NULL);
4665 		poll_threads();
4666 		CU_ASSERT(g_bserrno == 0);
4667 		CU_ASSERT(free_clusters - (2 * i + 2) == spdk_bs_free_cluster_count(bs));
4668 	}
4669 
4670 	ut_blob_close_and_delete(bs, blob);
4671 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4672 
4673 	spdk_bs_free_io_channel(ch);
4674 	poll_threads();
4675 	g_blob = NULL;
4676 	g_blobid = 0;
4677 
4678 	spdk_bs_unload(bs, bs_op_complete, NULL);
4679 	poll_threads();
4680 	CU_ASSERT(g_bserrno == 0);
4681 	g_bs = NULL;
4682 }
4683 
/*
 * Exercise whole-cluster unmap on a thin-provisioned blob, using an
 * oversized cluster (device capacity split into CLUSTER_COUNT + 1 parts)
 * so the blobstore holds exactly CLUSTER_COUNT data clusters.  Covers:
 * cluster release on aligned and unaligned unmaps, unmap racing with a
 * write, cluster release timing vs. the device-level unmap, concurrent
 * unmaps to one cluster (regression for issue #3358), and unmap on a
 * clone backed by a snapshot (cluster must NOT be released).
 */
static void
blob_thin_prov_unmap_cluster(void)
{
	struct spdk_blob_store *bs;
	struct spdk_blob *blob, *snapshot;
	struct spdk_io_channel *ch;
	struct spdk_bs_dev *dev;
	struct spdk_bs_opts bs_opts;
	struct spdk_blob_opts opts;
	uint64_t free_clusters;
	uint64_t io_unit_size;
	uint8_t payload_write[BLOCKLEN];
	uint8_t payload_read[BLOCKLEN];
	const uint32_t CLUSTER_COUNT = 3;
	uint32_t io_units_per_cluster;
	spdk_blob_id blobid, snapshotid;
	uint32_t i;
	int err;

	/* Use a very large cluster size for this test. Check how the unmap/release cluster code path behaves when
	 * clusters are fully used.
	 */
	dev = init_dev();
	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
	bs_opts.cluster_sz = dev->blocklen * dev->blockcnt / (CLUSTER_COUNT + 1);

	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	free_clusters = spdk_bs_free_cluster_count(bs);
	io_unit_size = spdk_bs_get_io_unit_size(bs);
	io_units_per_cluster = bs_opts.cluster_sz / io_unit_size;

	ch = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = true;

	blob = ut_blob_create_and_open(bs, &opts);
	CU_ASSERT(free_clusters == CLUSTER_COUNT);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
	blobid = spdk_blob_get_id(blob);

	g_bserrno = -1;
	spdk_blob_resize(blob, CLUSTER_COUNT, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Resize of a thin blob reserves no clusters; sync md to persist the size. */
	g_bserrno = -1;
	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(blob->active.num_clusters == CLUSTER_COUNT);

	/* Fill all clusters */
	for (i = 0; i < CLUSTER_COUNT; i++) {
		memset(payload_write, i + 1, sizeof(payload_write));
		g_bserrno = -1;
		spdk_blob_io_write(blob, ch, payload_write, io_units_per_cluster * i, 1, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(free_clusters - (i + 1) == spdk_bs_free_cluster_count(bs));
	}
	CU_ASSERT(0 == spdk_bs_free_cluster_count(bs));

	/* Unmap one whole cluster */
	g_bserrno = -1;
	spdk_blob_io_unmap(blob, ch, io_units_per_cluster, io_units_per_cluster, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(1 == spdk_bs_free_cluster_count(bs));

	/* Verify the data read from the cluster is zeroed out */
	memset(payload_write, 0, sizeof(payload_write));
	spdk_blob_io_read(blob, ch, payload_read, io_units_per_cluster, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_write, payload_read, BLOCKLEN) == 0);

	/* Fill the same cluster with data */
	memset(payload_write, 3, sizeof(payload_write));
	g_bserrno = -1;
	spdk_blob_io_write(blob, ch, payload_write, io_units_per_cluster, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(0 == spdk_bs_free_cluster_count(bs));

	/* Verify the data read from the cluster has the expected data */
	spdk_blob_io_read(blob, ch, payload_read, io_units_per_cluster, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_write, payload_read, BLOCKLEN) == 0);

	/* Send an unaligned unmap that encompasses one whole cluster:
	 * [last io unit of cluster 0 .. first io unit of cluster 2].
	 * Only the fully-covered middle cluster may be released.
	 */
	g_bserrno = -1;
	spdk_blob_io_unmap(blob, ch, io_units_per_cluster - 1, io_units_per_cluster + 2, blob_op_complete,
			   NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(1 == spdk_bs_free_cluster_count(bs));

	/* Verify the data read from the cluster is zeroed out */
	g_bserrno = -1;
	memset(payload_write, 0, sizeof(payload_write));
	spdk_blob_io_read(blob, ch, payload_read, io_units_per_cluster, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_write, payload_read, BLOCKLEN) == 0);

	/* Send a simultaneous unmap with a write to an unallocated area -
	 * check that writes don't claim the currently unmapped cluster */
	g_bserrno = -1;
	memset(payload_write, 7, sizeof(payload_write));
	spdk_blob_io_unmap(blob, ch, 0, io_units_per_cluster, blob_op_complete, NULL);
	spdk_blob_io_write(blob, ch, payload_write, io_units_per_cluster, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(1 == spdk_bs_free_cluster_count(bs));

	/* Verify the contents of written sector */
	g_bserrno = -1;
	spdk_blob_io_read(blob, ch, payload_read, io_units_per_cluster, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_write, payload_read, BLOCKLEN) == 0);

	/* Verify the contents of unmapped sector */
	g_bserrno = -1;
	memset(payload_write, 0, sizeof(payload_write));
	spdk_blob_io_read(blob, ch, payload_read, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_write, payload_read, BLOCKLEN) == 0);

	/* Make sure clusters are not freed until the unmap to the drive is done */
	g_bserrno = -1;
	memset(payload_write, 7, sizeof(payload_write));
	spdk_blob_io_write(blob, ch, payload_write, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(0 == spdk_bs_free_cluster_count(bs));

	/* Step the unmap one poll at a time: while the device buffer still holds
	 * the written pattern the device-level unmap has not completed, so the
	 * cluster must not have been returned to the free pool yet.
	 */
	g_bserrno = -1;
	spdk_blob_io_unmap(blob, ch, 0, io_units_per_cluster, blob_op_complete, NULL);
	while (memcmp(payload_write, &g_dev_buffer[BLOCKLEN * io_units_per_cluster], BLOCKLEN) == 0) {
		CU_ASSERT(0 == spdk_bs_free_cluster_count(bs));
		poll_thread_times(0, 1);
	}
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(1 == spdk_bs_free_cluster_count(bs));

	/* Issue #3358 had a bug with concurrent trims to the same cluster causing an assert, check for regressions.
	 * Send three concurrent unmaps to the same cluster.
	 */
	g_bserrno = -1;
	memset(payload_write, 7, sizeof(payload_write));
	spdk_blob_io_write(blob, ch, payload_write, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(0 == spdk_bs_free_cluster_count(bs));

	/* blob_op_complete stores bserrno through its cb_arg when it is non-NULL,
	 * so err captures the status of the third unmap independently of g_bserrno.
	 */
	g_bserrno = -1;
	err = -1;
	spdk_blob_io_unmap(blob, ch, 0, io_units_per_cluster, blob_op_complete, NULL);
	spdk_blob_io_unmap(blob, ch, 0, io_units_per_cluster, blob_op_complete, NULL);
	spdk_blob_io_unmap(blob, ch, 0, io_units_per_cluster, blob_op_complete, &err);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(err == 0);
	CU_ASSERT(1 == spdk_bs_free_cluster_count(bs));

	/* Test thin-provisioned blob that is backed */
	spdk_blob_resize(blob, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));

	g_bserrno = -1;
	memset(payload_write, 1, sizeof(payload_write));
	spdk_blob_io_write(blob, ch, payload_write, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs));

	/* Create a snapshot */
	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 0);
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
	snapshotid = g_blobid;
	CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs));
	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot = g_blob;

	/* Write data to blob, it will alloc new cluster */
	g_bserrno = -1;
	memset(payload_write, 2, sizeof(payload_write));
	spdk_blob_io_write(blob, ch, payload_write, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(free_clusters - 2 == spdk_bs_free_cluster_count(bs));

	/* Unmap one whole cluster, but do not release this cluster */
	g_bserrno = -1;
	spdk_blob_io_unmap(blob, ch, 0, io_units_per_cluster, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(free_clusters - 2 == spdk_bs_free_cluster_count(bs));

	/* Verify the data read from the cluster is zeroed out */
	g_bserrno = -1;
	memset(payload_write, 0, sizeof(payload_write));
	spdk_blob_io_read(blob, ch, payload_read, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_write, payload_read, BLOCKLEN) == 0);

	ut_blob_close_and_delete(bs, blob);
	ut_blob_close_and_delete(bs, snapshot);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));

	spdk_bs_free_io_channel(ch);
	poll_threads();
	g_blob = NULL;
	g_blobid = 0;

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
}
4929 
4930 static void
4931 blob_thin_prov_rle(void)
4932 {
4933 	static const uint8_t zero[10 * BLOCKLEN] = { 0 };
4934 	struct spdk_blob_store *bs = g_bs;
4935 	struct spdk_blob *blob;
4936 	struct spdk_io_channel *channel;
4937 	struct spdk_blob_opts opts;
4938 	spdk_blob_id blobid;
4939 	uint64_t free_clusters;
4940 	uint64_t io_unit_size;
4941 	uint8_t payload_read[10 * BLOCKLEN];
4942 	uint8_t payload_write[10 * BLOCKLEN];
4943 	uint64_t write_bytes;
4944 	uint64_t read_bytes;
4945 	uint64_t expected_bytes;
4946 	uint64_t io_unit;
4947 
4948 	/* assert that the stack variables above are of correct size */
4949 	CU_ASSERT(spdk_bs_get_io_unit_size(bs) == BLOCKLEN);
4950 
4951 	free_clusters = spdk_bs_free_cluster_count(bs);
4952 	io_unit_size = spdk_bs_get_io_unit_size(bs);
4953 
4954 	ut_spdk_blob_opts_init(&opts);
4955 	opts.thin_provision = true;
4956 	opts.num_clusters = 5;
4957 
4958 	blob = ut_blob_create_and_open(bs, &opts);
4959 	blobid = spdk_blob_get_id(blob);
4960 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4961 
4962 	channel = spdk_bs_alloc_io_channel(bs);
4963 	CU_ASSERT(channel != NULL);
4964 
4965 	/* Target specifically second cluster in a blob as first allocation */
4966 	io_unit = bs_cluster_to_io_unit(bs, 1);
4967 
4968 	/* Payload should be all zeros from unallocated clusters */
4969 	memset(payload_read, 0xFF, sizeof(payload_read));
4970 	spdk_blob_io_read(blob, channel, payload_read, io_unit, 10, blob_op_complete, NULL);
4971 	poll_threads();
4972 	CU_ASSERT(g_bserrno == 0);
4973 	CU_ASSERT(memcmp(zero, payload_read, 10 * BLOCKLEN) == 0);
4974 
4975 	write_bytes = g_dev_write_bytes;
4976 	read_bytes = g_dev_read_bytes;
4977 
4978 	/* Issue write to second cluster in a blob */
4979 	memset(payload_write, 0xE5, sizeof(payload_write));
4980 	spdk_blob_io_write(blob, channel, payload_write, io_unit, 10, blob_op_complete, NULL);
4981 	poll_threads();
4982 	CU_ASSERT(g_bserrno == 0);
4983 	CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs));
4984 	/* For thin-provisioned blob we need to write 10 pages plus one page metadata and
4985 	 * read 0 bytes */
4986 	expected_bytes = 10 * io_unit_size + spdk_bs_get_page_size(bs);
4987 	if (g_use_extent_table) {
4988 		/* Add one more page for EXTENT_PAGE write */
4989 		expected_bytes += spdk_bs_get_page_size(bs);
4990 	}
4991 	CU_ASSERT(g_dev_write_bytes - write_bytes == expected_bytes);
4992 	CU_ASSERT(g_dev_read_bytes - read_bytes == 0);
4993 
4994 	spdk_blob_io_read(blob, channel, payload_read, io_unit, 10, blob_op_complete, NULL);
4995 	poll_threads();
4996 	CU_ASSERT(g_bserrno == 0);
4997 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * BLOCKLEN) == 0);
4998 
4999 	spdk_bs_free_io_channel(channel);
5000 	poll_threads();
5001 
5002 	spdk_blob_close(blob, blob_op_complete, NULL);
5003 	poll_threads();
5004 	CU_ASSERT(g_bserrno == 0);
5005 
5006 	ut_bs_reload(&bs, NULL);
5007 
5008 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
5009 	poll_threads();
5010 	CU_ASSERT(g_bserrno == 0);
5011 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5012 	blob = g_blob;
5013 
5014 	channel = spdk_bs_alloc_io_channel(bs);
5015 	CU_ASSERT(channel != NULL);
5016 
5017 	/* Read second cluster after blob reload to confirm data written */
5018 	spdk_blob_io_read(blob, channel, payload_read, io_unit, 10, blob_op_complete, NULL);
5019 	poll_threads();
5020 	CU_ASSERT(g_bserrno == 0);
5021 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * BLOCKLEN) == 0);
5022 
5023 	spdk_bs_free_io_channel(channel);
5024 	poll_threads();
5025 
5026 	ut_blob_close_and_delete(bs, blob);
5027 }
5028 
5029 static void
5030 blob_thin_prov_rw_iov(void)
5031 {
5032 	static const uint8_t zero[10 * BLOCKLEN] = { 0 };
5033 	struct spdk_blob_store *bs = g_bs;
5034 	struct spdk_blob *blob;
5035 	struct spdk_io_channel *channel;
5036 	struct spdk_blob_opts opts;
5037 	uint64_t free_clusters;
5038 	uint8_t payload_read[10 * BLOCKLEN];
5039 	uint8_t payload_write[10 * BLOCKLEN];
5040 	struct iovec iov_read[3];
5041 	struct iovec iov_write[3];
5042 
5043 	free_clusters = spdk_bs_free_cluster_count(bs);
5044 
5045 	channel = spdk_bs_alloc_io_channel(bs);
5046 	CU_ASSERT(channel != NULL);
5047 
5048 	ut_spdk_blob_opts_init(&opts);
5049 	opts.thin_provision = true;
5050 
5051 	blob = ut_blob_create_and_open(bs, &opts);
5052 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
5053 
5054 	CU_ASSERT(blob->active.num_clusters == 0);
5055 
5056 	/* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */
5057 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
5058 	poll_threads();
5059 	CU_ASSERT(g_bserrno == 0);
5060 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
5061 	CU_ASSERT(blob->active.num_clusters == 5);
5062 
5063 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
5064 	poll_threads();
5065 	CU_ASSERT(g_bserrno == 0);
5066 	/* Sync must not change anything */
5067 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
5068 	CU_ASSERT(blob->active.num_clusters == 5);
5069 
5070 	/* Payload should be all zeros from unallocated clusters */
5071 	memset(payload_read, 0xAA, sizeof(payload_read));
5072 	iov_read[0].iov_base = payload_read;
5073 	iov_read[0].iov_len = 3 * BLOCKLEN;
5074 	iov_read[1].iov_base = payload_read + 3 * BLOCKLEN;
5075 	iov_read[1].iov_len = 4 * BLOCKLEN;
5076 	iov_read[2].iov_base = payload_read + 7 * BLOCKLEN;
5077 	iov_read[2].iov_len = 3 * BLOCKLEN;
5078 	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
5079 	poll_threads();
5080 	CU_ASSERT(g_bserrno == 0);
5081 	CU_ASSERT(memcmp(zero, payload_read, 10 * BLOCKLEN) == 0);
5082 
5083 	memset(payload_write, 0xE5, sizeof(payload_write));
5084 	iov_write[0].iov_base = payload_write;
5085 	iov_write[0].iov_len = 1 * BLOCKLEN;
5086 	iov_write[1].iov_base = payload_write + 1 * BLOCKLEN;
5087 	iov_write[1].iov_len = 5 * BLOCKLEN;
5088 	iov_write[2].iov_base = payload_write + 6 * BLOCKLEN;
5089 	iov_write[2].iov_len = 4 * BLOCKLEN;
5090 
5091 	spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
5092 	poll_threads();
5093 	CU_ASSERT(g_bserrno == 0);
5094 
5095 	memset(payload_read, 0xAA, sizeof(payload_read));
5096 	iov_read[0].iov_base = payload_read;
5097 	iov_read[0].iov_len = 3 * BLOCKLEN;
5098 	iov_read[1].iov_base = payload_read + 3 * BLOCKLEN;
5099 	iov_read[1].iov_len = 4 * BLOCKLEN;
5100 	iov_read[2].iov_base = payload_read + 7 * BLOCKLEN;
5101 	iov_read[2].iov_len = 3 * BLOCKLEN;
5102 	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
5103 	poll_threads();
5104 	CU_ASSERT(g_bserrno == 0);
5105 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * BLOCKLEN) == 0);
5106 
5107 	spdk_bs_free_io_channel(channel);
5108 	poll_threads();
5109 
5110 	ut_blob_close_and_delete(bs, blob);
5111 }
5112 
/*
 * Context passed as iter_cb_arg to spdk_bs_load(); records the blob IDs
 * created by bs_load_iter_test, in creation order, so that test_iter can
 * verify the load-time iterator visits each blob exactly once, in order.
 */
struct iter_ctx {
	int		current_iter;	/* index of the next blobid test_iter expects */
	spdk_blob_id	blobid[4];	/* IDs captured when the blobs were created */
};
5117 
5118 static void
5119 test_iter(void *arg, struct spdk_blob *blob, int bserrno)
5120 {
5121 	struct iter_ctx *iter_ctx = arg;
5122 	spdk_blob_id blobid;
5123 
5124 	CU_ASSERT(bserrno == 0);
5125 	blobid = spdk_blob_get_id(blob);
5126 	CU_ASSERT(blobid == iter_ctx->blobid[iter_ctx->current_iter++]);
5127 }
5128 
/*
 * Verify that the iter_cb_fn/iter_cb_arg load options make spdk_bs_load()
 * visit every existing blob, both after a clean shutdown (spdk_bs_unload)
 * and after a dirty shutdown (bs_free without unload, forcing recovery
 * from on-disk metadata).
 */
static void
bs_load_iter_test(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct iter_ctx iter_ctx = { 0 };
	struct spdk_blob *blob;
	int i, rc;
	struct spdk_bs_opts opts;

	dev = init_dev();
	spdk_bs_opts_init(&opts, sizeof(opts));
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");

	/* Initialize a new blob store */
	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	/* Create four blobs, remembering their IDs in creation order */
	for (i = 0; i < 4; i++) {
		blob = ut_blob_create_and_open(bs, NULL);
		iter_ctx.blobid[i] = spdk_blob_get_id(blob);

		/* Just save the blobid as an xattr for testing purposes. */
		rc = spdk_blob_set_xattr(blob, "blobid", &iter_ctx.blobid[i], sizeof(spdk_blob_id));
		CU_ASSERT(rc == 0);

		/* Resize the blob */
		spdk_blob_resize(blob, i, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);

		spdk_blob_close(blob, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
	}

	g_bserrno = -1;
	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	dev = init_dev();
	spdk_bs_opts_init(&opts, sizeof(opts));
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
	opts.iter_cb_fn = test_iter;
	opts.iter_cb_arg = &iter_ctx;

	/* Test blob iteration during load after a clean shutdown. */
	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	/* Dirty shutdown */
	bs_free(bs);

	dev = init_dev();
	spdk_bs_opts_init(&opts, sizeof(opts));
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
	opts.iter_cb_fn = test_iter;
	/* Restart the expected-ID index so test_iter's checks hold again */
	iter_ctx.current_iter = 0;
	opts.iter_cb_arg = &iter_ctx;

	/* Test blob iteration during load after a dirty shutdown. */
	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
}
5208 
/*
 * Snapshot copy-on-write test: write to a thin blob, snapshot it, then
 * write to the clone again and verify the device-level read/write/copy
 * byte counters match the expected cluster copy-on-write cost.  Also
 * checks that the snapshot's data is untouched by writes to the clone.
 */
static void
blob_snapshot_rw(void)
{
	static const uint8_t zero[10 * BLOCKLEN] = { 0 };
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob, *snapshot;
	struct spdk_io_channel *channel;
	struct spdk_blob_opts opts;
	spdk_blob_id blobid, snapshotid;
	uint64_t free_clusters;
	uint64_t cluster_size;
	uint64_t io_unit_size;
	uint8_t payload_read[10 * BLOCKLEN];
	uint8_t payload_write[10 * BLOCKLEN];
	uint64_t write_bytes_start;
	uint64_t read_bytes_start;
	uint64_t copy_bytes_start;
	uint64_t write_bytes;
	uint64_t read_bytes;
	uint64_t copy_bytes;
	uint64_t expected_bytes;

	free_clusters = spdk_bs_free_cluster_count(bs);
	cluster_size = spdk_bs_get_cluster_size(bs);
	io_unit_size = spdk_bs_get_io_unit_size(bs);

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = true;
	opts.num_clusters = 5;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));

	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);

	/* Unallocated space reads back as zeroes */
	memset(payload_read, 0xFF, sizeof(payload_read));
	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(zero, payload_read, 10 * BLOCKLEN) == 0);

	/* First write allocates at least one cluster */
	memset(payload_write, 0xE5, sizeof(payload_write));
	spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs));

	/* Create snapshot from blob */
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snapshotid = g_blobid;

	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot = g_blob;
	/* Snapshots are read-only in both data and metadata */
	CU_ASSERT(snapshot->data_ro == true);
	CU_ASSERT(snapshot->md_ro == true);

	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5);

	/* Snapshot device-counter baselines before writing to the clone */
	write_bytes_start = g_dev_write_bytes;
	read_bytes_start = g_dev_read_bytes;
	copy_bytes_start = g_dev_copy_bytes;

	memset(payload_write, 0xAA, sizeof(payload_write));
	spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs));

	/* For a clone we need to allocate and copy one cluster, update one page of metadata
	 * and then write 10 io units of payload.
	 */
	write_bytes = g_dev_write_bytes - write_bytes_start;
	read_bytes = g_dev_read_bytes - read_bytes_start;
	copy_bytes = g_dev_copy_bytes - copy_bytes_start;
	if (g_dev_copy_enabled) {
		/* Device copy offload moves the cluster without host reads/writes */
		CU_ASSERT(copy_bytes == cluster_size);
	} else {
		CU_ASSERT(copy_bytes == 0);
	}
	expected_bytes = 10 * io_unit_size + cluster_size + spdk_bs_get_page_size(bs);
	if (g_use_extent_table) {
		/* Add one more page for EXTENT_PAGE write */
		expected_bytes += spdk_bs_get_page_size(bs);
	}
	CU_ASSERT(write_bytes + copy_bytes == expected_bytes);
	CU_ASSERT(read_bytes + copy_bytes == cluster_size);

	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_write, payload_read, 10 * BLOCKLEN) == 0);

	/* Data on snapshot should not change after write to clone */
	memset(payload_write, 0xE5, sizeof(payload_write));
	spdk_blob_io_read(snapshot, channel, payload_read, 4, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_write, payload_read, 10 * BLOCKLEN) == 0);

	ut_blob_close_and_delete(bs, blob);
	ut_blob_close_and_delete(bs, snapshot);

	spdk_bs_free_io_channel(channel);
	poll_threads();
	g_blob = NULL;
	g_blobid = 0;
}
5326 
5327 static void
5328 blob_snapshot_rw_iov(void)
5329 {
5330 	static const uint8_t zero[10 * BLOCKLEN] = { 0 };
5331 	struct spdk_blob_store *bs = g_bs;
5332 	struct spdk_blob *blob, *snapshot;
5333 	struct spdk_io_channel *channel;
5334 	struct spdk_blob_opts opts;
5335 	spdk_blob_id blobid, snapshotid;
5336 	uint64_t free_clusters;
5337 	uint8_t payload_read[10 * BLOCKLEN];
5338 	uint8_t payload_write[10 * BLOCKLEN];
5339 	struct iovec iov_read[3];
5340 	struct iovec iov_write[3];
5341 
5342 	free_clusters = spdk_bs_free_cluster_count(bs);
5343 
5344 	channel = spdk_bs_alloc_io_channel(bs);
5345 	CU_ASSERT(channel != NULL);
5346 
5347 	ut_spdk_blob_opts_init(&opts);
5348 	opts.thin_provision = true;
5349 	opts.num_clusters = 5;
5350 
5351 	blob = ut_blob_create_and_open(bs, &opts);
5352 	blobid = spdk_blob_get_id(blob);
5353 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
5354 
5355 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
5356 
5357 	/* Create snapshot from blob */
5358 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5359 	poll_threads();
5360 	CU_ASSERT(g_bserrno == 0);
5361 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5362 	snapshotid = g_blobid;
5363 
5364 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
5365 	poll_threads();
5366 	CU_ASSERT(g_bserrno == 0);
5367 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5368 	snapshot = g_blob;
5369 	CU_ASSERT(snapshot->data_ro == true);
5370 	CU_ASSERT(snapshot->md_ro == true);
5371 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5);
5372 
5373 	/* Payload should be all zeros from unallocated clusters */
5374 	memset(payload_read, 0xAA, sizeof(payload_read));
5375 	iov_read[0].iov_base = payload_read;
5376 	iov_read[0].iov_len = 3 * BLOCKLEN;
5377 	iov_read[1].iov_base = payload_read + 3 * BLOCKLEN;
5378 	iov_read[1].iov_len = 4 * BLOCKLEN;
5379 	iov_read[2].iov_base = payload_read + 7 * BLOCKLEN;
5380 	iov_read[2].iov_len = 3 * BLOCKLEN;
5381 	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
5382 	poll_threads();
5383 	CU_ASSERT(g_bserrno == 0);
5384 	CU_ASSERT(memcmp(zero, payload_read, 10 * BLOCKLEN) == 0);
5385 
5386 	memset(payload_write, 0xE5, sizeof(payload_write));
5387 	iov_write[0].iov_base = payload_write;
5388 	iov_write[0].iov_len = 1 * BLOCKLEN;
5389 	iov_write[1].iov_base = payload_write + 1 * BLOCKLEN;
5390 	iov_write[1].iov_len = 5 * BLOCKLEN;
5391 	iov_write[2].iov_base = payload_write + 6 * BLOCKLEN;
5392 	iov_write[2].iov_len = 4 * BLOCKLEN;
5393 
5394 	spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
5395 	poll_threads();
5396 	CU_ASSERT(g_bserrno == 0);
5397 
5398 	memset(payload_read, 0xAA, sizeof(payload_read));
5399 	iov_read[0].iov_base = payload_read;
5400 	iov_read[0].iov_len = 3 * BLOCKLEN;
5401 	iov_read[1].iov_base = payload_read + 3 * BLOCKLEN;
5402 	iov_read[1].iov_len = 4 * BLOCKLEN;
5403 	iov_read[2].iov_base = payload_read + 7 * BLOCKLEN;
5404 	iov_read[2].iov_len = 3 * BLOCKLEN;
5405 	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
5406 	poll_threads();
5407 	CU_ASSERT(g_bserrno == 0);
5408 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * BLOCKLEN) == 0);
5409 
5410 	spdk_bs_free_io_channel(channel);
5411 	poll_threads();
5412 
5413 	ut_blob_close_and_delete(bs, blob);
5414 	ut_blob_close_and_delete(bs, snapshot);
5415 }
5416 
5417 /**
5418  * Inflate / decouple parent rw unit tests.
5419  *
5420  * --------------
5421  * original blob:         0         1         2         3         4
5422  *                   ,---------+---------+---------+---------+---------.
5423  *         snapshot  |xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|    -    |
5424  *                   +---------+---------+---------+---------+---------+
5425  *         snapshot2 |    -    |yyyyyyyyy|    -    |yyyyyyyyy|    -    |
5426  *                   +---------+---------+---------+---------+---------+
5427  *         blob      |    -    |zzzzzzzzz|    -    |    -    |    -    |
5428  *                   '---------+---------+---------+---------+---------'
5429  *                   .         .         .         .         .         .
5430  * --------          .         .         .         .         .         .
5431  * inflate:          .         .         .         .         .         .
5432  *                   ,---------+---------+---------+---------+---------.
5433  *         blob      |xxxxxxxxx|zzzzzzzzz|xxxxxxxxx|yyyyyyyyy|000000000|
5434  *                   '---------+---------+---------+---------+---------'
5435  *
5436  *         NOTE: needs to allocate 4 clusters, thin provisioning removed, dependency
5437  *               on snapshot2 and snapshot removed .         .         .
5438  *                   .         .         .         .         .         .
5439  * ----------------  .         .         .         .         .         .
5440  * decouple parent:  .         .         .         .         .         .
5441  *                   ,---------+---------+---------+---------+---------.
5442  *         snapshot  |xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|    -    |
5443  *                   +---------+---------+---------+---------+---------+
5444  *         blob      |    -    |zzzzzzzzz|    -    |yyyyyyyyy|    -    |
5445  *                   '---------+---------+---------+---------+---------'
5446  *
5447  *         NOTE: needs to allocate 1 cluster, 3 clusters unallocated, dependency
5448  *               on snapshot2 removed and on snapshot still exists. Snapshot2
5449  *               should remain a clone of snapshot.
5450  */
/*
 * Shared body for blob_inflate_rw(). Builds the two-level snapshot chain
 * shown in the diagram above, then either fully inflates the blob
 * (decouple_parent == false) or only decouples it from its immediate parent
 * (decouple_parent == true), checking cluster accounting, the snapshot/clone
 * relation tree, and data integrity at each step.
 */
static void
_blob_inflate_rw(bool decouple_parent)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob, *snapshot, *snapshot2;
	struct spdk_io_channel *channel;
	struct spdk_blob_opts opts;
	spdk_blob_id blobid, snapshotid, snapshot2id;
	uint64_t free_clusters;
	uint64_t cluster_size;

	uint64_t payload_size;
	uint8_t *payload_read;
	uint8_t *payload_write;
	uint8_t *payload_clone;

	uint64_t io_units_per_cluster;
	uint64_t io_units_per_payload;

	int i;
	spdk_blob_id ids[2];
	size_t count;

	free_clusters = spdk_bs_free_cluster_count(bs);
	cluster_size = spdk_bs_get_cluster_size(bs);
	/* The test blob spans 5 clusters; all I/O sizes below derive from that. */
	io_units_per_cluster = cluster_size / spdk_bs_get_io_unit_size(bs);
	io_units_per_payload = io_units_per_cluster * 5;

	payload_size = cluster_size * 5;

	payload_read = malloc(payload_size);
	SPDK_CU_ASSERT_FATAL(payload_read != NULL);

	payload_write = malloc(payload_size);
	SPDK_CU_ASSERT_FATAL(payload_write != NULL);

	payload_clone = malloc(payload_size);
	SPDK_CU_ASSERT_FATAL(payload_clone != NULL);

	channel = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(channel != NULL);

	/* Create blob */
	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = true;
	opts.num_clusters = 5;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);
	/* Thin-provisioned create must not consume any clusters yet. */
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));

	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);

	/* 1) Initial read should return zeroed payload */
	memset(payload_read, 0xFF, payload_size);
	spdk_blob_io_read(blob, channel, payload_read, 0, io_units_per_payload,
			  blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size));

	/* Fill whole blob with a pattern, except last cluster (to be sure it
	 * isn't allocated) */
	memset(payload_write, 0xE5, payload_size - cluster_size);
	spdk_blob_io_write(blob, channel, payload_write, 0, io_units_per_payload -
			   io_units_per_cluster, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	/* The write forced cluster allocation on the thin blob. */
	CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs));

	/* 2) Create snapshot from blob (first level) */
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snapshotid = g_blobid;

	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot = g_blob;
	/* Snapshots are created read-only for both data and metadata. */
	CU_ASSERT(snapshot->data_ro == true);
	CU_ASSERT(snapshot->md_ro == true);

	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5);

	/* Write every second cluster with a pattern.
	 *
	 * Last cluster shouldn't be written, to be sure that snapshot nor clone
	 * doesn't allocate it.
	 *
	 * payload_clone stores expected result on "blob" read at the time and
	 * is used only to check data consistency on clone before and after
	 * inflation. Initially we fill it with a backing snapshots pattern
	 * used before.
	 */
	memset(payload_clone, 0xE5, payload_size - cluster_size);
	memset(payload_clone + payload_size - cluster_size, 0x00, cluster_size);
	memset(payload_write, 0xAA, payload_size);
	for (i = 1; i < 5; i += 2) {
		spdk_blob_io_write(blob, channel, payload_write, i * io_units_per_cluster,
				   io_units_per_cluster, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);

		/* Update expected result */
		memcpy(payload_clone + (cluster_size * i), payload_write,
		       cluster_size);
	}
	CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs));

	/* Check data consistency on clone */
	memset(payload_read, 0xFF, payload_size);
	spdk_blob_io_read(blob, channel, payload_read, 0, io_units_per_payload,
			  blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0);

	/* 3) Create second levels snapshot from blob */
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snapshot2id = g_blobid;

	spdk_bs_open_blob(bs, snapshot2id, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot2 = g_blob;
	CU_ASSERT(snapshot2->data_ro == true);
	CU_ASSERT(snapshot2->md_ro == true);

	CU_ASSERT(spdk_blob_get_num_clusters(snapshot2) == 5);

	/* The new snapshot is inserted between the first snapshot and the blob. */
	CU_ASSERT(snapshot2->parent_id == snapshotid);

	/* Write one cluster on the top level blob. This cluster (1) covers
	 * already allocated cluster in the snapshot2, so shouldn't be inflated
	 * at all */
	spdk_blob_io_write(blob, channel, payload_write, io_units_per_cluster,
			   io_units_per_cluster, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Update expected result */
	memcpy(payload_clone + cluster_size, payload_write, cluster_size);

	/* Check data consistency on clone */
	memset(payload_read, 0xFF, payload_size);
	spdk_blob_io_read(blob, channel, payload_read, 0, io_units_per_payload,
			  blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0);


	/* Close all blobs */
	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_blob_close(snapshot2, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_blob_close(snapshot, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Check snapshot-clone relations */
	count = 2;
	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);
	CU_ASSERT(count == 1);
	CU_ASSERT(ids[0] == snapshot2id);

	count = 2;
	CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0);
	CU_ASSERT(count == 1);
	CU_ASSERT(ids[0] == blobid);

	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshot2id);

	/* Re-sample free clusters so the deltas below are relative to this point. */
	free_clusters = spdk_bs_free_cluster_count(bs);
	if (!decouple_parent) {
		/* Do full blob inflation */
		spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);

		/* All clusters should be inflated (except one already allocated
		 * in a top level blob) */
		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 4);

		/* Check if relation tree updated correctly */
		count = 2;
		CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);

		/* snapshotid have one clone */
		CU_ASSERT(count == 1);
		CU_ASSERT(ids[0] == snapshot2id);

		/* snapshot2id have no clones */
		count = 2;
		CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0);
		CU_ASSERT(count == 0);

		/* Full inflation removed the blob's parent dependency entirely. */
		CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID);
	} else {
		/* Decouple parent of blob */
		spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);

		/* Only one cluster from a parent should be inflated (second one
		 * is covered by a cluster written on a top level blob, and
		 * already allocated) */
		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 1);

		/* Check if relation tree updated correctly */
		count = 2;
		CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);

		/* snapshotid have two clones now */
		CU_ASSERT(count == 2);
		CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
		CU_ASSERT(ids[0] == snapshot2id || ids[1] == snapshot2id);

		/* snapshot2id have no clones */
		count = 2;
		CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0);
		CU_ASSERT(count == 0);

		/* Decouple re-parents the blob directly onto the base snapshot. */
		CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
	}

	/* Try to delete snapshot2 (should pass) */
	spdk_bs_delete_blob(bs, snapshot2id, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Try to delete base snapshot */
	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Reopen blob after snapshot deletion */
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;

	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);

	/* Check data consistency on inflated blob */
	memset(payload_read, 0xFF, payload_size);
	spdk_blob_io_read(blob, channel, payload_read, 0, io_units_per_payload,
			  blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();

	free(payload_read);
	free(payload_write);
	free(payload_clone);

	ut_blob_close_and_delete(bs, blob);
}
5725 
5726 static void
5727 blob_inflate_rw(void)
5728 {
5729 	_blob_inflate_rw(false);
5730 	_blob_inflate_rw(true);
5731 }
5732 
5733 /**
5734  * Snapshot-clones relation test
5735  *
5736  *         snapshot
5737  *            |
5738  *      +-----+-----+
5739  *      |           |
5740  *   blob(ro)   snapshot2
5741  *      |           |
5742  *   clone2      clone
5743  */
5744 static void
5745 blob_relations(void)
5746 {
5747 	struct spdk_blob_store *bs;
5748 	struct spdk_bs_dev *dev;
5749 	struct spdk_bs_opts bs_opts;
5750 	struct spdk_blob_opts opts;
5751 	struct spdk_blob *blob, *snapshot, *snapshot2, *clone, *clone2;
5752 	spdk_blob_id blobid, cloneid, snapshotid, cloneid2, snapshotid2;
5753 	int rc;
5754 	size_t count;
5755 	spdk_blob_id ids[10] = {};
5756 
5757 	dev = init_dev();
5758 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
5759 	snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");
5760 
5761 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
5762 	poll_threads();
5763 	CU_ASSERT(g_bserrno == 0);
5764 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
5765 	bs = g_bs;
5766 
5767 	/* 1. Create blob with 10 clusters */
5768 
5769 	ut_spdk_blob_opts_init(&opts);
5770 	opts.num_clusters = 10;
5771 
5772 	blob = ut_blob_create_and_open(bs, &opts);
5773 	blobid = spdk_blob_get_id(blob);
5774 
5775 	CU_ASSERT(!spdk_blob_is_read_only(blob));
5776 	CU_ASSERT(!spdk_blob_is_snapshot(blob));
5777 	CU_ASSERT(!spdk_blob_is_clone(blob));
5778 	CU_ASSERT(!spdk_blob_is_thin_provisioned(blob));
5779 
5780 	/* blob should not have underlying snapshot nor clones */
5781 	CU_ASSERT(blob->parent_id == SPDK_BLOBID_INVALID);
5782 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID);
5783 	count = SPDK_COUNTOF(ids);
5784 	rc = spdk_blob_get_clones(bs, blobid, ids, &count);
5785 	CU_ASSERT(rc == 0);
5786 	CU_ASSERT(count == 0);
5787 
5788 
5789 	/* 2. Create snapshot */
5790 
5791 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5792 	poll_threads();
5793 	CU_ASSERT(g_bserrno == 0);
5794 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5795 	snapshotid = g_blobid;
5796 
5797 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
5798 	poll_threads();
5799 	CU_ASSERT(g_bserrno == 0);
5800 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5801 	snapshot = g_blob;
5802 
5803 	CU_ASSERT(spdk_blob_is_read_only(snapshot));
5804 	CU_ASSERT(spdk_blob_is_snapshot(snapshot));
5805 	CU_ASSERT(!spdk_blob_is_clone(snapshot));
5806 	CU_ASSERT(snapshot->parent_id == SPDK_BLOBID_INVALID);
5807 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid) == SPDK_BLOBID_INVALID);
5808 
5809 	/* Check if original blob is converted to the clone of snapshot */
5810 	CU_ASSERT(!spdk_blob_is_read_only(blob));
5811 	CU_ASSERT(!spdk_blob_is_snapshot(blob));
5812 	CU_ASSERT(spdk_blob_is_clone(blob));
5813 	CU_ASSERT(spdk_blob_is_thin_provisioned(blob));
5814 	CU_ASSERT(blob->parent_id == snapshotid);
5815 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
5816 
5817 	count = SPDK_COUNTOF(ids);
5818 	rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
5819 	CU_ASSERT(rc == 0);
5820 	CU_ASSERT(count == 1);
5821 	CU_ASSERT(ids[0] == blobid);
5822 
5823 
5824 	/* 3. Create clone from snapshot */
5825 
5826 	spdk_bs_create_clone(bs, snapshotid, NULL, blob_op_with_id_complete, NULL);
5827 	poll_threads();
5828 	CU_ASSERT(g_bserrno == 0);
5829 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5830 	cloneid = g_blobid;
5831 
5832 	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
5833 	poll_threads();
5834 	CU_ASSERT(g_bserrno == 0);
5835 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5836 	clone = g_blob;
5837 
5838 	CU_ASSERT(!spdk_blob_is_read_only(clone));
5839 	CU_ASSERT(!spdk_blob_is_snapshot(clone));
5840 	CU_ASSERT(spdk_blob_is_clone(clone));
5841 	CU_ASSERT(spdk_blob_is_thin_provisioned(clone));
5842 	CU_ASSERT(clone->parent_id == snapshotid);
5843 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid);
5844 
5845 	count = SPDK_COUNTOF(ids);
5846 	rc = spdk_blob_get_clones(bs, cloneid, ids, &count);
5847 	CU_ASSERT(rc == 0);
5848 	CU_ASSERT(count == 0);
5849 
5850 	/* Check if clone is on the snapshot's list */
5851 	count = SPDK_COUNTOF(ids);
5852 	rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
5853 	CU_ASSERT(rc == 0);
5854 	CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
5855 	CU_ASSERT(ids[0] == cloneid || ids[1] == cloneid);
5856 
5857 
5858 	/* 4. Create snapshot of the clone */
5859 
5860 	spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL);
5861 	poll_threads();
5862 	CU_ASSERT(g_bserrno == 0);
5863 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5864 	snapshotid2 = g_blobid;
5865 
5866 	spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL);
5867 	poll_threads();
5868 	CU_ASSERT(g_bserrno == 0);
5869 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5870 	snapshot2 = g_blob;
5871 
5872 	CU_ASSERT(spdk_blob_is_read_only(snapshot2));
5873 	CU_ASSERT(spdk_blob_is_snapshot(snapshot2));
5874 	CU_ASSERT(spdk_blob_is_clone(snapshot2));
5875 	CU_ASSERT(snapshot2->parent_id == snapshotid);
5876 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid);
5877 
5878 	/* Check if clone is converted to the clone of snapshot2 and snapshot2
5879 	 * is a child of snapshot */
5880 	CU_ASSERT(!spdk_blob_is_read_only(clone));
5881 	CU_ASSERT(!spdk_blob_is_snapshot(clone));
5882 	CU_ASSERT(spdk_blob_is_clone(clone));
5883 	CU_ASSERT(spdk_blob_is_thin_provisioned(clone));
5884 	CU_ASSERT(clone->parent_id == snapshotid2);
5885 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2);
5886 
5887 	count = SPDK_COUNTOF(ids);
5888 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
5889 	CU_ASSERT(rc == 0);
5890 	CU_ASSERT(count == 1);
5891 	CU_ASSERT(ids[0] == cloneid);
5892 
5893 
5894 	/* 5. Try to create clone from read only blob */
5895 
5896 	/* Mark blob as read only */
5897 	spdk_blob_set_read_only(blob);
5898 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
5899 	poll_threads();
5900 	CU_ASSERT(g_bserrno == 0);
5901 
5902 	/* Check if previously created blob is read only clone */
5903 	CU_ASSERT(spdk_blob_is_read_only(blob));
5904 	CU_ASSERT(!spdk_blob_is_snapshot(blob));
5905 	CU_ASSERT(spdk_blob_is_clone(blob));
5906 	CU_ASSERT(spdk_blob_is_thin_provisioned(blob));
5907 
5908 	/* Create clone from read only blob */
5909 	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5910 	poll_threads();
5911 	CU_ASSERT(g_bserrno == 0);
5912 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5913 	cloneid2 = g_blobid;
5914 
5915 	spdk_bs_open_blob(bs, cloneid2, blob_op_with_handle_complete, NULL);
5916 	poll_threads();
5917 	CU_ASSERT(g_bserrno == 0);
5918 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5919 	clone2 = g_blob;
5920 
5921 	CU_ASSERT(!spdk_blob_is_read_only(clone2));
5922 	CU_ASSERT(!spdk_blob_is_snapshot(clone2));
5923 	CU_ASSERT(spdk_blob_is_clone(clone2));
5924 	CU_ASSERT(spdk_blob_is_thin_provisioned(clone2));
5925 
5926 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid);
5927 
5928 	count = SPDK_COUNTOF(ids);
5929 	rc = spdk_blob_get_clones(bs, blobid, ids, &count);
5930 	CU_ASSERT(rc == 0);
5931 
5932 	CU_ASSERT(count == 1);
5933 	CU_ASSERT(ids[0] == cloneid2);
5934 
5935 	/* Close blobs */
5936 
5937 	spdk_blob_close(clone2, blob_op_complete, NULL);
5938 	poll_threads();
5939 	CU_ASSERT(g_bserrno == 0);
5940 
5941 	spdk_blob_close(blob, blob_op_complete, NULL);
5942 	poll_threads();
5943 	CU_ASSERT(g_bserrno == 0);
5944 
5945 	spdk_blob_close(clone, blob_op_complete, NULL);
5946 	poll_threads();
5947 	CU_ASSERT(g_bserrno == 0);
5948 
5949 	spdk_blob_close(snapshot, blob_op_complete, NULL);
5950 	poll_threads();
5951 	CU_ASSERT(g_bserrno == 0);
5952 
5953 	spdk_blob_close(snapshot2, blob_op_complete, NULL);
5954 	poll_threads();
5955 	CU_ASSERT(g_bserrno == 0);
5956 
5957 	/* Try to delete snapshot with more than 1 clone */
5958 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
5959 	poll_threads();
5960 	CU_ASSERT(g_bserrno != 0);
5961 
5962 	ut_bs_reload(&bs, &bs_opts);
5963 
5964 	/* NULL ids array should return number of clones in count */
5965 	count = SPDK_COUNTOF(ids);
5966 	rc = spdk_blob_get_clones(bs, snapshotid, NULL, &count);
5967 	CU_ASSERT(rc == -ENOMEM);
5968 	CU_ASSERT(count == 2);
5969 
5970 	/* incorrect array size */
5971 	count = 1;
5972 	rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
5973 	CU_ASSERT(rc == -ENOMEM);
5974 	CU_ASSERT(count == 2);
5975 
5976 
5977 	/* Verify structure of loaded blob store */
5978 
5979 	/* snapshot */
5980 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid) == SPDK_BLOBID_INVALID);
5981 
5982 	count = SPDK_COUNTOF(ids);
5983 	rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
5984 	CU_ASSERT(rc == 0);
5985 	CU_ASSERT(count == 2);
5986 	CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
5987 	CU_ASSERT(ids[0] == snapshotid2 || ids[1] == snapshotid2);
5988 
5989 	/* blob */
5990 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
5991 	count = SPDK_COUNTOF(ids);
5992 	rc = spdk_blob_get_clones(bs, blobid, ids, &count);
5993 	CU_ASSERT(rc == 0);
5994 	CU_ASSERT(count == 1);
5995 	CU_ASSERT(ids[0] == cloneid2);
5996 
5997 	/* clone */
5998 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2);
5999 	count = SPDK_COUNTOF(ids);
6000 	rc = spdk_blob_get_clones(bs, cloneid, ids, &count);
6001 	CU_ASSERT(rc == 0);
6002 	CU_ASSERT(count == 0);
6003 
6004 	/* snapshot2 */
6005 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid);
6006 	count = SPDK_COUNTOF(ids);
6007 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
6008 	CU_ASSERT(rc == 0);
6009 	CU_ASSERT(count == 1);
6010 	CU_ASSERT(ids[0] == cloneid);
6011 
6012 	/* clone2 */
6013 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid);
6014 	count = SPDK_COUNTOF(ids);
6015 	rc = spdk_blob_get_clones(bs, cloneid2, ids, &count);
6016 	CU_ASSERT(rc == 0);
6017 	CU_ASSERT(count == 0);
6018 
6019 	/* Try to delete blob that user should not be able to remove */
6020 
6021 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
6022 	poll_threads();
6023 	CU_ASSERT(g_bserrno != 0);
6024 
6025 	/* Remove all blobs */
6026 
6027 	spdk_bs_delete_blob(bs, cloneid, blob_op_complete, NULL);
6028 	poll_threads();
6029 	CU_ASSERT(g_bserrno == 0);
6030 
6031 	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
6032 	poll_threads();
6033 	CU_ASSERT(g_bserrno == 0);
6034 
6035 	spdk_bs_delete_blob(bs, cloneid2, blob_op_complete, NULL);
6036 	poll_threads();
6037 	CU_ASSERT(g_bserrno == 0);
6038 
6039 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
6040 	poll_threads();
6041 	CU_ASSERT(g_bserrno == 0);
6042 
6043 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
6044 	poll_threads();
6045 	CU_ASSERT(g_bserrno == 0);
6046 
6047 	spdk_bs_unload(bs, bs_op_complete, NULL);
6048 	poll_threads();
6049 	CU_ASSERT(g_bserrno == 0);
6050 
6051 	g_bs = NULL;
6052 }
6053 
6054 /**
6055  * Snapshot-clones relation test 2
6056  *
6057  *         snapshot1
6058  *            |
6059  *         snapshot2
6060  *            |
6061  *      +-----+-----+
6062  *      |           |
6063  *   blob(ro)   snapshot3
6064  *      |           |
6065  *      |       snapshot4
6066  *      |        |     |
6067  *   clone2   clone  clone3
6068  */
6069 static void
6070 blob_relations2(void)
6071 {
6072 	struct spdk_blob_store *bs;
6073 	struct spdk_bs_dev *dev;
6074 	struct spdk_bs_opts bs_opts;
6075 	struct spdk_blob_opts opts;
6076 	struct spdk_blob *blob, *snapshot1, *snapshot2, *snapshot3, *snapshot4, *clone, *clone2;
6077 	spdk_blob_id blobid, snapshotid1, snapshotid2, snapshotid3, snapshotid4, cloneid, cloneid2,
6078 		     cloneid3;
6079 	int rc;
6080 	size_t count;
6081 	spdk_blob_id ids[10] = {};
6082 
6083 	dev = init_dev();
6084 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
6085 	snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");
6086 
6087 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
6088 	poll_threads();
6089 	CU_ASSERT(g_bserrno == 0);
6090 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
6091 	bs = g_bs;
6092 
6093 	/* 1. Create blob with 10 clusters */
6094 
6095 	ut_spdk_blob_opts_init(&opts);
6096 	opts.num_clusters = 10;
6097 
6098 	blob = ut_blob_create_and_open(bs, &opts);
6099 	blobid = spdk_blob_get_id(blob);
6100 
6101 	/* 2. Create snapshot1 */
6102 
6103 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
6104 	poll_threads();
6105 	CU_ASSERT(g_bserrno == 0);
6106 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
6107 	snapshotid1 = g_blobid;
6108 
6109 	spdk_bs_open_blob(bs, snapshotid1, blob_op_with_handle_complete, NULL);
6110 	poll_threads();
6111 	CU_ASSERT(g_bserrno == 0);
6112 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
6113 	snapshot1 = g_blob;
6114 
6115 	CU_ASSERT(snapshot1->parent_id == SPDK_BLOBID_INVALID);
6116 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid1) == SPDK_BLOBID_INVALID);
6117 
6118 	CU_ASSERT(blob->parent_id == snapshotid1);
6119 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid1);
6120 
6121 	/* Check if blob is the clone of snapshot1 */
6122 	CU_ASSERT(blob->parent_id == snapshotid1);
6123 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid1);
6124 
6125 	count = SPDK_COUNTOF(ids);
6126 	rc = spdk_blob_get_clones(bs, snapshotid1, ids, &count);
6127 	CU_ASSERT(rc == 0);
6128 	CU_ASSERT(count == 1);
6129 	CU_ASSERT(ids[0] == blobid);
6130 
6131 	/* 3. Create another snapshot */
6132 
6133 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
6134 	poll_threads();
6135 	CU_ASSERT(g_bserrno == 0);
6136 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
6137 	snapshotid2 = g_blobid;
6138 
6139 	spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL);
6140 	poll_threads();
6141 	CU_ASSERT(g_bserrno == 0);
6142 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
6143 	snapshot2 = g_blob;
6144 
6145 	CU_ASSERT(spdk_blob_is_clone(snapshot2));
6146 	CU_ASSERT(snapshot2->parent_id == snapshotid1);
6147 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid1);
6148 
6149 	/* Check if snapshot2 is the clone of snapshot1 and blob
6150 	 * is a child of snapshot2 */
6151 	CU_ASSERT(blob->parent_id == snapshotid2);
6152 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid2);
6153 
6154 	count = SPDK_COUNTOF(ids);
6155 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
6156 	CU_ASSERT(rc == 0);
6157 	CU_ASSERT(count == 1);
6158 	CU_ASSERT(ids[0] == blobid);
6159 
6160 	/* 4. Create clone from snapshot */
6161 
6162 	spdk_bs_create_clone(bs, snapshotid2, NULL, blob_op_with_id_complete, NULL);
6163 	poll_threads();
6164 	CU_ASSERT(g_bserrno == 0);
6165 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
6166 	cloneid = g_blobid;
6167 
6168 	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
6169 	poll_threads();
6170 	CU_ASSERT(g_bserrno == 0);
6171 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
6172 	clone = g_blob;
6173 
6174 	CU_ASSERT(clone->parent_id == snapshotid2);
6175 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2);
6176 
6177 	/* Check if clone is on the snapshot's list */
6178 	count = SPDK_COUNTOF(ids);
6179 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
6180 	CU_ASSERT(rc == 0);
6181 	CU_ASSERT(count == 2);
6182 	CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
6183 	CU_ASSERT(ids[0] == cloneid || ids[1] == cloneid);
6184 
6185 	/* 5. Create snapshot of the clone */
6186 
6187 	spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL);
6188 	poll_threads();
6189 	CU_ASSERT(g_bserrno == 0);
6190 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
6191 	snapshotid3 = g_blobid;
6192 
6193 	spdk_bs_open_blob(bs, snapshotid3, blob_op_with_handle_complete, NULL);
6194 	poll_threads();
6195 	CU_ASSERT(g_bserrno == 0);
6196 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
6197 	snapshot3 = g_blob;
6198 
6199 	CU_ASSERT(snapshot3->parent_id == snapshotid2);
6200 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid3) == snapshotid2);
6201 
6202 	/* Check if clone is converted to the clone of snapshot3 and snapshot3
6203 	 * is a child of snapshot2 */
6204 	CU_ASSERT(clone->parent_id == snapshotid3);
6205 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3);
6206 
6207 	count = SPDK_COUNTOF(ids);
6208 	rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count);
6209 	CU_ASSERT(rc == 0);
6210 	CU_ASSERT(count == 1);
6211 	CU_ASSERT(ids[0] == cloneid);
6212 
6213 	/* 6. Create another snapshot of the clone */
6214 
6215 	spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL);
6216 	poll_threads();
6217 	CU_ASSERT(g_bserrno == 0);
6218 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
6219 	snapshotid4 = g_blobid;
6220 
6221 	spdk_bs_open_blob(bs, snapshotid4, blob_op_with_handle_complete, NULL);
6222 	poll_threads();
6223 	CU_ASSERT(g_bserrno == 0);
6224 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
6225 	snapshot4 = g_blob;
6226 
6227 	CU_ASSERT(snapshot4->parent_id == snapshotid3);
6228 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid4) == snapshotid3);
6229 
6230 	/* Check if clone is converted to the clone of snapshot4 and snapshot4
6231 	 * is a child of snapshot3 */
6232 	CU_ASSERT(clone->parent_id == snapshotid4);
6233 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid4);
6234 
6235 	count = SPDK_COUNTOF(ids);
6236 	rc = spdk_blob_get_clones(bs, snapshotid4, ids, &count);
6237 	CU_ASSERT(rc == 0);
6238 	CU_ASSERT(count == 1);
6239 	CU_ASSERT(ids[0] == cloneid);
6240 
6241 	/* 7. Remove snapshot 4 */
6242 
6243 	ut_blob_close_and_delete(bs, snapshot4);
6244 
6245 	/* Check if relations are back to state from before creating snapshot 4 */
6246 	CU_ASSERT(clone->parent_id == snapshotid3);
6247 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3);
6248 
6249 	count = SPDK_COUNTOF(ids);
6250 	rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count);
6251 	CU_ASSERT(rc == 0);
6252 	CU_ASSERT(count == 1);
6253 	CU_ASSERT(ids[0] == cloneid);
6254 
6255 	/* 8. Create second clone of snapshot 3 and try to remove snapshot 3 */
6256 
6257 	spdk_bs_create_clone(bs, snapshotid3, NULL, blob_op_with_id_complete, NULL);
6258 	poll_threads();
6259 	CU_ASSERT(g_bserrno == 0);
6260 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
6261 	cloneid3 = g_blobid;
6262 
6263 	spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL);
6264 	poll_threads();
6265 	CU_ASSERT(g_bserrno != 0);
6266 
6267 	/* 9. Open snapshot 3 again and try to remove it while clone 3 is closed */
6268 
6269 	spdk_bs_open_blob(bs, snapshotid3, blob_op_with_handle_complete, NULL);
6270 	poll_threads();
6271 	CU_ASSERT(g_bserrno == 0);
6272 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
6273 	snapshot3 = g_blob;
6274 
6275 	spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL);
6276 	poll_threads();
6277 	CU_ASSERT(g_bserrno != 0);
6278 
6279 	spdk_blob_close(snapshot3, blob_op_complete, NULL);
6280 	poll_threads();
6281 	CU_ASSERT(g_bserrno == 0);
6282 
6283 	spdk_bs_delete_blob(bs, cloneid3, blob_op_complete, NULL);
6284 	poll_threads();
6285 	CU_ASSERT(g_bserrno == 0);
6286 
6287 	/* 10. Remove snapshot 1 */
6288 
6289 	/* Check snapshot 1 and snapshot 2 allocated clusters */
6290 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(snapshot1) == 10);
6291 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(snapshot2) == 0);
6292 
6293 	ut_blob_close_and_delete(bs, snapshot1);
6294 
6295 	/* Check if relations are back to state from before creating snapshot 4 (before step 6) */
6296 	CU_ASSERT(snapshot2->parent_id == SPDK_BLOBID_INVALID);
6297 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == SPDK_BLOBID_INVALID);
6298 
6299 	/* Check that snapshot 2 has the clusters that were allocated to snapshot 1 */
6300 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(snapshot2) == 10);
6301 
6302 	count = SPDK_COUNTOF(ids);
6303 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
6304 	CU_ASSERT(rc == 0);
6305 	CU_ASSERT(count == 2);
6306 	CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
6307 	CU_ASSERT(ids[0] == snapshotid3 || ids[1] == snapshotid3);
6308 
6309 	/* 11. Try to create clone from read only blob */
6310 
6311 	/* Mark blob as read only */
6312 	spdk_blob_set_read_only(blob);
6313 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
6314 	poll_threads();
6315 	CU_ASSERT(g_bserrno == 0);
6316 
6317 	/* Create clone from read only blob */
6318 	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
6319 	poll_threads();
6320 	CU_ASSERT(g_bserrno == 0);
6321 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
6322 	cloneid2 = g_blobid;
6323 
6324 	spdk_bs_open_blob(bs, cloneid2, blob_op_with_handle_complete, NULL);
6325 	poll_threads();
6326 	CU_ASSERT(g_bserrno == 0);
6327 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
6328 	clone2 = g_blob;
6329 
6330 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid);
6331 
6332 	count = SPDK_COUNTOF(ids);
6333 	rc = spdk_blob_get_clones(bs, blobid, ids, &count);
6334 	CU_ASSERT(rc == 0);
6335 	CU_ASSERT(count == 1);
6336 	CU_ASSERT(ids[0] == cloneid2);
6337 
6338 	/* Close blobs */
6339 
6340 	spdk_blob_close(clone2, blob_op_complete, NULL);
6341 	poll_threads();
6342 	CU_ASSERT(g_bserrno == 0);
6343 
6344 	spdk_blob_close(blob, blob_op_complete, NULL);
6345 	poll_threads();
6346 	CU_ASSERT(g_bserrno == 0);
6347 
6348 	spdk_blob_close(clone, blob_op_complete, NULL);
6349 	poll_threads();
6350 	CU_ASSERT(g_bserrno == 0);
6351 
6352 	spdk_blob_close(snapshot2, blob_op_complete, NULL);
6353 	poll_threads();
6354 	CU_ASSERT(g_bserrno == 0);
6355 
6356 	spdk_blob_close(snapshot3, blob_op_complete, NULL);
6357 	poll_threads();
6358 	CU_ASSERT(g_bserrno == 0);
6359 
6360 	ut_bs_reload(&bs, &bs_opts);
6361 
6362 	/* Verify structure of loaded blob store */
6363 
6364 	/* snapshot2 */
6365 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == SPDK_BLOBID_INVALID);
6366 
6367 	count = SPDK_COUNTOF(ids);
6368 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
6369 	CU_ASSERT(rc == 0);
6370 	CU_ASSERT(count == 2);
6371 	CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
6372 	CU_ASSERT(ids[0] == snapshotid3 || ids[1] == snapshotid3);
6373 
6374 	/* blob */
6375 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid2);
6376 	count = SPDK_COUNTOF(ids);
6377 	rc = spdk_blob_get_clones(bs, blobid, ids, &count);
6378 	CU_ASSERT(rc == 0);
6379 	CU_ASSERT(count == 1);
6380 	CU_ASSERT(ids[0] == cloneid2);
6381 
6382 	/* clone */
6383 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3);
6384 	count = SPDK_COUNTOF(ids);
6385 	rc = spdk_blob_get_clones(bs, cloneid, ids, &count);
6386 	CU_ASSERT(rc == 0);
6387 	CU_ASSERT(count == 0);
6388 
6389 	/* snapshot3 */
6390 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid3) == snapshotid2);
6391 	count = SPDK_COUNTOF(ids);
6392 	rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count);
6393 	CU_ASSERT(rc == 0);
6394 	CU_ASSERT(count == 1);
6395 	CU_ASSERT(ids[0] == cloneid);
6396 
6397 	/* clone2 */
6398 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid);
6399 	count = SPDK_COUNTOF(ids);
6400 	rc = spdk_blob_get_clones(bs, cloneid2, ids, &count);
6401 	CU_ASSERT(rc == 0);
6402 	CU_ASSERT(count == 0);
6403 
	/* Try to delete all blobs in the worst possible order */
6405 
6406 	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
6407 	poll_threads();
6408 	CU_ASSERT(g_bserrno != 0);
6409 
6410 	spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL);
6411 	poll_threads();
6412 	CU_ASSERT(g_bserrno == 0);
6413 
6414 	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
6415 	poll_threads();
6416 	CU_ASSERT(g_bserrno != 0);
6417 
6418 	spdk_bs_delete_blob(bs, cloneid, blob_op_complete, NULL);
6419 	poll_threads();
6420 	CU_ASSERT(g_bserrno == 0);
6421 
6422 	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
6423 	poll_threads();
6424 	CU_ASSERT(g_bserrno == 0);
6425 
6426 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
6427 	poll_threads();
6428 	CU_ASSERT(g_bserrno == 0);
6429 
6430 	spdk_bs_delete_blob(bs, cloneid2, blob_op_complete, NULL);
6431 	poll_threads();
6432 	CU_ASSERT(g_bserrno == 0);
6433 
6434 	spdk_bs_unload(bs, bs_op_complete, NULL);
6435 	poll_threads();
6436 	CU_ASSERT(g_bserrno == 0);
6437 
6438 	g_bs = NULL;
6439 }
6440 
6441 /**
6442  * Snapshot-clones relation test 3
6443  *
6444  *         snapshot0
6445  *            |
6446  *         snapshot1
6447  *            |
6448  *         snapshot2
6449  *            |
6450  *           blob
6451  */
6452 static void
6453 blob_relations3(void)
6454 {
6455 	struct spdk_blob_store *bs;
6456 	struct spdk_bs_dev *dev;
6457 	struct spdk_io_channel *channel;
6458 	struct spdk_bs_opts bs_opts;
6459 	struct spdk_blob_opts opts;
6460 	struct spdk_blob *blob;
6461 	spdk_blob_id blobid, snapshotid0, snapshotid1, snapshotid2;
6462 
6463 	dev = init_dev();
6464 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
6465 	snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");
6466 
6467 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
6468 	poll_threads();
6469 	CU_ASSERT(g_bserrno == 0);
6470 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
6471 	bs = g_bs;
6472 
6473 	channel = spdk_bs_alloc_io_channel(bs);
6474 	SPDK_CU_ASSERT_FATAL(channel != NULL);
6475 
6476 	/* 1. Create blob with 10 clusters */
6477 	ut_spdk_blob_opts_init(&opts);
6478 	opts.num_clusters = 10;
6479 
6480 	blob = ut_blob_create_and_open(bs, &opts);
6481 	blobid = spdk_blob_get_id(blob);
6482 
6483 	/* 2. Create snapshot0 */
6484 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
6485 	poll_threads();
6486 	CU_ASSERT(g_bserrno == 0);
6487 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
6488 	snapshotid0 = g_blobid;
6489 
6490 	/* 3. Create snapshot1 */
6491 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
6492 	poll_threads();
6493 	CU_ASSERT(g_bserrno == 0);
6494 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
6495 	snapshotid1 = g_blobid;
6496 
6497 	/* 4. Create snapshot2 */
6498 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
6499 	poll_threads();
6500 	CU_ASSERT(g_bserrno == 0);
6501 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
6502 	snapshotid2 = g_blobid;
6503 
6504 	/* 5. Decouple blob */
6505 	spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
6506 	poll_threads();
6507 	CU_ASSERT(g_bserrno == 0);
6508 
6509 	/* 6. Decouple snapshot2. Make sure updating md of snapshot2 is possible */
6510 	spdk_bs_blob_decouple_parent(bs, channel, snapshotid2, blob_op_complete, NULL);
6511 	poll_threads();
6512 	CU_ASSERT(g_bserrno == 0);
6513 
6514 	/* 7. Delete blob */
6515 	spdk_blob_close(blob, blob_op_complete, NULL);
6516 	poll_threads();
6517 	CU_ASSERT(g_bserrno == 0);
6518 
6519 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
6520 	poll_threads();
6521 	CU_ASSERT(g_bserrno == 0);
6522 
6523 	/* 8. Delete snapshot2.
6524 	 * If md of snapshot 2 was updated, it should be possible to delete it */
6525 	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
6526 	poll_threads();
6527 	CU_ASSERT(g_bserrno == 0);
6528 
6529 	/* Remove remaining blobs and unload bs */
6530 	spdk_bs_delete_blob(bs, snapshotid1, blob_op_complete, NULL);
6531 	poll_threads();
6532 	CU_ASSERT(g_bserrno == 0);
6533 
6534 	spdk_bs_delete_blob(bs, snapshotid0, blob_op_complete, NULL);
6535 	poll_threads();
6536 	CU_ASSERT(g_bserrno == 0);
6537 
6538 	spdk_bs_free_io_channel(channel);
6539 	poll_threads();
6540 
6541 	spdk_bs_unload(bs, bs_op_complete, NULL);
6542 	poll_threads();
6543 	CU_ASSERT(g_bserrno == 0);
6544 
6545 	g_bs = NULL;
6546 }
6547 
/*
 * Regression test: a blob metadata sync interrupted by a power failure must
 * never leave the on-disk super block marked "clean" with otherwise stale
 * contents. dev_set_power_failure_thresholds() injects a device write
 * failure after an increasing number of writes; the loop repeats until a
 * sync finally completes without any injected failure.
 */
static void
blobstore_clean_power_failure(void)
{
	struct spdk_blob_store *bs;
	struct spdk_blob *blob;
	struct spdk_power_failure_thresholds thresholds = {};
	bool clean = false;
	/* Direct view of the on-disk super block in the simulated device buffer. */
	struct spdk_bs_super_block *super = (struct spdk_bs_super_block *)&g_dev_buffer[0];
	struct spdk_bs_super_block super_copy = {};

	thresholds.general_threshold = 1;
	while (!clean) {
		/* Create bs and blob */
		suite_blob_setup();
		SPDK_CU_ASSERT_FATAL(g_bs != NULL);
		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
		bs = g_bs;
		blob = g_blob;

		/* Super block should not change for rest of the UT,
		 * save it and compare later. */
		memcpy(&super_copy, super, sizeof(struct spdk_bs_super_block));
		SPDK_CU_ASSERT_FATAL(super->clean == 0);
		SPDK_CU_ASSERT_FATAL(bs->clean == 0);

		/* Force bs/super block in a clean state.
		 * Along with marking blob dirty, to cause blob persist. */
		blob->state = SPDK_BLOB_STATE_DIRTY;
		bs->clean = 1;
		super->clean = 1;
		/* Recompute the CRC so the forged "clean" super block is valid. */
		super->crc = blob_md_page_calc_crc(super);

		g_bserrno = -1;
		dev_set_power_failure_thresholds(thresholds);
		spdk_blob_sync_md(blob, blob_op_complete, NULL);
		poll_threads();
		dev_reset_power_failure_event();

		if (g_bserrno == 0) {
			/* After successful md sync, both bs and super block
			 * should be marked as not clean. */
			SPDK_CU_ASSERT_FATAL(bs->clean == 0);
			SPDK_CU_ASSERT_FATAL(super->clean == 0);
			clean = true;
		}

		/* Depending on the point of failure, super block was either updated or not. */
		super_copy.clean = super->clean;
		super_copy.crc = blob_md_page_calc_crc(&super_copy);
		/* Compare that the values in super block remained unchanged. */
		SPDK_CU_ASSERT_FATAL(!memcmp(&super_copy, super, sizeof(struct spdk_bs_super_block)));

		/* Delete blob and unload bs */
		suite_blob_cleanup();

		/* Allow one more device write to succeed on the next iteration. */
		thresholds.general_threshold++;
	}
}
6606 
/*
 * Delete a snapshot while injecting power failures at every possible write,
 * then dirty-load the blobstore and verify it is always in one of two
 * consistent states: either the snapshot still fully exists with the blob as
 * its thin-provisioned clone, or the deletion completed and the blob owns
 * the clusters again. The loop advances the failure threshold until the
 * deletion finally succeeds without an injected failure.
 */
static void
blob_delete_snapshot_power_failure(void)
{
	struct spdk_bs_dev *dev;
	struct spdk_blob_store *bs;
	struct spdk_blob_opts opts;
	struct spdk_blob *blob, *snapshot;
	struct spdk_power_failure_thresholds thresholds = {};
	spdk_blob_id blobid, snapshotid;
	const void *value;
	size_t value_len;
	size_t count;
	spdk_blob_id ids[3] = {};
	int rc;
	bool deleted = false;
	int delete_snapshot_bserrno = -1;
	uint32_t first_data_cluster;

	thresholds.general_threshold = 1;
	while (!deleted) {
		dev = init_dev();

		spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		SPDK_CU_ASSERT_FATAL(g_bs != NULL);
		bs = g_bs;

		first_data_cluster = FIRST_DATA_CLUSTER(bs);

		/* Create blob */
		ut_spdk_blob_opts_init(&opts);
		opts.num_clusters = 10;

		spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
		blobid = g_blobid;

		/* Create snapshot */
		spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
		snapshotid = g_blobid;
		/* Exactly the blob's 10 clusters are allocated (now owned by the snapshot). */
		SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, first_data_cluster));
		SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, first_data_cluster + 10));

		dev_set_power_failure_thresholds(thresholds);

		spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
		poll_threads();
		delete_snapshot_bserrno = g_bserrno;

		/* Do not shut down cleanly. Assumption is that after snapshot deletion
		 * reports success, changes to both blobs should already be persisted. */
		dev_reset_power_failure_event();
		ut_bs_dirty_load(&bs, NULL);

		/* Cluster accounting must be unchanged regardless of where the failure hit. */
		SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, first_data_cluster));
		SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, first_data_cluster + 10));

		spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
		blob = g_blob;
		SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == true);

		spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
		poll_threads();

		if (g_bserrno == 0) {
			/* Snapshot survived: full parent/clone relation must be intact
			 * and no pending-removal marker may be left behind. */
			SPDK_CU_ASSERT_FATAL(g_blob != NULL);
			snapshot = g_blob;
			CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
			count = SPDK_COUNTOF(ids);
			rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
			CU_ASSERT(rc == 0);
			CU_ASSERT(count == 1);
			CU_ASSERT(ids[0] == blobid);
			rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_PENDING_REMOVAL, &value, &value_len);
			CU_ASSERT(rc != 0);
			SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(snapshot) == false);
			CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);
			CU_ASSERT(spdk_blob_get_num_allocated_clusters(snapshot) == 10);

			spdk_blob_close(snapshot, blob_op_complete, NULL);
			poll_threads();
			CU_ASSERT(g_bserrno == 0);
		} else {
			/* Snapshot gone: clusters must be back with the blob. */
			CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID);
			CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 10);
			/* Snapshot might have been left in unrecoverable state, so it does not open.
			 * Yet delete might perform further changes to the clone after that.
			 * This UT should test until snapshot is deleted and delete call succeeds. */
			if (delete_snapshot_bserrno == 0) {
				deleted = true;
			}
		}

		spdk_blob_close(blob, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);

		spdk_bs_unload(bs, bs_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);

		thresholds.general_threshold++;
	}
}
6720 
6721 static void
6722 blob_create_snapshot_power_failure(void)
6723 {
6724 	struct spdk_blob_store *bs = g_bs;
6725 	struct spdk_bs_dev *dev;
6726 	struct spdk_blob_opts opts;
6727 	struct spdk_blob *blob, *snapshot;
6728 	struct spdk_power_failure_thresholds thresholds = {};
6729 	spdk_blob_id blobid, snapshotid;
6730 	const void *value;
6731 	size_t value_len;
6732 	size_t count;
6733 	spdk_blob_id ids[3] = {};
6734 	int rc;
6735 	bool created = false;
6736 	int create_snapshot_bserrno = -1;
6737 	uint32_t first_data_cluster;
6738 
6739 	thresholds.general_threshold = 1;
6740 	while (!created) {
6741 		dev = init_dev();
6742 
6743 		spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
6744 		poll_threads();
6745 		CU_ASSERT(g_bserrno == 0);
6746 		SPDK_CU_ASSERT_FATAL(g_bs != NULL);
6747 		bs = g_bs;
6748 
6749 		first_data_cluster = FIRST_DATA_CLUSTER(bs);
6750 
6751 		/* Create blob */
6752 		ut_spdk_blob_opts_init(&opts);
6753 		opts.num_clusters = 10;
6754 
6755 		spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
6756 		poll_threads();
6757 		CU_ASSERT(g_bserrno == 0);
6758 		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
6759 		blobid = g_blobid;
6760 		SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, first_data_cluster));
6761 		SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, first_data_cluster + 10));
6762 
6763 		dev_set_power_failure_thresholds(thresholds);
6764 
6765 		/* Create snapshot */
6766 		spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
6767 		poll_threads();
6768 		create_snapshot_bserrno = g_bserrno;
6769 		snapshotid = g_blobid;
6770 		SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, first_data_cluster));
6771 		SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, first_data_cluster + 10));
6772 
6773 		/* Do not shut down cleanly. Assumption is that after create snapshot
6774 		 * reports success, both blobs should be power-fail safe. */
6775 		dev_reset_power_failure_event();
6776 		ut_bs_dirty_load(&bs, NULL);
6777 
6778 		SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, first_data_cluster));
6779 		SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, first_data_cluster + 10));
6780 
6781 		spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
6782 		poll_threads();
6783 		CU_ASSERT(g_bserrno == 0);
6784 		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
6785 		blob = g_blob;
6786 
6787 		if (snapshotid != SPDK_BLOBID_INVALID) {
6788 			spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
6789 			poll_threads();
6790 		}
6791 
6792 		if ((snapshotid != SPDK_BLOBID_INVALID) && (g_bserrno == 0)) {
6793 			SPDK_CU_ASSERT_FATAL(g_blob != NULL);
6794 			snapshot = g_blob;
6795 			SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == true);
6796 			SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(snapshot) == false);
6797 			CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);
6798 			CU_ASSERT(spdk_blob_get_num_allocated_clusters(snapshot) == 10);
6799 			CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
6800 			count = SPDK_COUNTOF(ids);
6801 			rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
6802 			CU_ASSERT(rc == 0);
6803 			CU_ASSERT(count == 1);
6804 			CU_ASSERT(ids[0] == blobid);
6805 			rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_IN_PROGRESS, &value, &value_len);
6806 			CU_ASSERT(rc != 0);
6807 
6808 			spdk_blob_close(snapshot, blob_op_complete, NULL);
6809 			poll_threads();
6810 			CU_ASSERT(g_bserrno == 0);
6811 			if (create_snapshot_bserrno == 0) {
6812 				created = true;
6813 			}
6814 		} else {
6815 			CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID);
6816 			SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == false);
6817 			CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 10);
6818 		}
6819 
6820 		spdk_blob_close(blob, blob_op_complete, NULL);
6821 		poll_threads();
6822 		CU_ASSERT(g_bserrno == 0);
6823 
6824 		spdk_bs_unload(bs, bs_op_complete, NULL);
6825 		poll_threads();
6826 		CU_ASSERT(g_bserrno == 0);
6827 
6828 		thresholds.general_threshold++;
6829 	}
6830 }
6831 
6832 #define IO_UT_BLOCKS_PER_CLUSTER 64
6833 
6834 static void
6835 test_io_write(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
6836 {
6837 	const uint32_t SZ = IO_UT_BLOCKS_PER_CLUSTER;
6838 	uint8_t payload_ff[SZ * 512];
6839 	uint8_t payload_aa[SZ * 512];
6840 	uint8_t payload_00[SZ * 512];
6841 	uint8_t *cluster0, *cluster1;
6842 
6843 	memset(payload_ff, 0xFF, sizeof(payload_ff));
6844 	memset(payload_aa, 0xAA, sizeof(payload_aa));
6845 	memset(payload_00, 0x00, sizeof(payload_00));
6846 
6847 	/* Try to perform I/O with io unit = 512 */
6848 	spdk_blob_io_write(blob, channel, payload_ff, 0, 1, blob_op_complete, NULL);
6849 	poll_threads();
6850 	CU_ASSERT(g_bserrno == 0);
6851 
6852 	/* If thin provisioned is set cluster should be allocated now */
6853 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[0] != 0);
6854 	cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen];
6855 
6856 	/* Each character 0-F symbolizes single io_unit containing 512 bytes block filled with that character.
6857 	* Each page is separated by |. Whole block [...] symbolizes one cluster (containing 4 pages). */
6858 	/* cluster0: [ F000 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
6859 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6860 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, (SZ - 1) * 512) == 0);
6861 
6862 	/* Verify write with offset on first page */
6863 	spdk_blob_io_write(blob, channel, payload_ff, 2, 1, blob_op_complete, NULL);
6864 	poll_threads();
6865 	CU_ASSERT(g_bserrno == 0);
6866 
6867 	/* cluster0: [ F0F0 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
6868 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6869 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6870 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6871 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6872 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_00, (SZ - 4) * 512) == 0);
6873 
6874 	/* Verify write with offset on first page */
6875 	spdk_blob_io_write(blob, channel, payload_ff, 4, 4, blob_op_complete, NULL);
6876 	poll_threads();
6877 
6878 	/* cluster0: [ F0F0 FFFF | 0000 0000 | 0000 0000 | 0000 0000 ] */
6879 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6880 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6881 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6882 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6883 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 4 * 512) == 0);
6884 	CU_ASSERT(memcmp(cluster0 + 8 * 512, payload_00, (SZ - 8) * 512) == 0);
6885 
6886 	/* Verify write with offset on second page */
6887 	spdk_blob_io_write(blob, channel, payload_ff, 8, 4, blob_op_complete, NULL);
6888 	poll_threads();
6889 
6890 	/* cluster0: [ F0F0 FFFF | FFFF 0000 | 0000 0000 | 0000 0000 ] */
6891 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6892 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6893 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6894 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6895 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 8 * 512) == 0);
6896 	CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, (SZ - 12) * 512) == 0);
6897 
6898 	/* Verify write across multiple pages */
6899 	spdk_blob_io_write(blob, channel, payload_aa, 4, 8, blob_op_complete, NULL);
6900 	poll_threads();
6901 
6902 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 0000 ] */
6903 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6904 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6905 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6906 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6907 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
6908 	CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, (SZ - 12) * 512) == 0);
6909 
6910 	/* Verify write across multiple clusters */
6911 	spdk_blob_io_write(blob, channel, payload_ff, SZ - 4, 8, blob_op_complete, NULL);
6912 	poll_threads();
6913 
6914 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0);
6915 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
6916 
6917 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6918 	 * cluster1: [ FFFF 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
6919 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6920 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6921 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6922 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6923 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
6924 	CU_ASSERT(memcmp(cluster0 + (SZ - 4) * 512, payload_ff, 4 * 512) == 0);
6925 
6926 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0);
6927 	CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, (SZ - 4) * 512) == 0);
6928 
6929 	/* Verify write to second cluster */
6930 	spdk_blob_io_write(blob, channel, payload_ff, SZ + 12, 2, blob_op_complete, NULL);
6931 	poll_threads();
6932 
6933 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0);
6934 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
6935 
6936 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6937 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] */
6938 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6939 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6940 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6941 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6942 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
6943 	CU_ASSERT(memcmp(cluster0 + (SZ - 4) * 512, payload_ff, 4 * 512) == 0);
6944 
6945 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0);
6946 	CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 8 * 512) == 0);
6947 	CU_ASSERT(memcmp(cluster1 + 12 * 512, payload_ff, 2 * 512) == 0);
6948 	CU_ASSERT(memcmp(cluster1 + 14 * 512, payload_00, (SZ - 14) * 512) == 0);
6949 }
6950 
/*
 * Read back the data pattern written by test_io_write() and verify every
 * combination: single io_unit, offset reads, multi-page reads, reads
 * crossing the cluster boundary, a full-cluster read and a full-blob read.
 * Expects the device buffer to hold:
 *   cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
 *   cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
 */
static void
test_io_read(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
{
	const uint32_t SZ = IO_UT_BLOCKS_PER_CLUSTER;
	uint8_t payload_read[2 * SZ * 512];
	uint8_t payload_ff[SZ * 512];
	uint8_t payload_aa[SZ * 512];
	uint8_t payload_00[SZ * 512];

	memset(payload_ff, 0xFF, sizeof(payload_ff));
	memset(payload_aa, 0xAA, sizeof(payload_aa));
	memset(payload_00, 0x00, sizeof(payload_00));

	/* Read only first io unit */
	/* cluster0: [ (F)0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
	 * payload_read: F000 0000 | 0000 0000 ... */
	memset(payload_read, 0x00, sizeof(payload_read));
	spdk_blob_io_read(blob, channel, payload_read, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, (SZ - 1) * 512) == 0);

	/* Read four io_units starting from offset = 2
	 * cluster0: [ F0(F0 AA)AA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
	 * payload_read: F0AA 0000 | 0000 0000 ... */

	memset(payload_read, 0x00, sizeof(payload_read));
	spdk_blob_io_read(blob, channel, payload_read, 2, 4, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0);
	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_aa, 512) == 0);
	CU_ASSERT(memcmp(payload_read + 3 * 512, payload_aa, 512) == 0);
	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, (SZ - 4) * 512) == 0);

	/* Read eight io_units across multiple pages
	 * cluster0: [ F0F0 (AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ]
	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
	 * payload_read: AAAA AAAA | 0000 0000 ... */
	memset(payload_read, 0x00, sizeof(payload_read));
	spdk_blob_io_read(blob, channel, payload_read, 4, 8, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_aa, 8 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, (SZ - 8) * 512) == 0);

	/* Read eight io_units across multiple clusters
	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 (FFFF ]
	 * cluster1: [ FFFF) 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
	 * payload_read: FFFF FFFF | 0000 0000 ... */
	memset(payload_read, 0x00, sizeof(payload_read));
	spdk_blob_io_read(blob, channel, payload_read, SZ - 4, 8, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 8 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, (SZ - 8) * 512) == 0);

	/* Read four io_units from second cluster
	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
	 * cluster1: [ FFFF 0000 | 00(00 FF)00 | 0000 0000 | 0000 0000 ]
	 * payload_read: 00FF 0000 | 0000 0000 ... */
	memset(payload_read, 0x00, sizeof(payload_read));
	spdk_blob_io_read(blob, channel, payload_read, SZ + 10, 4, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_00, 2 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 2 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, (SZ - 4) * 512) == 0);

	/* Read second cluster
	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
	 * cluster1: [ (FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ]
	 * payload_read: FFFF 0000 | 0000 FF00 ... */
	memset(payload_read, 0x00, sizeof(payload_read));
	spdk_blob_io_read(blob, channel, payload_read, SZ, SZ, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 4 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 8 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + 12 * 512, payload_ff, 2 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + 14 * 512, payload_00, (SZ - 14) * 512) == 0);

	/* Read whole two clusters
	 * cluster0: [ (F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ] */
	memset(payload_read, 0x00, sizeof(payload_read));
	spdk_blob_io_read(blob, channel, payload_read, 0, SZ * 2, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0);
	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 512) == 0);
	CU_ASSERT(memcmp(payload_read + 3 * 512, payload_00, 512) == 0);
	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_aa, 8 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + (SZ - 4) * 512, payload_ff, 4 * 512) == 0);

	CU_ASSERT(memcmp(payload_read + (SZ + 0) * 512, payload_ff, 4 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + (SZ + 4) * 512, payload_00, 8 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + (SZ + 12) * 512, payload_ff, 2 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + (SZ + 14) * 512, payload_00, (SZ - 14) * 512) == 0);
}
7061 
7062 
7063 static void
7064 test_io_unmap(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
7065 {
7066 	const uint32_t SZ = IO_UT_BLOCKS_PER_CLUSTER;
7067 	uint8_t payload_ff[SZ * 512];
7068 	uint8_t payload_aa[SZ * 512];
7069 	uint8_t payload_00[SZ * 512];
7070 	uint8_t *cluster0, *cluster1;
7071 
7072 	memset(payload_ff, 0xFF, sizeof(payload_ff));
7073 	memset(payload_aa, 0xAA, sizeof(payload_aa));
7074 	memset(payload_00, 0x00, sizeof(payload_00));
7075 
7076 	cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen];
7077 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
7078 
7079 	/* Unmap */
7080 	spdk_blob_io_unmap(blob, channel, 0, SZ * 2, blob_op_complete, NULL);
7081 	poll_threads();
7082 
7083 	CU_ASSERT(g_bserrno == 0);
7084 
7085 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_00, SZ * 512) == 0);
7086 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_00, SZ * 512) == 0);
7087 }
7088 
7089 static void
7090 test_io_zeroes(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
7091 {
7092 	const uint32_t SZ = IO_UT_BLOCKS_PER_CLUSTER;
7093 	uint8_t payload_ff[SZ * 512];
7094 	uint8_t payload_aa[SZ * 512];
7095 	uint8_t payload_00[SZ * 512];
7096 	uint8_t *cluster0, *cluster1;
7097 
7098 	memset(payload_ff, 0xFF, sizeof(payload_ff));
7099 	memset(payload_aa, 0xAA, sizeof(payload_aa));
7100 	memset(payload_00, 0x00, sizeof(payload_00));
7101 
7102 	cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen];
7103 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
7104 
7105 	/* Write zeroes  */
7106 	spdk_blob_io_write_zeroes(blob, channel, 0, SZ * 2, blob_op_complete, NULL);
7107 	poll_threads();
7108 
7109 	CU_ASSERT(g_bserrno == 0);
7110 
7111 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_00, SZ * 512) == 0);
7112 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_00, SZ * 512) == 0);
7113 }
7114 
7115 static inline void
7116 test_blob_io_writev(struct spdk_blob *blob, struct spdk_io_channel *channel,
7117 		    struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
7118 		    spdk_blob_op_complete cb_fn, void *cb_arg, struct spdk_blob_ext_io_opts *io_opts)
7119 {
7120 	if (io_opts) {
7121 		g_dev_writev_ext_called = false;
7122 		memset(&g_blob_ext_io_opts, 0, sizeof(g_blob_ext_io_opts));
7123 		spdk_blob_io_writev_ext(blob, channel, iov, iovcnt, offset, length, blob_op_complete, NULL,
7124 					io_opts);
7125 	} else {
7126 		spdk_blob_io_writev(blob, channel, iov, iovcnt, offset, length, blob_op_complete, NULL);
7127 	}
7128 	poll_threads();
7129 	CU_ASSERT(g_bserrno == 0);
7130 	if (io_opts) {
7131 		CU_ASSERT(g_dev_writev_ext_called);
7132 		CU_ASSERT(memcmp(io_opts, &g_blob_ext_io_opts, sizeof(g_blob_ext_io_opts)) == 0);
7133 	}
7134 }
7135 
7136 static void
7137 test_iov_write(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel,
7138 	       bool ext_api)
7139 {
7140 	const uint32_t SZ = IO_UT_BLOCKS_PER_CLUSTER;
7141 	uint8_t payload_ff[SZ * 512];
7142 	uint8_t payload_aa[SZ * 512];
7143 	uint8_t payload_00[SZ * 512];
7144 	uint8_t *cluster0, *cluster1;
7145 	struct iovec iov[4];
7146 	struct spdk_blob_ext_io_opts ext_opts = {
7147 		.memory_domain = (struct spdk_memory_domain *)0xfeedbeef,
7148 		.memory_domain_ctx = (void *)0xf00df00d,
7149 		.size = sizeof(struct spdk_blob_ext_io_opts),
7150 		.user_ctx = (void *)123,
7151 	};
7152 
7153 	memset(payload_ff, 0xFF, sizeof(payload_ff));
7154 	memset(payload_aa, 0xAA, sizeof(payload_aa));
7155 	memset(payload_00, 0x00, sizeof(payload_00));
7156 
7157 	/* Try to perform I/O with io unit = 512 */
7158 	iov[0].iov_base = payload_ff;
7159 	iov[0].iov_len = 1 * 512;
7160 
7161 	test_blob_io_writev(blob, channel, iov, 1, 0, 1, blob_op_complete, NULL,
7162 			    ext_api ? &ext_opts : NULL);
7163 
7164 	/* If thin provisioned is set cluster should be allocated now */
7165 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[0] != 0);
7166 	cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen];
7167 
7168 	/* Each character 0-F symbolizes single io_unit containing 512 bytes block filled with that character.
7169 	* Each page is separated by |. Whole block [...] symbolizes one cluster (containing 4 pages). */
7170 	/* cluster0: [ F000 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
7171 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
7172 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, (SZ - 1) * 512) == 0);
7173 
7174 	/* Verify write with offset on first page */
7175 	iov[0].iov_base = payload_ff;
7176 	iov[0].iov_len = 1 * 512;
7177 
7178 	test_blob_io_writev(blob, channel, iov, 1, 2, 1, blob_op_complete, NULL,
7179 			    ext_api ? &ext_opts : NULL);
7180 
7181 	/* cluster0: [ F0F0 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
7182 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
7183 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
7184 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
7185 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
7186 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_00, (SZ - 4) * 512) == 0);
7187 
7188 	/* Verify write with offset on first page */
7189 	iov[0].iov_base = payload_ff;
7190 	iov[0].iov_len = 4 * 512;
7191 	spdk_blob_io_writev(blob, channel, iov, 1, 4, 4, blob_op_complete, NULL);
7192 	poll_threads();
7193 
7194 	/* cluster0: [ F0F0 FFFF | 0000 0000 | 0000 0000 | 0000 0000 ] */
7195 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
7196 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
7197 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
7198 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
7199 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 4 * 512) == 0);
7200 	CU_ASSERT(memcmp(cluster0 + 8 * 512, payload_00, (SZ - 8) * 512) == 0);
7201 
7202 	/* Verify write with offset on second page */
7203 	iov[0].iov_base = payload_ff;
7204 	iov[0].iov_len = 4 * 512;
7205 	spdk_blob_io_writev(blob, channel, iov, 1, 8, 4, blob_op_complete, NULL);
7206 	poll_threads();
7207 
7208 	/* cluster0: [ F0F0 FFFF | FFFF 0000 | 0000 0000 | 0000 0000 ] */
7209 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
7210 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
7211 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
7212 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
7213 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 8 * 512) == 0);
7214 	CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, (SZ - 12) * 512) == 0);
7215 
7216 	/* Verify write across multiple pages */
7217 	iov[0].iov_base = payload_aa;
7218 	iov[0].iov_len = 8 * 512;
7219 
7220 	test_blob_io_writev(blob, channel, iov, 1, 4, 8, blob_op_complete, NULL,
7221 			    ext_api ? &ext_opts : NULL);
7222 
7223 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 0000 ] */
7224 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
7225 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
7226 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
7227 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
7228 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
7229 	CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, (SZ - 12) * 512) == 0);
7230 
7231 	/* Verify write across multiple clusters */
7232 
7233 	iov[0].iov_base = payload_ff;
7234 	iov[0].iov_len = 8 * 512;
7235 
7236 	test_blob_io_writev(blob, channel, iov, 1, (SZ - 4), 8, blob_op_complete, NULL,
7237 			    ext_api ? &ext_opts : NULL);
7238 
7239 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0);
7240 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
7241 
7242 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
7243 	 * cluster1: [ FFFF 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
7244 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
7245 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
7246 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
7247 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
7248 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
7249 	CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, (SZ - 16) * 512) == 0);
7250 	CU_ASSERT(memcmp(cluster0 + (SZ - 4) * 512, payload_ff, 4 * 512) == 0);
7251 
7252 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0);
7253 	CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, (SZ - 4) * 512) == 0);
7254 
7255 	/* Verify write to second cluster */
7256 
7257 	iov[0].iov_base = payload_ff;
7258 	iov[0].iov_len = 2 * 512;
7259 
7260 	test_blob_io_writev(blob, channel, iov, 1, SZ + 12, 2, blob_op_complete, NULL,
7261 			    ext_api ? &ext_opts : NULL);
7262 
7263 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0);
7264 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
7265 
7266 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
7267 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] */
7268 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
7269 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
7270 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
7271 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
7272 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
7273 	CU_ASSERT(memcmp(cluster0 + (SZ - 4) * 512, payload_ff, 4 * 512) == 0);
7274 
7275 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0);
7276 	CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 8 * 512) == 0);
7277 	CU_ASSERT(memcmp(cluster1 + 12 * 512, payload_ff, 2 * 512) == 0);
7278 	CU_ASSERT(memcmp(cluster1 + 14 * 512, payload_00, (SZ - 14) * 512) == 0);
7279 }
7280 
7281 static inline void
7282 test_blob_io_readv(struct spdk_blob *blob, struct spdk_io_channel *channel,
7283 		   struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
7284 		   spdk_blob_op_complete cb_fn, void *cb_arg, struct spdk_blob_ext_io_opts *io_opts)
7285 {
7286 	if (io_opts) {
7287 		g_dev_readv_ext_called = false;
7288 		memset(&g_blob_ext_io_opts, 0, sizeof(g_blob_ext_io_opts));
7289 		spdk_blob_io_readv_ext(blob, channel, iov, iovcnt, offset, length, blob_op_complete, NULL, io_opts);
7290 	} else {
7291 		spdk_blob_io_readv(blob, channel, iov, iovcnt, offset, length, blob_op_complete, NULL);
7292 	}
7293 	poll_threads();
7294 	CU_ASSERT(g_bserrno == 0);
7295 	if (io_opts) {
7296 		CU_ASSERT(g_dev_readv_ext_called);
7297 		CU_ASSERT(memcmp(io_opts, &g_blob_ext_io_opts, sizeof(g_blob_ext_io_opts)) == 0);
7298 	}
7299 }
7300 
/* Exercise vectored reads against a blob whose io unit is 512 bytes.
 *
 * Expects the on-disk pattern left behind by test_iov_write():
 *   cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
 *   cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
 * Covers reads of a single io_unit, offset reads, multi-iov reads, reads
 * spanning the cluster boundary, a full-cluster read and a full two-cluster
 * read. With ext_api set, the _ext API variants are used and opts propagation
 * is verified by test_blob_io_readv(). The parenthesized region in each
 * comment marks the range being read.
 */
static void
test_iov_read(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel,
	      bool ext_api)
{
	const uint32_t SZ = IO_UT_BLOCKS_PER_CLUSTER;
	uint8_t payload_read[2 * SZ * 512];
	uint8_t payload_ff[SZ * 512];
	uint8_t payload_aa[SZ * 512];
	uint8_t payload_00[SZ * 512];
	struct iovec iov[4];
	/* Dummy ext opts: pointer-valued fields are sentinels, never dereferenced
	 * by the UT device — only compared for propagation. */
	struct spdk_blob_ext_io_opts ext_opts = {
		.memory_domain = (struct spdk_memory_domain *)0xfeedbeef,
		.memory_domain_ctx = (void *)0xf00df00d,
		.size = sizeof(struct spdk_blob_ext_io_opts),
		.user_ctx = (void *)123,
	};

	memset(payload_ff, 0xFF, sizeof(payload_ff));
	memset(payload_aa, 0xAA, sizeof(payload_aa));
	memset(payload_00, 0x00, sizeof(payload_00));

	/* Read only first io unit */
	/* cluster0: [ (F)0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
	 * payload_read: F000 0000 | 0000 0000 ... */
	memset(payload_read, 0x00, sizeof(payload_read));
	iov[0].iov_base = payload_read;
	iov[0].iov_len = 1 * 512;

	test_blob_io_readv(blob, channel, iov, 1, 0, 1, blob_op_complete, NULL, ext_api ? &ext_opts : NULL);

	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, (SZ - 1) * 512) == 0);

	/* Read four io_units starting from offset = 2
	 * cluster0: [ F0(F0 AA)AA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
	 * payload_read: F0AA 0000 | 0000 0000 ... */

	memset(payload_read, 0x00, sizeof(payload_read));
	iov[0].iov_base = payload_read;
	iov[0].iov_len = 4 * 512;

	test_blob_io_readv(blob, channel, iov, 1, 2, 4, blob_op_complete, NULL, ext_api ? &ext_opts : NULL);

	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0);
	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_aa, 512) == 0);
	CU_ASSERT(memcmp(payload_read + 3 * 512, payload_aa, 512) == 0);
	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, (SZ - 4) * 512) == 0);

	/* Read eight io_units across multiple pages, scattered into two iovs
	 * cluster0: [ F0F0 (AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ]
	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
	 * payload_read: AAAA AAAA | 0000 0000 ... */
	memset(payload_read, 0x00, sizeof(payload_read));
	iov[0].iov_base = payload_read;
	iov[0].iov_len = 4 * 512;
	iov[1].iov_base = payload_read + 4 * 512;
	iov[1].iov_len = 4 * 512;

	test_blob_io_readv(blob, channel, iov, 2, 4, 8, blob_op_complete, NULL, ext_api ? &ext_opts : NULL);

	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_aa, 8 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, (SZ - 8) * 512) == 0);

	/* Read eight io_units across multiple clusters, scattered into four iovs
	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 (FFFF ]
	 * cluster1: [ FFFF) 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
	 * payload_read: FFFF FFFF | 0000 0000 ... */
	memset(payload_read, 0x00, sizeof(payload_read));
	iov[0].iov_base = payload_read;
	iov[0].iov_len = 2 * 512;
	iov[1].iov_base = payload_read + 2 * 512;
	iov[1].iov_len = 2 * 512;
	iov[2].iov_base = payload_read + 4 * 512;
	iov[2].iov_len = 2 * 512;
	iov[3].iov_base = payload_read + 6 * 512;
	iov[3].iov_len = 2 * 512;

	test_blob_io_readv(blob, channel, iov, 4, SZ - 4, 8, blob_op_complete, NULL,
			   ext_api ? &ext_opts : NULL);

	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 8 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, (SZ - 8) * 512) == 0);

	/* Read four io_units from second cluster
	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
	 * cluster1: [ FFFF 0000 | 00(00 FF)00 | 0000 0000 | 0000 0000 ]
	 * payload_read: 00FF 0000 | 0000 0000 ... */
	memset(payload_read, 0x00, sizeof(payload_read));
	iov[0].iov_base = payload_read;
	iov[0].iov_len = 1 * 512;
	iov[1].iov_base = payload_read + 1 * 512;
	iov[1].iov_len = 3 * 512;

	test_blob_io_readv(blob, channel, iov, 2, SZ + 10, 4, blob_op_complete, NULL,
			   ext_api ? &ext_opts : NULL);

	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_00, 2 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 2 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, (SZ - 4) * 512) == 0);

	/* Read entire second cluster, scattered into four unequal iovs
	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
	 * cluster1: [ (FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ]
	 * payload_read: FFFF 0000 | 0000 FF00 ... */
	memset(payload_read, 0x00, sizeof(payload_read));
	iov[0].iov_base = payload_read;
	iov[0].iov_len = 1 * 512;
	iov[1].iov_base = payload_read + 1 * 512;
	iov[1].iov_len = 2 * 512;
	iov[2].iov_base = payload_read + 3 * 512;
	iov[2].iov_len = 4 * 512;
	iov[3].iov_base = payload_read + 7 * 512;
	iov[3].iov_len = (SZ - 7) * 512;

	test_blob_io_readv(blob, channel, iov, 4, SZ, SZ, blob_op_complete, NULL,
			   ext_api ? &ext_opts : NULL);

	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 4 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 8 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + 12 * 512, payload_ff, 2 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + 14 * 512, payload_00, (SZ - 14) * 512) == 0);

	/* Read whole two clusters
	 * cluster0: [ (F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ] */
	memset(payload_read, 0x00, sizeof(payload_read));
	iov[0].iov_base = payload_read;
	iov[0].iov_len = 1 * 512;
	iov[1].iov_base = payload_read + 1 * 512;
	iov[1].iov_len = 8 * 512;
	iov[2].iov_base = payload_read + 9 * 512;
	iov[2].iov_len = 16 * 512;
	iov[3].iov_base = payload_read + 25 * 512;
	iov[3].iov_len = (2 * SZ - 25) * 512;

	test_blob_io_readv(blob, channel, iov, 4, 0, SZ * 2, blob_op_complete, NULL,
			   ext_api ? &ext_opts : NULL);

	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0);
	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 512) == 0);
	CU_ASSERT(memcmp(payload_read + 3 * 512, payload_00, 512) == 0);
	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_aa, 8 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + (SZ - 4) * 512, payload_ff, 4 * 512) == 0);

	CU_ASSERT(memcmp(payload_read + (SZ + 0) * 512, payload_ff, 4 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + (SZ + 4) * 512, payload_00, 8 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + (SZ + 12) * 512, payload_ff, 2 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + (SZ + 14) * 512, payload_00, (SZ - 14) * 512) == 0);
}
7454 
/* End-to-end I/O test on a blobstore whose io unit is 512 bytes.
 *
 * Initializes a blobstore on a 512-byte-block device, then runs the full
 * read/write/zeroes/unmap/iov helper suite against:
 *   - a thick-provisioned blob,
 *   - a thin-provisioned blob,
 *   - a snapshot of the thin blob, a clone of that snapshot, and the clone
 *     again after inflation.
 * Both the plain and the _ext (ext_api=true) I/O paths are exercised.
 */
static void
blob_io_unit(void)
{
	struct spdk_bs_opts bsopts;
	struct spdk_blob_opts opts;
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct spdk_blob *blob, *snapshot, *clone;
	spdk_blob_id blobid;
	struct spdk_io_channel *channel;

	/* Create dev with 512 bytes io unit size */

	spdk_bs_opts_init(&bsopts, sizeof(bsopts));
	bsopts.cluster_sz = IO_UT_BLOCKS_PER_CLUSTER * 512;
	snprintf(bsopts.bstype.bstype, sizeof(bsopts.bstype.bstype), "TESTTYPE");

	/* Back the blobstore with a 512-byte-block UT device */
	dev = init_dev();
	dev->blocklen = 512;
	dev->blockcnt =  DEV_BUFFER_SIZE / dev->blocklen;

	/* Initialize a new blob store; the io unit must come out as 512 */
	spdk_bs_init(dev, &bsopts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	CU_ASSERT(spdk_bs_get_io_unit_size(bs) == 512);
	channel = spdk_bs_alloc_io_channel(bs);

	/* Create thick provisioned blob */
	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = false;
	opts.num_clusters = 32;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);

	test_io_write(dev, blob, channel);
	test_io_read(dev, blob, channel);
	test_io_zeroes(dev, blob, channel);

	test_iov_write(dev, blob, channel, false);
	test_iov_read(dev, blob, channel, false);
	test_io_zeroes(dev, blob, channel);

	test_iov_write(dev, blob, channel, true);
	test_iov_read(dev, blob, channel, true);

	test_io_unmap(dev, blob, channel);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	blob = NULL;
	g_blob = NULL;

	/* Create thin provisioned blob; the same suite must transparently
	 * allocate clusters on first write */

	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = true;
	opts.num_clusters = 32;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);

	test_io_write(dev, blob, channel);
	test_io_read(dev, blob, channel);
	test_io_zeroes(dev, blob, channel);

	test_iov_write(dev, blob, channel, false);
	test_iov_read(dev, blob, channel, false);
	test_io_zeroes(dev, blob, channel);

	test_iov_write(dev, blob, channel, true);
	test_iov_read(dev, blob, channel, true);

	/* Create snapshot of the thin blob */

	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	snapshot = g_blob;

	/* Clone the snapshot; blobid now tracks the clone */
	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	clone = g_blob;

	/* All three must read back the pattern written by test_iov_write() */
	test_io_read(dev, blob, channel);
	test_io_read(dev, snapshot, channel);
	test_io_read(dev, clone, channel);

	test_iov_read(dev, blob, channel, false);
	test_iov_read(dev, snapshot, channel, false);
	test_iov_read(dev, clone, channel, false);

	test_iov_read(dev, blob, channel, true);
	test_iov_read(dev, snapshot, channel, true);
	test_iov_read(dev, clone, channel, true);

	/* Inflate clone (detach it from the snapshot by copying clusters) */

	spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
	poll_threads();

	CU_ASSERT(g_bserrno == 0);

	test_io_read(dev, clone, channel);

	test_io_unmap(dev, clone, channel);

	test_iov_write(dev, clone, channel, false);
	test_iov_read(dev, clone, channel, false);
	test_io_unmap(dev, clone, channel);

	test_iov_write(dev, clone, channel, true);
	test_iov_read(dev, clone, channel, true);

	spdk_blob_close(blob, blob_op_complete, NULL);
	spdk_blob_close(snapshot, blob_op_complete, NULL);
	spdk_blob_close(clone, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	blob = NULL;
	g_blob = NULL;

	spdk_bs_free_io_channel(channel);
	poll_threads();

	/* Unload the blob store */
	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
	g_blob = NULL;
	g_blobid = 0;
}
7609 
/* Verify backward compatibility with pre-io_unit_size on-disk formats.
 *
 * First initializes a blobstore on a 512-byte-block device and confirms the
 * io unit size is recorded as 512. Then it rewrites the super block in the
 * raw device buffer with io_unit_size = 0 — mimicking a blobstore written by
 * an older SPDK version that had no such field — and reloads. The loader must
 * fall back to the legacy SPDK_BS_PAGE_SIZE io unit.
 */
static void
blob_io_unit_compatibility(void)
{
	struct spdk_bs_opts bsopts;
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct spdk_bs_super_block *super;

	/* Create dev with 512 bytes io unit size */

	spdk_bs_opts_init(&bsopts, sizeof(bsopts));
	bsopts.cluster_sz = g_phys_blocklen * 4;
	snprintf(bsopts.bstype.bstype, sizeof(bsopts.bstype.bstype), "TESTTYPE");

	/* Back the blobstore with a 512-byte-block UT device */
	dev = init_dev();
	dev->blocklen = 512;
	dev->blockcnt =  DEV_BUFFER_SIZE / dev->blocklen;

	/* Initialize a new blob store */
	spdk_bs_init(dev, &bsopts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	CU_ASSERT(spdk_bs_get_io_unit_size(bs) == 512);

	/* Unload the blob store */
	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Modify super block to behave like older version.
	 * Check if loaded io unit size equals SPDK_BS_PAGE_SIZE.
	 * The CRC must be recomputed or the load would reject the super block. */
	super = (struct spdk_bs_super_block *)&g_dev_buffer[0];
	super->io_unit_size = 0;
	super->crc = blob_md_page_calc_crc(super);

	/* spdk_bs_init consumed the previous dev; create a fresh one for load */
	dev = init_dev();
	dev->blocklen = 512;
	dev->blockcnt =  DEV_BUFFER_SIZE / dev->blocklen;

	spdk_bs_load(dev, &bsopts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	CU_ASSERT(spdk_bs_get_io_unit_size(bs) == SPDK_BS_PAGE_SIZE);

	/* Unload the blob store */
	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	g_bs = NULL;
	g_blob = NULL;
	g_blobid = 0;
}
7670 
7671 static void
7672 first_sync_complete(void *cb_arg, int bserrno)
7673 {
7674 	struct spdk_blob *blob = cb_arg;
7675 	int rc;
7676 
7677 	CU_ASSERT(bserrno == 0);
7678 	rc = spdk_blob_set_xattr(blob, "sync", "second", strlen("second") + 1);
7679 	CU_ASSERT(rc == 0);
7680 	CU_ASSERT(g_bserrno == -1);
7681 
7682 	/* Keep g_bserrno at -1, only the
7683 	 * second sync completion should set it at 0. */
7684 }
7685 
7686 static void
7687 second_sync_complete(void *cb_arg, int bserrno)
7688 {
7689 	struct spdk_blob *blob = cb_arg;
7690 	const void *value;
7691 	size_t value_len;
7692 	int rc;
7693 
7694 	CU_ASSERT(bserrno == 0);
7695 
7696 	/* Verify that the first sync completion had a chance to execute */
7697 	rc = spdk_blob_get_xattr_value(blob, "sync", &value, &value_len);
7698 	CU_ASSERT(rc == 0);
7699 	SPDK_CU_ASSERT_FATAL(value != NULL);
7700 	CU_ASSERT(value_len == strlen("second") + 1);
7701 	CU_ASSERT_NSTRING_EQUAL_FATAL(value, "second", value_len);
7702 
7703 	CU_ASSERT(g_bserrno == -1);
7704 	g_bserrno = bserrno;
7705 }
7706 
/* Verify that mutually exclusive blob operations are serialized correctly.
 *
 * For each locked operation (snapshot, inflate, clone, resize), a delete of
 * the same blob is issued while the operation is still in flight; the delete
 * must fail immediately with -EBUSY while the original operation completes
 * successfully. Finally two consecutive md syncs are issued without polling
 * in between to verify queued syncs complete in order (see
 * first_sync_complete/second_sync_complete).
 */
static void
blob_simultaneous_operations(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts opts;
	struct spdk_blob *blob, *snapshot;
	spdk_blob_id blobid, snapshotid;
	struct spdk_io_channel *channel;
	int rc;

	channel = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(channel != NULL);

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);

	/* Create snapshot and try to remove blob in the same time:
	 * - snapshot should be created successfully
	 * - delete operation should fail w -EBUSY */
	CU_ASSERT(blob->locked_operation_in_progress == false);
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	CU_ASSERT(blob->locked_operation_in_progress == true);
	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
	CU_ASSERT(blob->locked_operation_in_progress == true);
	/* Deletion failure */
	CU_ASSERT(g_bserrno == -EBUSY);
	poll_threads();
	CU_ASSERT(blob->locked_operation_in_progress == false);
	/* Snapshot creation success */
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);

	snapshotid = g_blobid;

	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot = g_blob;

	/* Inflate blob and try to remove blob in the same time:
	 * - blob should be inflated successfully
	 * - delete operation should fail w -EBUSY */
	CU_ASSERT(blob->locked_operation_in_progress == false);
	spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
	CU_ASSERT(blob->locked_operation_in_progress == true);
	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
	CU_ASSERT(blob->locked_operation_in_progress == true);
	/* Deletion failure */
	CU_ASSERT(g_bserrno == -EBUSY);
	poll_threads();
	CU_ASSERT(blob->locked_operation_in_progress == false);
	/* Inflation success */
	CU_ASSERT(g_bserrno == 0);

	/* Clone snapshot and try to remove snapshot in the same time:
	 * - snapshot should be cloned successfully
	 * - delete operation should fail w -EBUSY
	 * NOTE(review): unlike the other sections this one asserts the lock flag
	 * on `blob`, not on `snapshot`, even though the clone targets snapshotid —
	 * presumably intentional since blob is idle here, but confirm whether
	 * snapshot->locked_operation_in_progress was meant. */
	CU_ASSERT(blob->locked_operation_in_progress == false);
	spdk_bs_create_clone(bs, snapshotid, NULL, blob_op_with_id_complete, NULL);
	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
	/* Deletion failure */
	CU_ASSERT(g_bserrno == -EBUSY);
	poll_threads();
	CU_ASSERT(blob->locked_operation_in_progress == false);
	/* Clone created */
	CU_ASSERT(g_bserrno == 0);

	/* Resize blob and try to remove blob in the same time:
	 * - blob should be resized successfully
	 * - delete operation should fail w -EBUSY */
	CU_ASSERT(blob->locked_operation_in_progress == false);
	spdk_blob_resize(blob, 50, blob_op_complete, NULL);
	CU_ASSERT(blob->locked_operation_in_progress == true);
	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
	CU_ASSERT(blob->locked_operation_in_progress == true);
	/* Deletion failure */
	CU_ASSERT(g_bserrno == -EBUSY);
	poll_threads();
	CU_ASSERT(blob->locked_operation_in_progress == false);
	/* Blob resized successfully */
	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Issue two consecutive blob syncs, neither should fail.
	 * Force sync to actually occur by marking blob dirty each time.
	 * Execution of sync should not be enough to complete the operation,
	 * since disk I/O is required to complete it. */
	g_bserrno = -1;

	rc = spdk_blob_set_xattr(blob, "sync", "first", strlen("first") + 1);
	CU_ASSERT(rc == 0);
	spdk_blob_sync_md(blob, first_sync_complete, blob);
	CU_ASSERT(g_bserrno == -1);

	spdk_blob_sync_md(blob, second_sync_complete, blob);
	CU_ASSERT(g_bserrno == -1);

	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();

	ut_blob_close_and_delete(bs, snapshot);
	ut_blob_close_and_delete(bs, blob);
}
7818 
/* Verify metadata persistence when a sync is interrupted mid-flight.
 *
 * A large xattr (sized to spill the blob metadata onto an extra md page) is
 * added and its sync is deliberately starved by polling only a limited number
 * of times. The xattr is then removed and fully synced. After reloading the
 * blobstore the xattr must be gone and the md page accounting must match the
 * clean state, regardless of how far the interrupted sync progressed. The
 * poller iteration budget grows each loop until the first sync manages to
 * complete, covering every interruption point.
 */
static void
blob_persist_test(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts opts;
	struct spdk_blob *blob;
	spdk_blob_id blobid;
	struct spdk_io_channel *channel;
	char *xattr;
	size_t xattr_length;
	int rc;
	uint32_t page_count_clear, page_count_xattr;
	uint64_t poller_iterations;
	bool run_poller;

	channel = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(channel != NULL);

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);

	/* Save the amount of md pages used after creation of a blob.
	 * This should be consistent after removing xattr. */
	page_count_clear = spdk_bit_array_count_set(bs->used_md_pages);
	SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_clear);
	SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_clear);

	/* Add xattr with maximum length of descriptor to exceed single metadata page. */
	xattr_length = SPDK_BS_MAX_DESC_SIZE - sizeof(struct spdk_blob_md_descriptor_xattr) -
		       strlen("large_xattr");
	xattr = calloc(xattr_length, sizeof(char));
	SPDK_CU_ASSERT_FATAL(xattr != NULL);

	rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(g_bserrno == 0);

	/* Save the amount of md pages used after adding the large xattr */
	page_count_xattr = spdk_bit_array_count_set(bs->used_md_pages);
	SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_xattr);
	SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_xattr);

	/* Add xattr to a blob and sync it. While sync is occurring, remove the xattr and sync again.
	 * Interrupt the first sync after increasing number of poller iterations, until it succeeds.
	 * Expectation is that after second sync completes no xattr is saved in metadata. */
	poller_iterations = 1;
	run_poller = true;
	while (run_poller) {
		rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
		SPDK_CU_ASSERT_FATAL(rc == 0);
		g_bserrno = -1;
		spdk_blob_sync_md(blob, blob_op_complete, NULL);
		/* Starve the sync: only this many poller iterations before the removal */
		poll_thread_times(0, poller_iterations);
		if (g_bserrno == 0) {
			/* Poller iteration count was high enough for first sync to complete.
			 * Verify that blob takes up enough of md_pages to store the xattr. */
			SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_xattr);
			SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_xattr);
			SPDK_CU_ASSERT_FATAL(spdk_bit_array_count_set(bs->used_md_pages) == page_count_xattr);
			run_poller = false;
		}
		rc = spdk_blob_remove_xattr(blob, "large_xattr");
		SPDK_CU_ASSERT_FATAL(rc == 0);
		spdk_blob_sync_md(blob, blob_op_complete, NULL);
		poll_threads();
		SPDK_CU_ASSERT_FATAL(g_bserrno == 0);
		SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_clear);
		SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_clear);
		SPDK_CU_ASSERT_FATAL(spdk_bit_array_count_set(bs->used_md_pages) == page_count_clear);

		/* Reload bs and re-open blob to verify that xattr was not persisted. */
		spdk_blob_close(blob, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);

		ut_bs_reload(&bs, NULL);

		spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
		blob = g_blob;

		/* Reuses &xattr as the output pointer; since the lookup must fail
		 * with -ENOENT, the calloc'd buffer pointer is left untouched and
		 * remains valid for the next loop iteration and the final free(). */
		rc = spdk_blob_get_xattr_value(blob, "large_xattr", (const void **)&xattr, &xattr_length);
		SPDK_CU_ASSERT_FATAL(rc == -ENOENT);

		poller_iterations++;
		/* Stop at high iteration count to prevent infinite loop.
		 * This value should be enough for first md sync to complete in any case. */
		SPDK_CU_ASSERT_FATAL(poller_iterations < 50);
	}

	free(xattr);

	ut_blob_close_and_delete(bs, blob);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}
7923 
7924 static void
7925 blob_decouple_snapshot(void)
7926 {
7927 	struct spdk_blob_store *bs = g_bs;
7928 	struct spdk_blob_opts opts;
7929 	struct spdk_blob *blob, *snapshot1, *snapshot2;
7930 	struct spdk_io_channel *channel;
7931 	spdk_blob_id blobid, snapshotid;
7932 	uint64_t cluster;
7933 
7934 	for (int delete_snapshot_first = 0; delete_snapshot_first <= 1; delete_snapshot_first++) {
7935 		channel = spdk_bs_alloc_io_channel(bs);
7936 		SPDK_CU_ASSERT_FATAL(channel != NULL);
7937 
7938 		ut_spdk_blob_opts_init(&opts);
7939 		opts.num_clusters = 10;
7940 		opts.thin_provision = false;
7941 
7942 		blob = ut_blob_create_and_open(bs, &opts);
7943 		blobid = spdk_blob_get_id(blob);
7944 
7945 		/* Create first snapshot */
7946 		CU_ASSERT_EQUAL(_get_snapshots_count(bs), 0);
7947 		spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
7948 		poll_threads();
7949 		CU_ASSERT(g_bserrno == 0);
7950 		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
7951 		CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
7952 		snapshotid = g_blobid;
7953 
7954 		spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
7955 		poll_threads();
7956 		CU_ASSERT(g_bserrno == 0);
7957 		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
7958 		snapshot1 = g_blob;
7959 
7960 		/* Create the second one */
7961 		CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
7962 		spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
7963 		poll_threads();
7964 		CU_ASSERT(g_bserrno == 0);
7965 		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
7966 		CU_ASSERT_EQUAL(_get_snapshots_count(bs), 2);
7967 		snapshotid = g_blobid;
7968 
7969 		spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
7970 		poll_threads();
7971 		CU_ASSERT(g_bserrno == 0);
7972 		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
7973 		snapshot2 = g_blob;
7974 		CU_ASSERT_EQUAL(spdk_blob_get_parent_snapshot(bs, snapshot2->id), snapshot1->id);
7975 
7976 		/* Now decouple the second snapshot forcing it to copy the written clusters */
7977 		spdk_bs_blob_decouple_parent(bs, channel, snapshot2->id, blob_op_complete, NULL);
7978 		poll_threads();
7979 		CU_ASSERT(g_bserrno == 0);
7980 
7981 		/* Verify that the snapshot has been decoupled and that the clusters have been copied */
7982 		CU_ASSERT_EQUAL(spdk_blob_get_parent_snapshot(bs, snapshot2->id), SPDK_BLOBID_INVALID);
7983 		for (cluster = 0; cluster < snapshot2->active.num_clusters; ++cluster) {
7984 			CU_ASSERT_NOT_EQUAL(snapshot2->active.clusters[cluster], 0);
7985 			CU_ASSERT_NOT_EQUAL(snapshot2->active.clusters[cluster],
7986 					    snapshot1->active.clusters[cluster]);
7987 		}
7988 
7989 		spdk_bs_free_io_channel(channel);
7990 
7991 		if (delete_snapshot_first) {
7992 			ut_blob_close_and_delete(bs, snapshot2);
7993 			ut_blob_close_and_delete(bs, snapshot1);
7994 			ut_blob_close_and_delete(bs, blob);
7995 		} else {
7996 			ut_blob_close_and_delete(bs, blob);
7997 			ut_blob_close_and_delete(bs, snapshot2);
7998 			ut_blob_close_and_delete(bs, snapshot1);
7999 		}
8000 		poll_threads();
8001 	}
8002 }
8003 
8004 static void
8005 blob_seek_io_unit(void)
8006 {
8007 	struct spdk_blob_store *bs = g_bs;
8008 	struct spdk_blob *blob;
8009 	struct spdk_io_channel *channel;
8010 	struct spdk_blob_opts opts;
8011 	uint64_t free_clusters;
8012 	uint8_t payload[10 * BLOCKLEN];
8013 	uint64_t offset;
8014 	uint64_t io_unit, io_units_per_cluster;
8015 
8016 	free_clusters = spdk_bs_free_cluster_count(bs);
8017 
8018 	channel = spdk_bs_alloc_io_channel(bs);
8019 	CU_ASSERT(channel != NULL);
8020 
8021 	/* Set blob as thin provisioned */
8022 	ut_spdk_blob_opts_init(&opts);
8023 	opts.thin_provision = true;
8024 
8025 	/* Create a blob */
8026 	blob = ut_blob_create_and_open(bs, &opts);
8027 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
8028 
8029 	io_units_per_cluster = bs_io_units_per_cluster(blob);
8030 
8031 	/* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */
8032 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
8033 	poll_threads();
8034 	CU_ASSERT(g_bserrno == 0);
8035 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
8036 	CU_ASSERT(blob->active.num_clusters == 5);
8037 
8038 	/* Write at the beginning of first cluster */
8039 	offset = 0;
8040 	spdk_blob_io_write(blob, channel, payload, offset, 1, blob_op_complete, NULL);
8041 	poll_threads();
8042 	CU_ASSERT(g_bserrno == 0);
8043 
8044 	io_unit = spdk_blob_get_next_allocated_io_unit(blob, 0);
8045 	CU_ASSERT(io_unit == offset);
8046 
8047 	io_unit = spdk_blob_get_next_unallocated_io_unit(blob, 0);
8048 	CU_ASSERT(io_unit == io_units_per_cluster);
8049 
8050 	/* Write in the middle of third cluster */
8051 	offset = 2 * io_units_per_cluster + io_units_per_cluster / 2;
8052 	spdk_blob_io_write(blob, channel, payload, offset, 1, blob_op_complete, NULL);
8053 	poll_threads();
8054 	CU_ASSERT(g_bserrno == 0);
8055 
8056 	io_unit = spdk_blob_get_next_allocated_io_unit(blob, io_units_per_cluster);
8057 	CU_ASSERT(io_unit == 2 * io_units_per_cluster);
8058 
8059 	io_unit = spdk_blob_get_next_unallocated_io_unit(blob, 2 * io_units_per_cluster);
8060 	CU_ASSERT(io_unit == 3 * io_units_per_cluster);
8061 
8062 	/* Write at the end of last cluster */
8063 	offset = 5 * io_units_per_cluster - 1;
8064 	spdk_blob_io_write(blob, channel, payload, offset, 1, blob_op_complete, NULL);
8065 	poll_threads();
8066 	CU_ASSERT(g_bserrno == 0);
8067 
8068 	io_unit = spdk_blob_get_next_allocated_io_unit(blob, 3 * io_units_per_cluster);
8069 	CU_ASSERT(io_unit == 4 * io_units_per_cluster);
8070 
8071 	io_unit = spdk_blob_get_next_unallocated_io_unit(blob, 4 * io_units_per_cluster);
8072 	CU_ASSERT(io_unit == UINT64_MAX);
8073 
8074 	spdk_bs_free_io_channel(channel);
8075 	poll_threads();
8076 
8077 	ut_blob_close_and_delete(bs, blob);
8078 }
8079 
/*
 * Exercise creation and opening of external snapshot (esnap) clone blobs:
 * sized and unsized creation, growth via resize, behavior across blobstore
 * reloads with and without an esnap_bs_dev_create callback, and delivery of
 * esnap_ctx (both the bs-level and per-open variants) to that callback.
 */
static void
blob_esnap_create(void)
{
	struct spdk_blob_store	*bs = g_bs;
	struct spdk_bs_opts	bs_opts;
	struct ut_esnap_opts	esnap_opts;
	struct spdk_blob_opts	opts;
	struct spdk_blob_open_opts open_opts;
	struct spdk_blob	*blob;
	uint32_t		cluster_sz, block_sz;
	const uint32_t		esnap_num_clusters = 4;
	uint64_t		esnap_num_blocks;
	uint32_t		sz;
	spdk_blob_id		blobid;
	uint32_t		bs_ctx_count, blob_ctx_count;

	cluster_sz = spdk_bs_get_cluster_size(bs);
	block_sz = spdk_bs_get_io_unit_size(bs);
	/* Size the esnap device to exactly esnap_num_clusters worth of blocks. */
	esnap_num_blocks = cluster_sz * esnap_num_clusters / block_sz;

	/* Create a normal blob and verify it is not an esnap clone. */
	ut_spdk_blob_opts_init(&opts);
	blob = ut_blob_create_and_open(bs, &opts);
	CU_ASSERT(!spdk_blob_is_esnap_clone(blob));
	ut_blob_close_and_delete(bs, blob);

	/* Create an esnap clone blob then verify it is an esnap clone and has the right size */
	ut_spdk_blob_opts_init(&opts);
	ut_esnap_opts_init(block_sz, esnap_num_blocks, __func__, NULL, &esnap_opts);
	opts.esnap_id = &esnap_opts;
	opts.esnap_id_len = sizeof(esnap_opts);
	opts.num_clusters = esnap_num_clusters;
	blob = ut_blob_create_and_open(bs, &opts);
	SPDK_CU_ASSERT_FATAL(blob != NULL);
	SPDK_CU_ASSERT_FATAL(spdk_blob_is_esnap_clone(blob));
	SPDK_CU_ASSERT_FATAL(blob_is_esnap_clone(blob));
	/* An esnap clone is not considered a regular (snapshot) clone. */
	SPDK_CU_ASSERT_FATAL(!spdk_blob_is_clone(blob));
	sz = spdk_blob_get_num_clusters(blob);
	CU_ASSERT(sz == esnap_num_clusters);
	ut_blob_close_and_delete(bs, blob);

	/* Create an esnap clone without the size and verify it can be grown */
	ut_spdk_blob_opts_init(&opts);
	ut_esnap_opts_init(block_sz, esnap_num_blocks, __func__, NULL, &esnap_opts);
	opts.esnap_id = &esnap_opts;
	opts.esnap_id_len = sizeof(esnap_opts);
	blob = ut_blob_create_and_open(bs, &opts);
	SPDK_CU_ASSERT_FATAL(spdk_blob_is_esnap_clone(blob));
	sz = spdk_blob_get_num_clusters(blob);
	CU_ASSERT(sz == 0);
	/* Grow it one cluster at a time ... */
	spdk_blob_resize(blob, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	sz = spdk_blob_get_num_clusters(blob);
	CU_ASSERT(sz == 1);
	/* ... up to the esnap device size ... */
	spdk_blob_resize(blob, esnap_num_clusters, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	sz = spdk_blob_get_num_clusters(blob);
	CU_ASSERT(sz == esnap_num_clusters);
	/* ... and even one cluster beyond the esnap device size. */
	spdk_blob_resize(blob, esnap_num_clusters + 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	sz = spdk_blob_get_num_clusters(blob);
	CU_ASSERT(sz == esnap_num_clusters + 1);

	/* Reload the blobstore and be sure that the blob can be opened. */
	blobid = spdk_blob_get_id(blob);
	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_blob = NULL;
	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
	bs_opts.esnap_bs_dev_create = ut_esnap_create;
	ut_bs_reload(&bs, &bs_opts);
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;
	SPDK_CU_ASSERT_FATAL(spdk_blob_is_esnap_clone(blob));
	/* Size survives the reload. */
	sz = spdk_blob_get_num_clusters(blob);
	CU_ASSERT(sz == esnap_num_clusters + 1);

	/* Reload the blobstore without esnap_bs_dev_create: should fail to open blob. */
	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_blob = NULL;
	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
	ut_bs_reload(&bs, &bs_opts);
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno != 0);
	CU_ASSERT(g_blob == NULL);

	/* Reload the blobstore with ctx set and verify it is passed to the esnap create callback */
	bs_ctx_count = 0;
	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
	bs_opts.esnap_bs_dev_create = ut_esnap_create_with_count;
	bs_opts.esnap_ctx = &bs_ctx_count;
	ut_bs_reload(&bs, &bs_opts);
	/* Loading the blobstore triggers the esnap to be loaded */
	CU_ASSERT(bs_ctx_count == 1);
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	/* Opening the blob also triggers the esnap to be loaded */
	CU_ASSERT(bs_ctx_count == 2);
	blob = g_blob;
	SPDK_CU_ASSERT_FATAL(spdk_blob_is_esnap_clone(blob));
	sz = spdk_blob_get_num_clusters(blob);
	CU_ASSERT(sz == esnap_num_clusters + 1);
	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_blob = NULL;
	/* If open_opts.esnap_ctx is set it is passed to the esnap create callback */
	blob_ctx_count = 0;
	spdk_blob_open_opts_init(&open_opts, sizeof(open_opts));
	open_opts.esnap_ctx = &blob_ctx_count;
	spdk_bs_open_blob_ext(bs, blobid, &open_opts, blob_op_with_handle_complete, NULL);
	poll_threads();
	blob = g_blob;
	/* The bs-level counter still ticks; the per-open counter ticks once. */
	CU_ASSERT(bs_ctx_count == 3);
	CU_ASSERT(blob_ctx_count == 1);
	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_blob = NULL;
}
8212 
/*
 * Build a chain esnap-clone <- snapshot <- clone, reload the blobstore, make
 * sure every blob in the chain opens and reads, then unload the blobstore the
 * way lvstore does (closes and unload issued back-to-back before polling) to
 * exercise the deferred-unload path in spdk_bs_unload().
 */
static void
blob_esnap_clone_reload(void)
{
	struct spdk_blob_store	*bs = g_bs;
	struct spdk_bs_opts	bs_opts;
	struct ut_esnap_opts	esnap_opts;
	struct spdk_blob_opts	opts;
	struct spdk_blob	*eclone1, *snap1, *clone1;
	uint32_t		cluster_sz = spdk_bs_get_cluster_size(bs);
	uint32_t		block_sz = spdk_bs_get_io_unit_size(bs);
	const uint32_t		esnap_num_clusters = 4;
	uint64_t		esnap_num_blocks = cluster_sz * esnap_num_clusters / block_sz;
	spdk_blob_id		eclone1_id, snap1_id, clone1_id;
	struct spdk_io_channel	*bs_ch;
	char			buf[block_sz];
	int			bserr1, bserr2, bserr3, bserr4;
	struct spdk_bs_dev	*dev;

	/* Create and open an esnap clone blob */
	ut_spdk_blob_opts_init(&opts);
	ut_esnap_opts_init(block_sz, esnap_num_blocks, __func__, NULL, &esnap_opts);
	opts.esnap_id = &esnap_opts;
	opts.esnap_id_len = sizeof(esnap_opts);
	opts.num_clusters = esnap_num_clusters;
	eclone1 = ut_blob_create_and_open(bs, &opts);
	CU_ASSERT(eclone1 != NULL);
	CU_ASSERT(spdk_blob_is_esnap_clone(eclone1));
	eclone1_id = eclone1->id;

	/* Create and open a snapshot of eclone1 */
	spdk_bs_create_snapshot(bs, eclone1_id, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	CU_ASSERT(g_bserrno == 0);
	snap1_id = g_blobid;
	spdk_bs_open_blob(bs, snap1_id, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	snap1 = g_blob;

	/* Create and open regular clone of snap1 */
	spdk_bs_create_clone(bs, snap1_id, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	SPDK_CU_ASSERT_FATAL(g_bserrno == 0);
	clone1_id = g_blobid;
	spdk_bs_open_blob(bs, clone1_id, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	clone1 = g_blob;

	/* Close the blobs in preparation for reloading the blobstore */
	spdk_blob_close(clone1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	spdk_blob_close(snap1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	spdk_blob_close(eclone1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_blob = NULL;

	/* Reload the blobstore */
	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
	bs_opts.esnap_bs_dev_create = ut_esnap_create;
	ut_bs_reload(&bs, &bs_opts);

	/* Be sure each of the blobs can be opened */
	spdk_bs_open_blob(bs, eclone1_id, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	eclone1 = g_blob;
	spdk_bs_open_blob(bs, snap1_id, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	snap1 = g_blob;
	spdk_bs_open_blob(bs, clone1_id, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	clone1 = g_blob;

	/* Perform some reads on each of them to cause channels to be allocated */
	bs_ch = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(bs_ch != NULL);
	spdk_blob_io_read(eclone1, bs_ch, buf, 0, 1, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	spdk_blob_io_read(snap1, bs_ch, buf, 0, 1, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	spdk_blob_io_read(clone1, bs_ch, buf, 0, 1, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/*
	 * Unload the blobstore in a way similar to how lvstore unloads it.  This should exercise
	 * the deferred unload path in spdk_bs_unload().
	 */
	bserr1 = 0xbad;
	bserr2 = 0xbad;
	bserr3 = 0xbad;
	bserr4 = 0xbad;
	/* Note: no poll_threads() until after all four operations are queued. */
	spdk_blob_close(eclone1, blob_op_complete, &bserr1);
	spdk_blob_close(snap1, blob_op_complete, &bserr2);
	spdk_blob_close(clone1, blob_op_complete, &bserr3);
	spdk_bs_unload(bs, blob_op_complete, &bserr4);
	spdk_bs_free_io_channel(bs_ch);
	poll_threads();
	CU_ASSERT(bserr1 == 0);
	CU_ASSERT(bserr2 == 0);
	CU_ASSERT(bserr3 == 0);
	CU_ASSERT(bserr4 == 0);
	g_blob = NULL;

	/* Reload the blobstore */
	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
	bs_opts.esnap_bs_dev_create = ut_esnap_create;
	dev = init_dev();
	/* Leave g_bs loaded for subsequent tests. */
	spdk_bs_load(dev, &bs_opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
}
8342 
8343 static bool
8344 blob_esnap_verify_contents(struct spdk_blob *blob, struct spdk_io_channel *ch,
8345 			   uint64_t offset, uint64_t size, uint32_t readsize, const char *how)
8346 {
8347 	const uint32_t	bs_blksz = blob->bs->io_unit_size;
8348 	const uint32_t	esnap_blksz = blob->back_bs_dev ? blob->back_bs_dev->blocklen : bs_blksz;
8349 	const uint32_t	start_blk = offset / bs_blksz;
8350 	const uint32_t	num_blocks = spdk_max(size, readsize) / bs_blksz;
8351 	const uint32_t	blocks_per_read = spdk_min(size, readsize) / bs_blksz;
8352 	uint32_t	blob_block;
8353 	struct iovec	iov;
8354 	uint8_t		buf[spdk_min(size, readsize)];
8355 	bool		block_ok;
8356 
8357 	SPDK_CU_ASSERT_FATAL(offset % bs_blksz == 0);
8358 	SPDK_CU_ASSERT_FATAL(size % bs_blksz == 0);
8359 	SPDK_CU_ASSERT_FATAL(readsize % bs_blksz == 0);
8360 
8361 	memset(buf, 0, readsize);
8362 	iov.iov_base = buf;
8363 	iov.iov_len = readsize;
8364 	for (blob_block = start_blk; blob_block < num_blocks; blob_block += blocks_per_read) {
8365 		if (strcmp(how, "read") == 0) {
8366 			spdk_blob_io_read(blob, ch, buf, blob_block, blocks_per_read,
8367 					  bs_op_complete, NULL);
8368 		} else if (strcmp(how, "readv") == 0) {
8369 			spdk_blob_io_readv(blob, ch, &iov, 1, blob_block, blocks_per_read,
8370 					   bs_op_complete, NULL);
8371 		} else if (strcmp(how, "readv_ext") == 0) {
8372 			/*
8373 			 * This is currently pointless. NULL ext_opts leads to dev->readv(), not
8374 			 * dev->readv_ext().
8375 			 */
8376 			spdk_blob_io_readv_ext(blob, ch, &iov, 1, blob_block, blocks_per_read,
8377 					       bs_op_complete, NULL, NULL);
8378 		} else {
8379 			abort();
8380 		}
8381 		poll_threads();
8382 		CU_ASSERT(g_bserrno == 0);
8383 		if (g_bserrno != 0) {
8384 			return false;
8385 		}
8386 		block_ok = ut_esnap_content_is_correct(buf, blocks_per_read * bs_blksz, blob->id,
8387 						       blob_block * bs_blksz, esnap_blksz);
8388 		CU_ASSERT(block_ok);
8389 		if (!block_ok) {
8390 			return false;
8391 		}
8392 	}
8393 
8394 	return true;
8395 }
8396 
/*
 * Parameterized esnap IO test: create a blobstore with io unit size bs_blksz
 * on top of an esnap device with block size esnap_blksz, verify that large
 * and small reads see the esnap content, then write the blob one io unit at
 * a time verifying that each write only affects its own block (the previous
 * block keeps the written pattern, the following block still shows esnap
 * content).
 */
static void
blob_esnap_io_size(uint32_t bs_blksz, uint32_t esnap_blksz)
{
	struct spdk_bs_dev	*dev;
	struct spdk_blob_store	*bs;
	struct spdk_bs_opts	bsopts;
	struct spdk_blob_opts	opts;
	struct ut_esnap_opts	esnap_opts;
	struct spdk_blob	*blob;
	const uint32_t		cluster_sz = 4 * g_phys_blocklen;
	const uint64_t		esnap_num_clusters = 4;
	const uint32_t		esnap_sz = cluster_sz * esnap_num_clusters;
	const uint64_t		esnap_num_blocks = esnap_sz / esnap_blksz;
	const uint64_t		blob_num_blocks = esnap_sz / bs_blksz;
	uint32_t		block;
	struct spdk_io_channel	*bs_ch;

	spdk_bs_opts_init(&bsopts, sizeof(bsopts));
	bsopts.cluster_sz = cluster_sz;
	bsopts.esnap_bs_dev_create = ut_esnap_create;

	/* Create device with desired block size */
	dev = init_dev();
	dev->blocklen = bs_blksz;
	dev->blockcnt = DEV_BUFFER_SIZE / dev->blocklen;

	/* Initialize a new blob store */
	spdk_bs_init(dev, &bsopts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	SPDK_CU_ASSERT_FATAL(g_bs->io_unit_size == bs_blksz);
	bs = g_bs;

	bs_ch = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(bs_ch != NULL);

	/* Create and open the esnap clone */
	ut_spdk_blob_opts_init(&opts);
	ut_esnap_opts_init(esnap_blksz, esnap_num_blocks, __func__, NULL, &esnap_opts);
	opts.esnap_id = &esnap_opts;
	opts.esnap_id_len = sizeof(esnap_opts);
	opts.num_clusters = esnap_num_clusters;
	blob = ut_blob_create_and_open(bs, &opts);
	SPDK_CU_ASSERT_FATAL(blob != NULL);

	/* Verify that large reads return the content of the esnap device */
	CU_ASSERT(blob_esnap_verify_contents(blob, bs_ch, 0, esnap_sz, esnap_sz, "read"));
	CU_ASSERT(blob_esnap_verify_contents(blob, bs_ch, 0, esnap_sz, esnap_sz, "readv"));
	CU_ASSERT(blob_esnap_verify_contents(blob, bs_ch, 0, esnap_sz, esnap_sz, "readv_ext"));
	/* Verify that small reads return the content of the esnap device */
	CU_ASSERT(blob_esnap_verify_contents(blob, bs_ch, 0, esnap_sz, bs_blksz, "read"));
	CU_ASSERT(blob_esnap_verify_contents(blob, bs_ch, 0, esnap_sz, bs_blksz, "readv"));
	CU_ASSERT(blob_esnap_verify_contents(blob, bs_ch, 0, esnap_sz, bs_blksz, "readv_ext"));

	/* Write one blob block at a time; verify that the surrounding blocks are OK */
	for (block = 0; block < blob_num_blocks; block++) {
		char		buf[bs_blksz];
		union ut_word	word;

		/* Fill the block with a pattern distinct from the esnap content
		 * (different blob_id) so writes and esnap data can be told apart.
		 */
		word.f.blob_id = 0xfedcba90;
		word.f.lba = block;
		ut_memset8(buf, word.num, bs_blksz);

		spdk_blob_io_write(blob, bs_ch, buf, block, 1, bs_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		if (g_bserrno != 0) {
			break;
		}

		/* Read and verify the block before the current block */
		if (block != 0) {
			spdk_blob_io_read(blob, bs_ch, buf, block - 1, 1, bs_op_complete, NULL);
			poll_threads();
			CU_ASSERT(g_bserrno == 0);
			if (g_bserrno != 0) {
				break;
			}
			CU_ASSERT(ut_esnap_content_is_correct(buf, bs_blksz, word.f.blob_id,
							      (block - 1) * bs_blksz, bs_blksz));
		}

		/* Read and verify the current block */
		spdk_blob_io_read(blob, bs_ch, buf, block, 1, bs_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		if (g_bserrno != 0) {
			break;
		}
		CU_ASSERT(ut_esnap_content_is_correct(buf, bs_blksz, word.f.blob_id,
						      block * bs_blksz, bs_blksz));

		/* Check the block that follows: still untouched esnap content */
		if (block + 1 < blob_num_blocks) {
			g_bserrno = 0xbad;
			spdk_blob_io_read(blob, bs_ch, buf, block + 1, 1, bs_op_complete, NULL);
			poll_threads();
			CU_ASSERT(g_bserrno == 0);
			if (g_bserrno != 0) {
				break;
			}
			CU_ASSERT(ut_esnap_content_is_correct(buf, bs_blksz, blob->id,
							      (block + 1) * bs_blksz,
							      esnap_blksz));
		}
	}

	/* Clean up */
	spdk_bs_free_io_channel(bs_ch);
	g_bserrno = 0xbad;
	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	spdk_bs_unload(g_bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
	/* Wipe the shared device buffer for the next test. */
	memset(g_dev_buffer, 0, DEV_BUFFER_SIZE);
}
8517 
/* Matched 4 KiB blobstore and esnap block sizes. */
static void
blob_esnap_io_4096_4096(void)
{
	const uint32_t blksz = 4096;

	blob_esnap_io_size(blksz, blksz);
}
8523 
/* Matched 512-byte blobstore and esnap block sizes. */
static void
blob_esnap_io_512_512(void)
{
	const uint32_t blksz = 512;

	blob_esnap_io_size(blksz, blksz);
}
8529 
/* 4 KiB blobstore io units on top of a 512-byte esnap device. */
static void
blob_esnap_io_4096_512(void)
{
	const uint32_t bs_blksz = 4096;
	const uint32_t esnap_blksz = 512;

	blob_esnap_io_size(bs_blksz, esnap_blksz);
}
8535 
/*
 * An esnap clone whose esnap device block size is larger than the blobstore
 * io unit size is not usable: creation succeeds (the esnap device is not
 * attached yet), but open must fail with -EINVAL.
 */
static void
blob_esnap_io_512_4096(void)
{
	struct spdk_bs_dev	*dev;
	struct spdk_blob_store	*bs;
	struct spdk_bs_opts	bs_opts;
	struct spdk_blob_opts	blob_opts;
	struct ut_esnap_opts	esnap_opts;
	uint64_t		cluster_sz = 4 * g_phys_blocklen;
	uint32_t		bs_blksz = 512;
	uint32_t		esnap_blksz = BLOCKLEN;
	uint64_t		esnap_num_blocks = 64;
	spdk_blob_id		blobid;

	/* Create device with desired block size */
	dev = init_dev();
	dev->blocklen = bs_blksz;
	dev->blockcnt = DEV_BUFFER_SIZE / dev->blocklen;

	/* Initialize a new blob store */
	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
	bs_opts.cluster_sz = cluster_sz;
	bs_opts.esnap_bs_dev_create = ut_esnap_create;
	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	SPDK_CU_ASSERT_FATAL(g_bs->io_unit_size == bs_blksz);
	bs = g_bs;

	/* Try to create and open the esnap clone. Create should succeed, open should fail. */
	ut_spdk_blob_opts_init(&blob_opts);
	ut_esnap_opts_init(esnap_blksz, esnap_num_blocks, __func__, NULL, &esnap_opts);
	blob_opts.esnap_id = &esnap_opts;
	blob_opts.esnap_id_len = sizeof(esnap_opts);
	blob_opts.num_clusters = esnap_num_blocks * esnap_blksz / bs_blksz;
	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	/* Open fails: the esnap block size does not fit the io unit size. */
	CU_ASSERT(g_bserrno == -EINVAL);
	CU_ASSERT(g_blob == NULL);

	/* Clean up */
	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
	memset(g_dev_buffer, 0, DEV_BUFFER_SIZE);
}
8589 
/*
 * Verify per-thread esnap bs_dev channel lifecycle: channels are created
 * lazily on the first read from each thread, torn down when that thread's
 * blobstore channel is freed, and the esnap device itself is destroyed when
 * the blob is closed.  Requires at least two ut threads.
 */
static void
blob_esnap_thread_add_remove(void)
{
	struct spdk_blob_store	*bs = g_bs;
	struct spdk_blob_opts	opts;
	struct ut_esnap_opts	ut_esnap_opts;
	struct spdk_blob	*blob;
	struct ut_esnap_dev	*ut_dev;
	spdk_blob_id		blobid;
	uint64_t		start_thread = g_ut_thread_id;
	bool			destroyed = false;	/* set by ut_esnap when the dev is destroyed */
	struct spdk_io_channel	*ch0, *ch1;
	struct ut_esnap_channel	*ut_ch0, *ut_ch1;
	const uint32_t		blocklen = bs->io_unit_size;
	char			buf[blocklen * 4];

	SPDK_CU_ASSERT_FATAL(g_ut_num_threads > 1);
	set_thread(0);

	/* Create the esnap clone */
	ut_esnap_opts_init(blocklen, 2048, "add_remove_1", &destroyed, &ut_esnap_opts);
	ut_spdk_blob_opts_init(&opts);
	opts.esnap_id = &ut_esnap_opts;
	opts.esnap_id_len = sizeof(ut_esnap_opts);
	opts.num_clusters = 10;
	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	/* Open the blob. No channels should be allocated yet. */
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;
	ut_dev = (struct ut_esnap_dev *)blob->back_bs_dev;
	CU_ASSERT(ut_dev != NULL);
	CU_ASSERT(ut_dev->num_channels == 0);

	/* Create a channel on thread 0. It is lazily created on the first read. */
	ch0 = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(ch0 != NULL);
	ut_ch0 = ut_esnap_get_io_channel(ch0, blobid);
	CU_ASSERT(ut_ch0 == NULL);
	CU_ASSERT(ut_dev->num_channels == 0);
	spdk_blob_io_read(blob, ch0, buf, 0, 1, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(ut_dev->num_channels == 1);
	ut_ch0 = ut_esnap_get_io_channel(ch0, blobid);
	CU_ASSERT(ut_ch0 != NULL);
	CU_ASSERT(ut_ch0->blocks_read == 1);

	/* Create a channel on thread 1 and verify its lazy creation too. */
	set_thread(1);
	ch1 = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(ch1 != NULL);
	ut_ch1 = ut_esnap_get_io_channel(ch1, blobid);
	CU_ASSERT(ut_ch1 == NULL);
	CU_ASSERT(ut_dev->num_channels == 1);
	spdk_blob_io_read(blob, ch1, buf, 0, 4, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(ut_dev->num_channels == 2);
	ut_ch1 = ut_esnap_get_io_channel(ch1, blobid);
	CU_ASSERT(ut_ch1 != NULL);
	CU_ASSERT(ut_ch1->blocks_read == 4);

	/* Close the channel on thread 0 and verify the bs_dev channel is also gone. */
	set_thread(0);
	spdk_bs_free_io_channel(ch0);
	poll_threads();
	CU_ASSERT(ut_dev->num_channels == 1);

	/* Close the blob. There is no outstanding IO so it should close right away. */
	g_bserrno = 0xbad;
	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	/* Closing the blob destroys the esnap device. */
	CU_ASSERT(destroyed);

	/* The esnap channel for the blob should be gone now too. */
	ut_ch1 = ut_esnap_get_io_channel(ch1, blobid);
	CU_ASSERT(ut_ch1 == NULL);

	/* Clean up */
	set_thread(1);
	spdk_bs_free_io_channel(ch1);
	set_thread(start_thread);
}
8682 
/* Completion callback for blob_freeze_io(): bumps the caller's counter. */
static void
freeze_done(void *cb_arg, int bserrno)
{
	uint32_t *counter = cb_arg;

	CU_ASSERT(bserrno == 0);
	*counter += 1;
}
8691 
/* Completion callback for blob_unfreeze_io(): bumps the caller's counter. */
static void
unfreeze_done(void *cb_arg, int bserrno)
{
	uint32_t *counter = cb_arg;

	CU_ASSERT(bserrno == 0);
	*counter += 1;
}
8700 
/*
 * Verify blob IO freeze/unfreeze reference counting: frozen_refcnt tracks
 * nesting depth, and each freeze/unfreeze completion fires only after the
 * threads are polled (the operations go through a for_each_channel pass
 * across both threads' channels).
 */
static void
blob_nested_freezes(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_io_channel *channel[2];
	struct spdk_blob_opts opts;
	uint32_t freeze_cnt, unfreeze_cnt;
	int i;

	/* Allocate one blobstore channel per ut thread. */
	for (i = 0; i < 2; i++) {
		set_thread(i);
		channel[i] = spdk_bs_alloc_io_channel(bs);
		SPDK_CU_ASSERT_FATAL(channel[i] != NULL);
	}

	set_thread(0);

	ut_spdk_blob_opts_init(&opts);
	blob = ut_blob_create_and_open(bs, &opts);

	/* First just test a single freeze/unfreeze. */
	freeze_cnt = 0;
	unfreeze_cnt = 0;
	CU_ASSERT(blob->frozen_refcnt == 0);
	blob_freeze_io(blob, freeze_done, &freeze_cnt);
	/* The refcount bumps synchronously; the callback waits for polling. */
	CU_ASSERT(blob->frozen_refcnt == 1);
	CU_ASSERT(freeze_cnt == 0);
	poll_threads();
	CU_ASSERT(freeze_cnt == 1);
	blob_unfreeze_io(blob, unfreeze_done, &unfreeze_cnt);
	CU_ASSERT(blob->frozen_refcnt == 0);
	CU_ASSERT(unfreeze_cnt == 0);
	poll_threads();
	CU_ASSERT(unfreeze_cnt == 1);

	/* Now nest multiple freeze/unfreeze operations.  We should
	 * expect a callback for each operation, but only after
	 * the threads have been polled to ensure a for_each_channel()
	 * was executed.
	 */
	freeze_cnt = 0;
	unfreeze_cnt = 0;
	CU_ASSERT(blob->frozen_refcnt == 0);
	blob_freeze_io(blob, freeze_done, &freeze_cnt);
	CU_ASSERT(blob->frozen_refcnt == 1);
	CU_ASSERT(freeze_cnt == 0);
	blob_freeze_io(blob, freeze_done, &freeze_cnt);
	CU_ASSERT(blob->frozen_refcnt == 2);
	CU_ASSERT(freeze_cnt == 0);
	poll_threads();
	CU_ASSERT(freeze_cnt == 2);
	blob_unfreeze_io(blob, unfreeze_done, &unfreeze_cnt);
	CU_ASSERT(blob->frozen_refcnt == 1);
	CU_ASSERT(unfreeze_cnt == 0);
	blob_unfreeze_io(blob, unfreeze_done, &unfreeze_cnt);
	CU_ASSERT(blob->frozen_refcnt == 0);
	CU_ASSERT(unfreeze_cnt == 0);
	poll_threads();
	CU_ASSERT(unfreeze_cnt == 2);

	for (i = 0; i < 2; i++) {
		set_thread(i);
		spdk_bs_free_io_channel(channel[i]);
	}
	set_thread(0);
	ut_blob_close_and_delete(bs, blob);

	poll_threads();
	g_blob = NULL;
	g_blobid = 0;
}
8773 
8774 static void
8775 blob_ext_md_pages(void)
8776 {
8777 	struct spdk_blob_store *bs;
8778 	struct spdk_bs_dev *dev;
8779 	struct spdk_blob *blob;
8780 	struct spdk_blob_opts opts;
8781 	struct spdk_bs_opts bs_opts;
8782 	uint64_t free_clusters;
8783 
8784 	dev = init_dev();
8785 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
8786 	snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");
8787 	/* Issue #2932 was a bug in how we use bs_allocate_cluster() during resize.
8788 	 * It requires num_md_pages that is much smaller than the number of clusters.
8789 	 * Make sure we can create a blob that uses all of the free clusters.
8790 	 */
8791 	bs_opts.cluster_sz = 65536;
8792 	bs_opts.num_md_pages = 16;
8793 
8794 	/* Initialize a new blob store */
8795 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
8796 	poll_threads();
8797 	CU_ASSERT(g_bserrno == 0);
8798 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
8799 	bs = g_bs;
8800 
8801 	free_clusters = spdk_bs_free_cluster_count(bs);
8802 
8803 	ut_spdk_blob_opts_init(&opts);
8804 	opts.num_clusters = free_clusters;
8805 
8806 	blob = ut_blob_create_and_open(bs, &opts);
8807 	spdk_blob_close(blob, blob_op_complete, NULL);
8808 	CU_ASSERT(g_bserrno == 0);
8809 
8810 	spdk_bs_unload(bs, bs_op_complete, NULL);
8811 	poll_threads();
8812 	CU_ASSERT(g_bserrno == 0);
8813 	g_bs = NULL;
8814 }
8815 
static void
blob_esnap_clone_snapshot(void)
{
	/*
	 * When a snapshot is created, the blob that is being snapped becomes
	 * the leaf node (a clone of the snapshot) and the newly created
	 * snapshot sits between the snapped blob and the external snapshot.
	 *
	 * Before creating snap1
	 *
	 *   ,--------.     ,----------.
	 *   |  blob  |     |  vbdev   |
	 *   | blob1  |<----| nvme1n42 |
	 *   |  (rw)  |     |   (ro)   |
	 *   `--------'     `----------'
	 *       Figure 1
	 *
	 * After creating snap1
	 *
	 *   ,--------.     ,--------.     ,----------.
	 *   |  blob  |     |  blob  |     |  vbdev   |
	 *   | blob1  |<----| snap1  |<----| nvme1n42 |
	 *   |  (rw)  |     |  (ro)  |     |   (ro)   |
	 *   `--------'     `--------'     `----------'
	 *       Figure 2
	 *
	 * Starting from Figure 2, if snap1 is removed, the chain reverts to
	 * what it looks like in Figure 1.
	 *
	 * Starting from Figure 2, if blob1 is removed, the chain becomes:
	 *
	 *   ,--------.     ,----------.
	 *   |  blob  |     |  vbdev   |
	 *   | snap1  |<----| nvme1n42 |
	 *   |  (ro)  |     |   (ro)   |
	 *   `--------'     `----------'
	 *       Figure 3
	 *
	 * In each case, the blob pointed to by the nvme vbdev is considered
	 * the "esnap clone".  The esnap clone must have:
	 *
	 *   - XATTR_INTERNAL for BLOB_EXTERNAL_SNAPSHOT_ID (e.g. name or UUID)
	 *   - blob->invalid_flags must contain SPDK_BLOB_EXTERNAL_SNAPSHOT
	 *   - blob->parent_id must be SPDK_BLOBID_EXTERNAL_SNAPSHOT.
	 *
	 * No other blob that descends from the esnap clone may have any of
	 * those set.
	 */
	struct spdk_blob_store	*bs = g_bs;
	const uint32_t		blocklen = bs->io_unit_size;
	struct spdk_blob_opts	opts;
	struct ut_esnap_opts	esnap_opts;
	struct spdk_blob	*blob, *snap_blob;
	spdk_blob_id		blobid, snap_blobid;
	bool			destroyed = false;

	/* Create the esnap clone */
	ut_esnap_opts_init(blocklen, 2048, __func__, &destroyed, &esnap_opts);
	ut_spdk_blob_opts_init(&opts);
	opts.esnap_id = &esnap_opts;
	opts.esnap_id_len = sizeof(esnap_opts);
	opts.num_clusters = 10;
	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	/* Open the blob. */
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;
	UT_ASSERT_IS_ESNAP_CLONE(blob, &esnap_opts, sizeof(esnap_opts));

	/*
	 * Create a snapshot of the blob. The snapshot becomes the esnap clone.
	 * This transitions the chain from Figure 1 to Figure 2 above.
	 */
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snap_blobid = g_blobid;

	/* Open the new snapshot so its esnap-related state can be checked. */
	spdk_bs_open_blob(bs, snap_blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snap_blob = g_blob;

	/* The esnap clone role must have moved from the blob to the snapshot. */
	UT_ASSERT_IS_NOT_ESNAP_CLONE(blob);
	UT_ASSERT_IS_ESNAP_CLONE(snap_blob, &esnap_opts, sizeof(esnap_opts));

	/*
	 * Delete the snapshot.  The original blob becomes the esnap clone.
	 * This transitions the chain from Figure 2 back to Figure 1.
	 */
	ut_blob_close_and_delete(bs, snap_blob);
	snap_blob = NULL;
	snap_blobid = SPDK_BLOBID_INVALID;
	UT_ASSERT_IS_ESNAP_CLONE(blob, &esnap_opts, sizeof(esnap_opts));

	/*
	 * Create the snapshot again, then delete the original blob.  The
	 * snapshot should survive as the esnap clone (Figure 3 above).
	 */
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snap_blobid = g_blobid;

	spdk_bs_open_blob(bs, snap_blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snap_blob = g_blob;

	UT_ASSERT_IS_NOT_ESNAP_CLONE(blob);
	UT_ASSERT_IS_ESNAP_CLONE(snap_blob, &esnap_opts, sizeof(esnap_opts));

	ut_blob_close_and_delete(bs, blob);
	blob = NULL;
	blobid = SPDK_BLOBID_INVALID;
	UT_ASSERT_IS_ESNAP_CLONE(snap_blob, &esnap_opts, sizeof(esnap_opts));

	/*
	 * Clone the snapshot.  The snapshot continues to be the esnap clone.
	 */
	spdk_bs_create_clone(bs, snap_blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;

	UT_ASSERT_IS_NOT_ESNAP_CLONE(blob);
	UT_ASSERT_IS_ESNAP_CLONE(snap_blob, &esnap_opts, sizeof(esnap_opts));

	/*
	 * Delete the snapshot. The clone becomes the esnap clone.
	 */
	ut_blob_close_and_delete(bs, snap_blob);
	snap_blob = NULL;
	snap_blobid = SPDK_BLOBID_INVALID;
	UT_ASSERT_IS_ESNAP_CLONE(blob, &esnap_opts, sizeof(esnap_opts));

	/*
	 * Clean up
	 */
	ut_blob_close_and_delete(bs, blob);
}
8973 
/*
 * Create an esnap clone, then sever its dependency on the external snapshot
 * either by full inflation (inflate == true) or by decoupling it from its
 * parent (inflate == false), and verify the blob still holds the external
 * snapshot's content afterward.
 *
 * Returns the number of CUnit assertion failures recorded during this call,
 * so a caller can tell whether this particular invocation failed.
 */
static uint64_t
_blob_esnap_clone_hydrate(bool inflate)
{
	struct spdk_blob_store	*bs = g_bs;
	struct spdk_blob_opts	opts;
	struct ut_esnap_opts	esnap_opts;
	struct spdk_blob	*blob;
	spdk_blob_id		blobid;
	struct spdk_io_channel *channel;
	bool			destroyed = false;
	const uint32_t		blocklen = spdk_bs_get_io_unit_size(bs);
	const uint32_t		cluster_sz = spdk_bs_get_cluster_size(bs);
	const uint64_t		esnap_num_clusters = 4;
	const uint32_t		esnap_sz = cluster_sz * esnap_num_clusters;
	const uint64_t		esnap_num_blocks = esnap_sz / blocklen;
	/* Baseline failure count, used to compute this call's delta on return. */
	uint64_t		num_failures = CU_get_number_of_failures();

	channel = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(channel != NULL);

	/* Create the esnap clone */
	ut_spdk_blob_opts_init(&opts);
	ut_esnap_opts_init(blocklen, esnap_num_blocks, __func__, &destroyed, &esnap_opts);
	opts.esnap_id = &esnap_opts;
	opts.esnap_id_len = sizeof(esnap_opts);
	opts.num_clusters = esnap_num_clusters;
	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	/* Open the esnap clone */
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;
	UT_ASSERT_IS_ESNAP_CLONE(blob, &esnap_opts, sizeof(esnap_opts));

	/*
	 * Inflate or decouple  the blob then verify that it is no longer an esnap clone and has
	 * right content
	 */
	if (inflate) {
		spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
	} else {
		spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
	}
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	UT_ASSERT_IS_NOT_ESNAP_CLONE(blob);
	CU_ASSERT(blob_esnap_verify_contents(blob, channel, 0, esnap_sz, esnap_sz, "read"));
	ut_blob_close_and_delete(bs, blob);

	/*
	 * Clean up
	 */
	spdk_bs_free_io_channel(channel);
	poll_threads();

	/* Return number of new failures */
	return CU_get_number_of_failures() - num_failures;
}
9038 
/* Inflate an esnap clone and verify it detaches from the external snapshot.
 * The helper's failure-count return value is intentionally ignored here;
 * CUnit tracks the assertion failures itself.
 */
static void
blob_esnap_clone_inflate(void)
{
	_blob_esnap_clone_hydrate(true);
}
9044 
/* Decouple an esnap clone from its parent and verify it detaches from the
 * external snapshot. The helper's failure-count return value is intentionally
 * ignored here; CUnit tracks the assertion failures itself.
 */
static void
blob_esnap_clone_decouple(void)
{
	_blob_esnap_clone_hydrate(false);
}
9050 
/*
 * Exercise spdk_blob_set_esnap_bs_dev(): replace the external snapshot device
 * under an open esnap clone, first with no I/O channels and then with a
 * channel active on each of two threads, verifying that the old device is
 * destroyed each time and the new one is wired up.
 */
static void
blob_esnap_hotplug(void)
{
	struct spdk_blob_store	*bs = g_bs;
	struct ut_esnap_opts	esnap1_opts, esnap2_opts;
	struct spdk_blob_opts	opts;
	struct spdk_blob	*blob;
	struct spdk_bs_dev	*bs_dev;
	struct ut_esnap_dev	*esnap_dev;
	uint32_t		cluster_sz = spdk_bs_get_cluster_size(bs);
	uint32_t		block_sz = spdk_bs_get_io_unit_size(bs);
	const uint32_t		esnap_num_clusters = 4;
	uint64_t		esnap_num_blocks = cluster_sz * esnap_num_clusters / block_sz;
	bool			destroyed1 = false, destroyed2 = false;
	uint64_t		start_thread = g_ut_thread_id;
	struct spdk_io_channel	*ch0, *ch1;
	/* Scratch buffer reused by the single-io-unit reads on both threads. */
	char			buf[block_sz];

	/* Create and open an esnap clone blob */
	ut_spdk_blob_opts_init(&opts);
	ut_esnap_opts_init(block_sz, esnap_num_blocks, "esnap1", &destroyed1, &esnap1_opts);
	opts.esnap_id = &esnap1_opts;
	opts.esnap_id_len = sizeof(esnap1_opts);
	opts.num_clusters = esnap_num_clusters;
	blob = ut_blob_create_and_open(bs, &opts);
	CU_ASSERT(blob != NULL);
	CU_ASSERT(spdk_blob_is_esnap_clone(blob));
	SPDK_CU_ASSERT_FATAL(blob->back_bs_dev != NULL);
	esnap_dev = (struct ut_esnap_dev *)blob->back_bs_dev;
	CU_ASSERT(strcmp(esnap_dev->ut_opts.name, "esnap1") == 0);

	/* Replace the external snapshot: esnap1 must be destroyed, esnap2 kept. */
	ut_esnap_opts_init(block_sz, esnap_num_blocks, "esnap2", &destroyed2, &esnap2_opts);
	bs_dev = ut_esnap_dev_alloc(&esnap2_opts);
	CU_ASSERT(!destroyed1);
	CU_ASSERT(!destroyed2);
	g_bserrno = 0xbad;
	spdk_blob_set_esnap_bs_dev(blob, bs_dev, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(destroyed1);
	CU_ASSERT(!destroyed2);
	SPDK_CU_ASSERT_FATAL(bs_dev == blob->back_bs_dev);
	SPDK_CU_ASSERT_FATAL(bs_dev == spdk_blob_get_esnap_bs_dev(blob));
	esnap_dev = (struct ut_esnap_dev *)blob->back_bs_dev;
	CU_ASSERT(strcmp(esnap_dev->ut_opts.name, "esnap2") == 0);

	/* Create a couple channels; the reads force esnap channel creation. */
	set_thread(0);
	ch0 = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(ch0 != NULL);
	spdk_blob_io_read(blob, ch0, buf, 0, 1, bs_op_complete, NULL);
	set_thread(1);
	ch1 = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(ch1 != NULL);
	spdk_blob_io_read(blob, ch1, buf, 0, 1, bs_op_complete, NULL);
	set_thread(start_thread);
	poll_threads();
	CU_ASSERT(esnap_dev->num_channels == 2);

	/* Replace the external snapshot again, now with active channels. */
	ut_esnap_opts_init(block_sz, esnap_num_blocks, "esnap1a", &destroyed1, &esnap1_opts);
	bs_dev = ut_esnap_dev_alloc(&esnap1_opts);
	destroyed1 = destroyed2 = false;
	g_bserrno = 0xbad;
	spdk_blob_set_esnap_bs_dev(blob, bs_dev, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(!destroyed1);
	CU_ASSERT(destroyed2);
	SPDK_CU_ASSERT_FATAL(blob->back_bs_dev != NULL);
	esnap_dev = (struct ut_esnap_dev *)blob->back_bs_dev;
	CU_ASSERT(strcmp(esnap_dev->ut_opts.name, "esnap1a") == 0);

	/* Clean up */
	set_thread(0);
	spdk_bs_free_io_channel(ch0);
	set_thread(1);
	spdk_bs_free_io_channel(ch1);
	set_thread(start_thread);
	g_bserrno = 0xbad;
	spdk_blob_close(blob, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
}
9136 
/* Test state for the is_degraded stub: staged return value and call counter. */
static bool g_blob_is_degraded;
static int g_blob_is_degraded_called;

/*
 * Stub spdk_bs_dev.is_degraded callback.  Counts how many times it is
 * consulted and reports whatever the test staged in g_blob_is_degraded.
 */
static bool
_blob_is_degraded(struct spdk_bs_dev *dev)
{
	(void)dev;

	g_blob_is_degraded_called += 1;

	return g_blob_is_degraded;
}
9146 
/*
 * Exercise spdk_blob_is_degraded() across combinations of
 * bs->dev->is_degraded and blob->back_bs_dev->is_degraded callbacks,
 * checking both the reported state and how many callbacks are consulted.
 */
static void
blob_is_degraded(void)
{
	/* back_bs_dev with no is_degraded callback vs. one using the stub. */
	struct spdk_bs_dev bs_is_degraded_null = { 0 };
	struct spdk_bs_dev bs_is_degraded = { .is_degraded = _blob_is_degraded };

	/* No back_bs_dev, no bs->dev->is_degraded */
	g_blob_is_degraded_called = 0;
	CU_ASSERT(!spdk_blob_is_degraded(g_blob));
	CU_ASSERT(g_blob_is_degraded_called == 0);

	/* No back_bs_dev, blobstore device degraded */
	g_bs->dev->is_degraded = _blob_is_degraded;
	g_blob_is_degraded_called = 0;
	g_blob_is_degraded = true;
	CU_ASSERT(spdk_blob_is_degraded(g_blob));
	CU_ASSERT(g_blob_is_degraded_called == 1);

	/* No back_bs_dev, blobstore device not degraded */
	g_bs->dev->is_degraded = _blob_is_degraded;
	g_blob_is_degraded_called = 0;
	g_blob_is_degraded = false;
	CU_ASSERT(!spdk_blob_is_degraded(g_blob));
	CU_ASSERT(g_blob_is_degraded_called == 1);

	/* back_bs_dev does not define is_degraded, no bs->dev->is_degraded */
	g_bs->dev->is_degraded = NULL;
	g_blob->back_bs_dev = &bs_is_degraded_null;
	g_blob_is_degraded_called = 0;
	g_blob_is_degraded = false;
	CU_ASSERT(!spdk_blob_is_degraded(g_blob));
	CU_ASSERT(g_blob_is_degraded_called == 0);

	/* back_bs_dev is not degraded, no bs->dev->is_degraded */
	g_bs->dev->is_degraded = NULL;
	g_blob->back_bs_dev = &bs_is_degraded;
	g_blob_is_degraded_called = 0;
	g_blob_is_degraded = false;
	CU_ASSERT(!spdk_blob_is_degraded(g_blob));
	CU_ASSERT(g_blob_is_degraded_called == 1);

	/* back_bs_dev is degraded, no bs->dev->is_degraded */
	g_bs->dev->is_degraded = NULL;
	g_blob->back_bs_dev = &bs_is_degraded;
	g_blob_is_degraded_called = 0;
	g_blob_is_degraded = true;
	CU_ASSERT(spdk_blob_is_degraded(g_blob));
	CU_ASSERT(g_blob_is_degraded_called == 1);

	/* back_bs_dev is not degraded, blobstore device is not degraded;
	 * both callbacks are consulted, hence called == 2. */
	g_bs->dev->is_degraded = _blob_is_degraded;
	g_blob->back_bs_dev = &bs_is_degraded;
	g_blob_is_degraded_called = 0;
	g_blob_is_degraded = false;
	CU_ASSERT(!spdk_blob_is_degraded(g_blob));
	CU_ASSERT(g_blob_is_degraded_called == 2);

	/* Detach the stack-local back_bs_dev before it goes out of scope. */
	g_blob->back_bs_dev = NULL;
}
9206 
9207 /* Resize a blob which is a clone created from snapshot. Verify read/writes to
9208  * expanded clone blob. Then inflate the clone blob. */
9209 static void
9210 blob_clone_resize(void)
9211 {
9212 	struct spdk_blob_store *bs = g_bs;
9213 	struct spdk_blob_opts opts;
9214 	struct spdk_blob *blob, *clone, *snap_blob, *snap_blob_rsz;
9215 	spdk_blob_id blobid, cloneid, snapid1, snapid2;
9216 	uint64_t pages_per_cluster;
9217 	uint8_t payload_read[bs->dev->blocklen];
9218 	uint8_t payload_write[bs->dev->blocklen];
9219 	struct spdk_io_channel *channel;
9220 	uint64_t free_clusters;
9221 
9222 	channel = spdk_bs_alloc_io_channel(bs);
9223 	SPDK_CU_ASSERT_FATAL(channel != NULL);
9224 
9225 	pages_per_cluster = spdk_bs_get_cluster_size(bs) / spdk_bs_get_page_size(bs);
9226 
9227 	/* Create blob with 10 clusters */
9228 	ut_spdk_blob_opts_init(&opts);
9229 	opts.num_clusters = 10;
9230 
9231 	blob = ut_blob_create_and_open(bs, &opts);
9232 	blobid = spdk_blob_get_id(blob);
9233 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
9234 
9235 	/* Create snapshot */
9236 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
9237 	poll_threads();
9238 	CU_ASSERT(g_bserrno == 0);
9239 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
9240 	snapid1 = g_blobid;
9241 
9242 	spdk_bs_create_clone(bs, snapid1, NULL, blob_op_with_id_complete, NULL);
9243 	poll_threads();
9244 	CU_ASSERT(g_bserrno == 0);
9245 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
9246 	cloneid = g_blobid;
9247 
9248 	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
9249 	poll_threads();
9250 	CU_ASSERT(g_bserrno == 0);
9251 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
9252 	clone = g_blob;
9253 	CU_ASSERT(spdk_blob_get_num_clusters(clone) == 10);
9254 
9255 	g_bserrno = -1;
9256 	spdk_blob_resize(clone, 20, blob_op_complete, NULL);
9257 	poll_threads();
9258 	CU_ASSERT(g_bserrno == 0);
9259 	CU_ASSERT(spdk_blob_get_num_clusters(clone) == 20);
9260 
9261 	/* Create another snapshot after resizing the clone */
9262 	spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL);
9263 	poll_threads();
9264 	CU_ASSERT(g_bserrno == 0);
9265 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
9266 	snapid2 = g_blobid;
9267 
9268 	/* Open the snapshot blobs */
9269 	spdk_bs_open_blob(bs, snapid1, blob_op_with_handle_complete, NULL);
9270 	CU_ASSERT(g_bserrno == 0);
9271 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
9272 	snap_blob = g_blob;
9273 	CU_ASSERT(snap_blob->data_ro == true);
9274 	CU_ASSERT(snap_blob->md_ro == true);
9275 	CU_ASSERT(spdk_blob_get_num_clusters(snap_blob) == 10);
9276 
9277 	spdk_bs_open_blob(bs, snapid2, blob_op_with_handle_complete, NULL);
9278 	CU_ASSERT(g_bserrno == 0);
9279 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
9280 	snap_blob_rsz = g_blob;
9281 	CU_ASSERT(snap_blob_rsz->data_ro == true);
9282 	CU_ASSERT(snap_blob_rsz->md_ro == true);
9283 	CU_ASSERT(spdk_blob_get_num_clusters(snap_blob_rsz) == 20);
9284 
9285 	/* Confirm that clone is backed by snap_blob_rsz, and snap_blob_rsz is backed by snap_blob */
9286 	SPDK_CU_ASSERT_FATAL(snap_blob->back_bs_dev == NULL);
9287 	SPDK_CU_ASSERT_FATAL(blob->back_bs_dev != NULL);
9288 	SPDK_CU_ASSERT_FATAL(snap_blob_rsz->back_bs_dev != NULL);
9289 
9290 	/* Write and read from pre-resize ranges */
9291 	g_bserrno = -1;
9292 	memset(payload_write, 0xE5, sizeof(payload_write));
9293 	spdk_blob_io_write(clone, channel, payload_write, 5 * pages_per_cluster, 1, blob_op_complete, NULL);
9294 	poll_threads();
9295 	CU_ASSERT(g_bserrno == 0);
9296 
9297 	g_bserrno = -1;
9298 	memset(payload_read, 0x00, sizeof(payload_read));
9299 	spdk_blob_io_read(clone, channel, payload_read, 5 * pages_per_cluster, 1, blob_op_complete, NULL);
9300 	poll_threads();
9301 	CU_ASSERT(g_bserrno == 0);
9302 	CU_ASSERT(memcmp(payload_write, payload_read, BLOCKLEN) == 0);
9303 
9304 	/* Write and read from post-resize ranges */
9305 	g_bserrno = -1;
9306 	memset(payload_write, 0xE5, sizeof(payload_write));
9307 	spdk_blob_io_write(clone, channel, payload_write, 15 * pages_per_cluster, 1, blob_op_complete,
9308 			   NULL);
9309 	poll_threads();
9310 	CU_ASSERT(g_bserrno == 0);
9311 
9312 	g_bserrno = -1;
9313 	memset(payload_read, 0x00, sizeof(payload_read));
9314 	spdk_blob_io_read(clone, channel, payload_read, 15 * pages_per_cluster, 1, blob_op_complete, NULL);
9315 	poll_threads();
9316 	CU_ASSERT(g_bserrno == 0);
9317 	CU_ASSERT(memcmp(payload_write, payload_read, bs->dev->blocklen) == 0);
9318 
9319 	/* Now do full blob inflation of the resized blob/clone. */
9320 	free_clusters = spdk_bs_free_cluster_count(bs);
9321 	spdk_bs_inflate_blob(bs, channel, cloneid, blob_op_complete, NULL);
9322 	poll_threads();
9323 	CU_ASSERT(g_bserrno == 0);
9324 	/* We wrote to 2 clusters earlier, all remaining 18 clusters in
9325 	 * blob should get allocated after inflation */
9326 	CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 18);
9327 
9328 	spdk_blob_close(clone, blob_op_complete, NULL);
9329 	poll_threads();
9330 	CU_ASSERT(g_bserrno == 0);
9331 
9332 	spdk_blob_close(snap_blob, blob_op_complete, NULL);
9333 	poll_threads();
9334 	CU_ASSERT(g_bserrno == 0);
9335 
9336 	spdk_blob_close(snap_blob_rsz, blob_op_complete, NULL);
9337 	poll_threads();
9338 	CU_ASSERT(g_bserrno == 0);
9339 
9340 	ut_blob_close_and_delete(bs, blob);
9341 
9342 	spdk_bs_free_io_channel(channel);
9343 }
9344 
9345 
/*
 * Grow an esnap clone to twice the external snapshot's size, then write every
 * io unit one at a time, each time re-reading the previous, current and next
 * blocks to make sure neighboring data is not corrupted by the write.
 */
static void
blob_esnap_clone_resize(void)
{
	struct spdk_bs_dev *dev;
	struct spdk_blob_store *bs;
	struct spdk_bs_opts bsopts;
	struct spdk_blob_opts opts;
	struct ut_esnap_opts esnap_opts;
	struct spdk_blob *blob;
	uint32_t block, esnap_blksz = 512, bs_blksz = 512;
	const uint32_t cluster_sz = 4 * g_phys_blocklen;
	const uint64_t esnap_num_clusters = 4;
	const uint32_t esnap_sz = cluster_sz * esnap_num_clusters;
	const uint64_t esnap_num_blocks = esnap_sz / esnap_blksz;
	uint64_t blob_num_blocks = esnap_sz / bs_blksz;
	struct spdk_io_channel *bs_ch;

	spdk_bs_opts_init(&bsopts, sizeof(bsopts));
	bsopts.cluster_sz = cluster_sz;
	bsopts.esnap_bs_dev_create = ut_esnap_create;
	/* Create device with desired block size */
	dev = init_dev();
	dev->blocklen = bs_blksz;
	dev->blockcnt = DEV_BUFFER_SIZE / dev->blocklen;
	/* Initialize a new blob store */
	spdk_bs_init(dev, &bsopts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	SPDK_CU_ASSERT_FATAL(g_bs->io_unit_size == bs_blksz);
	bs = g_bs;

	bs_ch = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(bs_ch != NULL);

	/* Create and open the esnap clone  */
	ut_spdk_blob_opts_init(&opts);
	ut_esnap_opts_init(esnap_blksz, esnap_num_blocks, __func__, NULL, &esnap_opts);
	opts.esnap_id = &esnap_opts;
	opts.esnap_id_len = sizeof(esnap_opts);
	opts.num_clusters = esnap_num_clusters;
	blob = ut_blob_create_and_open(bs, &opts);
	SPDK_CU_ASSERT_FATAL(blob != NULL);

	/* Double the blob size so it extends past the external snapshot */
	g_bserrno = -1;
	spdk_blob_resize(blob, esnap_num_clusters * 2, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == esnap_num_clusters * 2);

	/* Write one blob block at a time; verify that the surrounding blocks are OK */
	blob_num_blocks = (spdk_blob_get_num_clusters(blob) * cluster_sz) / bs_blksz;
	for (block = 0; block < blob_num_blocks; block++) {
		char buf[bs_blksz];
		union ut_word word;
		word.f.blob_id = 0xfedcba90;
		word.f.lba = block;
		ut_memset8(buf, word.num, bs_blksz);
		spdk_blob_io_write(blob, bs_ch, buf, block, 1, bs_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		if (g_bserrno != 0) {
			break;
		}
		/* Read and verify the block before the current block */
		if (block != 0) {
			spdk_blob_io_read(blob, bs_ch, buf, block - 1, 1, bs_op_complete, NULL);
			poll_threads();
			CU_ASSERT(g_bserrno == 0);
			if (g_bserrno != 0) {
				break;
			}
			CU_ASSERT(ut_esnap_content_is_correct(buf, bs_blksz, word.f.blob_id,
							      (block - 1) * bs_blksz, bs_blksz));
		}
		/* Read and verify the current block */
		spdk_blob_io_read(blob, bs_ch, buf, block, 1, bs_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		if (g_bserrno != 0) {
			break;
		}
		CU_ASSERT(ut_esnap_content_is_correct(buf, bs_blksz, word.f.blob_id,
						      block * bs_blksz, bs_blksz));
		/* Check the block that follows; it has not been written yet, so
		 * it is verified against the content generated for blob->id
		 * rather than the id written above.
		 */
		if (block + 1 < blob_num_blocks) {
			g_bserrno = 0xbad;
			spdk_blob_io_read(blob, bs_ch, buf, block + 1, 1, bs_op_complete, NULL);
			poll_threads();
			CU_ASSERT(g_bserrno == 0);
			if (g_bserrno != 0) {
				break;
			}
			CU_ASSERT(ut_esnap_content_is_correct(buf, bs_blksz, blob->id,
							      (block + 1) * bs_blksz,
							      esnap_blksz));
		}
	}
	/* Clean up */
	spdk_bs_free_io_channel(bs_ch);
	g_bserrno = 0xbad;
	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	spdk_bs_unload(g_bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
	memset(g_dev_buffer, 0, DEV_BUFFER_SIZE);
}
9456 
/* spdk_bs_dev completion callback: records the operation status in the
 * global g_bserrno so the test can inspect it after polling.
 */
static void
bs_dev_io_complete_cb(struct spdk_io_channel *channel, void *cb_arg, int bserrno)
{
	g_bserrno = bserrno;
}
9462 
/*
 * Exercise spdk_bs_blob_shallow_copy(): the error paths (source blob still
 * writable, destination device too small, destination block length mismatch)
 * and a successful copy, where only the clusters allocated in the blob itself
 * are written to the destination while the rest is left untouched.
 */
static void
blob_shallow_copy(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts blob_opts;
	struct spdk_blob *blob;
	spdk_blob_id blobid;
	uint64_t num_clusters = 4;
	struct spdk_bs_dev *ext_dev;
	struct spdk_bs_dev_cb_args ext_args;
	struct spdk_io_channel *bdev_ch, *blob_ch;
	uint8_t buf1[DEV_BUFFER_BLOCKLEN];
	uint8_t buf2[DEV_BUFFER_BLOCKLEN];
	uint64_t io_units_per_cluster;
	uint64_t offset;
	int rc;

	blob_ch = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(blob_ch != NULL);

	/* Set blob dimension and as thin provisioned */
	ut_spdk_blob_opts_init(&blob_opts);
	blob_opts.thin_provision = true;
	blob_opts.num_clusters = num_clusters;

	/* Create a blob */
	blob = ut_blob_create_and_open(bs, &blob_opts);
	SPDK_CU_ASSERT_FATAL(blob != NULL);
	blobid = spdk_blob_get_id(blob);
	io_units_per_cluster = bs_io_units_per_cluster(blob);

	/* Write on cluster 2 and 4 of blob */
	for (offset = io_units_per_cluster; offset < 2 * io_units_per_cluster; offset++) {
		memset(buf1, offset, DEV_BUFFER_BLOCKLEN);
		spdk_blob_io_write(blob, blob_ch, buf1, offset, 1, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
	}
	for (offset = 3 * io_units_per_cluster; offset < 4 * io_units_per_cluster; offset++) {
		memset(buf1, offset, DEV_BUFFER_BLOCKLEN);
		spdk_blob_io_write(blob, blob_ch, buf1, offset, 1, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
	}
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 2);

	/* Make a snapshot over blob; its clusters move to the snapshot. */
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);

	/* Write on cluster 1 and 3 of blob */
	for (offset = 0; offset < io_units_per_cluster; offset++) {
		memset(buf1, offset, DEV_BUFFER_BLOCKLEN);
		spdk_blob_io_write(blob, blob_ch, buf1, offset, 1, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
	}
	for (offset = 2 * io_units_per_cluster; offset < 3 * io_units_per_cluster; offset++) {
		memset(buf1, offset, DEV_BUFFER_BLOCKLEN);
		spdk_blob_io_write(blob, blob_ch, buf1, offset, 1, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
	}
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 2);

	/* Shallow copy with a not read only blob; must fail with -EPERM. */
	ext_dev = init_ext_dev(num_clusters * 1024 * 1024, DEV_BUFFER_BLOCKLEN);
	rc = spdk_bs_blob_shallow_copy(bs, blob_ch, blobid, ext_dev,
				       blob_shallow_copy_status_cb, NULL,
				       blob_op_complete, NULL);
	CU_ASSERT(rc == 0);
	poll_threads();
	CU_ASSERT(g_bserrno == -EPERM);
	ext_dev->destroy(ext_dev);

	/* Set blob read only */
	spdk_blob_set_read_only(blob);
	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Shallow copy over a spdk_bs_dev with incorrect size */
	ext_dev = init_ext_dev(1, DEV_BUFFER_BLOCKLEN);
	rc = spdk_bs_blob_shallow_copy(bs, blob_ch, blobid, ext_dev,
				       blob_shallow_copy_status_cb, NULL,
				       blob_op_complete, NULL);
	CU_ASSERT(rc == 0);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);
	ext_dev->destroy(ext_dev);

	/* Shallow copy over a spdk_bs_dev with incorrect block len */
	ext_dev = init_ext_dev(num_clusters * 1024 * 1024, DEV_BUFFER_BLOCKLEN * 2);
	rc = spdk_bs_blob_shallow_copy(bs, blob_ch, blobid, ext_dev,
				       blob_shallow_copy_status_cb, NULL,
				       blob_op_complete, NULL);
	CU_ASSERT(rc == 0);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);
	ext_dev->destroy(ext_dev);

	/* Initialize ext_dev for the successful shallow copy; fill the whole
	 * destination with 0xff so untouched clusters can be detected below.
	 */
	ext_dev = init_ext_dev(num_clusters * 1024 * 1024, DEV_BUFFER_BLOCKLEN);
	bdev_ch = ext_dev->create_channel(ext_dev);
	SPDK_CU_ASSERT_FATAL(bdev_ch != NULL);
	ext_args.cb_fn = bs_dev_io_complete_cb;
	for (offset = 0; offset < 4 * io_units_per_cluster; offset++) {
		memset(buf2, 0xff, DEV_BUFFER_BLOCKLEN);
		ext_dev->write(ext_dev, bdev_ch, buf2, offset, 1, &ext_args);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
	}

	/* Correct shallow copy of blob over bdev; the status callback reports
	 * progress one cluster at a time via g_copied_clusters_count. */
	rc = spdk_bs_blob_shallow_copy(bs, blob_ch, blobid, ext_dev,
				       blob_shallow_copy_status_cb, NULL,
				       blob_op_complete, NULL);
	CU_ASSERT(rc == 0);
	poll_thread_times(0, 1);
	CU_ASSERT(g_copied_clusters_count == 1);
	poll_thread_times(0, 2);
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_copied_clusters_count == 2);

	/* Read from bdev */
	/* Only cluster 1 and 3 must be filled */
	/* Clusters 2 and 4 should not have been touched */
	for (offset = 0; offset < io_units_per_cluster; offset++) {
		memset(buf1, offset, DEV_BUFFER_BLOCKLEN);
		ext_dev->read(ext_dev, bdev_ch, buf2, offset, 1, &ext_args);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(memcmp(buf1, buf2, DEV_BUFFER_BLOCKLEN) == 0);
	}
	for (offset = io_units_per_cluster; offset < 2 * io_units_per_cluster; offset++) {
		memset(buf1, 0xff, DEV_BUFFER_BLOCKLEN);
		ext_dev->read(ext_dev, bdev_ch, buf2, offset, 1, &ext_args);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(memcmp(buf1, buf2, DEV_BUFFER_BLOCKLEN) == 0);
	}
	for (offset = 2 * io_units_per_cluster; offset < 3 * io_units_per_cluster; offset++) {
		memset(buf1, offset, DEV_BUFFER_BLOCKLEN);
		ext_dev->read(ext_dev, bdev_ch, buf2, offset, 1, &ext_args);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(memcmp(buf1, buf2, DEV_BUFFER_BLOCKLEN) == 0);
	}
	for (offset = 3 * io_units_per_cluster; offset < 4 * io_units_per_cluster; offset++) {
		memset(buf1, 0xff, DEV_BUFFER_BLOCKLEN);
		ext_dev->read(ext_dev, bdev_ch, buf2, offset, 1, &ext_args);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(memcmp(buf1, buf2, DEV_BUFFER_BLOCKLEN) == 0);
	}

	/* Clean up */
	ext_dev->destroy_channel(ext_dev, bdev_ch);
	ext_dev->destroy(ext_dev);
	spdk_bs_free_io_channel(blob_ch);
	ut_blob_close_and_delete(bs, blob);
	poll_threads();
}
9628 
/*
 * Exercise spdk_bs_blob_set_parent(): first the rejection paths (invalid
 * snapshot id, self-parenting, already-current parent, non-snapshot parent,
 * size mismatch, thick blob), then successful re-parenting of a snapshot
 * clone, of an esnap clone, and of a thin-provisioned blob with no parent.
 */
static void
blob_set_parent(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts opts;
	struct ut_esnap_opts esnap_opts;
	struct spdk_blob *blob1, *blob2, *blob3, *blob4, *blob5;
	spdk_blob_id blobid1, blobid2, blobid3, blobid4, blobid5,
		     snapshotid1, snapshotid2, snapshotid3;
	uint32_t cluster_sz, block_sz;
	const uint32_t esnap_num_clusters = 4;
	uint64_t esnap_num_blocks;
	spdk_blob_id ids[2];
	/* clone_count doubles as the ids[] capacity passed to spdk_blob_get_clones()
	 * (in/out parameter); it is overwritten with the actual clone count. */
	size_t clone_count = 2;

	cluster_sz = spdk_bs_get_cluster_size(bs);
	block_sz = spdk_bs_get_io_unit_size(bs);
	esnap_num_blocks = cluster_sz * esnap_num_clusters / block_sz;

	/* Create a normal blob and make a couple of snapshots */
	ut_spdk_blob_opts_init(&opts);
	blob1 = ut_blob_create_and_open(bs, &opts);
	SPDK_CU_ASSERT_FATAL(blob1 != NULL);
	blobid1 = spdk_blob_get_id(blob1);
	spdk_bs_create_snapshot(bs, blobid1, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blobid != SPDK_BLOBID_INVALID);
	snapshotid1 = g_blobid;
	/* Second snapshot: blob1's current parent becomes snapshotid2,
	 * with snapshotid1 as snapshotid2's parent. */
	spdk_bs_create_snapshot(bs, blobid1, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blobid != SPDK_BLOBID_INVALID);
	snapshotid2 = g_blobid;

	/* Call set_parent with an invalid snapshotid */
	spdk_bs_blob_set_parent(bs, blobid1, SPDK_BLOBID_INVALID, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Call set_parent with blobid and snapshotid the same */
	spdk_bs_blob_set_parent(bs, blobid1, blobid1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Call set_parent with a blob and its parent snapshot: already linked, so -EEXIST */
	spdk_bs_blob_set_parent(bs, blobid1, snapshotid2, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EEXIST);

	/* Create an esnap clone blob */
	ut_spdk_blob_opts_init(&opts);
	ut_esnap_opts_init(block_sz, esnap_num_blocks, __func__, NULL, &esnap_opts);
	opts.esnap_id = &esnap_opts;
	opts.esnap_id_len = sizeof(esnap_opts);
	opts.num_clusters = esnap_num_clusters;
	blob2 = ut_blob_create_and_open(bs, &opts);
	SPDK_CU_ASSERT_FATAL(blob2 != NULL);
	blobid2 = spdk_blob_get_id(blob2);
	CU_ASSERT(spdk_blob_is_esnap_clone(blob2));

	/* Call set_parent with a non snapshot parent */
	spdk_bs_blob_set_parent(bs, blobid2, blobid1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Call set_parent with blob and snapshot of different size */
	spdk_bs_blob_set_parent(bs, blobid2, snapshotid1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Call set_parent correctly with a snapshot's clone blob:
	 * re-parent blob1 from snapshotid2 to snapshotid1 */
	spdk_bs_blob_set_parent(bs, blobid1, snapshotid1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Check relations: snapshotid1 now has two clones (snapshotid2 and blob1) */
	CU_ASSERT(spdk_blob_is_clone(blob1));
	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid1) == snapshotid1);
	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid1, ids, &clone_count) == 0);
	CU_ASSERT(clone_count == 2);
	CU_ASSERT(ids[1] == blobid1);

	/* Create another normal blob with size equal to esnap size and make a snapshot */
	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = esnap_num_clusters;
	opts.thin_provision = true;
	blob3 = ut_blob_create_and_open(bs, &opts);
	SPDK_CU_ASSERT_FATAL(blob3 != NULL);
	blobid3 = spdk_blob_get_id(blob3);
	spdk_bs_create_snapshot(bs, blobid3, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blobid != SPDK_BLOBID_INVALID);
	snapshotid3 = g_blobid;

	/* Call set_parent correctly with an esnap's clone blob:
	 * blob2 switches from external snapshot to snapshotid3 */
	spdk_bs_blob_set_parent(bs, blobid2, snapshotid3, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Check relations */
	CU_ASSERT(!spdk_blob_is_esnap_clone(blob2));
	CU_ASSERT(spdk_blob_is_clone(blob2));
	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid2) == snapshotid3);
	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid3, ids, &clone_count) == 0);
	CU_ASSERT(clone_count == 2);
	CU_ASSERT(ids[1] == blobid2);

	/* Create a not thin-provisioned blob that is not a clone */
	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = false;
	blob4 = ut_blob_create_and_open(bs, &opts);
	SPDK_CU_ASSERT_FATAL(blob4 != NULL);
	blobid4 = spdk_blob_get_id(blob4);

	/* Call set_parent with a blob that isn't a clone and that isn't thin-provisioned */
	spdk_bs_blob_set_parent(bs, blobid4, snapshotid2, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Create a thin-provisioned blob that is not a clone */
	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = true;
	blob5 = ut_blob_create_and_open(bs, &opts);
	SPDK_CU_ASSERT_FATAL(blob5 != NULL);
	blobid5 = spdk_blob_get_id(blob5);

	/* Call set_parent correctly with a blob that isn't a clone */
	spdk_bs_blob_set_parent(bs, blobid5, snapshotid2, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Check relations */
	CU_ASSERT(spdk_blob_is_clone(blob5));
	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid5) == snapshotid2);
	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &clone_count) == 0);
	CU_ASSERT(clone_count == 1);
	CU_ASSERT(ids[0] == blobid5);

	/* Clean up: blobs first (children), then snapshots */
	ut_blob_close_and_delete(bs, blob5);
	ut_blob_close_and_delete(bs, blob4);
	ut_blob_close_and_delete(bs, blob3);
	ut_blob_close_and_delete(bs, blob2);
	ut_blob_close_and_delete(bs, blob1);
	spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	spdk_bs_delete_blob(bs, snapshotid1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
}
9785 
/*
 * Exercise spdk_bs_blob_set_external_parent(): rejection paths (self id,
 * size mismatch, already-current esnap, thick blob), then successful
 * attachment of an external snapshot to a snapshot clone and to a
 * thin-provisioned blob with no parent.
 */
static void
blob_set_external_parent(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts opts;
	struct ut_esnap_opts esnap_opts, esnap_opts2;
	struct spdk_blob *blob1, *blob2, *blob3, *blob4;
	spdk_blob_id blobid1, blobid2, blobid3, blobid4, snapshotid;
	uint32_t cluster_sz, block_sz;
	const uint32_t esnap_num_clusters = 4;
	uint64_t esnap_num_blocks;
	struct spdk_bs_dev *esnap_dev1, *esnap_dev2, *esnap_dev3;
	const void *esnap_id;
	size_t esnap_id_len;
	int rc;

	cluster_sz = spdk_bs_get_cluster_size(bs);
	block_sz = spdk_bs_get_io_unit_size(bs);
	esnap_num_blocks = cluster_sz * esnap_num_clusters / block_sz;
	esnap_dev1 = init_dev();
	esnap_dev2 = init_dev();
	esnap_dev3 = init_dev();

	/* Create an esnap clone blob */
	ut_spdk_blob_opts_init(&opts);
	ut_esnap_opts_init(block_sz, esnap_num_blocks, __func__, NULL, &esnap_opts);
	opts.esnap_id = &esnap_opts;
	opts.esnap_id_len = sizeof(esnap_opts);
	opts.num_clusters = esnap_num_clusters;
	blob1 = ut_blob_create_and_open(bs, &opts);
	SPDK_CU_ASSERT_FATAL(blob1 != NULL);
	blobid1 = spdk_blob_get_id(blob1);
	CU_ASSERT(spdk_blob_is_esnap_clone(blob1));

	/* Call set_external_parent with blobid and esnapid the same.
	 * NOTE(review): no poll_threads() before the next two asserts — this
	 * relies on parameter validation completing synchronously; confirm. */
	spdk_bs_blob_set_external_parent(bs, blobid1, esnap_dev1, &blobid1, sizeof(blobid1),
					 blob_op_complete, NULL);
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Call set_external_parent with esnap of incompatible size */
	esnap_dev1->blockcnt = esnap_num_blocks - 1;
	spdk_bs_blob_set_external_parent(bs, blobid1, esnap_dev1, opts.esnap_id, opts.esnap_id_len,
					 blob_op_complete, NULL);
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Call set_external_parent with a blob and its parent esnap: already linked */
	esnap_dev1->blocklen = block_sz;
	esnap_dev1->blockcnt = esnap_num_blocks;
	spdk_bs_blob_set_external_parent(bs, blobid1, esnap_dev1, opts.esnap_id, opts.esnap_id_len,
					 blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EEXIST);

	/* Create a blob that is a clone of a snapshot */
	ut_spdk_blob_opts_init(&opts);
	blob2 = ut_blob_create_and_open(bs, &opts);
	SPDK_CU_ASSERT_FATAL(blob2 != NULL);
	blobid2 = spdk_blob_get_id(blob2);
	spdk_bs_create_snapshot(bs, blobid2, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blobid != SPDK_BLOBID_INVALID);
	snapshotid = g_blobid;

	/* Call set_external_parent correctly with a snapshot's clone blob */
	esnap_dev2->blocklen = block_sz;
	esnap_dev2->blockcnt = esnap_num_blocks;
	ut_esnap_opts_init(block_sz, esnap_num_blocks, __func__, NULL, &esnap_opts2);
	spdk_bs_blob_set_external_parent(bs, blobid2, esnap_dev2, &esnap_opts2, sizeof(esnap_opts2),
					 blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Check relations: blob2 is now an esnap clone carrying esnap_opts2 as its id */
	rc = spdk_blob_get_esnap_id(blob2, &esnap_id, &esnap_id_len);
	CU_ASSERT(spdk_blob_is_esnap_clone(blob2));
	CU_ASSERT(!spdk_blob_is_clone(blob2));
	CU_ASSERT(rc == 0 && esnap_id_len == sizeof(esnap_opts2) &&
		  memcmp(esnap_id, &esnap_opts2, esnap_id_len) == 0);
	CU_ASSERT(blob2->parent_id == SPDK_BLOBID_EXTERNAL_SNAPSHOT);

	/* Create a not thin-provisioned blob that is not a clone */
	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = false;
	blob3 = ut_blob_create_and_open(bs, &opts);
	SPDK_CU_ASSERT_FATAL(blob3 != NULL);
	blobid3 = spdk_blob_get_id(blob3);

	/* Call set_external_parent with a blob that isn't a clone and that isn't thin-provisioned */
	spdk_bs_blob_set_external_parent(bs, blobid3, esnap_dev1, &esnap_opts, sizeof(esnap_opts),
					 blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Create a thin-provisioned blob that is not a clone */
	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = true;
	blob4 = ut_blob_create_and_open(bs, &opts);
	SPDK_CU_ASSERT_FATAL(blob4 != NULL);
	blobid4 = spdk_blob_get_id(blob4);

	/* Call set_external_parent correctly with a blob that isn't a clone */
	esnap_dev3->blocklen = block_sz;
	esnap_dev3->blockcnt = esnap_num_blocks;
	ut_esnap_opts_init(block_sz, esnap_num_blocks, __func__, NULL, &esnap_opts);
	spdk_bs_blob_set_external_parent(bs, blobid4, esnap_dev3, &esnap_opts, sizeof(esnap_opts),
					 blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Check relations */
	rc = spdk_blob_get_esnap_id(blob4, &esnap_id, &esnap_id_len);
	CU_ASSERT(spdk_blob_is_esnap_clone(blob4));
	CU_ASSERT(!spdk_blob_is_clone(blob4));
	CU_ASSERT(rc == 0 && esnap_id_len == sizeof(esnap_opts) &&
		  memcmp(esnap_id, &esnap_opts, esnap_id_len) == 0);
	CU_ASSERT(blob4->parent_id == SPDK_BLOBID_EXTERNAL_SNAPSHOT);

	/* Clean up. Only esnap_dev1 is destroyed here; esnap_dev2/esnap_dev3
	 * were successfully attached — presumably the blobstore now owns
	 * them. NOTE(review): confirm against dev lifetime rules. */
	ut_blob_close_and_delete(bs, blob4);
	ut_blob_close_and_delete(bs, blob3);
	ut_blob_close_and_delete(bs, blob2);
	ut_blob_close_and_delete(bs, blob1);
	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
	dev_destroy(esnap_dev1);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
}
9913 
9914 static void
9915 suite_bs_setup(void)
9916 {
9917 	struct spdk_bs_dev *dev;
9918 
9919 	dev = init_dev();
9920 	memset(g_dev_buffer, 0, DEV_BUFFER_SIZE);
9921 	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
9922 	poll_threads();
9923 	CU_ASSERT(g_bserrno == 0);
9924 	CU_ASSERT(g_bs != NULL);
9925 }
9926 
9927 static void
9928 suite_esnap_bs_setup(void)
9929 {
9930 	struct spdk_bs_dev	*dev;
9931 	struct spdk_bs_opts	bs_opts;
9932 
9933 	dev = init_dev();
9934 	memset(g_dev_buffer, 0, DEV_BUFFER_SIZE);
9935 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
9936 	bs_opts.cluster_sz = 4 * g_phys_blocklen;
9937 	bs_opts.esnap_bs_dev_create = ut_esnap_create;
9938 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
9939 	poll_threads();
9940 	CU_ASSERT(g_bserrno == 0);
9941 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
9942 }
9943 
9944 static void
9945 suite_bs_cleanup(void)
9946 {
9947 	if (g_bs != NULL) {
9948 		spdk_bs_unload(g_bs, bs_op_complete, NULL);
9949 		poll_threads();
9950 		CU_ASSERT(g_bserrno == 0);
9951 		g_bs = NULL;
9952 	}
9953 	memset(g_dev_buffer, 0, DEV_BUFFER_SIZE);
9954 }
9955 
9956 static struct spdk_blob *
9957 ut_blob_create_and_open(struct spdk_blob_store *bs, struct spdk_blob_opts *blob_opts)
9958 {
9959 	struct spdk_blob *blob;
9960 	struct spdk_blob_opts create_blob_opts;
9961 	spdk_blob_id blobid;
9962 
9963 	if (blob_opts == NULL) {
9964 		ut_spdk_blob_opts_init(&create_blob_opts);
9965 		blob_opts = &create_blob_opts;
9966 	}
9967 
9968 	spdk_bs_create_blob_ext(bs, blob_opts, blob_op_with_id_complete, NULL);
9969 	poll_threads();
9970 	CU_ASSERT(g_bserrno == 0);
9971 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
9972 	blobid = g_blobid;
9973 	g_blobid = -1;
9974 
9975 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
9976 	poll_threads();
9977 	CU_ASSERT(g_bserrno == 0);
9978 	CU_ASSERT(g_blob != NULL);
9979 	blob = g_blob;
9980 
9981 	g_blob = NULL;
9982 	g_bserrno = -1;
9983 
9984 	return blob;
9985 }
9986 
9987 static void
9988 ut_blob_close_and_delete(struct spdk_blob_store *bs, struct spdk_blob *blob)
9989 {
9990 	spdk_blob_id blobid = spdk_blob_get_id(blob);
9991 
9992 	spdk_blob_close(blob, blob_op_complete, NULL);
9993 	poll_threads();
9994 	CU_ASSERT(g_bserrno == 0);
9995 	g_blob = NULL;
9996 
9997 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
9998 	poll_threads();
9999 	CU_ASSERT(g_bserrno == 0);
10000 	g_bserrno = -1;
10001 }
10002 
/* Per-suite setup for the "blob" suites: create a blobstore (g_bs) and one
 * open blob (g_blob) that every test in the suite can use. */
static void
suite_blob_setup(void)
{
	suite_bs_setup();
	CU_ASSERT(g_bs != NULL);

	g_blob = ut_blob_create_and_open(g_bs, NULL);
	CU_ASSERT(g_blob != NULL);
}
10012 
/* Per-suite teardown for the "blob" suites: delete the shared blob, then
 * unload the blobstore created by suite_blob_setup(). */
static void
suite_blob_cleanup(void)
{
	ut_blob_close_and_delete(g_bs, g_blob);
	CU_ASSERT(g_blob == NULL);

	suite_bs_cleanup();
	CU_ASSERT(g_bs == NULL);
}
10022 
10023 static int
10024 ut_setup_config_nocopy_noextent(void)
10025 {
10026 	g_dev_copy_enabled = false;
10027 	g_use_extent_table = false;
10028 	g_phys_blocklen = 4096;
10029 
10030 	return 0;
10031 }
10032 
10033 static int
10034 ut_setup_config_nocopy_extent(void)
10035 {
10036 	g_dev_copy_enabled = false;
10037 	g_use_extent_table = true;
10038 	g_phys_blocklen = 4096;
10039 
10040 	return 0;
10041 }
10042 
10043 static int
10044 ut_setup_config_nocopy_extent_16k_phys(void)
10045 {
10046 	g_dev_copy_enabled = false;
10047 	g_use_extent_table = true;
10048 	g_phys_blocklen = 16384;
10049 
10050 	return 0;
10051 }
10052 
10053 
10054 static int
10055 ut_setup_config_copy_noextent(void)
10056 {
10057 	g_dev_copy_enabled = true;
10058 	g_use_extent_table = false;
10059 	g_phys_blocklen = 4096;
10060 
10061 	return 0;
10062 }
10063 
10064 static int
10065 ut_setup_config_copy_extent(void)
10066 {
10067 	g_dev_copy_enabled = true;
10068 	g_use_extent_table = true;
10069 	g_phys_blocklen = 4096;
10070 
10071 	return 0;
10072 }
10073 
/* Binds a suite-name suffix to the CUnit initialize callback that selects
 * the corresponding global blobstore test configuration. */
struct ut_config {
	const char *suffix;		/* appended to each suite's base name */
	CU_InitializeFunc setup_cb;	/* one of ut_setup_config_*(); returns 0 */
};
10078 
/*
 * Test driver: registers four suite flavors (plain, with blobstore setup,
 * with blobstore+blob setup, with esnap blobstore setup) once per global
 * configuration in configs[], then runs them on a two-thread UT reactor.
 */
int
main(int argc, char **argv)
{
	CU_pSuite		suite, suite_bs, suite_blob, suite_esnap_bs;
	unsigned int		i, num_failures;
	char			suite_name[4096];
	struct ut_config	*config;
	/* Each entry re-registers the full test list under a distinct suite
	 * name, with setup_cb selecting the g_* configuration globals. */
	struct ut_config	configs[] = {
		{"nocopy_noextent", ut_setup_config_nocopy_noextent},
		{"nocopy_extent", ut_setup_config_nocopy_extent},
		{"nocopy_extent_16k_phys", ut_setup_config_nocopy_extent_16k_phys},
		{"copy_noextent", ut_setup_config_copy_noextent},
		{"copy_extent", ut_setup_config_copy_extent},
	};

	CU_initialize_registry();

	for (i = 0; i < SPDK_COUNTOF(configs); ++i) {
		config = &configs[i];

		/* Suite with no per-suite blobstore: tests manage their own. */
		snprintf(suite_name, sizeof(suite_name), "blob_%s", config->suffix);
		suite = CU_add_suite(suite_name, config->setup_cb, NULL);

		/* Suite whose tests get a fresh blobstore in g_bs. */
		snprintf(suite_name, sizeof(suite_name), "blob_bs_%s", config->suffix);
		suite_bs = CU_add_suite_with_setup_and_teardown(suite_name, config->setup_cb, NULL,
				suite_bs_setup, suite_bs_cleanup);

		/* Suite whose tests additionally get an open blob in g_blob. */
		snprintf(suite_name, sizeof(suite_name), "blob_blob_%s", config->suffix);
		suite_blob = CU_add_suite_with_setup_and_teardown(suite_name, config->setup_cb, NULL,
				suite_blob_setup, suite_blob_cleanup);

		/* Suite whose blobstore supports external snapshot devices. */
		snprintf(suite_name, sizeof(suite_name), "blob_esnap_bs_%s", config->suffix);
		suite_esnap_bs = CU_add_suite_with_setup_and_teardown(suite_name, config->setup_cb, NULL,
				 suite_esnap_bs_setup,
				 suite_bs_cleanup);

		CU_ADD_TEST(suite, blob_init);
		CU_ADD_TEST(suite_bs, blob_open);
		CU_ADD_TEST(suite_bs, blob_create);
		CU_ADD_TEST(suite_bs, blob_create_loop);
		CU_ADD_TEST(suite_bs, blob_create_fail);
		CU_ADD_TEST(suite_bs, blob_create_internal);
		CU_ADD_TEST(suite_bs, blob_create_zero_extent);
		CU_ADD_TEST(suite, blob_thin_provision);
		CU_ADD_TEST(suite_bs, blob_snapshot);
		CU_ADD_TEST(suite_bs, blob_clone);
		CU_ADD_TEST(suite_bs, blob_inflate);
		CU_ADD_TEST(suite_bs, blob_delete);
		CU_ADD_TEST(suite_bs, blob_resize_test);
		CU_ADD_TEST(suite_bs, blob_resize_thin_test);
		CU_ADD_TEST(suite, blob_read_only);
		CU_ADD_TEST(suite_bs, channel_ops);
		CU_ADD_TEST(suite_bs, blob_super);
		CU_ADD_TEST(suite_blob, blob_write);
		CU_ADD_TEST(suite_blob, blob_read);
		CU_ADD_TEST(suite_blob, blob_rw_verify);
		CU_ADD_TEST(suite_bs, blob_rw_verify_iov);
		CU_ADD_TEST(suite_blob, blob_rw_verify_iov_nomem);
		CU_ADD_TEST(suite_blob, blob_rw_iov_read_only);
		CU_ADD_TEST(suite_bs, blob_unmap);
		CU_ADD_TEST(suite_bs, blob_iter);
		CU_ADD_TEST(suite_blob, blob_xattr);
		CU_ADD_TEST(suite_bs, blob_parse_md);
		CU_ADD_TEST(suite, bs_load);
		CU_ADD_TEST(suite_bs, bs_load_pending_removal);
		CU_ADD_TEST(suite, bs_load_custom_cluster_size);
		CU_ADD_TEST(suite, bs_load_after_failed_grow);
		CU_ADD_TEST(suite_bs, bs_unload);
		CU_ADD_TEST(suite, bs_cluster_sz);
		CU_ADD_TEST(suite_bs, bs_usable_clusters);
		CU_ADD_TEST(suite, bs_resize_md);
		CU_ADD_TEST(suite, bs_destroy);
		CU_ADD_TEST(suite, bs_type);
		CU_ADD_TEST(suite, bs_super_block);
		CU_ADD_TEST(suite, bs_test_recover_cluster_count);
		CU_ADD_TEST(suite, bs_grow_live);
		CU_ADD_TEST(suite, bs_grow_live_no_space);
		CU_ADD_TEST(suite, bs_test_grow);
		CU_ADD_TEST(suite, blob_serialize_test);
		CU_ADD_TEST(suite_bs, blob_crc);
		CU_ADD_TEST(suite, super_block_crc);
		CU_ADD_TEST(suite_blob, blob_dirty_shutdown);
		CU_ADD_TEST(suite_bs, blob_flags);
		CU_ADD_TEST(suite_bs, bs_version);
		CU_ADD_TEST(suite_bs, blob_set_xattrs_test);
		CU_ADD_TEST(suite_bs, blob_thin_prov_alloc);
		CU_ADD_TEST(suite_bs, blob_insert_cluster_msg_test);
		CU_ADD_TEST(suite_bs, blob_thin_prov_rw);
		CU_ADD_TEST(suite, blob_thin_prov_write_count_io);
		CU_ADD_TEST(suite, blob_thin_prov_unmap_cluster);
		CU_ADD_TEST(suite_bs, blob_thin_prov_rle);
		CU_ADD_TEST(suite_bs, blob_thin_prov_rw_iov);
		CU_ADD_TEST(suite, bs_load_iter_test);
		CU_ADD_TEST(suite_bs, blob_snapshot_rw);
		CU_ADD_TEST(suite_bs, blob_snapshot_rw_iov);
		CU_ADD_TEST(suite, blob_relations);
		CU_ADD_TEST(suite, blob_relations2);
		CU_ADD_TEST(suite, blob_relations3);
		CU_ADD_TEST(suite, blobstore_clean_power_failure);
		CU_ADD_TEST(suite, blob_delete_snapshot_power_failure);
		CU_ADD_TEST(suite, blob_create_snapshot_power_failure);
		CU_ADD_TEST(suite_bs, blob_inflate_rw);
		CU_ADD_TEST(suite_bs, blob_snapshot_freeze_io);
		CU_ADD_TEST(suite_bs, blob_operation_split_rw);
		CU_ADD_TEST(suite_bs, blob_operation_split_rw_iov);
		CU_ADD_TEST(suite, blob_io_unit);
		CU_ADD_TEST(suite, blob_io_unit_compatibility);
		CU_ADD_TEST(suite_bs, blob_simultaneous_operations);
		CU_ADD_TEST(suite_bs, blob_persist_test);
		CU_ADD_TEST(suite_bs, blob_decouple_snapshot);
		CU_ADD_TEST(suite_bs, blob_seek_io_unit);
		CU_ADD_TEST(suite_esnap_bs, blob_esnap_create);
		CU_ADD_TEST(suite_bs, blob_nested_freezes);
		CU_ADD_TEST(suite, blob_ext_md_pages);
		CU_ADD_TEST(suite, blob_esnap_io_4096_4096);
		CU_ADD_TEST(suite, blob_esnap_io_512_512);
		CU_ADD_TEST(suite, blob_esnap_io_4096_512);
		CU_ADD_TEST(suite, blob_esnap_io_512_4096);
		CU_ADD_TEST(suite_esnap_bs, blob_esnap_thread_add_remove);
		CU_ADD_TEST(suite_esnap_bs, blob_esnap_clone_snapshot);
		CU_ADD_TEST(suite_esnap_bs, blob_esnap_clone_inflate);
		CU_ADD_TEST(suite_esnap_bs, blob_esnap_clone_decouple);
		CU_ADD_TEST(suite_esnap_bs, blob_esnap_clone_reload);
		CU_ADD_TEST(suite_esnap_bs, blob_esnap_hotplug);
		CU_ADD_TEST(suite_blob, blob_is_degraded);
		CU_ADD_TEST(suite_bs, blob_clone_resize);
		CU_ADD_TEST(suite, blob_esnap_clone_resize);
		CU_ADD_TEST(suite_bs, blob_shallow_copy);
		CU_ADD_TEST(suite_esnap_bs, blob_set_parent);
		CU_ADD_TEST(suite_esnap_bs, blob_set_external_parent);
	}

	/* Two UT threads: several tests exercise cross-thread channel use. */
	allocate_threads(2);
	set_thread(0);

	/* Backing storage for the simulated bs_dev used by every suite. */
	g_dev_buffer = calloc(1, DEV_BUFFER_SIZE);

	num_failures = spdk_ut_run_tests(argc, argv, NULL);

	free(g_dev_buffer);

	free_threads();

	return num_failures;
}
10224