xref: /spdk/test/unit/lib/blob/blob.c/blob_ut.c (revision 95d6c9fac17572b107042103439aafd696d60b0e)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2017 Intel Corporation.
3  *   All rights reserved.
4  *   Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
5  */
6 
7 #include "spdk/stdinc.h"
8 
9 #include "spdk_internal/cunit.h"
10 #include "spdk/blob.h"
11 #include "spdk/string.h"
12 
13 #include "common/lib/ut_multithread.c"
14 #include "../bs_dev_common.c"
15 #include "thread/thread.c"
16 #include "ext_dev.c"
17 #include "blob/blobstore.c"
18 #include "blob/request.c"
19 #include "blob/zeroes.c"
20 #include "blob/blob_bs_dev.c"
21 #include "esnap_dev.c"
22 #define BLOCKLEN DEV_BUFFER_BLOCKLEN
23 
24 struct spdk_blob_store *g_bs;
25 spdk_blob_id g_blobid;
26 struct spdk_blob *g_blob, *g_blob2;
27 int g_bserrno, g_bserrno2;
28 struct spdk_xattr_names *g_names;
29 int g_done;
30 char *g_xattr_names[] = {"first", "second", "third"};
31 char *g_xattr_values[] = {"one", "two", "three"};
32 uint64_t g_ctx = 1729;
33 bool g_use_extent_table = false;
34 uint64_t g_copied_clusters_count = 0;
35 
/*
 * Copy of the version 1 on-disk super block layout, kept locally so the
 * tests can fabricate/inspect legacy super blocks independently of the
 * current layout in blobstore.h.  Packed and padded with "reserved" so the
 * structure occupies exactly one 4 KiB metadata page (see the static
 * assert below); all offsets/lengths are expressed in metadata pages
 * unless noted otherwise.
 */
struct spdk_bs_super_block_ver1 {
	uint8_t		signature[8];
	uint32_t        version;
	uint32_t        length;
	uint32_t	clean; /* If there was a clean shutdown, this is 1. */
	spdk_blob_id	super_blob;

	uint32_t	cluster_size; /* In bytes */

	uint32_t	used_page_mask_start; /* Offset from beginning of disk, in pages */
	uint32_t	used_page_mask_len; /* Count, in pages */

	uint32_t	used_cluster_mask_start; /* Offset from beginning of disk, in pages */
	uint32_t	used_cluster_mask_len; /* Count, in pages */

	uint32_t	md_start; /* Offset from beginning of disk, in pages */
	uint32_t	md_len; /* Count, in pages */

	uint8_t		reserved[4036];
	uint32_t	crc;
} __attribute__((packed));
SPDK_STATIC_ASSERT(sizeof(struct spdk_bs_super_block_ver1) == 0x1000, "Invalid super block size");
58 
59 static struct spdk_blob *ut_blob_create_and_open(struct spdk_blob_store *bs,
60 		struct spdk_blob_opts *blob_opts);
61 static void ut_blob_close_and_delete(struct spdk_blob_store *bs, struct spdk_blob *blob);
62 static void suite_blob_setup(void);
63 static void suite_blob_cleanup(void);
64 
65 DEFINE_STUB(spdk_memory_domain_memzero, int, (struct spdk_memory_domain *src_domain,
66 		void *src_domain_ctx, struct iovec *iov, uint32_t iovcnt, void (*cpl_cb)(void *, int),
67 		void *cpl_cb_arg), 0);
68 
/*
 * Return true iff _blob presents as an esnap (external snapshot) clone:
 * the internal BLOB_EXTERNAL_SNAPSHOT_ID xattr matches (id, id_len), the
 * SPDK_BLOB_EXTERNAL_SNAPSHOT invalid-flag is set, and the parent id is
 * the SPDK_BLOBID_EXTERNAL_SNAPSHOT sentinel.  Each sub-condition is also
 * CU_ASSERTed individually so a failure report pinpoints which one broke.
 */
static bool
is_esnap_clone(struct spdk_blob *_blob, const void *id, size_t id_len)
{
	const void *val = NULL;
	size_t len = 0;
	bool c0, c1, c2, c3;

	CU_ASSERT(blob_get_xattr_value(_blob, BLOB_EXTERNAL_SNAPSHOT_ID, &val, &len,
				       true) == 0);
	/* Assignments inside CU_ASSERT let the macro record the check while
	 * the combined verdict is still returned to the caller. */
	CU_ASSERT((c0 = (len == id_len)));
	CU_ASSERT((c1 = (val != NULL && memcmp(val, id, len) == 0)));
	CU_ASSERT((c2 = !!(_blob->invalid_flags & SPDK_BLOB_EXTERNAL_SNAPSHOT)));
	CU_ASSERT((c3 = (_blob->parent_id == SPDK_BLOBID_EXTERNAL_SNAPSHOT)));

	return c0 && c1 && c2 && c3;
}
85 
/*
 * Inverse of is_esnap_clone(): true iff _blob shows none of the esnap
 * markers — no BLOB_EXTERNAL_SNAPSHOT_ID xattr (lookup returns -ENOENT),
 * no SPDK_BLOB_EXTERNAL_SNAPSHOT flag, and a parent other than the
 * external-snapshot sentinel.  Sub-conditions are CU_ASSERTed individually.
 */
static bool
is_not_esnap_clone(struct spdk_blob *_blob)
{
	const void *val = NULL;
	size_t len = 0;
	bool c1, c2, c3, c4;

	CU_ASSERT((c1 = (blob_get_xattr_value(_blob, BLOB_EXTERNAL_SNAPSHOT_ID, &val, &len,
					      true) == -ENOENT)));
	CU_ASSERT((c2 = (val == NULL)));
	CU_ASSERT((c3 = ((_blob->invalid_flags & SPDK_BLOB_EXTERNAL_SNAPSHOT) == 0)));
	CU_ASSERT((c4 = (_blob->parent_id != SPDK_BLOBID_EXTERNAL_SNAPSHOT)));

	return c1 && c2 && c3 && c4;
}
101 
102 #define UT_ASSERT_IS_ESNAP_CLONE(_blob, _id, _len) CU_ASSERT(is_esnap_clone(_blob, _id, _len))
103 #define UT_ASSERT_IS_NOT_ESNAP_CLONE(_blob) CU_ASSERT(is_not_esnap_clone(_blob))
104 
105 static void
106 _get_xattr_value(void *arg, const char *name,
107 		 const void **value, size_t *value_len)
108 {
109 	uint64_t i;
110 
111 	SPDK_CU_ASSERT_FATAL(value_len != NULL);
112 	SPDK_CU_ASSERT_FATAL(value != NULL);
113 	CU_ASSERT(arg == &g_ctx);
114 
115 	for (i = 0; i < sizeof(g_xattr_names); i++) {
116 		if (!strcmp(name, g_xattr_names[i])) {
117 			*value_len = strlen(g_xattr_values[i]);
118 			*value = g_xattr_values[i];
119 			break;
120 		}
121 	}
122 }
123 
/*
 * Xattr get_value callback that deliberately reports a NULL value with
 * zero length — used by tests that exercise the blobstore's handling of
 * invalid xattr callbacks (arg is expected to be NULL for this variant).
 */
static void
_get_xattr_value_null(void *arg, const char *name,
		      const void **value, size_t *value_len)
{
	SPDK_CU_ASSERT_FATAL(value_len != NULL);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(arg == NULL);

	*value_len = 0;
	*value = NULL;
}
135 
136 static int
137 _get_snapshots_count(struct spdk_blob_store *bs)
138 {
139 	struct spdk_blob_list *snapshot = NULL;
140 	int count = 0;
141 
142 	TAILQ_FOREACH(snapshot, &bs->snapshots, link) {
143 		count += 1;
144 	}
145 
146 	return count;
147 }
148 
/*
 * Initialize blob options to defaults and apply the suite-wide extent
 * table setting (g_use_extent_table) so every test creates blobs in the
 * mode the current suite run is exercising.
 */
static void
ut_spdk_blob_opts_init(struct spdk_blob_opts *opts)
{
	spdk_blob_opts_init(opts, sizeof(*opts));
	opts->use_extent_table = g_use_extent_table;
}
155 
/* Blobstore operation completion: latch the errno into g_bserrno for the
 * test body to assert on after poll_threads(). */
static void
bs_op_complete(void *cb_arg, int bserrno)
{
	g_bserrno = bserrno;
}
161 
/* Blobstore load/init completion: latch both the resulting blobstore
 * handle (g_bs) and the errno (g_bserrno) for later assertions. */
static void
bs_op_with_handle_complete(void *cb_arg, struct spdk_blob_store *bs,
			   int bserrno)
{
	g_bs = bs;
	g_bserrno = bserrno;
}
169 
170 static void
171 blob_op_complete(void *cb_arg, int bserrno)
172 {
173 	if (cb_arg != NULL) {
174 		int *errp = cb_arg;
175 
176 		*errp = bserrno;
177 	}
178 	g_bserrno = bserrno;
179 }
180 
/* Blob create/snapshot/clone completion: latch the new blob id and errno
 * into the globals for the test body to inspect. */
static void
blob_op_with_id_complete(void *cb_arg, spdk_blob_id blobid, int bserrno)
{
	g_blobid = blobid;
	g_bserrno = bserrno;
}
187 
/* Blob open completion: latch the opened blob handle and errno into the
 * globals for the test body to inspect. */
static void
blob_op_with_handle_complete(void *cb_arg, struct spdk_blob *blb, int bserrno)
{
	g_blob = blb;
	g_bserrno = bserrno;
}
194 
195 static void
196 blob_op_with_handle_complete2(void *cb_arg, struct spdk_blob *blob, int bserrno)
197 {
198 	if (g_blob == NULL) {
199 		g_blob = blob;
200 		g_bserrno = bserrno;
201 	} else {
202 		g_blob2 = blob;
203 		g_bserrno2 = bserrno;
204 	}
205 }
206 
/* Shallow-copy progress callback: record how many clusters have been
 * copied so far so tests can verify progress reporting. */
static void
blob_shallow_copy_status_cb(uint64_t copied_clusters, void *cb_arg)
{
	g_copied_clusters_count = copied_clusters;
}
212 
/*
 * Cleanly unload *bs and reload it from the same backing device, returning
 * the freshly loaded blobstore through *bs.  Leaves g_bserrno primed to -1
 * so the caller's next completion check cannot pass on a stale value.
 */
static void
ut_bs_reload(struct spdk_blob_store **bs, struct spdk_bs_opts *opts)
{
	struct spdk_bs_dev *dev;

	/* Unload the blob store */
	spdk_bs_unload(*bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	dev = init_dev();
	/* Load an existing blob store */
	spdk_bs_load(dev, opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	*bs = g_bs;

	g_bserrno = -1;
}
233 
/*
 * Simulate an unclean shutdown: free the in-memory blobstore WITHOUT
 * unloading (so the on-disk "clean" flag is not written), then reload from
 * the same device, forcing the dirty-load/recovery path.  The recovered
 * blobstore is returned through *bs and g_bserrno is primed to -1.
 */
static void
ut_bs_dirty_load(struct spdk_blob_store **bs, struct spdk_bs_opts *opts)
{
	struct spdk_bs_dev *dev;

	/* Dirty shutdown */
	bs_free(*bs);

	dev = init_dev();
	/* Load an existing blob store */
	spdk_bs_load(dev, opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	*bs = g_bs;

	g_bserrno = -1;
}
252 
/*
 * Test blobstore initialization: an unsupported device block length must
 * fail with -EINVAL, and a default init/unload cycle must succeed.
 */
static void
blob_init(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;

	dev = init_dev();

	/* should fail for an unsupported blocklen */
	dev->blocklen = 500;
	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	dev = init_dev();
	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
}
279 
/*
 * Test the super-blob accessors: getting the super blob before one is set
 * must fail with -ENOENT; after spdk_bs_set_super() the same id must be
 * returned by spdk_bs_get_super().
 */
static void
blob_super(void)
{
	struct spdk_blob_store *bs = g_bs;
	spdk_blob_id blobid;
	struct spdk_blob_opts blob_opts;

	/* Get the super blob without having set one */
	spdk_bs_get_super(bs, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOENT);
	CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID);

	/* Create a blob */
	ut_spdk_blob_opts_init(&blob_opts);
	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid !=  SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	/* Set the blob as the super blob */
	spdk_bs_set_super(bs, blobid, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Get the super blob */
	spdk_bs_get_super(bs, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(blobid == g_blobid);
}
312 
/*
 * Test blob open/close reference counting: a second open of an open blob
 * returns the same handle, a blob can be re-opened after being fully
 * closed, and two opens issued back to back resolve to one object.
 */
static void
blob_open(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_blob_opts blob_opts;
	spdk_blob_id blobid, blobid2;

	ut_spdk_blob_opts_init(&blob_opts);
	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;

	blobid2 = spdk_blob_get_id(blob);
	CU_ASSERT(blobid == blobid2);

	/* Try to open file again.  It should return success. */
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(blob == g_blob);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/*
	 * Close the file a second time, releasing the second reference.  This
	 *  should succeed.
	 */
	blob = g_blob;
	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/*
	 * Try to open file again.  It should succeed.  This tests the case
	 *  where the file is opened, closed, then re-opened again.
	 */
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;
	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Try to open file twice in succession.  This should return the same
	 * blob object.
	 */
	g_blob = NULL;
	g_blob2 = NULL;
	g_bserrno = -1;
	g_bserrno2 = -1;
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete2, NULL);
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete2, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_bserrno2 == 0);
	CU_ASSERT(g_blob != NULL);
	CU_ASSERT(g_blob2 != NULL);
	CU_ASSERT(g_blob == g_blob2);

	/* Release one of the two references from the double open. */
	g_bserrno = -1;
	spdk_blob_close(g_blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Drop the last reference and delete the blob. */
	ut_blob_close_and_delete(bs, g_blob);
}
392 
/*
 * Test blob creation variants: explicit cluster count, zero clusters,
 * default (NULL) options, and a request larger than the blobstore which
 * must fail with -ENOSPC.
 */
static void
blob_create(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_blob_opts opts;
	spdk_blob_id blobid;

	/* Create blob with 10 clusters */

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;

	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;
	/* Not thin provisioned, so all 10 clusters are allocated up front. */
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 10);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Create blob with 0 clusters */

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 0;

	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Create blob with default options (opts == NULL) */

	spdk_bs_create_blob_ext(bs, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;
	/* Defaults create an empty blob. */
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Try to create blob with size larger than blobstore */

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = bs->total_clusters + 1;

	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOSPC);
}
476 
/*
 * Test that a zero-cluster blob created in extent-table mode ends up with
 * an empty extent-page array (no pages allocated), both via the public
 * create path and via the internal bs_create_blob() with NULL internal
 * options.
 */
static void
blob_create_zero_extent(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	spdk_blob_id blobid;

	/* Create blob with default options (opts == NULL) */
	spdk_bs_create_blob_ext(bs, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
	/* NOTE(review): assumes this test only runs with extent table enabled
	 * — confirm against the suite setup. */
	CU_ASSERT(blob->extent_table_found == true);
	CU_ASSERT(blob->active.extent_pages_array_size == 0);
	CU_ASSERT(blob->active.extent_pages == NULL);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Create blob with NULL internal options  */
	bs_create_blob(bs, NULL, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;
	/* No internal xattrs were supplied, so none should be present. */
	CU_ASSERT(TAILQ_FIRST(&blob->xattrs_internal) == NULL);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
	CU_ASSERT(blob->extent_table_found == true);
	CU_ASSERT(blob->active.extent_pages_array_size == 0);
	CU_ASSERT(blob->active.extent_pages == NULL);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
}
527 
528 /*
529  * Create and delete one blob in a loop over and over again.  This helps ensure
530  * that the internal bit masks tracking used clusters and md_pages are being
531  * tracked correctly.
532  */
static void
blob_create_loop(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts opts;
	uint32_t i, loop_count;

	/* Iterate enough times to wrap every bit in the md-page and cluster
	 * bitmaps several times over, exposing any leaked/stale bits. */
	loop_count = 4 * spdk_max(spdk_bit_array_capacity(bs->used_md_pages),
				  spdk_bit_pool_capacity(bs->used_clusters));

	for (i = 0; i < loop_count; i++) {
		ut_spdk_blob_opts_init(&opts);
		opts.num_clusters = 1;
		/* Reset the completion globals so each iteration's asserts
		 * cannot pass on values left by the previous iteration. */
		g_bserrno = -1;
		g_blobid = SPDK_BLOBID_INVALID;
		spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
		spdk_bs_delete_blob(bs, g_blobid, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
	}
}
557 
/*
 * Test that a failed blob create (xattrs with a NULL get_value callback →
 * -EINVAL) leaks no resources: the used-blobid and used-md-page counts are
 * unchanged, the would-be blob cannot be opened, and the state survives a
 * clean reload.
 */
static void
blob_create_fail(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts opts;
	spdk_blob_id blobid;
	uint32_t used_blobids_count = spdk_bit_array_count_set(bs->used_blobids);
	uint32_t used_md_pages_count = spdk_bit_array_count_set(bs->used_md_pages);

	/* NULL callback */
	ut_spdk_blob_opts_init(&opts);
	opts.xattrs.names = g_xattr_names;
	opts.xattrs.get_value = NULL;
	opts.xattrs.count = 1;
	opts.xattrs.ctx = &g_ctx;

	/* Predict the md page the create would have used, so we can try to
	 * open that id below and prove nothing was left behind. */
	blobid = spdk_bit_array_find_first_clear(bs->used_md_pages, 0);
	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	CU_ASSERT(spdk_bit_array_count_set(bs->used_blobids) == used_blobids_count);
	CU_ASSERT(spdk_bit_array_count_set(bs->used_md_pages) == used_md_pages_count);

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOENT);
	SPDK_CU_ASSERT_FATAL(g_blob == NULL);

	ut_bs_reload(&bs, NULL);
	CU_ASSERT(spdk_bit_array_count_set(bs->used_blobids) == used_blobids_count);
	CU_ASSERT(spdk_bit_array_count_set(bs->used_md_pages) == used_md_pages_count);

	/* The blobstore should contain no blobs at all after the failure. */
	spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_blob == NULL);
	CU_ASSERT(g_bserrno == -ENOENT);
}
596 
/*
 * Test the internal create path (bs_create_blob) with internal xattrs:
 * the values must be readable through the internal getter but hidden from
 * the public spdk_blob_get_xattr_value() API.  Also covers NULL internal
 * options.
 */
static void
blob_create_internal(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_blob_opts opts;
	struct spdk_blob_xattr_opts internal_xattrs;
	const void *value;
	size_t value_len;
	spdk_blob_id blobid;
	int rc;

	/* Create blob with custom xattrs */

	ut_spdk_blob_opts_init(&opts);
	blob_xattrs_init(&internal_xattrs);
	internal_xattrs.count = 3;
	internal_xattrs.names = g_xattr_names;
	internal_xattrs.get_value = _get_xattr_value;
	internal_xattrs.ctx = &g_ctx;

	bs_create_blob(bs, &opts, &internal_xattrs, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;

	/* Internal getter (internal=true) must see all three xattrs. */
	rc = blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len, true);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);

	rc = blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len, true);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);

	rc = blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len, true);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);

	/* Public getter must NOT expose internal xattrs. */
	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len);
	CU_ASSERT(rc != 0);

	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len);
	CU_ASSERT(rc != 0);

	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len);
	CU_ASSERT(rc != 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Create blob with NULL internal options  */

	bs_create_blob(bs, NULL, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	CU_ASSERT(TAILQ_FIRST(&g_blob->xattrs_internal) == NULL);
	CU_ASSERT(spdk_blob_get_num_clusters(g_blob) == 0);

	blob = g_blob;

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
}
682 
/*
 * Test thin-provisioned blob creation on a private blobstore: no clusters
 * are allocated up front (only extent pages, when the extent table is in
 * use), and the thin-provision flag survives a dirty load/recovery.
 */
static void
blob_thin_provision(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct spdk_blob *blob;
	struct spdk_blob_opts opts;
	struct spdk_bs_opts bs_opts;
	spdk_blob_id blobid;

	dev = init_dev();
	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
	snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");

	/* Initialize a new blob store */
	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);

	bs = g_bs;

	/* Create blob with thin provisioning enabled */

	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = true;
	opts.num_clusters = 10;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);
	CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);
	/* In thin provisioning with num_clusters is set, if not using the
	 * extent table, there is no allocation. If extent table is used,
	 * there is related allocation happened. */
	if (blob->extent_table_found == true) {
		CU_ASSERT(blob->active.extent_pages_array_size > 0);
		CU_ASSERT(blob->active.extent_pages != NULL);
	} else {
		CU_ASSERT(blob->active.extent_pages_array_size == 0);
		CU_ASSERT(blob->active.extent_pages == NULL);
	}

	/* NOTE(review): no poll_threads() between this close and the assert
	 * below, unlike the other close call sites — presumably the close
	 * completes inline here (or the check relies on the previous op's
	 * g_bserrno); confirm before relying on this assert. */
	spdk_blob_close(blob, blob_op_complete, NULL);
	CU_ASSERT(g_bserrno == 0);

	/* Do not shut down cleanly.  This makes sure that when we load again
	 *  and try to recover a valid used_cluster map, that blobstore will
	 *  ignore clusters with index 0 since these are unallocated clusters.
	 */
	ut_bs_dirty_load(&bs, &bs_opts);

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;
	CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);

	ut_blob_close_and_delete(bs, blob);

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
}
750 
751 static void
752 blob_snapshot(void)
753 {
754 	struct spdk_blob_store *bs = g_bs;
755 	struct spdk_blob *blob;
756 	struct spdk_blob *snapshot, *snapshot2;
757 	struct spdk_blob_bs_dev *blob_bs_dev;
758 	struct spdk_blob_opts opts;
759 	struct spdk_blob_xattr_opts xattrs;
760 	spdk_blob_id blobid;
761 	spdk_blob_id snapshotid;
762 	spdk_blob_id snapshotid2;
763 	const void *value;
764 	size_t value_len;
765 	int rc;
766 	spdk_blob_id ids[2];
767 	size_t count;
768 
769 	/* Create blob with 10 clusters */
770 	ut_spdk_blob_opts_init(&opts);
771 	opts.num_clusters = 10;
772 
773 	blob = ut_blob_create_and_open(bs, &opts);
774 	blobid = spdk_blob_get_id(blob);
775 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
776 
777 	/* Create snapshot from blob */
778 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 0);
779 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
780 	poll_threads();
781 	CU_ASSERT(g_bserrno == 0);
782 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
783 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
784 	snapshotid = g_blobid;
785 
786 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
787 	poll_threads();
788 	CU_ASSERT(g_bserrno == 0);
789 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
790 	snapshot = g_blob;
791 	CU_ASSERT(snapshot->data_ro == true);
792 	CU_ASSERT(snapshot->md_ro == true);
793 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10);
794 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(snapshot) == 10);
795 
796 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
797 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);
798 	CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
799 	CU_ASSERT(spdk_mem_all_zero(blob->active.clusters,
800 				    blob->active.num_clusters * sizeof(blob->active.clusters[0])));
801 
802 	/* Try to create snapshot from clone with xattrs */
803 	xattrs.names = g_xattr_names;
804 	xattrs.get_value = _get_xattr_value;
805 	xattrs.count = 3;
806 	xattrs.ctx = &g_ctx;
807 	spdk_bs_create_snapshot(bs, blobid, &xattrs, blob_op_with_id_complete, NULL);
808 	poll_threads();
809 	CU_ASSERT(g_bserrno == 0);
810 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
811 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 2);
812 	snapshotid2 = g_blobid;
813 
814 	spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL);
815 	CU_ASSERT(g_bserrno == 0);
816 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
817 	snapshot2 = g_blob;
818 	CU_ASSERT(snapshot2->data_ro == true);
819 	CU_ASSERT(snapshot2->md_ro == true);
820 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot2) == 10);
821 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(snapshot2) == 0);
822 
823 	/* Confirm that blob is backed by snapshot2 and snapshot2 is backed by snapshot */
824 	CU_ASSERT(snapshot->back_bs_dev == NULL);
825 	SPDK_CU_ASSERT_FATAL(blob->back_bs_dev != NULL);
826 	SPDK_CU_ASSERT_FATAL(snapshot2->back_bs_dev != NULL);
827 
828 	blob_bs_dev = (struct spdk_blob_bs_dev *)blob->back_bs_dev;
829 	CU_ASSERT(blob_bs_dev->blob == snapshot2);
830 
831 	blob_bs_dev = (struct spdk_blob_bs_dev *)snapshot2->back_bs_dev;
832 	CU_ASSERT(blob_bs_dev->blob == snapshot);
833 
834 	rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[0], &value, &value_len);
835 	CU_ASSERT(rc == 0);
836 	SPDK_CU_ASSERT_FATAL(value != NULL);
837 	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
838 	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);
839 
840 	rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[1], &value, &value_len);
841 	CU_ASSERT(rc == 0);
842 	SPDK_CU_ASSERT_FATAL(value != NULL);
843 	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
844 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);
845 
846 	rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[2], &value, &value_len);
847 	CU_ASSERT(rc == 0);
848 	SPDK_CU_ASSERT_FATAL(value != NULL);
849 	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
850 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);
851 
852 	/* Confirm that blob is clone of snapshot2, and snapshot2 is clone of snapshot */
853 	count = 2;
854 	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0);
855 	CU_ASSERT(count == 1);
856 	CU_ASSERT(ids[0] == blobid);
857 
858 	count = 2;
859 	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);
860 	CU_ASSERT(count == 1);
861 	CU_ASSERT(ids[0] == snapshotid2);
862 
863 	/* Try to create snapshot from snapshot */
864 	spdk_bs_create_snapshot(bs, snapshotid, NULL, blob_op_with_id_complete, NULL);
865 	poll_threads();
866 	CU_ASSERT(g_bserrno == -EINVAL);
867 	CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID);
868 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 2);
869 
870 	/* Delete blob and confirm that it is no longer on snapshot2 clone list */
871 	ut_blob_close_and_delete(bs, blob);
872 	count = 2;
873 	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0);
874 	CU_ASSERT(count == 0);
875 
876 	/* Delete snapshot2 and confirm that it is no longer on snapshot clone list */
877 	ut_blob_close_and_delete(bs, snapshot2);
878 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
879 	count = 2;
880 	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0);
881 	CU_ASSERT(count == 0);
882 
883 	ut_blob_close_and_delete(bs, snapshot);
884 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 0);
885 }
886 
/*
 * Test that blob I/O is frozen while a snapshot is in progress: a write
 * issued during the freeze window is queued (not executed), and completes
 * correctly once the snapshot finishes and I/O is thawed.
 */
static void
blob_snapshot_freeze_io(void)
{
	struct spdk_io_channel *channel;
	struct spdk_bs_channel *bs_channel;
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_blob_opts opts;
	spdk_blob_id blobid;
	uint32_t num_of_pages = 10;
	uint8_t payload_read[num_of_pages * BLOCKLEN];
	uint8_t payload_write[num_of_pages * BLOCKLEN];
	uint8_t payload_zero[num_of_pages * BLOCKLEN];

	memset(payload_write, 0xE5, sizeof(payload_write));
	memset(payload_read, 0x00, sizeof(payload_read));
	memset(payload_zero, 0x00, sizeof(payload_zero));

	/* Test freeze I/O during snapshot */
	channel = spdk_bs_alloc_io_channel(bs);
	bs_channel = spdk_io_channel_get_ctx(channel);

	/* Create blob with 10 clusters */
	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;
	opts.thin_provision = false;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);

	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);

	/* This is implementation specific.
	 * Flag 'frozen_io' is set in _spdk_bs_snapshot_freeze_cpl callback.
	 * Four async I/O operations happen before that. */
	poll_thread_times(0, 5);

	CU_ASSERT(TAILQ_EMPTY(&bs_channel->queued_io));

	/* Blob I/O should be frozen here */
	CU_ASSERT(blob->frozen_refcnt == 1);

	/* Write to the blob */
	spdk_blob_io_write(blob, channel, payload_write, 0, num_of_pages, blob_op_complete, NULL);

	/* Verify that I/O is queued */
	CU_ASSERT(!TAILQ_EMPTY(&bs_channel->queued_io));
	/* Verify that payload is not written to disk, at this point the blobs already switched */
	CU_ASSERT(blob->active.clusters[0] == 0);

	/* Finish all operations including spdk_bs_create_snapshot */
	poll_threads();

	/* Verify snapshot */
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);

	/* Verify that blob has unset frozen_io */
	CU_ASSERT(blob->frozen_refcnt == 0);

	/* Verify that postponed I/O completed successfully by comparing payload */
	spdk_blob_io_read(blob, channel, payload_read, 0, num_of_pages, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_write, payload_read, num_of_pages * BLOCKLEN) == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();

	ut_blob_close_and_delete(bs, blob);
}
959 
/*
 * Verify clone creation semantics: a clone can be created from a snapshot
 * (optionally with xattrs supplied at creation time), cannot be created
 * from a writable blob (-EINVAL), and can be created from a blob that was
 * explicitly marked read-only.  A fresh clone shares the parent's cluster
 * count but has no clusters allocated of its own.
 */
static void
blob_clone(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts opts;
	struct spdk_blob *blob, *snapshot, *clone;
	spdk_blob_id blobid, cloneid, snapshotid;
	struct spdk_blob_xattr_opts xattrs;
	const void *value;
	size_t value_len;
	int rc;

	/* Create blob with 10 clusters */

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);

	/* Create snapshot */
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snapshotid = g_blobid;

	/* Snapshots are created read-only (both data and metadata). */
	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot = g_blob;
	CU_ASSERT(snapshot->data_ro == true);
	CU_ASSERT(snapshot->md_ro == true);
	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10);

	spdk_blob_close(snapshot, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Create clone from snapshot with xattrs */
	xattrs.names = g_xattr_names;
	xattrs.get_value = _get_xattr_value;
	xattrs.count = 3;
	xattrs.ctx = &g_ctx;

	spdk_bs_create_clone(bs, snapshotid, &xattrs, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	cloneid = g_blobid;

	/* The clone is writable and thin: same logical size, zero allocated clusters. */
	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	clone = g_blob;
	CU_ASSERT(clone->data_ro == false);
	CU_ASSERT(clone->md_ro == false);
	CU_ASSERT(spdk_blob_get_num_clusters(clone) == 10);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(clone) == 0);

	/* The xattrs passed at clone creation must be readable on the clone. */
	rc = spdk_blob_get_xattr_value(clone, g_xattr_names[0], &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);

	rc = spdk_blob_get_xattr_value(clone, g_xattr_names[1], &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);

	rc = spdk_blob_get_xattr_value(clone, g_xattr_names[2], &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);


	spdk_blob_close(clone, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Try to create clone from not read only blob (should fail with -EINVAL) */
	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);
	CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID);

	/* Mark blob as read only */
	spdk_blob_set_read_only(blob);
	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Create clone from read only blob */
	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	cloneid = g_blobid;

	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	clone = g_blob;
	CU_ASSERT(clone->data_ro == false);
	CU_ASSERT(clone->md_ro == false);
	CU_ASSERT(spdk_blob_get_num_clusters(clone) == 10);

	ut_blob_close_and_delete(bs, clone);
	ut_blob_close_and_delete(bs, blob);
}
1077 
/*
 * Exercise spdk_bs_inflate_blob() and spdk_bs_blob_decouple_parent() on a
 * thin-provisioned blob, in two situations:
 *  1) no parent: inflate turns the blob thick, decouple fails;
 *  2) with a snapshot parent: inflate allocates all clusters, decouple
 *     removes the parent link without allocating anything.
 * In both cases the snapshot must be deletable afterwards.
 *
 * \param decouple_parent true to test decouple-parent, false to test inflate.
 */
static void
_blob_inflate(bool decouple_parent)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts opts;
	struct spdk_blob *blob, *snapshot;
	spdk_blob_id blobid, snapshotid;
	struct spdk_io_channel *channel;
	uint64_t free_clusters;

	channel = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(channel != NULL);

	/* Create blob with 10 clusters */

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;
	opts.thin_provision = true;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
	CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == true);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);

	/* 1) Blob with no parent */
	if (decouple_parent) {
		/* Decouple parent of blob with no parent (should fail) */
		spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno != 0);
	} else {
		/* Inflate of thin blob with no parent should made it thick */
		spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == false);
		CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 10);
	}

	/* Taking a snapshot gives the blob a parent and makes it thin again. */
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snapshotid = g_blobid;

	CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == true);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);

	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot = g_blob;
	CU_ASSERT(snapshot->data_ro == true);
	CU_ASSERT(snapshot->md_ro == true);
	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10);

	spdk_blob_close(snapshot, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	free_clusters = spdk_bs_free_cluster_count(bs);

	/* 2) Blob with parent */
	if (!decouple_parent) {
		/* Do full blob inflation */
		spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		/* all 10 clusters should be allocated */
		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 10);
		CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 10);
	} else {
		/* Decouple parent of blob */
		spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		/* when only parent is removed, none of the clusters should be allocated */
		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters);
		CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);
	}

	/* Now, it should be possible to delete snapshot */
	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Only decouple-parent leaves the blob thin-provisioned. */
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
	CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == decouple_parent);

	spdk_bs_free_io_channel(channel);
	poll_threads();

	ut_blob_close_and_delete(bs, blob);
}
1174 
/* Run the inflate scenario in both modes: full inflate, then decouple-parent. */
static void
blob_inflate(void)
{
	_blob_inflate(false);
	_blob_inflate(true);
}
1181 
/* Create a blob, delete it, then verify a subsequent open fails with -ENOENT. */
static void
blob_delete(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts blob_opts;
	spdk_blob_id blobid;

	/* Create a blob and then delete it. */
	ut_spdk_blob_opts_init(&blob_opts);
	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid > 0);
	blobid = g_blobid;

	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Try to open the blob; the id must no longer resolve. */
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOENT);
}
1206 
/*
 * Verify spdk_blob_resize() on a (thick) blob:
 *  - resize of an md_ro blob is rejected with -EPERM;
 *  - growth allocates clusters immediately;
 *  - shrink releases clusters only after spdk_blob_sync_md();
 *  - resizing beyond the blobstore capacity fails with -ENOSPC.
 */
static void
blob_resize_test(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	uint64_t free_clusters;

	free_clusters = spdk_bs_free_cluster_count(bs);

	blob = ut_blob_create_and_open(bs, NULL);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);

	/* Confirm that resize fails if blob is marked read-only. */
	blob->md_ro = true;
	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EPERM);
	blob->md_ro = false;

	/* The blob started at 0 clusters. Resize it to be 5. */
	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT((free_clusters - 5) == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 5);

	/* Shrink the blob to 3 clusters. This will not actually release
	 * the old clusters until the blob is synced.
	 */
	spdk_blob_resize(blob, 3, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	/* Verify there are still 5 clusters in use */
	CU_ASSERT((free_clusters - 5) == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 3);

	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	/* Now there are only 3 clusters in use */
	CU_ASSERT((free_clusters - 3) == spdk_bs_free_cluster_count(bs));

	/* Resize the blob to be 10 clusters. Growth takes effect immediately. */
	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT((free_clusters - 10) == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 10);

	/* Try to resize the blob to size larger than blobstore. */
	spdk_blob_resize(blob, bs->total_clusters + 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOSPC);

	ut_blob_close_and_delete(bs, blob);
}
1264 
1265 static void
1266 blob_resize_thin_test(void)
1267 {
1268 	struct spdk_blob_store *bs = g_bs;
1269 	struct spdk_blob *blob;
1270 	struct spdk_blob_opts opts;
1271 	struct spdk_io_channel *blob_ch;
1272 	uint64_t free_clusters;
1273 	uint64_t io_units_per_cluster;
1274 	uint64_t offset;
1275 	uint8_t buf1[DEV_BUFFER_BLOCKLEN];
1276 
1277 	free_clusters = spdk_bs_free_cluster_count(bs);
1278 
1279 	blob_ch = spdk_bs_alloc_io_channel(bs);
1280 	SPDK_CU_ASSERT_FATAL(blob_ch != NULL);
1281 
1282 	/* Create blob with thin provisioning enabled */
1283 	ut_spdk_blob_opts_init(&opts);
1284 	opts.thin_provision = true;
1285 	opts.num_clusters = 0;
1286 
1287 	blob = ut_blob_create_and_open(bs, &opts);
1288 	CU_ASSERT((free_clusters) == spdk_bs_free_cluster_count(bs));
1289 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);
1290 	io_units_per_cluster = bs_io_units_per_cluster(blob);
1291 
1292 	/* The blob started at 0 clusters. Resize it to be 6. */
1293 	spdk_blob_resize(blob, 6, blob_op_complete, NULL);
1294 	poll_threads();
1295 	CU_ASSERT(g_bserrno == 0);
1296 	CU_ASSERT((free_clusters) == spdk_bs_free_cluster_count(bs));
1297 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);
1298 
1299 	/* Write on cluster 0,2,4 and 5 of blob */
1300 	for (offset = 0; offset < io_units_per_cluster; offset++) {
1301 		spdk_blob_io_write(blob, blob_ch, buf1, offset, 1, blob_op_complete, NULL);
1302 		poll_threads();
1303 		CU_ASSERT(g_bserrno == 0);
1304 	}
1305 	for (offset = 2 * io_units_per_cluster; offset < 3 * io_units_per_cluster; offset++) {
1306 		spdk_blob_io_write(blob, blob_ch, buf1, offset, 1, blob_op_complete, NULL);
1307 		poll_threads();
1308 		CU_ASSERT(g_bserrno == 0);
1309 	}
1310 	for (offset = 4 * io_units_per_cluster; offset < 5 * io_units_per_cluster; offset++) {
1311 		spdk_blob_io_write(blob, blob_ch, buf1, offset, 1, blob_op_complete, NULL);
1312 		poll_threads();
1313 		CU_ASSERT(g_bserrno == 0);
1314 	}
1315 	for (offset = 5 * io_units_per_cluster; offset < 6 * io_units_per_cluster; offset++) {
1316 		spdk_blob_io_write(blob, blob_ch, buf1, offset, 1, blob_op_complete, NULL);
1317 		poll_threads();
1318 		CU_ASSERT(g_bserrno == 0);
1319 	}
1320 
1321 	/* Check allocated clusters after write */
1322 	CU_ASSERT((free_clusters - 4) == spdk_bs_free_cluster_count(bs));
1323 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 4);
1324 
1325 	/* Shrink the blob to 2 clusters. This will not actually release
1326 	 * the old clusters until the blob is synced.
1327 	 */
1328 	spdk_blob_resize(blob, 2, blob_op_complete, NULL);
1329 	poll_threads();
1330 	CU_ASSERT(g_bserrno == 0);
1331 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 2);
1332 	CU_ASSERT((free_clusters - 4) == spdk_bs_free_cluster_count(bs));
1333 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 1);
1334 
1335 	/* Sync blob: 4 clusters were truncated but only 3 of them was allocated */
1336 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
1337 	poll_threads();
1338 	CU_ASSERT(g_bserrno == 0);
1339 	CU_ASSERT((free_clusters - 1) == spdk_bs_free_cluster_count(bs));
1340 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 2);
1341 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 1);
1342 
1343 	spdk_bs_free_io_channel(blob_ch);
1344 	ut_blob_close_and_delete(bs, blob);
1345 }
1346 
/*
 * Verify spdk_blob_set_read_only(): the flags take effect only after the
 * metadata is synced, persist across close/reopen and across a full
 * blobstore reload.
 */
static void
blob_read_only(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct spdk_blob *blob;
	struct spdk_bs_opts opts;
	spdk_blob_id blobid;
	int rc;

	dev = init_dev();
	spdk_bs_opts_init(&opts, sizeof(opts));
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");

	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	blob = ut_blob_create_and_open(bs, NULL);
	blobid = spdk_blob_get_id(blob);

	rc = spdk_blob_set_read_only(blob);
	CU_ASSERT(rc == 0);

	/* The flags must not change until the metadata is synced. */
	CU_ASSERT(blob->data_ro == false);
	CU_ASSERT(blob->md_ro == false);

	/* NOTE(review): this sync passes bs_op_complete while the rest of the
	 * file uses blob_op_complete - presumably both record the status in
	 * g_bserrno; confirm before relying on it. */
	spdk_blob_sync_md(blob, bs_op_complete, NULL);
	poll_threads();

	CU_ASSERT(blob->data_ro == true);
	CU_ASSERT(blob->md_ro == true);
	CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Reopen: flags must survive close/open. */
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;

	CU_ASSERT(blob->data_ro == true);
	CU_ASSERT(blob->md_ro == true);
	CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Reload the blobstore: flags must survive persistence as well. */
	ut_bs_reload(&bs, &opts);

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;

	CU_ASSERT(blob->data_ro == true);
	CU_ASSERT(blob->md_ro == true);
	CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);

	ut_blob_close_and_delete(bs, blob);

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
}
1419 
1420 static void
1421 channel_ops(void)
1422 {
1423 	struct spdk_blob_store *bs = g_bs;
1424 	struct spdk_io_channel *channel;
1425 
1426 	channel = spdk_bs_alloc_io_channel(bs);
1427 	CU_ASSERT(channel != NULL);
1428 
1429 	spdk_bs_free_io_channel(channel);
1430 	poll_threads();
1431 }
1432 
/*
 * Exercise spdk_blob_io_write() error paths and the happy path:
 * write to a zero-sized blob (-EINVAL), write to a data_ro blob (-EPERM),
 * a valid write, and writes that start or extend past the end (-EINVAL).
 */
static void
blob_write(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob = g_blob;
	struct spdk_io_channel *channel;
	uint64_t pages_per_cluster;
	uint8_t payload[10 * BLOCKLEN];

	pages_per_cluster = spdk_bs_get_cluster_size(bs) / spdk_bs_get_page_size(bs);

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	/* Write to a blob with 0 size */
	spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Resize the blob */
	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Confirm that write fails if blob is marked read-only. */
	blob->data_ro = true;
	spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EPERM);
	blob->data_ro = false;

	/* Write to the blob */
	spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Write starting beyond the end */
	spdk_blob_io_write(blob, channel, payload, 5 * pages_per_cluster, 1, blob_op_complete,
			   NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Write starting at a valid location but going off the end */
	spdk_blob_io_write(blob, channel, payload, 4 * pages_per_cluster, pages_per_cluster + 1,
			   blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}
1484 
/*
 * Exercise spdk_blob_io_read(): read from a zero-sized blob (-EINVAL),
 * read from a data_ro blob (must succeed - read-only restricts writes
 * only), a valid read, and reads that start or extend past the end
 * (-EINVAL).  Mirrors blob_write().
 */
static void
blob_read(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob = g_blob;
	struct spdk_io_channel *channel;
	uint64_t pages_per_cluster;
	uint8_t payload[10 * BLOCKLEN];

	pages_per_cluster = spdk_bs_get_cluster_size(bs) / spdk_bs_get_page_size(bs);

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	/* Read from a blob with 0 size */
	spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Resize the blob */
	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Confirm that read passes if blob is marked read-only. */
	blob->data_ro = true;
	spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	blob->data_ro = false;

	/* Read from the blob */
	spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Read starting beyond the end */
	spdk_blob_io_read(blob, channel, payload, 5 * pages_per_cluster, 1, blob_op_complete,
			  NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Read starting at a valid location but going off the end */
	spdk_blob_io_read(blob, channel, payload, 4 * pages_per_cluster, pages_per_cluster + 1,
			  blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}
1536 
/* Write a known pattern to a blob and read it back to verify data integrity. */
static void
blob_rw_verify(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob = g_blob;
	struct spdk_io_channel *channel;
	uint8_t payload_read[10 * BLOCKLEN];
	uint8_t payload_write[10 * BLOCKLEN];

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	spdk_blob_resize(blob, 32, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Write 10 blocks of 0xE5 at block offset 4. */
	memset(payload_write, 0xE5, sizeof(payload_write));
	spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	memset(payload_read, 0x00, sizeof(payload_read));
	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	/* NOTE(review): only the first 4 of the 10 read blocks are compared -
	 * presumably an intentional spot check; confirm whether this should
	 * cover all 10 blocks. */
	CU_ASSERT(memcmp(payload_write, payload_read, 4 * BLOCKLEN) == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}
1567 
/*
 * Verify readv/writev correctness for I/O that crosses a cluster boundary,
 * including that data destined for a relocated cluster does not land in
 * the cluster's original location on the backing device.
 */
static void
blob_rw_verify_iov(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_io_channel *channel;
	uint8_t payload_read[10 * BLOCKLEN];
	uint8_t payload_write[10 * BLOCKLEN];
	struct iovec iov_read[3];
	struct iovec iov_write[3];
	void *buf;

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	blob = ut_blob_create_and_open(bs, NULL);

	spdk_blob_resize(blob, 2, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/*
	 * Manually adjust the offset of the blob's second cluster.  This allows
	 *  us to make sure that the readv/write code correctly accounts for I/O
	 *  that cross cluster boundaries.  Start by asserting that the allocated
	 *  clusters are where we expect before modifying the second cluster.
	 *  (The UT backing device uses 256-block clusters, so cluster N starts
	 *  at LBA N * 256.)
	 */
	CU_ASSERT(blob->active.clusters[0] == 1 * 256);
	CU_ASSERT(blob->active.clusters[1] == 2 * 256);
	blob->active.clusters[1] = 3 * 256;

	memset(payload_write, 0xE5, sizeof(payload_write));
	iov_write[0].iov_base = payload_write;
	iov_write[0].iov_len = 1 * BLOCKLEN;
	iov_write[1].iov_base = payload_write + 1 * BLOCKLEN;
	iov_write[1].iov_len = 5 * BLOCKLEN;
	iov_write[2].iov_base = payload_write + 6 * BLOCKLEN;
	iov_write[2].iov_len = 4 * BLOCKLEN;
	/*
	 * Choose a page offset just before the cluster boundary.  The first 6 pages of payload
	 *  will get written to the first cluster, the last 4 to the second cluster.
	 */
	spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Read the same span back with a different iovec split. */
	memset(payload_read, 0xAA, sizeof(payload_read));
	iov_read[0].iov_base = payload_read;
	iov_read[0].iov_len = 3 * BLOCKLEN;
	iov_read[1].iov_base = payload_read + 3 * BLOCKLEN;
	iov_read[1].iov_len = 4 * BLOCKLEN;
	iov_read[2].iov_base = payload_read + 7 * BLOCKLEN;
	iov_read[2].iov_len = 3 * BLOCKLEN;
	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_write, payload_read, 10 * BLOCKLEN) == 0);

	buf = calloc(1, 256 * BLOCKLEN);
	SPDK_CU_ASSERT_FATAL(buf != NULL);
	/* Check that cluster 2 on "disk" was not modified. */
	CU_ASSERT(memcmp(buf, &g_dev_buffer[512 * BLOCKLEN], 256 * BLOCKLEN) == 0);
	free(buf);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}
1639 
1640 static uint32_t
1641 bs_channel_get_req_count(struct spdk_io_channel *_channel)
1642 {
1643 	struct spdk_bs_channel *channel = spdk_io_channel_get_ctx(_channel);
1644 	struct spdk_bs_request_set *set;
1645 	uint32_t count = 0;
1646 
1647 	TAILQ_FOREACH(set, &channel->reqs, link) {
1648 		count++;
1649 	}
1650 
1651 	return count;
1652 }
1653 
1654 static void
1655 blob_rw_verify_iov_nomem(void)
1656 {
1657 	struct spdk_blob_store *bs = g_bs;
1658 	struct spdk_blob *blob = g_blob;
1659 	struct spdk_io_channel *channel;
1660 	uint8_t payload_write[10 * BLOCKLEN];
1661 	struct iovec iov_write[3];
1662 	uint32_t req_count;
1663 
1664 	channel = spdk_bs_alloc_io_channel(bs);
1665 	CU_ASSERT(channel != NULL);
1666 
1667 	spdk_blob_resize(blob, 2, blob_op_complete, NULL);
1668 	poll_threads();
1669 	CU_ASSERT(g_bserrno == 0);
1670 
1671 	/*
1672 	 * Choose a page offset just before the cluster boundary.  The first 6 pages of payload
1673 	 *  will get written to the first cluster, the last 4 to the second cluster.
1674 	 */
1675 	iov_write[0].iov_base = payload_write;
1676 	iov_write[0].iov_len = 1 * BLOCKLEN;
1677 	iov_write[1].iov_base = payload_write + 1 * BLOCKLEN;
1678 	iov_write[1].iov_len = 5 * BLOCKLEN;
1679 	iov_write[2].iov_base = payload_write + 6 * BLOCKLEN;
1680 	iov_write[2].iov_len = 4 * BLOCKLEN;
1681 	MOCK_SET(calloc, NULL);
1682 	req_count = bs_channel_get_req_count(channel);
1683 	spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
1684 	poll_threads();
1685 	CU_ASSERT(g_bserrno = -ENOMEM);
1686 	CU_ASSERT(req_count == bs_channel_get_req_count(channel));
1687 	MOCK_CLEAR(calloc);
1688 
1689 	spdk_bs_free_io_channel(channel);
1690 	poll_threads();
1691 }
1692 
/* Verify that writev on a data_ro blob fails with -EPERM while readv on the
 * same blob still succeeds. */
static void
blob_rw_iov_read_only(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob = g_blob;
	struct spdk_io_channel *channel;
	uint8_t payload_read[BLOCKLEN];
	uint8_t payload_write[BLOCKLEN];
	struct iovec iov_read;
	struct iovec iov_write;

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	spdk_blob_resize(blob, 2, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Verify that writev failed if read_only flag is set. */
	blob->data_ro = true;
	iov_write.iov_base = payload_write;
	iov_write.iov_len = sizeof(payload_write);
	spdk_blob_io_writev(blob, channel, &iov_write, 1, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EPERM);

	/* Verify that reads pass if data_ro flag is set. */
	iov_read.iov_base = payload_read;
	iov_read.iov_len = sizeof(payload_read);
	spdk_blob_io_readv(blob, channel, &iov_read, 1, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}
1729 
1730 static void
1731 _blob_io_read_no_split(struct spdk_blob *blob, struct spdk_io_channel *channel,
1732 		       uint8_t *payload, uint64_t offset, uint64_t length,
1733 		       spdk_blob_op_complete cb_fn, void *cb_arg)
1734 {
1735 	uint64_t i;
1736 	uint8_t *buf;
1737 	uint64_t page_size = spdk_bs_get_page_size(blob->bs);
1738 
1739 	/* To be sure that operation is NOT split, read one page at the time */
1740 	buf = payload;
1741 	for (i = 0; i < length; i++) {
1742 		spdk_blob_io_read(blob, channel, buf, i + offset, 1, blob_op_complete, NULL);
1743 		poll_threads();
1744 		if (g_bserrno != 0) {
1745 			/* Pass the error code up */
1746 			break;
1747 		}
1748 		buf += page_size;
1749 	}
1750 
1751 	cb_fn(cb_arg, g_bserrno);
1752 }
1753 
1754 static void
1755 _blob_io_write_no_split(struct spdk_blob *blob, struct spdk_io_channel *channel,
1756 			uint8_t *payload, uint64_t offset, uint64_t length,
1757 			spdk_blob_op_complete cb_fn, void *cb_arg)
1758 {
1759 	uint64_t i;
1760 	uint8_t *buf;
1761 	uint64_t page_size = spdk_bs_get_page_size(blob->bs);
1762 
1763 	/* To be sure that operation is NOT split, write one page at the time */
1764 	buf = payload;
1765 	for (i = 0; i < length; i++) {
1766 		spdk_blob_io_write(blob, channel, buf, i + offset, 1, blob_op_complete, NULL);
1767 		poll_threads();
1768 		if (g_bserrno != 0) {
1769 			/* Pass the error code up */
1770 			break;
1771 		}
1772 		buf += page_size;
1773 	}
1774 
1775 	cb_fn(cb_arg, g_bserrno);
1776 }
1777 
/*
 * Verify that large contiguous reads/writes (5 clusters) that must be split
 * internally by the blobstore produce the same data as unsplit single-page
 * I/O, in both directions:
 *  1. write unsplit, read split;
 *  2. write split, read unsplit.
 * Also checks boundary writes (first/last page written separately).
 */
static void
blob_operation_split_rw(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_io_channel *channel;
	struct spdk_blob_opts opts;
	uint64_t cluster_size;

	uint64_t payload_size;
	uint8_t *payload_read;
	uint8_t *payload_write;	/* NOTE(review): allocated and freed but never used in this test */
	uint8_t *payload_pattern;

	uint64_t page_size;
	uint64_t pages_per_cluster;
	uint64_t pages_per_payload;

	uint64_t i;

	cluster_size = spdk_bs_get_cluster_size(bs);
	page_size = spdk_bs_get_page_size(bs);
	pages_per_cluster = cluster_size / page_size;
	pages_per_payload = pages_per_cluster * 5;
	payload_size = cluster_size * 5;

	payload_read = malloc(payload_size);
	SPDK_CU_ASSERT_FATAL(payload_read != NULL);

	payload_write = malloc(payload_size);
	SPDK_CU_ASSERT_FATAL(payload_write != NULL);

	payload_pattern = malloc(payload_size);
	SPDK_CU_ASSERT_FATAL(payload_pattern != NULL);

	/* Prepare a distinct per-page pattern to write: the first 8 bytes of
	 * page i hold i + 1, the rest is 0xFF. */
	memset(payload_pattern, 0xFF, payload_size);
	for (i = 0; i < pages_per_payload; i++) {
		*((uint64_t *)(payload_pattern + page_size * i)) = (i + 1);
	}

	channel = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(channel != NULL);

	/* Create blob */
	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = false;
	opts.num_clusters = 5;

	blob = ut_blob_create_and_open(bs, &opts);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);

	/* Initial read should return zeroed payload */
	memset(payload_read, 0xFF, payload_size);
	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size));

	/* Fill whole blob except last page */
	spdk_blob_io_write(blob, channel, payload_pattern, 0, pages_per_payload - 1,
			   blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Write last page with a pattern */
	spdk_blob_io_write(blob, channel, payload_pattern, pages_per_payload - 1, 1,
			   blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Read whole blob and check consistency: the bulk matches the pattern
	 * and the last page holds the pattern's first page. */
	memset(payload_read, 0xFF, payload_size);
	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size - page_size) == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read + payload_size - page_size, page_size) == 0);

	/* Fill whole blob except first page */
	spdk_blob_io_write(blob, channel, payload_pattern, 1, pages_per_payload - 1,
			   blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Write first page with a pattern */
	spdk_blob_io_write(blob, channel, payload_pattern, 0, 1,
			   blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Read whole blob and check consistency: pages 1..N match the pattern
	 * and the first page holds the pattern's first page. */
	memset(payload_read, 0xFF, payload_size);
	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read + page_size, payload_size - page_size) == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read, page_size) == 0);


	/* Fill whole blob with a pattern (5 clusters) */

	/* 1. Read test: write one page at a time (unsplit), then read all at
	 * once (a request the blobstore must split internally). */
	_blob_io_write_no_split(blob, channel, payload_pattern, 0, pages_per_payload,
				blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	memset(payload_read, 0xFF, payload_size);
	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);

	/* 2. Write test: write all at once (split internally), then read back
	 * one page at a time (unsplit). */
	spdk_blob_io_write(blob, channel, payload_pattern, 0, pages_per_payload,
			   blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	memset(payload_read, 0xFF, payload_size);
	_blob_io_read_no_split(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();

	g_blob = NULL;
	g_blobid = 0;

	free(payload_read);
	free(payload_write);
	free(payload_pattern);

	ut_blob_close_and_delete(bs, blob);
}
1917 
1918 static void
1919 blob_operation_split_rw_iov(void)
1920 {
1921 	struct spdk_blob_store *bs = g_bs;
1922 	struct spdk_blob *blob;
1923 	struct spdk_io_channel *channel;
1924 	struct spdk_blob_opts opts;
1925 	uint64_t cluster_size;
1926 
1927 	uint64_t payload_size;
1928 	uint8_t *payload_read;
1929 	uint8_t *payload_write;
1930 	uint8_t *payload_pattern;
1931 
1932 	uint64_t page_size;
1933 	uint64_t pages_per_cluster;
1934 	uint64_t pages_per_payload;
1935 
1936 	struct iovec iov_read[2];
1937 	struct iovec iov_write[2];
1938 
1939 	uint64_t i, j;
1940 
1941 	cluster_size = spdk_bs_get_cluster_size(bs);
1942 	page_size = spdk_bs_get_page_size(bs);
1943 	pages_per_cluster = cluster_size / page_size;
1944 	pages_per_payload = pages_per_cluster * 5;
1945 	payload_size = cluster_size * 5;
1946 
1947 	payload_read = malloc(payload_size);
1948 	SPDK_CU_ASSERT_FATAL(payload_read != NULL);
1949 
1950 	payload_write = malloc(payload_size);
1951 	SPDK_CU_ASSERT_FATAL(payload_write != NULL);
1952 
1953 	payload_pattern = malloc(payload_size);
1954 	SPDK_CU_ASSERT_FATAL(payload_pattern != NULL);
1955 
1956 	/* Prepare random pattern to write */
1957 	for (i = 0; i < pages_per_payload; i++) {
1958 		for (j = 0; j < page_size / sizeof(uint64_t); j++) {
1959 			uint64_t *tmp;
1960 
1961 			tmp = (uint64_t *)payload_pattern;
1962 			tmp += ((page_size * i) / sizeof(uint64_t)) + j;
1963 			*tmp = i + 1;
1964 		}
1965 	}
1966 
1967 	channel = spdk_bs_alloc_io_channel(bs);
1968 	SPDK_CU_ASSERT_FATAL(channel != NULL);
1969 
1970 	/* Create blob */
1971 	ut_spdk_blob_opts_init(&opts);
1972 	opts.thin_provision = false;
1973 	opts.num_clusters = 5;
1974 
1975 	blob = ut_blob_create_and_open(bs, &opts);
1976 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
1977 
1978 	/* Initial read should return zeroes payload */
1979 	memset(payload_read, 0xFF, payload_size);
1980 	iov_read[0].iov_base = payload_read;
1981 	iov_read[0].iov_len = cluster_size * 3;
1982 	iov_read[1].iov_base = payload_read + cluster_size * 3;
1983 	iov_read[1].iov_len = cluster_size * 2;
1984 	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL);
1985 	poll_threads();
1986 	CU_ASSERT(g_bserrno == 0);
1987 	CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size));
1988 
1989 	/* First of iovs fills whole blob except last page and second of iovs writes last page
1990 	 *  with a pattern. */
1991 	iov_write[0].iov_base = payload_pattern;
1992 	iov_write[0].iov_len = payload_size - page_size;
1993 	iov_write[1].iov_base = payload_pattern;
1994 	iov_write[1].iov_len = page_size;
1995 	spdk_blob_io_writev(blob, channel, iov_write, 2, 0, pages_per_payload, blob_op_complete, NULL);
1996 	poll_threads();
1997 	CU_ASSERT(g_bserrno == 0);
1998 
1999 	/* Read whole blob and check consistency */
2000 	memset(payload_read, 0xFF, payload_size);
2001 	iov_read[0].iov_base = payload_read;
2002 	iov_read[0].iov_len = cluster_size * 2;
2003 	iov_read[1].iov_base = payload_read + cluster_size * 2;
2004 	iov_read[1].iov_len = cluster_size * 3;
2005 	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL);
2006 	poll_threads();
2007 	CU_ASSERT(g_bserrno == 0);
2008 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size - page_size) == 0);
2009 	CU_ASSERT(memcmp(payload_pattern, payload_read + payload_size - page_size, page_size) == 0);
2010 
2011 	/* First of iovs fills only first page and second of iovs writes whole blob except
2012 	 *  first page with a pattern. */
2013 	iov_write[0].iov_base = payload_pattern;
2014 	iov_write[0].iov_len = page_size;
2015 	iov_write[1].iov_base = payload_pattern;
2016 	iov_write[1].iov_len = payload_size - page_size;
2017 	spdk_blob_io_writev(blob, channel, iov_write, 2, 0, pages_per_payload, blob_op_complete, NULL);
2018 	poll_threads();
2019 	CU_ASSERT(g_bserrno == 0);
2020 
2021 	/* Read whole blob and check consistency */
2022 	memset(payload_read, 0xFF, payload_size);
2023 	iov_read[0].iov_base = payload_read;
2024 	iov_read[0].iov_len = cluster_size * 4;
2025 	iov_read[1].iov_base = payload_read + cluster_size * 4;
2026 	iov_read[1].iov_len = cluster_size;
2027 	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL);
2028 	poll_threads();
2029 	CU_ASSERT(g_bserrno == 0);
2030 	CU_ASSERT(memcmp(payload_pattern, payload_read + page_size, payload_size - page_size) == 0);
2031 	CU_ASSERT(memcmp(payload_pattern, payload_read, page_size) == 0);
2032 
2033 
2034 	/* Fill whole blob with a pattern (5 clusters) */
2035 
2036 	/* 1. Read test. */
2037 	_blob_io_write_no_split(blob, channel, payload_pattern, 0, pages_per_payload,
2038 				blob_op_complete, NULL);
2039 	poll_threads();
2040 	CU_ASSERT(g_bserrno == 0);
2041 
2042 	memset(payload_read, 0xFF, payload_size);
2043 	iov_read[0].iov_base = payload_read;
2044 	iov_read[0].iov_len = cluster_size;
2045 	iov_read[1].iov_base = payload_read + cluster_size;
2046 	iov_read[1].iov_len = cluster_size * 4;
2047 	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL);
2048 	poll_threads();
2049 	CU_ASSERT(g_bserrno == 0);
2050 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);
2051 
2052 	/* 2. Write test. */
2053 	iov_write[0].iov_base = payload_read;
2054 	iov_write[0].iov_len = cluster_size * 2;
2055 	iov_write[1].iov_base = payload_read + cluster_size * 2;
2056 	iov_write[1].iov_len = cluster_size * 3;
2057 	spdk_blob_io_writev(blob, channel, iov_write, 2, 0, pages_per_payload, blob_op_complete, NULL);
2058 	poll_threads();
2059 	CU_ASSERT(g_bserrno == 0);
2060 
2061 	memset(payload_read, 0xFF, payload_size);
2062 	_blob_io_read_no_split(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
2063 	poll_threads();
2064 	CU_ASSERT(g_bserrno == 0);
2065 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);
2066 
2067 	spdk_bs_free_io_channel(channel);
2068 	poll_threads();
2069 
2070 	g_blob = NULL;
2071 	g_blobid = 0;
2072 
2073 	free(payload_read);
2074 	free(payload_write);
2075 	free(payload_pattern);
2076 
2077 	ut_blob_close_and_delete(bs, blob);
2078 }
2079 
2080 static void
2081 blob_unmap(void)
2082 {
2083 	struct spdk_blob_store *bs = g_bs;
2084 	struct spdk_blob *blob;
2085 	struct spdk_io_channel *channel;
2086 	struct spdk_blob_opts opts;
2087 	uint8_t payload[BLOCKLEN];
2088 	int i;
2089 
2090 	channel = spdk_bs_alloc_io_channel(bs);
2091 	CU_ASSERT(channel != NULL);
2092 
2093 	ut_spdk_blob_opts_init(&opts);
2094 	opts.num_clusters = 10;
2095 
2096 	blob = ut_blob_create_and_open(bs, &opts);
2097 
2098 	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
2099 	poll_threads();
2100 	CU_ASSERT(g_bserrno == 0);
2101 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 10);
2102 
2103 	memset(payload, 0, sizeof(payload));
2104 	payload[0] = 0xFF;
2105 
2106 	/*
2107 	 * Set first byte of every cluster to 0xFF.
2108 	 * First cluster on device is reserved so let's start from cluster number 1
2109 	 */
2110 	for (i = 1; i < 11; i++) {
2111 		g_dev_buffer[i * SPDK_BLOB_OPTS_CLUSTER_SZ] = 0xFF;
2112 	}
2113 
2114 	/* Confirm writes */
2115 	for (i = 0; i < 10; i++) {
2116 		payload[0] = 0;
2117 		spdk_blob_io_read(blob, channel, &payload, i * SPDK_BLOB_OPTS_CLUSTER_SZ / BLOCKLEN, 1,
2118 				  blob_op_complete, NULL);
2119 		poll_threads();
2120 		CU_ASSERT(g_bserrno == 0);
2121 		CU_ASSERT(payload[0] == 0xFF);
2122 	}
2123 
2124 	/* Mark some clusters as unallocated */
2125 	blob->active.clusters[1] = 0;
2126 	blob->active.clusters[2] = 0;
2127 	blob->active.clusters[3] = 0;
2128 	blob->active.clusters[6] = 0;
2129 	blob->active.clusters[8] = 0;
2130 	blob->active.num_allocated_clusters -= 5;
2131 
2132 	/* Unmap clusters by resizing to 0 */
2133 	spdk_blob_resize(blob, 0, blob_op_complete, NULL);
2134 	poll_threads();
2135 	CU_ASSERT(g_bserrno == 0);
2136 
2137 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
2138 	poll_threads();
2139 	CU_ASSERT(g_bserrno == 0);
2140 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);
2141 
2142 	/* Confirm that only 'allocated' clusters were unmapped */
2143 	for (i = 1; i < 11; i++) {
2144 		switch (i) {
2145 		case 2:
2146 		case 3:
2147 		case 4:
2148 		case 7:
2149 		case 9:
2150 			CU_ASSERT(g_dev_buffer[i * SPDK_BLOB_OPTS_CLUSTER_SZ] == 0xFF);
2151 			break;
2152 		default:
2153 			CU_ASSERT(g_dev_buffer[i * SPDK_BLOB_OPTS_CLUSTER_SZ] == 0);
2154 			break;
2155 		}
2156 	}
2157 
2158 	spdk_bs_free_io_channel(channel);
2159 	poll_threads();
2160 
2161 	ut_blob_close_and_delete(bs, blob);
2162 }
2163 
2164 static void
2165 blob_iter(void)
2166 {
2167 	struct spdk_blob_store *bs = g_bs;
2168 	struct spdk_blob *blob;
2169 	spdk_blob_id blobid;
2170 	struct spdk_blob_opts blob_opts;
2171 
2172 	spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL);
2173 	poll_threads();
2174 	CU_ASSERT(g_blob == NULL);
2175 	CU_ASSERT(g_bserrno == -ENOENT);
2176 
2177 	ut_spdk_blob_opts_init(&blob_opts);
2178 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
2179 	poll_threads();
2180 	CU_ASSERT(g_bserrno == 0);
2181 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
2182 	blobid = g_blobid;
2183 
2184 	spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL);
2185 	poll_threads();
2186 	CU_ASSERT(g_blob != NULL);
2187 	CU_ASSERT(g_bserrno == 0);
2188 	blob = g_blob;
2189 	CU_ASSERT(spdk_blob_get_id(blob) == blobid);
2190 
2191 	spdk_bs_iter_next(bs, blob, blob_op_with_handle_complete, NULL);
2192 	poll_threads();
2193 	CU_ASSERT(g_blob == NULL);
2194 	CU_ASSERT(g_bserrno == -ENOENT);
2195 }
2196 
/*
 * Exercise the public and internal xattr APIs on an open blob:
 * set/get/overwrite/remove, md_ro enforcement (-EPERM on mutation of a
 * read-only blob, reads still allowed), name enumeration, the separate
 * internal xattr namespace, and persistence of an internal xattr across
 * a blobstore reload.
 */
static void
blob_xattr(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob = g_blob;
	spdk_blob_id blobid = spdk_blob_get_id(blob);
	uint64_t length;
	int rc;
	const char *name1, *name2;
	const void *value;
	size_t value_len;
	struct spdk_xattr_names *names;

	/* Test that set_xattr fails if md_ro flag is set. */
	blob->md_ro = true;
	rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
	CU_ASSERT(rc == -EPERM);

	blob->md_ro = false;
	rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
	CU_ASSERT(rc == 0);

	length = 2345;
	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
	CU_ASSERT(rc == 0);

	/* Overwrite "length" xattr. */
	length = 3456;
	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
	CU_ASSERT(rc == 0);

	/* get_xattr should still work even if md_ro flag is set. */
	value = NULL;
	blob->md_ro = true;
	rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(*(uint64_t *)value == length);
	CU_ASSERT(value_len == 8);
	blob->md_ro = false;

	/* Looking up a name that was never set must fail. */
	rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len);
	CU_ASSERT(rc == -ENOENT);

	/* Enumerate: both names present, in unspecified order, no duplicates. */
	names = NULL;
	rc = spdk_blob_get_xattr_names(blob, &names);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(names != NULL);
	CU_ASSERT(spdk_xattr_names_get_count(names) == 2);
	name1 = spdk_xattr_names_get_name(names, 0);
	SPDK_CU_ASSERT_FATAL(name1 != NULL);
	CU_ASSERT(!strcmp(name1, "name") || !strcmp(name1, "length"));
	name2 = spdk_xattr_names_get_name(names, 1);
	SPDK_CU_ASSERT_FATAL(name2 != NULL);
	CU_ASSERT(!strcmp(name2, "name") || !strcmp(name2, "length"));
	CU_ASSERT(strcmp(name1, name2));
	spdk_xattr_names_free(names);

	/* Confirm that remove_xattr fails if md_ro is set to true. */
	blob->md_ro = true;
	rc = spdk_blob_remove_xattr(blob, "name");
	CU_ASSERT(rc == -EPERM);

	blob->md_ro = false;
	rc = spdk_blob_remove_xattr(blob, "name");
	CU_ASSERT(rc == 0);

	rc = spdk_blob_remove_xattr(blob, "foobar");
	CU_ASSERT(rc == -ENOENT);

	/* Set internal xattr */
	length = 7898;
	rc = blob_set_xattr(blob, "internal", &length, sizeof(length), true);
	CU_ASSERT(rc == 0);
	rc = blob_get_xattr_value(blob, "internal", &value, &value_len, true);
	CU_ASSERT(rc == 0);
	CU_ASSERT(*(uint64_t *)value == length);
	/* try to get public xattr with same name - namespaces are distinct */
	rc = spdk_blob_get_xattr_value(blob, "internal", &value, &value_len);
	CU_ASSERT(rc != 0);
	rc = blob_get_xattr_value(blob, "internal", &value, &value_len, false);
	CU_ASSERT(rc != 0);
	/* Check if SPDK_BLOB_INTERNAL_XATTR is set */
	CU_ASSERT((blob->invalid_flags & SPDK_BLOB_INTERNAL_XATTR) ==
		  SPDK_BLOB_INTERNAL_XATTR);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();

	/* Check if xattrs are persisted */
	ut_bs_reload(&bs, NULL);

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;

	/* The internal xattr survived the reload with its value intact. */
	rc = blob_get_xattr_value(blob, "internal", &value, &value_len, true);
	CU_ASSERT(rc == 0);
	CU_ASSERT(*(uint64_t *)value == length);

	/* try to get internal xattr through public call */
	rc = spdk_blob_get_xattr_value(blob, "internal", &value, &value_len);
	CU_ASSERT(rc != 0);

	/* Removing the last internal xattr clears the internal-xattr flag. */
	rc = blob_remove_xattr(blob, "internal", true);
	CU_ASSERT(rc == 0);

	CU_ASSERT((blob->invalid_flags & SPDK_BLOB_INTERNAL_XATTR) == 0);
}
2308 
2309 static void
2310 blob_parse_md(void)
2311 {
2312 	struct spdk_blob_store *bs = g_bs;
2313 	struct spdk_blob *blob;
2314 	int rc;
2315 	uint32_t used_pages;
2316 	size_t xattr_length;
2317 	char *xattr;
2318 
2319 	used_pages = spdk_bit_array_count_set(bs->used_md_pages);
2320 	blob = ut_blob_create_and_open(bs, NULL);
2321 
2322 	/* Create large extent to force more than 1 page of metadata. */
2323 	xattr_length = SPDK_BS_MAX_DESC_SIZE - sizeof(struct spdk_blob_md_descriptor_xattr) -
2324 		       strlen("large_xattr");
2325 	xattr = calloc(xattr_length, sizeof(char));
2326 	SPDK_CU_ASSERT_FATAL(xattr != NULL);
2327 	rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
2328 	free(xattr);
2329 	SPDK_CU_ASSERT_FATAL(rc == 0);
2330 
2331 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
2332 	poll_threads();
2333 
2334 	/* Delete the blob and verify that number of pages returned to before its creation. */
2335 	SPDK_CU_ASSERT_FATAL(used_pages != spdk_bit_array_count_set(bs->used_md_pages));
2336 	ut_blob_close_and_delete(bs, blob);
2337 	SPDK_CU_ASSERT_FATAL(used_pages == spdk_bit_array_count_set(bs->used_md_pages));
2338 }
2339 
/*
 * End-to-end init/unload/load cycle for a blobstore: initialize with a
 * custom bstype, create a blob with xattrs, unload cleanly, then walk
 * spdk_bs_load() failure paths (unsupported blocklen, zeroed opts
 * fields, shrunken bdev) and success paths (matching bstype, grown
 * bdev, and compatibility mode where super_block->size == 0).
 */
static void
bs_load(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	spdk_blob_id blobid;
	struct spdk_blob *blob;
	struct spdk_bs_super_block *super_block;
	uint64_t length;
	int rc;
	const void *value;
	size_t value_len;
	struct spdk_bs_opts opts;
	struct spdk_blob_opts blob_opts;

	dev = init_dev();
	spdk_bs_opts_init(&opts, sizeof(opts));
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");

	/* Initialize a new blob store */
	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	/* Try to open a blobid that does not exist */
	spdk_bs_open_blob(bs, 0, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOENT);
	CU_ASSERT(g_blob == NULL);

	/* Create a blob */
	blob = ut_blob_create_and_open(bs, NULL);
	blobid = spdk_blob_get_id(blob);

	/* Try again to open valid blob but without the upper bit set */
	spdk_bs_open_blob(bs, blobid & 0xFFFFFFFF, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOENT);
	CU_ASSERT(g_blob == NULL);

	/* Set some xattrs */
	rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
	CU_ASSERT(rc == 0);

	length = 2345;
	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
	CU_ASSERT(rc == 0);

	/* Resize the blob */
	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	blob = NULL;
	g_blob = NULL;
	g_blobid = SPDK_BLOBID_INVALID;

	/* Unload the blob store */
	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
	g_blob = NULL;
	g_blobid = 0;

	/* A clean unload must persist clean == 1 in the super block. */
	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
	CU_ASSERT(super_block->clean == 1);

	/* Load should fail for device with an unsupported blocklen */
	dev = init_dev();
	dev->blocklen = BLOCKLEN * 2;
	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Load should fail when max_md_ops is set to zero */
	dev = init_dev();
	spdk_bs_opts_init(&opts, sizeof(opts));
	opts.max_md_ops = 0;
	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Load should fail when max_channel_ops is set to zero */
	dev = init_dev();
	spdk_bs_opts_init(&opts, sizeof(opts));
	opts.max_channel_ops = 0;
	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Load an existing blob store */
	dev = init_dev();
	spdk_bs_opts_init(&opts, sizeof(opts));
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
	CU_ASSERT(super_block->clean == 1);
	CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen);

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;

	/*
	 * Start a metadata sync. The completion is not polled here, so the
	 * super block in the device buffer still reads clean == 1.
	 * NOTE(review): the original comment said the blobstore is "marked
	 * dirty after first metadata sync" yet the assertion checks
	 * clean == 1 - confirm the intended semantics against upstream.
	 */
	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	CU_ASSERT(super_block->clean == 1);

	/* Get the xattrs - both survived the unload/load cycle */
	value = NULL;
	rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(*(uint64_t *)value == length);
	CU_ASSERT(value_len == 8);

	rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len);
	CU_ASSERT(rc == -ENOENT);

	/* The resize to 10 clusters also persisted. */
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 10);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	blob = NULL;
	g_blob = NULL;

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;

	/* Load should fail: bdev size < saved size */
	dev = init_dev();
	dev->blockcnt /= 2;

	spdk_bs_opts_init(&opts, sizeof(opts));
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();

	CU_ASSERT(g_bserrno == -EILSEQ);

	/* Load should succeed: bdev size > saved size */
	dev = init_dev();
	dev->blockcnt *= 4;

	spdk_bs_opts_init(&opts, sizeof(opts));
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	CU_ASSERT(g_bserrno == 0);
	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();


	/* Test compatibility mode: size == 0 in the super block means the
	 * blobstore predates the size field and should derive it from the
	 * device. */

	dev = init_dev();
	super_block->size = 0;
	super_block->crc = blob_md_page_calc_crc(super_block);

	spdk_bs_opts_init(&opts, sizeof(opts));
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	/* Create a blob */
	ut_spdk_blob_opts_init(&blob_opts);
	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);

	/* Blobstore should update number of blocks in super_block */
	CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen);
	CU_ASSERT(super_block->clean == 0);

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(super_block->clean == 1);
	g_bs = NULL;

}
2545 
/*
 * Validate blobstore-load handling of a snapshot left in the
 * SNAPSHOT_PENDING_REMOVAL state (an interrupted snapshot delete):
 * load keeps the snapshot while a clone still references it (clearing
 * the pending-removal xattr), and deletes it once no blob points to it.
 */
static void
bs_load_pending_removal(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts opts;
	struct spdk_blob *blob, *snapshot;
	spdk_blob_id blobid, snapshotid;
	const void *value;
	size_t value_len;
	int rc;

	/* Create blob */
	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);

	/* Create snapshot */
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snapshotid = g_blobid;

	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot = g_blob;

	/* Set SNAPSHOT_PENDING_REMOVAL xattr to simulate a delete that was
	 * interrupted before completing. md_ro is dropped temporarily since
	 * snapshots are read-only. */
	snapshot->md_ro = false;
	rc = blob_set_xattr(snapshot, SNAPSHOT_PENDING_REMOVAL, &blobid, sizeof(spdk_blob_id), true);
	CU_ASSERT(rc == 0);
	snapshot->md_ro = true;

	spdk_blob_close(snapshot, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Reload blobstore */
	ut_bs_reload(&bs, NULL);

	/* Snapshot should not be removed as blob is still pointing to it */
	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot = g_blob;

	/* SNAPSHOT_PENDING_REMOVAL xattr should be removed during load */
	rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_PENDING_REMOVAL, &value, &value_len);
	CU_ASSERT(rc != 0);

	/* Set SNAPSHOT_PENDING_REMOVAL xattr again */
	snapshot->md_ro = false;
	rc = blob_set_xattr(snapshot, SNAPSHOT_PENDING_REMOVAL, &blobid, sizeof(spdk_blob_id), true);
	CU_ASSERT(rc == 0);
	snapshot->md_ro = true;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;

	/* Remove parent_id from blob by removing BLOB_SNAPSHOT xattr,
	 * severing the clone's reference to the snapshot. */
	blob_remove_xattr(blob, BLOB_SNAPSHOT, true);

	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_blob_close(snapshot, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Reload blobstore */
	ut_bs_reload(&bs, NULL);

	/* Snapshot should be removed as blob is not pointing to it anymore */
	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno != 0);
}
2640 
2641 static void
2642 bs_load_custom_cluster_size(void)
2643 {
2644 	struct spdk_blob_store *bs;
2645 	struct spdk_bs_dev *dev;
2646 	struct spdk_bs_super_block *super_block;
2647 	struct spdk_bs_opts opts;
2648 	uint32_t custom_cluster_size = 4194304; /* 4MiB */
2649 	uint32_t cluster_sz;
2650 	uint64_t total_clusters;
2651 
2652 	dev = init_dev();
2653 	spdk_bs_opts_init(&opts, sizeof(opts));
2654 	opts.cluster_sz = custom_cluster_size;
2655 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2656 
2657 	/* Initialize a new blob store */
2658 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2659 	poll_threads();
2660 	CU_ASSERT(g_bserrno == 0);
2661 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2662 	bs = g_bs;
2663 	cluster_sz = bs->cluster_sz;
2664 	total_clusters = bs->total_clusters;
2665 
2666 	/* Unload the blob store */
2667 	spdk_bs_unload(bs, bs_op_complete, NULL);
2668 	poll_threads();
2669 	CU_ASSERT(g_bserrno == 0);
2670 	g_bs = NULL;
2671 	g_blob = NULL;
2672 	g_blobid = 0;
2673 
2674 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
2675 	CU_ASSERT(super_block->clean == 1);
2676 
2677 	/* Load an existing blob store */
2678 	dev = init_dev();
2679 	spdk_bs_opts_init(&opts, sizeof(opts));
2680 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2681 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2682 	poll_threads();
2683 	CU_ASSERT(g_bserrno == 0);
2684 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2685 	bs = g_bs;
2686 	/* Compare cluster size and number to one after initialization */
2687 	CU_ASSERT(cluster_sz == bs->cluster_sz);
2688 	CU_ASSERT(total_clusters == bs->total_clusters);
2689 
2690 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
2691 	CU_ASSERT(super_block->clean == 1);
2692 	CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen);
2693 
2694 	spdk_bs_unload(bs, bs_op_complete, NULL);
2695 	poll_threads();
2696 	CU_ASSERT(g_bserrno == 0);
2697 	CU_ASSERT(super_block->clean == 1);
2698 	g_bs = NULL;
2699 }
2700 
/*
 * Simulate an spdk_bs_grow() that was interrupted after enlarging the
 * on-disk used_cluster mask but before updating the super block, then
 * verify a subsequent spdk_bs_load() on the doubled-size device still
 * succeeds with the original capacity and that the existing blob and
 * snapshot remain intact.
 */
static void
bs_load_after_failed_grow(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct spdk_bs_super_block *super_block;
	struct spdk_bs_opts opts;
	struct spdk_bs_md_mask *mask;
	struct spdk_blob_opts blob_opts;
	struct spdk_blob *blob, *snapshot;
	spdk_blob_id blobid, snapshotid;
	uint64_t total_data_clusters;

	dev = init_dev();
	spdk_bs_opts_init(&opts, sizeof(opts));
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
	/*
	 * The bdev_size is 64M, cluster_sz is 1M, so there are 64 clusters. The
	 * blobstore will create 64 md pages by default. We set num_md_pages to 128,
	 * thus the blobstore could grow to the double size.
	 */
	opts.num_md_pages = 128;

	/* Initialize a new blob store */
	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	/* Create blob */
	ut_spdk_blob_opts_init(&blob_opts);
	blob_opts.num_clusters = 10;

	blob = ut_blob_create_and_open(bs, &blob_opts);
	blobid = spdk_blob_get_id(blob);

	/* Create snapshot */
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snapshotid = g_blobid;

	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot = g_blob;

	spdk_blob_close(snapshot, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* The blob's 10 clusters are the only ones consumed. */
	total_data_clusters = bs->total_data_clusters;
	CU_ASSERT(bs->num_free_clusters + 10 == total_data_clusters);

	/* Unload the blob store */
	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
	g_blob = NULL;
	g_blobid = 0;

	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
	CU_ASSERT(super_block->clean == 1);

	/* Locate the persisted used-cluster mask on the raw device buffer. */
	mask = (struct spdk_bs_md_mask *)(g_dev_buffer + super_block->used_cluster_mask_start * BLOCKLEN);
	CU_ASSERT(mask->type == SPDK_MD_MASK_TYPE_USED_CLUSTERS);
	CU_ASSERT(mask->length == super_block->size / super_block->cluster_size);

	/*
	 * We change the mask->length to emulate this scenario: A spdk_bs_grow failed after it changed
	 * the used_cluster bitmap length, but it didn't change the super block yet.
	 */
	mask->length *= 2;

	/* Load an existing blob store - the device is now twice as large,
	 * matching the inflated mask length. */
	dev = init_dev();
	dev->blockcnt *= 2;
	spdk_bs_opts_init(&opts, sizeof(opts));
	opts.clear_method = BS_CLEAR_WITH_NONE;
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	/* Check the capacity is the same as before */
	CU_ASSERT(bs->total_data_clusters == total_data_clusters);
	CU_ASSERT(bs->num_free_clusters + 10 == total_data_clusters);

	/* Check the blob and the snapshot are still available */
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot = g_blob;

	spdk_blob_close(snapshot, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(super_block->clean == 1);
	g_bs = NULL;
}
2826 
/*
 * Verify bstype checking on load: a load whose opts carry a non-matching,
 * non-empty bstype must fail, while an all-zero bstype acts as a wildcard
 * and loads successfully.  Exercised against blobstores created with both
 * a named bstype and an empty one.
 */
static void
bs_type(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct spdk_bs_opts opts;

	dev = init_dev();
	spdk_bs_opts_init(&opts, sizeof(opts));
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");

	/* Initialize a new blob store with bstype "TESTTYPE" */
	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	/* Unload the blob store */
	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
	g_blob = NULL;
	g_blobid = 0;

	/* Load with a non-existing blobstore type, expecting failure */
	dev = init_dev();
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "NONEXISTING");
	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno != 0);

	/* Load with empty blobstore type (wildcard), expecting success */
	dev = init_dev();
	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;

	/* Initialize a new blob store with empty bstype (NULL opts) */
	dev = init_dev();
	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;

	/* Load non existing blobstore type against the empty-bstype store,
	 * expecting failure */
	dev = init_dev();
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "NONEXISTING");
	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno != 0);

	/* Load with empty blobstore type, expecting success */
	dev = init_dev();
	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
}
2909 
2910 static void
2911 bs_super_block(void)
2912 {
2913 	struct spdk_blob_store *bs;
2914 	struct spdk_bs_dev *dev;
2915 	struct spdk_bs_super_block *super_block;
2916 	struct spdk_bs_opts opts;
2917 	struct spdk_bs_super_block_ver1 super_block_v1;
2918 
2919 	dev = init_dev();
2920 	spdk_bs_opts_init(&opts, sizeof(opts));
2921 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2922 
2923 	/* Initialize a new blob store */
2924 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2925 	poll_threads();
2926 	CU_ASSERT(g_bserrno == 0);
2927 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2928 	bs = g_bs;
2929 
2930 	/* Unload the blob store */
2931 	spdk_bs_unload(bs, bs_op_complete, NULL);
2932 	poll_threads();
2933 	CU_ASSERT(g_bserrno == 0);
2934 	g_bs = NULL;
2935 	g_blob = NULL;
2936 	g_blobid = 0;
2937 
2938 	/* Load an existing blob store with version newer than supported */
2939 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
2940 	super_block->version++;
2941 
2942 	dev = init_dev();
2943 	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
2944 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2945 	poll_threads();
2946 	CU_ASSERT(g_bserrno != 0);
2947 
2948 	/* Create a new blob store with super block version 1 */
2949 	dev = init_dev();
2950 	super_block_v1.version = 1;
2951 	memcpy(super_block_v1.signature, "SPDKBLOB", sizeof(super_block_v1.signature));
2952 	super_block_v1.length = 0x1000;
2953 	super_block_v1.clean = 1;
2954 	super_block_v1.super_blob = 0xFFFFFFFFFFFFFFFF;
2955 	super_block_v1.cluster_size = 0x100000;
2956 	super_block_v1.used_page_mask_start = 0x01;
2957 	super_block_v1.used_page_mask_len = 0x01;
2958 	super_block_v1.used_cluster_mask_start = 0x02;
2959 	super_block_v1.used_cluster_mask_len = 0x01;
2960 	super_block_v1.md_start = 0x03;
2961 	super_block_v1.md_len = 0x40;
2962 	memset(super_block_v1.reserved, 0, 4036);
2963 	super_block_v1.crc = blob_md_page_calc_crc(&super_block_v1);
2964 	memcpy(g_dev_buffer, &super_block_v1, sizeof(struct spdk_bs_super_block_ver1));
2965 
2966 	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
2967 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2968 	poll_threads();
2969 	CU_ASSERT(g_bserrno == 0);
2970 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2971 	bs = g_bs;
2972 
2973 	spdk_bs_unload(bs, bs_op_complete, NULL);
2974 	poll_threads();
2975 	CU_ASSERT(g_bserrno == 0);
2976 	g_bs = NULL;
2977 }
2978 
2979 static void
2980 bs_test_recover_cluster_count(void)
2981 {
2982 	struct spdk_blob_store *bs;
2983 	struct spdk_bs_dev *dev;
2984 	struct spdk_bs_super_block super_block;
2985 	struct spdk_bs_opts opts;
2986 
2987 	dev = init_dev();
2988 	spdk_bs_opts_init(&opts, sizeof(opts));
2989 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2990 
2991 	super_block.version = 3;
2992 	memcpy(super_block.signature, "SPDKBLOB", sizeof(super_block.signature));
2993 	super_block.length = 0x1000;
2994 	super_block.clean = 0;
2995 	super_block.super_blob = 0xFFFFFFFFFFFFFFFF;
2996 	super_block.cluster_size = BLOCKLEN;
2997 	super_block.used_page_mask_start = 0x01;
2998 	super_block.used_page_mask_len = 0x01;
2999 	super_block.used_cluster_mask_start = 0x02;
3000 	super_block.used_cluster_mask_len = 0x01;
3001 	super_block.used_blobid_mask_start = 0x03;
3002 	super_block.used_blobid_mask_len = 0x01;
3003 	super_block.md_start = 0x04;
3004 	super_block.md_len = 0x40;
3005 	memset(super_block.bstype.bstype, 0, sizeof(super_block.bstype.bstype));
3006 	super_block.size = dev->blockcnt * dev->blocklen;
3007 	super_block.io_unit_size = 0x1000;
3008 	memset(super_block.reserved, 0, 4000);
3009 	super_block.crc = blob_md_page_calc_crc(&super_block);
3010 	memcpy(g_dev_buffer, &super_block, sizeof(struct spdk_bs_super_block));
3011 
3012 	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
3013 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
3014 	poll_threads();
3015 	CU_ASSERT(g_bserrno == 0);
3016 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3017 	bs = g_bs;
3018 	CU_ASSERT(bs->num_free_clusters == bs->total_clusters - (super_block.md_start +
3019 			super_block.md_len));
3020 
3021 	spdk_bs_unload(bs, bs_op_complete, NULL);
3022 	poll_threads();
3023 	CU_ASSERT(g_bserrno == 0);
3024 	g_bs = NULL;
3025 }
3026 
/*
 * Helper for bs_grow_live(): create a blobstore on the default-sized dev,
 * then set the dev size to new_blockcnt blocks and grow the blobstore live.
 * Verifies the data-cluster count and the on-disk metadata (super block and
 * used_cluster mask) right after the grow, after unload, and after a fresh
 * load; also checks that a no-op grow (no size change) succeeds.
 */
static void
bs_grow_live_size(uint64_t new_blockcnt)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct spdk_bs_super_block super_block;
	struct spdk_bs_opts opts;
	struct spdk_bs_md_mask mask;
	uint64_t bdev_size;
	uint64_t total_data_clusters;

	/*
	 * Further down the test the dev size will be larger than the g_dev_buffer size,
	 * so we set clear_method to NONE, or the blobstore will try to clear the dev and
	 * will write beyond the end of g_dev_buffer.
	 */
	dev = init_dev();
	spdk_bs_opts_init(&opts, sizeof(opts));
	opts.clear_method = BS_CLEAR_WITH_NONE;
	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;
	/* Default 64M dev with 1MiB clusters, minus one cluster for metadata */
	CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == 63);

	/*
	 * Set the dev size according to the new_blockcnt,
	 * then the blobstore will adjust the metadata according to the new size.
	 */
	dev->blockcnt = new_blockcnt;
	bdev_size = dev->blockcnt * dev->blocklen;
	spdk_bs_grow_live(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	total_data_clusters = spdk_bs_total_data_cluster_count(bs);
	/* One cluster of 1MiB size is used for metadata */
	CU_ASSERT(total_data_clusters == (bdev_size / (1 * 1024 * 1024)) - 1);

	/* Make sure the super block is updated. */
	memcpy(&super_block, g_dev_buffer, sizeof(struct spdk_bs_super_block));
	CU_ASSERT(super_block.size == bdev_size);
	CU_ASSERT(super_block.clean == 0);
	/* The used_cluster mask is not written out until first spdk_bs_unload,
	 * so the on-disk mask region is still all zeroes here. */
	memcpy(&mask, g_dev_buffer + super_block.used_cluster_mask_start * BLOCKLEN,
	       sizeof(struct spdk_bs_md_mask));
	CU_ASSERT(mask.type == 0);
	CU_ASSERT(mask.length == 0);

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;

	/* Make sure all metadata is correct, super block and used_cluster mask. */
	memcpy(&super_block, g_dev_buffer, sizeof(struct spdk_bs_super_block));
	CU_ASSERT(super_block.size == bdev_size);
	CU_ASSERT(super_block.clean == 1);
	memcpy(&mask, g_dev_buffer + super_block.used_cluster_mask_start * BLOCKLEN,
	       sizeof(struct spdk_bs_md_mask));
	CU_ASSERT(mask.type == SPDK_MD_MASK_TYPE_USED_CLUSTERS);
	CU_ASSERT(mask.length == bdev_size / (1 * 1024 * 1024));

	/* Load blobstore and check the cluster counts again. */
	dev = init_dev();
	dev->blockcnt = new_blockcnt;
	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	CU_ASSERT(super_block.clean == 1);
	bs = g_bs;
	CU_ASSERT(total_data_clusters == spdk_bs_total_data_cluster_count(bs));

	/* Perform grow without change in size, expected pass. */
	spdk_bs_grow_live(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(total_data_clusters == spdk_bs_total_data_cluster_count(bs));
	memcpy(&super_block, g_dev_buffer, sizeof(struct spdk_bs_super_block));
	CU_ASSERT(super_block.size == bdev_size);
	CU_ASSERT(super_block.clean == 1);

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
}
3115 
3116 static void
3117 bs_grow_live(void)
3118 {
3119 	/* No change expected */
3120 	bs_grow_live_size(DEV_BUFFER_BLOCKCNT);
3121 
3122 	/* Size slightly increased, but not enough to increase cluster count */
3123 	bs_grow_live_size(DEV_BUFFER_BLOCKCNT + 1);
3124 
3125 	/* Size doubled, increasing the cluster count */
3126 	bs_grow_live_size(DEV_BUFFER_BLOCKCNT * 2);
3127 }
3128 
/*
 * Verify that spdk_bs_grow_live() rejects invalid size changes without
 * modifying the blobstore: shrinking the dev fails with -EILSEQ (the super
 * block no longer validates against the smaller dev), and growing beyond
 * what the single reserved used_cluster mask page can track fails with
 * -ENOSPC.  Afterwards the on-disk metadata must be unchanged.
 */
static void
bs_grow_live_no_space(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct spdk_bs_super_block super_block;
	struct spdk_bs_opts opts;
	struct spdk_bs_md_mask mask;
	uint64_t bdev_size_init;
	uint64_t total_data_clusters, max_clusters;

	/*
	 * Further down the test the dev size will be larger than the g_dev_buffer size,
	 * so we set clear_method to NONE, or the blobstore will try to clear the dev and
	 * will write beyond the end of g_dev_buffer.
	 */
	dev = init_dev();
	bdev_size_init = dev->blockcnt * dev->blocklen;
	spdk_bs_opts_init(&opts, sizeof(opts));
	opts.clear_method = BS_CLEAR_WITH_NONE;
	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;
	total_data_clusters = spdk_bs_total_data_cluster_count(bs);
	CU_ASSERT(total_data_clusters == 63);

	/*
	 * The default dev size is 64M, here we set the dev size to 32M,
	 * expecting EILSEQ due to super_block validation and no change in blobstore.
	 */
	dev->blockcnt = (32L * 1024L * 1024L) / dev->blocklen;
	spdk_bs_grow_live(bs, bs_op_complete, NULL);
	poll_threads();
	/* This error code comes from bs_super_validate() */
	CU_ASSERT(g_bserrno == -EILSEQ);
	CU_ASSERT(total_data_clusters == spdk_bs_total_data_cluster_count(bs));
	memcpy(&super_block, g_dev_buffer, sizeof(struct spdk_bs_super_block));
	CU_ASSERT(super_block.size == bdev_size_init);

	/*
	 * Blobstore in this test has only space for single md_page for used_clusters,
	 * which fits 1 bit per cluster minus the md header.
	 *
	 * Dev size is increased to exceed the reserved space for the used_cluster_mask
	 * in the metadata, expecting ENOSPC and no change in blobstore.
	 */
	max_clusters = (spdk_bs_get_page_size(bs) - sizeof(struct spdk_bs_md_mask)) * 8;
	max_clusters += 1;
	dev->blockcnt = (max_clusters * spdk_bs_get_cluster_size(bs)) / dev->blocklen;
	spdk_bs_grow_live(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOSPC);
	CU_ASSERT(total_data_clusters == spdk_bs_total_data_cluster_count(bs));
	memcpy(&super_block, g_dev_buffer, sizeof(struct spdk_bs_super_block));
	CU_ASSERT(super_block.size == bdev_size_init);

	/*
	 * No change should have occurred for the duration of the test,
	 * unload blobstore and check metadata.
	 */
	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;

	/* Make sure all metadata is correct, super block and used_cluster mask. */
	memcpy(&super_block, g_dev_buffer, sizeof(struct spdk_bs_super_block));
	CU_ASSERT(super_block.size == bdev_size_init);
	CU_ASSERT(super_block.clean == 1);
	memcpy(&mask, g_dev_buffer + super_block.used_cluster_mask_start * BLOCKLEN,
	       sizeof(struct spdk_bs_md_mask));
	CU_ASSERT(mask.type == SPDK_MD_MASK_TYPE_USED_CLUSTERS);
	CU_ASSERT(mask.length == bdev_size_init / (1 * 1024 * 1024));

	/* Load blobstore and check the cluster counts again. */
	dev = init_dev();
	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;
	CU_ASSERT(total_data_clusters == spdk_bs_total_data_cluster_count(bs));

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
}
3219 
/*
 * Verify offline grow via spdk_bs_grow(): after growing the dev from the
 * default 64M to 128M, the on-disk super block size and used_cluster mask
 * must reflect the new capacity.
 */
static void
bs_test_grow(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct spdk_bs_super_block super_block;
	struct spdk_bs_opts opts;
	struct spdk_bs_md_mask mask;
	uint64_t bdev_size;

	dev = init_dev();
	bdev_size = dev->blockcnt * dev->blocklen;
	spdk_bs_opts_init(&opts, sizeof(opts));
	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;

	/*
	 * To make sure all the metadata are updated to the disk,
	 * we check the g_dev_buffer after spdk_bs_unload.
	 */
	memcpy(&super_block, g_dev_buffer, sizeof(struct spdk_bs_super_block));
	CU_ASSERT(super_block.size == bdev_size);

	/*
	 * Make sure the used_cluster mask is correct.
	 */
	memcpy(&mask, g_dev_buffer + super_block.used_cluster_mask_start * BLOCKLEN,
	       sizeof(struct spdk_bs_md_mask));
	CU_ASSERT(mask.type == SPDK_MD_MASK_TYPE_USED_CLUSTERS);
	CU_ASSERT(mask.length == bdev_size / (1 * 1024 * 1024));

	/*
	 * The default dev size is 64M, here we set the dev size to 128M,
	 * then the blobstore will adjust the metadata according to the new size.
	 * The dev size is larger than the g_dev_buffer size, so we set clear_method
	 * to NONE, or the blobstore will try to clear the dev and will write beyond
	 * the end of g_dev_buffer.
	 */
	dev = init_dev();
	dev->blockcnt = (128L * 1024L * 1024L) / dev->blocklen;
	bdev_size = dev->blockcnt * dev->blocklen;
	spdk_bs_opts_init(&opts, sizeof(opts));
	opts.clear_method = BS_CLEAR_WITH_NONE;
	spdk_bs_grow(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	/*
	 * After spdk_bs_grow, all metadata are updated to the disk.
	 * So we can check g_dev_buffer now.
	 */
	memcpy(&super_block, g_dev_buffer, sizeof(struct spdk_bs_super_block));
	CU_ASSERT(super_block.size == bdev_size);

	/*
	 * Make sure the used_cluster mask has been updated according to the bdev size
	 */
	memcpy(&mask, g_dev_buffer + super_block.used_cluster_mask_start * BLOCKLEN,
	       sizeof(struct spdk_bs_md_mask));
	CU_ASSERT(mask.type == SPDK_MD_MASK_TYPE_USED_CLUSTERS);
	CU_ASSERT(mask.length == bdev_size / (1 * 1024 * 1024));

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
}
3297 
/*
 * Verify unload semantics: unloading a blobstore that still has an open
 * blob must fail with -EBUSY and leave the blobstore intact; once the blob
 * is closed the unload can succeed.
 */
static void
bs_unload(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;

	/* Create a blob and open it. */
	blob = ut_blob_create_and_open(bs, NULL);

	/* Try to unload blobstore, should fail with open blob */
	g_bserrno = -1;
	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EBUSY);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);

	/* Close the blob, then successfully unload blobstore.
	 * NOTE(review): no second spdk_bs_unload() appears here — the
	 * successful unload presumably happens in the suite teardown;
	 * confirm against the test suite setup. */
	g_bserrno = -1;
	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
}
3323 
/*
 * Create a blobstore with a cluster size different than the default, and ensure it is
 *  persisted.  Also verify that invalid cluster sizes (zero, equal to the
 *  blobstore page size, or below it) are rejected at init time.
 */
static void
bs_cluster_sz(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct spdk_bs_opts opts;
	uint32_t cluster_sz;

	/* Set cluster size to zero */
	dev = init_dev();
	spdk_bs_opts_init(&opts, sizeof(opts));
	opts.cluster_sz = 0;

	/* Initialize a new blob store */
	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);
	SPDK_CU_ASSERT_FATAL(g_bs == NULL);

	/*
	 * Set cluster size to blobstore page size,
	 * to work it is required to be at least twice the blobstore page size.
	 * Note this case fails with -ENOMEM rather than -EINVAL.
	 */
	dev = init_dev();
	spdk_bs_opts_init(&opts, sizeof(opts));
	opts.cluster_sz = SPDK_BS_PAGE_SIZE;

	/* Initialize a new blob store */
	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOMEM);
	SPDK_CU_ASSERT_FATAL(g_bs == NULL);

	/*
	 * Set cluster size to lower than page size,
	 * to work it is required to be at least twice the blobstore page size.
	 */
	dev = init_dev();
	spdk_bs_opts_init(&opts, sizeof(opts));
	opts.cluster_sz = SPDK_BS_PAGE_SIZE - 1;

	/* Initialize a new blob store */
	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);
	SPDK_CU_ASSERT_FATAL(g_bs == NULL);

	/* Set cluster size to twice the default */
	dev = init_dev();
	spdk_bs_opts_init(&opts, sizeof(opts));
	opts.cluster_sz *= 2;
	cluster_sz = opts.cluster_sz;

	/* Initialize a new blob store */
	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz);

	/* Reload and make sure the non-default cluster size survived */
	ut_bs_reload(&bs, &opts);

	CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz);

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
}
3399 
/*
 * Create a blobstore, reload it and ensure total usable cluster count
 *  stays the same.  Creating and resizing blobs must not change the
 *  total either — only the free count may move.
 */
static void
bs_usable_clusters(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	uint32_t clusters;
	int i;


	clusters = spdk_bs_total_data_cluster_count(bs);

	ut_bs_reload(&bs, NULL);

	CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters);

	/* Create and resize blobs to make sure that usable cluster count won't change */
	for (i = 0; i < 4; i++) {
		g_bserrno = -1;
		g_blobid = SPDK_BLOBID_INVALID;
		blob = ut_blob_create_and_open(bs, NULL);

		spdk_blob_resize(blob, 10, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);

		g_bserrno = -1;
		spdk_blob_close(blob, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);

		CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters);
	}

	/* Reload the blob store to make sure that nothing changed */
	ut_bs_reload(&bs, NULL);

	CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters);
}
3442 
/*
 * Test resizing of the metadata blob.  This requires creating enough blobs
 *  so that one cluster is not enough to fit the metadata for those blobs.
 *  To induce this condition to happen more quickly, we reduce the cluster
 *  size to 16KB, which means only 4 4KB blob metadata pages can fit.
 *  All blobs must still be reachable after a reload.
 */
static void
bs_resize_md(void)
{
	struct spdk_blob_store *bs;
	const int CLUSTER_PAGE_COUNT = 4;
	const int NUM_BLOBS = CLUSTER_PAGE_COUNT * 4;
	struct spdk_bs_dev *dev;
	struct spdk_bs_opts opts;
	struct spdk_blob *blob;
	struct spdk_blob_opts blob_opts;
	uint32_t cluster_sz;
	spdk_blob_id blobids[NUM_BLOBS];
	int i;


	dev = init_dev();
	spdk_bs_opts_init(&opts, sizeof(opts));
	opts.cluster_sz = CLUSTER_PAGE_COUNT * BLOCKLEN;
	cluster_sz = opts.cluster_sz;

	/* Initialize a new blob store */
	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz);

	ut_spdk_blob_opts_init(&blob_opts);

	/* Create more blobs than fit in one metadata cluster */
	for (i = 0; i < NUM_BLOBS; i++) {
		g_bserrno = -1;
		g_blobid = SPDK_BLOBID_INVALID;
		spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(g_blobid !=  SPDK_BLOBID_INVALID);
		blobids[i] = g_blobid;
	}

	ut_bs_reload(&bs, &opts);

	CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz);

	/* Every blob created before the reload must still open cleanly */
	for (i = 0; i < NUM_BLOBS; i++) {
		g_bserrno = -1;
		g_blob = NULL;
		spdk_bs_open_blob(bs, blobids[i], blob_op_with_handle_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(g_blob !=  NULL);
		blob = g_blob;
		g_bserrno = -1;
		spdk_blob_close(blob, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
	}

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
}
3513 
/*
 * Initialize a blobstore, destroy it, and verify that a subsequent load
 * of the same device fails (the on-disk blobstore no longer exists).
 */
static void
bs_destroy(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;

	/* Initialize a new blob store */
	dev = init_dev();
	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	/* Destroy the blob store */
	g_bserrno = -1;
	spdk_bs_destroy(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Loading an non-existent blob store should fail. */
	g_bs = NULL;
	dev = init_dev();

	g_bserrno = 0;
	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno != 0);
}
3543 
/* Try to hit all of the corner cases associated with serializing
 * a blob to disk: large xattrs that consume metadata pages, and
 * alternating single-cluster resizes that defeat run-length encoding
 * and force extent spill-over onto additional metadata pages.
 */
static void
blob_serialize_test(void)
{
	struct spdk_bs_dev *dev;
	struct spdk_bs_opts opts;
	struct spdk_blob_store *bs;
	spdk_blob_id blobid[2];
	struct spdk_blob *blob[2];
	uint64_t i;
	char *value;
	int rc;

	dev = init_dev();

	/* Initialize a new blobstore with very small clusters */
	spdk_bs_opts_init(&opts, sizeof(opts));
	opts.cluster_sz = dev->blocklen * 8;
	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	/* Create and open two blobs */
	for (i = 0; i < 2; i++) {
		blob[i] = ut_blob_create_and_open(bs, NULL);
		blobid[i] = spdk_blob_get_id(blob[i]);

		/* Set a fairly large xattr on both blobs to eat up
		 * metadata space
		 */
		value = calloc(dev->blocklen - 64, sizeof(char));
		SPDK_CU_ASSERT_FATAL(value != NULL);
		/* calloc zeroed the buffer; fill the first half with the blob index */
		memset(value, i, dev->blocklen / 2);
		rc = spdk_blob_set_xattr(blob[i], "name", value, dev->blocklen - 64);
		CU_ASSERT(rc == 0);
		free(value);
	}

	/* Resize the blobs, alternating 1 cluster at a time.
	 * This thwarts run length encoding and will cause spill
	 * over of the extents.
	 */
	for (i = 0; i < 6; i++) {
		spdk_blob_resize(blob[i % 2], (i / 2) + 1, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
	}

	for (i = 0; i < 2; i++) {
		spdk_blob_sync_md(blob[i], blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
	}

	/* Close the blobs */
	for (i = 0; i < 2; i++) {
		spdk_blob_close(blob[i], blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
	}

	ut_bs_reload(&bs, &opts);

	/* After the reload each blob must still report 3 clusters */
	for (i = 0; i < 2; i++) {
		blob[i] = NULL;

		spdk_bs_open_blob(bs, blobid[i], blob_op_with_handle_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(g_blob != NULL);
		blob[i] = g_blob;

		CU_ASSERT(spdk_blob_get_num_clusters(blob[i]) == 3);

		spdk_blob_close(blob[i], blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
	}

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
}
3632 
/*
 * Corrupt the CRC of a blob's metadata page in the device buffer and
 * verify that both opening and deleting that blob fail with -EINVAL.
 */
static void
blob_crc(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	spdk_blob_id blobid;
	uint32_t page_num;
	int index;
	struct spdk_blob_md_page *page;

	blob = ut_blob_create_and_open(bs, NULL);
	blobid = spdk_blob_get_id(blob);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Locate the blob's metadata page on the dev and zero its CRC */
	page_num = bs_blobid_to_page(blobid);
	index = DEV_BUFFER_BLOCKLEN * (bs->md_start + page_num);
	page = (struct spdk_blob_md_page *)&g_dev_buffer[index];
	page->crc = 0;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);
	CU_ASSERT(g_blob == NULL);
	g_bserrno = 0;

	/* Delete must also reject the blob with the corrupted md page */
	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);
}
3665 
/*
 * Zero out the super block CRC on the device and verify that loading the
 * blobstore fails with -EILSEQ.
 */
static void
super_block_crc(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct spdk_bs_super_block *super_block;

	dev = init_dev();
	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;

	/* Corrupt the CRC field of the on-disk super block */
	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
	super_block->crc = 0;
	dev = init_dev();

	/* Load an existing blob store; CRC mismatch must yield -EILSEQ */
	g_bserrno = 0;
	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EILSEQ);
}
3695 
/* For the blob dirty shutdown test case we run the following sub-test cases:
 * 1 Initialize a new blob store and create 1 super blob with some xattrs, then
 *   dirty shutdown, reload the blob store and verify the xattrs.
 * 2 Resize the blob from 10 clusters to 20 clusters, then dirty shutdown,
 *   reload the blob store and verify the cluster count.
 * 3 Create a second blob, then dirty shutdown, reload the blob store
 *   and verify the second blob.
 * 4 Delete the second blob, then dirty shutdown, reload the blob store
 *   and verify the second blob is invalid.
 * 5 Create the second blob again and also create a third blob, corrupt the
 *   md of the second blob so that it becomes invalid, then dirty shutdown,
 *   reload the blob store and verify that the second blob is invalid while
 *   the third blob is still correct.
 */
static void
blob_dirty_shutdown(void)
{
	int rc;
	int index;
	struct spdk_blob_store *bs = g_bs;
	spdk_blob_id blobid1, blobid2, blobid3;
	struct spdk_blob *blob = g_blob;
	uint64_t length;
	uint64_t free_clusters;
	const void *value;
	size_t value_len;
	uint32_t page_num;
	struct spdk_blob_md_page *page;
	struct spdk_blob_opts blob_opts;

	/* Step 1: take the pre-created blob as the "first" blob, give it xattrs
	 * spanning multiple metadata pages, and make it the super blob. */
	/* Create first blob */
	blobid1 = spdk_blob_get_id(blob);

	/* Set some xattrs */
	rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
	CU_ASSERT(rc == 0);

	length = 2345;
	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
	CU_ASSERT(rc == 0);

	/* Put xattr that fits exactly single page.
	 * This results in adding additional pages to MD.
	 * First is flags and smaller xattr, second the large xattr,
	 * third are just the extents.
	 */
	size_t xattr_length = 4072 - sizeof(struct spdk_blob_md_descriptor_xattr) -
			      strlen("large_xattr");
	char *xattr = calloc(xattr_length, sizeof(char));
	SPDK_CU_ASSERT_FATAL(xattr != NULL);
	rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
	free(xattr);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	/* Resize the blob */
	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Set the blob as the super blob */
	spdk_bs_set_super(bs, blobid1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	free_clusters = spdk_bs_free_cluster_count(bs);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	blob = NULL;
	g_blob = NULL;
	g_blobid = SPDK_BLOBID_INVALID;

	/* Dirty shutdown: reload the blobstore without a clean unload. */
	ut_bs_dirty_load(&bs, NULL);

	/* Get the super blob */
	spdk_bs_get_super(bs, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(blobid1 == g_blobid);

	spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;

	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));

	/* Get the xattrs */
	value = NULL;
	rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(*(uint64_t *)value == length);
	CU_ASSERT(value_len == 8);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);

	/* Step 2: resize the first blob, dirty shutdown again, and verify the
	 * new size survived the unclean reload. */
	/* Resize the blob */
	spdk_blob_resize(blob, 20, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	free_clusters = spdk_bs_free_cluster_count(bs);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	blob = NULL;
	g_blob = NULL;
	g_blobid = SPDK_BLOBID_INVALID;

	ut_bs_dirty_load(&bs, NULL);

	spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 20);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	blob = NULL;
	g_blob = NULL;
	g_blobid = SPDK_BLOBID_INVALID;

	/* Step 3: create a second blob, dirty shutdown, and verify it (xattrs,
	 * size and cluster accounting) after the reload. */
	/* Create second blob */
	blob = ut_blob_create_and_open(bs, NULL);
	blobid2 = spdk_blob_get_id(blob);

	/* Set some xattrs */
	rc = spdk_blob_set_xattr(blob, "name", "log1.txt", strlen("log1.txt") + 1);
	CU_ASSERT(rc == 0);

	length = 5432;
	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
	CU_ASSERT(rc == 0);

	/* Resize the blob */
	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	free_clusters = spdk_bs_free_cluster_count(bs);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	blob = NULL;
	g_blob = NULL;
	g_blobid = SPDK_BLOBID_INVALID;

	ut_bs_dirty_load(&bs, NULL);

	spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;

	/* Get the xattrs */
	value = NULL;
	rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(*(uint64_t *)value == length);
	CU_ASSERT(value_len == 8);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));

	/* Step 4: delete the second blob; after a dirty shutdown it must not
	 * open anymore, while the first blob remains intact. */
	ut_blob_close_and_delete(bs, blob);

	free_clusters = spdk_bs_free_cluster_count(bs);

	ut_bs_dirty_load(&bs, NULL);

	/* The deleted blob must fail to open. */
	spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno != 0);
	CU_ASSERT(g_blob == NULL);

	/* The first blob is unaffected. */
	spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	ut_bs_reload(&bs, NULL);

	/* Step 5: recreate the second blob and create a third one, corrupt the
	 * second blob's metadata on "disk", then dirty shutdown: the second
	 * blob must fail to open while the third one opens normally. */
	/* Create second blob */
	ut_spdk_blob_opts_init(&blob_opts);
	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid2 = g_blobid;

	/* Create third blob */
	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid3 = g_blobid;

	spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;

	/* Set some xattrs for second blob */
	rc = spdk_blob_set_xattr(blob, "name", "log1.txt", strlen("log1.txt") + 1);
	CU_ASSERT(rc == 0);

	length = 5432;
	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
	CU_ASSERT(rc == 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	blob = NULL;
	g_blob = NULL;
	g_blobid = SPDK_BLOBID_INVALID;

	spdk_bs_open_blob(bs, blobid3, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;

	/* Set some xattrs for third blob */
	rc = spdk_blob_set_xattr(blob, "name", "log2.txt", strlen("log2.txt") + 1);
	CU_ASSERT(rc == 0);

	length = 5432;
	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
	CU_ASSERT(rc == 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	blob = NULL;
	g_blob = NULL;
	g_blobid = SPDK_BLOBID_INVALID;

	/* Mark second blob as invalid: bump the sequence number of its first
	 * metadata page directly in the backing device buffer and re-compute
	 * the page CRC, so recovery sees inconsistent (but CRC-valid) md. */
	page_num = bs_blobid_to_page(blobid2);

	index = DEV_BUFFER_BLOCKLEN * (bs->md_start + page_num);
	page = (struct spdk_blob_md_page *)&g_dev_buffer[index];
	page->sequence_num = 1;
	page->crc = blob_md_page_calc_crc(page);

	free_clusters = spdk_bs_free_cluster_count(bs);

	ut_bs_dirty_load(&bs, NULL);

	/* Corrupted blob must fail to open... */
	spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno != 0);
	CU_ASSERT(g_blob == NULL);

	/* ...while the third blob opens normally. */
	spdk_bs_open_blob(bs, blobid3, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;

	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
}
3974 
/*
 * Verify handling of unknown per-blob flag bits across a blobstore reload:
 * - an unknown bit in invalid_flags must make the blob fail to open,
 * - an unknown bit in data_ro_flags must open the blob as both data and
 *   md read-only,
 * - an unknown bit in md_ro_flags must open the blob as md read-only only,
 *   and syncing md of such a blob must still succeed.
 */
static void
blob_flags(void)
{
	struct spdk_blob_store *bs = g_bs;
	spdk_blob_id blobid_invalid, blobid_data_ro, blobid_md_ro;
	struct spdk_blob *blob_invalid, *blob_data_ro, *blob_md_ro;
	struct spdk_blob_opts blob_opts;
	int rc;

	/* Create three blobs - one each for testing invalid, data_ro and md_ro flags. */
	blob_invalid = ut_blob_create_and_open(bs, NULL);
	blobid_invalid = spdk_blob_get_id(blob_invalid);

	blob_data_ro = ut_blob_create_and_open(bs, NULL);
	blobid_data_ro = spdk_blob_get_id(blob_data_ro);

	/* The clear method is stored in the md_ro flags. */
	ut_spdk_blob_opts_init(&blob_opts);
	blob_opts.clear_method = BLOB_CLEAR_WITH_WRITE_ZEROES;
	blob_md_ro = ut_blob_create_and_open(bs, &blob_opts);
	blobid_md_ro = spdk_blob_get_id(blob_md_ro);
	CU_ASSERT((blob_md_ro->md_ro_flags & SPDK_BLOB_MD_RO_FLAGS_MASK) == BLOB_CLEAR_WITH_WRITE_ZEROES);

	/* Change the size of blob_data_ro to check if flags are serialized
	 * when blob has non zero number of extents */
	spdk_blob_resize(blob_data_ro, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Set the xattr to check if flags are serialized
	 * when blob has non zero number of xattrs */
	rc = spdk_blob_set_xattr(blob_md_ro, "name", "log.txt", strlen("log.txt") + 1);
	CU_ASSERT(rc == 0);

	/* Set high flag bits that no current code understands, one per blob. */
	blob_invalid->invalid_flags = (1ULL << 63);
	blob_invalid->state = SPDK_BLOB_STATE_DIRTY;
	blob_data_ro->data_ro_flags = (1ULL << 62);
	blob_data_ro->state = SPDK_BLOB_STATE_DIRTY;
	blob_md_ro->md_ro_flags = (1ULL << 61);
	blob_md_ro->state = SPDK_BLOB_STATE_DIRTY;

	/* Persist the unknown flags to disk. */
	g_bserrno = -1;
	spdk_blob_sync_md(blob_invalid, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bserrno = -1;
	spdk_blob_sync_md(blob_data_ro, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bserrno = -1;
	spdk_blob_sync_md(blob_md_ro, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	g_bserrno = -1;
	spdk_blob_close(blob_invalid, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	blob_invalid = NULL;
	g_bserrno = -1;
	spdk_blob_close(blob_data_ro, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	blob_data_ro = NULL;
	g_bserrno = -1;
	spdk_blob_close(blob_md_ro, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	blob_md_ro = NULL;

	g_blob = NULL;
	g_blobid = SPDK_BLOBID_INVALID;

	ut_bs_reload(&bs, NULL);

	/* An unknown invalid flag must prevent the blob from opening at all. */
	g_blob = NULL;
	g_bserrno = 0;
	spdk_bs_open_blob(bs, blobid_invalid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno != 0);
	CU_ASSERT(g_blob == NULL);

	g_blob = NULL;
	g_bserrno = -1;
	spdk_bs_open_blob(bs, blobid_data_ro, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob_data_ro = g_blob;
	/* If an unknown data_ro flag was found, the blob should be marked both data and md read-only. */
	CU_ASSERT(blob_data_ro->data_ro == true);
	CU_ASSERT(blob_data_ro->md_ro == true);
	CU_ASSERT(spdk_blob_get_num_clusters(blob_data_ro) == 10);

	/* An unknown md_ro flag makes only the metadata read-only. */
	g_blob = NULL;
	g_bserrno = -1;
	spdk_bs_open_blob(bs, blobid_md_ro, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob_md_ro = g_blob;
	CU_ASSERT(blob_md_ro->data_ro == false);
	CU_ASSERT(blob_md_ro->md_ro == true);

	/* Syncing md of an md_ro blob must still succeed (it is a no-op). */
	g_bserrno = -1;
	spdk_blob_sync_md(blob_md_ro, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	ut_blob_close_and_delete(bs, blob_data_ro);
	ut_blob_close_and_delete(bs, blob_md_ro);
}
4086 
/*
 * Verify that the on-disk blobstore version is preserved by load/unload.
 * The super block is manually downgraded to version 2 (which has no used
 * blobid mask) and the mask region is zeroed; the test then checks that the
 * blobstore loads, reconstructs blob ids, and never rewrites the version or
 * resurrects the mask fields on unload.
 */
static void
bs_version(void)
{
	struct spdk_bs_super_block *super;
	struct spdk_blob_store *bs = g_bs;
	struct spdk_bs_dev *dev;
	struct spdk_blob *blob;
	struct spdk_blob_opts blob_opts;
	spdk_blob_id blobid;

	/* Unload the blob store */
	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;

	/*
	 * Change the bs version on disk.  This will allow us to
	 *  test that the version does not get modified automatically
	 *  when loading and unloading the blobstore.
	 */
	super = (struct spdk_bs_super_block *)&g_dev_buffer[0];
	CU_ASSERT(super->version == SPDK_BS_VERSION);
	CU_ASSERT(super->clean == 1);
	super->version = 2;
	/*
	 * Version 2 metadata does not have a used blobid mask, so clear
	 *  those fields in the super block and zero the corresponding
	 *  region on "disk".  We will use this to ensure blob IDs are
	 *  correctly reconstructed.
	 */
	memset(&g_dev_buffer[super->used_blobid_mask_start * SPDK_BS_PAGE_SIZE], 0,
	       super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE);
	super->used_blobid_mask_start = 0;
	super->used_blobid_mask_len = 0;
	/* Re-CRC the super block so the edit is not rejected as corruption. */
	super->crc = blob_md_page_calc_crc(super);

	/* Load an existing blob store */
	dev = init_dev();
	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	CU_ASSERT(super->clean == 1);
	bs = g_bs;

	/*
	 * Create a blob - just to make sure that when we unload it
	 *  results in writing the super block (since metadata pages
	 *  were allocated.
	 */
	ut_spdk_blob_opts_init(&blob_opts);
	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	/* Unload the blob store */
	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
	/* Unload must not bump the version or re-add the blobid mask. */
	CU_ASSERT(super->version == 2);
	CU_ASSERT(super->used_blobid_mask_start == 0);
	CU_ASSERT(super->used_blobid_mask_len == 0);

	dev = init_dev();
	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	/* The blob created above must be reconstructed without the mask. */
	g_blob = NULL;
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;

	ut_blob_close_and_delete(bs, blob);

	CU_ASSERT(super->version == 2);
	CU_ASSERT(super->used_blobid_mask_start == 0);
	CU_ASSERT(super->used_blobid_mask_len == 0);
}
4174 
4175 static void
4176 blob_set_xattrs_test(void)
4177 {
4178 	struct spdk_blob_store *bs = g_bs;
4179 	struct spdk_blob *blob;
4180 	struct spdk_blob_opts opts;
4181 	const void *value;
4182 	size_t value_len;
4183 	char *xattr;
4184 	size_t xattr_length;
4185 	int rc;
4186 
4187 	/* Create blob with extra attributes */
4188 	ut_spdk_blob_opts_init(&opts);
4189 
4190 	opts.xattrs.names = g_xattr_names;
4191 	opts.xattrs.get_value = _get_xattr_value;
4192 	opts.xattrs.count = 3;
4193 	opts.xattrs.ctx = &g_ctx;
4194 
4195 	blob = ut_blob_create_and_open(bs, &opts);
4196 
4197 	/* Get the xattrs */
4198 	value = NULL;
4199 
4200 	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len);
4201 	CU_ASSERT(rc == 0);
4202 	SPDK_CU_ASSERT_FATAL(value != NULL);
4203 	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
4204 	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);
4205 
4206 	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len);
4207 	CU_ASSERT(rc == 0);
4208 	SPDK_CU_ASSERT_FATAL(value != NULL);
4209 	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
4210 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);
4211 
4212 	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len);
4213 	CU_ASSERT(rc == 0);
4214 	SPDK_CU_ASSERT_FATAL(value != NULL);
4215 	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
4216 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);
4217 
4218 	/* Try to get non existing attribute */
4219 
4220 	rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len);
4221 	CU_ASSERT(rc == -ENOENT);
4222 
4223 	/* Try xattr exceeding maximum length of descriptor in single page */
4224 	xattr_length = SPDK_BS_MAX_DESC_SIZE - sizeof(struct spdk_blob_md_descriptor_xattr) -
4225 		       strlen("large_xattr") + 1;
4226 	xattr = calloc(xattr_length, sizeof(char));
4227 	SPDK_CU_ASSERT_FATAL(xattr != NULL);
4228 	rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
4229 	free(xattr);
4230 	SPDK_CU_ASSERT_FATAL(rc == -ENOMEM);
4231 
4232 	spdk_blob_close(blob, blob_op_complete, NULL);
4233 	poll_threads();
4234 	CU_ASSERT(g_bserrno == 0);
4235 	blob = NULL;
4236 	g_blob = NULL;
4237 	g_blobid = SPDK_BLOBID_INVALID;
4238 
4239 	/* NULL callback */
4240 	ut_spdk_blob_opts_init(&opts);
4241 	opts.xattrs.names = g_xattr_names;
4242 	opts.xattrs.get_value = NULL;
4243 	opts.xattrs.count = 1;
4244 	opts.xattrs.ctx = &g_ctx;
4245 
4246 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
4247 	poll_threads();
4248 	CU_ASSERT(g_bserrno == -EINVAL);
4249 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
4250 
4251 	/* NULL values */
4252 	ut_spdk_blob_opts_init(&opts);
4253 	opts.xattrs.names = g_xattr_names;
4254 	opts.xattrs.get_value = _get_xattr_value_null;
4255 	opts.xattrs.count = 1;
4256 	opts.xattrs.ctx = NULL;
4257 
4258 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
4259 	poll_threads();
4260 	CU_ASSERT(g_bserrno == -EINVAL);
4261 }
4262 
4263 static void
4264 blob_thin_prov_alloc(void)
4265 {
4266 	struct spdk_blob_store *bs = g_bs;
4267 	struct spdk_blob *blob;
4268 	struct spdk_blob_opts opts;
4269 	spdk_blob_id blobid;
4270 	uint64_t free_clusters;
4271 
4272 	free_clusters = spdk_bs_free_cluster_count(bs);
4273 
4274 	/* Set blob as thin provisioned */
4275 	ut_spdk_blob_opts_init(&opts);
4276 	opts.thin_provision = true;
4277 
4278 	blob = ut_blob_create_and_open(bs, &opts);
4279 	blobid = spdk_blob_get_id(blob);
4280 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4281 
4282 	CU_ASSERT(blob->active.num_clusters == 0);
4283 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
4284 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);
4285 
4286 	/* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */
4287 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
4288 	poll_threads();
4289 	CU_ASSERT(g_bserrno == 0);
4290 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4291 	CU_ASSERT(blob->active.num_clusters == 5);
4292 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
4293 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);
4294 
4295 	/* Grow it to 1TB - still unallocated */
4296 	spdk_blob_resize(blob, 262144, blob_op_complete, NULL);
4297 	poll_threads();
4298 	CU_ASSERT(g_bserrno == 0);
4299 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4300 	CU_ASSERT(blob->active.num_clusters == 262144);
4301 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 262144);
4302 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);
4303 
4304 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
4305 	poll_threads();
4306 	CU_ASSERT(g_bserrno == 0);
4307 	/* Sync must not change anything */
4308 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4309 	CU_ASSERT(blob->active.num_clusters == 262144);
4310 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 262144);
4311 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);
4312 	/* Since clusters are not allocated,
4313 	 * number of metadata pages is expected to be minimal.
4314 	 */
4315 	CU_ASSERT(blob->active.num_pages == 1);
4316 
4317 	/* Shrink the blob to 3 clusters - still unallocated */
4318 	spdk_blob_resize(blob, 3, blob_op_complete, NULL);
4319 	poll_threads();
4320 	CU_ASSERT(g_bserrno == 0);
4321 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4322 	CU_ASSERT(blob->active.num_clusters == 3);
4323 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 3);
4324 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);
4325 
4326 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
4327 	poll_threads();
4328 	CU_ASSERT(g_bserrno == 0);
4329 	/* Sync must not change anything */
4330 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4331 	CU_ASSERT(blob->active.num_clusters == 3);
4332 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 3);
4333 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);
4334 
4335 	spdk_blob_close(blob, blob_op_complete, NULL);
4336 	poll_threads();
4337 	CU_ASSERT(g_bserrno == 0);
4338 
4339 	ut_bs_reload(&bs, NULL);
4340 
4341 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
4342 	poll_threads();
4343 	CU_ASSERT(g_bserrno == 0);
4344 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4345 	blob = g_blob;
4346 
4347 	/* Check that clusters allocation and size is still the same */
4348 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4349 	CU_ASSERT(blob->active.num_clusters == 3);
4350 
4351 	ut_blob_close_and_delete(bs, blob);
4352 }
4353 
4354 static void
4355 blob_insert_cluster_msg_test(void)
4356 {
4357 	struct spdk_blob_store *bs = g_bs;
4358 	struct spdk_blob *blob;
4359 	struct spdk_blob_opts opts;
4360 	struct spdk_blob_md_page page = {};
4361 	spdk_blob_id blobid;
4362 	uint64_t free_clusters;
4363 	uint64_t new_cluster = 0;
4364 	uint32_t cluster_num = 3;
4365 	uint32_t extent_page = 0;
4366 
4367 	free_clusters = spdk_bs_free_cluster_count(bs);
4368 
4369 	/* Set blob as thin provisioned */
4370 	ut_spdk_blob_opts_init(&opts);
4371 	opts.thin_provision = true;
4372 	opts.num_clusters = 4;
4373 
4374 	blob = ut_blob_create_and_open(bs, &opts);
4375 	blobid = spdk_blob_get_id(blob);
4376 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4377 
4378 	CU_ASSERT(blob->active.num_clusters == 4);
4379 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 4);
4380 	CU_ASSERT(blob->active.clusters[cluster_num] == 0);
4381 
4382 	/* Specify cluster_num to allocate and new_cluster will be returned to insert on md_thread.
4383 	 * This is to simulate behaviour when cluster is allocated after blob creation.
4384 	 * Such as _spdk_bs_allocate_and_copy_cluster(). */
4385 	spdk_spin_lock(&bs->used_lock);
4386 	bs_allocate_cluster(blob, cluster_num, &new_cluster, &extent_page, false);
4387 	CU_ASSERT(blob->active.clusters[cluster_num] == 0);
4388 	spdk_spin_unlock(&bs->used_lock);
4389 
4390 	blob_insert_cluster_on_md_thread(blob, cluster_num, new_cluster, extent_page, &page,
4391 					 blob_op_complete, NULL);
4392 	poll_threads();
4393 
4394 	CU_ASSERT(blob->active.clusters[cluster_num] != 0);
4395 
4396 	spdk_blob_close(blob, blob_op_complete, NULL);
4397 	poll_threads();
4398 	CU_ASSERT(g_bserrno == 0);
4399 
4400 	ut_bs_reload(&bs, NULL);
4401 
4402 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
4403 	poll_threads();
4404 	CU_ASSERT(g_bserrno == 0);
4405 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4406 	blob = g_blob;
4407 
4408 	CU_ASSERT(blob->active.clusters[cluster_num] != 0);
4409 
4410 	ut_blob_close_and_delete(bs, blob);
4411 }
4412 
/*
 * Thin-provisioned blob read/write: a read from unallocated clusters must
 * return zeroes; two threads writing to the same unallocated cluster race
 * for the allocation and only one cluster ends up allocated; the exact
 * number of bytes written to the device (payload plus metadata pages) is
 * verified for both legacy and extent-table metadata formats.
 */
static void
blob_thin_prov_rw(void)
{
	static const uint8_t zero[10 * BLOCKLEN] = { 0 };
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob, *blob_id0;
	struct spdk_io_channel *channel, *channel_thread1;
	struct spdk_blob_opts opts;
	uint64_t free_clusters;
	uint64_t page_size;
	uint8_t payload_read[10 * BLOCKLEN];
	uint8_t payload_write[10 * BLOCKLEN];
	uint64_t write_bytes;
	uint64_t read_bytes;

	free_clusters = spdk_bs_free_cluster_count(bs);
	page_size = spdk_bs_get_page_size(bs);

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = true;

	/* Create and delete blob at md page 0, so that next md page allocation
	 * for extent will use that. */
	blob_id0 = ut_blob_create_and_open(bs, &opts);
	blob = ut_blob_create_and_open(bs, &opts);
	ut_blob_close_and_delete(bs, blob_id0);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));

	CU_ASSERT(blob->active.num_clusters == 0);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);

	/* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */
	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(blob->active.num_clusters == 5);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);

	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	/* Sync must not change anything */
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(blob->active.num_clusters == 5);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);

	/* Payload should be all zeros from unallocated clusters */
	memset(payload_read, 0xFF, sizeof(payload_read));
	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(zero, payload_read, 10 * BLOCKLEN) == 0);

	/* Snapshot the device byte counters before the racing writes. */
	write_bytes = g_dev_write_bytes;
	read_bytes = g_dev_read_bytes;

	/* Perform write on thread 1. That will allocate cluster on thread 0 via send_msg */
	set_thread(1);
	channel_thread1 = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel_thread1 != NULL);
	memset(payload_write, 0xE5, sizeof(payload_write));
	spdk_blob_io_write(blob, channel_thread1, payload_write, 4, 10, blob_op_complete, NULL);
	CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs));
	/* Perform write on thread 0. That will try to allocate cluster,
	 * but fail due to another thread issuing the cluster allocation first. */
	set_thread(0);
	memset(payload_write, 0xE5, sizeof(payload_write));
	spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
	/* Both in-flight writes have tentatively claimed a cluster each... */
	CU_ASSERT(free_clusters - 2 == spdk_bs_free_cluster_count(bs));
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	/* ...but after completion the losing claim is released again. */
	CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 1);
	/* For thin-provisioned blob we need to write 20 pages plus one page metadata and
	 * read 0 bytes */
	if (g_use_extent_table) {
		/* Add one more page for EXTENT_PAGE write */
		CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 22);
	} else {
		CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 21);
	}
	CU_ASSERT(g_dev_read_bytes - read_bytes == 0);

	/* Read back and verify the written pattern. */
	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_write, payload_read, 10 * BLOCKLEN) == 0);

	ut_blob_close_and_delete(bs, blob);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));

	/* Channels must be freed on the threads that allocated them. */
	set_thread(1);
	spdk_bs_free_io_channel(channel_thread1);
	set_thread(0);
	spdk_bs_free_io_channel(channel);
	poll_threads();
	g_blob = NULL;
	g_blobid = 0;
}
4516 
/*
 * Count the device I/O generated by writes to a thin-provisioned blob.
 * A small (16K) cluster size forces the blob to span multiple extent pages.
 * For each extent page: the first write must cost the payload plus primary
 * md (legacy) or payload plus extent page plus primary md (extent table);
 * a subsequent write within the same extent page costs only payload plus
 * one md page; a cluster-aligned unmap frees the cluster and a rewrite
 * re-allocates it. The blob store is created and torn down locally.
 */
static void
blob_thin_prov_write_count_io(void)
{
	struct spdk_blob_store *bs;
	struct spdk_blob *blob;
	struct spdk_io_channel *ch;
	struct spdk_bs_dev *dev;
	struct spdk_bs_opts bs_opts;
	struct spdk_blob_opts opts;
	uint64_t free_clusters;
	uint64_t page_size;
	uint8_t payload_write[BLOCKLEN];
	uint64_t write_bytes;
	uint64_t read_bytes;
	const uint32_t CLUSTER_SZ = 16384;
	uint32_t pages_per_cluster;
	uint32_t pages_per_extent_page;
	uint32_t i;

	/* Use a very small cluster size for this test.  This ensures we need multiple
	 * extent pages to hold all of the clusters even for relatively small blobs like
	 * we are restricted to for the unit tests (i.e. we don't want to allocate multi-GB
	 * buffers).
	 */
	dev = init_dev();
	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
	bs_opts.cluster_sz = CLUSTER_SZ;

	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	free_clusters = spdk_bs_free_cluster_count(bs);
	page_size = spdk_bs_get_page_size(bs);
	pages_per_cluster = CLUSTER_SZ / page_size;
	pages_per_extent_page = SPDK_EXTENTS_PER_EP * pages_per_cluster;

	ch = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = true;

	blob = ut_blob_create_and_open(bs, &opts);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));

	/* Resize the blob so that it will require 8 extent pages to hold all of
	 * the clusters.
	 */
	g_bserrno = -1;
	spdk_blob_resize(blob, SPDK_EXTENTS_PER_EP * 8, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	g_bserrno = -1;
	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(blob->active.num_clusters == SPDK_EXTENTS_PER_EP * 8);

	/* Each loop iteration exercises one extent page; each iteration nets
	 * two newly allocated clusters (hence the 2 * i terms below). */
	memset(payload_write, 0, sizeof(payload_write));
	for (i = 0; i < 8; i++) {
		write_bytes = g_dev_write_bytes;
		read_bytes = g_dev_read_bytes;

		/* First write into this extent page - allocates a cluster. */
		g_bserrno = -1;
		spdk_blob_io_write(blob, ch, payload_write, pages_per_extent_page * i, 1, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(free_clusters - (2 * i + 1) == spdk_bs_free_cluster_count(bs));

		CU_ASSERT(g_dev_read_bytes == read_bytes);
		if (!g_use_extent_table) {
			/* For legacy metadata, we should have written two pages - one for the
			 * write I/O itself, another for the blob's primary metadata.
			 */
			CU_ASSERT((g_dev_write_bytes - write_bytes) / page_size == 2);
		} else {
			/* For extent table metadata, we should have written three pages - one
			 * for the write I/O, one for the extent page, one for the blob's primary
			 * metadata.
			 */
			CU_ASSERT((g_dev_write_bytes - write_bytes) / page_size == 3);
		}

		/* The write should have synced the metadata already.  Do another sync here
		 * just to confirm.
		 */
		write_bytes = g_dev_write_bytes;
		read_bytes = g_dev_read_bytes;

		g_bserrno = -1;
		spdk_blob_sync_md(blob, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(free_clusters - (2 * i + 1) == spdk_bs_free_cluster_count(bs));
		CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 2 * i + 1);

		CU_ASSERT(g_dev_read_bytes == read_bytes);
		CU_ASSERT(g_dev_write_bytes == write_bytes);

		/* Now write to another unallocated cluster that is part of the same extent page. */
		g_bserrno = -1;
		spdk_blob_io_write(blob, ch, payload_write, pages_per_extent_page * i + pages_per_cluster,
				   1, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(free_clusters - (2 * i + 2) == spdk_bs_free_cluster_count(bs));
		CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 2 * i + 2);

		CU_ASSERT(g_dev_read_bytes == read_bytes);
		/*
		 * For legacy metadata, we should have written the I/O and the primary metadata page.
		 * For extent table metadata, we should have written the I/O and the extent metadata page.
		 */
		CU_ASSERT((g_dev_write_bytes - write_bytes) / page_size == 2);

		/* Send unmap aligned to the whole cluster - should free it up */
		g_bserrno = -1;
		spdk_blob_io_unmap(blob, ch, pages_per_extent_page * i, pages_per_cluster, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(free_clusters - (2 * i + 1) == spdk_bs_free_cluster_count(bs));

		/* Write back to the freed cluster */
		g_bserrno = -1;
		spdk_blob_io_write(blob, ch, payload_write, pages_per_extent_page * i, 1, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(free_clusters - (2 * i + 2) == spdk_bs_free_cluster_count(bs));
	}

	ut_blob_close_and_delete(bs, blob);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));

	spdk_bs_free_io_channel(ch);
	poll_threads();
	g_blob = NULL;
	g_blobid = 0;

	/* This test created its own blobstore; unload it here. */
	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
}
4665 
4666 static void
4667 blob_thin_prov_unmap_cluster(void)
4668 {
4669 	struct spdk_blob_store *bs;
4670 	struct spdk_blob *blob, *snapshot;
4671 	struct spdk_io_channel *ch;
4672 	struct spdk_bs_dev *dev;
4673 	struct spdk_bs_opts bs_opts;
4674 	struct spdk_blob_opts opts;
4675 	uint64_t free_clusters;
4676 	uint64_t page_size;
4677 	uint8_t payload_write[BLOCKLEN];
4678 	uint8_t payload_read[BLOCKLEN];
4679 	const uint32_t CLUSTER_COUNT = 3;
4680 	uint32_t pages_per_cluster;
4681 	spdk_blob_id blobid, snapshotid;
4682 	uint32_t i;
4683 	int err;
4684 
4685 	/* Use a very large cluster size for this test. Check how the unmap/release cluster code path behaves when
4686 	 * clusters are fully used.
4687 	 */
4688 	dev = init_dev();
4689 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
4690 	bs_opts.cluster_sz = dev->blocklen * dev->blockcnt / (CLUSTER_COUNT + 1);
4691 
4692 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
4693 	poll_threads();
4694 	CU_ASSERT(g_bserrno == 0);
4695 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
4696 	bs = g_bs;
4697 
4698 	free_clusters = spdk_bs_free_cluster_count(bs);
4699 	page_size = spdk_bs_get_page_size(bs);
4700 	pages_per_cluster = bs_opts.cluster_sz / page_size;
4701 
4702 	ch = spdk_bs_alloc_io_channel(bs);
4703 	SPDK_CU_ASSERT_FATAL(ch != NULL);
4704 
4705 	ut_spdk_blob_opts_init(&opts);
4706 	opts.thin_provision = true;
4707 
4708 	blob = ut_blob_create_and_open(bs, &opts);
4709 	CU_ASSERT(free_clusters == CLUSTER_COUNT);
4710 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4711 	blobid = spdk_blob_get_id(blob);
4712 
4713 	g_bserrno = -1;
4714 	spdk_blob_resize(blob, CLUSTER_COUNT, blob_op_complete, NULL);
4715 	poll_threads();
4716 	CU_ASSERT(g_bserrno == 0);
4717 
4718 	g_bserrno = -1;
4719 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
4720 	poll_threads();
4721 	CU_ASSERT(g_bserrno == 0);
4722 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4723 	CU_ASSERT(blob->active.num_clusters == CLUSTER_COUNT);
4724 
4725 	/* Fill all clusters */
4726 	for (i = 0; i < CLUSTER_COUNT; i++) {
4727 		memset(payload_write, i + 1, sizeof(payload_write));
4728 		g_bserrno = -1;
4729 		spdk_blob_io_write(blob, ch, payload_write, pages_per_cluster * i, 1, blob_op_complete, NULL);
4730 		poll_threads();
4731 		CU_ASSERT(g_bserrno == 0);
4732 		CU_ASSERT(free_clusters - (i + 1) == spdk_bs_free_cluster_count(bs));
4733 	}
4734 	CU_ASSERT(0 == spdk_bs_free_cluster_count(bs));
4735 
4736 	/* Unmap one whole cluster */
4737 	g_bserrno = -1;
4738 	spdk_blob_io_unmap(blob, ch, pages_per_cluster, pages_per_cluster, blob_op_complete, NULL);
4739 	poll_threads();
4740 	CU_ASSERT(g_bserrno == 0);
4741 	CU_ASSERT(1 == spdk_bs_free_cluster_count(bs));
4742 
4743 	/* Verify the data read from the cluster is zeroed out */
4744 	memset(payload_write, 0, sizeof(payload_write));
4745 	spdk_blob_io_read(blob, ch, payload_read, pages_per_cluster, 1, blob_op_complete, NULL);
4746 	poll_threads();
4747 	CU_ASSERT(g_bserrno == 0);
4748 	CU_ASSERT(memcmp(payload_write, payload_read, BLOCKLEN) == 0);
4749 
4750 	/* Fill the same cluster with data */
4751 	memset(payload_write, 3, sizeof(payload_write));
4752 	g_bserrno = -1;
4753 	spdk_blob_io_write(blob, ch, payload_write, pages_per_cluster, 1, blob_op_complete, NULL);
4754 	poll_threads();
4755 	CU_ASSERT(g_bserrno == 0);
4756 	CU_ASSERT(0 == spdk_bs_free_cluster_count(bs));
4757 
4758 	/* Verify the data read from the cluster has the expected data */
4759 	spdk_blob_io_read(blob, ch, payload_read, pages_per_cluster, 1, blob_op_complete, NULL);
4760 	poll_threads();
4761 	CU_ASSERT(g_bserrno == 0);
4762 	CU_ASSERT(memcmp(payload_write, payload_read, BLOCKLEN) == 0);
4763 
4764 	/* Send an unaligned unmap that ecompasses one whole cluster */
4765 	g_bserrno = -1;
4766 	spdk_blob_io_unmap(blob, ch, pages_per_cluster - 1, pages_per_cluster + 2, blob_op_complete, NULL);
4767 	poll_threads();
4768 	CU_ASSERT(g_bserrno == 0);
4769 	CU_ASSERT(1 == spdk_bs_free_cluster_count(bs));
4770 
4771 	/* Verify the data read from the cluster is zeroed out */
4772 	g_bserrno = -1;
4773 	memset(payload_write, 0, sizeof(payload_write));
4774 	spdk_blob_io_read(blob, ch, payload_read, pages_per_cluster, 1, blob_op_complete, NULL);
4775 	poll_threads();
4776 	CU_ASSERT(g_bserrno == 0);
4777 	CU_ASSERT(memcmp(payload_write, payload_read, BLOCKLEN) == 0);
4778 
4779 	/* Send a simultaneous unmap with a write to an unallocated area -
4780 	 * check that writes don't claim the currently unmapped cluster */
4781 	g_bserrno = -1;
4782 	memset(payload_write, 7, sizeof(payload_write));
4783 	spdk_blob_io_unmap(blob, ch, 0, pages_per_cluster, blob_op_complete, NULL);
4784 	spdk_blob_io_write(blob, ch, payload_write, pages_per_cluster, 1, blob_op_complete, NULL);
4785 	poll_threads();
4786 	CU_ASSERT(g_bserrno == 0);
4787 	CU_ASSERT(1 == spdk_bs_free_cluster_count(bs));
4788 
4789 	/* Verify the contents of written sector */
4790 	g_bserrno = -1;
4791 	spdk_blob_io_read(blob, ch, payload_read, pages_per_cluster, 1, blob_op_complete, NULL);
4792 	poll_threads();
4793 	CU_ASSERT(g_bserrno == 0);
4794 	CU_ASSERT(memcmp(payload_write, payload_read, BLOCKLEN) == 0);
4795 
4796 	/* Verify the contents of unmapped sector */
4797 	g_bserrno = -1;
4798 	memset(payload_write, 0, sizeof(payload_write));
4799 	spdk_blob_io_read(blob, ch, payload_read, 0, 1, blob_op_complete, NULL);
4800 	poll_threads();
4801 	CU_ASSERT(g_bserrno == 0);
4802 	CU_ASSERT(memcmp(payload_write, payload_read, BLOCKLEN) == 0);
4803 
4804 	/* Make sure clusters are not freed until the unmap to the drive is done */
4805 	g_bserrno = -1;
4806 	memset(payload_write, 7, sizeof(payload_write));
4807 	spdk_blob_io_write(blob, ch, payload_write, 0, 1, blob_op_complete, NULL);
4808 	poll_threads();
4809 	CU_ASSERT(g_bserrno == 0);
4810 	CU_ASSERT(0 == spdk_bs_free_cluster_count(bs));
4811 
4812 	g_bserrno = -1;
4813 	spdk_blob_io_unmap(blob, ch, 0, pages_per_cluster, blob_op_complete, NULL);
4814 	while (memcmp(payload_write, &g_dev_buffer[BLOCKLEN * pages_per_cluster], BLOCKLEN) == 0) {
4815 		CU_ASSERT(0 == spdk_bs_free_cluster_count(bs));
4816 		poll_thread_times(0, 1);
4817 	}
4818 	poll_threads();
4819 	CU_ASSERT(g_bserrno == 0);
4820 	CU_ASSERT(1 == spdk_bs_free_cluster_count(bs));
4821 
4822 	/* Issue #3358 had a bug with concurrent trims to the same cluster causing an assert, check for regressions.
4823 	 * Send three concurrent unmaps to the same cluster.
4824 	 */
4825 	g_bserrno = -1;
4826 	memset(payload_write, 7, sizeof(payload_write));
4827 	spdk_blob_io_write(blob, ch, payload_write, 0, 1, blob_op_complete, NULL);
4828 	poll_threads();
4829 	CU_ASSERT(g_bserrno == 0);
4830 	CU_ASSERT(0 == spdk_bs_free_cluster_count(bs));
4831 
4832 	g_bserrno = -1;
4833 	err = -1;
4834 	spdk_blob_io_unmap(blob, ch, 0, pages_per_cluster, blob_op_complete, NULL);
4835 	spdk_blob_io_unmap(blob, ch, 0, pages_per_cluster, blob_op_complete, NULL);
4836 	spdk_blob_io_unmap(blob, ch, 0, pages_per_cluster, blob_op_complete, &err);
4837 	poll_threads();
4838 	CU_ASSERT(g_bserrno == 0);
4839 	CU_ASSERT(err == 0);
4840 	CU_ASSERT(1 == spdk_bs_free_cluster_count(bs));
4841 
4842 	/* Test thin-provisioned blob that is backed */
4843 	spdk_blob_resize(blob, 1, blob_op_complete, NULL);
4844 	poll_threads();
4845 	CU_ASSERT(g_bserrno == 0);
4846 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
4847 	poll_threads();
4848 	CU_ASSERT(g_bserrno == 0);
4849 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4850 
4851 	g_bserrno = -1;
4852 	memset(payload_write, 1, sizeof(payload_write));
4853 	spdk_blob_io_write(blob, ch, payload_write, 0, 1, blob_op_complete, NULL);
4854 	poll_threads();
4855 	CU_ASSERT(g_bserrno == 0);
4856 	CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs));
4857 
4858 	/* Create a snapshot */
4859 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 0);
4860 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
4861 	poll_threads();
4862 	CU_ASSERT(g_bserrno == 0);
4863 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
4864 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
4865 	snapshotid = g_blobid;
4866 	CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs));
4867 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
4868 	poll_threads();
4869 	CU_ASSERT(g_bserrno == 0);
4870 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4871 	snapshot = g_blob;
4872 
4873 	/* Write data to blob, it will alloc new cluster */
4874 	g_bserrno = -1;
4875 	memset(payload_write, 2, sizeof(payload_write));
4876 	spdk_blob_io_write(blob, ch, payload_write, 0, 1, blob_op_complete, NULL);
4877 	poll_threads();
4878 	CU_ASSERT(g_bserrno == 0);
4879 	CU_ASSERT(free_clusters - 2 == spdk_bs_free_cluster_count(bs));
4880 
4881 	/* Unmap one whole cluster, but do not release this cluster */
4882 	g_bserrno = -1;
4883 	spdk_blob_io_unmap(blob, ch, 0, pages_per_cluster, blob_op_complete, NULL);
4884 	poll_threads();
4885 	CU_ASSERT(g_bserrno == 0);
4886 	CU_ASSERT(free_clusters - 2 == spdk_bs_free_cluster_count(bs));
4887 
4888 	/* Verify the data read from the cluster is zeroed out */
4889 	g_bserrno = -1;
4890 	memset(payload_write, 0, sizeof(payload_write));
4891 	spdk_blob_io_read(blob, ch, payload_read, 0, 1, blob_op_complete, NULL);
4892 	poll_threads();
4893 	CU_ASSERT(g_bserrno == 0);
4894 	CU_ASSERT(memcmp(payload_write, payload_read, BLOCKLEN) == 0);
4895 
4896 	ut_blob_close_and_delete(bs, blob);
4897 	ut_blob_close_and_delete(bs, snapshot);
4898 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4899 
4900 	spdk_bs_free_io_channel(ch);
4901 	poll_threads();
4902 	g_blob = NULL;
4903 	g_blobid = 0;
4904 
4905 	spdk_bs_unload(bs, bs_op_complete, NULL);
4906 	poll_threads();
4907 	CU_ASSERT(g_bserrno == 0);
4908 	g_bs = NULL;
4909 }
4910 
/*
 * Verify allocation and device I/O accounting when the first allocated
 * cluster of a thin-provisioned blob is not cluster 0 (exercises the
 * run-length-encoded extent handling), then reload the blobstore and
 * confirm the written data persisted.
 */
static void
blob_thin_prov_rle(void)
{
	static const uint8_t zero[10 * BLOCKLEN] = { 0 };
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_io_channel *channel;
	struct spdk_blob_opts opts;
	spdk_blob_id blobid;
	uint64_t free_clusters;
	uint64_t page_size;
	uint8_t payload_read[10 * BLOCKLEN];
	uint8_t payload_write[10 * BLOCKLEN];
	uint64_t write_bytes;
	uint64_t read_bytes;
	uint64_t io_unit;

	free_clusters = spdk_bs_free_cluster_count(bs);
	page_size = spdk_bs_get_page_size(bs);

	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = true;
	opts.num_clusters = 5;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);
	/* Thin provisioning: creation must not consume any clusters. */
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	/* Target specifically second cluster in a blob as first allocation */
	io_unit = bs_cluster_to_page(bs, 1) * bs_io_unit_per_page(bs);

	/* Payload should be all zeros from unallocated clusters */
	memset(payload_read, 0xFF, sizeof(payload_read));
	spdk_blob_io_read(blob, channel, payload_read, io_unit, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(zero, payload_read, 10 * BLOCKLEN) == 0);

	/* Snapshot the device byte counters to measure exactly what the
	 * next write costs in backing-device I/O. */
	write_bytes = g_dev_write_bytes;
	read_bytes = g_dev_read_bytes;

	/* Issue write to second cluster in a blob */
	memset(payload_write, 0xE5, sizeof(payload_write));
	spdk_blob_io_write(blob, channel, payload_write, io_unit, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs));
	/* For thin-provisioned blob we need to write 10 pages plus one page metadata and
	 * read 0 bytes */
	if (g_use_extent_table) {
		/* Add one more page for EXTENT_PAGE write */
		CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 12);
	} else {
		CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 11);
	}
	CU_ASSERT(g_dev_read_bytes - read_bytes == 0);

	spdk_blob_io_read(blob, channel, payload_read, io_unit, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_write, payload_read, 10 * BLOCKLEN) == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Reload the blobstore so the following read must come from
	 * persisted metadata and data, not in-memory state. */
	ut_bs_reload(&bs, NULL);

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	/* Read second cluster after blob reload to confirm data written */
	spdk_blob_io_read(blob, channel, payload_read, io_unit, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_write, payload_read, 10 * BLOCKLEN) == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();

	ut_blob_close_and_delete(bs, blob);
}
5005 
/*
 * Vectored (readv/writev) I/O on a thin-provisioned blob: resize to 5
 * clusters without allocating, confirm unallocated reads return zeroes,
 * then write and read back a 10-block payload split across 3 iovecs.
 */
static void
blob_thin_prov_rw_iov(void)
{
	static const uint8_t zero[10 * BLOCKLEN] = { 0 };
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_io_channel *channel;
	struct spdk_blob_opts opts;
	uint64_t free_clusters;
	uint8_t payload_read[10 * BLOCKLEN];
	uint8_t payload_write[10 * BLOCKLEN];
	struct iovec iov_read[3];
	struct iovec iov_write[3];

	free_clusters = spdk_bs_free_cluster_count(bs);

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = true;

	blob = ut_blob_create_and_open(bs, &opts);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));

	CU_ASSERT(blob->active.num_clusters == 0);

	/* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */
	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(blob->active.num_clusters == 5);

	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	/* Sync must not change anything */
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(blob->active.num_clusters == 5);

	/* Payload should be all zeros from unallocated clusters */
	memset(payload_read, 0xAA, sizeof(payload_read));
	/* Split the 10-block read unevenly (3/4/3) across three iovecs. */
	iov_read[0].iov_base = payload_read;
	iov_read[0].iov_len = 3 * BLOCKLEN;
	iov_read[1].iov_base = payload_read + 3 * BLOCKLEN;
	iov_read[1].iov_len = 4 * BLOCKLEN;
	iov_read[2].iov_base = payload_read + 7 * BLOCKLEN;
	iov_read[2].iov_len = 3 * BLOCKLEN;
	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(zero, payload_read, 10 * BLOCKLEN) == 0);

	memset(payload_write, 0xE5, sizeof(payload_write));
	/* Use a different iovec split (1/5/4) for the write. */
	iov_write[0].iov_base = payload_write;
	iov_write[0].iov_len = 1 * BLOCKLEN;
	iov_write[1].iov_base = payload_write + 1 * BLOCKLEN;
	iov_write[1].iov_len = 5 * BLOCKLEN;
	iov_write[2].iov_base = payload_write + 6 * BLOCKLEN;
	iov_write[2].iov_len = 4 * BLOCKLEN;

	spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Read the data back and verify it matches what was written. */
	memset(payload_read, 0xAA, sizeof(payload_read));
	iov_read[0].iov_base = payload_read;
	iov_read[0].iov_len = 3 * BLOCKLEN;
	iov_read[1].iov_base = payload_read + 3 * BLOCKLEN;
	iov_read[1].iov_len = 4 * BLOCKLEN;
	iov_read[2].iov_base = payload_read + 7 * BLOCKLEN;
	iov_read[2].iov_len = 3 * BLOCKLEN;
	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_write, payload_read, 10 * BLOCKLEN) == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();

	ut_blob_close_and_delete(bs, blob);
}
5089 
/* Context handed to test_iter() via opts.iter_cb_arg: records the blob IDs
 * in creation order so the iteration callback can verify visit order. */
struct iter_ctx {
	int		current_iter;	/* index of the next blob expected by test_iter() */
	spdk_blob_id	blobid[4];	/* blob IDs in the order they were created */
};
5094 
5095 static void
5096 test_iter(void *arg, struct spdk_blob *blob, int bserrno)
5097 {
5098 	struct iter_ctx *iter_ctx = arg;
5099 	spdk_blob_id blobid;
5100 
5101 	CU_ASSERT(bserrno == 0);
5102 	blobid = spdk_blob_get_id(blob);
5103 	CU_ASSERT(blobid == iter_ctx->blobid[iter_ctx->current_iter++]);
5104 }
5105 
/*
 * Verify the iter_cb_fn/iter_cb_arg load options: create four blobs, then
 * confirm spdk_bs_load() invokes the callback once per blob, in blobid
 * order, both after a clean shutdown and after a dirty shutdown (bs_free).
 */
static void
bs_load_iter_test(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct iter_ctx iter_ctx = { 0 };
	struct spdk_blob *blob;
	int i, rc;
	struct spdk_bs_opts opts;

	dev = init_dev();
	spdk_bs_opts_init(&opts, sizeof(opts));
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");

	/* Initialize a new blob store */
	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	/* Create four blobs, recording their IDs as the expected visit order. */
	for (i = 0; i < 4; i++) {
		blob = ut_blob_create_and_open(bs, NULL);
		iter_ctx.blobid[i] = spdk_blob_get_id(blob);

		/* Just save the blobid as an xattr for testing purposes. */
		rc = spdk_blob_set_xattr(blob, "blobid", &iter_ctx.blobid[i], sizeof(spdk_blob_id));
		CU_ASSERT(rc == 0);

		/* Resize the blob */
		spdk_blob_resize(blob, i, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);

		spdk_blob_close(blob, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
	}

	g_bserrno = -1;
	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	dev = init_dev();
	spdk_bs_opts_init(&opts, sizeof(opts));
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
	opts.iter_cb_fn = test_iter;
	opts.iter_cb_arg = &iter_ctx;

	/* Test blob iteration during load after a clean shutdown. */
	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	/* Dirty shutdown */
	bs_free(bs);

	dev = init_dev();
	spdk_bs_opts_init(&opts, sizeof(opts));
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
	opts.iter_cb_fn = test_iter;
	iter_ctx.current_iter = 0;	/* restart the expected visit order */
	opts.iter_cb_arg = &iter_ctx;

	/* Test blob iteration during load after a dirty shutdown. */
	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
}
5185 
/*
 * Snapshot copy-on-write test: write to a thin-provisioned blob, snapshot
 * it, write to the clone again and verify the exact backing-device byte
 * counts for the COW cluster copy, then confirm the snapshot's data is
 * unchanged by the clone write.
 */
static void
blob_snapshot_rw(void)
{
	static const uint8_t zero[10 * BLOCKLEN] = { 0 };
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob, *snapshot;
	struct spdk_io_channel *channel;
	struct spdk_blob_opts opts;
	spdk_blob_id blobid, snapshotid;
	uint64_t free_clusters;
	uint64_t cluster_size;
	uint64_t page_size;
	uint8_t payload_read[10 * BLOCKLEN];
	uint8_t payload_write[10 * BLOCKLEN];
	uint64_t write_bytes_start;
	uint64_t read_bytes_start;
	uint64_t copy_bytes_start;
	uint64_t write_bytes;
	uint64_t read_bytes;
	uint64_t copy_bytes;

	free_clusters = spdk_bs_free_cluster_count(bs);
	cluster_size = spdk_bs_get_cluster_size(bs);
	page_size = spdk_bs_get_page_size(bs);

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = true;
	opts.num_clusters = 5;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));

	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);

	/* Unallocated clusters must read back as zeroes. */
	memset(payload_read, 0xFF, sizeof(payload_read));
	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(zero, payload_read, 10 * BLOCKLEN) == 0);

	/* First write allocates a cluster. */
	memset(payload_write, 0xE5, sizeof(payload_write));
	spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs));

	/* Create snapshot from blob */
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snapshotid = g_blobid;

	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot = g_blob;
	/* A snapshot is read-only in both data and metadata. */
	CU_ASSERT(snapshot->data_ro == true);
	CU_ASSERT(snapshot->md_ro == true);

	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5);

	/* Record device counters to measure the cost of the COW write below. */
	write_bytes_start = g_dev_write_bytes;
	read_bytes_start = g_dev_read_bytes;
	copy_bytes_start = g_dev_copy_bytes;

	memset(payload_write, 0xAA, sizeof(payload_write));
	spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs));

	/* For a clone we need to allocate and copy one cluster, update one page of metadata
	 * and then write 10 pages of payload.
	 */
	write_bytes = g_dev_write_bytes - write_bytes_start;
	read_bytes = g_dev_read_bytes - read_bytes_start;
	copy_bytes = g_dev_copy_bytes - copy_bytes_start;
	if (g_dev_copy_enabled) {
		/* Device offload: the cluster moves via a copy command instead
		 * of a read+write pair. */
		CU_ASSERT(copy_bytes == cluster_size);
	} else {
		CU_ASSERT(copy_bytes == 0);
	}
	if (g_use_extent_table) {
		/* Add one more page for EXTENT_PAGE write */
		CU_ASSERT(write_bytes + copy_bytes == page_size * 12 + cluster_size);
	} else {
		CU_ASSERT(write_bytes + copy_bytes == page_size * 11 + cluster_size);
	}
	CU_ASSERT(read_bytes + copy_bytes == cluster_size);

	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_write, payload_read, 10 * BLOCKLEN) == 0);

	/* Data on snapshot should not change after write to clone */
	memset(payload_write, 0xE5, sizeof(payload_write));
	spdk_blob_io_read(snapshot, channel, payload_read, 4, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_write, payload_read, 10 * BLOCKLEN) == 0);

	ut_blob_close_and_delete(bs, blob);
	ut_blob_close_and_delete(bs, snapshot);

	spdk_bs_free_io_channel(channel);
	poll_threads();
	g_blob = NULL;
	g_blobid = 0;
}
5302 
/*
 * Vectored I/O through a snapshot chain: snapshot an empty thin-provisioned
 * blob, then verify readv of unallocated clusters returns zeroes and that a
 * writev to the clone reads back correctly.
 */
static void
blob_snapshot_rw_iov(void)
{
	static const uint8_t zero[10 * BLOCKLEN] = { 0 };
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob, *snapshot;
	struct spdk_io_channel *channel;
	struct spdk_blob_opts opts;
	spdk_blob_id blobid, snapshotid;
	uint64_t free_clusters;
	uint8_t payload_read[10 * BLOCKLEN];
	uint8_t payload_write[10 * BLOCKLEN];
	struct iovec iov_read[3];
	struct iovec iov_write[3];

	free_clusters = spdk_bs_free_cluster_count(bs);

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = true;
	opts.num_clusters = 5;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));

	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);

	/* Create snapshot from blob */
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snapshotid = g_blobid;

	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot = g_blob;
	/* A snapshot is read-only in both data and metadata. */
	CU_ASSERT(snapshot->data_ro == true);
	CU_ASSERT(snapshot->md_ro == true);
	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5);

	/* Payload should be all zeros from unallocated clusters */
	memset(payload_read, 0xAA, sizeof(payload_read));
	/* Split the 10-block read unevenly (3/4/3) across three iovecs. */
	iov_read[0].iov_base = payload_read;
	iov_read[0].iov_len = 3 * BLOCKLEN;
	iov_read[1].iov_base = payload_read + 3 * BLOCKLEN;
	iov_read[1].iov_len = 4 * BLOCKLEN;
	iov_read[2].iov_base = payload_read + 7 * BLOCKLEN;
	iov_read[2].iov_len = 3 * BLOCKLEN;
	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(zero, payload_read, 10 * BLOCKLEN) == 0);

	memset(payload_write, 0xE5, sizeof(payload_write));
	/* Use a different iovec split (1/5/4) for the write. */
	iov_write[0].iov_base = payload_write;
	iov_write[0].iov_len = 1 * BLOCKLEN;
	iov_write[1].iov_base = payload_write + 1 * BLOCKLEN;
	iov_write[1].iov_len = 5 * BLOCKLEN;
	iov_write[2].iov_base = payload_write + 6 * BLOCKLEN;
	iov_write[2].iov_len = 4 * BLOCKLEN;

	spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Read the data back and verify it matches what was written. */
	memset(payload_read, 0xAA, sizeof(payload_read));
	iov_read[0].iov_base = payload_read;
	iov_read[0].iov_len = 3 * BLOCKLEN;
	iov_read[1].iov_base = payload_read + 3 * BLOCKLEN;
	iov_read[1].iov_len = 4 * BLOCKLEN;
	iov_read[2].iov_base = payload_read + 7 * BLOCKLEN;
	iov_read[2].iov_len = 3 * BLOCKLEN;
	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_write, payload_read, 10 * BLOCKLEN) == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();

	ut_blob_close_and_delete(bs, blob);
	ut_blob_close_and_delete(bs, snapshot);
}
5392 
5393 /**
5394  * Inflate / decouple parent rw unit tests.
5395  *
5396  * --------------
5397  * original blob:         0         1         2         3         4
5398  *                   ,---------+---------+---------+---------+---------.
5399  *         snapshot  |xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|    -    |
5400  *                   +---------+---------+---------+---------+---------+
5401  *         snapshot2 |    -    |yyyyyyyyy|    -    |yyyyyyyyy|    -    |
5402  *                   +---------+---------+---------+---------+---------+
5403  *         blob      |    -    |zzzzzzzzz|    -    |    -    |    -    |
5404  *                   '---------+---------+---------+---------+---------'
5405  *                   .         .         .         .         .         .
5406  * --------          .         .         .         .         .         .
5407  * inflate:          .         .         .         .         .         .
5408  *                   ,---------+---------+---------+---------+---------.
5409  *         blob      |xxxxxxxxx|zzzzzzzzz|xxxxxxxxx|yyyyyyyyy|000000000|
5410  *                   '---------+---------+---------+---------+---------'
5411  *
5412  *         NOTE: needs to allocate 4 clusters, thin provisioning removed, dependency
5413  *               on snapshot2 and snapshot removed .         .         .
5414  *                   .         .         .         .         .         .
5415  * ----------------  .         .         .         .         .         .
5416  * decouple parent:  .         .         .         .         .         .
5417  *                   ,---------+---------+---------+---------+---------.
5418  *         snapshot  |xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|    -    |
5419  *                   +---------+---------+---------+---------+---------+
5420  *         blob      |    -    |zzzzzzzzz|    -    |yyyyyyyyy|    -    |
5421  *                   '---------+---------+---------+---------+---------'
5422  *
5423  *         NOTE: needs to allocate 1 cluster, 3 clusters unallocated, dependency
5424  *               on snapshot2 removed and on snapshot still exists. Snapshot2
5425  *               should remain a clone of snapshot.
5426  */
static void
_blob_inflate_rw(bool decouple_parent)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob, *snapshot, *snapshot2;
	struct spdk_io_channel *channel;
	struct spdk_blob_opts opts;
	spdk_blob_id blobid, snapshotid, snapshot2id;
	uint64_t free_clusters;
	uint64_t cluster_size;

	/* One 5-cluster payload buffer per role: read-back, write pattern,
	 * and the expected ("golden") content of the clone. */
	uint64_t payload_size;
	uint8_t *payload_read;
	uint8_t *payload_write;
	uint8_t *payload_clone;

	uint64_t pages_per_cluster;
	uint64_t pages_per_payload;

	int i;
	spdk_blob_id ids[2];
	size_t count;

	free_clusters = spdk_bs_free_cluster_count(bs);
	cluster_size = spdk_bs_get_cluster_size(bs);
	pages_per_cluster = cluster_size / spdk_bs_get_page_size(bs);
	pages_per_payload = pages_per_cluster * 5;

	payload_size = cluster_size * 5;

	payload_read = malloc(payload_size);
	SPDK_CU_ASSERT_FATAL(payload_read != NULL);

	payload_write = malloc(payload_size);
	SPDK_CU_ASSERT_FATAL(payload_write != NULL);

	payload_clone = malloc(payload_size);
	SPDK_CU_ASSERT_FATAL(payload_clone != NULL);

	channel = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(channel != NULL);

	/* Create blob */
	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = true;
	opts.num_clusters = 5;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);
	/* Thin-provisioned blob must not consume any clusters until written */
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));

	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);

	/* 1) Initial read should return zeroed payload */
	memset(payload_read, 0xFF, payload_size);
	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload,
			  blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size));

	/* Fill whole blob with a pattern, except last cluster (to be sure it
	 * isn't allocated) */
	memset(payload_write, 0xE5, payload_size - cluster_size);
	spdk_blob_io_write(blob, channel, payload_write, 0, pages_per_payload -
			   pages_per_cluster, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs));

	/* 2) Create snapshot from blob (first level) */
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snapshotid = g_blobid;

	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot = g_blob;
	CU_ASSERT(snapshot->data_ro == true);
	CU_ASSERT(snapshot->md_ro == true);

	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5);

	/* Write every second cluster with a pattern.
	 *
	 * Last cluster shouldn't be written, to be sure that snapshot nor clone
	 * doesn't allocate it.
	 *
	 * payload_clone stores expected result on "blob" read at the time and
	 * is used only to check data consistency on clone before and after
	 * inflation. Initially we fill it with the backing snapshot's pattern
	 * used before.
	 */
	memset(payload_clone, 0xE5, payload_size - cluster_size);
	memset(payload_clone + payload_size - cluster_size, 0x00, cluster_size);
	memset(payload_write, 0xAA, payload_size);
	for (i = 1; i < 5; i += 2) {
		spdk_blob_io_write(blob, channel, payload_write, i * pages_per_cluster,
				   pages_per_cluster, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);

		/* Update expected result */
		memcpy(payload_clone + (cluster_size * i), payload_write,
		       cluster_size);
	}
	CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs));

	/* Check data consistency on clone */
	memset(payload_read, 0xFF, payload_size);
	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload,
			  blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0);

	/* 3) Create second level snapshot from blob */
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snapshot2id = g_blobid;

	spdk_bs_open_blob(bs, snapshot2id, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot2 = g_blob;
	CU_ASSERT(snapshot2->data_ro == true);
	CU_ASSERT(snapshot2->md_ro == true);

	CU_ASSERT(spdk_blob_get_num_clusters(snapshot2) == 5);

	CU_ASSERT(snapshot2->parent_id == snapshotid);

	/* Write one cluster on the top level blob. This cluster (1) covers
	 * already allocated cluster in the snapshot2, so shouldn't be inflated
	 * at all */
	spdk_blob_io_write(blob, channel, payload_write, pages_per_cluster,
			   pages_per_cluster, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Update expected result */
	memcpy(payload_clone + cluster_size, payload_write, cluster_size);

	/* Check data consistency on clone */
	memset(payload_read, 0xFF, payload_size);
	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload,
			  blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0);


	/* Close all blobs */
	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_blob_close(snapshot2, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_blob_close(snapshot, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Check snapshot-clone relations */
	count = 2;
	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);
	CU_ASSERT(count == 1);
	CU_ASSERT(ids[0] == snapshot2id);

	count = 2;
	CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0);
	CU_ASSERT(count == 1);
	CU_ASSERT(ids[0] == blobid);

	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshot2id);

	/* Re-baseline the free-cluster count right before inflating, so the
	 * deltas below measure only the inflate/decouple allocations. */
	free_clusters = spdk_bs_free_cluster_count(bs);
	if (!decouple_parent) {
		/* Do full blob inflation */
		spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);

		/* All clusters should be inflated (except one already allocated
		 * in a top level blob) */
		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 4);

		/* Check if relation tree updated correctly */
		count = 2;
		CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);

		/* snapshotid have one clone */
		CU_ASSERT(count == 1);
		CU_ASSERT(ids[0] == snapshot2id);

		/* snapshot2id have no clones */
		count = 2;
		CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0);
		CU_ASSERT(count == 0);

		CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID);
	} else {
		/* Decouple parent of blob */
		spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);

		/* Only one cluster from a parent should be inflated (second one
		 * is covered by a cluster written on a top level blob, and
		 * already allocated) */
		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 1);

		/* Check if relation tree updated correctly */
		count = 2;
		CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);

		/* snapshotid have two clones now */
		CU_ASSERT(count == 2);
		CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
		CU_ASSERT(ids[0] == snapshot2id || ids[1] == snapshot2id);

		/* snapshot2id have no clones */
		count = 2;
		CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0);
		CU_ASSERT(count == 0);

		CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
	}

	/* Try to delete snapshot2 (should pass) */
	spdk_bs_delete_blob(bs, snapshot2id, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Try to delete base snapshot */
	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Reopen blob after snapshot deletion */
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;

	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);

	/* Check data consistency on inflated blob */
	memset(payload_read, 0xFF, payload_size);
	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload,
			  blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();

	free(payload_read);
	free(payload_write);
	free(payload_clone);

	ut_blob_close_and_delete(bs, blob);
}
5701 
5702 static void
5703 blob_inflate_rw(void)
5704 {
5705 	_blob_inflate_rw(false);
5706 	_blob_inflate_rw(true);
5707 }
5708 
5709 /**
5710  * Snapshot-clones relation test
5711  *
5712  *         snapshot
5713  *            |
5714  *      +-----+-----+
5715  *      |           |
5716  *   blob(ro)   snapshot2
5717  *      |           |
5718  *   clone2      clone
5719  */
5720 static void
5721 blob_relations(void)
5722 {
5723 	struct spdk_blob_store *bs;
5724 	struct spdk_bs_dev *dev;
5725 	struct spdk_bs_opts bs_opts;
5726 	struct spdk_blob_opts opts;
5727 	struct spdk_blob *blob, *snapshot, *snapshot2, *clone, *clone2;
5728 	spdk_blob_id blobid, cloneid, snapshotid, cloneid2, snapshotid2;
5729 	int rc;
5730 	size_t count;
5731 	spdk_blob_id ids[10] = {};
5732 
5733 	dev = init_dev();
5734 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
5735 	snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");
5736 
5737 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
5738 	poll_threads();
5739 	CU_ASSERT(g_bserrno == 0);
5740 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
5741 	bs = g_bs;
5742 
5743 	/* 1. Create blob with 10 clusters */
5744 
5745 	ut_spdk_blob_opts_init(&opts);
5746 	opts.num_clusters = 10;
5747 
5748 	blob = ut_blob_create_and_open(bs, &opts);
5749 	blobid = spdk_blob_get_id(blob);
5750 
5751 	CU_ASSERT(!spdk_blob_is_read_only(blob));
5752 	CU_ASSERT(!spdk_blob_is_snapshot(blob));
5753 	CU_ASSERT(!spdk_blob_is_clone(blob));
5754 	CU_ASSERT(!spdk_blob_is_thin_provisioned(blob));
5755 
5756 	/* blob should not have underlying snapshot nor clones */
5757 	CU_ASSERT(blob->parent_id == SPDK_BLOBID_INVALID);
5758 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID);
5759 	count = SPDK_COUNTOF(ids);
5760 	rc = spdk_blob_get_clones(bs, blobid, ids, &count);
5761 	CU_ASSERT(rc == 0);
5762 	CU_ASSERT(count == 0);
5763 
5764 
5765 	/* 2. Create snapshot */
5766 
5767 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5768 	poll_threads();
5769 	CU_ASSERT(g_bserrno == 0);
5770 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5771 	snapshotid = g_blobid;
5772 
5773 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
5774 	poll_threads();
5775 	CU_ASSERT(g_bserrno == 0);
5776 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5777 	snapshot = g_blob;
5778 
5779 	CU_ASSERT(spdk_blob_is_read_only(snapshot));
5780 	CU_ASSERT(spdk_blob_is_snapshot(snapshot));
5781 	CU_ASSERT(!spdk_blob_is_clone(snapshot));
5782 	CU_ASSERT(snapshot->parent_id == SPDK_BLOBID_INVALID);
5783 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid) == SPDK_BLOBID_INVALID);
5784 
5785 	/* Check if original blob is converted to the clone of snapshot */
5786 	CU_ASSERT(!spdk_blob_is_read_only(blob));
5787 	CU_ASSERT(!spdk_blob_is_snapshot(blob));
5788 	CU_ASSERT(spdk_blob_is_clone(blob));
5789 	CU_ASSERT(spdk_blob_is_thin_provisioned(blob));
5790 	CU_ASSERT(blob->parent_id == snapshotid);
5791 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
5792 
5793 	count = SPDK_COUNTOF(ids);
5794 	rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
5795 	CU_ASSERT(rc == 0);
5796 	CU_ASSERT(count == 1);
5797 	CU_ASSERT(ids[0] == blobid);
5798 
5799 
5800 	/* 3. Create clone from snapshot */
5801 
5802 	spdk_bs_create_clone(bs, snapshotid, NULL, blob_op_with_id_complete, NULL);
5803 	poll_threads();
5804 	CU_ASSERT(g_bserrno == 0);
5805 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5806 	cloneid = g_blobid;
5807 
5808 	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
5809 	poll_threads();
5810 	CU_ASSERT(g_bserrno == 0);
5811 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5812 	clone = g_blob;
5813 
5814 	CU_ASSERT(!spdk_blob_is_read_only(clone));
5815 	CU_ASSERT(!spdk_blob_is_snapshot(clone));
5816 	CU_ASSERT(spdk_blob_is_clone(clone));
5817 	CU_ASSERT(spdk_blob_is_thin_provisioned(clone));
5818 	CU_ASSERT(clone->parent_id == snapshotid);
5819 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid);
5820 
5821 	count = SPDK_COUNTOF(ids);
5822 	rc = spdk_blob_get_clones(bs, cloneid, ids, &count);
5823 	CU_ASSERT(rc == 0);
5824 	CU_ASSERT(count == 0);
5825 
5826 	/* Check if clone is on the snapshot's list */
5827 	count = SPDK_COUNTOF(ids);
5828 	rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
5829 	CU_ASSERT(rc == 0);
5830 	CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
5831 	CU_ASSERT(ids[0] == cloneid || ids[1] == cloneid);
5832 
5833 
5834 	/* 4. Create snapshot of the clone */
5835 
5836 	spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL);
5837 	poll_threads();
5838 	CU_ASSERT(g_bserrno == 0);
5839 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5840 	snapshotid2 = g_blobid;
5841 
5842 	spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL);
5843 	poll_threads();
5844 	CU_ASSERT(g_bserrno == 0);
5845 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5846 	snapshot2 = g_blob;
5847 
5848 	CU_ASSERT(spdk_blob_is_read_only(snapshot2));
5849 	CU_ASSERT(spdk_blob_is_snapshot(snapshot2));
5850 	CU_ASSERT(spdk_blob_is_clone(snapshot2));
5851 	CU_ASSERT(snapshot2->parent_id == snapshotid);
5852 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid);
5853 
5854 	/* Check if clone is converted to the clone of snapshot2 and snapshot2
5855 	 * is a child of snapshot */
5856 	CU_ASSERT(!spdk_blob_is_read_only(clone));
5857 	CU_ASSERT(!spdk_blob_is_snapshot(clone));
5858 	CU_ASSERT(spdk_blob_is_clone(clone));
5859 	CU_ASSERT(spdk_blob_is_thin_provisioned(clone));
5860 	CU_ASSERT(clone->parent_id == snapshotid2);
5861 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2);
5862 
5863 	count = SPDK_COUNTOF(ids);
5864 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
5865 	CU_ASSERT(rc == 0);
5866 	CU_ASSERT(count == 1);
5867 	CU_ASSERT(ids[0] == cloneid);
5868 
5869 
5870 	/* 5. Try to create clone from read only blob */
5871 
5872 	/* Mark blob as read only */
5873 	spdk_blob_set_read_only(blob);
5874 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
5875 	poll_threads();
5876 	CU_ASSERT(g_bserrno == 0);
5877 
5878 	/* Check if previously created blob is read only clone */
5879 	CU_ASSERT(spdk_blob_is_read_only(blob));
5880 	CU_ASSERT(!spdk_blob_is_snapshot(blob));
5881 	CU_ASSERT(spdk_blob_is_clone(blob));
5882 	CU_ASSERT(spdk_blob_is_thin_provisioned(blob));
5883 
5884 	/* Create clone from read only blob */
5885 	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5886 	poll_threads();
5887 	CU_ASSERT(g_bserrno == 0);
5888 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5889 	cloneid2 = g_blobid;
5890 
5891 	spdk_bs_open_blob(bs, cloneid2, blob_op_with_handle_complete, NULL);
5892 	poll_threads();
5893 	CU_ASSERT(g_bserrno == 0);
5894 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5895 	clone2 = g_blob;
5896 
5897 	CU_ASSERT(!spdk_blob_is_read_only(clone2));
5898 	CU_ASSERT(!spdk_blob_is_snapshot(clone2));
5899 	CU_ASSERT(spdk_blob_is_clone(clone2));
5900 	CU_ASSERT(spdk_blob_is_thin_provisioned(clone2));
5901 
5902 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid);
5903 
5904 	count = SPDK_COUNTOF(ids);
5905 	rc = spdk_blob_get_clones(bs, blobid, ids, &count);
5906 	CU_ASSERT(rc == 0);
5907 
5908 	CU_ASSERT(count == 1);
5909 	CU_ASSERT(ids[0] == cloneid2);
5910 
5911 	/* Close blobs */
5912 
5913 	spdk_blob_close(clone2, blob_op_complete, NULL);
5914 	poll_threads();
5915 	CU_ASSERT(g_bserrno == 0);
5916 
5917 	spdk_blob_close(blob, blob_op_complete, NULL);
5918 	poll_threads();
5919 	CU_ASSERT(g_bserrno == 0);
5920 
5921 	spdk_blob_close(clone, blob_op_complete, NULL);
5922 	poll_threads();
5923 	CU_ASSERT(g_bserrno == 0);
5924 
5925 	spdk_blob_close(snapshot, blob_op_complete, NULL);
5926 	poll_threads();
5927 	CU_ASSERT(g_bserrno == 0);
5928 
5929 	spdk_blob_close(snapshot2, blob_op_complete, NULL);
5930 	poll_threads();
5931 	CU_ASSERT(g_bserrno == 0);
5932 
5933 	/* Try to delete snapshot with more than 1 clone */
5934 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
5935 	poll_threads();
5936 	CU_ASSERT(g_bserrno != 0);
5937 
5938 	ut_bs_reload(&bs, &bs_opts);
5939 
5940 	/* NULL ids array should return number of clones in count */
5941 	count = SPDK_COUNTOF(ids);
5942 	rc = spdk_blob_get_clones(bs, snapshotid, NULL, &count);
5943 	CU_ASSERT(rc == -ENOMEM);
5944 	CU_ASSERT(count == 2);
5945 
5946 	/* incorrect array size */
5947 	count = 1;
5948 	rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
5949 	CU_ASSERT(rc == -ENOMEM);
5950 	CU_ASSERT(count == 2);
5951 
5952 
5953 	/* Verify structure of loaded blob store */
5954 
5955 	/* snapshot */
5956 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid) == SPDK_BLOBID_INVALID);
5957 
5958 	count = SPDK_COUNTOF(ids);
5959 	rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
5960 	CU_ASSERT(rc == 0);
5961 	CU_ASSERT(count == 2);
5962 	CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
5963 	CU_ASSERT(ids[0] == snapshotid2 || ids[1] == snapshotid2);
5964 
5965 	/* blob */
5966 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
5967 	count = SPDK_COUNTOF(ids);
5968 	rc = spdk_blob_get_clones(bs, blobid, ids, &count);
5969 	CU_ASSERT(rc == 0);
5970 	CU_ASSERT(count == 1);
5971 	CU_ASSERT(ids[0] == cloneid2);
5972 
5973 	/* clone */
5974 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2);
5975 	count = SPDK_COUNTOF(ids);
5976 	rc = spdk_blob_get_clones(bs, cloneid, ids, &count);
5977 	CU_ASSERT(rc == 0);
5978 	CU_ASSERT(count == 0);
5979 
5980 	/* snapshot2 */
5981 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid);
5982 	count = SPDK_COUNTOF(ids);
5983 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
5984 	CU_ASSERT(rc == 0);
5985 	CU_ASSERT(count == 1);
5986 	CU_ASSERT(ids[0] == cloneid);
5987 
5988 	/* clone2 */
5989 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid);
5990 	count = SPDK_COUNTOF(ids);
5991 	rc = spdk_blob_get_clones(bs, cloneid2, ids, &count);
5992 	CU_ASSERT(rc == 0);
5993 	CU_ASSERT(count == 0);
5994 
5995 	/* Try to delete blob that user should not be able to remove */
5996 
5997 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
5998 	poll_threads();
5999 	CU_ASSERT(g_bserrno != 0);
6000 
6001 	/* Remove all blobs */
6002 
6003 	spdk_bs_delete_blob(bs, cloneid, blob_op_complete, NULL);
6004 	poll_threads();
6005 	CU_ASSERT(g_bserrno == 0);
6006 
6007 	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
6008 	poll_threads();
6009 	CU_ASSERT(g_bserrno == 0);
6010 
6011 	spdk_bs_delete_blob(bs, cloneid2, blob_op_complete, NULL);
6012 	poll_threads();
6013 	CU_ASSERT(g_bserrno == 0);
6014 
6015 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
6016 	poll_threads();
6017 	CU_ASSERT(g_bserrno == 0);
6018 
6019 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
6020 	poll_threads();
6021 	CU_ASSERT(g_bserrno == 0);
6022 
6023 	spdk_bs_unload(bs, bs_op_complete, NULL);
6024 	poll_threads();
6025 	CU_ASSERT(g_bserrno == 0);
6026 
6027 	g_bs = NULL;
6028 }
6029 
6030 /**
6031  * Snapshot-clones relation test 2
6032  *
6033  *         snapshot1
6034  *            |
6035  *         snapshot2
6036  *            |
6037  *      +-----+-----+
6038  *      |           |
6039  *   blob(ro)   snapshot3
6040  *      |           |
6041  *      |       snapshot4
6042  *      |        |     |
6043  *   clone2   clone  clone3
6044  */
static void
blob_relations2(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct spdk_bs_opts bs_opts;
	struct spdk_blob_opts opts;
	struct spdk_blob *blob, *snapshot1, *snapshot2, *snapshot3, *snapshot4, *clone, *clone2;
	spdk_blob_id blobid, snapshotid1, snapshotid2, snapshotid3, snapshotid4, cloneid, cloneid2,
		     cloneid3;
	int rc;
	size_t count;
	spdk_blob_id ids[10] = {};

	dev = init_dev();
	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
	snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");

	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	/* 1. Create blob with 10 clusters */

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);

	/* 2. Create snapshot1 */

	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snapshotid1 = g_blobid;

	spdk_bs_open_blob(bs, snapshotid1, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot1 = g_blob;

	CU_ASSERT(snapshot1->parent_id == SPDK_BLOBID_INVALID);
	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid1) == SPDK_BLOBID_INVALID);

	CU_ASSERT(blob->parent_id == snapshotid1);
	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid1);

	/* Check if blob is the clone of snapshot1 */
	CU_ASSERT(blob->parent_id == snapshotid1);
	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid1);

	count = SPDK_COUNTOF(ids);
	rc = spdk_blob_get_clones(bs, snapshotid1, ids, &count);
	CU_ASSERT(rc == 0);
	CU_ASSERT(count == 1);
	CU_ASSERT(ids[0] == blobid);

	/* 3. Create another snapshot */

	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snapshotid2 = g_blobid;

	spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot2 = g_blob;

	CU_ASSERT(spdk_blob_is_clone(snapshot2));
	CU_ASSERT(snapshot2->parent_id == snapshotid1);
	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid1);

	/* Check if snapshot2 is the clone of snapshot1 and blob
	 * is a child of snapshot2 */
	CU_ASSERT(blob->parent_id == snapshotid2);
	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid2);

	count = SPDK_COUNTOF(ids);
	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
	CU_ASSERT(rc == 0);
	CU_ASSERT(count == 1);
	CU_ASSERT(ids[0] == blobid);

	/* 4. Create clone from snapshot */

	spdk_bs_create_clone(bs, snapshotid2, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	cloneid = g_blobid;

	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	clone = g_blob;

	CU_ASSERT(clone->parent_id == snapshotid2);
	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2);

	/* Check if clone is on the snapshot's list */
	count = SPDK_COUNTOF(ids);
	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
	CU_ASSERT(rc == 0);
	CU_ASSERT(count == 2);
	CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
	CU_ASSERT(ids[0] == cloneid || ids[1] == cloneid);

	/* 5. Create snapshot of the clone */

	spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snapshotid3 = g_blobid;

	spdk_bs_open_blob(bs, snapshotid3, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot3 = g_blob;

	CU_ASSERT(snapshot3->parent_id == snapshotid2);
	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid3) == snapshotid2);

	/* Check if clone is converted to the clone of snapshot3 and snapshot3
	 * is a child of snapshot2 */
	CU_ASSERT(clone->parent_id == snapshotid3);
	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3);

	count = SPDK_COUNTOF(ids);
	rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count);
	CU_ASSERT(rc == 0);
	CU_ASSERT(count == 1);
	CU_ASSERT(ids[0] == cloneid);

	/* 6. Create another snapshot of the clone */

	spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snapshotid4 = g_blobid;

	spdk_bs_open_blob(bs, snapshotid4, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot4 = g_blob;

	CU_ASSERT(snapshot4->parent_id == snapshotid3);
	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid4) == snapshotid3);

	/* Check if clone is converted to the clone of snapshot4 and snapshot4
	 * is a child of snapshot3 */
	CU_ASSERT(clone->parent_id == snapshotid4);
	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid4);

	count = SPDK_COUNTOF(ids);
	rc = spdk_blob_get_clones(bs, snapshotid4, ids, &count);
	CU_ASSERT(rc == 0);
	CU_ASSERT(count == 1);
	CU_ASSERT(ids[0] == cloneid);

	/* 7. Remove snapshot 4 */

	ut_blob_close_and_delete(bs, snapshot4);

	/* Check if relations are back to state from before creating snapshot 4 */
	CU_ASSERT(clone->parent_id == snapshotid3);
	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3);

	count = SPDK_COUNTOF(ids);
	rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count);
	CU_ASSERT(rc == 0);
	CU_ASSERT(count == 1);
	CU_ASSERT(ids[0] == cloneid);

	/* 8. Create second clone of snapshot 3 and try to remove snapshot 3 */

	spdk_bs_create_clone(bs, snapshotid3, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	cloneid3 = g_blobid;

	/* Deletion must fail: snapshot3 now has two clones (clone and clone3) */
	spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno != 0);

	/* 9. Open snapshot 3 again and try to remove it while clone 3 is closed */

	spdk_bs_open_blob(bs, snapshotid3, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot3 = g_blob;

	spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno != 0);

	spdk_blob_close(snapshot3, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_delete_blob(bs, cloneid3, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* 10. Remove snapshot 1 */

	/* Check snapshot 1 and snapshot 2 allocated clusters */
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(snapshot1) == 10);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(snapshot2) == 0);

	ut_blob_close_and_delete(bs, snapshot1);

	/* Check that after removing snapshot 1 its child snapshot 2 became the
	 * root of the chain and inherited snapshot 1's role */
	CU_ASSERT(snapshot2->parent_id == SPDK_BLOBID_INVALID);
	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == SPDK_BLOBID_INVALID);

	/* Check that snapshot 2 has the clusters that were allocated to snapshot 1 */
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(snapshot2) == 10);

	count = SPDK_COUNTOF(ids);
	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
	CU_ASSERT(rc == 0);
	CU_ASSERT(count == 2);
	CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
	CU_ASSERT(ids[0] == snapshotid3 || ids[1] == snapshotid3);

	/* 11. Try to create clone from read only blob */

	/* Mark blob as read only */
	spdk_blob_set_read_only(blob);
	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Create clone from read only blob */
	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	cloneid2 = g_blobid;

	spdk_bs_open_blob(bs, cloneid2, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	clone2 = g_blob;

	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid);

	count = SPDK_COUNTOF(ids);
	rc = spdk_blob_get_clones(bs, blobid, ids, &count);
	CU_ASSERT(rc == 0);
	CU_ASSERT(count == 1);
	CU_ASSERT(ids[0] == cloneid2);

	/* Close blobs */

	spdk_blob_close(clone2, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_blob_close(clone, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_blob_close(snapshot2, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_blob_close(snapshot3, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	ut_bs_reload(&bs, &bs_opts);

	/* Verify structure of loaded blob store */

	/* snapshot2 */
	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == SPDK_BLOBID_INVALID);

	count = SPDK_COUNTOF(ids);
	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
	CU_ASSERT(rc == 0);
	CU_ASSERT(count == 2);
	CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
	CU_ASSERT(ids[0] == snapshotid3 || ids[1] == snapshotid3);

	/* blob */
	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid2);
	count = SPDK_COUNTOF(ids);
	rc = spdk_blob_get_clones(bs, blobid, ids, &count);
	CU_ASSERT(rc == 0);
	CU_ASSERT(count == 1);
	CU_ASSERT(ids[0] == cloneid2);

	/* clone */
	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3);
	count = SPDK_COUNTOF(ids);
	rc = spdk_blob_get_clones(bs, cloneid, ids, &count);
	CU_ASSERT(rc == 0);
	CU_ASSERT(count == 0);

	/* snapshot3 */
	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid3) == snapshotid2);
	count = SPDK_COUNTOF(ids);
	rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count);
	CU_ASSERT(rc == 0);
	CU_ASSERT(count == 1);
	CU_ASSERT(ids[0] == cloneid);

	/* clone2 */
	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid);
	count = SPDK_COUNTOF(ids);
	rc = spdk_blob_get_clones(bs, cloneid2, ids, &count);
	CU_ASSERT(rc == 0);
	CU_ASSERT(count == 0);

	/* Try to delete all blobs in the worse possible order */

	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno != 0);

	spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno != 0);

	spdk_bs_delete_blob(bs, cloneid, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_delete_blob(bs, cloneid2, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	g_bs = NULL;
}
6416 
6417 /**
6418  * Snapshot-clones relation test 3
6419  *
6420  *         snapshot0
6421  *            |
6422  *         snapshot1
6423  *            |
6424  *         snapshot2
6425  *            |
6426  *           blob
6427  */
static void
blob_relations3(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct spdk_io_channel *channel;
	struct spdk_bs_opts bs_opts;
	struct spdk_blob_opts opts;
	struct spdk_blob *blob;
	spdk_blob_id blobid, snapshotid0, snapshotid1, snapshotid2;

	dev = init_dev();
	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
	snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");

	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	/* An io channel is required by the decouple_parent calls below. */
	channel = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(channel != NULL);

	/* 1. Create blob with 10 clusters */
	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);

	/* 2. Create snapshot0 */
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snapshotid0 = g_blobid;

	/* 3. Create snapshot1 */
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snapshotid1 = g_blobid;

	/* 4. Create snapshot2 */
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snapshotid2 = g_blobid;

	/* 5. Decouple blob
	 * NOTE(review): presumably re-parents blob past snapshot2 — the key
	 * point here is only that the operation succeeds; confirm exact
	 * re-parenting semantics against spdk_bs_blob_decouple_parent(). */
	spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* 6. Decouple snapshot2. Make sure updating md of snapshot2 is possible */
	spdk_bs_blob_decouple_parent(bs, channel, snapshotid2, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* 7. Delete blob */
	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* 8. Delete snapshot2.
	 * If md of snapshot 2 was updated, it should be possible to delete it */
	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Remove remaining blobs and unload bs */
	spdk_bs_delete_blob(bs, snapshotid1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_delete_blob(bs, snapshotid0, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	g_bs = NULL;
}
6523 
/*
 * Verify that syncing blob metadata always clears the "clean" flag in both
 * the in-memory blobstore and the on-disk super block, and that an injected
 * power failure at any point leaves the rest of the super block untouched.
 * The device failure threshold starts at 1 and is advanced by one on every
 * iteration, so each pass exercises a different failure point; the loop ends
 * once the md sync completes without hitting an injected failure.
 */
static void
blobstore_clean_power_failure(void)
{
	struct spdk_blob_store *bs;
	struct spdk_blob *blob;
	struct spdk_power_failure_thresholds thresholds = {};
	bool clean = false;
	/* The super block sits at offset 0 of the simulated device buffer,
	 * so it can be inspected and patched directly through this pointer. */
	struct spdk_bs_super_block *super = (struct spdk_bs_super_block *)&g_dev_buffer[0];
	struct spdk_bs_super_block super_copy = {};

	thresholds.general_threshold = 1;
	while (!clean) {
		/* Create bs and blob */
		suite_blob_setup();
		SPDK_CU_ASSERT_FATAL(g_bs != NULL);
		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
		bs = g_bs;
		blob = g_blob;

		/* Super block should not change for rest of the UT,
		 * save it and compare later. */
		memcpy(&super_copy, super, sizeof(struct spdk_bs_super_block));
		SPDK_CU_ASSERT_FATAL(super->clean == 0);
		SPDK_CU_ASSERT_FATAL(bs->clean == 0);

		/* Force bs/super block in a clean state.
		 * Along with marking blob dirty, to cause blob persist. */
		blob->state = SPDK_BLOB_STATE_DIRTY;
		bs->clean = 1;
		super->clean = 1;
		/* crc must be recomputed so the patched super block stays valid. */
		super->crc = blob_md_page_calc_crc(super);

		g_bserrno = -1;
		dev_set_power_failure_thresholds(thresholds);
		spdk_blob_sync_md(blob, blob_op_complete, NULL);
		poll_threads();
		dev_reset_power_failure_event();

		if (g_bserrno == 0) {
			/* After successful md sync, both bs and super block
			 * should be marked as not clean. */
			SPDK_CU_ASSERT_FATAL(bs->clean == 0);
			SPDK_CU_ASSERT_FATAL(super->clean == 0);
			clean = true;
		}

		/* Depending on the point of failure, super block was either updated or not. */
		super_copy.clean = super->clean;
		super_copy.crc = blob_md_page_calc_crc(&super_copy);
		/* Compare that the values in super block remained unchanged. */
		SPDK_CU_ASSERT_FATAL(!memcmp(&super_copy, super, sizeof(struct spdk_bs_super_block)));

		/* Delete blob and unload bs */
		suite_blob_cleanup();

		thresholds.general_threshold++;
	}
}
6582 
/*
 * Inject a power failure at every possible point of a snapshot deletion.
 * For increasing failure thresholds: create a blob + snapshot, attempt the
 * delete under failure injection, then dirty-load the blobstore and check
 * consistency — either the snapshot is still fully intact (clone still
 * references it, no pending-removal xattr visible, snapshot owns the 10
 * clusters) or the snapshot no longer opens and the clone owns the clusters.
 * The loop ends once a delete that reported success corresponds to the
 * snapshot actually being gone.
 */
static void
blob_delete_snapshot_power_failure(void)
{
	struct spdk_bs_dev *dev;
	struct spdk_blob_store *bs;
	struct spdk_blob_opts opts;
	struct spdk_blob *blob, *snapshot;
	struct spdk_power_failure_thresholds thresholds = {};
	spdk_blob_id blobid, snapshotid;
	const void *value;
	size_t value_len;
	size_t count;
	spdk_blob_id ids[3] = {};
	int rc;
	bool deleted = false;
	int delete_snapshot_bserrno = -1;

	thresholds.general_threshold = 1;
	while (!deleted) {
		dev = init_dev();

		spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		SPDK_CU_ASSERT_FATAL(g_bs != NULL);
		bs = g_bs;

		/* Create blob */
		ut_spdk_blob_opts_init(&opts);
		opts.num_clusters = 10;

		spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
		blobid = g_blobid;

		/* Create snapshot */
		spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
		snapshotid = g_blobid;
		/* With num_clusters == 10, cluster index 1 must be in use and
		 * index 11 must still be free. */
		SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1));
		SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11));

		dev_set_power_failure_thresholds(thresholds);

		spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
		poll_threads();
		/* Remember the delete's status to correlate with post-reload state. */
		delete_snapshot_bserrno = g_bserrno;

		/* Do not shut down cleanly. Assumption is that after snapshot deletion
		 * reports success, changes to both blobs should already persisted. */
		dev_reset_power_failure_event();
		ut_bs_dirty_load(&bs, NULL);

		SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1));
		SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11));

		spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
		blob = g_blob;
		SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == true);

		spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
		poll_threads();

		if (g_bserrno == 0) {
			/* Snapshot survived: full parent/clone relation must be intact. */
			SPDK_CU_ASSERT_FATAL(g_blob != NULL);
			snapshot = g_blob;
			CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
			count = SPDK_COUNTOF(ids);
			rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
			CU_ASSERT(rc == 0);
			CU_ASSERT(count == 1);
			CU_ASSERT(ids[0] == blobid);
			rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_PENDING_REMOVAL, &value, &value_len);
			CU_ASSERT(rc != 0);
			SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(snapshot) == false);
			CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);
			CU_ASSERT(spdk_blob_get_num_allocated_clusters(snapshot) == 10);

			spdk_blob_close(snapshot, blob_op_complete, NULL);
			poll_threads();
			CU_ASSERT(g_bserrno == 0);
		} else {
			CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID);
			CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 10);
			/* Snapshot might have been left in unrecoverable state, so it does not open.
			 * Yet delete might perform further changes to the clone after that.
			 * This UT should test until snapshot is deleted and delete call succeeds. */
			if (delete_snapshot_bserrno == 0) {
				deleted = true;
			}
		}

		spdk_blob_close(blob, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);

		spdk_bs_unload(bs, bs_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);

		thresholds.general_threshold++;
	}
}
6693 
/*
 * Inject a power failure at every possible point of snapshot creation.
 * For increasing failure thresholds: create a blob, attempt to snapshot it
 * under failure injection, then dirty-load the blobstore and check
 * consistency — either the snapshot is fully present (blob is now a thin
 * provisioned clone of it, no in-progress xattr remains) or creation never
 * completed and the blob is unchanged with all 10 clusters allocated.
 * The loop ends once a snapshot that reported success is found intact.
 */
static void
blob_create_snapshot_power_failure(void)
{
	/* NOTE(review): this initializer is dead — bs is reassigned from g_bs
	 * at the top of every loop iteration before use. */
	struct spdk_blob_store *bs = g_bs;
	struct spdk_bs_dev *dev;
	struct spdk_blob_opts opts;
	struct spdk_blob *blob, *snapshot;
	struct spdk_power_failure_thresholds thresholds = {};
	spdk_blob_id blobid, snapshotid;
	const void *value;
	size_t value_len;
	size_t count;
	spdk_blob_id ids[3] = {};
	int rc;
	bool created = false;
	int create_snapshot_bserrno = -1;

	thresholds.general_threshold = 1;
	while (!created) {
		dev = init_dev();

		spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		SPDK_CU_ASSERT_FATAL(g_bs != NULL);
		bs = g_bs;

		/* Create blob */
		ut_spdk_blob_opts_init(&opts);
		opts.num_clusters = 10;

		spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
		blobid = g_blobid;
		/* With num_clusters == 10, cluster index 1 must be in use and
		 * index 11 must still be free. */
		SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1));
		SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11));

		dev_set_power_failure_thresholds(thresholds);

		/* Create snapshot */
		spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
		poll_threads();
		create_snapshot_bserrno = g_bserrno;
		/* snapshotid stays SPDK_BLOBID_INVALID when creation failed
		 * before an id was assigned (checked below). */
		snapshotid = g_blobid;
		SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1));
		SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11));

		/* Do not shut down cleanly. Assumption is that after create snapshot
		 * reports success, both blobs should be power-fail safe. */
		dev_reset_power_failure_event();
		ut_bs_dirty_load(&bs, NULL);

		SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1));
		SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11));

		spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
		blob = g_blob;

		if (snapshotid != SPDK_BLOBID_INVALID) {
			spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
			poll_threads();
		}

		if ((snapshotid != SPDK_BLOBID_INVALID) && (g_bserrno == 0)) {
			/* Snapshot exists and opens: relation must be fully established. */
			SPDK_CU_ASSERT_FATAL(g_blob != NULL);
			snapshot = g_blob;
			SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == true);
			SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(snapshot) == false);
			CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);
			CU_ASSERT(spdk_blob_get_num_allocated_clusters(snapshot) == 10);
			CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
			count = SPDK_COUNTOF(ids);
			rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
			CU_ASSERT(rc == 0);
			CU_ASSERT(count == 1);
			CU_ASSERT(ids[0] == blobid);
			rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_IN_PROGRESS, &value, &value_len);
			CU_ASSERT(rc != 0);

			spdk_blob_close(snapshot, blob_op_complete, NULL);
			poll_threads();
			CU_ASSERT(g_bserrno == 0);
			if (create_snapshot_bserrno == 0) {
				created = true;
			}
		} else {
			CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID);
			SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == false);
			CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 10);
		}

		spdk_blob_close(blob, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);

		spdk_bs_unload(bs, bs_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);

		thresholds.general_threshold++;
	}
}
6801 
6802 static void
6803 test_io_write(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
6804 {
6805 	uint8_t payload_ff[64 * 512];
6806 	uint8_t payload_aa[64 * 512];
6807 	uint8_t payload_00[64 * 512];
6808 	uint8_t *cluster0, *cluster1;
6809 
6810 	memset(payload_ff, 0xFF, sizeof(payload_ff));
6811 	memset(payload_aa, 0xAA, sizeof(payload_aa));
6812 	memset(payload_00, 0x00, sizeof(payload_00));
6813 
6814 	/* Try to perform I/O with io unit = 512 */
6815 	spdk_blob_io_write(blob, channel, payload_ff, 0, 1, blob_op_complete, NULL);
6816 	poll_threads();
6817 	CU_ASSERT(g_bserrno == 0);
6818 
6819 	/* If thin provisioned is set cluster should be allocated now */
6820 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[0] != 0);
6821 	cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen];
6822 
6823 	/* Each character 0-F symbolizes single io_unit containing 512 bytes block filled with that character.
6824 	* Each page is separated by |. Whole block [...] symbolizes one cluster (containing 4 pages). */
6825 	/* cluster0: [ F000 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
6826 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6827 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 31 * 512) == 0);
6828 
6829 	/* Verify write with offset on first page */
6830 	spdk_blob_io_write(blob, channel, payload_ff, 2, 1, blob_op_complete, NULL);
6831 	poll_threads();
6832 	CU_ASSERT(g_bserrno == 0);
6833 
6834 	/* cluster0: [ F0F0 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
6835 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6836 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6837 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6838 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6839 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_00, 28 * 512) == 0);
6840 
6841 	/* Verify write with offset on first page */
6842 	spdk_blob_io_write(blob, channel, payload_ff, 4, 4, blob_op_complete, NULL);
6843 	poll_threads();
6844 
6845 	/* cluster0: [ F0F0 FFFF | 0000 0000 | 0000 0000 | 0000 0000 ] */
6846 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6847 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6848 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6849 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6850 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 4 * 512) == 0);
6851 	CU_ASSERT(memcmp(cluster0 + 8 * 512, payload_00, 24 * 512) == 0);
6852 
6853 	/* Verify write with offset on second page */
6854 	spdk_blob_io_write(blob, channel, payload_ff, 8, 4, blob_op_complete, NULL);
6855 	poll_threads();
6856 
6857 	/* cluster0: [ F0F0 FFFF | FFFF 0000 | 0000 0000 | 0000 0000 ] */
6858 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6859 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6860 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6861 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6862 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 8 * 512) == 0);
6863 	CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0);
6864 
6865 	/* Verify write across multiple pages */
6866 	spdk_blob_io_write(blob, channel, payload_aa, 4, 8, blob_op_complete, NULL);
6867 	poll_threads();
6868 
6869 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 0000 ] */
6870 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6871 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6872 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6873 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6874 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
6875 	CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0);
6876 
6877 	/* Verify write across multiple clusters */
6878 	spdk_blob_io_write(blob, channel, payload_ff, 28, 8, blob_op_complete, NULL);
6879 	poll_threads();
6880 
6881 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0);
6882 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
6883 
6884 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6885 	 * cluster1: [ FFFF 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
6886 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6887 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6888 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6889 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6890 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
6891 	CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0);
6892 
6893 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0);
6894 	CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 28 * 512) == 0);
6895 
6896 	/* Verify write to second cluster */
6897 	spdk_blob_io_write(blob, channel, payload_ff, 32 + 12, 2, blob_op_complete, NULL);
6898 	poll_threads();
6899 
6900 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0);
6901 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
6902 
6903 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6904 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] */
6905 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6906 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6907 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6908 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6909 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
6910 	CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0);
6911 
6912 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0);
6913 	CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 8 * 512) == 0);
6914 	CU_ASSERT(memcmp(cluster1 + 12 * 512, payload_ff, 2 * 512) == 0);
6915 	CU_ASSERT(memcmp(cluster1 + 14 * 512, payload_00, 18 * 512) == 0);
6916 }
6917 
6918 static void
6919 test_io_read(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
6920 {
6921 	uint8_t payload_read[64 * 512];
6922 	uint8_t payload_ff[64 * 512];
6923 	uint8_t payload_aa[64 * 512];
6924 	uint8_t payload_00[64 * 512];
6925 
6926 	memset(payload_ff, 0xFF, sizeof(payload_ff));
6927 	memset(payload_aa, 0xAA, sizeof(payload_aa));
6928 	memset(payload_00, 0x00, sizeof(payload_00));
6929 
6930 	/* Read only first io unit */
6931 	/* cluster0: [ (F)0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6932 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
6933 	 * payload_read: F000 0000 | 0000 0000 ... */
6934 	memset(payload_read, 0x00, sizeof(payload_read));
6935 	spdk_blob_io_read(blob, channel, payload_read, 0, 1, blob_op_complete, NULL);
6936 	poll_threads();
6937 	CU_ASSERT(g_bserrno == 0);
6938 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
6939 	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 31 * 512) == 0);
6940 
6941 	/* Read four io_units starting from offset = 2
6942 	 * cluster0: [ F0(F0 AA)AA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6943 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
6944 	 * payload_read: F0AA 0000 | 0000 0000 ... */
6945 
6946 	memset(payload_read, 0x00, sizeof(payload_read));
6947 	spdk_blob_io_read(blob, channel, payload_read, 2, 4, blob_op_complete, NULL);
6948 	poll_threads();
6949 	CU_ASSERT(g_bserrno == 0);
6950 
6951 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
6952 	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0);
6953 	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_aa, 512) == 0);
6954 	CU_ASSERT(memcmp(payload_read + 3 * 512, payload_aa, 512) == 0);
6955 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0);
6956 
6957 	/* Read eight io_units across multiple pages
6958 	 * cluster0: [ F0F0 (AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ]
6959 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
6960 	 * payload_read: AAAA AAAA | 0000 0000 ... */
6961 	memset(payload_read, 0x00, sizeof(payload_read));
6962 	spdk_blob_io_read(blob, channel, payload_read, 4, 8, blob_op_complete, NULL);
6963 	poll_threads();
6964 	CU_ASSERT(g_bserrno == 0);
6965 
6966 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_aa, 8 * 512) == 0);
6967 	CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0);
6968 
6969 	/* Read eight io_units across multiple clusters
6970 	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 (FFFF ]
6971 	 * cluster1: [ FFFF) 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
6972 	 * payload_read: FFFF FFFF | 0000 0000 ... */
6973 	memset(payload_read, 0x00, sizeof(payload_read));
6974 	spdk_blob_io_read(blob, channel, payload_read, 28, 8, blob_op_complete, NULL);
6975 	poll_threads();
6976 	CU_ASSERT(g_bserrno == 0);
6977 
6978 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 8 * 512) == 0);
6979 	CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0);
6980 
6981 	/* Read four io_units from second cluster
6982 	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6983 	 * cluster1: [ FFFF 0000 | 00(00 FF)00 | 0000 0000 | 0000 0000 ]
6984 	 * payload_read: 00FF 0000 | 0000 0000 ... */
6985 	memset(payload_read, 0x00, sizeof(payload_read));
6986 	spdk_blob_io_read(blob, channel, payload_read, 32 + 10, 4, blob_op_complete, NULL);
6987 	poll_threads();
6988 	CU_ASSERT(g_bserrno == 0);
6989 
6990 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_00, 2 * 512) == 0);
6991 	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 2 * 512) == 0);
6992 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0);
6993 
6994 	/* Read second cluster
6995 	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6996 	 * cluster1: [ (FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ]
6997 	 * payload_read: FFFF 0000 | 0000 FF00 ... */
6998 	memset(payload_read, 0x00, sizeof(payload_read));
6999 	spdk_blob_io_read(blob, channel, payload_read, 32, 32, blob_op_complete, NULL);
7000 	poll_threads();
7001 	CU_ASSERT(g_bserrno == 0);
7002 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 4 * 512) == 0);
7003 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 8 * 512) == 0);
7004 	CU_ASSERT(memcmp(payload_read + 12 * 512, payload_ff, 2 * 512) == 0);
7005 	CU_ASSERT(memcmp(payload_read + 14 * 512, payload_00, 18 * 512) == 0);
7006 
7007 	/* Read whole two clusters
7008 	 * cluster0: [ (F0F0 AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ]
7009 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ] */
7010 	memset(payload_read, 0x00, sizeof(payload_read));
7011 	spdk_blob_io_read(blob, channel, payload_read, 0, 64, blob_op_complete, NULL);
7012 	poll_threads();
7013 	CU_ASSERT(g_bserrno == 0);
7014 
7015 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
7016 	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0);
7017 	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 512) == 0);
7018 	CU_ASSERT(memcmp(payload_read + 3 * 512, payload_00, 512) == 0);
7019 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_aa, 8 * 512) == 0);
7020 	CU_ASSERT(memcmp(payload_read + 28 * 512, payload_ff, 4 * 512) == 0);
7021 
7022 	CU_ASSERT(memcmp(payload_read + (32 + 0) * 512, payload_ff, 4 * 512) == 0);
7023 	CU_ASSERT(memcmp(payload_read + (32 + 4) * 512, payload_00, 8 * 512) == 0);
7024 	CU_ASSERT(memcmp(payload_read + (32 + 12) * 512, payload_ff, 2 * 512) == 0);
7025 	CU_ASSERT(memcmp(payload_read + (32 + 14) * 512, payload_00, 18 * 512) == 0);
7026 }
7027 
7028 
7029 static void
7030 test_io_unmap(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
7031 {
7032 	uint8_t payload_ff[64 * 512];
7033 	uint8_t payload_aa[64 * 512];
7034 	uint8_t payload_00[64 * 512];
7035 	uint8_t *cluster0, *cluster1;
7036 
7037 	memset(payload_ff, 0xFF, sizeof(payload_ff));
7038 	memset(payload_aa, 0xAA, sizeof(payload_aa));
7039 	memset(payload_00, 0x00, sizeof(payload_00));
7040 
7041 	cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen];
7042 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
7043 
7044 	/* Unmap */
7045 	spdk_blob_io_unmap(blob, channel, 0, 64, blob_op_complete, NULL);
7046 	poll_threads();
7047 
7048 	CU_ASSERT(g_bserrno == 0);
7049 
7050 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_00, 32 * 512) == 0);
7051 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_00, 32 * 512) == 0);
7052 }
7053 
7054 static void
7055 test_io_zeroes(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
7056 {
7057 	uint8_t payload_ff[64 * 512];
7058 	uint8_t payload_aa[64 * 512];
7059 	uint8_t payload_00[64 * 512];
7060 	uint8_t *cluster0, *cluster1;
7061 
7062 	memset(payload_ff, 0xFF, sizeof(payload_ff));
7063 	memset(payload_aa, 0xAA, sizeof(payload_aa));
7064 	memset(payload_00, 0x00, sizeof(payload_00));
7065 
7066 	cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen];
7067 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
7068 
7069 	/* Write zeroes  */
7070 	spdk_blob_io_write_zeroes(blob, channel, 0, 64, blob_op_complete, NULL);
7071 	poll_threads();
7072 
7073 	CU_ASSERT(g_bserrno == 0);
7074 
7075 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_00, 32 * 512) == 0);
7076 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_00, 32 * 512) == 0);
7077 }
7078 
7079 static inline void
7080 test_blob_io_writev(struct spdk_blob *blob, struct spdk_io_channel *channel,
7081 		    struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
7082 		    spdk_blob_op_complete cb_fn, void *cb_arg, struct spdk_blob_ext_io_opts *io_opts)
7083 {
7084 	if (io_opts) {
7085 		g_dev_writev_ext_called = false;
7086 		memset(&g_blob_ext_io_opts, 0, sizeof(g_blob_ext_io_opts));
7087 		spdk_blob_io_writev_ext(blob, channel, iov, iovcnt, offset, length, blob_op_complete, NULL,
7088 					io_opts);
7089 	} else {
7090 		spdk_blob_io_writev(blob, channel, iov, iovcnt, offset, length, blob_op_complete, NULL);
7091 	}
7092 	poll_threads();
7093 	CU_ASSERT(g_bserrno == 0);
7094 	if (io_opts) {
7095 		CU_ASSERT(g_dev_writev_ext_called);
7096 		CU_ASSERT(memcmp(io_opts, &g_blob_ext_io_opts, sizeof(g_blob_ext_io_opts)) == 0);
7097 	}
7098 }
7099 
7100 static void
7101 test_iov_write(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel,
7102 	       bool ext_api)
7103 {
7104 	uint8_t payload_ff[64 * 512];
7105 	uint8_t payload_aa[64 * 512];
7106 	uint8_t payload_00[64 * 512];
7107 	uint8_t *cluster0, *cluster1;
7108 	struct iovec iov[4];
7109 	struct spdk_blob_ext_io_opts ext_opts = {
7110 		.memory_domain = (struct spdk_memory_domain *)0xfeedbeef,
7111 		.memory_domain_ctx = (void *)0xf00df00d,
7112 		.size = sizeof(struct spdk_blob_ext_io_opts),
7113 		.user_ctx = (void *)123,
7114 	};
7115 
7116 	memset(payload_ff, 0xFF, sizeof(payload_ff));
7117 	memset(payload_aa, 0xAA, sizeof(payload_aa));
7118 	memset(payload_00, 0x00, sizeof(payload_00));
7119 
7120 	/* Try to perform I/O with io unit = 512 */
7121 	iov[0].iov_base = payload_ff;
7122 	iov[0].iov_len = 1 * 512;
7123 
7124 	test_blob_io_writev(blob, channel, iov, 1, 0, 1, blob_op_complete, NULL,
7125 			    ext_api ? &ext_opts : NULL);
7126 
7127 	/* If thin provisioned is set cluster should be allocated now */
7128 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[0] != 0);
7129 	cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen];
7130 
7131 	/* Each character 0-F symbolizes single io_unit containing 512 bytes block filled with that character.
7132 	* Each page is separated by |. Whole block [...] symbolizes one cluster (containing 4 pages). */
7133 	/* cluster0: [ F000 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
7134 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
7135 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 31 * 512) == 0);
7136 
7137 	/* Verify write with offset on first page */
7138 	iov[0].iov_base = payload_ff;
7139 	iov[0].iov_len = 1 * 512;
7140 
7141 	test_blob_io_writev(blob, channel, iov, 1, 2, 1, blob_op_complete, NULL,
7142 			    ext_api ? &ext_opts : NULL);
7143 
7144 	/* cluster0: [ F0F0 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
7145 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
7146 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
7147 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
7148 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
7149 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_00, 28 * 512) == 0);
7150 
7151 	/* Verify write with offset on first page */
7152 	iov[0].iov_base = payload_ff;
7153 	iov[0].iov_len = 4 * 512;
7154 	spdk_blob_io_writev(blob, channel, iov, 1, 4, 4, blob_op_complete, NULL);
7155 	poll_threads();
7156 
7157 	/* cluster0: [ F0F0 FFFF | 0000 0000 | 0000 0000 | 0000 0000 ] */
7158 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
7159 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
7160 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
7161 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
7162 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 4 * 512) == 0);
7163 	CU_ASSERT(memcmp(cluster0 + 8 * 512, payload_00, 24 * 512) == 0);
7164 
7165 	/* Verify write with offset on second page */
7166 	iov[0].iov_base = payload_ff;
7167 	iov[0].iov_len = 4 * 512;
7168 	spdk_blob_io_writev(blob, channel, iov, 1, 8, 4, blob_op_complete, NULL);
7169 	poll_threads();
7170 
7171 	/* cluster0: [ F0F0 FFFF | FFFF 0000 | 0000 0000 | 0000 0000 ] */
7172 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
7173 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
7174 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
7175 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
7176 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 8 * 512) == 0);
7177 	CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0);
7178 
7179 	/* Verify write across multiple pages */
7180 	iov[0].iov_base = payload_aa;
7181 	iov[0].iov_len = 8 * 512;
7182 
7183 	test_blob_io_writev(blob, channel, iov, 1, 4, 8, blob_op_complete, NULL,
7184 			    ext_api ? &ext_opts : NULL);
7185 
7186 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 0000 ] */
7187 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
7188 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
7189 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
7190 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
7191 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
7192 	CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0);
7193 
7194 	/* Verify write across multiple clusters */
7195 
7196 	iov[0].iov_base = payload_ff;
7197 	iov[0].iov_len = 8 * 512;
7198 
7199 	test_blob_io_writev(blob, channel, iov, 1, 28, 8, blob_op_complete, NULL,
7200 			    ext_api ? &ext_opts : NULL);
7201 
7202 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0);
7203 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
7204 
7205 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
7206 	 * cluster1: [ FFFF 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
7207 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
7208 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
7209 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
7210 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
7211 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
7212 	CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 16 * 512) == 0);
7213 	CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0);
7214 
7215 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0);
7216 	CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 28 * 512) == 0);
7217 
7218 	/* Verify write to second cluster */
7219 
7220 	iov[0].iov_base = payload_ff;
7221 	iov[0].iov_len = 2 * 512;
7222 
7223 	test_blob_io_writev(blob, channel, iov, 1, 32 + 12, 2, blob_op_complete, NULL,
7224 			    ext_api ? &ext_opts : NULL);
7225 
7226 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0);
7227 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
7228 
7229 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
7230 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] */
7231 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
7232 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
7233 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
7234 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
7235 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
7236 	CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0);
7237 
7238 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0);
7239 	CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 8 * 512) == 0);
7240 	CU_ASSERT(memcmp(cluster1 + 12 * 512, payload_ff, 2 * 512) == 0);
7241 	CU_ASSERT(memcmp(cluster1 + 14 * 512, payload_00, 18 * 512) == 0);
7242 }
7243 
7244 static inline void
7245 test_blob_io_readv(struct spdk_blob *blob, struct spdk_io_channel *channel,
7246 		   struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
7247 		   spdk_blob_op_complete cb_fn, void *cb_arg, struct spdk_blob_ext_io_opts *io_opts)
7248 {
7249 	if (io_opts) {
7250 		g_dev_readv_ext_called = false;
7251 		memset(&g_blob_ext_io_opts, 0, sizeof(g_blob_ext_io_opts));
7252 		spdk_blob_io_readv_ext(blob, channel, iov, iovcnt, offset, length, blob_op_complete, NULL, io_opts);
7253 	} else {
7254 		spdk_blob_io_readv(blob, channel, iov, iovcnt, offset, length, blob_op_complete, NULL);
7255 	}
7256 	poll_threads();
7257 	CU_ASSERT(g_bserrno == 0);
7258 	if (io_opts) {
7259 		CU_ASSERT(g_dev_readv_ext_called);
7260 		CU_ASSERT(memcmp(io_opts, &g_blob_ext_io_opts, sizeof(g_blob_ext_io_opts)) == 0);
7261 	}
7262 }
7263 
/*
 * Read back, through vectored I/O, the data pattern left on the blob by the
 * preceding iov write test and verify every byte.  Diagram notation (same as
 * the write test): each hex digit is one 512-byte io_unit, '|' separates
 * pages, [...] delimits one cluster, and (...) marks the io_units covered by
 * the current read.  When ext_api is true the *_ext read API is exercised
 * with ext_opts and the helper checks the opts reach the underlying bs_dev.
 */
static void
test_iov_read(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel,
	      bool ext_api)
{
	uint8_t payload_read[64 * 512];
	uint8_t payload_ff[64 * 512];	/* reference buffer: all 0xFF */
	uint8_t payload_aa[64 * 512];	/* reference buffer: all 0xAA */
	uint8_t payload_00[64 * 512];	/* reference buffer: all 0x00 */
	struct iovec iov[4];
	/* Arbitrary non-NULL pointer/context values; when ext_api is used the
	 * helper verifies these are forwarded unchanged to the bs_dev. */
	struct spdk_blob_ext_io_opts ext_opts = {
		.memory_domain = (struct spdk_memory_domain *)0xfeedbeef,
		.memory_domain_ctx = (void *)0xf00df00d,
		.size = sizeof(struct spdk_blob_ext_io_opts),
		.user_ctx = (void *)123,
	};

	memset(payload_ff, 0xFF, sizeof(payload_ff));
	memset(payload_aa, 0xAA, sizeof(payload_aa));
	memset(payload_00, 0x00, sizeof(payload_00));

	/* Read only first io unit */
	/* cluster0: [ (F)0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
	 * payload_read: F000 0000 | 0000 0000 ... */
	memset(payload_read, 0x00, sizeof(payload_read));
	iov[0].iov_base = payload_read;
	iov[0].iov_len = 1 * 512;

	test_blob_io_readv(blob, channel, iov, 1, 0, 1, blob_op_complete, NULL, ext_api ? &ext_opts : NULL);

	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 31 * 512) == 0);

	/* Read four io_units starting from offset = 2
	 * cluster0: [ F0(F0 AA)AA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
	 * payload_read: F0AA 0000 | 0000 0000 ... */

	memset(payload_read, 0x00, sizeof(payload_read));
	iov[0].iov_base = payload_read;
	iov[0].iov_len = 4 * 512;

	test_blob_io_readv(blob, channel, iov, 1, 2, 4, blob_op_complete, NULL, ext_api ? &ext_opts : NULL);

	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0);
	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_aa, 512) == 0);
	CU_ASSERT(memcmp(payload_read + 3 * 512, payload_aa, 512) == 0);
	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0);

	/* Read eight io_units across multiple pages
	 * cluster0: [ F0F0 (AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ]
	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
	 * payload_read: AAAA AAAA | 0000 0000 ... */
	memset(payload_read, 0x00, sizeof(payload_read));
	iov[0].iov_base = payload_read;
	iov[0].iov_len = 4 * 512;
	iov[1].iov_base = payload_read + 4 * 512;
	iov[1].iov_len = 4 * 512;

	test_blob_io_readv(blob, channel, iov, 2, 4, 8, blob_op_complete, NULL, ext_api ? &ext_opts : NULL);

	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_aa, 8 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0);

	/* Read eight io_units across multiple clusters
	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 (FFFF ]
	 * cluster1: [ FFFF) 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
	 * payload_read: FFFF FFFF | 0000 0000 ... */
	memset(payload_read, 0x00, sizeof(payload_read));
	iov[0].iov_base = payload_read;
	iov[0].iov_len = 2 * 512;
	iov[1].iov_base = payload_read + 2 * 512;
	iov[1].iov_len = 2 * 512;
	iov[2].iov_base = payload_read + 4 * 512;
	iov[2].iov_len = 2 * 512;
	iov[3].iov_base = payload_read + 6 * 512;
	iov[3].iov_len = 2 * 512;

	test_blob_io_readv(blob, channel, iov, 4, 28, 8, blob_op_complete, NULL,
			   ext_api ? &ext_opts : NULL);

	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 8 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0);

	/* Read four io_units from second cluster
	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
	 * cluster1: [ FFFF 0000 | 00(00 FF)00 | 0000 0000 | 0000 0000 ]
	 * payload_read: 00FF 0000 | 0000 0000 ... */
	memset(payload_read, 0x00, sizeof(payload_read));
	iov[0].iov_base = payload_read;
	iov[0].iov_len = 1 * 512;
	iov[1].iov_base = payload_read + 1 * 512;
	iov[1].iov_len = 3 * 512;

	test_blob_io_readv(blob, channel, iov, 2, 32 + 10, 4, blob_op_complete, NULL,
			   ext_api ? &ext_opts : NULL);

	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_00, 2 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 2 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0);

	/* Read second cluster
	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
	 * cluster1: [ (FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ]
	 * payload_read: FFFF 0000 | 0000 FF00 ... */
	memset(payload_read, 0x00, sizeof(payload_read));
	iov[0].iov_base = payload_read;
	iov[0].iov_len = 1 * 512;
	iov[1].iov_base = payload_read + 1 * 512;
	iov[1].iov_len = 2 * 512;
	iov[2].iov_base = payload_read + 3 * 512;
	iov[2].iov_len = 4 * 512;
	iov[3].iov_base = payload_read + 7 * 512;
	iov[3].iov_len = 25 * 512;

	test_blob_io_readv(blob, channel, iov, 4, 32, 32, blob_op_complete, NULL,
			   ext_api ? &ext_opts : NULL);

	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 4 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 8 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + 12 * 512, payload_ff, 2 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + 14 * 512, payload_00, 18 * 512) == 0);

	/* Read whole two clusters (offset 0, length 64 io_units)
	 * cluster0: [ (F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ] */
	memset(payload_read, 0x00, sizeof(payload_read));
	iov[0].iov_base = payload_read;
	iov[0].iov_len = 1 * 512;
	iov[1].iov_base = payload_read + 1 * 512;
	iov[1].iov_len = 8 * 512;
	iov[2].iov_base = payload_read + 9 * 512;
	iov[2].iov_len = 16 * 512;
	iov[3].iov_base = payload_read + 25 * 512;
	iov[3].iov_len = 39 * 512;

	test_blob_io_readv(blob, channel, iov, 4, 0, 64, blob_op_complete, NULL,
			   ext_api ? &ext_opts : NULL);

	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0);
	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 512) == 0);
	CU_ASSERT(memcmp(payload_read + 3 * 512, payload_00, 512) == 0);
	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_aa, 8 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + 28 * 512, payload_ff, 4 * 512) == 0);

	/* Second cluster's contents land at offset 32 io_units in the buffer */
	CU_ASSERT(memcmp(payload_read + (32 + 0) * 512, payload_ff, 4 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + (32 + 4) * 512, payload_00, 8 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + (32 + 12) * 512, payload_ff, 2 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + (32 + 14) * 512, payload_00, 18 * 512) == 0);
}
7416 
/*
 * Exercise all blob I/O paths (write, read, zeroes, vectored write/read with
 * both the regular and *_ext APIs, and unmap) on a blobstore whose io_unit is
 * 512 bytes: first on a thick-provisioned blob, then on a thin-provisioned
 * one, then on a snapshot/clone chain built from the thin blob, and finally
 * on the clone after it has been inflated.
 */
static void
blob_io_unit(void)
{
	struct spdk_bs_opts bsopts;
	struct spdk_blob_opts opts;
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct spdk_blob *blob, *snapshot, *clone;
	spdk_blob_id blobid;
	struct spdk_io_channel *channel;

	/* Create dev with 512 bytes io unit size */

	spdk_bs_opts_init(&bsopts, sizeof(bsopts));
	bsopts.cluster_sz = SPDK_BS_PAGE_SIZE * 4;	/* 8 * 4 = 32 io_unit */
	snprintf(bsopts.bstype.bstype, sizeof(bsopts.bstype.bstype), "TESTTYPE");

	/* Back the store with a device using 512-byte blocks; init below is
	 * expected to succeed with this io_unit size. */
	dev = init_dev();
	dev->blocklen = 512;
	dev->blockcnt =  DEV_BUFFER_SIZE / dev->blocklen;

	/* Initialize a new blob store */
	spdk_bs_init(dev, &bsopts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	CU_ASSERT(spdk_bs_get_io_unit_size(bs) == 512);
	channel = spdk_bs_alloc_io_channel(bs);

	/* Create thick provisioned blob */
	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = false;
	opts.num_clusters = 32;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);

	test_io_write(dev, blob, channel);
	test_io_read(dev, blob, channel);
	test_io_zeroes(dev, blob, channel);

	/* Vectored I/O through the regular API, then zero out again so the
	 * ext-API pass below starts from the same clean state. */
	test_iov_write(dev, blob, channel, false);
	test_iov_read(dev, blob, channel, false);
	test_io_zeroes(dev, blob, channel);

	test_iov_write(dev, blob, channel, true);
	test_iov_read(dev, blob, channel, true);

	test_io_unmap(dev, blob, channel);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	blob = NULL;
	g_blob = NULL;

	/* Create thin provisioned blob */

	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = true;
	opts.num_clusters = 32;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);

	test_io_write(dev, blob, channel);
	test_io_read(dev, blob, channel);
	test_io_zeroes(dev, blob, channel);

	test_iov_write(dev, blob, channel, false);
	test_iov_read(dev, blob, channel, false);
	test_io_zeroes(dev, blob, channel);

	test_iov_write(dev, blob, channel, true);
	test_iov_read(dev, blob, channel, true);

	/* Create snapshot of the thin blob, then a clone of that snapshot.
	 * blobid is repointed to each new blob in turn. */

	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	snapshot = g_blob;

	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	clone = g_blob;

	/* All three blobs in the chain must read back the same data */
	test_io_read(dev, blob, channel);
	test_io_read(dev, snapshot, channel);
	test_io_read(dev, clone, channel);

	test_iov_read(dev, blob, channel, false);
	test_iov_read(dev, snapshot, channel, false);
	test_iov_read(dev, clone, channel, false);

	test_iov_read(dev, blob, channel, true);
	test_iov_read(dev, snapshot, channel, true);
	test_iov_read(dev, clone, channel, true);

	/* Inflate clone (blobid currently refers to the clone) */

	spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
	poll_threads();

	CU_ASSERT(g_bserrno == 0);

	/* Inflation must not change the clone's visible data */
	test_io_read(dev, clone, channel);

	test_io_unmap(dev, clone, channel);

	test_iov_write(dev, clone, channel, false);
	test_iov_read(dev, clone, channel, false);
	test_io_unmap(dev, clone, channel);

	test_iov_write(dev, clone, channel, true);
	test_iov_read(dev, clone, channel, true);

	/* Close all three; only the status of the last close is checked */
	spdk_blob_close(blob, blob_op_complete, NULL);
	spdk_blob_close(snapshot, blob_op_complete, NULL);
	spdk_blob_close(clone, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	blob = NULL;
	g_blob = NULL;

	spdk_bs_free_io_channel(channel);
	poll_threads();

	/* Unload the blob store */
	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
	g_blob = NULL;
	g_blobid = 0;
}
7571 
7572 static void
7573 blob_io_unit_compatibility(void)
7574 {
7575 	struct spdk_bs_opts bsopts;
7576 	struct spdk_blob_store *bs;
7577 	struct spdk_bs_dev *dev;
7578 	struct spdk_bs_super_block *super;
7579 
7580 	/* Create dev with 512 bytes io unit size */
7581 
7582 	spdk_bs_opts_init(&bsopts, sizeof(bsopts));
7583 	bsopts.cluster_sz = SPDK_BS_PAGE_SIZE * 4;	/* 8 * 4 = 32 io_unit */
7584 	snprintf(bsopts.bstype.bstype, sizeof(bsopts.bstype.bstype), "TESTTYPE");
7585 
7586 	/* Try to initialize a new blob store with unsupported io_unit */
7587 	dev = init_dev();
7588 	dev->blocklen = 512;
7589 	dev->blockcnt =  DEV_BUFFER_SIZE / dev->blocklen;
7590 
7591 	/* Initialize a new blob store */
7592 	spdk_bs_init(dev, &bsopts, bs_op_with_handle_complete, NULL);
7593 	poll_threads();
7594 	CU_ASSERT(g_bserrno == 0);
7595 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
7596 	bs = g_bs;
7597 
7598 	CU_ASSERT(spdk_bs_get_io_unit_size(bs) == 512);
7599 
7600 	/* Unload the blob store */
7601 	spdk_bs_unload(bs, bs_op_complete, NULL);
7602 	poll_threads();
7603 	CU_ASSERT(g_bserrno == 0);
7604 
7605 	/* Modify super block to behave like older version.
7606 	 * Check if loaded io unit size equals SPDK_BS_PAGE_SIZE */
7607 	super = (struct spdk_bs_super_block *)&g_dev_buffer[0];
7608 	super->io_unit_size = 0;
7609 	super->crc = blob_md_page_calc_crc(super);
7610 
7611 	dev = init_dev();
7612 	dev->blocklen = 512;
7613 	dev->blockcnt =  DEV_BUFFER_SIZE / dev->blocklen;
7614 
7615 	spdk_bs_load(dev, &bsopts, bs_op_with_handle_complete, NULL);
7616 	poll_threads();
7617 	CU_ASSERT(g_bserrno == 0);
7618 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
7619 	bs = g_bs;
7620 
7621 	CU_ASSERT(spdk_bs_get_io_unit_size(bs) == SPDK_BS_PAGE_SIZE);
7622 
7623 	/* Unload the blob store */
7624 	spdk_bs_unload(bs, bs_op_complete, NULL);
7625 	poll_threads();
7626 	CU_ASSERT(g_bserrno == 0);
7627 
7628 	g_bs = NULL;
7629 	g_blob = NULL;
7630 	g_blobid = 0;
7631 }
7632 
7633 static void
7634 first_sync_complete(void *cb_arg, int bserrno)
7635 {
7636 	struct spdk_blob *blob = cb_arg;
7637 	int rc;
7638 
7639 	CU_ASSERT(bserrno == 0);
7640 	rc = spdk_blob_set_xattr(blob, "sync", "second", strlen("second") + 1);
7641 	CU_ASSERT(rc == 0);
7642 	CU_ASSERT(g_bserrno == -1);
7643 
7644 	/* Keep g_bserrno at -1, only the
7645 	 * second sync completion should set it at 0. */
7646 }
7647 
7648 static void
7649 second_sync_complete(void *cb_arg, int bserrno)
7650 {
7651 	struct spdk_blob *blob = cb_arg;
7652 	const void *value;
7653 	size_t value_len;
7654 	int rc;
7655 
7656 	CU_ASSERT(bserrno == 0);
7657 
7658 	/* Verify that the first sync completion had a chance to execute */
7659 	rc = spdk_blob_get_xattr_value(blob, "sync", &value, &value_len);
7660 	CU_ASSERT(rc == 0);
7661 	SPDK_CU_ASSERT_FATAL(value != NULL);
7662 	CU_ASSERT(value_len == strlen("second") + 1);
7663 	CU_ASSERT_NSTRING_EQUAL_FATAL(value, "second", value_len);
7664 
7665 	CU_ASSERT(g_bserrno == -1);
7666 	g_bserrno = bserrno;
7667 }
7668 
/*
 * Verify that a blob delete issued while another locked operation (snapshot,
 * inflate, clone, resize) is in flight fails immediately with -EBUSY and does
 * not disturb the in-flight operation.  Also verifies that two consecutive
 * md syncs both complete, in order, via first/second_sync_complete.
 *
 * Note on the asserts: each spdk_bs_* call only starts the async operation;
 * g_bserrno checked *before* poll_threads() reflects the synchronously failed
 * delete, and checked *after* poll_threads() reflects the completed operation.
 */
static void
blob_simultaneous_operations(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts opts;
	struct spdk_blob *blob, *snapshot;
	spdk_blob_id blobid, snapshotid;
	struct spdk_io_channel *channel;
	int rc;

	channel = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(channel != NULL);

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);

	/* Create snapshot and try to remove blob in the same time:
	 * - snapshot should be created successfully
	 * - delete operation should fail w -EBUSY */
	CU_ASSERT(blob->locked_operation_in_progress == false);
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	CU_ASSERT(blob->locked_operation_in_progress == true);
	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
	CU_ASSERT(blob->locked_operation_in_progress == true);
	/* Deletion failure */
	CU_ASSERT(g_bserrno == -EBUSY);
	poll_threads();
	CU_ASSERT(blob->locked_operation_in_progress == false);
	/* Snapshot creation success */
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);

	snapshotid = g_blobid;

	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot = g_blob;

	/* Inflate blob and try to remove blob in the same time:
	 * - blob should be inflated successfully
	 * - delete operation should fail w -EBUSY */
	CU_ASSERT(blob->locked_operation_in_progress == false);
	spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
	CU_ASSERT(blob->locked_operation_in_progress == true);
	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
	CU_ASSERT(blob->locked_operation_in_progress == true);
	/* Deletion failure */
	CU_ASSERT(g_bserrno == -EBUSY);
	poll_threads();
	CU_ASSERT(blob->locked_operation_in_progress == false);
	/* Inflation success */
	CU_ASSERT(g_bserrno == 0);

	/* Clone snapshot and try to remove snapshot in the same time:
	 * - snapshot should be cloned successfully
	 * - delete operation should fail w -EBUSY
	 * NOTE(review): here the locked operation is on the *snapshot*, while
	 * the flag checked below belongs to 'blob' (which stays unlocked);
	 * there is no mid-operation check on snapshot's flag in this case. */
	CU_ASSERT(blob->locked_operation_in_progress == false);
	spdk_bs_create_clone(bs, snapshotid, NULL, blob_op_with_id_complete, NULL);
	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
	/* Deletion failure */
	CU_ASSERT(g_bserrno == -EBUSY);
	poll_threads();
	CU_ASSERT(blob->locked_operation_in_progress == false);
	/* Clone created */
	CU_ASSERT(g_bserrno == 0);

	/* Resize blob and try to remove blob in the same time:
	 * - blob should be resized successfully
	 * - delete operation should fail w -EBUSY */
	CU_ASSERT(blob->locked_operation_in_progress == false);
	spdk_blob_resize(blob, 50, blob_op_complete, NULL);
	CU_ASSERT(blob->locked_operation_in_progress == true);
	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
	CU_ASSERT(blob->locked_operation_in_progress == true);
	/* Deletion failure */
	CU_ASSERT(g_bserrno == -EBUSY);
	poll_threads();
	CU_ASSERT(blob->locked_operation_in_progress == false);
	/* Blob resized successfully */
	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Issue two consecutive blob syncs, neither should fail.
	 * Force sync to actually occur by marking blob dirty each time.
	 * Execution of sync should not be enough to complete the operation,
	 * since disk I/O is required to complete it. */
	g_bserrno = -1;

	rc = spdk_blob_set_xattr(blob, "sync", "first", strlen("first") + 1);
	CU_ASSERT(rc == 0);
	spdk_blob_sync_md(blob, first_sync_complete, blob);
	CU_ASSERT(g_bserrno == -1);

	spdk_blob_sync_md(blob, second_sync_complete, blob);
	CU_ASSERT(g_bserrno == -1);

	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();

	ut_blob_close_and_delete(bs, snapshot);
	ut_blob_close_and_delete(bs, blob);
}
7780 
/*
 * Verify md persistence when a sync is interrupted mid-flight: set a large
 * xattr (big enough to need an extra md page), start a sync, advance the
 * poller only a limited number of iterations, then remove the xattr and sync
 * again.  After a blobstore reload the xattr must not be present, regardless
 * of how far the first sync got.  The poller iteration count grows by one
 * each pass, so every partial-completion point of the first sync is covered.
 */
static void
blob_persist_test(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts opts;
	struct spdk_blob *blob;
	spdk_blob_id blobid;
	struct spdk_io_channel *channel;
	char *xattr;
	size_t xattr_length;
	int rc;
	uint32_t page_count_clear, page_count_xattr;
	uint64_t poller_iterations;
	bool run_poller;

	channel = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(channel != NULL);

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);

	/* Save the amount of md pages used after creation of a blob.
	 * This should be consistent after removing xattr. */
	page_count_clear = spdk_bit_array_count_set(bs->used_md_pages);
	SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_clear);
	SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_clear);

	/* Add xattr with maximum length of descriptor to exceed single metadata page. */
	xattr_length = SPDK_BS_MAX_DESC_SIZE - sizeof(struct spdk_blob_md_descriptor_xattr) -
		       strlen("large_xattr");
	xattr = calloc(xattr_length, sizeof(char));
	SPDK_CU_ASSERT_FATAL(xattr != NULL);

	rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(g_bserrno == 0);

	/* Save the amount of md pages used after adding the large xattr */
	page_count_xattr = spdk_bit_array_count_set(bs->used_md_pages);
	SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_xattr);
	SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_xattr);

	/* Add xattr to a blob and sync it. While sync is occurring, remove the xattr and sync again.
	 * Interrupt the first sync after increasing number of poller iterations, until it succeeds.
	 * Expectation is that after second sync completes no xattr is saved in metadata. */
	poller_iterations = 1;
	run_poller = true;
	while (run_poller) {
		rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
		SPDK_CU_ASSERT_FATAL(rc == 0);
		g_bserrno = -1;
		spdk_blob_sync_md(blob, blob_op_complete, NULL);
		/* Only advance the reactor a bounded number of steps - the first
		 * sync may or may not have completed when we proceed below. */
		poll_thread_times(0, poller_iterations);
		if (g_bserrno == 0) {
			/* Poller iteration count was high enough for first sync to complete.
			 * Verify that blob takes up enough of md_pages to store the xattr. */
			SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_xattr);
			SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_xattr);
			SPDK_CU_ASSERT_FATAL(spdk_bit_array_count_set(bs->used_md_pages) == page_count_xattr);
			/* First sync finally completed un-interrupted - last pass */
			run_poller = false;
		}
		rc = spdk_blob_remove_xattr(blob, "large_xattr");
		SPDK_CU_ASSERT_FATAL(rc == 0);
		spdk_blob_sync_md(blob, blob_op_complete, NULL);
		poll_threads();
		SPDK_CU_ASSERT_FATAL(g_bserrno == 0);
		SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_clear);
		SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_clear);
		SPDK_CU_ASSERT_FATAL(spdk_bit_array_count_set(bs->used_md_pages) == page_count_clear);

		/* Reload bs and re-open blob to verify that xattr was not persisted. */
		spdk_blob_close(blob, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);

		ut_bs_reload(&bs, NULL);

		spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
		blob = g_blob;

		/* NOTE(review): 'xattr' is reused as the out-parameter here; this is
		 * only safe because the lookup is asserted to fail with -ENOENT and
		 * presumably leaves the pointer untouched, so the free() below still
		 * frees the original calloc'd buffer - confirm against
		 * spdk_blob_get_xattr_value's failure semantics. */
		rc = spdk_blob_get_xattr_value(blob, "large_xattr", (const void **)&xattr, &xattr_length);
		SPDK_CU_ASSERT_FATAL(rc == -ENOENT);

		poller_iterations++;
		/* Stop at high iteration count to prevent infinite loop.
		 * This value should be enough for first md sync to complete in any case. */
		SPDK_CU_ASSERT_FATAL(poller_iterations < 50);
	}

	free(xattr);

	ut_blob_close_and_delete(bs, blob);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}
7885 
/*
 * Build a two-deep snapshot chain (blob -> snapshot2 -> snapshot1), decouple
 * snapshot2 from its parent, and verify that afterwards snapshot2 has no
 * parent and owns its own copy of every cluster (each allocated and distinct
 * from snapshot1's).  The whole scenario runs twice, deleting either the
 * snapshots first or the blob first, to cover both teardown orders.
 */
static void
blob_decouple_snapshot(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts opts;
	struct spdk_blob *blob, *snapshot1, *snapshot2;
	struct spdk_io_channel *channel;
	spdk_blob_id blobid, snapshotid;
	uint64_t cluster;

	for (int delete_snapshot_first = 0; delete_snapshot_first <= 1; delete_snapshot_first++) {
		/* A fresh channel per iteration; freed before the deletions below */
		channel = spdk_bs_alloc_io_channel(bs);
		SPDK_CU_ASSERT_FATAL(channel != NULL);

		/* Thick provisioning so every cluster of the blob is allocated */
		ut_spdk_blob_opts_init(&opts);
		opts.num_clusters = 10;
		opts.thin_provision = false;

		blob = ut_blob_create_and_open(bs, &opts);
		blobid = spdk_blob_get_id(blob);

		/* Create first snapshot */
		CU_ASSERT_EQUAL(_get_snapshots_count(bs), 0);
		spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
		CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
		snapshotid = g_blobid;

		spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
		snapshot1 = g_blob;

		/* Create the second one */
		CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
		spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
		CU_ASSERT_EQUAL(_get_snapshots_count(bs), 2);
		snapshotid = g_blobid;

		spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
		snapshot2 = g_blob;
		/* The newer snapshot chains onto the older one */
		CU_ASSERT_EQUAL(spdk_blob_get_parent_snapshot(bs, snapshot2->id), snapshot1->id);

		/* Now decouple the second snapshot forcing it to copy the written clusters */
		spdk_bs_blob_decouple_parent(bs, channel, snapshot2->id, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);

		/* Verify that the snapshot has been decoupled and that the clusters have been copied */
		CU_ASSERT_EQUAL(spdk_blob_get_parent_snapshot(bs, snapshot2->id), SPDK_BLOBID_INVALID);
		for (cluster = 0; cluster < snapshot2->active.num_clusters; ++cluster) {
			/* Every cluster allocated, and not shared with snapshot1 */
			CU_ASSERT_NOT_EQUAL(snapshot2->active.clusters[cluster], 0);
			CU_ASSERT_NOT_EQUAL(snapshot2->active.clusters[cluster],
					    snapshot1->active.clusters[cluster]);
		}

		spdk_bs_free_io_channel(channel);

		/* Exercise both teardown orders across the two loop iterations */
		if (delete_snapshot_first) {
			ut_blob_close_and_delete(bs, snapshot2);
			ut_blob_close_and_delete(bs, snapshot1);
			ut_blob_close_and_delete(bs, blob);
		} else {
			ut_blob_close_and_delete(bs, blob);
			ut_blob_close_and_delete(bs, snapshot2);
			ut_blob_close_and_delete(bs, snapshot1);
		}
		poll_threads();
	}
}
7965 
7966 static void
7967 blob_seek_io_unit(void)
7968 {
7969 	struct spdk_blob_store *bs = g_bs;
7970 	struct spdk_blob *blob;
7971 	struct spdk_io_channel *channel;
7972 	struct spdk_blob_opts opts;
7973 	uint64_t free_clusters;
7974 	uint8_t payload[10 * BLOCKLEN];
7975 	uint64_t offset;
7976 	uint64_t io_unit, io_units_per_cluster;
7977 
7978 	free_clusters = spdk_bs_free_cluster_count(bs);
7979 
7980 	channel = spdk_bs_alloc_io_channel(bs);
7981 	CU_ASSERT(channel != NULL);
7982 
7983 	/* Set blob as thin provisioned */
7984 	ut_spdk_blob_opts_init(&opts);
7985 	opts.thin_provision = true;
7986 
7987 	/* Create a blob */
7988 	blob = ut_blob_create_and_open(bs, &opts);
7989 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
7990 
7991 	io_units_per_cluster = bs_io_units_per_cluster(blob);
7992 
7993 	/* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */
7994 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
7995 	poll_threads();
7996 	CU_ASSERT(g_bserrno == 0);
7997 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
7998 	CU_ASSERT(blob->active.num_clusters == 5);
7999 
8000 	/* Write at the beginning of first cluster */
8001 	offset = 0;
8002 	spdk_blob_io_write(blob, channel, payload, offset, 1, blob_op_complete, NULL);
8003 	poll_threads();
8004 	CU_ASSERT(g_bserrno == 0);
8005 
8006 	io_unit = spdk_blob_get_next_allocated_io_unit(blob, 0);
8007 	CU_ASSERT(io_unit == offset);
8008 
8009 	io_unit = spdk_blob_get_next_unallocated_io_unit(blob, 0);
8010 	CU_ASSERT(io_unit == io_units_per_cluster);
8011 
8012 	/* Write in the middle of third cluster */
8013 	offset = 2 * io_units_per_cluster + io_units_per_cluster / 2;
8014 	spdk_blob_io_write(blob, channel, payload, offset, 1, blob_op_complete, NULL);
8015 	poll_threads();
8016 	CU_ASSERT(g_bserrno == 0);
8017 
8018 	io_unit = spdk_blob_get_next_allocated_io_unit(blob, io_units_per_cluster);
8019 	CU_ASSERT(io_unit == 2 * io_units_per_cluster);
8020 
8021 	io_unit = spdk_blob_get_next_unallocated_io_unit(blob, 2 * io_units_per_cluster);
8022 	CU_ASSERT(io_unit == 3 * io_units_per_cluster);
8023 
8024 	/* Write at the end of last cluster */
8025 	offset = 5 * io_units_per_cluster - 1;
8026 	spdk_blob_io_write(blob, channel, payload, offset, 1, blob_op_complete, NULL);
8027 	poll_threads();
8028 	CU_ASSERT(g_bserrno == 0);
8029 
8030 	io_unit = spdk_blob_get_next_allocated_io_unit(blob, 3 * io_units_per_cluster);
8031 	CU_ASSERT(io_unit == 4 * io_units_per_cluster);
8032 
8033 	io_unit = spdk_blob_get_next_unallocated_io_unit(blob, 4 * io_units_per_cluster);
8034 	CU_ASSERT(io_unit == UINT64_MAX);
8035 
8036 	spdk_bs_free_io_channel(channel);
8037 	poll_threads();
8038 
8039 	ut_blob_close_and_delete(bs, blob);
8040 }
8041 
8042 static void
8043 blob_esnap_create(void)
8044 {
8045 	struct spdk_blob_store	*bs = g_bs;
8046 	struct spdk_bs_opts	bs_opts;
8047 	struct ut_esnap_opts	esnap_opts;
8048 	struct spdk_blob_opts	opts;
8049 	struct spdk_blob_open_opts open_opts;
8050 	struct spdk_blob	*blob;
8051 	uint32_t		cluster_sz, block_sz;
8052 	const uint32_t		esnap_num_clusters = 4;
8053 	uint64_t		esnap_num_blocks;
8054 	uint32_t		sz;
8055 	spdk_blob_id		blobid;
8056 	uint32_t		bs_ctx_count, blob_ctx_count;
8057 
8058 	cluster_sz = spdk_bs_get_cluster_size(bs);
8059 	block_sz = spdk_bs_get_io_unit_size(bs);
8060 	esnap_num_blocks = cluster_sz * esnap_num_clusters / block_sz;
8061 
8062 	/* Create a normal blob and verify it is not an esnap clone. */
8063 	ut_spdk_blob_opts_init(&opts);
8064 	blob = ut_blob_create_and_open(bs, &opts);
8065 	CU_ASSERT(!spdk_blob_is_esnap_clone(blob));
8066 	ut_blob_close_and_delete(bs, blob);
8067 
8068 	/* Create an esnap clone blob then verify it is an esnap clone and has the right size */
8069 	ut_spdk_blob_opts_init(&opts);
8070 	ut_esnap_opts_init(block_sz, esnap_num_blocks, __func__, NULL, &esnap_opts);
8071 	opts.esnap_id = &esnap_opts;
8072 	opts.esnap_id_len = sizeof(esnap_opts);
8073 	opts.num_clusters = esnap_num_clusters;
8074 	blob = ut_blob_create_and_open(bs, &opts);
8075 	SPDK_CU_ASSERT_FATAL(blob != NULL);
8076 	SPDK_CU_ASSERT_FATAL(spdk_blob_is_esnap_clone(blob));
8077 	SPDK_CU_ASSERT_FATAL(blob_is_esnap_clone(blob));
8078 	SPDK_CU_ASSERT_FATAL(!spdk_blob_is_clone(blob));
8079 	sz = spdk_blob_get_num_clusters(blob);
8080 	CU_ASSERT(sz == esnap_num_clusters);
8081 	ut_blob_close_and_delete(bs, blob);
8082 
8083 	/* Create an esnap clone without the size and verify it can be grown */
8084 	ut_spdk_blob_opts_init(&opts);
8085 	ut_esnap_opts_init(block_sz, esnap_num_blocks, __func__, NULL, &esnap_opts);
8086 	opts.esnap_id = &esnap_opts;
8087 	opts.esnap_id_len = sizeof(esnap_opts);
8088 	blob = ut_blob_create_and_open(bs, &opts);
8089 	SPDK_CU_ASSERT_FATAL(spdk_blob_is_esnap_clone(blob));
8090 	sz = spdk_blob_get_num_clusters(blob);
8091 	CU_ASSERT(sz == 0);
8092 	spdk_blob_resize(blob, 1, blob_op_complete, NULL);
8093 	poll_threads();
8094 	CU_ASSERT(g_bserrno == 0);
8095 	sz = spdk_blob_get_num_clusters(blob);
8096 	CU_ASSERT(sz == 1);
8097 	spdk_blob_resize(blob, esnap_num_clusters, blob_op_complete, NULL);
8098 	poll_threads();
8099 	CU_ASSERT(g_bserrno == 0);
8100 	sz = spdk_blob_get_num_clusters(blob);
8101 	CU_ASSERT(sz == esnap_num_clusters);
8102 	spdk_blob_resize(blob, esnap_num_clusters + 1, blob_op_complete, NULL);
8103 	poll_threads();
8104 	CU_ASSERT(g_bserrno == 0);
8105 	sz = spdk_blob_get_num_clusters(blob);
8106 	CU_ASSERT(sz == esnap_num_clusters + 1);
8107 
8108 	/* Reload the blobstore and be sure that the blob can be opened. */
8109 	blobid = spdk_blob_get_id(blob);
8110 	spdk_blob_close(blob, blob_op_complete, NULL);
8111 	poll_threads();
8112 	CU_ASSERT(g_bserrno == 0);
8113 	g_blob = NULL;
8114 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
8115 	bs_opts.esnap_bs_dev_create = ut_esnap_create;
8116 	ut_bs_reload(&bs, &bs_opts);
8117 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
8118 	poll_threads();
8119 	CU_ASSERT(g_bserrno == 0);
8120 	CU_ASSERT(g_blob != NULL);
8121 	blob = g_blob;
8122 	SPDK_CU_ASSERT_FATAL(spdk_blob_is_esnap_clone(blob));
8123 	sz = spdk_blob_get_num_clusters(blob);
8124 	CU_ASSERT(sz == esnap_num_clusters + 1);
8125 
8126 	/* Reload the blobstore without esnap_bs_dev_create: should fail to open blob. */
8127 	spdk_blob_close(blob, blob_op_complete, NULL);
8128 	poll_threads();
8129 	CU_ASSERT(g_bserrno == 0);
8130 	g_blob = NULL;
8131 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
8132 	ut_bs_reload(&bs, &bs_opts);
8133 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
8134 	poll_threads();
8135 	CU_ASSERT(g_bserrno != 0);
8136 	CU_ASSERT(g_blob == NULL);
8137 
8138 	/* Reload the blobstore with ctx set and verify it is passed to the esnap create callback */
8139 	bs_ctx_count = 0;
8140 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
8141 	bs_opts.esnap_bs_dev_create = ut_esnap_create_with_count;
8142 	bs_opts.esnap_ctx = &bs_ctx_count;
8143 	ut_bs_reload(&bs, &bs_opts);
8144 	/* Loading the blobstore triggers the esnap to be loaded */
8145 	CU_ASSERT(bs_ctx_count == 1);
8146 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
8147 	poll_threads();
8148 	CU_ASSERT(g_bserrno == 0);
8149 	CU_ASSERT(g_blob != NULL);
8150 	/* Opening the blob also triggers the esnap to be loaded */
8151 	CU_ASSERT(bs_ctx_count == 2);
8152 	blob = g_blob;
8153 	SPDK_CU_ASSERT_FATAL(spdk_blob_is_esnap_clone(blob));
8154 	sz = spdk_blob_get_num_clusters(blob);
8155 	CU_ASSERT(sz == esnap_num_clusters + 1);
8156 	spdk_blob_close(blob, blob_op_complete, NULL);
8157 	poll_threads();
8158 	CU_ASSERT(g_bserrno == 0);
8159 	g_blob = NULL;
8160 	/* If open_opts.esnap_ctx is set it is passed to the esnap create callback */
8161 	blob_ctx_count = 0;
8162 	spdk_blob_open_opts_init(&open_opts, sizeof(open_opts));
8163 	open_opts.esnap_ctx = &blob_ctx_count;
8164 	spdk_bs_open_blob_ext(bs, blobid, &open_opts, blob_op_with_handle_complete, NULL);
8165 	poll_threads();
8166 	blob = g_blob;
8167 	CU_ASSERT(bs_ctx_count == 3);
8168 	CU_ASSERT(blob_ctx_count == 1);
8169 	spdk_blob_close(blob, blob_op_complete, NULL);
8170 	poll_threads();
8171 	CU_ASSERT(g_bserrno == 0);
8172 	g_blob = NULL;
8173 }
8174 
/*
 * Build a small chain (esnap clone <- snapshot <- regular clone), reload the
 * blobstore, and verify that every blob in the chain can be reopened and
 * read.  Finishes by unloading the blobstore while blob closes are still in
 * flight (lvstore-style), which exercises the deferred unload path in
 * spdk_bs_unload().
 */
static void
blob_esnap_clone_reload(void)
{
	struct spdk_blob_store	*bs = g_bs;
	struct spdk_bs_opts	bs_opts;
	struct ut_esnap_opts	esnap_opts;
	struct spdk_blob_opts	opts;
	struct spdk_blob	*eclone1, *snap1, *clone1;
	uint32_t		cluster_sz = spdk_bs_get_cluster_size(bs);
	uint32_t		block_sz = spdk_bs_get_io_unit_size(bs);
	const uint32_t		esnap_num_clusters = 4;
	uint64_t		esnap_num_blocks = cluster_sz * esnap_num_clusters / block_sz;
	spdk_blob_id		eclone1_id, snap1_id, clone1_id;
	struct spdk_io_channel	*bs_ch;
	char			buf[block_sz];
	int			bserr1, bserr2, bserr3, bserr4;
	struct spdk_bs_dev	*dev;

	/* Create and open an esnap clone blob */
	ut_spdk_blob_opts_init(&opts);
	ut_esnap_opts_init(block_sz, esnap_num_blocks, __func__, NULL, &esnap_opts);
	opts.esnap_id = &esnap_opts;
	opts.esnap_id_len = sizeof(esnap_opts);
	opts.num_clusters = esnap_num_clusters;
	eclone1 = ut_blob_create_and_open(bs, &opts);
	CU_ASSERT(eclone1 != NULL);
	CU_ASSERT(spdk_blob_is_esnap_clone(eclone1));
	eclone1_id = eclone1->id;

	/* Create and open a snapshot of eclone1 */
	spdk_bs_create_snapshot(bs, eclone1_id, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	CU_ASSERT(g_bserrno == 0);
	snap1_id = g_blobid;
	spdk_bs_open_blob(bs, snap1_id, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	snap1 = g_blob;

	/* Create and open regular clone of snap1 */
	spdk_bs_create_clone(bs, snap1_id, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	SPDK_CU_ASSERT_FATAL(g_bserrno == 0);
	clone1_id = g_blobid;
	spdk_bs_open_blob(bs, clone1_id, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	clone1 = g_blob;

	/* Close the blobs in preparation for reloading the blobstore */
	spdk_blob_close(clone1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	spdk_blob_close(snap1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	spdk_blob_close(eclone1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_blob = NULL;

	/* Reload the blobstore */
	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
	bs_opts.esnap_bs_dev_create = ut_esnap_create;
	ut_bs_reload(&bs, &bs_opts);

	/* Be sure each of the blobs can be opened */
	spdk_bs_open_blob(bs, eclone1_id, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	eclone1 = g_blob;
	spdk_bs_open_blob(bs, snap1_id, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	snap1 = g_blob;
	spdk_bs_open_blob(bs, clone1_id, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	clone1 = g_blob;

	/* Perform some reads on each of them to cause channels to be allocated */
	bs_ch = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(bs_ch != NULL);
	spdk_blob_io_read(eclone1, bs_ch, buf, 0, 1, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	spdk_blob_io_read(snap1, bs_ch, buf, 0, 1, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	spdk_blob_io_read(clone1, bs_ch, buf, 0, 1, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/*
	 * Unload the blobstore in a way similar to how lvstore unloads it.  This should exercise
	 * the deferred unload path in spdk_bs_unload().
	 */
	bserr1 = 0xbad;
	bserr2 = 0xbad;
	bserr3 = 0xbad;
	bserr4 = 0xbad;
	/* All four completions are issued before a single poll_threads() so
	 * the unload is requested while the closes are still outstanding. */
	spdk_blob_close(eclone1, blob_op_complete, &bserr1);
	spdk_blob_close(snap1, blob_op_complete, &bserr2);
	spdk_blob_close(clone1, blob_op_complete, &bserr3);
	spdk_bs_unload(bs, blob_op_complete, &bserr4);
	spdk_bs_free_io_channel(bs_ch);
	poll_threads();
	CU_ASSERT(bserr1 == 0);
	CU_ASSERT(bserr2 == 0);
	CU_ASSERT(bserr3 == 0);
	CU_ASSERT(bserr4 == 0);
	g_blob = NULL;

	/* Reload the blobstore */
	/* NOTE(review): g_bs is left loaded on purpose, presumably for the
	 * suite-level teardown to unload — confirm against the test harness. */
	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
	bs_opts.esnap_bs_dev_create = ut_esnap_create;
	dev = init_dev();
	spdk_bs_load(dev, &bs_opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
}
8304 
8305 static bool
8306 blob_esnap_verify_contents(struct spdk_blob *blob, struct spdk_io_channel *ch,
8307 			   uint64_t offset, uint64_t size, uint32_t readsize, const char *how)
8308 {
8309 	const uint32_t	bs_blksz = blob->bs->io_unit_size;
8310 	const uint32_t	esnap_blksz = blob->back_bs_dev ? blob->back_bs_dev->blocklen : bs_blksz;
8311 	const uint32_t	start_blk = offset / bs_blksz;
8312 	const uint32_t	num_blocks = spdk_max(size, readsize) / bs_blksz;
8313 	const uint32_t	blocks_per_read = spdk_min(size, readsize) / bs_blksz;
8314 	uint32_t	blob_block;
8315 	struct iovec	iov;
8316 	uint8_t		buf[spdk_min(size, readsize)];
8317 	bool		block_ok;
8318 
8319 	SPDK_CU_ASSERT_FATAL(offset % bs_blksz == 0);
8320 	SPDK_CU_ASSERT_FATAL(size % bs_blksz == 0);
8321 	SPDK_CU_ASSERT_FATAL(readsize % bs_blksz == 0);
8322 
8323 	memset(buf, 0, readsize);
8324 	iov.iov_base = buf;
8325 	iov.iov_len = readsize;
8326 	for (blob_block = start_blk; blob_block < num_blocks; blob_block += blocks_per_read) {
8327 		if (strcmp(how, "read") == 0) {
8328 			spdk_blob_io_read(blob, ch, buf, blob_block, blocks_per_read,
8329 					  bs_op_complete, NULL);
8330 		} else if (strcmp(how, "readv") == 0) {
8331 			spdk_blob_io_readv(blob, ch, &iov, 1, blob_block, blocks_per_read,
8332 					   bs_op_complete, NULL);
8333 		} else if (strcmp(how, "readv_ext") == 0) {
8334 			/*
8335 			 * This is currently pointless. NULL ext_opts leads to dev->readv(), not
8336 			 * dev->readv_ext().
8337 			 */
8338 			spdk_blob_io_readv_ext(blob, ch, &iov, 1, blob_block, blocks_per_read,
8339 					       bs_op_complete, NULL, NULL);
8340 		} else {
8341 			abort();
8342 		}
8343 		poll_threads();
8344 		CU_ASSERT(g_bserrno == 0);
8345 		if (g_bserrno != 0) {
8346 			return false;
8347 		}
8348 		block_ok = ut_esnap_content_is_correct(buf, blocks_per_read * bs_blksz, blob->id,
8349 						       blob_block * bs_blksz, esnap_blksz);
8350 		CU_ASSERT(block_ok);
8351 		if (!block_ok) {
8352 			return false;
8353 		}
8354 	}
8355 
8356 	return true;
8357 }
8358 
/*
 * Core esnap I/O test, parameterized by the blobstore io unit size and the
 * esnap device block size.  Builds a fresh blobstore with the requested
 * io unit size, creates an esnap clone, verifies that reads at several
 * granularities return the esnap device's generated contents, then writes
 * one blob block at a time while checking that the block before holds the
 * newly written pattern and the block after still holds esnap contents.
 */
static void
blob_esnap_io_size(uint32_t bs_blksz, uint32_t esnap_blksz)
{
	struct spdk_bs_dev	*dev;
	struct spdk_blob_store	*bs;
	struct spdk_bs_opts	bsopts;
	struct spdk_blob_opts	opts;
	struct ut_esnap_opts	esnap_opts;
	struct spdk_blob	*blob;
	const uint32_t		cluster_sz = 16 * 1024;
	const uint64_t		esnap_num_clusters = 4;
	const uint32_t		esnap_sz = cluster_sz * esnap_num_clusters;
	const uint64_t		esnap_num_blocks = esnap_sz / esnap_blksz;
	const uint64_t		blob_num_blocks = esnap_sz / bs_blksz;
	uint32_t		block;
	struct spdk_io_channel	*bs_ch;

	spdk_bs_opts_init(&bsopts, sizeof(bsopts));
	bsopts.cluster_sz = cluster_sz;
	bsopts.esnap_bs_dev_create = ut_esnap_create;

	/* Create device with desired block size */
	dev = init_dev();
	dev->blocklen = bs_blksz;
	dev->blockcnt = DEV_BUFFER_SIZE / dev->blocklen;

	/* Initialize a new blob store */
	spdk_bs_init(dev, &bsopts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	SPDK_CU_ASSERT_FATAL(g_bs->io_unit_size == bs_blksz);
	bs = g_bs;

	bs_ch = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(bs_ch != NULL);

	/* Create and open the esnap clone  */
	ut_spdk_blob_opts_init(&opts);
	ut_esnap_opts_init(esnap_blksz, esnap_num_blocks, __func__, NULL, &esnap_opts);
	opts.esnap_id = &esnap_opts;
	opts.esnap_id_len = sizeof(esnap_opts);
	opts.num_clusters = esnap_num_clusters;
	blob = ut_blob_create_and_open(bs, &opts);
	SPDK_CU_ASSERT_FATAL(blob != NULL);

	/* Verify that large reads return the content of the esnap device */
	CU_ASSERT(blob_esnap_verify_contents(blob, bs_ch, 0, esnap_sz, esnap_sz, "read"));
	CU_ASSERT(blob_esnap_verify_contents(blob, bs_ch, 0, esnap_sz, esnap_sz, "readv"));
	CU_ASSERT(blob_esnap_verify_contents(blob, bs_ch, 0, esnap_sz, esnap_sz, "readv_ext"));
	/* Verify that small reads return the content of the esnap device */
	CU_ASSERT(blob_esnap_verify_contents(blob, bs_ch, 0, esnap_sz, bs_blksz, "read"));
	CU_ASSERT(blob_esnap_verify_contents(blob, bs_ch, 0, esnap_sz, bs_blksz, "readv"));
	CU_ASSERT(blob_esnap_verify_contents(blob, bs_ch, 0, esnap_sz, bs_blksz, "readv_ext"));

	/* Write one blob block at a time; verify that the surrounding blocks are OK */
	for (block = 0; block < blob_num_blocks; block++) {
		char		buf[bs_blksz];
		union ut_word	word;

		/* Written blocks carry a distinct blob_id (0xfedcba90) so they
		 * are distinguishable from the esnap-generated pattern, which
		 * uses the real blob id. */
		word.f.blob_id = 0xfedcba90;
		word.f.lba = block;
		ut_memset8(buf, word.num, bs_blksz);

		spdk_blob_io_write(blob, bs_ch, buf, block, 1, bs_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		if (g_bserrno != 0) {
			break;
		}

		/* Read and verify the block before the current block */
		if (block != 0) {
			spdk_blob_io_read(blob, bs_ch, buf, block - 1, 1, bs_op_complete, NULL);
			poll_threads();
			CU_ASSERT(g_bserrno == 0);
			if (g_bserrno != 0) {
				break;
			}
			CU_ASSERT(ut_esnap_content_is_correct(buf, bs_blksz, word.f.blob_id,
							      (block - 1) * bs_blksz, bs_blksz));
		}

		/* Read and verify the current block */
		spdk_blob_io_read(blob, bs_ch, buf, block, 1, bs_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		if (g_bserrno != 0) {
			break;
		}
		CU_ASSERT(ut_esnap_content_is_correct(buf, bs_blksz, word.f.blob_id,
						      block * bs_blksz, bs_blksz));

		/* Check the block that follows */
		if (block + 1 < blob_num_blocks) {
			g_bserrno = 0xbad;
			spdk_blob_io_read(blob, bs_ch, buf, block + 1, 1, bs_op_complete, NULL);
			poll_threads();
			CU_ASSERT(g_bserrno == 0);
			if (g_bserrno != 0) {
				break;
			}
			/* Not yet written: must still show esnap contents. */
			CU_ASSERT(ut_esnap_content_is_correct(buf, bs_blksz, blob->id,
							      (block + 1) * bs_blksz,
							      esnap_blksz));
		}
	}

	/* Clean up */
	spdk_bs_free_io_channel(bs_ch);
	g_bserrno = 0xbad;
	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	spdk_bs_unload(g_bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
	memset(g_dev_buffer, 0, DEV_BUFFER_SIZE);
}
8479 
/* Blobstore io unit and esnap block size both 4096: sizes match. */
static void
blob_esnap_io_4096_4096(void)
{
	blob_esnap_io_size(4096, 4096);
}
8485 
/* Blobstore io unit and esnap block size both 512: sizes match. */
static void
blob_esnap_io_512_512(void)
{
	blob_esnap_io_size(512, 512);
}
8491 
/* Blobstore io unit (4096) larger than esnap block size (512). */
static void
blob_esnap_io_4096_512(void)
{
	blob_esnap_io_size(4096, 512);
}
8497 
8498 static void
8499 blob_esnap_io_512_4096(void)
8500 {
8501 	struct spdk_bs_dev	*dev;
8502 	struct spdk_blob_store	*bs;
8503 	struct spdk_bs_opts	bs_opts;
8504 	struct spdk_blob_opts	blob_opts;
8505 	struct ut_esnap_opts	esnap_opts;
8506 	uint64_t		cluster_sz = 16 * 1024;
8507 	uint32_t		bs_blksz = 512;
8508 	uint32_t		esnap_blksz = BLOCKLEN;
8509 	uint64_t		esnap_num_blocks = 64;
8510 	spdk_blob_id		blobid;
8511 
8512 	/* Create device with desired block size */
8513 	dev = init_dev();
8514 	dev->blocklen = bs_blksz;
8515 	dev->blockcnt = DEV_BUFFER_SIZE / dev->blocklen;
8516 
8517 	/* Initialize a new blob store */
8518 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
8519 	bs_opts.cluster_sz = cluster_sz;
8520 	bs_opts.esnap_bs_dev_create = ut_esnap_create;
8521 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
8522 	poll_threads();
8523 	CU_ASSERT(g_bserrno == 0);
8524 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
8525 	SPDK_CU_ASSERT_FATAL(g_bs->io_unit_size == bs_blksz);
8526 	bs = g_bs;
8527 
8528 	/* Try to create and open the esnap clone. Create should succeed, open should fail. */
8529 	ut_spdk_blob_opts_init(&blob_opts);
8530 	ut_esnap_opts_init(esnap_blksz, esnap_num_blocks, __func__, NULL, &esnap_opts);
8531 	blob_opts.esnap_id = &esnap_opts;
8532 	blob_opts.esnap_id_len = sizeof(esnap_opts);
8533 	blob_opts.num_clusters = esnap_num_blocks * esnap_blksz / bs_blksz;
8534 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
8535 	poll_threads();
8536 	CU_ASSERT(g_bserrno == 0);
8537 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
8538 	blobid = g_blobid;
8539 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
8540 	poll_threads();
8541 	CU_ASSERT(g_bserrno == -EINVAL);
8542 	CU_ASSERT(g_blob == NULL);
8543 
8544 	/* Clean up */
8545 	spdk_bs_unload(bs, bs_op_complete, NULL);
8546 	poll_threads();
8547 	CU_ASSERT(g_bserrno == 0);
8548 	g_bs = NULL;
8549 	memset(g_dev_buffer, 0, DEV_BUFFER_SIZE);
8550 }
8551 
/*
 * Verify per-thread esnap bs_dev channel lifecycle: channels are created
 * lazily on the first read from each thread, destroyed when the thread's
 * blobstore channel is freed, and all remaining channels (plus the esnap
 * device itself) are torn down when the blob is closed.
 */
static void
blob_esnap_thread_add_remove(void)
{
	struct spdk_blob_store	*bs = g_bs;
	struct spdk_blob_opts	opts;
	struct ut_esnap_opts	ut_esnap_opts;
	struct spdk_blob	*blob;
	struct ut_esnap_dev	*ut_dev;
	spdk_blob_id		blobid;
	uint64_t		start_thread = g_ut_thread_id;
	bool			destroyed = false;
	struct spdk_io_channel	*ch0, *ch1;
	struct ut_esnap_channel	*ut_ch0, *ut_ch1;
	const uint32_t		blocklen = bs->io_unit_size;
	char			buf[blocklen * 4];

	SPDK_CU_ASSERT_FATAL(g_ut_num_threads > 1);
	set_thread(0);

	/* Create the esnap clone */
	ut_esnap_opts_init(blocklen, 2048, "add_remove_1", &destroyed, &ut_esnap_opts);
	ut_spdk_blob_opts_init(&opts);
	opts.esnap_id = &ut_esnap_opts;
	opts.esnap_id_len = sizeof(ut_esnap_opts);
	opts.num_clusters = 10;
	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	/* Open the blob. No channels should be allocated yet. */
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;
	ut_dev = (struct ut_esnap_dev *)blob->back_bs_dev;
	CU_ASSERT(ut_dev != NULL);
	CU_ASSERT(ut_dev->num_channels == 0);

	/* Create a channel on thread 0. It is lazily created on the first read. */
	ch0 = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(ch0 != NULL);
	ut_ch0 = ut_esnap_get_io_channel(ch0, blobid);
	CU_ASSERT(ut_ch0 == NULL);
	CU_ASSERT(ut_dev->num_channels == 0);
	spdk_blob_io_read(blob, ch0, buf, 0, 1, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(ut_dev->num_channels == 1);
	ut_ch0 = ut_esnap_get_io_channel(ch0, blobid);
	CU_ASSERT(ut_ch0 != NULL);
	CU_ASSERT(ut_ch0->blocks_read == 1);

	/* Create a channel on thread 1 and verify its lazy creation too. */
	set_thread(1);
	ch1 = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(ch1 != NULL);
	ut_ch1 = ut_esnap_get_io_channel(ch1, blobid);
	CU_ASSERT(ut_ch1 == NULL);
	CU_ASSERT(ut_dev->num_channels == 1);
	spdk_blob_io_read(blob, ch1, buf, 0, 4, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(ut_dev->num_channels == 2);
	ut_ch1 = ut_esnap_get_io_channel(ch1, blobid);
	CU_ASSERT(ut_ch1 != NULL);
	CU_ASSERT(ut_ch1->blocks_read == 4);

	/* Close the channel on thread 0 and verify the bs_dev channel is also gone. */
	set_thread(0);
	spdk_bs_free_io_channel(ch0);
	poll_threads();
	CU_ASSERT(ut_dev->num_channels == 1);

	/* Close the blob. There is no outstanding IO so it should close right away. */
	g_bserrno = 0xbad;
	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	/* destroyed is set by the UT esnap device's destructor. */
	CU_ASSERT(destroyed);

	/* The esnap channel for the blob should be gone now too. */
	ut_ch1 = ut_esnap_get_io_channel(ch1, blobid);
	CU_ASSERT(ut_ch1 == NULL);

	/* Clean up */
	set_thread(1);
	spdk_bs_free_io_channel(ch1);
	set_thread(start_thread);
}
8644 
/* Completion callback for blob_freeze_io(): counts successful freezes. */
static void
freeze_done(void *cb_arg, int bserrno)
{
	uint32_t *counter = cb_arg;

	/* A freeze must never fail in these tests. */
	CU_ASSERT(bserrno == 0);
	*counter += 1;
}
8653 
/* Completion callback for blob_unfreeze_io(): counts successful unfreezes. */
static void
unfreeze_done(void *cb_arg, int bserrno)
{
	uint32_t *counter = cb_arg;

	/* An unfreeze must never fail in these tests. */
	CU_ASSERT(bserrno == 0);
	*counter += 1;
}
8662 
/*
 * Verify that blob freeze/unfreeze operations nest correctly: frozen_refcnt
 * tracks the nesting depth, and each freeze/unfreeze completion fires only
 * after the threads are polled (i.e. after the underlying for_each_channel
 * pass has run).
 */
static void
blob_nested_freezes(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_io_channel *channel[2];
	struct spdk_blob_opts opts;
	uint32_t freeze_cnt, unfreeze_cnt;
	int i;

	/* A channel on each thread so freeze must visit more than one channel. */
	for (i = 0; i < 2; i++) {
		set_thread(i);
		channel[i] = spdk_bs_alloc_io_channel(bs);
		SPDK_CU_ASSERT_FATAL(channel[i] != NULL);
	}

	set_thread(0);

	ut_spdk_blob_opts_init(&opts);
	blob = ut_blob_create_and_open(bs, &opts);

	/* First just test a single freeze/unfreeze. */
	freeze_cnt = 0;
	unfreeze_cnt = 0;
	CU_ASSERT(blob->frozen_refcnt == 0);
	blob_freeze_io(blob, freeze_done, &freeze_cnt);
	CU_ASSERT(blob->frozen_refcnt == 1);
	/* The refcount bumps immediately, but the completion needs a poll. */
	CU_ASSERT(freeze_cnt == 0);
	poll_threads();
	CU_ASSERT(freeze_cnt == 1);
	blob_unfreeze_io(blob, unfreeze_done, &unfreeze_cnt);
	CU_ASSERT(blob->frozen_refcnt == 0);
	CU_ASSERT(unfreeze_cnt == 0);
	poll_threads();
	CU_ASSERT(unfreeze_cnt == 1);

	/* Now nest multiple freeze/unfreeze operations.  We should
	 * expect a callback for each operation, but only after
	 * the threads have been polled to ensure a for_each_channel()
	 * was executed.
	 */
	freeze_cnt = 0;
	unfreeze_cnt = 0;
	CU_ASSERT(blob->frozen_refcnt == 0);
	blob_freeze_io(blob, freeze_done, &freeze_cnt);
	CU_ASSERT(blob->frozen_refcnt == 1);
	CU_ASSERT(freeze_cnt == 0);
	blob_freeze_io(blob, freeze_done, &freeze_cnt);
	CU_ASSERT(blob->frozen_refcnt == 2);
	CU_ASSERT(freeze_cnt == 0);
	poll_threads();
	CU_ASSERT(freeze_cnt == 2);
	blob_unfreeze_io(blob, unfreeze_done, &unfreeze_cnt);
	CU_ASSERT(blob->frozen_refcnt == 1);
	CU_ASSERT(unfreeze_cnt == 0);
	blob_unfreeze_io(blob, unfreeze_done, &unfreeze_cnt);
	CU_ASSERT(blob->frozen_refcnt == 0);
	CU_ASSERT(unfreeze_cnt == 0);
	poll_threads();
	CU_ASSERT(unfreeze_cnt == 2);

	for (i = 0; i < 2; i++) {
		set_thread(i);
		spdk_bs_free_io_channel(channel[i]);
	}
	set_thread(0);
	ut_blob_close_and_delete(bs, blob);

	poll_threads();
	g_blob = NULL;
	g_blobid = 0;
}
8735 
8736 static void
8737 blob_ext_md_pages(void)
8738 {
8739 	struct spdk_blob_store *bs;
8740 	struct spdk_bs_dev *dev;
8741 	struct spdk_blob *blob;
8742 	struct spdk_blob_opts opts;
8743 	struct spdk_bs_opts bs_opts;
8744 	uint64_t free_clusters;
8745 
8746 	dev = init_dev();
8747 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
8748 	snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");
8749 	/* Issue #2932 was a bug in how we use bs_allocate_cluster() during resize.
8750 	 * It requires num_md_pages that is much smaller than the number of clusters.
8751 	 * Make sure we can create a blob that uses all of the free clusters.
8752 	 */
8753 	bs_opts.cluster_sz = 65536;
8754 	bs_opts.num_md_pages = 16;
8755 
8756 	/* Initialize a new blob store */
8757 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
8758 	poll_threads();
8759 	CU_ASSERT(g_bserrno == 0);
8760 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
8761 	bs = g_bs;
8762 
8763 	free_clusters = spdk_bs_free_cluster_count(bs);
8764 
8765 	ut_spdk_blob_opts_init(&opts);
8766 	opts.num_clusters = free_clusters;
8767 
8768 	blob = ut_blob_create_and_open(bs, &opts);
8769 	spdk_blob_close(blob, blob_op_complete, NULL);
8770 	CU_ASSERT(g_bserrno == 0);
8771 
8772 	spdk_bs_unload(bs, bs_op_complete, NULL);
8773 	poll_threads();
8774 	CU_ASSERT(g_bserrno == 0);
8775 	g_bs = NULL;
8776 }
8777 
8778 static void
8779 blob_esnap_clone_snapshot(void)
8780 {
8781 	/*
8782 	 * When a snapshot is created, the blob that is being snapped becomes
8783 	 * the leaf node (a clone of the snapshot) and the newly created
8784 	 * snapshot sits between the snapped blob and the external snapshot.
8785 	 *
8786 	 * Before creating snap1
8787 	 *
8788 	 *   ,--------.     ,----------.
8789 	 *   |  blob  |     |  vbdev   |
8790 	 *   | blob1  |<----| nvme1n42 |
8791 	 *   |  (rw)  |     |   (ro)   |
8792 	 *   `--------'     `----------'
8793 	 *       Figure 1
8794 	 *
8795 	 * After creating snap1
8796 	 *
8797 	 *   ,--------.     ,--------.     ,----------.
8798 	 *   |  blob  |     |  blob  |     |  vbdev   |
8799 	 *   | blob1  |<----| snap1  |<----| nvme1n42 |
8800 	 *   |  (rw)  |     |  (ro)  |     |   (ro)   |
8801 	 *   `--------'     `--------'     `----------'
8802 	 *       Figure 2
8803 	 *
8804 	 * Starting from Figure 2, if snap1 is removed, the chain reverts to
8805 	 * what it looks like in Figure 1.
8806 	 *
8807 	 * Starting from Figure 2, if blob1 is removed, the chain becomes:
8808 	 *
8809 	 *   ,--------.     ,----------.
8810 	 *   |  blob  |     |  vbdev   |
8811 	 *   | snap1  |<----| nvme1n42 |
8812 	 *   |  (ro)  |     |   (ro)   |
8813 	 *   `--------'     `----------'
8814 	 *       Figure 3
8815 	 *
8816 	 * In each case, the blob pointed to by the nvme vbdev is considered
8817 	 * the "esnap clone".  The esnap clone must have:
8818 	 *
8819 	 *   - XATTR_INTERNAL for BLOB_EXTERNAL_SNAPSHOT_ID (e.g. name or UUID)
8820 	 *   - blob->invalid_flags must contain SPDK_BLOB_EXTERNAL_SNAPSHOT
8821 	 *   - blob->parent_id must be SPDK_BLOBID_EXTERNAL_SNAPSHOT.
8822 	 *
8823 	 * No other blob that descends from the esnap clone may have any of
8824 	 * those set.
8825 	 */
8826 	struct spdk_blob_store	*bs = g_bs;
8827 	const uint32_t		blocklen = bs->io_unit_size;
8828 	struct spdk_blob_opts	opts;
8829 	struct ut_esnap_opts	esnap_opts;
8830 	struct spdk_blob	*blob, *snap_blob;
8831 	spdk_blob_id		blobid, snap_blobid;
8832 	bool			destroyed = false;
8833 
8834 	/* Create the esnap clone */
8835 	ut_esnap_opts_init(blocklen, 2048, __func__, &destroyed, &esnap_opts);
8836 	ut_spdk_blob_opts_init(&opts);
8837 	opts.esnap_id = &esnap_opts;
8838 	opts.esnap_id_len = sizeof(esnap_opts);
8839 	opts.num_clusters = 10;
8840 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
8841 	poll_threads();
8842 	CU_ASSERT(g_bserrno == 0);
8843 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
8844 	blobid = g_blobid;
8845 
8846 	/* Open the blob. */
8847 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
8848 	poll_threads();
8849 	CU_ASSERT(g_bserrno == 0);
8850 	CU_ASSERT(g_blob != NULL);
8851 	blob = g_blob;
8852 	UT_ASSERT_IS_ESNAP_CLONE(blob, &esnap_opts, sizeof(esnap_opts));
8853 
8854 	/*
8855 	 * Create a snapshot of the blob. The snapshot becomes the esnap clone.
8856 	 */
8857 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
8858 	poll_threads();
8859 	CU_ASSERT(g_bserrno == 0);
8860 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
8861 	snap_blobid = g_blobid;
8862 
8863 	spdk_bs_open_blob(bs, snap_blobid, blob_op_with_handle_complete, NULL);
8864 	poll_threads();
8865 	CU_ASSERT(g_bserrno == 0);
8866 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
8867 	snap_blob = g_blob;
8868 
8869 	UT_ASSERT_IS_NOT_ESNAP_CLONE(blob);
8870 	UT_ASSERT_IS_ESNAP_CLONE(snap_blob, &esnap_opts, sizeof(esnap_opts));
8871 
8872 	/*
8873 	 * Delete the snapshot.  The original blob becomes the esnap clone.
8874 	 */
8875 	ut_blob_close_and_delete(bs, snap_blob);
8876 	snap_blob = NULL;
8877 	snap_blobid = SPDK_BLOBID_INVALID;
8878 	UT_ASSERT_IS_ESNAP_CLONE(blob, &esnap_opts, sizeof(esnap_opts));
8879 
8880 	/*
8881 	 * Create the snapshot again, then delete the original blob.  The
8882 	 * snapshot should survive as the esnap clone.
8883 	 */
8884 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
8885 	poll_threads();
8886 	CU_ASSERT(g_bserrno == 0);
8887 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
8888 	snap_blobid = g_blobid;
8889 
8890 	spdk_bs_open_blob(bs, snap_blobid, blob_op_with_handle_complete, NULL);
8891 	poll_threads();
8892 	CU_ASSERT(g_bserrno == 0);
8893 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
8894 	snap_blob = g_blob;
8895 
8896 	UT_ASSERT_IS_NOT_ESNAP_CLONE(blob);
8897 	UT_ASSERT_IS_ESNAP_CLONE(snap_blob, &esnap_opts, sizeof(esnap_opts));
8898 
8899 	ut_blob_close_and_delete(bs, blob);
8900 	blob = NULL;
8901 	blobid = SPDK_BLOBID_INVALID;
8902 	UT_ASSERT_IS_ESNAP_CLONE(snap_blob, &esnap_opts, sizeof(esnap_opts));
8903 
8904 	/*
8905 	 * Clone the snapshot.  The snapshot continues to be the esnap clone.
8906 	 */
8907 	spdk_bs_create_clone(bs, snap_blobid, NULL, blob_op_with_id_complete, NULL);
8908 	poll_threads();
8909 	CU_ASSERT(g_bserrno == 0);
8910 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
8911 	blobid = g_blobid;
8912 
8913 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
8914 	poll_threads();
8915 	CU_ASSERT(g_bserrno == 0);
8916 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
8917 	blob = g_blob;
8918 
8919 	UT_ASSERT_IS_NOT_ESNAP_CLONE(blob);
8920 	UT_ASSERT_IS_ESNAP_CLONE(snap_blob, &esnap_opts, sizeof(esnap_opts));
8921 
8922 	/*
8923 	 * Delete the snapshot. The clone becomes the esnap clone.
8924 	 */
8925 	ut_blob_close_and_delete(bs, snap_blob);
8926 	snap_blob = NULL;
8927 	snap_blobid = SPDK_BLOBID_INVALID;
8928 	UT_ASSERT_IS_ESNAP_CLONE(blob, &esnap_opts, sizeof(esnap_opts));
8929 
8930 	/*
8931 	 * Clean up
8932 	 */
8933 	ut_blob_close_and_delete(bs, blob);
8934 }
8935 
8936 static uint64_t
8937 _blob_esnap_clone_hydrate(bool inflate)
8938 {
8939 	struct spdk_blob_store	*bs = g_bs;
8940 	struct spdk_blob_opts	opts;
8941 	struct ut_esnap_opts	esnap_opts;
8942 	struct spdk_blob	*blob;
8943 	spdk_blob_id		blobid;
8944 	struct spdk_io_channel *channel;
8945 	bool			destroyed = false;
8946 	const uint32_t		blocklen = spdk_bs_get_io_unit_size(bs);
8947 	const uint32_t		cluster_sz = spdk_bs_get_cluster_size(bs);
8948 	const uint64_t		esnap_num_clusters = 4;
8949 	const uint32_t		esnap_sz = cluster_sz * esnap_num_clusters;
8950 	const uint64_t		esnap_num_blocks = esnap_sz / blocklen;
8951 	uint64_t		num_failures = CU_get_number_of_failures();
8952 
8953 	channel = spdk_bs_alloc_io_channel(bs);
8954 	SPDK_CU_ASSERT_FATAL(channel != NULL);
8955 
8956 	/* Create the esnap clone */
8957 	ut_spdk_blob_opts_init(&opts);
8958 	ut_esnap_opts_init(blocklen, esnap_num_blocks, __func__, &destroyed, &esnap_opts);
8959 	opts.esnap_id = &esnap_opts;
8960 	opts.esnap_id_len = sizeof(esnap_opts);
8961 	opts.num_clusters = esnap_num_clusters;
8962 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
8963 	poll_threads();
8964 	CU_ASSERT(g_bserrno == 0);
8965 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
8966 	blobid = g_blobid;
8967 
8968 	/* Open the esnap clone */
8969 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
8970 	poll_threads();
8971 	CU_ASSERT(g_bserrno == 0);
8972 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
8973 	blob = g_blob;
8974 	UT_ASSERT_IS_ESNAP_CLONE(blob, &esnap_opts, sizeof(esnap_opts));
8975 
8976 	/*
8977 	 * Inflate or decouple  the blob then verify that it is no longer an esnap clone and has
8978 	 * right content
8979 	 */
8980 	if (inflate) {
8981 		spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
8982 	} else {
8983 		spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
8984 	}
8985 	poll_threads();
8986 	CU_ASSERT(g_bserrno == 0);
8987 	UT_ASSERT_IS_NOT_ESNAP_CLONE(blob);
8988 	CU_ASSERT(blob_esnap_verify_contents(blob, channel, 0, esnap_sz, esnap_sz, "read"));
8989 	ut_blob_close_and_delete(bs, blob);
8990 
8991 	/*
8992 	 * Clean up
8993 	 */
8994 	spdk_bs_free_io_channel(channel);
8995 	poll_threads();
8996 
8997 	/* Return number of new failures */
8998 	return CU_get_number_of_failures() - num_failures;
8999 }
9000 
9001 static void
9002 blob_esnap_clone_inflate(void)
9003 {
9004 	_blob_esnap_clone_hydrate(true);
9005 }
9006 
9007 static void
9008 blob_esnap_clone_decouple(void)
9009 {
9010 	_blob_esnap_clone_hydrate(false);
9011 }
9012 
/*
 * Verify that spdk_blob_set_esnap_bs_dev() can replace an open esnap clone's
 * back_bs_dev (destroying the previous device), and that a second replacement
 * still works while I/O channels on two threads hold references to the
 * current device.
 */
static void
blob_esnap_hotplug(void)
{
	struct spdk_blob_store	*bs = g_bs;
	struct ut_esnap_opts	esnap1_opts, esnap2_opts;
	struct spdk_blob_opts	opts;
	struct spdk_blob	*blob;
	struct spdk_bs_dev	*bs_dev;
	struct ut_esnap_dev	*esnap_dev;
	uint32_t		cluster_sz = spdk_bs_get_cluster_size(bs);
	uint32_t		block_sz = spdk_bs_get_io_unit_size(bs);
	const uint32_t		esnap_num_clusters = 4;
	uint64_t		esnap_num_blocks = cluster_sz * esnap_num_clusters / block_sz;
	/* destroyed1/destroyed2 are set by the UT esnap device's destroy hook. */
	bool			destroyed1 = false, destroyed2 = false;
	uint64_t		start_thread = g_ut_thread_id;
	struct spdk_io_channel	*ch0, *ch1;
	char			buf[block_sz];

	/* Create and open an esnap clone blob */
	ut_spdk_blob_opts_init(&opts);
	ut_esnap_opts_init(block_sz, esnap_num_blocks, "esnap1", &destroyed1, &esnap1_opts);
	opts.esnap_id = &esnap1_opts;
	opts.esnap_id_len = sizeof(esnap1_opts);
	opts.num_clusters = esnap_num_clusters;
	blob = ut_blob_create_and_open(bs, &opts);
	CU_ASSERT(blob != NULL);
	CU_ASSERT(spdk_blob_is_esnap_clone(blob));
	SPDK_CU_ASSERT_FATAL(blob->back_bs_dev != NULL);
	esnap_dev = (struct ut_esnap_dev *)blob->back_bs_dev;
	CU_ASSERT(strcmp(esnap_dev->ut_opts.name, "esnap1") == 0);

	/* Replace the external snapshot: esnap1 must be destroyed, esnap2 installed. */
	ut_esnap_opts_init(block_sz, esnap_num_blocks, "esnap2", &destroyed2, &esnap2_opts);
	bs_dev = ut_esnap_dev_alloc(&esnap2_opts);
	CU_ASSERT(!destroyed1);
	CU_ASSERT(!destroyed2);
	g_bserrno = 0xbad;
	spdk_blob_set_esnap_bs_dev(blob, bs_dev, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(destroyed1);
	CU_ASSERT(!destroyed2);
	SPDK_CU_ASSERT_FATAL(bs_dev == blob->back_bs_dev);
	SPDK_CU_ASSERT_FATAL(bs_dev == spdk_blob_get_esnap_bs_dev(blob));
	esnap_dev = (struct ut_esnap_dev *)blob->back_bs_dev;
	CU_ASSERT(strcmp(esnap_dev->ut_opts.name, "esnap2") == 0);

	/* Create a couple channels.  The reads make each thread touch the esnap
	 * device, which is what drives num_channels to 2 below.  The contents
	 * of buf are never checked, so sharing it between threads is fine. */
	set_thread(0);
	ch0 = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(ch0 != NULL);
	spdk_blob_io_read(blob, ch0, buf, 0, 1, bs_op_complete, NULL);
	set_thread(1);
	ch1 = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(ch1 != NULL);
	spdk_blob_io_read(blob, ch1, buf, 0, 1, bs_op_complete, NULL);
	set_thread(start_thread);
	poll_threads();
	CU_ASSERT(esnap_dev->num_channels == 2);

	/* Replace the external snapshot again, while both channels are live.
	 * Note destroyed1 now tracks "esnap1a" (re-initialized above). */
	ut_esnap_opts_init(block_sz, esnap_num_blocks, "esnap1a", &destroyed1, &esnap1_opts);
	bs_dev = ut_esnap_dev_alloc(&esnap1_opts);
	destroyed1 = destroyed2 = false;
	g_bserrno = 0xbad;
	spdk_blob_set_esnap_bs_dev(blob, bs_dev, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(!destroyed1);
	CU_ASSERT(destroyed2);
	SPDK_CU_ASSERT_FATAL(blob->back_bs_dev != NULL);
	esnap_dev = (struct ut_esnap_dev *)blob->back_bs_dev;
	CU_ASSERT(strcmp(esnap_dev->ut_opts.name, "esnap1a") == 0);

	/* Clean up: channels must be freed on the threads that allocated them. */
	set_thread(0);
	spdk_bs_free_io_channel(ch0);
	set_thread(1);
	spdk_bs_free_io_channel(ch1);
	set_thread(start_thread);
	g_bserrno = 0xbad;
	spdk_blob_close(blob, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
}
9098 
/* Canned verdict returned by the _blob_is_degraded() stub below. */
static bool g_blob_is_degraded;
/* Number of times the stub has been invoked since the test last reset it. */
static int g_blob_is_degraded_called;
9101 
9102 static bool
9103 _blob_is_degraded(struct spdk_bs_dev *dev)
9104 {
9105 	g_blob_is_degraded_called++;
9106 	return g_blob_is_degraded;
9107 }
9108 
9109 static void
9110 blob_is_degraded(void)
9111 {
9112 	struct spdk_bs_dev bs_is_degraded_null = { 0 };
9113 	struct spdk_bs_dev bs_is_degraded = { .is_degraded = _blob_is_degraded };
9114 
9115 	/* No back_bs_dev, no bs->dev->is_degraded */
9116 	g_blob_is_degraded_called = 0;
9117 	CU_ASSERT(!spdk_blob_is_degraded(g_blob));
9118 	CU_ASSERT(g_blob_is_degraded_called == 0);
9119 
9120 	/* No back_bs_dev, blobstore device degraded */
9121 	g_bs->dev->is_degraded = _blob_is_degraded;
9122 	g_blob_is_degraded_called = 0;
9123 	g_blob_is_degraded = true;
9124 	CU_ASSERT(spdk_blob_is_degraded(g_blob));
9125 	CU_ASSERT(g_blob_is_degraded_called == 1);
9126 
9127 	/* No back_bs_dev, blobstore device not degraded */
9128 	g_bs->dev->is_degraded = _blob_is_degraded;
9129 	g_blob_is_degraded_called = 0;
9130 	g_blob_is_degraded = false;
9131 	CU_ASSERT(!spdk_blob_is_degraded(g_blob));
9132 	CU_ASSERT(g_blob_is_degraded_called == 1);
9133 
9134 	/* back_bs_dev does not define is_degraded, no bs->dev->is_degraded */
9135 	g_bs->dev->is_degraded = NULL;
9136 	g_blob->back_bs_dev = &bs_is_degraded_null;
9137 	g_blob_is_degraded_called = 0;
9138 	g_blob_is_degraded = false;
9139 	CU_ASSERT(!spdk_blob_is_degraded(g_blob));
9140 	CU_ASSERT(g_blob_is_degraded_called == 0);
9141 
9142 	/* back_bs_dev is not degraded, no bs->dev->is_degraded */
9143 	g_bs->dev->is_degraded = NULL;
9144 	g_blob->back_bs_dev = &bs_is_degraded;
9145 	g_blob_is_degraded_called = 0;
9146 	g_blob_is_degraded = false;
9147 	CU_ASSERT(!spdk_blob_is_degraded(g_blob));
9148 	CU_ASSERT(g_blob_is_degraded_called == 1);
9149 
9150 	/* back_bs_dev is degraded, no bs->dev->is_degraded */
9151 	g_bs->dev->is_degraded = NULL;
9152 	g_blob->back_bs_dev = &bs_is_degraded;
9153 	g_blob_is_degraded_called = 0;
9154 	g_blob_is_degraded = true;
9155 	CU_ASSERT(spdk_blob_is_degraded(g_blob));
9156 	CU_ASSERT(g_blob_is_degraded_called == 1);
9157 
9158 	/* back_bs_dev is not degraded, blobstore device is not degraded */
9159 	g_bs->dev->is_degraded = _blob_is_degraded;
9160 	g_blob->back_bs_dev = &bs_is_degraded;
9161 	g_blob_is_degraded_called = 0;
9162 	g_blob_is_degraded = false;
9163 	CU_ASSERT(!spdk_blob_is_degraded(g_blob));
9164 	CU_ASSERT(g_blob_is_degraded_called == 2);
9165 
9166 	g_blob->back_bs_dev = NULL;
9167 }
9168 
9169 /* Resize a blob which is a clone created from snapshot. Verify read/writes to
9170  * expanded clone blob. Then inflate the clone blob. */
9171 static void
9172 blob_clone_resize(void)
9173 {
9174 	struct spdk_blob_store *bs = g_bs;
9175 	struct spdk_blob_opts opts;
9176 	struct spdk_blob *blob, *clone, *snap_blob, *snap_blob_rsz;
9177 	spdk_blob_id blobid, cloneid, snapid1, snapid2;
9178 	uint64_t pages_per_cluster;
9179 	uint8_t payload_read[bs->dev->blocklen];
9180 	uint8_t payload_write[bs->dev->blocklen];
9181 	struct spdk_io_channel *channel;
9182 	uint64_t free_clusters;
9183 
9184 	channel = spdk_bs_alloc_io_channel(bs);
9185 	SPDK_CU_ASSERT_FATAL(channel != NULL);
9186 
9187 	pages_per_cluster = spdk_bs_get_cluster_size(bs) / spdk_bs_get_page_size(bs);
9188 
9189 	/* Create blob with 10 clusters */
9190 	ut_spdk_blob_opts_init(&opts);
9191 	opts.num_clusters = 10;
9192 
9193 	blob = ut_blob_create_and_open(bs, &opts);
9194 	blobid = spdk_blob_get_id(blob);
9195 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
9196 
9197 	/* Create snapshot */
9198 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
9199 	poll_threads();
9200 	CU_ASSERT(g_bserrno == 0);
9201 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
9202 	snapid1 = g_blobid;
9203 
9204 	spdk_bs_create_clone(bs, snapid1, NULL, blob_op_with_id_complete, NULL);
9205 	poll_threads();
9206 	CU_ASSERT(g_bserrno == 0);
9207 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
9208 	cloneid = g_blobid;
9209 
9210 	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
9211 	poll_threads();
9212 	CU_ASSERT(g_bserrno == 0);
9213 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
9214 	clone = g_blob;
9215 	CU_ASSERT(spdk_blob_get_num_clusters(clone) == 10);
9216 
9217 	g_bserrno = -1;
9218 	spdk_blob_resize(clone, 20, blob_op_complete, NULL);
9219 	poll_threads();
9220 	CU_ASSERT(g_bserrno == 0);
9221 	CU_ASSERT(spdk_blob_get_num_clusters(clone) == 20);
9222 
9223 	/* Create another snapshot after resizing the clone */
9224 	spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL);
9225 	poll_threads();
9226 	CU_ASSERT(g_bserrno == 0);
9227 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
9228 	snapid2 = g_blobid;
9229 
9230 	/* Open the snapshot blobs */
9231 	spdk_bs_open_blob(bs, snapid1, blob_op_with_handle_complete, NULL);
9232 	CU_ASSERT(g_bserrno == 0);
9233 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
9234 	snap_blob = g_blob;
9235 	CU_ASSERT(snap_blob->data_ro == true);
9236 	CU_ASSERT(snap_blob->md_ro == true);
9237 	CU_ASSERT(spdk_blob_get_num_clusters(snap_blob) == 10);
9238 
9239 	spdk_bs_open_blob(bs, snapid2, blob_op_with_handle_complete, NULL);
9240 	CU_ASSERT(g_bserrno == 0);
9241 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
9242 	snap_blob_rsz = g_blob;
9243 	CU_ASSERT(snap_blob_rsz->data_ro == true);
9244 	CU_ASSERT(snap_blob_rsz->md_ro == true);
9245 	CU_ASSERT(spdk_blob_get_num_clusters(snap_blob_rsz) == 20);
9246 
9247 	/* Confirm that clone is backed by snap_blob_rsz, and snap_blob_rsz is backed by snap_blob */
9248 	SPDK_CU_ASSERT_FATAL(snap_blob->back_bs_dev == NULL);
9249 	SPDK_CU_ASSERT_FATAL(blob->back_bs_dev != NULL);
9250 	SPDK_CU_ASSERT_FATAL(snap_blob_rsz->back_bs_dev != NULL);
9251 
9252 	/* Write and read from pre-resize ranges */
9253 	g_bserrno = -1;
9254 	memset(payload_write, 0xE5, sizeof(payload_write));
9255 	spdk_blob_io_write(clone, channel, payload_write, 5 * pages_per_cluster, 1, blob_op_complete, NULL);
9256 	poll_threads();
9257 	CU_ASSERT(g_bserrno == 0);
9258 
9259 	g_bserrno = -1;
9260 	memset(payload_read, 0x00, sizeof(payload_read));
9261 	spdk_blob_io_read(clone, channel, payload_read, 5 * pages_per_cluster, 1, blob_op_complete, NULL);
9262 	poll_threads();
9263 	CU_ASSERT(g_bserrno == 0);
9264 	CU_ASSERT(memcmp(payload_write, payload_read, BLOCKLEN) == 0);
9265 
9266 	/* Write and read from post-resize ranges */
9267 	g_bserrno = -1;
9268 	memset(payload_write, 0xE5, sizeof(payload_write));
9269 	spdk_blob_io_write(clone, channel, payload_write, 15 * pages_per_cluster, 1, blob_op_complete,
9270 			   NULL);
9271 	poll_threads();
9272 	CU_ASSERT(g_bserrno == 0);
9273 
9274 	g_bserrno = -1;
9275 	memset(payload_read, 0x00, sizeof(payload_read));
9276 	spdk_blob_io_read(clone, channel, payload_read, 15 * pages_per_cluster, 1, blob_op_complete, NULL);
9277 	poll_threads();
9278 	CU_ASSERT(g_bserrno == 0);
9279 	CU_ASSERT(memcmp(payload_write, payload_read, bs->dev->blocklen) == 0);
9280 
9281 	/* Now do full blob inflation of the resized blob/clone. */
9282 	free_clusters = spdk_bs_free_cluster_count(bs);
9283 	spdk_bs_inflate_blob(bs, channel, cloneid, blob_op_complete, NULL);
9284 	poll_threads();
9285 	CU_ASSERT(g_bserrno == 0);
9286 	/* We wrote to 2 clusters earlier, all remaining 18 clusters in
9287 	 * blob should get allocated after inflation */
9288 	CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 18);
9289 
9290 	spdk_blob_close(clone, blob_op_complete, NULL);
9291 	poll_threads();
9292 	CU_ASSERT(g_bserrno == 0);
9293 
9294 	spdk_blob_close(snap_blob, blob_op_complete, NULL);
9295 	poll_threads();
9296 	CU_ASSERT(g_bserrno == 0);
9297 
9298 	spdk_blob_close(snap_blob_rsz, blob_op_complete, NULL);
9299 	poll_threads();
9300 	CU_ASSERT(g_bserrno == 0);
9301 
9302 	ut_blob_close_and_delete(bs, blob);
9303 
9304 	spdk_bs_free_io_channel(channel);
9305 }
9306 
9307 
/*
 * Resize an esnap clone to twice the size of its external snapshot, then
 * write every io_unit while spot-checking neighboring blocks: the block just
 * written, the previously written block, and the not-yet-written block that
 * should still be served by the external snapshot.
 */
static void
blob_esnap_clone_resize(void)
{
	struct spdk_bs_dev *dev;
	struct spdk_blob_store *bs;
	struct spdk_bs_opts bsopts;
	struct spdk_blob_opts opts;
	struct ut_esnap_opts esnap_opts;
	struct spdk_blob *blob;
	uint32_t block, esnap_blksz = 512, bs_blksz = 512;
	const uint32_t cluster_sz = 16 * 1024;
	const uint64_t esnap_num_clusters = 4;
	const uint32_t esnap_sz = cluster_sz * esnap_num_clusters;
	const uint64_t esnap_num_blocks = esnap_sz / esnap_blksz;
	uint64_t blob_num_blocks = esnap_sz / bs_blksz;
	struct spdk_io_channel *bs_ch;

	spdk_bs_opts_init(&bsopts, sizeof(bsopts));
	bsopts.cluster_sz = cluster_sz;
	bsopts.esnap_bs_dev_create = ut_esnap_create;
	/* Create device with desired block size */
	dev = init_dev();
	dev->blocklen = bs_blksz;
	dev->blockcnt = DEV_BUFFER_SIZE / dev->blocklen;
	/* Initialize a new blob store */
	spdk_bs_init(dev, &bsopts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	SPDK_CU_ASSERT_FATAL(g_bs->io_unit_size == bs_blksz);
	bs = g_bs;

	bs_ch = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(bs_ch != NULL);

	/* Create and open the esnap clone  */
	ut_spdk_blob_opts_init(&opts);
	ut_esnap_opts_init(esnap_blksz, esnap_num_blocks, __func__, NULL, &esnap_opts);
	opts.esnap_id = &esnap_opts;
	opts.esnap_id_len = sizeof(esnap_opts);
	opts.num_clusters = esnap_num_clusters;
	blob = ut_blob_create_and_open(bs, &opts);
	SPDK_CU_ASSERT_FATAL(blob != NULL);

	/* Double the blob's size; the second half has no esnap backing. */
	g_bserrno = -1;
	spdk_blob_resize(blob, esnap_num_clusters * 2, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == esnap_num_clusters * 2);

	/* Write one blob block at a time; verify that the surrounding blocks are OK */
	blob_num_blocks = (spdk_blob_get_num_clusters(blob) * cluster_sz) / bs_blksz;
	for (block = 0; block < blob_num_blocks; block++) {
		char buf[bs_blksz];
		union ut_word word;
		/* Written blocks carry a distinctive tag so they can be told
		 * apart from content generated by the esnap device. */
		word.f.blob_id = 0xfedcba90;
		word.f.lba = block;
		ut_memset8(buf, word.num, bs_blksz);
		spdk_blob_io_write(blob, bs_ch, buf, block, 1, bs_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		if (g_bserrno != 0) {
			break;
		}
		/* Read and verify the block before the current block */
		if (block != 0) {
			spdk_blob_io_read(blob, bs_ch, buf, block - 1, 1, bs_op_complete, NULL);
			poll_threads();
			CU_ASSERT(g_bserrno == 0);
			if (g_bserrno != 0) {
				break;
			}
			CU_ASSERT(ut_esnap_content_is_correct(buf, bs_blksz, word.f.blob_id,
							      (block - 1) * bs_blksz, bs_blksz));
		}
		/* Read and verify the current block */
		spdk_blob_io_read(blob, bs_ch, buf, block, 1, bs_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		if (g_bserrno != 0) {
			break;
		}
		CU_ASSERT(ut_esnap_content_is_correct(buf, bs_blksz, word.f.blob_id,
						      block * bs_blksz, bs_blksz));
		/* Check the block that follows.  It has not been written yet,
		 * so it is expected to hold content generated by the esnap
		 * device — hence blob->id and esnap_blksz rather than
		 * word.f.blob_id/bs_blksz (NOTE(review): relies on
		 * ut_esnap_content_is_correct() treating reads past the esnap
		 * size as zero-filled — confirm against esnap_dev.c). */
		if (block + 1 < blob_num_blocks) {
			g_bserrno = 0xbad;
			spdk_blob_io_read(blob, bs_ch, buf, block + 1, 1, bs_op_complete, NULL);
			poll_threads();
			CU_ASSERT(g_bserrno == 0);
			if (g_bserrno != 0) {
				break;
			}
			CU_ASSERT(ut_esnap_content_is_correct(buf, bs_blksz, blob->id,
							      (block + 1) * bs_blksz,
							      esnap_blksz));
		}
	}
	/* Clean up */
	spdk_bs_free_io_channel(bs_ch);
	g_bserrno = 0xbad;
	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	spdk_bs_unload(g_bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
	memset(g_dev_buffer, 0, DEV_BUFFER_SIZE);
}
9418 
/* Completion callback for raw ext_dev I/O: stash the result in g_bserrno. */
static void
bs_dev_io_complete_cb(struct spdk_io_channel *channel, void *cb_arg, int bserrno)
{
	g_bserrno = bserrno;
}
9424 
/*
 * Exercise spdk_bs_blob_shallow_copy(): the error paths (blob still writable,
 * external device with the wrong size or block length) and a successful copy
 * where only the clusters allocated directly to the blob — not those owned by
 * its snapshot — are written to the external device.
 */
static void
blob_shallow_copy(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts blob_opts;
	struct spdk_blob *blob;
	spdk_blob_id blobid;
	uint64_t num_clusters = 4;
	struct spdk_bs_dev *ext_dev;
	struct spdk_bs_dev_cb_args ext_args;
	struct spdk_io_channel *bdev_ch, *blob_ch;
	uint8_t buf1[DEV_BUFFER_BLOCKLEN];
	uint8_t buf2[DEV_BUFFER_BLOCKLEN];
	uint64_t io_units_per_cluster;
	uint64_t offset;
	int rc;

	blob_ch = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(blob_ch != NULL);

	/* Set blob dimension and as thin provisioned */
	ut_spdk_blob_opts_init(&blob_opts);
	blob_opts.thin_provision = true;
	blob_opts.num_clusters = num_clusters;

	/* Create a blob */
	blob = ut_blob_create_and_open(bs, &blob_opts);
	SPDK_CU_ASSERT_FATAL(blob != NULL);
	blobid = spdk_blob_get_id(blob);
	io_units_per_cluster = bs_io_units_per_cluster(blob);

	/* Write on cluster 2 and 4 of blob.  Each io_unit is filled with its
	 * own offset (truncated to a byte) so it is individually verifiable. */
	for (offset = io_units_per_cluster; offset < 2 * io_units_per_cluster; offset++) {
		memset(buf1, offset, DEV_BUFFER_BLOCKLEN);
		spdk_blob_io_write(blob, blob_ch, buf1, offset, 1, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
	}
	for (offset = 3 * io_units_per_cluster; offset < 4 * io_units_per_cluster; offset++) {
		memset(buf1, offset, DEV_BUFFER_BLOCKLEN);
		spdk_blob_io_write(blob, blob_ch, buf1, offset, 1, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
	}
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 2);

	/* Make a snapshot over blob.  The snapshot takes ownership of the two
	 * allocated clusters, leaving the blob with none of its own. */
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);

	/* Write on cluster 1 and 3 of blob */
	for (offset = 0; offset < io_units_per_cluster; offset++) {
		memset(buf1, offset, DEV_BUFFER_BLOCKLEN);
		spdk_blob_io_write(blob, blob_ch, buf1, offset, 1, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
	}
	for (offset = 2 * io_units_per_cluster; offset < 3 * io_units_per_cluster; offset++) {
		memset(buf1, offset, DEV_BUFFER_BLOCKLEN);
		spdk_blob_io_write(blob, blob_ch, buf1, offset, 1, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
	}
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 2);

	/* Shallow copy with a not read only blob */
	ext_dev = init_ext_dev(num_clusters * 1024 * 1024, DEV_BUFFER_BLOCKLEN);
	rc = spdk_bs_blob_shallow_copy(bs, blob_ch, blobid, ext_dev,
				       blob_shallow_copy_status_cb, NULL,
				       blob_op_complete, NULL);
	CU_ASSERT(rc == 0);
	poll_threads();
	CU_ASSERT(g_bserrno == -EPERM);
	ext_dev->destroy(ext_dev);

	/* Set blob read only */
	spdk_blob_set_read_only(blob);
	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Shallow copy over a spdk_bs_dev with incorrect size */
	ext_dev = init_ext_dev(1, DEV_BUFFER_BLOCKLEN);
	rc = spdk_bs_blob_shallow_copy(bs, blob_ch, blobid, ext_dev,
				       blob_shallow_copy_status_cb, NULL,
				       blob_op_complete, NULL);
	CU_ASSERT(rc == 0);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);
	ext_dev->destroy(ext_dev);

	/* Shallow copy over a spdk_bs_dev with incorrect block len */
	ext_dev = init_ext_dev(num_clusters * 1024 * 1024, DEV_BUFFER_BLOCKLEN * 2);
	rc = spdk_bs_blob_shallow_copy(bs, blob_ch, blobid, ext_dev,
				       blob_shallow_copy_status_cb, NULL,
				       blob_op_complete, NULL);
	CU_ASSERT(rc == 0);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);
	ext_dev->destroy(ext_dev);

	/* Initialize ext_dev for the successful shallow copy.  Pre-fill it
	 * with 0xff so untouched regions can be detected afterwards.
	 * NOTE(review): ext_args.cb_arg is never set; bs_dev_io_complete_cb
	 * ignores it, but consider zero-initializing ext_args — TODO confirm. */
	ext_dev = init_ext_dev(num_clusters * 1024 * 1024, DEV_BUFFER_BLOCKLEN);
	bdev_ch = ext_dev->create_channel(ext_dev);
	SPDK_CU_ASSERT_FATAL(bdev_ch != NULL);
	ext_args.cb_fn = bs_dev_io_complete_cb;
	for (offset = 0; offset < 4 * io_units_per_cluster; offset++) {
		memset(buf2, 0xff, DEV_BUFFER_BLOCKLEN);
		ext_dev->write(ext_dev, bdev_ch, buf2, offset, 1, &ext_args);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
	}

	/* Correct shallow copy of blob over bdev.  The poll_thread_times()
	 * step counts and g_copied_clusters_count values depend on the
	 * blob_shallow_copy_status_cb progress callback defined elsewhere in
	 * this file — presumably it increments the counter once per copied
	 * cluster; verify there if these asserts ever drift. */
	rc = spdk_bs_blob_shallow_copy(bs, blob_ch, blobid, ext_dev,
				       blob_shallow_copy_status_cb, NULL,
				       blob_op_complete, NULL);
	CU_ASSERT(rc == 0);
	poll_thread_times(0, 1);
	CU_ASSERT(g_copied_clusters_count == 1);
	poll_thread_times(0, 2);
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_copied_clusters_count == 2);

	/* Read from bdev */
	/* Only cluster 1 and 3 must be filled */
	/* Clusters 2 and 4 should not have been touched */
	for (offset = 0; offset < io_units_per_cluster; offset++) {
		memset(buf1, offset, DEV_BUFFER_BLOCKLEN);
		ext_dev->read(ext_dev, bdev_ch, buf2, offset, 1, &ext_args);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(memcmp(buf1, buf2, DEV_BUFFER_BLOCKLEN) == 0);
	}
	for (offset = io_units_per_cluster; offset < 2 * io_units_per_cluster; offset++) {
		memset(buf1, 0xff, DEV_BUFFER_BLOCKLEN);
		ext_dev->read(ext_dev, bdev_ch, buf2, offset, 1, &ext_args);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(memcmp(buf1, buf2, DEV_BUFFER_BLOCKLEN) == 0);
	}
	for (offset = 2 * io_units_per_cluster; offset < 3 * io_units_per_cluster; offset++) {
		memset(buf1, offset, DEV_BUFFER_BLOCKLEN);
		ext_dev->read(ext_dev, bdev_ch, buf2, offset, 1, &ext_args);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(memcmp(buf1, buf2, DEV_BUFFER_BLOCKLEN) == 0);
	}
	for (offset = 3 * io_units_per_cluster; offset < 4 * io_units_per_cluster; offset++) {
		memset(buf1, 0xff, DEV_BUFFER_BLOCKLEN);
		ext_dev->read(ext_dev, bdev_ch, buf2, offset, 1, &ext_args);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(memcmp(buf1, buf2, DEV_BUFFER_BLOCKLEN) == 0);
	}

	/* Clean up */
	ext_dev->destroy_channel(ext_dev, bdev_ch);
	ext_dev->destroy(ext_dev);
	spdk_bs_free_io_channel(blob_ch);
	ut_blob_close_and_delete(bs, blob);
	poll_threads();
}
9590 
9591 static void
9592 blob_set_parent(void)
9593 {
9594 	struct spdk_blob_store *bs = g_bs;
9595 	struct spdk_blob_opts opts;
9596 	struct ut_esnap_opts esnap_opts;
9597 	struct spdk_blob *blob1, *blob2, *blob3, *blob4, *blob5;
9598 	spdk_blob_id blobid1, blobid2, blobid3, blobid4, blobid5,
9599 		     snapshotid1, snapshotid2, snapshotid3;
9600 	uint32_t cluster_sz, block_sz;
9601 	const uint32_t esnap_num_clusters = 4;
9602 	uint64_t esnap_num_blocks;
9603 	spdk_blob_id ids[2];
9604 	size_t clone_count = 2;
9605 
9606 	cluster_sz = spdk_bs_get_cluster_size(bs);
9607 	block_sz = spdk_bs_get_io_unit_size(bs);
9608 	esnap_num_blocks = cluster_sz * esnap_num_clusters / block_sz;
9609 
9610 	/* Create a normal blob and make a couple of snapshots */
9611 	ut_spdk_blob_opts_init(&opts);
9612 	blob1 = ut_blob_create_and_open(bs, &opts);
9613 	SPDK_CU_ASSERT_FATAL(blob1 != NULL);
9614 	blobid1 = spdk_blob_get_id(blob1);
9615 	spdk_bs_create_snapshot(bs, blobid1, NULL, blob_op_with_id_complete, NULL);
9616 	poll_threads();
9617 	SPDK_CU_ASSERT_FATAL(g_bserrno == 0);
9618 	SPDK_CU_ASSERT_FATAL(g_blobid != SPDK_BLOBID_INVALID);
9619 	snapshotid1 = g_blobid;
9620 	spdk_bs_create_snapshot(bs, blobid1, NULL, blob_op_with_id_complete, NULL);
9621 	poll_threads();
9622 	SPDK_CU_ASSERT_FATAL(g_bserrno == 0);
9623 	SPDK_CU_ASSERT_FATAL(g_blobid != SPDK_BLOBID_INVALID);
9624 	snapshotid2 = g_blobid;
9625 
9626 	/* Call set_parent with an invalid snapshotid */
9627 	spdk_bs_blob_set_parent(bs, blobid1, SPDK_BLOBID_INVALID, blob_op_complete, NULL);
9628 	poll_threads();
9629 	CU_ASSERT(g_bserrno == -EINVAL);
9630 
9631 	/* Call set_parent with blobid and snapshotid the same */
9632 	spdk_bs_blob_set_parent(bs, blobid1, blobid1, blob_op_complete, NULL);
9633 	poll_threads();
9634 	CU_ASSERT(g_bserrno == -EINVAL);
9635 
9636 	/* Call set_parent with a blob and its parent snapshot */
9637 	spdk_bs_blob_set_parent(bs, blobid1, snapshotid2, blob_op_complete, NULL);
9638 	poll_threads();
9639 	CU_ASSERT(g_bserrno == -EEXIST);
9640 
9641 	/* Create an esnap clone blob */
9642 	ut_spdk_blob_opts_init(&opts);
9643 	ut_esnap_opts_init(block_sz, esnap_num_blocks, __func__, NULL, &esnap_opts);
9644 	opts.esnap_id = &esnap_opts;
9645 	opts.esnap_id_len = sizeof(esnap_opts);
9646 	opts.num_clusters = esnap_num_clusters;
9647 	blob2 = ut_blob_create_and_open(bs, &opts);
9648 	SPDK_CU_ASSERT_FATAL(blob2 != NULL);
9649 	blobid2 = spdk_blob_get_id(blob2);
9650 	CU_ASSERT(spdk_blob_is_esnap_clone(blob2));
9651 
9652 	/* Call set_parent with a non snapshot parent */
9653 	spdk_bs_blob_set_parent(bs, blobid2, blobid1, blob_op_complete, NULL);
9654 	poll_threads();
9655 	CU_ASSERT(g_bserrno == -EINVAL);
9656 
9657 	/* Call set_parent with blob and snapshot of different size */
9658 	spdk_bs_blob_set_parent(bs, blobid2, snapshotid1, blob_op_complete, NULL);
9659 	poll_threads();
9660 	CU_ASSERT(g_bserrno == -EINVAL);
9661 
9662 	/* Call set_parent correctly with a snapshot's clone blob */
9663 	spdk_bs_blob_set_parent(bs, blobid1, snapshotid1, blob_op_complete, NULL);
9664 	poll_threads();
9665 	CU_ASSERT(g_bserrno == 0);
9666 
9667 	/* Check relations */
9668 	CU_ASSERT(spdk_blob_is_clone(blob1));
9669 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid1) == snapshotid1);
9670 	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid1, ids, &clone_count) == 0);
9671 	CU_ASSERT(clone_count == 2);
9672 	CU_ASSERT(ids[1] == blobid1);
9673 
9674 	/* Create another normal blob with size equal to esnap size and make a snapshot */
9675 	ut_spdk_blob_opts_init(&opts);
9676 	opts.num_clusters = esnap_num_clusters;
9677 	opts.thin_provision = true;
9678 	blob3 = ut_blob_create_and_open(bs, &opts);
9679 	SPDK_CU_ASSERT_FATAL(blob3 != NULL);
9680 	blobid3 = spdk_blob_get_id(blob3);
9681 	spdk_bs_create_snapshot(bs, blobid3, NULL, blob_op_with_id_complete, NULL);
9682 	poll_threads();
9683 	SPDK_CU_ASSERT_FATAL(g_bserrno == 0);
9684 	SPDK_CU_ASSERT_FATAL(g_blobid != SPDK_BLOBID_INVALID);
9685 	snapshotid3 = g_blobid;
9686 
9687 	/* Call set_parent correctly with an esnap's clone blob */
9688 	spdk_bs_blob_set_parent(bs, blobid2, snapshotid3, blob_op_complete, NULL);
9689 	poll_threads();
9690 	CU_ASSERT(g_bserrno == 0);
9691 
9692 	/* Check relations */
9693 	CU_ASSERT(!spdk_blob_is_esnap_clone(blob2));
9694 	CU_ASSERT(spdk_blob_is_clone(blob2));
9695 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid2) == snapshotid3);
9696 	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid3, ids, &clone_count) == 0);
9697 	CU_ASSERT(clone_count == 2);
9698 	CU_ASSERT(ids[1] == blobid2);
9699 
9700 	/* Create a not thin-provisioned blob that is not a clone */
9701 	ut_spdk_blob_opts_init(&opts);
9702 	opts.thin_provision = false;
9703 	blob4 = ut_blob_create_and_open(bs, &opts);
9704 	SPDK_CU_ASSERT_FATAL(blob4 != NULL);
9705 	blobid4 = spdk_blob_get_id(blob4);
9706 
9707 	/* Call set_parent with a blob that isn't a clone and that isn't thin-provisioned */
9708 	spdk_bs_blob_set_parent(bs, blobid4, snapshotid2, blob_op_complete, NULL);
9709 	poll_threads();
9710 	CU_ASSERT(g_bserrno == -EINVAL);
9711 
9712 	/* Create a thin-provisioned blob that is not a clone */
9713 	ut_spdk_blob_opts_init(&opts);
9714 	opts.thin_provision = true;
9715 	blob5 = ut_blob_create_and_open(bs, &opts);
9716 	SPDK_CU_ASSERT_FATAL(blob5 != NULL);
9717 	blobid5 = spdk_blob_get_id(blob5);
9718 
9719 	/* Call set_parent correctly with a blob that isn't a clone */
9720 	spdk_bs_blob_set_parent(bs, blobid5, snapshotid2, blob_op_complete, NULL);
9721 	poll_threads();
9722 	CU_ASSERT(g_bserrno == 0);
9723 
9724 	/* Check relations */
9725 	CU_ASSERT(spdk_blob_is_clone(blob5));
9726 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid5) == snapshotid2);
9727 	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &clone_count) == 0);
9728 	CU_ASSERT(clone_count == 1);
9729 	CU_ASSERT(ids[0] == blobid5);
9730 
9731 	/* Clean up */
9732 	ut_blob_close_and_delete(bs, blob5);
9733 	ut_blob_close_and_delete(bs, blob4);
9734 	ut_blob_close_and_delete(bs, blob3);
9735 	ut_blob_close_and_delete(bs, blob2);
9736 	ut_blob_close_and_delete(bs, blob1);
9737 	spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL);
9738 	poll_threads();
9739 	CU_ASSERT(g_bserrno == 0);
9740 	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
9741 	poll_threads();
9742 	CU_ASSERT(g_bserrno == 0);
9743 	spdk_bs_delete_blob(bs, snapshotid1, blob_op_complete, NULL);
9744 	poll_threads();
9745 	CU_ASSERT(g_bserrno == 0);
9746 }
9747 
/*
 * Exercise spdk_bs_blob_set_external_parent(): the invalid-argument cases
 * (self-referencing esnap id, size mismatch, parent already set, thick
 * non-clone blob) followed by the successful paths for a snapshot's clone
 * and for a plain thin-provisioned blob.
 */
static void
blob_set_external_parent(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts opts;
	struct ut_esnap_opts esnap_opts, esnap_opts2;
	struct spdk_blob *blob1, *blob2, *blob3, *blob4;
	spdk_blob_id blobid1, blobid2, blobid3, blobid4, snapshotid;
	uint32_t cluster_sz, block_sz;
	const uint32_t esnap_num_clusters = 4;
	uint64_t esnap_num_blocks;
	struct spdk_bs_dev *esnap_dev1, *esnap_dev2, *esnap_dev3;
	const void *esnap_id;
	size_t esnap_id_len;
	int rc;

	cluster_sz = spdk_bs_get_cluster_size(bs);
	block_sz = spdk_bs_get_io_unit_size(bs);
	/* Size each esnap device so it exactly covers esnap_num_clusters */
	esnap_num_blocks = cluster_sz * esnap_num_clusters / block_sz;
	esnap_dev1 = init_dev();
	esnap_dev2 = init_dev();
	esnap_dev3 = init_dev();

	/* Create an esnap clone blob */
	ut_spdk_blob_opts_init(&opts);
	ut_esnap_opts_init(block_sz, esnap_num_blocks, __func__, NULL, &esnap_opts);
	opts.esnap_id = &esnap_opts;
	opts.esnap_id_len = sizeof(esnap_opts);
	opts.num_clusters = esnap_num_clusters;
	blob1 = ut_blob_create_and_open(bs, &opts);
	SPDK_CU_ASSERT_FATAL(blob1 != NULL);
	blobid1 = spdk_blob_get_id(blob1);
	CU_ASSERT(spdk_blob_is_esnap_clone(blob1));

	/* Call set_external_parent with blobid and esnapid the same.
	 * NOTE(review): no poll_threads() before this check or the next one —
	 * assumes the -EINVAL completion fires synchronously, before any async
	 * work is queued; confirm against spdk_bs_blob_set_external_parent().
	 */
	spdk_bs_blob_set_external_parent(bs, blobid1, esnap_dev1, &blobid1, sizeof(blobid1),
					 blob_op_complete, NULL);
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Call set_external_parent with esnap of incompatible size */
	esnap_dev1->blockcnt = esnap_num_blocks - 1;
	spdk_bs_blob_set_external_parent(bs, blobid1, esnap_dev1, opts.esnap_id, opts.esnap_id_len,
					 blob_op_complete, NULL);
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Call set_external_parent with a blob and its parent esnap:
	 * restoring the correct geometry first, the call must fail with
	 * -EEXIST because blob1 already has this external parent. */
	esnap_dev1->blocklen = block_sz;
	esnap_dev1->blockcnt = esnap_num_blocks;
	spdk_bs_blob_set_external_parent(bs, blobid1, esnap_dev1, opts.esnap_id, opts.esnap_id_len,
					 blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EEXIST);

	/* Create a blob that is a clone of a snapshot */
	ut_spdk_blob_opts_init(&opts);
	blob2 = ut_blob_create_and_open(bs, &opts);
	SPDK_CU_ASSERT_FATAL(blob2 != NULL);
	blobid2 = spdk_blob_get_id(blob2);
	spdk_bs_create_snapshot(bs, blobid2, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blobid != SPDK_BLOBID_INVALID);
	snapshotid = g_blobid;

	/* Call set_external_parent correctly with a snapshot's clone blob */
	esnap_dev2->blocklen = block_sz;
	esnap_dev2->blockcnt = esnap_num_blocks;
	ut_esnap_opts_init(block_sz, esnap_num_blocks, __func__, NULL, &esnap_opts2);
	spdk_bs_blob_set_external_parent(bs, blobid2, esnap_dev2, &esnap_opts2, sizeof(esnap_opts2),
					 blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Check relations: blob2 switched from snapshot clone to esnap clone
	 * and reports the esnap id we just assigned. */
	rc = spdk_blob_get_esnap_id(blob2, &esnap_id, &esnap_id_len);
	CU_ASSERT(spdk_blob_is_esnap_clone(blob2));
	CU_ASSERT(!spdk_blob_is_clone(blob2));
	CU_ASSERT(rc == 0 && esnap_id_len == sizeof(esnap_opts2) &&
		  memcmp(esnap_id, &esnap_opts2, esnap_id_len) == 0);
	CU_ASSERT(blob2->parent_id == SPDK_BLOBID_EXTERNAL_SNAPSHOT);

	/* Create a not thin-provisioned blob that is not a clone */
	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = false;
	blob3 = ut_blob_create_and_open(bs, &opts);
	SPDK_CU_ASSERT_FATAL(blob3 != NULL);
	blobid3 = spdk_blob_get_id(blob3);

	/* Call set_external_parent with a blob that isn't a clone and that isn't thin-provisioned */
	spdk_bs_blob_set_external_parent(bs, blobid3, esnap_dev1, &esnap_opts, sizeof(esnap_opts),
					 blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Create a thin-provisioned blob that is not a clone */
	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = true;
	blob4 = ut_blob_create_and_open(bs, &opts);
	SPDK_CU_ASSERT_FATAL(blob4 != NULL);
	blobid4 = spdk_blob_get_id(blob4);

	/* Call set_external_parent correctly with a blob that isn't a clone */
	esnap_dev3->blocklen = block_sz;
	esnap_dev3->blockcnt = esnap_num_blocks;
	ut_esnap_opts_init(block_sz, esnap_num_blocks, __func__, NULL, &esnap_opts);
	spdk_bs_blob_set_external_parent(bs, blobid4, esnap_dev3, &esnap_opts, sizeof(esnap_opts),
					 blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Check relations */
	rc = spdk_blob_get_esnap_id(blob4, &esnap_id, &esnap_id_len);
	CU_ASSERT(spdk_blob_is_esnap_clone(blob4));
	CU_ASSERT(!spdk_blob_is_clone(blob4));
	CU_ASSERT(rc == 0 && esnap_id_len == sizeof(esnap_opts) &&
		  memcmp(esnap_id, &esnap_opts, esnap_id_len) == 0);
	CU_ASSERT(blob4->parent_id == SPDK_BLOBID_EXTERNAL_SNAPSHOT);

	/* Clean up. Only esnap_dev1 is destroyed explicitly — its
	 * set_external_parent calls all failed, so it was never adopted.
	 * NOTE(review): esnap_dev2/esnap_dev3 are presumably owned by the
	 * blobstore after the successful calls and released via
	 * ut_blob_close_and_delete(); confirm to rule out a leak. */
	ut_blob_close_and_delete(bs, blob4);
	ut_blob_close_and_delete(bs, blob3);
	ut_blob_close_and_delete(bs, blob2);
	ut_blob_close_and_delete(bs, blob1);
	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
	dev_destroy(esnap_dev1);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
}
9875 
9876 static void
9877 suite_bs_setup(void)
9878 {
9879 	struct spdk_bs_dev *dev;
9880 
9881 	dev = init_dev();
9882 	memset(g_dev_buffer, 0, DEV_BUFFER_SIZE);
9883 	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
9884 	poll_threads();
9885 	CU_ASSERT(g_bserrno == 0);
9886 	CU_ASSERT(g_bs != NULL);
9887 }
9888 
9889 static void
9890 suite_esnap_bs_setup(void)
9891 {
9892 	struct spdk_bs_dev	*dev;
9893 	struct spdk_bs_opts	bs_opts;
9894 
9895 	dev = init_dev();
9896 	memset(g_dev_buffer, 0, DEV_BUFFER_SIZE);
9897 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
9898 	bs_opts.cluster_sz = 16 * 1024;
9899 	bs_opts.esnap_bs_dev_create = ut_esnap_create;
9900 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
9901 	poll_threads();
9902 	CU_ASSERT(g_bserrno == 0);
9903 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
9904 }
9905 
9906 static void
9907 suite_bs_cleanup(void)
9908 {
9909 	if (g_bs != NULL) {
9910 		spdk_bs_unload(g_bs, bs_op_complete, NULL);
9911 		poll_threads();
9912 		CU_ASSERT(g_bserrno == 0);
9913 		g_bs = NULL;
9914 	}
9915 	memset(g_dev_buffer, 0, DEV_BUFFER_SIZE);
9916 }
9917 
9918 static struct spdk_blob *
9919 ut_blob_create_and_open(struct spdk_blob_store *bs, struct spdk_blob_opts *blob_opts)
9920 {
9921 	struct spdk_blob *blob;
9922 	struct spdk_blob_opts create_blob_opts;
9923 	spdk_blob_id blobid;
9924 
9925 	if (blob_opts == NULL) {
9926 		ut_spdk_blob_opts_init(&create_blob_opts);
9927 		blob_opts = &create_blob_opts;
9928 	}
9929 
9930 	spdk_bs_create_blob_ext(bs, blob_opts, blob_op_with_id_complete, NULL);
9931 	poll_threads();
9932 	CU_ASSERT(g_bserrno == 0);
9933 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
9934 	blobid = g_blobid;
9935 	g_blobid = -1;
9936 
9937 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
9938 	poll_threads();
9939 	CU_ASSERT(g_bserrno == 0);
9940 	CU_ASSERT(g_blob != NULL);
9941 	blob = g_blob;
9942 
9943 	g_blob = NULL;
9944 	g_bserrno = -1;
9945 
9946 	return blob;
9947 }
9948 
9949 static void
9950 ut_blob_close_and_delete(struct spdk_blob_store *bs, struct spdk_blob *blob)
9951 {
9952 	spdk_blob_id blobid = spdk_blob_get_id(blob);
9953 
9954 	spdk_blob_close(blob, blob_op_complete, NULL);
9955 	poll_threads();
9956 	CU_ASSERT(g_bserrno == 0);
9957 	g_blob = NULL;
9958 
9959 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
9960 	poll_threads();
9961 	CU_ASSERT(g_bserrno == 0);
9962 	g_bserrno = -1;
9963 }
9964 
9965 static void
9966 suite_blob_setup(void)
9967 {
9968 	suite_bs_setup();
9969 	CU_ASSERT(g_bs != NULL);
9970 
9971 	g_blob = ut_blob_create_and_open(g_bs, NULL);
9972 	CU_ASSERT(g_blob != NULL);
9973 }
9974 
/*
 * Per-suite teardown for the blob_blob_* suites: dispose of the blob
 * created by suite_blob_setup(), then tear down the blobstore.
 */
static void
suite_blob_cleanup(void)
{
	ut_blob_close_and_delete(g_bs, g_blob);
	CU_ASSERT(g_blob == NULL);

	suite_bs_cleanup();
	CU_ASSERT(g_bs == NULL);
}
9984 
9985 static int
9986 ut_setup_config_nocopy_noextent(void)
9987 {
9988 	g_dev_copy_enabled = false;
9989 	g_use_extent_table = false;
9990 
9991 	return 0;
9992 }
9993 
9994 static int
9995 ut_setup_config_nocopy_extent(void)
9996 {
9997 	g_dev_copy_enabled = false;
9998 	g_use_extent_table = true;
9999 
10000 	return 0;
10001 }
10002 
10003 static int
10004 ut_setup_config_copy_noextent(void)
10005 {
10006 	g_dev_copy_enabled = true;
10007 	g_use_extent_table = false;
10008 
10009 	return 0;
10010 }
10011 
10012 static int
10013 ut_setup_config_copy_extent(void)
10014 {
10015 	g_dev_copy_enabled = true;
10016 	g_use_extent_table = true;
10017 
10018 	return 0;
10019 }
10020 
/* One test configuration: a suffix appended to each suite name and the
 * CUnit suite-init callback that sets the matching config globals. */
struct ut_config {
	const char *suffix;	/* e.g. "nocopy_noextent"; used in suite names */
	CU_InitializeFunc setup_cb;	/* one of the ut_setup_config_* hooks */
};
10025 
10026 int
10027 main(int argc, char **argv)
10028 {
10029 	CU_pSuite		suite, suite_bs, suite_blob, suite_esnap_bs;
10030 	unsigned int		i, num_failures;
10031 	char			suite_name[4096];
10032 	struct ut_config	*config;
10033 	struct ut_config	configs[] = {
10034 		{"nocopy_noextent", ut_setup_config_nocopy_noextent},
10035 		{"nocopy_extent", ut_setup_config_nocopy_extent},
10036 		{"copy_noextent", ut_setup_config_copy_noextent},
10037 		{"copy_extent", ut_setup_config_copy_extent},
10038 	};
10039 
10040 	CU_initialize_registry();
10041 
10042 	for (i = 0; i < SPDK_COUNTOF(configs); ++i) {
10043 		config = &configs[i];
10044 
10045 		snprintf(suite_name, sizeof(suite_name), "blob_%s", config->suffix);
10046 		suite = CU_add_suite(suite_name, config->setup_cb, NULL);
10047 
10048 		snprintf(suite_name, sizeof(suite_name), "blob_bs_%s", config->suffix);
10049 		suite_bs = CU_add_suite_with_setup_and_teardown(suite_name, config->setup_cb, NULL,
10050 				suite_bs_setup, suite_bs_cleanup);
10051 
10052 		snprintf(suite_name, sizeof(suite_name), "blob_blob_%s", config->suffix);
10053 		suite_blob = CU_add_suite_with_setup_and_teardown(suite_name, config->setup_cb, NULL,
10054 				suite_blob_setup, suite_blob_cleanup);
10055 
10056 		snprintf(suite_name, sizeof(suite_name), "blob_esnap_bs_%s", config->suffix);
10057 		suite_esnap_bs = CU_add_suite_with_setup_and_teardown(suite_name, config->setup_cb, NULL,
10058 				 suite_esnap_bs_setup,
10059 				 suite_bs_cleanup);
10060 
10061 		CU_ADD_TEST(suite, blob_init);
10062 		CU_ADD_TEST(suite_bs, blob_open);
10063 		CU_ADD_TEST(suite_bs, blob_create);
10064 		CU_ADD_TEST(suite_bs, blob_create_loop);
10065 		CU_ADD_TEST(suite_bs, blob_create_fail);
10066 		CU_ADD_TEST(suite_bs, blob_create_internal);
10067 		CU_ADD_TEST(suite_bs, blob_create_zero_extent);
10068 		CU_ADD_TEST(suite, blob_thin_provision);
10069 		CU_ADD_TEST(suite_bs, blob_snapshot);
10070 		CU_ADD_TEST(suite_bs, blob_clone);
10071 		CU_ADD_TEST(suite_bs, blob_inflate);
10072 		CU_ADD_TEST(suite_bs, blob_delete);
10073 		CU_ADD_TEST(suite_bs, blob_resize_test);
10074 		CU_ADD_TEST(suite_bs, blob_resize_thin_test);
10075 		CU_ADD_TEST(suite, blob_read_only);
10076 		CU_ADD_TEST(suite_bs, channel_ops);
10077 		CU_ADD_TEST(suite_bs, blob_super);
10078 		CU_ADD_TEST(suite_blob, blob_write);
10079 		CU_ADD_TEST(suite_blob, blob_read);
10080 		CU_ADD_TEST(suite_blob, blob_rw_verify);
10081 		CU_ADD_TEST(suite_bs, blob_rw_verify_iov);
10082 		CU_ADD_TEST(suite_blob, blob_rw_verify_iov_nomem);
10083 		CU_ADD_TEST(suite_blob, blob_rw_iov_read_only);
10084 		CU_ADD_TEST(suite_bs, blob_unmap);
10085 		CU_ADD_TEST(suite_bs, blob_iter);
10086 		CU_ADD_TEST(suite_blob, blob_xattr);
10087 		CU_ADD_TEST(suite_bs, blob_parse_md);
10088 		CU_ADD_TEST(suite, bs_load);
10089 		CU_ADD_TEST(suite_bs, bs_load_pending_removal);
10090 		CU_ADD_TEST(suite, bs_load_custom_cluster_size);
10091 		CU_ADD_TEST(suite, bs_load_after_failed_grow);
10092 		CU_ADD_TEST(suite_bs, bs_unload);
10093 		CU_ADD_TEST(suite, bs_cluster_sz);
10094 		CU_ADD_TEST(suite_bs, bs_usable_clusters);
10095 		CU_ADD_TEST(suite, bs_resize_md);
10096 		CU_ADD_TEST(suite, bs_destroy);
10097 		CU_ADD_TEST(suite, bs_type);
10098 		CU_ADD_TEST(suite, bs_super_block);
10099 		CU_ADD_TEST(suite, bs_test_recover_cluster_count);
10100 		CU_ADD_TEST(suite, bs_grow_live);
10101 		CU_ADD_TEST(suite, bs_grow_live_no_space);
10102 		CU_ADD_TEST(suite, bs_test_grow);
10103 		CU_ADD_TEST(suite, blob_serialize_test);
10104 		CU_ADD_TEST(suite_bs, blob_crc);
10105 		CU_ADD_TEST(suite, super_block_crc);
10106 		CU_ADD_TEST(suite_blob, blob_dirty_shutdown);
10107 		CU_ADD_TEST(suite_bs, blob_flags);
10108 		CU_ADD_TEST(suite_bs, bs_version);
10109 		CU_ADD_TEST(suite_bs, blob_set_xattrs_test);
10110 		CU_ADD_TEST(suite_bs, blob_thin_prov_alloc);
10111 		CU_ADD_TEST(suite_bs, blob_insert_cluster_msg_test);
10112 		CU_ADD_TEST(suite_bs, blob_thin_prov_rw);
10113 		CU_ADD_TEST(suite, blob_thin_prov_write_count_io);
10114 		CU_ADD_TEST(suite, blob_thin_prov_unmap_cluster);
10115 		CU_ADD_TEST(suite_bs, blob_thin_prov_rle);
10116 		CU_ADD_TEST(suite_bs, blob_thin_prov_rw_iov);
10117 		CU_ADD_TEST(suite, bs_load_iter_test);
10118 		CU_ADD_TEST(suite_bs, blob_snapshot_rw);
10119 		CU_ADD_TEST(suite_bs, blob_snapshot_rw_iov);
10120 		CU_ADD_TEST(suite, blob_relations);
10121 		CU_ADD_TEST(suite, blob_relations2);
10122 		CU_ADD_TEST(suite, blob_relations3);
10123 		CU_ADD_TEST(suite, blobstore_clean_power_failure);
10124 		CU_ADD_TEST(suite, blob_delete_snapshot_power_failure);
10125 		CU_ADD_TEST(suite, blob_create_snapshot_power_failure);
10126 		CU_ADD_TEST(suite_bs, blob_inflate_rw);
10127 		CU_ADD_TEST(suite_bs, blob_snapshot_freeze_io);
10128 		CU_ADD_TEST(suite_bs, blob_operation_split_rw);
10129 		CU_ADD_TEST(suite_bs, blob_operation_split_rw_iov);
10130 		CU_ADD_TEST(suite, blob_io_unit);
10131 		CU_ADD_TEST(suite, blob_io_unit_compatibility);
10132 		CU_ADD_TEST(suite_bs, blob_simultaneous_operations);
10133 		CU_ADD_TEST(suite_bs, blob_persist_test);
10134 		CU_ADD_TEST(suite_bs, blob_decouple_snapshot);
10135 		CU_ADD_TEST(suite_bs, blob_seek_io_unit);
10136 		CU_ADD_TEST(suite_esnap_bs, blob_esnap_create);
10137 		CU_ADD_TEST(suite_bs, blob_nested_freezes);
10138 		CU_ADD_TEST(suite, blob_ext_md_pages);
10139 		CU_ADD_TEST(suite, blob_esnap_io_4096_4096);
10140 		CU_ADD_TEST(suite, blob_esnap_io_512_512);
10141 		CU_ADD_TEST(suite, blob_esnap_io_4096_512);
10142 		CU_ADD_TEST(suite, blob_esnap_io_512_4096);
10143 		CU_ADD_TEST(suite_esnap_bs, blob_esnap_thread_add_remove);
10144 		CU_ADD_TEST(suite_esnap_bs, blob_esnap_clone_snapshot);
10145 		CU_ADD_TEST(suite_esnap_bs, blob_esnap_clone_inflate);
10146 		CU_ADD_TEST(suite_esnap_bs, blob_esnap_clone_decouple);
10147 		CU_ADD_TEST(suite_esnap_bs, blob_esnap_clone_reload);
10148 		CU_ADD_TEST(suite_esnap_bs, blob_esnap_hotplug);
10149 		CU_ADD_TEST(suite_blob, blob_is_degraded);
10150 		CU_ADD_TEST(suite_bs, blob_clone_resize);
10151 		CU_ADD_TEST(suite, blob_esnap_clone_resize);
10152 		CU_ADD_TEST(suite_bs, blob_shallow_copy);
10153 		CU_ADD_TEST(suite_esnap_bs, blob_set_parent);
10154 		CU_ADD_TEST(suite_esnap_bs, blob_set_external_parent);
10155 	}
10156 
10157 	allocate_threads(2);
10158 	set_thread(0);
10159 
10160 	g_dev_buffer = calloc(1, DEV_BUFFER_SIZE);
10161 
10162 	num_failures = spdk_ut_run_tests(argc, argv, NULL);
10163 
10164 	free(g_dev_buffer);
10165 
10166 	free_threads();
10167 
10168 	return num_failures;
10169 }
10170