xref: /spdk/test/unit/lib/blob/blob.c/blob_ut.c (revision 8130039ee5287100d9eb93eb886967645da3d545)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2017 Intel Corporation.
3  *   All rights reserved.
4  *   Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
5  */
6 
7 #include "spdk/stdinc.h"
8 
9 #include "spdk_internal/cunit.h"
10 #include "spdk/blob.h"
11 #include "spdk/string.h"
12 
13 #include "common/lib/ut_multithread.c"
14 #include "../bs_dev_common.c"
15 #include "thread/thread.c"
16 #include "blob/blobstore.c"
17 #include "blob/request.c"
18 #include "blob/zeroes.c"
19 #include "blob/blob_bs_dev.c"
20 #include "esnap_dev.c"
21 
/* Globals shared between test cases and the async completion callbacks below.
 * Callbacks record their results here; tests poll the threads and then
 * inspect these variables.
 */
struct spdk_blob_store *g_bs;
spdk_blob_id g_blobid;
struct spdk_blob *g_blob, *g_blob2;
int g_bserrno, g_bserrno2;
struct spdk_xattr_names *g_names;
int g_done;
/* xattr fixtures: g_xattr_names[i] maps to g_xattr_values[i]. */
char *g_xattr_names[] = {"first", "second", "third"};
char *g_xattr_values[] = {"one", "two", "three"};
/* Opaque context handed to xattr callbacks; the value itself is arbitrary. */
uint64_t g_ctx = 1729;
/* Flipped by the runner so suites exercise both cluster-map representations. */
bool g_use_extent_table = false;
32 
/* On-disk layout of the version-1 super block, reproduced here so tests can
 * fabricate and inspect old-format metadata.  Field order and sizes must
 * match the on-disk format exactly; do not reorder or repack.
 */
struct spdk_bs_super_block_ver1 {
	uint8_t		signature[8];
	uint32_t        version;
	uint32_t        length;
	uint32_t	clean; /* If there was a clean shutdown, this is 1. */
	spdk_blob_id	super_blob;

	uint32_t	cluster_size; /* In bytes */

	uint32_t	used_page_mask_start; /* Offset from beginning of disk, in pages */
	uint32_t	used_page_mask_len; /* Count, in pages */

	uint32_t	used_cluster_mask_start; /* Offset from beginning of disk, in pages */
	uint32_t	used_cluster_mask_len; /* Count, in pages */

	uint32_t	md_start; /* Offset from beginning of disk, in pages */
	uint32_t	md_len; /* Count, in pages */

	uint8_t		reserved[4036]; /* Pads the struct to exactly one 4 KiB page. */
	uint32_t	crc;
} __attribute__((packed));
SPDK_STATIC_ASSERT(sizeof(struct spdk_bs_super_block_ver1) == 0x1000, "Invalid super block size");
55 
/* Forward declarations for shared helpers defined later in this file. */
static struct spdk_blob *ut_blob_create_and_open(struct spdk_blob_store *bs,
		struct spdk_blob_opts *blob_opts);
static void ut_blob_close_and_delete(struct spdk_blob_store *bs, struct spdk_blob *blob);
static void suite_blob_setup(void);
static void suite_blob_cleanup(void);

/* Stub memory-domain zeroing to a successful no-op; these tests never
 * exercise memory domains.
 */
DEFINE_STUB(spdk_memory_domain_memzero, int, (struct spdk_memory_domain *src_domain,
		void *src_domain_ctx, struct iovec *iov, uint32_t iovcnt, void (*cpl_cb)(void *, int),
		void *cpl_cb_arg), 0);
65 
/* Return true iff _blob carries all four markers of an esnap clone: the
 * external-snapshot-id xattr with the expected value and length, the
 * SPDK_BLOB_EXTERNAL_SNAPSHOT invalid-flag, and the esnap parent id.
 * Each condition is also CU_ASSERTed individually so a failure pinpoints
 * which marker is missing.
 */
static bool
is_esnap_clone(struct spdk_blob *_blob, const void *id, size_t id_len)
{
	const void *val = NULL;
	size_t len = 0;
	bool c0, c1, c2, c3;

	/* 'true': the id is stored as an internal (hidden) xattr. */
	CU_ASSERT(blob_get_xattr_value(_blob, BLOB_EXTERNAL_SNAPSHOT_ID, &val, &len,
				       true) == 0);
	CU_ASSERT((c0 = (len == id_len)));
	CU_ASSERT((c1 = (val != NULL && memcmp(val, id, len) == 0)));
	CU_ASSERT((c2 = !!(_blob->invalid_flags & SPDK_BLOB_EXTERNAL_SNAPSHOT)));
	CU_ASSERT((c3 = (_blob->parent_id == SPDK_BLOBID_EXTERNAL_SNAPSHOT)));

	return c0 && c1 && c2 && c3;
}
82 
/* Inverse of is_esnap_clone(): true iff _blob shows none of the esnap-clone
 * markers (no hidden id xattr, flag clear, non-esnap parent id).  Conditions
 * are CU_ASSERTed individually for precise diagnostics.
 */
static bool
is_not_esnap_clone(struct spdk_blob *_blob)
{
	const void *val = NULL;
	size_t len = 0;
	bool c1, c2, c3, c4;

	CU_ASSERT((c1 = (blob_get_xattr_value(_blob, BLOB_EXTERNAL_SNAPSHOT_ID, &val, &len,
					      true) == -ENOENT)));
	CU_ASSERT((c2 = (val == NULL)));
	CU_ASSERT((c3 = ((_blob->invalid_flags & SPDK_BLOB_EXTERNAL_SNAPSHOT) == 0)));
	CU_ASSERT((c4 = (_blob->parent_id != SPDK_BLOBID_EXTERNAL_SNAPSHOT)));

	return c1 && c2 && c3 && c4;
}
98 
/* Convenience wrappers that assert the esnap-clone state checks above. */
#define UT_ASSERT_IS_ESNAP_CLONE(_blob, _id, _len) CU_ASSERT(is_esnap_clone(_blob, _id, _len))
#define UT_ASSERT_IS_NOT_ESNAP_CLONE(_blob) CU_ASSERT(is_not_esnap_clone(_blob))
101 
102 static void
103 _get_xattr_value(void *arg, const char *name,
104 		 const void **value, size_t *value_len)
105 {
106 	uint64_t i;
107 
108 	SPDK_CU_ASSERT_FATAL(value_len != NULL);
109 	SPDK_CU_ASSERT_FATAL(value != NULL);
110 	CU_ASSERT(arg == &g_ctx);
111 
112 	for (i = 0; i < sizeof(g_xattr_names); i++) {
113 		if (!strcmp(name, g_xattr_names[i])) {
114 			*value_len = strlen(g_xattr_values[i]);
115 			*value = g_xattr_values[i];
116 			break;
117 		}
118 	}
119 }
120 
/* get_value callback used to exercise the error path: always reports a
 * NULL value with zero length, which the blobstore must reject.
 */
static void
_get_xattr_value_null(void *arg, const char *name,
		      const void **value, size_t *value_len)
{
	SPDK_CU_ASSERT_FATAL(value_len != NULL);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(arg == NULL);

	/* Independent stores; the order does not matter. */
	*value = NULL;
	*value_len = 0;
}
132 
133 static int
134 _get_snapshots_count(struct spdk_blob_store *bs)
135 {
136 	struct spdk_blob_list *snapshot = NULL;
137 	int count = 0;
138 
139 	TAILQ_FOREACH(snapshot, &bs->snapshots, link) {
140 		count += 1;
141 	}
142 
143 	return count;
144 }
145 
/* Initialize blob opts with defaults, then apply the suite-wide extent-table
 * setting so every test honors the current g_use_extent_table mode.
 */
static void
ut_spdk_blob_opts_init(struct spdk_blob_opts *opts)
{
	spdk_blob_opts_init(opts, sizeof(*opts));
	opts->use_extent_table = g_use_extent_table;
}
152 
/* Completion callback for blobstore operations: record the status globally. */
static void
bs_op_complete(void *cb_arg, int bserrno)
{
	g_bserrno = bserrno;
}
158 
/* Completion callback for init/load operations: record both the resulting
 * blobstore handle and the status globally.
 */
static void
bs_op_with_handle_complete(void *cb_arg, struct spdk_blob_store *bs,
			   int bserrno)
{
	g_bs = bs;
	g_bserrno = bserrno;
}
166 
167 static void
168 blob_op_complete(void *cb_arg, int bserrno)
169 {
170 	if (cb_arg != NULL) {
171 		int *errp = cb_arg;
172 
173 		*errp = bserrno;
174 	}
175 	g_bserrno = bserrno;
176 }
177 
/* Completion for create/get-id style operations: record the blob id and
 * status globally.
 */
static void
blob_op_with_id_complete(void *cb_arg, spdk_blob_id blobid, int bserrno)
{
	g_blobid = blobid;
	g_bserrno = bserrno;
}
184 
/* Completion for open-style operations: record the blob handle and status
 * globally.
 */
static void
blob_op_with_handle_complete(void *cb_arg, struct spdk_blob *blb, int bserrno)
{
	g_blob = blb;
	g_bserrno = bserrno;
}
191 
192 static void
193 blob_op_with_handle_complete2(void *cb_arg, struct spdk_blob *blob, int bserrno)
194 {
195 	if (g_blob == NULL) {
196 		g_blob = blob;
197 		g_bserrno = bserrno;
198 	} else {
199 		g_blob2 = blob;
200 		g_bserrno2 = bserrno;
201 	}
202 }
203 
/* Cleanly unload *bs, then load it back from a fresh dev with the given
 * opts.  On return *bs points at the reloaded store and g_bserrno is reset
 * to a sentinel so stale successes cannot mask a later missed completion.
 */
static void
ut_bs_reload(struct spdk_blob_store **bs, struct spdk_bs_opts *opts)
{
	struct spdk_bs_dev *dev;

	/* Unload the blob store */
	spdk_bs_unload(*bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	dev = init_dev();
	/* Load an existing blob store */
	spdk_bs_load(dev, opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	*bs = g_bs;

	/* Reset sentinel for the caller's next async operation. */
	g_bserrno = -1;
}
224 
/* Simulate a dirty (unclean) shutdown by freeing *bs without unloading,
 * then load the store again so recovery paths run.  On return *bs points at
 * the reloaded store and g_bserrno is reset to a sentinel.
 */
static void
ut_bs_dirty_load(struct spdk_blob_store **bs, struct spdk_bs_opts *opts)
{
	struct spdk_bs_dev *dev;

	/* Dirty shutdown */
	bs_free(*bs);

	dev = init_dev();
	/* Load an existing blob store */
	spdk_bs_load(dev, opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	*bs = g_bs;

	/* Reset sentinel for the caller's next async operation. */
	g_bserrno = -1;
}
243 
/* Verify that blobstore init rejects an unsupported device block length and
 * that a store initialized on a valid device unloads cleanly.
 */
static void
blob_init(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;

	dev = init_dev();

	/* should fail for an unsupported blocklen */
	dev->blocklen = 500;
	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Fresh dev for the valid init; presumably the failed init released
	 * the first one — confirm against spdk_bs_init's error path. */
	dev = init_dev();
	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
}
270 
/* Verify super-blob bookkeeping: querying before one is set fails with
 * -ENOENT; after setting a blob as super, the query returns its id.
 */
static void
blob_super(void)
{
	struct spdk_blob_store *bs = g_bs;
	spdk_blob_id blobid;
	struct spdk_blob_opts blob_opts;

	/* Get the super blob without having set one */
	spdk_bs_get_super(bs, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOENT);
	CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID);

	/* Create a blob */
	ut_spdk_blob_opts_init(&blob_opts);
	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid !=  SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	/* Set the blob as the super blob */
	spdk_bs_set_super(bs, blobid, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Get the super blob */
	spdk_bs_get_super(bs, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(blobid == g_blobid);
}
303 
/* Exercise open/close reference counting: re-opening returns the same
 * handle, each close drops one reference, and two back-to-back opens share
 * a single blob object.
 */
static void
blob_open(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_blob_opts blob_opts;
	spdk_blob_id blobid, blobid2;

	ut_spdk_blob_opts_init(&blob_opts);
	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;

	blobid2 = spdk_blob_get_id(blob);
	CU_ASSERT(blobid == blobid2);

	/* Try to open file again.  It should return success. */
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(blob == g_blob);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/*
	 * Close the file a second time, releasing the second reference.  This
	 *  should succeed.
	 */
	blob = g_blob;
	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/*
	 * Try to open file again.  It should succeed.  This tests the case
	 *  where the file is opened, closed, then re-opened again.
	 */
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;
	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Try to open file twice in succession.  This should return the same
	 * blob object.
	 */
	g_blob = NULL;
	g_blob2 = NULL;
	g_bserrno = -1;
	g_bserrno2 = -1;
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete2, NULL);
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete2, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_bserrno2 == 0);
	CU_ASSERT(g_blob != NULL);
	CU_ASSERT(g_blob2 != NULL);
	CU_ASSERT(g_blob == g_blob2);

	/* Drop one of the two references taken above; the helper below
	 * releases the last reference and deletes the blob.
	 */
	g_bserrno = -1;
	spdk_blob_close(g_blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	ut_blob_close_and_delete(bs, g_blob);
}
383 
/* Verify blob creation across sizes: 10 clusters, 0 clusters, default opts
 * (NULL), and the over-capacity error path (-ENOSPC).
 */
static void
blob_create(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_blob_opts opts;
	spdk_blob_id blobid;

	/* Create blob with 10 clusters */

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;

	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Create blob with 0 clusters */

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 0;

	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Create blob with default options (opts == NULL) */

	spdk_bs_create_blob_ext(bs, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Try to create blob with size larger than blobstore */

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = bs->total_clusters + 1;

	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOSPC);
}
464 
/* Verify that a zero-cluster blob (default opts and NULL internal opts)
 * records the extent table as found but allocates no extent pages.
 */
static void
blob_create_zero_extent(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	spdk_blob_id blobid;

	/* Create blob with default options (opts == NULL) */
	spdk_bs_create_blob_ext(bs, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
	/* Zero clusters: extent table exists but holds no pages. */
	CU_ASSERT(blob->extent_table_found == true);
	CU_ASSERT(blob->active.extent_pages_array_size == 0);
	CU_ASSERT(blob->active.extent_pages == NULL);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Create blob with NULL internal options  */
	bs_create_blob(bs, NULL, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;
	/* With NULL internal opts no internal xattrs may be present. */
	CU_ASSERT(TAILQ_FIRST(&blob->xattrs_internal) == NULL);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
	CU_ASSERT(blob->extent_table_found == true);
	CU_ASSERT(blob->active.extent_pages_array_size == 0);
	CU_ASSERT(blob->active.extent_pages == NULL);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
}
515 
/*
 * Create and delete one blob in a loop over and over again.  This helps ensure
 * that the internal bit masks tracking used clusters and md_pages are being
 * tracked correctly.
 */
static void
blob_create_loop(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts opts;
	uint32_t i, loop_count;

	/* Enough iterations to cycle through every md page / cluster slot
	 * several times, so a leaked bit would eventually exhaust the pool.
	 */
	loop_count = 4 * spdk_max(spdk_bit_array_capacity(bs->used_md_pages),
				  spdk_bit_pool_capacity(bs->used_clusters));

	for (i = 0; i < loop_count; i++) {
		ut_spdk_blob_opts_init(&opts);
		opts.num_clusters = 1;
		/* Reset sentinels so each iteration proves its own completion. */
		g_bserrno = -1;
		g_blobid = SPDK_BLOBID_INVALID;
		spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
		spdk_bs_delete_blob(bs, g_blobid, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
	}
}
545 
/* Verify that a failed create (xattrs with a NULL get_value callback) leaves
 * no residue: used-blobid/md-page counts are unchanged, the would-be blob id
 * cannot be opened, and the state survives a reload.
 */
static void
blob_create_fail(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts opts;
	spdk_blob_id blobid;
	uint32_t used_blobids_count = spdk_bit_array_count_set(bs->used_blobids);
	uint32_t used_md_pages_count = spdk_bit_array_count_set(bs->used_md_pages);

	/* NULL callback */
	ut_spdk_blob_opts_init(&opts);
	opts.xattrs.names = g_xattr_names;
	opts.xattrs.get_value = NULL;
	opts.xattrs.count = 1;
	opts.xattrs.ctx = &g_ctx;

	/* Predict which md page the create would have claimed. */
	blobid = spdk_bit_array_find_first_clear(bs->used_md_pages, 0);
	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);
	/* NOTE(review): on failure the create callback appears to report an
	 * id other than SPDK_BLOBID_INVALID (presumably 0) — confirm against
	 * bs_create_blob's error path. */
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	CU_ASSERT(spdk_bit_array_count_set(bs->used_blobids) == used_blobids_count);
	CU_ASSERT(spdk_bit_array_count_set(bs->used_md_pages) == used_md_pages_count);

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOENT);
	SPDK_CU_ASSERT_FATAL(g_blob == NULL);

	ut_bs_reload(&bs, NULL);
	CU_ASSERT(spdk_bit_array_count_set(bs->used_blobids) == used_blobids_count);
	CU_ASSERT(spdk_bit_array_count_set(bs->used_md_pages) == used_md_pages_count);

	/* The store must still contain no blobs at all. */
	spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_blob == NULL);
	CU_ASSERT(g_bserrno == -ENOENT);
}
584 
/* Verify internal-xattr handling on create: values set via the internal
 * xattr opts are visible through the internal getter but hidden from the
 * public spdk_blob_get_xattr_value() API; NULL internal opts yield none.
 */
static void
blob_create_internal(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_blob_opts opts;
	struct spdk_blob_xattr_opts internal_xattrs;
	const void *value;
	size_t value_len;
	spdk_blob_id blobid;
	int rc;

	/* Create blob with custom xattrs */

	ut_spdk_blob_opts_init(&opts);
	blob_xattrs_init(&internal_xattrs);
	internal_xattrs.count = 3;
	internal_xattrs.names = g_xattr_names;
	internal_xattrs.get_value = _get_xattr_value;
	internal_xattrs.ctx = &g_ctx;

	bs_create_blob(bs, &opts, &internal_xattrs, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;

	/* All three values must be readable through the internal getter. */
	rc = blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len, true);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);

	rc = blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len, true);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);

	rc = blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len, true);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);

	/* ...but internal xattrs must be invisible to the public API. */
	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len);
	CU_ASSERT(rc != 0);

	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len);
	CU_ASSERT(rc != 0);

	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len);
	CU_ASSERT(rc != 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Create blob with NULL internal options  */

	bs_create_blob(bs, NULL, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	CU_ASSERT(TAILQ_FIRST(&g_blob->xattrs_internal) == NULL);
	CU_ASSERT(spdk_blob_get_num_clusters(g_blob) == 0);

	blob = g_blob;

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
}
670 
/* Verify a thin-provisioned blob allocates no clusters up front (extent
 * pages only when the extent table is in use) and that the THIN_PROV flag
 * survives a dirty shutdown + recovery load.
 */
static void
blob_thin_provision(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct spdk_blob *blob;
	struct spdk_blob_opts opts;
	struct spdk_bs_opts bs_opts;
	spdk_blob_id blobid;

	dev = init_dev();
	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
	snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");

	/* Initialize a new blob store */
	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);

	bs = g_bs;

	/* Create blob with thin provisioning enabled */

	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = true;
	opts.num_clusters = 10;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);
	CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
	/* In thin provisioning with num_clusters is set, if not using the
	 * extent table, there is no allocation. If extent table is used,
	 * there is related allocation happened. */
	if (blob->extent_table_found == true) {
		CU_ASSERT(blob->active.extent_pages_array_size > 0);
		CU_ASSERT(blob->active.extent_pages != NULL);
	} else {
		CU_ASSERT(blob->active.extent_pages_array_size == 0);
		CU_ASSERT(blob->active.extent_pages == NULL);
	}

	/* NOTE(review): no poll_threads() after this close before the dirty
	 * load below — presumably intentional since ut_bs_dirty_load() frees
	 * the blobstore outright; confirm. */
	spdk_blob_close(blob, blob_op_complete, NULL);
	CU_ASSERT(g_bserrno == 0);

	/* Do not shut down cleanly.  This makes sure that when we load again
	 *  and try to recover a valid used_cluster map, that blobstore will
	 *  ignore clusters with index 0 since these are unallocated clusters.
	 */
	ut_bs_dirty_load(&bs, &bs_opts);

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;
	CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV);

	ut_blob_close_and_delete(bs, blob);

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
}
736 
/* Snapshot chains: snapshot a blob twice (the second time with xattrs),
 * verify blob → snapshot2 → snapshot backing relationships and clone lists,
 * reject snapshot-of-snapshot, then delete clone-first and watch the
 * snapshot list drain.
 */
static void
blob_snapshot(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_blob *snapshot, *snapshot2;
	struct spdk_blob_bs_dev *blob_bs_dev;
	struct spdk_blob_opts opts;
	struct spdk_blob_xattr_opts xattrs;
	spdk_blob_id blobid;
	spdk_blob_id snapshotid;
	spdk_blob_id snapshotid2;
	const void *value;
	size_t value_len;
	int rc;
	spdk_blob_id ids[2];
	size_t count;

	/* Create blob with 10 clusters */
	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);

	/* Create snapshot from blob */
	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 0);
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
	snapshotid = g_blobid;

	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot = g_blob;
	CU_ASSERT(snapshot->data_ro == true);
	CU_ASSERT(snapshot->md_ro == true);
	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10);

	/* The original blob is now a thin-provisioned clone of the snapshot
	 * with no clusters of its own. */
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
	CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
	CU_ASSERT(spdk_mem_all_zero(blob->active.clusters,
				    blob->active.num_clusters * sizeof(blob->active.clusters[0])));

	/* Try to create snapshot from clone with xattrs */
	xattrs.names = g_xattr_names;
	xattrs.get_value = _get_xattr_value;
	xattrs.count = 3;
	xattrs.ctx = &g_ctx;
	spdk_bs_create_snapshot(bs, blobid, &xattrs, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 2);
	snapshotid2 = g_blobid;

	/* NOTE(review): no poll_threads() after this open; it presumably
	 * completes synchronously because the clone's back_bs_dev already
	 * holds snapshot2 open — confirm against bs_open_blob. */
	spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL);
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot2 = g_blob;
	CU_ASSERT(snapshot2->data_ro == true);
	CU_ASSERT(snapshot2->md_ro == true);
	CU_ASSERT(spdk_blob_get_num_clusters(snapshot2) == 10);

	/* Confirm that blob is backed by snapshot2 and snapshot2 is backed by snapshot */
	CU_ASSERT(snapshot->back_bs_dev == NULL);
	SPDK_CU_ASSERT_FATAL(blob->back_bs_dev != NULL);
	SPDK_CU_ASSERT_FATAL(snapshot2->back_bs_dev != NULL);

	blob_bs_dev = (struct spdk_blob_bs_dev *)blob->back_bs_dev;
	CU_ASSERT(blob_bs_dev->blob == snapshot2);

	blob_bs_dev = (struct spdk_blob_bs_dev *)snapshot2->back_bs_dev;
	CU_ASSERT(blob_bs_dev->blob == snapshot);

	/* The xattrs supplied at snapshot time are publicly visible. */
	rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[0], &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);

	rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[1], &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);

	rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[2], &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);

	/* Confirm that blob is clone of snapshot2, and snapshot2 is clone of snapshot */
	count = 2;
	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0);
	CU_ASSERT(count == 1);
	CU_ASSERT(ids[0] == blobid);

	count = 2;
	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);
	CU_ASSERT(count == 1);
	CU_ASSERT(ids[0] == snapshotid2);

	/* Try to create snapshot from snapshot */
	spdk_bs_create_snapshot(bs, snapshotid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);
	CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID);
	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 2);

	/* Delete blob and confirm that it is no longer on snapshot2 clone list */
	ut_blob_close_and_delete(bs, blob);
	count = 2;
	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0);
	CU_ASSERT(count == 0);

	/* Delete snapshot2 and confirm that it is no longer on snapshot clone list */
	ut_blob_close_and_delete(bs, snapshot2);
	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
	count = 2;
	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0);
	CU_ASSERT(count == 0);

	ut_blob_close_and_delete(bs, snapshot);
	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 0);
}
869 
/* Verify that I/O submitted while a snapshot is mid-creation is queued
 * (frozen) rather than executed, and completes correctly once the snapshot
 * finishes.
 */
static void
blob_snapshot_freeze_io(void)
{
	struct spdk_io_channel *channel;
	struct spdk_bs_channel *bs_channel;
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_blob_opts opts;
	spdk_blob_id blobid;
	uint32_t num_of_pages = 10;
	uint8_t payload_read[num_of_pages * SPDK_BS_PAGE_SIZE];
	uint8_t payload_write[num_of_pages * SPDK_BS_PAGE_SIZE];
	uint8_t payload_zero[num_of_pages * SPDK_BS_PAGE_SIZE];

	memset(payload_write, 0xE5, sizeof(payload_write));
	memset(payload_read, 0x00, sizeof(payload_read));
	memset(payload_zero, 0x00, sizeof(payload_zero));

	/* Test freeze I/O during snapshot */
	channel = spdk_bs_alloc_io_channel(bs);
	bs_channel = spdk_io_channel_get_ctx(channel);

	/* Create blob with 10 clusters */
	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;
	opts.thin_provision = false;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);

	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);

	/* This is implementation specific.
	 * Flag 'frozen_io' is set in _spdk_bs_snapshot_freeze_cpl callback.
	 * Four async I/O operations happen before that. */
	poll_thread_times(0, 5);

	CU_ASSERT(TAILQ_EMPTY(&bs_channel->queued_io));

	/* Blob I/O should be frozen here */
	CU_ASSERT(blob->frozen_refcnt == 1);

	/* Write to the blob */
	spdk_blob_io_write(blob, channel, payload_write, 0, num_of_pages, blob_op_complete, NULL);

	/* Verify that I/O is queued */
	CU_ASSERT(!TAILQ_EMPTY(&bs_channel->queued_io));
	/* Verify that payload is not written to disk, at this point the blobs already switched */
	CU_ASSERT(blob->active.clusters[0] == 0);

	/* Finish all operations including spdk_bs_create_snapshot */
	poll_threads();

	/* Verify snapshot */
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);

	/* Verify that blob has unset frozen_io */
	CU_ASSERT(blob->frozen_refcnt == 0);

	/* Verify that postponed I/O completed successfully by comparing payload */
	spdk_blob_io_read(blob, channel, payload_read, 0, num_of_pages, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_write, payload_read, num_of_pages * SPDK_BS_PAGE_SIZE) == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();

	ut_blob_close_and_delete(bs, blob);
}
942 
/*
 * Test spdk_bs_create_clone():
 *  - creating a clone of a snapshot, with xattrs supplied at creation time,
 *  - rejection (-EINVAL) when the origin blob is still writable,
 *  - successful clone of a blob made read-only via spdk_blob_set_read_only().
 */
static void
blob_clone(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts opts;
	struct spdk_blob *blob, *snapshot, *clone;
	spdk_blob_id blobid, cloneid, snapshotid;
	struct spdk_blob_xattr_opts xattrs;
	const void *value;
	size_t value_len;
	int rc;

	/* Create blob with 10 clusters */

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);

	/* Create snapshot */
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snapshotid = g_blobid;

	/* Snapshot must come up read-only (data and md) and same size as the origin. */
	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot = g_blob;
	CU_ASSERT(snapshot->data_ro == true);
	CU_ASSERT(snapshot->md_ro == true);
	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10);

	spdk_blob_close(snapshot, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Create clone from snapshot with xattrs */
	xattrs.names = g_xattr_names;
	xattrs.get_value = _get_xattr_value;
	xattrs.count = 3;
	xattrs.ctx = &g_ctx;

	spdk_bs_create_clone(bs, snapshotid, &xattrs, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	cloneid = g_blobid;

	/* The clone, unlike the snapshot, must be writable. */
	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	clone = g_blob;
	CU_ASSERT(clone->data_ro == false);
	CU_ASSERT(clone->md_ro == false);
	CU_ASSERT(spdk_blob_get_num_clusters(clone) == 10);

	/* Verify that all three xattrs were applied to the clone at creation time. */
	rc = spdk_blob_get_xattr_value(clone, g_xattr_names[0], &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);

	rc = spdk_blob_get_xattr_value(clone, g_xattr_names[1], &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);

	rc = spdk_blob_get_xattr_value(clone, g_xattr_names[2], &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);


	spdk_blob_close(clone, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Try to create clone from not read only blob */
	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);
	CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID);

	/* Mark blob as read only */
	spdk_blob_set_read_only(blob);
	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Create clone from read only blob */
	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	cloneid = g_blobid;

	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	clone = g_blob;
	CU_ASSERT(clone->data_ro == false);
	CU_ASSERT(clone->md_ro == false);
	CU_ASSERT(spdk_blob_get_num_clusters(clone) == 10);

	ut_blob_close_and_delete(bs, clone);
	ut_blob_close_and_delete(bs, blob);
}
1059 
/*
 * Common body for blob inflation tests.
 *
 * \param decouple_parent When true, exercise spdk_bs_blob_decouple_parent()
 *        (blob keeps its thin-provisioned state and no clusters are
 *        allocated); when false, exercise spdk_bs_inflate_blob() (blob
 *        becomes thick-provisioned with all clusters allocated).  In both
 *        cases the parent snapshot must be deletable afterwards.
 */
static void
_blob_inflate(bool decouple_parent)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts opts;
	struct spdk_blob *blob, *snapshot;
	spdk_blob_id blobid, snapshotid;
	struct spdk_io_channel *channel;
	uint64_t free_clusters;

	channel = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(channel != NULL);

	/* Create blob with 10 clusters */

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;
	opts.thin_provision = true;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
	CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == true);

	/* 1) Blob with no parent */
	if (decouple_parent) {
		/* Decouple parent of blob with no parent (should fail) */
		spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno != 0);
	} else {
		/* Inflate of thin blob with no parent should made it thick */
		spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == false);
	}

	/* Taking a snapshot turns the blob back into a thin clone of the snapshot. */
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snapshotid = g_blobid;

	CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == true);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);

	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot = g_blob;
	CU_ASSERT(snapshot->data_ro == true);
	CU_ASSERT(snapshot->md_ro == true);
	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10);

	spdk_blob_close(snapshot, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Baseline for verifying cluster allocation after the operation below. */
	free_clusters = spdk_bs_free_cluster_count(bs);

	/* 2) Blob with parent */
	if (!decouple_parent) {
		/* Do full blob inflation */
		spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		/* all 10 clusters should be allocated */
		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 10);
	} else {
		/* Decouple parent of blob */
		spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		/* when only parent is removed, none of the clusters should be allocated */
		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters);
	}

	/* Now, it should be possible to delete snapshot */
	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
	CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == decouple_parent);

	spdk_bs_free_io_channel(channel);
	poll_threads();

	ut_blob_close_and_delete(bs, blob);
}
1152 
/* Driver: run the inflation test in both modes, full inflate first. */
static void
blob_inflate(void)
{
	int i;

	for (i = 0; i < 2; i++) {
		_blob_inflate(i != 0);
	}
}
1159 
/*
 * Create a blob, delete it, and verify a subsequent open fails with -ENOENT.
 */
static void
blob_delete(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts blob_opts;
	spdk_blob_id blobid;

	/* Create a blob and then delete it. */
	ut_spdk_blob_opts_init(&blob_opts);
	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid > 0);
	blobid = g_blobid;

	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Try to open the blob */
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOENT);
}
1184 
/*
 * Test spdk_blob_resize():
 *  - resize is rejected with -EPERM while md_ro is set,
 *  - growth allocates clusters immediately,
 *  - shrink releases clusters only after spdk_blob_sync_md(),
 *  - resizing beyond blobstore capacity fails with -ENOSPC.
 */
static void
blob_resize_test(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	uint64_t free_clusters;

	free_clusters = spdk_bs_free_cluster_count(bs);

	blob = ut_blob_create_and_open(bs, NULL);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));

	/* Confirm that resize fails if blob is marked read-only. */
	blob->md_ro = true;
	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EPERM);
	blob->md_ro = false;

	/* The blob started at 0 clusters. Resize it to be 5. */
	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT((free_clusters - 5) == spdk_bs_free_cluster_count(bs));

	/* Shrink the blob to 3 clusters. This will not actually release
	 * the old clusters until the blob is synced.
	 */
	spdk_blob_resize(blob, 3, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	/* Verify there are still 5 clusters in use */
	CU_ASSERT((free_clusters - 5) == spdk_bs_free_cluster_count(bs));

	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	/* Now there are only 3 clusters in use */
	CU_ASSERT((free_clusters - 3) == spdk_bs_free_cluster_count(bs));

	/* Resize the blob to be 10 clusters. Growth takes effect immediately. */
	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT((free_clusters - 10) == spdk_bs_free_cluster_count(bs));

	/* Try to resize the blob to size larger than blobstore. */
	spdk_blob_resize(blob, bs->total_clusters + 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOSPC);

	ut_blob_close_and_delete(bs, blob);
}
1238 
/*
 * Test spdk_blob_set_read_only(): the read-only state takes effect only
 * after md sync, survives close/reopen, and persists across a blobstore
 * reload (SPDK_BLOB_READ_ONLY flag stored in on-disk metadata).
 */
static void
blob_read_only(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct spdk_blob *blob;
	struct spdk_bs_opts opts;
	spdk_blob_id blobid;
	int rc;

	/* Use a private blobstore (not g_bs) so it can be reloaded below. */
	dev = init_dev();
	spdk_bs_opts_init(&opts, sizeof(opts));
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");

	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	blob = ut_blob_create_and_open(bs, NULL);
	blobid = spdk_blob_get_id(blob);

	rc = spdk_blob_set_read_only(blob);
	CU_ASSERT(rc == 0);

	/* Not read-only yet - the flag only applies after the md is synced. */
	CU_ASSERT(blob->data_ro == false);
	CU_ASSERT(blob->md_ro == false);

	/* NOTE(review): passes bs_op_complete rather than blob_op_complete and
	 * does not check g_bserrno afterwards - confirm this is intentional. */
	spdk_blob_sync_md(blob, bs_op_complete, NULL);
	poll_threads();

	CU_ASSERT(blob->data_ro == true);
	CU_ASSERT(blob->md_ro == true);
	CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Read-only state must survive close/reopen. */
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;

	CU_ASSERT(blob->data_ro == true);
	CU_ASSERT(blob->md_ro == true);
	CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Read-only state must also survive a full blobstore unload/load cycle. */
	ut_bs_reload(&bs, &opts);

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;

	CU_ASSERT(blob->data_ro == true);
	CU_ASSERT(blob->md_ro == true);
	CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);

	ut_blob_close_and_delete(bs, blob);

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
}
1311 
1312 static void
1313 channel_ops(void)
1314 {
1315 	struct spdk_blob_store *bs = g_bs;
1316 	struct spdk_io_channel *channel;
1317 
1318 	channel = spdk_bs_alloc_io_channel(bs);
1319 	CU_ASSERT(channel != NULL);
1320 
1321 	spdk_bs_free_io_channel(channel);
1322 	poll_threads();
1323 }
1324 
/*
 * Test spdk_blob_io_write() error paths and the happy path:
 *  - write to a zero-sized blob fails with -EINVAL,
 *  - write to a data_ro blob fails with -EPERM,
 *  - writes starting at or crossing past the end fail with -EINVAL.
 */
static void
blob_write(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob = g_blob;
	struct spdk_io_channel *channel;
	uint64_t pages_per_cluster;
	uint8_t payload[10 * 4096];

	pages_per_cluster = spdk_bs_get_cluster_size(bs) / spdk_bs_get_page_size(bs);

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	/* Write to a blob with 0 size */
	spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Resize the blob */
	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Confirm that write fails if blob is marked read-only. */
	blob->data_ro = true;
	spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EPERM);
	blob->data_ro = false;

	/* Write to the blob */
	spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Write starting beyond the end */
	spdk_blob_io_write(blob, channel, payload, 5 * pages_per_cluster, 1, blob_op_complete,
			   NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Write starting at a valid location but going off the end */
	spdk_blob_io_write(blob, channel, payload, 4 * pages_per_cluster, pages_per_cluster + 1,
			   blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}
1376 
/*
 * Test spdk_blob_io_read(): mirrors blob_write(), except that reads must
 * still succeed on a data_ro blob.
 */
static void
blob_read(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob = g_blob;
	struct spdk_io_channel *channel;
	uint64_t pages_per_cluster;
	uint8_t payload[10 * 4096];

	pages_per_cluster = spdk_bs_get_cluster_size(bs) / spdk_bs_get_page_size(bs);

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	/* Read from a blob with 0 size */
	spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Resize the blob */
	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Confirm that read passes if blob is marked read-only. */
	blob->data_ro = true;
	spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	blob->data_ro = false;

	/* Read from the blob */
	spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Read starting beyond the end */
	spdk_blob_io_read(blob, channel, payload, 5 * pages_per_cluster, 1, blob_op_complete,
			  NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Read starting at a valid location but going off the end */
	spdk_blob_io_read(blob, channel, payload, 4 * pages_per_cluster, pages_per_cluster + 1,
			  blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}
1428 
/*
 * Write 10 pages at page offset 4, read them back, and compare payloads.
 */
static void
blob_rw_verify(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob = g_blob;
	struct spdk_io_channel *channel;
	uint8_t payload_read[10 * 4096];
	uint8_t payload_write[10 * 4096];

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	spdk_blob_resize(blob, 32, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	memset(payload_write, 0xE5, sizeof(payload_write));
	spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	memset(payload_read, 0x00, sizeof(payload_read));
	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	/* NOTE(review): only the first 4 of the 10 pages are compared; consider
	 * comparing all 10 (the buffers are uniform, so coverage is equivalent). */
	CU_ASSERT(memcmp(payload_write, payload_read, 4 * 4096) == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}
1459 
/*
 * Test readv/writev across a cluster boundary.  The blob's second cluster
 * is manually relocated so that a write spilling into it lands in a
 * different on-disk region, proving the iov code follows the cluster map.
 */
static void
blob_rw_verify_iov(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_io_channel *channel;
	uint8_t payload_read[10 * 4096];
	uint8_t payload_write[10 * 4096];
	struct iovec iov_read[3];
	struct iovec iov_write[3];
	void *buf;

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	blob = ut_blob_create_and_open(bs, NULL);

	spdk_blob_resize(blob, 2, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/*
	 * Manually adjust the offset of the blob's second cluster.  This allows
	 *  us to make sure that the readv/write code correctly accounts for I/O
	 *  that cross cluster boundaries.  Start by asserting that the allocated
	 *  clusters are where we expect before modifying the second cluster.
	 */
	CU_ASSERT(blob->active.clusters[0] == 1 * 256);
	CU_ASSERT(blob->active.clusters[1] == 2 * 256);
	blob->active.clusters[1] = 3 * 256;

	memset(payload_write, 0xE5, sizeof(payload_write));
	iov_write[0].iov_base = payload_write;
	iov_write[0].iov_len = 1 * 4096;
	iov_write[1].iov_base = payload_write + 1 * 4096;
	iov_write[1].iov_len = 5 * 4096;
	iov_write[2].iov_base = payload_write + 6 * 4096;
	iov_write[2].iov_len = 4 * 4096;
	/*
	 * Choose a page offset just before the cluster boundary.  The first 6 pages of payload
	 *  will get written to the first cluster, the last 4 to the second cluster.
	 */
	spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Read back through differently-shaped iovs to exercise reassembly. */
	memset(payload_read, 0xAA, sizeof(payload_read));
	iov_read[0].iov_base = payload_read;
	iov_read[0].iov_len = 3 * 4096;
	iov_read[1].iov_base = payload_read + 3 * 4096;
	iov_read[1].iov_len = 4 * 4096;
	iov_read[2].iov_base = payload_read + 7 * 4096;
	iov_read[2].iov_len = 3 * 4096;
	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);

	/* Compare against a zeroed buffer to prove the original (now bypassed)
	 * cluster 2 region of the backing device was never written. */
	buf = calloc(1, 256 * 4096);
	SPDK_CU_ASSERT_FATAL(buf != NULL);
	/* Check that cluster 2 on "disk" was not modified. */
	CU_ASSERT(memcmp(buf, &g_dev_buffer[512 * 4096], 256 * 4096) == 0);
	free(buf);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}
1531 
1532 static uint32_t
1533 bs_channel_get_req_count(struct spdk_io_channel *_channel)
1534 {
1535 	struct spdk_bs_channel *channel = spdk_io_channel_get_ctx(_channel);
1536 	struct spdk_bs_request_set *set;
1537 	uint32_t count = 0;
1538 
1539 	TAILQ_FOREACH(set, &channel->reqs, link) {
1540 		count++;
1541 	}
1542 
1543 	return count;
1544 }
1545 
1546 static void
1547 blob_rw_verify_iov_nomem(void)
1548 {
1549 	struct spdk_blob_store *bs = g_bs;
1550 	struct spdk_blob *blob = g_blob;
1551 	struct spdk_io_channel *channel;
1552 	uint8_t payload_write[10 * 4096];
1553 	struct iovec iov_write[3];
1554 	uint32_t req_count;
1555 
1556 	channel = spdk_bs_alloc_io_channel(bs);
1557 	CU_ASSERT(channel != NULL);
1558 
1559 	spdk_blob_resize(blob, 2, blob_op_complete, NULL);
1560 	poll_threads();
1561 	CU_ASSERT(g_bserrno == 0);
1562 
1563 	/*
1564 	 * Choose a page offset just before the cluster boundary.  The first 6 pages of payload
1565 	 *  will get written to the first cluster, the last 4 to the second cluster.
1566 	 */
1567 	iov_write[0].iov_base = payload_write;
1568 	iov_write[0].iov_len = 1 * 4096;
1569 	iov_write[1].iov_base = payload_write + 1 * 4096;
1570 	iov_write[1].iov_len = 5 * 4096;
1571 	iov_write[2].iov_base = payload_write + 6 * 4096;
1572 	iov_write[2].iov_len = 4 * 4096;
1573 	MOCK_SET(calloc, NULL);
1574 	req_count = bs_channel_get_req_count(channel);
1575 	spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
1576 	poll_threads();
1577 	CU_ASSERT(g_bserrno = -ENOMEM);
1578 	CU_ASSERT(req_count == bs_channel_get_req_count(channel));
1579 	MOCK_CLEAR(calloc);
1580 
1581 	spdk_bs_free_io_channel(channel);
1582 	poll_threads();
1583 }
1584 
/*
 * Verify the data_ro flag semantics for vectored I/O: writev fails with
 * -EPERM while readv still succeeds.
 */
static void
blob_rw_iov_read_only(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob = g_blob;
	struct spdk_io_channel *channel;
	uint8_t payload_read[4096];
	uint8_t payload_write[4096];
	struct iovec iov_read;
	struct iovec iov_write;

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	spdk_blob_resize(blob, 2, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Verify that writev failed if read_only flag is set. */
	blob->data_ro = true;
	iov_write.iov_base = payload_write;
	iov_write.iov_len = sizeof(payload_write);
	spdk_blob_io_writev(blob, channel, &iov_write, 1, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EPERM);

	/* Verify that reads pass if data_ro flag is set. */
	iov_read.iov_base = payload_read;
	iov_read.iov_len = sizeof(payload_read);
	spdk_blob_io_readv(blob, channel, &iov_read, 1, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}
1621 
1622 static void
1623 _blob_io_read_no_split(struct spdk_blob *blob, struct spdk_io_channel *channel,
1624 		       uint8_t *payload, uint64_t offset, uint64_t length,
1625 		       spdk_blob_op_complete cb_fn, void *cb_arg)
1626 {
1627 	uint64_t i;
1628 	uint8_t *buf;
1629 	uint64_t page_size = spdk_bs_get_page_size(blob->bs);
1630 
1631 	/* To be sure that operation is NOT split, read one page at the time */
1632 	buf = payload;
1633 	for (i = 0; i < length; i++) {
1634 		spdk_blob_io_read(blob, channel, buf, i + offset, 1, blob_op_complete, NULL);
1635 		poll_threads();
1636 		if (g_bserrno != 0) {
1637 			/* Pass the error code up */
1638 			break;
1639 		}
1640 		buf += page_size;
1641 	}
1642 
1643 	cb_fn(cb_arg, g_bserrno);
1644 }
1645 
1646 static void
1647 _blob_io_write_no_split(struct spdk_blob *blob, struct spdk_io_channel *channel,
1648 			uint8_t *payload, uint64_t offset, uint64_t length,
1649 			spdk_blob_op_complete cb_fn, void *cb_arg)
1650 {
1651 	uint64_t i;
1652 	uint8_t *buf;
1653 	uint64_t page_size = spdk_bs_get_page_size(blob->bs);
1654 
1655 	/* To be sure that operation is NOT split, write one page at the time */
1656 	buf = payload;
1657 	for (i = 0; i < length; i++) {
1658 		spdk_blob_io_write(blob, channel, buf, i + offset, 1, blob_op_complete, NULL);
1659 		poll_threads();
1660 		if (g_bserrno != 0) {
1661 			/* Pass the error code up */
1662 			break;
1663 		}
1664 		buf += page_size;
1665 	}
1666 
1667 	cb_fn(cb_arg, g_bserrno);
1668 }
1669 
/*
 * Verify that large contiguous reads/writes that get split internally
 * produce the same on-disk data as unsplittable single-page I/O:
 *  - read test: write page-by-page, read with one large (split) request,
 *  - write test: write with one large (split) request, read page-by-page.
 */
static void
blob_operation_split_rw(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_io_channel *channel;
	struct spdk_blob_opts opts;
	uint64_t cluster_size;

	uint64_t payload_size;
	uint8_t *payload_read;
	uint8_t *payload_write;
	uint8_t *payload_pattern;

	uint64_t page_size;
	uint64_t pages_per_cluster;
	uint64_t pages_per_payload;

	uint64_t i;

	cluster_size = spdk_bs_get_cluster_size(bs);
	page_size = spdk_bs_get_page_size(bs);
	pages_per_cluster = cluster_size / page_size;
	pages_per_payload = pages_per_cluster * 5;
	payload_size = cluster_size * 5;

	payload_read = malloc(payload_size);
	SPDK_CU_ASSERT_FATAL(payload_read != NULL);

	payload_write = malloc(payload_size);
	SPDK_CU_ASSERT_FATAL(payload_write != NULL);

	payload_pattern = malloc(payload_size);
	SPDK_CU_ASSERT_FATAL(payload_pattern != NULL);

	/* Prepare random pattern to write */
	/* Each page starts with its (1-based) index so misplaced pages are detectable. */
	memset(payload_pattern, 0xFF, payload_size);
	for (i = 0; i < pages_per_payload; i++) {
		*((uint64_t *)(payload_pattern + page_size * i)) = (i + 1);
	}

	channel = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(channel != NULL);

	/* Create blob */
	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = false;
	opts.num_clusters = 5;

	blob = ut_blob_create_and_open(bs, &opts);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);

	/* Initial read should return zeroed payload */
	memset(payload_read, 0xFF, payload_size);
	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size));

	/* Fill whole blob except last page */
	spdk_blob_io_write(blob, channel, payload_pattern, 0, pages_per_payload - 1,
			   blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Write last page with a pattern */
	spdk_blob_io_write(blob, channel, payload_pattern, pages_per_payload - 1, 1,
			   blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Read whole blob and check consistency */
	memset(payload_read, 0xFF, payload_size);
	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size - page_size) == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read + payload_size - page_size, page_size) == 0);

	/* Fill whole blob except first page */
	spdk_blob_io_write(blob, channel, payload_pattern, 1, pages_per_payload - 1,
			   blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Write first page with a pattern */
	spdk_blob_io_write(blob, channel, payload_pattern, 0, 1,
			   blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Read whole blob and check consistency */
	memset(payload_read, 0xFF, payload_size);
	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read + page_size, payload_size - page_size) == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read, page_size) == 0);


	/* Fill whole blob with a pattern (5 clusters) */

	/* 1. Read test. */
	_blob_io_write_no_split(blob, channel, payload_pattern, 0, pages_per_payload,
				blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	memset(payload_read, 0xFF, payload_size);
	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);

	/* 2. Write test. */
	spdk_blob_io_write(blob, channel, payload_pattern, 0, pages_per_payload,
			   blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	memset(payload_read, 0xFF, payload_size);
	_blob_io_read_no_split(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();

	g_blob = NULL;
	g_blobid = 0;

	free(payload_read);
	free(payload_write);
	free(payload_pattern);

	ut_blob_close_and_delete(bs, blob);
}
1809 
1810 static void
1811 blob_operation_split_rw_iov(void)
1812 {
1813 	struct spdk_blob_store *bs = g_bs;
1814 	struct spdk_blob *blob;
1815 	struct spdk_io_channel *channel;
1816 	struct spdk_blob_opts opts;
1817 	uint64_t cluster_size;
1818 
1819 	uint64_t payload_size;
1820 	uint8_t *payload_read;
1821 	uint8_t *payload_write;
1822 	uint8_t *payload_pattern;
1823 
1824 	uint64_t page_size;
1825 	uint64_t pages_per_cluster;
1826 	uint64_t pages_per_payload;
1827 
1828 	struct iovec iov_read[2];
1829 	struct iovec iov_write[2];
1830 
1831 	uint64_t i, j;
1832 
1833 	cluster_size = spdk_bs_get_cluster_size(bs);
1834 	page_size = spdk_bs_get_page_size(bs);
1835 	pages_per_cluster = cluster_size / page_size;
1836 	pages_per_payload = pages_per_cluster * 5;
1837 	payload_size = cluster_size * 5;
1838 
1839 	payload_read = malloc(payload_size);
1840 	SPDK_CU_ASSERT_FATAL(payload_read != NULL);
1841 
1842 	payload_write = malloc(payload_size);
1843 	SPDK_CU_ASSERT_FATAL(payload_write != NULL);
1844 
1845 	payload_pattern = malloc(payload_size);
1846 	SPDK_CU_ASSERT_FATAL(payload_pattern != NULL);
1847 
1848 	/* Prepare random pattern to write */
1849 	for (i = 0; i < pages_per_payload; i++) {
1850 		for (j = 0; j < page_size / sizeof(uint64_t); j++) {
1851 			uint64_t *tmp;
1852 
1853 			tmp = (uint64_t *)payload_pattern;
1854 			tmp += ((page_size * i) / sizeof(uint64_t)) + j;
1855 			*tmp = i + 1;
1856 		}
1857 	}
1858 
1859 	channel = spdk_bs_alloc_io_channel(bs);
1860 	SPDK_CU_ASSERT_FATAL(channel != NULL);
1861 
1862 	/* Create blob */
1863 	ut_spdk_blob_opts_init(&opts);
1864 	opts.thin_provision = false;
1865 	opts.num_clusters = 5;
1866 
1867 	blob = ut_blob_create_and_open(bs, &opts);
1868 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
1869 
1870 	/* Initial read should return zeroes payload */
1871 	memset(payload_read, 0xFF, payload_size);
1872 	iov_read[0].iov_base = payload_read;
1873 	iov_read[0].iov_len = cluster_size * 3;
1874 	iov_read[1].iov_base = payload_read + cluster_size * 3;
1875 	iov_read[1].iov_len = cluster_size * 2;
1876 	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL);
1877 	poll_threads();
1878 	CU_ASSERT(g_bserrno == 0);
1879 	CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size));
1880 
1881 	/* First of iovs fills whole blob except last page and second of iovs writes last page
1882 	 *  with a pattern. */
1883 	iov_write[0].iov_base = payload_pattern;
1884 	iov_write[0].iov_len = payload_size - page_size;
1885 	iov_write[1].iov_base = payload_pattern;
1886 	iov_write[1].iov_len = page_size;
1887 	spdk_blob_io_writev(blob, channel, iov_write, 2, 0, pages_per_payload, blob_op_complete, NULL);
1888 	poll_threads();
1889 	CU_ASSERT(g_bserrno == 0);
1890 
1891 	/* Read whole blob and check consistency */
1892 	memset(payload_read, 0xFF, payload_size);
1893 	iov_read[0].iov_base = payload_read;
1894 	iov_read[0].iov_len = cluster_size * 2;
1895 	iov_read[1].iov_base = payload_read + cluster_size * 2;
1896 	iov_read[1].iov_len = cluster_size * 3;
1897 	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL);
1898 	poll_threads();
1899 	CU_ASSERT(g_bserrno == 0);
1900 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size - page_size) == 0);
1901 	CU_ASSERT(memcmp(payload_pattern, payload_read + payload_size - page_size, page_size) == 0);
1902 
1903 	/* First of iovs fills only first page and second of iovs writes whole blob except
1904 	 *  first page with a pattern. */
1905 	iov_write[0].iov_base = payload_pattern;
1906 	iov_write[0].iov_len = page_size;
1907 	iov_write[1].iov_base = payload_pattern;
1908 	iov_write[1].iov_len = payload_size - page_size;
1909 	spdk_blob_io_writev(blob, channel, iov_write, 2, 0, pages_per_payload, blob_op_complete, NULL);
1910 	poll_threads();
1911 	CU_ASSERT(g_bserrno == 0);
1912 
1913 	/* Read whole blob and check consistency */
1914 	memset(payload_read, 0xFF, payload_size);
1915 	iov_read[0].iov_base = payload_read;
1916 	iov_read[0].iov_len = cluster_size * 4;
1917 	iov_read[1].iov_base = payload_read + cluster_size * 4;
1918 	iov_read[1].iov_len = cluster_size;
1919 	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL);
1920 	poll_threads();
1921 	CU_ASSERT(g_bserrno == 0);
1922 	CU_ASSERT(memcmp(payload_pattern, payload_read + page_size, payload_size - page_size) == 0);
1923 	CU_ASSERT(memcmp(payload_pattern, payload_read, page_size) == 0);
1924 
1925 
1926 	/* Fill whole blob with a pattern (5 clusters) */
1927 
1928 	/* 1. Read test. */
1929 	_blob_io_write_no_split(blob, channel, payload_pattern, 0, pages_per_payload,
1930 				blob_op_complete, NULL);
1931 	poll_threads();
1932 	CU_ASSERT(g_bserrno == 0);
1933 
1934 	memset(payload_read, 0xFF, payload_size);
1935 	iov_read[0].iov_base = payload_read;
1936 	iov_read[0].iov_len = cluster_size;
1937 	iov_read[1].iov_base = payload_read + cluster_size;
1938 	iov_read[1].iov_len = cluster_size * 4;
1939 	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL);
1940 	poll_threads();
1941 	CU_ASSERT(g_bserrno == 0);
1942 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);
1943 
1944 	/* 2. Write test. */
1945 	iov_write[0].iov_base = payload_read;
1946 	iov_write[0].iov_len = cluster_size * 2;
1947 	iov_write[1].iov_base = payload_read + cluster_size * 2;
1948 	iov_write[1].iov_len = cluster_size * 3;
1949 	spdk_blob_io_writev(blob, channel, iov_write, 2, 0, pages_per_payload, blob_op_complete, NULL);
1950 	poll_threads();
1951 	CU_ASSERT(g_bserrno == 0);
1952 
1953 	memset(payload_read, 0xFF, payload_size);
1954 	_blob_io_read_no_split(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
1955 	poll_threads();
1956 	CU_ASSERT(g_bserrno == 0);
1957 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);
1958 
1959 	spdk_bs_free_io_channel(channel);
1960 	poll_threads();
1961 
1962 	g_blob = NULL;
1963 	g_blobid = 0;
1964 
1965 	free(payload_read);
1966 	free(payload_write);
1967 	free(payload_pattern);
1968 
1969 	ut_blob_close_and_delete(bs, blob);
1970 }
1971 
static void
blob_unmap(void)
{
	/*
	 * Verify that resizing a blob down to 0 unmaps (zeroes on the device)
	 * exactly the clusters the blob still considers allocated, while
	 * clusters manually marked unallocated in the cluster map are left
	 * untouched on the device.
	 */
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_io_channel *channel;
	struct spdk_blob_opts opts;
	uint8_t payload[4096];
	int i;

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	/* Create a blob with 10 clusters allocated up front */
	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;

	blob = ut_blob_create_and_open(bs, &opts);

	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	memset(payload, 0, sizeof(payload));
	payload[0] = 0xFF;

	/*
	 * Set first byte of every cluster to 0xFF by writing the backing device
	 * buffer directly (not through the blob API).
	 * First cluster on device is reserved so let's start from cluster number 1
	 */
	for (i = 1; i < 11; i++) {
		g_dev_buffer[i * SPDK_BLOB_OPTS_CLUSTER_SZ] = 0xFF;
	}

	/* Confirm the 0xFF markers are visible through blob reads */
	for (i = 0; i < 10; i++) {
		payload[0] = 0;
		spdk_blob_io_read(blob, channel, &payload, i * SPDK_BLOB_OPTS_CLUSTER_SZ / 4096, 1,
				  blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(payload[0] == 0xFF);
	}

	/* Mark some clusters as unallocated directly in the active cluster map */
	blob->active.clusters[1] = 0;
	blob->active.clusters[2] = 0;
	blob->active.clusters[3] = 0;
	blob->active.clusters[6] = 0;
	blob->active.clusters[8] = 0;

	/* Unmap clusters by resizing to 0 */
	spdk_blob_resize(blob, 0, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Confirm that only 'allocated' clusters were unmapped */
	for (i = 1; i < 11; i++) {
		switch (i) {
		case 2:
		case 3:
		case 4:
		case 7:
		case 9:
			/* Device clusters 2,3,4,7,9 presumably back blob clusters
			 * 1,2,3,6,8 (the +1 offset comes from the reserved first
			 * device cluster), which were marked unallocated above, so
			 * their data must survive the resize. */
			CU_ASSERT(g_dev_buffer[i * SPDK_BLOB_OPTS_CLUSTER_SZ] == 0xFF);
			break;
		default:
			CU_ASSERT(g_dev_buffer[i * SPDK_BLOB_OPTS_CLUSTER_SZ] == 0);
			break;
		}
	}

	spdk_bs_free_io_channel(channel);
	poll_threads();

	ut_blob_close_and_delete(bs, blob);
}
2052 
2053 static void
2054 blob_iter(void)
2055 {
2056 	struct spdk_blob_store *bs = g_bs;
2057 	struct spdk_blob *blob;
2058 	spdk_blob_id blobid;
2059 	struct spdk_blob_opts blob_opts;
2060 
2061 	spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL);
2062 	poll_threads();
2063 	CU_ASSERT(g_blob == NULL);
2064 	CU_ASSERT(g_bserrno == -ENOENT);
2065 
2066 	ut_spdk_blob_opts_init(&blob_opts);
2067 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
2068 	poll_threads();
2069 	CU_ASSERT(g_bserrno == 0);
2070 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
2071 	blobid = g_blobid;
2072 
2073 	spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL);
2074 	poll_threads();
2075 	CU_ASSERT(g_blob != NULL);
2076 	CU_ASSERT(g_bserrno == 0);
2077 	blob = g_blob;
2078 	CU_ASSERT(spdk_blob_get_id(blob) == blobid);
2079 
2080 	spdk_bs_iter_next(bs, blob, blob_op_with_handle_complete, NULL);
2081 	poll_threads();
2082 	CU_ASSERT(g_blob == NULL);
2083 	CU_ASSERT(g_bserrno == -ENOENT);
2084 }
2085 
static void
blob_xattr(void)
{
	/*
	 * Exercise the public xattr API (set/get/list/remove), its md_ro
	 * permission checks, and internal xattrs: internal entries must be
	 * invisible to the public API, must set SPDK_BLOB_INTERNAL_XATTR in
	 * invalid_flags, and must persist across a blobstore reload.
	 */
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob = g_blob;
	spdk_blob_id blobid = spdk_blob_get_id(blob);
	uint64_t length;
	int rc;
	const char *name1, *name2;
	const void *value;
	size_t value_len;
	struct spdk_xattr_names *names;

	/* Test that set_xattr fails if md_ro flag is set. */
	blob->md_ro = true;
	rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
	CU_ASSERT(rc == -EPERM);

	blob->md_ro = false;
	rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
	CU_ASSERT(rc == 0);

	length = 2345;
	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
	CU_ASSERT(rc == 0);

	/* Overwrite "length" xattr. */
	length = 3456;
	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
	CU_ASSERT(rc == 0);

	/* get_xattr should still work even if md_ro flag is set. */
	value = NULL;
	blob->md_ro = true;
	rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(*(uint64_t *)value == length);
	CU_ASSERT(value_len == 8);
	blob->md_ro = false;

	/* Looking up a nonexistent xattr must fail cleanly */
	rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len);
	CU_ASSERT(rc == -ENOENT);

	/* Enumerate names: both xattrs present, in unspecified order */
	names = NULL;
	rc = spdk_blob_get_xattr_names(blob, &names);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(names != NULL);
	CU_ASSERT(spdk_xattr_names_get_count(names) == 2);
	name1 = spdk_xattr_names_get_name(names, 0);
	SPDK_CU_ASSERT_FATAL(name1 != NULL);
	CU_ASSERT(!strcmp(name1, "name") || !strcmp(name1, "length"));
	name2 = spdk_xattr_names_get_name(names, 1);
	SPDK_CU_ASSERT_FATAL(name2 != NULL);
	CU_ASSERT(!strcmp(name2, "name") || !strcmp(name2, "length"));
	CU_ASSERT(strcmp(name1, name2));
	spdk_xattr_names_free(names);

	/* Confirm that remove_xattr fails if md_ro is set to true. */
	blob->md_ro = true;
	rc = spdk_blob_remove_xattr(blob, "name");
	CU_ASSERT(rc == -EPERM);

	blob->md_ro = false;
	rc = spdk_blob_remove_xattr(blob, "name");
	CU_ASSERT(rc == 0);

	rc = spdk_blob_remove_xattr(blob, "foobar");
	CU_ASSERT(rc == -ENOENT);

	/* Set internal xattr */
	length = 7898;
	rc = blob_set_xattr(blob, "internal", &length, sizeof(length), true);
	CU_ASSERT(rc == 0);
	rc = blob_get_xattr_value(blob, "internal", &value, &value_len, true);
	CU_ASSERT(rc == 0);
	CU_ASSERT(*(uint64_t *)value == length);
	/* try to get public xattr with same name */
	rc = spdk_blob_get_xattr_value(blob, "internal", &value, &value_len);
	CU_ASSERT(rc != 0);
	rc = blob_get_xattr_value(blob, "internal", &value, &value_len, false);
	CU_ASSERT(rc != 0);
	/* Check if SPDK_BLOB_INTERNAL_XATTR is set */
	CU_ASSERT((blob->invalid_flags & SPDK_BLOB_INTERNAL_XATTR) ==
		  SPDK_BLOB_INTERNAL_XATTR);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();

	/* Check if xattrs are persisted */
	ut_bs_reload(&bs, NULL);

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;

	rc = blob_get_xattr_value(blob, "internal", &value, &value_len, true);
	CU_ASSERT(rc == 0);
	CU_ASSERT(*(uint64_t *)value == length);

	/* try to get internal xattr through public call */
	rc = spdk_blob_get_xattr_value(blob, "internal", &value, &value_len);
	CU_ASSERT(rc != 0);

	/* Removing the last internal xattr must clear the flag */
	rc = blob_remove_xattr(blob, "internal", true);
	CU_ASSERT(rc == 0);

	CU_ASSERT((blob->invalid_flags & SPDK_BLOB_INTERNAL_XATTR) == 0);
}
2197 
2198 static void
2199 blob_parse_md(void)
2200 {
2201 	struct spdk_blob_store *bs = g_bs;
2202 	struct spdk_blob *blob;
2203 	int rc;
2204 	uint32_t used_pages;
2205 	size_t xattr_length;
2206 	char *xattr;
2207 
2208 	used_pages = spdk_bit_array_count_set(bs->used_md_pages);
2209 	blob = ut_blob_create_and_open(bs, NULL);
2210 
2211 	/* Create large extent to force more than 1 page of metadata. */
2212 	xattr_length = SPDK_BS_MAX_DESC_SIZE - sizeof(struct spdk_blob_md_descriptor_xattr) -
2213 		       strlen("large_xattr");
2214 	xattr = calloc(xattr_length, sizeof(char));
2215 	SPDK_CU_ASSERT_FATAL(xattr != NULL);
2216 	rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
2217 	free(xattr);
2218 	SPDK_CU_ASSERT_FATAL(rc == 0);
2219 
2220 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
2221 	poll_threads();
2222 
2223 	/* Delete the blob and verify that number of pages returned to before its creation. */
2224 	SPDK_CU_ASSERT_FATAL(used_pages != spdk_bit_array_count_set(bs->used_md_pages));
2225 	ut_blob_close_and_delete(bs, blob);
2226 	SPDK_CU_ASSERT_FATAL(used_pages == spdk_bit_array_count_set(bs->used_md_pages));
2227 }
2228 
static void
bs_load(void)
{
	/*
	 * End-to-end load test: initialize a blobstore, populate one blob with
	 * xattrs, unload, then verify the various load paths — invalid device
	 * and option rejection, xattr persistence, device size mismatches, and
	 * the size==0 compatibility mode.
	 */
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	spdk_blob_id blobid;
	struct spdk_blob *blob;
	struct spdk_bs_super_block *super_block;
	uint64_t length;
	int rc;
	const void *value;
	size_t value_len;
	struct spdk_bs_opts opts;
	struct spdk_blob_opts blob_opts;

	dev = init_dev();
	spdk_bs_opts_init(&opts, sizeof(opts));
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");

	/* Initialize a new blob store */
	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	/* Try to open a blobid that does not exist */
	spdk_bs_open_blob(bs, 0, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOENT);
	CU_ASSERT(g_blob == NULL);

	/* Create a blob */
	blob = ut_blob_create_and_open(bs, NULL);
	blobid = spdk_blob_get_id(blob);

	/* Try again to open valid blob but without the upper bit set */
	spdk_bs_open_blob(bs, blobid & 0xFFFFFFFF, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOENT);
	CU_ASSERT(g_blob == NULL);

	/* Set some xattrs */
	rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
	CU_ASSERT(rc == 0);

	length = 2345;
	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
	CU_ASSERT(rc == 0);

	/* Resize the blob */
	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	blob = NULL;
	g_blob = NULL;
	g_blobid = SPDK_BLOBID_INVALID;

	/* Unload the blob store */
	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
	g_blob = NULL;
	g_blobid = 0;

	/* A clean unload must leave clean==1 in the on-disk super block */
	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
	CU_ASSERT(super_block->clean == 1);

	/* Load should fail for device with an unsupported blocklen */
	dev = init_dev();
	dev->blocklen = SPDK_BS_PAGE_SIZE * 2;
	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Load should fail when max_md_ops is set to zero */
	dev = init_dev();
	spdk_bs_opts_init(&opts, sizeof(opts));
	opts.max_md_ops = 0;
	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Load should fail when max_channel_ops is set to zero */
	dev = init_dev();
	spdk_bs_opts_init(&opts, sizeof(opts));
	opts.max_channel_ops = 0;
	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Load an existing blob store */
	dev = init_dev();
	spdk_bs_opts_init(&opts, sizeof(opts));
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
	CU_ASSERT(super_block->clean == 1);
	CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen);

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;

	/* Verify that blobstore is marked dirty after first metadata sync.
	 * NOTE(review): poll_threads() is deliberately not called here, so the
	 * sync has only been started; the super block in g_dev_buffer is
	 * expected to still read clean==1 at this point — confirm this is the
	 * intended check, as the comment and assertion look contradictory. */
	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	CU_ASSERT(super_block->clean == 1);

	/* Get the xattrs */
	value = NULL;
	rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(*(uint64_t *)value == length);
	CU_ASSERT(value_len == 8);

	rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len);
	CU_ASSERT(rc == -ENOENT);

	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	blob = NULL;
	g_blob = NULL;

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;

	/* Load should fail: bdev size < saved size */
	dev = init_dev();
	dev->blockcnt /= 2;

	spdk_bs_opts_init(&opts, sizeof(opts));
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();

	CU_ASSERT(g_bserrno == -EILSEQ);

	/* Load should succeed: bdev size > saved size */
	dev = init_dev();
	dev->blockcnt *= 4;

	spdk_bs_opts_init(&opts, sizeof(opts));
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	CU_ASSERT(g_bserrno == 0);
	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();


	/* Test compatibility mode: size==0 in the super block means the
	 * blobstore predates the size field and must be accepted. */

	dev = init_dev();
	super_block->size = 0;
	super_block->crc = blob_md_page_calc_crc(super_block);

	spdk_bs_opts_init(&opts, sizeof(opts));
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	/* Create a blob */
	ut_spdk_blob_opts_init(&blob_opts);
	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);

	/* Blobstore should update number of blocks in super_block */
	CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen);
	CU_ASSERT(super_block->clean == 0);

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(super_block->clean == 1);
	g_bs = NULL;

}
2433 
static void
bs_load_pending_removal(void)
{
	/*
	 * Verify handling of the SNAPSHOT_PENDING_REMOVAL marker across
	 * blobstore reloads: a snapshot still referenced by a clone keeps the
	 * marker cleared on load, while an unreferenced one is deleted during
	 * load.
	 */
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts opts;
	struct spdk_blob *blob, *snapshot;
	spdk_blob_id blobid, snapshotid;
	const void *value;
	size_t value_len;
	int rc;

	/* Create blob */
	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);

	/* Create snapshot */
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snapshotid = g_blobid;

	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot = g_blob;

	/* Set SNAPSHOT_PENDING_REMOVAL xattr (internal), emulating a deletion
	 * that was interrupted mid-flight. md_ro is toggled off because
	 * snapshots are metadata read-only. */
	snapshot->md_ro = false;
	rc = blob_set_xattr(snapshot, SNAPSHOT_PENDING_REMOVAL, &blobid, sizeof(spdk_blob_id), true);
	CU_ASSERT(rc == 0);
	snapshot->md_ro = true;

	spdk_blob_close(snapshot, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Reload blobstore */
	ut_bs_reload(&bs, NULL);

	/* Snapshot should not be removed as blob is still pointing to it */
	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot = g_blob;

	/* SNAPSHOT_PENDING_REMOVAL xattr should be removed during load */
	rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_PENDING_REMOVAL, &value, &value_len);
	CU_ASSERT(rc != 0);

	/* Set SNAPSHOT_PENDING_REMOVAL xattr again */
	snapshot->md_ro = false;
	rc = blob_set_xattr(snapshot, SNAPSHOT_PENDING_REMOVAL, &blobid, sizeof(spdk_blob_id), true);
	CU_ASSERT(rc == 0);
	snapshot->md_ro = true;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;

	/* Remove parent_id from blob by removing BLOB_SNAPSHOT xattr, so the
	 * snapshot becomes unreferenced */
	blob_remove_xattr(blob, BLOB_SNAPSHOT, true);

	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_blob_close(snapshot, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Reload blobstore */
	ut_bs_reload(&bs, NULL);

	/* Snapshot should be removed as blob is not pointing to it anymore */
	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno != 0);
}
2528 
2529 static void
2530 bs_load_custom_cluster_size(void)
2531 {
2532 	struct spdk_blob_store *bs;
2533 	struct spdk_bs_dev *dev;
2534 	struct spdk_bs_super_block *super_block;
2535 	struct spdk_bs_opts opts;
2536 	uint32_t custom_cluster_size = 4194304; /* 4MiB */
2537 	uint32_t cluster_sz;
2538 	uint64_t total_clusters;
2539 
2540 	dev = init_dev();
2541 	spdk_bs_opts_init(&opts, sizeof(opts));
2542 	opts.cluster_sz = custom_cluster_size;
2543 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2544 
2545 	/* Initialize a new blob store */
2546 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2547 	poll_threads();
2548 	CU_ASSERT(g_bserrno == 0);
2549 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2550 	bs = g_bs;
2551 	cluster_sz = bs->cluster_sz;
2552 	total_clusters = bs->total_clusters;
2553 
2554 	/* Unload the blob store */
2555 	spdk_bs_unload(bs, bs_op_complete, NULL);
2556 	poll_threads();
2557 	CU_ASSERT(g_bserrno == 0);
2558 	g_bs = NULL;
2559 	g_blob = NULL;
2560 	g_blobid = 0;
2561 
2562 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
2563 	CU_ASSERT(super_block->clean == 1);
2564 
2565 	/* Load an existing blob store */
2566 	dev = init_dev();
2567 	spdk_bs_opts_init(&opts, sizeof(opts));
2568 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2569 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2570 	poll_threads();
2571 	CU_ASSERT(g_bserrno == 0);
2572 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2573 	bs = g_bs;
2574 	/* Compare cluster size and number to one after initialization */
2575 	CU_ASSERT(cluster_sz == bs->cluster_sz);
2576 	CU_ASSERT(total_clusters == bs->total_clusters);
2577 
2578 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
2579 	CU_ASSERT(super_block->clean == 1);
2580 	CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen);
2581 
2582 	spdk_bs_unload(bs, bs_op_complete, NULL);
2583 	poll_threads();
2584 	CU_ASSERT(g_bserrno == 0);
2585 	CU_ASSERT(super_block->clean == 1);
2586 	g_bs = NULL;
2587 }
2588 
static void
bs_load_after_failed_grow(void)
{
	/*
	 * Emulate a spdk_bs_grow that was interrupted after enlarging the
	 * on-disk used_cluster mask but before updating the super block, then
	 * verify that a subsequent load ignores the oversized mask, keeps the
	 * original capacity, and preserves all existing blobs.
	 */
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct spdk_bs_super_block *super_block;
	struct spdk_bs_opts opts;
	struct spdk_bs_md_mask *mask;
	struct spdk_blob_opts blob_opts;
	struct spdk_blob *blob, *snapshot;
	spdk_blob_id blobid, snapshotid;
	uint64_t total_data_clusters;

	dev = init_dev();
	spdk_bs_opts_init(&opts, sizeof(opts));
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
	/*
	 * The bdev_size is 64M, cluster_sz is 1M, so there are 64 clusters. The
	 * blobstore will create 64 md pages by default. We set num_md_pages to 128,
	 * thus the blobstore could grow to the double size.
	 */
	opts.num_md_pages = 128;

	/* Initialize a new blob store */
	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	/* Create blob */
	ut_spdk_blob_opts_init(&blob_opts);
	blob_opts.num_clusters = 10;

	blob = ut_blob_create_and_open(bs, &blob_opts);
	blobid = spdk_blob_get_id(blob);

	/* Create snapshot */
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snapshotid = g_blobid;

	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot = g_blob;

	spdk_blob_close(snapshot, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* The blob consumed 10 of the data clusters */
	total_data_clusters = bs->total_data_clusters;
	CU_ASSERT(bs->num_free_clusters + 10 == total_data_clusters);

	/* Unload the blob store */
	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
	g_blob = NULL;
	g_blobid = 0;

	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
	CU_ASSERT(super_block->clean == 1);

	/* Locate the used_cluster mask on the "disk" (offset is in 4KiB pages) */
	mask = (struct spdk_bs_md_mask *)(g_dev_buffer + super_block->used_cluster_mask_start * 4096);
	CU_ASSERT(mask->type == SPDK_MD_MASK_TYPE_USED_CLUSTERS);
	CU_ASSERT(mask->length == super_block->size / super_block->cluster_size);

	/*
	 * We change the mask->length to emulate this scenario: A spdk_bs_grow failed after it changed
	 * the used_cluster bitmap length, but it didn't change the super block yet.
	 */
	mask->length *= 2;

	/* Load an existing blob store from a device that did double in size */
	dev = init_dev();
	dev->blockcnt *= 2;
	spdk_bs_opts_init(&opts, sizeof(opts));
	opts.clear_method = BS_CLEAR_WITH_NONE;
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	/* Check the capacity is the same as before */
	CU_ASSERT(bs->total_data_clusters == total_data_clusters);
	CU_ASSERT(bs->num_free_clusters + 10 == total_data_clusters);

	/* Check the blob and the snapshot are still available */
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot = g_blob;

	spdk_blob_close(snapshot, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(super_block->clean == 1);
	g_bs = NULL;
}
2714 
2715 static void
2716 bs_type(void)
2717 {
2718 	struct spdk_blob_store *bs;
2719 	struct spdk_bs_dev *dev;
2720 	struct spdk_bs_opts opts;
2721 
2722 	dev = init_dev();
2723 	spdk_bs_opts_init(&opts, sizeof(opts));
2724 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2725 
2726 	/* Initialize a new blob store */
2727 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2728 	poll_threads();
2729 	CU_ASSERT(g_bserrno == 0);
2730 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2731 	bs = g_bs;
2732 
2733 	/* Unload the blob store */
2734 	spdk_bs_unload(bs, bs_op_complete, NULL);
2735 	poll_threads();
2736 	CU_ASSERT(g_bserrno == 0);
2737 	g_bs = NULL;
2738 	g_blob = NULL;
2739 	g_blobid = 0;
2740 
2741 	/* Load non existing blobstore type */
2742 	dev = init_dev();
2743 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "NONEXISTING");
2744 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2745 	poll_threads();
2746 	CU_ASSERT(g_bserrno != 0);
2747 
2748 	/* Load with empty blobstore type */
2749 	dev = init_dev();
2750 	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
2751 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2752 	poll_threads();
2753 	CU_ASSERT(g_bserrno == 0);
2754 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2755 	bs = g_bs;
2756 
2757 	spdk_bs_unload(bs, bs_op_complete, NULL);
2758 	poll_threads();
2759 	CU_ASSERT(g_bserrno == 0);
2760 	g_bs = NULL;
2761 
2762 	/* Initialize a new blob store with empty bstype */
2763 	dev = init_dev();
2764 	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
2765 	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
2766 	poll_threads();
2767 	CU_ASSERT(g_bserrno == 0);
2768 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2769 	bs = g_bs;
2770 
2771 	spdk_bs_unload(bs, bs_op_complete, NULL);
2772 	poll_threads();
2773 	CU_ASSERT(g_bserrno == 0);
2774 	g_bs = NULL;
2775 
2776 	/* Load non existing blobstore type */
2777 	dev = init_dev();
2778 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "NONEXISTING");
2779 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2780 	poll_threads();
2781 	CU_ASSERT(g_bserrno != 0);
2782 
2783 	/* Load with empty blobstore type */
2784 	dev = init_dev();
2785 	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
2786 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2787 	poll_threads();
2788 	CU_ASSERT(g_bserrno == 0);
2789 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2790 	bs = g_bs;
2791 
2792 	spdk_bs_unload(bs, bs_op_complete, NULL);
2793 	poll_threads();
2794 	CU_ASSERT(g_bserrno == 0);
2795 	g_bs = NULL;
2796 }
2797 
static void
bs_super_block(void)
{
	/*
	 * Verify super block version handling: a version newer than supported
	 * must be rejected, while a hand-crafted version-1 super block must
	 * still load (backward compatibility).
	 */
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct spdk_bs_super_block *super_block;
	struct spdk_bs_opts opts;
	struct spdk_bs_super_block_ver1 super_block_v1;

	dev = init_dev();
	spdk_bs_opts_init(&opts, sizeof(opts));
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");

	/* Initialize a new blob store */
	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	/* Unload the blob store */
	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
	g_blob = NULL;
	g_blobid = 0;

	/* Load an existing blob store with version newer than supported */
	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
	super_block->version++;

	dev = init_dev();
	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno != 0);

	/* Create a new blob store with super block version 1, writing the
	 * ver1 layout directly into the device buffer. Offsets/lengths below
	 * are in pages; 0xFF..F super_blob means "no super blob set". */
	dev = init_dev();
	super_block_v1.version = 1;
	memcpy(super_block_v1.signature, "SPDKBLOB", sizeof(super_block_v1.signature));
	super_block_v1.length = 0x1000;
	super_block_v1.clean = 1;
	super_block_v1.super_blob = 0xFFFFFFFFFFFFFFFF;
	super_block_v1.cluster_size = 0x100000;
	super_block_v1.used_page_mask_start = 0x01;
	super_block_v1.used_page_mask_len = 0x01;
	super_block_v1.used_cluster_mask_start = 0x02;
	super_block_v1.used_cluster_mask_len = 0x01;
	super_block_v1.md_start = 0x03;
	super_block_v1.md_len = 0x40;
	memset(super_block_v1.reserved, 0, 4036);
	super_block_v1.crc = blob_md_page_calc_crc(&super_block_v1);
	memcpy(g_dev_buffer, &super_block_v1, sizeof(struct spdk_bs_super_block_ver1));

	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
}
2866 
2867 static void
2868 bs_test_recover_cluster_count(void)
2869 {
2870 	struct spdk_blob_store *bs;
2871 	struct spdk_bs_dev *dev;
2872 	struct spdk_bs_super_block super_block;
2873 	struct spdk_bs_opts opts;
2874 
2875 	dev = init_dev();
2876 	spdk_bs_opts_init(&opts, sizeof(opts));
2877 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2878 
2879 	super_block.version = 3;
2880 	memcpy(super_block.signature, "SPDKBLOB", sizeof(super_block.signature));
2881 	super_block.length = 0x1000;
2882 	super_block.clean = 0;
2883 	super_block.super_blob = 0xFFFFFFFFFFFFFFFF;
2884 	super_block.cluster_size = 4096;
2885 	super_block.used_page_mask_start = 0x01;
2886 	super_block.used_page_mask_len = 0x01;
2887 	super_block.used_cluster_mask_start = 0x02;
2888 	super_block.used_cluster_mask_len = 0x01;
2889 	super_block.used_blobid_mask_start = 0x03;
2890 	super_block.used_blobid_mask_len = 0x01;
2891 	super_block.md_start = 0x04;
2892 	super_block.md_len = 0x40;
2893 	memset(super_block.bstype.bstype, 0, sizeof(super_block.bstype.bstype));
2894 	super_block.size = dev->blockcnt * dev->blocklen;
2895 	super_block.io_unit_size = 0x1000;
2896 	memset(super_block.reserved, 0, 4000);
2897 	super_block.crc = blob_md_page_calc_crc(&super_block);
2898 	memcpy(g_dev_buffer, &super_block, sizeof(struct spdk_bs_super_block));
2899 
2900 	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
2901 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2902 	poll_threads();
2903 	CU_ASSERT(g_bserrno == 0);
2904 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2905 	bs = g_bs;
2906 	CU_ASSERT(bs->num_free_clusters == bs->total_clusters - (super_block.md_start +
2907 			super_block.md_len));
2908 
2909 	spdk_bs_unload(bs, bs_op_complete, NULL);
2910 	poll_threads();
2911 	CU_ASSERT(g_bserrno == 0);
2912 	g_bs = NULL;
2913 }
2914 
/*
 * Initialize a blobstore, resize the underlying device to new_blockcnt blocks,
 * and run spdk_bs_grow_live().  Verify the data-cluster accounting as well as
 * the on-disk super block and used_cluster mask, across an unload and reload.
 */
static void
bs_grow_live_size(uint64_t new_blockcnt)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct spdk_bs_super_block super_block;
	struct spdk_bs_opts opts;
	struct spdk_bs_md_mask mask;
	uint64_t bdev_size;
	uint64_t total_data_clusters;

	/*
	 * Further down the test the dev size will be larger than the g_dev_buffer size,
	 * so we set clear_method to NONE, or the blobstore will try to clear the dev and
	 * will write beyond the end of g_dev_buffer.
	 */
	dev = init_dev();
	spdk_bs_opts_init(&opts, sizeof(opts));
	opts.clear_method = BS_CLEAR_WITH_NONE;
	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;
	/* With the default device size, 63 data clusters remain after metadata. */
	CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == 63);

	/*
	 * Set the dev size according to the new_blockcnt,
	 * then the blobstore will adjust the metadata according to the new size.
	 */
	dev->blockcnt = new_blockcnt;
	bdev_size = dev->blockcnt * dev->blocklen;
	spdk_bs_grow_live(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	total_data_clusters = spdk_bs_total_data_cluster_count(bs);
	/* One cluster of 1MiB size is used for metadata */
	CU_ASSERT(total_data_clusters == (bdev_size / (1 * 1024 * 1024)) - 1);

	/* Make sure the super block is updated. */
	memcpy(&super_block, g_dev_buffer, sizeof(struct spdk_bs_super_block));
	CU_ASSERT(super_block.size == bdev_size);
	CU_ASSERT(super_block.clean == 0);
	/* The used_cluster mask is not written out until first spdk_bs_unload. */
	memcpy(&mask, g_dev_buffer + super_block.used_cluster_mask_start * 4096,
	       sizeof(struct spdk_bs_md_mask));
	CU_ASSERT(mask.type == 0);
	CU_ASSERT(mask.length == 0);

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;

	/* Make sure all metadata is correct, super block and used_cluster mask. */
	memcpy(&super_block, g_dev_buffer, sizeof(struct spdk_bs_super_block));
	CU_ASSERT(super_block.size == bdev_size);
	CU_ASSERT(super_block.clean == 1);
	memcpy(&mask, g_dev_buffer + super_block.used_cluster_mask_start * 4096,
	       sizeof(struct spdk_bs_md_mask));
	CU_ASSERT(mask.type == SPDK_MD_MASK_TYPE_USED_CLUSTERS);
	CU_ASSERT(mask.length == bdev_size / (1 * 1024 * 1024));

	/* Load blobstore and check the cluster counts again. */
	dev = init_dev();
	dev->blockcnt = new_blockcnt;
	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	/* NOTE(review): this asserts on the super_block copy taken BEFORE the
	 * load above; it does not re-read the device buffer — confirm intent.
	 */
	CU_ASSERT(super_block.clean == 1);
	bs = g_bs;
	CU_ASSERT(total_data_clusters == spdk_bs_total_data_cluster_count(bs));

	/* Perform grow without change in size, expected pass. */
	spdk_bs_grow_live(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(total_data_clusters == spdk_bs_total_data_cluster_count(bs));
	memcpy(&super_block, g_dev_buffer, sizeof(struct spdk_bs_super_block));
	CU_ASSERT(super_block.size == bdev_size);
	CU_ASSERT(super_block.clean == 1);

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
}
3003 
3004 static void
3005 bs_grow_live(void)
3006 {
3007 	/* No change expected */
3008 	bs_grow_live_size(DEV_BUFFER_BLOCKCNT);
3009 
3010 	/* Size slightly increased, but not enough to increase cluster count */
3011 	bs_grow_live_size(DEV_BUFFER_BLOCKCNT + 1);
3012 
3013 	/* Size doubled, increasing the cluster count */
3014 	bs_grow_live_size(DEV_BUFFER_BLOCKCNT * 2);
3015 }
3016 
/*
 * Verify that spdk_bs_grow_live() rejects a shrunken device (-EILSEQ from
 * super block validation) and a device too large for the reserved
 * used_cluster mask page (-ENOSPC), leaving the blobstore metadata untouched
 * in both cases.
 */
static void
bs_grow_live_no_space(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct spdk_bs_super_block super_block;
	struct spdk_bs_opts opts;
	struct spdk_bs_md_mask mask;
	uint64_t bdev_size_init;
	uint64_t total_data_clusters, max_clusters;

	/*
	 * Further down the test the dev size will be larger than the g_dev_buffer size,
	 * so we set clear_method to NONE, or the blobstore will try to clear the dev and
	 * will write beyond the end of g_dev_buffer.
	 */
	dev = init_dev();
	bdev_size_init = dev->blockcnt * dev->blocklen;
	spdk_bs_opts_init(&opts, sizeof(opts));
	opts.clear_method = BS_CLEAR_WITH_NONE;
	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;
	total_data_clusters = spdk_bs_total_data_cluster_count(bs);
	CU_ASSERT(total_data_clusters == 63);

	/*
	 * The default dev size is 64M, here we set the dev size to 32M,
	 * expecting EILSEQ due to super_block validation and no change in blobstore.
	 */
	dev->blockcnt = (32L * 1024L * 1024L) / dev->blocklen;
	spdk_bs_grow_live(bs, bs_op_complete, NULL);
	poll_threads();
	/* This error code comes from bs_super_validate() */
	CU_ASSERT(g_bserrno == -EILSEQ);
	CU_ASSERT(total_data_clusters == spdk_bs_total_data_cluster_count(bs));
	memcpy(&super_block, g_dev_buffer, sizeof(struct spdk_bs_super_block));
	CU_ASSERT(super_block.size == bdev_size_init);

	/*
	 * Blobstore in this test has only space for single md_page for used_clusters,
	 * which fits 1 bit per cluster minus the md header.
	 *
	 * Dev size is increased to exceed the reserved space for the used_cluster_mask
	 * in the metadata, expecting ENOSPC and no change in blobstore.
	 */
	max_clusters = (spdk_bs_get_page_size(bs) - sizeof(struct spdk_bs_md_mask)) * 8;
	max_clusters += 1;
	dev->blockcnt = (max_clusters * spdk_bs_get_cluster_size(bs)) / dev->blocklen;
	spdk_bs_grow_live(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOSPC);
	CU_ASSERT(total_data_clusters == spdk_bs_total_data_cluster_count(bs));
	memcpy(&super_block, g_dev_buffer, sizeof(struct spdk_bs_super_block));
	CU_ASSERT(super_block.size == bdev_size_init);

	/*
	 * No change should have occurred for the duration of the test,
	 * unload blobstore and check metadata.
	 */
	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;

	/* Make sure all metadata is correct, super block and used_cluster mask. */
	memcpy(&super_block, g_dev_buffer, sizeof(struct spdk_bs_super_block));
	CU_ASSERT(super_block.size == bdev_size_init);
	CU_ASSERT(super_block.clean == 1);
	memcpy(&mask, g_dev_buffer + super_block.used_cluster_mask_start * 4096,
	       sizeof(struct spdk_bs_md_mask));
	CU_ASSERT(mask.type == SPDK_MD_MASK_TYPE_USED_CLUSTERS);
	CU_ASSERT(mask.length == bdev_size_init / (1 * 1024 * 1024));

	/* Load blobstore and check the cluster counts again. */
	dev = init_dev();
	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;
	CU_ASSERT(total_data_clusters == spdk_bs_total_data_cluster_count(bs));

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
}
3107 
/*
 * Verify the offline grow path: spdk_bs_grow() loads the blobstore from a
 * larger device and persists the updated metadata (super block size and
 * used_cluster mask) to disk before completing.
 */
static void
bs_test_grow(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct spdk_bs_super_block super_block;
	struct spdk_bs_opts opts;
	struct spdk_bs_md_mask mask;
	uint64_t bdev_size;

	dev = init_dev();
	bdev_size = dev->blockcnt * dev->blocklen;
	spdk_bs_opts_init(&opts, sizeof(opts));
	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;

	/*
	 * To make sure all the metadata are updated to the disk,
	 * we check the g_dev_buffer after spdk_bs_unload.
	 */
	memcpy(&super_block, g_dev_buffer, sizeof(struct spdk_bs_super_block));
	CU_ASSERT(super_block.size == bdev_size);

	/*
	 * Make sure the used_cluster mask is correct.
	 */
	memcpy(&mask, g_dev_buffer + super_block.used_cluster_mask_start * 4096,
	       sizeof(struct spdk_bs_md_mask));
	CU_ASSERT(mask.type == SPDK_MD_MASK_TYPE_USED_CLUSTERS);
	CU_ASSERT(mask.length == bdev_size / (1 * 1024 * 1024));

	/*
	 * The default dev size is 64M, here we set the dev size to 128M,
	 * then the blobstore will adjust the metadata according to the new size.
	 * The dev size is larger than the g_dev_buffer size, so we set clear_method
	 * to NONE, or the blobstore will try to clear the dev and will write beyond
	 * the end of g_dev_buffer.
	 */
	dev = init_dev();
	dev->blockcnt = (128L * 1024L * 1024L) / dev->blocklen;
	bdev_size = dev->blockcnt * dev->blocklen;
	spdk_bs_opts_init(&opts, sizeof(opts));
	opts.clear_method = BS_CLEAR_WITH_NONE;
	spdk_bs_grow(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	/*
	 * After spdk_bs_grow, all metadata are updated to the disk.
	 * So we can check g_dev_buffer now.
	 */
	memcpy(&super_block, g_dev_buffer, sizeof(struct spdk_bs_super_block));
	CU_ASSERT(super_block.size == bdev_size);

	/*
	 * Make sure the used_cluster mask has been updated according to the bdev size
	 */
	memcpy(&mask, g_dev_buffer + super_block.used_cluster_mask_start * 4096,
	       sizeof(struct spdk_bs_md_mask));
	CU_ASSERT(mask.type == SPDK_MD_MASK_TYPE_USED_CLUSTERS);
	CU_ASSERT(mask.length == bdev_size / (1 * 1024 * 1024));

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
}
3185 
3186 /*
3187  * Create a blobstore and then unload it.
3188  */
/*
 * Verify that spdk_bs_unload() fails with -EBUSY while a blob is still open,
 * and that the blobstore can be unloaded once the blob is closed.
 */
static void
bs_unload(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;

	/* Create a blob and open it. */
	blob = ut_blob_create_and_open(bs, NULL);

	/* Try to unload blobstore, should fail with open blob */
	g_bserrno = -1;
	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EBUSY);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);

	/* Close the blob, then successfully unload blobstore */
	g_bserrno = -1;
	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	/* NOTE(review): no spdk_bs_unload() follows here — the actual unload
	 * presumably happens in the suite teardown; confirm against the test
	 * registration.
	 */
}
3211 
3212 /*
3213  * Create a blobstore with a cluster size different than the default, and ensure it is
3214  *  persisted.
3215  */
3216 static void
3217 bs_cluster_sz(void)
3218 {
3219 	struct spdk_blob_store *bs;
3220 	struct spdk_bs_dev *dev;
3221 	struct spdk_bs_opts opts;
3222 	uint32_t cluster_sz;
3223 
3224 	/* Set cluster size to zero */
3225 	dev = init_dev();
3226 	spdk_bs_opts_init(&opts, sizeof(opts));
3227 	opts.cluster_sz = 0;
3228 
3229 	/* Initialize a new blob store */
3230 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
3231 	poll_threads();
3232 	CU_ASSERT(g_bserrno == -EINVAL);
3233 	SPDK_CU_ASSERT_FATAL(g_bs == NULL);
3234 
3235 	/*
3236 	 * Set cluster size to blobstore page size,
3237 	 * to work it is required to be at least twice the blobstore page size.
3238 	 */
3239 	dev = init_dev();
3240 	spdk_bs_opts_init(&opts, sizeof(opts));
3241 	opts.cluster_sz = SPDK_BS_PAGE_SIZE;
3242 
3243 	/* Initialize a new blob store */
3244 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
3245 	poll_threads();
3246 	CU_ASSERT(g_bserrno == -ENOMEM);
3247 	SPDK_CU_ASSERT_FATAL(g_bs == NULL);
3248 
3249 	/*
3250 	 * Set cluster size to lower than page size,
3251 	 * to work it is required to be at least twice the blobstore page size.
3252 	 */
3253 	dev = init_dev();
3254 	spdk_bs_opts_init(&opts, sizeof(opts));
3255 	opts.cluster_sz = SPDK_BS_PAGE_SIZE - 1;
3256 
3257 	/* Initialize a new blob store */
3258 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
3259 	poll_threads();
3260 	CU_ASSERT(g_bserrno == -EINVAL);
3261 	SPDK_CU_ASSERT_FATAL(g_bs == NULL);
3262 
3263 	/* Set cluster size to twice the default */
3264 	dev = init_dev();
3265 	spdk_bs_opts_init(&opts, sizeof(opts));
3266 	opts.cluster_sz *= 2;
3267 	cluster_sz = opts.cluster_sz;
3268 
3269 	/* Initialize a new blob store */
3270 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
3271 	poll_threads();
3272 	CU_ASSERT(g_bserrno == 0);
3273 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3274 	bs = g_bs;
3275 
3276 	CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz);
3277 
3278 	ut_bs_reload(&bs, &opts);
3279 
3280 	CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz);
3281 
3282 	spdk_bs_unload(bs, bs_op_complete, NULL);
3283 	poll_threads();
3284 	CU_ASSERT(g_bserrno == 0);
3285 	g_bs = NULL;
3286 }
3287 
3288 /*
3289  * Create a blobstore, reload it and ensure total usable cluster count
3290  *  stays the same.
3291  */
3292 static void
3293 bs_usable_clusters(void)
3294 {
3295 	struct spdk_blob_store *bs = g_bs;
3296 	struct spdk_blob *blob;
3297 	uint32_t clusters;
3298 	int i;
3299 
3300 
3301 	clusters = spdk_bs_total_data_cluster_count(bs);
3302 
3303 	ut_bs_reload(&bs, NULL);
3304 
3305 	CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters);
3306 
3307 	/* Create and resize blobs to make sure that useable cluster count won't change */
3308 	for (i = 0; i < 4; i++) {
3309 		g_bserrno = -1;
3310 		g_blobid = SPDK_BLOBID_INVALID;
3311 		blob = ut_blob_create_and_open(bs, NULL);
3312 
3313 		spdk_blob_resize(blob, 10, blob_op_complete, NULL);
3314 		poll_threads();
3315 		CU_ASSERT(g_bserrno == 0);
3316 
3317 		g_bserrno = -1;
3318 		spdk_blob_close(blob, blob_op_complete, NULL);
3319 		poll_threads();
3320 		CU_ASSERT(g_bserrno == 0);
3321 
3322 		CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters);
3323 	}
3324 
3325 	/* Reload the blob store to make sure that nothing changed */
3326 	ut_bs_reload(&bs, NULL);
3327 
3328 	CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters);
3329 }
3330 
3331 /*
3332  * Test resizing of the metadata blob.  This requires creating enough blobs
3333  *  so that one cluster is not enough to fit the metadata for those blobs.
3334  *  To induce this condition to happen more quickly, we reduce the cluster
3335  *  size to 16KB, which means only 4 4KB blob metadata pages can fit.
3336  */
3337 static void
3338 bs_resize_md(void)
3339 {
3340 	struct spdk_blob_store *bs;
3341 	const int CLUSTER_PAGE_COUNT = 4;
3342 	const int NUM_BLOBS = CLUSTER_PAGE_COUNT * 4;
3343 	struct spdk_bs_dev *dev;
3344 	struct spdk_bs_opts opts;
3345 	struct spdk_blob *blob;
3346 	struct spdk_blob_opts blob_opts;
3347 	uint32_t cluster_sz;
3348 	spdk_blob_id blobids[NUM_BLOBS];
3349 	int i;
3350 
3351 
3352 	dev = init_dev();
3353 	spdk_bs_opts_init(&opts, sizeof(opts));
3354 	opts.cluster_sz = CLUSTER_PAGE_COUNT * 4096;
3355 	cluster_sz = opts.cluster_sz;
3356 
3357 	/* Initialize a new blob store */
3358 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
3359 	poll_threads();
3360 	CU_ASSERT(g_bserrno == 0);
3361 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3362 	bs = g_bs;
3363 
3364 	CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz);
3365 
3366 	ut_spdk_blob_opts_init(&blob_opts);
3367 
3368 	for (i = 0; i < NUM_BLOBS; i++) {
3369 		g_bserrno = -1;
3370 		g_blobid = SPDK_BLOBID_INVALID;
3371 		spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
3372 		poll_threads();
3373 		CU_ASSERT(g_bserrno == 0);
3374 		CU_ASSERT(g_blobid !=  SPDK_BLOBID_INVALID);
3375 		blobids[i] = g_blobid;
3376 	}
3377 
3378 	ut_bs_reload(&bs, &opts);
3379 
3380 	CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz);
3381 
3382 	for (i = 0; i < NUM_BLOBS; i++) {
3383 		g_bserrno = -1;
3384 		g_blob = NULL;
3385 		spdk_bs_open_blob(bs, blobids[i], blob_op_with_handle_complete, NULL);
3386 		poll_threads();
3387 		CU_ASSERT(g_bserrno == 0);
3388 		CU_ASSERT(g_blob !=  NULL);
3389 		blob = g_blob;
3390 		g_bserrno = -1;
3391 		spdk_blob_close(blob, blob_op_complete, NULL);
3392 		poll_threads();
3393 		CU_ASSERT(g_bserrno == 0);
3394 	}
3395 
3396 	spdk_bs_unload(bs, bs_op_complete, NULL);
3397 	poll_threads();
3398 	CU_ASSERT(g_bserrno == 0);
3399 	g_bs = NULL;
3400 }
3401 
/*
 * Verify that spdk_bs_destroy() wipes the blobstore: a subsequent load of the
 * same device must fail.
 */
static void
bs_destroy(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;

	/* Initialize a new blob store */
	dev = init_dev();
	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	/* Destroy the blob store */
	g_bserrno = -1;
	spdk_bs_destroy(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Loading a non-existent blob store should fail. */
	g_bs = NULL;
	dev = init_dev();

	g_bserrno = 0;
	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno != 0);
}
3431 
3432 /* Try to hit all of the corner cases associated with serializing
3433  * a blob to disk
3434  */
3435 static void
3436 blob_serialize_test(void)
3437 {
3438 	struct spdk_bs_dev *dev;
3439 	struct spdk_bs_opts opts;
3440 	struct spdk_blob_store *bs;
3441 	spdk_blob_id blobid[2];
3442 	struct spdk_blob *blob[2];
3443 	uint64_t i;
3444 	char *value;
3445 	int rc;
3446 
3447 	dev = init_dev();
3448 
3449 	/* Initialize a new blobstore with very small clusters */
3450 	spdk_bs_opts_init(&opts, sizeof(opts));
3451 	opts.cluster_sz = dev->blocklen * 8;
3452 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
3453 	poll_threads();
3454 	CU_ASSERT(g_bserrno == 0);
3455 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3456 	bs = g_bs;
3457 
3458 	/* Create and open two blobs */
3459 	for (i = 0; i < 2; i++) {
3460 		blob[i] = ut_blob_create_and_open(bs, NULL);
3461 		blobid[i] = spdk_blob_get_id(blob[i]);
3462 
3463 		/* Set a fairly large xattr on both blobs to eat up
3464 		 * metadata space
3465 		 */
3466 		value = calloc(dev->blocklen - 64, sizeof(char));
3467 		SPDK_CU_ASSERT_FATAL(value != NULL);
3468 		memset(value, i, dev->blocklen / 2);
3469 		rc = spdk_blob_set_xattr(blob[i], "name", value, dev->blocklen - 64);
3470 		CU_ASSERT(rc == 0);
3471 		free(value);
3472 	}
3473 
3474 	/* Resize the blobs, alternating 1 cluster at a time.
3475 	 * This thwarts run length encoding and will cause spill
3476 	 * over of the extents.
3477 	 */
3478 	for (i = 0; i < 6; i++) {
3479 		spdk_blob_resize(blob[i % 2], (i / 2) + 1, blob_op_complete, NULL);
3480 		poll_threads();
3481 		CU_ASSERT(g_bserrno == 0);
3482 	}
3483 
3484 	for (i = 0; i < 2; i++) {
3485 		spdk_blob_sync_md(blob[i], blob_op_complete, NULL);
3486 		poll_threads();
3487 		CU_ASSERT(g_bserrno == 0);
3488 	}
3489 
3490 	/* Close the blobs */
3491 	for (i = 0; i < 2; i++) {
3492 		spdk_blob_close(blob[i], blob_op_complete, NULL);
3493 		poll_threads();
3494 		CU_ASSERT(g_bserrno == 0);
3495 	}
3496 
3497 	ut_bs_reload(&bs, &opts);
3498 
3499 	for (i = 0; i < 2; i++) {
3500 		blob[i] = NULL;
3501 
3502 		spdk_bs_open_blob(bs, blobid[i], blob_op_with_handle_complete, NULL);
3503 		poll_threads();
3504 		CU_ASSERT(g_bserrno == 0);
3505 		CU_ASSERT(g_blob != NULL);
3506 		blob[i] = g_blob;
3507 
3508 		CU_ASSERT(spdk_blob_get_num_clusters(blob[i]) == 3);
3509 
3510 		spdk_blob_close(blob[i], blob_op_complete, NULL);
3511 		poll_threads();
3512 		CU_ASSERT(g_bserrno == 0);
3513 	}
3514 
3515 	spdk_bs_unload(bs, bs_op_complete, NULL);
3516 	poll_threads();
3517 	CU_ASSERT(g_bserrno == 0);
3518 	g_bs = NULL;
3519 }
3520 
/*
 * Corrupt the CRC of a blob's metadata page on disk and verify that both
 * opening and deleting that blob fail with -EINVAL.
 */
static void
blob_crc(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	spdk_blob_id blobid;
	uint32_t page_num;
	int index;
	struct spdk_blob_md_page *page;

	blob = ut_blob_create_and_open(bs, NULL);
	blobid = spdk_blob_get_id(blob);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Locate the blob's metadata page in the device buffer and zero its CRC. */
	page_num = bs_blobid_to_page(blobid);
	index = DEV_BUFFER_BLOCKLEN * (bs->md_start + page_num);
	page = (struct spdk_blob_md_page *)&g_dev_buffer[index];
	page->crc = 0;

	/* Open must fail the metadata CRC check. */
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);
	CU_ASSERT(g_blob == NULL);
	g_bserrno = 0;

	/* Delete fails for the same reason: the metadata cannot be read back. */
	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);
}
3553 
/*
 * Zero the super block CRC on disk and verify that loading the blobstore
 * fails with -EILSEQ.
 */
static void
super_block_crc(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct spdk_bs_super_block *super_block;

	dev = init_dev();
	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;

	/* Corrupt the super block CRC directly in the device buffer. */
	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
	super_block->crc = 0;
	dev = init_dev();

	/* Load an existing blob store */
	g_bserrno = 0;
	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EILSEQ);
}
3583 
3584 /* For blob dirty shutdown test case we do the following sub-test cases:
3585  * 1 Initialize new blob store and create 1 super blob with some xattrs, then we
3586  *   dirty shutdown and reload the blob store and verify the xattrs.
3587  * 2 Resize the blob from 10 clusters to 20 clusters and then dirty shutdown,
3588  *   reload the blob store and verify the clusters number.
3589  * 3 Create the second blob and then dirty shutdown, reload the blob store
3590  *   and verify the second blob.
3591  * 4 Delete the second blob and then dirty shutdown, reload the blob store
3592  *   and verify the second blob is invalid.
 * 5 Create the second blob again and also create the third blob, modify the
 *   md of the second blob to make it invalid, and then dirty shutdown,
 *   reload the blob store and verify the second blob, which should be
 *   invalid, and also verify the third blob, which should be correct.
3597  */
/* Exercise dirty-shutdown recovery; the sub-test cases are described in the
 * comment block above.
 */
static void
blob_dirty_shutdown(void)
{
	int rc;
	int index;
	struct spdk_blob_store *bs = g_bs;
	spdk_blob_id blobid1, blobid2, blobid3;
	struct spdk_blob *blob = g_blob;
	uint64_t length;
	uint64_t free_clusters;
	const void *value;
	size_t value_len;
	uint32_t page_num;
	struct spdk_blob_md_page *page;
	struct spdk_blob_opts blob_opts;

	/* Create first blob */
	blobid1 = spdk_blob_get_id(blob);

	/* Set some xattrs */
	rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
	CU_ASSERT(rc == 0);

	length = 2345;
	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
	CU_ASSERT(rc == 0);

	/* Put xattr that fits exactly single page.
	 * This results in adding additional pages to MD.
	 * First is flags and smaller xattr, second the large xattr,
	 * third are just the extents.
	 */
	size_t xattr_length = 4072 - sizeof(struct spdk_blob_md_descriptor_xattr) -
			      strlen("large_xattr");
	char *xattr = calloc(xattr_length, sizeof(char));
	SPDK_CU_ASSERT_FATAL(xattr != NULL);
	rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
	free(xattr);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	/* Resize the blob */
	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Set the blob as the super blob */
	spdk_bs_set_super(bs, blobid1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	free_clusters = spdk_bs_free_cluster_count(bs);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	blob = NULL;
	g_blob = NULL;
	g_blobid = SPDK_BLOBID_INVALID;

	/* Sub-test 1: dirty shutdown, then verify the super blob and xattrs. */
	ut_bs_dirty_load(&bs, NULL);

	/* Get the super blob */
	spdk_bs_get_super(bs, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(blobid1 == g_blobid);

	spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;

	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));

	/* Get the xattrs */
	value = NULL;
	rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(*(uint64_t *)value == length);
	CU_ASSERT(value_len == 8);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);

	/* Sub-test 2: resize 10 -> 20 clusters, then dirty shutdown and
	 * verify the new cluster count survived.
	 */
	/* Resize the blob */
	spdk_blob_resize(blob, 20, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	free_clusters = spdk_bs_free_cluster_count(bs);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	blob = NULL;
	g_blob = NULL;
	g_blobid = SPDK_BLOBID_INVALID;

	ut_bs_dirty_load(&bs, NULL);

	spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 20);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	blob = NULL;
	g_blob = NULL;
	g_blobid = SPDK_BLOBID_INVALID;

	/* Sub-test 3: create a second blob, dirty shutdown, verify it. */
	/* Create second blob */
	blob = ut_blob_create_and_open(bs, NULL);
	blobid2 = spdk_blob_get_id(blob);

	/* Set some xattrs */
	rc = spdk_blob_set_xattr(blob, "name", "log1.txt", strlen("log1.txt") + 1);
	CU_ASSERT(rc == 0);

	length = 5432;
	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
	CU_ASSERT(rc == 0);

	/* Resize the blob */
	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	free_clusters = spdk_bs_free_cluster_count(bs);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	blob = NULL;
	g_blob = NULL;
	g_blobid = SPDK_BLOBID_INVALID;

	ut_bs_dirty_load(&bs, NULL);

	spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;

	/* Get the xattrs */
	value = NULL;
	rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(*(uint64_t *)value == length);
	CU_ASSERT(value_len == 8);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));

	/* Sub-test 4: delete the second blob, dirty shutdown, verify it is
	 * gone while the first blob is still intact.
	 */
	ut_blob_close_and_delete(bs, blob);

	free_clusters = spdk_bs_free_cluster_count(bs);

	ut_bs_dirty_load(&bs, NULL);

	spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno != 0);
	CU_ASSERT(g_blob == NULL);

	spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	ut_bs_reload(&bs, NULL);

	/* Sub-test 5: recreate the second blob plus a third one, corrupt the
	 * second blob's metadata, dirty shutdown, then verify that only the
	 * third blob survives.
	 */
	/* Create second blob */
	ut_spdk_blob_opts_init(&blob_opts);
	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid2 = g_blobid;

	/* Create third blob */
	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid3 = g_blobid;

	spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;

	/* Set some xattrs for second blob */
	rc = spdk_blob_set_xattr(blob, "name", "log1.txt", strlen("log1.txt") + 1);
	CU_ASSERT(rc == 0);

	length = 5432;
	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
	CU_ASSERT(rc == 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	blob = NULL;
	g_blob = NULL;
	g_blobid = SPDK_BLOBID_INVALID;

	spdk_bs_open_blob(bs, blobid3, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;

	/* Set some xattrs for third blob */
	rc = spdk_blob_set_xattr(blob, "name", "log2.txt", strlen("log2.txt") + 1);
	CU_ASSERT(rc == 0);

	length = 5432;
	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
	CU_ASSERT(rc == 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	blob = NULL;
	g_blob = NULL;
	g_blobid = SPDK_BLOBID_INVALID;

	/* Mark second blob as invalid */
	page_num = bs_blobid_to_page(blobid2);

	index = DEV_BUFFER_BLOCKLEN * (bs->md_start + page_num);
	page = (struct spdk_blob_md_page *)&g_dev_buffer[index];
	/* A non-zero sequence_num on the blob's first md page corrupts its
	 * metadata chain; the open below is expected to fail because of it.
	 */
	page->sequence_num = 1;
	page->crc = blob_md_page_calc_crc(page);

	free_clusters = spdk_bs_free_cluster_count(bs);

	ut_bs_dirty_load(&bs, NULL);

	spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno != 0);
	CU_ASSERT(g_blob == NULL);

	spdk_bs_open_blob(bs, blobid3, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;

	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
}
3862 
/*
 * Verify handling of unknown blob flags: an unknown invalid_flag must make the
 * blob unopenable, an unknown data_ro_flag must force the blob read-only, and
 * an unknown md_ro_flag must make only the metadata read-only.
 */
static void
blob_flags(void)
{
	struct spdk_blob_store *bs = g_bs;
	spdk_blob_id blobid_invalid, blobid_data_ro, blobid_md_ro;
	struct spdk_blob *blob_invalid, *blob_data_ro, *blob_md_ro;
	struct spdk_blob_opts blob_opts;
	int rc;

	/* Create three blobs - one each for testing invalid, data_ro and md_ro flags. */
	blob_invalid = ut_blob_create_and_open(bs, NULL);
	blobid_invalid = spdk_blob_get_id(blob_invalid);

	blob_data_ro = ut_blob_create_and_open(bs, NULL);
	blobid_data_ro = spdk_blob_get_id(blob_data_ro);

	ut_spdk_blob_opts_init(&blob_opts);
	blob_opts.clear_method = BLOB_CLEAR_WITH_WRITE_ZEROES;
	blob_md_ro = ut_blob_create_and_open(bs, &blob_opts);
	blobid_md_ro = spdk_blob_get_id(blob_md_ro);
	/* The clear method is stored in the md_ro flags region of the metadata. */
	CU_ASSERT((blob_md_ro->md_ro_flags & SPDK_BLOB_MD_RO_FLAGS_MASK) == BLOB_CLEAR_WITH_WRITE_ZEROES);

	/* Change the size of blob_data_ro to check if flags are serialized
	 * when blob has non zero number of extents */
	spdk_blob_resize(blob_data_ro, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Set the xattr to check if flags are serialized
	 * when blob has non zero number of xattrs */
	rc = spdk_blob_set_xattr(blob_md_ro, "name", "log.txt", strlen("log.txt") + 1);
	CU_ASSERT(rc == 0);

	/* Set high, unknown flag bits directly on the in-memory blobs and mark
	 * them dirty so the next sync persists the flags to disk. */
	blob_invalid->invalid_flags = (1ULL << 63);
	blob_invalid->state = SPDK_BLOB_STATE_DIRTY;
	blob_data_ro->data_ro_flags = (1ULL << 62);
	blob_data_ro->state = SPDK_BLOB_STATE_DIRTY;
	blob_md_ro->md_ro_flags = (1ULL << 61);
	blob_md_ro->state = SPDK_BLOB_STATE_DIRTY;

	g_bserrno = -1;
	spdk_blob_sync_md(blob_invalid, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bserrno = -1;
	spdk_blob_sync_md(blob_data_ro, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bserrno = -1;
	spdk_blob_sync_md(blob_md_ro, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	g_bserrno = -1;
	spdk_blob_close(blob_invalid, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	blob_invalid = NULL;
	g_bserrno = -1;
	spdk_blob_close(blob_data_ro, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	blob_data_ro = NULL;
	g_bserrno = -1;
	spdk_blob_close(blob_md_ro, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	blob_md_ro = NULL;

	g_blob = NULL;
	g_blobid = SPDK_BLOBID_INVALID;

	/* Reload so the blobs are re-parsed from disk with the unknown flags set. */
	ut_bs_reload(&bs, NULL);

	/* Unknown invalid flag: open must fail. */
	g_blob = NULL;
	g_bserrno = 0;
	spdk_bs_open_blob(bs, blobid_invalid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno != 0);
	CU_ASSERT(g_blob == NULL);

	g_blob = NULL;
	g_bserrno = -1;
	spdk_bs_open_blob(bs, blobid_data_ro, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob_data_ro = g_blob;
	/* If an unknown data_ro flag was found, the blob should be marked both data and md read-only. */
	CU_ASSERT(blob_data_ro->data_ro == true);
	CU_ASSERT(blob_data_ro->md_ro == true);
	CU_ASSERT(spdk_blob_get_num_clusters(blob_data_ro) == 10);

	g_blob = NULL;
	g_bserrno = -1;
	spdk_bs_open_blob(bs, blobid_md_ro, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob_md_ro = g_blob;
	/* Unknown md_ro flag: metadata becomes read-only but data stays writable. */
	CU_ASSERT(blob_md_ro->data_ro == false);
	CU_ASSERT(blob_md_ro->md_ro == true);

	/* Syncing an md_ro blob must still succeed (it is a no-op). */
	g_bserrno = -1;
	spdk_blob_sync_md(blob_md_ro, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	ut_blob_close_and_delete(bs, blob_data_ro);
	ut_blob_close_and_delete(bs, blob_md_ro);
}
3974 
/*
 * Verify that an older on-disk blobstore version (v2, which has no used_blobid
 * mask) can be loaded, and that loading/unloading does not silently upgrade
 * the version or re-introduce the mask fields in the super block.
 */
static void
bs_version(void)
{
	struct spdk_bs_super_block *super;
	struct spdk_blob_store *bs = g_bs;
	struct spdk_bs_dev *dev;
	struct spdk_blob *blob;
	struct spdk_blob_opts blob_opts;
	spdk_blob_id blobid;

	/* Unload the blob store */
	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;

	/*
	 * Change the bs version on disk.  This will allow us to
	 *  test that the version does not get modified automatically
	 *  when loading and unloading the blobstore.
	 */
	super = (struct spdk_bs_super_block *)&g_dev_buffer[0];
	CU_ASSERT(super->version == SPDK_BS_VERSION);
	CU_ASSERT(super->clean == 1);
	super->version = 2;
	/*
	 * Version 2 metadata does not have a used blobid mask, so clear
	 *  those fields in the super block and zero the corresponding
	 *  region on "disk".  We will use this to ensure blob IDs are
	 *  correctly reconstructed.
	 */
	memset(&g_dev_buffer[super->used_blobid_mask_start * SPDK_BS_PAGE_SIZE], 0,
	       super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE);
	super->used_blobid_mask_start = 0;
	super->used_blobid_mask_len = 0;
	/* Recompute the CRC so the hand-edited super block is still valid. */
	super->crc = blob_md_page_calc_crc(super);

	/* Load an existing blob store */
	dev = init_dev();
	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	CU_ASSERT(super->clean == 1);
	bs = g_bs;

	/*
	 * Create a blob - just to make sure that when we unload it
	 *  results in writing the super block (since metadata pages
	 *  were allocated.
	 */
	ut_spdk_blob_opts_init(&blob_opts);
	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	/* Unload the blob store */
	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
	/* Unload must not upgrade the version nor write a blobid mask. */
	CU_ASSERT(super->version == 2);
	CU_ASSERT(super->used_blobid_mask_start == 0);
	CU_ASSERT(super->used_blobid_mask_len == 0);

	dev = init_dev();
	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	/* The blob ID must have been reconstructed without the blobid mask. */
	g_blob = NULL;
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;

	ut_blob_close_and_delete(bs, blob);

	CU_ASSERT(super->version == 2);
	CU_ASSERT(super->used_blobid_mask_start == 0);
	CU_ASSERT(super->used_blobid_mask_len == 0);
}
4062 
4063 static void
4064 blob_set_xattrs_test(void)
4065 {
4066 	struct spdk_blob_store *bs = g_bs;
4067 	struct spdk_blob *blob;
4068 	struct spdk_blob_opts opts;
4069 	const void *value;
4070 	size_t value_len;
4071 	char *xattr;
4072 	size_t xattr_length;
4073 	int rc;
4074 
4075 	/* Create blob with extra attributes */
4076 	ut_spdk_blob_opts_init(&opts);
4077 
4078 	opts.xattrs.names = g_xattr_names;
4079 	opts.xattrs.get_value = _get_xattr_value;
4080 	opts.xattrs.count = 3;
4081 	opts.xattrs.ctx = &g_ctx;
4082 
4083 	blob = ut_blob_create_and_open(bs, &opts);
4084 
4085 	/* Get the xattrs */
4086 	value = NULL;
4087 
4088 	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len);
4089 	CU_ASSERT(rc == 0);
4090 	SPDK_CU_ASSERT_FATAL(value != NULL);
4091 	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
4092 	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);
4093 
4094 	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len);
4095 	CU_ASSERT(rc == 0);
4096 	SPDK_CU_ASSERT_FATAL(value != NULL);
4097 	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
4098 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);
4099 
4100 	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len);
4101 	CU_ASSERT(rc == 0);
4102 	SPDK_CU_ASSERT_FATAL(value != NULL);
4103 	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
4104 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);
4105 
4106 	/* Try to get non existing attribute */
4107 
4108 	rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len);
4109 	CU_ASSERT(rc == -ENOENT);
4110 
4111 	/* Try xattr exceeding maximum length of descriptor in single page */
4112 	xattr_length = SPDK_BS_MAX_DESC_SIZE - sizeof(struct spdk_blob_md_descriptor_xattr) -
4113 		       strlen("large_xattr") + 1;
4114 	xattr = calloc(xattr_length, sizeof(char));
4115 	SPDK_CU_ASSERT_FATAL(xattr != NULL);
4116 	rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
4117 	free(xattr);
4118 	SPDK_CU_ASSERT_FATAL(rc == -ENOMEM);
4119 
4120 	spdk_blob_close(blob, blob_op_complete, NULL);
4121 	poll_threads();
4122 	CU_ASSERT(g_bserrno == 0);
4123 	blob = NULL;
4124 	g_blob = NULL;
4125 	g_blobid = SPDK_BLOBID_INVALID;
4126 
4127 	/* NULL callback */
4128 	ut_spdk_blob_opts_init(&opts);
4129 	opts.xattrs.names = g_xattr_names;
4130 	opts.xattrs.get_value = NULL;
4131 	opts.xattrs.count = 1;
4132 	opts.xattrs.ctx = &g_ctx;
4133 
4134 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
4135 	poll_threads();
4136 	CU_ASSERT(g_bserrno == -EINVAL);
4137 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
4138 
4139 	/* NULL values */
4140 	ut_spdk_blob_opts_init(&opts);
4141 	opts.xattrs.names = g_xattr_names;
4142 	opts.xattrs.get_value = _get_xattr_value_null;
4143 	opts.xattrs.count = 1;
4144 	opts.xattrs.ctx = NULL;
4145 
4146 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
4147 	poll_threads();
4148 	CU_ASSERT(g_bserrno == -EINVAL);
4149 }
4150 
/*
 * Verify that resizing a thin-provisioned blob (grow, shrink) never allocates
 * clusters, that md sync does not change allocations, and that the size
 * persists across a blobstore reload.
 */
static void
blob_thin_prov_alloc(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_blob_opts opts;
	spdk_blob_id blobid;
	uint64_t free_clusters;

	free_clusters = spdk_bs_free_cluster_count(bs);

	/* Set blob as thin provisioned */
	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = true;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);
	/* Thin-provisioned create must not consume any clusters. */
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));

	CU_ASSERT(blob->active.num_clusters == 0);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);

	/* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */
	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(blob->active.num_clusters == 5);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);

	/* Grow it to 1TB - still unallocated */
	spdk_blob_resize(blob, 262144, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(blob->active.num_clusters == 262144);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 262144);

	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	/* Sync must not change anything */
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(blob->active.num_clusters == 262144);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 262144);
	/* Since clusters are not allocated,
	 * number of metadata pages is expected to be minimal.
	 */
	CU_ASSERT(blob->active.num_pages == 1);

	/* Shrink the blob to 3 clusters - still unallocated */
	spdk_blob_resize(blob, 3, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(blob->active.num_clusters == 3);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 3);

	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	/* Sync must not change anything */
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(blob->active.num_clusters == 3);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 3);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Reload and confirm the final size was persisted to disk. */
	ut_bs_reload(&bs, NULL);

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;

	/* Check that clusters allocation and size is still the same */
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(blob->active.num_clusters == 3);

	ut_blob_close_and_delete(bs, blob);
}
4235 
4236 static void
4237 blob_insert_cluster_msg_test(void)
4238 {
4239 	struct spdk_blob_store *bs = g_bs;
4240 	struct spdk_blob *blob;
4241 	struct spdk_blob_opts opts;
4242 	struct spdk_blob_md_page page = {};
4243 	spdk_blob_id blobid;
4244 	uint64_t free_clusters;
4245 	uint64_t new_cluster = 0;
4246 	uint32_t cluster_num = 3;
4247 	uint32_t extent_page = 0;
4248 
4249 	free_clusters = spdk_bs_free_cluster_count(bs);
4250 
4251 	/* Set blob as thin provisioned */
4252 	ut_spdk_blob_opts_init(&opts);
4253 	opts.thin_provision = true;
4254 	opts.num_clusters = 4;
4255 
4256 	blob = ut_blob_create_and_open(bs, &opts);
4257 	blobid = spdk_blob_get_id(blob);
4258 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4259 
4260 	CU_ASSERT(blob->active.num_clusters == 4);
4261 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 4);
4262 	CU_ASSERT(blob->active.clusters[cluster_num] == 0);
4263 
4264 	/* Specify cluster_num to allocate and new_cluster will be returned to insert on md_thread.
4265 	 * This is to simulate behaviour when cluster is allocated after blob creation.
4266 	 * Such as _spdk_bs_allocate_and_copy_cluster(). */
4267 	spdk_spin_lock(&bs->used_lock);
4268 	bs_allocate_cluster(blob, cluster_num, &new_cluster, &extent_page, false);
4269 	CU_ASSERT(blob->active.clusters[cluster_num] == 0);
4270 	spdk_spin_unlock(&bs->used_lock);
4271 
4272 	blob_insert_cluster_on_md_thread(blob, cluster_num, new_cluster, extent_page, &page,
4273 					 blob_op_complete, NULL);
4274 	poll_threads();
4275 
4276 	CU_ASSERT(blob->active.clusters[cluster_num] != 0);
4277 
4278 	spdk_blob_close(blob, blob_op_complete, NULL);
4279 	poll_threads();
4280 	CU_ASSERT(g_bserrno == 0);
4281 
4282 	ut_bs_reload(&bs, NULL);
4283 
4284 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
4285 	poll_threads();
4286 	CU_ASSERT(g_bserrno == 0);
4287 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4288 	blob = g_blob;
4289 
4290 	CU_ASSERT(blob->active.clusters[cluster_num] != 0);
4291 
4292 	ut_blob_close_and_delete(bs, blob);
4293 }
4294 
/*
 * Thin-provisioned read/write test: unallocated clusters read back as zeroes,
 * a first write triggers cluster allocation, and two threads writing to the
 * same unallocated cluster race safely (the loser reuses the winner's
 * allocation). Also checks the exact number of device bytes written.
 */
static void
blob_thin_prov_rw(void)
{
	static const uint8_t zero[10 * 4096] = { 0 };
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob, *blob_id0;
	struct spdk_io_channel *channel, *channel_thread1;
	struct spdk_blob_opts opts;
	uint64_t free_clusters;
	uint64_t page_size;
	uint8_t payload_read[10 * 4096];
	uint8_t payload_write[10 * 4096];
	uint64_t write_bytes;
	uint64_t read_bytes;

	free_clusters = spdk_bs_free_cluster_count(bs);
	page_size = spdk_bs_get_page_size(bs);

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = true;

	/* Create and delete blob at md page 0, so that next md page allocation
	 * for extent will use that. */
	blob_id0 = ut_blob_create_and_open(bs, &opts);
	blob = ut_blob_create_and_open(bs, &opts);
	ut_blob_close_and_delete(bs, blob_id0);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));

	CU_ASSERT(blob->active.num_clusters == 0);

	/* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */
	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(blob->active.num_clusters == 5);

	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	/* Sync must not change anything */
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(blob->active.num_clusters == 5);

	/* Payload should be all zeros from unallocated clusters */
	memset(payload_read, 0xFF, sizeof(payload_read));
	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0);

	/* Snapshot the device I/O counters before the first allocating write. */
	write_bytes = g_dev_write_bytes;
	read_bytes = g_dev_read_bytes;

	/* Perform write on thread 1. That will allocate cluster on thread 0 via send_msg */
	set_thread(1);
	channel_thread1 = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel_thread1 != NULL);
	memset(payload_write, 0xE5, sizeof(payload_write));
	spdk_blob_io_write(blob, channel_thread1, payload_write, 4, 10, blob_op_complete, NULL);
	CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs));
	/* Perform write on thread 0. That will try to allocate cluster,
	 * but fail due to another thread issuing the cluster allocation first. */
	set_thread(0);
	memset(payload_write, 0xE5, sizeof(payload_write));
	spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
	/* Both writes have claimed a cluster before either completes... */
	CU_ASSERT(free_clusters - 2 == spdk_bs_free_cluster_count(bs));
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	/* ...but the loser releases its claim, so only one cluster stays used. */
	CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs));
	/* For thin-provisioned blob we need to write 20 pages plus one page metadata and
	 * read 0 bytes */
	if (g_use_extent_table) {
		/* Add one more page for EXTENT_PAGE write */
		CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 22);
	} else {
		CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 21);
	}
	CU_ASSERT(g_dev_read_bytes - read_bytes == 0);

	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);

	/* Deleting the blob must return its cluster to the free pool. */
	ut_blob_close_and_delete(bs, blob);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));

	set_thread(1);
	spdk_bs_free_io_channel(channel_thread1);
	set_thread(0);
	spdk_bs_free_io_channel(channel);
	poll_threads();
	g_blob = NULL;
	g_blobid = 0;
}
4394 
/*
 * Count the exact device writes caused by allocating writes on a
 * thin-provisioned blob, across multiple extent pages. Uses a private
 * blobstore with a tiny cluster size so several extent pages are needed.
 */
static void
blob_thin_prov_write_count_io(void)
{
	struct spdk_blob_store *bs;
	struct spdk_blob *blob;
	struct spdk_io_channel *ch;
	struct spdk_bs_dev *dev;
	struct spdk_bs_opts bs_opts;
	struct spdk_blob_opts opts;
	uint64_t free_clusters;
	uint64_t page_size;
	uint8_t payload_write[4096];
	uint64_t write_bytes;
	uint64_t read_bytes;
	const uint32_t CLUSTER_SZ = 16384;
	uint32_t pages_per_cluster;
	uint32_t pages_per_extent_page;
	uint32_t i;

	/* Use a very small cluster size for this test.  This ensures we need multiple
	 * extent pages to hold all of the clusters even for relatively small blobs like
	 * we are restricted to for the unit tests (i.e. we don't want to allocate multi-GB
	 * buffers).
	 */
	dev = init_dev();
	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
	bs_opts.cluster_sz = CLUSTER_SZ;

	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	free_clusters = spdk_bs_free_cluster_count(bs);
	page_size = spdk_bs_get_page_size(bs);
	pages_per_cluster = CLUSTER_SZ / page_size;
	pages_per_extent_page = SPDK_EXTENTS_PER_EP * pages_per_cluster;

	ch = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = true;

	blob = ut_blob_create_and_open(bs, &opts);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));

	/* Resize the blob so that it will require 8 extent pages to hold all of
	 * the clusters.
	 */
	g_bserrno = -1;
	spdk_blob_resize(blob, SPDK_EXTENTS_PER_EP * 8, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	g_bserrno = -1;
	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(blob->active.num_clusters == SPDK_EXTENTS_PER_EP * 8);

	/* One iteration per extent page: each allocates two clusters in that
	 * extent page and checks the exact device bytes written. */
	memset(payload_write, 0, sizeof(payload_write));
	for (i = 0; i < 8; i++) {
		write_bytes = g_dev_write_bytes;
		read_bytes = g_dev_read_bytes;

		/* First write into extent page i - allocates one cluster. */
		g_bserrno = -1;
		spdk_blob_io_write(blob, ch, payload_write, pages_per_extent_page * i, 1, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(free_clusters - (2 * i + 1) == spdk_bs_free_cluster_count(bs));

		CU_ASSERT(g_dev_read_bytes == read_bytes);
		if (!g_use_extent_table) {
			/* For legacy metadata, we should have written two pages - one for the
			 * write I/O itself, another for the blob's primary metadata.
			 */
			CU_ASSERT((g_dev_write_bytes - write_bytes) / page_size == 2);
		} else {
			/* For extent table metadata, we should have written three pages - one
			 * for the write I/O, one for the extent page, one for the blob's primary
			 * metadata.
			 */
			CU_ASSERT((g_dev_write_bytes - write_bytes) / page_size == 3);
		}

		/* The write should have synced the metadata already.  Do another sync here
		 * just to confirm.
		 */
		write_bytes = g_dev_write_bytes;
		read_bytes = g_dev_read_bytes;

		g_bserrno = -1;
		spdk_blob_sync_md(blob, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(free_clusters - (2 * i + 1) == spdk_bs_free_cluster_count(bs));

		/* The extra sync must cause no device I/O at all. */
		CU_ASSERT(g_dev_read_bytes == read_bytes);
		CU_ASSERT(g_dev_write_bytes == write_bytes);

		/* Now write to another unallocated cluster that is part of the same extent page. */
		g_bserrno = -1;
		spdk_blob_io_write(blob, ch, payload_write, pages_per_extent_page * i + pages_per_cluster,
				   1, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(free_clusters - (2 * i + 2) == spdk_bs_free_cluster_count(bs));

		CU_ASSERT(g_dev_read_bytes == read_bytes);
		/*
		 * For legacy metadata, we should have written the I/O and the primary metadata page.
		 * For extent table metadata, we should have written the I/O and the extent metadata page.
		 */
		CU_ASSERT((g_dev_write_bytes - write_bytes) / page_size == 2);

		/* Send unmap aligned to the whole cluster - should free it up */
		g_bserrno = -1;
		spdk_blob_io_unmap(blob, ch, pages_per_extent_page * i, pages_per_cluster, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(free_clusters - (2 * i + 1) == spdk_bs_free_cluster_count(bs));

		/* Write back to the freed cluster */
		g_bserrno = -1;
		spdk_blob_io_write(blob, ch, payload_write, pages_per_extent_page * i, 1, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(free_clusters - (2 * i + 2) == spdk_bs_free_cluster_count(bs));
	}

	ut_blob_close_and_delete(bs, blob);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));

	spdk_bs_free_io_channel(ch);
	poll_threads();
	g_blob = NULL;
	g_blobid = 0;

	/* This test created its own blobstore; tear it down here. */
	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
}
4541 
/*
 * Exercise unmap of whole clusters on a thin-provisioned blob: aligned and
 * unaligned unmaps that cover a full cluster free it, freed clusters read back
 * as zeroes and can be rewritten, a write racing an unmap does not reclaim the
 * cluster being unmapped, and clusters are not returned to the free pool until
 * the unmap reaches the backing device.
 */
static void
blob_thin_prov_unmap_cluster(void)
{
	struct spdk_blob_store *bs;
	struct spdk_blob *blob;
	struct spdk_io_channel *ch;
	struct spdk_bs_dev *dev;
	struct spdk_bs_opts bs_opts;
	struct spdk_blob_opts opts;
	uint64_t free_clusters;
	uint64_t page_size;
	uint8_t payload_write[4096];
	uint8_t payload_read[4096];
	const uint32_t CLUSTER_COUNT = 3;
	uint32_t pages_per_cluster;
	uint32_t i;

	/* Use a very large cluster size for this test. Check how the unmap/release cluster code path behaves when
	 * clusters are fully used.
	 */
	dev = init_dev();
	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
	bs_opts.cluster_sz = dev->blocklen * dev->blockcnt / (CLUSTER_COUNT + 1);

	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	free_clusters = spdk_bs_free_cluster_count(bs);
	page_size = spdk_bs_get_page_size(bs);
	pages_per_cluster = bs_opts.cluster_sz / page_size;

	ch = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = true;

	blob = ut_blob_create_and_open(bs, &opts);
	CU_ASSERT(free_clusters == CLUSTER_COUNT);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));

	g_bserrno = -1;
	spdk_blob_resize(blob, CLUSTER_COUNT, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	g_bserrno = -1;
	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(blob->active.num_clusters == CLUSTER_COUNT);

	/* Fill all clusters */
	for (i = 0; i < CLUSTER_COUNT; i++) {
		memset(payload_write, i + 1, sizeof(payload_write));
		g_bserrno = -1;
		spdk_blob_io_write(blob, ch, payload_write, pages_per_cluster * i, 1, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(free_clusters - (i + 1) == spdk_bs_free_cluster_count(bs));
	}
	CU_ASSERT(0 == spdk_bs_free_cluster_count(bs));

	/* Unmap one whole cluster */
	g_bserrno = -1;
	spdk_blob_io_unmap(blob, ch, pages_per_cluster, pages_per_cluster, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(1 == spdk_bs_free_cluster_count(bs));

	/* Verify the data read from the cluster is zeroed out */
	memset(payload_write, 0, sizeof(payload_write));
	spdk_blob_io_read(blob, ch, payload_read, pages_per_cluster, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_write, payload_read, 4096) == 0);

	/* Fill the same cluster with data */
	memset(payload_write, 3, sizeof(payload_write));
	g_bserrno = -1;
	spdk_blob_io_write(blob, ch, payload_write, pages_per_cluster, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(0 == spdk_bs_free_cluster_count(bs));

	/* Verify the data read from the cluster has the expected data */
	spdk_blob_io_read(blob, ch, payload_read, pages_per_cluster, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_write, payload_read, 4096) == 0);

	/* Send an unaligned unmap that ecompasses one whole cluster */
	g_bserrno = -1;
	spdk_blob_io_unmap(blob, ch, pages_per_cluster - 1, pages_per_cluster + 2, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(1 == spdk_bs_free_cluster_count(bs));

	/* Verify the data read from the cluster is zeroed out */
	g_bserrno = -1;
	memset(payload_write, 0, sizeof(payload_write));
	spdk_blob_io_read(blob, ch, payload_read, pages_per_cluster, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_write, payload_read, 4096) == 0);

	/* Send a simultaneous unmap with a write to an unallocated area -
	 * check that writes don't claim the currently unmapped cluster */
	g_bserrno = -1;
	memset(payload_write, 7, sizeof(payload_write));
	spdk_blob_io_unmap(blob, ch, 0, pages_per_cluster, blob_op_complete, NULL);
	spdk_blob_io_write(blob, ch, payload_write, pages_per_cluster, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(1 == spdk_bs_free_cluster_count(bs));

	/* Verify the contents of written sector */
	g_bserrno = -1;
	spdk_blob_io_read(blob, ch, payload_read, pages_per_cluster, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_write, payload_read, 4096) == 0);

	/* Verify the contents of unmapped sector */
	g_bserrno = -1;
	memset(payload_write, 0, sizeof(payload_write));
	spdk_blob_io_read(blob, ch, payload_read, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_write, payload_read, 4096) == 0);

	/* Make sure clusters are not freed until the unmap to the drive is done */
	g_bserrno = -1;
	memset(payload_write, 7, sizeof(payload_write));
	spdk_blob_io_write(blob, ch, payload_write, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(0 == spdk_bs_free_cluster_count(bs));

	/* Step the unmap forward one poll at a time: while the written pattern is
	 * still visible in the device buffer the cluster must not be freed yet. */
	g_bserrno = -1;
	spdk_blob_io_unmap(blob, ch, 0, pages_per_cluster, blob_op_complete, NULL);
	while (memcmp(payload_write, &g_dev_buffer[4096 * pages_per_cluster], 4096) == 0) {
		CU_ASSERT(0 == spdk_bs_free_cluster_count(bs));
		poll_thread_times(0, 1);
	}
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(1 == spdk_bs_free_cluster_count(bs));

	ut_blob_close_and_delete(bs, blob);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));

	spdk_bs_free_io_channel(ch);
	poll_threads();
	g_blob = NULL;
	g_blobid = 0;

	/* This test created its own blobstore; tear it down here. */
	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
}
4708 
/* Thin provisioning with a run-length-encoded extent: allocate a cluster in
 * the middle of a blob's address range (second cluster written first), check
 * the exact number of device bytes the allocation costs, and confirm the
 * data survives a blobstore reload. */
static void
blob_thin_prov_rle(void)
{
	static const uint8_t zero[10 * 4096] = { 0 };
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_io_channel *channel;
	struct spdk_blob_opts opts;
	spdk_blob_id blobid;
	uint64_t free_clusters;
	uint64_t page_size;
	uint8_t payload_read[10 * 4096];
	uint8_t payload_write[10 * 4096];
	uint64_t write_bytes;
	uint64_t read_bytes;
	uint64_t io_unit;

	free_clusters = spdk_bs_free_cluster_count(bs);
	page_size = spdk_bs_get_page_size(bs);

	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = true;
	opts.num_clusters = 5;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);
	/* Creating a thin-provisioned blob must not consume any clusters */
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	/* Target specifically second cluster in a blob as first allocation */
	io_unit = bs_cluster_to_page(bs, 1) * bs_io_unit_per_page(bs);

	/* Payload should be all zeros from unallocated clusters */
	memset(payload_read, 0xFF, sizeof(payload_read));
	spdk_blob_io_read(blob, channel, payload_read, io_unit, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0);

	/* Snapshot the device byte counters so only this write's traffic is measured */
	write_bytes = g_dev_write_bytes;
	read_bytes = g_dev_read_bytes;

	/* Issue write to second cluster in a blob */
	memset(payload_write, 0xE5, sizeof(payload_write));
	spdk_blob_io_write(blob, channel, payload_write, io_unit, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	/* The write must have allocated exactly one cluster */
	CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs));
	/* For thin-provisioned blob we need to write 10 pages plus one page metadata and
	 * read 0 bytes */
	if (g_use_extent_table) {
		/* Add one more page for EXTENT_PAGE write */
		CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 12);
	} else {
		CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 11);
	}
	CU_ASSERT(g_dev_read_bytes - read_bytes == 0);

	/* Read back and verify the data just written */
	spdk_blob_io_read(blob, channel, payload_read, io_unit, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Reload the blobstore so the extent metadata is re-read from "disk" */
	ut_bs_reload(&bs, NULL);

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	/* Read second cluster after blob reload to confirm data written */
	spdk_blob_io_read(blob, channel, payload_read, io_unit, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();

	ut_blob_close_and_delete(bs, blob);
}
4803 
/* Vectored (readv/writev) I/O against a thin-provisioned blob: resize an
 * empty blob to 5 clusters without allocating, verify unallocated reads
 * return zeros, then write and read back a 10-page payload split across
 * three iovecs. */
static void
blob_thin_prov_rw_iov(void)
{
	static const uint8_t zero[10 * 4096] = { 0 };
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_io_channel *channel;
	struct spdk_blob_opts opts;
	uint64_t free_clusters;
	uint8_t payload_read[10 * 4096];
	uint8_t payload_write[10 * 4096];
	struct iovec iov_read[3];
	struct iovec iov_write[3];

	free_clusters = spdk_bs_free_cluster_count(bs);

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = true;

	blob = ut_blob_create_and_open(bs, &opts);
	/* Creating a thin-provisioned blob must not consume any clusters */
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));

	CU_ASSERT(blob->active.num_clusters == 0);

	/* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */
	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(blob->active.num_clusters == 5);

	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	/* Sync must not change anything */
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(blob->active.num_clusters == 5);

	/* Payload should be all zeros from unallocated clusters */
	memset(payload_read, 0xAA, sizeof(payload_read));
	iov_read[0].iov_base = payload_read;
	iov_read[0].iov_len = 3 * 4096;
	iov_read[1].iov_base = payload_read + 3 * 4096;
	iov_read[1].iov_len = 4 * 4096;
	iov_read[2].iov_base = payload_read + 7 * 4096;
	iov_read[2].iov_len = 3 * 4096;
	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0);

	/* Write 10 pages split unevenly (1/5/4 pages) across three iovecs */
	memset(payload_write, 0xE5, sizeof(payload_write));
	iov_write[0].iov_base = payload_write;
	iov_write[0].iov_len = 1 * 4096;
	iov_write[1].iov_base = payload_write + 1 * 4096;
	iov_write[1].iov_len = 5 * 4096;
	iov_write[2].iov_base = payload_write + 6 * 4096;
	iov_write[2].iov_len = 4 * 4096;

	spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Read back with a different (3/4/3 pages) iovec split and verify */
	memset(payload_read, 0xAA, sizeof(payload_read));
	iov_read[0].iov_base = payload_read;
	iov_read[0].iov_len = 3 * 4096;
	iov_read[1].iov_base = payload_read + 3 * 4096;
	iov_read[1].iov_len = 4 * 4096;
	iov_read[2].iov_base = payload_read + 7 * 4096;
	iov_read[2].iov_len = 3 * 4096;
	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();

	ut_blob_close_and_delete(bs, blob);
}
4887 
/* Context handed to the bs_load iteration callback (test_iter): records the
 * blob IDs expected to be visited and tracks how far iteration has gone. */
struct iter_ctx {
	int		current_iter;	/* index of the next blob expected from the callback */
	spdk_blob_id	blobid[4];	/* expected blob IDs, in creation order */
};
4892 
4893 static void
4894 test_iter(void *arg, struct spdk_blob *blob, int bserrno)
4895 {
4896 	struct iter_ctx *iter_ctx = arg;
4897 	spdk_blob_id blobid;
4898 
4899 	CU_ASSERT(bserrno == 0);
4900 	blobid = spdk_blob_get_id(blob);
4901 	CU_ASSERT(blobid == iter_ctx->blobid[iter_ctx->current_iter++]);
4902 }
4903 
4904 static void
4905 bs_load_iter_test(void)
4906 {
4907 	struct spdk_blob_store *bs;
4908 	struct spdk_bs_dev *dev;
4909 	struct iter_ctx iter_ctx = { 0 };
4910 	struct spdk_blob *blob;
4911 	int i, rc;
4912 	struct spdk_bs_opts opts;
4913 
4914 	dev = init_dev();
4915 	spdk_bs_opts_init(&opts, sizeof(opts));
4916 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
4917 
4918 	/* Initialize a new blob store */
4919 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
4920 	poll_threads();
4921 	CU_ASSERT(g_bserrno == 0);
4922 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
4923 	bs = g_bs;
4924 
4925 	for (i = 0; i < 4; i++) {
4926 		blob = ut_blob_create_and_open(bs, NULL);
4927 		iter_ctx.blobid[i] = spdk_blob_get_id(blob);
4928 
4929 		/* Just save the blobid as an xattr for testing purposes. */
4930 		rc = spdk_blob_set_xattr(blob, "blobid", &iter_ctx.blobid[i], sizeof(spdk_blob_id));
4931 		CU_ASSERT(rc == 0);
4932 
4933 		/* Resize the blob */
4934 		spdk_blob_resize(blob, i, blob_op_complete, NULL);
4935 		poll_threads();
4936 		CU_ASSERT(g_bserrno == 0);
4937 
4938 		spdk_blob_close(blob, blob_op_complete, NULL);
4939 		poll_threads();
4940 		CU_ASSERT(g_bserrno == 0);
4941 	}
4942 
4943 	g_bserrno = -1;
4944 	spdk_bs_unload(bs, bs_op_complete, NULL);
4945 	poll_threads();
4946 	CU_ASSERT(g_bserrno == 0);
4947 
4948 	dev = init_dev();
4949 	spdk_bs_opts_init(&opts, sizeof(opts));
4950 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
4951 	opts.iter_cb_fn = test_iter;
4952 	opts.iter_cb_arg = &iter_ctx;
4953 
4954 	/* Test blob iteration during load after a clean shutdown. */
4955 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
4956 	poll_threads();
4957 	CU_ASSERT(g_bserrno == 0);
4958 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
4959 	bs = g_bs;
4960 
4961 	/* Dirty shutdown */
4962 	bs_free(bs);
4963 
4964 	dev = init_dev();
4965 	spdk_bs_opts_init(&opts, sizeof(opts));
4966 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
4967 	opts.iter_cb_fn = test_iter;
4968 	iter_ctx.current_iter = 0;
4969 	opts.iter_cb_arg = &iter_ctx;
4970 
4971 	/* Test blob iteration during load after a dirty shutdown. */
4972 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
4973 	poll_threads();
4974 	CU_ASSERT(g_bserrno == 0);
4975 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
4976 	bs = g_bs;
4977 
4978 	spdk_bs_unload(bs, bs_op_complete, NULL);
4979 	poll_threads();
4980 	CU_ASSERT(g_bserrno == 0);
4981 	g_bs = NULL;
4982 }
4983 
/* Read/write behavior across a snapshot: write to a thin-provisioned blob,
 * snapshot it, write to the clone again and verify the copy-on-write cost in
 * device read/write/copy bytes, then confirm the snapshot's data is
 * unaffected by the clone write. */
static void
blob_snapshot_rw(void)
{
	static const uint8_t zero[10 * 4096] = { 0 };
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob, *snapshot;
	struct spdk_io_channel *channel;
	struct spdk_blob_opts opts;
	spdk_blob_id blobid, snapshotid;
	uint64_t free_clusters;
	uint64_t cluster_size;
	uint64_t page_size;
	uint8_t payload_read[10 * 4096];
	uint8_t payload_write[10 * 4096];
	uint64_t write_bytes_start;
	uint64_t read_bytes_start;
	uint64_t copy_bytes_start;
	uint64_t write_bytes;
	uint64_t read_bytes;
	uint64_t copy_bytes;

	free_clusters = spdk_bs_free_cluster_count(bs);
	cluster_size = spdk_bs_get_cluster_size(bs);
	page_size = spdk_bs_get_page_size(bs);

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = true;
	opts.num_clusters = 5;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);
	/* Thin-provisioned creation must not consume any clusters */
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));

	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);

	/* Unallocated clusters must read back as zeros */
	memset(payload_read, 0xFF, sizeof(payload_read));
	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0);

	/* First write allocates cluster(s) on the original blob */
	memset(payload_write, 0xE5, sizeof(payload_write));
	spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs));

	/* Create snapshot from blob */
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snapshotid = g_blobid;

	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot = g_blob;
	CU_ASSERT(snapshot->data_ro == true);
	CU_ASSERT(snapshot->md_ro == true);

	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5);

	/* Snapshot the device byte counters before the copy-on-write write */
	write_bytes_start = g_dev_write_bytes;
	read_bytes_start = g_dev_read_bytes;
	copy_bytes_start = g_dev_copy_bytes;

	memset(payload_write, 0xAA, sizeof(payload_write));
	spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs));

	/* For a clone we need to allocate and copy one cluster, update one page of metadata
	 * and then write 10 pages of payload.
	 */
	write_bytes = g_dev_write_bytes - write_bytes_start;
	read_bytes = g_dev_read_bytes - read_bytes_start;
	copy_bytes = g_dev_copy_bytes - copy_bytes_start;
	if (g_dev_copy_enabled) {
		/* Device offload: the cluster is copied, not read+written */
		CU_ASSERT(copy_bytes == cluster_size);
	} else {
		CU_ASSERT(copy_bytes == 0);
	}
	if (g_use_extent_table) {
		/* Add one more page for EXTENT_PAGE write */
		CU_ASSERT(write_bytes + copy_bytes == page_size * 12 + cluster_size);
	} else {
		CU_ASSERT(write_bytes + copy_bytes == page_size * 11 + cluster_size);
	}
	/* The backing cluster is read exactly once, either directly or via copy */
	CU_ASSERT(read_bytes + copy_bytes == cluster_size);

	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);

	/* Data on snapshot should not change after write to clone */
	memset(payload_write, 0xE5, sizeof(payload_write));
	spdk_blob_io_read(snapshot, channel, payload_read, 4, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);

	ut_blob_close_and_delete(bs, blob);
	ut_blob_close_and_delete(bs, snapshot);

	spdk_bs_free_io_channel(channel);
	poll_threads();
	g_blob = NULL;
	g_blobid = 0;
}
5100 
/* Vectored I/O against a clone that has a snapshot: unallocated clusters must
 * read as zeros through readv, and a writev followed by a readv with a
 * different iovec split must round-trip the payload. */
static void
blob_snapshot_rw_iov(void)
{
	static const uint8_t zero[10 * 4096] = { 0 };
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob, *snapshot;
	struct spdk_io_channel *channel;
	struct spdk_blob_opts opts;
	spdk_blob_id blobid, snapshotid;
	uint64_t free_clusters;
	uint8_t payload_read[10 * 4096];
	uint8_t payload_write[10 * 4096];
	struct iovec iov_read[3];
	struct iovec iov_write[3];

	free_clusters = spdk_bs_free_cluster_count(bs);

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = true;
	opts.num_clusters = 5;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);
	/* Thin-provisioned creation must not consume any clusters */
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));

	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);

	/* Create snapshot from blob */
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snapshotid = g_blobid;

	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot = g_blob;
	CU_ASSERT(snapshot->data_ro == true);
	CU_ASSERT(snapshot->md_ro == true);
	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5);

	/* Payload should be all zeros from unallocated clusters */
	memset(payload_read, 0xAA, sizeof(payload_read));
	iov_read[0].iov_base = payload_read;
	iov_read[0].iov_len = 3 * 4096;
	iov_read[1].iov_base = payload_read + 3 * 4096;
	iov_read[1].iov_len = 4 * 4096;
	iov_read[2].iov_base = payload_read + 7 * 4096;
	iov_read[2].iov_len = 3 * 4096;
	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0);

	/* Write 10 pages split unevenly (1/5/4 pages) across three iovecs */
	memset(payload_write, 0xE5, sizeof(payload_write));
	iov_write[0].iov_base = payload_write;
	iov_write[0].iov_len = 1 * 4096;
	iov_write[1].iov_base = payload_write + 1 * 4096;
	iov_write[1].iov_len = 5 * 4096;
	iov_write[2].iov_base = payload_write + 6 * 4096;
	iov_write[2].iov_len = 4 * 4096;

	spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Read back with a different (3/4/3 pages) iovec split and verify */
	memset(payload_read, 0xAA, sizeof(payload_read));
	iov_read[0].iov_base = payload_read;
	iov_read[0].iov_len = 3 * 4096;
	iov_read[1].iov_base = payload_read + 3 * 4096;
	iov_read[1].iov_len = 4 * 4096;
	iov_read[2].iov_base = payload_read + 7 * 4096;
	iov_read[2].iov_len = 3 * 4096;
	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();

	ut_blob_close_and_delete(bs, blob);
	ut_blob_close_and_delete(bs, snapshot);
}
5190 
5191 /**
5192  * Inflate / decouple parent rw unit tests.
5193  *
5194  * --------------
5195  * original blob:         0         1         2         3         4
5196  *                   ,---------+---------+---------+---------+---------.
5197  *         snapshot  |xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|    -    |
5198  *                   +---------+---------+---------+---------+---------+
5199  *         snapshot2 |    -    |yyyyyyyyy|    -    |yyyyyyyyy|    -    |
5200  *                   +---------+---------+---------+---------+---------+
5201  *         blob      |    -    |zzzzzzzzz|    -    |    -    |    -    |
5202  *                   '---------+---------+---------+---------+---------'
5203  *                   .         .         .         .         .         .
5204  * --------          .         .         .         .         .         .
5205  * inflate:          .         .         .         .         .         .
5206  *                   ,---------+---------+---------+---------+---------.
5207  *         blob      |xxxxxxxxx|zzzzzzzzz|xxxxxxxxx|yyyyyyyyy|000000000|
5208  *                   '---------+---------+---------+---------+---------'
5209  *
5210  *         NOTE: needs to allocate 4 clusters, thin provisioning removed, dependency
5211  *               on snapshot2 and snapshot removed .         .         .
5212  *                   .         .         .         .         .         .
5213  * ----------------  .         .         .         .         .         .
5214  * decouple parent:  .         .         .         .         .         .
5215  *                   ,---------+---------+---------+---------+---------.
5216  *         snapshot  |xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|    -    |
5217  *                   +---------+---------+---------+---------+---------+
5218  *         blob      |    -    |zzzzzzzzz|    -    |yyyyyyyyy|    -    |
5219  *                   '---------+---------+---------+---------+---------'
5220  *
5221  *         NOTE: needs to allocate 1 cluster, 3 clusters unallocated, dependency
5222  *               on snapshot2 removed and on snapshot still exists. Snapshot2
5223  *               should remain a clone of snapshot.
5224  */
/* Core of the inflate / decouple-parent scenario (see the diagram in the
 * comment above).  Builds a two-level snapshot chain under a thin-provisioned
 * blob, then either fully inflates the blob (decouple_parent == false) or
 * decouples just its immediate parent (decouple_parent == true), checking
 * cluster accounting, the snapshot/clone relation tree, and data consistency
 * before and after. */
static void
_blob_inflate_rw(bool decouple_parent)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob, *snapshot, *snapshot2;
	struct spdk_io_channel *channel;
	struct spdk_blob_opts opts;
	spdk_blob_id blobid, snapshotid, snapshot2id;
	uint64_t free_clusters;
	uint64_t cluster_size;

	uint64_t payload_size;
	uint8_t *payload_read;
	uint8_t *payload_write;
	uint8_t *payload_clone;	/* expected "blob" contents, updated alongside writes */

	uint64_t pages_per_cluster;
	uint64_t pages_per_payload;

	int i;
	spdk_blob_id ids[2];
	size_t count;

	free_clusters = spdk_bs_free_cluster_count(bs);
	cluster_size = spdk_bs_get_cluster_size(bs);
	pages_per_cluster = cluster_size / spdk_bs_get_page_size(bs);
	pages_per_payload = pages_per_cluster * 5;

	payload_size = cluster_size * 5;

	payload_read = malloc(payload_size);
	SPDK_CU_ASSERT_FATAL(payload_read != NULL);

	payload_write = malloc(payload_size);
	SPDK_CU_ASSERT_FATAL(payload_write != NULL);

	payload_clone = malloc(payload_size);
	SPDK_CU_ASSERT_FATAL(payload_clone != NULL);

	channel = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(channel != NULL);

	/* Create blob */
	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = true;
	opts.num_clusters = 5;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));

	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);

	/* 1) Initial read should return zeroed payload */
	memset(payload_read, 0xFF, payload_size);
	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload,
			  blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size));

	/* Fill whole blob with a pattern, except last cluster (to be sure it
	 * isn't allocated) */
	memset(payload_write, 0xE5, payload_size - cluster_size);
	spdk_blob_io_write(blob, channel, payload_write, 0, pages_per_payload -
			   pages_per_cluster, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs));

	/* 2) Create snapshot from blob (first level) */
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snapshotid = g_blobid;

	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot = g_blob;
	CU_ASSERT(snapshot->data_ro == true);
	CU_ASSERT(snapshot->md_ro == true);

	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5);

	/* Write every second cluster with a pattern.
	 *
	 * Last cluster shouldn't be written, to be sure that snapshot nor clone
	 * doesn't allocate it.
	 *
	 * payload_clone stores expected result on "blob" read at the time and
	 * is used only to check data consistency on clone before and after
	 * inflation. Initially we fill it with a backing snapshots pattern
	 * used before.
	 */
	memset(payload_clone, 0xE5, payload_size - cluster_size);
	memset(payload_clone + payload_size - cluster_size, 0x00, cluster_size);
	memset(payload_write, 0xAA, payload_size);
	for (i = 1; i < 5; i += 2) {
		spdk_blob_io_write(blob, channel, payload_write, i * pages_per_cluster,
				   pages_per_cluster, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);

		/* Update expected result */
		memcpy(payload_clone + (cluster_size * i), payload_write,
		       cluster_size);
	}
	CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs));

	/* Check data consistency on clone */
	memset(payload_read, 0xFF, payload_size);
	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload,
			  blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0);

	/* 3) Create second levels snapshot from blob */
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snapshot2id = g_blobid;

	spdk_bs_open_blob(bs, snapshot2id, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot2 = g_blob;
	CU_ASSERT(snapshot2->data_ro == true);
	CU_ASSERT(snapshot2->md_ro == true);

	CU_ASSERT(spdk_blob_get_num_clusters(snapshot2) == 5);

	CU_ASSERT(snapshot2->parent_id == snapshotid);

	/* Write one cluster on the top level blob. This cluster (1) covers
	 * already allocated cluster in the snapshot2, so shouldn't be inflated
	 * at all */
	spdk_blob_io_write(blob, channel, payload_write, pages_per_cluster,
			   pages_per_cluster, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Update expected result */
	memcpy(payload_clone + cluster_size, payload_write, cluster_size);

	/* Check data consistency on clone */
	memset(payload_read, 0xFF, payload_size);
	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload,
			  blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0);


	/* Close all blobs */
	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_blob_close(snapshot2, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_blob_close(snapshot, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Check snapshot-clone relations */
	count = 2;
	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);
	CU_ASSERT(count == 1);
	CU_ASSERT(ids[0] == snapshot2id);

	count = 2;
	CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0);
	CU_ASSERT(count == 1);
	CU_ASSERT(ids[0] == blobid);

	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshot2id);

	/* Re-baseline the free count so the deltas below measure inflation only */
	free_clusters = spdk_bs_free_cluster_count(bs);
	if (!decouple_parent) {
		/* Do full blob inflation */
		spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);

		/* All clusters should be inflated (except one already allocated
		 * in a top level blob) */
		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 4);

		/* Check if relation tree updated correctly */
		count = 2;
		CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);

		/* snapshotid have one clone */
		CU_ASSERT(count == 1);
		CU_ASSERT(ids[0] == snapshot2id);

		/* snapshot2id have no clones */
		count = 2;
		CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0);
		CU_ASSERT(count == 0);

		CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID);
	} else {
		/* Decouple parent of blob */
		spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);

		/* Only one cluster from a parent should be inflated (second one
		 * is covered by a cluster written on a top level blob, and
		 * already allocated) */
		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 1);

		/* Check if relation tree updated correctly */
		count = 2;
		CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);

		/* snapshotid have two clones now */
		CU_ASSERT(count == 2);
		CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
		CU_ASSERT(ids[0] == snapshot2id || ids[1] == snapshot2id);

		/* snapshot2id have no clones */
		count = 2;
		CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0);
		CU_ASSERT(count == 0);

		CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
	}

	/* Try to delete snapshot2 (should pass) */
	spdk_bs_delete_blob(bs, snapshot2id, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Try to delete base snapshot */
	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Reopen blob after snapshot deletion */
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;

	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);

	/* Check data consistency on inflated blob */
	memset(payload_read, 0xFF, payload_size);
	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload,
			  blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();

	free(payload_read);
	free(payload_write);
	free(payload_clone);

	ut_blob_close_and_delete(bs, blob);
}
5499 
5500 static void
5501 blob_inflate_rw(void)
5502 {
5503 	_blob_inflate_rw(false);
5504 	_blob_inflate_rw(true);
5505 }
5506 
5507 /**
5508  * Snapshot-clones relation test
5509  *
5510  *         snapshot
5511  *            |
5512  *      +-----+-----+
5513  *      |           |
5514  *   blob(ro)   snapshot2
5515  *      |           |
5516  *   clone2      clone
5517  */
5518 static void
5519 blob_relations(void)
5520 {
5521 	struct spdk_blob_store *bs;
5522 	struct spdk_bs_dev *dev;
5523 	struct spdk_bs_opts bs_opts;
5524 	struct spdk_blob_opts opts;
5525 	struct spdk_blob *blob, *snapshot, *snapshot2, *clone, *clone2;
5526 	spdk_blob_id blobid, cloneid, snapshotid, cloneid2, snapshotid2;
5527 	int rc;
5528 	size_t count;
5529 	spdk_blob_id ids[10] = {};
5530 
5531 	dev = init_dev();
5532 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
5533 	snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");
5534 
5535 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
5536 	poll_threads();
5537 	CU_ASSERT(g_bserrno == 0);
5538 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
5539 	bs = g_bs;
5540 
5541 	/* 1. Create blob with 10 clusters */
5542 
5543 	ut_spdk_blob_opts_init(&opts);
5544 	opts.num_clusters = 10;
5545 
5546 	blob = ut_blob_create_and_open(bs, &opts);
5547 	blobid = spdk_blob_get_id(blob);
5548 
5549 	CU_ASSERT(!spdk_blob_is_read_only(blob));
5550 	CU_ASSERT(!spdk_blob_is_snapshot(blob));
5551 	CU_ASSERT(!spdk_blob_is_clone(blob));
5552 	CU_ASSERT(!spdk_blob_is_thin_provisioned(blob));
5553 
5554 	/* blob should not have underlying snapshot nor clones */
5555 	CU_ASSERT(blob->parent_id == SPDK_BLOBID_INVALID);
5556 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID);
5557 	count = SPDK_COUNTOF(ids);
5558 	rc = spdk_blob_get_clones(bs, blobid, ids, &count);
5559 	CU_ASSERT(rc == 0);
5560 	CU_ASSERT(count == 0);
5561 
5562 
5563 	/* 2. Create snapshot */
5564 
5565 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5566 	poll_threads();
5567 	CU_ASSERT(g_bserrno == 0);
5568 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5569 	snapshotid = g_blobid;
5570 
5571 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
5572 	poll_threads();
5573 	CU_ASSERT(g_bserrno == 0);
5574 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5575 	snapshot = g_blob;
5576 
5577 	CU_ASSERT(spdk_blob_is_read_only(snapshot));
5578 	CU_ASSERT(spdk_blob_is_snapshot(snapshot));
5579 	CU_ASSERT(!spdk_blob_is_clone(snapshot));
5580 	CU_ASSERT(snapshot->parent_id == SPDK_BLOBID_INVALID);
5581 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid) == SPDK_BLOBID_INVALID);
5582 
5583 	/* Check if original blob is converted to the clone of snapshot */
5584 	CU_ASSERT(!spdk_blob_is_read_only(blob));
5585 	CU_ASSERT(!spdk_blob_is_snapshot(blob));
5586 	CU_ASSERT(spdk_blob_is_clone(blob));
5587 	CU_ASSERT(spdk_blob_is_thin_provisioned(blob));
5588 	CU_ASSERT(blob->parent_id == snapshotid);
5589 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
5590 
5591 	count = SPDK_COUNTOF(ids);
5592 	rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
5593 	CU_ASSERT(rc == 0);
5594 	CU_ASSERT(count == 1);
5595 	CU_ASSERT(ids[0] == blobid);
5596 
5597 
5598 	/* 3. Create clone from snapshot */
5599 
5600 	spdk_bs_create_clone(bs, snapshotid, NULL, blob_op_with_id_complete, NULL);
5601 	poll_threads();
5602 	CU_ASSERT(g_bserrno == 0);
5603 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5604 	cloneid = g_blobid;
5605 
5606 	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
5607 	poll_threads();
5608 	CU_ASSERT(g_bserrno == 0);
5609 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5610 	clone = g_blob;
5611 
5612 	CU_ASSERT(!spdk_blob_is_read_only(clone));
5613 	CU_ASSERT(!spdk_blob_is_snapshot(clone));
5614 	CU_ASSERT(spdk_blob_is_clone(clone));
5615 	CU_ASSERT(spdk_blob_is_thin_provisioned(clone));
5616 	CU_ASSERT(clone->parent_id == snapshotid);
5617 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid);
5618 
5619 	count = SPDK_COUNTOF(ids);
5620 	rc = spdk_blob_get_clones(bs, cloneid, ids, &count);
5621 	CU_ASSERT(rc == 0);
5622 	CU_ASSERT(count == 0);
5623 
5624 	/* Check if clone is on the snapshot's list */
5625 	count = SPDK_COUNTOF(ids);
5626 	rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
5627 	CU_ASSERT(rc == 0);
5628 	CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
5629 	CU_ASSERT(ids[0] == cloneid || ids[1] == cloneid);
5630 
5631 
5632 	/* 4. Create snapshot of the clone */
5633 
5634 	spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL);
5635 	poll_threads();
5636 	CU_ASSERT(g_bserrno == 0);
5637 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5638 	snapshotid2 = g_blobid;
5639 
5640 	spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL);
5641 	poll_threads();
5642 	CU_ASSERT(g_bserrno == 0);
5643 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5644 	snapshot2 = g_blob;
5645 
5646 	CU_ASSERT(spdk_blob_is_read_only(snapshot2));
5647 	CU_ASSERT(spdk_blob_is_snapshot(snapshot2));
5648 	CU_ASSERT(spdk_blob_is_clone(snapshot2));
5649 	CU_ASSERT(snapshot2->parent_id == snapshotid);
5650 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid);
5651 
5652 	/* Check if clone is converted to the clone of snapshot2 and snapshot2
5653 	 * is a child of snapshot */
5654 	CU_ASSERT(!spdk_blob_is_read_only(clone));
5655 	CU_ASSERT(!spdk_blob_is_snapshot(clone));
5656 	CU_ASSERT(spdk_blob_is_clone(clone));
5657 	CU_ASSERT(spdk_blob_is_thin_provisioned(clone));
5658 	CU_ASSERT(clone->parent_id == snapshotid2);
5659 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2);
5660 
5661 	count = SPDK_COUNTOF(ids);
5662 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
5663 	CU_ASSERT(rc == 0);
5664 	CU_ASSERT(count == 1);
5665 	CU_ASSERT(ids[0] == cloneid);
5666 
5667 
5668 	/* 5. Try to create clone from read only blob */
5669 
5670 	/* Mark blob as read only */
5671 	spdk_blob_set_read_only(blob);
5672 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
5673 	poll_threads();
5674 	CU_ASSERT(g_bserrno == 0);
5675 
5676 	/* Check if previously created blob is read only clone */
5677 	CU_ASSERT(spdk_blob_is_read_only(blob));
5678 	CU_ASSERT(!spdk_blob_is_snapshot(blob));
5679 	CU_ASSERT(spdk_blob_is_clone(blob));
5680 	CU_ASSERT(spdk_blob_is_thin_provisioned(blob));
5681 
5682 	/* Create clone from read only blob */
5683 	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5684 	poll_threads();
5685 	CU_ASSERT(g_bserrno == 0);
5686 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5687 	cloneid2 = g_blobid;
5688 
5689 	spdk_bs_open_blob(bs, cloneid2, blob_op_with_handle_complete, NULL);
5690 	poll_threads();
5691 	CU_ASSERT(g_bserrno == 0);
5692 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5693 	clone2 = g_blob;
5694 
5695 	CU_ASSERT(!spdk_blob_is_read_only(clone2));
5696 	CU_ASSERT(!spdk_blob_is_snapshot(clone2));
5697 	CU_ASSERT(spdk_blob_is_clone(clone2));
5698 	CU_ASSERT(spdk_blob_is_thin_provisioned(clone2));
5699 
5700 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid);
5701 
5702 	count = SPDK_COUNTOF(ids);
5703 	rc = spdk_blob_get_clones(bs, blobid, ids, &count);
5704 	CU_ASSERT(rc == 0);
5705 
5706 	CU_ASSERT(count == 1);
5707 	CU_ASSERT(ids[0] == cloneid2);
5708 
5709 	/* Close blobs */
5710 
5711 	spdk_blob_close(clone2, blob_op_complete, NULL);
5712 	poll_threads();
5713 	CU_ASSERT(g_bserrno == 0);
5714 
5715 	spdk_blob_close(blob, blob_op_complete, NULL);
5716 	poll_threads();
5717 	CU_ASSERT(g_bserrno == 0);
5718 
5719 	spdk_blob_close(clone, blob_op_complete, NULL);
5720 	poll_threads();
5721 	CU_ASSERT(g_bserrno == 0);
5722 
5723 	spdk_blob_close(snapshot, blob_op_complete, NULL);
5724 	poll_threads();
5725 	CU_ASSERT(g_bserrno == 0);
5726 
5727 	spdk_blob_close(snapshot2, blob_op_complete, NULL);
5728 	poll_threads();
5729 	CU_ASSERT(g_bserrno == 0);
5730 
5731 	/* Try to delete snapshot with more than 1 clone */
5732 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
5733 	poll_threads();
5734 	CU_ASSERT(g_bserrno != 0);
5735 
5736 	ut_bs_reload(&bs, &bs_opts);
5737 
5738 	/* NULL ids array should return number of clones in count */
5739 	count = SPDK_COUNTOF(ids);
5740 	rc = spdk_blob_get_clones(bs, snapshotid, NULL, &count);
5741 	CU_ASSERT(rc == -ENOMEM);
5742 	CU_ASSERT(count == 2);
5743 
5744 	/* incorrect array size */
5745 	count = 1;
5746 	rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
5747 	CU_ASSERT(rc == -ENOMEM);
5748 	CU_ASSERT(count == 2);
5749 
5750 
5751 	/* Verify structure of loaded blob store */
5752 
5753 	/* snapshot */
5754 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid) == SPDK_BLOBID_INVALID);
5755 
5756 	count = SPDK_COUNTOF(ids);
5757 	rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
5758 	CU_ASSERT(rc == 0);
5759 	CU_ASSERT(count == 2);
5760 	CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
5761 	CU_ASSERT(ids[0] == snapshotid2 || ids[1] == snapshotid2);
5762 
5763 	/* blob */
5764 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
5765 	count = SPDK_COUNTOF(ids);
5766 	rc = spdk_blob_get_clones(bs, blobid, ids, &count);
5767 	CU_ASSERT(rc == 0);
5768 	CU_ASSERT(count == 1);
5769 	CU_ASSERT(ids[0] == cloneid2);
5770 
5771 	/* clone */
5772 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2);
5773 	count = SPDK_COUNTOF(ids);
5774 	rc = spdk_blob_get_clones(bs, cloneid, ids, &count);
5775 	CU_ASSERT(rc == 0);
5776 	CU_ASSERT(count == 0);
5777 
5778 	/* snapshot2 */
5779 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid);
5780 	count = SPDK_COUNTOF(ids);
5781 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
5782 	CU_ASSERT(rc == 0);
5783 	CU_ASSERT(count == 1);
5784 	CU_ASSERT(ids[0] == cloneid);
5785 
5786 	/* clone2 */
5787 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid);
5788 	count = SPDK_COUNTOF(ids);
5789 	rc = spdk_blob_get_clones(bs, cloneid2, ids, &count);
5790 	CU_ASSERT(rc == 0);
5791 	CU_ASSERT(count == 0);
5792 
5793 	/* Try to delete blob that user should not be able to remove */
5794 
5795 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
5796 	poll_threads();
5797 	CU_ASSERT(g_bserrno != 0);
5798 
5799 	/* Remove all blobs */
5800 
5801 	spdk_bs_delete_blob(bs, cloneid, blob_op_complete, NULL);
5802 	poll_threads();
5803 	CU_ASSERT(g_bserrno == 0);
5804 
5805 	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
5806 	poll_threads();
5807 	CU_ASSERT(g_bserrno == 0);
5808 
5809 	spdk_bs_delete_blob(bs, cloneid2, blob_op_complete, NULL);
5810 	poll_threads();
5811 	CU_ASSERT(g_bserrno == 0);
5812 
5813 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
5814 	poll_threads();
5815 	CU_ASSERT(g_bserrno == 0);
5816 
5817 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
5818 	poll_threads();
5819 	CU_ASSERT(g_bserrno == 0);
5820 
5821 	spdk_bs_unload(bs, bs_op_complete, NULL);
5822 	poll_threads();
5823 	CU_ASSERT(g_bserrno == 0);
5824 
5825 	g_bs = NULL;
5826 }
5827 
5828 /**
5829  * Snapshot-clones relation test 2
5830  *
5831  *         snapshot1
5832  *            |
5833  *         snapshot2
5834  *            |
5835  *      +-----+-----+
5836  *      |           |
5837  *   blob(ro)   snapshot3
5838  *      |           |
5839  *      |       snapshot4
5840  *      |        |     |
5841  *   clone2   clone  clone3
5842  */
static void
blob_relations2(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct spdk_bs_opts bs_opts;
	struct spdk_blob_opts opts;
	struct spdk_blob *blob, *snapshot1, *snapshot2, *snapshot3, *snapshot4, *clone, *clone2;
	spdk_blob_id blobid, snapshotid1, snapshotid2, snapshotid3, snapshotid4, cloneid, cloneid2,
		     cloneid3;
	int rc;
	size_t count;
	spdk_blob_id ids[10] = {};

	dev = init_dev();
	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
	snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");

	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	/* 1. Create blob with 10 clusters */

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);

	/* 2. Create snapshot1 */

	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snapshotid1 = g_blobid;

	spdk_bs_open_blob(bs, snapshotid1, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot1 = g_blob;

	/* snapshot1 is the root of the tree - it has no parent. */
	CU_ASSERT(snapshot1->parent_id == SPDK_BLOBID_INVALID);
	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid1) == SPDK_BLOBID_INVALID);

	CU_ASSERT(blob->parent_id == snapshotid1);
	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid1);

	/* Check if blob is the clone of snapshot1 */
	CU_ASSERT(blob->parent_id == snapshotid1);
	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid1);

	count = SPDK_COUNTOF(ids);
	rc = spdk_blob_get_clones(bs, snapshotid1, ids, &count);
	CU_ASSERT(rc == 0);
	CU_ASSERT(count == 1);
	CU_ASSERT(ids[0] == blobid);

	/* 3. Create another snapshot */

	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snapshotid2 = g_blobid;

	spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot2 = g_blob;

	/* The new snapshot is inserted between snapshot1 and blob,
	 * so it is itself a clone of snapshot1. */
	CU_ASSERT(spdk_blob_is_clone(snapshot2));
	CU_ASSERT(snapshot2->parent_id == snapshotid1);
	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid1);

	/* Check if snapshot2 is the clone of snapshot1 and blob
	 * is a child of snapshot2 */
	CU_ASSERT(blob->parent_id == snapshotid2);
	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid2);

	count = SPDK_COUNTOF(ids);
	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
	CU_ASSERT(rc == 0);
	CU_ASSERT(count == 1);
	CU_ASSERT(ids[0] == blobid);

	/* 4. Create clone from snapshot */

	spdk_bs_create_clone(bs, snapshotid2, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	cloneid = g_blobid;

	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	clone = g_blob;

	CU_ASSERT(clone->parent_id == snapshotid2);
	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2);

	/* Check if clone is on the snapshot's list */
	count = SPDK_COUNTOF(ids);
	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
	CU_ASSERT(rc == 0);
	CU_ASSERT(count == 2);
	CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
	CU_ASSERT(ids[0] == cloneid || ids[1] == cloneid);

	/* 5. Create snapshot of the clone */

	spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snapshotid3 = g_blobid;

	spdk_bs_open_blob(bs, snapshotid3, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot3 = g_blob;

	CU_ASSERT(snapshot3->parent_id == snapshotid2);
	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid3) == snapshotid2);

	/* Check if clone is converted to the clone of snapshot3 and snapshot3
	 * is a child of snapshot2 */
	CU_ASSERT(clone->parent_id == snapshotid3);
	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3);

	count = SPDK_COUNTOF(ids);
	rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count);
	CU_ASSERT(rc == 0);
	CU_ASSERT(count == 1);
	CU_ASSERT(ids[0] == cloneid);

	/* 6. Create another snapshot of the clone */

	spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snapshotid4 = g_blobid;

	spdk_bs_open_blob(bs, snapshotid4, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot4 = g_blob;

	CU_ASSERT(snapshot4->parent_id == snapshotid3);
	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid4) == snapshotid3);

	/* Check if clone is converted to the clone of snapshot4 and snapshot4
	 * is a child of snapshot3 */
	CU_ASSERT(clone->parent_id == snapshotid4);
	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid4);

	count = SPDK_COUNTOF(ids);
	rc = spdk_blob_get_clones(bs, snapshotid4, ids, &count);
	CU_ASSERT(rc == 0);
	CU_ASSERT(count == 1);
	CU_ASSERT(ids[0] == cloneid);

	/* 7. Remove snapshot 4 */

	ut_blob_close_and_delete(bs, snapshot4);

	/* Check if relations are back to state from before creating snapshot 4 */
	CU_ASSERT(clone->parent_id == snapshotid3);
	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3);

	count = SPDK_COUNTOF(ids);
	rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count);
	CU_ASSERT(rc == 0);
	CU_ASSERT(count == 1);
	CU_ASSERT(ids[0] == cloneid);

	/* 8. Create second clone of snapshot 3 and try to remove snapshot 3 */

	spdk_bs_create_clone(bs, snapshotid3, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	cloneid3 = g_blobid;

	/* Deletion must fail: snapshot3 now has two clones (clone and clone3). */
	spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno != 0);

	/* 9. Open snapshot 3 again and try to remove it while clone 3 is closed */

	spdk_bs_open_blob(bs, snapshotid3, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot3 = g_blob;

	spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno != 0);

	spdk_blob_close(snapshot3, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_delete_blob(bs, cloneid3, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* 10. Remove snapshot 1 */

	ut_blob_close_and_delete(bs, snapshot1);

	/* With snapshot 1 removed, snapshot 2 becomes the root of the tree:
	 * no parent and two children (blob and snapshot 3). */
	CU_ASSERT(snapshot2->parent_id == SPDK_BLOBID_INVALID);
	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == SPDK_BLOBID_INVALID);

	count = SPDK_COUNTOF(ids);
	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
	CU_ASSERT(rc == 0);
	CU_ASSERT(count == 2);
	CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
	CU_ASSERT(ids[0] == snapshotid3 || ids[1] == snapshotid3);

	/* 11. Try to create clone from read only blob */

	/* Mark blob as read only */
	spdk_blob_set_read_only(blob);
	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Create clone from read only blob */
	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	cloneid2 = g_blobid;

	spdk_bs_open_blob(bs, cloneid2, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	clone2 = g_blob;

	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid);

	count = SPDK_COUNTOF(ids);
	rc = spdk_blob_get_clones(bs, blobid, ids, &count);
	CU_ASSERT(rc == 0);
	CU_ASSERT(count == 1);
	CU_ASSERT(ids[0] == cloneid2);

	/* Close blobs */

	spdk_blob_close(clone2, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_blob_close(clone, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_blob_close(snapshot2, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_blob_close(snapshot3, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Reload the blob store to verify relations survive persistence. */
	ut_bs_reload(&bs, &bs_opts);

	/* Verify structure of loaded blob store */

	/* snapshot2 */
	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == SPDK_BLOBID_INVALID);

	count = SPDK_COUNTOF(ids);
	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
	CU_ASSERT(rc == 0);
	CU_ASSERT(count == 2);
	CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
	CU_ASSERT(ids[0] == snapshotid3 || ids[1] == snapshotid3);

	/* blob */
	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid2);
	count = SPDK_COUNTOF(ids);
	rc = spdk_blob_get_clones(bs, blobid, ids, &count);
	CU_ASSERT(rc == 0);
	CU_ASSERT(count == 1);
	CU_ASSERT(ids[0] == cloneid2);

	/* clone */
	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3);
	count = SPDK_COUNTOF(ids);
	rc = spdk_blob_get_clones(bs, cloneid, ids, &count);
	CU_ASSERT(rc == 0);
	CU_ASSERT(count == 0);

	/* snapshot3 */
	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid3) == snapshotid2);
	count = SPDK_COUNTOF(ids);
	rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count);
	CU_ASSERT(rc == 0);
	CU_ASSERT(count == 1);
	CU_ASSERT(ids[0] == cloneid);

	/* clone2 */
	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid);
	count = SPDK_COUNTOF(ids);
	rc = spdk_blob_get_clones(bs, cloneid2, ids, &count);
	CU_ASSERT(rc == 0);
	CU_ASSERT(count == 0);

	/* Try to delete all blobs in the worse possible order */

	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno != 0);

	spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno != 0);

	spdk_bs_delete_blob(bs, cloneid, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_delete_blob(bs, cloneid2, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	g_bs = NULL;
}
6207 
6208 /**
6209  * Snapshot-clones relation test 3
6210  *
6211  *         snapshot0
6212  *            |
6213  *         snapshot1
6214  *            |
6215  *         snapshot2
6216  *            |
6217  *           blob
6218  */
static void
blob_relations3(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct spdk_io_channel *channel;
	struct spdk_bs_opts bs_opts;
	struct spdk_blob_opts opts;
	struct spdk_blob *blob;
	spdk_blob_id blobid, snapshotid0, snapshotid1, snapshotid2;

	dev = init_dev();
	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
	snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");

	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	/* decouple_parent needs an io channel to copy cluster data through. */
	channel = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(channel != NULL);

	/* 1. Create blob with 10 clusters */
	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);

	/* 2. Create snapshot0 */
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snapshotid0 = g_blobid;

	/* 3. Create snapshot1 */
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snapshotid1 = g_blobid;

	/* 4. Create snapshot2 */
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snapshotid2 = g_blobid;

	/* 5. Decouple blob */
	spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* 6. Decouple snapshot2. Make sure updating md of snapshot2 is possible */
	spdk_bs_blob_decouple_parent(bs, channel, snapshotid2, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* 7. Delete blob */
	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* 8. Delete snapshot2.
	 * If md of snapshot 2 was updated, it should be possible to delete it */
	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Remove remaining blobs and unload bs */
	spdk_bs_delete_blob(bs, snapshotid1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_delete_blob(bs, snapshotid0, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	g_bs = NULL;
}
6314 
/* Inject a device power failure at every possible write count during a blob
 * md sync that starts from a (forced) "clean" state. After each attempt,
 * verify the on-disk super block was not corrupted: every field except the
 * clean flag (and its dependent crc) must be byte-identical to the saved
 * copy. The loop ends once the failure threshold is large enough for the
 * sync to complete, at which point both bs and super block must report
 * not-clean. */
static void
blobstore_clean_power_failure(void)
{
	struct spdk_blob_store *bs;
	struct spdk_blob *blob;
	struct spdk_power_failure_thresholds thresholds = {};
	bool clean = false;
	/* g_dev_buffer is the backing memory of the test bs_dev; the super
	 * block lives at offset 0. */
	struct spdk_bs_super_block *super = (struct spdk_bs_super_block *)&g_dev_buffer[0];
	struct spdk_bs_super_block super_copy = {};

	thresholds.general_threshold = 1;
	while (!clean) {
		/* Create bs and blob */
		suite_blob_setup();
		SPDK_CU_ASSERT_FATAL(g_bs != NULL);
		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
		bs = g_bs;
		blob = g_blob;

		/* Super block should not change for rest of the UT,
		 * save it and compare later. */
		memcpy(&super_copy, super, sizeof(struct spdk_bs_super_block));
		SPDK_CU_ASSERT_FATAL(super->clean == 0);
		SPDK_CU_ASSERT_FATAL(bs->clean == 0);

		/* Force bs/super block in a clean state.
		 * Along with marking blob dirty, to cause blob persist. */
		blob->state = SPDK_BLOB_STATE_DIRTY;
		bs->clean = 1;
		super->clean = 1;
		super->crc = blob_md_page_calc_crc(super);

		g_bserrno = -1;
		dev_set_power_failure_thresholds(thresholds);
		spdk_blob_sync_md(blob, blob_op_complete, NULL);
		poll_threads();
		dev_reset_power_failure_event();

		if (g_bserrno == 0) {
			/* After successful md sync, both bs and super block
			 * should be marked as not clean. */
			SPDK_CU_ASSERT_FATAL(bs->clean == 0);
			SPDK_CU_ASSERT_FATAL(super->clean == 0);
			clean = true;
		}

		/* Depending on the point of failure, super block was either updated or not. */
		super_copy.clean = super->clean;
		super_copy.crc = blob_md_page_calc_crc(&super_copy);
		/* Compare that the values in super block remained unchanged. */
		SPDK_CU_ASSERT_FATAL(!memcmp(&super_copy, super, sizeof(struct spdk_bs_super_block)));

		/* Delete blob and unload bs */
		suite_blob_cleanup();

		/* Fail one device operation later on the next iteration. */
		thresholds.general_threshold++;
	}
}
6373 
/* Inject a device power failure at every possible write count during
 * snapshot deletion. After each attempt, dirty-load the blob store and
 * verify it is in a consistent state: either the snapshot still opens with
 * all parent/clone relations intact and no pending-removal xattr, or it is
 * gone and the clone no longer has a parent. The loop ends once the
 * threshold is large enough for the delete to succeed with the snapshot
 * fully removed. */
static void
blob_delete_snapshot_power_failure(void)
{
	struct spdk_bs_dev *dev;
	struct spdk_blob_store *bs;
	struct spdk_blob_opts opts;
	struct spdk_blob *blob, *snapshot;
	struct spdk_power_failure_thresholds thresholds = {};
	spdk_blob_id blobid, snapshotid;
	const void *value;
	size_t value_len;
	size_t count;
	spdk_blob_id ids[3] = {};
	int rc;
	bool deleted = false;
	int delete_snapshot_bserrno = -1;

	thresholds.general_threshold = 1;
	while (!deleted) {
		dev = init_dev();

		spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		SPDK_CU_ASSERT_FATAL(g_bs != NULL);
		bs = g_bs;

		/* Create blob */
		ut_spdk_blob_opts_init(&opts);
		opts.num_clusters = 10;

		spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
		blobid = g_blobid;

		/* Create snapshot */
		spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
		snapshotid = g_blobid;
		/* The blob's 10 clusters moved to the snapshot; cluster 0 is md,
		 * so clusters 1-10 are allocated and cluster 11 is free. */
		SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1));
		SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11));

		dev_set_power_failure_thresholds(thresholds);

		spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
		poll_threads();
		delete_snapshot_bserrno = g_bserrno;

		/* Do not shut down cleanly. Assumption is that after snapshot deletion
		 * reports success, changes to both blobs should already persisted. */
		dev_reset_power_failure_event();
		ut_bs_dirty_load(&bs, NULL);

		/* Cluster usage must be unchanged regardless of where the delete
		 * was interrupted. */
		SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1));
		SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11));

		spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
		blob = g_blob;
		SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == true);

		spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
		poll_threads();

		if (g_bserrno == 0) {
			/* Snapshot still exists: all relations must be intact and
			 * no partial-delete state may remain. */
			SPDK_CU_ASSERT_FATAL(g_blob != NULL);
			snapshot = g_blob;
			CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
			count = SPDK_COUNTOF(ids);
			rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
			CU_ASSERT(rc == 0);
			CU_ASSERT(count == 1);
			CU_ASSERT(ids[0] == blobid);
			rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_PENDING_REMOVAL, &value, &value_len);
			CU_ASSERT(rc != 0);
			SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(snapshot) == false);

			spdk_blob_close(snapshot, blob_op_complete, NULL);
			poll_threads();
			CU_ASSERT(g_bserrno == 0);
		} else {
			CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID);
			/* Snapshot might have been left in unrecoverable state, so it does not open.
			 * Yet delete might perform further changes to the clone after that.
			 * This UT should test until snapshot is deleted and delete call succeeds. */
			if (delete_snapshot_bserrno == 0) {
				deleted = true;
			}
		}

		spdk_blob_close(blob, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);

		spdk_bs_unload(bs, bs_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);

		/* Fail one device operation later on the next iteration. */
		thresholds.general_threshold++;
	}
}
6481 
/*
 * Simulate a power failure at every possible step of snapshot creation.
 *
 * Each iteration re-initializes the blobstore, creates a 10-cluster blob,
 * arms the device to fail after thresholds.general_threshold writes, then
 * attempts to create a snapshot of that blob.  The blobstore is reloaded
 * with a dirty (unclean-shutdown) load and checked for consistency:
 *  - if the snapshot opens, it must be fully linked: the blob is its thin
 *    provisioned clone and the SNAPSHOT_IN_PROGRESS xattr is gone;
 *  - otherwise no parent relationship may remain and the blob must still
 *    be thick provisioned.
 * The threshold grows by one each pass until the snapshot creation both
 * reports success and is observed intact after the dirty load.
 */
static void
blob_create_snapshot_power_failure(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_bs_dev *dev;
	struct spdk_blob_opts opts;
	struct spdk_blob *blob, *snapshot;
	struct spdk_power_failure_thresholds thresholds = {};
	spdk_blob_id blobid, snapshotid;
	const void *value;
	size_t value_len;
	size_t count;
	spdk_blob_id ids[3] = {};
	int rc;
	bool created = false;
	/* Result of the most recent spdk_bs_create_snapshot() attempt. */
	int create_snapshot_bserrno = -1;

	thresholds.general_threshold = 1;
	while (!created) {
		dev = init_dev();

		spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		SPDK_CU_ASSERT_FATAL(g_bs != NULL);
		bs = g_bs;

		/* Create blob */
		ut_spdk_blob_opts_init(&opts);
		opts.num_clusters = 10;

		spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
		blobid = g_blobid;
		SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1));
		SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11));

		/* Arm the simulated power failure for the snapshot attempt below. */
		dev_set_power_failure_thresholds(thresholds);

		/* Create snapshot */
		spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
		poll_threads();
		create_snapshot_bserrno = g_bserrno;
		snapshotid = g_blobid;
		SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1));
		SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11));

		/* Do not shut down cleanly. Assumption is that after create snapshot
		 * reports success, both blobs should be power-fail safe. */
		dev_reset_power_failure_event();
		ut_bs_dirty_load(&bs, NULL);

		SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1));
		SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11));

		spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
		blob = g_blob;

		/* Only try to open the snapshot if the create call got far enough
		 * to hand back a valid ID. */
		if (snapshotid != SPDK_BLOBID_INVALID) {
			spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
			poll_threads();
		}

		if ((snapshotid != SPDK_BLOBID_INVALID) && (g_bserrno == 0)) {
			SPDK_CU_ASSERT_FATAL(g_blob != NULL);
			snapshot = g_blob;
			SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == true);
			SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(snapshot) == false);
			CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
			count = SPDK_COUNTOF(ids);
			rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
			CU_ASSERT(rc == 0);
			CU_ASSERT(count == 1);
			CU_ASSERT(ids[0] == blobid);
			/* A completed snapshot must not carry the in-progress marker. */
			rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_IN_PROGRESS, &value, &value_len);
			CU_ASSERT(rc != 0);

			spdk_blob_close(snapshot, blob_op_complete, NULL);
			poll_threads();
			CU_ASSERT(g_bserrno == 0);
			/* Stop once creation reported success AND the snapshot survived
			 * the unclean shutdown intact. */
			if (create_snapshot_bserrno == 0) {
				created = true;
			}
		} else {
			CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID);
			SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == false);
		}

		spdk_blob_close(blob, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);

		spdk_bs_unload(bs, bs_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);

		thresholds.general_threshold++;
	}
}
6586 
6587 static void
6588 test_io_write(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
6589 {
6590 	uint8_t payload_ff[64 * 512];
6591 	uint8_t payload_aa[64 * 512];
6592 	uint8_t payload_00[64 * 512];
6593 	uint8_t *cluster0, *cluster1;
6594 
6595 	memset(payload_ff, 0xFF, sizeof(payload_ff));
6596 	memset(payload_aa, 0xAA, sizeof(payload_aa));
6597 	memset(payload_00, 0x00, sizeof(payload_00));
6598 
6599 	/* Try to perform I/O with io unit = 512 */
6600 	spdk_blob_io_write(blob, channel, payload_ff, 0, 1, blob_op_complete, NULL);
6601 	poll_threads();
6602 	CU_ASSERT(g_bserrno == 0);
6603 
6604 	/* If thin provisioned is set cluster should be allocated now */
6605 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[0] != 0);
6606 	cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen];
6607 
6608 	/* Each character 0-F symbolizes single io_unit containing 512 bytes block filled with that character.
6609 	* Each page is separated by |. Whole block [...] symbolizes one cluster (containing 4 pages). */
6610 	/* cluster0: [ F000 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
6611 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6612 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 31 * 512) == 0);
6613 
6614 	/* Verify write with offset on first page */
6615 	spdk_blob_io_write(blob, channel, payload_ff, 2, 1, blob_op_complete, NULL);
6616 	poll_threads();
6617 	CU_ASSERT(g_bserrno == 0);
6618 
6619 	/* cluster0: [ F0F0 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
6620 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6621 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6622 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6623 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6624 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_00, 28 * 512) == 0);
6625 
6626 	/* Verify write with offset on first page */
6627 	spdk_blob_io_write(blob, channel, payload_ff, 4, 4, blob_op_complete, NULL);
6628 	poll_threads();
6629 
6630 	/* cluster0: [ F0F0 FFFF | 0000 0000 | 0000 0000 | 0000 0000 ] */
6631 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6632 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6633 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6634 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6635 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 4 * 512) == 0);
6636 	CU_ASSERT(memcmp(cluster0 + 8 * 512, payload_00, 24 * 512) == 0);
6637 
6638 	/* Verify write with offset on second page */
6639 	spdk_blob_io_write(blob, channel, payload_ff, 8, 4, blob_op_complete, NULL);
6640 	poll_threads();
6641 
6642 	/* cluster0: [ F0F0 FFFF | FFFF 0000 | 0000 0000 | 0000 0000 ] */
6643 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6644 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6645 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6646 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6647 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 8 * 512) == 0);
6648 	CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0);
6649 
6650 	/* Verify write across multiple pages */
6651 	spdk_blob_io_write(blob, channel, payload_aa, 4, 8, blob_op_complete, NULL);
6652 	poll_threads();
6653 
6654 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 0000 ] */
6655 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6656 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6657 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6658 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6659 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
6660 	CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0);
6661 
6662 	/* Verify write across multiple clusters */
6663 	spdk_blob_io_write(blob, channel, payload_ff, 28, 8, blob_op_complete, NULL);
6664 	poll_threads();
6665 
6666 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0);
6667 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
6668 
6669 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6670 	 * cluster1: [ FFFF 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
6671 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6672 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6673 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6674 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6675 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
6676 	CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0);
6677 
6678 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0);
6679 	CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 28 * 512) == 0);
6680 
6681 	/* Verify write to second cluster */
6682 	spdk_blob_io_write(blob, channel, payload_ff, 32 + 12, 2, blob_op_complete, NULL);
6683 	poll_threads();
6684 
6685 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0);
6686 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
6687 
6688 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6689 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] */
6690 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6691 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6692 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6693 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6694 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
6695 	CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0);
6696 
6697 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0);
6698 	CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 8 * 512) == 0);
6699 	CU_ASSERT(memcmp(cluster1 + 12 * 512, payload_ff, 2 * 512) == 0);
6700 	CU_ASSERT(memcmp(cluster1 + 14 * 512, payload_00, 18 * 512) == 0);
6701 }
6702 
/*
 * Read back the two-cluster pattern (laid down by the preceding write test;
 * see the cluster diagrams below) at various offsets and lengths: a single
 * io_unit, within a page, across pages, across clusters, one full cluster,
 * and the whole blob.  Parentheses in the diagrams mark the span being read.
 */
static void
test_io_read(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
{
	uint8_t payload_read[64 * 512];
	uint8_t payload_ff[64 * 512];
	uint8_t payload_aa[64 * 512];
	uint8_t payload_00[64 * 512];

	memset(payload_ff, 0xFF, sizeof(payload_ff));
	memset(payload_aa, 0xAA, sizeof(payload_aa));
	memset(payload_00, 0x00, sizeof(payload_00));

	/* Read only first io unit */
	/* cluster0: [ (F)0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
	 * payload_read: F000 0000 | 0000 0000 ... */
	memset(payload_read, 0x00, sizeof(payload_read));
	spdk_blob_io_read(blob, channel, payload_read, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 31 * 512) == 0);

	/* Read four io_units starting from offset = 2
	 * cluster0: [ F0(F0 AA)AA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
	 * payload_read: F0AA 0000 | 0000 0000 ... */

	memset(payload_read, 0x00, sizeof(payload_read));
	spdk_blob_io_read(blob, channel, payload_read, 2, 4, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0);
	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_aa, 512) == 0);
	CU_ASSERT(memcmp(payload_read + 3 * 512, payload_aa, 512) == 0);
	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0);

	/* Read eight io_units across multiple pages
	 * cluster0: [ F0F0 (AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ]
	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
	 * payload_read: AAAA AAAA | 0000 0000 ... */
	memset(payload_read, 0x00, sizeof(payload_read));
	spdk_blob_io_read(blob, channel, payload_read, 4, 8, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_aa, 8 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0);

	/* Read eight io_units across multiple clusters
	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 (FFFF ]
	 * cluster1: [ FFFF) 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
	 * payload_read: FFFF FFFF | 0000 0000 ... */
	memset(payload_read, 0x00, sizeof(payload_read));
	spdk_blob_io_read(blob, channel, payload_read, 28, 8, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 8 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0);

	/* Read four io_units from second cluster
	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
	 * cluster1: [ FFFF 0000 | 00(00 FF)00 | 0000 0000 | 0000 0000 ]
	 * payload_read: 00FF 0000 | 0000 0000 ... */
	memset(payload_read, 0x00, sizeof(payload_read));
	spdk_blob_io_read(blob, channel, payload_read, 32 + 10, 4, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_00, 2 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 2 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0);

	/* Read second cluster
	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
	 * cluster1: [ (FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ]
	 * payload_read: FFFF 0000 | 0000 FF00 ... */
	memset(payload_read, 0x00, sizeof(payload_read));
	spdk_blob_io_read(blob, channel, payload_read, 32, 32, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 4 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 8 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + 12 * 512, payload_ff, 2 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + 14 * 512, payload_00, 18 * 512) == 0);

	/* Read whole two clusters
	 * cluster0: [ (F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ] */
	memset(payload_read, 0x00, sizeof(payload_read));
	spdk_blob_io_read(blob, channel, payload_read, 0, 64, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0);
	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 512) == 0);
	CU_ASSERT(memcmp(payload_read + 3 * 512, payload_00, 512) == 0);
	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_aa, 8 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + 28 * 512, payload_ff, 4 * 512) == 0);

	CU_ASSERT(memcmp(payload_read + (32 + 0) * 512, payload_ff, 4 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + (32 + 4) * 512, payload_00, 8 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + (32 + 12) * 512, payload_ff, 2 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + (32 + 14) * 512, payload_00, 18 * 512) == 0);
}
6812 
6813 
6814 static void
6815 test_io_unmap(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
6816 {
6817 	uint8_t payload_ff[64 * 512];
6818 	uint8_t payload_aa[64 * 512];
6819 	uint8_t payload_00[64 * 512];
6820 	uint8_t *cluster0, *cluster1;
6821 
6822 	memset(payload_ff, 0xFF, sizeof(payload_ff));
6823 	memset(payload_aa, 0xAA, sizeof(payload_aa));
6824 	memset(payload_00, 0x00, sizeof(payload_00));
6825 
6826 	cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen];
6827 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
6828 
6829 	/* Unmap */
6830 	spdk_blob_io_unmap(blob, channel, 0, 64, blob_op_complete, NULL);
6831 	poll_threads();
6832 
6833 	CU_ASSERT(g_bserrno == 0);
6834 
6835 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_00, 32 * 512) == 0);
6836 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_00, 32 * 512) == 0);
6837 }
6838 
6839 static void
6840 test_io_zeroes(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
6841 {
6842 	uint8_t payload_ff[64 * 512];
6843 	uint8_t payload_aa[64 * 512];
6844 	uint8_t payload_00[64 * 512];
6845 	uint8_t *cluster0, *cluster1;
6846 
6847 	memset(payload_ff, 0xFF, sizeof(payload_ff));
6848 	memset(payload_aa, 0xAA, sizeof(payload_aa));
6849 	memset(payload_00, 0x00, sizeof(payload_00));
6850 
6851 	cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen];
6852 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
6853 
6854 	/* Write zeroes  */
6855 	spdk_blob_io_write_zeroes(blob, channel, 0, 64, blob_op_complete, NULL);
6856 	poll_threads();
6857 
6858 	CU_ASSERT(g_bserrno == 0);
6859 
6860 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_00, 32 * 512) == 0);
6861 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_00, 32 * 512) == 0);
6862 }
6863 
6864 static inline void
6865 test_blob_io_writev(struct spdk_blob *blob, struct spdk_io_channel *channel,
6866 		    struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
6867 		    spdk_blob_op_complete cb_fn, void *cb_arg, struct spdk_blob_ext_io_opts *io_opts)
6868 {
6869 	if (io_opts) {
6870 		g_dev_writev_ext_called = false;
6871 		memset(&g_blob_ext_io_opts, 0, sizeof(g_blob_ext_io_opts));
6872 		spdk_blob_io_writev_ext(blob, channel, iov, iovcnt, offset, length, blob_op_complete, NULL,
6873 					io_opts);
6874 	} else {
6875 		spdk_blob_io_writev(blob, channel, iov, iovcnt, offset, length, blob_op_complete, NULL);
6876 	}
6877 	poll_threads();
6878 	CU_ASSERT(g_bserrno == 0);
6879 	if (io_opts) {
6880 		CU_ASSERT(g_dev_writev_ext_called);
6881 		CU_ASSERT(memcmp(io_opts, &g_blob_ext_io_opts, sizeof(g_blob_ext_io_opts)) == 0);
6882 	}
6883 }
6884 
6885 static void
6886 test_iov_write(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel,
6887 	       bool ext_api)
6888 {
6889 	uint8_t payload_ff[64 * 512];
6890 	uint8_t payload_aa[64 * 512];
6891 	uint8_t payload_00[64 * 512];
6892 	uint8_t *cluster0, *cluster1;
6893 	struct iovec iov[4];
6894 	struct spdk_blob_ext_io_opts ext_opts = {
6895 		.memory_domain = (struct spdk_memory_domain *)0xfeedbeef,
6896 		.memory_domain_ctx = (void *)0xf00df00d,
6897 		.size = sizeof(struct spdk_blob_ext_io_opts),
6898 		.user_ctx = (void *)123,
6899 	};
6900 
6901 	memset(payload_ff, 0xFF, sizeof(payload_ff));
6902 	memset(payload_aa, 0xAA, sizeof(payload_aa));
6903 	memset(payload_00, 0x00, sizeof(payload_00));
6904 
6905 	/* Try to perform I/O with io unit = 512 */
6906 	iov[0].iov_base = payload_ff;
6907 	iov[0].iov_len = 1 * 512;
6908 
6909 	test_blob_io_writev(blob, channel, iov, 1, 0, 1, blob_op_complete, NULL,
6910 			    ext_api ? &ext_opts : NULL);
6911 
6912 	/* If thin provisioned is set cluster should be allocated now */
6913 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[0] != 0);
6914 	cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen];
6915 
6916 	/* Each character 0-F symbolizes single io_unit containing 512 bytes block filled with that character.
6917 	* Each page is separated by |. Whole block [...] symbolizes one cluster (containing 4 pages). */
6918 	/* cluster0: [ F000 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
6919 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6920 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 31 * 512) == 0);
6921 
6922 	/* Verify write with offset on first page */
6923 	iov[0].iov_base = payload_ff;
6924 	iov[0].iov_len = 1 * 512;
6925 
6926 	test_blob_io_writev(blob, channel, iov, 1, 2, 1, blob_op_complete, NULL,
6927 			    ext_api ? &ext_opts : NULL);
6928 
6929 	/* cluster0: [ F0F0 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
6930 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6931 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6932 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6933 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6934 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_00, 28 * 512) == 0);
6935 
6936 	/* Verify write with offset on first page */
6937 	iov[0].iov_base = payload_ff;
6938 	iov[0].iov_len = 4 * 512;
6939 	spdk_blob_io_writev(blob, channel, iov, 1, 4, 4, blob_op_complete, NULL);
6940 	poll_threads();
6941 
6942 	/* cluster0: [ F0F0 FFFF | 0000 0000 | 0000 0000 | 0000 0000 ] */
6943 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6944 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6945 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6946 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6947 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 4 * 512) == 0);
6948 	CU_ASSERT(memcmp(cluster0 + 8 * 512, payload_00, 24 * 512) == 0);
6949 
6950 	/* Verify write with offset on second page */
6951 	iov[0].iov_base = payload_ff;
6952 	iov[0].iov_len = 4 * 512;
6953 	spdk_blob_io_writev(blob, channel, iov, 1, 8, 4, blob_op_complete, NULL);
6954 	poll_threads();
6955 
6956 	/* cluster0: [ F0F0 FFFF | FFFF 0000 | 0000 0000 | 0000 0000 ] */
6957 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6958 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6959 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6960 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6961 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 8 * 512) == 0);
6962 	CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0);
6963 
6964 	/* Verify write across multiple pages */
6965 	iov[0].iov_base = payload_aa;
6966 	iov[0].iov_len = 8 * 512;
6967 
6968 	test_blob_io_writev(blob, channel, iov, 1, 4, 8, blob_op_complete, NULL,
6969 			    ext_api ? &ext_opts : NULL);
6970 
6971 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 0000 ] */
6972 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6973 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6974 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6975 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6976 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
6977 	CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0);
6978 
6979 	/* Verify write across multiple clusters */
6980 
6981 	iov[0].iov_base = payload_ff;
6982 	iov[0].iov_len = 8 * 512;
6983 
6984 	test_blob_io_writev(blob, channel, iov, 1, 28, 8, blob_op_complete, NULL,
6985 			    ext_api ? &ext_opts : NULL);
6986 
6987 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0);
6988 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
6989 
6990 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6991 	 * cluster1: [ FFFF 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
6992 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6993 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6994 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6995 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6996 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
6997 	CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 16 * 512) == 0);
6998 	CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0);
6999 
7000 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0);
7001 	CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 28 * 512) == 0);
7002 
7003 	/* Verify write to second cluster */
7004 
7005 	iov[0].iov_base = payload_ff;
7006 	iov[0].iov_len = 2 * 512;
7007 
7008 	test_blob_io_writev(blob, channel, iov, 1, 32 + 12, 2, blob_op_complete, NULL,
7009 			    ext_api ? &ext_opts : NULL);
7010 
7011 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0);
7012 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
7013 
7014 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
7015 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] */
7016 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
7017 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
7018 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
7019 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
7020 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
7021 	CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0);
7022 
7023 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0);
7024 	CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 8 * 512) == 0);
7025 	CU_ASSERT(memcmp(cluster1 + 12 * 512, payload_ff, 2 * 512) == 0);
7026 	CU_ASSERT(memcmp(cluster1 + 14 * 512, payload_00, 18 * 512) == 0);
7027 }
7028 
7029 static inline void
7030 test_blob_io_readv(struct spdk_blob *blob, struct spdk_io_channel *channel,
7031 		   struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
7032 		   spdk_blob_op_complete cb_fn, void *cb_arg, struct spdk_blob_ext_io_opts *io_opts)
7033 {
7034 	if (io_opts) {
7035 		g_dev_readv_ext_called = false;
7036 		memset(&g_blob_ext_io_opts, 0, sizeof(g_blob_ext_io_opts));
7037 		spdk_blob_io_readv_ext(blob, channel, iov, iovcnt, offset, length, blob_op_complete, NULL, io_opts);
7038 	} else {
7039 		spdk_blob_io_readv(blob, channel, iov, iovcnt, offset, length, blob_op_complete, NULL);
7040 	}
7041 	poll_threads();
7042 	CU_ASSERT(g_bserrno == 0);
7043 	if (io_opts) {
7044 		CU_ASSERT(g_dev_readv_ext_called);
7045 		CU_ASSERT(memcmp(io_opts, &g_blob_ext_io_opts, sizeof(g_blob_ext_io_opts)) == 0);
7046 	}
7047 }
7048 
/*
 * Vectored-read counterpart of test_io_read(): read back the two-cluster
 * pattern with iovecs split in various ways (1, 2, and 4 vectors) and verify
 * the bytes landed in the right places.  When ext_api is true the reads go
 * through spdk_blob_io_readv_ext() with ext_opts (validated inside the
 * test_blob_io_readv helper).  Parentheses in the diagrams mark the span
 * being read.
 */
static void
test_iov_read(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel,
	      bool ext_api)
{
	uint8_t payload_read[64 * 512];
	uint8_t payload_ff[64 * 512];
	uint8_t payload_aa[64 * 512];
	uint8_t payload_00[64 * 512];
	struct iovec iov[4];
	struct spdk_blob_ext_io_opts ext_opts = {
		.memory_domain = (struct spdk_memory_domain *)0xfeedbeef,
		.memory_domain_ctx = (void *)0xf00df00d,
		.size = sizeof(struct spdk_blob_ext_io_opts),
		.user_ctx = (void *)123,
	};

	memset(payload_ff, 0xFF, sizeof(payload_ff));
	memset(payload_aa, 0xAA, sizeof(payload_aa));
	memset(payload_00, 0x00, sizeof(payload_00));

	/* Read only first io unit */
	/* cluster0: [ (F)0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
	 * payload_read: F000 0000 | 0000 0000 ... */
	memset(payload_read, 0x00, sizeof(payload_read));
	iov[0].iov_base = payload_read;
	iov[0].iov_len = 1 * 512;

	test_blob_io_readv(blob, channel, iov, 1, 0, 1, blob_op_complete, NULL, ext_api ? &ext_opts : NULL);

	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 31 * 512) == 0);

	/* Read four io_units starting from offset = 2
	 * cluster0: [ F0(F0 AA)AA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
	 * payload_read: F0AA 0000 | 0000 0000 ... */

	memset(payload_read, 0x00, sizeof(payload_read));
	iov[0].iov_base = payload_read;
	iov[0].iov_len = 4 * 512;

	test_blob_io_readv(blob, channel, iov, 1, 2, 4, blob_op_complete, NULL, ext_api ? &ext_opts : NULL);

	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0);
	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_aa, 512) == 0);
	CU_ASSERT(memcmp(payload_read + 3 * 512, payload_aa, 512) == 0);
	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0);

	/* Read eight io_units across multiple pages
	 * cluster0: [ F0F0 (AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ]
	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
	 * payload_read: AAAA AAAA | 0000 0000 ... */
	memset(payload_read, 0x00, sizeof(payload_read));
	iov[0].iov_base = payload_read;
	iov[0].iov_len = 4 * 512;
	iov[1].iov_base = payload_read + 4 * 512;
	iov[1].iov_len = 4 * 512;

	test_blob_io_readv(blob, channel, iov, 2, 4, 8, blob_op_complete, NULL, ext_api ? &ext_opts : NULL);

	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_aa, 8 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0);

	/* Read eight io_units across multiple clusters, 2 io_units per vector
	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 (FFFF ]
	 * cluster1: [ FFFF) 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
	 * payload_read: FFFF FFFF | 0000 0000 ... */
	memset(payload_read, 0x00, sizeof(payload_read));
	iov[0].iov_base = payload_read;
	iov[0].iov_len = 2 * 512;
	iov[1].iov_base = payload_read + 2 * 512;
	iov[1].iov_len = 2 * 512;
	iov[2].iov_base = payload_read + 4 * 512;
	iov[2].iov_len = 2 * 512;
	iov[3].iov_base = payload_read + 6 * 512;
	iov[3].iov_len = 2 * 512;

	test_blob_io_readv(blob, channel, iov, 4, 28, 8, blob_op_complete, NULL,
			   ext_api ? &ext_opts : NULL);

	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 8 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0);

	/* Read four io_units from second cluster
	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
	 * cluster1: [ FFFF 0000 | 00(00 FF)00 | 0000 0000 | 0000 0000 ]
	 * payload_read: 00FF 0000 | 0000 0000 ... */
	memset(payload_read, 0x00, sizeof(payload_read));
	iov[0].iov_base = payload_read;
	iov[0].iov_len = 1 * 512;
	iov[1].iov_base = payload_read + 1 * 512;
	iov[1].iov_len = 3 * 512;

	test_blob_io_readv(blob, channel, iov, 2, 32 + 10, 4, blob_op_complete, NULL,
			   ext_api ? &ext_opts : NULL);

	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_00, 2 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 2 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0);

	/* Read second cluster with unevenly sized vectors
	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
	 * cluster1: [ (FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ]
	 * payload_read: FFFF 0000 | 0000 FF00 ... */
	memset(payload_read, 0x00, sizeof(payload_read));
	iov[0].iov_base = payload_read;
	iov[0].iov_len = 1 * 512;
	iov[1].iov_base = payload_read + 1 * 512;
	iov[1].iov_len = 2 * 512;
	iov[2].iov_base = payload_read + 3 * 512;
	iov[2].iov_len = 4 * 512;
	iov[3].iov_base = payload_read + 7 * 512;
	iov[3].iov_len = 25 * 512;

	test_blob_io_readv(blob, channel, iov, 4, 32, 32, blob_op_complete, NULL,
			   ext_api ? &ext_opts : NULL);

	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 4 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 8 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + 12 * 512, payload_ff, 2 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + 14 * 512, payload_00, 18 * 512) == 0);

	/* Read whole two clusters
	 * cluster0: [ (F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ] */
	memset(payload_read, 0x00, sizeof(payload_read));
	iov[0].iov_base = payload_read;
	iov[0].iov_len = 1 * 512;
	iov[1].iov_base = payload_read + 1 * 512;
	iov[1].iov_len = 8 * 512;
	iov[2].iov_base = payload_read + 9 * 512;
	iov[2].iov_len = 16 * 512;
	iov[3].iov_base = payload_read + 25 * 512;
	iov[3].iov_len = 39 * 512;

	test_blob_io_readv(blob, channel, iov, 4, 0, 64, blob_op_complete, NULL,
			   ext_api ? &ext_opts : NULL);

	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0);
	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 512) == 0);
	CU_ASSERT(memcmp(payload_read + 3 * 512, payload_00, 512) == 0);
	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_aa, 8 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + 28 * 512, payload_ff, 4 * 512) == 0);

	CU_ASSERT(memcmp(payload_read + (32 + 0) * 512, payload_ff, 4 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + (32 + 4) * 512, payload_00, 8 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + (32 + 12) * 512, payload_ff, 2 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + (32 + 14) * 512, payload_00, 18 * 512) == 0);
}
7201 
7202 static void
7203 blob_io_unit(void)
7204 {
7205 	struct spdk_bs_opts bsopts;
7206 	struct spdk_blob_opts opts;
7207 	struct spdk_blob_store *bs;
7208 	struct spdk_bs_dev *dev;
7209 	struct spdk_blob *blob, *snapshot, *clone;
7210 	spdk_blob_id blobid;
7211 	struct spdk_io_channel *channel;
7212 
7213 	/* Create dev with 512 bytes io unit size */
7214 
7215 	spdk_bs_opts_init(&bsopts, sizeof(bsopts));
7216 	bsopts.cluster_sz = SPDK_BS_PAGE_SIZE * 4;	/* 8 * 4 = 32 io_unit */
7217 	snprintf(bsopts.bstype.bstype, sizeof(bsopts.bstype.bstype), "TESTTYPE");
7218 
7219 	/* Try to initialize a new blob store with unsupported io_unit */
7220 	dev = init_dev();
7221 	dev->blocklen = 512;
7222 	dev->blockcnt =  DEV_BUFFER_SIZE / dev->blocklen;
7223 
7224 	/* Initialize a new blob store */
7225 	spdk_bs_init(dev, &bsopts, bs_op_with_handle_complete, NULL);
7226 	poll_threads();
7227 	CU_ASSERT(g_bserrno == 0);
7228 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
7229 	bs = g_bs;
7230 
7231 	CU_ASSERT(spdk_bs_get_io_unit_size(bs) == 512);
7232 	channel = spdk_bs_alloc_io_channel(bs);
7233 
7234 	/* Create thick provisioned blob */
7235 	ut_spdk_blob_opts_init(&opts);
7236 	opts.thin_provision = false;
7237 	opts.num_clusters = 32;
7238 
7239 	blob = ut_blob_create_and_open(bs, &opts);
7240 	blobid = spdk_blob_get_id(blob);
7241 
7242 	test_io_write(dev, blob, channel);
7243 	test_io_read(dev, blob, channel);
7244 	test_io_zeroes(dev, blob, channel);
7245 
7246 	test_iov_write(dev, blob, channel, false);
7247 	test_iov_read(dev, blob, channel, false);
7248 	test_io_zeroes(dev, blob, channel);
7249 
7250 	test_iov_write(dev, blob, channel, true);
7251 	test_iov_read(dev, blob, channel, true);
7252 
7253 	test_io_unmap(dev, blob, channel);
7254 
7255 	spdk_blob_close(blob, blob_op_complete, NULL);
7256 	poll_threads();
7257 	CU_ASSERT(g_bserrno == 0);
7258 	blob = NULL;
7259 	g_blob = NULL;
7260 
7261 	/* Create thin provisioned blob */
7262 
7263 	ut_spdk_blob_opts_init(&opts);
7264 	opts.thin_provision = true;
7265 	opts.num_clusters = 32;
7266 
7267 	blob = ut_blob_create_and_open(bs, &opts);
7268 	blobid = spdk_blob_get_id(blob);
7269 
7270 	test_io_write(dev, blob, channel);
7271 	test_io_read(dev, blob, channel);
7272 	test_io_zeroes(dev, blob, channel);
7273 
7274 	test_iov_write(dev, blob, channel, false);
7275 	test_iov_read(dev, blob, channel, false);
7276 	test_io_zeroes(dev, blob, channel);
7277 
7278 	test_iov_write(dev, blob, channel, true);
7279 	test_iov_read(dev, blob, channel, true);
7280 
7281 	/* Create snapshot */
7282 
7283 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
7284 	poll_threads();
7285 	CU_ASSERT(g_bserrno == 0);
7286 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
7287 	blobid = g_blobid;
7288 
7289 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
7290 	poll_threads();
7291 	CU_ASSERT(g_bserrno == 0);
7292 	CU_ASSERT(g_blob != NULL);
7293 	snapshot = g_blob;
7294 
7295 	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
7296 	poll_threads();
7297 	CU_ASSERT(g_bserrno == 0);
7298 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
7299 	blobid = g_blobid;
7300 
7301 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
7302 	poll_threads();
7303 	CU_ASSERT(g_bserrno == 0);
7304 	CU_ASSERT(g_blob != NULL);
7305 	clone = g_blob;
7306 
7307 	test_io_read(dev, blob, channel);
7308 	test_io_read(dev, snapshot, channel);
7309 	test_io_read(dev, clone, channel);
7310 
7311 	test_iov_read(dev, blob, channel, false);
7312 	test_iov_read(dev, snapshot, channel, false);
7313 	test_iov_read(dev, clone, channel, false);
7314 
7315 	test_iov_read(dev, blob, channel, true);
7316 	test_iov_read(dev, snapshot, channel, true);
7317 	test_iov_read(dev, clone, channel, true);
7318 
7319 	/* Inflate clone */
7320 
7321 	spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
7322 	poll_threads();
7323 
7324 	CU_ASSERT(g_bserrno == 0);
7325 
7326 	test_io_read(dev, clone, channel);
7327 
7328 	test_io_unmap(dev, clone, channel);
7329 
7330 	test_iov_write(dev, clone, channel, false);
7331 	test_iov_read(dev, clone, channel, false);
7332 	test_io_unmap(dev, clone, channel);
7333 
7334 	test_iov_write(dev, clone, channel, true);
7335 	test_iov_read(dev, clone, channel, true);
7336 
7337 	spdk_blob_close(blob, blob_op_complete, NULL);
7338 	spdk_blob_close(snapshot, blob_op_complete, NULL);
7339 	spdk_blob_close(clone, blob_op_complete, NULL);
7340 	poll_threads();
7341 	CU_ASSERT(g_bserrno == 0);
7342 	blob = NULL;
7343 	g_blob = NULL;
7344 
7345 	spdk_bs_free_io_channel(channel);
7346 	poll_threads();
7347 
7348 	/* Unload the blob store */
7349 	spdk_bs_unload(bs, bs_op_complete, NULL);
7350 	poll_threads();
7351 	CU_ASSERT(g_bserrno == 0);
7352 	g_bs = NULL;
7353 	g_blob = NULL;
7354 	g_blobid = 0;
7355 }
7356 
/* Verify compatibility with older on-disk formats: a superblock whose
 * io_unit_size field is 0 (written by pre-io_unit versions) must load with
 * the io unit size falling back to SPDK_BS_PAGE_SIZE. */
static void
blob_io_unit_compatibility(void)
{
	struct spdk_bs_opts bsopts;
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct spdk_bs_super_block *super;

	/* Create dev with 512 bytes io unit size */

	spdk_bs_opts_init(&bsopts, sizeof(bsopts));
	bsopts.cluster_sz = SPDK_BS_PAGE_SIZE * 4;	/* 8 * 4 = 32 io_unit */
	snprintf(bsopts.bstype.bstype, sizeof(bsopts.bstype.bstype), "TESTTYPE");

	/* Back the blobstore with a device using 512-byte blocks */
	dev = init_dev();
	dev->blocklen = 512;
	dev->blockcnt =  DEV_BUFFER_SIZE / dev->blocklen;

	/* Initialize a new blob store */
	spdk_bs_init(dev, &bsopts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	/* A freshly initialized blobstore reports the device block size */
	CU_ASSERT(spdk_bs_get_io_unit_size(bs) == 512);

	/* Unload the blob store */
	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Modify super block to behave like older version.
	 * Check if loaded io unit size equals SPDK_BS_PAGE_SIZE */
	super = (struct spdk_bs_super_block *)&g_dev_buffer[0];
	super->io_unit_size = 0;
	/* Recompute the CRC so the doctored superblock still validates on load */
	super->crc = blob_md_page_calc_crc(super);

	dev = init_dev();
	dev->blocklen = 512;
	dev->blockcnt =  DEV_BUFFER_SIZE / dev->blocklen;

	spdk_bs_load(dev, &bsopts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	/* io_unit_size == 0 in the superblock must fall back to one full page */
	CU_ASSERT(spdk_bs_get_io_unit_size(bs) == SPDK_BS_PAGE_SIZE);

	/* Unload the blob store */
	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	g_bs = NULL;
	g_blob = NULL;
	g_blobid = 0;
}
7417 
7418 static void
7419 first_sync_complete(void *cb_arg, int bserrno)
7420 {
7421 	struct spdk_blob *blob = cb_arg;
7422 	int rc;
7423 
7424 	CU_ASSERT(bserrno == 0);
7425 	rc = spdk_blob_set_xattr(blob, "sync", "second", strlen("second") + 1);
7426 	CU_ASSERT(rc == 0);
7427 	CU_ASSERT(g_bserrno == -1);
7428 
7429 	/* Keep g_bserrno at -1, only the
7430 	 * second sync completion should set it at 0. */
7431 }
7432 
7433 static void
7434 second_sync_complete(void *cb_arg, int bserrno)
7435 {
7436 	struct spdk_blob *blob = cb_arg;
7437 	const void *value;
7438 	size_t value_len;
7439 	int rc;
7440 
7441 	CU_ASSERT(bserrno == 0);
7442 
7443 	/* Verify that the first sync completion had a chance to execute */
7444 	rc = spdk_blob_get_xattr_value(blob, "sync", &value, &value_len);
7445 	CU_ASSERT(rc == 0);
7446 	SPDK_CU_ASSERT_FATAL(value != NULL);
7447 	CU_ASSERT(value_len == strlen("second") + 1);
7448 	CU_ASSERT_NSTRING_EQUAL_FATAL(value, "second", value_len);
7449 
7450 	CU_ASSERT(g_bserrno == -1);
7451 	g_bserrno = bserrno;
7452 }
7453 
7454 static void
7455 blob_simultaneous_operations(void)
7456 {
7457 	struct spdk_blob_store *bs = g_bs;
7458 	struct spdk_blob_opts opts;
7459 	struct spdk_blob *blob, *snapshot;
7460 	spdk_blob_id blobid, snapshotid;
7461 	struct spdk_io_channel *channel;
7462 	int rc;
7463 
7464 	channel = spdk_bs_alloc_io_channel(bs);
7465 	SPDK_CU_ASSERT_FATAL(channel != NULL);
7466 
7467 	ut_spdk_blob_opts_init(&opts);
7468 	opts.num_clusters = 10;
7469 
7470 	blob = ut_blob_create_and_open(bs, &opts);
7471 	blobid = spdk_blob_get_id(blob);
7472 
7473 	/* Create snapshot and try to remove blob in the same time:
7474 	 * - snapshot should be created successfully
7475 	 * - delete operation should fail w -EBUSY */
7476 	CU_ASSERT(blob->locked_operation_in_progress == false);
7477 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
7478 	CU_ASSERT(blob->locked_operation_in_progress == true);
7479 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
7480 	CU_ASSERT(blob->locked_operation_in_progress == true);
7481 	/* Deletion failure */
7482 	CU_ASSERT(g_bserrno == -EBUSY);
7483 	poll_threads();
7484 	CU_ASSERT(blob->locked_operation_in_progress == false);
7485 	/* Snapshot creation success */
7486 	CU_ASSERT(g_bserrno == 0);
7487 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
7488 
7489 	snapshotid = g_blobid;
7490 
7491 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
7492 	poll_threads();
7493 	CU_ASSERT(g_bserrno == 0);
7494 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
7495 	snapshot = g_blob;
7496 
7497 	/* Inflate blob and try to remove blob in the same time:
7498 	 * - blob should be inflated successfully
7499 	 * - delete operation should fail w -EBUSY */
7500 	CU_ASSERT(blob->locked_operation_in_progress == false);
7501 	spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
7502 	CU_ASSERT(blob->locked_operation_in_progress == true);
7503 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
7504 	CU_ASSERT(blob->locked_operation_in_progress == true);
7505 	/* Deletion failure */
7506 	CU_ASSERT(g_bserrno == -EBUSY);
7507 	poll_threads();
7508 	CU_ASSERT(blob->locked_operation_in_progress == false);
7509 	/* Inflation success */
7510 	CU_ASSERT(g_bserrno == 0);
7511 
7512 	/* Clone snapshot and try to remove snapshot in the same time:
7513 	 * - snapshot should be cloned successfully
7514 	 * - delete operation should fail w -EBUSY */
7515 	CU_ASSERT(blob->locked_operation_in_progress == false);
7516 	spdk_bs_create_clone(bs, snapshotid, NULL, blob_op_with_id_complete, NULL);
7517 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
7518 	/* Deletion failure */
7519 	CU_ASSERT(g_bserrno == -EBUSY);
7520 	poll_threads();
7521 	CU_ASSERT(blob->locked_operation_in_progress == false);
7522 	/* Clone created */
7523 	CU_ASSERT(g_bserrno == 0);
7524 
7525 	/* Resize blob and try to remove blob in the same time:
7526 	 * - blob should be resized successfully
7527 	 * - delete operation should fail w -EBUSY */
7528 	CU_ASSERT(blob->locked_operation_in_progress == false);
7529 	spdk_blob_resize(blob, 50, blob_op_complete, NULL);
7530 	CU_ASSERT(blob->locked_operation_in_progress == true);
7531 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
7532 	CU_ASSERT(blob->locked_operation_in_progress == true);
7533 	/* Deletion failure */
7534 	CU_ASSERT(g_bserrno == -EBUSY);
7535 	poll_threads();
7536 	CU_ASSERT(blob->locked_operation_in_progress == false);
7537 	/* Blob resized successfully */
7538 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
7539 	poll_threads();
7540 	CU_ASSERT(g_bserrno == 0);
7541 
7542 	/* Issue two consecutive blob syncs, neither should fail.
7543 	 * Force sync to actually occur by marking blob dirty each time.
7544 	 * Execution of sync should not be enough to complete the operation,
7545 	 * since disk I/O is required to complete it. */
7546 	g_bserrno = -1;
7547 
7548 	rc = spdk_blob_set_xattr(blob, "sync", "first", strlen("first") + 1);
7549 	CU_ASSERT(rc == 0);
7550 	spdk_blob_sync_md(blob, first_sync_complete, blob);
7551 	CU_ASSERT(g_bserrno == -1);
7552 
7553 	spdk_blob_sync_md(blob, second_sync_complete, blob);
7554 	CU_ASSERT(g_bserrno == -1);
7555 
7556 	poll_threads();
7557 	CU_ASSERT(g_bserrno == 0);
7558 
7559 	spdk_bs_free_io_channel(channel);
7560 	poll_threads();
7561 
7562 	ut_blob_close_and_delete(bs, snapshot);
7563 	ut_blob_close_and_delete(bs, blob);
7564 }
7565 
/* Stress md persistence: start an md sync that carries a large xattr (sized
 * to exceed a single metadata page), interrupt it after an increasing number
 * of poller iterations, remove the xattr and sync again, then reload the
 * blobstore and verify the xattr was never persisted and md page accounting
 * returned to its pre-xattr state. */
static void
blob_persist_test(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts opts;
	struct spdk_blob *blob;
	spdk_blob_id blobid;
	struct spdk_io_channel *channel;
	char *xattr;
	size_t xattr_length;
	int rc;
	uint32_t page_count_clear, page_count_xattr;
	uint64_t poller_iterations;
	bool run_poller;

	channel = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(channel != NULL);

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);

	/* Save the amount of md pages used after creation of a blob.
	 * This should be consistent after removing xattr. */
	page_count_clear = spdk_bit_array_count_set(bs->used_md_pages);
	SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_clear);
	SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_clear);

	/* Add xattr with maximum length of descriptor to exceed single metadata page. */
	xattr_length = SPDK_BS_MAX_DESC_SIZE - sizeof(struct spdk_blob_md_descriptor_xattr) -
		       strlen("large_xattr");
	xattr = calloc(xattr_length, sizeof(char));
	SPDK_CU_ASSERT_FATAL(xattr != NULL);

	rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(g_bserrno == 0);

	/* Save the amount of md pages used after adding the large xattr */
	page_count_xattr = spdk_bit_array_count_set(bs->used_md_pages);
	SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_xattr);
	SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_xattr);

	/* Add xattr to a blob and sync it. While sync is occurring, remove the xattr and sync again.
	 * Interrupt the first sync after increasing number of poller iterations, until it succeeds.
	 * Expectation is that after second sync completes no xattr is saved in metadata. */
	poller_iterations = 1;
	run_poller = true;
	while (run_poller) {
		rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
		SPDK_CU_ASSERT_FATAL(rc == 0);
		g_bserrno = -1;
		spdk_blob_sync_md(blob, blob_op_complete, NULL);
		/* Run only a bounded number of poller iterations so the sync may be
		 * left partially complete on early loop passes */
		poll_thread_times(0, poller_iterations);
		if (g_bserrno == 0) {
			/* Poller iteration count was high enough for first sync to complete.
			 * Verify that blob takes up enough of md_pages to store the xattr. */
			SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_xattr);
			SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_xattr);
			SPDK_CU_ASSERT_FATAL(spdk_bit_array_count_set(bs->used_md_pages) == page_count_xattr);
			run_poller = false;
		}
		/* Remove the xattr (possibly racing the interrupted first sync) and
		 * fully complete the second sync */
		rc = spdk_blob_remove_xattr(blob, "large_xattr");
		SPDK_CU_ASSERT_FATAL(rc == 0);
		spdk_blob_sync_md(blob, blob_op_complete, NULL);
		poll_threads();
		SPDK_CU_ASSERT_FATAL(g_bserrno == 0);
		SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_clear);
		SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_clear);
		SPDK_CU_ASSERT_FATAL(spdk_bit_array_count_set(bs->used_md_pages) == page_count_clear);

		/* Reload bs and re-open blob to verify that xattr was not persisted. */
		spdk_blob_close(blob, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);

		ut_bs_reload(&bs, NULL);

		spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
		blob = g_blob;

		rc = spdk_blob_get_xattr_value(blob, "large_xattr", (const void **)&xattr, &xattr_length);
		SPDK_CU_ASSERT_FATAL(rc == -ENOENT);

		poller_iterations++;
		/* Stop at high iteration count to prevent infinite loop.
		 * This value should be enough for first md sync to complete in any case. */
		SPDK_CU_ASSERT_FATAL(poller_iterations < 50);
	}

	free(xattr);

	ut_blob_close_and_delete(bs, blob);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}
7670 
/* Create a blob with two chained snapshots, decouple the newest snapshot from
 * its parent, and verify that every one of its clusters was copied rather
 * than shared with the first snapshot. The scenario runs twice to cover both
 * blob-deletion orders. */
static void
blob_decouple_snapshot(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts opts;
	struct spdk_blob *blob, *snapshot1, *snapshot2;
	struct spdk_io_channel *channel;
	spdk_blob_id blobid, snapshotid;
	uint64_t cluster;

	/* Iterate twice: delete snapshots before or after the blob */
	for (int delete_snapshot_first = 0; delete_snapshot_first <= 1; delete_snapshot_first++) {
		channel = spdk_bs_alloc_io_channel(bs);
		SPDK_CU_ASSERT_FATAL(channel != NULL);

		ut_spdk_blob_opts_init(&opts);
		opts.num_clusters = 10;
		opts.thin_provision = false;

		blob = ut_blob_create_and_open(bs, &opts);
		blobid = spdk_blob_get_id(blob);

		/* Create first snapshot */
		CU_ASSERT_EQUAL(_get_snapshots_count(bs), 0);
		spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
		CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
		snapshotid = g_blobid;

		spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
		snapshot1 = g_blob;

		/* Create the second one */
		CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
		spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
		CU_ASSERT_EQUAL(_get_snapshots_count(bs), 2);
		snapshotid = g_blobid;

		spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
		snapshot2 = g_blob;
		/* The second snapshot's parent is the first snapshot */
		CU_ASSERT_EQUAL(spdk_blob_get_parent_snapshot(bs, snapshot2->id), snapshot1->id);

		/* Now decouple the second snapshot forcing it to copy the written clusters */
		spdk_bs_blob_decouple_parent(bs, channel, snapshot2->id, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);

		/* Verify that the snapshot has been decoupled and that the clusters have been copied */
		CU_ASSERT_EQUAL(spdk_blob_get_parent_snapshot(bs, snapshot2->id), SPDK_BLOBID_INVALID);
		for (cluster = 0; cluster < snapshot2->active.num_clusters; ++cluster) {
			/* Each cluster must be allocated and distinct from snapshot1's */
			CU_ASSERT_NOT_EQUAL(snapshot2->active.clusters[cluster], 0);
			CU_ASSERT_NOT_EQUAL(snapshot2->active.clusters[cluster],
					    snapshot1->active.clusters[cluster]);
		}

		spdk_bs_free_io_channel(channel);

		if (delete_snapshot_first) {
			ut_blob_close_and_delete(bs, snapshot2);
			ut_blob_close_and_delete(bs, snapshot1);
			ut_blob_close_and_delete(bs, blob);
		} else {
			ut_blob_close_and_delete(bs, blob);
			ut_blob_close_and_delete(bs, snapshot2);
			ut_blob_close_and_delete(bs, snapshot1);
		}
		poll_threads();
	}
}
7750 
/* Verify spdk_blob_get_next_allocated_io_unit() and
 * spdk_blob_get_next_unallocated_io_unit() on a thin-provisioned, 5-cluster
 * blob where only clusters 0, 2 and 4 receive single-io-unit writes. */
static void
blob_seek_io_unit(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_io_channel *channel;
	struct spdk_blob_opts opts;
	uint64_t free_clusters;
	/* Scratch write buffer; its contents are never checked by this test */
	uint8_t payload[10 * 4096];
	uint64_t offset;
	uint64_t io_unit, io_units_per_cluster;

	free_clusters = spdk_bs_free_cluster_count(bs);

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	/* Set blob as thin provisioned */
	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = true;

	/* Create a blob */
	blob = ut_blob_create_and_open(bs, &opts);
	/* Creating a thin blob must not consume any clusters */
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));

	io_units_per_cluster = bs_io_units_per_cluster(blob);

	/* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */
	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(blob->active.num_clusters == 5);

	/* Write at the beginning of first cluster */
	offset = 0;
	spdk_blob_io_write(blob, channel, payload, offset, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Cluster 0 is now allocated; the next unallocated io unit starts at cluster 1 */
	io_unit = spdk_blob_get_next_allocated_io_unit(blob, 0);
	CU_ASSERT(io_unit == offset);

	io_unit = spdk_blob_get_next_unallocated_io_unit(blob, 0);
	CU_ASSERT(io_unit == io_units_per_cluster);

	/* Write in the middle of third cluster */
	offset = 2 * io_units_per_cluster + io_units_per_cluster / 2;
	spdk_blob_io_write(blob, channel, payload, offset, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Seeking from cluster 1 must land at cluster 2, the next allocated one */
	io_unit = spdk_blob_get_next_allocated_io_unit(blob, io_units_per_cluster);
	CU_ASSERT(io_unit == 2 * io_units_per_cluster);

	io_unit = spdk_blob_get_next_unallocated_io_unit(blob, 2 * io_units_per_cluster);
	CU_ASSERT(io_unit == 3 * io_units_per_cluster);

	/* Write at the end of last cluster */
	offset = 5 * io_units_per_cluster - 1;
	spdk_blob_io_write(blob, channel, payload, offset, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	io_unit = spdk_blob_get_next_allocated_io_unit(blob, 3 * io_units_per_cluster);
	CU_ASSERT(io_unit == 4 * io_units_per_cluster);

	/* Cluster 4 is allocated and is the last cluster, so no unallocated io
	 * unit remains past it: expect UINT64_MAX */
	io_unit = spdk_blob_get_next_unallocated_io_unit(blob, 4 * io_units_per_cluster);
	CU_ASSERT(io_unit == UINT64_MAX);

	spdk_bs_free_io_channel(channel);
	poll_threads();

	ut_blob_close_and_delete(bs, blob);
}
7826 
7827 static void
7828 blob_esnap_create(void)
7829 {
7830 	struct spdk_blob_store	*bs = g_bs;
7831 	struct spdk_bs_opts	bs_opts;
7832 	struct ut_esnap_opts	esnap_opts;
7833 	struct spdk_blob_opts	opts;
7834 	struct spdk_blob_open_opts open_opts;
7835 	struct spdk_blob	*blob;
7836 	uint32_t		cluster_sz, block_sz;
7837 	const uint32_t		esnap_num_clusters = 4;
7838 	uint64_t		esnap_num_blocks;
7839 	uint32_t		sz;
7840 	spdk_blob_id		blobid;
7841 	uint32_t		bs_ctx_count, blob_ctx_count;
7842 
7843 	cluster_sz = spdk_bs_get_cluster_size(bs);
7844 	block_sz = spdk_bs_get_io_unit_size(bs);
7845 	esnap_num_blocks = cluster_sz * esnap_num_clusters / block_sz;
7846 
7847 	/* Create a normal blob and verify it is not an esnap clone. */
7848 	ut_spdk_blob_opts_init(&opts);
7849 	blob = ut_blob_create_and_open(bs, &opts);
7850 	CU_ASSERT(!spdk_blob_is_esnap_clone(blob));
7851 	ut_blob_close_and_delete(bs, blob);
7852 
7853 	/* Create an esnap clone blob then verify it is an esnap clone and has the right size */
7854 	ut_spdk_blob_opts_init(&opts);
7855 	ut_esnap_opts_init(block_sz, esnap_num_blocks, __func__, NULL, &esnap_opts);
7856 	opts.esnap_id = &esnap_opts;
7857 	opts.esnap_id_len = sizeof(esnap_opts);
7858 	opts.num_clusters = esnap_num_clusters;
7859 	blob = ut_blob_create_and_open(bs, &opts);
7860 	SPDK_CU_ASSERT_FATAL(blob != NULL);
7861 	SPDK_CU_ASSERT_FATAL(spdk_blob_is_esnap_clone(blob));
7862 	SPDK_CU_ASSERT_FATAL(blob_is_esnap_clone(blob));
7863 	SPDK_CU_ASSERT_FATAL(!spdk_blob_is_clone(blob));
7864 	sz = spdk_blob_get_num_clusters(blob);
7865 	CU_ASSERT(sz == esnap_num_clusters);
7866 	ut_blob_close_and_delete(bs, blob);
7867 
7868 	/* Create an esnap clone without the size and verify it can be grown */
7869 	ut_spdk_blob_opts_init(&opts);
7870 	ut_esnap_opts_init(block_sz, esnap_num_blocks, __func__, NULL, &esnap_opts);
7871 	opts.esnap_id = &esnap_opts;
7872 	opts.esnap_id_len = sizeof(esnap_opts);
7873 	blob = ut_blob_create_and_open(bs, &opts);
7874 	SPDK_CU_ASSERT_FATAL(spdk_blob_is_esnap_clone(blob));
7875 	sz = spdk_blob_get_num_clusters(blob);
7876 	CU_ASSERT(sz == 0);
7877 	spdk_blob_resize(blob, 1, blob_op_complete, NULL);
7878 	poll_threads();
7879 	CU_ASSERT(g_bserrno == 0);
7880 	sz = spdk_blob_get_num_clusters(blob);
7881 	CU_ASSERT(sz == 1);
7882 	spdk_blob_resize(blob, esnap_num_clusters, blob_op_complete, NULL);
7883 	poll_threads();
7884 	CU_ASSERT(g_bserrno == 0);
7885 	sz = spdk_blob_get_num_clusters(blob);
7886 	CU_ASSERT(sz == esnap_num_clusters);
7887 	spdk_blob_resize(blob, esnap_num_clusters + 1, blob_op_complete, NULL);
7888 	poll_threads();
7889 	CU_ASSERT(g_bserrno == 0);
7890 	sz = spdk_blob_get_num_clusters(blob);
7891 	CU_ASSERT(sz == esnap_num_clusters + 1);
7892 
7893 	/* Reload the blobstore and be sure that the blob can be opened. */
7894 	blobid = spdk_blob_get_id(blob);
7895 	spdk_blob_close(blob, blob_op_complete, NULL);
7896 	poll_threads();
7897 	CU_ASSERT(g_bserrno == 0);
7898 	g_blob = NULL;
7899 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
7900 	bs_opts.esnap_bs_dev_create = ut_esnap_create;
7901 	ut_bs_reload(&bs, &bs_opts);
7902 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
7903 	poll_threads();
7904 	CU_ASSERT(g_bserrno == 0);
7905 	CU_ASSERT(g_blob != NULL);
7906 	blob = g_blob;
7907 	SPDK_CU_ASSERT_FATAL(spdk_blob_is_esnap_clone(blob));
7908 	sz = spdk_blob_get_num_clusters(blob);
7909 	CU_ASSERT(sz == esnap_num_clusters + 1);
7910 
7911 	/* Reload the blobstore without esnap_bs_dev_create: should fail to open blob. */
7912 	spdk_blob_close(blob, blob_op_complete, NULL);
7913 	poll_threads();
7914 	CU_ASSERT(g_bserrno == 0);
7915 	g_blob = NULL;
7916 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
7917 	ut_bs_reload(&bs, &bs_opts);
7918 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
7919 	poll_threads();
7920 	CU_ASSERT(g_bserrno != 0);
7921 	CU_ASSERT(g_blob == NULL);
7922 
7923 	/* Reload the blobstore with ctx set and verify it is passed to the esnap create callback */
7924 	bs_ctx_count = 0;
7925 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
7926 	bs_opts.esnap_bs_dev_create = ut_esnap_create_with_count;
7927 	bs_opts.esnap_ctx = &bs_ctx_count;
7928 	ut_bs_reload(&bs, &bs_opts);
7929 	/* Loading the blobstore triggers the esnap to be loaded */
7930 	CU_ASSERT(bs_ctx_count == 1);
7931 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
7932 	poll_threads();
7933 	CU_ASSERT(g_bserrno == 0);
7934 	CU_ASSERT(g_blob != NULL);
7935 	/* Opening the blob also triggers the esnap to be loaded */
7936 	CU_ASSERT(bs_ctx_count == 2);
7937 	blob = g_blob;
7938 	SPDK_CU_ASSERT_FATAL(spdk_blob_is_esnap_clone(blob));
7939 	sz = spdk_blob_get_num_clusters(blob);
7940 	CU_ASSERT(sz == esnap_num_clusters + 1);
7941 	spdk_blob_close(blob, blob_op_complete, NULL);
7942 	poll_threads();
7943 	CU_ASSERT(g_bserrno == 0);
7944 	g_blob = NULL;
7945 	/* If open_opts.esnap_ctx is set it is passed to the esnap create callback */
7946 	blob_ctx_count = 0;
7947 	spdk_blob_open_opts_init(&open_opts, sizeof(open_opts));
7948 	open_opts.esnap_ctx = &blob_ctx_count;
7949 	spdk_bs_open_blob_ext(bs, blobid, &open_opts, blob_op_with_handle_complete, NULL);
7950 	poll_threads();
7951 	blob = g_blob;
7952 	CU_ASSERT(bs_ctx_count == 3);
7953 	CU_ASSERT(blob_ctx_count == 1);
7954 	spdk_blob_close(blob, blob_op_complete, NULL);
7955 	poll_threads();
7956 	CU_ASSERT(g_bserrno == 0);
7957 	g_blob = NULL;
7958 }
7959 
7960 static void
7961 blob_esnap_clone_reload(void)
7962 {
7963 	struct spdk_blob_store	*bs = g_bs;
7964 	struct spdk_bs_opts	bs_opts;
7965 	struct ut_esnap_opts	esnap_opts;
7966 	struct spdk_blob_opts	opts;
7967 	struct spdk_blob	*eclone1, *snap1, *clone1;
7968 	uint32_t		cluster_sz = spdk_bs_get_cluster_size(bs);
7969 	uint32_t		block_sz = spdk_bs_get_io_unit_size(bs);
7970 	const uint32_t		esnap_num_clusters = 4;
7971 	uint64_t		esnap_num_blocks = cluster_sz * esnap_num_clusters / block_sz;
7972 	spdk_blob_id		eclone1_id, snap1_id, clone1_id;
7973 	struct spdk_io_channel	*bs_ch;
7974 	char			buf[block_sz];
7975 	int			bserr1, bserr2, bserr3, bserr4;
7976 	struct spdk_bs_dev	*dev;
7977 
7978 	/* Create and open an esnap clone blob */
7979 	ut_spdk_blob_opts_init(&opts);
7980 	ut_esnap_opts_init(block_sz, esnap_num_blocks, __func__, NULL, &esnap_opts);
7981 	opts.esnap_id = &esnap_opts;
7982 	opts.esnap_id_len = sizeof(esnap_opts);
7983 	opts.num_clusters = esnap_num_clusters;
7984 	eclone1 = ut_blob_create_and_open(bs, &opts);
7985 	CU_ASSERT(eclone1 != NULL);
7986 	CU_ASSERT(spdk_blob_is_esnap_clone(eclone1));
7987 	eclone1_id = eclone1->id;
7988 
7989 	/* Create and open a snapshot of eclone1 */
7990 	spdk_bs_create_snapshot(bs, eclone1_id, NULL, blob_op_with_id_complete, NULL);
7991 	poll_threads();
7992 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
7993 	CU_ASSERT(g_bserrno == 0);
7994 	snap1_id = g_blobid;
7995 	spdk_bs_open_blob(bs, snap1_id, blob_op_with_handle_complete, NULL);
7996 	poll_threads();
7997 	CU_ASSERT(g_bserrno == 0);
7998 	CU_ASSERT(g_blob != NULL);
7999 	snap1 = g_blob;
8000 
8001 	/* Create and open regular clone of snap1 */
8002 	spdk_bs_create_clone(bs, snap1_id, NULL, blob_op_with_id_complete, NULL);
8003 	poll_threads();
8004 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
8005 	SPDK_CU_ASSERT_FATAL(g_bserrno == 0);
8006 	clone1_id = g_blobid;
8007 	spdk_bs_open_blob(bs, clone1_id, blob_op_with_handle_complete, NULL);
8008 	poll_threads();
8009 	CU_ASSERT(g_bserrno == 0);
8010 	CU_ASSERT(g_blob != NULL);
8011 	clone1 = g_blob;
8012 
8013 	/* Close the blobs in preparation for reloading the blobstore */
8014 	spdk_blob_close(clone1, blob_op_complete, NULL);
8015 	poll_threads();
8016 	CU_ASSERT(g_bserrno == 0);
8017 	spdk_blob_close(snap1, blob_op_complete, NULL);
8018 	poll_threads();
8019 	CU_ASSERT(g_bserrno == 0);
8020 	spdk_blob_close(eclone1, blob_op_complete, NULL);
8021 	poll_threads();
8022 	CU_ASSERT(g_bserrno == 0);
8023 	g_blob = NULL;
8024 
8025 	/* Reload the blobstore */
8026 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
8027 	bs_opts.esnap_bs_dev_create = ut_esnap_create;
8028 	ut_bs_reload(&bs, &bs_opts);
8029 
8030 	/* Be sure each of the blobs can be opened */
8031 	spdk_bs_open_blob(bs, eclone1_id, blob_op_with_handle_complete, NULL);
8032 	poll_threads();
8033 	CU_ASSERT(g_bserrno == 0);
8034 	CU_ASSERT(g_blob != NULL);
8035 	eclone1 = g_blob;
8036 	spdk_bs_open_blob(bs, snap1_id, blob_op_with_handle_complete, NULL);
8037 	poll_threads();
8038 	CU_ASSERT(g_bserrno == 0);
8039 	CU_ASSERT(g_blob != NULL);
8040 	snap1 = g_blob;
8041 	spdk_bs_open_blob(bs, clone1_id, blob_op_with_handle_complete, NULL);
8042 	poll_threads();
8043 	CU_ASSERT(g_bserrno == 0);
8044 	CU_ASSERT(g_blob != NULL);
8045 	clone1 = g_blob;
8046 
8047 	/* Perform some reads on each of them to cause channels to be allocated */
8048 	bs_ch = spdk_bs_alloc_io_channel(bs);
8049 	CU_ASSERT(bs_ch != NULL);
8050 	spdk_blob_io_read(eclone1, bs_ch, buf, 0, 1, bs_op_complete, NULL);
8051 	poll_threads();
8052 	CU_ASSERT(g_bserrno == 0);
8053 	spdk_blob_io_read(snap1, bs_ch, buf, 0, 1, bs_op_complete, NULL);
8054 	poll_threads();
8055 	CU_ASSERT(g_bserrno == 0);
8056 	spdk_blob_io_read(clone1, bs_ch, buf, 0, 1, bs_op_complete, NULL);
8057 	poll_threads();
8058 	CU_ASSERT(g_bserrno == 0);
8059 
8060 	/*
8061 	 * Unload the blobstore in a way similar to how lvstore unloads it.  This should exercise
8062 	 * the deferred unload path in spdk_bs_unload().
8063 	 */
8064 	bserr1 = 0xbad;
8065 	bserr2 = 0xbad;
8066 	bserr3 = 0xbad;
8067 	bserr4 = 0xbad;
8068 	spdk_blob_close(eclone1, blob_op_complete, &bserr1);
8069 	spdk_blob_close(snap1, blob_op_complete, &bserr2);
8070 	spdk_blob_close(clone1, blob_op_complete, &bserr3);
8071 	spdk_bs_unload(bs, blob_op_complete, &bserr4);
8072 	spdk_bs_free_io_channel(bs_ch);
8073 	poll_threads();
8074 	CU_ASSERT(bserr1 == 0);
8075 	CU_ASSERT(bserr2 == 0);
8076 	CU_ASSERT(bserr3 == 0);
8077 	CU_ASSERT(bserr4 == 0);
8078 	g_blob = NULL;
8079 
8080 	/* Reload the blobstore */
8081 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
8082 	bs_opts.esnap_bs_dev_create = ut_esnap_create;
8083 	dev = init_dev();
8084 	spdk_bs_load(dev, &bs_opts, bs_op_with_handle_complete, NULL);
8085 	poll_threads();
8086 	CU_ASSERT(g_bserrno == 0);
8087 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
8088 }
8089 
8090 static bool
8091 blob_esnap_verify_contents(struct spdk_blob *blob, struct spdk_io_channel *ch,
8092 			   uint64_t offset, uint64_t size, uint32_t readsize, const char *how)
8093 {
8094 	const uint32_t	bs_blksz = blob->bs->io_unit_size;
8095 	const uint32_t	esnap_blksz = blob->back_bs_dev ? blob->back_bs_dev->blocklen : bs_blksz;
8096 	const uint32_t	start_blk = offset / bs_blksz;
8097 	const uint32_t	num_blocks = spdk_max(size, readsize) / bs_blksz;
8098 	const uint32_t	blocks_per_read = spdk_min(size, readsize) / bs_blksz;
8099 	uint32_t	blob_block;
8100 	struct iovec	iov;
8101 	uint8_t		buf[spdk_min(size, readsize)];
8102 	bool		block_ok;
8103 
8104 	SPDK_CU_ASSERT_FATAL(offset % bs_blksz == 0);
8105 	SPDK_CU_ASSERT_FATAL(size % bs_blksz == 0);
8106 	SPDK_CU_ASSERT_FATAL(readsize % bs_blksz == 0);
8107 
8108 	memset(buf, 0, readsize);
8109 	iov.iov_base = buf;
8110 	iov.iov_len = readsize;
8111 	for (blob_block = start_blk; blob_block < num_blocks; blob_block += blocks_per_read) {
8112 		if (strcmp(how, "read") == 0) {
8113 			spdk_blob_io_read(blob, ch, buf, blob_block, blocks_per_read,
8114 					  bs_op_complete, NULL);
8115 		} else if (strcmp(how, "readv") == 0) {
8116 			spdk_blob_io_readv(blob, ch, &iov, 1, blob_block, blocks_per_read,
8117 					   bs_op_complete, NULL);
8118 		} else if (strcmp(how, "readv_ext") == 0) {
8119 			/*
8120 			 * This is currently pointless. NULL ext_opts leads to dev->readv(), not
8121 			 * dev->readv_ext().
8122 			 */
8123 			spdk_blob_io_readv_ext(blob, ch, &iov, 1, blob_block, blocks_per_read,
8124 					       bs_op_complete, NULL, NULL);
8125 		} else {
8126 			abort();
8127 		}
8128 		poll_threads();
8129 		CU_ASSERT(g_bserrno == 0);
8130 		if (g_bserrno != 0) {
8131 			return false;
8132 		}
8133 		block_ok = ut_esnap_content_is_correct(buf, blocks_per_read * bs_blksz, blob->id,
8134 						       blob_block * bs_blksz, esnap_blksz);
8135 		CU_ASSERT(block_ok);
8136 		if (!block_ok) {
8137 			return false;
8138 		}
8139 	}
8140 
8141 	return true;
8142 }
8143 
/*
 * Core esnap clone I/O test, parameterized by block sizes.  Builds a
 * blobstore whose io unit is bs_blksz backed by an esnap device with
 * esnap_blksz blocks, verifies reads pass through to the esnap content,
 * then overwrites the blob one block at a time while checking that the
 * neighboring blocks keep the expected content.
 */
static void
blob_esnap_io_size(uint32_t bs_blksz, uint32_t esnap_blksz)
{
	struct spdk_bs_dev	*dev;
	struct spdk_blob_store	*bs;
	struct spdk_bs_opts	bsopts;
	struct spdk_blob_opts	opts;
	struct ut_esnap_opts	esnap_opts;
	struct spdk_blob	*blob;
	const uint32_t		cluster_sz = 16 * 1024;
	const uint64_t		esnap_num_clusters = 4;
	const uint32_t		esnap_sz = cluster_sz * esnap_num_clusters;
	const uint64_t		esnap_num_blocks = esnap_sz / esnap_blksz;
	const uint64_t		blob_num_blocks = esnap_sz / bs_blksz;
	uint32_t		block;
	struct spdk_io_channel	*bs_ch;

	spdk_bs_opts_init(&bsopts, sizeof(bsopts));
	bsopts.cluster_sz = cluster_sz;
	bsopts.esnap_bs_dev_create = ut_esnap_create;

	/* Create device with desired block size */
	dev = init_dev();
	dev->blocklen = bs_blksz;
	dev->blockcnt = DEV_BUFFER_SIZE / dev->blocklen;

	/* Initialize a new blob store */
	spdk_bs_init(dev, &bsopts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	SPDK_CU_ASSERT_FATAL(g_bs->io_unit_size == bs_blksz);
	bs = g_bs;

	bs_ch = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(bs_ch != NULL);

	/* Create and open the esnap clone  */
	ut_spdk_blob_opts_init(&opts);
	ut_esnap_opts_init(esnap_blksz, esnap_num_blocks, __func__, NULL, &esnap_opts);
	opts.esnap_id = &esnap_opts;
	opts.esnap_id_len = sizeof(esnap_opts);
	opts.num_clusters = esnap_num_clusters;
	blob = ut_blob_create_and_open(bs, &opts);
	SPDK_CU_ASSERT_FATAL(blob != NULL);

	/* Verify that large reads return the content of the esnap device */
	CU_ASSERT(blob_esnap_verify_contents(blob, bs_ch, 0, esnap_sz, esnap_sz, "read"));
	CU_ASSERT(blob_esnap_verify_contents(blob, bs_ch, 0, esnap_sz, esnap_sz, "readv"));
	CU_ASSERT(blob_esnap_verify_contents(blob, bs_ch, 0, esnap_sz, esnap_sz, "readv_ext"));
	/* Verify that small reads return the content of the esnap device */
	CU_ASSERT(blob_esnap_verify_contents(blob, bs_ch, 0, esnap_sz, bs_blksz, "read"));
	CU_ASSERT(blob_esnap_verify_contents(blob, bs_ch, 0, esnap_sz, bs_blksz, "readv"));
	CU_ASSERT(blob_esnap_verify_contents(blob, bs_ch, 0, esnap_sz, bs_blksz, "readv_ext"));

	/* Write one blob block at a time; verify that the surrounding blocks are OK */
	for (block = 0; block < blob_num_blocks; block++) {
		char		buf[bs_blksz];
		union ut_word	word;

		word.f.blob_id = 0xfedcba90;
		word.f.lba = block;
		ut_memset8(buf, word.num, bs_blksz);

		spdk_blob_io_write(blob, bs_ch, buf, block, 1, bs_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		if (g_bserrno != 0) {
			break;
		}

		/* Read and verify the block before the current block */
		if (block != 0) {
			/* Overwritten on the previous iteration: expect the written pattern. */
			spdk_blob_io_read(blob, bs_ch, buf, block - 1, 1, bs_op_complete, NULL);
			poll_threads();
			CU_ASSERT(g_bserrno == 0);
			if (g_bserrno != 0) {
				break;
			}
			CU_ASSERT(ut_esnap_content_is_correct(buf, bs_blksz, word.f.blob_id,
							      (block - 1) * bs_blksz, bs_blksz));
		}

		/* Read and verify the current block */
		spdk_blob_io_read(blob, bs_ch, buf, block, 1, bs_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		if (g_bserrno != 0) {
			break;
		}
		CU_ASSERT(ut_esnap_content_is_correct(buf, bs_blksz, word.f.blob_id,
						      block * bs_blksz, bs_blksz));

		/* Check the block that follows */
		if (block + 1 < blob_num_blocks) {
			g_bserrno = 0xbad;
			/* Not yet written: expect the original esnap device content. */
			spdk_blob_io_read(blob, bs_ch, buf, block + 1, 1, bs_op_complete, NULL);
			poll_threads();
			CU_ASSERT(g_bserrno == 0);
			if (g_bserrno != 0) {
				break;
			}
			CU_ASSERT(ut_esnap_content_is_correct(buf, bs_blksz, blob->id,
							      (block + 1) * bs_blksz,
							      esnap_blksz));
		}
	}

	/* Clean up */
	spdk_bs_free_io_channel(bs_ch);
	g_bserrno = 0xbad;
	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	spdk_bs_unload(g_bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
	memset(g_dev_buffer, 0, DEV_BUFFER_SIZE);
}
8264 
/* Esnap I/O with matching 4 KiB blobstore and esnap block sizes. */
static void
blob_esnap_io_4096_4096(void)
{
	const uint32_t blksz = 4096;

	blob_esnap_io_size(blksz, blksz);
}
8270 
/* Esnap I/O with matching 512-byte blobstore and esnap block sizes. */
static void
blob_esnap_io_512_512(void)
{
	const uint32_t blksz = 512;

	blob_esnap_io_size(blksz, blksz);
}
8276 
/* Esnap I/O with a 4 KiB blobstore block over a 512-byte esnap block. */
static void
blob_esnap_io_4096_512(void)
{
	const uint32_t bs_blksz = 4096;
	const uint32_t esnap_blksz = 512;

	blob_esnap_io_size(bs_blksz, esnap_blksz);
}
8282 
/*
 * An esnap device whose block size is larger than the blobstore io unit is
 * not usable: creating such an esnap clone succeeds (the esnap device is not
 * consulted at create time), but opening it must fail with -EINVAL.
 */
static void
blob_esnap_io_512_4096(void)
{
	struct spdk_bs_dev	*dev;
	struct spdk_blob_store	*bs;
	struct spdk_bs_opts	bs_opts;
	struct spdk_blob_opts	blob_opts;
	struct ut_esnap_opts	esnap_opts;
	uint64_t		cluster_sz = 16 * 1024;
	uint32_t		bs_blksz = 512;
	uint32_t		esnap_blksz = 4096;
	uint64_t		esnap_num_blocks = 64;
	spdk_blob_id		blobid;

	/* Create device with desired block size */
	dev = init_dev();
	dev->blocklen = bs_blksz;
	dev->blockcnt = DEV_BUFFER_SIZE / dev->blocklen;

	/* Initialize a new blob store */
	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
	bs_opts.cluster_sz = cluster_sz;
	bs_opts.esnap_bs_dev_create = ut_esnap_create;
	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	SPDK_CU_ASSERT_FATAL(g_bs->io_unit_size == bs_blksz);
	bs = g_bs;

	/* Try to create and open the esnap clone. Create should succeed, open should fail. */
	ut_spdk_blob_opts_init(&blob_opts);
	ut_esnap_opts_init(esnap_blksz, esnap_num_blocks, __func__, NULL, &esnap_opts);
	blob_opts.esnap_id = &esnap_opts;
	blob_opts.esnap_id_len = sizeof(esnap_opts);
	blob_opts.num_clusters = esnap_num_blocks * esnap_blksz / bs_blksz;
	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);
	CU_ASSERT(g_blob == NULL);

	/* Clean up */
	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
	memset(g_dev_buffer, 0, DEV_BUFFER_SIZE);
}
8336 
/*
 * Verify that the esnap back_bs_dev's per-thread channels are created lazily
 * on the first read from each thread, are released when the corresponding
 * blobstore channel is freed, and are all torn down when the blob is closed.
 */
static void
blob_esnap_thread_add_remove(void)
{
	struct spdk_blob_store	*bs = g_bs;
	struct spdk_blob_opts	opts;
	struct ut_esnap_opts	ut_esnap_opts;
	struct spdk_blob	*blob;
	struct ut_esnap_dev	*ut_dev;
	spdk_blob_id		blobid;
	uint64_t		start_thread = g_ut_thread_id;
	bool			destroyed = false;
	struct spdk_io_channel	*ch0, *ch1;
	struct ut_esnap_channel	*ut_ch0, *ut_ch1;
	const uint32_t		blocklen = bs->io_unit_size;
	char			buf[blocklen * 4];

	SPDK_CU_ASSERT_FATAL(g_ut_num_threads > 1);
	set_thread(0);

	/* Create the esnap clone */
	ut_esnap_opts_init(blocklen, 2048, "add_remove_1", &destroyed, &ut_esnap_opts);
	ut_spdk_blob_opts_init(&opts);
	opts.esnap_id = &ut_esnap_opts;
	opts.esnap_id_len = sizeof(ut_esnap_opts);
	opts.num_clusters = 10;
	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	/* Open the blob. No channels should be allocated yet. */
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;
	ut_dev = (struct ut_esnap_dev *)blob->back_bs_dev;
	CU_ASSERT(ut_dev != NULL);
	CU_ASSERT(ut_dev->num_channels == 0);

	/* Create a channel on thread 0. It is lazily created on the first read. */
	ch0 = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(ch0 != NULL);
	ut_ch0 = ut_esnap_get_io_channel(ch0, blobid);
	CU_ASSERT(ut_ch0 == NULL);
	CU_ASSERT(ut_dev->num_channels == 0);
	spdk_blob_io_read(blob, ch0, buf, 0, 1, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(ut_dev->num_channels == 1);
	ut_ch0 = ut_esnap_get_io_channel(ch0, blobid);
	CU_ASSERT(ut_ch0 != NULL);
	CU_ASSERT(ut_ch0->blocks_read == 1);

	/* Create a channel on thread 1 and verify its lazy creation too. */
	set_thread(1);
	ch1 = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(ch1 != NULL);
	ut_ch1 = ut_esnap_get_io_channel(ch1, blobid);
	CU_ASSERT(ut_ch1 == NULL);
	CU_ASSERT(ut_dev->num_channels == 1);
	spdk_blob_io_read(blob, ch1, buf, 0, 4, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(ut_dev->num_channels == 2);
	ut_ch1 = ut_esnap_get_io_channel(ch1, blobid);
	CU_ASSERT(ut_ch1 != NULL);
	CU_ASSERT(ut_ch1->blocks_read == 4);

	/* Close the channel on thread 0 and verify the bs_dev channel is also gone. */
	set_thread(0);
	spdk_bs_free_io_channel(ch0);
	poll_threads();
	CU_ASSERT(ut_dev->num_channels == 1);

	/* Close the blob. There is no outstanding IO so it should close right away. */
	g_bserrno = 0xbad;
	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	/* destroyed is set by the ut_esnap device's destructor. */
	CU_ASSERT(destroyed);

	/* The esnap channel for the blob should be gone now too. */
	ut_ch1 = ut_esnap_get_io_channel(ch1, blobid);
	CU_ASSERT(ut_ch1 == NULL);

	/* Clean up */
	set_thread(1);
	spdk_bs_free_io_channel(ch1);
	set_thread(start_thread);
}
8429 
/* Completion callback for blob_freeze_io(): bump the caller's counter. */
static void
freeze_done(void *cb_arg, int bserrno)
{
	uint32_t *counter = cb_arg;

	CU_ASSERT(bserrno == 0);
	*counter += 1;
}
8438 
/* Completion callback for blob_unfreeze_io(): bump the caller's counter. */
static void
unfreeze_done(void *cb_arg, int bserrno)
{
	uint32_t *counter = cb_arg;

	CU_ASSERT(bserrno == 0);
	*counter += 1;
}
8447 
/*
 * Verify blob freeze/unfreeze reference counting: a single freeze/unfreeze
 * pair completes after polling, and nested freezes each get their own
 * completion while frozen_refcnt tracks the nesting depth.
 */
static void
blob_nested_freezes(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_io_channel *channel[2];
	struct spdk_blob_opts opts;
	uint32_t freeze_cnt, unfreeze_cnt;
	int i;

	/* Channels on two threads so freeze must fan out across channels. */
	for (i = 0; i < 2; i++) {
		set_thread(i);
		channel[i] = spdk_bs_alloc_io_channel(bs);
		SPDK_CU_ASSERT_FATAL(channel[i] != NULL);
	}

	set_thread(0);

	ut_spdk_blob_opts_init(&opts);
	blob = ut_blob_create_and_open(bs, &opts);

	/* First just test a single freeze/unfreeze. */
	freeze_cnt = 0;
	unfreeze_cnt = 0;
	CU_ASSERT(blob->frozen_refcnt == 0);
	blob_freeze_io(blob, freeze_done, &freeze_cnt);
	CU_ASSERT(blob->frozen_refcnt == 1);
	/* Completion only fires after the threads are polled. */
	CU_ASSERT(freeze_cnt == 0);
	poll_threads();
	CU_ASSERT(freeze_cnt == 1);
	blob_unfreeze_io(blob, unfreeze_done, &unfreeze_cnt);
	CU_ASSERT(blob->frozen_refcnt == 0);
	CU_ASSERT(unfreeze_cnt == 0);
	poll_threads();
	CU_ASSERT(unfreeze_cnt == 1);

	/* Now nest multiple freeze/unfreeze operations.  We should
	 * expect a callback for each operation, but only after
	 * the threads have been polled to ensure a for_each_channel()
	 * was executed.
	 */
	freeze_cnt = 0;
	unfreeze_cnt = 0;
	CU_ASSERT(blob->frozen_refcnt == 0);
	blob_freeze_io(blob, freeze_done, &freeze_cnt);
	CU_ASSERT(blob->frozen_refcnt == 1);
	CU_ASSERT(freeze_cnt == 0);
	blob_freeze_io(blob, freeze_done, &freeze_cnt);
	CU_ASSERT(blob->frozen_refcnt == 2);
	CU_ASSERT(freeze_cnt == 0);
	poll_threads();
	CU_ASSERT(freeze_cnt == 2);
	blob_unfreeze_io(blob, unfreeze_done, &unfreeze_cnt);
	CU_ASSERT(blob->frozen_refcnt == 1);
	CU_ASSERT(unfreeze_cnt == 0);
	blob_unfreeze_io(blob, unfreeze_done, &unfreeze_cnt);
	CU_ASSERT(blob->frozen_refcnt == 0);
	CU_ASSERT(unfreeze_cnt == 0);
	poll_threads();
	CU_ASSERT(unfreeze_cnt == 2);

	for (i = 0; i < 2; i++) {
		set_thread(i);
		spdk_bs_free_io_channel(channel[i]);
	}
	set_thread(0);
	ut_blob_close_and_delete(bs, blob);

	poll_threads();
	g_blob = NULL;
	g_blobid = 0;
}
8520 
8521 static void
8522 blob_ext_md_pages(void)
8523 {
8524 	struct spdk_blob_store *bs;
8525 	struct spdk_bs_dev *dev;
8526 	struct spdk_blob *blob;
8527 	struct spdk_blob_opts opts;
8528 	struct spdk_bs_opts bs_opts;
8529 	uint64_t free_clusters;
8530 
8531 	dev = init_dev();
8532 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
8533 	snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");
8534 	/* Issue #2932 was a bug in how we use bs_allocate_cluster() during resize.
8535 	 * It requires num_md_pages that is much smaller than the number of clusters.
8536 	 * Make sure we can create a blob that uses all of the free clusters.
8537 	 */
8538 	bs_opts.cluster_sz = 65536;
8539 	bs_opts.num_md_pages = 16;
8540 
8541 	/* Initialize a new blob store */
8542 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
8543 	poll_threads();
8544 	CU_ASSERT(g_bserrno == 0);
8545 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
8546 	bs = g_bs;
8547 
8548 	free_clusters = spdk_bs_free_cluster_count(bs);
8549 
8550 	ut_spdk_blob_opts_init(&opts);
8551 	opts.num_clusters = free_clusters;
8552 
8553 	blob = ut_blob_create_and_open(bs, &opts);
8554 	spdk_blob_close(blob, blob_op_complete, NULL);
8555 	CU_ASSERT(g_bserrno == 0);
8556 
8557 	spdk_bs_unload(bs, bs_op_complete, NULL);
8558 	poll_threads();
8559 	CU_ASSERT(g_bserrno == 0);
8560 	g_bs = NULL;
8561 }
8562 
static void
blob_esnap_clone_snapshot(void)
{
	/*
	 * When a snapshot is created, the blob that is being snapped becomes
	 * the leaf node (a clone of the snapshot) and the newly created
	 * snapshot sits between the snapped blob and the external snapshot.
	 *
	 * Before creating snap1
	 *
	 *   ,--------.     ,----------.
	 *   |  blob  |     |  vbdev   |
	 *   | blob1  |<----| nvme1n42 |
	 *   |  (rw)  |     |   (ro)   |
	 *   `--------'     `----------'
	 *       Figure 1
	 *
	 * After creating snap1
	 *
	 *   ,--------.     ,--------.     ,----------.
	 *   |  blob  |     |  blob  |     |  vbdev   |
	 *   | blob1  |<----| snap1  |<----| nvme1n42 |
	 *   |  (rw)  |     |  (ro)  |     |   (ro)   |
	 *   `--------'     `--------'     `----------'
	 *       Figure 2
	 *
	 * Starting from Figure 2, if snap1 is removed, the chain reverts to
	 * what it looks like in Figure 1.
	 *
	 * Starting from Figure 2, if blob1 is removed, the chain becomes:
	 *
	 *   ,--------.     ,----------.
	 *   |  blob  |     |  vbdev   |
	 *   | snap1  |<----| nvme1n42 |
	 *   |  (ro)  |     |   (ro)   |
	 *   `--------'     `----------'
	 *       Figure 3
	 *
	 * In each case, the blob pointed to by the nvme vbdev is considered
	 * the "esnap clone".  The esnap clone must have:
	 *
	 *   - XATTR_INTERNAL for BLOB_EXTERNAL_SNAPSHOT_ID (e.g. name or UUID)
	 *   - blob->invalid_flags must contain SPDK_BLOB_EXTERNAL_SNAPSHOT
	 *   - blob->parent_id must be SPDK_BLOBID_EXTERNAL_SNAPSHOT.
	 *
	 * No other blob that descends from the esnap clone may have any of
	 * those set.
	 */
	struct spdk_blob_store	*bs = g_bs;
	const uint32_t		blocklen = bs->io_unit_size;
	struct spdk_blob_opts	opts;
	struct ut_esnap_opts	esnap_opts;
	struct spdk_blob	*blob, *snap_blob;
	spdk_blob_id		blobid, snap_blobid;
	bool			destroyed = false;

	/* Create the esnap clone */
	ut_esnap_opts_init(blocklen, 2048, __func__, &destroyed, &esnap_opts);
	ut_spdk_blob_opts_init(&opts);
	opts.esnap_id = &esnap_opts;
	opts.esnap_id_len = sizeof(esnap_opts);
	opts.num_clusters = 10;
	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	/* Open the blob. */
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;
	UT_ASSERT_IS_ESNAP_CLONE(blob, &esnap_opts, sizeof(esnap_opts));

	/*
	 * Create a snapshot of the blob. The snapshot becomes the esnap clone.
	 * This is the Figure 1 -> Figure 2 transition above.
	 */
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snap_blobid = g_blobid;

	spdk_bs_open_blob(bs, snap_blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snap_blob = g_blob;

	UT_ASSERT_IS_NOT_ESNAP_CLONE(blob);
	UT_ASSERT_IS_ESNAP_CLONE(snap_blob, &esnap_opts, sizeof(esnap_opts));

	/*
	 * Delete the snapshot.  The original blob becomes the esnap clone.
	 * This is the Figure 2 -> Figure 1 transition above.
	 */
	ut_blob_close_and_delete(bs, snap_blob);
	snap_blob = NULL;
	snap_blobid = SPDK_BLOBID_INVALID;
	UT_ASSERT_IS_ESNAP_CLONE(blob, &esnap_opts, sizeof(esnap_opts));

	/*
	 * Create the snapshot again, then delete the original blob.  The
	 * snapshot should survive as the esnap clone.
	 */
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snap_blobid = g_blobid;

	spdk_bs_open_blob(bs, snap_blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snap_blob = g_blob;

	UT_ASSERT_IS_NOT_ESNAP_CLONE(blob);
	UT_ASSERT_IS_ESNAP_CLONE(snap_blob, &esnap_opts, sizeof(esnap_opts));

	/* Figure 2 -> Figure 3 transition: delete the leaf blob. */
	ut_blob_close_and_delete(bs, blob);
	blob = NULL;
	blobid = SPDK_BLOBID_INVALID;
	UT_ASSERT_IS_ESNAP_CLONE(snap_blob, &esnap_opts, sizeof(esnap_opts));

	/*
	 * Clone the snapshot.  The snapshot continues to be the esnap clone.
	 */
	spdk_bs_create_clone(bs, snap_blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;

	UT_ASSERT_IS_NOT_ESNAP_CLONE(blob);
	UT_ASSERT_IS_ESNAP_CLONE(snap_blob, &esnap_opts, sizeof(esnap_opts));

	/*
	 * Delete the snapshot. The clone becomes the esnap clone.
	 */
	ut_blob_close_and_delete(bs, snap_blob);
	snap_blob = NULL;
	snap_blobid = SPDK_BLOBID_INVALID;
	UT_ASSERT_IS_ESNAP_CLONE(blob, &esnap_opts, sizeof(esnap_opts));

	/*
	 * Clean up
	 */
	ut_blob_close_and_delete(bs, blob);
}
8720 
/*
 * Shared helper for the inflate/decouple tests.  Creates an esnap clone,
 * hydrates it via spdk_bs_inflate_blob() (inflate == true) or
 * spdk_bs_blob_decouple_parent() (inflate == false), and verifies that the
 * blob is no longer an esnap clone while still holding the esnap content.
 *
 * Returns the number of CUnit assertion failures added by this call, so a
 * caller could distinguish which variant failed.
 */
static uint64_t
_blob_esnap_clone_hydrate(bool inflate)
{
	struct spdk_blob_store	*bs = g_bs;
	struct spdk_blob_opts	opts;
	struct ut_esnap_opts	esnap_opts;
	struct spdk_blob	*blob;
	spdk_blob_id		blobid;
	struct spdk_io_channel *channel;
	bool			destroyed = false;
	const uint32_t		blocklen = spdk_bs_get_io_unit_size(bs);
	const uint32_t		cluster_sz = spdk_bs_get_cluster_size(bs);
	const uint64_t		esnap_num_clusters = 4;
	const uint32_t		esnap_sz = cluster_sz * esnap_num_clusters;
	const uint64_t		esnap_num_blocks = esnap_sz / blocklen;
	uint64_t		num_failures = CU_get_number_of_failures();

	channel = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(channel != NULL);

	/* Create the esnap clone */
	ut_spdk_blob_opts_init(&opts);
	ut_esnap_opts_init(blocklen, esnap_num_blocks, __func__, &destroyed, &esnap_opts);
	opts.esnap_id = &esnap_opts;
	opts.esnap_id_len = sizeof(esnap_opts);
	opts.num_clusters = esnap_num_clusters;
	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	/* Open the esnap clone */
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;
	UT_ASSERT_IS_ESNAP_CLONE(blob, &esnap_opts, sizeof(esnap_opts));

	/*
	 * Inflate or decouple the blob, then verify that it is no longer an
	 * esnap clone and still has the right content.
	 */
	if (inflate) {
		spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
	} else {
		spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
	}
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	UT_ASSERT_IS_NOT_ESNAP_CLONE(blob);
	CU_ASSERT(blob_esnap_verify_contents(blob, channel, 0, esnap_sz, esnap_sz, "read"));
	ut_blob_close_and_delete(bs, blob);

	/*
	 * Clean up
	 */
	spdk_bs_free_io_channel(channel);
	poll_threads();

	/* Return number of new failures */
	return CU_get_number_of_failures() - num_failures;
}
8785 
/* Hydrate an esnap clone via spdk_bs_inflate_blob(). */
static void
blob_esnap_clone_inflate(void)
{
	const bool inflate = true;

	(void)_blob_esnap_clone_hydrate(inflate);
}
8791 
/* Hydrate an esnap clone via spdk_bs_blob_decouple_parent(). */
static void
blob_esnap_clone_decouple(void)
{
	const bool inflate = false;

	(void)_blob_esnap_clone_hydrate(inflate);
}
8797 
/*
 * Verify external snapshot hot-plug via spdk_blob_set_esnap_bs_dev(): the old
 * back_bs_dev is destroyed, the new one is installed, and the swap works both
 * before any channels exist and while per-thread esnap channels are active.
 */
static void
blob_esnap_hotplug(void)
{
	struct spdk_blob_store	*bs = g_bs;
	struct ut_esnap_opts	esnap1_opts, esnap2_opts;
	struct spdk_blob_opts	opts;
	struct spdk_blob	*blob;
	struct spdk_bs_dev	*bs_dev;
	struct ut_esnap_dev	*esnap_dev;
	uint32_t		cluster_sz = spdk_bs_get_cluster_size(bs);
	uint32_t		block_sz = spdk_bs_get_io_unit_size(bs);
	const uint32_t		esnap_num_clusters = 4;
	uint64_t		esnap_num_blocks = cluster_sz * esnap_num_clusters / block_sz;
	bool			destroyed1 = false, destroyed2 = false;
	uint64_t		start_thread = g_ut_thread_id;
	struct spdk_io_channel	*ch0, *ch1;
	/* Scratch buffer shared by the reads below; its contents are not checked. */
	char			buf[block_sz];

	/* Create and open an esnap clone blob */
	ut_spdk_blob_opts_init(&opts);
	ut_esnap_opts_init(block_sz, esnap_num_blocks, "esnap1", &destroyed1, &esnap1_opts);
	opts.esnap_id = &esnap1_opts;
	opts.esnap_id_len = sizeof(esnap1_opts);
	opts.num_clusters = esnap_num_clusters;
	blob = ut_blob_create_and_open(bs, &opts);
	CU_ASSERT(blob != NULL);
	CU_ASSERT(spdk_blob_is_esnap_clone(blob));
	SPDK_CU_ASSERT_FATAL(blob->back_bs_dev != NULL);
	esnap_dev = (struct ut_esnap_dev *)blob->back_bs_dev;
	CU_ASSERT(strcmp(esnap_dev->ut_opts.name, "esnap1") == 0);

	/* Replace the external snapshot while no esnap channels exist. */
	ut_esnap_opts_init(block_sz, esnap_num_blocks, "esnap2", &destroyed2, &esnap2_opts);
	bs_dev = ut_esnap_dev_alloc(&esnap2_opts);
	CU_ASSERT(!destroyed1);
	CU_ASSERT(!destroyed2);
	g_bserrno = 0xbad;
	spdk_blob_set_esnap_bs_dev(blob, bs_dev, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	/* The replaced device ("esnap1") must be destroyed, the new one kept. */
	CU_ASSERT(destroyed1);
	CU_ASSERT(!destroyed2);
	SPDK_CU_ASSERT_FATAL(bs_dev == blob->back_bs_dev);
	SPDK_CU_ASSERT_FATAL(bs_dev == spdk_blob_get_esnap_bs_dev(blob));
	esnap_dev = (struct ut_esnap_dev *)blob->back_bs_dev;
	CU_ASSERT(strcmp(esnap_dev->ut_opts.name, "esnap2") == 0);

	/* Create a couple channels */
	set_thread(0);
	ch0 = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(ch0 != NULL);
	spdk_blob_io_read(blob, ch0, buf, 0, 1, bs_op_complete, NULL);
	set_thread(1);
	ch1 = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(ch1 != NULL);
	spdk_blob_io_read(blob, ch1, buf, 0, 1, bs_op_complete, NULL);
	set_thread(start_thread);
	poll_threads();
	CU_ASSERT(esnap_dev->num_channels == 2);

	/* Replace the external snapshot again, now with active esnap channels. */
	ut_esnap_opts_init(block_sz, esnap_num_blocks, "esnap1a", &destroyed1, &esnap1_opts);
	bs_dev = ut_esnap_dev_alloc(&esnap1_opts);
	destroyed1 = destroyed2 = false;
	g_bserrno = 0xbad;
	spdk_blob_set_esnap_bs_dev(blob, bs_dev, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(!destroyed1);
	CU_ASSERT(destroyed2);
	SPDK_CU_ASSERT_FATAL(blob->back_bs_dev != NULL);
	esnap_dev = (struct ut_esnap_dev *)blob->back_bs_dev;
	CU_ASSERT(strcmp(esnap_dev->ut_opts.name, "esnap1a") == 0);

	/* Clean up */
	set_thread(0);
	spdk_bs_free_io_channel(ch0);
	set_thread(1);
	spdk_bs_free_io_channel(ch1);
	set_thread(start_thread);
	g_bserrno = 0xbad;
	spdk_blob_close(blob, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
}
8883 
8884 static bool g_blob_is_degraded;
8885 static int g_blob_is_degraded_called;
8886 
8887 static bool
8888 _blob_is_degraded(struct spdk_bs_dev *dev)
8889 {
8890 	g_blob_is_degraded_called++;
8891 	return g_blob_is_degraded;
8892 }
8893 
/*
 * Exercise spdk_blob_is_degraded() across combinations of the blobstore
 * device's is_degraded callback and the blob's back_bs_dev is_degraded
 * callback being unset, returning false, or returning true, counting the
 * stub invocations in each case.
 */
static void
blob_is_degraded(void)
{
	/* Device with no is_degraded callback vs. one using the counting stub. */
	struct spdk_bs_dev bs_is_degraded_null = { 0 };
	struct spdk_bs_dev bs_is_degraded = { .is_degraded = _blob_is_degraded };

	/* No back_bs_dev, no bs->dev->is_degraded */
	g_blob_is_degraded_called = 0;
	CU_ASSERT(!spdk_blob_is_degraded(g_blob));
	CU_ASSERT(g_blob_is_degraded_called == 0);

	/* No back_bs_dev, blobstore device degraded */
	g_bs->dev->is_degraded = _blob_is_degraded;
	g_blob_is_degraded_called = 0;
	g_blob_is_degraded = true;
	CU_ASSERT(spdk_blob_is_degraded(g_blob));
	CU_ASSERT(g_blob_is_degraded_called == 1);

	/* No back_bs_dev, blobstore device not degraded */
	g_bs->dev->is_degraded = _blob_is_degraded;
	g_blob_is_degraded_called = 0;
	g_blob_is_degraded = false;
	CU_ASSERT(!spdk_blob_is_degraded(g_blob));
	CU_ASSERT(g_blob_is_degraded_called == 1);

	/* back_bs_dev does not define is_degraded, no bs->dev->is_degraded */
	g_bs->dev->is_degraded = NULL;
	g_blob->back_bs_dev = &bs_is_degraded_null;
	g_blob_is_degraded_called = 0;
	g_blob_is_degraded = false;
	CU_ASSERT(!spdk_blob_is_degraded(g_blob));
	CU_ASSERT(g_blob_is_degraded_called == 0);

	/* back_bs_dev is not degraded, no bs->dev->is_degraded */
	g_bs->dev->is_degraded = NULL;
	g_blob->back_bs_dev = &bs_is_degraded;
	g_blob_is_degraded_called = 0;
	g_blob_is_degraded = false;
	CU_ASSERT(!spdk_blob_is_degraded(g_blob));
	CU_ASSERT(g_blob_is_degraded_called == 1);

	/* back_bs_dev is degraded, no bs->dev->is_degraded */
	g_bs->dev->is_degraded = NULL;
	g_blob->back_bs_dev = &bs_is_degraded;
	g_blob_is_degraded_called = 0;
	g_blob_is_degraded = true;
	CU_ASSERT(spdk_blob_is_degraded(g_blob));
	CU_ASSERT(g_blob_is_degraded_called == 1);

	/* back_bs_dev is not degraded, blobstore device is not degraded */
	g_bs->dev->is_degraded = _blob_is_degraded;
	g_blob->back_bs_dev = &bs_is_degraded;
	g_blob_is_degraded_called = 0;
	g_blob_is_degraded = false;
	/* Both callbacks must be consulted when neither reports degraded. */
	CU_ASSERT(!spdk_blob_is_degraded(g_blob));
	CU_ASSERT(g_blob_is_degraded_called == 2);

	/* Restore g_blob so later tests see no stale stub back_bs_dev. */
	g_blob->back_bs_dev = NULL;
}
8953 
8954 static void
8955 suite_bs_setup(void)
8956 {
8957 	struct spdk_bs_dev *dev;
8958 
8959 	dev = init_dev();
8960 	memset(g_dev_buffer, 0, DEV_BUFFER_SIZE);
8961 	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
8962 	poll_threads();
8963 	CU_ASSERT(g_bserrno == 0);
8964 	CU_ASSERT(g_bs != NULL);
8965 }
8966 
8967 static void
8968 suite_esnap_bs_setup(void)
8969 {
8970 	struct spdk_bs_dev	*dev;
8971 	struct spdk_bs_opts	bs_opts;
8972 
8973 	dev = init_dev();
8974 	memset(g_dev_buffer, 0, DEV_BUFFER_SIZE);
8975 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
8976 	bs_opts.cluster_sz = 16 * 1024;
8977 	bs_opts.esnap_bs_dev_create = ut_esnap_create;
8978 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
8979 	poll_threads();
8980 	CU_ASSERT(g_bserrno == 0);
8981 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
8982 }
8983 
8984 static void
8985 suite_bs_cleanup(void)
8986 {
8987 	if (g_bs != NULL) {
8988 		spdk_bs_unload(g_bs, bs_op_complete, NULL);
8989 		poll_threads();
8990 		CU_ASSERT(g_bserrno == 0);
8991 		g_bs = NULL;
8992 	}
8993 	memset(g_dev_buffer, 0, DEV_BUFFER_SIZE);
8994 }
8995 
8996 static struct spdk_blob *
8997 ut_blob_create_and_open(struct spdk_blob_store *bs, struct spdk_blob_opts *blob_opts)
8998 {
8999 	struct spdk_blob *blob;
9000 	struct spdk_blob_opts create_blob_opts;
9001 	spdk_blob_id blobid;
9002 
9003 	if (blob_opts == NULL) {
9004 		ut_spdk_blob_opts_init(&create_blob_opts);
9005 		blob_opts = &create_blob_opts;
9006 	}
9007 
9008 	spdk_bs_create_blob_ext(bs, blob_opts, blob_op_with_id_complete, NULL);
9009 	poll_threads();
9010 	CU_ASSERT(g_bserrno == 0);
9011 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
9012 	blobid = g_blobid;
9013 	g_blobid = -1;
9014 
9015 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
9016 	poll_threads();
9017 	CU_ASSERT(g_bserrno == 0);
9018 	CU_ASSERT(g_blob != NULL);
9019 	blob = g_blob;
9020 
9021 	g_blob = NULL;
9022 	g_bserrno = -1;
9023 
9024 	return blob;
9025 }
9026 
9027 static void
9028 ut_blob_close_and_delete(struct spdk_blob_store *bs, struct spdk_blob *blob)
9029 {
9030 	spdk_blob_id blobid = spdk_blob_get_id(blob);
9031 
9032 	spdk_blob_close(blob, blob_op_complete, NULL);
9033 	poll_threads();
9034 	CU_ASSERT(g_bserrno == 0);
9035 	g_blob = NULL;
9036 
9037 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
9038 	poll_threads();
9039 	CU_ASSERT(g_bserrno == 0);
9040 	g_bserrno = -1;
9041 }
9042 
9043 static void
9044 suite_blob_setup(void)
9045 {
9046 	suite_bs_setup();
9047 	CU_ASSERT(g_bs != NULL);
9048 
9049 	g_blob = ut_blob_create_and_open(g_bs, NULL);
9050 	CU_ASSERT(g_blob != NULL);
9051 }
9052 
9053 static void
9054 suite_blob_cleanup(void)
9055 {
9056 	ut_blob_close_and_delete(g_bs, g_blob);
9057 	CU_ASSERT(g_blob == NULL);
9058 
9059 	suite_bs_cleanup();
9060 	CU_ASSERT(g_bs == NULL);
9061 }
9062 
9063 static int
9064 ut_setup_config_nocopy_noextent(void)
9065 {
9066 	g_dev_copy_enabled = false;
9067 	g_use_extent_table = false;
9068 
9069 	return 0;
9070 }
9071 
9072 static int
9073 ut_setup_config_nocopy_extent(void)
9074 {
9075 	g_dev_copy_enabled = false;
9076 	g_use_extent_table = true;
9077 
9078 	return 0;
9079 }
9080 
9081 static int
9082 ut_setup_config_copy_noextent(void)
9083 {
9084 	g_dev_copy_enabled = true;
9085 	g_use_extent_table = false;
9086 
9087 	return 0;
9088 }
9089 
9090 static int
9091 ut_setup_config_copy_extent(void)
9092 {
9093 	g_dev_copy_enabled = true;
9094 	g_use_extent_table = true;
9095 
9096 	return 0;
9097 }
9098 
/* One entry of the test matrix run by main(). */
struct ut_config {
	const char *suffix;		/* appended to each CUnit suite name */
	CU_InitializeFunc setup_cb;	/* sets the matching global config flags */
};
9103 
9104 int
9105 main(int argc, char **argv)
9106 {
9107 	CU_pSuite		suite, suite_bs, suite_blob, suite_esnap_bs;
9108 	unsigned int		i, num_failures;
9109 	char			suite_name[4096];
9110 	struct ut_config	*config;
9111 	struct ut_config	configs[] = {
9112 		{"nocopy_noextent", ut_setup_config_nocopy_noextent},
9113 		{"nocopy_extent", ut_setup_config_nocopy_extent},
9114 		{"copy_noextent", ut_setup_config_copy_noextent},
9115 		{"copy_extent", ut_setup_config_copy_extent},
9116 	};
9117 
9118 	CU_initialize_registry();
9119 
9120 	for (i = 0; i < SPDK_COUNTOF(configs); ++i) {
9121 		config = &configs[i];
9122 
9123 		snprintf(suite_name, sizeof(suite_name), "blob_%s", config->suffix);
9124 		suite = CU_add_suite(suite_name, config->setup_cb, NULL);
9125 
9126 		snprintf(suite_name, sizeof(suite_name), "blob_bs_%s", config->suffix);
9127 		suite_bs = CU_add_suite_with_setup_and_teardown(suite_name, config->setup_cb, NULL,
9128 				suite_bs_setup, suite_bs_cleanup);
9129 
9130 		snprintf(suite_name, sizeof(suite_name), "blob_blob_%s", config->suffix);
9131 		suite_blob = CU_add_suite_with_setup_and_teardown(suite_name, config->setup_cb, NULL,
9132 				suite_blob_setup, suite_blob_cleanup);
9133 
9134 		snprintf(suite_name, sizeof(suite_name), "blob_esnap_bs_%s", config->suffix);
9135 		suite_esnap_bs = CU_add_suite_with_setup_and_teardown(suite_name, config->setup_cb, NULL,
9136 				 suite_esnap_bs_setup,
9137 				 suite_bs_cleanup);
9138 
9139 		CU_ADD_TEST(suite, blob_init);
9140 		CU_ADD_TEST(suite_bs, blob_open);
9141 		CU_ADD_TEST(suite_bs, blob_create);
9142 		CU_ADD_TEST(suite_bs, blob_create_loop);
9143 		CU_ADD_TEST(suite_bs, blob_create_fail);
9144 		CU_ADD_TEST(suite_bs, blob_create_internal);
9145 		CU_ADD_TEST(suite_bs, blob_create_zero_extent);
9146 		CU_ADD_TEST(suite, blob_thin_provision);
9147 		CU_ADD_TEST(suite_bs, blob_snapshot);
9148 		CU_ADD_TEST(suite_bs, blob_clone);
9149 		CU_ADD_TEST(suite_bs, blob_inflate);
9150 		CU_ADD_TEST(suite_bs, blob_delete);
9151 		CU_ADD_TEST(suite_bs, blob_resize_test);
9152 		CU_ADD_TEST(suite, blob_read_only);
9153 		CU_ADD_TEST(suite_bs, channel_ops);
9154 		CU_ADD_TEST(suite_bs, blob_super);
9155 		CU_ADD_TEST(suite_blob, blob_write);
9156 		CU_ADD_TEST(suite_blob, blob_read);
9157 		CU_ADD_TEST(suite_blob, blob_rw_verify);
9158 		CU_ADD_TEST(suite_bs, blob_rw_verify_iov);
9159 		CU_ADD_TEST(suite_blob, blob_rw_verify_iov_nomem);
9160 		CU_ADD_TEST(suite_blob, blob_rw_iov_read_only);
9161 		CU_ADD_TEST(suite_bs, blob_unmap);
9162 		CU_ADD_TEST(suite_bs, blob_iter);
9163 		CU_ADD_TEST(suite_blob, blob_xattr);
9164 		CU_ADD_TEST(suite_bs, blob_parse_md);
9165 		CU_ADD_TEST(suite, bs_load);
9166 		CU_ADD_TEST(suite_bs, bs_load_pending_removal);
9167 		CU_ADD_TEST(suite, bs_load_custom_cluster_size);
9168 		CU_ADD_TEST(suite, bs_load_after_failed_grow);
9169 		CU_ADD_TEST(suite_bs, bs_unload);
9170 		CU_ADD_TEST(suite, bs_cluster_sz);
9171 		CU_ADD_TEST(suite_bs, bs_usable_clusters);
9172 		CU_ADD_TEST(suite, bs_resize_md);
9173 		CU_ADD_TEST(suite, bs_destroy);
9174 		CU_ADD_TEST(suite, bs_type);
9175 		CU_ADD_TEST(suite, bs_super_block);
9176 		CU_ADD_TEST(suite, bs_test_recover_cluster_count);
9177 		CU_ADD_TEST(suite, bs_grow_live);
9178 		CU_ADD_TEST(suite, bs_grow_live_no_space);
9179 		CU_ADD_TEST(suite, bs_test_grow);
9180 		CU_ADD_TEST(suite, blob_serialize_test);
9181 		CU_ADD_TEST(suite_bs, blob_crc);
9182 		CU_ADD_TEST(suite, super_block_crc);
9183 		CU_ADD_TEST(suite_blob, blob_dirty_shutdown);
9184 		CU_ADD_TEST(suite_bs, blob_flags);
9185 		CU_ADD_TEST(suite_bs, bs_version);
9186 		CU_ADD_TEST(suite_bs, blob_set_xattrs_test);
9187 		CU_ADD_TEST(suite_bs, blob_thin_prov_alloc);
9188 		CU_ADD_TEST(suite_bs, blob_insert_cluster_msg_test);
9189 		CU_ADD_TEST(suite_bs, blob_thin_prov_rw);
9190 		CU_ADD_TEST(suite, blob_thin_prov_write_count_io);
9191 		CU_ADD_TEST(suite, blob_thin_prov_unmap_cluster);
9192 		CU_ADD_TEST(suite_bs, blob_thin_prov_rle);
9193 		CU_ADD_TEST(suite_bs, blob_thin_prov_rw_iov);
9194 		CU_ADD_TEST(suite, bs_load_iter_test);
9195 		CU_ADD_TEST(suite_bs, blob_snapshot_rw);
9196 		CU_ADD_TEST(suite_bs, blob_snapshot_rw_iov);
9197 		CU_ADD_TEST(suite, blob_relations);
9198 		CU_ADD_TEST(suite, blob_relations2);
9199 		CU_ADD_TEST(suite, blob_relations3);
9200 		CU_ADD_TEST(suite, blobstore_clean_power_failure);
9201 		CU_ADD_TEST(suite, blob_delete_snapshot_power_failure);
9202 		CU_ADD_TEST(suite, blob_create_snapshot_power_failure);
9203 		CU_ADD_TEST(suite_bs, blob_inflate_rw);
9204 		CU_ADD_TEST(suite_bs, blob_snapshot_freeze_io);
9205 		CU_ADD_TEST(suite_bs, blob_operation_split_rw);
9206 		CU_ADD_TEST(suite_bs, blob_operation_split_rw_iov);
9207 		CU_ADD_TEST(suite, blob_io_unit);
9208 		CU_ADD_TEST(suite, blob_io_unit_compatibility);
9209 		CU_ADD_TEST(suite_bs, blob_simultaneous_operations);
9210 		CU_ADD_TEST(suite_bs, blob_persist_test);
9211 		CU_ADD_TEST(suite_bs, blob_decouple_snapshot);
9212 		CU_ADD_TEST(suite_bs, blob_seek_io_unit);
9213 		CU_ADD_TEST(suite_esnap_bs, blob_esnap_create);
9214 		CU_ADD_TEST(suite_bs, blob_nested_freezes);
9215 		CU_ADD_TEST(suite, blob_ext_md_pages);
9216 		CU_ADD_TEST(suite, blob_esnap_io_4096_4096);
9217 		CU_ADD_TEST(suite, blob_esnap_io_512_512);
9218 		CU_ADD_TEST(suite, blob_esnap_io_4096_512);
9219 		CU_ADD_TEST(suite, blob_esnap_io_512_4096);
9220 		CU_ADD_TEST(suite_esnap_bs, blob_esnap_thread_add_remove);
9221 		CU_ADD_TEST(suite_esnap_bs, blob_esnap_clone_snapshot);
9222 		CU_ADD_TEST(suite_esnap_bs, blob_esnap_clone_inflate);
9223 		CU_ADD_TEST(suite_esnap_bs, blob_esnap_clone_decouple);
9224 		CU_ADD_TEST(suite_esnap_bs, blob_esnap_clone_reload);
9225 		CU_ADD_TEST(suite_esnap_bs, blob_esnap_hotplug);
9226 		CU_ADD_TEST(suite_blob, blob_is_degraded);
9227 	}
9228 
9229 	allocate_threads(2);
9230 	set_thread(0);
9231 
9232 	g_dev_buffer = calloc(1, DEV_BUFFER_SIZE);
9233 
9234 	num_failures = spdk_ut_run_tests(argc, argv, NULL);
9235 
9236 	free(g_dev_buffer);
9237 
9238 	free_threads();
9239 
9240 	return num_failures;
9241 }
9242