xref: /spdk/test/unit/lib/blob/blob.c/blob_ut.c (revision 488570ebd418ba07c9e69e65106dcc964f3bb41b)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (c) Intel Corporation.
3  *   All rights reserved.
4  *   Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
5  */
6 
7 #include "spdk/stdinc.h"
8 
9 #include "spdk_cunit.h"
10 #include "spdk/blob.h"
11 #include "spdk/string.h"
12 
13 #include "common/lib/ut_multithread.c"
14 #include "../bs_dev_common.c"
15 #include "blob/blobstore.c"
16 #include "blob/request.c"
17 #include "blob/zeroes.c"
18 #include "blob/blob_bs_dev.c"
19 
20 struct spdk_blob_store *g_bs;
21 spdk_blob_id g_blobid;
22 struct spdk_blob *g_blob, *g_blob2;
23 int g_bserrno, g_bserrno2;
24 struct spdk_xattr_names *g_names;
25 int g_done;
26 char *g_xattr_names[] = {"first", "second", "third"};
27 char *g_xattr_values[] = {"one", "two", "three"};
28 uint64_t g_ctx = 1729;
29 bool g_use_extent_table = false;
30 
31 struct spdk_bs_super_block_ver1 {
32 	uint8_t		signature[8];
33 	uint32_t        version;
34 	uint32_t        length;
35 	uint32_t	clean; /* If there was a clean shutdown, this is 1. */
36 	spdk_blob_id	super_blob;
37 
38 	uint32_t	cluster_size; /* In bytes */
39 
40 	uint32_t	used_page_mask_start; /* Offset from beginning of disk, in pages */
41 	uint32_t	used_page_mask_len; /* Count, in pages */
42 
43 	uint32_t	used_cluster_mask_start; /* Offset from beginning of disk, in pages */
44 	uint32_t	used_cluster_mask_len; /* Count, in pages */
45 
46 	uint32_t	md_start; /* Offset from beginning of disk, in pages */
47 	uint32_t	md_len; /* Count, in pages */
48 
49 	uint8_t		reserved[4036];
50 	uint32_t	crc;
51 } __attribute__((packed));
52 SPDK_STATIC_ASSERT(sizeof(struct spdk_bs_super_block_ver1) == 0x1000, "Invalid super block size");
53 
54 static struct spdk_blob *ut_blob_create_and_open(struct spdk_blob_store *bs,
55 		struct spdk_blob_opts *blob_opts);
56 static void ut_blob_close_and_delete(struct spdk_blob_store *bs, struct spdk_blob *blob);
57 static void suite_blob_setup(void);
58 static void suite_blob_cleanup(void);
59 
60 DEFINE_STUB(spdk_memory_domain_memzero, int, (struct spdk_memory_domain *src_domain,
61 		void *src_domain_ctx, struct iovec *iov, uint32_t iovcnt, void (*cpl_cb)(void *, int),
62 		void *cpl_cb_arg), 0);
63 
64 static void
65 _get_xattr_value(void *arg, const char *name,
66 		 const void **value, size_t *value_len)
67 {
68 	uint64_t i;
69 
70 	SPDK_CU_ASSERT_FATAL(value_len != NULL);
71 	SPDK_CU_ASSERT_FATAL(value != NULL);
72 	CU_ASSERT(arg == &g_ctx);
73 
74 	for (i = 0; i < sizeof(g_xattr_names); i++) {
75 		if (!strcmp(name, g_xattr_names[i])) {
76 			*value_len = strlen(g_xattr_values[i]);
77 			*value = g_xattr_values[i];
78 			break;
79 		}
80 	}
81 }
82 
83 static void
84 _get_xattr_value_null(void *arg, const char *name,
85 		      const void **value, size_t *value_len)
86 {
87 	SPDK_CU_ASSERT_FATAL(value_len != NULL);
88 	SPDK_CU_ASSERT_FATAL(value != NULL);
89 	CU_ASSERT(arg == NULL);
90 
91 	*value_len = 0;
92 	*value = NULL;
93 }
94 
95 static int
96 _get_snapshots_count(struct spdk_blob_store *bs)
97 {
98 	struct spdk_blob_list *snapshot = NULL;
99 	int count = 0;
100 
101 	TAILQ_FOREACH(snapshot, &bs->snapshots, link) {
102 		count += 1;
103 	}
104 
105 	return count;
106 }
107 
108 static void
109 ut_spdk_blob_opts_init(struct spdk_blob_opts *opts)
110 {
111 	spdk_blob_opts_init(opts, sizeof(*opts));
112 	opts->use_extent_table = g_use_extent_table;
113 }
114 
115 static void
116 bs_op_complete(void *cb_arg, int bserrno)
117 {
118 	g_bserrno = bserrno;
119 }
120 
121 static void
122 bs_op_with_handle_complete(void *cb_arg, struct spdk_blob_store *bs,
123 			   int bserrno)
124 {
125 	g_bs = bs;
126 	g_bserrno = bserrno;
127 }
128 
129 static void
130 blob_op_complete(void *cb_arg, int bserrno)
131 {
132 	g_bserrno = bserrno;
133 }
134 
135 static void
136 blob_op_with_id_complete(void *cb_arg, spdk_blob_id blobid, int bserrno)
137 {
138 	g_blobid = blobid;
139 	g_bserrno = bserrno;
140 }
141 
142 static void
143 blob_op_with_handle_complete(void *cb_arg, struct spdk_blob *blb, int bserrno)
144 {
145 	g_blob = blb;
146 	g_bserrno = bserrno;
147 }
148 
149 static void
150 blob_op_with_handle_complete2(void *cb_arg, struct spdk_blob *blob, int bserrno)
151 {
152 	if (g_blob == NULL) {
153 		g_blob = blob;
154 		g_bserrno = bserrno;
155 	} else {
156 		g_blob2 = blob;
157 		g_bserrno2 = bserrno;
158 	}
159 }
160 
161 static void
162 ut_bs_reload(struct spdk_blob_store **bs, struct spdk_bs_opts *opts)
163 {
164 	struct spdk_bs_dev *dev;
165 
166 	/* Unload the blob store */
167 	spdk_bs_unload(*bs, bs_op_complete, NULL);
168 	poll_threads();
169 	CU_ASSERT(g_bserrno == 0);
170 
171 	dev = init_dev();
172 	/* Load an existing blob store */
173 	spdk_bs_load(dev, opts, bs_op_with_handle_complete, NULL);
174 	poll_threads();
175 	CU_ASSERT(g_bserrno == 0);
176 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
177 	*bs = g_bs;
178 
179 	g_bserrno = -1;
180 }
181 
182 static void
183 ut_bs_dirty_load(struct spdk_blob_store **bs, struct spdk_bs_opts *opts)
184 {
185 	struct spdk_bs_dev *dev;
186 
187 	/* Dirty shutdown */
188 	bs_free(*bs);
189 
190 	dev = init_dev();
191 	/* Load an existing blob store */
192 	spdk_bs_load(dev, opts, bs_op_with_handle_complete, NULL);
193 	poll_threads();
194 	CU_ASSERT(g_bserrno == 0);
195 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
196 	*bs = g_bs;
197 
198 	g_bserrno = -1;
199 }
200 
201 static void
202 blob_init(void)
203 {
204 	struct spdk_blob_store *bs;
205 	struct spdk_bs_dev *dev;
206 
207 	dev = init_dev();
208 
209 	/* should fail for an unsupported blocklen */
210 	dev->blocklen = 500;
211 	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
212 	poll_threads();
213 	CU_ASSERT(g_bserrno == -EINVAL);
214 
215 	dev = init_dev();
216 	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
217 	poll_threads();
218 	CU_ASSERT(g_bserrno == 0);
219 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
220 	bs = g_bs;
221 
222 	spdk_bs_unload(bs, bs_op_complete, NULL);
223 	poll_threads();
224 	CU_ASSERT(g_bserrno == 0);
225 	g_bs = NULL;
226 }
227 
228 static void
229 blob_super(void)
230 {
231 	struct spdk_blob_store *bs = g_bs;
232 	spdk_blob_id blobid;
233 	struct spdk_blob_opts blob_opts;
234 
235 	/* Get the super blob without having set one */
236 	spdk_bs_get_super(bs, blob_op_with_id_complete, NULL);
237 	poll_threads();
238 	CU_ASSERT(g_bserrno == -ENOENT);
239 	CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID);
240 
241 	/* Create a blob */
242 	ut_spdk_blob_opts_init(&blob_opts);
243 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
244 	poll_threads();
245 	CU_ASSERT(g_bserrno == 0);
246 	CU_ASSERT(g_blobid !=  SPDK_BLOBID_INVALID);
247 	blobid = g_blobid;
248 
249 	/* Set the blob as the super blob */
250 	spdk_bs_set_super(bs, blobid, blob_op_complete, NULL);
251 	poll_threads();
252 	CU_ASSERT(g_bserrno == 0);
253 
254 	/* Get the super blob */
255 	spdk_bs_get_super(bs, blob_op_with_id_complete, NULL);
256 	poll_threads();
257 	CU_ASSERT(g_bserrno == 0);
258 	CU_ASSERT(blobid == g_blobid);
259 }
260 
261 static void
262 blob_open(void)
263 {
264 	struct spdk_blob_store *bs = g_bs;
265 	struct spdk_blob *blob;
266 	struct spdk_blob_opts blob_opts;
267 	spdk_blob_id blobid, blobid2;
268 
269 	ut_spdk_blob_opts_init(&blob_opts);
270 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
271 	poll_threads();
272 	CU_ASSERT(g_bserrno == 0);
273 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
274 	blobid = g_blobid;
275 
276 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
277 	poll_threads();
278 	CU_ASSERT(g_bserrno == 0);
279 	CU_ASSERT(g_blob != NULL);
280 	blob = g_blob;
281 
282 	blobid2 = spdk_blob_get_id(blob);
283 	CU_ASSERT(blobid == blobid2);
284 
285 	/* Try to open file again.  It should return success. */
286 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
287 	poll_threads();
288 	CU_ASSERT(g_bserrno == 0);
289 	CU_ASSERT(blob == g_blob);
290 
291 	spdk_blob_close(blob, blob_op_complete, NULL);
292 	poll_threads();
293 	CU_ASSERT(g_bserrno == 0);
294 
295 	/*
296 	 * Close the file a second time, releasing the second reference.  This
297 	 *  should succeed.
298 	 */
299 	blob = g_blob;
300 	spdk_blob_close(blob, blob_op_complete, NULL);
301 	poll_threads();
302 	CU_ASSERT(g_bserrno == 0);
303 
304 	/*
305 	 * Try to open file again.  It should succeed.  This tests the case
306 	 *  where the file is opened, closed, then re-opened again.
307 	 */
308 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
309 	poll_threads();
310 	CU_ASSERT(g_bserrno == 0);
311 	CU_ASSERT(g_blob != NULL);
312 	blob = g_blob;
313 	spdk_blob_close(blob, blob_op_complete, NULL);
314 	poll_threads();
315 	CU_ASSERT(g_bserrno == 0);
316 
317 	/* Try to open file twice in succession.  This should return the same
318 	 * blob object.
319 	 */
320 	g_blob = NULL;
321 	g_blob2 = NULL;
322 	g_bserrno = -1;
323 	g_bserrno2 = -1;
324 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete2, NULL);
325 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete2, NULL);
326 	poll_threads();
327 	CU_ASSERT(g_bserrno == 0);
328 	CU_ASSERT(g_bserrno2 == 0);
329 	CU_ASSERT(g_blob != NULL);
330 	CU_ASSERT(g_blob2 != NULL);
331 	CU_ASSERT(g_blob == g_blob2);
332 
333 	g_bserrno = -1;
334 	spdk_blob_close(g_blob, blob_op_complete, NULL);
335 	poll_threads();
336 	CU_ASSERT(g_bserrno == 0);
337 
338 	ut_blob_close_and_delete(bs, g_blob);
339 }
340 
341 static void
342 blob_create(void)
343 {
344 	struct spdk_blob_store *bs = g_bs;
345 	struct spdk_blob *blob;
346 	struct spdk_blob_opts opts;
347 	spdk_blob_id blobid;
348 
349 	/* Create blob with 10 clusters */
350 
351 	ut_spdk_blob_opts_init(&opts);
352 	opts.num_clusters = 10;
353 
354 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
355 	poll_threads();
356 	CU_ASSERT(g_bserrno == 0);
357 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
358 	blobid = g_blobid;
359 
360 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
361 	poll_threads();
362 	CU_ASSERT(g_bserrno == 0);
363 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
364 	blob = g_blob;
365 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
366 
367 	spdk_blob_close(blob, blob_op_complete, NULL);
368 	poll_threads();
369 	CU_ASSERT(g_bserrno == 0);
370 
371 	/* Create blob with 0 clusters */
372 
373 	ut_spdk_blob_opts_init(&opts);
374 	opts.num_clusters = 0;
375 
376 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
377 	poll_threads();
378 	CU_ASSERT(g_bserrno == 0);
379 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
380 	blobid = g_blobid;
381 
382 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
383 	poll_threads();
384 	CU_ASSERT(g_bserrno == 0);
385 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
386 	blob = g_blob;
387 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
388 
389 	spdk_blob_close(blob, blob_op_complete, NULL);
390 	poll_threads();
391 	CU_ASSERT(g_bserrno == 0);
392 
393 	/* Create blob with default options (opts == NULL) */
394 
395 	spdk_bs_create_blob_ext(bs, NULL, blob_op_with_id_complete, NULL);
396 	poll_threads();
397 	CU_ASSERT(g_bserrno == 0);
398 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
399 	blobid = g_blobid;
400 
401 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
402 	poll_threads();
403 	CU_ASSERT(g_bserrno == 0);
404 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
405 	blob = g_blob;
406 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
407 
408 	spdk_blob_close(blob, blob_op_complete, NULL);
409 	poll_threads();
410 	CU_ASSERT(g_bserrno == 0);
411 
412 	/* Try to create blob with size larger than blobstore */
413 
414 	ut_spdk_blob_opts_init(&opts);
415 	opts.num_clusters = bs->total_clusters + 1;
416 
417 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
418 	poll_threads();
419 	CU_ASSERT(g_bserrno == -ENOSPC);
420 }
421 
422 static void
423 blob_create_zero_extent(void)
424 {
425 	struct spdk_blob_store *bs = g_bs;
426 	struct spdk_blob *blob;
427 	spdk_blob_id blobid;
428 
429 	/* Create blob with default options (opts == NULL) */
430 	spdk_bs_create_blob_ext(bs, NULL, blob_op_with_id_complete, NULL);
431 	poll_threads();
432 	CU_ASSERT(g_bserrno == 0);
433 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
434 	blobid = g_blobid;
435 
436 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
437 	poll_threads();
438 	CU_ASSERT(g_bserrno == 0);
439 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
440 	blob = g_blob;
441 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
442 	CU_ASSERT(blob->extent_table_found == true);
443 	CU_ASSERT(blob->active.extent_pages_array_size == 0);
444 	CU_ASSERT(blob->active.extent_pages == NULL);
445 
446 	spdk_blob_close(blob, blob_op_complete, NULL);
447 	poll_threads();
448 	CU_ASSERT(g_bserrno == 0);
449 
450 	/* Create blob with NULL internal options  */
451 	bs_create_blob(bs, NULL, NULL, blob_op_with_id_complete, NULL);
452 	poll_threads();
453 	CU_ASSERT(g_bserrno == 0);
454 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
455 	blobid = g_blobid;
456 
457 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
458 	poll_threads();
459 	CU_ASSERT(g_bserrno == 0);
460 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
461 	blob = g_blob;
462 	CU_ASSERT(TAILQ_FIRST(&blob->xattrs_internal) == NULL);
463 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
464 	CU_ASSERT(blob->extent_table_found == true);
465 	CU_ASSERT(blob->active.extent_pages_array_size == 0);
466 	CU_ASSERT(blob->active.extent_pages == NULL);
467 
468 	spdk_blob_close(blob, blob_op_complete, NULL);
469 	poll_threads();
470 	CU_ASSERT(g_bserrno == 0);
471 }
472 
473 /*
474  * Create and delete one blob in a loop over and over again.  This helps ensure
475  * that the internal bit masks tracking used clusters and md_pages are being
476  * tracked correctly.
477  */
478 static void
479 blob_create_loop(void)
480 {
481 	struct spdk_blob_store *bs = g_bs;
482 	struct spdk_blob_opts opts;
483 	uint32_t i, loop_count;
484 
485 	loop_count = 4 * spdk_max(spdk_bit_array_capacity(bs->used_md_pages),
486 				  spdk_bit_pool_capacity(bs->used_clusters));
487 
488 	for (i = 0; i < loop_count; i++) {
489 		ut_spdk_blob_opts_init(&opts);
490 		opts.num_clusters = 1;
491 		g_bserrno = -1;
492 		g_blobid = SPDK_BLOBID_INVALID;
493 		spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
494 		poll_threads();
495 		CU_ASSERT(g_bserrno == 0);
496 		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
497 		spdk_bs_delete_blob(bs, g_blobid, blob_op_complete, NULL);
498 		poll_threads();
499 		CU_ASSERT(g_bserrno == 0);
500 	}
501 }
502 
503 static void
504 blob_create_fail(void)
505 {
506 	struct spdk_blob_store *bs = g_bs;
507 	struct spdk_blob_opts opts;
508 	spdk_blob_id blobid;
509 	uint32_t used_blobids_count = spdk_bit_array_count_set(bs->used_blobids);
510 	uint32_t used_md_pages_count = spdk_bit_array_count_set(bs->used_md_pages);
511 
512 	/* NULL callback */
513 	ut_spdk_blob_opts_init(&opts);
514 	opts.xattrs.names = g_xattr_names;
515 	opts.xattrs.get_value = NULL;
516 	opts.xattrs.count = 1;
517 	opts.xattrs.ctx = &g_ctx;
518 
519 	blobid = spdk_bit_array_find_first_clear(bs->used_md_pages, 0);
520 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
521 	poll_threads();
522 	CU_ASSERT(g_bserrno == -EINVAL);
523 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
524 	CU_ASSERT(spdk_bit_array_count_set(bs->used_blobids) == used_blobids_count);
525 	CU_ASSERT(spdk_bit_array_count_set(bs->used_md_pages) == used_md_pages_count);
526 
527 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
528 	poll_threads();
529 	CU_ASSERT(g_bserrno == -ENOENT);
530 	SPDK_CU_ASSERT_FATAL(g_blob == NULL);
531 
532 	ut_bs_reload(&bs, NULL);
533 	CU_ASSERT(spdk_bit_array_count_set(bs->used_blobids) == used_blobids_count);
534 	CU_ASSERT(spdk_bit_array_count_set(bs->used_md_pages) == used_md_pages_count);
535 
536 	spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL);
537 	poll_threads();
538 	CU_ASSERT(g_blob == NULL);
539 	CU_ASSERT(g_bserrno == -ENOENT);
540 }
541 
542 static void
543 blob_create_internal(void)
544 {
545 	struct spdk_blob_store *bs = g_bs;
546 	struct spdk_blob *blob;
547 	struct spdk_blob_opts opts;
548 	struct spdk_blob_xattr_opts internal_xattrs;
549 	const void *value;
550 	size_t value_len;
551 	spdk_blob_id blobid;
552 	int rc;
553 
554 	/* Create blob with custom xattrs */
555 
556 	ut_spdk_blob_opts_init(&opts);
557 	blob_xattrs_init(&internal_xattrs);
558 	internal_xattrs.count = 3;
559 	internal_xattrs.names = g_xattr_names;
560 	internal_xattrs.get_value = _get_xattr_value;
561 	internal_xattrs.ctx = &g_ctx;
562 
563 	bs_create_blob(bs, &opts, &internal_xattrs, blob_op_with_id_complete, NULL);
564 	poll_threads();
565 	CU_ASSERT(g_bserrno == 0);
566 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
567 	blobid = g_blobid;
568 
569 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
570 	poll_threads();
571 	CU_ASSERT(g_bserrno == 0);
572 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
573 	blob = g_blob;
574 
575 	rc = blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len, true);
576 	CU_ASSERT(rc == 0);
577 	SPDK_CU_ASSERT_FATAL(value != NULL);
578 	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
579 	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);
580 
581 	rc = blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len, true);
582 	CU_ASSERT(rc == 0);
583 	SPDK_CU_ASSERT_FATAL(value != NULL);
584 	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
585 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);
586 
587 	rc = blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len, true);
588 	CU_ASSERT(rc == 0);
589 	SPDK_CU_ASSERT_FATAL(value != NULL);
590 	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
591 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);
592 
593 	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len);
594 	CU_ASSERT(rc != 0);
595 
596 	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len);
597 	CU_ASSERT(rc != 0);
598 
599 	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len);
600 	CU_ASSERT(rc != 0);
601 
602 	spdk_blob_close(blob, blob_op_complete, NULL);
603 	poll_threads();
604 	CU_ASSERT(g_bserrno == 0);
605 
606 	/* Create blob with NULL internal options  */
607 
608 	bs_create_blob(bs, NULL, NULL, blob_op_with_id_complete, NULL);
609 	poll_threads();
610 	CU_ASSERT(g_bserrno == 0);
611 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
612 	blobid = g_blobid;
613 
614 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
615 	poll_threads();
616 	CU_ASSERT(g_bserrno == 0);
617 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
618 	CU_ASSERT(TAILQ_FIRST(&g_blob->xattrs_internal) == NULL);
619 	CU_ASSERT(spdk_blob_get_num_clusters(g_blob) == 0);
620 
621 	blob = g_blob;
622 
623 	spdk_blob_close(blob, blob_op_complete, NULL);
624 	poll_threads();
625 	CU_ASSERT(g_bserrno == 0);
626 }
627 
628 static void
629 blob_thin_provision(void)
630 {
631 	struct spdk_blob_store *bs;
632 	struct spdk_bs_dev *dev;
633 	struct spdk_blob *blob;
634 	struct spdk_blob_opts opts;
635 	struct spdk_bs_opts bs_opts;
636 	spdk_blob_id blobid;
637 
638 	dev = init_dev();
639 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
640 	snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");
641 
642 	/* Initialize a new blob store */
643 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
644 	poll_threads();
645 	CU_ASSERT(g_bserrno == 0);
646 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
647 
648 	bs = g_bs;
649 
650 	/* Create blob with thin provisioning enabled */
651 
652 	ut_spdk_blob_opts_init(&opts);
653 	opts.thin_provision = true;
654 	opts.num_clusters = 10;
655 
656 	blob = ut_blob_create_and_open(bs, &opts);
657 	blobid = spdk_blob_get_id(blob);
658 	CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
659 	/* In thin provisioning with num_clusters is set, if not using the
660 	 * extent table, there is no allocation. If extent table is used,
661 	 * there is related allocation happened. */
662 	if (blob->extent_table_found == true) {
663 		CU_ASSERT(blob->active.extent_pages_array_size > 0);
664 		CU_ASSERT(blob->active.extent_pages != NULL);
665 	} else {
666 		CU_ASSERT(blob->active.extent_pages_array_size == 0);
667 		CU_ASSERT(blob->active.extent_pages == NULL);
668 	}
669 
670 	spdk_blob_close(blob, blob_op_complete, NULL);
671 	CU_ASSERT(g_bserrno == 0);
672 
673 	/* Do not shut down cleanly.  This makes sure that when we load again
674 	 *  and try to recover a valid used_cluster map, that blobstore will
675 	 *  ignore clusters with index 0 since these are unallocated clusters.
676 	 */
677 	ut_bs_dirty_load(&bs, &bs_opts);
678 
679 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
680 	poll_threads();
681 	CU_ASSERT(g_bserrno == 0);
682 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
683 	blob = g_blob;
684 	CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
685 
686 	ut_blob_close_and_delete(bs, blob);
687 
688 	spdk_bs_unload(bs, bs_op_complete, NULL);
689 	poll_threads();
690 	CU_ASSERT(g_bserrno == 0);
691 	g_bs = NULL;
692 }
693 
694 static void
695 blob_snapshot(void)
696 {
697 	struct spdk_blob_store *bs = g_bs;
698 	struct spdk_blob *blob;
699 	struct spdk_blob *snapshot, *snapshot2;
700 	struct spdk_blob_bs_dev *blob_bs_dev;
701 	struct spdk_blob_opts opts;
702 	struct spdk_blob_xattr_opts xattrs;
703 	spdk_blob_id blobid;
704 	spdk_blob_id snapshotid;
705 	spdk_blob_id snapshotid2;
706 	const void *value;
707 	size_t value_len;
708 	int rc;
709 	spdk_blob_id ids[2];
710 	size_t count;
711 
712 	/* Create blob with 10 clusters */
713 	ut_spdk_blob_opts_init(&opts);
714 	opts.num_clusters = 10;
715 
716 	blob = ut_blob_create_and_open(bs, &opts);
717 	blobid = spdk_blob_get_id(blob);
718 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
719 
720 	/* Create snapshot from blob */
721 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 0);
722 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
723 	poll_threads();
724 	CU_ASSERT(g_bserrno == 0);
725 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
726 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
727 	snapshotid = g_blobid;
728 
729 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
730 	poll_threads();
731 	CU_ASSERT(g_bserrno == 0);
732 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
733 	snapshot = g_blob;
734 	CU_ASSERT(snapshot->data_ro == true);
735 	CU_ASSERT(snapshot->md_ro == true);
736 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10);
737 
738 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
739 	CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
740 	CU_ASSERT(spdk_mem_all_zero(blob->active.clusters,
741 				    blob->active.num_clusters * sizeof(blob->active.clusters[0])));
742 
743 	/* Try to create snapshot from clone with xattrs */
744 	xattrs.names = g_xattr_names;
745 	xattrs.get_value = _get_xattr_value;
746 	xattrs.count = 3;
747 	xattrs.ctx = &g_ctx;
748 	spdk_bs_create_snapshot(bs, blobid, &xattrs, blob_op_with_id_complete, NULL);
749 	poll_threads();
750 	CU_ASSERT(g_bserrno == 0);
751 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
752 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 2);
753 	snapshotid2 = g_blobid;
754 
755 	spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL);
756 	CU_ASSERT(g_bserrno == 0);
757 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
758 	snapshot2 = g_blob;
759 	CU_ASSERT(snapshot2->data_ro == true);
760 	CU_ASSERT(snapshot2->md_ro == true);
761 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot2) == 10);
762 
763 	/* Confirm that blob is backed by snapshot2 and snapshot2 is backed by snapshot */
764 	CU_ASSERT(snapshot->back_bs_dev == NULL);
765 	SPDK_CU_ASSERT_FATAL(blob->back_bs_dev != NULL);
766 	SPDK_CU_ASSERT_FATAL(snapshot2->back_bs_dev != NULL);
767 
768 	blob_bs_dev = (struct spdk_blob_bs_dev *)blob->back_bs_dev;
769 	CU_ASSERT(blob_bs_dev->blob == snapshot2);
770 
771 	blob_bs_dev = (struct spdk_blob_bs_dev *)snapshot2->back_bs_dev;
772 	CU_ASSERT(blob_bs_dev->blob == snapshot);
773 
774 	rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[0], &value, &value_len);
775 	CU_ASSERT(rc == 0);
776 	SPDK_CU_ASSERT_FATAL(value != NULL);
777 	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
778 	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);
779 
780 	rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[1], &value, &value_len);
781 	CU_ASSERT(rc == 0);
782 	SPDK_CU_ASSERT_FATAL(value != NULL);
783 	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
784 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);
785 
786 	rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[2], &value, &value_len);
787 	CU_ASSERT(rc == 0);
788 	SPDK_CU_ASSERT_FATAL(value != NULL);
789 	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
790 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);
791 
792 	/* Confirm that blob is clone of snapshot2, and snapshot2 is clone of snapshot */
793 	count = 2;
794 	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0);
795 	CU_ASSERT(count == 1);
796 	CU_ASSERT(ids[0] == blobid);
797 
798 	count = 2;
799 	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);
800 	CU_ASSERT(count == 1);
801 	CU_ASSERT(ids[0] == snapshotid2);
802 
803 	/* Try to create snapshot from snapshot */
804 	spdk_bs_create_snapshot(bs, snapshotid, NULL, blob_op_with_id_complete, NULL);
805 	poll_threads();
806 	CU_ASSERT(g_bserrno == -EINVAL);
807 	CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID);
808 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 2);
809 
810 	/* Delete blob and confirm that it is no longer on snapshot2 clone list */
811 	ut_blob_close_and_delete(bs, blob);
812 	count = 2;
813 	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0);
814 	CU_ASSERT(count == 0);
815 
816 	/* Delete snapshot2 and confirm that it is no longer on snapshot clone list */
817 	ut_blob_close_and_delete(bs, snapshot2);
818 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
819 	count = 2;
820 	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0);
821 	CU_ASSERT(count == 0);
822 
823 	ut_blob_close_and_delete(bs, snapshot);
824 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 0);
825 }
826 
827 static void
828 blob_snapshot_freeze_io(void)
829 {
830 	struct spdk_io_channel *channel;
831 	struct spdk_bs_channel *bs_channel;
832 	struct spdk_blob_store *bs = g_bs;
833 	struct spdk_blob *blob;
834 	struct spdk_blob_opts opts;
835 	spdk_blob_id blobid;
836 	uint32_t num_of_pages = 10;
837 	uint8_t payload_read[num_of_pages * SPDK_BS_PAGE_SIZE];
838 	uint8_t payload_write[num_of_pages * SPDK_BS_PAGE_SIZE];
839 	uint8_t payload_zero[num_of_pages * SPDK_BS_PAGE_SIZE];
840 
841 	memset(payload_write, 0xE5, sizeof(payload_write));
842 	memset(payload_read, 0x00, sizeof(payload_read));
843 	memset(payload_zero, 0x00, sizeof(payload_zero));
844 
845 	/* Test freeze I/O during snapshot */
846 	channel = spdk_bs_alloc_io_channel(bs);
847 	bs_channel = spdk_io_channel_get_ctx(channel);
848 
849 	/* Create blob with 10 clusters */
850 	ut_spdk_blob_opts_init(&opts);
851 	opts.num_clusters = 10;
852 	opts.thin_provision = false;
853 
854 	blob = ut_blob_create_and_open(bs, &opts);
855 	blobid = spdk_blob_get_id(blob);
856 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
857 
858 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
859 
860 	/* This is implementation specific.
861 	 * Flag 'frozen_io' is set in _spdk_bs_snapshot_freeze_cpl callback.
862 	 * Four async I/O operations happen before that. */
863 	poll_thread_times(0, 5);
864 
865 	CU_ASSERT(TAILQ_EMPTY(&bs_channel->queued_io));
866 
867 	/* Blob I/O should be frozen here */
868 	CU_ASSERT(blob->frozen_refcnt == 1);
869 
870 	/* Write to the blob */
871 	spdk_blob_io_write(blob, channel, payload_write, 0, num_of_pages, blob_op_complete, NULL);
872 
873 	/* Verify that I/O is queued */
874 	CU_ASSERT(!TAILQ_EMPTY(&bs_channel->queued_io));
875 	/* Verify that payload is not written to disk, at this point the blobs already switched */
876 	CU_ASSERT(blob->active.clusters[0] == 0);
877 
878 	/* Finish all operations including spdk_bs_create_snapshot */
879 	poll_threads();
880 
881 	/* Verify snapshot */
882 	CU_ASSERT(g_bserrno == 0);
883 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
884 
885 	/* Verify that blob has unset frozen_io */
886 	CU_ASSERT(blob->frozen_refcnt == 0);
887 
888 	/* Verify that postponed I/O completed successfully by comparing payload */
889 	spdk_blob_io_read(blob, channel, payload_read, 0, num_of_pages, blob_op_complete, NULL);
890 	poll_threads();
891 	CU_ASSERT(g_bserrno == 0);
892 	CU_ASSERT(memcmp(payload_write, payload_read, num_of_pages * SPDK_BS_PAGE_SIZE) == 0);
893 
894 	spdk_bs_free_io_channel(channel);
895 	poll_threads();
896 
897 	ut_blob_close_and_delete(bs, blob);
898 }
899 
900 static void
901 blob_clone(void)
902 {
903 	struct spdk_blob_store *bs = g_bs;
904 	struct spdk_blob_opts opts;
905 	struct spdk_blob *blob, *snapshot, *clone;
906 	spdk_blob_id blobid, cloneid, snapshotid;
907 	struct spdk_blob_xattr_opts xattrs;
908 	const void *value;
909 	size_t value_len;
910 	int rc;
911 
912 	/* Create blob with 10 clusters */
913 
914 	ut_spdk_blob_opts_init(&opts);
915 	opts.num_clusters = 10;
916 
917 	blob = ut_blob_create_and_open(bs, &opts);
918 	blobid = spdk_blob_get_id(blob);
919 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
920 
921 	/* Create snapshot */
922 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
923 	poll_threads();
924 	CU_ASSERT(g_bserrno == 0);
925 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
926 	snapshotid = g_blobid;
927 
928 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
929 	poll_threads();
930 	CU_ASSERT(g_bserrno == 0);
931 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
932 	snapshot = g_blob;
933 	CU_ASSERT(snapshot->data_ro == true);
934 	CU_ASSERT(snapshot->md_ro == true);
935 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10);
936 
937 	spdk_blob_close(snapshot, blob_op_complete, NULL);
938 	poll_threads();
939 	CU_ASSERT(g_bserrno == 0);
940 
941 	/* Create clone from snapshot with xattrs */
942 	xattrs.names = g_xattr_names;
943 	xattrs.get_value = _get_xattr_value;
944 	xattrs.count = 3;
945 	xattrs.ctx = &g_ctx;
946 
947 	spdk_bs_create_clone(bs, snapshotid, &xattrs, blob_op_with_id_complete, NULL);
948 	poll_threads();
949 	CU_ASSERT(g_bserrno == 0);
950 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
951 	cloneid = g_blobid;
952 
953 	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
954 	poll_threads();
955 	CU_ASSERT(g_bserrno == 0);
956 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
957 	clone = g_blob;
958 	CU_ASSERT(clone->data_ro == false);
959 	CU_ASSERT(clone->md_ro == false);
960 	CU_ASSERT(spdk_blob_get_num_clusters(clone) == 10);
961 
962 	rc = spdk_blob_get_xattr_value(clone, g_xattr_names[0], &value, &value_len);
963 	CU_ASSERT(rc == 0);
964 	SPDK_CU_ASSERT_FATAL(value != NULL);
965 	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
966 	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);
967 
968 	rc = spdk_blob_get_xattr_value(clone, g_xattr_names[1], &value, &value_len);
969 	CU_ASSERT(rc == 0);
970 	SPDK_CU_ASSERT_FATAL(value != NULL);
971 	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
972 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);
973 
974 	rc = spdk_blob_get_xattr_value(clone, g_xattr_names[2], &value, &value_len);
975 	CU_ASSERT(rc == 0);
976 	SPDK_CU_ASSERT_FATAL(value != NULL);
977 	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
978 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);
979 
980 
981 	spdk_blob_close(clone, blob_op_complete, NULL);
982 	poll_threads();
983 	CU_ASSERT(g_bserrno == 0);
984 
985 	/* Try to create clone from not read only blob */
986 	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
987 	poll_threads();
988 	CU_ASSERT(g_bserrno == -EINVAL);
989 	CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID);
990 
991 	/* Mark blob as read only */
992 	spdk_blob_set_read_only(blob);
993 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
994 	poll_threads();
995 	CU_ASSERT(g_bserrno == 0);
996 
997 	/* Create clone from read only blob */
998 	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
999 	poll_threads();
1000 	CU_ASSERT(g_bserrno == 0);
1001 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
1002 	cloneid = g_blobid;
1003 
1004 	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
1005 	poll_threads();
1006 	CU_ASSERT(g_bserrno == 0);
1007 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
1008 	clone = g_blob;
1009 	CU_ASSERT(clone->data_ro == false);
1010 	CU_ASSERT(clone->md_ro == false);
1011 	CU_ASSERT(spdk_blob_get_num_clusters(clone) == 10);
1012 
1013 	ut_blob_close_and_delete(bs, clone);
1014 	ut_blob_close_and_delete(bs, blob);
1015 }
1016 
1017 static void
1018 _blob_inflate(bool decouple_parent)
1019 {
1020 	struct spdk_blob_store *bs = g_bs;
1021 	struct spdk_blob_opts opts;
1022 	struct spdk_blob *blob, *snapshot;
1023 	spdk_blob_id blobid, snapshotid;
1024 	struct spdk_io_channel *channel;
1025 	uint64_t free_clusters;
1026 
1027 	channel = spdk_bs_alloc_io_channel(bs);
1028 	SPDK_CU_ASSERT_FATAL(channel != NULL);
1029 
1030 	/* Create blob with 10 clusters */
1031 
1032 	ut_spdk_blob_opts_init(&opts);
1033 	opts.num_clusters = 10;
1034 	opts.thin_provision = true;
1035 
1036 	blob = ut_blob_create_and_open(bs, &opts);
1037 	blobid = spdk_blob_get_id(blob);
1038 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
1039 	CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == true);
1040 
1041 	/* 1) Blob with no parent */
1042 	if (decouple_parent) {
1043 		/* Decouple parent of blob with no parent (should fail) */
1044 		spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
1045 		poll_threads();
1046 		CU_ASSERT(g_bserrno != 0);
1047 	} else {
1048 		/* Inflate of thin blob with no parent should made it thick */
1049 		spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
1050 		poll_threads();
1051 		CU_ASSERT(g_bserrno == 0);
1052 		CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == false);
1053 	}
1054 
1055 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
1056 	poll_threads();
1057 	CU_ASSERT(g_bserrno == 0);
1058 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
1059 	snapshotid = g_blobid;
1060 
1061 	CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == true);
1062 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
1063 
1064 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
1065 	poll_threads();
1066 	CU_ASSERT(g_bserrno == 0);
1067 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
1068 	snapshot = g_blob;
1069 	CU_ASSERT(snapshot->data_ro == true);
1070 	CU_ASSERT(snapshot->md_ro == true);
1071 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10);
1072 
1073 	spdk_blob_close(snapshot, blob_op_complete, NULL);
1074 	poll_threads();
1075 	CU_ASSERT(g_bserrno == 0);
1076 
1077 	free_clusters = spdk_bs_free_cluster_count(bs);
1078 
1079 	/* 2) Blob with parent */
1080 	if (!decouple_parent) {
1081 		/* Do full blob inflation */
1082 		spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
1083 		poll_threads();
1084 		CU_ASSERT(g_bserrno == 0);
1085 		/* all 10 clusters should be allocated */
1086 		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 10);
1087 	} else {
1088 		/* Decouple parent of blob */
1089 		spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
1090 		poll_threads();
1091 		CU_ASSERT(g_bserrno == 0);
1092 		/* when only parent is removed, none of the clusters should be allocated */
1093 		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters);
1094 	}
1095 
1096 	/* Now, it should be possible to delete snapshot */
1097 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
1098 	poll_threads();
1099 	CU_ASSERT(g_bserrno == 0);
1100 
1101 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
1102 	CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == decouple_parent);
1103 
1104 	spdk_bs_free_io_channel(channel);
1105 	poll_threads();
1106 
1107 	ut_blob_close_and_delete(bs, blob);
1108 }
1109 
1110 static void
1111 blob_inflate(void)
1112 {
1113 	_blob_inflate(false);
1114 	_blob_inflate(true);
1115 }
1116 
1117 static void
1118 blob_delete(void)
1119 {
1120 	struct spdk_blob_store *bs = g_bs;
1121 	struct spdk_blob_opts blob_opts;
1122 	spdk_blob_id blobid;
1123 
1124 	/* Create a blob and then delete it. */
1125 	ut_spdk_blob_opts_init(&blob_opts);
1126 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
1127 	poll_threads();
1128 	CU_ASSERT(g_bserrno == 0);
1129 	CU_ASSERT(g_blobid > 0);
1130 	blobid = g_blobid;
1131 
1132 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
1133 	poll_threads();
1134 	CU_ASSERT(g_bserrno == 0);
1135 
1136 	/* Try to open the blob */
1137 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
1138 	poll_threads();
1139 	CU_ASSERT(g_bserrno == -ENOENT);
1140 }
1141 
1142 static void
1143 blob_resize_test(void)
1144 {
1145 	struct spdk_blob_store *bs = g_bs;
1146 	struct spdk_blob *blob;
1147 	uint64_t free_clusters;
1148 
1149 	free_clusters = spdk_bs_free_cluster_count(bs);
1150 
1151 	blob = ut_blob_create_and_open(bs, NULL);
1152 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
1153 
1154 	/* Confirm that resize fails if blob is marked read-only. */
1155 	blob->md_ro = true;
1156 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
1157 	poll_threads();
1158 	CU_ASSERT(g_bserrno == -EPERM);
1159 	blob->md_ro = false;
1160 
1161 	/* The blob started at 0 clusters. Resize it to be 5. */
1162 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
1163 	poll_threads();
1164 	CU_ASSERT(g_bserrno == 0);
1165 	CU_ASSERT((free_clusters - 5) == spdk_bs_free_cluster_count(bs));
1166 
1167 	/* Shrink the blob to 3 clusters. This will not actually release
1168 	 * the old clusters until the blob is synced.
1169 	 */
1170 	spdk_blob_resize(blob, 3, blob_op_complete, NULL);
1171 	poll_threads();
1172 	CU_ASSERT(g_bserrno == 0);
1173 	/* Verify there are still 5 clusters in use */
1174 	CU_ASSERT((free_clusters - 5) == spdk_bs_free_cluster_count(bs));
1175 
1176 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
1177 	poll_threads();
1178 	CU_ASSERT(g_bserrno == 0);
1179 	/* Now there are only 3 clusters in use */
1180 	CU_ASSERT((free_clusters - 3) == spdk_bs_free_cluster_count(bs));
1181 
1182 	/* Resize the blob to be 10 clusters. Growth takes effect immediately. */
1183 	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
1184 	poll_threads();
1185 	CU_ASSERT(g_bserrno == 0);
1186 	CU_ASSERT((free_clusters - 10) == spdk_bs_free_cluster_count(bs));
1187 
1188 	/* Try to resize the blob to size larger than blobstore. */
1189 	spdk_blob_resize(blob, bs->total_clusters + 1, blob_op_complete, NULL);
1190 	poll_threads();
1191 	CU_ASSERT(g_bserrno == -ENOSPC);
1192 
1193 	ut_blob_close_and_delete(bs, blob);
1194 }
1195 
1196 static void
1197 blob_read_only(void)
1198 {
1199 	struct spdk_blob_store *bs;
1200 	struct spdk_bs_dev *dev;
1201 	struct spdk_blob *blob;
1202 	struct spdk_bs_opts opts;
1203 	spdk_blob_id blobid;
1204 	int rc;
1205 
1206 	dev = init_dev();
1207 	spdk_bs_opts_init(&opts, sizeof(opts));
1208 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
1209 
1210 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
1211 	poll_threads();
1212 	CU_ASSERT(g_bserrno == 0);
1213 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
1214 	bs = g_bs;
1215 
1216 	blob = ut_blob_create_and_open(bs, NULL);
1217 	blobid = spdk_blob_get_id(blob);
1218 
1219 	rc = spdk_blob_set_read_only(blob);
1220 	CU_ASSERT(rc == 0);
1221 
1222 	CU_ASSERT(blob->data_ro == false);
1223 	CU_ASSERT(blob->md_ro == false);
1224 
1225 	spdk_blob_sync_md(blob, bs_op_complete, NULL);
1226 	poll_threads();
1227 
1228 	CU_ASSERT(blob->data_ro == true);
1229 	CU_ASSERT(blob->md_ro == true);
1230 	CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);
1231 
1232 	spdk_blob_close(blob, blob_op_complete, NULL);
1233 	poll_threads();
1234 	CU_ASSERT(g_bserrno == 0);
1235 
1236 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
1237 	poll_threads();
1238 	CU_ASSERT(g_bserrno == 0);
1239 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
1240 	blob = g_blob;
1241 
1242 	CU_ASSERT(blob->data_ro == true);
1243 	CU_ASSERT(blob->md_ro == true);
1244 	CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);
1245 
1246 	spdk_blob_close(blob, blob_op_complete, NULL);
1247 	poll_threads();
1248 	CU_ASSERT(g_bserrno == 0);
1249 
1250 	ut_bs_reload(&bs, &opts);
1251 
1252 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
1253 	poll_threads();
1254 	CU_ASSERT(g_bserrno == 0);
1255 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
1256 	blob = g_blob;
1257 
1258 	CU_ASSERT(blob->data_ro == true);
1259 	CU_ASSERT(blob->md_ro == true);
1260 	CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);
1261 
1262 	ut_blob_close_and_delete(bs, blob);
1263 
1264 	spdk_bs_unload(bs, bs_op_complete, NULL);
1265 	poll_threads();
1266 	CU_ASSERT(g_bserrno == 0);
1267 }
1268 
1269 static void
1270 channel_ops(void)
1271 {
1272 	struct spdk_blob_store *bs = g_bs;
1273 	struct spdk_io_channel *channel;
1274 
1275 	channel = spdk_bs_alloc_io_channel(bs);
1276 	CU_ASSERT(channel != NULL);
1277 
1278 	spdk_bs_free_io_channel(channel);
1279 	poll_threads();
1280 }
1281 
1282 static void
1283 blob_write(void)
1284 {
1285 	struct spdk_blob_store *bs = g_bs;
1286 	struct spdk_blob *blob = g_blob;
1287 	struct spdk_io_channel *channel;
1288 	uint64_t pages_per_cluster;
1289 	uint8_t payload[10 * 4096];
1290 
1291 	pages_per_cluster = spdk_bs_get_cluster_size(bs) / spdk_bs_get_page_size(bs);
1292 
1293 	channel = spdk_bs_alloc_io_channel(bs);
1294 	CU_ASSERT(channel != NULL);
1295 
1296 	/* Write to a blob with 0 size */
1297 	spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL);
1298 	poll_threads();
1299 	CU_ASSERT(g_bserrno == -EINVAL);
1300 
1301 	/* Resize the blob */
1302 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
1303 	poll_threads();
1304 	CU_ASSERT(g_bserrno == 0);
1305 
1306 	/* Confirm that write fails if blob is marked read-only. */
1307 	blob->data_ro = true;
1308 	spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL);
1309 	poll_threads();
1310 	CU_ASSERT(g_bserrno == -EPERM);
1311 	blob->data_ro = false;
1312 
1313 	/* Write to the blob */
1314 	spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL);
1315 	poll_threads();
1316 	CU_ASSERT(g_bserrno == 0);
1317 
1318 	/* Write starting beyond the end */
1319 	spdk_blob_io_write(blob, channel, payload, 5 * pages_per_cluster, 1, blob_op_complete,
1320 			   NULL);
1321 	poll_threads();
1322 	CU_ASSERT(g_bserrno == -EINVAL);
1323 
1324 	/* Write starting at a valid location but going off the end */
1325 	spdk_blob_io_write(blob, channel, payload, 4 * pages_per_cluster, pages_per_cluster + 1,
1326 			   blob_op_complete, NULL);
1327 	poll_threads();
1328 	CU_ASSERT(g_bserrno == -EINVAL);
1329 
1330 	spdk_bs_free_io_channel(channel);
1331 	poll_threads();
1332 }
1333 
1334 static void
1335 blob_read(void)
1336 {
1337 	struct spdk_blob_store *bs = g_bs;
1338 	struct spdk_blob *blob = g_blob;
1339 	struct spdk_io_channel *channel;
1340 	uint64_t pages_per_cluster;
1341 	uint8_t payload[10 * 4096];
1342 
1343 	pages_per_cluster = spdk_bs_get_cluster_size(bs) / spdk_bs_get_page_size(bs);
1344 
1345 	channel = spdk_bs_alloc_io_channel(bs);
1346 	CU_ASSERT(channel != NULL);
1347 
1348 	/* Read from a blob with 0 size */
1349 	spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL);
1350 	poll_threads();
1351 	CU_ASSERT(g_bserrno == -EINVAL);
1352 
1353 	/* Resize the blob */
1354 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
1355 	poll_threads();
1356 	CU_ASSERT(g_bserrno == 0);
1357 
1358 	/* Confirm that read passes if blob is marked read-only. */
1359 	blob->data_ro = true;
1360 	spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL);
1361 	poll_threads();
1362 	CU_ASSERT(g_bserrno == 0);
1363 	blob->data_ro = false;
1364 
1365 	/* Read from the blob */
1366 	spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL);
1367 	poll_threads();
1368 	CU_ASSERT(g_bserrno == 0);
1369 
1370 	/* Read starting beyond the end */
1371 	spdk_blob_io_read(blob, channel, payload, 5 * pages_per_cluster, 1, blob_op_complete,
1372 			  NULL);
1373 	poll_threads();
1374 	CU_ASSERT(g_bserrno == -EINVAL);
1375 
1376 	/* Read starting at a valid location but going off the end */
1377 	spdk_blob_io_read(blob, channel, payload, 4 * pages_per_cluster, pages_per_cluster + 1,
1378 			  blob_op_complete, NULL);
1379 	poll_threads();
1380 	CU_ASSERT(g_bserrno == -EINVAL);
1381 
1382 	spdk_bs_free_io_channel(channel);
1383 	poll_threads();
1384 }
1385 
1386 static void
1387 blob_rw_verify(void)
1388 {
1389 	struct spdk_blob_store *bs = g_bs;
1390 	struct spdk_blob *blob = g_blob;
1391 	struct spdk_io_channel *channel;
1392 	uint8_t payload_read[10 * 4096];
1393 	uint8_t payload_write[10 * 4096];
1394 
1395 	channel = spdk_bs_alloc_io_channel(bs);
1396 	CU_ASSERT(channel != NULL);
1397 
1398 	spdk_blob_resize(blob, 32, blob_op_complete, NULL);
1399 	poll_threads();
1400 	CU_ASSERT(g_bserrno == 0);
1401 
1402 	memset(payload_write, 0xE5, sizeof(payload_write));
1403 	spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
1404 	poll_threads();
1405 	CU_ASSERT(g_bserrno == 0);
1406 
1407 	memset(payload_read, 0x00, sizeof(payload_read));
1408 	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
1409 	poll_threads();
1410 	CU_ASSERT(g_bserrno == 0);
1411 	CU_ASSERT(memcmp(payload_write, payload_read, 4 * 4096) == 0);
1412 
1413 	spdk_bs_free_io_channel(channel);
1414 	poll_threads();
1415 }
1416 
1417 static void
1418 blob_rw_verify_iov(void)
1419 {
1420 	struct spdk_blob_store *bs = g_bs;
1421 	struct spdk_blob *blob;
1422 	struct spdk_io_channel *channel;
1423 	uint8_t payload_read[10 * 4096];
1424 	uint8_t payload_write[10 * 4096];
1425 	struct iovec iov_read[3];
1426 	struct iovec iov_write[3];
1427 	void *buf;
1428 
1429 	channel = spdk_bs_alloc_io_channel(bs);
1430 	CU_ASSERT(channel != NULL);
1431 
1432 	blob = ut_blob_create_and_open(bs, NULL);
1433 
1434 	spdk_blob_resize(blob, 2, blob_op_complete, NULL);
1435 	poll_threads();
1436 	CU_ASSERT(g_bserrno == 0);
1437 
1438 	/*
1439 	 * Manually adjust the offset of the blob's second cluster.  This allows
1440 	 *  us to make sure that the readv/write code correctly accounts for I/O
1441 	 *  that cross cluster boundaries.  Start by asserting that the allocated
1442 	 *  clusters are where we expect before modifying the second cluster.
1443 	 */
1444 	CU_ASSERT(blob->active.clusters[0] == 1 * 256);
1445 	CU_ASSERT(blob->active.clusters[1] == 2 * 256);
1446 	blob->active.clusters[1] = 3 * 256;
1447 
1448 	memset(payload_write, 0xE5, sizeof(payload_write));
1449 	iov_write[0].iov_base = payload_write;
1450 	iov_write[0].iov_len = 1 * 4096;
1451 	iov_write[1].iov_base = payload_write + 1 * 4096;
1452 	iov_write[1].iov_len = 5 * 4096;
1453 	iov_write[2].iov_base = payload_write + 6 * 4096;
1454 	iov_write[2].iov_len = 4 * 4096;
1455 	/*
1456 	 * Choose a page offset just before the cluster boundary.  The first 6 pages of payload
1457 	 *  will get written to the first cluster, the last 4 to the second cluster.
1458 	 */
1459 	spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
1460 	poll_threads();
1461 	CU_ASSERT(g_bserrno == 0);
1462 
1463 	memset(payload_read, 0xAA, sizeof(payload_read));
1464 	iov_read[0].iov_base = payload_read;
1465 	iov_read[0].iov_len = 3 * 4096;
1466 	iov_read[1].iov_base = payload_read + 3 * 4096;
1467 	iov_read[1].iov_len = 4 * 4096;
1468 	iov_read[2].iov_base = payload_read + 7 * 4096;
1469 	iov_read[2].iov_len = 3 * 4096;
1470 	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
1471 	poll_threads();
1472 	CU_ASSERT(g_bserrno == 0);
1473 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
1474 
1475 	buf = calloc(1, 256 * 4096);
1476 	SPDK_CU_ASSERT_FATAL(buf != NULL);
1477 	/* Check that cluster 2 on "disk" was not modified. */
1478 	CU_ASSERT(memcmp(buf, &g_dev_buffer[512 * 4096], 256 * 4096) == 0);
1479 	free(buf);
1480 
1481 	spdk_blob_close(blob, blob_op_complete, NULL);
1482 	poll_threads();
1483 	CU_ASSERT(g_bserrno == 0);
1484 
1485 	spdk_bs_free_io_channel(channel);
1486 	poll_threads();
1487 }
1488 
1489 static uint32_t
1490 bs_channel_get_req_count(struct spdk_io_channel *_channel)
1491 {
1492 	struct spdk_bs_channel *channel = spdk_io_channel_get_ctx(_channel);
1493 	struct spdk_bs_request_set *set;
1494 	uint32_t count = 0;
1495 
1496 	TAILQ_FOREACH(set, &channel->reqs, link) {
1497 		count++;
1498 	}
1499 
1500 	return count;
1501 }
1502 
1503 static void
1504 blob_rw_verify_iov_nomem(void)
1505 {
1506 	struct spdk_blob_store *bs = g_bs;
1507 	struct spdk_blob *blob = g_blob;
1508 	struct spdk_io_channel *channel;
1509 	uint8_t payload_write[10 * 4096];
1510 	struct iovec iov_write[3];
1511 	uint32_t req_count;
1512 
1513 	channel = spdk_bs_alloc_io_channel(bs);
1514 	CU_ASSERT(channel != NULL);
1515 
1516 	spdk_blob_resize(blob, 2, blob_op_complete, NULL);
1517 	poll_threads();
1518 	CU_ASSERT(g_bserrno == 0);
1519 
1520 	/*
1521 	 * Choose a page offset just before the cluster boundary.  The first 6 pages of payload
1522 	 *  will get written to the first cluster, the last 4 to the second cluster.
1523 	 */
1524 	iov_write[0].iov_base = payload_write;
1525 	iov_write[0].iov_len = 1 * 4096;
1526 	iov_write[1].iov_base = payload_write + 1 * 4096;
1527 	iov_write[1].iov_len = 5 * 4096;
1528 	iov_write[2].iov_base = payload_write + 6 * 4096;
1529 	iov_write[2].iov_len = 4 * 4096;
1530 	MOCK_SET(calloc, NULL);
1531 	req_count = bs_channel_get_req_count(channel);
1532 	spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
1533 	poll_threads();
1534 	CU_ASSERT(g_bserrno = -ENOMEM);
1535 	CU_ASSERT(req_count == bs_channel_get_req_count(channel));
1536 	MOCK_CLEAR(calloc);
1537 
1538 	spdk_bs_free_io_channel(channel);
1539 	poll_threads();
1540 }
1541 
1542 static void
1543 blob_rw_iov_read_only(void)
1544 {
1545 	struct spdk_blob_store *bs = g_bs;
1546 	struct spdk_blob *blob = g_blob;
1547 	struct spdk_io_channel *channel;
1548 	uint8_t payload_read[4096];
1549 	uint8_t payload_write[4096];
1550 	struct iovec iov_read;
1551 	struct iovec iov_write;
1552 
1553 	channel = spdk_bs_alloc_io_channel(bs);
1554 	CU_ASSERT(channel != NULL);
1555 
1556 	spdk_blob_resize(blob, 2, blob_op_complete, NULL);
1557 	poll_threads();
1558 	CU_ASSERT(g_bserrno == 0);
1559 
1560 	/* Verify that writev failed if read_only flag is set. */
1561 	blob->data_ro = true;
1562 	iov_write.iov_base = payload_write;
1563 	iov_write.iov_len = sizeof(payload_write);
1564 	spdk_blob_io_writev(blob, channel, &iov_write, 1, 0, 1, blob_op_complete, NULL);
1565 	poll_threads();
1566 	CU_ASSERT(g_bserrno == -EPERM);
1567 
1568 	/* Verify that reads pass if data_ro flag is set. */
1569 	iov_read.iov_base = payload_read;
1570 	iov_read.iov_len = sizeof(payload_read);
1571 	spdk_blob_io_readv(blob, channel, &iov_read, 1, 0, 1, blob_op_complete, NULL);
1572 	poll_threads();
1573 	CU_ASSERT(g_bserrno == 0);
1574 
1575 	spdk_bs_free_io_channel(channel);
1576 	poll_threads();
1577 }
1578 
1579 static void
1580 _blob_io_read_no_split(struct spdk_blob *blob, struct spdk_io_channel *channel,
1581 		       uint8_t *payload, uint64_t offset, uint64_t length,
1582 		       spdk_blob_op_complete cb_fn, void *cb_arg)
1583 {
1584 	uint64_t i;
1585 	uint8_t *buf;
1586 	uint64_t page_size = spdk_bs_get_page_size(blob->bs);
1587 
1588 	/* To be sure that operation is NOT splitted, read one page at the time */
1589 	buf = payload;
1590 	for (i = 0; i < length; i++) {
1591 		spdk_blob_io_read(blob, channel, buf, i + offset, 1, blob_op_complete, NULL);
1592 		poll_threads();
1593 		if (g_bserrno != 0) {
1594 			/* Pass the error code up */
1595 			break;
1596 		}
1597 		buf += page_size;
1598 	}
1599 
1600 	cb_fn(cb_arg, g_bserrno);
1601 }
1602 
1603 static void
1604 _blob_io_write_no_split(struct spdk_blob *blob, struct spdk_io_channel *channel,
1605 			uint8_t *payload, uint64_t offset, uint64_t length,
1606 			spdk_blob_op_complete cb_fn, void *cb_arg)
1607 {
1608 	uint64_t i;
1609 	uint8_t *buf;
1610 	uint64_t page_size = spdk_bs_get_page_size(blob->bs);
1611 
1612 	/* To be sure that operation is NOT splitted, write one page at the time */
1613 	buf = payload;
1614 	for (i = 0; i < length; i++) {
1615 		spdk_blob_io_write(blob, channel, buf, i + offset, 1, blob_op_complete, NULL);
1616 		poll_threads();
1617 		if (g_bserrno != 0) {
1618 			/* Pass the error code up */
1619 			break;
1620 		}
1621 		buf += page_size;
1622 	}
1623 
1624 	cb_fn(cb_arg, g_bserrno);
1625 }
1626 
1627 static void
1628 blob_operation_split_rw(void)
1629 {
1630 	struct spdk_blob_store *bs = g_bs;
1631 	struct spdk_blob *blob;
1632 	struct spdk_io_channel *channel;
1633 	struct spdk_blob_opts opts;
1634 	uint64_t cluster_size;
1635 
1636 	uint64_t payload_size;
1637 	uint8_t *payload_read;
1638 	uint8_t *payload_write;
1639 	uint8_t *payload_pattern;
1640 
1641 	uint64_t page_size;
1642 	uint64_t pages_per_cluster;
1643 	uint64_t pages_per_payload;
1644 
1645 	uint64_t i;
1646 
1647 	cluster_size = spdk_bs_get_cluster_size(bs);
1648 	page_size = spdk_bs_get_page_size(bs);
1649 	pages_per_cluster = cluster_size / page_size;
1650 	pages_per_payload = pages_per_cluster * 5;
1651 	payload_size = cluster_size * 5;
1652 
1653 	payload_read = malloc(payload_size);
1654 	SPDK_CU_ASSERT_FATAL(payload_read != NULL);
1655 
1656 	payload_write = malloc(payload_size);
1657 	SPDK_CU_ASSERT_FATAL(payload_write != NULL);
1658 
1659 	payload_pattern = malloc(payload_size);
1660 	SPDK_CU_ASSERT_FATAL(payload_pattern != NULL);
1661 
1662 	/* Prepare random pattern to write */
1663 	memset(payload_pattern, 0xFF, payload_size);
1664 	for (i = 0; i < pages_per_payload; i++) {
1665 		*((uint64_t *)(payload_pattern + page_size * i)) = (i + 1);
1666 	}
1667 
1668 	channel = spdk_bs_alloc_io_channel(bs);
1669 	SPDK_CU_ASSERT_FATAL(channel != NULL);
1670 
1671 	/* Create blob */
1672 	ut_spdk_blob_opts_init(&opts);
1673 	opts.thin_provision = false;
1674 	opts.num_clusters = 5;
1675 
1676 	blob = ut_blob_create_and_open(bs, &opts);
1677 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
1678 
1679 	/* Initial read should return zeroed payload */
1680 	memset(payload_read, 0xFF, payload_size);
1681 	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
1682 	poll_threads();
1683 	CU_ASSERT(g_bserrno == 0);
1684 	CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size));
1685 
1686 	/* Fill whole blob except last page */
1687 	spdk_blob_io_write(blob, channel, payload_pattern, 0, pages_per_payload - 1,
1688 			   blob_op_complete, NULL);
1689 	poll_threads();
1690 	CU_ASSERT(g_bserrno == 0);
1691 
1692 	/* Write last page with a pattern */
1693 	spdk_blob_io_write(blob, channel, payload_pattern, pages_per_payload - 1, 1,
1694 			   blob_op_complete, NULL);
1695 	poll_threads();
1696 	CU_ASSERT(g_bserrno == 0);
1697 
1698 	/* Read whole blob and check consistency */
1699 	memset(payload_read, 0xFF, payload_size);
1700 	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
1701 	poll_threads();
1702 	CU_ASSERT(g_bserrno == 0);
1703 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size - page_size) == 0);
1704 	CU_ASSERT(memcmp(payload_pattern, payload_read + payload_size - page_size, page_size) == 0);
1705 
1706 	/* Fill whole blob except first page */
1707 	spdk_blob_io_write(blob, channel, payload_pattern, 1, pages_per_payload - 1,
1708 			   blob_op_complete, NULL);
1709 	poll_threads();
1710 	CU_ASSERT(g_bserrno == 0);
1711 
1712 	/* Write first page with a pattern */
1713 	spdk_blob_io_write(blob, channel, payload_pattern, 0, 1,
1714 			   blob_op_complete, NULL);
1715 	poll_threads();
1716 	CU_ASSERT(g_bserrno == 0);
1717 
1718 	/* Read whole blob and check consistency */
1719 	memset(payload_read, 0xFF, payload_size);
1720 	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
1721 	poll_threads();
1722 	CU_ASSERT(g_bserrno == 0);
1723 	CU_ASSERT(memcmp(payload_pattern, payload_read + page_size, payload_size - page_size) == 0);
1724 	CU_ASSERT(memcmp(payload_pattern, payload_read, page_size) == 0);
1725 
1726 
1727 	/* Fill whole blob with a pattern (5 clusters) */
1728 
1729 	/* 1. Read test. */
1730 	_blob_io_write_no_split(blob, channel, payload_pattern, 0, pages_per_payload,
1731 				blob_op_complete, NULL);
1732 	poll_threads();
1733 	CU_ASSERT(g_bserrno == 0);
1734 
1735 	memset(payload_read, 0xFF, payload_size);
1736 	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
1737 	poll_threads();
1738 	poll_threads();
1739 	CU_ASSERT(g_bserrno == 0);
1740 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);
1741 
1742 	/* 2. Write test. */
1743 	spdk_blob_io_write(blob, channel, payload_pattern, 0, pages_per_payload,
1744 			   blob_op_complete, NULL);
1745 	poll_threads();
1746 	CU_ASSERT(g_bserrno == 0);
1747 
1748 	memset(payload_read, 0xFF, payload_size);
1749 	_blob_io_read_no_split(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
1750 	poll_threads();
1751 	CU_ASSERT(g_bserrno == 0);
1752 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);
1753 
1754 	spdk_bs_free_io_channel(channel);
1755 	poll_threads();
1756 
1757 	g_blob = NULL;
1758 	g_blobid = 0;
1759 
1760 	free(payload_read);
1761 	free(payload_write);
1762 	free(payload_pattern);
1763 
1764 	ut_blob_close_and_delete(bs, blob);
1765 }
1766 
1767 static void
1768 blob_operation_split_rw_iov(void)
1769 {
1770 	struct spdk_blob_store *bs = g_bs;
1771 	struct spdk_blob *blob;
1772 	struct spdk_io_channel *channel;
1773 	struct spdk_blob_opts opts;
1774 	uint64_t cluster_size;
1775 
1776 	uint64_t payload_size;
1777 	uint8_t *payload_read;
1778 	uint8_t *payload_write;
1779 	uint8_t *payload_pattern;
1780 
1781 	uint64_t page_size;
1782 	uint64_t pages_per_cluster;
1783 	uint64_t pages_per_payload;
1784 
1785 	struct iovec iov_read[2];
1786 	struct iovec iov_write[2];
1787 
1788 	uint64_t i, j;
1789 
1790 	cluster_size = spdk_bs_get_cluster_size(bs);
1791 	page_size = spdk_bs_get_page_size(bs);
1792 	pages_per_cluster = cluster_size / page_size;
1793 	pages_per_payload = pages_per_cluster * 5;
1794 	payload_size = cluster_size * 5;
1795 
1796 	payload_read = malloc(payload_size);
1797 	SPDK_CU_ASSERT_FATAL(payload_read != NULL);
1798 
1799 	payload_write = malloc(payload_size);
1800 	SPDK_CU_ASSERT_FATAL(payload_write != NULL);
1801 
1802 	payload_pattern = malloc(payload_size);
1803 	SPDK_CU_ASSERT_FATAL(payload_pattern != NULL);
1804 
1805 	/* Prepare random pattern to write */
1806 	for (i = 0; i < pages_per_payload; i++) {
1807 		for (j = 0; j < page_size / sizeof(uint64_t); j++) {
1808 			uint64_t *tmp;
1809 
1810 			tmp = (uint64_t *)payload_pattern;
1811 			tmp += ((page_size * i) / sizeof(uint64_t)) + j;
1812 			*tmp = i + 1;
1813 		}
1814 	}
1815 
1816 	channel = spdk_bs_alloc_io_channel(bs);
1817 	SPDK_CU_ASSERT_FATAL(channel != NULL);
1818 
1819 	/* Create blob */
1820 	ut_spdk_blob_opts_init(&opts);
1821 	opts.thin_provision = false;
1822 	opts.num_clusters = 5;
1823 
1824 	blob = ut_blob_create_and_open(bs, &opts);
1825 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
1826 
1827 	/* Initial read should return zeroes payload */
1828 	memset(payload_read, 0xFF, payload_size);
1829 	iov_read[0].iov_base = payload_read;
1830 	iov_read[0].iov_len = cluster_size * 3;
1831 	iov_read[1].iov_base = payload_read + cluster_size * 3;
1832 	iov_read[1].iov_len = cluster_size * 2;
1833 	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL);
1834 	poll_threads();
1835 	CU_ASSERT(g_bserrno == 0);
1836 	CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size));
1837 
1838 	/* First of iovs fills whole blob except last page and second of iovs writes last page
1839 	 *  with a pattern. */
1840 	iov_write[0].iov_base = payload_pattern;
1841 	iov_write[0].iov_len = payload_size - page_size;
1842 	iov_write[1].iov_base = payload_pattern;
1843 	iov_write[1].iov_len = page_size;
1844 	spdk_blob_io_writev(blob, channel, iov_write, 2, 0, pages_per_payload, blob_op_complete, NULL);
1845 	poll_threads();
1846 	CU_ASSERT(g_bserrno == 0);
1847 
1848 	/* Read whole blob and check consistency */
1849 	memset(payload_read, 0xFF, payload_size);
1850 	iov_read[0].iov_base = payload_read;
1851 	iov_read[0].iov_len = cluster_size * 2;
1852 	iov_read[1].iov_base = payload_read + cluster_size * 2;
1853 	iov_read[1].iov_len = cluster_size * 3;
1854 	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL);
1855 	poll_threads();
1856 	CU_ASSERT(g_bserrno == 0);
1857 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size - page_size) == 0);
1858 	CU_ASSERT(memcmp(payload_pattern, payload_read + payload_size - page_size, page_size) == 0);
1859 
1860 	/* First of iovs fills only first page and second of iovs writes whole blob except
1861 	 *  first page with a pattern. */
1862 	iov_write[0].iov_base = payload_pattern;
1863 	iov_write[0].iov_len = page_size;
1864 	iov_write[1].iov_base = payload_pattern;
1865 	iov_write[1].iov_len = payload_size - page_size;
1866 	spdk_blob_io_writev(blob, channel, iov_write, 2, 0, pages_per_payload, blob_op_complete, NULL);
1867 	poll_threads();
1868 	CU_ASSERT(g_bserrno == 0);
1869 
1870 	/* Read whole blob and check consistency */
1871 	memset(payload_read, 0xFF, payload_size);
1872 	iov_read[0].iov_base = payload_read;
1873 	iov_read[0].iov_len = cluster_size * 4;
1874 	iov_read[1].iov_base = payload_read + cluster_size * 4;
1875 	iov_read[1].iov_len = cluster_size;
1876 	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL);
1877 	poll_threads();
1878 	CU_ASSERT(g_bserrno == 0);
1879 	CU_ASSERT(memcmp(payload_pattern, payload_read + page_size, payload_size - page_size) == 0);
1880 	CU_ASSERT(memcmp(payload_pattern, payload_read, page_size) == 0);
1881 
1882 
1883 	/* Fill whole blob with a pattern (5 clusters) */
1884 
1885 	/* 1. Read test. */
1886 	_blob_io_write_no_split(blob, channel, payload_pattern, 0, pages_per_payload,
1887 				blob_op_complete, NULL);
1888 	poll_threads();
1889 	CU_ASSERT(g_bserrno == 0);
1890 
1891 	memset(payload_read, 0xFF, payload_size);
1892 	iov_read[0].iov_base = payload_read;
1893 	iov_read[0].iov_len = cluster_size;
1894 	iov_read[1].iov_base = payload_read + cluster_size;
1895 	iov_read[1].iov_len = cluster_size * 4;
1896 	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL);
1897 	poll_threads();
1898 	CU_ASSERT(g_bserrno == 0);
1899 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);
1900 
1901 	/* 2. Write test. */
1902 	iov_write[0].iov_base = payload_read;
1903 	iov_write[0].iov_len = cluster_size * 2;
1904 	iov_write[1].iov_base = payload_read + cluster_size * 2;
1905 	iov_write[1].iov_len = cluster_size * 3;
1906 	spdk_blob_io_writev(blob, channel, iov_write, 2, 0, pages_per_payload, blob_op_complete, NULL);
1907 	poll_threads();
1908 	CU_ASSERT(g_bserrno == 0);
1909 
1910 	memset(payload_read, 0xFF, payload_size);
1911 	_blob_io_read_no_split(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
1912 	poll_threads();
1913 	CU_ASSERT(g_bserrno == 0);
1914 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);
1915 
1916 	spdk_bs_free_io_channel(channel);
1917 	poll_threads();
1918 
1919 	g_blob = NULL;
1920 	g_blobid = 0;
1921 
1922 	free(payload_read);
1923 	free(payload_write);
1924 	free(payload_pattern);
1925 
1926 	ut_blob_close_and_delete(bs, blob);
1927 }
1928 
1929 static void
1930 blob_unmap(void)
1931 {
1932 	struct spdk_blob_store *bs = g_bs;
1933 	struct spdk_blob *blob;
1934 	struct spdk_io_channel *channel;
1935 	struct spdk_blob_opts opts;
1936 	uint8_t payload[4096];
1937 	int i;
1938 
1939 	channel = spdk_bs_alloc_io_channel(bs);
1940 	CU_ASSERT(channel != NULL);
1941 
1942 	ut_spdk_blob_opts_init(&opts);
1943 	opts.num_clusters = 10;
1944 
1945 	blob = ut_blob_create_and_open(bs, &opts);
1946 
1947 	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
1948 	poll_threads();
1949 	CU_ASSERT(g_bserrno == 0);
1950 
1951 	memset(payload, 0, sizeof(payload));
1952 	payload[0] = 0xFF;
1953 
1954 	/*
1955 	 * Set first byte of every cluster to 0xFF.
1956 	 * First cluster on device is reserved so let's start from cluster number 1
1957 	 */
1958 	for (i = 1; i < 11; i++) {
1959 		g_dev_buffer[i * SPDK_BLOB_OPTS_CLUSTER_SZ] = 0xFF;
1960 	}
1961 
1962 	/* Confirm writes */
1963 	for (i = 0; i < 10; i++) {
1964 		payload[0] = 0;
1965 		spdk_blob_io_read(blob, channel, &payload, i * SPDK_BLOB_OPTS_CLUSTER_SZ / 4096, 1,
1966 				  blob_op_complete, NULL);
1967 		poll_threads();
1968 		CU_ASSERT(g_bserrno == 0);
1969 		CU_ASSERT(payload[0] == 0xFF);
1970 	}
1971 
1972 	/* Mark some clusters as unallocated */
1973 	blob->active.clusters[1] = 0;
1974 	blob->active.clusters[2] = 0;
1975 	blob->active.clusters[3] = 0;
1976 	blob->active.clusters[6] = 0;
1977 	blob->active.clusters[8] = 0;
1978 
1979 	/* Unmap clusters by resizing to 0 */
1980 	spdk_blob_resize(blob, 0, blob_op_complete, NULL);
1981 	poll_threads();
1982 	CU_ASSERT(g_bserrno == 0);
1983 
1984 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
1985 	poll_threads();
1986 	CU_ASSERT(g_bserrno == 0);
1987 
1988 	/* Confirm that only 'allocated' clusters were unmapped */
1989 	for (i = 1; i < 11; i++) {
1990 		switch (i) {
1991 		case 2:
1992 		case 3:
1993 		case 4:
1994 		case 7:
1995 		case 9:
1996 			CU_ASSERT(g_dev_buffer[i * SPDK_BLOB_OPTS_CLUSTER_SZ] == 0xFF);
1997 			break;
1998 		default:
1999 			CU_ASSERT(g_dev_buffer[i * SPDK_BLOB_OPTS_CLUSTER_SZ] == 0);
2000 			break;
2001 		}
2002 	}
2003 
2004 	spdk_bs_free_io_channel(channel);
2005 	poll_threads();
2006 
2007 	ut_blob_close_and_delete(bs, blob);
2008 }
2009 
2010 static void
2011 blob_iter(void)
2012 {
2013 	struct spdk_blob_store *bs = g_bs;
2014 	struct spdk_blob *blob;
2015 	spdk_blob_id blobid;
2016 	struct spdk_blob_opts blob_opts;
2017 
2018 	spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL);
2019 	poll_threads();
2020 	CU_ASSERT(g_blob == NULL);
2021 	CU_ASSERT(g_bserrno == -ENOENT);
2022 
2023 	ut_spdk_blob_opts_init(&blob_opts);
2024 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
2025 	poll_threads();
2026 	CU_ASSERT(g_bserrno == 0);
2027 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
2028 	blobid = g_blobid;
2029 
2030 	spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL);
2031 	poll_threads();
2032 	CU_ASSERT(g_blob != NULL);
2033 	CU_ASSERT(g_bserrno == 0);
2034 	blob = g_blob;
2035 	CU_ASSERT(spdk_blob_get_id(blob) == blobid);
2036 
2037 	spdk_bs_iter_next(bs, blob, blob_op_with_handle_complete, NULL);
2038 	poll_threads();
2039 	CU_ASSERT(g_blob == NULL);
2040 	CU_ASSERT(g_bserrno == -ENOENT);
2041 }
2042 
2043 static void
2044 blob_xattr(void)
2045 {
2046 	struct spdk_blob_store *bs = g_bs;
2047 	struct spdk_blob *blob = g_blob;
2048 	spdk_blob_id blobid = spdk_blob_get_id(blob);
2049 	uint64_t length;
2050 	int rc;
2051 	const char *name1, *name2;
2052 	const void *value;
2053 	size_t value_len;
2054 	struct spdk_xattr_names *names;
2055 
2056 	/* Test that set_xattr fails if md_ro flag is set. */
2057 	blob->md_ro = true;
2058 	rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
2059 	CU_ASSERT(rc == -EPERM);
2060 
2061 	blob->md_ro = false;
2062 	rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
2063 	CU_ASSERT(rc == 0);
2064 
2065 	length = 2345;
2066 	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
2067 	CU_ASSERT(rc == 0);
2068 
2069 	/* Overwrite "length" xattr. */
2070 	length = 3456;
2071 	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
2072 	CU_ASSERT(rc == 0);
2073 
2074 	/* get_xattr should still work even if md_ro flag is set. */
2075 	value = NULL;
2076 	blob->md_ro = true;
2077 	rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len);
2078 	CU_ASSERT(rc == 0);
2079 	SPDK_CU_ASSERT_FATAL(value != NULL);
2080 	CU_ASSERT(*(uint64_t *)value == length);
2081 	CU_ASSERT(value_len == 8);
2082 	blob->md_ro = false;
2083 
2084 	rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len);
2085 	CU_ASSERT(rc == -ENOENT);
2086 
2087 	names = NULL;
2088 	rc = spdk_blob_get_xattr_names(blob, &names);
2089 	CU_ASSERT(rc == 0);
2090 	SPDK_CU_ASSERT_FATAL(names != NULL);
2091 	CU_ASSERT(spdk_xattr_names_get_count(names) == 2);
2092 	name1 = spdk_xattr_names_get_name(names, 0);
2093 	SPDK_CU_ASSERT_FATAL(name1 != NULL);
2094 	CU_ASSERT(!strcmp(name1, "name") || !strcmp(name1, "length"));
2095 	name2 = spdk_xattr_names_get_name(names, 1);
2096 	SPDK_CU_ASSERT_FATAL(name2 != NULL);
2097 	CU_ASSERT(!strcmp(name2, "name") || !strcmp(name2, "length"));
2098 	CU_ASSERT(strcmp(name1, name2));
2099 	spdk_xattr_names_free(names);
2100 
2101 	/* Confirm that remove_xattr fails if md_ro is set to true. */
2102 	blob->md_ro = true;
2103 	rc = spdk_blob_remove_xattr(blob, "name");
2104 	CU_ASSERT(rc == -EPERM);
2105 
2106 	blob->md_ro = false;
2107 	rc = spdk_blob_remove_xattr(blob, "name");
2108 	CU_ASSERT(rc == 0);
2109 
2110 	rc = spdk_blob_remove_xattr(blob, "foobar");
2111 	CU_ASSERT(rc == -ENOENT);
2112 
2113 	/* Set internal xattr */
2114 	length = 7898;
2115 	rc = blob_set_xattr(blob, "internal", &length, sizeof(length), true);
2116 	CU_ASSERT(rc == 0);
2117 	rc = blob_get_xattr_value(blob, "internal", &value, &value_len, true);
2118 	CU_ASSERT(rc == 0);
2119 	CU_ASSERT(*(uint64_t *)value == length);
2120 	/* try to get public xattr with same name */
2121 	rc = spdk_blob_get_xattr_value(blob, "internal", &value, &value_len);
2122 	CU_ASSERT(rc != 0);
2123 	rc = blob_get_xattr_value(blob, "internal", &value, &value_len, false);
2124 	CU_ASSERT(rc != 0);
2125 	/* Check if SPDK_BLOB_INTERNAL_XATTR is set */
2126 	CU_ASSERT((blob->invalid_flags & SPDK_BLOB_INTERNAL_XATTR) ==
2127 		  SPDK_BLOB_INTERNAL_XATTR);
2128 
2129 	spdk_blob_close(blob, blob_op_complete, NULL);
2130 	poll_threads();
2131 
2132 	/* Check if xattrs are persisted */
2133 	ut_bs_reload(&bs, NULL);
2134 
2135 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
2136 	poll_threads();
2137 	CU_ASSERT(g_bserrno == 0);
2138 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
2139 	blob = g_blob;
2140 
2141 	rc = blob_get_xattr_value(blob, "internal", &value, &value_len, true);
2142 	CU_ASSERT(rc == 0);
2143 	CU_ASSERT(*(uint64_t *)value == length);
2144 
2145 	/* try to get internal xattr trough public call */
2146 	rc = spdk_blob_get_xattr_value(blob, "internal", &value, &value_len);
2147 	CU_ASSERT(rc != 0);
2148 
2149 	rc = blob_remove_xattr(blob, "internal", true);
2150 	CU_ASSERT(rc == 0);
2151 
2152 	CU_ASSERT((blob->invalid_flags & SPDK_BLOB_INTERNAL_XATTR) == 0);
2153 }
2154 
2155 static void
2156 blob_parse_md(void)
2157 {
2158 	struct spdk_blob_store *bs = g_bs;
2159 	struct spdk_blob *blob;
2160 	int rc;
2161 	uint32_t used_pages;
2162 	size_t xattr_length;
2163 	char *xattr;
2164 
2165 	used_pages = spdk_bit_array_count_set(bs->used_md_pages);
2166 	blob = ut_blob_create_and_open(bs, NULL);
2167 
2168 	/* Create large extent to force more than 1 page of metadata. */
2169 	xattr_length = SPDK_BS_MAX_DESC_SIZE - sizeof(struct spdk_blob_md_descriptor_xattr) -
2170 		       strlen("large_xattr");
2171 	xattr = calloc(xattr_length, sizeof(char));
2172 	SPDK_CU_ASSERT_FATAL(xattr != NULL);
2173 	rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
2174 	free(xattr);
2175 	SPDK_CU_ASSERT_FATAL(rc == 0);
2176 
2177 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
2178 	poll_threads();
2179 
2180 	/* Delete the blob and verify that number of pages returned to before its creation. */
2181 	SPDK_CU_ASSERT_FATAL(used_pages != spdk_bit_array_count_set(bs->used_md_pages));
2182 	ut_blob_close_and_delete(bs, blob);
2183 	SPDK_CU_ASSERT_FATAL(used_pages == spdk_bit_array_count_set(bs->used_md_pages));
2184 }
2185 
2186 static void
2187 bs_load(void)
2188 {
2189 	struct spdk_blob_store *bs;
2190 	struct spdk_bs_dev *dev;
2191 	spdk_blob_id blobid;
2192 	struct spdk_blob *blob;
2193 	struct spdk_bs_super_block *super_block;
2194 	uint64_t length;
2195 	int rc;
2196 	const void *value;
2197 	size_t value_len;
2198 	struct spdk_bs_opts opts;
2199 	struct spdk_blob_opts blob_opts;
2200 
2201 	dev = init_dev();
2202 	spdk_bs_opts_init(&opts, sizeof(opts));
2203 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2204 
2205 	/* Initialize a new blob store */
2206 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2207 	poll_threads();
2208 	CU_ASSERT(g_bserrno == 0);
2209 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2210 	bs = g_bs;
2211 
2212 	/* Try to open a blobid that does not exist */
2213 	spdk_bs_open_blob(bs, 0, blob_op_with_handle_complete, NULL);
2214 	poll_threads();
2215 	CU_ASSERT(g_bserrno == -ENOENT);
2216 	CU_ASSERT(g_blob == NULL);
2217 
2218 	/* Create a blob */
2219 	blob = ut_blob_create_and_open(bs, NULL);
2220 	blobid = spdk_blob_get_id(blob);
2221 
2222 	/* Try again to open valid blob but without the upper bit set */
2223 	spdk_bs_open_blob(bs, blobid & 0xFFFFFFFF, blob_op_with_handle_complete, NULL);
2224 	poll_threads();
2225 	CU_ASSERT(g_bserrno == -ENOENT);
2226 	CU_ASSERT(g_blob == NULL);
2227 
2228 	/* Set some xattrs */
2229 	rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
2230 	CU_ASSERT(rc == 0);
2231 
2232 	length = 2345;
2233 	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
2234 	CU_ASSERT(rc == 0);
2235 
2236 	/* Resize the blob */
2237 	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
2238 	poll_threads();
2239 	CU_ASSERT(g_bserrno == 0);
2240 
2241 	spdk_blob_close(blob, blob_op_complete, NULL);
2242 	poll_threads();
2243 	CU_ASSERT(g_bserrno == 0);
2244 	blob = NULL;
2245 	g_blob = NULL;
2246 	g_blobid = SPDK_BLOBID_INVALID;
2247 
2248 	/* Unload the blob store */
2249 	spdk_bs_unload(bs, bs_op_complete, NULL);
2250 	poll_threads();
2251 	CU_ASSERT(g_bserrno == 0);
2252 	g_bs = NULL;
2253 	g_blob = NULL;
2254 	g_blobid = 0;
2255 
2256 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
2257 	CU_ASSERT(super_block->clean == 1);
2258 
2259 	/* Load should fail for device with an unsupported blocklen */
2260 	dev = init_dev();
2261 	dev->blocklen = SPDK_BS_PAGE_SIZE * 2;
2262 	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
2263 	poll_threads();
2264 	CU_ASSERT(g_bserrno == -EINVAL);
2265 
2266 	/* Load should when max_md_ops is set to zero */
2267 	dev = init_dev();
2268 	spdk_bs_opts_init(&opts, sizeof(opts));
2269 	opts.max_md_ops = 0;
2270 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2271 	poll_threads();
2272 	CU_ASSERT(g_bserrno == -EINVAL);
2273 
2274 	/* Load should when max_channel_ops is set to zero */
2275 	dev = init_dev();
2276 	spdk_bs_opts_init(&opts, sizeof(opts));
2277 	opts.max_channel_ops = 0;
2278 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2279 	poll_threads();
2280 	CU_ASSERT(g_bserrno == -EINVAL);
2281 
2282 	/* Load an existing blob store */
2283 	dev = init_dev();
2284 	spdk_bs_opts_init(&opts, sizeof(opts));
2285 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2286 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2287 	poll_threads();
2288 	CU_ASSERT(g_bserrno == 0);
2289 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2290 	bs = g_bs;
2291 
2292 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
2293 	CU_ASSERT(super_block->clean == 1);
2294 	CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen);
2295 
2296 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
2297 	poll_threads();
2298 	CU_ASSERT(g_bserrno == 0);
2299 	CU_ASSERT(g_blob != NULL);
2300 	blob = g_blob;
2301 
2302 	/* Verify that blobstore is marked dirty after first metadata sync */
2303 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
2304 	CU_ASSERT(super_block->clean == 1);
2305 
2306 	/* Get the xattrs */
2307 	value = NULL;
2308 	rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len);
2309 	CU_ASSERT(rc == 0);
2310 	SPDK_CU_ASSERT_FATAL(value != NULL);
2311 	CU_ASSERT(*(uint64_t *)value == length);
2312 	CU_ASSERT(value_len == 8);
2313 
2314 	rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len);
2315 	CU_ASSERT(rc == -ENOENT);
2316 
2317 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
2318 
2319 	spdk_blob_close(blob, blob_op_complete, NULL);
2320 	poll_threads();
2321 	CU_ASSERT(g_bserrno == 0);
2322 	blob = NULL;
2323 	g_blob = NULL;
2324 
2325 	spdk_bs_unload(bs, bs_op_complete, NULL);
2326 	poll_threads();
2327 	CU_ASSERT(g_bserrno == 0);
2328 	g_bs = NULL;
2329 
2330 	/* Load should fail: bdev size < saved size */
2331 	dev = init_dev();
2332 	dev->blockcnt /= 2;
2333 
2334 	spdk_bs_opts_init(&opts, sizeof(opts));
2335 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2336 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2337 	poll_threads();
2338 
2339 	CU_ASSERT(g_bserrno == -EILSEQ);
2340 
2341 	/* Load should succeed: bdev size > saved size */
2342 	dev = init_dev();
2343 	dev->blockcnt *= 4;
2344 
2345 	spdk_bs_opts_init(&opts, sizeof(opts));
2346 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2347 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2348 	poll_threads();
2349 	CU_ASSERT(g_bserrno == 0);
2350 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2351 	bs = g_bs;
2352 
2353 	CU_ASSERT(g_bserrno == 0);
2354 	spdk_bs_unload(bs, bs_op_complete, NULL);
2355 	poll_threads();
2356 
2357 
2358 	/* Test compatibility mode */
2359 
2360 	dev = init_dev();
2361 	super_block->size = 0;
2362 	super_block->crc = blob_md_page_calc_crc(super_block);
2363 
2364 	spdk_bs_opts_init(&opts, sizeof(opts));
2365 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2366 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2367 	poll_threads();
2368 	CU_ASSERT(g_bserrno == 0);
2369 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2370 	bs = g_bs;
2371 
2372 	/* Create a blob */
2373 	ut_spdk_blob_opts_init(&blob_opts);
2374 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
2375 	poll_threads();
2376 	CU_ASSERT(g_bserrno == 0);
2377 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
2378 
2379 	/* Blobstore should update number of blocks in super_block */
2380 	CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen);
2381 	CU_ASSERT(super_block->clean == 0);
2382 
2383 	spdk_bs_unload(bs, bs_op_complete, NULL);
2384 	poll_threads();
2385 	CU_ASSERT(g_bserrno == 0);
2386 	CU_ASSERT(super_block->clean == 1);
2387 	g_bs = NULL;
2388 
2389 }
2390 
2391 static void
2392 bs_load_pending_removal(void)
2393 {
2394 	struct spdk_blob_store *bs = g_bs;
2395 	struct spdk_blob_opts opts;
2396 	struct spdk_blob *blob, *snapshot;
2397 	spdk_blob_id blobid, snapshotid;
2398 	const void *value;
2399 	size_t value_len;
2400 	int rc;
2401 
2402 	/* Create blob */
2403 	ut_spdk_blob_opts_init(&opts);
2404 	opts.num_clusters = 10;
2405 
2406 	blob = ut_blob_create_and_open(bs, &opts);
2407 	blobid = spdk_blob_get_id(blob);
2408 
2409 	/* Create snapshot */
2410 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
2411 	poll_threads();
2412 	CU_ASSERT(g_bserrno == 0);
2413 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
2414 	snapshotid = g_blobid;
2415 
2416 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
2417 	poll_threads();
2418 	CU_ASSERT(g_bserrno == 0);
2419 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
2420 	snapshot = g_blob;
2421 
2422 	/* Set SNAPSHOT_PENDING_REMOVAL xattr */
2423 	snapshot->md_ro = false;
2424 	rc = blob_set_xattr(snapshot, SNAPSHOT_PENDING_REMOVAL, &blobid, sizeof(spdk_blob_id), true);
2425 	CU_ASSERT(rc == 0);
2426 	snapshot->md_ro = true;
2427 
2428 	spdk_blob_close(snapshot, blob_op_complete, NULL);
2429 	poll_threads();
2430 	CU_ASSERT(g_bserrno == 0);
2431 
2432 	spdk_blob_close(blob, blob_op_complete, NULL);
2433 	poll_threads();
2434 	CU_ASSERT(g_bserrno == 0);
2435 
2436 	/* Reload blobstore */
2437 	ut_bs_reload(&bs, NULL);
2438 
2439 	/* Snapshot should not be removed as blob is still pointing to it */
2440 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
2441 	poll_threads();
2442 	CU_ASSERT(g_bserrno == 0);
2443 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
2444 	snapshot = g_blob;
2445 
2446 	/* SNAPSHOT_PENDING_REMOVAL xattr should be removed during load */
2447 	rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_PENDING_REMOVAL, &value, &value_len);
2448 	CU_ASSERT(rc != 0);
2449 
2450 	/* Set SNAPSHOT_PENDING_REMOVAL xattr again */
2451 	snapshot->md_ro = false;
2452 	rc = blob_set_xattr(snapshot, SNAPSHOT_PENDING_REMOVAL, &blobid, sizeof(spdk_blob_id), true);
2453 	CU_ASSERT(rc == 0);
2454 	snapshot->md_ro = true;
2455 
2456 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
2457 	poll_threads();
2458 	CU_ASSERT(g_bserrno == 0);
2459 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
2460 	blob = g_blob;
2461 
2462 	/* Remove parent_id from blob by removing BLOB_SNAPSHOT xattr */
2463 	blob_remove_xattr(blob, BLOB_SNAPSHOT, true);
2464 
2465 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
2466 	poll_threads();
2467 	CU_ASSERT(g_bserrno == 0);
2468 
2469 	spdk_blob_close(snapshot, blob_op_complete, NULL);
2470 	poll_threads();
2471 	CU_ASSERT(g_bserrno == 0);
2472 
2473 	spdk_blob_close(blob, blob_op_complete, NULL);
2474 	poll_threads();
2475 	CU_ASSERT(g_bserrno == 0);
2476 
2477 	/* Reload blobstore */
2478 	ut_bs_reload(&bs, NULL);
2479 
2480 	/* Snapshot should be removed as blob is not pointing to it anymore */
2481 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
2482 	poll_threads();
2483 	CU_ASSERT(g_bserrno != 0);
2484 }
2485 
2486 static void
2487 bs_load_custom_cluster_size(void)
2488 {
2489 	struct spdk_blob_store *bs;
2490 	struct spdk_bs_dev *dev;
2491 	struct spdk_bs_super_block *super_block;
2492 	struct spdk_bs_opts opts;
2493 	uint32_t custom_cluster_size = 4194304; /* 4MiB */
2494 	uint32_t cluster_sz;
2495 	uint64_t total_clusters;
2496 
2497 	dev = init_dev();
2498 	spdk_bs_opts_init(&opts, sizeof(opts));
2499 	opts.cluster_sz = custom_cluster_size;
2500 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2501 
2502 	/* Initialize a new blob store */
2503 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2504 	poll_threads();
2505 	CU_ASSERT(g_bserrno == 0);
2506 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2507 	bs = g_bs;
2508 	cluster_sz = bs->cluster_sz;
2509 	total_clusters = bs->total_clusters;
2510 
2511 	/* Unload the blob store */
2512 	spdk_bs_unload(bs, bs_op_complete, NULL);
2513 	poll_threads();
2514 	CU_ASSERT(g_bserrno == 0);
2515 	g_bs = NULL;
2516 	g_blob = NULL;
2517 	g_blobid = 0;
2518 
2519 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
2520 	CU_ASSERT(super_block->clean == 1);
2521 
2522 	/* Load an existing blob store */
2523 	dev = init_dev();
2524 	spdk_bs_opts_init(&opts, sizeof(opts));
2525 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2526 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2527 	poll_threads();
2528 	CU_ASSERT(g_bserrno == 0);
2529 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2530 	bs = g_bs;
2531 	/* Compare cluster size and number to one after initialization */
2532 	CU_ASSERT(cluster_sz == bs->cluster_sz);
2533 	CU_ASSERT(total_clusters == bs->total_clusters);
2534 
2535 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
2536 	CU_ASSERT(super_block->clean == 1);
2537 	CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen);
2538 
2539 	spdk_bs_unload(bs, bs_op_complete, NULL);
2540 	poll_threads();
2541 	CU_ASSERT(g_bserrno == 0);
2542 	CU_ASSERT(super_block->clean == 1);
2543 	g_bs = NULL;
2544 }
2545 
2546 static void
2547 bs_type(void)
2548 {
2549 	struct spdk_blob_store *bs;
2550 	struct spdk_bs_dev *dev;
2551 	struct spdk_bs_opts opts;
2552 
2553 	dev = init_dev();
2554 	spdk_bs_opts_init(&opts, sizeof(opts));
2555 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2556 
2557 	/* Initialize a new blob store */
2558 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2559 	poll_threads();
2560 	CU_ASSERT(g_bserrno == 0);
2561 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2562 	bs = g_bs;
2563 
2564 	/* Unload the blob store */
2565 	spdk_bs_unload(bs, bs_op_complete, NULL);
2566 	poll_threads();
2567 	CU_ASSERT(g_bserrno == 0);
2568 	g_bs = NULL;
2569 	g_blob = NULL;
2570 	g_blobid = 0;
2571 
2572 	/* Load non existing blobstore type */
2573 	dev = init_dev();
2574 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "NONEXISTING");
2575 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2576 	poll_threads();
2577 	CU_ASSERT(g_bserrno != 0);
2578 
2579 	/* Load with empty blobstore type */
2580 	dev = init_dev();
2581 	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
2582 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2583 	poll_threads();
2584 	CU_ASSERT(g_bserrno == 0);
2585 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2586 	bs = g_bs;
2587 
2588 	spdk_bs_unload(bs, bs_op_complete, NULL);
2589 	poll_threads();
2590 	CU_ASSERT(g_bserrno == 0);
2591 	g_bs = NULL;
2592 
2593 	/* Initialize a new blob store with empty bstype */
2594 	dev = init_dev();
2595 	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
2596 	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
2597 	poll_threads();
2598 	CU_ASSERT(g_bserrno == 0);
2599 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2600 	bs = g_bs;
2601 
2602 	spdk_bs_unload(bs, bs_op_complete, NULL);
2603 	poll_threads();
2604 	CU_ASSERT(g_bserrno == 0);
2605 	g_bs = NULL;
2606 
2607 	/* Load non existing blobstore type */
2608 	dev = init_dev();
2609 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "NONEXISTING");
2610 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2611 	poll_threads();
2612 	CU_ASSERT(g_bserrno != 0);
2613 
2614 	/* Load with empty blobstore type */
2615 	dev = init_dev();
2616 	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
2617 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2618 	poll_threads();
2619 	CU_ASSERT(g_bserrno == 0);
2620 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2621 	bs = g_bs;
2622 
2623 	spdk_bs_unload(bs, bs_op_complete, NULL);
2624 	poll_threads();
2625 	CU_ASSERT(g_bserrno == 0);
2626 	g_bs = NULL;
2627 }
2628 
2629 static void
2630 bs_super_block(void)
2631 {
2632 	struct spdk_blob_store *bs;
2633 	struct spdk_bs_dev *dev;
2634 	struct spdk_bs_super_block *super_block;
2635 	struct spdk_bs_opts opts;
2636 	struct spdk_bs_super_block_ver1 super_block_v1;
2637 
2638 	dev = init_dev();
2639 	spdk_bs_opts_init(&opts, sizeof(opts));
2640 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2641 
2642 	/* Initialize a new blob store */
2643 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2644 	poll_threads();
2645 	CU_ASSERT(g_bserrno == 0);
2646 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2647 	bs = g_bs;
2648 
2649 	/* Unload the blob store */
2650 	spdk_bs_unload(bs, bs_op_complete, NULL);
2651 	poll_threads();
2652 	CU_ASSERT(g_bserrno == 0);
2653 	g_bs = NULL;
2654 	g_blob = NULL;
2655 	g_blobid = 0;
2656 
2657 	/* Load an existing blob store with version newer than supported */
2658 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
2659 	super_block->version++;
2660 
2661 	dev = init_dev();
2662 	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
2663 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2664 	poll_threads();
2665 	CU_ASSERT(g_bserrno != 0);
2666 
2667 	/* Create a new blob store with super block version 1 */
2668 	dev = init_dev();
2669 	super_block_v1.version = 1;
2670 	memcpy(super_block_v1.signature, "SPDKBLOB", sizeof(super_block_v1.signature));
2671 	super_block_v1.length = 0x1000;
2672 	super_block_v1.clean = 1;
2673 	super_block_v1.super_blob = 0xFFFFFFFFFFFFFFFF;
2674 	super_block_v1.cluster_size = 0x100000;
2675 	super_block_v1.used_page_mask_start = 0x01;
2676 	super_block_v1.used_page_mask_len = 0x01;
2677 	super_block_v1.used_cluster_mask_start = 0x02;
2678 	super_block_v1.used_cluster_mask_len = 0x01;
2679 	super_block_v1.md_start = 0x03;
2680 	super_block_v1.md_len = 0x40;
2681 	memset(super_block_v1.reserved, 0, 4036);
2682 	super_block_v1.crc = blob_md_page_calc_crc(&super_block_v1);
2683 	memcpy(g_dev_buffer, &super_block_v1, sizeof(struct spdk_bs_super_block_ver1));
2684 
2685 	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
2686 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2687 	poll_threads();
2688 	CU_ASSERT(g_bserrno == 0);
2689 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2690 	bs = g_bs;
2691 
2692 	spdk_bs_unload(bs, bs_op_complete, NULL);
2693 	poll_threads();
2694 	CU_ASSERT(g_bserrno == 0);
2695 	g_bs = NULL;
2696 }
2697 
2698 static void
2699 bs_test_recover_cluster_count(void)
2700 {
2701 	struct spdk_blob_store *bs;
2702 	struct spdk_bs_dev *dev;
2703 	struct spdk_bs_super_block super_block;
2704 	struct spdk_bs_opts opts;
2705 
2706 	dev = init_dev();
2707 	spdk_bs_opts_init(&opts, sizeof(opts));
2708 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2709 
2710 	super_block.version = 3;
2711 	memcpy(super_block.signature, "SPDKBLOB", sizeof(super_block.signature));
2712 	super_block.length = 0x1000;
2713 	super_block.clean = 0;
2714 	super_block.super_blob = 0xFFFFFFFFFFFFFFFF;
2715 	super_block.cluster_size = 4096;
2716 	super_block.used_page_mask_start = 0x01;
2717 	super_block.used_page_mask_len = 0x01;
2718 	super_block.used_cluster_mask_start = 0x02;
2719 	super_block.used_cluster_mask_len = 0x01;
2720 	super_block.used_blobid_mask_start = 0x03;
2721 	super_block.used_blobid_mask_len = 0x01;
2722 	super_block.md_start = 0x04;
2723 	super_block.md_len = 0x40;
2724 	memset(super_block.bstype.bstype, 0, sizeof(super_block.bstype.bstype));
2725 	super_block.size = dev->blockcnt * dev->blocklen;
2726 	super_block.io_unit_size = 0x1000;
2727 	memset(super_block.reserved, 0, 4000);
2728 	super_block.crc = blob_md_page_calc_crc(&super_block);
2729 	memcpy(g_dev_buffer, &super_block, sizeof(struct spdk_bs_super_block));
2730 
2731 	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
2732 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2733 	poll_threads();
2734 	CU_ASSERT(g_bserrno == 0);
2735 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2736 	bs = g_bs;
2737 	CU_ASSERT(bs->num_free_clusters == bs->total_clusters - (super_block.md_start +
2738 			super_block.md_len));
2739 
2740 	spdk_bs_unload(bs, bs_op_complete, NULL);
2741 	poll_threads();
2742 	CU_ASSERT(g_bserrno == 0);
2743 	g_bs = NULL;
2744 }
2745 
2746 /*
2747  * Create a blobstore and then unload it.
2748  */
2749 static void
2750 bs_unload(void)
2751 {
2752 	struct spdk_blob_store *bs = g_bs;
2753 	struct spdk_blob *blob;
2754 
2755 	/* Create a blob and open it. */
2756 	blob = ut_blob_create_and_open(bs, NULL);
2757 
2758 	/* Try to unload blobstore, should fail with open blob */
2759 	g_bserrno = -1;
2760 	spdk_bs_unload(bs, bs_op_complete, NULL);
2761 	poll_threads();
2762 	CU_ASSERT(g_bserrno == -EBUSY);
2763 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2764 
2765 	/* Close the blob, then successfully unload blobstore */
2766 	g_bserrno = -1;
2767 	spdk_blob_close(blob, blob_op_complete, NULL);
2768 	poll_threads();
2769 	CU_ASSERT(g_bserrno == 0);
2770 }
2771 
2772 /*
2773  * Create a blobstore with a cluster size different than the default, and ensure it is
2774  *  persisted.
2775  */
2776 static void
2777 bs_cluster_sz(void)
2778 {
2779 	struct spdk_blob_store *bs;
2780 	struct spdk_bs_dev *dev;
2781 	struct spdk_bs_opts opts;
2782 	uint32_t cluster_sz;
2783 
2784 	/* Set cluster size to zero */
2785 	dev = init_dev();
2786 	spdk_bs_opts_init(&opts, sizeof(opts));
2787 	opts.cluster_sz = 0;
2788 
2789 	/* Initialize a new blob store */
2790 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2791 	poll_threads();
2792 	CU_ASSERT(g_bserrno == -EINVAL);
2793 	SPDK_CU_ASSERT_FATAL(g_bs == NULL);
2794 
2795 	/*
2796 	 * Set cluster size to blobstore page size,
2797 	 * to work it is required to be at least twice the blobstore page size.
2798 	 */
2799 	dev = init_dev();
2800 	spdk_bs_opts_init(&opts, sizeof(opts));
2801 	opts.cluster_sz = SPDK_BS_PAGE_SIZE;
2802 
2803 	/* Initialize a new blob store */
2804 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2805 	poll_threads();
2806 	CU_ASSERT(g_bserrno == -ENOMEM);
2807 	SPDK_CU_ASSERT_FATAL(g_bs == NULL);
2808 
2809 	/*
2810 	 * Set cluster size to lower than page size,
2811 	 * to work it is required to be at least twice the blobstore page size.
2812 	 */
2813 	dev = init_dev();
2814 	spdk_bs_opts_init(&opts, sizeof(opts));
2815 	opts.cluster_sz = SPDK_BS_PAGE_SIZE - 1;
2816 
2817 	/* Initialize a new blob store */
2818 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2819 	poll_threads();
2820 	CU_ASSERT(g_bserrno == -EINVAL);
2821 	SPDK_CU_ASSERT_FATAL(g_bs == NULL);
2822 
2823 	/* Set cluster size to twice the default */
2824 	dev = init_dev();
2825 	spdk_bs_opts_init(&opts, sizeof(opts));
2826 	opts.cluster_sz *= 2;
2827 	cluster_sz = opts.cluster_sz;
2828 
2829 	/* Initialize a new blob store */
2830 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2831 	poll_threads();
2832 	CU_ASSERT(g_bserrno == 0);
2833 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2834 	bs = g_bs;
2835 
2836 	CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz);
2837 
2838 	ut_bs_reload(&bs, &opts);
2839 
2840 	CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz);
2841 
2842 	spdk_bs_unload(bs, bs_op_complete, NULL);
2843 	poll_threads();
2844 	CU_ASSERT(g_bserrno == 0);
2845 	g_bs = NULL;
2846 }
2847 
2848 /*
2849  * Create a blobstore, reload it and ensure total usable cluster count
2850  *  stays the same.
2851  */
2852 static void
2853 bs_usable_clusters(void)
2854 {
2855 	struct spdk_blob_store *bs = g_bs;
2856 	struct spdk_blob *blob;
2857 	uint32_t clusters;
2858 	int i;
2859 
2860 
2861 	clusters = spdk_bs_total_data_cluster_count(bs);
2862 
2863 	ut_bs_reload(&bs, NULL);
2864 
2865 	CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters);
2866 
2867 	/* Create and resize blobs to make sure that useable cluster count won't change */
2868 	for (i = 0; i < 4; i++) {
2869 		g_bserrno = -1;
2870 		g_blobid = SPDK_BLOBID_INVALID;
2871 		blob = ut_blob_create_and_open(bs, NULL);
2872 
2873 		spdk_blob_resize(blob, 10, blob_op_complete, NULL);
2874 		poll_threads();
2875 		CU_ASSERT(g_bserrno == 0);
2876 
2877 		g_bserrno = -1;
2878 		spdk_blob_close(blob, blob_op_complete, NULL);
2879 		poll_threads();
2880 		CU_ASSERT(g_bserrno == 0);
2881 
2882 		CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters);
2883 	}
2884 
2885 	/* Reload the blob store to make sure that nothing changed */
2886 	ut_bs_reload(&bs, NULL);
2887 
2888 	CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters);
2889 }
2890 
2891 /*
2892  * Test resizing of the metadata blob.  This requires creating enough blobs
2893  *  so that one cluster is not enough to fit the metadata for those blobs.
2894  *  To induce this condition to happen more quickly, we reduce the cluster
2895  *  size to 16KB, which means only 4 4KB blob metadata pages can fit.
2896  */
2897 static void
2898 bs_resize_md(void)
2899 {
2900 	struct spdk_blob_store *bs;
2901 	const int CLUSTER_PAGE_COUNT = 4;
2902 	const int NUM_BLOBS = CLUSTER_PAGE_COUNT * 4;
2903 	struct spdk_bs_dev *dev;
2904 	struct spdk_bs_opts opts;
2905 	struct spdk_blob *blob;
2906 	struct spdk_blob_opts blob_opts;
2907 	uint32_t cluster_sz;
2908 	spdk_blob_id blobids[NUM_BLOBS];
2909 	int i;
2910 
2911 
2912 	dev = init_dev();
2913 	spdk_bs_opts_init(&opts, sizeof(opts));
2914 	opts.cluster_sz = CLUSTER_PAGE_COUNT * 4096;
2915 	cluster_sz = opts.cluster_sz;
2916 
2917 	/* Initialize a new blob store */
2918 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2919 	poll_threads();
2920 	CU_ASSERT(g_bserrno == 0);
2921 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2922 	bs = g_bs;
2923 
2924 	CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz);
2925 
2926 	ut_spdk_blob_opts_init(&blob_opts);
2927 
2928 	for (i = 0; i < NUM_BLOBS; i++) {
2929 		g_bserrno = -1;
2930 		g_blobid = SPDK_BLOBID_INVALID;
2931 		spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
2932 		poll_threads();
2933 		CU_ASSERT(g_bserrno == 0);
2934 		CU_ASSERT(g_blobid !=  SPDK_BLOBID_INVALID);
2935 		blobids[i] = g_blobid;
2936 	}
2937 
2938 	ut_bs_reload(&bs, &opts);
2939 
2940 	CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz);
2941 
2942 	for (i = 0; i < NUM_BLOBS; i++) {
2943 		g_bserrno = -1;
2944 		g_blob = NULL;
2945 		spdk_bs_open_blob(bs, blobids[i], blob_op_with_handle_complete, NULL);
2946 		poll_threads();
2947 		CU_ASSERT(g_bserrno == 0);
2948 		CU_ASSERT(g_blob !=  NULL);
2949 		blob = g_blob;
2950 		g_bserrno = -1;
2951 		spdk_blob_close(blob, blob_op_complete, NULL);
2952 		poll_threads();
2953 		CU_ASSERT(g_bserrno == 0);
2954 	}
2955 
2956 	spdk_bs_unload(bs, bs_op_complete, NULL);
2957 	poll_threads();
2958 	CU_ASSERT(g_bserrno == 0);
2959 	g_bs = NULL;
2960 }
2961 
2962 static void
2963 bs_destroy(void)
2964 {
2965 	struct spdk_blob_store *bs;
2966 	struct spdk_bs_dev *dev;
2967 
2968 	/* Initialize a new blob store */
2969 	dev = init_dev();
2970 	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
2971 	poll_threads();
2972 	CU_ASSERT(g_bserrno == 0);
2973 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2974 	bs = g_bs;
2975 
2976 	/* Destroy the blob store */
2977 	g_bserrno = -1;
2978 	spdk_bs_destroy(bs, bs_op_complete, NULL);
2979 	poll_threads();
2980 	CU_ASSERT(g_bserrno == 0);
2981 
2982 	/* Loading an non-existent blob store should fail. */
2983 	g_bs = NULL;
2984 	dev = init_dev();
2985 
2986 	g_bserrno = 0;
2987 	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
2988 	poll_threads();
2989 	CU_ASSERT(g_bserrno != 0);
2990 }
2991 
2992 /* Try to hit all of the corner cases associated with serializing
2993  * a blob to disk
2994  */
2995 static void
2996 blob_serialize_test(void)
2997 {
2998 	struct spdk_bs_dev *dev;
2999 	struct spdk_bs_opts opts;
3000 	struct spdk_blob_store *bs;
3001 	spdk_blob_id blobid[2];
3002 	struct spdk_blob *blob[2];
3003 	uint64_t i;
3004 	char *value;
3005 	int rc;
3006 
3007 	dev = init_dev();
3008 
3009 	/* Initialize a new blobstore with very small clusters */
3010 	spdk_bs_opts_init(&opts, sizeof(opts));
3011 	opts.cluster_sz = dev->blocklen * 8;
3012 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
3013 	poll_threads();
3014 	CU_ASSERT(g_bserrno == 0);
3015 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3016 	bs = g_bs;
3017 
3018 	/* Create and open two blobs */
3019 	for (i = 0; i < 2; i++) {
3020 		blob[i] = ut_blob_create_and_open(bs, NULL);
3021 		blobid[i] = spdk_blob_get_id(blob[i]);
3022 
3023 		/* Set a fairly large xattr on both blobs to eat up
3024 		 * metadata space
3025 		 */
3026 		value = calloc(dev->blocklen - 64, sizeof(char));
3027 		SPDK_CU_ASSERT_FATAL(value != NULL);
3028 		memset(value, i, dev->blocklen / 2);
3029 		rc = spdk_blob_set_xattr(blob[i], "name", value, dev->blocklen - 64);
3030 		CU_ASSERT(rc == 0);
3031 		free(value);
3032 	}
3033 
3034 	/* Resize the blobs, alternating 1 cluster at a time.
3035 	 * This thwarts run length encoding and will cause spill
3036 	 * over of the extents.
3037 	 */
3038 	for (i = 0; i < 6; i++) {
3039 		spdk_blob_resize(blob[i % 2], (i / 2) + 1, blob_op_complete, NULL);
3040 		poll_threads();
3041 		CU_ASSERT(g_bserrno == 0);
3042 	}
3043 
3044 	for (i = 0; i < 2; i++) {
3045 		spdk_blob_sync_md(blob[i], blob_op_complete, NULL);
3046 		poll_threads();
3047 		CU_ASSERT(g_bserrno == 0);
3048 	}
3049 
3050 	/* Close the blobs */
3051 	for (i = 0; i < 2; i++) {
3052 		spdk_blob_close(blob[i], blob_op_complete, NULL);
3053 		poll_threads();
3054 		CU_ASSERT(g_bserrno == 0);
3055 	}
3056 
3057 	ut_bs_reload(&bs, &opts);
3058 
3059 	for (i = 0; i < 2; i++) {
3060 		blob[i] = NULL;
3061 
3062 		spdk_bs_open_blob(bs, blobid[i], blob_op_with_handle_complete, NULL);
3063 		poll_threads();
3064 		CU_ASSERT(g_bserrno == 0);
3065 		CU_ASSERT(g_blob != NULL);
3066 		blob[i] = g_blob;
3067 
3068 		CU_ASSERT(spdk_blob_get_num_clusters(blob[i]) == 3);
3069 
3070 		spdk_blob_close(blob[i], blob_op_complete, NULL);
3071 		poll_threads();
3072 		CU_ASSERT(g_bserrno == 0);
3073 	}
3074 
3075 	spdk_bs_unload(bs, bs_op_complete, NULL);
3076 	poll_threads();
3077 	CU_ASSERT(g_bserrno == 0);
3078 	g_bs = NULL;
3079 }
3080 
3081 static void
3082 blob_crc(void)
3083 {
3084 	struct spdk_blob_store *bs = g_bs;
3085 	struct spdk_blob *blob;
3086 	spdk_blob_id blobid;
3087 	uint32_t page_num;
3088 	int index;
3089 	struct spdk_blob_md_page *page;
3090 
3091 	blob = ut_blob_create_and_open(bs, NULL);
3092 	blobid = spdk_blob_get_id(blob);
3093 
3094 	spdk_blob_close(blob, blob_op_complete, NULL);
3095 	poll_threads();
3096 	CU_ASSERT(g_bserrno == 0);
3097 
3098 	page_num = bs_blobid_to_page(blobid);
3099 	index = DEV_BUFFER_BLOCKLEN * (bs->md_start + page_num);
3100 	page = (struct spdk_blob_md_page *)&g_dev_buffer[index];
3101 	page->crc = 0;
3102 
3103 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
3104 	poll_threads();
3105 	CU_ASSERT(g_bserrno == -EINVAL);
3106 	CU_ASSERT(g_blob == NULL);
3107 	g_bserrno = 0;
3108 
3109 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
3110 	poll_threads();
3111 	CU_ASSERT(g_bserrno == -EINVAL);
3112 }
3113 
3114 static void
3115 super_block_crc(void)
3116 {
3117 	struct spdk_blob_store *bs;
3118 	struct spdk_bs_dev *dev;
3119 	struct spdk_bs_super_block *super_block;
3120 
3121 	dev = init_dev();
3122 	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
3123 	poll_threads();
3124 	CU_ASSERT(g_bserrno == 0);
3125 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3126 	bs = g_bs;
3127 
3128 	spdk_bs_unload(bs, bs_op_complete, NULL);
3129 	poll_threads();
3130 	CU_ASSERT(g_bserrno == 0);
3131 	g_bs = NULL;
3132 
3133 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
3134 	super_block->crc = 0;
3135 	dev = init_dev();
3136 
3137 	/* Load an existing blob store */
3138 	g_bserrno = 0;
3139 	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
3140 	poll_threads();
3141 	CU_ASSERT(g_bserrno == -EILSEQ);
3142 }
3143 
3144 /* For blob dirty shutdown test case we do the following sub-test cases:
3145  * 1 Initialize new blob store and create 1 super blob with some xattrs, then we
3146  *   dirty shutdown and reload the blob store and verify the xattrs.
3147  * 2 Resize the blob from 10 clusters to 20 clusters and then dirty shutdown,
3148  *   reload the blob store and verify the clusters number.
3149  * 3 Create the second blob and then dirty shutdown, reload the blob store
3150  *   and verify the second blob.
3151  * 4 Delete the second blob and then dirty shutdown, reload the blob store
3152  *   and verify the second blob is invalid.
3153  * 5 Create the second blob again and also create the third blob, modify the
3154  *   md of second blob which makes the md invalid, and then dirty shutdown,
3155  *   reload the blob store verify the second blob, it should invalid and also
3156  *   verify the third blob, it should correct.
3157  */
3158 static void
3159 blob_dirty_shutdown(void)
3160 {
3161 	int rc;
3162 	int index;
3163 	struct spdk_blob_store *bs = g_bs;
3164 	spdk_blob_id blobid1, blobid2, blobid3;
3165 	struct spdk_blob *blob = g_blob;
3166 	uint64_t length;
3167 	uint64_t free_clusters;
3168 	const void *value;
3169 	size_t value_len;
3170 	uint32_t page_num;
3171 	struct spdk_blob_md_page *page;
3172 	struct spdk_blob_opts blob_opts;
3173 
3174 	/* Create first blob */
3175 	blobid1 = spdk_blob_get_id(blob);
3176 
3177 	/* Set some xattrs */
3178 	rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
3179 	CU_ASSERT(rc == 0);
3180 
3181 	length = 2345;
3182 	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
3183 	CU_ASSERT(rc == 0);
3184 
3185 	/* Put xattr that fits exactly single page.
3186 	 * This results in adding additional pages to MD.
3187 	 * First is flags and smaller xattr, second the large xattr,
3188 	 * third are just the extents.
3189 	 */
3190 	size_t xattr_length = 4072 - sizeof(struct spdk_blob_md_descriptor_xattr) -
3191 			      strlen("large_xattr");
3192 	char *xattr = calloc(xattr_length, sizeof(char));
3193 	SPDK_CU_ASSERT_FATAL(xattr != NULL);
3194 	rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
3195 	free(xattr);
3196 	SPDK_CU_ASSERT_FATAL(rc == 0);
3197 
3198 	/* Resize the blob */
3199 	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
3200 	poll_threads();
3201 	CU_ASSERT(g_bserrno == 0);
3202 
3203 	/* Set the blob as the super blob */
3204 	spdk_bs_set_super(bs, blobid1, blob_op_complete, NULL);
3205 	poll_threads();
3206 	CU_ASSERT(g_bserrno == 0);
3207 
3208 	free_clusters = spdk_bs_free_cluster_count(bs);
3209 
3210 	spdk_blob_close(blob, blob_op_complete, NULL);
3211 	poll_threads();
3212 	CU_ASSERT(g_bserrno == 0);
3213 	blob = NULL;
3214 	g_blob = NULL;
3215 	g_blobid = SPDK_BLOBID_INVALID;
3216 
3217 	ut_bs_dirty_load(&bs, NULL);
3218 
3219 	/* Get the super blob */
3220 	spdk_bs_get_super(bs, blob_op_with_id_complete, NULL);
3221 	poll_threads();
3222 	CU_ASSERT(g_bserrno == 0);
3223 	CU_ASSERT(blobid1 == g_blobid);
3224 
3225 	spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL);
3226 	poll_threads();
3227 	CU_ASSERT(g_bserrno == 0);
3228 	CU_ASSERT(g_blob != NULL);
3229 	blob = g_blob;
3230 
3231 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3232 
3233 	/* Get the xattrs */
3234 	value = NULL;
3235 	rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len);
3236 	CU_ASSERT(rc == 0);
3237 	SPDK_CU_ASSERT_FATAL(value != NULL);
3238 	CU_ASSERT(*(uint64_t *)value == length);
3239 	CU_ASSERT(value_len == 8);
3240 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
3241 
3242 	/* Resize the blob */
3243 	spdk_blob_resize(blob, 20, blob_op_complete, NULL);
3244 	poll_threads();
3245 	CU_ASSERT(g_bserrno == 0);
3246 
3247 	free_clusters = spdk_bs_free_cluster_count(bs);
3248 
3249 	spdk_blob_close(blob, blob_op_complete, NULL);
3250 	poll_threads();
3251 	CU_ASSERT(g_bserrno == 0);
3252 	blob = NULL;
3253 	g_blob = NULL;
3254 	g_blobid = SPDK_BLOBID_INVALID;
3255 
3256 	ut_bs_dirty_load(&bs, NULL);
3257 
3258 	spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL);
3259 	poll_threads();
3260 	CU_ASSERT(g_bserrno == 0);
3261 	CU_ASSERT(g_blob != NULL);
3262 	blob = g_blob;
3263 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 20);
3264 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3265 
3266 	spdk_blob_close(blob, blob_op_complete, NULL);
3267 	poll_threads();
3268 	CU_ASSERT(g_bserrno == 0);
3269 	blob = NULL;
3270 	g_blob = NULL;
3271 	g_blobid = SPDK_BLOBID_INVALID;
3272 
3273 	/* Create second blob */
3274 	blob = ut_blob_create_and_open(bs, NULL);
3275 	blobid2 = spdk_blob_get_id(blob);
3276 
3277 	/* Set some xattrs */
3278 	rc = spdk_blob_set_xattr(blob, "name", "log1.txt", strlen("log1.txt") + 1);
3279 	CU_ASSERT(rc == 0);
3280 
3281 	length = 5432;
3282 	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
3283 	CU_ASSERT(rc == 0);
3284 
3285 	/* Resize the blob */
3286 	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
3287 	poll_threads();
3288 	CU_ASSERT(g_bserrno == 0);
3289 
3290 	free_clusters = spdk_bs_free_cluster_count(bs);
3291 
3292 	spdk_blob_close(blob, blob_op_complete, NULL);
3293 	poll_threads();
3294 	CU_ASSERT(g_bserrno == 0);
3295 	blob = NULL;
3296 	g_blob = NULL;
3297 	g_blobid = SPDK_BLOBID_INVALID;
3298 
3299 	ut_bs_dirty_load(&bs, NULL);
3300 
3301 	spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL);
3302 	poll_threads();
3303 	CU_ASSERT(g_bserrno == 0);
3304 	CU_ASSERT(g_blob != NULL);
3305 	blob = g_blob;
3306 
3307 	/* Get the xattrs */
3308 	value = NULL;
3309 	rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len);
3310 	CU_ASSERT(rc == 0);
3311 	SPDK_CU_ASSERT_FATAL(value != NULL);
3312 	CU_ASSERT(*(uint64_t *)value == length);
3313 	CU_ASSERT(value_len == 8);
3314 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
3315 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3316 
3317 	ut_blob_close_and_delete(bs, blob);
3318 
3319 	free_clusters = spdk_bs_free_cluster_count(bs);
3320 
3321 	ut_bs_dirty_load(&bs, NULL);
3322 
3323 	spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL);
3324 	poll_threads();
3325 	CU_ASSERT(g_bserrno != 0);
3326 	CU_ASSERT(g_blob == NULL);
3327 
3328 	spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL);
3329 	poll_threads();
3330 	CU_ASSERT(g_bserrno == 0);
3331 	CU_ASSERT(g_blob != NULL);
3332 	blob = g_blob;
3333 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3334 	spdk_blob_close(blob, blob_op_complete, NULL);
3335 	poll_threads();
3336 	CU_ASSERT(g_bserrno == 0);
3337 
3338 	ut_bs_reload(&bs, NULL);
3339 
3340 	/* Create second blob */
3341 	ut_spdk_blob_opts_init(&blob_opts);
3342 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
3343 	poll_threads();
3344 	CU_ASSERT(g_bserrno == 0);
3345 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
3346 	blobid2 = g_blobid;
3347 
3348 	/* Create third blob */
3349 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
3350 	poll_threads();
3351 	CU_ASSERT(g_bserrno == 0);
3352 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
3353 	blobid3 = g_blobid;
3354 
3355 	spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL);
3356 	poll_threads();
3357 	CU_ASSERT(g_bserrno == 0);
3358 	CU_ASSERT(g_blob != NULL);
3359 	blob = g_blob;
3360 
3361 	/* Set some xattrs for second blob */
3362 	rc = spdk_blob_set_xattr(blob, "name", "log1.txt", strlen("log1.txt") + 1);
3363 	CU_ASSERT(rc == 0);
3364 
3365 	length = 5432;
3366 	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
3367 	CU_ASSERT(rc == 0);
3368 
3369 	spdk_blob_close(blob, blob_op_complete, NULL);
3370 	poll_threads();
3371 	CU_ASSERT(g_bserrno == 0);
3372 	blob = NULL;
3373 	g_blob = NULL;
3374 	g_blobid = SPDK_BLOBID_INVALID;
3375 
3376 	spdk_bs_open_blob(bs, blobid3, blob_op_with_handle_complete, NULL);
3377 	poll_threads();
3378 	CU_ASSERT(g_bserrno == 0);
3379 	CU_ASSERT(g_blob != NULL);
3380 	blob = g_blob;
3381 
3382 	/* Set some xattrs for third blob */
3383 	rc = spdk_blob_set_xattr(blob, "name", "log2.txt", strlen("log2.txt") + 1);
3384 	CU_ASSERT(rc == 0);
3385 
3386 	length = 5432;
3387 	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
3388 	CU_ASSERT(rc == 0);
3389 
3390 	spdk_blob_close(blob, blob_op_complete, NULL);
3391 	poll_threads();
3392 	CU_ASSERT(g_bserrno == 0);
3393 	blob = NULL;
3394 	g_blob = NULL;
3395 	g_blobid = SPDK_BLOBID_INVALID;
3396 
3397 	/* Mark second blob as invalid */
3398 	page_num = bs_blobid_to_page(blobid2);
3399 
3400 	index = DEV_BUFFER_BLOCKLEN * (bs->md_start + page_num);
3401 	page = (struct spdk_blob_md_page *)&g_dev_buffer[index];
3402 	page->sequence_num = 1;
3403 	page->crc = blob_md_page_calc_crc(page);
3404 
3405 	free_clusters = spdk_bs_free_cluster_count(bs);
3406 
3407 	ut_bs_dirty_load(&bs, NULL);
3408 
3409 	spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL);
3410 	poll_threads();
3411 	CU_ASSERT(g_bserrno != 0);
3412 	CU_ASSERT(g_blob == NULL);
3413 
3414 	spdk_bs_open_blob(bs, blobid3, blob_op_with_handle_complete, NULL);
3415 	poll_threads();
3416 	CU_ASSERT(g_bserrno == 0);
3417 	CU_ASSERT(g_blob != NULL);
3418 	blob = g_blob;
3419 
3420 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3421 }
3422 
3423 static void
3424 blob_flags(void)
3425 {
3426 	struct spdk_blob_store *bs = g_bs;
3427 	spdk_blob_id blobid_invalid, blobid_data_ro, blobid_md_ro;
3428 	struct spdk_blob *blob_invalid, *blob_data_ro, *blob_md_ro;
3429 	struct spdk_blob_opts blob_opts;
3430 	int rc;
3431 
3432 	/* Create three blobs - one each for testing invalid, data_ro and md_ro flags. */
3433 	blob_invalid = ut_blob_create_and_open(bs, NULL);
3434 	blobid_invalid = spdk_blob_get_id(blob_invalid);
3435 
3436 	blob_data_ro = ut_blob_create_and_open(bs, NULL);
3437 	blobid_data_ro = spdk_blob_get_id(blob_data_ro);
3438 
3439 	ut_spdk_blob_opts_init(&blob_opts);
3440 	blob_opts.clear_method = BLOB_CLEAR_WITH_WRITE_ZEROES;
3441 	blob_md_ro = ut_blob_create_and_open(bs, &blob_opts);
3442 	blobid_md_ro = spdk_blob_get_id(blob_md_ro);
3443 	CU_ASSERT((blob_md_ro->md_ro_flags & SPDK_BLOB_MD_RO_FLAGS_MASK) == BLOB_CLEAR_WITH_WRITE_ZEROES);
3444 
3445 	/* Change the size of blob_data_ro to check if flags are serialized
3446 	 * when blob has non zero number of extents */
3447 	spdk_blob_resize(blob_data_ro, 10, blob_op_complete, NULL);
3448 	poll_threads();
3449 	CU_ASSERT(g_bserrno == 0);
3450 
3451 	/* Set the xattr to check if flags are serialized
3452 	 * when blob has non zero number of xattrs */
3453 	rc = spdk_blob_set_xattr(blob_md_ro, "name", "log.txt", strlen("log.txt") + 1);
3454 	CU_ASSERT(rc == 0);
3455 
3456 	blob_invalid->invalid_flags = (1ULL << 63);
3457 	blob_invalid->state = SPDK_BLOB_STATE_DIRTY;
3458 	blob_data_ro->data_ro_flags = (1ULL << 62);
3459 	blob_data_ro->state = SPDK_BLOB_STATE_DIRTY;
3460 	blob_md_ro->md_ro_flags = (1ULL << 61);
3461 	blob_md_ro->state = SPDK_BLOB_STATE_DIRTY;
3462 
3463 	g_bserrno = -1;
3464 	spdk_blob_sync_md(blob_invalid, blob_op_complete, NULL);
3465 	poll_threads();
3466 	CU_ASSERT(g_bserrno == 0);
3467 	g_bserrno = -1;
3468 	spdk_blob_sync_md(blob_data_ro, blob_op_complete, NULL);
3469 	poll_threads();
3470 	CU_ASSERT(g_bserrno == 0);
3471 	g_bserrno = -1;
3472 	spdk_blob_sync_md(blob_md_ro, blob_op_complete, NULL);
3473 	poll_threads();
3474 	CU_ASSERT(g_bserrno == 0);
3475 
3476 	g_bserrno = -1;
3477 	spdk_blob_close(blob_invalid, blob_op_complete, NULL);
3478 	poll_threads();
3479 	CU_ASSERT(g_bserrno == 0);
3480 	blob_invalid = NULL;
3481 	g_bserrno = -1;
3482 	spdk_blob_close(blob_data_ro, blob_op_complete, NULL);
3483 	poll_threads();
3484 	CU_ASSERT(g_bserrno == 0);
3485 	blob_data_ro = NULL;
3486 	g_bserrno = -1;
3487 	spdk_blob_close(blob_md_ro, blob_op_complete, NULL);
3488 	poll_threads();
3489 	CU_ASSERT(g_bserrno == 0);
3490 	blob_md_ro = NULL;
3491 
3492 	g_blob = NULL;
3493 	g_blobid = SPDK_BLOBID_INVALID;
3494 
3495 	ut_bs_reload(&bs, NULL);
3496 
3497 	g_blob = NULL;
3498 	g_bserrno = 0;
3499 	spdk_bs_open_blob(bs, blobid_invalid, blob_op_with_handle_complete, NULL);
3500 	poll_threads();
3501 	CU_ASSERT(g_bserrno != 0);
3502 	CU_ASSERT(g_blob == NULL);
3503 
3504 	g_blob = NULL;
3505 	g_bserrno = -1;
3506 	spdk_bs_open_blob(bs, blobid_data_ro, blob_op_with_handle_complete, NULL);
3507 	poll_threads();
3508 	CU_ASSERT(g_bserrno == 0);
3509 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
3510 	blob_data_ro = g_blob;
3511 	/* If an unknown data_ro flag was found, the blob should be marked both data and md read-only. */
3512 	CU_ASSERT(blob_data_ro->data_ro == true);
3513 	CU_ASSERT(blob_data_ro->md_ro == true);
3514 	CU_ASSERT(spdk_blob_get_num_clusters(blob_data_ro) == 10);
3515 
3516 	g_blob = NULL;
3517 	g_bserrno = -1;
3518 	spdk_bs_open_blob(bs, blobid_md_ro, blob_op_with_handle_complete, NULL);
3519 	poll_threads();
3520 	CU_ASSERT(g_bserrno == 0);
3521 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
3522 	blob_md_ro = g_blob;
3523 	CU_ASSERT(blob_md_ro->data_ro == false);
3524 	CU_ASSERT(blob_md_ro->md_ro == true);
3525 
3526 	g_bserrno = -1;
3527 	spdk_blob_sync_md(blob_md_ro, blob_op_complete, NULL);
3528 	poll_threads();
3529 	CU_ASSERT(g_bserrno == 0);
3530 
3531 	ut_blob_close_and_delete(bs, blob_data_ro);
3532 	ut_blob_close_and_delete(bs, blob_md_ro);
3533 }
3534 
3535 static void
3536 bs_version(void)
3537 {
3538 	struct spdk_bs_super_block *super;
3539 	struct spdk_blob_store *bs = g_bs;
3540 	struct spdk_bs_dev *dev;
3541 	struct spdk_blob *blob;
3542 	struct spdk_blob_opts blob_opts;
3543 	spdk_blob_id blobid;
3544 
3545 	/* Unload the blob store */
3546 	spdk_bs_unload(bs, bs_op_complete, NULL);
3547 	poll_threads();
3548 	CU_ASSERT(g_bserrno == 0);
3549 	g_bs = NULL;
3550 
3551 	/*
3552 	 * Change the bs version on disk.  This will allow us to
3553 	 *  test that the version does not get modified automatically
3554 	 *  when loading and unloading the blobstore.
3555 	 */
3556 	super = (struct spdk_bs_super_block *)&g_dev_buffer[0];
3557 	CU_ASSERT(super->version == SPDK_BS_VERSION);
3558 	CU_ASSERT(super->clean == 1);
3559 	super->version = 2;
3560 	/*
3561 	 * Version 2 metadata does not have a used blobid mask, so clear
3562 	 *  those fields in the super block and zero the corresponding
3563 	 *  region on "disk".  We will use this to ensure blob IDs are
3564 	 *  correctly reconstructed.
3565 	 */
3566 	memset(&g_dev_buffer[super->used_blobid_mask_start * SPDK_BS_PAGE_SIZE], 0,
3567 	       super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE);
3568 	super->used_blobid_mask_start = 0;
3569 	super->used_blobid_mask_len = 0;
3570 	super->crc = blob_md_page_calc_crc(super);
3571 
3572 	/* Load an existing blob store */
3573 	dev = init_dev();
3574 	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
3575 	poll_threads();
3576 	CU_ASSERT(g_bserrno == 0);
3577 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3578 	CU_ASSERT(super->clean == 1);
3579 	bs = g_bs;
3580 
3581 	/*
3582 	 * Create a blob - just to make sure that when we unload it
3583 	 *  results in writing the super block (since metadata pages
3584 	 *  were allocated.
3585 	 */
3586 	ut_spdk_blob_opts_init(&blob_opts);
3587 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
3588 	poll_threads();
3589 	CU_ASSERT(g_bserrno == 0);
3590 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
3591 	blobid = g_blobid;
3592 
3593 	/* Unload the blob store */
3594 	spdk_bs_unload(bs, bs_op_complete, NULL);
3595 	poll_threads();
3596 	CU_ASSERT(g_bserrno == 0);
3597 	g_bs = NULL;
3598 	CU_ASSERT(super->version == 2);
3599 	CU_ASSERT(super->used_blobid_mask_start == 0);
3600 	CU_ASSERT(super->used_blobid_mask_len == 0);
3601 
3602 	dev = init_dev();
3603 	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
3604 	poll_threads();
3605 	CU_ASSERT(g_bserrno == 0);
3606 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3607 	bs = g_bs;
3608 
3609 	g_blob = NULL;
3610 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
3611 	poll_threads();
3612 	CU_ASSERT(g_bserrno == 0);
3613 	CU_ASSERT(g_blob != NULL);
3614 	blob = g_blob;
3615 
3616 	ut_blob_close_and_delete(bs, blob);
3617 
3618 	CU_ASSERT(super->version == 2);
3619 	CU_ASSERT(super->used_blobid_mask_start == 0);
3620 	CU_ASSERT(super->used_blobid_mask_len == 0);
3621 }
3622 
3623 static void
3624 blob_set_xattrs_test(void)
3625 {
3626 	struct spdk_blob_store *bs = g_bs;
3627 	struct spdk_blob *blob;
3628 	struct spdk_blob_opts opts;
3629 	const void *value;
3630 	size_t value_len;
3631 	char *xattr;
3632 	size_t xattr_length;
3633 	int rc;
3634 
3635 	/* Create blob with extra attributes */
3636 	ut_spdk_blob_opts_init(&opts);
3637 
3638 	opts.xattrs.names = g_xattr_names;
3639 	opts.xattrs.get_value = _get_xattr_value;
3640 	opts.xattrs.count = 3;
3641 	opts.xattrs.ctx = &g_ctx;
3642 
3643 	blob = ut_blob_create_and_open(bs, &opts);
3644 
3645 	/* Get the xattrs */
3646 	value = NULL;
3647 
3648 	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len);
3649 	CU_ASSERT(rc == 0);
3650 	SPDK_CU_ASSERT_FATAL(value != NULL);
3651 	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
3652 	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);
3653 
3654 	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len);
3655 	CU_ASSERT(rc == 0);
3656 	SPDK_CU_ASSERT_FATAL(value != NULL);
3657 	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
3658 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);
3659 
3660 	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len);
3661 	CU_ASSERT(rc == 0);
3662 	SPDK_CU_ASSERT_FATAL(value != NULL);
3663 	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
3664 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);
3665 
3666 	/* Try to get non existing attribute */
3667 
3668 	rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len);
3669 	CU_ASSERT(rc == -ENOENT);
3670 
3671 	/* Try xattr exceeding maximum length of descriptor in single page */
3672 	xattr_length = SPDK_BS_MAX_DESC_SIZE - sizeof(struct spdk_blob_md_descriptor_xattr) -
3673 		       strlen("large_xattr") + 1;
3674 	xattr = calloc(xattr_length, sizeof(char));
3675 	SPDK_CU_ASSERT_FATAL(xattr != NULL);
3676 	rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
3677 	free(xattr);
3678 	SPDK_CU_ASSERT_FATAL(rc == -ENOMEM);
3679 
3680 	spdk_blob_close(blob, blob_op_complete, NULL);
3681 	poll_threads();
3682 	CU_ASSERT(g_bserrno == 0);
3683 	blob = NULL;
3684 	g_blob = NULL;
3685 	g_blobid = SPDK_BLOBID_INVALID;
3686 
3687 	/* NULL callback */
3688 	ut_spdk_blob_opts_init(&opts);
3689 	opts.xattrs.names = g_xattr_names;
3690 	opts.xattrs.get_value = NULL;
3691 	opts.xattrs.count = 1;
3692 	opts.xattrs.ctx = &g_ctx;
3693 
3694 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
3695 	poll_threads();
3696 	CU_ASSERT(g_bserrno == -EINVAL);
3697 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
3698 
3699 	/* NULL values */
3700 	ut_spdk_blob_opts_init(&opts);
3701 	opts.xattrs.names = g_xattr_names;
3702 	opts.xattrs.get_value = _get_xattr_value_null;
3703 	opts.xattrs.count = 1;
3704 	opts.xattrs.ctx = NULL;
3705 
3706 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
3707 	poll_threads();
3708 	CU_ASSERT(g_bserrno == -EINVAL);
3709 }
3710 
3711 static void
3712 blob_thin_prov_alloc(void)
3713 {
3714 	struct spdk_blob_store *bs = g_bs;
3715 	struct spdk_blob *blob;
3716 	struct spdk_blob_opts opts;
3717 	spdk_blob_id blobid;
3718 	uint64_t free_clusters;
3719 
3720 	free_clusters = spdk_bs_free_cluster_count(bs);
3721 
3722 	/* Set blob as thin provisioned */
3723 	ut_spdk_blob_opts_init(&opts);
3724 	opts.thin_provision = true;
3725 
3726 	blob = ut_blob_create_and_open(bs, &opts);
3727 	blobid = spdk_blob_get_id(blob);
3728 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3729 
3730 	CU_ASSERT(blob->active.num_clusters == 0);
3731 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
3732 
3733 	/* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */
3734 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
3735 	poll_threads();
3736 	CU_ASSERT(g_bserrno == 0);
3737 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3738 	CU_ASSERT(blob->active.num_clusters == 5);
3739 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
3740 
3741 	/* Grow it to 1TB - still unallocated */
3742 	spdk_blob_resize(blob, 262144, blob_op_complete, NULL);
3743 	poll_threads();
3744 	CU_ASSERT(g_bserrno == 0);
3745 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3746 	CU_ASSERT(blob->active.num_clusters == 262144);
3747 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 262144);
3748 
3749 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
3750 	poll_threads();
3751 	CU_ASSERT(g_bserrno == 0);
3752 	/* Sync must not change anything */
3753 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3754 	CU_ASSERT(blob->active.num_clusters == 262144);
3755 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 262144);
3756 	/* Since clusters are not allocated,
3757 	 * number of metadata pages is expected to be minimal.
3758 	 */
3759 	CU_ASSERT(blob->active.num_pages == 1);
3760 
3761 	/* Shrink the blob to 3 clusters - still unallocated */
3762 	spdk_blob_resize(blob, 3, blob_op_complete, NULL);
3763 	poll_threads();
3764 	CU_ASSERT(g_bserrno == 0);
3765 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3766 	CU_ASSERT(blob->active.num_clusters == 3);
3767 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 3);
3768 
3769 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
3770 	poll_threads();
3771 	CU_ASSERT(g_bserrno == 0);
3772 	/* Sync must not change anything */
3773 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3774 	CU_ASSERT(blob->active.num_clusters == 3);
3775 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 3);
3776 
3777 	spdk_blob_close(blob, blob_op_complete, NULL);
3778 	poll_threads();
3779 	CU_ASSERT(g_bserrno == 0);
3780 
3781 	ut_bs_reload(&bs, NULL);
3782 
3783 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
3784 	poll_threads();
3785 	CU_ASSERT(g_bserrno == 0);
3786 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
3787 	blob = g_blob;
3788 
3789 	/* Check that clusters allocation and size is still the same */
3790 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3791 	CU_ASSERT(blob->active.num_clusters == 3);
3792 
3793 	ut_blob_close_and_delete(bs, blob);
3794 }
3795 
3796 static void
3797 blob_insert_cluster_msg_test(void)
3798 {
3799 	struct spdk_blob_store *bs = g_bs;
3800 	struct spdk_blob *blob;
3801 	struct spdk_blob_opts opts;
3802 	struct spdk_blob_md_page page = {};
3803 	spdk_blob_id blobid;
3804 	uint64_t free_clusters;
3805 	uint64_t new_cluster = 0;
3806 	uint32_t cluster_num = 3;
3807 	uint32_t extent_page = 0;
3808 
3809 	free_clusters = spdk_bs_free_cluster_count(bs);
3810 
3811 	/* Set blob as thin provisioned */
3812 	ut_spdk_blob_opts_init(&opts);
3813 	opts.thin_provision = true;
3814 	opts.num_clusters = 4;
3815 
3816 	blob = ut_blob_create_and_open(bs, &opts);
3817 	blobid = spdk_blob_get_id(blob);
3818 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3819 
3820 	CU_ASSERT(blob->active.num_clusters == 4);
3821 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 4);
3822 	CU_ASSERT(blob->active.clusters[cluster_num] == 0);
3823 
3824 	/* Specify cluster_num to allocate and new_cluster will be returned to insert on md_thread.
3825 	 * This is to simulate behaviour when cluster is allocated after blob creation.
3826 	 * Such as _spdk_bs_allocate_and_copy_cluster(). */
3827 	bs_allocate_cluster(blob, cluster_num, &new_cluster, &extent_page, false);
3828 	CU_ASSERT(blob->active.clusters[cluster_num] == 0);
3829 
3830 	blob_insert_cluster_on_md_thread(blob, cluster_num, new_cluster, extent_page, &page,
3831 					 blob_op_complete, NULL);
3832 	poll_threads();
3833 
3834 	CU_ASSERT(blob->active.clusters[cluster_num] != 0);
3835 
3836 	spdk_blob_close(blob, blob_op_complete, NULL);
3837 	poll_threads();
3838 	CU_ASSERT(g_bserrno == 0);
3839 
3840 	ut_bs_reload(&bs, NULL);
3841 
3842 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
3843 	poll_threads();
3844 	CU_ASSERT(g_bserrno == 0);
3845 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
3846 	blob = g_blob;
3847 
3848 	CU_ASSERT(blob->active.clusters[cluster_num] != 0);
3849 
3850 	ut_blob_close_and_delete(bs, blob);
3851 }
3852 
3853 static void
3854 blob_thin_prov_rw(void)
3855 {
3856 	static const uint8_t zero[10 * 4096] = { 0 };
3857 	struct spdk_blob_store *bs = g_bs;
3858 	struct spdk_blob *blob, *blob_id0;
3859 	struct spdk_io_channel *channel, *channel_thread1;
3860 	struct spdk_blob_opts opts;
3861 	uint64_t free_clusters;
3862 	uint64_t page_size;
3863 	uint8_t payload_read[10 * 4096];
3864 	uint8_t payload_write[10 * 4096];
3865 	uint64_t write_bytes;
3866 	uint64_t read_bytes;
3867 
3868 	free_clusters = spdk_bs_free_cluster_count(bs);
3869 	page_size = spdk_bs_get_page_size(bs);
3870 
3871 	channel = spdk_bs_alloc_io_channel(bs);
3872 	CU_ASSERT(channel != NULL);
3873 
3874 	ut_spdk_blob_opts_init(&opts);
3875 	opts.thin_provision = true;
3876 
3877 	/* Create and delete blob at md page 0, so that next md page allocation
3878 	 * for extent will use that. */
3879 	blob_id0 = ut_blob_create_and_open(bs, &opts);
3880 	blob = ut_blob_create_and_open(bs, &opts);
3881 	ut_blob_close_and_delete(bs, blob_id0);
3882 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3883 
3884 	CU_ASSERT(blob->active.num_clusters == 0);
3885 
3886 	/* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */
3887 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
3888 	poll_threads();
3889 	CU_ASSERT(g_bserrno == 0);
3890 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3891 	CU_ASSERT(blob->active.num_clusters == 5);
3892 
3893 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
3894 	poll_threads();
3895 	CU_ASSERT(g_bserrno == 0);
3896 	/* Sync must not change anything */
3897 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3898 	CU_ASSERT(blob->active.num_clusters == 5);
3899 
3900 	/* Payload should be all zeros from unallocated clusters */
3901 	memset(payload_read, 0xFF, sizeof(payload_read));
3902 	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
3903 	poll_threads();
3904 	CU_ASSERT(g_bserrno == 0);
3905 	CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0);
3906 
3907 	write_bytes = g_dev_write_bytes;
3908 	read_bytes = g_dev_read_bytes;
3909 
3910 	/* Perform write on thread 1. That will allocate cluster on thread 0 via send_msg */
3911 	set_thread(1);
3912 	channel_thread1 = spdk_bs_alloc_io_channel(bs);
3913 	CU_ASSERT(channel_thread1 != NULL);
3914 	memset(payload_write, 0xE5, sizeof(payload_write));
3915 	spdk_blob_io_write(blob, channel_thread1, payload_write, 4, 10, blob_op_complete, NULL);
3916 	CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs));
3917 	/* Perform write on thread 0. That will try to allocate cluster,
3918 	 * but fail due to another thread issuing the cluster allocation first. */
3919 	set_thread(0);
3920 	memset(payload_write, 0xE5, sizeof(payload_write));
3921 	spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
3922 	CU_ASSERT(free_clusters - 2 == spdk_bs_free_cluster_count(bs));
3923 	poll_threads();
3924 	CU_ASSERT(g_bserrno == 0);
3925 	CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs));
3926 	/* For thin-provisioned blob we need to write 20 pages plus one page metadata and
3927 	 * read 0 bytes */
3928 	if (g_use_extent_table) {
3929 		/* Add one more page for EXTENT_PAGE write */
3930 		CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 22);
3931 	} else {
3932 		CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 21);
3933 	}
3934 	CU_ASSERT(g_dev_read_bytes - read_bytes == 0);
3935 
3936 	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
3937 	poll_threads();
3938 	CU_ASSERT(g_bserrno == 0);
3939 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
3940 
3941 	ut_blob_close_and_delete(bs, blob);
3942 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3943 
3944 	set_thread(1);
3945 	spdk_bs_free_io_channel(channel_thread1);
3946 	set_thread(0);
3947 	spdk_bs_free_io_channel(channel);
3948 	poll_threads();
3949 	g_blob = NULL;
3950 	g_blobid = 0;
3951 }
3952 
3953 static void
3954 blob_thin_prov_write_count_io(void)
3955 {
3956 	struct spdk_blob_store *bs;
3957 	struct spdk_blob *blob;
3958 	struct spdk_io_channel *ch;
3959 	struct spdk_bs_dev *dev;
3960 	struct spdk_bs_opts bs_opts;
3961 	struct spdk_blob_opts opts;
3962 	uint64_t free_clusters;
3963 	uint64_t page_size;
3964 	uint8_t payload_write[4096];
3965 	uint64_t write_bytes;
3966 	uint64_t read_bytes;
3967 	const uint32_t CLUSTER_SZ = 16384;
3968 	uint32_t pages_per_cluster;
3969 	uint32_t pages_per_extent_page;
3970 	uint32_t i;
3971 
3972 	/* Use a very small cluster size for this test.  This ensures we need multiple
3973 	 * extent pages to hold all of the clusters even for relatively small blobs like
3974 	 * we are restricted to for the unit tests (i.e. we don't want to allocate multi-GB
3975 	 * buffers).
3976 	 */
3977 	dev = init_dev();
3978 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
3979 	bs_opts.cluster_sz = CLUSTER_SZ;
3980 
3981 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
3982 	poll_threads();
3983 	CU_ASSERT(g_bserrno == 0);
3984 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3985 	bs = g_bs;
3986 
3987 	free_clusters = spdk_bs_free_cluster_count(bs);
3988 	page_size = spdk_bs_get_page_size(bs);
3989 	pages_per_cluster = CLUSTER_SZ / page_size;
3990 	pages_per_extent_page = SPDK_EXTENTS_PER_EP * pages_per_cluster;
3991 
3992 	ch = spdk_bs_alloc_io_channel(bs);
3993 	SPDK_CU_ASSERT_FATAL(ch != NULL);
3994 
3995 	ut_spdk_blob_opts_init(&opts);
3996 	opts.thin_provision = true;
3997 
3998 	blob = ut_blob_create_and_open(bs, &opts);
3999 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4000 
4001 	/* Resize the blob so that it will require 8 extent pages to hold all of
4002 	 * the clusters.
4003 	 */
4004 	g_bserrno = -1;
4005 	spdk_blob_resize(blob, SPDK_EXTENTS_PER_EP * 8, blob_op_complete, NULL);
4006 	poll_threads();
4007 	CU_ASSERT(g_bserrno == 0);
4008 
4009 	g_bserrno = -1;
4010 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
4011 	poll_threads();
4012 	CU_ASSERT(g_bserrno == 0);
4013 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4014 	CU_ASSERT(blob->active.num_clusters == SPDK_EXTENTS_PER_EP * 8);
4015 
4016 	memset(payload_write, 0, sizeof(payload_write));
4017 	for (i = 0; i < 8; i++) {
4018 		write_bytes = g_dev_write_bytes;
4019 		read_bytes = g_dev_read_bytes;
4020 
4021 		g_bserrno = -1;
4022 		spdk_blob_io_write(blob, ch, payload_write, pages_per_extent_page * i, 1, blob_op_complete, NULL);
4023 		poll_threads();
4024 		CU_ASSERT(g_bserrno == 0);
4025 		CU_ASSERT(free_clusters - (2 * i + 1) == spdk_bs_free_cluster_count(bs));
4026 
4027 		CU_ASSERT(g_dev_read_bytes == read_bytes);
4028 		if (!g_use_extent_table) {
4029 			/* For legacy metadata, we should have written two pages - one for the
4030 			 * write I/O itself, another for the blob's primary metadata.
4031 			 */
4032 			CU_ASSERT((g_dev_write_bytes - write_bytes) / page_size == 2);
4033 		} else {
4034 			/* For extent table metadata, we should have written three pages - one
4035 			 * for the write I/O, one for the extent page, one for the blob's primary
4036 			 * metadata.
4037 			 */
4038 			CU_ASSERT((g_dev_write_bytes - write_bytes) / page_size == 3);
4039 		}
4040 
4041 		/* The write should have synced the metadata already.  Do another sync here
4042 		 * just to confirm.
4043 		 */
4044 		write_bytes = g_dev_write_bytes;
4045 		read_bytes = g_dev_read_bytes;
4046 
4047 		g_bserrno = -1;
4048 		spdk_blob_sync_md(blob, blob_op_complete, NULL);
4049 		poll_threads();
4050 		CU_ASSERT(g_bserrno == 0);
4051 		CU_ASSERT(free_clusters - (2 * i + 1) == spdk_bs_free_cluster_count(bs));
4052 
4053 		CU_ASSERT(g_dev_read_bytes == read_bytes);
4054 		CU_ASSERT(g_dev_write_bytes == write_bytes);
4055 
4056 		/* Now write to another unallocated cluster that is part of the same extent page. */
4057 		g_bserrno = -1;
4058 		spdk_blob_io_write(blob, ch, payload_write, pages_per_extent_page * i + pages_per_cluster,
4059 				   1, blob_op_complete, NULL);
4060 		poll_threads();
4061 		CU_ASSERT(g_bserrno == 0);
4062 		CU_ASSERT(free_clusters - (2 * i + 2) == spdk_bs_free_cluster_count(bs));
4063 
4064 		CU_ASSERT(g_dev_read_bytes == read_bytes);
4065 		/*
4066 		 * For legacy metadata, we should have written the I/O and the primary metadata page.
4067 		 * For extent table metadata, we should have written the I/O and the extent metadata page.
4068 		 */
4069 		CU_ASSERT((g_dev_write_bytes - write_bytes) / page_size == 2);
4070 	}
4071 
4072 	ut_blob_close_and_delete(bs, blob);
4073 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4074 
4075 	spdk_bs_free_io_channel(ch);
4076 	poll_threads();
4077 	g_blob = NULL;
4078 	g_blobid = 0;
4079 
4080 	spdk_bs_unload(bs, bs_op_complete, NULL);
4081 	poll_threads();
4082 	CU_ASSERT(g_bserrno == 0);
4083 	g_bs = NULL;
4084 }
4085 
4086 static void
4087 blob_thin_prov_rle(void)
4088 {
4089 	static const uint8_t zero[10 * 4096] = { 0 };
4090 	struct spdk_blob_store *bs = g_bs;
4091 	struct spdk_blob *blob;
4092 	struct spdk_io_channel *channel;
4093 	struct spdk_blob_opts opts;
4094 	spdk_blob_id blobid;
4095 	uint64_t free_clusters;
4096 	uint64_t page_size;
4097 	uint8_t payload_read[10 * 4096];
4098 	uint8_t payload_write[10 * 4096];
4099 	uint64_t write_bytes;
4100 	uint64_t read_bytes;
4101 	uint64_t io_unit;
4102 
4103 	free_clusters = spdk_bs_free_cluster_count(bs);
4104 	page_size = spdk_bs_get_page_size(bs);
4105 
4106 	ut_spdk_blob_opts_init(&opts);
4107 	opts.thin_provision = true;
4108 	opts.num_clusters = 5;
4109 
4110 	blob = ut_blob_create_and_open(bs, &opts);
4111 	blobid = spdk_blob_get_id(blob);
4112 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4113 
4114 	channel = spdk_bs_alloc_io_channel(bs);
4115 	CU_ASSERT(channel != NULL);
4116 
4117 	/* Target specifically second cluster in a blob as first allocation */
4118 	io_unit = bs_cluster_to_page(bs, 1) * bs_io_unit_per_page(bs);
4119 
4120 	/* Payload should be all zeros from unallocated clusters */
4121 	memset(payload_read, 0xFF, sizeof(payload_read));
4122 	spdk_blob_io_read(blob, channel, payload_read, io_unit, 10, blob_op_complete, NULL);
4123 	poll_threads();
4124 	CU_ASSERT(g_bserrno == 0);
4125 	CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0);
4126 
4127 	write_bytes = g_dev_write_bytes;
4128 	read_bytes = g_dev_read_bytes;
4129 
4130 	/* Issue write to second cluster in a blob */
4131 	memset(payload_write, 0xE5, sizeof(payload_write));
4132 	spdk_blob_io_write(blob, channel, payload_write, io_unit, 10, blob_op_complete, NULL);
4133 	poll_threads();
4134 	CU_ASSERT(g_bserrno == 0);
4135 	CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs));
4136 	/* For thin-provisioned blob we need to write 10 pages plus one page metadata and
4137 	 * read 0 bytes */
4138 	if (g_use_extent_table) {
4139 		/* Add one more page for EXTENT_PAGE write */
4140 		CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 12);
4141 	} else {
4142 		CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 11);
4143 	}
4144 	CU_ASSERT(g_dev_read_bytes - read_bytes == 0);
4145 
4146 	spdk_blob_io_read(blob, channel, payload_read, io_unit, 10, blob_op_complete, NULL);
4147 	poll_threads();
4148 	CU_ASSERT(g_bserrno == 0);
4149 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
4150 
4151 	spdk_bs_free_io_channel(channel);
4152 	poll_threads();
4153 
4154 	spdk_blob_close(blob, blob_op_complete, NULL);
4155 	poll_threads();
4156 	CU_ASSERT(g_bserrno == 0);
4157 
4158 	ut_bs_reload(&bs, NULL);
4159 
4160 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
4161 	poll_threads();
4162 	CU_ASSERT(g_bserrno == 0);
4163 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4164 	blob = g_blob;
4165 
4166 	channel = spdk_bs_alloc_io_channel(bs);
4167 	CU_ASSERT(channel != NULL);
4168 
4169 	/* Read second cluster after blob reload to confirm data written */
4170 	spdk_blob_io_read(blob, channel, payload_read, io_unit, 10, blob_op_complete, NULL);
4171 	poll_threads();
4172 	CU_ASSERT(g_bserrno == 0);
4173 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
4174 
4175 	spdk_bs_free_io_channel(channel);
4176 	poll_threads();
4177 
4178 	ut_blob_close_and_delete(bs, blob);
4179 }
4180 
4181 static void
4182 blob_thin_prov_rw_iov(void)
4183 {
4184 	static const uint8_t zero[10 * 4096] = { 0 };
4185 	struct spdk_blob_store *bs = g_bs;
4186 	struct spdk_blob *blob;
4187 	struct spdk_io_channel *channel;
4188 	struct spdk_blob_opts opts;
4189 	uint64_t free_clusters;
4190 	uint8_t payload_read[10 * 4096];
4191 	uint8_t payload_write[10 * 4096];
4192 	struct iovec iov_read[3];
4193 	struct iovec iov_write[3];
4194 
4195 	free_clusters = spdk_bs_free_cluster_count(bs);
4196 
4197 	channel = spdk_bs_alloc_io_channel(bs);
4198 	CU_ASSERT(channel != NULL);
4199 
4200 	ut_spdk_blob_opts_init(&opts);
4201 	opts.thin_provision = true;
4202 
4203 	blob = ut_blob_create_and_open(bs, &opts);
4204 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4205 
4206 	CU_ASSERT(blob->active.num_clusters == 0);
4207 
4208 	/* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */
4209 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
4210 	poll_threads();
4211 	CU_ASSERT(g_bserrno == 0);
4212 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4213 	CU_ASSERT(blob->active.num_clusters == 5);
4214 
4215 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
4216 	poll_threads();
4217 	CU_ASSERT(g_bserrno == 0);
4218 	/* Sync must not change anything */
4219 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4220 	CU_ASSERT(blob->active.num_clusters == 5);
4221 
4222 	/* Payload should be all zeros from unallocated clusters */
4223 	memset(payload_read, 0xAA, sizeof(payload_read));
4224 	iov_read[0].iov_base = payload_read;
4225 	iov_read[0].iov_len = 3 * 4096;
4226 	iov_read[1].iov_base = payload_read + 3 * 4096;
4227 	iov_read[1].iov_len = 4 * 4096;
4228 	iov_read[2].iov_base = payload_read + 7 * 4096;
4229 	iov_read[2].iov_len = 3 * 4096;
4230 	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
4231 	poll_threads();
4232 	CU_ASSERT(g_bserrno == 0);
4233 	CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0);
4234 
4235 	memset(payload_write, 0xE5, sizeof(payload_write));
4236 	iov_write[0].iov_base = payload_write;
4237 	iov_write[0].iov_len = 1 * 4096;
4238 	iov_write[1].iov_base = payload_write + 1 * 4096;
4239 	iov_write[1].iov_len = 5 * 4096;
4240 	iov_write[2].iov_base = payload_write + 6 * 4096;
4241 	iov_write[2].iov_len = 4 * 4096;
4242 
4243 	spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
4244 	poll_threads();
4245 	CU_ASSERT(g_bserrno == 0);
4246 
4247 	memset(payload_read, 0xAA, sizeof(payload_read));
4248 	iov_read[0].iov_base = payload_read;
4249 	iov_read[0].iov_len = 3 * 4096;
4250 	iov_read[1].iov_base = payload_read + 3 * 4096;
4251 	iov_read[1].iov_len = 4 * 4096;
4252 	iov_read[2].iov_base = payload_read + 7 * 4096;
4253 	iov_read[2].iov_len = 3 * 4096;
4254 	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
4255 	poll_threads();
4256 	CU_ASSERT(g_bserrno == 0);
4257 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
4258 
4259 	spdk_bs_free_io_channel(channel);
4260 	poll_threads();
4261 
4262 	ut_blob_close_and_delete(bs, blob);
4263 }
4264 
4265 struct iter_ctx {
4266 	int		current_iter;
4267 	spdk_blob_id	blobid[4];
4268 };
4269 
4270 static void
4271 test_iter(void *arg, struct spdk_blob *blob, int bserrno)
4272 {
4273 	struct iter_ctx *iter_ctx = arg;
4274 	spdk_blob_id blobid;
4275 
4276 	CU_ASSERT(bserrno == 0);
4277 	blobid = spdk_blob_get_id(blob);
4278 	CU_ASSERT(blobid == iter_ctx->blobid[iter_ctx->current_iter++]);
4279 }
4280 
4281 static void
4282 bs_load_iter_test(void)
4283 {
4284 	struct spdk_blob_store *bs;
4285 	struct spdk_bs_dev *dev;
4286 	struct iter_ctx iter_ctx = { 0 };
4287 	struct spdk_blob *blob;
4288 	int i, rc;
4289 	struct spdk_bs_opts opts;
4290 
4291 	dev = init_dev();
4292 	spdk_bs_opts_init(&opts, sizeof(opts));
4293 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
4294 
4295 	/* Initialize a new blob store */
4296 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
4297 	poll_threads();
4298 	CU_ASSERT(g_bserrno == 0);
4299 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
4300 	bs = g_bs;
4301 
4302 	for (i = 0; i < 4; i++) {
4303 		blob = ut_blob_create_and_open(bs, NULL);
4304 		iter_ctx.blobid[i] = spdk_blob_get_id(blob);
4305 
4306 		/* Just save the blobid as an xattr for testing purposes. */
4307 		rc = spdk_blob_set_xattr(blob, "blobid", &iter_ctx.blobid[i], sizeof(spdk_blob_id));
4308 		CU_ASSERT(rc == 0);
4309 
4310 		/* Resize the blob */
4311 		spdk_blob_resize(blob, i, blob_op_complete, NULL);
4312 		poll_threads();
4313 		CU_ASSERT(g_bserrno == 0);
4314 
4315 		spdk_blob_close(blob, blob_op_complete, NULL);
4316 		poll_threads();
4317 		CU_ASSERT(g_bserrno == 0);
4318 	}
4319 
4320 	g_bserrno = -1;
4321 	spdk_bs_unload(bs, bs_op_complete, NULL);
4322 	poll_threads();
4323 	CU_ASSERT(g_bserrno == 0);
4324 
4325 	dev = init_dev();
4326 	spdk_bs_opts_init(&opts, sizeof(opts));
4327 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
4328 	opts.iter_cb_fn = test_iter;
4329 	opts.iter_cb_arg = &iter_ctx;
4330 
4331 	/* Test blob iteration during load after a clean shutdown. */
4332 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
4333 	poll_threads();
4334 	CU_ASSERT(g_bserrno == 0);
4335 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
4336 	bs = g_bs;
4337 
4338 	/* Dirty shutdown */
4339 	bs_free(bs);
4340 
4341 	dev = init_dev();
4342 	spdk_bs_opts_init(&opts, sizeof(opts));
4343 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
4344 	opts.iter_cb_fn = test_iter;
4345 	iter_ctx.current_iter = 0;
4346 	opts.iter_cb_arg = &iter_ctx;
4347 
4348 	/* Test blob iteration during load after a dirty shutdown. */
4349 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
4350 	poll_threads();
4351 	CU_ASSERT(g_bserrno == 0);
4352 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
4353 	bs = g_bs;
4354 
4355 	spdk_bs_unload(bs, bs_op_complete, NULL);
4356 	poll_threads();
4357 	CU_ASSERT(g_bserrno == 0);
4358 	g_bs = NULL;
4359 }
4360 
4361 static void
4362 blob_snapshot_rw(void)
4363 {
4364 	static const uint8_t zero[10 * 4096] = { 0 };
4365 	struct spdk_blob_store *bs = g_bs;
4366 	struct spdk_blob *blob, *snapshot;
4367 	struct spdk_io_channel *channel;
4368 	struct spdk_blob_opts opts;
4369 	spdk_blob_id blobid, snapshotid;
4370 	uint64_t free_clusters;
4371 	uint64_t cluster_size;
4372 	uint64_t page_size;
4373 	uint8_t payload_read[10 * 4096];
4374 	uint8_t payload_write[10 * 4096];
4375 	uint64_t write_bytes;
4376 	uint64_t read_bytes;
4377 
4378 	free_clusters = spdk_bs_free_cluster_count(bs);
4379 	cluster_size = spdk_bs_get_cluster_size(bs);
4380 	page_size = spdk_bs_get_page_size(bs);
4381 
4382 	channel = spdk_bs_alloc_io_channel(bs);
4383 	CU_ASSERT(channel != NULL);
4384 
4385 	ut_spdk_blob_opts_init(&opts);
4386 	opts.thin_provision = true;
4387 	opts.num_clusters = 5;
4388 
4389 	blob = ut_blob_create_and_open(bs, &opts);
4390 	blobid = spdk_blob_get_id(blob);
4391 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4392 
4393 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
4394 
4395 	memset(payload_read, 0xFF, sizeof(payload_read));
4396 	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
4397 	poll_threads();
4398 	CU_ASSERT(g_bserrno == 0);
4399 	CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0);
4400 
4401 	memset(payload_write, 0xE5, sizeof(payload_write));
4402 	spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
4403 	poll_threads();
4404 	CU_ASSERT(g_bserrno == 0);
4405 	CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs));
4406 
4407 	/* Create snapshot from blob */
4408 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
4409 	poll_threads();
4410 	CU_ASSERT(g_bserrno == 0);
4411 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
4412 	snapshotid = g_blobid;
4413 
4414 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
4415 	poll_threads();
4416 	CU_ASSERT(g_bserrno == 0);
4417 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4418 	snapshot = g_blob;
4419 	CU_ASSERT(snapshot->data_ro == true);
4420 	CU_ASSERT(snapshot->md_ro == true);
4421 
4422 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5);
4423 
4424 	write_bytes = g_dev_write_bytes;
4425 	read_bytes = g_dev_read_bytes;
4426 
4427 	memset(payload_write, 0xAA, sizeof(payload_write));
4428 	spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
4429 	poll_threads();
4430 	CU_ASSERT(g_bserrno == 0);
4431 	CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs));
4432 
4433 	/* For a clone we need to allocate and copy one cluster, update one page of metadata
4434 	 * and then write 10 pages of payload.
4435 	 */
4436 	if (g_use_extent_table) {
4437 		/* Add one more page for EXTENT_PAGE write */
4438 		CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 12 + cluster_size);
4439 	} else {
4440 		CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 11 + cluster_size);
4441 	}
4442 	CU_ASSERT(g_dev_read_bytes - read_bytes == cluster_size);
4443 
4444 	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
4445 	poll_threads();
4446 	CU_ASSERT(g_bserrno == 0);
4447 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
4448 
4449 	/* Data on snapshot should not change after write to clone */
4450 	memset(payload_write, 0xE5, sizeof(payload_write));
4451 	spdk_blob_io_read(snapshot, channel, payload_read, 4, 10, blob_op_complete, NULL);
4452 	poll_threads();
4453 	CU_ASSERT(g_bserrno == 0);
4454 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
4455 
4456 	ut_blob_close_and_delete(bs, blob);
4457 	ut_blob_close_and_delete(bs, snapshot);
4458 
4459 	spdk_bs_free_io_channel(channel);
4460 	poll_threads();
4461 	g_blob = NULL;
4462 	g_blobid = 0;
4463 }
4464 
4465 static void
4466 blob_snapshot_rw_iov(void)
4467 {
4468 	static const uint8_t zero[10 * 4096] = { 0 };
4469 	struct spdk_blob_store *bs = g_bs;
4470 	struct spdk_blob *blob, *snapshot;
4471 	struct spdk_io_channel *channel;
4472 	struct spdk_blob_opts opts;
4473 	spdk_blob_id blobid, snapshotid;
4474 	uint64_t free_clusters;
4475 	uint8_t payload_read[10 * 4096];
4476 	uint8_t payload_write[10 * 4096];
4477 	struct iovec iov_read[3];
4478 	struct iovec iov_write[3];
4479 
4480 	free_clusters = spdk_bs_free_cluster_count(bs);
4481 
4482 	channel = spdk_bs_alloc_io_channel(bs);
4483 	CU_ASSERT(channel != NULL);
4484 
4485 	ut_spdk_blob_opts_init(&opts);
4486 	opts.thin_provision = true;
4487 	opts.num_clusters = 5;
4488 
4489 	blob = ut_blob_create_and_open(bs, &opts);
4490 	blobid = spdk_blob_get_id(blob);
4491 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4492 
4493 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
4494 
4495 	/* Create snapshot from blob */
4496 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
4497 	poll_threads();
4498 	CU_ASSERT(g_bserrno == 0);
4499 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
4500 	snapshotid = g_blobid;
4501 
4502 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
4503 	poll_threads();
4504 	CU_ASSERT(g_bserrno == 0);
4505 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4506 	snapshot = g_blob;
4507 	CU_ASSERT(snapshot->data_ro == true);
4508 	CU_ASSERT(snapshot->md_ro == true);
4509 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5);
4510 
4511 	/* Payload should be all zeros from unallocated clusters */
4512 	memset(payload_read, 0xAA, sizeof(payload_read));
4513 	iov_read[0].iov_base = payload_read;
4514 	iov_read[0].iov_len = 3 * 4096;
4515 	iov_read[1].iov_base = payload_read + 3 * 4096;
4516 	iov_read[1].iov_len = 4 * 4096;
4517 	iov_read[2].iov_base = payload_read + 7 * 4096;
4518 	iov_read[2].iov_len = 3 * 4096;
4519 	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
4520 	poll_threads();
4521 	CU_ASSERT(g_bserrno == 0);
4522 	CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0);
4523 
4524 	memset(payload_write, 0xE5, sizeof(payload_write));
4525 	iov_write[0].iov_base = payload_write;
4526 	iov_write[0].iov_len = 1 * 4096;
4527 	iov_write[1].iov_base = payload_write + 1 * 4096;
4528 	iov_write[1].iov_len = 5 * 4096;
4529 	iov_write[2].iov_base = payload_write + 6 * 4096;
4530 	iov_write[2].iov_len = 4 * 4096;
4531 
4532 	spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
4533 	poll_threads();
4534 	CU_ASSERT(g_bserrno == 0);
4535 
4536 	memset(payload_read, 0xAA, sizeof(payload_read));
4537 	iov_read[0].iov_base = payload_read;
4538 	iov_read[0].iov_len = 3 * 4096;
4539 	iov_read[1].iov_base = payload_read + 3 * 4096;
4540 	iov_read[1].iov_len = 4 * 4096;
4541 	iov_read[2].iov_base = payload_read + 7 * 4096;
4542 	iov_read[2].iov_len = 3 * 4096;
4543 	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
4544 	poll_threads();
4545 	CU_ASSERT(g_bserrno == 0);
4546 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
4547 
4548 	spdk_bs_free_io_channel(channel);
4549 	poll_threads();
4550 
4551 	ut_blob_close_and_delete(bs, blob);
4552 	ut_blob_close_and_delete(bs, snapshot);
4553 }
4554 
4555 /**
4556  * Inflate / decouple parent rw unit tests.
4557  *
4558  * --------------
4559  * original blob:         0         1         2         3         4
4560  *                   ,---------+---------+---------+---------+---------.
4561  *         snapshot  |xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|    -    |
4562  *                   +---------+---------+---------+---------+---------+
4563  *         snapshot2 |    -    |yyyyyyyyy|    -    |yyyyyyyyy|    -    |
4564  *                   +---------+---------+---------+---------+---------+
4565  *         blob      |    -    |zzzzzzzzz|    -    |    -    |    -    |
4566  *                   '---------+---------+---------+---------+---------'
4567  *                   .         .         .         .         .         .
4568  * --------          .         .         .         .         .         .
4569  * inflate:          .         .         .         .         .         .
4570  *                   ,---------+---------+---------+---------+---------.
4571  *         blob      |xxxxxxxxx|zzzzzzzzz|xxxxxxxxx|yyyyyyyyy|000000000|
4572  *                   '---------+---------+---------+---------+---------'
4573  *
4574  *         NOTE: needs to allocate 4 clusters, thin provisioning removed, dependency
4575  *               on snapshot2 and snapshot removed .         .         .
4576  *                   .         .         .         .         .         .
4577  * ----------------  .         .         .         .         .         .
4578  * decouple parent:  .         .         .         .         .         .
4579  *                   ,---------+---------+---------+---------+---------.
4580  *         snapshot  |xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|    -    |
4581  *                   +---------+---------+---------+---------+---------+
4582  *         blob      |    -    |zzzzzzzzz|    -    |yyyyyyyyy|    -    |
4583  *                   '---------+---------+---------+---------+---------'
4584  *
4585  *         NOTE: needs to allocate 1 cluster, 3 clusters unallocated, dependency
4586  *               on snapshot2 removed and on snapshot still exists. Snapshot2
4587  *               should remain a clone of snapshot.
4588  */
4589 static void
4590 _blob_inflate_rw(bool decouple_parent)
4591 {
4592 	struct spdk_blob_store *bs = g_bs;
4593 	struct spdk_blob *blob, *snapshot, *snapshot2;
4594 	struct spdk_io_channel *channel;
4595 	struct spdk_blob_opts opts;
4596 	spdk_blob_id blobid, snapshotid, snapshot2id;
4597 	uint64_t free_clusters;
4598 	uint64_t cluster_size;
4599 
4600 	uint64_t payload_size;
4601 	uint8_t *payload_read;
4602 	uint8_t *payload_write;
4603 	uint8_t *payload_clone;
4604 
4605 	uint64_t pages_per_cluster;
4606 	uint64_t pages_per_payload;
4607 
4608 	int i;
4609 	spdk_blob_id ids[2];
4610 	size_t count;
4611 
4612 	free_clusters = spdk_bs_free_cluster_count(bs);
4613 	cluster_size = spdk_bs_get_cluster_size(bs);
4614 	pages_per_cluster = cluster_size / spdk_bs_get_page_size(bs);
4615 	pages_per_payload = pages_per_cluster * 5;
4616 
4617 	payload_size = cluster_size * 5;
4618 
4619 	payload_read = malloc(payload_size);
4620 	SPDK_CU_ASSERT_FATAL(payload_read != NULL);
4621 
4622 	payload_write = malloc(payload_size);
4623 	SPDK_CU_ASSERT_FATAL(payload_write != NULL);
4624 
4625 	payload_clone = malloc(payload_size);
4626 	SPDK_CU_ASSERT_FATAL(payload_clone != NULL);
4627 
4628 	channel = spdk_bs_alloc_io_channel(bs);
4629 	SPDK_CU_ASSERT_FATAL(channel != NULL);
4630 
4631 	/* Create blob */
4632 	ut_spdk_blob_opts_init(&opts);
4633 	opts.thin_provision = true;
4634 	opts.num_clusters = 5;
4635 
4636 	blob = ut_blob_create_and_open(bs, &opts);
4637 	blobid = spdk_blob_get_id(blob);
4638 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4639 
4640 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
4641 
4642 	/* 1) Initial read should return zeroed payload */
4643 	memset(payload_read, 0xFF, payload_size);
4644 	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload,
4645 			  blob_op_complete, NULL);
4646 	poll_threads();
4647 	CU_ASSERT(g_bserrno == 0);
4648 	CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size));
4649 
4650 	/* Fill whole blob with a pattern, except last cluster (to be sure it
4651 	 * isn't allocated) */
4652 	memset(payload_write, 0xE5, payload_size - cluster_size);
4653 	spdk_blob_io_write(blob, channel, payload_write, 0, pages_per_payload -
4654 			   pages_per_cluster, blob_op_complete, NULL);
4655 	poll_threads();
4656 	CU_ASSERT(g_bserrno == 0);
4657 	CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs));
4658 
4659 	/* 2) Create snapshot from blob (first level) */
4660 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
4661 	poll_threads();
4662 	CU_ASSERT(g_bserrno == 0);
4663 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
4664 	snapshotid = g_blobid;
4665 
4666 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
4667 	poll_threads();
4668 	CU_ASSERT(g_bserrno == 0);
4669 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4670 	snapshot = g_blob;
4671 	CU_ASSERT(snapshot->data_ro == true);
4672 	CU_ASSERT(snapshot->md_ro == true);
4673 
4674 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5);
4675 
4676 	/* Write every second cluster with a pattern.
4677 	 *
4678 	 * Last cluster shouldn't be written, to be sure that snapshot nor clone
4679 	 * doesn't allocate it.
4680 	 *
4681 	 * payload_clone stores expected result on "blob" read at the time and
4682 	 * is used only to check data consistency on clone before and after
4683 	 * inflation. Initially we fill it with a backing snapshots pattern
4684 	 * used before.
4685 	 */
4686 	memset(payload_clone, 0xE5, payload_size - cluster_size);
4687 	memset(payload_clone + payload_size - cluster_size, 0x00, cluster_size);
4688 	memset(payload_write, 0xAA, payload_size);
4689 	for (i = 1; i < 5; i += 2) {
4690 		spdk_blob_io_write(blob, channel, payload_write, i * pages_per_cluster,
4691 				   pages_per_cluster, blob_op_complete, NULL);
4692 		poll_threads();
4693 		CU_ASSERT(g_bserrno == 0);
4694 
4695 		/* Update expected result */
4696 		memcpy(payload_clone + (cluster_size * i), payload_write,
4697 		       cluster_size);
4698 	}
4699 	CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs));
4700 
4701 	/* Check data consistency on clone */
4702 	memset(payload_read, 0xFF, payload_size);
4703 	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload,
4704 			  blob_op_complete, NULL);
4705 	poll_threads();
4706 	CU_ASSERT(g_bserrno == 0);
4707 	CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0);
4708 
4709 	/* 3) Create second levels snapshot from blob */
4710 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
4711 	poll_threads();
4712 	CU_ASSERT(g_bserrno == 0);
4713 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
4714 	snapshot2id = g_blobid;
4715 
4716 	spdk_bs_open_blob(bs, snapshot2id, blob_op_with_handle_complete, NULL);
4717 	poll_threads();
4718 	CU_ASSERT(g_bserrno == 0);
4719 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4720 	snapshot2 = g_blob;
4721 	CU_ASSERT(snapshot2->data_ro == true);
4722 	CU_ASSERT(snapshot2->md_ro == true);
4723 
4724 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot2) == 5);
4725 
4726 	CU_ASSERT(snapshot2->parent_id == snapshotid);
4727 
4728 	/* Write one cluster on the top level blob. This cluster (1) covers
4729 	 * already allocated cluster in the snapshot2, so shouldn't be inflated
4730 	 * at all */
4731 	spdk_blob_io_write(blob, channel, payload_write, pages_per_cluster,
4732 			   pages_per_cluster, blob_op_complete, NULL);
4733 	poll_threads();
4734 	CU_ASSERT(g_bserrno == 0);
4735 
4736 	/* Update expected result */
4737 	memcpy(payload_clone + cluster_size, payload_write, cluster_size);
4738 
4739 	/* Check data consistency on clone */
4740 	memset(payload_read, 0xFF, payload_size);
4741 	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload,
4742 			  blob_op_complete, NULL);
4743 	poll_threads();
4744 	CU_ASSERT(g_bserrno == 0);
4745 	CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0);
4746 
4747 
4748 	/* Close all blobs */
4749 	spdk_blob_close(blob, blob_op_complete, NULL);
4750 	poll_threads();
4751 	CU_ASSERT(g_bserrno == 0);
4752 
4753 	spdk_blob_close(snapshot2, blob_op_complete, NULL);
4754 	poll_threads();
4755 	CU_ASSERT(g_bserrno == 0);
4756 
4757 	spdk_blob_close(snapshot, blob_op_complete, NULL);
4758 	poll_threads();
4759 	CU_ASSERT(g_bserrno == 0);
4760 
4761 	/* Check snapshot-clone relations */
4762 	count = 2;
4763 	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);
4764 	CU_ASSERT(count == 1);
4765 	CU_ASSERT(ids[0] == snapshot2id);
4766 
4767 	count = 2;
4768 	CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0);
4769 	CU_ASSERT(count == 1);
4770 	CU_ASSERT(ids[0] == blobid);
4771 
4772 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshot2id);
4773 
4774 	free_clusters = spdk_bs_free_cluster_count(bs);
4775 	if (!decouple_parent) {
4776 		/* Do full blob inflation */
4777 		spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
4778 		poll_threads();
4779 		CU_ASSERT(g_bserrno == 0);
4780 
4781 		/* All clusters should be inflated (except one already allocated
4782 		 * in a top level blob) */
4783 		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 4);
4784 
4785 		/* Check if relation tree updated correctly */
4786 		count = 2;
4787 		CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);
4788 
4789 		/* snapshotid have one clone */
4790 		CU_ASSERT(count == 1);
4791 		CU_ASSERT(ids[0] == snapshot2id);
4792 
4793 		/* snapshot2id have no clones */
4794 		count = 2;
4795 		CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0);
4796 		CU_ASSERT(count == 0);
4797 
4798 		CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID);
4799 	} else {
4800 		/* Decouple parent of blob */
4801 		spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
4802 		poll_threads();
4803 		CU_ASSERT(g_bserrno == 0);
4804 
4805 		/* Only one cluster from a parent should be inflated (second one
4806 		 * is covered by a cluster written on a top level blob, and
4807 		 * already allocated) */
4808 		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 1);
4809 
4810 		/* Check if relation tree updated correctly */
4811 		count = 2;
4812 		CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);
4813 
4814 		/* snapshotid have two clones now */
4815 		CU_ASSERT(count == 2);
4816 		CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
4817 		CU_ASSERT(ids[0] == snapshot2id || ids[1] == snapshot2id);
4818 
4819 		/* snapshot2id have no clones */
4820 		count = 2;
4821 		CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0);
4822 		CU_ASSERT(count == 0);
4823 
4824 		CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
4825 	}
4826 
4827 	/* Try to delete snapshot2 (should pass) */
4828 	spdk_bs_delete_blob(bs, snapshot2id, blob_op_complete, NULL);
4829 	poll_threads();
4830 	CU_ASSERT(g_bserrno == 0);
4831 
4832 	/* Try to delete base snapshot */
4833 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
4834 	poll_threads();
4835 	CU_ASSERT(g_bserrno == 0);
4836 
4837 	/* Reopen blob after snapshot deletion */
4838 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
4839 	poll_threads();
4840 	CU_ASSERT(g_bserrno == 0);
4841 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4842 	blob = g_blob;
4843 
4844 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
4845 
4846 	/* Check data consistency on inflated blob */
4847 	memset(payload_read, 0xFF, payload_size);
4848 	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload,
4849 			  blob_op_complete, NULL);
4850 	poll_threads();
4851 	CU_ASSERT(g_bserrno == 0);
4852 	CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0);
4853 
4854 	spdk_bs_free_io_channel(channel);
4855 	poll_threads();
4856 
4857 	free(payload_read);
4858 	free(payload_write);
4859 	free(payload_clone);
4860 
4861 	ut_blob_close_and_delete(bs, blob);
4862 }
4863 
4864 static void
4865 blob_inflate_rw(void)
4866 {
4867 	_blob_inflate_rw(false);
4868 	_blob_inflate_rw(true);
4869 }
4870 
4871 /**
4872  * Snapshot-clones relation test
4873  *
4874  *         snapshot
4875  *            |
4876  *      +-----+-----+
4877  *      |           |
4878  *   blob(ro)   snapshot2
4879  *      |           |
4880  *   clone2      clone
4881  */
4882 static void
4883 blob_relations(void)
4884 {
4885 	struct spdk_blob_store *bs;
4886 	struct spdk_bs_dev *dev;
4887 	struct spdk_bs_opts bs_opts;
4888 	struct spdk_blob_opts opts;
4889 	struct spdk_blob *blob, *snapshot, *snapshot2, *clone, *clone2;
4890 	spdk_blob_id blobid, cloneid, snapshotid, cloneid2, snapshotid2;
4891 	int rc;
4892 	size_t count;
4893 	spdk_blob_id ids[10] = {};
4894 
4895 	dev = init_dev();
4896 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
4897 	snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");
4898 
4899 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
4900 	poll_threads();
4901 	CU_ASSERT(g_bserrno == 0);
4902 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
4903 	bs = g_bs;
4904 
4905 	/* 1. Create blob with 10 clusters */
4906 
4907 	ut_spdk_blob_opts_init(&opts);
4908 	opts.num_clusters = 10;
4909 
4910 	blob = ut_blob_create_and_open(bs, &opts);
4911 	blobid = spdk_blob_get_id(blob);
4912 
4913 	CU_ASSERT(!spdk_blob_is_read_only(blob));
4914 	CU_ASSERT(!spdk_blob_is_snapshot(blob));
4915 	CU_ASSERT(!spdk_blob_is_clone(blob));
4916 	CU_ASSERT(!spdk_blob_is_thin_provisioned(blob));
4917 
4918 	/* blob should not have underlying snapshot nor clones */
4919 	CU_ASSERT(blob->parent_id == SPDK_BLOBID_INVALID);
4920 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID);
4921 	count = SPDK_COUNTOF(ids);
4922 	rc = spdk_blob_get_clones(bs, blobid, ids, &count);
4923 	CU_ASSERT(rc == 0);
4924 	CU_ASSERT(count == 0);
4925 
4926 
4927 	/* 2. Create snapshot */
4928 
4929 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
4930 	poll_threads();
4931 	CU_ASSERT(g_bserrno == 0);
4932 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
4933 	snapshotid = g_blobid;
4934 
4935 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
4936 	poll_threads();
4937 	CU_ASSERT(g_bserrno == 0);
4938 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4939 	snapshot = g_blob;
4940 
4941 	CU_ASSERT(spdk_blob_is_read_only(snapshot));
4942 	CU_ASSERT(spdk_blob_is_snapshot(snapshot));
4943 	CU_ASSERT(!spdk_blob_is_clone(snapshot));
4944 	CU_ASSERT(snapshot->parent_id == SPDK_BLOBID_INVALID);
4945 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid) == SPDK_BLOBID_INVALID);
4946 
4947 	/* Check if original blob is converted to the clone of snapshot */
4948 	CU_ASSERT(!spdk_blob_is_read_only(blob));
4949 	CU_ASSERT(!spdk_blob_is_snapshot(blob));
4950 	CU_ASSERT(spdk_blob_is_clone(blob));
4951 	CU_ASSERT(spdk_blob_is_thin_provisioned(blob));
4952 	CU_ASSERT(blob->parent_id == snapshotid);
4953 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
4954 
4955 	count = SPDK_COUNTOF(ids);
4956 	rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
4957 	CU_ASSERT(rc == 0);
4958 	CU_ASSERT(count == 1);
4959 	CU_ASSERT(ids[0] == blobid);
4960 
4961 
4962 	/* 3. Create clone from snapshot */
4963 
4964 	spdk_bs_create_clone(bs, snapshotid, NULL, blob_op_with_id_complete, NULL);
4965 	poll_threads();
4966 	CU_ASSERT(g_bserrno == 0);
4967 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
4968 	cloneid = g_blobid;
4969 
4970 	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
4971 	poll_threads();
4972 	CU_ASSERT(g_bserrno == 0);
4973 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4974 	clone = g_blob;
4975 
4976 	CU_ASSERT(!spdk_blob_is_read_only(clone));
4977 	CU_ASSERT(!spdk_blob_is_snapshot(clone));
4978 	CU_ASSERT(spdk_blob_is_clone(clone));
4979 	CU_ASSERT(spdk_blob_is_thin_provisioned(clone));
4980 	CU_ASSERT(clone->parent_id == snapshotid);
4981 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid);
4982 
4983 	count = SPDK_COUNTOF(ids);
4984 	rc = spdk_blob_get_clones(bs, cloneid, ids, &count);
4985 	CU_ASSERT(rc == 0);
4986 	CU_ASSERT(count == 0);
4987 
4988 	/* Check if clone is on the snapshot's list */
4989 	count = SPDK_COUNTOF(ids);
4990 	rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
4991 	CU_ASSERT(rc == 0);
4992 	CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
4993 	CU_ASSERT(ids[0] == cloneid || ids[1] == cloneid);
4994 
4995 
4996 	/* 4. Create snapshot of the clone */
4997 
4998 	spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL);
4999 	poll_threads();
5000 	CU_ASSERT(g_bserrno == 0);
5001 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5002 	snapshotid2 = g_blobid;
5003 
5004 	spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL);
5005 	poll_threads();
5006 	CU_ASSERT(g_bserrno == 0);
5007 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5008 	snapshot2 = g_blob;
5009 
5010 	CU_ASSERT(spdk_blob_is_read_only(snapshot2));
5011 	CU_ASSERT(spdk_blob_is_snapshot(snapshot2));
5012 	CU_ASSERT(spdk_blob_is_clone(snapshot2));
5013 	CU_ASSERT(snapshot2->parent_id == snapshotid);
5014 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid);
5015 
5016 	/* Check if clone is converted to the clone of snapshot2 and snapshot2
5017 	 * is a child of snapshot */
5018 	CU_ASSERT(!spdk_blob_is_read_only(clone));
5019 	CU_ASSERT(!spdk_blob_is_snapshot(clone));
5020 	CU_ASSERT(spdk_blob_is_clone(clone));
5021 	CU_ASSERT(spdk_blob_is_thin_provisioned(clone));
5022 	CU_ASSERT(clone->parent_id == snapshotid2);
5023 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2);
5024 
5025 	count = SPDK_COUNTOF(ids);
5026 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
5027 	CU_ASSERT(rc == 0);
5028 	CU_ASSERT(count == 1);
5029 	CU_ASSERT(ids[0] == cloneid);
5030 
5031 
5032 	/* 5. Try to create clone from read only blob */
5033 
5034 	/* Mark blob as read only */
5035 	spdk_blob_set_read_only(blob);
5036 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
5037 	poll_threads();
5038 	CU_ASSERT(g_bserrno == 0);
5039 
5040 	/* Check if previously created blob is read only clone */
5041 	CU_ASSERT(spdk_blob_is_read_only(blob));
5042 	CU_ASSERT(!spdk_blob_is_snapshot(blob));
5043 	CU_ASSERT(spdk_blob_is_clone(blob));
5044 	CU_ASSERT(spdk_blob_is_thin_provisioned(blob));
5045 
5046 	/* Create clone from read only blob */
5047 	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5048 	poll_threads();
5049 	CU_ASSERT(g_bserrno == 0);
5050 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5051 	cloneid2 = g_blobid;
5052 
5053 	spdk_bs_open_blob(bs, cloneid2, blob_op_with_handle_complete, NULL);
5054 	poll_threads();
5055 	CU_ASSERT(g_bserrno == 0);
5056 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5057 	clone2 = g_blob;
5058 
5059 	CU_ASSERT(!spdk_blob_is_read_only(clone2));
5060 	CU_ASSERT(!spdk_blob_is_snapshot(clone2));
5061 	CU_ASSERT(spdk_blob_is_clone(clone2));
5062 	CU_ASSERT(spdk_blob_is_thin_provisioned(clone2));
5063 
5064 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid);
5065 
5066 	count = SPDK_COUNTOF(ids);
5067 	rc = spdk_blob_get_clones(bs, blobid, ids, &count);
5068 	CU_ASSERT(rc == 0);
5069 
5070 	CU_ASSERT(count == 1);
5071 	CU_ASSERT(ids[0] == cloneid2);
5072 
5073 	/* Close blobs */
5074 
5075 	spdk_blob_close(clone2, blob_op_complete, NULL);
5076 	poll_threads();
5077 	CU_ASSERT(g_bserrno == 0);
5078 
5079 	spdk_blob_close(blob, blob_op_complete, NULL);
5080 	poll_threads();
5081 	CU_ASSERT(g_bserrno == 0);
5082 
5083 	spdk_blob_close(clone, blob_op_complete, NULL);
5084 	poll_threads();
5085 	CU_ASSERT(g_bserrno == 0);
5086 
5087 	spdk_blob_close(snapshot, blob_op_complete, NULL);
5088 	poll_threads();
5089 	CU_ASSERT(g_bserrno == 0);
5090 
5091 	spdk_blob_close(snapshot2, blob_op_complete, NULL);
5092 	poll_threads();
5093 	CU_ASSERT(g_bserrno == 0);
5094 
5095 	/* Try to delete snapshot with more than 1 clone */
5096 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
5097 	poll_threads();
5098 	CU_ASSERT(g_bserrno != 0);
5099 
5100 	ut_bs_reload(&bs, &bs_opts);
5101 
5102 	/* NULL ids array should return number of clones in count */
5103 	count = SPDK_COUNTOF(ids);
5104 	rc = spdk_blob_get_clones(bs, snapshotid, NULL, &count);
5105 	CU_ASSERT(rc == -ENOMEM);
5106 	CU_ASSERT(count == 2);
5107 
5108 	/* incorrect array size */
5109 	count = 1;
5110 	rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
5111 	CU_ASSERT(rc == -ENOMEM);
5112 	CU_ASSERT(count == 2);
5113 
5114 
5115 	/* Verify structure of loaded blob store */
5116 
5117 	/* snapshot */
5118 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid) == SPDK_BLOBID_INVALID);
5119 
5120 	count = SPDK_COUNTOF(ids);
5121 	rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
5122 	CU_ASSERT(rc == 0);
5123 	CU_ASSERT(count == 2);
5124 	CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
5125 	CU_ASSERT(ids[0] == snapshotid2 || ids[1] == snapshotid2);
5126 
5127 	/* blob */
5128 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
5129 	count = SPDK_COUNTOF(ids);
5130 	rc = spdk_blob_get_clones(bs, blobid, ids, &count);
5131 	CU_ASSERT(rc == 0);
5132 	CU_ASSERT(count == 1);
5133 	CU_ASSERT(ids[0] == cloneid2);
5134 
5135 	/* clone */
5136 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2);
5137 	count = SPDK_COUNTOF(ids);
5138 	rc = spdk_blob_get_clones(bs, cloneid, ids, &count);
5139 	CU_ASSERT(rc == 0);
5140 	CU_ASSERT(count == 0);
5141 
5142 	/* snapshot2 */
5143 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid);
5144 	count = SPDK_COUNTOF(ids);
5145 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
5146 	CU_ASSERT(rc == 0);
5147 	CU_ASSERT(count == 1);
5148 	CU_ASSERT(ids[0] == cloneid);
5149 
5150 	/* clone2 */
5151 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid);
5152 	count = SPDK_COUNTOF(ids);
5153 	rc = spdk_blob_get_clones(bs, cloneid2, ids, &count);
5154 	CU_ASSERT(rc == 0);
5155 	CU_ASSERT(count == 0);
5156 
5157 	/* Try to delete blob that user should not be able to remove */
5158 
5159 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
5160 	poll_threads();
5161 	CU_ASSERT(g_bserrno != 0);
5162 
5163 	/* Remove all blobs */
5164 
5165 	spdk_bs_delete_blob(bs, cloneid, blob_op_complete, NULL);
5166 	poll_threads();
5167 	CU_ASSERT(g_bserrno == 0);
5168 
5169 	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
5170 	poll_threads();
5171 	CU_ASSERT(g_bserrno == 0);
5172 
5173 	spdk_bs_delete_blob(bs, cloneid2, blob_op_complete, NULL);
5174 	poll_threads();
5175 	CU_ASSERT(g_bserrno == 0);
5176 
5177 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
5178 	poll_threads();
5179 	CU_ASSERT(g_bserrno == 0);
5180 
5181 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
5182 	poll_threads();
5183 	CU_ASSERT(g_bserrno == 0);
5184 
5185 	spdk_bs_unload(bs, bs_op_complete, NULL);
5186 	poll_threads();
5187 	CU_ASSERT(g_bserrno == 0);
5188 
5189 	g_bs = NULL;
5190 }
5191 
5192 /**
5193  * Snapshot-clones relation test 2
5194  *
5195  *         snapshot1
5196  *            |
5197  *         snapshot2
5198  *            |
5199  *      +-----+-----+
5200  *      |           |
5201  *   blob(ro)   snapshot3
5202  *      |           |
5203  *      |       snapshot4
5204  *      |        |     |
5205  *   clone2   clone  clone3
5206  */
5207 static void
5208 blob_relations2(void)
5209 {
5210 	struct spdk_blob_store *bs;
5211 	struct spdk_bs_dev *dev;
5212 	struct spdk_bs_opts bs_opts;
5213 	struct spdk_blob_opts opts;
5214 	struct spdk_blob *blob, *snapshot1, *snapshot2, *snapshot3, *snapshot4, *clone, *clone2;
5215 	spdk_blob_id blobid, snapshotid1, snapshotid2, snapshotid3, snapshotid4, cloneid, cloneid2,
5216 		     cloneid3;
5217 	int rc;
5218 	size_t count;
5219 	spdk_blob_id ids[10] = {};
5220 
5221 	dev = init_dev();
5222 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
5223 	snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");
5224 
5225 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
5226 	poll_threads();
5227 	CU_ASSERT(g_bserrno == 0);
5228 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
5229 	bs = g_bs;
5230 
5231 	/* 1. Create blob with 10 clusters */
5232 
5233 	ut_spdk_blob_opts_init(&opts);
5234 	opts.num_clusters = 10;
5235 
5236 	blob = ut_blob_create_and_open(bs, &opts);
5237 	blobid = spdk_blob_get_id(blob);
5238 
5239 	/* 2. Create snapshot1 */
5240 
5241 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5242 	poll_threads();
5243 	CU_ASSERT(g_bserrno == 0);
5244 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5245 	snapshotid1 = g_blobid;
5246 
5247 	spdk_bs_open_blob(bs, snapshotid1, blob_op_with_handle_complete, NULL);
5248 	poll_threads();
5249 	CU_ASSERT(g_bserrno == 0);
5250 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5251 	snapshot1 = g_blob;
5252 
5253 	CU_ASSERT(snapshot1->parent_id == SPDK_BLOBID_INVALID);
5254 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid1) == SPDK_BLOBID_INVALID);
5255 
5256 	CU_ASSERT(blob->parent_id == snapshotid1);
5257 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid1);
5258 
5259 	/* Check if blob is the clone of snapshot1 */
5260 	CU_ASSERT(blob->parent_id == snapshotid1);
5261 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid1);
5262 
5263 	count = SPDK_COUNTOF(ids);
5264 	rc = spdk_blob_get_clones(bs, snapshotid1, ids, &count);
5265 	CU_ASSERT(rc == 0);
5266 	CU_ASSERT(count == 1);
5267 	CU_ASSERT(ids[0] == blobid);
5268 
5269 	/* 3. Create another snapshot */
5270 
5271 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5272 	poll_threads();
5273 	CU_ASSERT(g_bserrno == 0);
5274 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5275 	snapshotid2 = g_blobid;
5276 
5277 	spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL);
5278 	poll_threads();
5279 	CU_ASSERT(g_bserrno == 0);
5280 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5281 	snapshot2 = g_blob;
5282 
5283 	CU_ASSERT(spdk_blob_is_clone(snapshot2));
5284 	CU_ASSERT(snapshot2->parent_id == snapshotid1);
5285 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid1);
5286 
5287 	/* Check if snapshot2 is the clone of snapshot1 and blob
5288 	 * is a child of snapshot2 */
5289 	CU_ASSERT(blob->parent_id == snapshotid2);
5290 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid2);
5291 
5292 	count = SPDK_COUNTOF(ids);
5293 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
5294 	CU_ASSERT(rc == 0);
5295 	CU_ASSERT(count == 1);
5296 	CU_ASSERT(ids[0] == blobid);
5297 
5298 	/* 4. Create clone from snapshot */
5299 
5300 	spdk_bs_create_clone(bs, snapshotid2, NULL, blob_op_with_id_complete, NULL);
5301 	poll_threads();
5302 	CU_ASSERT(g_bserrno == 0);
5303 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5304 	cloneid = g_blobid;
5305 
5306 	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
5307 	poll_threads();
5308 	CU_ASSERT(g_bserrno == 0);
5309 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5310 	clone = g_blob;
5311 
5312 	CU_ASSERT(clone->parent_id == snapshotid2);
5313 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2);
5314 
5315 	/* Check if clone is on the snapshot's list */
5316 	count = SPDK_COUNTOF(ids);
5317 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
5318 	CU_ASSERT(rc == 0);
5319 	CU_ASSERT(count == 2);
5320 	CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
5321 	CU_ASSERT(ids[0] == cloneid || ids[1] == cloneid);
5322 
5323 	/* 5. Create snapshot of the clone */
5324 
5325 	spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL);
5326 	poll_threads();
5327 	CU_ASSERT(g_bserrno == 0);
5328 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5329 	snapshotid3 = g_blobid;
5330 
5331 	spdk_bs_open_blob(bs, snapshotid3, blob_op_with_handle_complete, NULL);
5332 	poll_threads();
5333 	CU_ASSERT(g_bserrno == 0);
5334 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5335 	snapshot3 = g_blob;
5336 
5337 	CU_ASSERT(snapshot3->parent_id == snapshotid2);
5338 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid3) == snapshotid2);
5339 
5340 	/* Check if clone is converted to the clone of snapshot3 and snapshot3
5341 	 * is a child of snapshot2 */
5342 	CU_ASSERT(clone->parent_id == snapshotid3);
5343 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3);
5344 
5345 	count = SPDK_COUNTOF(ids);
5346 	rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count);
5347 	CU_ASSERT(rc == 0);
5348 	CU_ASSERT(count == 1);
5349 	CU_ASSERT(ids[0] == cloneid);
5350 
5351 	/* 6. Create another snapshot of the clone */
5352 
5353 	spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL);
5354 	poll_threads();
5355 	CU_ASSERT(g_bserrno == 0);
5356 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5357 	snapshotid4 = g_blobid;
5358 
5359 	spdk_bs_open_blob(bs, snapshotid4, blob_op_with_handle_complete, NULL);
5360 	poll_threads();
5361 	CU_ASSERT(g_bserrno == 0);
5362 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5363 	snapshot4 = g_blob;
5364 
5365 	CU_ASSERT(snapshot4->parent_id == snapshotid3);
5366 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid4) == snapshotid3);
5367 
5368 	/* Check if clone is converted to the clone of snapshot4 and snapshot4
5369 	 * is a child of snapshot3 */
5370 	CU_ASSERT(clone->parent_id == snapshotid4);
5371 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid4);
5372 
5373 	count = SPDK_COUNTOF(ids);
5374 	rc = spdk_blob_get_clones(bs, snapshotid4, ids, &count);
5375 	CU_ASSERT(rc == 0);
5376 	CU_ASSERT(count == 1);
5377 	CU_ASSERT(ids[0] == cloneid);
5378 
5379 	/* 7. Remove snapshot 4 */
5380 
5381 	ut_blob_close_and_delete(bs, snapshot4);
5382 
5383 	/* Check if relations are back to state from before creating snapshot 4 */
5384 	CU_ASSERT(clone->parent_id == snapshotid3);
5385 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3);
5386 
5387 	count = SPDK_COUNTOF(ids);
5388 	rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count);
5389 	CU_ASSERT(rc == 0);
5390 	CU_ASSERT(count == 1);
5391 	CU_ASSERT(ids[0] == cloneid);
5392 
5393 	/* 8. Create second clone of snapshot 3 and try to remove snapshot 3 */
5394 
5395 	spdk_bs_create_clone(bs, snapshotid3, NULL, blob_op_with_id_complete, NULL);
5396 	poll_threads();
5397 	CU_ASSERT(g_bserrno == 0);
5398 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5399 	cloneid3 = g_blobid;
5400 
5401 	spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL);
5402 	poll_threads();
5403 	CU_ASSERT(g_bserrno != 0);
5404 
5405 	/* 9. Open snapshot 3 again and try to remove it while clone 3 is closed */
5406 
5407 	spdk_bs_open_blob(bs, snapshotid3, blob_op_with_handle_complete, NULL);
5408 	poll_threads();
5409 	CU_ASSERT(g_bserrno == 0);
5410 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5411 	snapshot3 = g_blob;
5412 
5413 	spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL);
5414 	poll_threads();
5415 	CU_ASSERT(g_bserrno != 0);
5416 
5417 	spdk_blob_close(snapshot3, blob_op_complete, NULL);
5418 	poll_threads();
5419 	CU_ASSERT(g_bserrno == 0);
5420 
5421 	spdk_bs_delete_blob(bs, cloneid3, blob_op_complete, NULL);
5422 	poll_threads();
5423 	CU_ASSERT(g_bserrno == 0);
5424 
5425 	/* 10. Remove snapshot 1 */
5426 
5427 	ut_blob_close_and_delete(bs, snapshot1);
5428 
5429 	/* Check if relations are back to state from before creating snapshot 4 (before step 6) */
5430 	CU_ASSERT(snapshot2->parent_id == SPDK_BLOBID_INVALID);
5431 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == SPDK_BLOBID_INVALID);
5432 
5433 	count = SPDK_COUNTOF(ids);
5434 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
5435 	CU_ASSERT(rc == 0);
5436 	CU_ASSERT(count == 2);
5437 	CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
5438 	CU_ASSERT(ids[0] == snapshotid3 || ids[1] == snapshotid3);
5439 
5440 	/* 11. Try to create clone from read only blob */
5441 
5442 	/* Mark blob as read only */
5443 	spdk_blob_set_read_only(blob);
5444 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
5445 	poll_threads();
5446 	CU_ASSERT(g_bserrno == 0);
5447 
5448 	/* Create clone from read only blob */
5449 	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5450 	poll_threads();
5451 	CU_ASSERT(g_bserrno == 0);
5452 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5453 	cloneid2 = g_blobid;
5454 
5455 	spdk_bs_open_blob(bs, cloneid2, blob_op_with_handle_complete, NULL);
5456 	poll_threads();
5457 	CU_ASSERT(g_bserrno == 0);
5458 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5459 	clone2 = g_blob;
5460 
5461 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid);
5462 
5463 	count = SPDK_COUNTOF(ids);
5464 	rc = spdk_blob_get_clones(bs, blobid, ids, &count);
5465 	CU_ASSERT(rc == 0);
5466 	CU_ASSERT(count == 1);
5467 	CU_ASSERT(ids[0] == cloneid2);
5468 
5469 	/* Close blobs */
5470 
5471 	spdk_blob_close(clone2, blob_op_complete, NULL);
5472 	poll_threads();
5473 	CU_ASSERT(g_bserrno == 0);
5474 
5475 	spdk_blob_close(blob, blob_op_complete, NULL);
5476 	poll_threads();
5477 	CU_ASSERT(g_bserrno == 0);
5478 
5479 	spdk_blob_close(clone, blob_op_complete, NULL);
5480 	poll_threads();
5481 	CU_ASSERT(g_bserrno == 0);
5482 
5483 	spdk_blob_close(snapshot2, blob_op_complete, NULL);
5484 	poll_threads();
5485 	CU_ASSERT(g_bserrno == 0);
5486 
5487 	spdk_blob_close(snapshot3, blob_op_complete, NULL);
5488 	poll_threads();
5489 	CU_ASSERT(g_bserrno == 0);
5490 
5491 	ut_bs_reload(&bs, &bs_opts);
5492 
5493 	/* Verify structure of loaded blob store */
5494 
5495 	/* snapshot2 */
5496 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == SPDK_BLOBID_INVALID);
5497 
5498 	count = SPDK_COUNTOF(ids);
5499 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
5500 	CU_ASSERT(rc == 0);
5501 	CU_ASSERT(count == 2);
5502 	CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
5503 	CU_ASSERT(ids[0] == snapshotid3 || ids[1] == snapshotid3);
5504 
5505 	/* blob */
5506 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid2);
5507 	count = SPDK_COUNTOF(ids);
5508 	rc = spdk_blob_get_clones(bs, blobid, ids, &count);
5509 	CU_ASSERT(rc == 0);
5510 	CU_ASSERT(count == 1);
5511 	CU_ASSERT(ids[0] == cloneid2);
5512 
5513 	/* clone */
5514 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3);
5515 	count = SPDK_COUNTOF(ids);
5516 	rc = spdk_blob_get_clones(bs, cloneid, ids, &count);
5517 	CU_ASSERT(rc == 0);
5518 	CU_ASSERT(count == 0);
5519 
5520 	/* snapshot3 */
5521 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid3) == snapshotid2);
5522 	count = SPDK_COUNTOF(ids);
5523 	rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count);
5524 	CU_ASSERT(rc == 0);
5525 	CU_ASSERT(count == 1);
5526 	CU_ASSERT(ids[0] == cloneid);
5527 
5528 	/* clone2 */
5529 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid);
5530 	count = SPDK_COUNTOF(ids);
5531 	rc = spdk_blob_get_clones(bs, cloneid2, ids, &count);
5532 	CU_ASSERT(rc == 0);
5533 	CU_ASSERT(count == 0);
5534 
5535 	/* Try to delete all blobs in the worse possible order */
5536 
5537 	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
5538 	poll_threads();
5539 	CU_ASSERT(g_bserrno != 0);
5540 
5541 	spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL);
5542 	poll_threads();
5543 	CU_ASSERT(g_bserrno == 0);
5544 
5545 	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
5546 	poll_threads();
5547 	CU_ASSERT(g_bserrno != 0);
5548 
5549 	spdk_bs_delete_blob(bs, cloneid, blob_op_complete, NULL);
5550 	poll_threads();
5551 	CU_ASSERT(g_bserrno == 0);
5552 
5553 	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
5554 	poll_threads();
5555 	CU_ASSERT(g_bserrno == 0);
5556 
5557 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
5558 	poll_threads();
5559 	CU_ASSERT(g_bserrno == 0);
5560 
5561 	spdk_bs_delete_blob(bs, cloneid2, blob_op_complete, NULL);
5562 	poll_threads();
5563 	CU_ASSERT(g_bserrno == 0);
5564 
5565 	spdk_bs_unload(bs, bs_op_complete, NULL);
5566 	poll_threads();
5567 	CU_ASSERT(g_bserrno == 0);
5568 
5569 	g_bs = NULL;
5570 }
5571 
5572 /**
5573  * Snapshot-clones relation test 3
5574  *
5575  *         snapshot0
5576  *            |
5577  *         snapshot1
5578  *            |
5579  *         snapshot2
5580  *            |
5581  *           blob
5582  */
5583 static void
5584 blob_relations3(void)
5585 {
5586 	struct spdk_blob_store *bs;
5587 	struct spdk_bs_dev *dev;
5588 	struct spdk_io_channel *channel;
5589 	struct spdk_bs_opts bs_opts;
5590 	struct spdk_blob_opts opts;
5591 	struct spdk_blob *blob;
5592 	spdk_blob_id blobid, snapshotid0, snapshotid1, snapshotid2;
5593 
5594 	dev = init_dev();
5595 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
5596 	snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");
5597 
5598 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
5599 	poll_threads();
5600 	CU_ASSERT(g_bserrno == 0);
5601 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
5602 	bs = g_bs;
5603 
5604 	channel = spdk_bs_alloc_io_channel(bs);
5605 	SPDK_CU_ASSERT_FATAL(channel != NULL);
5606 
5607 	/* 1. Create blob with 10 clusters */
5608 	ut_spdk_blob_opts_init(&opts);
5609 	opts.num_clusters = 10;
5610 
5611 	blob = ut_blob_create_and_open(bs, &opts);
5612 	blobid = spdk_blob_get_id(blob);
5613 
5614 	/* 2. Create snapshot0 */
5615 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5616 	poll_threads();
5617 	CU_ASSERT(g_bserrno == 0);
5618 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5619 	snapshotid0 = g_blobid;
5620 
5621 	/* 3. Create snapshot1 */
5622 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5623 	poll_threads();
5624 	CU_ASSERT(g_bserrno == 0);
5625 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5626 	snapshotid1 = g_blobid;
5627 
5628 	/* 4. Create snapshot2 */
5629 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5630 	poll_threads();
5631 	CU_ASSERT(g_bserrno == 0);
5632 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5633 	snapshotid2 = g_blobid;
5634 
5635 	/* 5. Decouple blob */
5636 	spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
5637 	poll_threads();
5638 	CU_ASSERT(g_bserrno == 0);
5639 
5640 	/* 6. Decouple snapshot2. Make sure updating md of snapshot2 is possible */
5641 	spdk_bs_blob_decouple_parent(bs, channel, snapshotid2, blob_op_complete, NULL);
5642 	poll_threads();
5643 	CU_ASSERT(g_bserrno == 0);
5644 
5645 	/* 7. Delete blob */
5646 	spdk_blob_close(blob, blob_op_complete, NULL);
5647 	poll_threads();
5648 	CU_ASSERT(g_bserrno == 0);
5649 
5650 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
5651 	poll_threads();
5652 	CU_ASSERT(g_bserrno == 0);
5653 
5654 	/* 8. Delete snapshot2.
5655 	 * If md of snapshot 2 was updated, it should be possible to delete it */
5656 	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
5657 	poll_threads();
5658 	CU_ASSERT(g_bserrno == 0);
5659 
5660 	/* Remove remaining blobs and unload bs */
5661 	spdk_bs_delete_blob(bs, snapshotid1, blob_op_complete, NULL);
5662 	poll_threads();
5663 	CU_ASSERT(g_bserrno == 0);
5664 
5665 	spdk_bs_delete_blob(bs, snapshotid0, blob_op_complete, NULL);
5666 	poll_threads();
5667 	CU_ASSERT(g_bserrno == 0);
5668 
5669 	spdk_bs_free_io_channel(channel);
5670 	poll_threads();
5671 
5672 	spdk_bs_unload(bs, bs_op_complete, NULL);
5673 	poll_threads();
5674 	CU_ASSERT(g_bserrno == 0);
5675 
5676 	g_bs = NULL;
5677 }
5678 
5679 static void
5680 blobstore_clean_power_failure(void)
5681 {
5682 	struct spdk_blob_store *bs;
5683 	struct spdk_blob *blob;
5684 	struct spdk_power_failure_thresholds thresholds = {};
5685 	bool clean = false;
5686 	struct spdk_bs_super_block *super = (struct spdk_bs_super_block *)&g_dev_buffer[0];
5687 	struct spdk_bs_super_block super_copy = {};
5688 
5689 	thresholds.general_threshold = 1;
5690 	while (!clean) {
5691 		/* Create bs and blob */
5692 		suite_blob_setup();
5693 		SPDK_CU_ASSERT_FATAL(g_bs != NULL);
5694 		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5695 		bs = g_bs;
5696 		blob = g_blob;
5697 
5698 		/* Super block should not change for rest of the UT,
5699 		 * save it and compare later. */
5700 		memcpy(&super_copy, super, sizeof(struct spdk_bs_super_block));
5701 		SPDK_CU_ASSERT_FATAL(super->clean == 0);
5702 		SPDK_CU_ASSERT_FATAL(bs->clean == 0);
5703 
5704 		/* Force bs/super block in a clean state.
5705 		 * Along with marking blob dirty, to cause blob persist. */
5706 		blob->state = SPDK_BLOB_STATE_DIRTY;
5707 		bs->clean = 1;
5708 		super->clean = 1;
5709 		super->crc = blob_md_page_calc_crc(super);
5710 
5711 		g_bserrno = -1;
5712 		dev_set_power_failure_thresholds(thresholds);
5713 		spdk_blob_sync_md(blob, blob_op_complete, NULL);
5714 		poll_threads();
5715 		dev_reset_power_failure_event();
5716 
5717 		if (g_bserrno == 0) {
5718 			/* After successful md sync, both bs and super block
5719 			 * should be marked as not clean. */
5720 			SPDK_CU_ASSERT_FATAL(bs->clean == 0);
5721 			SPDK_CU_ASSERT_FATAL(super->clean == 0);
5722 			clean = true;
5723 		}
5724 
5725 		/* Depending on the point of failure, super block was either updated or not. */
5726 		super_copy.clean = super->clean;
5727 		super_copy.crc = blob_md_page_calc_crc(&super_copy);
5728 		/* Compare that the values in super block remained unchanged. */
5729 		SPDK_CU_ASSERT_FATAL(!memcmp(&super_copy, super, sizeof(struct spdk_bs_super_block)));
5730 
5731 		/* Delete blob and unload bs */
5732 		suite_blob_cleanup();
5733 
5734 		thresholds.general_threshold++;
5735 	}
5736 }
5737 
5738 static void
5739 blob_delete_snapshot_power_failure(void)
5740 {
5741 	struct spdk_bs_dev *dev;
5742 	struct spdk_blob_store *bs;
5743 	struct spdk_blob_opts opts;
5744 	struct spdk_blob *blob, *snapshot;
5745 	struct spdk_power_failure_thresholds thresholds = {};
5746 	spdk_blob_id blobid, snapshotid;
5747 	const void *value;
5748 	size_t value_len;
5749 	size_t count;
5750 	spdk_blob_id ids[3] = {};
5751 	int rc;
5752 	bool deleted = false;
5753 	int delete_snapshot_bserrno = -1;
5754 
5755 	thresholds.general_threshold = 1;
5756 	while (!deleted) {
5757 		dev = init_dev();
5758 
5759 		spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
5760 		poll_threads();
5761 		CU_ASSERT(g_bserrno == 0);
5762 		SPDK_CU_ASSERT_FATAL(g_bs != NULL);
5763 		bs = g_bs;
5764 
5765 		/* Create blob */
5766 		ut_spdk_blob_opts_init(&opts);
5767 		opts.num_clusters = 10;
5768 
5769 		spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
5770 		poll_threads();
5771 		CU_ASSERT(g_bserrno == 0);
5772 		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5773 		blobid = g_blobid;
5774 
5775 		/* Create snapshot */
5776 		spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5777 		poll_threads();
5778 		CU_ASSERT(g_bserrno == 0);
5779 		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5780 		snapshotid = g_blobid;
5781 		SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1));
5782 		SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11));
5783 
5784 		dev_set_power_failure_thresholds(thresholds);
5785 
5786 		spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
5787 		poll_threads();
5788 		delete_snapshot_bserrno = g_bserrno;
5789 
5790 		/* Do not shut down cleanly. Assumption is that after snapshot deletion
5791 		 * reports success, changes to both blobs should already persisted. */
5792 		dev_reset_power_failure_event();
5793 		ut_bs_dirty_load(&bs, NULL);
5794 
5795 		SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1));
5796 		SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11));
5797 
5798 		spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
5799 		poll_threads();
5800 		CU_ASSERT(g_bserrno == 0);
5801 		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5802 		blob = g_blob;
5803 		SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == true);
5804 
5805 		spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
5806 		poll_threads();
5807 
5808 		if (g_bserrno == 0) {
5809 			SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5810 			snapshot = g_blob;
5811 			CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
5812 			count = SPDK_COUNTOF(ids);
5813 			rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
5814 			CU_ASSERT(rc == 0);
5815 			CU_ASSERT(count == 1);
5816 			CU_ASSERT(ids[0] == blobid);
5817 			rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_PENDING_REMOVAL, &value, &value_len);
5818 			CU_ASSERT(rc != 0);
5819 			SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(snapshot) == false);
5820 
5821 			spdk_blob_close(snapshot, blob_op_complete, NULL);
5822 			poll_threads();
5823 			CU_ASSERT(g_bserrno == 0);
5824 		} else {
5825 			CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID);
5826 			/* Snapshot might have been left in unrecoverable state, so it does not open.
5827 			 * Yet delete might perform further changes to the clone after that.
5828 			 * This UT should test until snapshot is deleted and delete call succeeds. */
5829 			if (delete_snapshot_bserrno == 0) {
5830 				deleted = true;
5831 			}
5832 		}
5833 
5834 		spdk_blob_close(blob, blob_op_complete, NULL);
5835 		poll_threads();
5836 		CU_ASSERT(g_bserrno == 0);
5837 
5838 		spdk_bs_unload(bs, bs_op_complete, NULL);
5839 		poll_threads();
5840 		CU_ASSERT(g_bserrno == 0);
5841 
5842 		thresholds.general_threshold++;
5843 	}
5844 }
5845 
5846 static void
5847 blob_create_snapshot_power_failure(void)
5848 {
5849 	struct spdk_blob_store *bs = g_bs;
5850 	struct spdk_bs_dev *dev;
5851 	struct spdk_blob_opts opts;
5852 	struct spdk_blob *blob, *snapshot;
5853 	struct spdk_power_failure_thresholds thresholds = {};
5854 	spdk_blob_id blobid, snapshotid;
5855 	const void *value;
5856 	size_t value_len;
5857 	size_t count;
5858 	spdk_blob_id ids[3] = {};
5859 	int rc;
5860 	bool created = false;
5861 	int create_snapshot_bserrno = -1;
5862 
5863 	thresholds.general_threshold = 1;
5864 	while (!created) {
5865 		dev = init_dev();
5866 
5867 		spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
5868 		poll_threads();
5869 		CU_ASSERT(g_bserrno == 0);
5870 		SPDK_CU_ASSERT_FATAL(g_bs != NULL);
5871 		bs = g_bs;
5872 
5873 		/* Create blob */
5874 		ut_spdk_blob_opts_init(&opts);
5875 		opts.num_clusters = 10;
5876 
5877 		spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
5878 		poll_threads();
5879 		CU_ASSERT(g_bserrno == 0);
5880 		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5881 		blobid = g_blobid;
5882 		SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1));
5883 		SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11));
5884 
5885 		dev_set_power_failure_thresholds(thresholds);
5886 
5887 		/* Create snapshot */
5888 		spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5889 		poll_threads();
5890 		create_snapshot_bserrno = g_bserrno;
5891 		snapshotid = g_blobid;
5892 		SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1));
5893 		SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11));
5894 
5895 		/* Do not shut down cleanly. Assumption is that after create snapshot
5896 		 * reports success, both blobs should be power-fail safe. */
5897 		dev_reset_power_failure_event();
5898 		ut_bs_dirty_load(&bs, NULL);
5899 
5900 		SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1));
5901 		SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11));
5902 
5903 		spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
5904 		poll_threads();
5905 		CU_ASSERT(g_bserrno == 0);
5906 		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5907 		blob = g_blob;
5908 
5909 		if (snapshotid != SPDK_BLOBID_INVALID) {
5910 			spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
5911 			poll_threads();
5912 		}
5913 
5914 		if ((snapshotid != SPDK_BLOBID_INVALID) && (g_bserrno == 0)) {
5915 			SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5916 			snapshot = g_blob;
5917 			SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == true);
5918 			SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(snapshot) == false);
5919 			CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
5920 			count = SPDK_COUNTOF(ids);
5921 			rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
5922 			CU_ASSERT(rc == 0);
5923 			CU_ASSERT(count == 1);
5924 			CU_ASSERT(ids[0] == blobid);
5925 			rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_IN_PROGRESS, &value, &value_len);
5926 			CU_ASSERT(rc != 0);
5927 
5928 			spdk_blob_close(snapshot, blob_op_complete, NULL);
5929 			poll_threads();
5930 			CU_ASSERT(g_bserrno == 0);
5931 			if (create_snapshot_bserrno == 0) {
5932 				created = true;
5933 			}
5934 		} else {
5935 			CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID);
5936 			SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == false);
5937 		}
5938 
5939 		spdk_blob_close(blob, blob_op_complete, NULL);
5940 		poll_threads();
5941 		CU_ASSERT(g_bserrno == 0);
5942 
5943 		spdk_bs_unload(bs, bs_op_complete, NULL);
5944 		poll_threads();
5945 		CU_ASSERT(g_bserrno == 0);
5946 
5947 		thresholds.general_threshold++;
5948 	}
5949 }
5950 
5951 static void
5952 test_io_write(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
5953 {
5954 	uint8_t payload_ff[64 * 512];
5955 	uint8_t payload_aa[64 * 512];
5956 	uint8_t payload_00[64 * 512];
5957 	uint8_t *cluster0, *cluster1;
5958 
5959 	memset(payload_ff, 0xFF, sizeof(payload_ff));
5960 	memset(payload_aa, 0xAA, sizeof(payload_aa));
5961 	memset(payload_00, 0x00, sizeof(payload_00));
5962 
5963 	/* Try to perform I/O with io unit = 512 */
5964 	spdk_blob_io_write(blob, channel, payload_ff, 0, 1, blob_op_complete, NULL);
5965 	poll_threads();
5966 	CU_ASSERT(g_bserrno == 0);
5967 
5968 	/* If thin provisioned is set cluster should be allocated now */
5969 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[0] != 0);
5970 	cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen];
5971 
5972 	/* Each character 0-F symbolizes single io_unit containing 512 bytes block filled with that character.
5973 	* Each page is separated by |. Whole block [...] symbolizes one cluster (containing 4 pages). */
5974 	/* cluster0: [ F000 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
5975 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
5976 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 31 * 512) == 0);
5977 
5978 	/* Verify write with offset on first page */
5979 	spdk_blob_io_write(blob, channel, payload_ff, 2, 1, blob_op_complete, NULL);
5980 	poll_threads();
5981 	CU_ASSERT(g_bserrno == 0);
5982 
5983 	/* cluster0: [ F0F0 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
5984 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
5985 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
5986 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
5987 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
5988 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_00, 28 * 512) == 0);
5989 
5990 	/* Verify write with offset on first page */
5991 	spdk_blob_io_write(blob, channel, payload_ff, 4, 4, blob_op_complete, NULL);
5992 	poll_threads();
5993 
5994 	/* cluster0: [ F0F0 FFFF | 0000 0000 | 0000 0000 | 0000 0000 ] */
5995 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
5996 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
5997 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
5998 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
5999 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 4 * 512) == 0);
6000 	CU_ASSERT(memcmp(cluster0 + 8 * 512, payload_00, 24 * 512) == 0);
6001 
6002 	/* Verify write with offset on second page */
6003 	spdk_blob_io_write(blob, channel, payload_ff, 8, 4, blob_op_complete, NULL);
6004 	poll_threads();
6005 
6006 	/* cluster0: [ F0F0 FFFF | FFFF 0000 | 0000 0000 | 0000 0000 ] */
6007 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6008 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6009 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6010 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6011 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 8 * 512) == 0);
6012 	CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0);
6013 
6014 	/* Verify write across multiple pages */
6015 	spdk_blob_io_write(blob, channel, payload_aa, 4, 8, blob_op_complete, NULL);
6016 	poll_threads();
6017 
6018 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 0000 ] */
6019 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6020 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6021 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6022 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6023 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
6024 	CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0);
6025 
6026 	/* Verify write across multiple clusters */
6027 	spdk_blob_io_write(blob, channel, payload_ff, 28, 8, blob_op_complete, NULL);
6028 	poll_threads();
6029 
6030 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0);
6031 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
6032 
6033 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6034 	 * cluster1: [ FFFF 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
6035 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6036 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6037 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6038 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6039 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
6040 	CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0);
6041 
6042 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0);
6043 	CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 28 * 512) == 0);
6044 
6045 	/* Verify write to second cluster */
6046 	spdk_blob_io_write(blob, channel, payload_ff, 32 + 12, 2, blob_op_complete, NULL);
6047 	poll_threads();
6048 
6049 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0);
6050 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
6051 
6052 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6053 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] */
6054 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6055 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6056 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6057 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6058 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
6059 	CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0);
6060 
6061 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0);
6062 	CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 8 * 512) == 0);
6063 	CU_ASSERT(memcmp(cluster1 + 12 * 512, payload_ff, 2 * 512) == 0);
6064 	CU_ASSERT(memcmp(cluster1 + 14 * 512, payload_00, 18 * 512) == 0);
6065 }
6066 
6067 static void
6068 test_io_read(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
6069 {
6070 	uint8_t payload_read[64 * 512];
6071 	uint8_t payload_ff[64 * 512];
6072 	uint8_t payload_aa[64 * 512];
6073 	uint8_t payload_00[64 * 512];
6074 
6075 	memset(payload_ff, 0xFF, sizeof(payload_ff));
6076 	memset(payload_aa, 0xAA, sizeof(payload_aa));
6077 	memset(payload_00, 0x00, sizeof(payload_00));
6078 
6079 	/* Read only first io unit */
6080 	/* cluster0: [ (F)0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6081 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
6082 	 * payload_read: F000 0000 | 0000 0000 ... */
6083 	memset(payload_read, 0x00, sizeof(payload_read));
6084 	spdk_blob_io_read(blob, channel, payload_read, 0, 1, blob_op_complete, NULL);
6085 	poll_threads();
6086 	CU_ASSERT(g_bserrno == 0);
6087 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
6088 	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 31 * 512) == 0);
6089 
6090 	/* Read four io_units starting from offset = 2
6091 	 * cluster0: [ F0(F0 AA)AA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6092 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
6093 	 * payload_read: F0AA 0000 | 0000 0000 ... */
6094 
6095 	memset(payload_read, 0x00, sizeof(payload_read));
6096 	spdk_blob_io_read(blob, channel, payload_read, 2, 4, blob_op_complete, NULL);
6097 	poll_threads();
6098 	CU_ASSERT(g_bserrno == 0);
6099 
6100 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
6101 	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0);
6102 	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_aa, 512) == 0);
6103 	CU_ASSERT(memcmp(payload_read + 3 * 512, payload_aa, 512) == 0);
6104 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0);
6105 
6106 	/* Read eight io_units across multiple pages
6107 	 * cluster0: [ F0F0 (AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ]
6108 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
6109 	 * payload_read: AAAA AAAA | 0000 0000 ... */
6110 	memset(payload_read, 0x00, sizeof(payload_read));
6111 	spdk_blob_io_read(blob, channel, payload_read, 4, 8, blob_op_complete, NULL);
6112 	poll_threads();
6113 	CU_ASSERT(g_bserrno == 0);
6114 
6115 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_aa, 8 * 512) == 0);
6116 	CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0);
6117 
6118 	/* Read eight io_units across multiple clusters
6119 	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 (FFFF ]
6120 	 * cluster1: [ FFFF) 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
6121 	 * payload_read: FFFF FFFF | 0000 0000 ... */
6122 	memset(payload_read, 0x00, sizeof(payload_read));
6123 	spdk_blob_io_read(blob, channel, payload_read, 28, 8, blob_op_complete, NULL);
6124 	poll_threads();
6125 	CU_ASSERT(g_bserrno == 0);
6126 
6127 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 8 * 512) == 0);
6128 	CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0);
6129 
6130 	/* Read four io_units from second cluster
6131 	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6132 	 * cluster1: [ FFFF 0000 | 00(00 FF)00 | 0000 0000 | 0000 0000 ]
6133 	 * payload_read: 00FF 0000 | 0000 0000 ... */
6134 	memset(payload_read, 0x00, sizeof(payload_read));
6135 	spdk_blob_io_read(blob, channel, payload_read, 32 + 10, 4, blob_op_complete, NULL);
6136 	poll_threads();
6137 	CU_ASSERT(g_bserrno == 0);
6138 
6139 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_00, 2 * 512) == 0);
6140 	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 2 * 512) == 0);
6141 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0);
6142 
6143 	/* Read second cluster
6144 	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6145 	 * cluster1: [ (FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ]
6146 	 * payload_read: FFFF 0000 | 0000 FF00 ... */
6147 	memset(payload_read, 0x00, sizeof(payload_read));
6148 	spdk_blob_io_read(blob, channel, payload_read, 32, 32, blob_op_complete, NULL);
6149 	poll_threads();
6150 	CU_ASSERT(g_bserrno == 0);
6151 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 4 * 512) == 0);
6152 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 8 * 512) == 0);
6153 	CU_ASSERT(memcmp(payload_read + 12 * 512, payload_ff, 2 * 512) == 0);
6154 	CU_ASSERT(memcmp(payload_read + 14 * 512, payload_00, 18 * 512) == 0);
6155 
6156 	/* Read whole two clusters
6157 	 * cluster0: [ (F0F0 AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ]
6158 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ] */
6159 	memset(payload_read, 0x00, sizeof(payload_read));
6160 	spdk_blob_io_read(blob, channel, payload_read, 0, 64, blob_op_complete, NULL);
6161 	poll_threads();
6162 	CU_ASSERT(g_bserrno == 0);
6163 
6164 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
6165 	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0);
6166 	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 512) == 0);
6167 	CU_ASSERT(memcmp(payload_read + 3 * 512, payload_00, 512) == 0);
6168 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_aa, 8 * 512) == 0);
6169 	CU_ASSERT(memcmp(payload_read + 28 * 512, payload_ff, 4 * 512) == 0);
6170 
6171 	CU_ASSERT(memcmp(payload_read + (32 + 0) * 512, payload_ff, 4 * 512) == 0);
6172 	CU_ASSERT(memcmp(payload_read + (32 + 4) * 512, payload_00, 8 * 512) == 0);
6173 	CU_ASSERT(memcmp(payload_read + (32 + 12) * 512, payload_ff, 2 * 512) == 0);
6174 	CU_ASSERT(memcmp(payload_read + (32 + 14) * 512, payload_00, 18 * 512) == 0);
6175 }
6176 
6177 
6178 static void
6179 test_io_unmap(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
6180 {
6181 	uint8_t payload_ff[64 * 512];
6182 	uint8_t payload_aa[64 * 512];
6183 	uint8_t payload_00[64 * 512];
6184 	uint8_t *cluster0, *cluster1;
6185 
6186 	memset(payload_ff, 0xFF, sizeof(payload_ff));
6187 	memset(payload_aa, 0xAA, sizeof(payload_aa));
6188 	memset(payload_00, 0x00, sizeof(payload_00));
6189 
6190 	cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen];
6191 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
6192 
6193 	/* Unmap */
6194 	spdk_blob_io_unmap(blob, channel, 0, 64, blob_op_complete, NULL);
6195 	poll_threads();
6196 
6197 	CU_ASSERT(g_bserrno == 0);
6198 
6199 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_00, 32 * 512) == 0);
6200 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_00, 32 * 512) == 0);
6201 }
6202 
6203 static void
6204 test_io_zeroes(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
6205 {
6206 	uint8_t payload_ff[64 * 512];
6207 	uint8_t payload_aa[64 * 512];
6208 	uint8_t payload_00[64 * 512];
6209 	uint8_t *cluster0, *cluster1;
6210 
6211 	memset(payload_ff, 0xFF, sizeof(payload_ff));
6212 	memset(payload_aa, 0xAA, sizeof(payload_aa));
6213 	memset(payload_00, 0x00, sizeof(payload_00));
6214 
6215 	cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen];
6216 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
6217 
6218 	/* Write zeroes  */
6219 	spdk_blob_io_write_zeroes(blob, channel, 0, 64, blob_op_complete, NULL);
6220 	poll_threads();
6221 
6222 	CU_ASSERT(g_bserrno == 0);
6223 
6224 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_00, 32 * 512) == 0);
6225 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_00, 32 * 512) == 0);
6226 }
6227 
6228 static inline void
6229 test_blob_io_writev(struct spdk_blob *blob, struct spdk_io_channel *channel,
6230 		    struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
6231 		    spdk_blob_op_complete cb_fn, void *cb_arg, struct spdk_blob_ext_io_opts *io_opts)
6232 {
6233 	if (io_opts) {
6234 		g_dev_writev_ext_called = false;
6235 		memset(&g_blob_ext_io_opts, 0, sizeof(g_blob_ext_io_opts));
6236 		spdk_blob_io_writev_ext(blob, channel, iov, iovcnt, offset, length, blob_op_complete, NULL,
6237 					io_opts);
6238 	} else {
6239 		spdk_blob_io_writev(blob, channel, iov, iovcnt, offset, length, blob_op_complete, NULL);
6240 	}
6241 	poll_threads();
6242 	CU_ASSERT(g_bserrno == 0);
6243 	if (io_opts) {
6244 		CU_ASSERT(g_dev_writev_ext_called);
6245 		CU_ASSERT(memcmp(io_opts, &g_blob_ext_io_opts, sizeof(g_blob_ext_io_opts)) == 0);
6246 	}
6247 }
6248 
6249 static void
6250 test_iov_write(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel,
6251 	       bool ext_api)
6252 {
6253 	uint8_t payload_ff[64 * 512];
6254 	uint8_t payload_aa[64 * 512];
6255 	uint8_t payload_00[64 * 512];
6256 	uint8_t *cluster0, *cluster1;
6257 	struct iovec iov[4];
6258 	struct spdk_blob_ext_io_opts ext_opts = {
6259 		.memory_domain = (struct spdk_memory_domain *)0xfeedbeef,
6260 		.memory_domain_ctx = (void *)0xf00df00d,
6261 		.size = sizeof(struct spdk_blob_ext_io_opts),
6262 		.user_ctx = (void *)123,
6263 	};
6264 
6265 	memset(payload_ff, 0xFF, sizeof(payload_ff));
6266 	memset(payload_aa, 0xAA, sizeof(payload_aa));
6267 	memset(payload_00, 0x00, sizeof(payload_00));
6268 
6269 	/* Try to perform I/O with io unit = 512 */
6270 	iov[0].iov_base = payload_ff;
6271 	iov[0].iov_len = 1 * 512;
6272 
6273 	test_blob_io_writev(blob, channel, iov, 1, 0, 1, blob_op_complete, NULL,
6274 			    ext_api ? &ext_opts : NULL);
6275 
6276 	/* If thin provisioned is set cluster should be allocated now */
6277 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[0] != 0);
6278 	cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen];
6279 
6280 	/* Each character 0-F symbolizes single io_unit containing 512 bytes block filled with that character.
6281 	* Each page is separated by |. Whole block [...] symbolizes one cluster (containing 4 pages). */
6282 	/* cluster0: [ F000 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
6283 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6284 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 31 * 512) == 0);
6285 
6286 	/* Verify write with offset on first page */
6287 	iov[0].iov_base = payload_ff;
6288 	iov[0].iov_len = 1 * 512;
6289 
6290 	test_blob_io_writev(blob, channel, iov, 1, 2, 1, blob_op_complete, NULL,
6291 			    ext_api ? &ext_opts : NULL);
6292 
6293 	/* cluster0: [ F0F0 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
6294 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6295 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6296 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6297 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6298 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_00, 28 * 512) == 0);
6299 
6300 	/* Verify write with offset on first page */
6301 	iov[0].iov_base = payload_ff;
6302 	iov[0].iov_len = 4 * 512;
6303 	spdk_blob_io_writev(blob, channel, iov, 1, 4, 4, blob_op_complete, NULL);
6304 	poll_threads();
6305 
6306 	/* cluster0: [ F0F0 FFFF | 0000 0000 | 0000 0000 | 0000 0000 ] */
6307 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6308 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6309 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6310 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6311 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 4 * 512) == 0);
6312 	CU_ASSERT(memcmp(cluster0 + 8 * 512, payload_00, 24 * 512) == 0);
6313 
6314 	/* Verify write with offset on second page */
6315 	iov[0].iov_base = payload_ff;
6316 	iov[0].iov_len = 4 * 512;
6317 	spdk_blob_io_writev(blob, channel, iov, 1, 8, 4, blob_op_complete, NULL);
6318 	poll_threads();
6319 
6320 	/* cluster0: [ F0F0 FFFF | FFFF 0000 | 0000 0000 | 0000 0000 ] */
6321 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6322 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6323 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6324 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6325 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 8 * 512) == 0);
6326 	CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0);
6327 
6328 	/* Verify write across multiple pages */
6329 	iov[0].iov_base = payload_aa;
6330 	iov[0].iov_len = 8 * 512;
6331 
6332 	test_blob_io_writev(blob, channel, iov, 1, 4, 8, blob_op_complete, NULL,
6333 			    ext_api ? &ext_opts : NULL);
6334 
6335 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 0000 ] */
6336 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6337 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6338 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6339 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6340 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
6341 	CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0);
6342 
6343 	/* Verify write across multiple clusters */
6344 
6345 	iov[0].iov_base = payload_ff;
6346 	iov[0].iov_len = 8 * 512;
6347 
6348 	test_blob_io_writev(blob, channel, iov, 1, 28, 8, blob_op_complete, NULL,
6349 			    ext_api ? &ext_opts : NULL);
6350 
6351 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0);
6352 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
6353 
6354 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6355 	 * cluster1: [ FFFF 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
6356 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6357 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6358 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6359 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6360 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
6361 	CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 16 * 512) == 0);
6362 	CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0);
6363 
6364 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0);
6365 	CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 28 * 512) == 0);
6366 
6367 	/* Verify write to second cluster */
6368 
6369 	iov[0].iov_base = payload_ff;
6370 	iov[0].iov_len = 2 * 512;
6371 
6372 	test_blob_io_writev(blob, channel, iov, 1, 32 + 12, 2, blob_op_complete, NULL,
6373 			    ext_api ? &ext_opts : NULL);
6374 
6375 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0);
6376 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
6377 
6378 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6379 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] */
6380 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6381 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6382 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6383 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6384 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
6385 	CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0);
6386 
6387 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0);
6388 	CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 8 * 512) == 0);
6389 	CU_ASSERT(memcmp(cluster1 + 12 * 512, payload_ff, 2 * 512) == 0);
6390 	CU_ASSERT(memcmp(cluster1 + 14 * 512, payload_00, 18 * 512) == 0);
6391 }
6392 
6393 static inline void
6394 test_blob_io_readv(struct spdk_blob *blob, struct spdk_io_channel *channel,
6395 		   struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
6396 		   spdk_blob_op_complete cb_fn, void *cb_arg, struct spdk_blob_ext_io_opts *io_opts)
6397 {
6398 	if (io_opts) {
6399 		g_dev_readv_ext_called = false;
6400 		memset(&g_blob_ext_io_opts, 0, sizeof(g_blob_ext_io_opts));
6401 		spdk_blob_io_readv_ext(blob, channel, iov, iovcnt, offset, length, blob_op_complete, NULL, io_opts);
6402 	} else {
6403 		spdk_blob_io_readv(blob, channel, iov, iovcnt, offset, length, blob_op_complete, NULL);
6404 	}
6405 	poll_threads();
6406 	CU_ASSERT(g_bserrno == 0);
6407 	if (io_opts) {
6408 		CU_ASSERT(g_dev_readv_ext_called);
6409 		CU_ASSERT(memcmp(io_opts, &g_blob_ext_io_opts, sizeof(g_blob_ext_io_opts)) == 0);
6410 	}
6411 }
6412 
6413 static void
6414 test_iov_read(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel,
6415 	      bool ext_api)
6416 {
6417 	uint8_t payload_read[64 * 512];
6418 	uint8_t payload_ff[64 * 512];
6419 	uint8_t payload_aa[64 * 512];
6420 	uint8_t payload_00[64 * 512];
6421 	struct iovec iov[4];
6422 	struct spdk_blob_ext_io_opts ext_opts = {
6423 		.memory_domain = (struct spdk_memory_domain *)0xfeedbeef,
6424 		.memory_domain_ctx = (void *)0xf00df00d,
6425 		.size = sizeof(struct spdk_blob_ext_io_opts),
6426 		.user_ctx = (void *)123,
6427 	};
6428 
6429 	memset(payload_ff, 0xFF, sizeof(payload_ff));
6430 	memset(payload_aa, 0xAA, sizeof(payload_aa));
6431 	memset(payload_00, 0x00, sizeof(payload_00));
6432 
6433 	/* Read only first io unit */
6434 	/* cluster0: [ (F)0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6435 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
6436 	 * payload_read: F000 0000 | 0000 0000 ... */
6437 	memset(payload_read, 0x00, sizeof(payload_read));
6438 	iov[0].iov_base = payload_read;
6439 	iov[0].iov_len = 1 * 512;
6440 
6441 	test_blob_io_readv(blob, channel, iov, 1, 0, 1, blob_op_complete, NULL, ext_api ? &ext_opts : NULL);
6442 
6443 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
6444 	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 31 * 512) == 0);
6445 
6446 	/* Read four io_units starting from offset = 2
6447 	 * cluster0: [ F0(F0 AA)AA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6448 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
6449 	 * payload_read: F0AA 0000 | 0000 0000 ... */
6450 
6451 	memset(payload_read, 0x00, sizeof(payload_read));
6452 	iov[0].iov_base = payload_read;
6453 	iov[0].iov_len = 4 * 512;
6454 
6455 	test_blob_io_readv(blob, channel, iov, 1, 2, 4, blob_op_complete, NULL, ext_api ? &ext_opts : NULL);
6456 
6457 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
6458 	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0);
6459 	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_aa, 512) == 0);
6460 	CU_ASSERT(memcmp(payload_read + 3 * 512, payload_aa, 512) == 0);
6461 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0);
6462 
6463 	/* Read eight io_units across multiple pages
6464 	 * cluster0: [ F0F0 (AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ]
6465 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
6466 	 * payload_read: AAAA AAAA | 0000 0000 ... */
6467 	memset(payload_read, 0x00, sizeof(payload_read));
6468 	iov[0].iov_base = payload_read;
6469 	iov[0].iov_len = 4 * 512;
6470 	iov[1].iov_base = payload_read + 4 * 512;
6471 	iov[1].iov_len = 4 * 512;
6472 
6473 	test_blob_io_readv(blob, channel, iov, 2, 4, 8, blob_op_complete, NULL, ext_api ? &ext_opts : NULL);
6474 
6475 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_aa, 8 * 512) == 0);
6476 	CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0);
6477 
6478 	/* Read eight io_units across multiple clusters
6479 	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 (FFFF ]
6480 	 * cluster1: [ FFFF) 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
6481 	 * payload_read: FFFF FFFF | 0000 0000 ... */
6482 	memset(payload_read, 0x00, sizeof(payload_read));
6483 	iov[0].iov_base = payload_read;
6484 	iov[0].iov_len = 2 * 512;
6485 	iov[1].iov_base = payload_read + 2 * 512;
6486 	iov[1].iov_len = 2 * 512;
6487 	iov[2].iov_base = payload_read + 4 * 512;
6488 	iov[2].iov_len = 2 * 512;
6489 	iov[3].iov_base = payload_read + 6 * 512;
6490 	iov[3].iov_len = 2 * 512;
6491 
6492 	test_blob_io_readv(blob, channel, iov, 4, 28, 8, blob_op_complete, NULL,
6493 			   ext_api ? &ext_opts : NULL);
6494 
6495 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 8 * 512) == 0);
6496 	CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0);
6497 
6498 	/* Read four io_units from second cluster
6499 	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6500 	 * cluster1: [ FFFF 0000 | 00(00 FF)00 | 0000 0000 | 0000 0000 ]
6501 	 * payload_read: 00FF 0000 | 0000 0000 ... */
6502 	memset(payload_read, 0x00, sizeof(payload_read));
6503 	iov[0].iov_base = payload_read;
6504 	iov[0].iov_len = 1 * 512;
6505 	iov[1].iov_base = payload_read + 1 * 512;
6506 	iov[1].iov_len = 3 * 512;
6507 
6508 	test_blob_io_readv(blob, channel, iov, 2, 32 + 10, 4, blob_op_complete, NULL,
6509 			   ext_api ? &ext_opts : NULL);
6510 
6511 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_00, 2 * 512) == 0);
6512 	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 2 * 512) == 0);
6513 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0);
6514 
6515 	/* Read second cluster
6516 	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6517 	 * cluster1: [ (FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ]
6518 	 * payload_read: FFFF 0000 | 0000 FF00 ... */
6519 	memset(payload_read, 0x00, sizeof(payload_read));
6520 	iov[0].iov_base = payload_read;
6521 	iov[0].iov_len = 1 * 512;
6522 	iov[1].iov_base = payload_read + 1 * 512;
6523 	iov[1].iov_len = 2 * 512;
6524 	iov[2].iov_base = payload_read + 3 * 512;
6525 	iov[2].iov_len = 4 * 512;
6526 	iov[3].iov_base = payload_read + 7 * 512;
6527 	iov[3].iov_len = 25 * 512;
6528 
6529 	test_blob_io_readv(blob, channel, iov, 4, 32, 32, blob_op_complete, NULL,
6530 			   ext_api ? &ext_opts : NULL);
6531 
6532 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 4 * 512) == 0);
6533 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 8 * 512) == 0);
6534 	CU_ASSERT(memcmp(payload_read + 12 * 512, payload_ff, 2 * 512) == 0);
6535 	CU_ASSERT(memcmp(payload_read + 14 * 512, payload_00, 18 * 512) == 0);
6536 
6537 	/* Read whole two clusters
6538 	 * cluster0: [ (F0F0 AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ]
6539 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ] */
6540 	memset(payload_read, 0x00, sizeof(payload_read));
6541 	iov[0].iov_base = payload_read;
6542 	iov[0].iov_len = 1 * 512;
6543 	iov[1].iov_base = payload_read + 1 * 512;
6544 	iov[1].iov_len = 8 * 512;
6545 	iov[2].iov_base = payload_read + 9 * 512;
6546 	iov[2].iov_len = 16 * 512;
6547 	iov[3].iov_base = payload_read + 25 * 512;
6548 	iov[3].iov_len = 39 * 512;
6549 
6550 	test_blob_io_readv(blob, channel, iov, 4, 0, 64, blob_op_complete, NULL,
6551 			   ext_api ? &ext_opts : NULL);
6552 
6553 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
6554 	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0);
6555 	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 512) == 0);
6556 	CU_ASSERT(memcmp(payload_read + 3 * 512, payload_00, 512) == 0);
6557 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_aa, 8 * 512) == 0);
6558 	CU_ASSERT(memcmp(payload_read + 28 * 512, payload_ff, 4 * 512) == 0);
6559 
6560 	CU_ASSERT(memcmp(payload_read + (32 + 0) * 512, payload_ff, 4 * 512) == 0);
6561 	CU_ASSERT(memcmp(payload_read + (32 + 4) * 512, payload_00, 8 * 512) == 0);
6562 	CU_ASSERT(memcmp(payload_read + (32 + 12) * 512, payload_ff, 2 * 512) == 0);
6563 	CU_ASSERT(memcmp(payload_read + (32 + 14) * 512, payload_00, 18 * 512) == 0);
6564 }
6565 
6566 static void
6567 blob_io_unit(void)
6568 {
6569 	struct spdk_bs_opts bsopts;
6570 	struct spdk_blob_opts opts;
6571 	struct spdk_blob_store *bs;
6572 	struct spdk_bs_dev *dev;
6573 	struct spdk_blob *blob, *snapshot, *clone;
6574 	spdk_blob_id blobid;
6575 	struct spdk_io_channel *channel;
6576 
6577 	/* Create dev with 512 bytes io unit size */
6578 
6579 	spdk_bs_opts_init(&bsopts, sizeof(bsopts));
6580 	bsopts.cluster_sz = SPDK_BS_PAGE_SIZE * 4;	/* 8 * 4 = 32 io_unit */
6581 	snprintf(bsopts.bstype.bstype, sizeof(bsopts.bstype.bstype), "TESTTYPE");
6582 
6583 	/* Try to initialize a new blob store with unsupported io_unit */
6584 	dev = init_dev();
6585 	dev->blocklen = 512;
6586 	dev->blockcnt =  DEV_BUFFER_SIZE / dev->blocklen;
6587 
6588 	/* Initialize a new blob store */
6589 	spdk_bs_init(dev, &bsopts, bs_op_with_handle_complete, NULL);
6590 	poll_threads();
6591 	CU_ASSERT(g_bserrno == 0);
6592 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
6593 	bs = g_bs;
6594 
6595 	CU_ASSERT(spdk_bs_get_io_unit_size(bs) == 512);
6596 	channel = spdk_bs_alloc_io_channel(bs);
6597 
6598 	/* Create thick provisioned blob */
6599 	ut_spdk_blob_opts_init(&opts);
6600 	opts.thin_provision = false;
6601 	opts.num_clusters = 32;
6602 
6603 	blob = ut_blob_create_and_open(bs, &opts);
6604 	blobid = spdk_blob_get_id(blob);
6605 
6606 	test_io_write(dev, blob, channel);
6607 	test_io_read(dev, blob, channel);
6608 	test_io_zeroes(dev, blob, channel);
6609 
6610 	test_iov_write(dev, blob, channel, false);
6611 	test_iov_read(dev, blob, channel, false);
6612 	test_io_zeroes(dev, blob, channel);
6613 
6614 	test_iov_write(dev, blob, channel, true);
6615 	test_iov_read(dev, blob, channel, true);
6616 
6617 	test_io_unmap(dev, blob, channel);
6618 
6619 	spdk_blob_close(blob, blob_op_complete, NULL);
6620 	poll_threads();
6621 	CU_ASSERT(g_bserrno == 0);
6622 	blob = NULL;
6623 	g_blob = NULL;
6624 
6625 	/* Create thin provisioned blob */
6626 
6627 	ut_spdk_blob_opts_init(&opts);
6628 	opts.thin_provision = true;
6629 	opts.num_clusters = 32;
6630 
6631 	blob = ut_blob_create_and_open(bs, &opts);
6632 	blobid = spdk_blob_get_id(blob);
6633 
6634 	test_io_write(dev, blob, channel);
6635 	test_io_read(dev, blob, channel);
6636 	test_io_zeroes(dev, blob, channel);
6637 
6638 	test_iov_write(dev, blob, channel, false);
6639 	test_iov_read(dev, blob, channel, false);
6640 	test_io_zeroes(dev, blob, channel);
6641 
6642 	test_iov_write(dev, blob, channel, true);
6643 	test_iov_read(dev, blob, channel, true);
6644 
6645 	/* Create snapshot */
6646 
6647 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
6648 	poll_threads();
6649 	CU_ASSERT(g_bserrno == 0);
6650 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
6651 	blobid = g_blobid;
6652 
6653 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
6654 	poll_threads();
6655 	CU_ASSERT(g_bserrno == 0);
6656 	CU_ASSERT(g_blob != NULL);
6657 	snapshot = g_blob;
6658 
6659 	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
6660 	poll_threads();
6661 	CU_ASSERT(g_bserrno == 0);
6662 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
6663 	blobid = g_blobid;
6664 
6665 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
6666 	poll_threads();
6667 	CU_ASSERT(g_bserrno == 0);
6668 	CU_ASSERT(g_blob != NULL);
6669 	clone = g_blob;
6670 
6671 	test_io_read(dev, blob, channel);
6672 	test_io_read(dev, snapshot, channel);
6673 	test_io_read(dev, clone, channel);
6674 
6675 	test_iov_read(dev, blob, channel, false);
6676 	test_iov_read(dev, snapshot, channel, false);
6677 	test_iov_read(dev, clone, channel, false);
6678 
6679 	test_iov_read(dev, blob, channel, true);
6680 	test_iov_read(dev, snapshot, channel, true);
6681 	test_iov_read(dev, clone, channel, true);
6682 
6683 	/* Inflate clone */
6684 
6685 	spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
6686 	poll_threads();
6687 
6688 	CU_ASSERT(g_bserrno == 0);
6689 
6690 	test_io_read(dev, clone, channel);
6691 
6692 	test_io_unmap(dev, clone, channel);
6693 
6694 	test_iov_write(dev, clone, channel, false);
6695 	test_iov_read(dev, clone, channel, false);
6696 	test_io_unmap(dev, clone, channel);
6697 
6698 	test_iov_write(dev, clone, channel, true);
6699 	test_iov_read(dev, clone, channel, true);
6700 
6701 	spdk_blob_close(blob, blob_op_complete, NULL);
6702 	spdk_blob_close(snapshot, blob_op_complete, NULL);
6703 	spdk_blob_close(clone, blob_op_complete, NULL);
6704 	poll_threads();
6705 	CU_ASSERT(g_bserrno == 0);
6706 	blob = NULL;
6707 	g_blob = NULL;
6708 
6709 	spdk_bs_free_io_channel(channel);
6710 	poll_threads();
6711 
6712 	/* Unload the blob store */
6713 	spdk_bs_unload(bs, bs_op_complete, NULL);
6714 	poll_threads();
6715 	CU_ASSERT(g_bserrno == 0);
6716 	g_bs = NULL;
6717 	g_blob = NULL;
6718 	g_blobid = 0;
6719 }
6720 
6721 static void
6722 blob_io_unit_compatibility(void)
6723 {
6724 	struct spdk_bs_opts bsopts;
6725 	struct spdk_blob_store *bs;
6726 	struct spdk_bs_dev *dev;
6727 	struct spdk_bs_super_block *super;
6728 
6729 	/* Create dev with 512 bytes io unit size */
6730 
6731 	spdk_bs_opts_init(&bsopts, sizeof(bsopts));
6732 	bsopts.cluster_sz = SPDK_BS_PAGE_SIZE * 4;	/* 8 * 4 = 32 io_unit */
6733 	snprintf(bsopts.bstype.bstype, sizeof(bsopts.bstype.bstype), "TESTTYPE");
6734 
6735 	/* Try to initialize a new blob store with unsupported io_unit */
6736 	dev = init_dev();
6737 	dev->blocklen = 512;
6738 	dev->blockcnt =  DEV_BUFFER_SIZE / dev->blocklen;
6739 
6740 	/* Initialize a new blob store */
6741 	spdk_bs_init(dev, &bsopts, bs_op_with_handle_complete, NULL);
6742 	poll_threads();
6743 	CU_ASSERT(g_bserrno == 0);
6744 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
6745 	bs = g_bs;
6746 
6747 	CU_ASSERT(spdk_bs_get_io_unit_size(bs) == 512);
6748 
6749 	/* Unload the blob store */
6750 	spdk_bs_unload(bs, bs_op_complete, NULL);
6751 	poll_threads();
6752 	CU_ASSERT(g_bserrno == 0);
6753 
6754 	/* Modify super block to behave like older version.
6755 	 * Check if loaded io unit size equals SPDK_BS_PAGE_SIZE */
6756 	super = (struct spdk_bs_super_block *)&g_dev_buffer[0];
6757 	super->io_unit_size = 0;
6758 	super->crc = blob_md_page_calc_crc(super);
6759 
6760 	dev = init_dev();
6761 	dev->blocklen = 512;
6762 	dev->blockcnt =  DEV_BUFFER_SIZE / dev->blocklen;
6763 
6764 	spdk_bs_load(dev, &bsopts, bs_op_with_handle_complete, NULL);
6765 	poll_threads();
6766 	CU_ASSERT(g_bserrno == 0);
6767 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
6768 	bs = g_bs;
6769 
6770 	CU_ASSERT(spdk_bs_get_io_unit_size(bs) == SPDK_BS_PAGE_SIZE);
6771 
6772 	/* Unload the blob store */
6773 	spdk_bs_unload(bs, bs_op_complete, NULL);
6774 	poll_threads();
6775 	CU_ASSERT(g_bserrno == 0);
6776 
6777 	g_bs = NULL;
6778 	g_blob = NULL;
6779 	g_blobid = 0;
6780 }
6781 
6782 static void
6783 first_sync_complete(void *cb_arg, int bserrno)
6784 {
6785 	struct spdk_blob *blob = cb_arg;
6786 	int rc;
6787 
6788 	CU_ASSERT(bserrno == 0);
6789 	rc = spdk_blob_set_xattr(blob, "sync", "second", strlen("second") + 1);
6790 	CU_ASSERT(rc == 0);
6791 	CU_ASSERT(g_bserrno == -1);
6792 
6793 	/* Keep g_bserrno at -1, only the
6794 	 * second sync completion should set it at 0. */
6795 }
6796 
6797 static void
6798 second_sync_complete(void *cb_arg, int bserrno)
6799 {
6800 	struct spdk_blob *blob = cb_arg;
6801 	const void *value;
6802 	size_t value_len;
6803 	int rc;
6804 
6805 	CU_ASSERT(bserrno == 0);
6806 
6807 	/* Verify that the first sync completion had a chance to execute */
6808 	rc = spdk_blob_get_xattr_value(blob, "sync", &value, &value_len);
6809 	CU_ASSERT(rc == 0);
6810 	SPDK_CU_ASSERT_FATAL(value != NULL);
6811 	CU_ASSERT(value_len == strlen("second") + 1);
6812 	CU_ASSERT_NSTRING_EQUAL_FATAL(value, "second", value_len);
6813 
6814 	CU_ASSERT(g_bserrno == -1);
6815 	g_bserrno = bserrno;
6816 }
6817 
6818 static void
6819 blob_simultaneous_operations(void)
6820 {
6821 	struct spdk_blob_store *bs = g_bs;
6822 	struct spdk_blob_opts opts;
6823 	struct spdk_blob *blob, *snapshot;
6824 	spdk_blob_id blobid, snapshotid;
6825 	struct spdk_io_channel *channel;
6826 	int rc;
6827 
6828 	channel = spdk_bs_alloc_io_channel(bs);
6829 	SPDK_CU_ASSERT_FATAL(channel != NULL);
6830 
6831 	ut_spdk_blob_opts_init(&opts);
6832 	opts.num_clusters = 10;
6833 
6834 	blob = ut_blob_create_and_open(bs, &opts);
6835 	blobid = spdk_blob_get_id(blob);
6836 
6837 	/* Create snapshot and try to remove blob in the same time:
6838 	 * - snapshot should be created successfully
6839 	 * - delete operation should fail w -EBUSY */
6840 	CU_ASSERT(blob->locked_operation_in_progress == false);
6841 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
6842 	CU_ASSERT(blob->locked_operation_in_progress == true);
6843 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
6844 	CU_ASSERT(blob->locked_operation_in_progress == true);
6845 	/* Deletion failure */
6846 	CU_ASSERT(g_bserrno == -EBUSY);
6847 	poll_threads();
6848 	CU_ASSERT(blob->locked_operation_in_progress == false);
6849 	/* Snapshot creation success */
6850 	CU_ASSERT(g_bserrno == 0);
6851 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
6852 
6853 	snapshotid = g_blobid;
6854 
6855 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
6856 	poll_threads();
6857 	CU_ASSERT(g_bserrno == 0);
6858 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
6859 	snapshot = g_blob;
6860 
6861 	/* Inflate blob and try to remove blob in the same time:
6862 	 * - blob should be inflated successfully
6863 	 * - delete operation should fail w -EBUSY */
6864 	CU_ASSERT(blob->locked_operation_in_progress == false);
6865 	spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
6866 	CU_ASSERT(blob->locked_operation_in_progress == true);
6867 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
6868 	CU_ASSERT(blob->locked_operation_in_progress == true);
6869 	/* Deletion failure */
6870 	CU_ASSERT(g_bserrno == -EBUSY);
6871 	poll_threads();
6872 	CU_ASSERT(blob->locked_operation_in_progress == false);
6873 	/* Inflation success */
6874 	CU_ASSERT(g_bserrno == 0);
6875 
6876 	/* Clone snapshot and try to remove snapshot in the same time:
6877 	 * - snapshot should be cloned successfully
6878 	 * - delete operation should fail w -EBUSY */
6879 	CU_ASSERT(blob->locked_operation_in_progress == false);
6880 	spdk_bs_create_clone(bs, snapshotid, NULL, blob_op_with_id_complete, NULL);
6881 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
6882 	/* Deletion failure */
6883 	CU_ASSERT(g_bserrno == -EBUSY);
6884 	poll_threads();
6885 	CU_ASSERT(blob->locked_operation_in_progress == false);
6886 	/* Clone created */
6887 	CU_ASSERT(g_bserrno == 0);
6888 
6889 	/* Resize blob and try to remove blob in the same time:
6890 	 * - blob should be resized successfully
6891 	 * - delete operation should fail w -EBUSY */
6892 	CU_ASSERT(blob->locked_operation_in_progress == false);
6893 	spdk_blob_resize(blob, 50, blob_op_complete, NULL);
6894 	CU_ASSERT(blob->locked_operation_in_progress == true);
6895 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
6896 	CU_ASSERT(blob->locked_operation_in_progress == true);
6897 	/* Deletion failure */
6898 	CU_ASSERT(g_bserrno == -EBUSY);
6899 	poll_threads();
6900 	CU_ASSERT(blob->locked_operation_in_progress == false);
6901 	/* Blob resized successfully */
6902 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
6903 	poll_threads();
6904 	CU_ASSERT(g_bserrno == 0);
6905 
6906 	/* Issue two consecutive blob syncs, neither should fail.
6907 	 * Force sync to actually occur by marking blob dirty each time.
6908 	 * Execution of sync should not be enough to complete the operation,
6909 	 * since disk I/O is required to complete it. */
6910 	g_bserrno = -1;
6911 
6912 	rc = spdk_blob_set_xattr(blob, "sync", "first", strlen("first") + 1);
6913 	CU_ASSERT(rc == 0);
6914 	spdk_blob_sync_md(blob, first_sync_complete, blob);
6915 	CU_ASSERT(g_bserrno == -1);
6916 
6917 	spdk_blob_sync_md(blob, second_sync_complete, blob);
6918 	CU_ASSERT(g_bserrno == -1);
6919 
6920 	poll_threads();
6921 	CU_ASSERT(g_bserrno == 0);
6922 
6923 	spdk_bs_free_io_channel(channel);
6924 	poll_threads();
6925 
6926 	ut_blob_close_and_delete(bs, snapshot);
6927 	ut_blob_close_and_delete(bs, blob);
6928 }
6929 
6930 static void
6931 blob_persist_test(void)
6932 {
6933 	struct spdk_blob_store *bs = g_bs;
6934 	struct spdk_blob_opts opts;
6935 	struct spdk_blob *blob;
6936 	spdk_blob_id blobid;
6937 	struct spdk_io_channel *channel;
6938 	char *xattr;
6939 	size_t xattr_length;
6940 	int rc;
6941 	uint32_t page_count_clear, page_count_xattr;
6942 	uint64_t poller_iterations;
6943 	bool run_poller;
6944 
6945 	channel = spdk_bs_alloc_io_channel(bs);
6946 	SPDK_CU_ASSERT_FATAL(channel != NULL);
6947 
6948 	ut_spdk_blob_opts_init(&opts);
6949 	opts.num_clusters = 10;
6950 
6951 	blob = ut_blob_create_and_open(bs, &opts);
6952 	blobid = spdk_blob_get_id(blob);
6953 
6954 	/* Save the amount of md pages used after creation of a blob.
6955 	 * This should be consistent after removing xattr. */
6956 	page_count_clear = spdk_bit_array_count_set(bs->used_md_pages);
6957 	SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_clear);
6958 	SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_clear);
6959 
6960 	/* Add xattr with maximum length of descriptor to exceed single metadata page. */
6961 	xattr_length = SPDK_BS_MAX_DESC_SIZE - sizeof(struct spdk_blob_md_descriptor_xattr) -
6962 		       strlen("large_xattr");
6963 	xattr = calloc(xattr_length, sizeof(char));
6964 	SPDK_CU_ASSERT_FATAL(xattr != NULL);
6965 
6966 	rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
6967 	SPDK_CU_ASSERT_FATAL(rc == 0);
6968 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
6969 	poll_threads();
6970 	SPDK_CU_ASSERT_FATAL(g_bserrno == 0);
6971 
6972 	/* Save the amount of md pages used after adding the large xattr */
6973 	page_count_xattr = spdk_bit_array_count_set(bs->used_md_pages);
6974 	SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_xattr);
6975 	SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_xattr);
6976 
6977 	/* Add xattr to a blob and sync it. While sync is occurring, remove the xattr and sync again.
6978 	 * Interrupt the first sync after increasing number of poller iterations, until it succeeds.
6979 	 * Expectation is that after second sync completes no xattr is saved in metadata. */
6980 	poller_iterations = 1;
6981 	run_poller = true;
6982 	while (run_poller) {
6983 		rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
6984 		SPDK_CU_ASSERT_FATAL(rc == 0);
6985 		g_bserrno = -1;
6986 		spdk_blob_sync_md(blob, blob_op_complete, NULL);
6987 		poll_thread_times(0, poller_iterations);
6988 		if (g_bserrno == 0) {
6989 			/* Poller iteration count was high enough for first sync to complete.
6990 			 * Verify that blob takes up enough of md_pages to store the xattr. */
6991 			SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_xattr);
6992 			SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_xattr);
6993 			SPDK_CU_ASSERT_FATAL(spdk_bit_array_count_set(bs->used_md_pages) == page_count_xattr);
6994 			run_poller = false;
6995 		}
6996 		rc = spdk_blob_remove_xattr(blob, "large_xattr");
6997 		SPDK_CU_ASSERT_FATAL(rc == 0);
6998 		spdk_blob_sync_md(blob, blob_op_complete, NULL);
6999 		poll_threads();
7000 		SPDK_CU_ASSERT_FATAL(g_bserrno == 0);
7001 		SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_clear);
7002 		SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_clear);
7003 		SPDK_CU_ASSERT_FATAL(spdk_bit_array_count_set(bs->used_md_pages) == page_count_clear);
7004 
7005 		/* Reload bs and re-open blob to verify that xattr was not persisted. */
7006 		spdk_blob_close(blob, blob_op_complete, NULL);
7007 		poll_threads();
7008 		CU_ASSERT(g_bserrno == 0);
7009 
7010 		ut_bs_reload(&bs, NULL);
7011 
7012 		spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
7013 		poll_threads();
7014 		CU_ASSERT(g_bserrno == 0);
7015 		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
7016 		blob = g_blob;
7017 
7018 		rc = spdk_blob_get_xattr_value(blob, "large_xattr", (const void **)&xattr, &xattr_length);
7019 		SPDK_CU_ASSERT_FATAL(rc == -ENOENT);
7020 
7021 		poller_iterations++;
7022 		/* Stop at high iteration count to prevent infinite loop.
7023 		 * This value should be enough for first md sync to complete in any case. */
7024 		SPDK_CU_ASSERT_FATAL(poller_iterations < 50);
7025 	}
7026 
7027 	free(xattr);
7028 
7029 	ut_blob_close_and_delete(bs, blob);
7030 
7031 	spdk_bs_free_io_channel(channel);
7032 	poll_threads();
7033 }
7034 
7035 static void
7036 blob_decouple_snapshot(void)
7037 {
7038 	struct spdk_blob_store *bs = g_bs;
7039 	struct spdk_blob_opts opts;
7040 	struct spdk_blob *blob, *snapshot1, *snapshot2;
7041 	struct spdk_io_channel *channel;
7042 	spdk_blob_id blobid, snapshotid;
7043 	uint64_t cluster;
7044 
7045 	channel = spdk_bs_alloc_io_channel(bs);
7046 	SPDK_CU_ASSERT_FATAL(channel != NULL);
7047 
7048 	ut_spdk_blob_opts_init(&opts);
7049 	opts.num_clusters = 10;
7050 	opts.thin_provision = false;
7051 
7052 	blob = ut_blob_create_and_open(bs, &opts);
7053 	blobid = spdk_blob_get_id(blob);
7054 
7055 	/* Create first snapshot */
7056 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 0);
7057 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
7058 	poll_threads();
7059 	CU_ASSERT(g_bserrno == 0);
7060 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
7061 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
7062 	snapshotid = g_blobid;
7063 
7064 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
7065 	poll_threads();
7066 	CU_ASSERT(g_bserrno == 0);
7067 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
7068 	snapshot1 = g_blob;
7069 
7070 	/* Create the second one */
7071 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
7072 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
7073 	poll_threads();
7074 	CU_ASSERT(g_bserrno == 0);
7075 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
7076 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 2);
7077 	snapshotid = g_blobid;
7078 
7079 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
7080 	poll_threads();
7081 	CU_ASSERT(g_bserrno == 0);
7082 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
7083 	snapshot2 = g_blob;
7084 	CU_ASSERT_EQUAL(spdk_blob_get_parent_snapshot(bs, snapshot2->id), snapshot1->id);
7085 
7086 	/* Now decouple the second snapshot forcing it to copy the written clusters */
7087 	spdk_bs_blob_decouple_parent(bs, channel, snapshot2->id, blob_op_complete, NULL);
7088 	poll_threads();
7089 	CU_ASSERT(g_bserrno == 0);
7090 
7091 	/* Verify that the snapshot has been decoupled and that the clusters have been copied */
7092 	CU_ASSERT_EQUAL(spdk_blob_get_parent_snapshot(bs, snapshot2->id), SPDK_BLOBID_INVALID);
7093 	for (cluster = 0; cluster < snapshot2->active.num_clusters; ++cluster) {
7094 		CU_ASSERT_NOT_EQUAL(snapshot2->active.clusters[cluster], 0);
7095 		CU_ASSERT_NOT_EQUAL(snapshot2->active.clusters[cluster],
7096 				    snapshot1->active.clusters[cluster]);
7097 	}
7098 
7099 	spdk_bs_free_io_channel(channel);
7100 
7101 	ut_blob_close_and_delete(bs, snapshot2);
7102 	ut_blob_close_and_delete(bs, snapshot1);
7103 	ut_blob_close_and_delete(bs, blob);
7104 	poll_threads();
7105 }
7106 
7107 static void
7108 suite_bs_setup(void)
7109 {
7110 	struct spdk_bs_dev *dev;
7111 
7112 	dev = init_dev();
7113 	memset(g_dev_buffer, 0, DEV_BUFFER_SIZE);
7114 	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
7115 	poll_threads();
7116 	CU_ASSERT(g_bserrno == 0);
7117 	CU_ASSERT(g_bs != NULL);
7118 }
7119 
7120 static void
7121 suite_bs_cleanup(void)
7122 {
7123 	spdk_bs_unload(g_bs, bs_op_complete, NULL);
7124 	poll_threads();
7125 	CU_ASSERT(g_bserrno == 0);
7126 	g_bs = NULL;
7127 	memset(g_dev_buffer, 0, DEV_BUFFER_SIZE);
7128 }
7129 
7130 static struct spdk_blob *
7131 ut_blob_create_and_open(struct spdk_blob_store *bs, struct spdk_blob_opts *blob_opts)
7132 {
7133 	struct spdk_blob *blob;
7134 	struct spdk_blob_opts create_blob_opts;
7135 	spdk_blob_id blobid;
7136 
7137 	if (blob_opts == NULL) {
7138 		ut_spdk_blob_opts_init(&create_blob_opts);
7139 		blob_opts = &create_blob_opts;
7140 	}
7141 
7142 	spdk_bs_create_blob_ext(bs, blob_opts, blob_op_with_id_complete, NULL);
7143 	poll_threads();
7144 	CU_ASSERT(g_bserrno == 0);
7145 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
7146 	blobid = g_blobid;
7147 	g_blobid = -1;
7148 
7149 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
7150 	poll_threads();
7151 	CU_ASSERT(g_bserrno == 0);
7152 	CU_ASSERT(g_blob != NULL);
7153 	blob = g_blob;
7154 
7155 	g_blob = NULL;
7156 	g_bserrno = -1;
7157 
7158 	return blob;
7159 }
7160 
7161 static void
7162 ut_blob_close_and_delete(struct spdk_blob_store *bs, struct spdk_blob *blob)
7163 {
7164 	spdk_blob_id blobid = spdk_blob_get_id(blob);
7165 
7166 	spdk_blob_close(blob, blob_op_complete, NULL);
7167 	poll_threads();
7168 	CU_ASSERT(g_bserrno == 0);
7169 	g_blob = NULL;
7170 
7171 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
7172 	poll_threads();
7173 	CU_ASSERT(g_bserrno == 0);
7174 	g_bserrno = -1;
7175 }
7176 
7177 static void
7178 suite_blob_setup(void)
7179 {
7180 	suite_bs_setup();
7181 	CU_ASSERT(g_bs != NULL);
7182 
7183 	g_blob = ut_blob_create_and_open(g_bs, NULL);
7184 	CU_ASSERT(g_blob != NULL);
7185 }
7186 
7187 static void
7188 suite_blob_cleanup(void)
7189 {
7190 	ut_blob_close_and_delete(g_bs, g_blob);
7191 	CU_ASSERT(g_blob == NULL);
7192 
7193 	suite_bs_cleanup();
7194 	CU_ASSERT(g_bs == NULL);
7195 }
7196 
7197 int main(int argc, char **argv)
7198 {
7199 	CU_pSuite	suite, suite_bs, suite_blob;
7200 	unsigned int	num_failures;
7201 
7202 	CU_set_error_action(CUEA_ABORT);
7203 	CU_initialize_registry();
7204 
7205 	suite = CU_add_suite("blob", NULL, NULL);
7206 	suite_bs = CU_add_suite_with_setup_and_teardown("blob_bs", NULL, NULL,
7207 			suite_bs_setup, suite_bs_cleanup);
7208 	suite_blob = CU_add_suite_with_setup_and_teardown("blob_blob", NULL, NULL,
7209 			suite_blob_setup, suite_blob_cleanup);
7210 
7211 	CU_ADD_TEST(suite, blob_init);
7212 	CU_ADD_TEST(suite_bs, blob_open);
7213 	CU_ADD_TEST(suite_bs, blob_create);
7214 	CU_ADD_TEST(suite_bs, blob_create_loop);
7215 	CU_ADD_TEST(suite_bs, blob_create_fail);
7216 	CU_ADD_TEST(suite_bs, blob_create_internal);
7217 	CU_ADD_TEST(suite_bs, blob_create_zero_extent);
7218 	CU_ADD_TEST(suite, blob_thin_provision);
7219 	CU_ADD_TEST(suite_bs, blob_snapshot);
7220 	CU_ADD_TEST(suite_bs, blob_clone);
7221 	CU_ADD_TEST(suite_bs, blob_inflate);
7222 	CU_ADD_TEST(suite_bs, blob_delete);
7223 	CU_ADD_TEST(suite_bs, blob_resize_test);
7224 	CU_ADD_TEST(suite, blob_read_only);
7225 	CU_ADD_TEST(suite_bs, channel_ops);
7226 	CU_ADD_TEST(suite_bs, blob_super);
7227 	CU_ADD_TEST(suite_blob, blob_write);
7228 	CU_ADD_TEST(suite_blob, blob_read);
7229 	CU_ADD_TEST(suite_blob, blob_rw_verify);
7230 	CU_ADD_TEST(suite_bs, blob_rw_verify_iov);
7231 	CU_ADD_TEST(suite_blob, blob_rw_verify_iov_nomem);
7232 	CU_ADD_TEST(suite_blob, blob_rw_iov_read_only);
7233 	CU_ADD_TEST(suite_bs, blob_unmap);
7234 	CU_ADD_TEST(suite_bs, blob_iter);
7235 	CU_ADD_TEST(suite_blob, blob_xattr);
7236 	CU_ADD_TEST(suite_bs, blob_parse_md);
7237 	CU_ADD_TEST(suite, bs_load);
7238 	CU_ADD_TEST(suite_bs, bs_load_pending_removal);
7239 	CU_ADD_TEST(suite, bs_load_custom_cluster_size);
7240 	CU_ADD_TEST(suite_bs, bs_unload);
7241 	CU_ADD_TEST(suite, bs_cluster_sz);
7242 	CU_ADD_TEST(suite_bs, bs_usable_clusters);
7243 	CU_ADD_TEST(suite, bs_resize_md);
7244 	CU_ADD_TEST(suite, bs_destroy);
7245 	CU_ADD_TEST(suite, bs_type);
7246 	CU_ADD_TEST(suite, bs_super_block);
7247 	CU_ADD_TEST(suite, bs_test_recover_cluster_count);
7248 	CU_ADD_TEST(suite, blob_serialize_test);
7249 	CU_ADD_TEST(suite_bs, blob_crc);
7250 	CU_ADD_TEST(suite, super_block_crc);
7251 	CU_ADD_TEST(suite_blob, blob_dirty_shutdown);
7252 	CU_ADD_TEST(suite_bs, blob_flags);
7253 	CU_ADD_TEST(suite_bs, bs_version);
7254 	CU_ADD_TEST(suite_bs, blob_set_xattrs_test);
7255 	CU_ADD_TEST(suite_bs, blob_thin_prov_alloc);
7256 	CU_ADD_TEST(suite_bs, blob_insert_cluster_msg_test);
7257 	CU_ADD_TEST(suite_bs, blob_thin_prov_rw);
7258 	CU_ADD_TEST(suite, blob_thin_prov_write_count_io);
7259 	CU_ADD_TEST(suite_bs, blob_thin_prov_rle);
7260 	CU_ADD_TEST(suite_bs, blob_thin_prov_rw_iov);
7261 	CU_ADD_TEST(suite, bs_load_iter_test);
7262 	CU_ADD_TEST(suite_bs, blob_snapshot_rw);
7263 	CU_ADD_TEST(suite_bs, blob_snapshot_rw_iov);
7264 	CU_ADD_TEST(suite, blob_relations);
7265 	CU_ADD_TEST(suite, blob_relations2);
7266 	CU_ADD_TEST(suite, blob_relations3);
7267 	CU_ADD_TEST(suite, blobstore_clean_power_failure);
7268 	CU_ADD_TEST(suite, blob_delete_snapshot_power_failure);
7269 	CU_ADD_TEST(suite, blob_create_snapshot_power_failure);
7270 	CU_ADD_TEST(suite_bs, blob_inflate_rw);
7271 	CU_ADD_TEST(suite_bs, blob_snapshot_freeze_io);
7272 	CU_ADD_TEST(suite_bs, blob_operation_split_rw);
7273 	CU_ADD_TEST(suite_bs, blob_operation_split_rw_iov);
7274 	CU_ADD_TEST(suite, blob_io_unit);
7275 	CU_ADD_TEST(suite, blob_io_unit_compatibility);
7276 	CU_ADD_TEST(suite_bs, blob_simultaneous_operations);
7277 	CU_ADD_TEST(suite_bs, blob_persist_test);
7278 	CU_ADD_TEST(suite_bs, blob_decouple_snapshot);
7279 
7280 	allocate_threads(2);
7281 	set_thread(0);
7282 
7283 	g_dev_buffer = calloc(1, DEV_BUFFER_SIZE);
7284 
7285 	CU_basic_set_mode(CU_BRM_VERBOSE);
7286 	g_use_extent_table = false;
7287 	CU_basic_run_tests();
7288 	num_failures = CU_get_number_of_failures();
7289 	g_use_extent_table = true;
7290 	CU_basic_run_tests();
7291 	num_failures += CU_get_number_of_failures();
7292 	CU_cleanup_registry();
7293 
7294 	free(g_dev_buffer);
7295 
7296 	free_threads();
7297 
7298 	return num_failures;
7299 }
7300