xref: /spdk/test/unit/lib/blob/blob.c/blob_ut.c (revision 3630473789c359155f05075bea018c32d24032b3)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include "spdk/stdinc.h"
35 
36 #include "spdk_cunit.h"
37 #include "spdk/blob.h"
38 #include "spdk/string.h"
39 
40 #include "common/lib/ut_multithread.c"
41 #include "../bs_dev_common.c"
42 #include "blob/blobstore.c"
43 #include "blob/request.c"
44 #include "blob/zeroes.c"
45 #include "blob/blob_bs_dev.c"
46 
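/* Globals shared by the tests below: most capture results handed back by the
 * asynchronous blobstore/blob callbacks; the xattr tables provide canned
 * names and values for xattr tests. */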
47 struct spdk_blob_store *g_bs;
48 spdk_blob_id g_blobid;
49 struct spdk_blob *g_blob, *g_blob2;
50 int g_bserrno, g_bserrno2;
51 struct spdk_xattr_names *g_names;
52 int g_done;
53 char *g_xattr_names[] = {"first", "second", "third"};
54 char *g_xattr_values[] = {"one", "two", "three"};
55 uint64_t g_ctx = 1729;
56 bool g_use_extent_table = false;
57 
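/* Local definition of the version 1 super block layout (the current layout lives in
 * blob/blobstore.h); keeping it here lets tests construct an old-format super block
 * by hand. */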
58 struct spdk_bs_super_block_ver1 {
59 	uint8_t		signature[8];
60 	uint32_t        version;
61 	uint32_t        length;
62 	uint32_t	clean; /* If there was a clean shutdown, this is 1. */
63 	spdk_blob_id	super_blob;
64 
65 	uint32_t	cluster_size; /* In bytes */
66 
67 	uint32_t	used_page_mask_start; /* Offset from beginning of disk, in pages */
68 	uint32_t	used_page_mask_len; /* Count, in pages */
69 
70 	uint32_t	used_cluster_mask_start; /* Offset from beginning of disk, in pages */
71 	uint32_t	used_cluster_mask_len; /* Count, in pages */
72 
73 	uint32_t	md_start; /* Offset from beginning of disk, in pages */
74 	uint32_t	md_len; /* Count, in pages */
75 
76 	uint8_t		reserved[4036];
77 	uint32_t	crc;
78 } __attribute__((packed));
79 SPDK_STATIC_ASSERT(sizeof(struct spdk_bs_super_block_ver1) == 0x1000, "Invalid super block size");
80 
81 static struct spdk_blob *ut_blob_create_and_open(struct spdk_blob_store *bs,
82 		struct spdk_blob_opts *blob_opts);
83 static void ut_blob_close_and_delete(struct spdk_blob_store *bs, struct spdk_blob *blob);
84 static void suite_blob_setup(void);
85 static void suite_blob_cleanup(void);
86 
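/* get_value callback used with xattr options: looks up 'name' in the tables above
 * and returns the matching test value. */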
87 static void
88 _get_xattr_value(void *arg, const char *name,
89 		 const void **value, size_t *value_len)
90 {
91 	uint64_t i;
92 
93 	SPDK_CU_ASSERT_FATAL(value_len != NULL);
94 	SPDK_CU_ASSERT_FATAL(value != NULL);
95 	CU_ASSERT(arg == &g_ctx);
96 
97 	for (i = 0; i < sizeof(g_xattr_names) / sizeof(g_xattr_names[0]); i++) {
98 		if (!strcmp(name, g_xattr_names[i])) {
99 			*value_len = strlen(g_xattr_values[i]);
100 			*value = g_xattr_values[i];
101 			break;
102 		}
103 	}
104 }
105 
106 static void
107 _get_xattr_value_null(void *arg, const char *name,
108 		      const void **value, size_t *value_len)
109 {
110 	SPDK_CU_ASSERT_FATAL(value_len != NULL);
111 	SPDK_CU_ASSERT_FATAL(value != NULL);
112 	CU_ASSERT(arg == NULL);
113 
114 	*value_len = 0;
115 	*value = NULL;
116 }
117 
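/* Count the snapshots currently tracked by the blobstore. */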
118 static int
119 _get_snapshots_count(struct spdk_blob_store *bs)
120 {
121 	struct spdk_blob_list *snapshot = NULL;
122 	int count = 0;
123 
124 	TAILQ_FOREACH(snapshot, &bs->snapshots, link) {
125 		count += 1;
126 	}
127 
128 	return count;
129 }
130 
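/* Initialize blob options and apply the suite-wide extent table setting. */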
131 static void
132 ut_spdk_blob_opts_init(struct spdk_blob_opts *opts)
133 {
134 	spdk_blob_opts_init(opts, sizeof(*opts));
135 	opts->use_extent_table = g_use_extent_table;
136 }
137 
138 static void
139 bs_op_complete(void *cb_arg, int bserrno)
140 {
141 	g_bserrno = bserrno;
142 }
143 
144 static void
145 bs_op_with_handle_complete(void *cb_arg, struct spdk_blob_store *bs,
146 			   int bserrno)
147 {
148 	g_bs = bs;
149 	g_bserrno = bserrno;
150 }
151 
152 static void
153 blob_op_complete(void *cb_arg, int bserrno)
154 {
155 	g_bserrno = bserrno;
156 }
157 
158 static void
159 blob_op_with_id_complete(void *cb_arg, spdk_blob_id blobid, int bserrno)
160 {
161 	g_blobid = blobid;
162 	g_bserrno = bserrno;
163 }
164 
165 static void
166 blob_op_with_handle_complete(void *cb_arg, struct spdk_blob *blb, int bserrno)
167 {
168 	g_blob = blb;
169 	g_bserrno = bserrno;
170 }
171 
172 static void
173 blob_op_with_handle_complete2(void *cb_arg, struct spdk_blob *blob, int bserrno)
174 {
175 	if (g_blob == NULL) {
176 		g_blob = blob;
177 		g_bserrno = bserrno;
178 	} else {
179 		g_blob2 = blob;
180 		g_bserrno2 = bserrno;
181 	}
182 }
183 
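/* Cleanly unload the blobstore and load it back from the shared UT device;
 * *bs is updated to point at the reloaded blobstore. */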
184 static void
185 ut_bs_reload(struct spdk_blob_store **bs, struct spdk_bs_opts *opts)
186 {
187 	struct spdk_bs_dev *dev;
188 
189 	/* Unload the blob store */
190 	spdk_bs_unload(*bs, bs_op_complete, NULL);
191 	poll_threads();
192 	CU_ASSERT(g_bserrno == 0);
193 
194 	dev = init_dev();
195 	/* Load an existing blob store */
196 	spdk_bs_load(dev, opts, bs_op_with_handle_complete, NULL);
197 	poll_threads();
198 	CU_ASSERT(g_bserrno == 0);
199 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
200 	*bs = g_bs;
201 
202 	g_bserrno = -1;
203 }
204 
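/* Simulate a dirty shutdown by freeing the blobstore without unloading it,
 * then load it again from the same device so the recovery path runs. */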
205 static void
206 ut_bs_dirty_load(struct spdk_blob_store **bs, struct spdk_bs_opts *opts)
207 {
208 	struct spdk_bs_dev *dev;
209 
210 	/* Dirty shutdown */
211 	bs_free(*bs);
212 
213 	dev = init_dev();
214 	/* Load an existing blob store */
215 	spdk_bs_load(dev, opts, bs_op_with_handle_complete, NULL);
216 	poll_threads();
217 	CU_ASSERT(g_bserrno == 0);
218 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
219 	*bs = g_bs;
220 
221 	g_bserrno = -1;
222 }
223 
224 static void
225 blob_init(void)
226 {
227 	struct spdk_blob_store *bs;
228 	struct spdk_bs_dev *dev;
229 
230 	dev = init_dev();
231 
232 	/* should fail for an unsupported blocklen */
233 	dev->blocklen = 500;
234 	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
235 	poll_threads();
236 	CU_ASSERT(g_bserrno == -EINVAL);
237 
238 	dev = init_dev();
239 	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
240 	poll_threads();
241 	CU_ASSERT(g_bserrno == 0);
242 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
243 	bs = g_bs;
244 
245 	spdk_bs_unload(bs, bs_op_complete, NULL);
246 	poll_threads();
247 	CU_ASSERT(g_bserrno == 0);
248 	g_bs = NULL;
249 }
250 
251 static void
252 blob_super(void)
253 {
254 	struct spdk_blob_store *bs = g_bs;
255 	spdk_blob_id blobid;
256 	struct spdk_blob_opts blob_opts;
257 
258 	/* Get the super blob without having set one */
259 	spdk_bs_get_super(bs, blob_op_with_id_complete, NULL);
260 	poll_threads();
261 	CU_ASSERT(g_bserrno == -ENOENT);
262 	CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID);
263 
264 	/* Create a blob */
265 	ut_spdk_blob_opts_init(&blob_opts);
266 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
267 	poll_threads();
268 	CU_ASSERT(g_bserrno == 0);
269 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
270 	blobid = g_blobid;
271 
272 	/* Set the blob as the super blob */
273 	spdk_bs_set_super(bs, blobid, blob_op_complete, NULL);
274 	poll_threads();
275 	CU_ASSERT(g_bserrno == 0);
276 
277 	/* Get the super blob */
278 	spdk_bs_get_super(bs, blob_op_with_id_complete, NULL);
279 	poll_threads();
280 	CU_ASSERT(g_bserrno == 0);
281 	CU_ASSERT(blobid == g_blobid);
282 }
283 
284 static void
285 blob_open(void)
286 {
287 	struct spdk_blob_store *bs = g_bs;
288 	struct spdk_blob *blob;
289 	struct spdk_blob_opts blob_opts;
290 	spdk_blob_id blobid, blobid2;
291 
292 	ut_spdk_blob_opts_init(&blob_opts);
293 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
294 	poll_threads();
295 	CU_ASSERT(g_bserrno == 0);
296 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
297 	blobid = g_blobid;
298 
299 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
300 	poll_threads();
301 	CU_ASSERT(g_bserrno == 0);
302 	CU_ASSERT(g_blob != NULL);
303 	blob = g_blob;
304 
305 	blobid2 = spdk_blob_get_id(blob);
306 	CU_ASSERT(blobid == blobid2);
307 
308 	/* Try to open the blob again.  It should succeed. */
309 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
310 	poll_threads();
311 	CU_ASSERT(g_bserrno == 0);
312 	CU_ASSERT(blob == g_blob);
313 
314 	spdk_blob_close(blob, blob_op_complete, NULL);
315 	poll_threads();
316 	CU_ASSERT(g_bserrno == 0);
317 
318 	/*
319 	 * Close the blob a second time, releasing the second reference.  This
320 	 *  should succeed.
321 	 */
322 	blob = g_blob;
323 	spdk_blob_close(blob, blob_op_complete, NULL);
324 	poll_threads();
325 	CU_ASSERT(g_bserrno == 0);
326 
327 	/*
328 	 * Try to open the blob again.  It should succeed.  This tests the case
329 	 *  where the blob is opened, closed, then re-opened again.
330 	 */
331 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
332 	poll_threads();
333 	CU_ASSERT(g_bserrno == 0);
334 	CU_ASSERT(g_blob != NULL);
335 	blob = g_blob;
336 	spdk_blob_close(blob, blob_op_complete, NULL);
337 	poll_threads();
338 	CU_ASSERT(g_bserrno == 0);
339 
340 	/* Try to open the blob twice in succession.  This should return the same
341 	 * blob object.
342 	 */
343 	g_blob = NULL;
344 	g_blob2 = NULL;
345 	g_bserrno = -1;
346 	g_bserrno2 = -1;
347 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete2, NULL);
348 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete2, NULL);
349 	poll_threads();
350 	CU_ASSERT(g_bserrno == 0);
351 	CU_ASSERT(g_bserrno2 == 0);
352 	CU_ASSERT(g_blob != NULL);
353 	CU_ASSERT(g_blob2 != NULL);
354 	CU_ASSERT(g_blob == g_blob2);
355 
356 	g_bserrno = -1;
357 	spdk_blob_close(g_blob, blob_op_complete, NULL);
358 	poll_threads();
359 	CU_ASSERT(g_bserrno == 0);
360 
361 	ut_blob_close_and_delete(bs, g_blob);
362 }
363 
364 static void
365 blob_create(void)
366 {
367 	struct spdk_blob_store *bs = g_bs;
368 	struct spdk_blob *blob;
369 	struct spdk_blob_opts opts;
370 	spdk_blob_id blobid;
371 
372 	/* Create blob with 10 clusters */
373 
374 	ut_spdk_blob_opts_init(&opts);
375 	opts.num_clusters = 10;
376 
377 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
378 	poll_threads();
379 	CU_ASSERT(g_bserrno == 0);
380 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
381 	blobid = g_blobid;
382 
383 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
384 	poll_threads();
385 	CU_ASSERT(g_bserrno == 0);
386 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
387 	blob = g_blob;
388 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
389 
390 	spdk_blob_close(blob, blob_op_complete, NULL);
391 	poll_threads();
392 	CU_ASSERT(g_bserrno == 0);
393 
394 	/* Create blob with 0 clusters */
395 
396 	ut_spdk_blob_opts_init(&opts);
397 	opts.num_clusters = 0;
398 
399 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
400 	poll_threads();
401 	CU_ASSERT(g_bserrno == 0);
402 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
403 	blobid = g_blobid;
404 
405 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
406 	poll_threads();
407 	CU_ASSERT(g_bserrno == 0);
408 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
409 	blob = g_blob;
410 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
411 
412 	spdk_blob_close(blob, blob_op_complete, NULL);
413 	poll_threads();
414 	CU_ASSERT(g_bserrno == 0);
415 
416 	/* Create blob with default options (opts == NULL) */
417 
418 	spdk_bs_create_blob_ext(bs, NULL, blob_op_with_id_complete, NULL);
419 	poll_threads();
420 	CU_ASSERT(g_bserrno == 0);
421 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
422 	blobid = g_blobid;
423 
424 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
425 	poll_threads();
426 	CU_ASSERT(g_bserrno == 0);
427 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
428 	blob = g_blob;
429 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
430 
431 	spdk_blob_close(blob, blob_op_complete, NULL);
432 	poll_threads();
433 	CU_ASSERT(g_bserrno == 0);
434 
435 	/* Try to create blob with size larger than blobstore */
436 
437 	ut_spdk_blob_opts_init(&opts);
438 	opts.num_clusters = bs->total_clusters + 1;
439 
440 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
441 	poll_threads();
442 	CU_ASSERT(g_bserrno == -ENOSPC);
443 }
444 
445 /*
446  * Create and delete one blob in a loop over and over again.  This helps ensure
447  * that the internal bit masks tracking used clusters and md_pages are being
448  * tracked correctly.
449  */
450 static void
451 blob_create_loop(void)
452 {
453 	struct spdk_blob_store *bs = g_bs;
454 	struct spdk_blob_opts opts;
455 	uint32_t i, loop_count;
456 
457 	loop_count = 4 * spdk_max(spdk_bit_array_capacity(bs->used_md_pages),
458 				  spdk_bit_pool_capacity(bs->used_clusters));
459 
460 	for (i = 0; i < loop_count; i++) {
461 		ut_spdk_blob_opts_init(&opts);
462 		opts.num_clusters = 1;
463 		g_bserrno = -1;
464 		g_blobid = SPDK_BLOBID_INVALID;
465 		spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
466 		poll_threads();
467 		CU_ASSERT(g_bserrno == 0);
468 		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
469 		spdk_bs_delete_blob(bs, g_blobid, blob_op_complete, NULL);
470 		poll_threads();
471 		CU_ASSERT(g_bserrno == 0);
472 	}
473 }
474 
475 static void
476 blob_create_fail(void)
477 {
478 	struct spdk_blob_store *bs = g_bs;
479 	struct spdk_blob_opts opts;
480 	spdk_blob_id blobid;
481 	uint32_t used_blobids_count = spdk_bit_array_count_set(bs->used_blobids);
482 	uint32_t used_md_pages_count = spdk_bit_array_count_set(bs->used_md_pages);
483 
484 	/* NULL xattr get_value callback */
485 	ut_spdk_blob_opts_init(&opts);
486 	opts.xattrs.names = g_xattr_names;
487 	opts.xattrs.get_value = NULL;
488 	opts.xattrs.count = 1;
489 	opts.xattrs.ctx = &g_ctx;
490 
491 	blobid = spdk_bit_array_find_first_clear(bs->used_md_pages, 0);
492 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
493 	poll_threads();
494 	CU_ASSERT(g_bserrno == -EINVAL);
495 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
496 	CU_ASSERT(spdk_bit_array_count_set(bs->used_blobids) == used_blobids_count);
497 	CU_ASSERT(spdk_bit_array_count_set(bs->used_md_pages) == used_md_pages_count);
498 
499 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
500 	poll_threads();
501 	CU_ASSERT(g_bserrno == -ENOENT);
502 	SPDK_CU_ASSERT_FATAL(g_blob == NULL);
503 
504 	ut_bs_reload(&bs, NULL);
505 	CU_ASSERT(spdk_bit_array_count_set(bs->used_blobids) == used_blobids_count);
506 	CU_ASSERT(spdk_bit_array_count_set(bs->used_md_pages) == used_md_pages_count);
507 
508 	spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL);
509 	poll_threads();
510 	CU_ASSERT(g_blob == NULL);
511 	CU_ASSERT(g_bserrno == -ENOENT);
512 }
513 
514 static void
515 blob_create_internal(void)
516 {
517 	struct spdk_blob_store *bs = g_bs;
518 	struct spdk_blob *blob;
519 	struct spdk_blob_opts opts;
520 	struct spdk_blob_xattr_opts internal_xattrs;
521 	const void *value;
522 	size_t value_len;
523 	spdk_blob_id blobid;
524 	int rc;
525 
526 	/* Create blob with custom xattrs */
527 
528 	ut_spdk_blob_opts_init(&opts);
529 	blob_xattrs_init(&internal_xattrs);
530 	internal_xattrs.count = 3;
531 	internal_xattrs.names = g_xattr_names;
532 	internal_xattrs.get_value = _get_xattr_value;
533 	internal_xattrs.ctx = &g_ctx;
534 
535 	bs_create_blob(bs, &opts, &internal_xattrs, blob_op_with_id_complete, NULL);
536 	poll_threads();
537 	CU_ASSERT(g_bserrno == 0);
538 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
539 	blobid = g_blobid;
540 
541 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
542 	poll_threads();
543 	CU_ASSERT(g_bserrno == 0);
544 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
545 	blob = g_blob;
546 
547 	rc = blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len, true);
548 	CU_ASSERT(rc == 0);
549 	SPDK_CU_ASSERT_FATAL(value != NULL);
550 	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
551 	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);
552 
553 	rc = blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len, true);
554 	CU_ASSERT(rc == 0);
555 	SPDK_CU_ASSERT_FATAL(value != NULL);
556 	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
557 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);
558 
559 	rc = blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len, true);
560 	CU_ASSERT(rc == 0);
561 	SPDK_CU_ASSERT_FATAL(value != NULL);
562 	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
563 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);
564 
565 	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len);
566 	CU_ASSERT(rc != 0);
567 
568 	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len);
569 	CU_ASSERT(rc != 0);
570 
571 	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len);
572 	CU_ASSERT(rc != 0);
573 
574 	spdk_blob_close(blob, blob_op_complete, NULL);
575 	poll_threads();
576 	CU_ASSERT(g_bserrno == 0);
577 
578 	/* Create a blob with NULL internal options */
579 
580 	bs_create_blob(bs, NULL, NULL, blob_op_with_id_complete, NULL);
581 	poll_threads();
582 	CU_ASSERT(g_bserrno == 0);
583 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
584 	blobid = g_blobid;
585 
586 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
587 	poll_threads();
588 	CU_ASSERT(g_bserrno == 0);
589 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
590 	CU_ASSERT(TAILQ_FIRST(&g_blob->xattrs_internal) == NULL);
591 
592 	blob = g_blob;
593 
594 	spdk_blob_close(blob, blob_op_complete, NULL);
595 	poll_threads();
596 	CU_ASSERT(g_bserrno == 0);
597 }
598 
599 static void
600 blob_thin_provision(void)
601 {
602 	struct spdk_blob_store *bs;
603 	struct spdk_bs_dev *dev;
604 	struct spdk_blob *blob;
605 	struct spdk_blob_opts opts;
606 	struct spdk_bs_opts bs_opts;
607 	spdk_blob_id blobid;
608 
609 	dev = init_dev();
610 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
611 	snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");
612 
613 	/* Initialize a new blob store */
614 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
615 	poll_threads();
616 	CU_ASSERT(g_bserrno == 0);
617 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
618 
619 	bs = g_bs;
620 
621 	/* Create blob with thin provisioning enabled */
622 
623 	ut_spdk_blob_opts_init(&opts);
624 	opts.thin_provision = true;
625 	opts.num_clusters = 10;
626 
627 	blob = ut_blob_create_and_open(bs, &opts);
628 	blobid = spdk_blob_get_id(blob);
629 	CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
630 
631 	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
632 	CU_ASSERT(g_bserrno == 0);
633 
634 	/* Do not shut down cleanly.  This makes sure that when we load again
635 	 *  and try to recover a valid used_cluster map, the blobstore
636 	 *  ignores clusters with index 0, since those are unallocated.
637 	 */
638 	ut_bs_dirty_load(&bs, &bs_opts);
639 
640 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
641 	poll_threads();
642 	CU_ASSERT(g_bserrno == 0);
643 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
644 	blob = g_blob;
645 	CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
646 
647 	ut_blob_close_and_delete(bs, blob);
648 
649 	spdk_bs_unload(bs, bs_op_complete, NULL);
650 	poll_threads();
651 	CU_ASSERT(g_bserrno == 0);
652 	g_bs = NULL;
653 }
654 
655 static void
656 blob_snapshot(void)
657 {
658 	struct spdk_blob_store *bs = g_bs;
659 	struct spdk_blob *blob;
660 	struct spdk_blob *snapshot, *snapshot2;
661 	struct spdk_blob_bs_dev *blob_bs_dev;
662 	struct spdk_blob_opts opts;
663 	struct spdk_blob_xattr_opts xattrs;
664 	spdk_blob_id blobid;
665 	spdk_blob_id snapshotid;
666 	spdk_blob_id snapshotid2;
667 	const void *value;
668 	size_t value_len;
669 	int rc;
670 	spdk_blob_id ids[2];
671 	size_t count;
672 
673 	/* Create blob with 10 clusters */
674 	ut_spdk_blob_opts_init(&opts);
675 	opts.num_clusters = 10;
676 
677 	blob = ut_blob_create_and_open(bs, &opts);
678 	blobid = spdk_blob_get_id(blob);
679 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
680 
681 	/* Create snapshot from blob */
682 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 0);
683 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
684 	poll_threads();
685 	CU_ASSERT(g_bserrno == 0);
686 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
687 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
688 	snapshotid = g_blobid;
689 
690 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
691 	poll_threads();
692 	CU_ASSERT(g_bserrno == 0);
693 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
694 	snapshot = g_blob;
695 	CU_ASSERT(snapshot->data_ro == true);
696 	CU_ASSERT(snapshot->md_ro == true);
697 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10);
698 
699 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
700 	CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
701 	CU_ASSERT(spdk_mem_all_zero(blob->active.clusters,
702 				    blob->active.num_clusters * sizeof(blob->active.clusters[0])));
703 
704 	/* Try to create snapshot from clone with xattrs */
705 	xattrs.names = g_xattr_names;
706 	xattrs.get_value = _get_xattr_value;
707 	xattrs.count = 3;
708 	xattrs.ctx = &g_ctx;
709 	spdk_bs_create_snapshot(bs, blobid, &xattrs, blob_op_with_id_complete, NULL);
710 	poll_threads();
711 	CU_ASSERT(g_bserrno == 0);
712 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
713 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 2);
714 	snapshotid2 = g_blobid;
715 
716 	spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL);
	poll_threads();
717 	CU_ASSERT(g_bserrno == 0);
718 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
719 	snapshot2 = g_blob;
720 	CU_ASSERT(snapshot2->data_ro == true);
721 	CU_ASSERT(snapshot2->md_ro == true);
722 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot2) == 10);
723 
724 	/* Confirm that blob is backed by snapshot2 and snapshot2 is backed by snapshot */
725 	CU_ASSERT(snapshot->back_bs_dev == NULL);
726 	SPDK_CU_ASSERT_FATAL(blob->back_bs_dev != NULL);
727 	SPDK_CU_ASSERT_FATAL(snapshot2->back_bs_dev != NULL);
728 
729 	blob_bs_dev = (struct spdk_blob_bs_dev *)blob->back_bs_dev;
730 	CU_ASSERT(blob_bs_dev->blob == snapshot2);
731 
732 	blob_bs_dev = (struct spdk_blob_bs_dev *)snapshot2->back_bs_dev;
733 	CU_ASSERT(blob_bs_dev->blob == snapshot);
734 
735 	rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[0], &value, &value_len);
736 	CU_ASSERT(rc == 0);
737 	SPDK_CU_ASSERT_FATAL(value != NULL);
738 	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
739 	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);
740 
741 	rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[1], &value, &value_len);
742 	CU_ASSERT(rc == 0);
743 	SPDK_CU_ASSERT_FATAL(value != NULL);
744 	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
745 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);
746 
747 	rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[2], &value, &value_len);
748 	CU_ASSERT(rc == 0);
749 	SPDK_CU_ASSERT_FATAL(value != NULL);
750 	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
751 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);
752 
753 	/* Confirm that blob is clone of snapshot2, and snapshot2 is clone of snapshot */
754 	count = 2;
755 	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0);
756 	CU_ASSERT(count == 1);
757 	CU_ASSERT(ids[0] == blobid);
758 
759 	count = 2;
760 	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);
761 	CU_ASSERT(count == 1);
762 	CU_ASSERT(ids[0] == snapshotid2);
763 
764 	/* Try to create snapshot from snapshot */
765 	spdk_bs_create_snapshot(bs, snapshotid, NULL, blob_op_with_id_complete, NULL);
766 	poll_threads();
767 	CU_ASSERT(g_bserrno == -EINVAL);
768 	CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID);
769 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 2);
770 
771 	/* Delete blob and confirm that it is no longer on snapshot2 clone list */
772 	ut_blob_close_and_delete(bs, blob);
773 	count = 2;
774 	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0);
775 	CU_ASSERT(count == 0);
776 
777 	/* Delete snapshot2 and confirm that it is no longer on snapshot clone list */
778 	ut_blob_close_and_delete(bs, snapshot2);
779 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
780 	count = 2;
781 	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0);
782 	CU_ASSERT(count == 0);
783 
784 	ut_blob_close_and_delete(bs, snapshot);
785 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 0);
786 }
787 
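/* Verify that blob I/O issued while a snapshot is being created gets queued
 * (the blob is frozen) and is replayed successfully once the snapshot completes. */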
788 static void
789 blob_snapshot_freeze_io(void)
790 {
791 	struct spdk_io_channel *channel;
792 	struct spdk_bs_channel *bs_channel;
793 	struct spdk_blob_store *bs = g_bs;
794 	struct spdk_blob *blob;
795 	struct spdk_blob_opts opts;
796 	spdk_blob_id blobid;
797 	uint32_t num_of_pages = 10;
798 	uint8_t payload_read[num_of_pages * SPDK_BS_PAGE_SIZE];
799 	uint8_t payload_write[num_of_pages * SPDK_BS_PAGE_SIZE];
800 	uint8_t payload_zero[num_of_pages * SPDK_BS_PAGE_SIZE];
801 
802 	memset(payload_write, 0xE5, sizeof(payload_write));
803 	memset(payload_read, 0x00, sizeof(payload_read));
804 	memset(payload_zero, 0x00, sizeof(payload_zero));
805 
806 	/* Test freeze I/O during snapshot */
807 	channel = spdk_bs_alloc_io_channel(bs);
808 	bs_channel = spdk_io_channel_get_ctx(channel);
809 
810 	/* Create blob with 10 clusters */
811 	ut_spdk_blob_opts_init(&opts);
812 	opts.num_clusters = 10;
813 	opts.thin_provision = false;
814 
815 	blob = ut_blob_create_and_open(bs, &opts);
816 	blobid = spdk_blob_get_id(blob);
817 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
818 
819 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
820 
821 	/* This is implementation specific.
822 	 * Flag 'frozen_io' is set in _spdk_bs_snapshot_freeze_cpl callback.
823 	 * Four async I/O operations happen before that. */
824 	poll_thread_times(0, 5);
825 
826 	CU_ASSERT(TAILQ_EMPTY(&bs_channel->queued_io));
827 
828 	/* Blob I/O should be frozen here */
829 	CU_ASSERT(blob->frozen_refcnt == 1);
830 
831 	/* Write to the blob */
832 	spdk_blob_io_write(blob, channel, payload_write, 0, num_of_pages, blob_op_complete, NULL);
833 
834 	/* Verify that I/O is queued */
835 	CU_ASSERT(!TAILQ_EMPTY(&bs_channel->queued_io));
836 	/* Verify that the payload has not reached the disk yet; at this point the cluster map has already been handed off to the new snapshot */
837 	CU_ASSERT(blob->active.clusters[0] == 0);
838 
839 	/* Finish all operations including spdk_bs_create_snapshot */
840 	poll_threads();
841 
842 	/* Verify snapshot */
843 	CU_ASSERT(g_bserrno == 0);
844 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
845 
846 	/* Verify that the blob is no longer frozen (frozen_refcnt is back to 0) */
847 	CU_ASSERT(blob->frozen_refcnt == 0);
848 
849 	/* Verify that postponed I/O completed successfully by comparing payload */
850 	spdk_blob_io_read(blob, channel, payload_read, 0, num_of_pages, blob_op_complete, NULL);
851 	poll_threads();
852 	CU_ASSERT(g_bserrno == 0);
853 	CU_ASSERT(memcmp(payload_write, payload_read, num_of_pages * SPDK_BS_PAGE_SIZE) == 0);
854 
855 	spdk_bs_free_io_channel(channel);
856 	poll_threads();
857 
858 	ut_blob_close_and_delete(bs, blob);
859 }
860 
861 static void
862 blob_clone(void)
863 {
864 	struct spdk_blob_store *bs = g_bs;
865 	struct spdk_blob_opts opts;
866 	struct spdk_blob *blob, *snapshot, *clone;
867 	spdk_blob_id blobid, cloneid, snapshotid;
868 	struct spdk_blob_xattr_opts xattrs;
869 	const void *value;
870 	size_t value_len;
871 	int rc;
872 
873 	/* Create blob with 10 clusters */
874 
875 	ut_spdk_blob_opts_init(&opts);
876 	opts.num_clusters = 10;
877 
878 	blob = ut_blob_create_and_open(bs, &opts);
879 	blobid = spdk_blob_get_id(blob);
880 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
881 
882 	/* Create snapshot */
883 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
884 	poll_threads();
885 	CU_ASSERT(g_bserrno == 0);
886 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
887 	snapshotid = g_blobid;
888 
889 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
890 	poll_threads();
891 	CU_ASSERT(g_bserrno == 0);
892 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
893 	snapshot = g_blob;
894 	CU_ASSERT(snapshot->data_ro == true);
895 	CU_ASSERT(snapshot->md_ro == true);
896 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10);
897 
898 	spdk_blob_close(snapshot, blob_op_complete, NULL);
899 	poll_threads();
900 	CU_ASSERT(g_bserrno == 0);
901 
902 	/* Create clone from snapshot with xattrs */
903 	xattrs.names = g_xattr_names;
904 	xattrs.get_value = _get_xattr_value;
905 	xattrs.count = 3;
906 	xattrs.ctx = &g_ctx;
907 
908 	spdk_bs_create_clone(bs, snapshotid, &xattrs, blob_op_with_id_complete, NULL);
909 	poll_threads();
910 	CU_ASSERT(g_bserrno == 0);
911 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
912 	cloneid = g_blobid;
913 
914 	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
915 	poll_threads();
916 	CU_ASSERT(g_bserrno == 0);
917 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
918 	clone = g_blob;
919 	CU_ASSERT(clone->data_ro == false);
920 	CU_ASSERT(clone->md_ro == false);
921 	CU_ASSERT(spdk_blob_get_num_clusters(clone) == 10);
922 
923 	rc = spdk_blob_get_xattr_value(clone, g_xattr_names[0], &value, &value_len);
924 	CU_ASSERT(rc == 0);
925 	SPDK_CU_ASSERT_FATAL(value != NULL);
926 	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
927 	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);
928 
929 	rc = spdk_blob_get_xattr_value(clone, g_xattr_names[1], &value, &value_len);
930 	CU_ASSERT(rc == 0);
931 	SPDK_CU_ASSERT_FATAL(value != NULL);
932 	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
933 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);
934 
935 	rc = spdk_blob_get_xattr_value(clone, g_xattr_names[2], &value, &value_len);
936 	CU_ASSERT(rc == 0);
937 	SPDK_CU_ASSERT_FATAL(value != NULL);
938 	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
939 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);
940 
941 
942 	spdk_blob_close(clone, blob_op_complete, NULL);
943 	poll_threads();
944 	CU_ASSERT(g_bserrno == 0);
945 
946 	/* Try to create a clone from a blob that is not read-only */
947 	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
948 	poll_threads();
949 	CU_ASSERT(g_bserrno == -EINVAL);
950 	CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID);
951 
952 	/* Mark blob as read only */
953 	spdk_blob_set_read_only(blob);
954 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
955 	poll_threads();
956 	CU_ASSERT(g_bserrno == 0);
957 
958 	/* Create clone from read only blob */
959 	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
960 	poll_threads();
961 	CU_ASSERT(g_bserrno == 0);
962 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
963 	cloneid = g_blobid;
964 
965 	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
966 	poll_threads();
967 	CU_ASSERT(g_bserrno == 0);
968 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
969 	clone = g_blob;
970 	CU_ASSERT(clone->data_ro == false);
971 	CU_ASSERT(clone->md_ro == false);
972 	CU_ASSERT(spdk_blob_get_num_clusters(clone) == 10);
973 
974 	ut_blob_close_and_delete(bs, clone);
975 	ut_blob_close_and_delete(bs, blob);
976 }
977 
978 static void
979 _blob_inflate(bool decouple_parent)
980 {
981 	struct spdk_blob_store *bs = g_bs;
982 	struct spdk_blob_opts opts;
983 	struct spdk_blob *blob, *snapshot;
984 	spdk_blob_id blobid, snapshotid;
985 	struct spdk_io_channel *channel;
986 	uint64_t free_clusters;
987 
988 	channel = spdk_bs_alloc_io_channel(bs);
989 	SPDK_CU_ASSERT_FATAL(channel != NULL);
990 
991 	/* Create blob with 10 clusters */
992 
993 	ut_spdk_blob_opts_init(&opts);
994 	opts.num_clusters = 10;
995 	opts.thin_provision = true;
996 
997 	blob = ut_blob_create_and_open(bs, &opts);
998 	blobid = spdk_blob_get_id(blob);
999 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
1000 	CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == true);
1001 
1002 	/* 1) Blob with no parent */
1003 	if (decouple_parent) {
1004 		/* Decouple parent of blob with no parent (should fail) */
1005 		spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
1006 		poll_threads();
1007 		CU_ASSERT(g_bserrno != 0);
1008 	} else {
1009 		/* Inflating a thin blob with no parent should make it thick */
1010 		spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
1011 		poll_threads();
1012 		CU_ASSERT(g_bserrno == 0);
1013 		CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == false);
1014 	}
1015 
1016 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
1017 	poll_threads();
1018 	CU_ASSERT(g_bserrno == 0);
1019 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
1020 	snapshotid = g_blobid;
1021 
1022 	CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == true);
1023 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
1024 
1025 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
1026 	poll_threads();
1027 	CU_ASSERT(g_bserrno == 0);
1028 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
1029 	snapshot = g_blob;
1030 	CU_ASSERT(snapshot->data_ro == true);
1031 	CU_ASSERT(snapshot->md_ro == true);
1032 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10);
1033 
1034 	spdk_blob_close(snapshot, blob_op_complete, NULL);
1035 	poll_threads();
1036 	CU_ASSERT(g_bserrno == 0);
1037 
1038 	free_clusters = spdk_bs_free_cluster_count(bs);
1039 
1040 	/* 2) Blob with parent */
1041 	if (!decouple_parent) {
1042 		/* Do full blob inflation */
1043 		spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
1044 		poll_threads();
1045 		CU_ASSERT(g_bserrno == 0);
1046 		/* all 10 clusters should be allocated */
1047 		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 10);
1048 	} else {
1049 		/* Decouple parent of blob */
1050 		spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
1051 		poll_threads();
1052 		CU_ASSERT(g_bserrno == 0);
1053 		/* when only parent is removed, none of the clusters should be allocated */
1054 		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters);
1055 	}
1056 
1057 	/* Now, it should be possible to delete snapshot */
1058 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
1059 	poll_threads();
1060 	CU_ASSERT(g_bserrno == 0);
1061 
1062 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
1063 	CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == decouple_parent);
1064 
1065 	spdk_bs_free_io_channel(channel);
1066 	poll_threads();
1067 
1068 	ut_blob_close_and_delete(bs, blob);
1069 }
1070 
1071 static void
1072 blob_inflate(void)
1073 {
1074 	_blob_inflate(false);
1075 	_blob_inflate(true);
1076 }
1077 
1078 static void
1079 blob_delete(void)
1080 {
1081 	struct spdk_blob_store *bs = g_bs;
1082 	struct spdk_blob_opts blob_opts;
1083 	spdk_blob_id blobid;
1084 
1085 	/* Create a blob and then delete it. */
1086 	ut_spdk_blob_opts_init(&blob_opts);
1087 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
1088 	poll_threads();
1089 	CU_ASSERT(g_bserrno == 0);
1090 	CU_ASSERT(g_blobid > 0);
1091 	blobid = g_blobid;
1092 
1093 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
1094 	poll_threads();
1095 	CU_ASSERT(g_bserrno == 0);
1096 
1097 	/* Try to open the blob */
1098 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
1099 	poll_threads();
1100 	CU_ASSERT(g_bserrno == -ENOENT);
1101 }
1102 
1103 static void
1104 blob_resize_test(void)
1105 {
1106 	struct spdk_blob_store *bs = g_bs;
1107 	struct spdk_blob *blob;
1108 	uint64_t free_clusters;
1109 
1110 	free_clusters = spdk_bs_free_cluster_count(bs);
1111 
1112 	blob = ut_blob_create_and_open(bs, NULL);
1113 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
1114 
1115 	/* Confirm that resize fails if blob is marked read-only. */
1116 	blob->md_ro = true;
1117 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
1118 	poll_threads();
1119 	CU_ASSERT(g_bserrno == -EPERM);
1120 	blob->md_ro = false;
1121 
1122 	/* The blob started at 0 clusters. Resize it to be 5. */
1123 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
1124 	poll_threads();
1125 	CU_ASSERT(g_bserrno == 0);
1126 	CU_ASSERT((free_clusters - 5) == spdk_bs_free_cluster_count(bs));
1127 
1128 	/* Shrink the blob to 3 clusters. This will not actually release
1129 	 * the old clusters until the blob is synced.
1130 	 */
1131 	spdk_blob_resize(blob, 3, blob_op_complete, NULL);
1132 	poll_threads();
1133 	CU_ASSERT(g_bserrno == 0);
1134 	/* Verify there are still 5 clusters in use */
1135 	CU_ASSERT((free_clusters - 5) == spdk_bs_free_cluster_count(bs));
1136 
1137 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
1138 	poll_threads();
1139 	CU_ASSERT(g_bserrno == 0);
1140 	/* Now there are only 3 clusters in use */
1141 	CU_ASSERT((free_clusters - 3) == spdk_bs_free_cluster_count(bs));
1142 
1143 	/* Resize the blob to be 10 clusters. Growth takes effect immediately. */
1144 	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
1145 	poll_threads();
1146 	CU_ASSERT(g_bserrno == 0);
1147 	CU_ASSERT((free_clusters - 10) == spdk_bs_free_cluster_count(bs));
1148 
1149 	/* Try to resize the blob to size larger than blobstore. */
1150 	spdk_blob_resize(blob, bs->total_clusters + 1, blob_op_complete, NULL);
1151 	poll_threads();
1152 	CU_ASSERT(g_bserrno == -ENOSPC);
1153 
1154 	ut_blob_close_and_delete(bs, blob);
1155 }
1156 
1157 static void
1158 blob_read_only(void)
1159 {
1160 	struct spdk_blob_store *bs;
1161 	struct spdk_bs_dev *dev;
1162 	struct spdk_blob *blob;
1163 	struct spdk_bs_opts opts;
1164 	spdk_blob_id blobid;
1165 	int rc;
1166 
1167 	dev = init_dev();
1168 	spdk_bs_opts_init(&opts, sizeof(opts));
1169 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
1170 
1171 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
1172 	poll_threads();
1173 	CU_ASSERT(g_bserrno == 0);
1174 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
1175 	bs = g_bs;
1176 
1177 	blob = ut_blob_create_and_open(bs, NULL);
1178 	blobid = spdk_blob_get_id(blob);
1179 
1180 	rc = spdk_blob_set_read_only(blob);
1181 	CU_ASSERT(rc == 0);
1182 
1183 	CU_ASSERT(blob->data_ro == false);
1184 	CU_ASSERT(blob->md_ro == false);
1185 
1186 	spdk_blob_sync_md(blob, bs_op_complete, NULL);
1187 	poll_threads();
1188 
1189 	CU_ASSERT(blob->data_ro == true);
1190 	CU_ASSERT(blob->md_ro == true);
1191 	CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);
1192 
1193 	spdk_blob_close(blob, blob_op_complete, NULL);
1194 	poll_threads();
1195 	CU_ASSERT(g_bserrno == 0);
1196 
1197 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
1198 	poll_threads();
1199 	CU_ASSERT(g_bserrno == 0);
1200 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
1201 	blob = g_blob;
1202 
1203 	CU_ASSERT(blob->data_ro == true);
1204 	CU_ASSERT(blob->md_ro == true);
1205 	CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);
1206 
1207 	spdk_blob_close(blob, blob_op_complete, NULL);
1208 	poll_threads();
1209 	CU_ASSERT(g_bserrno == 0);
1210 
1211 	ut_bs_reload(&bs, &opts);
1212 
1213 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
1214 	poll_threads();
1215 	CU_ASSERT(g_bserrno == 0);
1216 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
1217 	blob = g_blob;
1218 
1219 	CU_ASSERT(blob->data_ro == true);
1220 	CU_ASSERT(blob->md_ro == true);
1221 	CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);
1222 
1223 	ut_blob_close_and_delete(bs, blob);
1224 
1225 	spdk_bs_unload(bs, bs_op_complete, NULL);
1226 	poll_threads();
1227 	CU_ASSERT(g_bserrno == 0);
1228 }
1229 
1230 static void
1231 channel_ops(void)
1232 {
1233 	struct spdk_blob_store *bs = g_bs;
1234 	struct spdk_io_channel *channel;
1235 
1236 	channel = spdk_bs_alloc_io_channel(bs);
1237 	CU_ASSERT(channel != NULL);
1238 
1239 	spdk_bs_free_io_channel(channel);
1240 	poll_threads();
1241 }
1242 
1243 static void
1244 blob_write(void)
1245 {
1246 	struct spdk_blob_store *bs = g_bs;
1247 	struct spdk_blob *blob = g_blob;
1248 	struct spdk_io_channel *channel;
1249 	uint64_t pages_per_cluster;
1250 	uint8_t payload[10 * 4096];
1251 
1252 	pages_per_cluster = spdk_bs_get_cluster_size(bs) / spdk_bs_get_page_size(bs);
1253 
1254 	channel = spdk_bs_alloc_io_channel(bs);
1255 	CU_ASSERT(channel != NULL);
1256 
1257 	/* Write to a blob with 0 size */
1258 	spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL);
1259 	poll_threads();
1260 	CU_ASSERT(g_bserrno == -EINVAL);
1261 
1262 	/* Resize the blob */
1263 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
1264 	poll_threads();
1265 	CU_ASSERT(g_bserrno == 0);
1266 
1267 	/* Confirm that write fails if blob is marked read-only. */
1268 	blob->data_ro = true;
1269 	spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL);
1270 	poll_threads();
1271 	CU_ASSERT(g_bserrno == -EPERM);
1272 	blob->data_ro = false;
1273 
1274 	/* Write to the blob */
1275 	spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL);
1276 	poll_threads();
1277 	CU_ASSERT(g_bserrno == 0);
1278 
1279 	/* Write starting beyond the end */
1280 	spdk_blob_io_write(blob, channel, payload, 5 * pages_per_cluster, 1, blob_op_complete,
1281 			   NULL);
1282 	poll_threads();
1283 	CU_ASSERT(g_bserrno == -EINVAL);
1284 
1285 	/* Write starting at a valid location but going off the end */
1286 	spdk_blob_io_write(blob, channel, payload, 4 * pages_per_cluster, pages_per_cluster + 1,
1287 			   blob_op_complete, NULL);
1288 	poll_threads();
1289 	CU_ASSERT(g_bserrno == -EINVAL);
1290 
1291 	spdk_bs_free_io_channel(channel);
1292 	poll_threads();
1293 }
1294 
1295 static void
1296 blob_read(void)
1297 {
1298 	struct spdk_blob_store *bs = g_bs;
1299 	struct spdk_blob *blob = g_blob;
1300 	struct spdk_io_channel *channel;
1301 	uint64_t pages_per_cluster;
1302 	uint8_t payload[10 * 4096];
1303 
1304 	pages_per_cluster = spdk_bs_get_cluster_size(bs) / spdk_bs_get_page_size(bs);
1305 
1306 	channel = spdk_bs_alloc_io_channel(bs);
1307 	CU_ASSERT(channel != NULL);
1308 
1309 	/* Read from a blob with 0 size */
1310 	spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL);
1311 	poll_threads();
1312 	CU_ASSERT(g_bserrno == -EINVAL);
1313 
1314 	/* Resize the blob */
1315 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
1316 	poll_threads();
1317 	CU_ASSERT(g_bserrno == 0);
1318 
1319 	/* Confirm that read passes if blob is marked read-only. */
1320 	blob->data_ro = true;
1321 	spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL);
1322 	poll_threads();
1323 	CU_ASSERT(g_bserrno == 0);
1324 	blob->data_ro = false;
1325 
1326 	/* Read from the blob */
1327 	spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL);
1328 	poll_threads();
1329 	CU_ASSERT(g_bserrno == 0);
1330 
1331 	/* Read starting beyond the end */
1332 	spdk_blob_io_read(blob, channel, payload, 5 * pages_per_cluster, 1, blob_op_complete,
1333 			  NULL);
1334 	poll_threads();
1335 	CU_ASSERT(g_bserrno == -EINVAL);
1336 
1337 	/* Read starting at a valid location but going off the end */
1338 	spdk_blob_io_read(blob, channel, payload, 4 * pages_per_cluster, pages_per_cluster + 1,
1339 			  blob_op_complete, NULL);
1340 	poll_threads();
1341 	CU_ASSERT(g_bserrno == -EINVAL);
1342 
1343 	spdk_bs_free_io_channel(channel);
1344 	poll_threads();
1345 }
1346 
1347 static void
1348 blob_rw_verify(void)
1349 {
1350 	struct spdk_blob_store *bs = g_bs;
1351 	struct spdk_blob *blob = g_blob;
1352 	struct spdk_io_channel *channel;
1353 	uint8_t payload_read[10 * 4096];
1354 	uint8_t payload_write[10 * 4096];
1355 
1356 	channel = spdk_bs_alloc_io_channel(bs);
1357 	CU_ASSERT(channel != NULL);
1358 
1359 	spdk_blob_resize(blob, 32, blob_op_complete, NULL);
1360 	poll_threads();
1361 	CU_ASSERT(g_bserrno == 0);
1362 
1363 	memset(payload_write, 0xE5, sizeof(payload_write));
1364 	spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
1365 	poll_threads();
1366 	CU_ASSERT(g_bserrno == 0);
1367 
1368 	memset(payload_read, 0x00, sizeof(payload_read));
1369 	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
1370 	poll_threads();
1371 	CU_ASSERT(g_bserrno == 0);
1372 	CU_ASSERT(memcmp(payload_write, payload_read, 4 * 4096) == 0);
1373 
1374 	spdk_bs_free_io_channel(channel);
1375 	poll_threads();
1376 }
1377 
1378 static void
1379 blob_rw_verify_iov(void)
1380 {
1381 	struct spdk_blob_store *bs = g_bs;
1382 	struct spdk_blob *blob;
1383 	struct spdk_io_channel *channel;
1384 	uint8_t payload_read[10 * 4096];
1385 	uint8_t payload_write[10 * 4096];
1386 	struct iovec iov_read[3];
1387 	struct iovec iov_write[3];
1388 	void *buf;
1389 
1390 	channel = spdk_bs_alloc_io_channel(bs);
1391 	CU_ASSERT(channel != NULL);
1392 
1393 	blob = ut_blob_create_and_open(bs, NULL);
1394 
1395 	spdk_blob_resize(blob, 2, blob_op_complete, NULL);
1396 	poll_threads();
1397 	CU_ASSERT(g_bserrno == 0);
1398 
1399 	/*
1400 	 * Manually adjust the offset of the blob's second cluster.  This allows
1401 	 *  us to make sure that the readv/write code correctly accounts for I/O
1402 	 *  that cross cluster boundaries.  Start by asserting that the allocated
1403 	 *  clusters are where we expect before modifying the second cluster.
1404 	 */
1405 	CU_ASSERT(blob->active.clusters[0] == 1 * 256);
1406 	CU_ASSERT(blob->active.clusters[1] == 2 * 256);
1407 	blob->active.clusters[1] = 3 * 256;
1408 
1409 	memset(payload_write, 0xE5, sizeof(payload_write));
1410 	iov_write[0].iov_base = payload_write;
1411 	iov_write[0].iov_len = 1 * 4096;
1412 	iov_write[1].iov_base = payload_write + 1 * 4096;
1413 	iov_write[1].iov_len = 5 * 4096;
1414 	iov_write[2].iov_base = payload_write + 6 * 4096;
1415 	iov_write[2].iov_len = 4 * 4096;
1416 	/*
1417 	 * Choose a page offset just before the cluster boundary.  The first 6 pages of payload
1418 	 *  will get written to the first cluster, the last 4 to the second cluster.
1419 	 */
1420 	spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
1421 	poll_threads();
1422 	CU_ASSERT(g_bserrno == 0);
1423 
1424 	memset(payload_read, 0xAA, sizeof(payload_read));
1425 	iov_read[0].iov_base = payload_read;
1426 	iov_read[0].iov_len = 3 * 4096;
1427 	iov_read[1].iov_base = payload_read + 3 * 4096;
1428 	iov_read[1].iov_len = 4 * 4096;
1429 	iov_read[2].iov_base = payload_read + 7 * 4096;
1430 	iov_read[2].iov_len = 3 * 4096;
1431 	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
1432 	poll_threads();
1433 	CU_ASSERT(g_bserrno == 0);
1434 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
1435 
1436 	buf = calloc(1, 256 * 4096);
1437 	SPDK_CU_ASSERT_FATAL(buf != NULL);
1438 	/* Check that cluster 2 on "disk" was not modified. */
1439 	CU_ASSERT(memcmp(buf, &g_dev_buffer[512 * 4096], 256 * 4096) == 0);
1440 	free(buf);
1441 
1442 	spdk_blob_close(blob, blob_op_complete, NULL);
1443 	poll_threads();
1444 	CU_ASSERT(g_bserrno == 0);
1445 
1446 	spdk_bs_free_io_channel(channel);
1447 	poll_threads();
1448 }
1449 
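/* Count the request sets currently on the channel's reqs free list.  Used by the
 * ENOMEM test below to confirm that a failed I/O does not leak request sets. */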
1450 static uint32_t
1451 bs_channel_get_req_count(struct spdk_io_channel *_channel)
1452 {
1453 	struct spdk_bs_channel *channel = spdk_io_channel_get_ctx(_channel);
1454 	struct spdk_bs_request_set *set;
1455 	uint32_t count = 0;
1456 
1457 	TAILQ_FOREACH(set, &channel->reqs, link) {
1458 		count++;
1459 	}
1460 
1461 	return count;
1462 }
1463 
1464 static void
1465 blob_rw_verify_iov_nomem(void)
1466 {
1467 	struct spdk_blob_store *bs = g_bs;
1468 	struct spdk_blob *blob = g_blob;
1469 	struct spdk_io_channel *channel;
1470 	uint8_t payload_write[10 * 4096];
1471 	struct iovec iov_write[3];
1472 	uint32_t req_count;
1473 
1474 	channel = spdk_bs_alloc_io_channel(bs);
1475 	CU_ASSERT(channel != NULL);
1476 
1477 	spdk_blob_resize(blob, 2, blob_op_complete, NULL);
1478 	poll_threads();
1479 	CU_ASSERT(g_bserrno == 0);
1480 
1481 	/*
1482 	 * Choose a page offset just before the cluster boundary.  The first 6 pages of payload
1483 	 *  will get written to the first cluster, the last 4 to the second cluster.
1484 	 */
1485 	iov_write[0].iov_base = payload_write;
1486 	iov_write[0].iov_len = 1 * 4096;
1487 	iov_write[1].iov_base = payload_write + 1 * 4096;
1488 	iov_write[1].iov_len = 5 * 4096;
1489 	iov_write[2].iov_base = payload_write + 6 * 4096;
1490 	iov_write[2].iov_len = 4 * 4096;
1491 	MOCK_SET(calloc, NULL);
1492 	req_count = bs_channel_get_req_count(channel);
1493 	spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
1494 	poll_threads();
1495 	CU_ASSERT(g_bserrno == -ENOMEM);
1496 	CU_ASSERT(req_count == bs_channel_get_req_count(channel));
1497 	MOCK_CLEAR(calloc);
1498 
1499 	spdk_bs_free_io_channel(channel);
1500 	poll_threads();
1501 }
1502 
1503 static void
1504 blob_rw_iov_read_only(void)
1505 {
1506 	struct spdk_blob_store *bs = g_bs;
1507 	struct spdk_blob *blob = g_blob;
1508 	struct spdk_io_channel *channel;
1509 	uint8_t payload_read[4096];
1510 	uint8_t payload_write[4096];
1511 	struct iovec iov_read;
1512 	struct iovec iov_write;
1513 
1514 	channel = spdk_bs_alloc_io_channel(bs);
1515 	CU_ASSERT(channel != NULL);
1516 
1517 	spdk_blob_resize(blob, 2, blob_op_complete, NULL);
1518 	poll_threads();
1519 	CU_ASSERT(g_bserrno == 0);
1520 
1521 	/* Verify that writev fails if the data_ro flag is set. */
1522 	blob->data_ro = true;
1523 	iov_write.iov_base = payload_write;
1524 	iov_write.iov_len = sizeof(payload_write);
1525 	spdk_blob_io_writev(blob, channel, &iov_write, 1, 0, 1, blob_op_complete, NULL);
1526 	poll_threads();
1527 	CU_ASSERT(g_bserrno == -EPERM);
1528 
1529 	/* Verify that reads pass if data_ro flag is set. */
1530 	iov_read.iov_base = payload_read;
1531 	iov_read.iov_len = sizeof(payload_read);
1532 	spdk_blob_io_readv(blob, channel, &iov_read, 1, 0, 1, blob_op_complete, NULL);
1533 	poll_threads();
1534 	CU_ASSERT(g_bserrno == 0);
1535 
1536 	spdk_bs_free_io_channel(channel);
1537 	poll_threads();
1538 }
1539 
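/* Read helper that issues one page at a time so the request is never split internally;
 * the status of the last (or first failing) read is passed to cb_fn.  A matching write
 * helper follows. */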
1540 static void
1541 _blob_io_read_no_split(struct spdk_blob *blob, struct spdk_io_channel *channel,
1542 		       uint8_t *payload, uint64_t offset, uint64_t length,
1543 		       spdk_blob_op_complete cb_fn, void *cb_arg)
1544 {
1545 	uint64_t i;
1546 	uint8_t *buf;
1547 	uint64_t page_size = spdk_bs_get_page_size(blob->bs);
1548 
1549 	/* To be sure that the operation is NOT split, read one page at a time */
1550 	buf = payload;
1551 	for (i = 0; i < length; i++) {
1552 		spdk_blob_io_read(blob, channel, buf, i + offset, 1, blob_op_complete, NULL);
1553 		poll_threads();
1554 		if (g_bserrno != 0) {
1555 			/* Pass the error code up */
1556 			break;
1557 		}
1558 		buf += page_size;
1559 	}
1560 
1561 	cb_fn(cb_arg, g_bserrno);
1562 }
1563 
1564 static void
1565 _blob_io_write_no_split(struct spdk_blob *blob, struct spdk_io_channel *channel,
1566 			uint8_t *payload, uint64_t offset, uint64_t length,
1567 			spdk_blob_op_complete cb_fn, void *cb_arg)
1568 {
1569 	uint64_t i;
1570 	uint8_t *buf;
1571 	uint64_t page_size = spdk_bs_get_page_size(blob->bs);
1572 
1573 	/* To be sure that the operation is NOT split, write one page at a time */
1574 	buf = payload;
1575 	for (i = 0; i < length; i++) {
1576 		spdk_blob_io_write(blob, channel, buf, i + offset, 1, blob_op_complete, NULL);
1577 		poll_threads();
1578 		if (g_bserrno != 0) {
1579 			/* Pass the error code up */
1580 			break;
1581 		}
1582 		buf += page_size;
1583 	}
1584 
1585 	cb_fn(cb_arg, g_bserrno);
1586 }
1587 
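/* Write a 5-cluster payload with the normal (split) path and read it back one page at
 * a time, then do the reverse, to confirm both paths see identical data. */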
1588 static void
1589 blob_operation_split_rw(void)
1590 {
1591 	struct spdk_blob_store *bs = g_bs;
1592 	struct spdk_blob *blob;
1593 	struct spdk_io_channel *channel;
1594 	struct spdk_blob_opts opts;
1595 	uint64_t cluster_size;
1596 
1597 	uint64_t payload_size;
1598 	uint8_t *payload_read;
1599 	uint8_t *payload_write;
1600 	uint8_t *payload_pattern;
1601 
1602 	uint64_t page_size;
1603 	uint64_t pages_per_cluster;
1604 	uint64_t pages_per_payload;
1605 
1606 	uint64_t i;
1607 
1608 	cluster_size = spdk_bs_get_cluster_size(bs);
1609 	page_size = spdk_bs_get_page_size(bs);
1610 	pages_per_cluster = cluster_size / page_size;
1611 	pages_per_payload = pages_per_cluster * 5;
1612 	payload_size = cluster_size * 5;
1613 
1614 	payload_read = malloc(payload_size);
1615 	SPDK_CU_ASSERT_FATAL(payload_read != NULL);
1616 
1617 	payload_write = malloc(payload_size);
1618 	SPDK_CU_ASSERT_FATAL(payload_write != NULL);
1619 
1620 	payload_pattern = malloc(payload_size);
1621 	SPDK_CU_ASSERT_FATAL(payload_pattern != NULL);
1622 
1623 	/* Prepare a pattern to write: 0xFF fill, with the first word of each page set to the page index + 1 */
1624 	memset(payload_pattern, 0xFF, payload_size);
1625 	for (i = 0; i < pages_per_payload; i++) {
1626 		*((uint64_t *)(payload_pattern + page_size * i)) = (i + 1);
1627 	}
1628 
1629 	channel = spdk_bs_alloc_io_channel(bs);
1630 	SPDK_CU_ASSERT_FATAL(channel != NULL);
1631 
1632 	/* Create blob */
1633 	ut_spdk_blob_opts_init(&opts);
1634 	opts.thin_provision = false;
1635 	opts.num_clusters = 5;
1636 
1637 	blob = ut_blob_create_and_open(bs, &opts);
1638 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
1639 
1640 	/* Initial read should return zeroed payload */
1641 	memset(payload_read, 0xFF, payload_size);
1642 	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
1643 	poll_threads();
1644 	CU_ASSERT(g_bserrno == 0);
1645 	CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size));
1646 
1647 	/* Fill whole blob except last page */
1648 	spdk_blob_io_write(blob, channel, payload_pattern, 0, pages_per_payload - 1,
1649 			   blob_op_complete, NULL);
1650 	poll_threads();
1651 	CU_ASSERT(g_bserrno == 0);
1652 
1653 	/* Write last page with a pattern */
1654 	spdk_blob_io_write(blob, channel, payload_pattern, pages_per_payload - 1, 1,
1655 			   blob_op_complete, NULL);
1656 	poll_threads();
1657 	CU_ASSERT(g_bserrno == 0);
1658 
1659 	/* Read whole blob and check consistency */
1660 	memset(payload_read, 0xFF, payload_size);
1661 	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
1662 	poll_threads();
1663 	CU_ASSERT(g_bserrno == 0);
1664 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size - page_size) == 0);
1665 	CU_ASSERT(memcmp(payload_pattern, payload_read + payload_size - page_size, page_size) == 0);
1666 
1667 	/* Fill whole blob except first page */
1668 	spdk_blob_io_write(blob, channel, payload_pattern, 1, pages_per_payload - 1,
1669 			   blob_op_complete, NULL);
1670 	poll_threads();
1671 	CU_ASSERT(g_bserrno == 0);
1672 
1673 	/* Write first page with a pattern */
1674 	spdk_blob_io_write(blob, channel, payload_pattern, 0, 1,
1675 			   blob_op_complete, NULL);
1676 	poll_threads();
1677 	CU_ASSERT(g_bserrno == 0);
1678 
1679 	/* Read whole blob and check consistency */
1680 	memset(payload_read, 0xFF, payload_size);
1681 	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
1682 	poll_threads();
1683 	CU_ASSERT(g_bserrno == 0);
1684 	CU_ASSERT(memcmp(payload_pattern, payload_read + page_size, payload_size - page_size) == 0);
1685 	CU_ASSERT(memcmp(payload_pattern, payload_read, page_size) == 0);
1686 
1687 
1688 	/* Fill whole blob with a pattern (5 clusters) */
1689 
1690 	/* 1. Read test. */
1691 	_blob_io_write_no_split(blob, channel, payload_pattern, 0, pages_per_payload,
1692 				blob_op_complete, NULL);
1693 	poll_threads();
1694 	CU_ASSERT(g_bserrno == 0);
1695 
1696 	memset(payload_read, 0xFF, payload_size);
1697 	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
1698 	poll_threads();
1699 	poll_threads();
1700 	CU_ASSERT(g_bserrno == 0);
1701 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);
1702 
1703 	/* 2. Write test. */
1704 	spdk_blob_io_write(blob, channel, payload_pattern, 0, pages_per_payload,
1705 			   blob_op_complete, NULL);
1706 	poll_threads();
1707 	CU_ASSERT(g_bserrno == 0);
1708 
1709 	memset(payload_read, 0xFF, payload_size);
1710 	_blob_io_read_no_split(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
1711 	poll_threads();
1712 	CU_ASSERT(g_bserrno == 0);
1713 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);
1714 
1715 	spdk_bs_free_io_channel(channel);
1716 	poll_threads();
1717 
1718 	g_blob = NULL;
1719 	g_blobid = 0;
1720 
1721 	free(payload_read);
1722 	free(payload_write);
1723 	free(payload_pattern);
1724 
1725 	ut_blob_close_and_delete(bs, blob);
1726 }
1727 
1728 static void
1729 blob_operation_split_rw_iov(void)
1730 {
1731 	struct spdk_blob_store *bs = g_bs;
1732 	struct spdk_blob *blob;
1733 	struct spdk_io_channel *channel;
1734 	struct spdk_blob_opts opts;
1735 	uint64_t cluster_size;
1736 
1737 	uint64_t payload_size;
1738 	uint8_t *payload_read;
1739 	uint8_t *payload_write;
1740 	uint8_t *payload_pattern;
1741 
1742 	uint64_t page_size;
1743 	uint64_t pages_per_cluster;
1744 	uint64_t pages_per_payload;
1745 
1746 	struct iovec iov_read[2];
1747 	struct iovec iov_write[2];
1748 
1749 	uint64_t i, j;
1750 
1751 	cluster_size = spdk_bs_get_cluster_size(bs);
1752 	page_size = spdk_bs_get_page_size(bs);
1753 	pages_per_cluster = cluster_size / page_size;
1754 	pages_per_payload = pages_per_cluster * 5;
1755 	payload_size = cluster_size * 5;
1756 
1757 	payload_read = malloc(payload_size);
1758 	SPDK_CU_ASSERT_FATAL(payload_read != NULL);
1759 
1760 	payload_write = malloc(payload_size);
1761 	SPDK_CU_ASSERT_FATAL(payload_write != NULL);
1762 
1763 	payload_pattern = malloc(payload_size);
1764 	SPDK_CU_ASSERT_FATAL(payload_pattern != NULL);
1765 
1766 	/* Prepare a pattern to write: every 8-byte word of page i holds the value i + 1 */
1767 	for (i = 0; i < pages_per_payload; i++) {
1768 		for (j = 0; j < page_size / sizeof(uint64_t); j++) {
1769 			uint64_t *tmp;
1770 
1771 			tmp = (uint64_t *)payload_pattern;
1772 			tmp += ((page_size * i) / sizeof(uint64_t)) + j;
1773 			*tmp = i + 1;
1774 		}
1775 	}
1776 
1777 	channel = spdk_bs_alloc_io_channel(bs);
1778 	SPDK_CU_ASSERT_FATAL(channel != NULL);
1779 
1780 	/* Create blob */
1781 	ut_spdk_blob_opts_init(&opts);
1782 	opts.thin_provision = false;
1783 	opts.num_clusters = 5;
1784 
1785 	blob = ut_blob_create_and_open(bs, &opts);
1786 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
1787 
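	/* Each readv/writev below uses two iovecs whose split point moves around
	 * (3+2, 2+3, 4+1 and 1+4 clusters for the reads, page-sized remainders for the
	 * writes), so the request splitting is exercised with differently placed
	 * buffer boundaries.
	 */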
1788 	/* Initial read should return zeroed payload */
1789 	memset(payload_read, 0xFF, payload_size);
1790 	iov_read[0].iov_base = payload_read;
1791 	iov_read[0].iov_len = cluster_size * 3;
1792 	iov_read[1].iov_base = payload_read + cluster_size * 3;
1793 	iov_read[1].iov_len = cluster_size * 2;
1794 	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL);
1795 	poll_threads();
1796 	CU_ASSERT(g_bserrno == 0);
1797 	CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size));
1798 
1799 	/* The first iov fills the whole blob except the last page and the second iov
1800 	 *  writes the last page with the pattern. */
1801 	iov_write[0].iov_base = payload_pattern;
1802 	iov_write[0].iov_len = payload_size - page_size;
1803 	iov_write[1].iov_base = payload_pattern;
1804 	iov_write[1].iov_len = page_size;
1805 	spdk_blob_io_writev(blob, channel, iov_write, 2, 0, pages_per_payload, blob_op_complete, NULL);
1806 	poll_threads();
1807 	CU_ASSERT(g_bserrno == 0);
1808 
1809 	/* Read whole blob and check consistency */
1810 	memset(payload_read, 0xFF, payload_size);
1811 	iov_read[0].iov_base = payload_read;
1812 	iov_read[0].iov_len = cluster_size * 2;
1813 	iov_read[1].iov_base = payload_read + cluster_size * 2;
1814 	iov_read[1].iov_len = cluster_size * 3;
1815 	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL);
1816 	poll_threads();
1817 	CU_ASSERT(g_bserrno == 0);
1818 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size - page_size) == 0);
1819 	CU_ASSERT(memcmp(payload_pattern, payload_read + payload_size - page_size, page_size) == 0);
1820 
1821 	/* The first iov writes only the first page and the second iov fills the whole
1822 	 *  blob except the first page with the pattern. */
1823 	iov_write[0].iov_base = payload_pattern;
1824 	iov_write[0].iov_len = page_size;
1825 	iov_write[1].iov_base = payload_pattern;
1826 	iov_write[1].iov_len = payload_size - page_size;
1827 	spdk_blob_io_writev(blob, channel, iov_write, 2, 0, pages_per_payload, blob_op_complete, NULL);
1828 	poll_threads();
1829 	CU_ASSERT(g_bserrno == 0);
1830 
1831 	/* Read whole blob and check consistency */
1832 	memset(payload_read, 0xFF, payload_size);
1833 	iov_read[0].iov_base = payload_read;
1834 	iov_read[0].iov_len = cluster_size * 4;
1835 	iov_read[1].iov_base = payload_read + cluster_size * 4;
1836 	iov_read[1].iov_len = cluster_size;
1837 	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL);
1838 	poll_threads();
1839 	CU_ASSERT(g_bserrno == 0);
1840 	CU_ASSERT(memcmp(payload_pattern, payload_read + page_size, payload_size - page_size) == 0);
1841 	CU_ASSERT(memcmp(payload_pattern, payload_read, page_size) == 0);
1842 
1843 
1844 	/* Fill whole blob with a pattern (5 clusters) */
1845 
1846 	/* 1. Read test. */
1847 	_blob_io_write_no_split(blob, channel, payload_pattern, 0, pages_per_payload,
1848 				blob_op_complete, NULL);
1849 	poll_threads();
1850 	CU_ASSERT(g_bserrno == 0);
1851 
1852 	memset(payload_read, 0xFF, payload_size);
1853 	iov_read[0].iov_base = payload_read;
1854 	iov_read[0].iov_len = cluster_size;
1855 	iov_read[1].iov_base = payload_read + cluster_size;
1856 	iov_read[1].iov_len = cluster_size * 4;
1857 	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL);
1858 	poll_threads();
1859 	CU_ASSERT(g_bserrno == 0);
1860 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);
1861 
1862 	/* 2. Write test. */
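	/* payload_read still holds the pattern from the read just above, so it can
	 * double as the source buffer for this writev.
	 */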
1863 	iov_write[0].iov_base = payload_read;
1864 	iov_write[0].iov_len = cluster_size * 2;
1865 	iov_write[1].iov_base = payload_read + cluster_size * 2;
1866 	iov_write[1].iov_len = cluster_size * 3;
1867 	spdk_blob_io_writev(blob, channel, iov_write, 2, 0, pages_per_payload, blob_op_complete, NULL);
1868 	poll_threads();
1869 	CU_ASSERT(g_bserrno == 0);
1870 
1871 	memset(payload_read, 0xFF, payload_size);
1872 	_blob_io_read_no_split(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
1873 	poll_threads();
1874 	CU_ASSERT(g_bserrno == 0);
1875 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);
1876 
1877 	spdk_bs_free_io_channel(channel);
1878 	poll_threads();
1879 
1880 	g_blob = NULL;
1881 	g_blobid = 0;
1882 
1883 	free(payload_read);
1884 	free(payload_write);
1885 	free(payload_pattern);
1886 
1887 	ut_blob_close_and_delete(bs, blob);
1888 }
1889 
1890 static void
1891 blob_unmap(void)
1892 {
1893 	struct spdk_blob_store *bs = g_bs;
1894 	struct spdk_blob *blob;
1895 	struct spdk_io_channel *channel;
1896 	struct spdk_blob_opts opts;
1897 	uint8_t payload[4096];
1898 	int i;
1899 
1900 	channel = spdk_bs_alloc_io_channel(bs);
1901 	CU_ASSERT(channel != NULL);
1902 
1903 	ut_spdk_blob_opts_init(&opts);
1904 	opts.num_clusters = 10;
1905 
1906 	blob = ut_blob_create_and_open(bs, &opts);
1907 
1908 	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
1909 	poll_threads();
1910 	CU_ASSERT(g_bserrno == 0);
1911 
1912 	memset(payload, 0, sizeof(payload));
1913 	payload[0] = 0xFF;
1914 
1915 	/*
1916 	 * Set the first byte of every cluster to 0xFF directly in the device buffer.
1917 	 * The first cluster on the device is reserved, so start from cluster number 1.
1918 	 */
1919 	for (i = 1; i < 11; i++) {
1920 		g_dev_buffer[i * SPDK_BLOB_OPTS_CLUSTER_SZ] = 0xFF;
1921 	}
1922 
1923 	/* Confirm that the bytes written directly to the device are visible through blob reads */
1924 	for (i = 0; i < 10; i++) {
1925 		payload[0] = 0;
1926 		spdk_blob_io_read(blob, channel, &payload, i * SPDK_BLOB_OPTS_CLUSTER_SZ / 4096, 1,
1927 				  blob_op_complete, NULL);
1928 		poll_threads();
1929 		CU_ASSERT(g_bserrno == 0);
1930 		CU_ASSERT(payload[0] == 0xFF);
1931 	}
1932 
1933 	/* Mark some clusters as unallocated */
1934 	blob->active.clusters[1] = 0;
1935 	blob->active.clusters[2] = 0;
1936 	blob->active.clusters[3] = 0;
1937 	blob->active.clusters[6] = 0;
1938 	blob->active.clusters[8] = 0;
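	/* Zeroing entries in active.clusters simulates clusters that were never
	 * allocated.  The resize to 0 below should unmap only the clusters that are
	 * still recorded as allocated, which the device-buffer checks afterwards
	 * confirm.
	 */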
1939 
1940 	/* Unmap clusters by resizing to 0 */
1941 	spdk_blob_resize(blob, 0, blob_op_complete, NULL);
1942 	poll_threads();
1943 	CU_ASSERT(g_bserrno == 0);
1944 
1945 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
1946 	poll_threads();
1947 	CU_ASSERT(g_bserrno == 0);
1948 
1949 	/* Confirm that only 'allocated' clusters were unmapped */
1950 	for (i = 1; i < 11; i++) {
1951 		switch (i) {
1952 		case 2:
1953 		case 3:
1954 		case 4:
1955 		case 7:
1956 		case 9:
1957 			CU_ASSERT(g_dev_buffer[i * SPDK_BLOB_OPTS_CLUSTER_SZ] == 0xFF);
1958 			break;
1959 		default:
1960 			CU_ASSERT(g_dev_buffer[i * SPDK_BLOB_OPTS_CLUSTER_SZ] == 0);
1961 			break;
1962 		}
1963 	}
1964 
1965 	spdk_bs_free_io_channel(channel);
1966 	poll_threads();
1967 
1968 	ut_blob_close_and_delete(bs, blob);
1969 }
1970 
1971 static void
1972 blob_iter(void)
1973 {
1974 	struct spdk_blob_store *bs = g_bs;
1975 	struct spdk_blob *blob;
1976 	spdk_blob_id blobid;
1977 	struct spdk_blob_opts blob_opts;
1978 
1979 	spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL);
1980 	poll_threads();
1981 	CU_ASSERT(g_blob == NULL);
1982 	CU_ASSERT(g_bserrno == -ENOENT);
1983 
1984 	ut_spdk_blob_opts_init(&blob_opts);
1985 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
1986 	poll_threads();
1987 	CU_ASSERT(g_bserrno == 0);
1988 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
1989 	blobid = g_blobid;
1990 
1991 	spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL);
1992 	poll_threads();
1993 	CU_ASSERT(g_blob != NULL);
1994 	CU_ASSERT(g_bserrno == 0);
1995 	blob = g_blob;
1996 	CU_ASSERT(spdk_blob_get_id(blob) == blobid);
1997 
1998 	spdk_bs_iter_next(bs, blob, blob_op_with_handle_complete, NULL);
1999 	poll_threads();
2000 	CU_ASSERT(g_blob == NULL);
2001 	CU_ASSERT(g_bserrno == -ENOENT);
2002 }
2003 
2004 static void
2005 blob_xattr(void)
2006 {
2007 	struct spdk_blob_store *bs = g_bs;
2008 	struct spdk_blob *blob = g_blob;
2009 	spdk_blob_id blobid = spdk_blob_get_id(blob);
2010 	uint64_t length;
2011 	int rc;
2012 	const char *name1, *name2;
2013 	const void *value;
2014 	size_t value_len;
2015 	struct spdk_xattr_names *names;
2016 
2017 	/* Test that set_xattr fails if md_ro flag is set. */
2018 	blob->md_ro = true;
2019 	rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
2020 	CU_ASSERT(rc == -EPERM);
2021 
2022 	blob->md_ro = false;
2023 	rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
2024 	CU_ASSERT(rc == 0);
2025 
2026 	length = 2345;
2027 	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
2028 	CU_ASSERT(rc == 0);
2029 
2030 	/* Overwrite "length" xattr. */
2031 	length = 3456;
2032 	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
2033 	CU_ASSERT(rc == 0);
2034 
2035 	/* get_xattr should still work even if md_ro flag is set. */
2036 	value = NULL;
2037 	blob->md_ro = true;
2038 	rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len);
2039 	CU_ASSERT(rc == 0);
2040 	SPDK_CU_ASSERT_FATAL(value != NULL);
2041 	CU_ASSERT(*(uint64_t *)value == length);
2042 	CU_ASSERT(value_len == 8);
2043 	blob->md_ro = false;
2044 
2045 	rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len);
2046 	CU_ASSERT(rc == -ENOENT);
2047 
2048 	names = NULL;
2049 	rc = spdk_blob_get_xattr_names(blob, &names);
2050 	CU_ASSERT(rc == 0);
2051 	SPDK_CU_ASSERT_FATAL(names != NULL);
2052 	CU_ASSERT(spdk_xattr_names_get_count(names) == 2);
2053 	name1 = spdk_xattr_names_get_name(names, 0);
2054 	SPDK_CU_ASSERT_FATAL(name1 != NULL);
2055 	CU_ASSERT(!strcmp(name1, "name") || !strcmp(name1, "length"));
2056 	name2 = spdk_xattr_names_get_name(names, 1);
2057 	SPDK_CU_ASSERT_FATAL(name2 != NULL);
2058 	CU_ASSERT(!strcmp(name2, "name") || !strcmp(name2, "length"));
2059 	CU_ASSERT(strcmp(name1, name2));
2060 	spdk_xattr_names_free(names);
2061 
2062 	/* Confirm that remove_xattr fails if md_ro is set to true. */
2063 	blob->md_ro = true;
2064 	rc = spdk_blob_remove_xattr(blob, "name");
2065 	CU_ASSERT(rc == -EPERM);
2066 
2067 	blob->md_ro = false;
2068 	rc = spdk_blob_remove_xattr(blob, "name");
2069 	CU_ASSERT(rc == 0);
2070 
2071 	rc = spdk_blob_remove_xattr(blob, "foobar");
2072 	CU_ASSERT(rc == -ENOENT);
2073 
2074 	/* Set internal xattr */
2075 	length = 7898;
2076 	rc = blob_set_xattr(blob, "internal", &length, sizeof(length), true);
2077 	CU_ASSERT(rc == 0);
2078 	rc = blob_get_xattr_value(blob, "internal", &value, &value_len, true);
2079 	CU_ASSERT(rc == 0);
2080 	CU_ASSERT(*(uint64_t *)value == length);
2081 	/* Trying to get a public xattr with the same name should fail */
2082 	rc = spdk_blob_get_xattr_value(blob, "internal", &value, &value_len);
2083 	CU_ASSERT(rc != 0);
2084 	rc = blob_get_xattr_value(blob, "internal", &value, &value_len, false);
2085 	CU_ASSERT(rc != 0);
2086 	/* Check if SPDK_BLOB_INTERNAL_XATTR is set */
2087 	CU_ASSERT((blob->invalid_flags & SPDK_BLOB_INTERNAL_XATTR) ==
2088 		  SPDK_BLOB_INTERNAL_XATTR);
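	/* Internal xattrs live in a namespace separate from the public one; the
	 * SPDK_BLOB_INTERNAL_XATTR bit in invalid_flags presumably exists so that
	 * implementations that do not understand internal xattrs refuse to open the
	 * blob rather than silently dropping them.
	 */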
2089 
2090 	spdk_blob_close(blob, blob_op_complete, NULL);
2091 	poll_threads();
2092 
2093 	/* Check if xattrs are persisted */
2094 	ut_bs_reload(&bs, NULL);
2095 
2096 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
2097 	poll_threads();
2098 	CU_ASSERT(g_bserrno == 0);
2099 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
2100 	blob = g_blob;
2101 
2102 	rc = blob_get_xattr_value(blob, "internal", &value, &value_len, true);
2103 	CU_ASSERT(rc == 0);
2104 	CU_ASSERT(*(uint64_t *)value == length);
2105 
2106 	/* Trying to get the internal xattr through the public call should fail */
2107 	rc = spdk_blob_get_xattr_value(blob, "internal", &value, &value_len);
2108 	CU_ASSERT(rc != 0);
2109 
2110 	rc = blob_remove_xattr(blob, "internal", true);
2111 	CU_ASSERT(rc == 0);
2112 
2113 	CU_ASSERT((blob->invalid_flags & SPDK_BLOB_INTERNAL_XATTR) == 0);
2114 }
2115 
2116 static void
2117 blob_parse_md(void)
2118 {
2119 	struct spdk_blob_store *bs = g_bs;
2120 	struct spdk_blob *blob;
2121 	int rc;
2122 	uint32_t used_pages;
2123 	size_t xattr_length;
2124 	char *xattr;
2125 
2126 	used_pages = spdk_bit_array_count_set(bs->used_md_pages);
2127 	blob = ut_blob_create_and_open(bs, NULL);
2128 
2129 	/* Set a large xattr to force more than 1 page of metadata. */
2130 	xattr_length = SPDK_BS_MAX_DESC_SIZE - sizeof(struct spdk_blob_md_descriptor_xattr) -
2131 		       strlen("large_xattr");
2132 	xattr = calloc(xattr_length, sizeof(char));
2133 	SPDK_CU_ASSERT_FATAL(xattr != NULL);
2134 	rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
2135 	free(xattr);
2136 	SPDK_CU_ASSERT_FATAL(rc == 0);
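	/* The xattr above pushes the serialized metadata past a single page, so syncing
	 * allocates at least one extra entry in bs->used_md_pages; deleting the blob
	 * must give every allocated metadata page back.
	 */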
2137 
2138 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
2139 	poll_threads();
2140 
2141 	/* Delete the blob and verify that the used metadata page count returns to its value from before the blob's creation. */
2142 	SPDK_CU_ASSERT_FATAL(used_pages != spdk_bit_array_count_set(bs->used_md_pages));
2143 	ut_blob_close_and_delete(bs, blob);
2144 	SPDK_CU_ASSERT_FATAL(used_pages == spdk_bit_array_count_set(bs->used_md_pages));
2145 }
2146 
2147 static void
2148 bs_load(void)
2149 {
2150 	struct spdk_blob_store *bs;
2151 	struct spdk_bs_dev *dev;
2152 	spdk_blob_id blobid;
2153 	struct spdk_blob *blob;
2154 	struct spdk_bs_super_block *super_block;
2155 	uint64_t length;
2156 	int rc;
2157 	const void *value;
2158 	size_t value_len;
2159 	struct spdk_bs_opts opts;
2160 	struct spdk_blob_opts blob_opts;
2161 
2162 	dev = init_dev();
2163 	spdk_bs_opts_init(&opts, sizeof(opts));
2164 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2165 
2166 	/* Initialize a new blob store */
2167 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2168 	poll_threads();
2169 	CU_ASSERT(g_bserrno == 0);
2170 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2171 	bs = g_bs;
2172 
2173 	/* Try to open a blobid that does not exist */
2174 	spdk_bs_open_blob(bs, 0, blob_op_with_handle_complete, NULL);
2175 	poll_threads();
2176 	CU_ASSERT(g_bserrno == -ENOENT);
2177 	CU_ASSERT(g_blob == NULL);
2178 
2179 	/* Create a blob */
2180 	blob = ut_blob_create_and_open(bs, NULL);
2181 	blobid = spdk_blob_get_id(blob);
2182 
2183 	/* Try again to open valid blob but without the upper bit set */
2184 	spdk_bs_open_blob(bs, blobid & 0xFFFFFFFF, blob_op_with_handle_complete, NULL);
2185 	poll_threads();
2186 	CU_ASSERT(g_bserrno == -ENOENT);
2187 	CU_ASSERT(g_blob == NULL);
2188 
2189 	/* Set some xattrs */
2190 	rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
2191 	CU_ASSERT(rc == 0);
2192 
2193 	length = 2345;
2194 	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
2195 	CU_ASSERT(rc == 0);
2196 
2197 	/* Resize the blob */
2198 	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
2199 	poll_threads();
2200 	CU_ASSERT(g_bserrno == 0);
2201 
2202 	spdk_blob_close(blob, blob_op_complete, NULL);
2203 	poll_threads();
2204 	CU_ASSERT(g_bserrno == 0);
2205 	blob = NULL;
2206 	g_blob = NULL;
2207 	g_blobid = SPDK_BLOBID_INVALID;
2208 
2209 	/* Unload the blob store */
2210 	spdk_bs_unload(bs, bs_op_complete, NULL);
2211 	poll_threads();
2212 	CU_ASSERT(g_bserrno == 0);
2213 	g_bs = NULL;
2214 	g_blob = NULL;
2215 	g_blobid = 0;
2216 
2217 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
2218 	CU_ASSERT(super_block->clean == 1);
2219 
2220 	/* Load should fail for device with an unsupported blocklen */
2221 	dev = init_dev();
2222 	dev->blocklen = SPDK_BS_PAGE_SIZE * 2;
2223 	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
2224 	poll_threads();
2225 	CU_ASSERT(g_bserrno == -EINVAL);
2226 
2227 	/* Load should fail when max_md_ops is set to zero */
2228 	dev = init_dev();
2229 	spdk_bs_opts_init(&opts, sizeof(opts));
2230 	opts.max_md_ops = 0;
2231 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2232 	poll_threads();
2233 	CU_ASSERT(g_bserrno == -EINVAL);
2234 
2235 	/* Load should fail when max_channel_ops is set to zero */
2236 	dev = init_dev();
2237 	spdk_bs_opts_init(&opts, sizeof(opts));
2238 	opts.max_channel_ops = 0;
2239 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2240 	poll_threads();
2241 	CU_ASSERT(g_bserrno == -EINVAL);
2242 
2243 	/* Load an existing blob store */
2244 	dev = init_dev();
2245 	spdk_bs_opts_init(&opts, sizeof(opts));
2246 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2247 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2248 	poll_threads();
2249 	CU_ASSERT(g_bserrno == 0);
2250 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2251 	bs = g_bs;
2252 
2253 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
2254 	CU_ASSERT(super_block->clean == 1);
2255 	CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen);
2256 
2257 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
2258 	poll_threads();
2259 	CU_ASSERT(g_bserrno == 0);
2260 	CU_ASSERT(g_blob != NULL);
2261 	blob = g_blob;
2262 
2263 	/* Verify that blobstore is marked dirty after first metadata sync */
2264 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
2265 	CU_ASSERT(super_block->clean == 1);
2266 
2267 	/* Get the xattrs */
2268 	value = NULL;
2269 	rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len);
2270 	CU_ASSERT(rc == 0);
2271 	SPDK_CU_ASSERT_FATAL(value != NULL);
2272 	CU_ASSERT(*(uint64_t *)value == length);
2273 	CU_ASSERT(value_len == 8);
2274 
2275 	rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len);
2276 	CU_ASSERT(rc == -ENOENT);
2277 
2278 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
2279 
2280 	spdk_blob_close(blob, blob_op_complete, NULL);
2281 	poll_threads();
2282 	CU_ASSERT(g_bserrno == 0);
2283 	blob = NULL;
2284 	g_blob = NULL;
2285 
2286 	spdk_bs_unload(bs, bs_op_complete, NULL);
2287 	poll_threads();
2288 	CU_ASSERT(g_bserrno == 0);
2289 	g_bs = NULL;
2290 
2291 	/* Load should fail: bdev size < saved size */
2292 	dev = init_dev();
2293 	dev->blockcnt /= 2;
2294 
2295 	spdk_bs_opts_init(&opts, sizeof(opts));
2296 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2297 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2298 	poll_threads();
2299 
2300 	CU_ASSERT(g_bserrno == -EILSEQ);
2301 
2302 	/* Load should succeed: bdev size > saved size */
2303 	dev = init_dev();
2304 	dev->blockcnt *= 4;
2305 
2306 	spdk_bs_opts_init(&opts, sizeof(opts));
2307 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2308 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2309 	poll_threads();
2310 	CU_ASSERT(g_bserrno == 0);
2311 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2312 	bs = g_bs;
2313 
2314 	CU_ASSERT(g_bserrno == 0);
2315 	spdk_bs_unload(bs, bs_op_complete, NULL);
2316 	poll_threads();
2317 
2318 
2319 	/* Test compatibility mode */
2320 
2321 	dev = init_dev();
2322 	super_block->size = 0;
2323 	super_block->crc = blob_md_page_calc_crc(super_block);
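	/* A size of 0 in the super block emulates an older on-disk format that did not
	 * record the blobstore size.  Load has to fall back to the device size, and the
	 * next metadata write should persist it, as checked below.
	 */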
2324 
2325 	spdk_bs_opts_init(&opts, sizeof(opts));
2326 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2327 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2328 	poll_threads();
2329 	CU_ASSERT(g_bserrno == 0);
2330 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2331 	bs = g_bs;
2332 
2333 	/* Create a blob */
2334 	ut_spdk_blob_opts_init(&blob_opts);
2335 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
2336 	poll_threads();
2337 	CU_ASSERT(g_bserrno == 0);
2338 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
2339 
2340 	/* Blobstore should update number of blocks in super_block */
2341 	CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen);
2342 	CU_ASSERT(super_block->clean == 0);
2343 
2344 	spdk_bs_unload(bs, bs_op_complete, NULL);
2345 	poll_threads();
2346 	CU_ASSERT(g_bserrno == 0);
2347 	CU_ASSERT(super_block->clean == 1);
2348 	g_bs = NULL;
2350 }
2351 
2352 static void
2353 bs_load_pending_removal(void)
2354 {
2355 	struct spdk_blob_store *bs = g_bs;
2356 	struct spdk_blob_opts opts;
2357 	struct spdk_blob *blob, *snapshot;
2358 	spdk_blob_id blobid, snapshotid;
2359 	const void *value;
2360 	size_t value_len;
2361 	int rc;
2362 
2363 	/* Create blob */
2364 	ut_spdk_blob_opts_init(&opts);
2365 	opts.num_clusters = 10;
2366 
2367 	blob = ut_blob_create_and_open(bs, &opts);
2368 	blobid = spdk_blob_get_id(blob);
2369 
2370 	/* Create snapshot */
2371 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
2372 	poll_threads();
2373 	CU_ASSERT(g_bserrno == 0);
2374 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
2375 	snapshotid = g_blobid;
2376 
2377 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
2378 	poll_threads();
2379 	CU_ASSERT(g_bserrno == 0);
2380 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
2381 	snapshot = g_blob;
2382 
2383 	/* Set SNAPSHOT_PENDING_REMOVAL xattr */
2384 	snapshot->md_ro = false;
2385 	rc = blob_set_xattr(snapshot, SNAPSHOT_PENDING_REMOVAL, &blobid, sizeof(spdk_blob_id), true);
2386 	CU_ASSERT(rc == 0);
2387 	snapshot->md_ro = true;
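	/* Setting SNAPSHOT_PENDING_REMOVAL by hand mimics a crash in the middle of
	 * deleting the snapshot.  On load, the blobstore has to decide whether to
	 * finish the removal based on whether any blob still references the snapshot.
	 */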
2388 
2389 	spdk_blob_close(snapshot, blob_op_complete, NULL);
2390 	poll_threads();
2391 	CU_ASSERT(g_bserrno == 0);
2392 
2393 	spdk_blob_close(blob, blob_op_complete, NULL);
2394 	poll_threads();
2395 	CU_ASSERT(g_bserrno == 0);
2396 
2397 	/* Reload blobstore */
2398 	ut_bs_reload(&bs, NULL);
2399 
2400 	/* Snapshot should not be removed as blob is still pointing to it */
2401 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
2402 	poll_threads();
2403 	CU_ASSERT(g_bserrno == 0);
2404 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
2405 	snapshot = g_blob;
2406 
2407 	/* SNAPSHOT_PENDING_REMOVAL xattr should be removed during load */
2408 	rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_PENDING_REMOVAL, &value, &value_len);
2409 	CU_ASSERT(rc != 0);
2410 
2411 	/* Set SNAPSHOT_PENDING_REMOVAL xattr again */
2412 	snapshot->md_ro = false;
2413 	rc = blob_set_xattr(snapshot, SNAPSHOT_PENDING_REMOVAL, &blobid, sizeof(spdk_blob_id), true);
2414 	CU_ASSERT(rc == 0);
2415 	snapshot->md_ro = true;
2416 
2417 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
2418 	poll_threads();
2419 	CU_ASSERT(g_bserrno == 0);
2420 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
2421 	blob = g_blob;
2422 
2423 	/* Remove parent_id from blob by removing BLOB_SNAPSHOT xattr */
2424 	blob_remove_xattr(blob, BLOB_SNAPSHOT, true);
2425 
2426 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
2427 	poll_threads();
2428 	CU_ASSERT(g_bserrno == 0);
2429 
2430 	spdk_blob_close(snapshot, blob_op_complete, NULL);
2431 	poll_threads();
2432 	CU_ASSERT(g_bserrno == 0);
2433 
2434 	spdk_blob_close(blob, blob_op_complete, NULL);
2435 	poll_threads();
2436 	CU_ASSERT(g_bserrno == 0);
2437 
2438 	/* Reload blobstore */
2439 	ut_bs_reload(&bs, NULL);
2440 
2441 	/* Snapshot should be removed as blob is not pointing to it anymore */
2442 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
2443 	poll_threads();
2444 	CU_ASSERT(g_bserrno != 0);
2445 }
2446 
2447 static void
2448 bs_load_custom_cluster_size(void)
2449 {
2450 	struct spdk_blob_store *bs;
2451 	struct spdk_bs_dev *dev;
2452 	struct spdk_bs_super_block *super_block;
2453 	struct spdk_bs_opts opts;
2454 	uint32_t custom_cluster_size = 4194304; /* 4MiB */
2455 	uint32_t cluster_sz;
2456 	uint64_t total_clusters;
2457 
2458 	dev = init_dev();
2459 	spdk_bs_opts_init(&opts, sizeof(opts));
2460 	opts.cluster_sz = custom_cluster_size;
2461 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2462 
2463 	/* Initialize a new blob store */
2464 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2465 	poll_threads();
2466 	CU_ASSERT(g_bserrno == 0);
2467 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2468 	bs = g_bs;
2469 	cluster_sz = bs->cluster_sz;
2470 	total_clusters = bs->total_clusters;
2471 
2472 	/* Unload the blob store */
2473 	spdk_bs_unload(bs, bs_op_complete, NULL);
2474 	poll_threads();
2475 	CU_ASSERT(g_bserrno == 0);
2476 	g_bs = NULL;
2477 	g_blob = NULL;
2478 	g_blobid = 0;
2479 
2480 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
2481 	CU_ASSERT(super_block->clean == 1);
2482 
2483 	/* Load an existing blob store */
2484 	dev = init_dev();
2485 	spdk_bs_opts_init(&opts, sizeof(opts));
2486 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2487 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2488 	poll_threads();
2489 	CU_ASSERT(g_bserrno == 0);
2490 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2491 	bs = g_bs;
2492 	/* Compare cluster size and number to one after initialization */
2493 	CU_ASSERT(cluster_sz == bs->cluster_sz);
2494 	CU_ASSERT(total_clusters == bs->total_clusters);
2495 
2496 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
2497 	CU_ASSERT(super_block->clean == 1);
2498 	CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen);
2499 
2500 	spdk_bs_unload(bs, bs_op_complete, NULL);
2501 	poll_threads();
2502 	CU_ASSERT(g_bserrno == 0);
2503 	CU_ASSERT(super_block->clean == 1);
2504 	g_bs = NULL;
2505 }
2506 
2507 static void
2508 bs_type(void)
2509 {
2510 	struct spdk_blob_store *bs;
2511 	struct spdk_bs_dev *dev;
2512 	struct spdk_bs_opts opts;
2513 
2514 	dev = init_dev();
2515 	spdk_bs_opts_init(&opts, sizeof(opts));
2516 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2517 
2518 	/* Initialize a new blob store */
2519 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2520 	poll_threads();
2521 	CU_ASSERT(g_bserrno == 0);
2522 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2523 	bs = g_bs;
2524 
2525 	/* Unload the blob store */
2526 	spdk_bs_unload(bs, bs_op_complete, NULL);
2527 	poll_threads();
2528 	CU_ASSERT(g_bserrno == 0);
2529 	g_bs = NULL;
2530 	g_blob = NULL;
2531 	g_blobid = 0;
2532 
2533 	/* Load a non-existing blobstore type */
2534 	dev = init_dev();
2535 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "NONEXISTING");
2536 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2537 	poll_threads();
2538 	CU_ASSERT(g_bserrno != 0);
2539 
2540 	/* Load with empty blobstore type */
2541 	dev = init_dev();
2542 	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
2543 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2544 	poll_threads();
2545 	CU_ASSERT(g_bserrno == 0);
2546 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2547 	bs = g_bs;
2548 
2549 	spdk_bs_unload(bs, bs_op_complete, NULL);
2550 	poll_threads();
2551 	CU_ASSERT(g_bserrno == 0);
2552 	g_bs = NULL;
2553 
2554 	/* Initialize a new blob store with empty bstype */
2555 	dev = init_dev();
2556 	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
2557 	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
2558 	poll_threads();
2559 	CU_ASSERT(g_bserrno == 0);
2560 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2561 	bs = g_bs;
2562 
2563 	spdk_bs_unload(bs, bs_op_complete, NULL);
2564 	poll_threads();
2565 	CU_ASSERT(g_bserrno == 0);
2566 	g_bs = NULL;
2567 
2568 	/* Load a non-existing blobstore type */
2569 	dev = init_dev();
2570 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "NONEXISTING");
2571 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2572 	poll_threads();
2573 	CU_ASSERT(g_bserrno != 0);
2574 
2575 	/* Load with empty blobstore type */
2576 	dev = init_dev();
2577 	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
2578 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2579 	poll_threads();
2580 	CU_ASSERT(g_bserrno == 0);
2581 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2582 	bs = g_bs;
2583 
2584 	spdk_bs_unload(bs, bs_op_complete, NULL);
2585 	poll_threads();
2586 	CU_ASSERT(g_bserrno == 0);
2587 	g_bs = NULL;
2588 }
2589 
2590 static void
2591 bs_super_block(void)
2592 {
2593 	struct spdk_blob_store *bs;
2594 	struct spdk_bs_dev *dev;
2595 	struct spdk_bs_super_block *super_block;
2596 	struct spdk_bs_opts opts;
2597 	struct spdk_bs_super_block_ver1 super_block_v1;
2598 
2599 	dev = init_dev();
2600 	spdk_bs_opts_init(&opts, sizeof(opts));
2601 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2602 
2603 	/* Initialize a new blob store */
2604 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2605 	poll_threads();
2606 	CU_ASSERT(g_bserrno == 0);
2607 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2608 	bs = g_bs;
2609 
2610 	/* Unload the blob store */
2611 	spdk_bs_unload(bs, bs_op_complete, NULL);
2612 	poll_threads();
2613 	CU_ASSERT(g_bserrno == 0);
2614 	g_bs = NULL;
2615 	g_blob = NULL;
2616 	g_blobid = 0;
2617 
2618 	/* Load an existing blob store with version newer than supported */
2619 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
2620 	super_block->version++;
2621 
2622 	dev = init_dev();
2623 	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
2624 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2625 	poll_threads();
2626 	CU_ASSERT(g_bserrno != 0);
2627 
2628 	/* Create a new blob store with super block version 1 */
2629 	dev = init_dev();
2630 	super_block_v1.version = 1;
2631 	memcpy(super_block_v1.signature, "SPDKBLOB", sizeof(super_block_v1.signature));
2632 	super_block_v1.length = 0x1000;
2633 	super_block_v1.clean = 1;
2634 	super_block_v1.super_blob = 0xFFFFFFFFFFFFFFFF;
2635 	super_block_v1.cluster_size = 0x100000;
2636 	super_block_v1.used_page_mask_start = 0x01;
2637 	super_block_v1.used_page_mask_len = 0x01;
2638 	super_block_v1.used_cluster_mask_start = 0x02;
2639 	super_block_v1.used_cluster_mask_len = 0x01;
2640 	super_block_v1.md_start = 0x03;
2641 	super_block_v1.md_len = 0x40;
2642 	memset(super_block_v1.reserved, 0, 4036);
2643 	super_block_v1.crc = blob_md_page_calc_crc(&super_block_v1);
2644 	memcpy(g_dev_buffer, &super_block_v1, sizeof(struct spdk_bs_super_block_ver1));
2645 
2646 	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
2647 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2648 	poll_threads();
2649 	CU_ASSERT(g_bserrno == 0);
2650 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2651 	bs = g_bs;
2652 
2653 	spdk_bs_unload(bs, bs_op_complete, NULL);
2654 	poll_threads();
2655 	CU_ASSERT(g_bserrno == 0);
2656 	g_bs = NULL;
2657 }
2658 
2659 /*
2660  * Create a blobstore and then unload it.
2661  */
2662 static void
2663 bs_unload(void)
2664 {
2665 	struct spdk_blob_store *bs = g_bs;
2666 	struct spdk_blob *blob;
2667 
2668 	/* Create a blob and open it. */
2669 	blob = ut_blob_create_and_open(bs, NULL);
2670 
2671 	/* Try to unload blobstore, should fail with open blob */
2672 	g_bserrno = -1;
2673 	spdk_bs_unload(bs, bs_op_complete, NULL);
2674 	poll_threads();
2675 	CU_ASSERT(g_bserrno == -EBUSY);
2676 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2677 
2678 	/* Close the blob, then successfully unload blobstore */
2679 	g_bserrno = -1;
2680 	spdk_blob_close(blob, blob_op_complete, NULL);
2681 	poll_threads();
2682 	CU_ASSERT(g_bserrno == 0);
2683 }
2684 
2685 /*
2686  * Create a blobstore with a cluster size different than the default, and ensure it is
2687  *  persisted.
2688  */
2689 static void
2690 bs_cluster_sz(void)
2691 {
2692 	struct spdk_blob_store *bs;
2693 	struct spdk_bs_dev *dev;
2694 	struct spdk_bs_opts opts;
2695 	uint32_t cluster_sz;
2696 
2697 	/* Set cluster size to zero */
2698 	dev = init_dev();
2699 	spdk_bs_opts_init(&opts, sizeof(opts));
2700 	opts.cluster_sz = 0;
2701 
2702 	/* Initialize a new blob store */
2703 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2704 	poll_threads();
2705 	CU_ASSERT(g_bserrno == -EINVAL);
2706 	SPDK_CU_ASSERT_FATAL(g_bs == NULL);
2707 
2708 	/*
2709 	 * Set the cluster size equal to the blobstore page size;
2710 	 * to work, it must be at least twice the blobstore page size.
2711 	 */
2712 	dev = init_dev();
2713 	spdk_bs_opts_init(&opts, sizeof(opts));
2714 	opts.cluster_sz = SPDK_BS_PAGE_SIZE;
2715 
2716 	/* Initialize a new blob store */
2717 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2718 	poll_threads();
2719 	CU_ASSERT(g_bserrno == -ENOMEM);
2720 	SPDK_CU_ASSERT_FATAL(g_bs == NULL);
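	/* Note that this case fails with -ENOMEM rather than -EINVAL: the value itself
	 * is not rejected up front, but (presumably) a one-page cluster leaves no room
	 * for the metadata that has to fit in it, so initialization runs out of space.
	 */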
2721 
2722 	/*
2723 	 * Set the cluster size lower than the blobstore page size;
2724 	 * to work, it must be at least twice the blobstore page size.
2725 	 */
2726 	dev = init_dev();
2727 	spdk_bs_opts_init(&opts, sizeof(opts));
2728 	opts.cluster_sz = SPDK_BS_PAGE_SIZE - 1;
2729 
2730 	/* Initialize a new blob store */
2731 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2732 	poll_threads();
2733 	CU_ASSERT(g_bserrno == -EINVAL);
2734 	SPDK_CU_ASSERT_FATAL(g_bs == NULL);
2735 
2736 	/* Set cluster size to twice the default */
2737 	dev = init_dev();
2738 	spdk_bs_opts_init(&opts, sizeof(opts));
2739 	opts.cluster_sz *= 2;
2740 	cluster_sz = opts.cluster_sz;
2741 
2742 	/* Initialize a new blob store */
2743 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2744 	poll_threads();
2745 	CU_ASSERT(g_bserrno == 0);
2746 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2747 	bs = g_bs;
2748 
2749 	CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz);
2750 
2751 	ut_bs_reload(&bs, &opts);
2752 
2753 	CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz);
2754 
2755 	spdk_bs_unload(bs, bs_op_complete, NULL);
2756 	poll_threads();
2757 	CU_ASSERT(g_bserrno == 0);
2758 	g_bs = NULL;
2759 }
2760 
2761 /*
2762  * Create a blobstore, reload it and ensure total usable cluster count
2763  *  stays the same.
2764  */
2765 static void
2766 bs_usable_clusters(void)
2767 {
2768 	struct spdk_blob_store *bs = g_bs;
2769 	struct spdk_blob *blob;
2770 	uint32_t clusters;
2771 	int i;
2772 
2773 
2774 	clusters = spdk_bs_total_data_cluster_count(bs);
2775 
2776 	ut_bs_reload(&bs, NULL);
2777 
2778 	CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters);
2779 
2780 	/* Create and resize blobs to make sure that the usable cluster count won't change */
2781 	for (i = 0; i < 4; i++) {
2782 		g_bserrno = -1;
2783 		g_blobid = SPDK_BLOBID_INVALID;
2784 		blob = ut_blob_create_and_open(bs, NULL);
2785 
2786 		spdk_blob_resize(blob, 10, blob_op_complete, NULL);
2787 		poll_threads();
2788 		CU_ASSERT(g_bserrno == 0);
2789 
2790 		g_bserrno = -1;
2791 		spdk_blob_close(blob, blob_op_complete, NULL);
2792 		poll_threads();
2793 		CU_ASSERT(g_bserrno == 0);
2794 
2795 		CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters);
2796 	}
2797 
2798 	/* Reload the blob store to make sure that nothing changed */
2799 	ut_bs_reload(&bs, NULL);
2800 
2801 	CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters);
2802 }
2803 
2804 /*
2805  * Test resizing of the metadata blob.  This requires creating enough blobs
2806  *  so that one cluster is not enough to fit the metadata for those blobs.
2807  *  To induce this condition to happen more quickly, we reduce the cluster
2808  *  size to 16KB, which means only 4 4KB blob metadata pages can fit.
2809  */
2810 static void
2811 bs_resize_md(void)
2812 {
2813 	struct spdk_blob_store *bs;
2814 	const int CLUSTER_PAGE_COUNT = 4;
2815 	const int NUM_BLOBS = CLUSTER_PAGE_COUNT * 4;
2816 	struct spdk_bs_dev *dev;
2817 	struct spdk_bs_opts opts;
2818 	struct spdk_blob *blob;
2819 	struct spdk_blob_opts blob_opts;
2820 	uint32_t cluster_sz;
2821 	spdk_blob_id blobids[NUM_BLOBS];
2822 	int i;
2823 
2824 
2825 	dev = init_dev();
2826 	spdk_bs_opts_init(&opts, sizeof(opts));
2827 	opts.cluster_sz = CLUSTER_PAGE_COUNT * 4096;
2828 	cluster_sz = opts.cluster_sz;
2829 
2830 	/* Initialize a new blob store */
2831 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2832 	poll_threads();
2833 	CU_ASSERT(g_bserrno == 0);
2834 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2835 	bs = g_bs;
2836 
2837 	CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz);
2838 
2839 	ut_spdk_blob_opts_init(&blob_opts);
2840 
2841 	for (i = 0; i < NUM_BLOBS; i++) {
2842 		g_bserrno = -1;
2843 		g_blobid = SPDK_BLOBID_INVALID;
2844 		spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
2845 		poll_threads();
2846 		CU_ASSERT(g_bserrno == 0);
2847 		CU_ASSERT(g_blobid !=  SPDK_BLOBID_INVALID);
2848 		blobids[i] = g_blobid;
2849 	}
2850 
2851 	ut_bs_reload(&bs, &opts);
2852 
2853 	CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz);
2854 
2855 	for (i = 0; i < NUM_BLOBS; i++) {
2856 		g_bserrno = -1;
2857 		g_blob = NULL;
2858 		spdk_bs_open_blob(bs, blobids[i], blob_op_with_handle_complete, NULL);
2859 		poll_threads();
2860 		CU_ASSERT(g_bserrno == 0);
2861 		CU_ASSERT(g_blob !=  NULL);
2862 		blob = g_blob;
2863 		g_bserrno = -1;
2864 		spdk_blob_close(blob, blob_op_complete, NULL);
2865 		poll_threads();
2866 		CU_ASSERT(g_bserrno == 0);
2867 	}
2868 
2869 	spdk_bs_unload(bs, bs_op_complete, NULL);
2870 	poll_threads();
2871 	CU_ASSERT(g_bserrno == 0);
2872 	g_bs = NULL;
2873 }
2874 
2875 static void
2876 bs_destroy(void)
2877 {
2878 	struct spdk_blob_store *bs;
2879 	struct spdk_bs_dev *dev;
2880 
2881 	/* Initialize a new blob store */
2882 	dev = init_dev();
2883 	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
2884 	poll_threads();
2885 	CU_ASSERT(g_bserrno == 0);
2886 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2887 	bs = g_bs;
2888 
2889 	/* Destroy the blob store */
2890 	g_bserrno = -1;
2891 	spdk_bs_destroy(bs, bs_op_complete, NULL);
2892 	poll_threads();
2893 	CU_ASSERT(g_bserrno == 0);
2894 
2895 	/* Loading a non-existent blob store should fail. */
2896 	g_bs = NULL;
2897 	dev = init_dev();
2898 
2899 	g_bserrno = 0;
2900 	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
2901 	poll_threads();
2902 	CU_ASSERT(g_bserrno != 0);
2903 }
2904 
2905 /* Try to hit all of the corner cases associated with serializing
2906  * a blob to disk
2907  */
2908 static void
2909 blob_serialize_test(void)
2910 {
2911 	struct spdk_bs_dev *dev;
2912 	struct spdk_bs_opts opts;
2913 	struct spdk_blob_store *bs;
2914 	spdk_blob_id blobid[2];
2915 	struct spdk_blob *blob[2];
2916 	uint64_t i;
2917 	char *value;
2918 	int rc;
2919 
2920 	dev = init_dev();
2921 
2922 	/* Initialize a new blobstore with very small clusters */
2923 	spdk_bs_opts_init(&opts, sizeof(opts));
2924 	opts.cluster_sz = dev->blocklen * 8;
2925 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2926 	poll_threads();
2927 	CU_ASSERT(g_bserrno == 0);
2928 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2929 	bs = g_bs;
2930 
2931 	/* Create and open two blobs */
2932 	for (i = 0; i < 2; i++) {
2933 		blob[i] = ut_blob_create_and_open(bs, NULL);
2934 		blobid[i] = spdk_blob_get_id(blob[i]);
2935 
2936 		/* Set a fairly large xattr on both blobs to eat up
2937 		 * metadata space
2938 		 */
2939 		value = calloc(dev->blocklen - 64, sizeof(char));
2940 		SPDK_CU_ASSERT_FATAL(value != NULL);
2941 		memset(value, i, dev->blocklen / 2);
2942 		rc = spdk_blob_set_xattr(blob[i], "name", value, dev->blocklen - 64);
2943 		CU_ASSERT(rc == 0);
2944 		free(value);
2945 	}
2946 
2947 	/* Resize the blobs, alternating 1 cluster at a time.
2948 	 * This thwarts run length encoding and will cause spill
2949 	 * over of the extents.
2950 	 */
2951 	for (i = 0; i < 6; i++) {
2952 		spdk_blob_resize(blob[i % 2], (i / 2) + 1, blob_op_complete, NULL);
2953 		poll_threads();
2954 		CU_ASSERT(g_bserrno == 0);
2955 	}
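	/* After the loop each blob has grown to 3 clusters.  Because the allocations
	 * alternated between the two blobs, each blob's clusters are interleaved on
	 * disk and cannot be collapsed by run-length encoding.
	 */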
2956 
2957 	for (i = 0; i < 2; i++) {
2958 		spdk_blob_sync_md(blob[i], blob_op_complete, NULL);
2959 		poll_threads();
2960 		CU_ASSERT(g_bserrno == 0);
2961 	}
2962 
2963 	/* Close the blobs */
2964 	for (i = 0; i < 2; i++) {
2965 		spdk_blob_close(blob[i], blob_op_complete, NULL);
2966 		poll_threads();
2967 		CU_ASSERT(g_bserrno == 0);
2968 	}
2969 
2970 	ut_bs_reload(&bs, &opts);
2971 
2972 	for (i = 0; i < 2; i++) {
2973 		blob[i] = NULL;
2974 
2975 		spdk_bs_open_blob(bs, blobid[i], blob_op_with_handle_complete, NULL);
2976 		poll_threads();
2977 		CU_ASSERT(g_bserrno == 0);
2978 		CU_ASSERT(g_blob != NULL);
2979 		blob[i] = g_blob;
2980 
2981 		CU_ASSERT(spdk_blob_get_num_clusters(blob[i]) == 3);
2982 
2983 		spdk_blob_close(blob[i], blob_op_complete, NULL);
2984 		poll_threads();
2985 		CU_ASSERT(g_bserrno == 0);
2986 	}
2987 
2988 	spdk_bs_unload(bs, bs_op_complete, NULL);
2989 	poll_threads();
2990 	CU_ASSERT(g_bserrno == 0);
2991 	g_bs = NULL;
2992 }
2993 
2994 static void
2995 blob_crc(void)
2996 {
2997 	struct spdk_blob_store *bs = g_bs;
2998 	struct spdk_blob *blob;
2999 	spdk_blob_id blobid;
3000 	uint32_t page_num;
3001 	int index;
3002 	struct spdk_blob_md_page *page;
3003 
3004 	blob = ut_blob_create_and_open(bs, NULL);
3005 	blobid = spdk_blob_get_id(blob);
3006 
3007 	spdk_blob_close(blob, blob_op_complete, NULL);
3008 	poll_threads();
3009 	CU_ASSERT(g_bserrno == 0);
3010 
3011 	page_num = bs_blobid_to_page(blobid);
3012 	index = DEV_BUFFER_BLOCKLEN * (bs->md_start + page_num);
3013 	page = (struct spdk_blob_md_page *)&g_dev_buffer[index];
3014 	page->crc = 0;
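	/* With the stored CRC zeroed the metadata page no longer validates, so both
	 * opening and deleting the blob are expected to fail with -EINVAL.
	 */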
3015 
3016 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
3017 	poll_threads();
3018 	CU_ASSERT(g_bserrno == -EINVAL);
3019 	CU_ASSERT(g_blob == NULL);
3020 	g_bserrno = 0;
3021 
3022 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
3023 	poll_threads();
3024 	CU_ASSERT(g_bserrno == -EINVAL);
3025 }
3026 
3027 static void
3028 super_block_crc(void)
3029 {
3030 	struct spdk_blob_store *bs;
3031 	struct spdk_bs_dev *dev;
3032 	struct spdk_bs_super_block *super_block;
3033 
3034 	dev = init_dev();
3035 	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
3036 	poll_threads();
3037 	CU_ASSERT(g_bserrno == 0);
3038 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3039 	bs = g_bs;
3040 
3041 	spdk_bs_unload(bs, bs_op_complete, NULL);
3042 	poll_threads();
3043 	CU_ASSERT(g_bserrno == 0);
3044 	g_bs = NULL;
3045 
3046 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
3047 	super_block->crc = 0;
3048 	dev = init_dev();
3049 
3050 	/* Load an existing blob store */
3051 	g_bserrno = 0;
3052 	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
3053 	poll_threads();
3054 	CU_ASSERT(g_bserrno == -EILSEQ);
3055 }
3056 
3057 /* For blob dirty shutdown test case we do the following sub-test cases:
3058  * 1 Initialize new blob store and create 1 super blob with some xattrs, then we
3059  *   dirty shutdown and reload the blob store and verify the xattrs.
3060  * 2 Resize the blob from 10 clusters to 20 clusters and then dirty shutdown,
3061  *   reload the blob store and verify the clusters number.
3062  * 3 Create the second blob and then dirty shutdown, reload the blob store
3063  *   and verify the second blob.
3064  * 4 Delete the second blob and then dirty shutdown, reload the blob store
3065  *   and verify the second blob is invalid.
3066  * 5 Create the second blob again and also create the third blob, corrupt the
3067  *   md of the second blob so that it becomes invalid, and then dirty shutdown,
3068  *   reload the blob store and verify that the second blob is invalid while the
3069  *   third blob is still correct.
3070  */
3071 static void
3072 blob_dirty_shutdown(void)
3073 {
3074 	int rc;
3075 	int index;
3076 	struct spdk_blob_store *bs = g_bs;
3077 	spdk_blob_id blobid1, blobid2, blobid3;
3078 	struct spdk_blob *blob = g_blob;
3079 	uint64_t length;
3080 	uint64_t free_clusters;
3081 	const void *value;
3082 	size_t value_len;
3083 	uint32_t page_num;
3084 	struct spdk_blob_md_page *page;
3085 	struct spdk_blob_opts blob_opts;
3086 
3087 	/* Create first blob */
3088 	blobid1 = spdk_blob_get_id(blob);
3089 
3090 	/* Set some xattrs */
3091 	rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
3092 	CU_ASSERT(rc == 0);
3093 
3094 	length = 2345;
3095 	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
3096 	CU_ASSERT(rc == 0);
3097 
3098 	/* Put an xattr that fits exactly in a single page.
3099 	 * This results in additional metadata pages: the first holds the flags and
3100 	 * the smaller xattrs, the second the large xattr, and the third just the
3101 	 * extents.
3102 	 */
3103 	size_t xattr_length = 4072 - sizeof(struct spdk_blob_md_descriptor_xattr) -
3104 			      strlen("large_xattr");
3105 	char *xattr = calloc(xattr_length, sizeof(char));
3106 	SPDK_CU_ASSERT_FATAL(xattr != NULL);
3107 	rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
3108 	free(xattr);
3109 	SPDK_CU_ASSERT_FATAL(rc == 0);
3110 
3111 	/* Resize the blob */
3112 	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
3113 	poll_threads();
3114 	CU_ASSERT(g_bserrno == 0);
3115 
3116 	/* Set the blob as the super blob */
3117 	spdk_bs_set_super(bs, blobid1, blob_op_complete, NULL);
3118 	poll_threads();
3119 	CU_ASSERT(g_bserrno == 0);
3120 
3121 	free_clusters = spdk_bs_free_cluster_count(bs);
3122 
3123 	spdk_blob_close(blob, blob_op_complete, NULL);
3124 	poll_threads();
3125 	CU_ASSERT(g_bserrno == 0);
3126 	blob = NULL;
3127 	g_blob = NULL;
3128 	g_blobid = SPDK_BLOBID_INVALID;
3129 
3130 	ut_bs_dirty_load(&bs, NULL);
3131 
3132 	/* Get the super blob */
3133 	spdk_bs_get_super(bs, blob_op_with_id_complete, NULL);
3134 	poll_threads();
3135 	CU_ASSERT(g_bserrno == 0);
3136 	CU_ASSERT(blobid1 == g_blobid);
3137 
3138 	spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL);
3139 	poll_threads();
3140 	CU_ASSERT(g_bserrno == 0);
3141 	CU_ASSERT(g_blob != NULL);
3142 	blob = g_blob;
3143 
3144 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3145 
3146 	/* Get the xattrs */
3147 	value = NULL;
3148 	rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len);
3149 	CU_ASSERT(rc == 0);
3150 	SPDK_CU_ASSERT_FATAL(value != NULL);
3151 	CU_ASSERT(*(uint64_t *)value == length);
3152 	CU_ASSERT(value_len == 8);
3153 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
3154 
3155 	/* Resize the blob */
3156 	spdk_blob_resize(blob, 20, blob_op_complete, NULL);
3157 	poll_threads();
3158 	CU_ASSERT(g_bserrno == 0);
3159 
3160 	free_clusters = spdk_bs_free_cluster_count(bs);
3161 
3162 	spdk_blob_close(blob, blob_op_complete, NULL);
3163 	poll_threads();
3164 	CU_ASSERT(g_bserrno == 0);
3165 	blob = NULL;
3166 	g_blob = NULL;
3167 	g_blobid = SPDK_BLOBID_INVALID;
3168 
3169 	ut_bs_dirty_load(&bs, NULL);
3170 
3171 	spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL);
3172 	poll_threads();
3173 	CU_ASSERT(g_bserrno == 0);
3174 	CU_ASSERT(g_blob != NULL);
3175 	blob = g_blob;
3176 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 20);
3177 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3178 
3179 	spdk_blob_close(blob, blob_op_complete, NULL);
3180 	poll_threads();
3181 	CU_ASSERT(g_bserrno == 0);
3182 	blob = NULL;
3183 	g_blob = NULL;
3184 	g_blobid = SPDK_BLOBID_INVALID;
3185 
3186 	/* Create second blob */
3187 	blob = ut_blob_create_and_open(bs, NULL);
3188 	blobid2 = spdk_blob_get_id(blob);
3189 
3190 	/* Set some xattrs */
3191 	rc = spdk_blob_set_xattr(blob, "name", "log1.txt", strlen("log1.txt") + 1);
3192 	CU_ASSERT(rc == 0);
3193 
3194 	length = 5432;
3195 	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
3196 	CU_ASSERT(rc == 0);
3197 
3198 	/* Resize the blob */
3199 	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
3200 	poll_threads();
3201 	CU_ASSERT(g_bserrno == 0);
3202 
3203 	free_clusters = spdk_bs_free_cluster_count(bs);
3204 
3205 	spdk_blob_close(blob, blob_op_complete, NULL);
3206 	poll_threads();
3207 	CU_ASSERT(g_bserrno == 0);
3208 	blob = NULL;
3209 	g_blob = NULL;
3210 	g_blobid = SPDK_BLOBID_INVALID;
3211 
3212 	ut_bs_dirty_load(&bs, NULL);
3213 
3214 	spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL);
3215 	poll_threads();
3216 	CU_ASSERT(g_bserrno == 0);
3217 	CU_ASSERT(g_blob != NULL);
3218 	blob = g_blob;
3219 
3220 	/* Get the xattrs */
3221 	value = NULL;
3222 	rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len);
3223 	CU_ASSERT(rc == 0);
3224 	SPDK_CU_ASSERT_FATAL(value != NULL);
3225 	CU_ASSERT(*(uint64_t *)value == length);
3226 	CU_ASSERT(value_len == 8);
3227 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
3228 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3229 
3230 	ut_blob_close_and_delete(bs, blob);
3231 
3232 	free_clusters = spdk_bs_free_cluster_count(bs);
3233 
3234 	ut_bs_dirty_load(&bs, NULL);
3235 
3236 	spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL);
3237 	poll_threads();
3238 	CU_ASSERT(g_bserrno != 0);
3239 	CU_ASSERT(g_blob == NULL);
3240 
3241 	spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL);
3242 	poll_threads();
3243 	CU_ASSERT(g_bserrno == 0);
3244 	CU_ASSERT(g_blob != NULL);
3245 	blob = g_blob;
3246 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3247 	spdk_blob_close(blob, blob_op_complete, NULL);
3248 	poll_threads();
3249 	CU_ASSERT(g_bserrno == 0);
3250 
3251 	ut_bs_reload(&bs, NULL);
3252 
3253 	/* Create second blob */
3254 	ut_spdk_blob_opts_init(&blob_opts);
3255 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
3256 	poll_threads();
3257 	CU_ASSERT(g_bserrno == 0);
3258 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
3259 	blobid2 = g_blobid;
3260 
3261 	/* Create third blob */
3262 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
3263 	poll_threads();
3264 	CU_ASSERT(g_bserrno == 0);
3265 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
3266 	blobid3 = g_blobid;
3267 
3268 	spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL);
3269 	poll_threads();
3270 	CU_ASSERT(g_bserrno == 0);
3271 	CU_ASSERT(g_blob != NULL);
3272 	blob = g_blob;
3273 
3274 	/* Set some xattrs for second blob */
3275 	rc = spdk_blob_set_xattr(blob, "name", "log1.txt", strlen("log1.txt") + 1);
3276 	CU_ASSERT(rc == 0);
3277 
3278 	length = 5432;
3279 	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
3280 	CU_ASSERT(rc == 0);
3281 
3282 	spdk_blob_close(blob, blob_op_complete, NULL);
3283 	poll_threads();
3284 	CU_ASSERT(g_bserrno == 0);
3285 	blob = NULL;
3286 	g_blob = NULL;
3287 	g_blobid = SPDK_BLOBID_INVALID;
3288 
3289 	spdk_bs_open_blob(bs, blobid3, blob_op_with_handle_complete, NULL);
3290 	poll_threads();
3291 	CU_ASSERT(g_bserrno == 0);
3292 	CU_ASSERT(g_blob != NULL);
3293 	blob = g_blob;
3294 
3295 	/* Set some xattrs for third blob */
3296 	rc = spdk_blob_set_xattr(blob, "name", "log2.txt", strlen("log2.txt") + 1);
3297 	CU_ASSERT(rc == 0);
3298 
3299 	length = 5432;
3300 	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
3301 	CU_ASSERT(rc == 0);
3302 
3303 	spdk_blob_close(blob, blob_op_complete, NULL);
3304 	poll_threads();
3305 	CU_ASSERT(g_bserrno == 0);
3306 	blob = NULL;
3307 	g_blob = NULL;
3308 	g_blobid = SPDK_BLOBID_INVALID;
3309 
3310 	/* Mark second blob as invalid */
3311 	page_num = bs_blobid_to_page(blobid2);
3312 
3313 	index = DEV_BUFFER_BLOCKLEN * (bs->md_start + page_num);
3314 	page = (struct spdk_blob_md_page *)&g_dev_buffer[index];
3315 	page->sequence_num = 1;
3316 	page->crc = blob_md_page_calc_crc(page);
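	/* Bumping sequence_num to 1 (with a matching CRC) makes this page look like a
	 * continuation page rather than the first page of the blob's metadata chain,
	 * so recovery after the dirty shutdown should treat the second blob as invalid
	 * while leaving the third blob intact.
	 */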
3317 
3318 	free_clusters = spdk_bs_free_cluster_count(bs);
3319 
3320 	ut_bs_dirty_load(&bs, NULL);
3321 
3322 	spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL);
3323 	poll_threads();
3324 	CU_ASSERT(g_bserrno != 0);
3325 	CU_ASSERT(g_blob == NULL);
3326 
3327 	spdk_bs_open_blob(bs, blobid3, blob_op_with_handle_complete, NULL);
3328 	poll_threads();
3329 	CU_ASSERT(g_bserrno == 0);
3330 	CU_ASSERT(g_blob != NULL);
3331 	blob = g_blob;
3332 
3333 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3334 }
3335 
3336 static void
3337 blob_flags(void)
3338 {
3339 	struct spdk_blob_store *bs = g_bs;
3340 	spdk_blob_id blobid_invalid, blobid_data_ro, blobid_md_ro;
3341 	struct spdk_blob *blob_invalid, *blob_data_ro, *blob_md_ro;
3342 	struct spdk_blob_opts blob_opts;
3343 	int rc;
3344 
3345 	/* Create three blobs - one each for testing invalid, data_ro and md_ro flags. */
3346 	blob_invalid = ut_blob_create_and_open(bs, NULL);
3347 	blobid_invalid = spdk_blob_get_id(blob_invalid);
3348 
3349 	blob_data_ro = ut_blob_create_and_open(bs, NULL);
3350 	blobid_data_ro = spdk_blob_get_id(blob_data_ro);
3351 
3352 	ut_spdk_blob_opts_init(&blob_opts);
3353 	blob_opts.clear_method = BLOB_CLEAR_WITH_WRITE_ZEROES;
3354 	blob_md_ro = ut_blob_create_and_open(bs, &blob_opts);
3355 	blobid_md_ro = spdk_blob_get_id(blob_md_ro);
3356 	CU_ASSERT((blob_md_ro->md_ro_flags & SPDK_BLOB_MD_RO_FLAGS_MASK) == BLOB_CLEAR_WITH_WRITE_ZEROES);
3357 
3358 	/* Change the size of blob_data_ro to check if flags are serialized
3359 	 * when the blob has a non-zero number of extents */
3360 	spdk_blob_resize(blob_data_ro, 10, blob_op_complete, NULL);
3361 	poll_threads();
3362 	CU_ASSERT(g_bserrno == 0);
3363 
3364 	/* Set the xattr to check if flags are serialized
3365 	 * when the blob has a non-zero number of xattrs */
3366 	rc = spdk_blob_set_xattr(blob_md_ro, "name", "log.txt", strlen("log.txt") + 1);
3367 	CU_ASSERT(rc == 0);
3368 
3369 	blob_invalid->invalid_flags = (1ULL << 63);
3370 	blob_invalid->state = SPDK_BLOB_STATE_DIRTY;
3371 	blob_data_ro->data_ro_flags = (1ULL << 62);
3372 	blob_data_ro->state = SPDK_BLOB_STATE_DIRTY;
3373 	blob_md_ro->md_ro_flags = (1ULL << 61);
3374 	blob_md_ro->state = SPDK_BLOB_STATE_DIRTY;
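	/* Bits 63, 62 and 61 are not part of any defined flag mask.  After the reload
	 * below, a blob with an unknown invalid flag must refuse to open, an unknown
	 * data_ro flag must force the blob fully read-only, and an unknown md_ro flag
	 * must at least make the metadata read-only.
	 */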
3375 
3376 	g_bserrno = -1;
3377 	spdk_blob_sync_md(blob_invalid, blob_op_complete, NULL);
3378 	poll_threads();
3379 	CU_ASSERT(g_bserrno == 0);
3380 	g_bserrno = -1;
3381 	spdk_blob_sync_md(blob_data_ro, blob_op_complete, NULL);
3382 	poll_threads();
3383 	CU_ASSERT(g_bserrno == 0);
3384 	g_bserrno = -1;
3385 	spdk_blob_sync_md(blob_md_ro, blob_op_complete, NULL);
3386 	poll_threads();
3387 	CU_ASSERT(g_bserrno == 0);
3388 
3389 	g_bserrno = -1;
3390 	spdk_blob_close(blob_invalid, blob_op_complete, NULL);
3391 	poll_threads();
3392 	CU_ASSERT(g_bserrno == 0);
3393 	blob_invalid = NULL;
3394 	g_bserrno = -1;
3395 	spdk_blob_close(blob_data_ro, blob_op_complete, NULL);
3396 	poll_threads();
3397 	CU_ASSERT(g_bserrno == 0);
3398 	blob_data_ro = NULL;
3399 	g_bserrno = -1;
3400 	spdk_blob_close(blob_md_ro, blob_op_complete, NULL);
3401 	poll_threads();
3402 	CU_ASSERT(g_bserrno == 0);
3403 	blob_md_ro = NULL;
3404 
3405 	g_blob = NULL;
3406 	g_blobid = SPDK_BLOBID_INVALID;
3407 
3408 	ut_bs_reload(&bs, NULL);
3409 
3410 	g_blob = NULL;
3411 	g_bserrno = 0;
3412 	spdk_bs_open_blob(bs, blobid_invalid, blob_op_with_handle_complete, NULL);
3413 	poll_threads();
3414 	CU_ASSERT(g_bserrno != 0);
3415 	CU_ASSERT(g_blob == NULL);
3416 
3417 	g_blob = NULL;
3418 	g_bserrno = -1;
3419 	spdk_bs_open_blob(bs, blobid_data_ro, blob_op_with_handle_complete, NULL);
3420 	poll_threads();
3421 	CU_ASSERT(g_bserrno == 0);
3422 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
3423 	blob_data_ro = g_blob;
3424 	/* If an unknown data_ro flag was found, the blob should be marked both data and md read-only. */
3425 	CU_ASSERT(blob_data_ro->data_ro == true);
3426 	CU_ASSERT(blob_data_ro->md_ro == true);
3427 	CU_ASSERT(spdk_blob_get_num_clusters(blob_data_ro) == 10);
3428 
3429 	g_blob = NULL;
3430 	g_bserrno = -1;
3431 	spdk_bs_open_blob(bs, blobid_md_ro, blob_op_with_handle_complete, NULL);
3432 	poll_threads();
3433 	CU_ASSERT(g_bserrno == 0);
3434 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
3435 	blob_md_ro = g_blob;
3436 	CU_ASSERT(blob_md_ro->data_ro == false);
3437 	CU_ASSERT(blob_md_ro->md_ro == true);
3438 
3439 	g_bserrno = -1;
3440 	spdk_blob_sync_md(blob_md_ro, blob_op_complete, NULL);
3441 	poll_threads();
3442 	CU_ASSERT(g_bserrno == 0);
3443 
3444 	ut_blob_close_and_delete(bs, blob_data_ro);
3445 	ut_blob_close_and_delete(bs, blob_md_ro);
3446 }
3447 
3448 static void
3449 bs_version(void)
3450 {
3451 	struct spdk_bs_super_block *super;
3452 	struct spdk_blob_store *bs = g_bs;
3453 	struct spdk_bs_dev *dev;
3454 	struct spdk_blob *blob;
3455 	struct spdk_blob_opts blob_opts;
3456 	spdk_blob_id blobid;
3457 
3458 	/* Unload the blob store */
3459 	spdk_bs_unload(bs, bs_op_complete, NULL);
3460 	poll_threads();
3461 	CU_ASSERT(g_bserrno == 0);
3462 	g_bs = NULL;
3463 
3464 	/*
3465 	 * Change the bs version on disk.  This will allow us to
3466 	 *  test that the version does not get modified automatically
3467 	 *  when loading and unloading the blobstore.
3468 	 */
3469 	super = (struct spdk_bs_super_block *)&g_dev_buffer[0];
3470 	CU_ASSERT(super->version == SPDK_BS_VERSION);
3471 	CU_ASSERT(super->clean == 1);
3472 	super->version = 2;
3473 	/*
3474 	 * Version 2 metadata does not have a used blobid mask, so clear
3475 	 *  those fields in the super block and zero the corresponding
3476 	 *  region on "disk".  We will use this to ensure blob IDs are
3477 	 *  correctly reconstructed.
3478 	 */
3479 	memset(&g_dev_buffer[super->used_blobid_mask_start * SPDK_BS_PAGE_SIZE], 0,
3480 	       super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE);
3481 	super->used_blobid_mask_start = 0;
3482 	super->used_blobid_mask_len = 0;
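	/* The super block is protected by a CRC over the whole page; recompute it after
	 * the edits above, otherwise the load below would reject the super block as
	 * corrupted.
	 */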
3483 	super->crc = blob_md_page_calc_crc(super);
3484 
3485 	/* Load an existing blob store */
3486 	dev = init_dev();
3487 	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
3488 	poll_threads();
3489 	CU_ASSERT(g_bserrno == 0);
3490 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3491 	CU_ASSERT(super->clean == 1);
3492 	bs = g_bs;
3493 
3494 	/*
3495 	 * Create a blob - just to make sure that unloading it
3496 	 *  results in writing the super block (since metadata pages
3497 	 *  were allocated).
3498 	 */
3499 	ut_spdk_blob_opts_init(&blob_opts);
3500 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
3501 	poll_threads();
3502 	CU_ASSERT(g_bserrno == 0);
3503 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
3504 	blobid = g_blobid;
3505 
3506 	/* Unload the blob store */
3507 	spdk_bs_unload(bs, bs_op_complete, NULL);
3508 	poll_threads();
3509 	CU_ASSERT(g_bserrno == 0);
3510 	g_bs = NULL;
3511 	CU_ASSERT(super->version == 2);
3512 	CU_ASSERT(super->used_blobid_mask_start == 0);
3513 	CU_ASSERT(super->used_blobid_mask_len == 0);
3514 
3515 	dev = init_dev();
3516 	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
3517 	poll_threads();
3518 	CU_ASSERT(g_bserrno == 0);
3519 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3520 	bs = g_bs;
3521 
3522 	g_blob = NULL;
3523 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
3524 	poll_threads();
3525 	CU_ASSERT(g_bserrno == 0);
3526 	CU_ASSERT(g_blob != NULL);
3527 	blob = g_blob;
3528 
3529 	ut_blob_close_and_delete(bs, blob);
3530 
3531 	CU_ASSERT(super->version == 2);
3532 	CU_ASSERT(super->used_blobid_mask_start == 0);
3533 	CU_ASSERT(super->used_blobid_mask_len == 0);
3534 }
3535 
3536 static void
3537 blob_set_xattrs_test(void)
3538 {
3539 	struct spdk_blob_store *bs = g_bs;
3540 	struct spdk_blob *blob;
3541 	struct spdk_blob_opts opts;
3542 	const void *value;
3543 	size_t value_len;
3544 	char *xattr;
3545 	size_t xattr_length;
3546 	int rc;
3547 
3548 	/* Create blob with extra attributes */
3549 	ut_spdk_blob_opts_init(&opts);
3550 
3551 	opts.xattrs.names = g_xattr_names;
3552 	opts.xattrs.get_value = _get_xattr_value;
3553 	opts.xattrs.count = 3;
3554 	opts.xattrs.ctx = &g_ctx;
3555 
3556 	blob = ut_blob_create_and_open(bs, &opts);
3557 
3558 	/* Get the xattrs */
3559 	value = NULL;
3560 
3561 	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len);
3562 	CU_ASSERT(rc == 0);
3563 	SPDK_CU_ASSERT_FATAL(value != NULL);
3564 	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
3565 	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);
3566 
3567 	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len);
3568 	CU_ASSERT(rc == 0);
3569 	SPDK_CU_ASSERT_FATAL(value != NULL);
3570 	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
3571 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);
3572 
3573 	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len);
3574 	CU_ASSERT(rc == 0);
3575 	SPDK_CU_ASSERT_FATAL(value != NULL);
3576 	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
3577 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);
3578 
3579 	/* Try to get non existing attribute */
3580 	/* Try to get a non-existing attribute */
3581 	rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len);
3582 	CU_ASSERT(rc == -ENOENT);
3583 
3584 	/* Try xattr exceeding maximum length of descriptor in single page */
3585 	/* Try an xattr exceeding the maximum length of a descriptor in a single page */
3586 		       strlen("large_xattr") + 1;
3587 	xattr = calloc(xattr_length, sizeof(char));
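	/* A single xattr descriptor must fit in one metadata page: its size is
	 * sizeof(struct spdk_blob_md_descriptor_xattr) + name length + value length.
	 * Solving for the value length at SPDK_BS_MAX_DESC_SIZE and adding 1 makes the
	 * value one byte too large, so the set below is expected to fail with -ENOMEM.
	 */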
3588 	SPDK_CU_ASSERT_FATAL(xattr != NULL);
3589 	rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
3590 	free(xattr);
3591 	SPDK_CU_ASSERT_FATAL(rc == -ENOMEM);
3592 
3593 	spdk_blob_close(blob, blob_op_complete, NULL);
3594 	poll_threads();
3595 	CU_ASSERT(g_bserrno == 0);
3596 	blob = NULL;
3597 	g_blob = NULL;
3598 	g_blobid = SPDK_BLOBID_INVALID;
3599 
3600 	/* NULL callback */
3601 	ut_spdk_blob_opts_init(&opts);
3602 	opts.xattrs.names = g_xattr_names;
3603 	opts.xattrs.get_value = NULL;
3604 	opts.xattrs.count = 1;
3605 	opts.xattrs.ctx = &g_ctx;
3606 
3607 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
3608 	poll_threads();
3609 	CU_ASSERT(g_bserrno == -EINVAL);
3610 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
3611 
3612 	/* NULL values */
3613 	ut_spdk_blob_opts_init(&opts);
3614 	opts.xattrs.names = g_xattr_names;
3615 	opts.xattrs.get_value = _get_xattr_value_null;
3616 	opts.xattrs.count = 1;
3617 	opts.xattrs.ctx = NULL;
3618 
3619 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
3620 	poll_threads();
3621 	CU_ASSERT(g_bserrno == -EINVAL);
3622 }
3623 
3624 static void
3625 blob_thin_prov_alloc(void)
3626 {
3627 	struct spdk_blob_store *bs = g_bs;
3628 	struct spdk_blob *blob;
3629 	struct spdk_blob_opts opts;
3630 	spdk_blob_id blobid;
3631 	uint64_t free_clusters;
3632 
3633 	free_clusters = spdk_bs_free_cluster_count(bs);
3634 
3635 	/* Set blob as thin provisioned */
3636 	ut_spdk_blob_opts_init(&opts);
3637 	opts.thin_provision = true;
3638 
3639 	blob = ut_blob_create_and_open(bs, &opts);
3640 	blobid = spdk_blob_get_id(blob);
3641 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3642 
3643 	CU_ASSERT(blob->active.num_clusters == 0);
3644 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
3645 
3646 	/* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */
3647 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
3648 	poll_threads();
3649 	CU_ASSERT(g_bserrno == 0);
3650 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3651 	CU_ASSERT(blob->active.num_clusters == 5);
3652 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
3653 
3654 	/* Grow it to 1TB - still unallocated */
3655 	spdk_blob_resize(blob, 262144, blob_op_complete, NULL);
3656 	poll_threads();
3657 	CU_ASSERT(g_bserrno == 0);
3658 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3659 	CU_ASSERT(blob->active.num_clusters == 262144);
3660 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 262144);
3661 
3662 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
3663 	poll_threads();
3664 	CU_ASSERT(g_bserrno == 0);
3665 	/* Sync must not change anything */
3666 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3667 	CU_ASSERT(blob->active.num_clusters == 262144);
3668 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 262144);
3669 	/* Since no clusters are allocated,
3670 	 * the number of metadata pages is expected to be minimal.
3671 	 */
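	/* Unallocated cluster runs are serialized compactly (run-length style) rather
	 * than one entry per cluster, so even a 262144-cluster thin blob still fits in
	 * a single metadata page.
	 */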
3672 	CU_ASSERT(blob->active.num_pages == 1);
3673 
3674 	/* Shrink the blob to 3 clusters - still unallocated */
3675 	spdk_blob_resize(blob, 3, blob_op_complete, NULL);
3676 	poll_threads();
3677 	CU_ASSERT(g_bserrno == 0);
3678 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3679 	CU_ASSERT(blob->active.num_clusters == 3);
3680 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 3);
3681 
3682 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
3683 	poll_threads();
3684 	CU_ASSERT(g_bserrno == 0);
3685 	/* Sync must not change anything */
3686 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3687 	CU_ASSERT(blob->active.num_clusters == 3);
3688 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 3);
3689 
3690 	spdk_blob_close(blob, blob_op_complete, NULL);
3691 	poll_threads();
3692 	CU_ASSERT(g_bserrno == 0);
3693 
3694 	ut_bs_reload(&bs, NULL);
3695 
3696 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
3697 	poll_threads();
3698 	CU_ASSERT(g_bserrno == 0);
3699 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
3700 	blob = g_blob;
3701 
3702 	/* Check that clusters allocation and size is still the same */
3703 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3704 	CU_ASSERT(blob->active.num_clusters == 3);
3705 
3706 	ut_blob_close_and_delete(bs, blob);
3707 }
3708 
3709 static void
3710 blob_insert_cluster_msg_test(void)
3711 {
3712 	struct spdk_blob_store *bs = g_bs;
3713 	struct spdk_blob *blob;
3714 	struct spdk_blob_opts opts;
3715 	spdk_blob_id blobid;
3716 	uint64_t free_clusters;
3717 	uint64_t new_cluster = 0;
3718 	uint32_t cluster_num = 3;
3719 	uint32_t extent_page = 0;
3720 
3721 	free_clusters = spdk_bs_free_cluster_count(bs);
3722 
3723 	/* Set blob as thin provisioned */
3724 	ut_spdk_blob_opts_init(&opts);
3725 	opts.thin_provision = true;
3726 	opts.num_clusters = 4;
3727 
3728 	blob = ut_blob_create_and_open(bs, &opts);
3729 	blobid = spdk_blob_get_id(blob);
3730 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3731 
3732 	CU_ASSERT(blob->active.num_clusters == 4);
3733 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 4);
3734 	CU_ASSERT(blob->active.clusters[cluster_num] == 0);
3735 
3736 	/* Specify cluster_num to allocate; new_cluster will be returned to insert on the md_thread.
3737 	 * This simulates the behaviour when a cluster is allocated after blob creation,
3738 	 * such as in _spdk_bs_allocate_and_copy_cluster(). */
3739 	bs_allocate_cluster(blob, cluster_num, &new_cluster, &extent_page, false);
3740 	CU_ASSERT(blob->active.clusters[cluster_num] == 0);
3741 
3742 	blob_insert_cluster_on_md_thread(blob, cluster_num, new_cluster, extent_page,
3743 					 blob_op_complete, NULL);
3744 	poll_threads();
3745 
3746 	CU_ASSERT(blob->active.clusters[cluster_num] != 0);
3747 
3748 	spdk_blob_close(blob, blob_op_complete, NULL);
3749 	poll_threads();
3750 	CU_ASSERT(g_bserrno == 0);
3751 
3752 	ut_bs_reload(&bs, NULL);
3753 
3754 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
3755 	poll_threads();
3756 	CU_ASSERT(g_bserrno == 0);
3757 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
3758 	blob = g_blob;
3759 
3760 	CU_ASSERT(blob->active.clusters[cluster_num] != 0);
3761 
3762 	ut_blob_close_and_delete(bs, blob);
3763 }
3764 
3765 static void
3766 blob_thin_prov_rw(void)
3767 {
3768 	static const uint8_t zero[10 * 4096] = { 0 };
3769 	struct spdk_blob_store *bs = g_bs;
3770 	struct spdk_blob *blob, *blob_id0;
3771 	struct spdk_io_channel *channel, *channel_thread1;
3772 	struct spdk_blob_opts opts;
3773 	uint64_t free_clusters;
3774 	uint64_t page_size;
3775 	uint8_t payload_read[10 * 4096];
3776 	uint8_t payload_write[10 * 4096];
3777 	uint64_t write_bytes;
3778 	uint64_t read_bytes;
3779 
3780 	free_clusters = spdk_bs_free_cluster_count(bs);
3781 	page_size = spdk_bs_get_page_size(bs);
3782 
3783 	channel = spdk_bs_alloc_io_channel(bs);
3784 	CU_ASSERT(channel != NULL);
3785 
3786 	ut_spdk_blob_opts_init(&opts);
3787 	opts.thin_provision = true;
3788 
3789 	/* Create and delete a blob at md page 0, so that the next md page allocation
3790 	 * for an extent will reuse it. */
3791 	blob_id0 = ut_blob_create_and_open(bs, &opts);
3792 	blob = ut_blob_create_and_open(bs, &opts);
3793 	ut_blob_close_and_delete(bs, blob_id0);
3794 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3795 
3796 	CU_ASSERT(blob->active.num_clusters == 0);
3797 
3798 	/* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */
3799 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
3800 	poll_threads();
3801 	CU_ASSERT(g_bserrno == 0);
3802 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3803 	CU_ASSERT(blob->active.num_clusters == 5);
3804 
3805 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
3806 	poll_threads();
3807 	CU_ASSERT(g_bserrno == 0);
3808 	/* Sync must not change anything */
3809 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3810 	CU_ASSERT(blob->active.num_clusters == 5);
3811 
3812 	/* Payload should be all zeros from unallocated clusters */
3813 	memset(payload_read, 0xFF, sizeof(payload_read));
3814 	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
3815 	poll_threads();
3816 	CU_ASSERT(g_bserrno == 0);
3817 	CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0);
3818 
3819 	write_bytes = g_dev_write_bytes;
3820 	read_bytes = g_dev_read_bytes;
3821 
3822 	/* Perform write on thread 1. That will allocate a cluster on thread 0 via send_msg */
3823 	set_thread(1);
3824 	channel_thread1 = spdk_bs_alloc_io_channel(bs);
3825 	CU_ASSERT(channel_thread1 != NULL);
3826 	memset(payload_write, 0xE5, sizeof(payload_write));
3827 	spdk_blob_io_write(blob, channel_thread1, payload_write, 4, 10, blob_op_complete, NULL);
3828 	CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs));
3829 	/* Perform write on thread 0. That will try to allocate a cluster,
3830 	 * but fail due to another thread issuing the cluster allocation first. */
3831 	set_thread(0);
3832 	memset(payload_write, 0xE5, sizeof(payload_write));
3833 	spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
3834 	CU_ASSERT(free_clusters - 2 == spdk_bs_free_cluster_count(bs));
3835 	poll_threads();
3836 	CU_ASSERT(g_bserrno == 0);
3837 	CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs));
3838 	/* For a thin-provisioned blob we need to write 20 pages of payload plus one
3839 	 * metadata page and read 0 bytes */
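	/* Breakdown: both writes above cover 10 pages and land in the same (first)
	 * cluster, so 20 pages of payload are written in total; the winning cluster
	 * allocation adds one metadata page write, and in extent table mode the newly
	 * referenced extent page adds one more.  Nothing needs to be read, since the
	 * cluster was previously unallocated.
	 */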
3840 	if (g_use_extent_table) {
3841 		/* Add one more page for EXTENT_PAGE write */
3842 		CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 22);
3843 	} else {
3844 		CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 21);
3845 	}
3846 	CU_ASSERT(g_dev_read_bytes - read_bytes == 0);
3847 
3848 	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
3849 	poll_threads();
3850 	CU_ASSERT(g_bserrno == 0);
3851 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
3852 
3853 	ut_blob_close_and_delete(bs, blob);
3854 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3855 
3856 	set_thread(1);
3857 	spdk_bs_free_io_channel(channel_thread1);
3858 	set_thread(0);
3859 	spdk_bs_free_io_channel(channel);
3860 	poll_threads();
3861 	g_blob = NULL;
3862 	g_blobid = 0;
3863 }
3864 
3865 static void
3866 blob_thin_prov_write_count_io(void)
3867 {
3868 	struct spdk_blob_store *bs;
3869 	struct spdk_blob *blob;
3870 	struct spdk_io_channel *ch;
3871 	struct spdk_bs_dev *dev;
3872 	struct spdk_bs_opts bs_opts;
3873 	struct spdk_blob_opts opts;
3874 	uint64_t free_clusters;
3875 	uint64_t page_size;
3876 	uint8_t payload_write[4096];
3877 	uint64_t write_bytes;
3878 	uint64_t read_bytes;
3879 	const uint32_t CLUSTER_SZ = 16384;
3880 	uint32_t pages_per_cluster;
3881 	uint32_t pages_per_extent_page;
3882 	uint32_t i;
3883 
3884 	/* Use a very small cluster size for this test.  This ensures we need multiple
3885 	 * extent pages to hold all of the clusters even for the relatively small blobs
3886 	 * we are restricted to in the unit tests (i.e. we don't want to allocate multi-GB
3887 	 * buffers).
3888 	 */
3889 	dev = init_dev();
3890 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
3891 	bs_opts.cluster_sz = CLUSTER_SZ;
3892 
3893 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
3894 	poll_threads();
3895 	CU_ASSERT(g_bserrno == 0);
3896 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3897 	bs = g_bs;
3898 
3899 	free_clusters = spdk_bs_free_cluster_count(bs);
3900 	page_size = spdk_bs_get_page_size(bs);
3901 	pages_per_cluster = CLUSTER_SZ / page_size;
3902 	pages_per_extent_page = SPDK_EXTENTS_PER_EP * pages_per_cluster;
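	/* One extent page tracks SPDK_EXTENTS_PER_EP clusters, so pages_per_extent_page
	 * is the stride, in pages, between clusters tracked by consecutive extent pages.
	 * Writing at multiples of it below guarantees each loop iteration starts a brand
	 * new extent page.
	 */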
3903 
3904 	ch = spdk_bs_alloc_io_channel(bs);
3905 	SPDK_CU_ASSERT_FATAL(ch != NULL);
3906 
3907 	ut_spdk_blob_opts_init(&opts);
3908 	opts.thin_provision = true;
3909 
3910 	blob = ut_blob_create_and_open(bs, &opts);
3911 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3912 
3913 	/* Resize the blob so that it will require 8 extent pages to hold all of
3914 	 * the clusters.
3915 	 */
3916 	g_bserrno = -1;
3917 	spdk_blob_resize(blob, SPDK_EXTENTS_PER_EP * 8, blob_op_complete, NULL);
3918 	poll_threads();
3919 	CU_ASSERT(g_bserrno == 0);
3920 
3921 	g_bserrno = -1;
3922 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
3923 	poll_threads();
3924 	CU_ASSERT(g_bserrno == 0);
3925 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3926 	CU_ASSERT(blob->active.num_clusters == SPDK_EXTENTS_PER_EP * 8);
3927 
3928 	memset(payload_write, 0, sizeof(payload_write));
3929 	for (i = 0; i < 8; i++) {
3930 		write_bytes = g_dev_write_bytes;
3931 		read_bytes = g_dev_read_bytes;
3932 
3933 		g_bserrno = -1;
3934 		spdk_blob_io_write(blob, ch, payload_write, pages_per_extent_page * i, 1, blob_op_complete, NULL);
3935 		poll_threads();
3936 		CU_ASSERT(g_bserrno == 0);
3937 		CU_ASSERT(free_clusters - (2 * i + 1) == spdk_bs_free_cluster_count(bs));
3938 
3939 		CU_ASSERT(g_dev_read_bytes == read_bytes);
3940 		if (!g_use_extent_table) {
3941 			/* For legacy metadata, we should have written two pages - one for the
3942 			 * write I/O itself, another for the blob's primary metadata.
3943 			 */
3944 			CU_ASSERT((g_dev_write_bytes - write_bytes) / page_size == 2);
3945 		} else {
3946 			/* For extent table metadata, we should have written three pages - one
3947 			 * for the write I/O, one for the extent page, one for the blob's primary
3948 			 * metadata.
3949 			 */
3950 			CU_ASSERT((g_dev_write_bytes - write_bytes) / page_size == 3);
3951 		}
3952 
3953 		/* The write should have synced the metadata already.  Do another sync here
3954 		 * just to confirm.
3955 		 */
3956 		write_bytes = g_dev_write_bytes;
3957 		read_bytes = g_dev_read_bytes;
3958 
3959 		g_bserrno = -1;
3960 		spdk_blob_sync_md(blob, blob_op_complete, NULL);
3961 		poll_threads();
3962 		CU_ASSERT(g_bserrno == 0);
3963 		CU_ASSERT(free_clusters - (2 * i + 1) == spdk_bs_free_cluster_count(bs));
3964 
3965 		CU_ASSERT(g_dev_read_bytes == read_bytes);
3966 		CU_ASSERT(g_dev_write_bytes == write_bytes);
3967 
3968 		/* Now write to another unallocated cluster that is part of the same extent page. */
3969 		g_bserrno = -1;
3970 		spdk_blob_io_write(blob, ch, payload_write, pages_per_extent_page * i + pages_per_cluster,
3971 				   1, blob_op_complete, NULL);
3972 		poll_threads();
3973 		CU_ASSERT(g_bserrno == 0);
3974 		CU_ASSERT(free_clusters - (2 * i + 2) == spdk_bs_free_cluster_count(bs));
3975 
3976 		CU_ASSERT(g_dev_read_bytes == read_bytes);
3977 		/*
3978 		 * For legacy metadata, we should have written the I/O and the primary metadata page.
3979 		 * For extent table metadata, we should have written the I/O and the extent metadata page.
3980 		 */
3981 		CU_ASSERT((g_dev_write_bytes - write_bytes) / page_size == 2);
3982 	}
3983 
3984 	ut_blob_close_and_delete(bs, blob);
3985 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3986 
3987 	spdk_bs_free_io_channel(ch);
3988 	poll_threads();
3989 	g_blob = NULL;
3990 	g_blobid = 0;
3991 
3992 	spdk_bs_unload(bs, bs_op_complete, NULL);
3993 	poll_threads();
3994 	CU_ASSERT(g_bserrno == 0);
3995 	g_bs = NULL;
3996 }
3997 
3998 static void
3999 blob_thin_prov_rle(void)
4000 {
4001 	static const uint8_t zero[10 * 4096] = { 0 };
4002 	struct spdk_blob_store *bs = g_bs;
4003 	struct spdk_blob *blob;
4004 	struct spdk_io_channel *channel;
4005 	struct spdk_blob_opts opts;
4006 	spdk_blob_id blobid;
4007 	uint64_t free_clusters;
4008 	uint64_t page_size;
4009 	uint8_t payload_read[10 * 4096];
4010 	uint8_t payload_write[10 * 4096];
4011 	uint64_t write_bytes;
4012 	uint64_t read_bytes;
4013 	uint64_t io_unit;
4014 
4015 	free_clusters = spdk_bs_free_cluster_count(bs);
4016 	page_size = spdk_bs_get_page_size(bs);
4017 
4018 	ut_spdk_blob_opts_init(&opts);
4019 	opts.thin_provision = true;
4020 	opts.num_clusters = 5;
4021 
4022 	blob = ut_blob_create_and_open(bs, &opts);
4023 	blobid = spdk_blob_get_id(blob);
4024 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4025 
4026 	channel = spdk_bs_alloc_io_channel(bs);
4027 	CU_ASSERT(channel != NULL);
4028 
4029 	/* Specifically target the second cluster in the blob as the first allocation */
4030 	io_unit = bs_cluster_to_page(bs, 1) * bs_io_unit_per_page(bs);
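	/* bs_cluster_to_page() gives the first page of cluster 1; multiplying by the io
	 * units per page converts that page offset into the io-unit offset expected by
	 * spdk_blob_io_read/write.
	 */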
4031 
4032 	/* Payload should be all zeros from unallocated clusters */
4033 	memset(payload_read, 0xFF, sizeof(payload_read));
4034 	spdk_blob_io_read(blob, channel, payload_read, io_unit, 10, blob_op_complete, NULL);
4035 	poll_threads();
4036 	CU_ASSERT(g_bserrno == 0);
4037 	CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0);
4038 
4039 	write_bytes = g_dev_write_bytes;
4040 	read_bytes = g_dev_read_bytes;
4041 
4042 	/* Issue write to second cluster in a blob */
4043 	memset(payload_write, 0xE5, sizeof(payload_write));
4044 	spdk_blob_io_write(blob, channel, payload_write, io_unit, 10, blob_op_complete, NULL);
4045 	poll_threads();
4046 	CU_ASSERT(g_bserrno == 0);
4047 	CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs));
4048 	/* For a thin-provisioned blob we need to write 10 pages of payload plus one
4049 	 * metadata page and read 0 bytes */
4050 	if (g_use_extent_table) {
4051 		/* Add one more page for EXTENT_PAGE write */
4052 		CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 12);
4053 	} else {
4054 		CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 11);
4055 	}
4056 	CU_ASSERT(g_dev_read_bytes - read_bytes == 0);
4057 
4058 	spdk_blob_io_read(blob, channel, payload_read, io_unit, 10, blob_op_complete, NULL);
4059 	poll_threads();
4060 	CU_ASSERT(g_bserrno == 0);
4061 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
4062 
4063 	spdk_bs_free_io_channel(channel);
4064 	poll_threads();
4065 
4066 	spdk_blob_close(blob, blob_op_complete, NULL);
4067 	poll_threads();
4068 	CU_ASSERT(g_bserrno == 0);
4069 
4070 	ut_bs_reload(&bs, NULL);
4071 
4072 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
4073 	poll_threads();
4074 	CU_ASSERT(g_bserrno == 0);
4075 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4076 	blob = g_blob;
4077 
4078 	channel = spdk_bs_alloc_io_channel(bs);
4079 	CU_ASSERT(channel != NULL);
4080 
4081 	/* Read second cluster after blob reload to confirm data written */
4082 	spdk_blob_io_read(blob, channel, payload_read, io_unit, 10, blob_op_complete, NULL);
4083 	poll_threads();
4084 	CU_ASSERT(g_bserrno == 0);
4085 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
4086 
4087 	spdk_bs_free_io_channel(channel);
4088 	poll_threads();
4089 
4090 	ut_blob_close_and_delete(bs, blob);
4091 }
4092 
4093 static void
4094 blob_thin_prov_rw_iov(void)
4095 {
4096 	static const uint8_t zero[10 * 4096] = { 0 };
4097 	struct spdk_blob_store *bs = g_bs;
4098 	struct spdk_blob *blob;
4099 	struct spdk_io_channel *channel;
4100 	struct spdk_blob_opts opts;
4101 	uint64_t free_clusters;
4102 	uint8_t payload_read[10 * 4096];
4103 	uint8_t payload_write[10 * 4096];
4104 	struct iovec iov_read[3];
4105 	struct iovec iov_write[3];
4106 
4107 	free_clusters = spdk_bs_free_cluster_count(bs);
4108 
4109 	channel = spdk_bs_alloc_io_channel(bs);
4110 	CU_ASSERT(channel != NULL);
4111 
4112 	ut_spdk_blob_opts_init(&opts);
4113 	opts.thin_provision = true;
4114 
4115 	blob = ut_blob_create_and_open(bs, &opts);
4116 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4117 
4118 	CU_ASSERT(blob->active.num_clusters == 0);
4119 
4120 	/* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */
4121 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
4122 	poll_threads();
4123 	CU_ASSERT(g_bserrno == 0);
4124 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4125 	CU_ASSERT(blob->active.num_clusters == 5);
4126 
4127 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
4128 	poll_threads();
4129 	CU_ASSERT(g_bserrno == 0);
4130 	/* Sync must not change anything */
4131 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4132 	CU_ASSERT(blob->active.num_clusters == 5);
4133 
4134 	/* Payload should be all zeros from unallocated clusters */
4135 	memset(payload_read, 0xAA, sizeof(payload_read));
4136 	iov_read[0].iov_base = payload_read;
4137 	iov_read[0].iov_len = 3 * 4096;
4138 	iov_read[1].iov_base = payload_read + 3 * 4096;
4139 	iov_read[1].iov_len = 4 * 4096;
4140 	iov_read[2].iov_base = payload_read + 7 * 4096;
4141 	iov_read[2].iov_len = 3 * 4096;
4142 	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
4143 	poll_threads();
4144 	CU_ASSERT(g_bserrno == 0);
4145 	CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0);
4146 
4147 	memset(payload_write, 0xE5, sizeof(payload_write));
4148 	iov_write[0].iov_base = payload_write;
4149 	iov_write[0].iov_len = 1 * 4096;
4150 	iov_write[1].iov_base = payload_write + 1 * 4096;
4151 	iov_write[1].iov_len = 5 * 4096;
4152 	iov_write[2].iov_base = payload_write + 6 * 4096;
4153 	iov_write[2].iov_len = 4 * 4096;
4154 
4155 	spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
4156 	poll_threads();
4157 	CU_ASSERT(g_bserrno == 0);
4158 
4159 	memset(payload_read, 0xAA, sizeof(payload_read));
4160 	iov_read[0].iov_base = payload_read;
4161 	iov_read[0].iov_len = 3 * 4096;
4162 	iov_read[1].iov_base = payload_read + 3 * 4096;
4163 	iov_read[1].iov_len = 4 * 4096;
4164 	iov_read[2].iov_base = payload_read + 7 * 4096;
4165 	iov_read[2].iov_len = 3 * 4096;
4166 	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
4167 	poll_threads();
4168 	CU_ASSERT(g_bserrno == 0);
4169 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
4170 
4171 	spdk_bs_free_io_channel(channel);
4172 	poll_threads();
4173 
4174 	ut_blob_close_and_delete(bs, blob);
4175 }
4176 
4177 struct iter_ctx {
4178 	int		current_iter;
4179 	spdk_blob_id	blobid[4];
4180 };
4181 
4182 static void
4183 test_iter(void *arg, struct spdk_blob *blob, int bserrno)
4184 {
4185 	struct iter_ctx *iter_ctx = arg;
4186 	spdk_blob_id blobid;
4187 
4188 	CU_ASSERT(bserrno == 0);
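	/* Blobs are iterated in increasing blobid order, which here matches the order
	 * they were created in, so each callback can simply compare against the next
	 * recorded id.
	 */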
4189 	blobid = spdk_blob_get_id(blob);
4190 	CU_ASSERT(blobid == iter_ctx->blobid[iter_ctx->current_iter++]);
4191 }
4192 
4193 static void
4194 bs_load_iter_test(void)
4195 {
4196 	struct spdk_blob_store *bs;
4197 	struct spdk_bs_dev *dev;
4198 	struct iter_ctx iter_ctx = { 0 };
4199 	struct spdk_blob *blob;
4200 	int i, rc;
4201 	struct spdk_bs_opts opts;
4202 
4203 	dev = init_dev();
4204 	spdk_bs_opts_init(&opts, sizeof(opts));
4205 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
4206 
4207 	/* Initialize a new blob store */
4208 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
4209 	poll_threads();
4210 	CU_ASSERT(g_bserrno == 0);
4211 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
4212 	bs = g_bs;
4213 
4214 	for (i = 0; i < 4; i++) {
4215 		blob = ut_blob_create_and_open(bs, NULL);
4216 		iter_ctx.blobid[i] = spdk_blob_get_id(blob);
4217 
4218 		/* Just save the blobid as an xattr for testing purposes. */
4219 		rc = spdk_blob_set_xattr(blob, "blobid", &iter_ctx.blobid[i], sizeof(spdk_blob_id));
4220 		CU_ASSERT(rc == 0);
4221 
4222 		/* Resize the blob */
4223 		spdk_blob_resize(blob, i, blob_op_complete, NULL);
4224 		poll_threads();
4225 		CU_ASSERT(g_bserrno == 0);
4226 
4227 		spdk_blob_close(blob, blob_op_complete, NULL);
4228 		poll_threads();
4229 		CU_ASSERT(g_bserrno == 0);
4230 	}
4231 
4232 	g_bserrno = -1;
4233 	spdk_bs_unload(bs, bs_op_complete, NULL);
4234 	poll_threads();
4235 	CU_ASSERT(g_bserrno == 0);
4236 
4237 	dev = init_dev();
4238 	spdk_bs_opts_init(&opts, sizeof(opts));
4239 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
4240 	opts.iter_cb_fn = test_iter;
4241 	opts.iter_cb_arg = &iter_ctx;
4242 
4243 	/* Test blob iteration during load after a clean shutdown. */
4244 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
4245 	poll_threads();
4246 	CU_ASSERT(g_bserrno == 0);
4247 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
4248 	bs = g_bs;
4249 
4250 	/* Dirty shutdown */
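	/* bs_free() tears down the in-memory blobstore without marking a clean shutdown
	 * on disk, so the load below must take the recovery path and reconstruct the
	 * used md page/cluster/blobid masks before iterating the blobs.
	 */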
4251 	bs_free(bs);
4252 
4253 	dev = init_dev();
4254 	spdk_bs_opts_init(&opts, sizeof(opts));
4255 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
4256 	opts.iter_cb_fn = test_iter;
4257 	iter_ctx.current_iter = 0;
4258 	opts.iter_cb_arg = &iter_ctx;
4259 
4260 	/* Test blob iteration during load after a dirty shutdown. */
4261 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
4262 	poll_threads();
4263 	CU_ASSERT(g_bserrno == 0);
4264 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
4265 	bs = g_bs;
4266 
4267 	spdk_bs_unload(bs, bs_op_complete, NULL);
4268 	poll_threads();
4269 	CU_ASSERT(g_bserrno == 0);
4270 	g_bs = NULL;
4271 }
4272 
4273 static void
4274 blob_snapshot_rw(void)
4275 {
4276 	static const uint8_t zero[10 * 4096] = { 0 };
4277 	struct spdk_blob_store *bs = g_bs;
4278 	struct spdk_blob *blob, *snapshot;
4279 	struct spdk_io_channel *channel;
4280 	struct spdk_blob_opts opts;
4281 	spdk_blob_id blobid, snapshotid;
4282 	uint64_t free_clusters;
4283 	uint64_t cluster_size;
4284 	uint64_t page_size;
4285 	uint8_t payload_read[10 * 4096];
4286 	uint8_t payload_write[10 * 4096];
4287 	uint64_t write_bytes;
4288 	uint64_t read_bytes;
4289 
4290 	free_clusters = spdk_bs_free_cluster_count(bs);
4291 	cluster_size = spdk_bs_get_cluster_size(bs);
4292 	page_size = spdk_bs_get_page_size(bs);
4293 
4294 	channel = spdk_bs_alloc_io_channel(bs);
4295 	CU_ASSERT(channel != NULL);
4296 
4297 	ut_spdk_blob_opts_init(&opts);
4298 	opts.thin_provision = true;
4299 	opts.num_clusters = 5;
4300 
4301 	blob = ut_blob_create_and_open(bs, &opts);
4302 	blobid = spdk_blob_get_id(blob);
4303 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4304 
4305 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
4306 
4307 	memset(payload_read, 0xFF, sizeof(payload_read));
4308 	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
4309 	poll_threads();
4310 	CU_ASSERT(g_bserrno == 0);
4311 	CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0);
4312 
4313 	memset(payload_write, 0xE5, sizeof(payload_write));
4314 	spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
4315 	poll_threads();
4316 	CU_ASSERT(g_bserrno == 0);
4317 	CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs));
4318 
4319 	/* Create snapshot from blob */
4320 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
4321 	poll_threads();
4322 	CU_ASSERT(g_bserrno == 0);
4323 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
4324 	snapshotid = g_blobid;
4325 
4326 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
4327 	poll_threads();
4328 	CU_ASSERT(g_bserrno == 0);
4329 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4330 	snapshot = g_blob;
4331 	CU_ASSERT(snapshot->data_ro == true);
4332 	CU_ASSERT(snapshot->md_ro == true);
4333 
4334 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5);
4335 
4336 	write_bytes = g_dev_write_bytes;
4337 	read_bytes = g_dev_read_bytes;
4338 
4339 	memset(payload_write, 0xAA, sizeof(payload_write));
4340 	spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
4341 	poll_threads();
4342 	CU_ASSERT(g_bserrno == 0);
4343 	CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs));
4344 
4345 	/* For a clone we need to allocate and copy one cluster, update one page of metadata
4346 	 * and then write 10 pages of payload.
4347 	 */
4348 	if (g_use_extent_table) {
4349 		/* Add one more page for EXTENT_PAGE write */
4350 		CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 12 + cluster_size);
4351 	} else {
4352 		CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 11 + cluster_size);
4353 	}
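	/* The cluster_size worth of reads is the copy-on-write step: the original
	 * cluster is read from the snapshot and written in full to the newly allocated
	 * cluster, which is also why cluster_size shows up in the write accounting above.
	 */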
4354 	CU_ASSERT(g_dev_read_bytes - read_bytes == cluster_size);
4355 
4356 	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
4357 	poll_threads();
4358 	CU_ASSERT(g_bserrno == 0);
4359 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
4360 
4361 	/* Data on snapshot should not change after write to clone */
4362 	memset(payload_write, 0xE5, sizeof(payload_write));
4363 	spdk_blob_io_read(snapshot, channel, payload_read, 4, 10, blob_op_complete, NULL);
4364 	poll_threads();
4365 	CU_ASSERT(g_bserrno == 0);
4366 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
4367 
4368 	ut_blob_close_and_delete(bs, blob);
4369 	ut_blob_close_and_delete(bs, snapshot);
4370 
4371 	spdk_bs_free_io_channel(channel);
4372 	poll_threads();
4373 	g_blob = NULL;
4374 	g_blobid = 0;
4375 }
4376 
4377 static void
4378 blob_snapshot_rw_iov(void)
4379 {
4380 	static const uint8_t zero[10 * 4096] = { 0 };
4381 	struct spdk_blob_store *bs = g_bs;
4382 	struct spdk_blob *blob, *snapshot;
4383 	struct spdk_io_channel *channel;
4384 	struct spdk_blob_opts opts;
4385 	spdk_blob_id blobid, snapshotid;
4386 	uint64_t free_clusters;
4387 	uint8_t payload_read[10 * 4096];
4388 	uint8_t payload_write[10 * 4096];
4389 	struct iovec iov_read[3];
4390 	struct iovec iov_write[3];
4391 
4392 	free_clusters = spdk_bs_free_cluster_count(bs);
4393 
4394 	channel = spdk_bs_alloc_io_channel(bs);
4395 	CU_ASSERT(channel != NULL);
4396 
4397 	ut_spdk_blob_opts_init(&opts);
4398 	opts.thin_provision = true;
4399 	opts.num_clusters = 5;
4400 
4401 	blob = ut_blob_create_and_open(bs, &opts);
4402 	blobid = spdk_blob_get_id(blob);
4403 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4404 
4405 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
4406 
4407 	/* Create snapshot from blob */
4408 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
4409 	poll_threads();
4410 	CU_ASSERT(g_bserrno == 0);
4411 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
4412 	snapshotid = g_blobid;
4413 
4414 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
4415 	poll_threads();
4416 	CU_ASSERT(g_bserrno == 0);
4417 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4418 	snapshot = g_blob;
4419 	CU_ASSERT(snapshot->data_ro == true);
4420 	CU_ASSERT(snapshot->md_ro == true);
4421 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5);
4422 
4423 	/* Payload should be all zeros from unallocated clusters */
4424 	memset(payload_read, 0xAA, sizeof(payload_read));
4425 	iov_read[0].iov_base = payload_read;
4426 	iov_read[0].iov_len = 3 * 4096;
4427 	iov_read[1].iov_base = payload_read + 3 * 4096;
4428 	iov_read[1].iov_len = 4 * 4096;
4429 	iov_read[2].iov_base = payload_read + 7 * 4096;
4430 	iov_read[2].iov_len = 3 * 4096;
4431 	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
4432 	poll_threads();
4433 	CU_ASSERT(g_bserrno == 0);
4434 	CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0);
4435 
4436 	memset(payload_write, 0xE5, sizeof(payload_write));
4437 	iov_write[0].iov_base = payload_write;
4438 	iov_write[0].iov_len = 1 * 4096;
4439 	iov_write[1].iov_base = payload_write + 1 * 4096;
4440 	iov_write[1].iov_len = 5 * 4096;
4441 	iov_write[2].iov_base = payload_write + 6 * 4096;
4442 	iov_write[2].iov_len = 4 * 4096;
4443 
4444 	spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
4445 	poll_threads();
4446 	CU_ASSERT(g_bserrno == 0);
4447 
4448 	memset(payload_read, 0xAA, sizeof(payload_read));
4449 	iov_read[0].iov_base = payload_read;
4450 	iov_read[0].iov_len = 3 * 4096;
4451 	iov_read[1].iov_base = payload_read + 3 * 4096;
4452 	iov_read[1].iov_len = 4 * 4096;
4453 	iov_read[2].iov_base = payload_read + 7 * 4096;
4454 	iov_read[2].iov_len = 3 * 4096;
4455 	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
4456 	poll_threads();
4457 	CU_ASSERT(g_bserrno == 0);
4458 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
4459 
4460 	spdk_bs_free_io_channel(channel);
4461 	poll_threads();
4462 
4463 	ut_blob_close_and_delete(bs, blob);
4464 	ut_blob_close_and_delete(bs, snapshot);
4465 }
4466 
4467 /**
4468  * Inflate / decouple parent rw unit tests.
4469  *
4470  * --------------
4471  * original blob:         0         1         2         3         4
4472  *                   ,---------+---------+---------+---------+---------.
4473  *         snapshot  |xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|    -    |
4474  *                   +---------+---------+---------+---------+---------+
4475  *         snapshot2 |    -    |yyyyyyyyy|    -    |yyyyyyyyy|    -    |
4476  *                   +---------+---------+---------+---------+---------+
4477  *         blob      |    -    |zzzzzzzzz|    -    |    -    |    -    |
4478  *                   '---------+---------+---------+---------+---------'
4479  *                   .         .         .         .         .         .
4480  * --------          .         .         .         .         .         .
4481  * inflate:          .         .         .         .         .         .
4482  *                   ,---------+---------+---------+---------+---------.
4483  *         blob      |xxxxxxxxx|zzzzzzzzz|xxxxxxxxx|yyyyyyyyy|000000000|
4484  *                   '---------+---------+---------+---------+---------'
4485  *
4486  *         NOTE: needs to allocate 4 clusters, thin provisioning removed, dependency
4487  *               on snapshot2 and snapshot removed .         .         .
4488  *                   .         .         .         .         .         .
4489  * ----------------  .         .         .         .         .         .
4490  * decouple parent:  .         .         .         .         .         .
4491  *                   ,---------+---------+---------+---------+---------.
4492  *         snapshot  |xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|    -    |
4493  *                   +---------+---------+---------+---------+---------+
4494  *         blob      |    -    |zzzzzzzzz|    -    |yyyyyyyyy|    -    |
4495  *                   '---------+---------+---------+---------+---------'
4496  *
4497  *         NOTE: needs to allocate 1 cluster, 3 clusters unallocated, dependency
4498  *               on snapshot2 removed and on snapshot still exists. Snapshot2
4499  *               should remain a clone of snapshot.
4500  */
4501 static void
4502 _blob_inflate_rw(bool decouple_parent)
4503 {
4504 	struct spdk_blob_store *bs = g_bs;
4505 	struct spdk_blob *blob, *snapshot, *snapshot2;
4506 	struct spdk_io_channel *channel;
4507 	struct spdk_blob_opts opts;
4508 	spdk_blob_id blobid, snapshotid, snapshot2id;
4509 	uint64_t free_clusters;
4510 	uint64_t cluster_size;
4511 
4512 	uint64_t payload_size;
4513 	uint8_t *payload_read;
4514 	uint8_t *payload_write;
4515 	uint8_t *payload_clone;
4516 
4517 	uint64_t pages_per_cluster;
4518 	uint64_t pages_per_payload;
4519 
4520 	int i;
4521 	spdk_blob_id ids[2];
4522 	size_t count;
4523 
4524 	free_clusters = spdk_bs_free_cluster_count(bs);
4525 	cluster_size = spdk_bs_get_cluster_size(bs);
4526 	pages_per_cluster = cluster_size / spdk_bs_get_page_size(bs);
4527 	pages_per_payload = pages_per_cluster * 5;
4528 
4529 	payload_size = cluster_size * 5;
4530 
4531 	payload_read = malloc(payload_size);
4532 	SPDK_CU_ASSERT_FATAL(payload_read != NULL);
4533 
4534 	payload_write = malloc(payload_size);
4535 	SPDK_CU_ASSERT_FATAL(payload_write != NULL);
4536 
4537 	payload_clone = malloc(payload_size);
4538 	SPDK_CU_ASSERT_FATAL(payload_clone != NULL);
4539 
4540 	channel = spdk_bs_alloc_io_channel(bs);
4541 	SPDK_CU_ASSERT_FATAL(channel != NULL);
4542 
4543 	/* Create blob */
4544 	ut_spdk_blob_opts_init(&opts);
4545 	opts.thin_provision = true;
4546 	opts.num_clusters = 5;
4547 
4548 	blob = ut_blob_create_and_open(bs, &opts);
4549 	blobid = spdk_blob_get_id(blob);
4550 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4551 
4552 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
4553 
4554 	/* 1) Initial read should return zeroed payload */
4555 	memset(payload_read, 0xFF, payload_size);
4556 	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload,
4557 			  blob_op_complete, NULL);
4558 	poll_threads();
4559 	CU_ASSERT(g_bserrno == 0);
4560 	CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size));
4561 
4562 	/* Fill the whole blob with a pattern, except the last cluster (to be sure it
4563 	 * isn't allocated) */
4564 	memset(payload_write, 0xE5, payload_size - cluster_size);
4565 	spdk_blob_io_write(blob, channel, payload_write, 0, pages_per_payload -
4566 			   pages_per_cluster, blob_op_complete, NULL);
4567 	poll_threads();
4568 	CU_ASSERT(g_bserrno == 0);
4569 	CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs));
4570 
4571 	/* 2) Create snapshot from blob (first level) */
4572 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
4573 	poll_threads();
4574 	CU_ASSERT(g_bserrno == 0);
4575 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
4576 	snapshotid = g_blobid;
4577 
4578 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
4579 	poll_threads();
4580 	CU_ASSERT(g_bserrno == 0);
4581 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4582 	snapshot = g_blob;
4583 	CU_ASSERT(snapshot->data_ro == true);
4584 	CU_ASSERT(snapshot->md_ro == true);
4585 
4586 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5);
4587 
4588 	/* Write every second cluster with a pattern.
4589 	 *
4590 	 * The last cluster shouldn't be written, to be sure that neither the
4591 	 * snapshot nor the clone allocates it.
4592 	 *
4593 	 * payload_clone stores the expected result of reading "blob" at this point
4594 	 * and is used only to check data consistency on the clone before and after
4595 	 * inflation. Initially we fill it with the backing snapshot's pattern
4596 	 * written above.
4597 	 */
4598 	memset(payload_clone, 0xE5, payload_size - cluster_size);
4599 	memset(payload_clone + payload_size - cluster_size, 0x00, cluster_size);
4600 	memset(payload_write, 0xAA, payload_size);
4601 	for (i = 1; i < 5; i += 2) {
4602 		spdk_blob_io_write(blob, channel, payload_write, i * pages_per_cluster,
4603 				   pages_per_cluster, blob_op_complete, NULL);
4604 		poll_threads();
4605 		CU_ASSERT(g_bserrno == 0);
4606 
4607 		/* Update expected result */
4608 		memcpy(payload_clone + (cluster_size * i), payload_write,
4609 		       cluster_size);
4610 	}
4611 	CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs));
4612 
4613 	/* Check data consistency on clone */
4614 	memset(payload_read, 0xFF, payload_size);
4615 	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload,
4616 			  blob_op_complete, NULL);
4617 	poll_threads();
4618 	CU_ASSERT(g_bserrno == 0);
4619 	CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0);
4620 
4621 	/* 3) Create second levels snapshot from blob */
4622 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
4623 	poll_threads();
4624 	CU_ASSERT(g_bserrno == 0);
4625 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
4626 	snapshot2id = g_blobid;
4627 
4628 	spdk_bs_open_blob(bs, snapshot2id, blob_op_with_handle_complete, NULL);
4629 	poll_threads();
4630 	CU_ASSERT(g_bserrno == 0);
4631 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4632 	snapshot2 = g_blob;
4633 	CU_ASSERT(snapshot2->data_ro == true);
4634 	CU_ASSERT(snapshot2->md_ro == true);
4635 
4636 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot2) == 5);
4637 
4638 	CU_ASSERT(snapshot2->parent_id == snapshotid);
4639 
4640 	/* Write one cluster on the top level blob. This cluster (1) covers an
4641 	 * already allocated cluster in snapshot2, so it shouldn't be inflated
4642 	 * at all */
4643 	spdk_blob_io_write(blob, channel, payload_write, pages_per_cluster,
4644 			   pages_per_cluster, blob_op_complete, NULL);
4645 	poll_threads();
4646 	CU_ASSERT(g_bserrno == 0);
4647 
4648 	/* Update expected result */
4649 	memcpy(payload_clone + cluster_size, payload_write, cluster_size);
4650 
4651 	/* Check data consistency on clone */
4652 	memset(payload_read, 0xFF, payload_size);
4653 	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload,
4654 			  blob_op_complete, NULL);
4655 	poll_threads();
4656 	CU_ASSERT(g_bserrno == 0);
4657 	CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0);
4658 
4659 
4660 	/* Close all blobs */
4661 	spdk_blob_close(blob, blob_op_complete, NULL);
4662 	poll_threads();
4663 	CU_ASSERT(g_bserrno == 0);
4664 
4665 	spdk_blob_close(snapshot2, blob_op_complete, NULL);
4666 	poll_threads();
4667 	CU_ASSERT(g_bserrno == 0);
4668 
4669 	spdk_blob_close(snapshot, blob_op_complete, NULL);
4670 	poll_threads();
4671 	CU_ASSERT(g_bserrno == 0);
4672 
4673 	/* Check snapshot-clone relations */
4674 	count = 2;
4675 	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);
4676 	CU_ASSERT(count == 1);
4677 	CU_ASSERT(ids[0] == snapshot2id);
4678 
4679 	count = 2;
4680 	CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0);
4681 	CU_ASSERT(count == 1);
4682 	CU_ASSERT(ids[0] == blobid);
4683 
4684 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshot2id);
4685 
4686 	free_clusters = spdk_bs_free_cluster_count(bs);
4687 	if (!decouple_parent) {
4688 		/* Do full blob inflation */
4689 		spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
4690 		poll_threads();
4691 		CU_ASSERT(g_bserrno == 0);
4692 
4693 		/* All clusters should be inflated (except the one already allocated
4694 		 * in the top level blob) */
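		/* Per the diagram above: clusters 0 and 2 come from the base snapshot,
		 * cluster 3 from snapshot2 and cluster 4 from the zeroes device, while
		 * cluster 1 was already owned by the top level blob.
		 */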
4695 		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 4);
4696 
4697 		/* Check if relation tree updated correctly */
4698 		count = 2;
4699 		CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);
4700 
4701 		/* snapshotid has one clone */
4702 		CU_ASSERT(count == 1);
4703 		CU_ASSERT(ids[0] == snapshot2id);
4704 
4705 		/* snapshot2id has no clones */
4706 		count = 2;
4707 		CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0);
4708 		CU_ASSERT(count == 0);
4709 
4710 		CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID);
4711 	} else {
4712 		/* Decouple parent of blob */
4713 		spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
4714 		poll_threads();
4715 		CU_ASSERT(g_bserrno == 0);
4716 
4717 		/* Only one cluster from the parent should be inflated (the second one
4718 		 * is covered by a cluster that was written on the top level blob and is
4719 		 * already allocated) */
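		/* Of snapshot2's two allocated clusters (1 and 3), only cluster 3 has to
		 * be copied, since cluster 1 is already allocated in the top level blob.
		 */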
4720 		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 1);
4721 
4722 		/* Check if relation tree updated correctly */
4723 		count = 2;
4724 		CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);
4725 
4726 		/* snapshotid has two clones now */
4727 		CU_ASSERT(count == 2);
4728 		CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
4729 		CU_ASSERT(ids[0] == snapshot2id || ids[1] == snapshot2id);
4730 
4731 		/* snapshot2id has no clones */
4732 		count = 2;
4733 		CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0);
4734 		CU_ASSERT(count == 0);
4735 
4736 		CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
4737 	}
4738 
4739 	/* Try to delete snapshot2 (should pass) */
4740 	spdk_bs_delete_blob(bs, snapshot2id, blob_op_complete, NULL);
4741 	poll_threads();
4742 	CU_ASSERT(g_bserrno == 0);
4743 
4744 	/* Try to delete base snapshot */
4745 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
4746 	poll_threads();
4747 	CU_ASSERT(g_bserrno == 0);
4748 
4749 	/* Reopen blob after snapshot deletion */
4750 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
4751 	poll_threads();
4752 	CU_ASSERT(g_bserrno == 0);
4753 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4754 	blob = g_blob;
4755 
4756 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
4757 
4758 	/* Check data consistency on inflated blob */
4759 	memset(payload_read, 0xFF, payload_size);
4760 	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload,
4761 			  blob_op_complete, NULL);
4762 	poll_threads();
4763 	CU_ASSERT(g_bserrno == 0);
4764 	CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0);
4765 
4766 	spdk_bs_free_io_channel(channel);
4767 	poll_threads();
4768 
4769 	free(payload_read);
4770 	free(payload_write);
4771 	free(payload_clone);
4772 
4773 	ut_blob_close_and_delete(bs, blob);
4774 }
4775 
4776 static void
4777 blob_inflate_rw(void)
4778 {
4779 	_blob_inflate_rw(false);
4780 	_blob_inflate_rw(true);
4781 }
4782 
4783 /**
4784  * Snapshot-clones relation test
4785  *
4786  *         snapshot
4787  *            |
4788  *      +-----+-----+
4789  *      |           |
4790  *   blob(ro)   snapshot2
4791  *      |           |
4792  *   clone2      clone
4793  */
4794 static void
4795 blob_relations(void)
4796 {
4797 	struct spdk_blob_store *bs;
4798 	struct spdk_bs_dev *dev;
4799 	struct spdk_bs_opts bs_opts;
4800 	struct spdk_blob_opts opts;
4801 	struct spdk_blob *blob, *snapshot, *snapshot2, *clone, *clone2;
4802 	spdk_blob_id blobid, cloneid, snapshotid, cloneid2, snapshotid2;
4803 	int rc;
4804 	size_t count;
4805 	spdk_blob_id ids[10] = {};
4806 
4807 	dev = init_dev();
4808 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
4809 	snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");
4810 
4811 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
4812 	poll_threads();
4813 	CU_ASSERT(g_bserrno == 0);
4814 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
4815 	bs = g_bs;
4816 
4817 	/* 1. Create blob with 10 clusters */
4818 
4819 	ut_spdk_blob_opts_init(&opts);
4820 	opts.num_clusters = 10;
4821 
4822 	blob = ut_blob_create_and_open(bs, &opts);
4823 	blobid = spdk_blob_get_id(blob);
4824 
4825 	CU_ASSERT(!spdk_blob_is_read_only(blob));
4826 	CU_ASSERT(!spdk_blob_is_snapshot(blob));
4827 	CU_ASSERT(!spdk_blob_is_clone(blob));
4828 	CU_ASSERT(!spdk_blob_is_thin_provisioned(blob));
4829 
4830 	/* blob should not have underlying snapshot nor clones */
4831 	CU_ASSERT(blob->parent_id == SPDK_BLOBID_INVALID);
4832 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID);
4833 	count = SPDK_COUNTOF(ids);
4834 	rc = spdk_blob_get_clones(bs, blobid, ids, &count);
4835 	CU_ASSERT(rc == 0);
4836 	CU_ASSERT(count == 0);
4837 
4838 
4839 	/* 2. Create snapshot */
4840 
4841 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
4842 	poll_threads();
4843 	CU_ASSERT(g_bserrno == 0);
4844 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
4845 	snapshotid = g_blobid;
4846 
4847 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
4848 	poll_threads();
4849 	CU_ASSERT(g_bserrno == 0);
4850 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4851 	snapshot = g_blob;
4852 
4853 	CU_ASSERT(spdk_blob_is_read_only(snapshot));
4854 	CU_ASSERT(spdk_blob_is_snapshot(snapshot));
4855 	CU_ASSERT(!spdk_blob_is_clone(snapshot));
4856 	CU_ASSERT(snapshot->parent_id == SPDK_BLOBID_INVALID);
4857 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid) == SPDK_BLOBID_INVALID);
4858 
4859 	/* Check if original blob is converted to the clone of snapshot */
4860 	CU_ASSERT(!spdk_blob_is_read_only(blob));
4861 	CU_ASSERT(!spdk_blob_is_snapshot(blob));
4862 	CU_ASSERT(spdk_blob_is_clone(blob));
4863 	CU_ASSERT(spdk_blob_is_thin_provisioned(blob));
4864 	CU_ASSERT(blob->parent_id == snapshotid);
4865 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
4866 
4867 	count = SPDK_COUNTOF(ids);
4868 	rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
4869 	CU_ASSERT(rc == 0);
4870 	CU_ASSERT(count == 1);
4871 	CU_ASSERT(ids[0] == blobid);
4872 
4873 
4874 	/* 3. Create clone from snapshot */
4875 
4876 	spdk_bs_create_clone(bs, snapshotid, NULL, blob_op_with_id_complete, NULL);
4877 	poll_threads();
4878 	CU_ASSERT(g_bserrno == 0);
4879 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
4880 	cloneid = g_blobid;
4881 
4882 	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
4883 	poll_threads();
4884 	CU_ASSERT(g_bserrno == 0);
4885 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4886 	clone = g_blob;
4887 
4888 	CU_ASSERT(!spdk_blob_is_read_only(clone));
4889 	CU_ASSERT(!spdk_blob_is_snapshot(clone));
4890 	CU_ASSERT(spdk_blob_is_clone(clone));
4891 	CU_ASSERT(spdk_blob_is_thin_provisioned(clone));
4892 	CU_ASSERT(clone->parent_id == snapshotid);
4893 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid);
4894 
4895 	count = SPDK_COUNTOF(ids);
4896 	rc = spdk_blob_get_clones(bs, cloneid, ids, &count);
4897 	CU_ASSERT(rc == 0);
4898 	CU_ASSERT(count == 0);
4899 
4900 	/* Check if clone is on the snapshot's list */
4901 	count = SPDK_COUNTOF(ids);
4902 	rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
4903 	CU_ASSERT(rc == 0);
4904 	CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
4905 	CU_ASSERT(ids[0] == cloneid || ids[1] == cloneid);
4906 
4907 
4908 	/* 4. Create snapshot of the clone */
4909 
4910 	spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL);
4911 	poll_threads();
4912 	CU_ASSERT(g_bserrno == 0);
4913 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
4914 	snapshotid2 = g_blobid;
4915 
4916 	spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL);
4917 	poll_threads();
4918 	CU_ASSERT(g_bserrno == 0);
4919 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4920 	snapshot2 = g_blob;
4921 
4922 	CU_ASSERT(spdk_blob_is_read_only(snapshot2));
4923 	CU_ASSERT(spdk_blob_is_snapshot(snapshot2));
4924 	CU_ASSERT(spdk_blob_is_clone(snapshot2));
4925 	CU_ASSERT(snapshot2->parent_id == snapshotid);
4926 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid);
4927 
4928 	/* Check if clone is converted to the clone of snapshot2 and snapshot2
4929 	 * is a child of snapshot */
4930 	CU_ASSERT(!spdk_blob_is_read_only(clone));
4931 	CU_ASSERT(!spdk_blob_is_snapshot(clone));
4932 	CU_ASSERT(spdk_blob_is_clone(clone));
4933 	CU_ASSERT(spdk_blob_is_thin_provisioned(clone));
4934 	CU_ASSERT(clone->parent_id == snapshotid2);
4935 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2);
4936 
4937 	count = SPDK_COUNTOF(ids);
4938 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
4939 	CU_ASSERT(rc == 0);
4940 	CU_ASSERT(count == 1);
4941 	CU_ASSERT(ids[0] == cloneid);
4942 
4943 
4944 	/* 5. Try to create clone from read only blob */
4945 
4946 	/* Mark blob as read only */
4947 	spdk_blob_set_read_only(blob);
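	/* spdk_blob_set_read_only() only updates the in-memory blob and marks it dirty;
	 * the sync below is what persists the read-only flag in the blob's metadata.
	 */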
4948 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
4949 	poll_threads();
4950 	CU_ASSERT(g_bserrno == 0);
4951 
4952 	/* Check if previously created blob is read only clone */
4953 	CU_ASSERT(spdk_blob_is_read_only(blob));
4954 	CU_ASSERT(!spdk_blob_is_snapshot(blob));
4955 	CU_ASSERT(spdk_blob_is_clone(blob));
4956 	CU_ASSERT(spdk_blob_is_thin_provisioned(blob));
4957 
4958 	/* Create clone from read only blob */
4959 	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
4960 	poll_threads();
4961 	CU_ASSERT(g_bserrno == 0);
4962 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
4963 	cloneid2 = g_blobid;
4964 
4965 	spdk_bs_open_blob(bs, cloneid2, blob_op_with_handle_complete, NULL);
4966 	poll_threads();
4967 	CU_ASSERT(g_bserrno == 0);
4968 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4969 	clone2 = g_blob;
4970 
4971 	CU_ASSERT(!spdk_blob_is_read_only(clone2));
4972 	CU_ASSERT(!spdk_blob_is_snapshot(clone2));
4973 	CU_ASSERT(spdk_blob_is_clone(clone2));
4974 	CU_ASSERT(spdk_blob_is_thin_provisioned(clone2));
4975 
4976 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid);
4977 
4978 	count = SPDK_COUNTOF(ids);
4979 	rc = spdk_blob_get_clones(bs, blobid, ids, &count);
4980 	CU_ASSERT(rc == 0);
4981 
4982 	CU_ASSERT(count == 1);
4983 	CU_ASSERT(ids[0] == cloneid2);
4984 
4985 	/* Close blobs */
4986 
4987 	spdk_blob_close(clone2, blob_op_complete, NULL);
4988 	poll_threads();
4989 	CU_ASSERT(g_bserrno == 0);
4990 
4991 	spdk_blob_close(blob, blob_op_complete, NULL);
4992 	poll_threads();
4993 	CU_ASSERT(g_bserrno == 0);
4994 
4995 	spdk_blob_close(clone, blob_op_complete, NULL);
4996 	poll_threads();
4997 	CU_ASSERT(g_bserrno == 0);
4998 
4999 	spdk_blob_close(snapshot, blob_op_complete, NULL);
5000 	poll_threads();
5001 	CU_ASSERT(g_bserrno == 0);
5002 
5003 	spdk_blob_close(snapshot2, blob_op_complete, NULL);
5004 	poll_threads();
5005 	CU_ASSERT(g_bserrno == 0);
5006 
5007 	/* Try to delete snapshot with more than 1 clone */
5008 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
5009 	poll_threads();
5010 	CU_ASSERT(g_bserrno != 0);
5011 
5012 	ut_bs_reload(&bs, &bs_opts);
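	/* From this point on the relations checked below are served from metadata that was
	 * rebuilt during the reload, not from the previously cached in-memory state. */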
5013 
5014 	/* Passing a NULL ids array should fail with -ENOMEM and return the number of clones in count */
5015 	count = SPDK_COUNTOF(ids);
5016 	rc = spdk_blob_get_clones(bs, snapshotid, NULL, &count);
5017 	CU_ASSERT(rc == -ENOMEM);
5018 	CU_ASSERT(count == 2);
5019 
5020 	/* An ids array that is too small should also fail with -ENOMEM and report the required count */
5021 	count = 1;
5022 	rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
5023 	CU_ASSERT(rc == -ENOMEM);
5024 	CU_ASSERT(count == 2);
5025 
5026 
5027 	/* Verify structure of loaded blob store */
5028 
5029 	/* snapshot */
5030 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid) == SPDK_BLOBID_INVALID);
5031 
5032 	count = SPDK_COUNTOF(ids);
5033 	rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
5034 	CU_ASSERT(rc == 0);
5035 	CU_ASSERT(count == 2);
5036 	CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
5037 	CU_ASSERT(ids[0] == snapshotid2 || ids[1] == snapshotid2);
5038 
5039 	/* blob */
5040 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
5041 	count = SPDK_COUNTOF(ids);
5042 	rc = spdk_blob_get_clones(bs, blobid, ids, &count);
5043 	CU_ASSERT(rc == 0);
5044 	CU_ASSERT(count == 1);
5045 	CU_ASSERT(ids[0] == cloneid2);
5046 
5047 	/* clone */
5048 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2);
5049 	count = SPDK_COUNTOF(ids);
5050 	rc = spdk_blob_get_clones(bs, cloneid, ids, &count);
5051 	CU_ASSERT(rc == 0);
5052 	CU_ASSERT(count == 0);
5053 
5054 	/* snapshot2 */
5055 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid);
5056 	count = SPDK_COUNTOF(ids);
5057 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
5058 	CU_ASSERT(rc == 0);
5059 	CU_ASSERT(count == 1);
5060 	CU_ASSERT(ids[0] == cloneid);
5061 
5062 	/* clone2 */
5063 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid);
5064 	count = SPDK_COUNTOF(ids);
5065 	rc = spdk_blob_get_clones(bs, cloneid2, ids, &count);
5066 	CU_ASSERT(rc == 0);
5067 	CU_ASSERT(count == 0);
5068 
5069 	/* Try to delete a blob that the user should not be able to remove */
5070 
5071 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
5072 	poll_threads();
5073 	CU_ASSERT(g_bserrno != 0);
5074 
5075 	/* Remove all blobs */
5076 
5077 	spdk_bs_delete_blob(bs, cloneid, blob_op_complete, NULL);
5078 	poll_threads();
5079 	CU_ASSERT(g_bserrno == 0);
5080 
5081 	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
5082 	poll_threads();
5083 	CU_ASSERT(g_bserrno == 0);
5084 
5085 	spdk_bs_delete_blob(bs, cloneid2, blob_op_complete, NULL);
5086 	poll_threads();
5087 	CU_ASSERT(g_bserrno == 0);
5088 
5089 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
5090 	poll_threads();
5091 	CU_ASSERT(g_bserrno == 0);
5092 
5093 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
5094 	poll_threads();
5095 	CU_ASSERT(g_bserrno == 0);
5096 
5097 	spdk_bs_unload(bs, bs_op_complete, NULL);
5098 	poll_threads();
5099 	CU_ASSERT(g_bserrno == 0);
5100 
5101 	g_bs = NULL;
5102 }
5103 
5104 /**
5105  * Snapshot-clones relation test 2
5106  *
5107  *         snapshot1
5108  *            |
5109  *         snapshot2
5110  *            |
5111  *      +-----+-----+
5112  *      |           |
5113  *   blob(ro)   snapshot3
5114  *      |           |
5115  *      |       snapshot4
5116  *      |        |     |
5117  *   clone2   clone  clone3
5118  */
5119 static void
5120 blob_relations2(void)
5121 {
5122 	struct spdk_blob_store *bs;
5123 	struct spdk_bs_dev *dev;
5124 	struct spdk_bs_opts bs_opts;
5125 	struct spdk_blob_opts opts;
5126 	struct spdk_blob *blob, *snapshot1, *snapshot2, *snapshot3, *snapshot4, *clone, *clone2;
5127 	spdk_blob_id blobid, snapshotid1, snapshotid2, snapshotid3, snapshotid4, cloneid, cloneid2,
5128 		     cloneid3;
5129 	int rc;
5130 	size_t count;
5131 	spdk_blob_id ids[10] = {};
5132 
5133 	dev = init_dev();
5134 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
5135 	snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");
5136 
5137 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
5138 	poll_threads();
5139 	CU_ASSERT(g_bserrno == 0);
5140 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
5141 	bs = g_bs;
5142 
5143 	/* 1. Create blob with 10 clusters */
5144 
5145 	ut_spdk_blob_opts_init(&opts);
5146 	opts.num_clusters = 10;
5147 
5148 	blob = ut_blob_create_and_open(bs, &opts);
5149 	blobid = spdk_blob_get_id(blob);
5150 
5151 	/* 2. Create snapshot1 */
5152 
5153 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5154 	poll_threads();
5155 	CU_ASSERT(g_bserrno == 0);
5156 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5157 	snapshotid1 = g_blobid;
5158 
5159 	spdk_bs_open_blob(bs, snapshotid1, blob_op_with_handle_complete, NULL);
5160 	poll_threads();
5161 	CU_ASSERT(g_bserrno == 0);
5162 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5163 	snapshot1 = g_blob;
5164 
5165 	CU_ASSERT(snapshot1->parent_id == SPDK_BLOBID_INVALID);
5166 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid1) == SPDK_BLOBID_INVALID);
5167 
5171 	/* Check if blob is the clone of snapshot1 */
5172 	CU_ASSERT(blob->parent_id == snapshotid1);
5173 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid1);
5174 
5175 	count = SPDK_COUNTOF(ids);
5176 	rc = spdk_blob_get_clones(bs, snapshotid1, ids, &count);
5177 	CU_ASSERT(rc == 0);
5178 	CU_ASSERT(count == 1);
5179 	CU_ASSERT(ids[0] == blobid);
5180 
5181 	/* 3. Create another snapshot */
5182 
5183 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5184 	poll_threads();
5185 	CU_ASSERT(g_bserrno == 0);
5186 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5187 	snapshotid2 = g_blobid;
5188 
5189 	spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL);
5190 	poll_threads();
5191 	CU_ASSERT(g_bserrno == 0);
5192 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5193 	snapshot2 = g_blob;
5194 
5195 	CU_ASSERT(spdk_blob_is_clone(snapshot2));
5196 	CU_ASSERT(snapshot2->parent_id == snapshotid1);
5197 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid1);
5198 
5199 	/* Check if snapshot2 is the clone of snapshot1 and blob
5200 	 * is a child of snapshot2 */
5201 	CU_ASSERT(blob->parent_id == snapshotid2);
5202 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid2);
5203 
5204 	count = SPDK_COUNTOF(ids);
5205 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
5206 	CU_ASSERT(rc == 0);
5207 	CU_ASSERT(count == 1);
5208 	CU_ASSERT(ids[0] == blobid);
5209 
5210 	/* 4. Create clone from snapshot */
5211 
5212 	spdk_bs_create_clone(bs, snapshotid2, NULL, blob_op_with_id_complete, NULL);
5213 	poll_threads();
5214 	CU_ASSERT(g_bserrno == 0);
5215 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5216 	cloneid = g_blobid;
5217 
5218 	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
5219 	poll_threads();
5220 	CU_ASSERT(g_bserrno == 0);
5221 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5222 	clone = g_blob;
5223 
5224 	CU_ASSERT(clone->parent_id == snapshotid2);
5225 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2);
5226 
5227 	/* Check if clone is on the snapshot's list */
5228 	count = SPDK_COUNTOF(ids);
5229 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
5230 	CU_ASSERT(rc == 0);
5231 	CU_ASSERT(count == 2);
5232 	CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
5233 	CU_ASSERT(ids[0] == cloneid || ids[1] == cloneid);
5234 
5235 	/* 5. Create snapshot of the clone */
5236 
5237 	spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL);
5238 	poll_threads();
5239 	CU_ASSERT(g_bserrno == 0);
5240 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5241 	snapshotid3 = g_blobid;
5242 
5243 	spdk_bs_open_blob(bs, snapshotid3, blob_op_with_handle_complete, NULL);
5244 	poll_threads();
5245 	CU_ASSERT(g_bserrno == 0);
5246 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5247 	snapshot3 = g_blob;
5248 
5249 	CU_ASSERT(snapshot3->parent_id == snapshotid2);
5250 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid3) == snapshotid2);
5251 
5252 	/* Check if clone has been converted into a clone of snapshot3 and snapshot3
5253 	 * is a child of snapshot2 */
5254 	CU_ASSERT(clone->parent_id == snapshotid3);
5255 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3);
5256 
5257 	count = SPDK_COUNTOF(ids);
5258 	rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count);
5259 	CU_ASSERT(rc == 0);
5260 	CU_ASSERT(count == 1);
5261 	CU_ASSERT(ids[0] == cloneid);
5262 
5263 	/* 6. Create another snapshot of the clone */
5264 
5265 	spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL);
5266 	poll_threads();
5267 	CU_ASSERT(g_bserrno == 0);
5268 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5269 	snapshotid4 = g_blobid;
5270 
5271 	spdk_bs_open_blob(bs, snapshotid4, blob_op_with_handle_complete, NULL);
5272 	poll_threads();
5273 	CU_ASSERT(g_bserrno == 0);
5274 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5275 	snapshot4 = g_blob;
5276 
5277 	CU_ASSERT(snapshot4->parent_id == snapshotid3);
5278 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid4) == snapshotid3);
5279 
5280 	/* Check if clone has been converted into a clone of snapshot4 and snapshot4
5281 	 * is a child of snapshot3 */
5282 	CU_ASSERT(clone->parent_id == snapshotid4);
5283 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid4);
5284 
5285 	count = SPDK_COUNTOF(ids);
5286 	rc = spdk_blob_get_clones(bs, snapshotid4, ids, &count);
5287 	CU_ASSERT(rc == 0);
5288 	CU_ASSERT(count == 1);
5289 	CU_ASSERT(ids[0] == cloneid);
5290 
5291 	/* 7. Remove snapshot 4 */
5292 
5293 	ut_blob_close_and_delete(bs, snapshot4);
5294 
5295 	/* Check if relations are back to the state from before creating snapshot 4 */
5296 	CU_ASSERT(clone->parent_id == snapshotid3);
5297 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3);
5298 
5299 	count = SPDK_COUNTOF(ids);
5300 	rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count);
5301 	CU_ASSERT(rc == 0);
5302 	CU_ASSERT(count == 1);
5303 	CU_ASSERT(ids[0] == cloneid);
5304 
5305 	/* 8. Create second clone of snapshot 3 and try to remove snapshot 3 */
5306 
5307 	spdk_bs_create_clone(bs, snapshotid3, NULL, blob_op_with_id_complete, NULL);
5308 	poll_threads();
5309 	CU_ASSERT(g_bserrno == 0);
5310 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5311 	cloneid3 = g_blobid;
5312 
5313 	spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL);
5314 	poll_threads();
5315 	CU_ASSERT(g_bserrno != 0);
5316 
5317 	/* 9. Open snapshot 3 again and try to remove it while clone 3 is closed */
5318 
5319 	spdk_bs_open_blob(bs, snapshotid3, blob_op_with_handle_complete, NULL);
5320 	poll_threads();
5321 	CU_ASSERT(g_bserrno == 0);
5322 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5323 	snapshot3 = g_blob;
5324 
5325 	spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL);
5326 	poll_threads();
5327 	CU_ASSERT(g_bserrno != 0);
5328 
5329 	spdk_blob_close(snapshot3, blob_op_complete, NULL);
5330 	poll_threads();
5331 	CU_ASSERT(g_bserrno == 0);
5332 
5333 	spdk_bs_delete_blob(bs, cloneid3, blob_op_complete, NULL);
5334 	poll_threads();
5335 	CU_ASSERT(g_bserrno == 0);
5336 
5337 	/* 10. Remove snapshot 1 */
5338 
5339 	ut_blob_close_and_delete(bs, snapshot1);
5340 
5341 	/* After removing snapshot 1, snapshot 2 should become the root (no parent) while keeping its clones */
5342 	CU_ASSERT(snapshot2->parent_id == SPDK_BLOBID_INVALID);
5343 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == SPDK_BLOBID_INVALID);
5344 
5345 	count = SPDK_COUNTOF(ids);
5346 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
5347 	CU_ASSERT(rc == 0);
5348 	CU_ASSERT(count == 2);
5349 	CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
5350 	CU_ASSERT(ids[0] == snapshotid3 || ids[1] == snapshotid3);
5351 
5352 	/* 11. Try to create a clone from a read-only blob */
5353 
5354 	/* Mark blob as read only */
5355 	spdk_blob_set_read_only(blob);
5356 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
5357 	poll_threads();
5358 	CU_ASSERT(g_bserrno == 0);
5359 
5360 	/* Create clone from read only blob */
5361 	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5362 	poll_threads();
5363 	CU_ASSERT(g_bserrno == 0);
5364 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5365 	cloneid2 = g_blobid;
5366 
5367 	spdk_bs_open_blob(bs, cloneid2, blob_op_with_handle_complete, NULL);
5368 	poll_threads();
5369 	CU_ASSERT(g_bserrno == 0);
5370 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5371 	clone2 = g_blob;
5372 
5373 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid);
5374 
5375 	count = SPDK_COUNTOF(ids);
5376 	rc = spdk_blob_get_clones(bs, blobid, ids, &count);
5377 	CU_ASSERT(rc == 0);
5378 	CU_ASSERT(count == 1);
5379 	CU_ASSERT(ids[0] == cloneid2);
5380 
5381 	/* Close blobs */
5382 
5383 	spdk_blob_close(clone2, blob_op_complete, NULL);
5384 	poll_threads();
5385 	CU_ASSERT(g_bserrno == 0);
5386 
5387 	spdk_blob_close(blob, blob_op_complete, NULL);
5388 	poll_threads();
5389 	CU_ASSERT(g_bserrno == 0);
5390 
5391 	spdk_blob_close(clone, blob_op_complete, NULL);
5392 	poll_threads();
5393 	CU_ASSERT(g_bserrno == 0);
5394 
5395 	spdk_blob_close(snapshot2, blob_op_complete, NULL);
5396 	poll_threads();
5397 	CU_ASSERT(g_bserrno == 0);
5398 
5399 	spdk_blob_close(snapshot3, blob_op_complete, NULL);
5400 	poll_threads();
5401 	CU_ASSERT(g_bserrno == 0);
5402 
5403 	ut_bs_reload(&bs, &bs_opts);
5404 
5405 	/* Verify structure of loaded blob store */
5406 
5407 	/* snapshot2 */
5408 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == SPDK_BLOBID_INVALID);
5409 
5410 	count = SPDK_COUNTOF(ids);
5411 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
5412 	CU_ASSERT(rc == 0);
5413 	CU_ASSERT(count == 2);
5414 	CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
5415 	CU_ASSERT(ids[0] == snapshotid3 || ids[1] == snapshotid3);
5416 
5417 	/* blob */
5418 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid2);
5419 	count = SPDK_COUNTOF(ids);
5420 	rc = spdk_blob_get_clones(bs, blobid, ids, &count);
5421 	CU_ASSERT(rc == 0);
5422 	CU_ASSERT(count == 1);
5423 	CU_ASSERT(ids[0] == cloneid2);
5424 
5425 	/* clone */
5426 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3);
5427 	count = SPDK_COUNTOF(ids);
5428 	rc = spdk_blob_get_clones(bs, cloneid, ids, &count);
5429 	CU_ASSERT(rc == 0);
5430 	CU_ASSERT(count == 0);
5431 
5432 	/* snapshot3 */
5433 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid3) == snapshotid2);
5434 	count = SPDK_COUNTOF(ids);
5435 	rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count);
5436 	CU_ASSERT(rc == 0);
5437 	CU_ASSERT(count == 1);
5438 	CU_ASSERT(ids[0] == cloneid);
5439 
5440 	/* clone2 */
5441 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid);
5442 	count = SPDK_COUNTOF(ids);
5443 	rc = spdk_blob_get_clones(bs, cloneid2, ids, &count);
5444 	CU_ASSERT(rc == 0);
5445 	CU_ASSERT(count == 0);
5446 
5447 	/* Try to delete all blobs in the worst possible order */
5448 
5449 	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
5450 	poll_threads();
5451 	CU_ASSERT(g_bserrno != 0);
5452 
5453 	spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL);
5454 	poll_threads();
5455 	CU_ASSERT(g_bserrno == 0);
5456 
5457 	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
5458 	poll_threads();
5459 	CU_ASSERT(g_bserrno != 0);
5460 
5461 	spdk_bs_delete_blob(bs, cloneid, blob_op_complete, NULL);
5462 	poll_threads();
5463 	CU_ASSERT(g_bserrno == 0);
5464 
5465 	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
5466 	poll_threads();
5467 	CU_ASSERT(g_bserrno == 0);
5468 
5469 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
5470 	poll_threads();
5471 	CU_ASSERT(g_bserrno == 0);
5472 
5473 	spdk_bs_delete_blob(bs, cloneid2, blob_op_complete, NULL);
5474 	poll_threads();
5475 	CU_ASSERT(g_bserrno == 0);
5476 
5477 	spdk_bs_unload(bs, bs_op_complete, NULL);
5478 	poll_threads();
5479 	CU_ASSERT(g_bserrno == 0);
5480 
5481 	g_bs = NULL;
5482 }
5483 
5484 static void
5485 blobstore_clean_power_failure(void)
5486 {
5487 	struct spdk_blob_store *bs;
5488 	struct spdk_blob *blob;
5489 	struct spdk_power_failure_thresholds thresholds = {};
5490 	bool clean = false;
5491 	struct spdk_bs_super_block *super = (struct spdk_bs_super_block *)&g_dev_buffer[0];
5492 	struct spdk_bs_super_block super_copy = {};
5493 
5494 	thresholds.general_threshold = 1;
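	/* The UT backing dev is armed to simulate a power failure after 'general_threshold'
	 * device operations. Each pass raises the threshold by one, so the md sync below is
	 * interrupted at every possible point until it finally completes cleanly. */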
5495 	while (!clean) {
5496 		/* Create bs and blob */
5497 		suite_blob_setup();
5498 		SPDK_CU_ASSERT_FATAL(g_bs != NULL);
5499 		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5500 		bs = g_bs;
5501 		blob = g_blob;
5502 
5503 		/* Super block should not change for the rest of the UT,
5504 		 * so save it now and compare against it later. */
5505 		memcpy(&super_copy, super, sizeof(struct spdk_bs_super_block));
5506 		SPDK_CU_ASSERT_FATAL(super->clean == 0);
5507 		SPDK_CU_ASSERT_FATAL(bs->clean == 0);
5508 
5509 		/* Force the bs/super block into a clean state
5510 		 * and mark the blob dirty to force a blob persist on the next md sync. */
5511 		blob->state = SPDK_BLOB_STATE_DIRTY;
5512 		bs->clean = 1;
5513 		super->clean = 1;
5514 		super->crc = blob_md_page_calc_crc(super);
5515 
5516 		g_bserrno = -1;
5517 		dev_set_power_failure_thresholds(thresholds);
5518 		spdk_blob_sync_md(blob, blob_op_complete, NULL);
5519 		poll_threads();
5520 		dev_reset_power_failure_event();
5521 
5522 		if (g_bserrno == 0) {
5523 			/* After successful md sync, both bs and super block
5524 			 * should be marked as not clean. */
5525 			SPDK_CU_ASSERT_FATAL(bs->clean == 0);
5526 			SPDK_CU_ASSERT_FATAL(super->clean == 0);
5527 			clean = true;
5528 		}
5529 
5530 		/* Depending on the point of failure, super block was either updated or not. */
5531 		super_copy.clean = super->clean;
5532 		super_copy.crc = blob_md_page_calc_crc(&super_copy);
5533 		/* Check that all other super block fields remained unchanged. */
5534 		SPDK_CU_ASSERT_FATAL(!memcmp(&super_copy, super, sizeof(struct spdk_bs_super_block)));
5535 
5536 		/* Delete blob and unload bs */
5537 		suite_blob_cleanup();
5538 
5539 		thresholds.general_threshold++;
5540 	}
5541 }
5542 
5543 static void
5544 blob_delete_snapshot_power_failure(void)
5545 {
5546 	struct spdk_bs_dev *dev;
5547 	struct spdk_blob_store *bs;
5548 	struct spdk_blob_opts opts;
5549 	struct spdk_blob *blob, *snapshot;
5550 	struct spdk_power_failure_thresholds thresholds = {};
5551 	spdk_blob_id blobid, snapshotid;
5552 	const void *value;
5553 	size_t value_len;
5554 	size_t count;
5555 	spdk_blob_id ids[3] = {};
5556 	int rc;
5557 	bool deleted = false;
5558 	int delete_snapshot_bserrno = -1;
5559 
5560 	thresholds.general_threshold = 1;
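	/* Sweep the simulated power failure across the snapshot deletion: each iteration
	 * lets one more device operation succeed before the failure hits, until the
	 * deletion finally completes without interruption. */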
5561 	while (!deleted) {
5562 		dev = init_dev();
5563 
5564 		spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
5565 		poll_threads();
5566 		CU_ASSERT(g_bserrno == 0);
5567 		SPDK_CU_ASSERT_FATAL(g_bs != NULL);
5568 		bs = g_bs;
5569 
5570 		/* Create blob */
5571 		ut_spdk_blob_opts_init(&opts);
5572 		opts.num_clusters = 10;
5573 
5574 		spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
5575 		poll_threads();
5576 		CU_ASSERT(g_bserrno == 0);
5577 		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5578 		blobid = g_blobid;
5579 
5580 		/* Create snapshot */
5581 		spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5582 		poll_threads();
5583 		CU_ASSERT(g_bserrno == 0);
5584 		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5585 		snapshotid = g_blobid;
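		/* Cluster 0 is used by blobstore metadata, so the blob's 10 data clusters are
		 * expected to occupy indices 1..10; index 11 must stay unallocated. */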
5586 		SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1));
5587 		SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11));
5588 
5589 		dev_set_power_failure_thresholds(thresholds);
5590 
5591 		spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
5592 		poll_threads();
5593 		delete_snapshot_bserrno = g_bserrno;
5594 
5595 		/* Do not shut down cleanly. The assumption is that once snapshot deletion
5596 		 * reports success, the changes to both blobs are already persisted. */
5597 		dev_reset_power_failure_event();
5598 		ut_bs_dirty_load(&bs, NULL);
5599 
5600 		SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1));
5601 		SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11));
5602 
5603 		spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
5604 		poll_threads();
5605 		CU_ASSERT(g_bserrno == 0);
5606 		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5607 		blob = g_blob;
5608 		SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == true);
5609 
5610 		spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
5611 		poll_threads();
5612 
5613 		if (g_bserrno == 0) {
5614 			SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5615 			snapshot = g_blob;
5616 			CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
5617 			count = SPDK_COUNTOF(ids);
5618 			rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
5619 			CU_ASSERT(rc == 0);
5620 			CU_ASSERT(count == 1);
5621 			CU_ASSERT(ids[0] == blobid);
5622 			rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_PENDING_REMOVAL, &value, &value_len);
5623 			CU_ASSERT(rc != 0);
5624 			SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(snapshot) == false);
5625 
5626 			spdk_blob_close(snapshot, blob_op_complete, NULL);
5627 			poll_threads();
5628 			CU_ASSERT(g_bserrno == 0);
5629 		} else {
5630 			CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID);
5631 			/* The snapshot might have been left in an unrecoverable state, so it does not open.
5632 			 * The delete may still perform further changes to the clone after that point.
5633 			 * This UT keeps iterating until the snapshot is deleted and the delete call succeeds. */
5634 			if (delete_snapshot_bserrno == 0) {
5635 				deleted = true;
5636 			}
5637 		}
5638 
5639 		spdk_blob_close(blob, blob_op_complete, NULL);
5640 		poll_threads();
5641 		CU_ASSERT(g_bserrno == 0);
5642 
5643 		spdk_bs_unload(bs, bs_op_complete, NULL);
5644 		poll_threads();
5645 		CU_ASSERT(g_bserrno == 0);
5646 
5647 		thresholds.general_threshold++;
5648 	}
5649 }
5650 
5651 static void
5652 blob_create_snapshot_power_failure(void)
5653 {
5654 	struct spdk_blob_store *bs = g_bs;
5655 	struct spdk_bs_dev *dev;
5656 	struct spdk_blob_opts opts;
5657 	struct spdk_blob *blob, *snapshot;
5658 	struct spdk_power_failure_thresholds thresholds = {};
5659 	spdk_blob_id blobid, snapshotid;
5660 	const void *value;
5661 	size_t value_len;
5662 	size_t count;
5663 	spdk_blob_id ids[3] = {};
5664 	int rc;
5665 	bool created = false;
5666 	int create_snapshot_bserrno = -1;
5667 
5668 	thresholds.general_threshold = 1;
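	/* Same power-failure sweep as in blob_delete_snapshot_power_failure(), except the
	 * failure is injected at successive points of the snapshot creation path. */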
5669 	while (!created) {
5670 		dev = init_dev();
5671 
5672 		spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
5673 		poll_threads();
5674 		CU_ASSERT(g_bserrno == 0);
5675 		SPDK_CU_ASSERT_FATAL(g_bs != NULL);
5676 		bs = g_bs;
5677 
5678 		/* Create blob */
5679 		ut_spdk_blob_opts_init(&opts);
5680 		opts.num_clusters = 10;
5681 
5682 		spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
5683 		poll_threads();
5684 		CU_ASSERT(g_bserrno == 0);
5685 		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5686 		blobid = g_blobid;
5687 		SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1));
5688 		SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11));
5689 
5690 		dev_set_power_failure_thresholds(thresholds);
5691 
5692 		/* Create snapshot */
5693 		spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5694 		poll_threads();
5695 		create_snapshot_bserrno = g_bserrno;
5696 		snapshotid = g_blobid;
5697 		SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1));
5698 		SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11));
5699 
5700 		/* Do not shut down cleanly. The assumption is that once snapshot creation
5701 		 * reports success, both blobs are power-fail safe. */
5702 		dev_reset_power_failure_event();
5703 		ut_bs_dirty_load(&bs, NULL);
5704 
5705 		SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1));
5706 		SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11));
5707 
5708 		spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
5709 		poll_threads();
5710 		CU_ASSERT(g_bserrno == 0);
5711 		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5712 		blob = g_blob;
5713 
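		/* If the simulated power failure hit before a snapshot ID was returned,
		 * snapshotid is still SPDK_BLOBID_INVALID and there is nothing to open. */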
5714 		if (snapshotid != SPDK_BLOBID_INVALID) {
5715 			spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
5716 			poll_threads();
5717 		}
5718 
5719 		if ((snapshotid != SPDK_BLOBID_INVALID) && (g_bserrno == 0)) {
5720 			SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5721 			snapshot = g_blob;
5722 			SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == true);
5723 			SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(snapshot) == false);
5724 			CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
5725 			count = SPDK_COUNTOF(ids);
5726 			rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
5727 			CU_ASSERT(rc == 0);
5728 			CU_ASSERT(count == 1);
5729 			CU_ASSERT(ids[0] == blobid);
5730 			rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_IN_PROGRESS, &value, &value_len);
5731 			CU_ASSERT(rc != 0);
5732 
5733 			spdk_blob_close(snapshot, blob_op_complete, NULL);
5734 			poll_threads();
5735 			CU_ASSERT(g_bserrno == 0);
5736 			if (create_snapshot_bserrno == 0) {
5737 				created = true;
5738 			}
5739 		} else {
5740 			CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID);
5741 			SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == false);
5742 		}
5743 
5744 		spdk_blob_close(blob, blob_op_complete, NULL);
5745 		poll_threads();
5746 		CU_ASSERT(g_bserrno == 0);
5747 
5748 		spdk_bs_unload(bs, bs_op_complete, NULL);
5749 		poll_threads();
5750 		CU_ASSERT(g_bserrno == 0);
5751 
5752 		thresholds.general_threshold++;
5753 	}
5754 }
5755 
5756 static void
5757 test_io_write(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
5758 {
5759 	uint8_t payload_ff[64 * 512];
5760 	uint8_t payload_aa[64 * 512];
5761 	uint8_t payload_00[64 * 512];
5762 	uint8_t *cluster0, *cluster1;
5763 
5764 	memset(payload_ff, 0xFF, sizeof(payload_ff));
5765 	memset(payload_aa, 0xAA, sizeof(payload_aa));
5766 	memset(payload_00, 0x00, sizeof(payload_00));
5767 
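	/* Offsets and lengths below are expressed in io_units. blob_io_unit() sets up a
	 * 512-byte io_unit and a cluster of SPDK_BS_PAGE_SIZE * 4 bytes, so one cluster
	 * spans 32 io_units (4 pages of 8 io_units each). */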
5768 	/* Try to perform I/O with io unit = 512 */
5769 	spdk_blob_io_write(blob, channel, payload_ff, 0, 1, blob_op_complete, NULL);
5770 	poll_threads();
5771 	CU_ASSERT(g_bserrno == 0);
5772 
5773 	/* If thin provisioning is set, the cluster should be allocated by now */
5774 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[0] != 0);
5775 	cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen];
5776 
5777 	/* Each character 0-F symbolizes a single io_unit containing a 512-byte block filled with that character.
5778 	 * Pages are separated by |. The whole block [...] symbolizes one cluster (containing 4 pages). */
5779 	/* cluster0: [ F000 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
5780 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
5781 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 31 * 512) == 0);
5782 
5783 	/* Verify write with offset on first page */
5784 	spdk_blob_io_write(blob, channel, payload_ff, 2, 1, blob_op_complete, NULL);
5785 	poll_threads();
5786 	CU_ASSERT(g_bserrno == 0);
5787 
5788 	/* cluster0: [ F0F0 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
5789 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
5790 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
5791 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
5792 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
5793 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_00, 28 * 512) == 0);
5794 
5795 	/* Verify write with offset on first page */
5796 	spdk_blob_io_write(blob, channel, payload_ff, 4, 4, blob_op_complete, NULL);
5797 	poll_threads();
5798 
5799 	/* cluster0: [ F0F0 FFFF | 0000 0000 | 0000 0000 | 0000 0000 ] */
5800 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
5801 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
5802 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
5803 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
5804 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 4 * 512) == 0);
5805 	CU_ASSERT(memcmp(cluster0 + 8 * 512, payload_00, 24 * 512) == 0);
5806 
5807 	/* Verify write with offset on second page */
5808 	spdk_blob_io_write(blob, channel, payload_ff, 8, 4, blob_op_complete, NULL);
5809 	poll_threads();
5810 
5811 	/* cluster0: [ F0F0 FFFF | FFFF 0000 | 0000 0000 | 0000 0000 ] */
5812 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
5813 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
5814 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
5815 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
5816 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 8 * 512) == 0);
5817 	CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0);
5818 
5819 	/* Verify write across multiple pages */
5820 	spdk_blob_io_write(blob, channel, payload_aa, 4, 8, blob_op_complete, NULL);
5821 	poll_threads();
5822 
5823 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 0000 ] */
5824 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
5825 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
5826 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
5827 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
5828 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
5829 	CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0);
5830 
5831 	/* Verify write across multiple clusters */
5832 	spdk_blob_io_write(blob, channel, payload_ff, 28, 8, blob_op_complete, NULL);
5833 	poll_threads();
5834 
5835 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0);
5836 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
5837 
5838 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
5839 	 * cluster1: [ FFFF 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
5840 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
5841 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
5842 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
5843 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
5844 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
5845 	CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0);
5846 
5847 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0);
5848 	CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 28 * 512) == 0);
5849 
5850 	/* Verify write to second cluster */
5851 	spdk_blob_io_write(blob, channel, payload_ff, 32 + 12, 2, blob_op_complete, NULL);
5852 	poll_threads();
5853 
5854 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0);
5855 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
5856 
5857 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
5858 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] */
5859 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
5860 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
5861 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
5862 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
5863 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
5864 	CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0);
5865 
5866 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0);
5867 	CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 8 * 512) == 0);
5868 	CU_ASSERT(memcmp(cluster1 + 12 * 512, payload_ff, 2 * 512) == 0);
5869 	CU_ASSERT(memcmp(cluster1 + 14 * 512, payload_00, 18 * 512) == 0);
5870 }
5871 
5872 static void
5873 test_io_read(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
5874 {
5875 	uint8_t payload_read[64 * 512];
5876 	uint8_t payload_ff[64 * 512];
5877 	uint8_t payload_aa[64 * 512];
5878 	uint8_t payload_00[64 * 512];
5879 
5880 	memset(payload_ff, 0xFF, sizeof(payload_ff));
5881 	memset(payload_aa, 0xAA, sizeof(payload_aa));
5882 	memset(payload_00, 0x00, sizeof(payload_00));
5883 
5884 	/* Read only first io unit */
5885 	/* cluster0: [ (F)0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
5886 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
5887 	 * payload_read: F000 0000 | 0000 0000 ... */
5888 	memset(payload_read, 0x00, sizeof(payload_read));
5889 	spdk_blob_io_read(blob, channel, payload_read, 0, 1, blob_op_complete, NULL);
5890 	poll_threads();
5891 	CU_ASSERT(g_bserrno == 0);
5892 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
5893 	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 31 * 512) == 0);
5894 
5895 	/* Read four io_units starting from offset = 2
5896 	 * cluster0: [ F0(F0 AA)AA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
5897 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
5898 	 * payload_read: F0AA 0000 | 0000 0000 ... */
5899 
5900 	memset(payload_read, 0x00, sizeof(payload_read));
5901 	spdk_blob_io_read(blob, channel, payload_read, 2, 4, blob_op_complete, NULL);
5902 	poll_threads();
5903 	CU_ASSERT(g_bserrno == 0);
5904 
5905 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
5906 	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0);
5907 	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_aa, 512) == 0);
5908 	CU_ASSERT(memcmp(payload_read + 3 * 512, payload_aa, 512) == 0);
5909 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0);
5910 
5911 	/* Read eight io_units across multiple pages
5912 	 * cluster0: [ F0F0 (AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ]
5913 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
5914 	 * payload_read: AAAA AAAA | 0000 0000 ... */
5915 	memset(payload_read, 0x00, sizeof(payload_read));
5916 	spdk_blob_io_read(blob, channel, payload_read, 4, 8, blob_op_complete, NULL);
5917 	poll_threads();
5918 	CU_ASSERT(g_bserrno == 0);
5919 
5920 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_aa, 8 * 512) == 0);
5921 	CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0);
5922 
5923 	/* Read eight io_units across multiple clusters
5924 	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 (FFFF ]
5925 	 * cluster1: [ FFFF) 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
5926 	 * payload_read: FFFF FFFF | 0000 0000 ... */
5927 	memset(payload_read, 0x00, sizeof(payload_read));
5928 	spdk_blob_io_read(blob, channel, payload_read, 28, 8, blob_op_complete, NULL);
5929 	poll_threads();
5930 	CU_ASSERT(g_bserrno == 0);
5931 
5932 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 8 * 512) == 0);
5933 	CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0);
5934 
5935 	/* Read four io_units from second cluster
5936 	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
5937 	 * cluster1: [ FFFF 0000 | 00(00 FF)00 | 0000 0000 | 0000 0000 ]
5938 	 * payload_read: 00FF 0000 | 0000 0000 ... */
5939 	memset(payload_read, 0x00, sizeof(payload_read));
5940 	spdk_blob_io_read(blob, channel, payload_read, 32 + 10, 4, blob_op_complete, NULL);
5941 	poll_threads();
5942 	CU_ASSERT(g_bserrno == 0);
5943 
5944 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_00, 2 * 512) == 0);
5945 	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 2 * 512) == 0);
5946 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0);
5947 
5948 	/* Read second cluster
5949 	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
5950 	 * cluster1: [ (FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ]
5951 	 * payload_read: FFFF 0000 | 0000 FF00 ... */
5952 	memset(payload_read, 0x00, sizeof(payload_read));
5953 	spdk_blob_io_read(blob, channel, payload_read, 32, 32, blob_op_complete, NULL);
5954 	poll_threads();
5955 	CU_ASSERT(g_bserrno == 0);
5956 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 4 * 512) == 0);
5957 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 8 * 512) == 0);
5958 	CU_ASSERT(memcmp(payload_read + 12 * 512, payload_ff, 2 * 512) == 0);
5959 	CU_ASSERT(memcmp(payload_read + 14 * 512, payload_00, 18 * 512) == 0);
5960 
5961 	/* Read the whole two clusters
5962 	 * cluster0: [ (F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
5963 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ] */
5964 	memset(payload_read, 0x00, sizeof(payload_read));
5965 	spdk_blob_io_read(blob, channel, payload_read, 0, 64, blob_op_complete, NULL);
5966 	poll_threads();
5967 	CU_ASSERT(g_bserrno == 0);
5968 
5969 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
5970 	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0);
5971 	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 512) == 0);
5972 	CU_ASSERT(memcmp(payload_read + 3 * 512, payload_00, 512) == 0);
5973 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_aa, 8 * 512) == 0);
5974 	CU_ASSERT(memcmp(payload_read + 28 * 512, payload_ff, 4 * 512) == 0);
5975 
5976 	CU_ASSERT(memcmp(payload_read + (32 + 0) * 512, payload_ff, 4 * 512) == 0);
5977 	CU_ASSERT(memcmp(payload_read + (32 + 4) * 512, payload_00, 8 * 512) == 0);
5978 	CU_ASSERT(memcmp(payload_read + (32 + 12) * 512, payload_ff, 2 * 512) == 0);
5979 	CU_ASSERT(memcmp(payload_read + (32 + 14) * 512, payload_00, 18 * 512) == 0);
5980 }
5981 
5982 
5983 static void
5984 test_io_unmap(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
5985 {
5986 	uint8_t payload_ff[64 * 512];
5987 	uint8_t payload_aa[64 * 512];
5988 	uint8_t payload_00[64 * 512];
5989 	uint8_t *cluster0, *cluster1;
5990 
5991 	memset(payload_ff, 0xFF, sizeof(payload_ff));
5992 	memset(payload_aa, 0xAA, sizeof(payload_aa));
5993 	memset(payload_00, 0x00, sizeof(payload_00));
5994 
5995 	cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen];
5996 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
5997 
5998 	/* Unmap */
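	/* Unmapping all 64 io_units covers both clusters; the UT backing dev is expected
	 * to zero the unmapped range, which is what the memcmp() checks below rely on. */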
5999 	spdk_blob_io_unmap(blob, channel, 0, 64, blob_op_complete, NULL);
6000 	poll_threads();
6001 
6002 	CU_ASSERT(g_bserrno == 0);
6003 
6004 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_00, 32 * 512) == 0);
6005 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_00, 32 * 512) == 0);
6006 }
6007 
6008 static void
6009 test_io_zeroes(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
6010 {
6011 	uint8_t payload_ff[64 * 512];
6012 	uint8_t payload_aa[64 * 512];
6013 	uint8_t payload_00[64 * 512];
6014 	uint8_t *cluster0, *cluster1;
6015 
6016 	memset(payload_ff, 0xFF, sizeof(payload_ff));
6017 	memset(payload_aa, 0xAA, sizeof(payload_aa));
6018 	memset(payload_00, 0x00, sizeof(payload_00));
6019 
6020 	cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen];
6021 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
6022 
6023 	/* Write zeroes */
6024 	spdk_blob_io_write_zeroes(blob, channel, 0, 64, blob_op_complete, NULL);
6025 	poll_threads();
6026 
6027 	CU_ASSERT(g_bserrno == 0);
6028 
6029 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_00, 32 * 512) == 0);
6030 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_00, 32 * 512) == 0);
6031 }
6032 
6033 
6034 static void
6035 test_iov_write(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
6036 {
6037 	uint8_t payload_ff[64 * 512];
6038 	uint8_t payload_aa[64 * 512];
6039 	uint8_t payload_00[64 * 512];
6040 	uint8_t *cluster0, *cluster1;
6041 	struct iovec iov[4];
6042 
6043 	memset(payload_ff, 0xFF, sizeof(payload_ff));
6044 	memset(payload_aa, 0xAA, sizeof(payload_aa));
6045 	memset(payload_00, 0x00, sizeof(payload_00));
6046 
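	/* Same scenario as test_io_write(), but issued through the vectored
	 * spdk_blob_io_writev() API with the payload described by iovecs. */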
6047 	/* Try to perform I/O with io unit = 512 */
6048 	iov[0].iov_base = payload_ff;
6049 	iov[0].iov_len = 1 * 512;
6050 	spdk_blob_io_writev(blob, channel, iov, 1, 0, 1, blob_op_complete, NULL);
6051 	poll_threads();
6052 	CU_ASSERT(g_bserrno == 0);
6053 
6054 	/* If thin provisioning is set, the cluster should be allocated by now */
6055 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[0] != 0);
6056 	cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen];
6057 
6058 	/* Each character 0-F symbolizes a single io_unit containing a 512-byte block filled with that character.
6059 	 * Pages are separated by |. The whole block [...] symbolizes one cluster (containing 4 pages). */
6060 	/* cluster0: [ F000 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
6061 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6062 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 31 * 512) == 0);
6063 
6064 	/* Verify write with offset on first page */
6065 	iov[0].iov_base = payload_ff;
6066 	iov[0].iov_len = 1 * 512;
6067 	spdk_blob_io_writev(blob, channel, iov, 1, 2, 1, blob_op_complete, NULL);
6068 	poll_threads();
6069 	CU_ASSERT(g_bserrno == 0);
6070 
6071 	/* cluster0: [ F0F0 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
6072 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6073 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6074 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6075 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6076 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_00, 28 * 512) == 0);
6077 
6078 	/* Verify write with offset on first page */
6079 	iov[0].iov_base = payload_ff;
6080 	iov[0].iov_len = 4 * 512;
6081 	spdk_blob_io_writev(blob, channel, iov, 1, 4, 4, blob_op_complete, NULL);
6082 	poll_threads();
6083 
6084 	/* cluster0: [ F0F0 FFFF | 0000 0000 | 0000 0000 | 0000 0000 ] */
6085 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6086 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6087 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6088 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6089 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 4 * 512) == 0);
6090 	CU_ASSERT(memcmp(cluster0 + 8 * 512, payload_00, 24 * 512) == 0);
6091 
6092 	/* Verify write with offset on second page */
6093 	iov[0].iov_base = payload_ff;
6094 	iov[0].iov_len = 4 * 512;
6095 	spdk_blob_io_writev(blob, channel, iov, 1, 8, 4, blob_op_complete, NULL);
6096 	poll_threads();
6097 
6098 	/* cluster0: [ F0F0 FFFF | FFFF 0000 | 0000 0000 | 0000 0000 ] */
6099 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6100 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6101 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6102 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6103 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 8 * 512) == 0);
6104 	CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0);
6105 
6106 	/* Verify write across multiple pages */
6107 	iov[0].iov_base = payload_aa;
6108 	iov[0].iov_len = 8 * 512;
6109 	spdk_blob_io_writev(blob, channel, iov, 1, 4, 8, blob_op_complete, NULL);
6110 	poll_threads();
6111 
6112 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 0000 ] */
6113 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6114 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6115 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6116 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6117 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
6118 	CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0);
6119 
6120 	/* Verify write across multiple clusters */
6121 
6122 	iov[0].iov_base = payload_ff;
6123 	iov[0].iov_len = 8 * 512;
6124 	spdk_blob_io_writev(blob, channel, iov, 1, 28, 8, blob_op_complete, NULL);
6125 	poll_threads();
6126 
6127 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0);
6128 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
6129 
6130 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6131 	 * cluster1: [ FFFF 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
6132 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6133 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6134 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6135 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6136 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
6137 	CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 16 * 512) == 0);
6138 	CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0);
6139 
6140 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0);
6141 	CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 28 * 512) == 0);
6142 
6143 	/* Verify write to second cluster */
6144 
6145 	iov[0].iov_base = payload_ff;
6146 	iov[0].iov_len = 2 * 512;
6147 	spdk_blob_io_writev(blob, channel, iov, 1, 32 + 12, 2, blob_op_complete, NULL);
6148 	poll_threads();
6149 
6150 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0);
6151 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
6152 
6153 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6154 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] */
6155 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6156 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6157 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6158 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6159 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
6160 	CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0);
6161 
6162 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0);
6163 	CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 8 * 512) == 0);
6164 	CU_ASSERT(memcmp(cluster1 + 12 * 512, payload_ff, 2 * 512) == 0);
6165 	CU_ASSERT(memcmp(cluster1 + 14 * 512, payload_00, 18 * 512) == 0);
6166 }
6167 
6168 static void
6169 test_iov_read(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
6170 {
6171 	uint8_t payload_read[64 * 512];
6172 	uint8_t payload_ff[64 * 512];
6173 	uint8_t payload_aa[64 * 512];
6174 	uint8_t payload_00[64 * 512];
6175 	struct iovec iov[4];
6176 
6177 	memset(payload_ff, 0xFF, sizeof(payload_ff));
6178 	memset(payload_aa, 0xAA, sizeof(payload_aa));
6179 	memset(payload_00, 0x00, sizeof(payload_00));
6180 
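	/* Mirror of test_io_read() using spdk_blob_io_readv(); the destination buffer is
	 * deliberately split across up to four iovecs to exercise scattered reads that
	 * cross page and cluster boundaries. */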
6181 	/* Read only first io unit */
6182 	/* cluster0: [ (F)0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6183 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
6184 	 * payload_read: F000 0000 | 0000 0000 ... */
6185 	memset(payload_read, 0x00, sizeof(payload_read));
6186 	iov[0].iov_base = payload_read;
6187 	iov[0].iov_len = 1 * 512;
6188 	spdk_blob_io_readv(blob, channel, iov, 1, 0, 1, blob_op_complete, NULL);
6189 	poll_threads();
6190 
6191 	CU_ASSERT(g_bserrno == 0);
6192 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
6193 	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 31 * 512) == 0);
6194 
6195 	/* Read four io_units starting from offset = 2
6196 	 * cluster0: [ F0(F0 AA)AA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6197 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
6198 	 * payload_read: F0AA 0000 | 0000 0000 ... */
6199 
6200 	memset(payload_read, 0x00, sizeof(payload_read));
6201 	iov[0].iov_base = payload_read;
6202 	iov[0].iov_len = 4 * 512;
6203 	spdk_blob_io_readv(blob, channel, iov, 1, 2, 4, blob_op_complete, NULL);
6204 	poll_threads();
6205 	CU_ASSERT(g_bserrno == 0);
6206 
6207 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
6208 	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0);
6209 	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_aa, 512) == 0);
6210 	CU_ASSERT(memcmp(payload_read + 3 * 512, payload_aa, 512) == 0);
6211 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0);
6212 
6213 	/* Read eight io_units across multiple pages
6214 	 * cluster0: [ F0F0 (AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ]
6215 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
6216 	 * payload_read: AAAA AAAA | 0000 0000 ... */
6217 	memset(payload_read, 0x00, sizeof(payload_read));
6218 	iov[0].iov_base = payload_read;
6219 	iov[0].iov_len = 4 * 512;
6220 	iov[1].iov_base = payload_read + 4 * 512;
6221 	iov[1].iov_len = 4 * 512;
6222 	spdk_blob_io_readv(blob, channel, iov, 2, 4, 8, blob_op_complete, NULL);
6223 	poll_threads();
6224 	CU_ASSERT(g_bserrno == 0);
6225 
6226 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_aa, 8 * 512) == 0);
6227 	CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0);
6228 
6229 	/* Read eight io_units across multiple clusters
6230 	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 (FFFF ]
6231 	 * cluster1: [ FFFF) 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
6232 	 * payload_read: FFFF FFFF | 0000 0000 ... */
6233 	memset(payload_read, 0x00, sizeof(payload_read));
6234 	iov[0].iov_base = payload_read;
6235 	iov[0].iov_len = 2 * 512;
6236 	iov[1].iov_base = payload_read + 2 * 512;
6237 	iov[1].iov_len = 2 * 512;
6238 	iov[2].iov_base = payload_read + 4 * 512;
6239 	iov[2].iov_len = 2 * 512;
6240 	iov[3].iov_base = payload_read + 6 * 512;
6241 	iov[3].iov_len = 2 * 512;
6242 	spdk_blob_io_readv(blob, channel, iov, 4, 28, 8, blob_op_complete, NULL);
6243 	poll_threads();
6244 	CU_ASSERT(g_bserrno == 0);
6245 
6246 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 8 * 512) == 0);
6247 	CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0);
6248 
6249 	/* Read four io_units from second cluster
6250 	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6251 	 * cluster1: [ FFFF 0000 | 00(00 FF)00 | 0000 0000 | 0000 0000 ]
6252 	 * payload_read: 00FF 0000 | 0000 0000 ... */
6253 	memset(payload_read, 0x00, sizeof(payload_read));
6254 	iov[0].iov_base = payload_read;
6255 	iov[0].iov_len = 1 * 512;
6256 	iov[1].iov_base = payload_read + 1 * 512;
6257 	iov[1].iov_len = 3 * 512;
6258 	spdk_blob_io_readv(blob, channel, iov, 2, 32 + 10, 4, blob_op_complete, NULL);
6259 	poll_threads();
6260 	CU_ASSERT(g_bserrno == 0);
6261 
6262 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_00, 2 * 512) == 0);
6263 	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 2 * 512) == 0);
6264 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0);
6265 
6266 	/* Read second cluster
6267 	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6268 	 * cluster1: [ (FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ]
6269 	 * payload_read: FFFF 0000 | 0000 FF00 ... */
6270 	memset(payload_read, 0x00, sizeof(payload_read));
6271 	iov[0].iov_base = payload_read;
6272 	iov[0].iov_len = 1 * 512;
6273 	iov[1].iov_base = payload_read + 1 * 512;
6274 	iov[1].iov_len = 2 * 512;
6275 	iov[2].iov_base = payload_read + 3 * 512;
6276 	iov[2].iov_len = 4 * 512;
6277 	iov[3].iov_base = payload_read + 7 * 512;
6278 	iov[3].iov_len = 25 * 512;
6279 	spdk_blob_io_readv(blob, channel, iov, 4, 32, 32, blob_op_complete, NULL);
6280 	poll_threads();
6281 	CU_ASSERT(g_bserrno == 0);
6282 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 4 * 512) == 0);
6283 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 8 * 512) == 0);
6284 	CU_ASSERT(memcmp(payload_read + 12 * 512, payload_ff, 2 * 512) == 0);
6285 	CU_ASSERT(memcmp(payload_read + 14 * 512, payload_00, 18 * 512) == 0);
6286 
6287 	/* Read the whole two clusters
6288 	 * cluster0: [ (F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6289 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ] */
6290 	memset(payload_read, 0x00, sizeof(payload_read));
6291 	iov[0].iov_base = payload_read;
6292 	iov[0].iov_len = 1 * 512;
6293 	iov[1].iov_base = payload_read + 1 * 512;
6294 	iov[1].iov_len = 8 * 512;
6295 	iov[2].iov_base = payload_read + 9 * 512;
6296 	iov[2].iov_len = 16 * 512;
6297 	iov[3].iov_base = payload_read + 25 * 512;
6298 	iov[3].iov_len = 39 * 512;
6299 	spdk_blob_io_readv(blob, channel, iov, 4, 0, 64, blob_op_complete, NULL);
6300 	poll_threads();
6301 	CU_ASSERT(g_bserrno == 0);
6302 
6303 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
6304 	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0);
6305 	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 512) == 0);
6306 	CU_ASSERT(memcmp(payload_read + 3 * 512, payload_00, 512) == 0);
6307 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_aa, 8 * 512) == 0);
6308 	CU_ASSERT(memcmp(payload_read + 28 * 512, payload_ff, 4 * 512) == 0);
6309 
6310 	CU_ASSERT(memcmp(payload_read + (32 + 0) * 512, payload_ff, 4 * 512) == 0);
6311 	CU_ASSERT(memcmp(payload_read + (32 + 4) * 512, payload_00, 8 * 512) == 0);
6312 	CU_ASSERT(memcmp(payload_read + (32 + 12) * 512, payload_ff, 2 * 512) == 0);
6313 	CU_ASSERT(memcmp(payload_read + (32 + 14) * 512, payload_00, 18 * 512) == 0);
6314 }
6315 
6316 static void
6317 blob_io_unit(void)
6318 {
6319 	struct spdk_bs_opts bsopts;
6320 	struct spdk_blob_opts opts;
6321 	struct spdk_blob_store *bs;
6322 	struct spdk_bs_dev *dev;
6323 	struct spdk_blob *blob, *snapshot, *clone;
6324 	spdk_blob_id blobid;
6325 	struct spdk_io_channel *channel;
6326 
6327 	/* Create dev with 512 bytes io unit size */
6328 
6329 	spdk_bs_opts_init(&bsopts, sizeof(bsopts));
6330 	bsopts.cluster_sz = SPDK_BS_PAGE_SIZE * 4;	/* 8 * 4 = 32 io_unit */
6331 	snprintf(bsopts.bstype.bstype, sizeof(bsopts.bstype.bstype), "TESTTYPE");
6332 
6333 	/* The dev advertises 512-byte blocks, so the blobstore will use a 512-byte io_unit */
6334 	dev = init_dev();
6335 	dev->blocklen = 512;
6336 	dev->blockcnt =  DEV_BUFFER_SIZE / dev->blocklen;
6337 
6338 	/* Initialize a new blob store */
6339 	spdk_bs_init(dev, &bsopts, bs_op_with_handle_complete, NULL);
6340 	poll_threads();
6341 	CU_ASSERT(g_bserrno == 0);
6342 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
6343 	bs = g_bs;
6344 
6345 	CU_ASSERT(spdk_bs_get_io_unit_size(bs) == 512);
6346 	channel = spdk_bs_alloc_io_channel(bs);
6347 
6348 	/* Create thick provisioned blob */
6349 	ut_spdk_blob_opts_init(&opts);
6350 	opts.thin_provision = false;
6351 	opts.num_clusters = 32;
6352 
6353 	blob = ut_blob_create_and_open(bs, &opts);
6354 	blobid = spdk_blob_get_id(blob);
6355 
6356 	test_io_write(dev, blob, channel);
6357 	test_io_read(dev, blob, channel);
6358 	test_io_zeroes(dev, blob, channel);
6359 
6360 	test_iov_write(dev, blob, channel);
6361 	test_iov_read(dev, blob, channel);
6362 
6363 	test_io_unmap(dev, blob, channel);
6364 
6365 	spdk_blob_close(blob, blob_op_complete, NULL);
6366 	poll_threads();
6367 	CU_ASSERT(g_bserrno == 0);
6368 	blob = NULL;
6369 	g_blob = NULL;
6370 
6371 	/* Create thin provisioned blob */
6372 
6373 	ut_spdk_blob_opts_init(&opts);
6374 	opts.thin_provision = true;
6375 	opts.num_clusters = 32;
6376 
6377 	blob = ut_blob_create_and_open(bs, &opts);
6378 	blobid = spdk_blob_get_id(blob);
6379 
6380 	test_io_write(dev, blob, channel);
6381 	test_io_read(dev, blob, channel);
6382 
6383 	test_io_zeroes(dev, blob, channel);
6384 
6385 	test_iov_write(dev, blob, channel);
6386 	test_iov_read(dev, blob, channel);
6387 
6388 	/* Create snapshot */
6389 
6390 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
6391 	poll_threads();
6392 	CU_ASSERT(g_bserrno == 0);
6393 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
6394 	blobid = g_blobid;
6395 
6396 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
6397 	poll_threads();
6398 	CU_ASSERT(g_bserrno == 0);
6399 	CU_ASSERT(g_blob != NULL);
6400 	snapshot = g_blob;
6401 
6402 	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
6403 	poll_threads();
6404 	CU_ASSERT(g_bserrno == 0);
6405 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
6406 	blobid = g_blobid;
6407 
6408 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
6409 	poll_threads();
6410 	CU_ASSERT(g_bserrno == 0);
6411 	CU_ASSERT(g_blob != NULL);
6412 	clone = g_blob;
6413 
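	/* Reads must work through all three handles: the original blob, its snapshot
	 * and the newly created clone. */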
6414 	test_io_read(dev, blob, channel);
6415 	test_io_read(dev, snapshot, channel);
6416 	test_io_read(dev, clone, channel);
6417 
6418 	test_iov_read(dev, blob, channel);
6419 	test_iov_read(dev, snapshot, channel);
6420 	test_iov_read(dev, clone, channel);
6421 
6422 	/* Inflate clone */
6423 
6424 	spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
6425 	poll_threads();
6426 
6427 	CU_ASSERT(g_bserrno == 0);
6428 
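	/* After inflation the clone owns all of its clusters, so it can be read,
	 * unmapped and rewritten independently of the snapshot. */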
6429 	test_io_read(dev, clone, channel);
6430 
6431 	test_io_unmap(dev, clone, channel);
6432 
6433 	test_iov_write(dev, clone, channel);
6434 	test_iov_read(dev, clone, channel);
6435 
6436 	spdk_blob_close(blob, blob_op_complete, NULL);
6437 	spdk_blob_close(snapshot, blob_op_complete, NULL);
6438 	spdk_blob_close(clone, blob_op_complete, NULL);
6439 	poll_threads();
6440 	CU_ASSERT(g_bserrno == 0);
6441 	blob = NULL;
6442 	g_blob = NULL;
6443 
6444 	spdk_bs_free_io_channel(channel);
6445 	poll_threads();
6446 
6447 	/* Unload the blob store */
6448 	spdk_bs_unload(bs, bs_op_complete, NULL);
6449 	poll_threads();
6450 	CU_ASSERT(g_bserrno == 0);
6451 	g_bs = NULL;
6452 	g_blob = NULL;
6453 	g_blobid = 0;
6454 }
6455 
6456 static void
6457 blob_io_unit_compatibility(void)
6458 {
6459 	struct spdk_bs_opts bsopts;
6460 	struct spdk_blob_store *bs;
6461 	struct spdk_bs_dev *dev;
6462 	struct spdk_bs_super_block *super;
6463 
6464 	/* Create a dev with a 512-byte io unit size */
6465 
6466 	spdk_bs_opts_init(&bsopts, sizeof(bsopts));
6467 	bsopts.cluster_sz = SPDK_BS_PAGE_SIZE * 4;	/* 8 * 4 = 32 io_unit */
6468 	snprintf(bsopts.bstype.bstype, sizeof(bsopts.bstype.bstype), "TESTTYPE");
6469 
6470 	/* Set up a dev whose 512-byte block size becomes the blobstore io unit */
6471 	dev = init_dev();
6472 	dev->blocklen = 512;
6473 	dev->blockcnt = DEV_BUFFER_SIZE / dev->blocklen;
6474 
6475 	/* Initialize a new blob store */
6476 	spdk_bs_init(dev, &bsopts, bs_op_with_handle_complete, NULL);
6477 	poll_threads();
6478 	CU_ASSERT(g_bserrno == 0);
6479 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
6480 	bs = g_bs;
6481 
6482 	CU_ASSERT(spdk_bs_get_io_unit_size(bs) == 512);
6483 
6484 	/* Unload the blob store */
6485 	spdk_bs_unload(bs, bs_op_complete, NULL);
6486 	poll_threads();
6487 	CU_ASSERT(g_bserrno == 0);
6488 
6489 	/* Modify the super block to look like an older on-disk version (io_unit_size == 0)
6490 	 * and check that the loaded io unit size falls back to SPDK_BS_PAGE_SIZE. */
6491 	super = (struct spdk_bs_super_block *)&g_dev_buffer[0];
6492 	super->io_unit_size = 0;
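	/* Recompute the super block CRC so the doctored super block still passes
	 * validation when the blobstore is loaded. */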
6493 	super->crc = blob_md_page_calc_crc(super);
6494 
6495 	dev = init_dev();
6496 	dev->blocklen = 512;
6497 	dev->blockcnt = DEV_BUFFER_SIZE / dev->blocklen;
6498 
6499 	spdk_bs_load(dev, &bsopts, bs_op_with_handle_complete, NULL);
6500 	poll_threads();
6501 	CU_ASSERT(g_bserrno == 0);
6502 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
6503 	bs = g_bs;
6504 
6505 	CU_ASSERT(spdk_bs_get_io_unit_size(bs) == SPDK_BS_PAGE_SIZE);
6506 
6507 	/* Unload the blob store */
6508 	spdk_bs_unload(bs, bs_op_complete, NULL);
6509 	poll_threads();
6510 	CU_ASSERT(g_bserrno == 0);
6511 
6512 	g_bs = NULL;
6513 	g_blob = NULL;
6514 	g_blobid = 0;
6515 }
6516 
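/* Completion callbacks used by blob_simultaneous_operations() to check that two
 * metadata syncs issued back to back complete in order: the first completion sets
 * an xattr that the second completion expects to observe. */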
6517 static void
6518 first_sync_complete(void *cb_arg, int bserrno)
6519 {
6520 	struct spdk_blob *blob = cb_arg;
6521 	int rc;
6522 
6523 	CU_ASSERT(bserrno == 0);
6524 	rc = spdk_blob_set_xattr(blob, "sync", "second", strlen("second") + 1);
6525 	CU_ASSERT(rc == 0);
6526 	CU_ASSERT(g_bserrno == -1);
6527 
6528 	/* Keep g_bserrno at -1; only the
6529 	 * second sync completion should set it to 0. */
6530 }
6531 
6532 static void
6533 second_sync_complete(void *cb_arg, int bserrno)
6534 {
6535 	struct spdk_blob *blob = cb_arg;
6536 	const void *value;
6537 	size_t value_len;
6538 	int rc;
6539 
6540 	CU_ASSERT(bserrno == 0);
6541 
6542 	/* Verify that the first sync completion had a chance to execute */
6543 	rc = spdk_blob_get_xattr_value(blob, "sync", &value, &value_len);
6544 	CU_ASSERT(rc == 0);
6545 	SPDK_CU_ASSERT_FATAL(value != NULL);
6546 	CU_ASSERT(value_len == strlen("second") + 1);
6547 	CU_ASSERT_NSTRING_EQUAL_FATAL(value, "second", value_len);
6548 
6549 	CU_ASSERT(g_bserrno == -1);
6550 	g_bserrno = bserrno;
6551 }
6552 
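/* Issue pairs of competing operations on the same blob: while a snapshot, inflate,
 * clone or resize is in progress, a delete of the same blob must fail with -EBUSY. */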
6553 static void
6554 blob_simultaneous_operations(void)
6555 {
6556 	struct spdk_blob_store *bs = g_bs;
6557 	struct spdk_blob_opts opts;
6558 	struct spdk_blob *blob, *snapshot;
6559 	spdk_blob_id blobid, snapshotid;
6560 	struct spdk_io_channel *channel;
6561 	int rc;
6562 
6563 	channel = spdk_bs_alloc_io_channel(bs);
6564 	SPDK_CU_ASSERT_FATAL(channel != NULL);
6565 
6566 	ut_spdk_blob_opts_init(&opts);
6567 	opts.num_clusters = 10;
6568 
6569 	blob = ut_blob_create_and_open(bs, &opts);
6570 	blobid = spdk_blob_get_id(blob);
6571 
6572 	/* Create a snapshot and try to remove the blob at the same time:
6573 	 * - the snapshot should be created successfully
6574 	 * - the delete operation should fail with -EBUSY */
6575 	CU_ASSERT(blob->locked_operation_in_progress == false);
6576 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
6577 	CU_ASSERT(blob->locked_operation_in_progress == true);
6578 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
6579 	CU_ASSERT(blob->locked_operation_in_progress == true);
6580 	/* Deletion failure */
6581 	CU_ASSERT(g_bserrno == -EBUSY);
6582 	poll_threads();
6583 	CU_ASSERT(blob->locked_operation_in_progress == false);
6584 	/* Snapshot creation success */
6585 	CU_ASSERT(g_bserrno == 0);
6586 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
6587 
6588 	snapshotid = g_blobid;
6589 
6590 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
6591 	poll_threads();
6592 	CU_ASSERT(g_bserrno == 0);
6593 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
6594 	snapshot = g_blob;
6595 
6596 	/* Inflate the blob and try to remove it at the same time:
6597 	 * - the blob should be inflated successfully
6598 	 * - the delete operation should fail with -EBUSY */
6599 	CU_ASSERT(blob->locked_operation_in_progress == false);
6600 	spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
6601 	CU_ASSERT(blob->locked_operation_in_progress == true);
6602 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
6603 	CU_ASSERT(blob->locked_operation_in_progress == true);
6604 	/* Deletion failure */
6605 	CU_ASSERT(g_bserrno == -EBUSY);
6606 	poll_threads();
6607 	CU_ASSERT(blob->locked_operation_in_progress == false);
6608 	/* Inflation success */
6609 	CU_ASSERT(g_bserrno == 0);
6610 
6611 	/* Clone the snapshot and try to remove the snapshot at the same time:
6612 	 * - the snapshot should be cloned successfully
6613 	 * - the delete operation should fail with -EBUSY */
6614 	CU_ASSERT(blob->locked_operation_in_progress == false);
6615 	spdk_bs_create_clone(bs, snapshotid, NULL, blob_op_with_id_complete, NULL);
6616 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
6617 	/* Deletion failure */
6618 	CU_ASSERT(g_bserrno == -EBUSY);
6619 	poll_threads();
6620 	CU_ASSERT(blob->locked_operation_in_progress == false);
6621 	/* Clone created */
6622 	CU_ASSERT(g_bserrno == 0);
6623 
6624 	/* Resize the blob and try to remove it at the same time:
6625 	 * - the blob should be resized successfully
6626 	 * - the delete operation should fail with -EBUSY */
6627 	CU_ASSERT(blob->locked_operation_in_progress == false);
6628 	spdk_blob_resize(blob, 50, blob_op_complete, NULL);
6629 	CU_ASSERT(blob->locked_operation_in_progress == true);
6630 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
6631 	CU_ASSERT(blob->locked_operation_in_progress == true);
6632 	/* Deletion failure */
6633 	CU_ASSERT(g_bserrno == -EBUSY);
6634 	poll_threads();
6635 	CU_ASSERT(blob->locked_operation_in_progress == false);
6636 	/* Blob resized successfully */
6637 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
6638 	poll_threads();
6639 	CU_ASSERT(g_bserrno == 0);
6640 
6641 	/* Issue two consecutive blob syncs; neither should fail.
6642 	 * Force each sync to actually occur by dirtying the blob's metadata first.
6643 	 * Issuing the sync alone is not enough to complete the operation,
6644 	 * since disk I/O is required to finish it. */
6645 	g_bserrno = -1;
6646 
6647 	rc = spdk_blob_set_xattr(blob, "sync", "first", strlen("first") + 1);
6648 	CU_ASSERT(rc == 0);
6649 	spdk_blob_sync_md(blob, first_sync_complete, blob);
6650 	CU_ASSERT(g_bserrno == -1);
6651 
6652 	spdk_blob_sync_md(blob, second_sync_complete, blob);
6653 	CU_ASSERT(g_bserrno == -1);
6654 
6655 	poll_threads();
6656 	CU_ASSERT(g_bserrno == 0);
6657 
6658 	spdk_bs_free_io_channel(channel);
6659 	poll_threads();
6660 
6661 	ut_blob_close_and_delete(bs, snapshot);
6662 	ut_blob_close_and_delete(bs, blob);
6663 }
6664 
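/* Verify that md pages allocated for a large xattr are released again when the
 * xattr is removed, even if the removal races with a still in-flight metadata sync. */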
6665 static void
6666 blob_persist_test(void)
6667 {
6668 	struct spdk_blob_store *bs = g_bs;
6669 	struct spdk_blob_opts opts;
6670 	struct spdk_blob *blob;
6671 	spdk_blob_id blobid;
6672 	struct spdk_io_channel *channel;
6673 	char *xattr;
6674 	size_t xattr_length;
6675 	int rc;
6676 	uint32_t page_count_clear, page_count_xattr;
6677 	uint64_t poller_iterations;
6678 	bool run_poller;
6679 
6680 	channel = spdk_bs_alloc_io_channel(bs);
6681 	SPDK_CU_ASSERT_FATAL(channel != NULL);
6682 
6683 	ut_spdk_blob_opts_init(&opts);
6684 	opts.num_clusters = 10;
6685 
6686 	blob = ut_blob_create_and_open(bs, &opts);
6687 	blobid = spdk_blob_get_id(blob);
6688 
6689 	/* Save the number of md pages used right after blob creation.
6690 	 * The count should return to this value once the xattr is removed. */
6691 	page_count_clear = spdk_bit_array_count_set(bs->used_md_pages);
6692 	SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_clear);
6693 	SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_clear);
6694 
6695 	/* Add an xattr with the maximum descriptor length, so the blob metadata exceeds a single page. */
6696 	xattr_length = SPDK_BS_MAX_DESC_SIZE - sizeof(struct spdk_blob_md_descriptor_xattr) -
6697 		       strlen("large_xattr");
6698 	xattr = calloc(xattr_length, sizeof(char));
6699 	SPDK_CU_ASSERT_FATAL(xattr != NULL);
6700 
6701 	rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
6702 	SPDK_CU_ASSERT_FATAL(rc == 0);
6703 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
6704 	poll_threads();
6705 	SPDK_CU_ASSERT_FATAL(g_bserrno == 0);
6706 
6707 	/* Save the number of md pages used after adding the large xattr */
6708 	page_count_xattr = spdk_bit_array_count_set(bs->used_md_pages);
6709 	SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_xattr);
6710 	SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_xattr);
6711 
6712 	/* Add an xattr to the blob and sync it. While that sync is occurring, remove the xattr and sync again.
6713 	 * Interrupt the first sync after an increasing number of poller iterations, until it eventually completes.
6714 	 * The expectation is that after the second sync completes, no xattr is saved in the metadata. */
6715 	poller_iterations = 1;
6716 	run_poller = true;
6717 	while (run_poller) {
6718 		rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
6719 		SPDK_CU_ASSERT_FATAL(rc == 0);
6720 		g_bserrno = -1;
6721 		spdk_blob_sync_md(blob, blob_op_complete, NULL);
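		/* Advance only a limited number of poller iterations, so the first sync
		 * may still be in flight when the xattr is removed below. */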
6722 		poll_thread_times(0, poller_iterations);
6723 		if (g_bserrno == 0) {
6724 			/* The poller iteration count was high enough for the first sync to complete.
6725 			 * Verify that the blob occupies enough md pages to store the xattr. */
6726 			SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_xattr);
6727 			SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_xattr);
6728 			SPDK_CU_ASSERT_FATAL(spdk_bit_array_count_set(bs->used_md_pages) == page_count_xattr);
6729 			run_poller = false;
6730 		}
6731 		rc = spdk_blob_remove_xattr(blob, "large_xattr");
6732 		SPDK_CU_ASSERT_FATAL(rc == 0);
6733 		spdk_blob_sync_md(blob, blob_op_complete, NULL);
6734 		poll_threads();
6735 		SPDK_CU_ASSERT_FATAL(g_bserrno == 0);
6736 		SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_clear);
6737 		SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_clear);
6738 		SPDK_CU_ASSERT_FATAL(spdk_bit_array_count_set(bs->used_md_pages) == page_count_clear);
6739 
6740 		/* Reload bs and re-open blob to verify that xattr was not persisted. */
6741 		spdk_blob_close(blob, blob_op_complete, NULL);
6742 		poll_threads();
6743 		CU_ASSERT(g_bserrno == 0);
6744 
6745 		ut_bs_reload(&bs, NULL);
6746 
6747 		spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
6748 		poll_threads();
6749 		CU_ASSERT(g_bserrno == 0);
6750 		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
6751 		blob = g_blob;
6752 
6753 		rc = spdk_blob_get_xattr_value(blob, "large_xattr", (const void **)&xattr, &xattr_length);
6754 		SPDK_CU_ASSERT_FATAL(rc == -ENOENT);
6755 
6756 		poller_iterations++;
6757 		/* Stop at a high iteration count to prevent an infinite loop.
6758 		 * This bound should be more than enough for the first md sync to complete in any case. */
6759 		SPDK_CU_ASSERT_FATAL(poller_iterations < 50);
6760 	}
6761 
6762 	free(xattr);
6763 
6764 	ut_blob_close_and_delete(bs, blob);
6765 
6766 	spdk_bs_free_io_channel(channel);
6767 	poll_threads();
6768 }
6769 
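/* Setup for the bs-backed suites: zero the shared dev buffer and initialize a
 * fresh blobstore into g_bs. */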
6770 static void
6771 suite_bs_setup(void)
6772 {
6773 	struct spdk_bs_dev *dev;
6774 
6775 	dev = init_dev();
6776 	memset(g_dev_buffer, 0, DEV_BUFFER_SIZE);
6777 	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
6778 	poll_threads();
6779 	CU_ASSERT(g_bserrno == 0);
6780 	CU_ASSERT(g_bs != NULL);
6781 }
6782 
6783 static void
6784 suite_bs_cleanup(void)
6785 {
6786 	spdk_bs_unload(g_bs, bs_op_complete, NULL);
6787 	poll_threads();
6788 	CU_ASSERT(g_bserrno == 0);
6789 	g_bs = NULL;
6790 	memset(g_dev_buffer, 0, DEV_BUFFER_SIZE);
6791 }
6792 
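/* Test helper: create a blob with the given opts (or defaults when blob_opts is
 * NULL), open it, and return the opened handle. Asserts on any failure. */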
6793 static struct spdk_blob *
6794 ut_blob_create_and_open(struct spdk_blob_store *bs, struct spdk_blob_opts *blob_opts)
6795 {
6796 	struct spdk_blob *blob;
6797 	struct spdk_blob_opts create_blob_opts;
6798 	spdk_blob_id blobid;
6799 
6800 	if (blob_opts == NULL) {
6801 		ut_spdk_blob_opts_init(&create_blob_opts);
6802 		blob_opts = &create_blob_opts;
6803 	}
6804 
6805 	spdk_bs_create_blob_ext(bs, blob_opts, blob_op_with_id_complete, NULL);
6806 	poll_threads();
6807 	CU_ASSERT(g_bserrno == 0);
6808 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
6809 	blobid = g_blobid;
6810 	g_blobid = -1;
6811 
6812 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
6813 	poll_threads();
6814 	CU_ASSERT(g_bserrno == 0);
6815 	CU_ASSERT(g_blob != NULL);
6816 	blob = g_blob;
6817 
6818 	g_blob = NULL;
6819 	g_bserrno = -1;
6820 
6821 	return blob;
6822 }
6823 
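/* Test helper: close the given blob and then delete it from the blobstore,
 * asserting success at each step. */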
6824 static void
6825 ut_blob_close_and_delete(struct spdk_blob_store *bs, struct spdk_blob *blob)
6826 {
6827 	spdk_blob_id blobid = spdk_blob_get_id(blob);
6828 
6829 	spdk_blob_close(blob, blob_op_complete, NULL);
6830 	poll_threads();
6831 	CU_ASSERT(g_bserrno == 0);
6832 	g_blob = NULL;
6833 
6834 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
6835 	poll_threads();
6836 	CU_ASSERT(g_bserrno == 0);
6837 	g_bserrno = -1;
6838 }
6839 
6840 static void
6841 suite_blob_setup(void)
6842 {
6843 	suite_bs_setup();
6844 	CU_ASSERT(g_bs != NULL);
6845 
6846 	g_blob = ut_blob_create_and_open(g_bs, NULL);
6847 	CU_ASSERT(g_blob != NULL);
6848 }
6849 
6850 static void
6851 suite_blob_cleanup(void)
6852 {
6853 	ut_blob_close_and_delete(g_bs, g_blob);
6854 	CU_ASSERT(g_blob == NULL);
6855 
6856 	suite_bs_cleanup();
6857 	CU_ASSERT(g_bs == NULL);
6858 }
6859 
6860 int main(int argc, char **argv)
6861 {
6862 	CU_pSuite	suite, suite_bs, suite_blob;
6863 	unsigned int	num_failures;
6864 
6865 	CU_set_error_action(CUEA_ABORT);
6866 	CU_initialize_registry();
6867 
6868 	suite = CU_add_suite("blob", NULL, NULL);
6869 	suite_bs = CU_add_suite_with_setup_and_teardown("blob_bs", NULL, NULL,
6870 			suite_bs_setup, suite_bs_cleanup);
6871 	suite_blob = CU_add_suite_with_setup_and_teardown("blob_blob", NULL, NULL,
6872 			suite_blob_setup, suite_blob_cleanup);
6873 
6874 	CU_ADD_TEST(suite, blob_init);
6875 	CU_ADD_TEST(suite_bs, blob_open);
6876 	CU_ADD_TEST(suite_bs, blob_create);
6877 	CU_ADD_TEST(suite_bs, blob_create_loop);
6878 	CU_ADD_TEST(suite_bs, blob_create_fail);
6879 	CU_ADD_TEST(suite_bs, blob_create_internal);
6880 	CU_ADD_TEST(suite, blob_thin_provision);
6881 	CU_ADD_TEST(suite_bs, blob_snapshot);
6882 	CU_ADD_TEST(suite_bs, blob_clone);
6883 	CU_ADD_TEST(suite_bs, blob_inflate);
6884 	CU_ADD_TEST(suite_bs, blob_delete);
6885 	CU_ADD_TEST(suite_bs, blob_resize_test);
6886 	CU_ADD_TEST(suite, blob_read_only);
6887 	CU_ADD_TEST(suite_bs, channel_ops);
6888 	CU_ADD_TEST(suite_bs, blob_super);
6889 	CU_ADD_TEST(suite_blob, blob_write);
6890 	CU_ADD_TEST(suite_blob, blob_read);
6891 	CU_ADD_TEST(suite_blob, blob_rw_verify);
6892 	CU_ADD_TEST(suite_bs, blob_rw_verify_iov);
6893 	CU_ADD_TEST(suite_blob, blob_rw_verify_iov_nomem);
6894 	CU_ADD_TEST(suite_blob, blob_rw_iov_read_only);
6895 	CU_ADD_TEST(suite_bs, blob_unmap);
6896 	CU_ADD_TEST(suite_bs, blob_iter);
6897 	CU_ADD_TEST(suite_blob, blob_xattr);
6898 	CU_ADD_TEST(suite_bs, blob_parse_md);
6899 	CU_ADD_TEST(suite, bs_load);
6900 	CU_ADD_TEST(suite_bs, bs_load_pending_removal);
6901 	CU_ADD_TEST(suite, bs_load_custom_cluster_size);
6902 	CU_ADD_TEST(suite_bs, bs_unload);
6903 	CU_ADD_TEST(suite, bs_cluster_sz);
6904 	CU_ADD_TEST(suite_bs, bs_usable_clusters);
6905 	CU_ADD_TEST(suite, bs_resize_md);
6906 	CU_ADD_TEST(suite, bs_destroy);
6907 	CU_ADD_TEST(suite, bs_type);
6908 	CU_ADD_TEST(suite, bs_super_block);
6909 	CU_ADD_TEST(suite, blob_serialize_test);
6910 	CU_ADD_TEST(suite_bs, blob_crc);
6911 	CU_ADD_TEST(suite, super_block_crc);
6912 	CU_ADD_TEST(suite_blob, blob_dirty_shutdown);
6913 	CU_ADD_TEST(suite_bs, blob_flags);
6914 	CU_ADD_TEST(suite_bs, bs_version);
6915 	CU_ADD_TEST(suite_bs, blob_set_xattrs_test);
6916 	CU_ADD_TEST(suite_bs, blob_thin_prov_alloc);
6917 	CU_ADD_TEST(suite_bs, blob_insert_cluster_msg_test);
6918 	CU_ADD_TEST(suite_bs, blob_thin_prov_rw);
6919 	CU_ADD_TEST(suite, blob_thin_prov_write_count_io);
6920 	CU_ADD_TEST(suite_bs, blob_thin_prov_rle);
6921 	CU_ADD_TEST(suite_bs, blob_thin_prov_rw_iov);
6922 	CU_ADD_TEST(suite, bs_load_iter_test);
6923 	CU_ADD_TEST(suite_bs, blob_snapshot_rw);
6924 	CU_ADD_TEST(suite_bs, blob_snapshot_rw_iov);
6925 	CU_ADD_TEST(suite, blob_relations);
6926 	CU_ADD_TEST(suite, blob_relations2);
6927 	CU_ADD_TEST(suite, blobstore_clean_power_failure);
6928 	CU_ADD_TEST(suite, blob_delete_snapshot_power_failure);
6929 	CU_ADD_TEST(suite, blob_create_snapshot_power_failure);
6930 	CU_ADD_TEST(suite_bs, blob_inflate_rw);
6931 	CU_ADD_TEST(suite_bs, blob_snapshot_freeze_io);
6932 	CU_ADD_TEST(suite_bs, blob_operation_split_rw);
6933 	CU_ADD_TEST(suite_bs, blob_operation_split_rw_iov);
6934 	CU_ADD_TEST(suite, blob_io_unit);
6935 	CU_ADD_TEST(suite, blob_io_unit_compatibility);
6936 	CU_ADD_TEST(suite_bs, blob_simultaneous_operations);
6937 	CU_ADD_TEST(suite_bs, blob_persist_test);
6938 
6939 	allocate_threads(2);
6940 	set_thread(0);
6941 
6942 	g_dev_buffer = calloc(1, DEV_BUFFER_SIZE);
6943 
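	/* Run every registered test twice: once with g_use_extent_table disabled and
	 * once with it enabled, so both blob metadata representations are exercised. */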
6944 	CU_basic_set_mode(CU_BRM_VERBOSE);
6945 	g_use_extent_table = false;
6946 	CU_basic_run_tests();
6947 	num_failures = CU_get_number_of_failures();
6948 	g_use_extent_table = true;
6949 	CU_basic_run_tests();
6950 	num_failures += CU_get_number_of_failures();
6951 	CU_cleanup_registry();
6952 
6953 	free(g_dev_buffer);
6954 
6955 	free_threads();
6956 
6957 	return num_failures;
6958 }
6959