xref: /spdk/test/unit/lib/blob/blob.c/blob_ut.c (revision 5a7d428d0fa7ace275e7dc5fe97f6a2ae6ad012d)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include "spdk/stdinc.h"
35 
36 #include "spdk_cunit.h"
37 #include "spdk/blob.h"
38 #include "spdk/string.h"
39 
40 #include "common/lib/ut_multithread.c"
41 #include "../bs_dev_common.c"
42 #include "blob/blobstore.c"
43 #include "blob/request.c"
44 #include "blob/zeroes.c"
45 #include "blob/blob_bs_dev.c"
46 
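/*
 * Globals used to capture the results of asynchronous blobstore operations.
 * The completion callbacks below only record status and handles here; tests
 * drive the operations with poll_threads() from common/lib/ut_multithread.c
 * and then assert on these values.  A minimal sketch of the idiom used
 * throughout this file:
 *
 *     g_bserrno = -1;
 *     spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
 *     poll_threads();
 *     CU_ASSERT(g_bserrno == 0);
 */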
47 struct spdk_blob_store *g_bs;
48 spdk_blob_id g_blobid;
49 struct spdk_blob *g_blob, *g_blob2;
50 int g_bserrno, g_bserrno2;
51 struct spdk_xattr_names *g_names;
52 int g_done;
53 char *g_xattr_names[] = {"first", "second", "third"};
54 char *g_xattr_values[] = {"one", "two", "three"};
55 uint64_t g_ctx = 1729;
56 bool g_use_extent_table = false;
57 
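/*
 * Local copy of the version 1 on-disk super block layout, kept here so tests
 * can lay out an old-format super block directly.  The reserved[4036] padding
 * keeps the structure at exactly 4096 bytes (one metadata page), which the
 * static assert below verifies.
 */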
58 struct spdk_bs_super_block_ver1 {
59 	uint8_t		signature[8];
60 	uint32_t        version;
61 	uint32_t        length;
62 	uint32_t	clean; /* If there was a clean shutdown, this is 1. */
63 	spdk_blob_id	super_blob;
64 
65 	uint32_t	cluster_size; /* In bytes */
66 
67 	uint32_t	used_page_mask_start; /* Offset from beginning of disk, in pages */
68 	uint32_t	used_page_mask_len; /* Count, in pages */
69 
70 	uint32_t	used_cluster_mask_start; /* Offset from beginning of disk, in pages */
71 	uint32_t	used_cluster_mask_len; /* Count, in pages */
72 
73 	uint32_t	md_start; /* Offset from beginning of disk, in pages */
74 	uint32_t	md_len; /* Count, in pages */
75 
76 	uint8_t		reserved[4036];
77 	uint32_t	crc;
78 } __attribute__((packed));
79 SPDK_STATIC_ASSERT(sizeof(struct spdk_bs_super_block_ver1) == 0x1000, "Invalid super block size");
80 
81 static struct spdk_blob *ut_blob_create_and_open(struct spdk_blob_store *bs,
82 		struct spdk_blob_opts *blob_opts);
83 static void ut_blob_close_and_delete(struct spdk_blob_store *bs, struct spdk_blob *blob);
84 static void suite_blob_setup(void);
85 static void suite_blob_cleanup(void);
86 
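/*
 * get_value callback matching spdk_blob_xattr_opts: looks up 'name' in
 * g_xattr_names and returns the corresponding canned value from
 * g_xattr_values.  Tests pass &g_ctx as the callback context and verify it
 * is passed back unchanged.
 */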
87 static void
88 _get_xattr_value(void *arg, const char *name,
89 		 const void **value, size_t *value_len)
90 {
91 	uint64_t i;
92 
93 	SPDK_CU_ASSERT_FATAL(value_len != NULL);
94 	SPDK_CU_ASSERT_FATAL(value != NULL);
95 	CU_ASSERT(arg == &g_ctx);
96 
97 	for (i = 0; i < sizeof(g_xattr_names) / sizeof(g_xattr_names[0]); i++) {
98 		if (!strcmp(name, g_xattr_names[i])) {
99 			*value_len = strlen(g_xattr_values[i]);
100 			*value = g_xattr_values[i];
101 			break;
102 		}
103 	}
104 }
105 
106 static void
107 _get_xattr_value_null(void *arg, const char *name,
108 		      const void **value, size_t *value_len)
109 {
110 	SPDK_CU_ASSERT_FATAL(value_len != NULL);
111 	SPDK_CU_ASSERT_FATAL(value != NULL);
112 	CU_ASSERT(arg == NULL);
113 
114 	*value_len = 0;
115 	*value = NULL;
116 }
117 
118 static int
119 _get_snapshots_count(struct spdk_blob_store *bs)
120 {
121 	struct spdk_blob_list *snapshot = NULL;
122 	int count = 0;
123 
124 	TAILQ_FOREACH(snapshot, &bs->snapshots, link) {
125 		count += 1;
126 	}
127 
128 	return count;
129 }
130 
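/*
 * Wrapper around spdk_blob_opts_init() that also applies the suite-wide
 * g_use_extent_table flag, so the same tests can be run both with and
 * without the extent table metadata format.
 */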
131 static void
132 ut_spdk_blob_opts_init(struct spdk_blob_opts *opts)
133 {
134 	spdk_blob_opts_init(opts, sizeof(*opts));
135 	opts->use_extent_table = g_use_extent_table;
136 }
137 
138 static void
139 bs_op_complete(void *cb_arg, int bserrno)
140 {
141 	g_bserrno = bserrno;
142 }
143 
144 static void
145 bs_op_with_handle_complete(void *cb_arg, struct spdk_blob_store *bs,
146 			   int bserrno)
147 {
148 	g_bs = bs;
149 	g_bserrno = bserrno;
150 }
151 
152 static void
153 blob_op_complete(void *cb_arg, int bserrno)
154 {
155 	g_bserrno = bserrno;
156 }
157 
158 static void
159 blob_op_with_id_complete(void *cb_arg, spdk_blob_id blobid, int bserrno)
160 {
161 	g_blobid = blobid;
162 	g_bserrno = bserrno;
163 }
164 
165 static void
166 blob_op_with_handle_complete(void *cb_arg, struct spdk_blob *blb, int bserrno)
167 {
168 	g_blob = blb;
169 	g_bserrno = bserrno;
170 }
171 
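/*
 * Variant of blob_op_with_handle_complete() for tests that issue two open
 * requests back to back: the first completion is recorded in g_blob/g_bserrno,
 * the second in g_blob2/g_bserrno2.
 */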
172 static void
173 blob_op_with_handle_complete2(void *cb_arg, struct spdk_blob *blob, int bserrno)
174 {
175 	if (g_blob == NULL) {
176 		g_blob = blob;
177 		g_bserrno = bserrno;
178 	} else {
179 		g_blob2 = blob;
180 		g_bserrno2 = bserrno;
181 	}
182 }
183 
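/*
 * Cleanly unload the blob store and load it back from the same RAM-backed
 * test device (g_dev_buffer, see bs_dev_common.c), updating *bs to the newly
 * loaded handle.  Used to verify that state persists across a clean shutdown.
 */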
184 static void
185 ut_bs_reload(struct spdk_blob_store **bs, struct spdk_bs_opts *opts)
186 {
187 	struct spdk_bs_dev *dev;
188 
189 	/* Unload the blob store */
190 	spdk_bs_unload(*bs, bs_op_complete, NULL);
191 	poll_threads();
192 	CU_ASSERT(g_bserrno == 0);
193 
194 	dev = init_dev();
195 	/* Load an existing blob store */
196 	spdk_bs_load(dev, opts, bs_op_with_handle_complete, NULL);
197 	poll_threads();
198 	CU_ASSERT(g_bserrno == 0);
199 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
200 	*bs = g_bs;
201 
202 	g_bserrno = -1;
203 }
204 
205 static void
206 ut_bs_dirty_load(struct spdk_blob_store **bs, struct spdk_bs_opts *opts)
207 {
208 	struct spdk_bs_dev *dev;
209 
210 	/* Dirty shutdown */
211 	bs_free(*bs);
212 
213 	dev = init_dev();
214 	/* Load an existing blob store */
215 	spdk_bs_load(dev, opts, bs_op_with_handle_complete, NULL);
216 	poll_threads();
217 	CU_ASSERT(g_bserrno == 0);
218 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
219 	*bs = g_bs;
220 
221 	g_bserrno = -1;
222 }
223 
224 static void
225 blob_init(void)
226 {
227 	struct spdk_blob_store *bs;
228 	struct spdk_bs_dev *dev;
229 
230 	dev = init_dev();
231 
232 	/* should fail for an unsupported blocklen */
233 	dev->blocklen = 500;
234 	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
235 	poll_threads();
236 	CU_ASSERT(g_bserrno == -EINVAL);
237 
238 	dev = init_dev();
239 	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
240 	poll_threads();
241 	CU_ASSERT(g_bserrno == 0);
242 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
243 	bs = g_bs;
244 
245 	spdk_bs_unload(bs, bs_op_complete, NULL);
246 	poll_threads();
247 	CU_ASSERT(g_bserrno == 0);
248 	g_bs = NULL;
249 }
250 
251 static void
252 blob_super(void)
253 {
254 	struct spdk_blob_store *bs = g_bs;
255 	spdk_blob_id blobid;
256 	struct spdk_blob_opts blob_opts;
257 
258 	/* Get the super blob without having set one */
259 	spdk_bs_get_super(bs, blob_op_with_id_complete, NULL);
260 	poll_threads();
261 	CU_ASSERT(g_bserrno == -ENOENT);
262 	CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID);
263 
264 	/* Create a blob */
265 	ut_spdk_blob_opts_init(&blob_opts);
266 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
267 	poll_threads();
268 	CU_ASSERT(g_bserrno == 0);
269 	CU_ASSERT(g_blobid !=  SPDK_BLOBID_INVALID);
270 	blobid = g_blobid;
271 
272 	/* Set the blob as the super blob */
273 	spdk_bs_set_super(bs, blobid, blob_op_complete, NULL);
274 	poll_threads();
275 	CU_ASSERT(g_bserrno == 0);
276 
277 	/* Get the super blob */
278 	spdk_bs_get_super(bs, blob_op_with_id_complete, NULL);
279 	poll_threads();
280 	CU_ASSERT(g_bserrno == 0);
281 	CU_ASSERT(blobid == g_blobid);
282 }
283 
284 static void
285 blob_open(void)
286 {
287 	struct spdk_blob_store *bs = g_bs;
288 	struct spdk_blob *blob;
289 	struct spdk_blob_opts blob_opts;
290 	spdk_blob_id blobid, blobid2;
291 
292 	ut_spdk_blob_opts_init(&blob_opts);
293 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
294 	poll_threads();
295 	CU_ASSERT(g_bserrno == 0);
296 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
297 	blobid = g_blobid;
298 
299 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
300 	poll_threads();
301 	CU_ASSERT(g_bserrno == 0);
302 	CU_ASSERT(g_blob != NULL);
303 	blob = g_blob;
304 
305 	blobid2 = spdk_blob_get_id(blob);
306 	CU_ASSERT(blobid == blobid2);
307 
308 	/* Try to open the blob again.  It should succeed. */
309 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
310 	poll_threads();
311 	CU_ASSERT(g_bserrno == 0);
312 	CU_ASSERT(blob == g_blob);
313 
314 	spdk_blob_close(blob, blob_op_complete, NULL);
315 	poll_threads();
316 	CU_ASSERT(g_bserrno == 0);
317 
318 	/*
319 	 * Close the blob a second time, releasing the second reference.  This
320 	 *  should succeed.
321 	 */
322 	blob = g_blob;
323 	spdk_blob_close(blob, blob_op_complete, NULL);
324 	poll_threads();
325 	CU_ASSERT(g_bserrno == 0);
326 
327 	/*
328 	 * Try to open the blob again.  It should succeed.  This tests the case
329 	 *  where the blob is opened, closed, then re-opened.
330 	 */
331 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
332 	poll_threads();
333 	CU_ASSERT(g_bserrno == 0);
334 	CU_ASSERT(g_blob != NULL);
335 	blob = g_blob;
336 	spdk_blob_close(blob, blob_op_complete, NULL);
337 	poll_threads();
338 	CU_ASSERT(g_bserrno == 0);
339 
340 	/* Open the blob twice in succession.  Both opens should return the same
341 	 * blob object.
342 	 */
343 	g_blob = NULL;
344 	g_blob2 = NULL;
345 	g_bserrno = -1;
346 	g_bserrno2 = -1;
347 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete2, NULL);
348 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete2, NULL);
349 	poll_threads();
350 	CU_ASSERT(g_bserrno == 0);
351 	CU_ASSERT(g_bserrno2 == 0);
352 	CU_ASSERT(g_blob != NULL);
353 	CU_ASSERT(g_blob2 != NULL);
354 	CU_ASSERT(g_blob == g_blob2);
355 
356 	g_bserrno = -1;
357 	spdk_blob_close(g_blob, blob_op_complete, NULL);
358 	poll_threads();
359 	CU_ASSERT(g_bserrno == 0);
360 
361 	ut_blob_close_and_delete(bs, g_blob);
362 }
363 
364 static void
365 blob_create(void)
366 {
367 	struct spdk_blob_store *bs = g_bs;
368 	struct spdk_blob *blob;
369 	struct spdk_blob_opts opts;
370 	spdk_blob_id blobid;
371 
372 	/* Create blob with 10 clusters */
373 
374 	ut_spdk_blob_opts_init(&opts);
375 	opts.num_clusters = 10;
376 
377 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
378 	poll_threads();
379 	CU_ASSERT(g_bserrno == 0);
380 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
381 	blobid = g_blobid;
382 
383 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
384 	poll_threads();
385 	CU_ASSERT(g_bserrno == 0);
386 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
387 	blob = g_blob;
388 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
389 
390 	spdk_blob_close(blob, blob_op_complete, NULL);
391 	poll_threads();
392 	CU_ASSERT(g_bserrno == 0);
393 
394 	/* Create blob with 0 clusters */
395 
396 	ut_spdk_blob_opts_init(&opts);
397 	opts.num_clusters = 0;
398 
399 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
400 	poll_threads();
401 	CU_ASSERT(g_bserrno == 0);
402 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
403 	blobid = g_blobid;
404 
405 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
406 	poll_threads();
407 	CU_ASSERT(g_bserrno == 0);
408 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
409 	blob = g_blob;
410 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
411 
412 	spdk_blob_close(blob, blob_op_complete, NULL);
413 	poll_threads();
414 	CU_ASSERT(g_bserrno == 0);
415 
416 	/* Create blob with default options (opts == NULL) */
417 
418 	spdk_bs_create_blob_ext(bs, NULL, blob_op_with_id_complete, NULL);
419 	poll_threads();
420 	CU_ASSERT(g_bserrno == 0);
421 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
422 	blobid = g_blobid;
423 
424 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
425 	poll_threads();
426 	CU_ASSERT(g_bserrno == 0);
427 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
428 	blob = g_blob;
429 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
430 
431 	spdk_blob_close(blob, blob_op_complete, NULL);
432 	poll_threads();
433 	CU_ASSERT(g_bserrno == 0);
434 
435 	/* Try to create blob with size larger than blobstore */
436 
437 	ut_spdk_blob_opts_init(&opts);
438 	opts.num_clusters = bs->total_clusters + 1;
439 
440 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
441 	poll_threads();
442 	CU_ASSERT(g_bserrno == -ENOSPC);
443 }
444 
445 /*
446  * Create and delete one blob in a loop, many times over.  This helps ensure
447  * that the internal bit arrays tracking used clusters and md_pages are
448  * updated correctly.
449  */
450 static void
451 blob_create_loop(void)
452 {
453 	struct spdk_blob_store *bs = g_bs;
454 	struct spdk_blob_opts opts;
455 	uint32_t i, loop_count;
456 
457 	loop_count = 4 * spdk_max(spdk_bit_array_capacity(bs->used_md_pages),
458 				  spdk_bit_pool_capacity(bs->used_clusters));
459 
460 	for (i = 0; i < loop_count; i++) {
461 		ut_spdk_blob_opts_init(&opts);
462 		opts.num_clusters = 1;
463 		g_bserrno = -1;
464 		g_blobid = SPDK_BLOBID_INVALID;
465 		spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
466 		poll_threads();
467 		CU_ASSERT(g_bserrno == 0);
468 		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
469 		spdk_bs_delete_blob(bs, g_blobid, blob_op_complete, NULL);
470 		poll_threads();
471 		CU_ASSERT(g_bserrno == 0);
472 	}
473 }
474 
475 static void
476 blob_create_fail(void)
477 {
478 	struct spdk_blob_store *bs = g_bs;
479 	struct spdk_blob_opts opts;
480 	spdk_blob_id blobid;
481 	uint32_t used_blobids_count = spdk_bit_array_count_set(bs->used_blobids);
482 	uint32_t used_md_pages_count = spdk_bit_array_count_set(bs->used_md_pages);
483 
484 	/* NULL callback */
485 	ut_spdk_blob_opts_init(&opts);
486 	opts.xattrs.names = g_xattr_names;
487 	opts.xattrs.get_value = NULL;
488 	opts.xattrs.count = 1;
489 	opts.xattrs.ctx = &g_ctx;
490 
491 	blobid = spdk_bit_array_find_first_clear(bs->used_md_pages, 0);
492 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
493 	poll_threads();
494 	CU_ASSERT(g_bserrno == -EINVAL);
495 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
496 	CU_ASSERT(spdk_bit_array_count_set(bs->used_blobids) == used_blobids_count);
497 	CU_ASSERT(spdk_bit_array_count_set(bs->used_md_pages) == used_md_pages_count);
498 
499 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
500 	poll_threads();
501 	CU_ASSERT(g_bserrno == -ENOENT);
502 	SPDK_CU_ASSERT_FATAL(g_blob == NULL);
503 
504 	ut_bs_reload(&bs, NULL);
505 	CU_ASSERT(spdk_bit_array_count_set(bs->used_blobids) == used_blobids_count);
506 	CU_ASSERT(spdk_bit_array_count_set(bs->used_md_pages) == used_md_pages_count);
507 
508 	spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL);
509 	poll_threads();
510 	CU_ASSERT(g_blob == NULL);
511 	CU_ASSERT(g_bserrno == -ENOENT);
512 }
513 
514 static void
515 blob_create_internal(void)
516 {
517 	struct spdk_blob_store *bs = g_bs;
518 	struct spdk_blob *blob;
519 	struct spdk_blob_opts opts;
520 	struct spdk_blob_xattr_opts internal_xattrs;
521 	const void *value;
522 	size_t value_len;
523 	spdk_blob_id blobid;
524 	int rc;
525 
526 	/* Create blob with custom xattrs */
527 
528 	ut_spdk_blob_opts_init(&opts);
529 	blob_xattrs_init(&internal_xattrs);
530 	internal_xattrs.count = 3;
531 	internal_xattrs.names = g_xattr_names;
532 	internal_xattrs.get_value = _get_xattr_value;
533 	internal_xattrs.ctx = &g_ctx;
534 
535 	bs_create_blob(bs, &opts, &internal_xattrs, blob_op_with_id_complete, NULL);
536 	poll_threads();
537 	CU_ASSERT(g_bserrno == 0);
538 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
539 	blobid = g_blobid;
540 
541 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
542 	poll_threads();
543 	CU_ASSERT(g_bserrno == 0);
544 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
545 	blob = g_blob;
546 
547 	rc = blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len, true);
548 	CU_ASSERT(rc == 0);
549 	SPDK_CU_ASSERT_FATAL(value != NULL);
550 	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
551 	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);
552 
553 	rc = blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len, true);
554 	CU_ASSERT(rc == 0);
555 	SPDK_CU_ASSERT_FATAL(value != NULL);
556 	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
557 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);
558 
559 	rc = blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len, true);
560 	CU_ASSERT(rc == 0);
561 	SPDK_CU_ASSERT_FATAL(value != NULL);
562 	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
563 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);
564 
565 	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len);
566 	CU_ASSERT(rc != 0);
567 
568 	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len);
569 	CU_ASSERT(rc != 0);
570 
571 	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len);
572 	CU_ASSERT(rc != 0);
573 
574 	spdk_blob_close(blob, blob_op_complete, NULL);
575 	poll_threads();
576 	CU_ASSERT(g_bserrno == 0);
577 
578 	/* Create blob with NULL internal options  */
579 
580 	bs_create_blob(bs, NULL, NULL, blob_op_with_id_complete, NULL);
581 	poll_threads();
582 	CU_ASSERT(g_bserrno == 0);
583 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
584 	blobid = g_blobid;
585 
586 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
587 	poll_threads();
588 	CU_ASSERT(g_bserrno == 0);
589 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
590 	CU_ASSERT(TAILQ_FIRST(&g_blob->xattrs_internal) == NULL);
591 
592 	blob = g_blob;
593 
594 	spdk_blob_close(blob, blob_op_complete, NULL);
595 	poll_threads();
596 	CU_ASSERT(g_bserrno == 0);
597 }
598 
599 static void
600 blob_thin_provision(void)
601 {
602 	struct spdk_blob_store *bs;
603 	struct spdk_bs_dev *dev;
604 	struct spdk_blob *blob;
605 	struct spdk_blob_opts opts;
606 	struct spdk_bs_opts bs_opts;
607 	spdk_blob_id blobid;
608 
609 	dev = init_dev();
610 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
611 	snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");
612 
613 	/* Initialize a new blob store */
614 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
615 	poll_threads();
616 	CU_ASSERT(g_bserrno == 0);
617 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
618 
619 	bs = g_bs;
620 
621 	/* Create blob with thin provisioning enabled */
622 
623 	ut_spdk_blob_opts_init(&opts);
624 	opts.thin_provision = true;
625 	opts.num_clusters = 10;
626 
627 	blob = ut_blob_create_and_open(bs, &opts);
628 	blobid = spdk_blob_get_id(blob);
629 	CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
630 
631 	spdk_blob_close(blob, blob_op_complete, NULL);
632 	CU_ASSERT(g_bserrno == 0);
633 
634 	/* Do not shut down cleanly.  This makes sure that when we load again
635 	 *  and try to recover a valid used_cluster map, the blobstore will
636 	 *  ignore clusters with index 0, since those are unallocated clusters.
637 	 */
638 	ut_bs_dirty_load(&bs, &bs_opts);
639 
640 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
641 	poll_threads();
642 	CU_ASSERT(g_bserrno == 0);
643 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
644 	blob = g_blob;
645 	CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
646 
647 	ut_blob_close_and_delete(bs, blob);
648 
649 	spdk_bs_unload(bs, bs_op_complete, NULL);
650 	poll_threads();
651 	CU_ASSERT(g_bserrno == 0);
652 	g_bs = NULL;
653 }
654 
655 static void
656 blob_snapshot(void)
657 {
658 	struct spdk_blob_store *bs = g_bs;
659 	struct spdk_blob *blob;
660 	struct spdk_blob *snapshot, *snapshot2;
661 	struct spdk_blob_bs_dev *blob_bs_dev;
662 	struct spdk_blob_opts opts;
663 	struct spdk_blob_xattr_opts xattrs;
664 	spdk_blob_id blobid;
665 	spdk_blob_id snapshotid;
666 	spdk_blob_id snapshotid2;
667 	const void *value;
668 	size_t value_len;
669 	int rc;
670 	spdk_blob_id ids[2];
671 	size_t count;
672 
673 	/* Create blob with 10 clusters */
674 	ut_spdk_blob_opts_init(&opts);
675 	opts.num_clusters = 10;
676 
677 	blob = ut_blob_create_and_open(bs, &opts);
678 	blobid = spdk_blob_get_id(blob);
679 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
680 
681 	/* Create snapshot from blob */
682 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 0);
683 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
684 	poll_threads();
685 	CU_ASSERT(g_bserrno == 0);
686 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
687 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
688 	snapshotid = g_blobid;
689 
690 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
691 	poll_threads();
692 	CU_ASSERT(g_bserrno == 0);
693 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
694 	snapshot = g_blob;
695 	CU_ASSERT(snapshot->data_ro == true);
696 	CU_ASSERT(snapshot->md_ro == true);
697 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10);
698 
699 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
700 	CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
701 	CU_ASSERT(spdk_mem_all_zero(blob->active.clusters,
702 				    blob->active.num_clusters * sizeof(blob->active.clusters[0])));
703 
704 	/* Try to create snapshot from clone with xattrs */
705 	xattrs.names = g_xattr_names;
706 	xattrs.get_value = _get_xattr_value;
707 	xattrs.count = 3;
708 	xattrs.ctx = &g_ctx;
709 	spdk_bs_create_snapshot(bs, blobid, &xattrs, blob_op_with_id_complete, NULL);
710 	poll_threads();
711 	CU_ASSERT(g_bserrno == 0);
712 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
713 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 2);
714 	snapshotid2 = g_blobid;
715 
716 	spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL);
	poll_threads();
717 	CU_ASSERT(g_bserrno == 0);
718 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
719 	snapshot2 = g_blob;
720 	CU_ASSERT(snapshot2->data_ro == true);
721 	CU_ASSERT(snapshot2->md_ro == true);
722 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot2) == 10);
723 
724 	/* Confirm that blob is backed by snapshot2 and snapshot2 is backed by snapshot */
725 	CU_ASSERT(snapshot->back_bs_dev == NULL);
726 	SPDK_CU_ASSERT_FATAL(blob->back_bs_dev != NULL);
727 	SPDK_CU_ASSERT_FATAL(snapshot2->back_bs_dev != NULL);
728 
729 	blob_bs_dev = (struct spdk_blob_bs_dev *)blob->back_bs_dev;
730 	CU_ASSERT(blob_bs_dev->blob == snapshot2);
731 
732 	blob_bs_dev = (struct spdk_blob_bs_dev *)snapshot2->back_bs_dev;
733 	CU_ASSERT(blob_bs_dev->blob == snapshot);
734 
735 	rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[0], &value, &value_len);
736 	CU_ASSERT(rc == 0);
737 	SPDK_CU_ASSERT_FATAL(value != NULL);
738 	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
739 	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);
740 
741 	rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[1], &value, &value_len);
742 	CU_ASSERT(rc == 0);
743 	SPDK_CU_ASSERT_FATAL(value != NULL);
744 	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
745 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);
746 
747 	rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[2], &value, &value_len);
748 	CU_ASSERT(rc == 0);
749 	SPDK_CU_ASSERT_FATAL(value != NULL);
750 	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
751 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);
752 
753 	/* Confirm that blob is clone of snapshot2, and snapshot2 is clone of snapshot */
754 	count = 2;
755 	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0);
756 	CU_ASSERT(count == 1);
757 	CU_ASSERT(ids[0] == blobid);
758 
759 	count = 2;
760 	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);
761 	CU_ASSERT(count == 1);
762 	CU_ASSERT(ids[0] == snapshotid2);
763 
764 	/* Try to create snapshot from snapshot */
765 	spdk_bs_create_snapshot(bs, snapshotid, NULL, blob_op_with_id_complete, NULL);
766 	poll_threads();
767 	CU_ASSERT(g_bserrno == -EINVAL);
768 	CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID);
769 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 2);
770 
771 	/* Delete blob and confirm that it is no longer on snapshot2 clone list */
772 	ut_blob_close_and_delete(bs, blob);
773 	count = 2;
774 	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0);
775 	CU_ASSERT(count == 0);
776 
777 	/* Delete snapshot2 and confirm that it is no longer on snapshot clone list */
778 	ut_blob_close_and_delete(bs, snapshot2);
779 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
780 	count = 2;
781 	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0);
782 	CU_ASSERT(count == 0);
783 
784 	ut_blob_close_and_delete(bs, snapshot);
785 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 0);
786 }
787 
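/*
 * Verify that blob I/O is frozen while a snapshot is being created: start the
 * snapshot, poll only far enough for the freeze to take effect, check that a
 * write gets queued instead of reaching the device, then finish polling and
 * confirm the queued write completed with the expected data.
 */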
788 static void
789 blob_snapshot_freeze_io(void)
790 {
791 	struct spdk_io_channel *channel;
792 	struct spdk_bs_channel *bs_channel;
793 	struct spdk_blob_store *bs = g_bs;
794 	struct spdk_blob *blob;
795 	struct spdk_blob_opts opts;
796 	spdk_blob_id blobid;
797 	uint32_t num_of_pages = 10;
798 	uint8_t payload_read[num_of_pages * SPDK_BS_PAGE_SIZE];
799 	uint8_t payload_write[num_of_pages * SPDK_BS_PAGE_SIZE];
800 	uint8_t payload_zero[num_of_pages * SPDK_BS_PAGE_SIZE];
801 
802 	memset(payload_write, 0xE5, sizeof(payload_write));
803 	memset(payload_read, 0x00, sizeof(payload_read));
804 	memset(payload_zero, 0x00, sizeof(payload_zero));
805 
806 	/* Test freeze I/O during snapshot */
807 	channel = spdk_bs_alloc_io_channel(bs);
808 	bs_channel = spdk_io_channel_get_ctx(channel);
809 
810 	/* Create blob with 10 clusters */
811 	ut_spdk_blob_opts_init(&opts);
812 	opts.num_clusters = 10;
813 	opts.thin_provision = false;
814 
815 	blob = ut_blob_create_and_open(bs, &opts);
816 	blobid = spdk_blob_get_id(blob);
817 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
818 
819 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
820 
821 	/* This is implementation specific.
822 	 * Flag 'frozen_io' is set in _spdk_bs_snapshot_freeze_cpl callback.
823 	 * Four async I/O operations happen before that. */
824 	poll_thread_times(0, 3);
825 
826 	CU_ASSERT(TAILQ_EMPTY(&bs_channel->queued_io));
827 
828 	/* Blob I/O should be frozen here */
829 	CU_ASSERT(blob->frozen_refcnt == 1);
830 
831 	/* Write to the blob */
832 	spdk_blob_io_write(blob, channel, payload_write, 0, num_of_pages, blob_op_complete, NULL);
833 
834 	/* Verify that I/O is queued */
835 	CU_ASSERT(!TAILQ_EMPTY(&bs_channel->queued_io));
836 	/* Verify that payload is not written to disk */
837 	CU_ASSERT(memcmp(payload_zero, &g_dev_buffer[blob->active.clusters[0]*SPDK_BS_PAGE_SIZE],
838 			 SPDK_BS_PAGE_SIZE) == 0);
839 
840 	/* Finish all operations including spdk_bs_create_snapshot */
841 	poll_threads();
842 
843 	/* Verify snapshot */
844 	CU_ASSERT(g_bserrno == 0);
845 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
846 
847 	/* Verify that the blob is no longer frozen (frozen_refcnt dropped back to 0) */
848 	CU_ASSERT(blob->frozen_refcnt == 0);
849 
850 	/* Verify that postponed I/O completed successfully by comparing payload */
851 	spdk_blob_io_read(blob, channel, payload_read, 0, num_of_pages, blob_op_complete, NULL);
852 	poll_threads();
853 	CU_ASSERT(g_bserrno == 0);
854 	CU_ASSERT(memcmp(payload_write, payload_read, num_of_pages * SPDK_BS_PAGE_SIZE) == 0);
855 
856 	spdk_bs_free_io_channel(channel);
857 	poll_threads();
858 
859 	ut_blob_close_and_delete(bs, blob);
860 }
861 
862 static void
863 blob_clone(void)
864 {
865 	struct spdk_blob_store *bs = g_bs;
866 	struct spdk_blob_opts opts;
867 	struct spdk_blob *blob, *snapshot, *clone;
868 	spdk_blob_id blobid, cloneid, snapshotid;
869 	struct spdk_blob_xattr_opts xattrs;
870 	const void *value;
871 	size_t value_len;
872 	int rc;
873 
874 	/* Create blob with 10 clusters */
875 
876 	ut_spdk_blob_opts_init(&opts);
877 	opts.num_clusters = 10;
878 
879 	blob = ut_blob_create_and_open(bs, &opts);
880 	blobid = spdk_blob_get_id(blob);
881 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
882 
883 	/* Create snapshot */
884 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
885 	poll_threads();
886 	CU_ASSERT(g_bserrno == 0);
887 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
888 	snapshotid = g_blobid;
889 
890 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
891 	poll_threads();
892 	CU_ASSERT(g_bserrno == 0);
893 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
894 	snapshot = g_blob;
895 	CU_ASSERT(snapshot->data_ro == true);
896 	CU_ASSERT(snapshot->md_ro == true);
897 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10);
898 
899 	spdk_blob_close(snapshot, blob_op_complete, NULL);
900 	poll_threads();
901 	CU_ASSERT(g_bserrno == 0);
902 
903 	/* Create clone from snapshot with xattrs */
904 	xattrs.names = g_xattr_names;
905 	xattrs.get_value = _get_xattr_value;
906 	xattrs.count = 3;
907 	xattrs.ctx = &g_ctx;
908 
909 	spdk_bs_create_clone(bs, snapshotid, &xattrs, blob_op_with_id_complete, NULL);
910 	poll_threads();
911 	CU_ASSERT(g_bserrno == 0);
912 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
913 	cloneid = g_blobid;
914 
915 	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
916 	poll_threads();
917 	CU_ASSERT(g_bserrno == 0);
918 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
919 	clone = g_blob;
920 	CU_ASSERT(clone->data_ro == false);
921 	CU_ASSERT(clone->md_ro == false);
922 	CU_ASSERT(spdk_blob_get_num_clusters(clone) == 10);
923 
924 	rc = spdk_blob_get_xattr_value(clone, g_xattr_names[0], &value, &value_len);
925 	CU_ASSERT(rc == 0);
926 	SPDK_CU_ASSERT_FATAL(value != NULL);
927 	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
928 	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);
929 
930 	rc = spdk_blob_get_xattr_value(clone, g_xattr_names[1], &value, &value_len);
931 	CU_ASSERT(rc == 0);
932 	SPDK_CU_ASSERT_FATAL(value != NULL);
933 	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
934 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);
935 
936 	rc = spdk_blob_get_xattr_value(clone, g_xattr_names[2], &value, &value_len);
937 	CU_ASSERT(rc == 0);
938 	SPDK_CU_ASSERT_FATAL(value != NULL);
939 	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
940 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);
941 
942 
943 	spdk_blob_close(clone, blob_op_complete, NULL);
944 	poll_threads();
945 	CU_ASSERT(g_bserrno == 0);
946 
947 	/* Try to create a clone from a blob that is not read-only (should fail) */
948 	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
949 	poll_threads();
950 	CU_ASSERT(g_bserrno == -EINVAL);
951 	CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID);
952 
953 	/* Mark blob as read only */
954 	spdk_blob_set_read_only(blob);
955 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
956 	poll_threads();
957 	CU_ASSERT(g_bserrno == 0);
958 
959 	/* Create clone from read only blob */
960 	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
961 	poll_threads();
962 	CU_ASSERT(g_bserrno == 0);
963 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
964 	cloneid = g_blobid;
965 
966 	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
967 	poll_threads();
968 	CU_ASSERT(g_bserrno == 0);
969 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
970 	clone = g_blob;
971 	CU_ASSERT(clone->data_ro == false);
972 	CU_ASSERT(clone->md_ro == false);
973 	CU_ASSERT(spdk_blob_get_num_clusters(clone) == 10);
974 
975 	ut_blob_close_and_delete(bs, clone);
976 	ut_blob_close_and_delete(bs, blob);
977 }
978 
979 static void
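/*
 * Common body for blob_inflate(): with decouple_parent == false this exercises
 * spdk_bs_inflate_blob() (allocate all clusters and drop the dependency on the
 * parent), with decouple_parent == true it exercises
 * spdk_bs_blob_decouple_parent() (remove the parent without allocating
 * clusters).
 */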
980 _blob_inflate(bool decouple_parent)
981 {
982 	struct spdk_blob_store *bs = g_bs;
983 	struct spdk_blob_opts opts;
984 	struct spdk_blob *blob, *snapshot;
985 	spdk_blob_id blobid, snapshotid;
986 	struct spdk_io_channel *channel;
987 	uint64_t free_clusters;
988 
989 	channel = spdk_bs_alloc_io_channel(bs);
990 	SPDK_CU_ASSERT_FATAL(channel != NULL);
991 
992 	/* Create blob with 10 clusters */
993 
994 	ut_spdk_blob_opts_init(&opts);
995 	opts.num_clusters = 10;
996 	opts.thin_provision = true;
997 
998 	blob = ut_blob_create_and_open(bs, &opts);
999 	blobid = spdk_blob_get_id(blob);
1000 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
1001 	CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == true);
1002 
1003 	/* 1) Blob with no parent */
1004 	if (decouple_parent) {
1005 		/* Decouple parent of blob with no parent (should fail) */
1006 		spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
1007 		poll_threads();
1008 		CU_ASSERT(g_bserrno != 0);
1009 	} else {
1010 		/* Inflating a thin-provisioned blob with no parent should make it thick */
1011 		spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
1012 		poll_threads();
1013 		CU_ASSERT(g_bserrno == 0);
1014 		CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == false);
1015 	}
1016 
1017 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
1018 	poll_threads();
1019 	CU_ASSERT(g_bserrno == 0);
1020 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
1021 	snapshotid = g_blobid;
1022 
1023 	CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == true);
1024 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
1025 
1026 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
1027 	poll_threads();
1028 	CU_ASSERT(g_bserrno == 0);
1029 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
1030 	snapshot = g_blob;
1031 	CU_ASSERT(snapshot->data_ro == true);
1032 	CU_ASSERT(snapshot->md_ro == true);
1033 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10);
1034 
1035 	spdk_blob_close(snapshot, blob_op_complete, NULL);
1036 	poll_threads();
1037 	CU_ASSERT(g_bserrno == 0);
1038 
1039 	free_clusters = spdk_bs_free_cluster_count(bs);
1040 
1041 	/* 2) Blob with parent */
1042 	if (!decouple_parent) {
1043 		/* Do full blob inflation */
1044 		spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
1045 		poll_threads();
1046 		CU_ASSERT(g_bserrno == 0);
1047 		/* all 10 clusters should be allocated */
1048 		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 10);
1049 	} else {
1050 		/* Decouple parent of blob */
1051 		spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
1052 		poll_threads();
1053 		CU_ASSERT(g_bserrno == 0);
1054 		/* When only the parent is removed, no additional clusters should be allocated */
1055 		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters);
1056 	}
1057 
1058 	/* Now, it should be possible to delete snapshot */
1059 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
1060 	poll_threads();
1061 	CU_ASSERT(g_bserrno == 0);
1062 
1063 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
1064 	CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == decouple_parent);
1065 
1066 	spdk_bs_free_io_channel(channel);
1067 	poll_threads();
1068 
1069 	ut_blob_close_and_delete(bs, blob);
1070 }
1071 
1072 static void
1073 blob_inflate(void)
1074 {
1075 	_blob_inflate(false);
1076 	_blob_inflate(true);
1077 }
1078 
1079 static void
1080 blob_delete(void)
1081 {
1082 	struct spdk_blob_store *bs = g_bs;
1083 	struct spdk_blob_opts blob_opts;
1084 	spdk_blob_id blobid;
1085 
1086 	/* Create a blob and then delete it. */
1087 	ut_spdk_blob_opts_init(&blob_opts);
1088 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
1089 	poll_threads();
1090 	CU_ASSERT(g_bserrno == 0);
1091 	CU_ASSERT(g_blobid > 0);
1092 	blobid = g_blobid;
1093 
1094 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
1095 	poll_threads();
1096 	CU_ASSERT(g_bserrno == 0);
1097 
1098 	/* Try to open the blob */
1099 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
1100 	poll_threads();
1101 	CU_ASSERT(g_bserrno == -ENOENT);
1102 }
1103 
1104 static void
1105 blob_resize_test(void)
1106 {
1107 	struct spdk_blob_store *bs = g_bs;
1108 	struct spdk_blob *blob;
1109 	uint64_t free_clusters;
1110 
1111 	free_clusters = spdk_bs_free_cluster_count(bs);
1112 
1113 	blob = ut_blob_create_and_open(bs, NULL);
1114 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
1115 
1116 	/* Confirm that resize fails if blob is marked read-only. */
1117 	blob->md_ro = true;
1118 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
1119 	poll_threads();
1120 	CU_ASSERT(g_bserrno == -EPERM);
1121 	blob->md_ro = false;
1122 
1123 	/* The blob started at 0 clusters. Resize it to be 5. */
1124 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
1125 	poll_threads();
1126 	CU_ASSERT(g_bserrno == 0);
1127 	CU_ASSERT((free_clusters - 5) == spdk_bs_free_cluster_count(bs));
1128 
1129 	/* Shrink the blob to 3 clusters. This will not actually release
1130 	 * the old clusters until the blob is synced.
1131 	 */
1132 	spdk_blob_resize(blob, 3, blob_op_complete, NULL);
1133 	poll_threads();
1134 	CU_ASSERT(g_bserrno == 0);
1135 	/* Verify there are still 5 clusters in use */
1136 	CU_ASSERT((free_clusters - 5) == spdk_bs_free_cluster_count(bs));
1137 
1138 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
1139 	poll_threads();
1140 	CU_ASSERT(g_bserrno == 0);
1141 	/* Now there are only 3 clusters in use */
1142 	CU_ASSERT((free_clusters - 3) == spdk_bs_free_cluster_count(bs));
1143 
1144 	/* Resize the blob to be 10 clusters. Growth takes effect immediately. */
1145 	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
1146 	poll_threads();
1147 	CU_ASSERT(g_bserrno == 0);
1148 	CU_ASSERT((free_clusters - 10) == spdk_bs_free_cluster_count(bs));
1149 
1150 	/* Try to resize the blob to size larger than blobstore. */
1151 	spdk_blob_resize(blob, bs->total_clusters + 1, blob_op_complete, NULL);
1152 	poll_threads();
1153 	CU_ASSERT(g_bserrno == -ENOSPC);
1154 
1155 	ut_blob_close_and_delete(bs, blob);
1156 }
1157 
1158 static void
1159 blob_read_only(void)
1160 {
1161 	struct spdk_blob_store *bs;
1162 	struct spdk_bs_dev *dev;
1163 	struct spdk_blob *blob;
1164 	struct spdk_bs_opts opts;
1165 	spdk_blob_id blobid;
1166 	int rc;
1167 
1168 	dev = init_dev();
1169 	spdk_bs_opts_init(&opts, sizeof(opts));
1170 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
1171 
1172 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
1173 	poll_threads();
1174 	CU_ASSERT(g_bserrno == 0);
1175 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
1176 	bs = g_bs;
1177 
1178 	blob = ut_blob_create_and_open(bs, NULL);
1179 	blobid = spdk_blob_get_id(blob);
1180 
1181 	rc = spdk_blob_set_read_only(blob);
1182 	CU_ASSERT(rc == 0);
1183 
1184 	CU_ASSERT(blob->data_ro == false);
1185 	CU_ASSERT(blob->md_ro == false);
1186 
1187 	spdk_blob_sync_md(blob, bs_op_complete, NULL);
1188 	poll_threads();
1189 
1190 	CU_ASSERT(blob->data_ro == true);
1191 	CU_ASSERT(blob->md_ro == true);
1192 	CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);
1193 
1194 	spdk_blob_close(blob, blob_op_complete, NULL);
1195 	poll_threads();
1196 	CU_ASSERT(g_bserrno == 0);
1197 
1198 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
1199 	poll_threads();
1200 	CU_ASSERT(g_bserrno == 0);
1201 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
1202 	blob = g_blob;
1203 
1204 	CU_ASSERT(blob->data_ro == true);
1205 	CU_ASSERT(blob->md_ro == true);
1206 	CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);
1207 
1208 	spdk_blob_close(blob, blob_op_complete, NULL);
1209 	poll_threads();
1210 	CU_ASSERT(g_bserrno == 0);
1211 
1212 	ut_bs_reload(&bs, &opts);
1213 
1214 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
1215 	poll_threads();
1216 	CU_ASSERT(g_bserrno == 0);
1217 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
1218 	blob = g_blob;
1219 
1220 	CU_ASSERT(blob->data_ro == true);
1221 	CU_ASSERT(blob->md_ro == true);
1222 	CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);
1223 
1224 	ut_blob_close_and_delete(bs, blob);
1225 
1226 	spdk_bs_unload(bs, bs_op_complete, NULL);
1227 	poll_threads();
1228 	CU_ASSERT(g_bserrno == 0);
1229 }
1230 
1231 static void
1232 channel_ops(void)
1233 {
1234 	struct spdk_blob_store *bs = g_bs;
1235 	struct spdk_io_channel *channel;
1236 
1237 	channel = spdk_bs_alloc_io_channel(bs);
1238 	CU_ASSERT(channel != NULL);
1239 
1240 	spdk_bs_free_io_channel(channel);
1241 	poll_threads();
1242 }
1243 
1244 static void
1245 blob_write(void)
1246 {
1247 	struct spdk_blob_store *bs = g_bs;
1248 	struct spdk_blob *blob = g_blob;
1249 	struct spdk_io_channel *channel;
1250 	uint64_t pages_per_cluster;
1251 	uint8_t payload[10 * 4096];
1252 
1253 	pages_per_cluster = spdk_bs_get_cluster_size(bs) / spdk_bs_get_page_size(bs);
1254 
1255 	channel = spdk_bs_alloc_io_channel(bs);
1256 	CU_ASSERT(channel != NULL);
1257 
1258 	/* Write to a blob with 0 size */
1259 	spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL);
1260 	poll_threads();
1261 	CU_ASSERT(g_bserrno == -EINVAL);
1262 
1263 	/* Resize the blob */
1264 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
1265 	poll_threads();
1266 	CU_ASSERT(g_bserrno == 0);
1267 
1268 	/* Confirm that write fails if blob is marked read-only. */
1269 	blob->data_ro = true;
1270 	spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL);
1271 	poll_threads();
1272 	CU_ASSERT(g_bserrno == -EPERM);
1273 	blob->data_ro = false;
1274 
1275 	/* Write to the blob */
1276 	spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL);
1277 	poll_threads();
1278 	CU_ASSERT(g_bserrno == 0);
1279 
1280 	/* Write starting beyond the end */
1281 	spdk_blob_io_write(blob, channel, payload, 5 * pages_per_cluster, 1, blob_op_complete,
1282 			   NULL);
1283 	poll_threads();
1284 	CU_ASSERT(g_bserrno == -EINVAL);
1285 
1286 	/* Write starting at a valid location but going off the end */
1287 	spdk_blob_io_write(blob, channel, payload, 4 * pages_per_cluster, pages_per_cluster + 1,
1288 			   blob_op_complete, NULL);
1289 	poll_threads();
1290 	CU_ASSERT(g_bserrno == -EINVAL);
1291 
1292 	spdk_bs_free_io_channel(channel);
1293 	poll_threads();
1294 }
1295 
1296 static void
1297 blob_read(void)
1298 {
1299 	struct spdk_blob_store *bs = g_bs;
1300 	struct spdk_blob *blob = g_blob;
1301 	struct spdk_io_channel *channel;
1302 	uint64_t pages_per_cluster;
1303 	uint8_t payload[10 * 4096];
1304 
1305 	pages_per_cluster = spdk_bs_get_cluster_size(bs) / spdk_bs_get_page_size(bs);
1306 
1307 	channel = spdk_bs_alloc_io_channel(bs);
1308 	CU_ASSERT(channel != NULL);
1309 
1310 	/* Read from a blob with 0 size */
1311 	spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL);
1312 	poll_threads();
1313 	CU_ASSERT(g_bserrno == -EINVAL);
1314 
1315 	/* Resize the blob */
1316 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
1317 	poll_threads();
1318 	CU_ASSERT(g_bserrno == 0);
1319 
1320 	/* Confirm that read passes if blob is marked read-only. */
1321 	blob->data_ro = true;
1322 	spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL);
1323 	poll_threads();
1324 	CU_ASSERT(g_bserrno == 0);
1325 	blob->data_ro = false;
1326 
1327 	/* Read from the blob */
1328 	spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL);
1329 	poll_threads();
1330 	CU_ASSERT(g_bserrno == 0);
1331 
1332 	/* Read starting beyond the end */
1333 	spdk_blob_io_read(blob, channel, payload, 5 * pages_per_cluster, 1, blob_op_complete,
1334 			  NULL);
1335 	poll_threads();
1336 	CU_ASSERT(g_bserrno == -EINVAL);
1337 
1338 	/* Read starting at a valid location but going off the end */
1339 	spdk_blob_io_read(blob, channel, payload, 4 * pages_per_cluster, pages_per_cluster + 1,
1340 			  blob_op_complete, NULL);
1341 	poll_threads();
1342 	CU_ASSERT(g_bserrno == -EINVAL);
1343 
1344 	spdk_bs_free_io_channel(channel);
1345 	poll_threads();
1346 }
1347 
1348 static void
1349 blob_rw_verify(void)
1350 {
1351 	struct spdk_blob_store *bs = g_bs;
1352 	struct spdk_blob *blob = g_blob;
1353 	struct spdk_io_channel *channel;
1354 	uint8_t payload_read[10 * 4096];
1355 	uint8_t payload_write[10 * 4096];
1356 
1357 	channel = spdk_bs_alloc_io_channel(bs);
1358 	CU_ASSERT(channel != NULL);
1359 
1360 	spdk_blob_resize(blob, 32, blob_op_complete, NULL);
1361 	poll_threads();
1362 	CU_ASSERT(g_bserrno == 0);
1363 
1364 	memset(payload_write, 0xE5, sizeof(payload_write));
1365 	spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
1366 	poll_threads();
1367 	CU_ASSERT(g_bserrno == 0);
1368 
1369 	memset(payload_read, 0x00, sizeof(payload_read));
1370 	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
1371 	poll_threads();
1372 	CU_ASSERT(g_bserrno == 0);
1373 	CU_ASSERT(memcmp(payload_write, payload_read, 4 * 4096) == 0);
1374 
1375 	spdk_bs_free_io_channel(channel);
1376 	poll_threads();
1377 }
1378 
1379 static void
1380 blob_rw_verify_iov(void)
1381 {
1382 	struct spdk_blob_store *bs = g_bs;
1383 	struct spdk_blob *blob;
1384 	struct spdk_io_channel *channel;
1385 	uint8_t payload_read[10 * 4096];
1386 	uint8_t payload_write[10 * 4096];
1387 	struct iovec iov_read[3];
1388 	struct iovec iov_write[3];
1389 	void *buf;
1390 
1391 	channel = spdk_bs_alloc_io_channel(bs);
1392 	CU_ASSERT(channel != NULL);
1393 
1394 	blob = ut_blob_create_and_open(bs, NULL);
1395 
1396 	spdk_blob_resize(blob, 2, blob_op_complete, NULL);
1397 	poll_threads();
1398 	CU_ASSERT(g_bserrno == 0);
1399 
1400 	/*
1401 	 * Manually adjust the offset of the blob's second cluster.  This allows
1402 	 *  us to make sure that the readv/write code correctly accounts for I/O
1403 	 *  that cross cluster boundaries.  Start by asserting that the allocated
1404 	 *  clusters are where we expect before modifying the second cluster.
1405 	 */
1406 	CU_ASSERT(blob->active.clusters[0] == 1 * 256);
1407 	CU_ASSERT(blob->active.clusters[1] == 2 * 256);
1408 	blob->active.clusters[1] = 3 * 256;
1409 
1410 	memset(payload_write, 0xE5, sizeof(payload_write));
1411 	iov_write[0].iov_base = payload_write;
1412 	iov_write[0].iov_len = 1 * 4096;
1413 	iov_write[1].iov_base = payload_write + 1 * 4096;
1414 	iov_write[1].iov_len = 5 * 4096;
1415 	iov_write[2].iov_base = payload_write + 6 * 4096;
1416 	iov_write[2].iov_len = 4 * 4096;
1417 	/*
1418 	 * Choose a page offset just before the cluster boundary.  The first 6 pages of payload
1419 	 *  will get written to the first cluster, the last 4 to the second cluster.
1420 	 */
1421 	spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
1422 	poll_threads();
1423 	CU_ASSERT(g_bserrno == 0);
1424 
1425 	memset(payload_read, 0xAA, sizeof(payload_read));
1426 	iov_read[0].iov_base = payload_read;
1427 	iov_read[0].iov_len = 3 * 4096;
1428 	iov_read[1].iov_base = payload_read + 3 * 4096;
1429 	iov_read[1].iov_len = 4 * 4096;
1430 	iov_read[2].iov_base = payload_read + 7 * 4096;
1431 	iov_read[2].iov_len = 3 * 4096;
1432 	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
1433 	poll_threads();
1434 	CU_ASSERT(g_bserrno == 0);
1435 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
1436 
1437 	buf = calloc(1, 256 * 4096);
1438 	SPDK_CU_ASSERT_FATAL(buf != NULL);
1439 	/* Check that cluster 2 on "disk" (pages 512-767) was not modified - the blob's second cluster was remapped to page offset 768 above. */
1440 	CU_ASSERT(memcmp(buf, &g_dev_buffer[512 * 4096], 256 * 4096) == 0);
1441 	free(buf);
1442 
1443 	spdk_blob_close(blob, blob_op_complete, NULL);
1444 	poll_threads();
1445 	CU_ASSERT(g_bserrno == 0);
1446 
1447 	spdk_bs_free_io_channel(channel);
1448 	poll_threads();
1449 }
1450 
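/*
 * Count the request sets currently sitting on the channel's free list
 * (channel->reqs).  The ENOMEM test below uses this to confirm that a failed
 * I/O does not leak request sets from the channel.
 */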
1451 static uint32_t
1452 bs_channel_get_req_count(struct spdk_io_channel *_channel)
1453 {
1454 	struct spdk_bs_channel *channel = spdk_io_channel_get_ctx(_channel);
1455 	struct spdk_bs_request_set *set;
1456 	uint32_t count = 0;
1457 
1458 	TAILQ_FOREACH(set, &channel->reqs, link) {
1459 		count++;
1460 	}
1461 
1462 	return count;
1463 }
1464 
1465 static void
1466 blob_rw_verify_iov_nomem(void)
1467 {
1468 	struct spdk_blob_store *bs = g_bs;
1469 	struct spdk_blob *blob = g_blob;
1470 	struct spdk_io_channel *channel;
1471 	uint8_t payload_write[10 * 4096];
1472 	struct iovec iov_write[3];
1473 	uint32_t req_count;
1474 
1475 	channel = spdk_bs_alloc_io_channel(bs);
1476 	CU_ASSERT(channel != NULL);
1477 
1478 	spdk_blob_resize(blob, 2, blob_op_complete, NULL);
1479 	poll_threads();
1480 	CU_ASSERT(g_bserrno == 0);
1481 
1482 	/*
1483 	 * Choose a page offset just before the cluster boundary.  The first 6 pages of payload
1484 	 *  will get written to the first cluster, the last 4 to the second cluster.
1485 	 */
1486 	iov_write[0].iov_base = payload_write;
1487 	iov_write[0].iov_len = 1 * 4096;
1488 	iov_write[1].iov_base = payload_write + 1 * 4096;
1489 	iov_write[1].iov_len = 5 * 4096;
1490 	iov_write[2].iov_base = payload_write + 6 * 4096;
1491 	iov_write[2].iov_len = 4 * 4096;
1492 	MOCK_SET(calloc, NULL);
1493 	req_count = bs_channel_get_req_count(channel);
1494 	spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
1495 	poll_threads();
1496 	CU_ASSERT(g_bserrno == -ENOMEM);
1497 	CU_ASSERT(req_count == bs_channel_get_req_count(channel));
1498 	MOCK_CLEAR(calloc);
1499 
1500 	spdk_bs_free_io_channel(channel);
1501 	poll_threads();
1502 }
1503 
1504 static void
1505 blob_rw_iov_read_only(void)
1506 {
1507 	struct spdk_blob_store *bs = g_bs;
1508 	struct spdk_blob *blob = g_blob;
1509 	struct spdk_io_channel *channel;
1510 	uint8_t payload_read[4096];
1511 	uint8_t payload_write[4096];
1512 	struct iovec iov_read;
1513 	struct iovec iov_write;
1514 
1515 	channel = spdk_bs_alloc_io_channel(bs);
1516 	CU_ASSERT(channel != NULL);
1517 
1518 	spdk_blob_resize(blob, 2, blob_op_complete, NULL);
1519 	poll_threads();
1520 	CU_ASSERT(g_bserrno == 0);
1521 
1522 	/* Verify that writev fails if the read_only flag is set. */
1523 	blob->data_ro = true;
1524 	iov_write.iov_base = payload_write;
1525 	iov_write.iov_len = sizeof(payload_write);
1526 	spdk_blob_io_writev(blob, channel, &iov_write, 1, 0, 1, blob_op_complete, NULL);
1527 	poll_threads();
1528 	CU_ASSERT(g_bserrno == -EPERM);
1529 
1530 	/* Verify that reads pass if data_ro flag is set. */
1531 	iov_read.iov_base = payload_read;
1532 	iov_read.iov_len = sizeof(payload_read);
1533 	spdk_blob_io_readv(blob, channel, &iov_read, 1, 0, 1, blob_op_complete, NULL);
1534 	poll_threads();
1535 	CU_ASSERT(g_bserrno == 0);
1536 
1537 	spdk_bs_free_io_channel(channel);
1538 	poll_threads();
1539 }
1540 
1541 static void
1542 _blob_io_read_no_split(struct spdk_blob *blob, struct spdk_io_channel *channel,
1543 		       uint8_t *payload, uint64_t offset, uint64_t length,
1544 		       spdk_blob_op_complete cb_fn, void *cb_arg)
1545 {
1546 	uint64_t i;
1547 	uint8_t *buf;
1548 	uint64_t page_size = spdk_bs_get_page_size(blob->bs);
1549 
1550 	/* To be sure that the operation is NOT split, read one page at a time */
1551 	buf = payload;
1552 	for (i = 0; i < length; i++) {
1553 		spdk_blob_io_read(blob, channel, buf, i + offset, 1, blob_op_complete, NULL);
1554 		poll_threads();
1555 		if (g_bserrno != 0) {
1556 			/* Pass the error code up */
1557 			break;
1558 		}
1559 		buf += page_size;
1560 	}
1561 
1562 	cb_fn(cb_arg, g_bserrno);
1563 }
1564 
1565 static void
1566 _blob_io_write_no_split(struct spdk_blob *blob, struct spdk_io_channel *channel,
1567 			uint8_t *payload, uint64_t offset, uint64_t length,
1568 			spdk_blob_op_complete cb_fn, void *cb_arg)
1569 {
1570 	uint64_t i;
1571 	uint8_t *buf;
1572 	uint64_t page_size = spdk_bs_get_page_size(blob->bs);
1573 
1574 	/* To be sure that the operation is NOT split, write one page at a time */
1575 	buf = payload;
1576 	for (i = 0; i < length; i++) {
1577 		spdk_blob_io_write(blob, channel, buf, i + offset, 1, blob_op_complete, NULL);
1578 		poll_threads();
1579 		if (g_bserrno != 0) {
1580 			/* Pass the error code up */
1581 			break;
1582 		}
1583 		buf += page_size;
1584 	}
1585 
1586 	cb_fn(cb_arg, g_bserrno);
1587 }
1588 
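/*
 * Issue reads and writes spanning 5 clusters in a single call so that the
 * blobstore must split them internally at cluster boundaries, then cross-check
 * the results against the _no_split helpers above, which go one page at a time.
 */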
1589 static void
1590 blob_operation_split_rw(void)
1591 {
1592 	struct spdk_blob_store *bs = g_bs;
1593 	struct spdk_blob *blob;
1594 	struct spdk_io_channel *channel;
1595 	struct spdk_blob_opts opts;
1596 	uint64_t cluster_size;
1597 
1598 	uint64_t payload_size;
1599 	uint8_t *payload_read;
1600 	uint8_t *payload_write;
1601 	uint8_t *payload_pattern;
1602 
1603 	uint64_t page_size;
1604 	uint64_t pages_per_cluster;
1605 	uint64_t pages_per_payload;
1606 
1607 	uint64_t i;
1608 
1609 	cluster_size = spdk_bs_get_cluster_size(bs);
1610 	page_size = spdk_bs_get_page_size(bs);
1611 	pages_per_cluster = cluster_size / page_size;
1612 	pages_per_payload = pages_per_cluster * 5;
1613 	payload_size = cluster_size * 5;
1614 
1615 	payload_read = malloc(payload_size);
1616 	SPDK_CU_ASSERT_FATAL(payload_read != NULL);
1617 
1618 	payload_write = malloc(payload_size);
1619 	SPDK_CU_ASSERT_FATAL(payload_write != NULL);
1620 
1621 	payload_pattern = malloc(payload_size);
1622 	SPDK_CU_ASSERT_FATAL(payload_pattern != NULL);
1623 
1624 	/* Prepare a known pattern to write: 0xFF fill, with the page number (i + 1) stamped into the first 8 bytes of each page */
1625 	memset(payload_pattern, 0xFF, payload_size);
1626 	for (i = 0; i < pages_per_payload; i++) {
1627 		*((uint64_t *)(payload_pattern + page_size * i)) = (i + 1);
1628 	}
1629 
1630 	channel = spdk_bs_alloc_io_channel(bs);
1631 	SPDK_CU_ASSERT_FATAL(channel != NULL);
1632 
1633 	/* Create blob */
1634 	ut_spdk_blob_opts_init(&opts);
1635 	opts.thin_provision = false;
1636 	opts.num_clusters = 5;
1637 
1638 	blob = ut_blob_create_and_open(bs, &opts);
1639 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
1640 
1641 	/* Initial read should return zeroed payload */
1642 	memset(payload_read, 0xFF, payload_size);
1643 	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
1644 	poll_threads();
1645 	CU_ASSERT(g_bserrno == 0);
1646 	CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size));
1647 
1648 	/* Fill whole blob except last page */
1649 	spdk_blob_io_write(blob, channel, payload_pattern, 0, pages_per_payload - 1,
1650 			   blob_op_complete, NULL);
1651 	poll_threads();
1652 	CU_ASSERT(g_bserrno == 0);
1653 
1654 	/* Write last page with a pattern */
1655 	spdk_blob_io_write(blob, channel, payload_pattern, pages_per_payload - 1, 1,
1656 			   blob_op_complete, NULL);
1657 	poll_threads();
1658 	CU_ASSERT(g_bserrno == 0);
1659 
1660 	/* Read whole blob and check consistency */
1661 	memset(payload_read, 0xFF, payload_size);
1662 	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
1663 	poll_threads();
1664 	CU_ASSERT(g_bserrno == 0);
1665 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size - page_size) == 0);
1666 	CU_ASSERT(memcmp(payload_pattern, payload_read + payload_size - page_size, page_size) == 0);
1667 
1668 	/* Fill whole blob except first page */
1669 	spdk_blob_io_write(blob, channel, payload_pattern, 1, pages_per_payload - 1,
1670 			   blob_op_complete, NULL);
1671 	poll_threads();
1672 	CU_ASSERT(g_bserrno == 0);
1673 
1674 	/* Write first page with a pattern */
1675 	spdk_blob_io_write(blob, channel, payload_pattern, 0, 1,
1676 			   blob_op_complete, NULL);
1677 	poll_threads();
1678 	CU_ASSERT(g_bserrno == 0);
1679 
1680 	/* Read whole blob and check consistency */
1681 	memset(payload_read, 0xFF, payload_size);
1682 	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
1683 	poll_threads();
1684 	CU_ASSERT(g_bserrno == 0);
1685 	CU_ASSERT(memcmp(payload_pattern, payload_read + page_size, payload_size - page_size) == 0);
1686 	CU_ASSERT(memcmp(payload_pattern, payload_read, page_size) == 0);
1687 
1688 
1689 	/* Fill whole blob with a pattern (5 clusters) */
1690 
1691 	/* 1. Read test: write the pattern page by page (no split), then read it back with one split read. */
1692 	_blob_io_write_no_split(blob, channel, payload_pattern, 0, pages_per_payload,
1693 				blob_op_complete, NULL);
1694 	poll_threads();
1695 	CU_ASSERT(g_bserrno == 0);
1696 
1697 	memset(payload_read, 0xFF, payload_size);
1698 	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
1699 	poll_threads();
1700 	poll_threads();
1701 	CU_ASSERT(g_bserrno == 0);
1702 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);
1703 
1704 	/* 2. Write test. */
1705 	spdk_blob_io_write(blob, channel, payload_pattern, 0, pages_per_payload,
1706 			   blob_op_complete, NULL);
1707 	poll_threads();
1708 	CU_ASSERT(g_bserrno == 0);
1709 
1710 	memset(payload_read, 0xFF, payload_size);
1711 	_blob_io_read_no_split(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
1712 	poll_threads();
1713 	CU_ASSERT(g_bserrno == 0);
1714 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);
1715 
1716 	spdk_bs_free_io_channel(channel);
1717 	poll_threads();
1718 
1719 	g_blob = NULL;
1720 	g_blobid = 0;
1721 
1722 	free(payload_read);
1723 	free(payload_write);
1724 	free(payload_pattern);
1725 
1726 	ut_blob_close_and_delete(bs, blob);
1727 }
1728 
1729 static void
1730 blob_operation_split_rw_iov(void)
1731 {
1732 	struct spdk_blob_store *bs = g_bs;
1733 	struct spdk_blob *blob;
1734 	struct spdk_io_channel *channel;
1735 	struct spdk_blob_opts opts;
1736 	uint64_t cluster_size;
1737 
1738 	uint64_t payload_size;
1739 	uint8_t *payload_read;
1740 	uint8_t *payload_write;
1741 	uint8_t *payload_pattern;
1742 
1743 	uint64_t page_size;
1744 	uint64_t pages_per_cluster;
1745 	uint64_t pages_per_payload;
1746 
1747 	struct iovec iov_read[2];
1748 	struct iovec iov_write[2];
1749 
1750 	uint64_t i, j;
1751 
1752 	cluster_size = spdk_bs_get_cluster_size(bs);
1753 	page_size = spdk_bs_get_page_size(bs);
1754 	pages_per_cluster = cluster_size / page_size;
1755 	pages_per_payload = pages_per_cluster * 5;
1756 	payload_size = cluster_size * 5;
1757 
1758 	payload_read = malloc(payload_size);
1759 	SPDK_CU_ASSERT_FATAL(payload_read != NULL);
1760 
1761 	payload_write = malloc(payload_size);
1762 	SPDK_CU_ASSERT_FATAL(payload_write != NULL);
1763 
1764 	payload_pattern = malloc(payload_size);
1765 	SPDK_CU_ASSERT_FATAL(payload_pattern != NULL);
1766 
1767 	/* Prepare a recognizable pattern to write: set every 8-byte word of page i to i + 1 */
1768 	for (i = 0; i < pages_per_payload; i++) {
1769 		for (j = 0; j < page_size / sizeof(uint64_t); j++) {
1770 			uint64_t *tmp;
1771 
1772 			tmp = (uint64_t *)payload_pattern;
1773 			tmp += ((page_size * i) / sizeof(uint64_t)) + j;
1774 			*tmp = i + 1;
1775 		}
1776 	}
1777 
1778 	channel = spdk_bs_alloc_io_channel(bs);
1779 	SPDK_CU_ASSERT_FATAL(channel != NULL);
1780 
1781 	/* Create blob */
1782 	ut_spdk_blob_opts_init(&opts);
1783 	opts.thin_provision = false;
1784 	opts.num_clusters = 5;
1785 
1786 	blob = ut_blob_create_and_open(bs, &opts);
1787 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
1788 
1789 	/* Initial read should return zeroed payload */
1790 	memset(payload_read, 0xFF, payload_size);
1791 	iov_read[0].iov_base = payload_read;
1792 	iov_read[0].iov_len = cluster_size * 3;
1793 	iov_read[1].iov_base = payload_read + cluster_size * 3;
1794 	iov_read[1].iov_len = cluster_size * 2;
1795 	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL);
1796 	poll_threads();
1797 	CU_ASSERT(g_bserrno == 0);
1798 	CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size));
1799 
1800 	/* The first iov fills the whole blob except the last page and the second iov writes
1801 	 *  the last page with a pattern. */
1802 	iov_write[0].iov_base = payload_pattern;
1803 	iov_write[0].iov_len = payload_size - page_size;
1804 	iov_write[1].iov_base = payload_pattern;
1805 	iov_write[1].iov_len = page_size;
1806 	spdk_blob_io_writev(blob, channel, iov_write, 2, 0, pages_per_payload, blob_op_complete, NULL);
1807 	poll_threads();
1808 	CU_ASSERT(g_bserrno == 0);
1809 
1810 	/* Read whole blob and check consistency */
1811 	memset(payload_read, 0xFF, payload_size);
1812 	iov_read[0].iov_base = payload_read;
1813 	iov_read[0].iov_len = cluster_size * 2;
1814 	iov_read[1].iov_base = payload_read + cluster_size * 2;
1815 	iov_read[1].iov_len = cluster_size * 3;
1816 	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL);
1817 	poll_threads();
1818 	CU_ASSERT(g_bserrno == 0);
1819 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size - page_size) == 0);
1820 	CU_ASSERT(memcmp(payload_pattern, payload_read + payload_size - page_size, page_size) == 0);
1821 
1822 	/* The first iov fills only the first page and the second iov writes the whole blob
1823 	 *  except the first page with a pattern. */
1824 	iov_write[0].iov_base = payload_pattern;
1825 	iov_write[0].iov_len = page_size;
1826 	iov_write[1].iov_base = payload_pattern;
1827 	iov_write[1].iov_len = payload_size - page_size;
1828 	spdk_blob_io_writev(blob, channel, iov_write, 2, 0, pages_per_payload, blob_op_complete, NULL);
1829 	poll_threads();
1830 	CU_ASSERT(g_bserrno == 0);
1831 
1832 	/* Read whole blob and check consistency */
1833 	memset(payload_read, 0xFF, payload_size);
1834 	iov_read[0].iov_base = payload_read;
1835 	iov_read[0].iov_len = cluster_size * 4;
1836 	iov_read[1].iov_base = payload_read + cluster_size * 4;
1837 	iov_read[1].iov_len = cluster_size;
1838 	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL);
1839 	poll_threads();
1840 	CU_ASSERT(g_bserrno == 0);
1841 	CU_ASSERT(memcmp(payload_pattern, payload_read + page_size, payload_size - page_size) == 0);
1842 	CU_ASSERT(memcmp(payload_pattern, payload_read, page_size) == 0);
1843 
1844 
1845 	/* Fill whole blob with a pattern (5 clusters) */
1846 
1847 	/* 1. Read test. */
1848 	_blob_io_write_no_split(blob, channel, payload_pattern, 0, pages_per_payload,
1849 				blob_op_complete, NULL);
1850 	poll_threads();
1851 	CU_ASSERT(g_bserrno == 0);
1852 
1853 	memset(payload_read, 0xFF, payload_size);
1854 	iov_read[0].iov_base = payload_read;
1855 	iov_read[0].iov_len = cluster_size;
1856 	iov_read[1].iov_base = payload_read + cluster_size;
1857 	iov_read[1].iov_len = cluster_size * 4;
1858 	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL);
1859 	poll_threads();
1860 	CU_ASSERT(g_bserrno == 0);
1861 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);
1862 
1863 	/* 2. Write test. */
1864 	iov_write[0].iov_base = payload_read;
1865 	iov_write[0].iov_len = cluster_size * 2;
1866 	iov_write[1].iov_base = payload_read + cluster_size * 2;
1867 	iov_write[1].iov_len = cluster_size * 3;
1868 	spdk_blob_io_writev(blob, channel, iov_write, 2, 0, pages_per_payload, blob_op_complete, NULL);
1869 	poll_threads();
1870 	CU_ASSERT(g_bserrno == 0);
1871 
1872 	memset(payload_read, 0xFF, payload_size);
1873 	_blob_io_read_no_split(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
1874 	poll_threads();
1875 	CU_ASSERT(g_bserrno == 0);
1876 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);
1877 
1878 	spdk_bs_free_io_channel(channel);
1879 	poll_threads();
1880 
1881 	g_blob = NULL;
1882 	g_blobid = 0;
1883 
1884 	free(payload_read);
1885 	free(payload_write);
1886 	free(payload_pattern);
1887 
1888 	ut_blob_close_and_delete(bs, blob);
1889 }
1890 
1891 static void
1892 blob_unmap(void)
1893 {
1894 	struct spdk_blob_store *bs = g_bs;
1895 	struct spdk_blob *blob;
1896 	struct spdk_io_channel *channel;
1897 	struct spdk_blob_opts opts;
1898 	uint8_t payload[4096];
1899 	int i;
1900 
1901 	channel = spdk_bs_alloc_io_channel(bs);
1902 	CU_ASSERT(channel != NULL);
1903 
1904 	ut_spdk_blob_opts_init(&opts);
1905 	opts.num_clusters = 10;
1906 
1907 	blob = ut_blob_create_and_open(bs, &opts);
1908 
1909 	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
1910 	poll_threads();
1911 	CU_ASSERT(g_bserrno == 0);
1912 
1913 	memset(payload, 0, sizeof(payload));
1914 	payload[0] = 0xFF;
1915 
1916 	/*
1917 	 * Set first byte of every cluster to 0xFF.
1918 	 * First cluster on device is reserved so let's start from cluster number 1
1919 	 */
1920 	for (i = 1; i < 11; i++) {
1921 		g_dev_buffer[i * SPDK_BLOB_OPTS_CLUSTER_SZ] = 0xFF;
1922 	}
1923 
1924 	/* Confirm writes */
1925 	for (i = 0; i < 10; i++) {
1926 		payload[0] = 0;
1927 		spdk_blob_io_read(blob, channel, &payload, i * SPDK_BLOB_OPTS_CLUSTER_SZ / 4096, 1,
1928 				  blob_op_complete, NULL);
1929 		poll_threads();
1930 		CU_ASSERT(g_bserrno == 0);
1931 		CU_ASSERT(payload[0] == 0xFF);
1932 	}
1933 
1934 	/* Mark some clusters as unallocated */
1935 	blob->active.clusters[1] = 0;
1936 	blob->active.clusters[2] = 0;
1937 	blob->active.clusters[3] = 0;
1938 	blob->active.clusters[6] = 0;
1939 	blob->active.clusters[8] = 0;
1940 
1941 	/* Unmap clusters by resizing to 0 */
1942 	spdk_blob_resize(blob, 0, blob_op_complete, NULL);
1943 	poll_threads();
1944 	CU_ASSERT(g_bserrno == 0);
1945 
1946 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
1947 	poll_threads();
1948 	CU_ASSERT(g_bserrno == 0);
1949 
1950 	/* Confirm that only 'allocated' clusters were unmapped */
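	/* Blob clusters 1, 2, 3, 6 and 8 were marked unallocated above; with the first device
	 * cluster reserved they map to device clusters 2, 3, 4, 7 and 9, which should keep
	 * their 0xFF marker while the remaining clusters are zeroed by the unmap. */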
1951 	for (i = 1; i < 11; i++) {
1952 		switch (i) {
1953 		case 2:
1954 		case 3:
1955 		case 4:
1956 		case 7:
1957 		case 9:
1958 			CU_ASSERT(g_dev_buffer[i * SPDK_BLOB_OPTS_CLUSTER_SZ] == 0xFF);
1959 			break;
1960 		default:
1961 			CU_ASSERT(g_dev_buffer[i * SPDK_BLOB_OPTS_CLUSTER_SZ] == 0);
1962 			break;
1963 		}
1964 	}
1965 
1966 	spdk_bs_free_io_channel(channel);
1967 	poll_threads();
1968 
1969 	ut_blob_close_and_delete(bs, blob);
1970 }
1971 
1972 static void
1973 blob_iter(void)
1974 {
1975 	struct spdk_blob_store *bs = g_bs;
1976 	struct spdk_blob *blob;
1977 	spdk_blob_id blobid;
1978 	struct spdk_blob_opts blob_opts;
1979 
1980 	spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL);
1981 	poll_threads();
1982 	CU_ASSERT(g_blob == NULL);
1983 	CU_ASSERT(g_bserrno == -ENOENT);
1984 
1985 	ut_spdk_blob_opts_init(&blob_opts);
1986 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
1987 	poll_threads();
1988 	CU_ASSERT(g_bserrno == 0);
1989 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
1990 	blobid = g_blobid;
1991 
1992 	spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL);
1993 	poll_threads();
1994 	CU_ASSERT(g_blob != NULL);
1995 	CU_ASSERT(g_bserrno == 0);
1996 	blob = g_blob;
1997 	CU_ASSERT(spdk_blob_get_id(blob) == blobid);
1998 
1999 	spdk_bs_iter_next(bs, blob, blob_op_with_handle_complete, NULL);
2000 	poll_threads();
2001 	CU_ASSERT(g_blob == NULL);
2002 	CU_ASSERT(g_bserrno == -ENOENT);
2003 }
2004 
2005 static void
2006 blob_xattr(void)
2007 {
2008 	struct spdk_blob_store *bs = g_bs;
2009 	struct spdk_blob *blob = g_blob;
2010 	spdk_blob_id blobid = spdk_blob_get_id(blob);
2011 	uint64_t length;
2012 	int rc;
2013 	const char *name1, *name2;
2014 	const void *value;
2015 	size_t value_len;
2016 	struct spdk_xattr_names *names;
2017 
2018 	/* Test that set_xattr fails if md_ro flag is set. */
2019 	blob->md_ro = true;
2020 	rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
2021 	CU_ASSERT(rc == -EPERM);
2022 
2023 	blob->md_ro = false;
2024 	rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
2025 	CU_ASSERT(rc == 0);
2026 
2027 	length = 2345;
2028 	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
2029 	CU_ASSERT(rc == 0);
2030 
2031 	/* Overwrite "length" xattr. */
2032 	length = 3456;
2033 	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
2034 	CU_ASSERT(rc == 0);
2035 
2036 	/* get_xattr should still work even if md_ro flag is set. */
2037 	value = NULL;
2038 	blob->md_ro = true;
2039 	rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len);
2040 	CU_ASSERT(rc == 0);
2041 	SPDK_CU_ASSERT_FATAL(value != NULL);
2042 	CU_ASSERT(*(uint64_t *)value == length);
2043 	CU_ASSERT(value_len == 8);
2044 	blob->md_ro = false;
2045 
2046 	rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len);
2047 	CU_ASSERT(rc == -ENOENT);
2048 
2049 	names = NULL;
2050 	rc = spdk_blob_get_xattr_names(blob, &names);
2051 	CU_ASSERT(rc == 0);
2052 	SPDK_CU_ASSERT_FATAL(names != NULL);
2053 	CU_ASSERT(spdk_xattr_names_get_count(names) == 2);
2054 	name1 = spdk_xattr_names_get_name(names, 0);
2055 	SPDK_CU_ASSERT_FATAL(name1 != NULL);
2056 	CU_ASSERT(!strcmp(name1, "name") || !strcmp(name1, "length"));
2057 	name2 = spdk_xattr_names_get_name(names, 1);
2058 	SPDK_CU_ASSERT_FATAL(name2 != NULL);
2059 	CU_ASSERT(!strcmp(name2, "name") || !strcmp(name2, "length"));
2060 	CU_ASSERT(strcmp(name1, name2));
2061 	spdk_xattr_names_free(names);
2062 
2063 	/* Confirm that remove_xattr fails if md_ro is set to true. */
2064 	blob->md_ro = true;
2065 	rc = spdk_blob_remove_xattr(blob, "name");
2066 	CU_ASSERT(rc == -EPERM);
2067 
2068 	blob->md_ro = false;
2069 	rc = spdk_blob_remove_xattr(blob, "name");
2070 	CU_ASSERT(rc == 0);
2071 
2072 	rc = spdk_blob_remove_xattr(blob, "foobar");
2073 	CU_ASSERT(rc == -ENOENT);
2074 
2075 	/* Set internal xattr */
2076 	length = 7898;
2077 	rc = blob_set_xattr(blob, "internal", &length, sizeof(length), true);
2078 	CU_ASSERT(rc == 0);
2079 	rc = blob_get_xattr_value(blob, "internal", &value, &value_len, true);
2080 	CU_ASSERT(rc == 0);
2081 	CU_ASSERT(*(uint64_t *)value == length);
2082 	/* try to get public xattr with same name */
2083 	rc = spdk_blob_get_xattr_value(blob, "internal", &value, &value_len);
2084 	CU_ASSERT(rc != 0);
2085 	rc = blob_get_xattr_value(blob, "internal", &value, &value_len, false);
2086 	CU_ASSERT(rc != 0);
2087 	/* Check if SPDK_BLOB_INTERNAL_XATTR is set */
2088 	CU_ASSERT((blob->invalid_flags & SPDK_BLOB_INTERNAL_XATTR) ==
2089 		  SPDK_BLOB_INTERNAL_XATTR);
2090 
2091 	spdk_blob_close(blob, blob_op_complete, NULL);
2092 	poll_threads();
2093 
2094 	/* Check if xattrs are persisted */
2095 	ut_bs_reload(&bs, NULL);
2096 
2097 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
2098 	poll_threads();
2099 	CU_ASSERT(g_bserrno == 0);
2100 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
2101 	blob = g_blob;
2102 
2103 	rc = blob_get_xattr_value(blob, "internal", &value, &value_len, true);
2104 	CU_ASSERT(rc == 0);
2105 	CU_ASSERT(*(uint64_t *)value == length);
2106 
2107 	/* Try to get the internal xattr through the public call */
2108 	rc = spdk_blob_get_xattr_value(blob, "internal", &value, &value_len);
2109 	CU_ASSERT(rc != 0);
2110 
2111 	rc = blob_remove_xattr(blob, "internal", true);
2112 	CU_ASSERT(rc == 0);
2113 
2114 	CU_ASSERT((blob->invalid_flags & SPDK_BLOB_INTERNAL_XATTR) == 0);
2115 }
2116 
2117 static void
2118 blob_parse_md(void)
2119 {
2120 	struct spdk_blob_store *bs = g_bs;
2121 	struct spdk_blob *blob;
2122 	int rc;
2123 	uint32_t used_pages;
2124 	size_t xattr_length;
2125 	char *xattr;
2126 
2127 	used_pages = spdk_bit_array_count_set(bs->used_md_pages);
2128 	blob = ut_blob_create_and_open(bs, NULL);
2129 
2130 	/* Set a large xattr to force more than 1 page of metadata. */
2131 	xattr_length = SPDK_BS_MAX_DESC_SIZE - sizeof(struct spdk_blob_md_descriptor_xattr) -
2132 		       strlen("large_xattr");
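	/* An xattr value this size fills essentially the whole descriptor area of one metadata
	 * page (SPDK_BS_MAX_DESC_SIZE minus the descriptor header and name), so the blob's
	 * metadata is forced onto an additional page. */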
2133 	xattr = calloc(xattr_length, sizeof(char));
2134 	SPDK_CU_ASSERT_FATAL(xattr != NULL);
2135 	rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
2136 	free(xattr);
2137 	SPDK_CU_ASSERT_FATAL(rc == 0);
2138 
2139 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
2140 	poll_threads();
2141 
2142 	/* Delete the blob and verify that the number of used metadata pages returns to the value from before its creation. */
2143 	SPDK_CU_ASSERT_FATAL(used_pages != spdk_bit_array_count_set(bs->used_md_pages));
2144 	ut_blob_close_and_delete(bs, blob);
2145 	SPDK_CU_ASSERT_FATAL(used_pages == spdk_bit_array_count_set(bs->used_md_pages));
2146 }
2147 
2148 static void
2149 bs_load(void)
2150 {
2151 	struct spdk_blob_store *bs;
2152 	struct spdk_bs_dev *dev;
2153 	spdk_blob_id blobid;
2154 	struct spdk_blob *blob;
2155 	struct spdk_bs_super_block *super_block;
2156 	uint64_t length;
2157 	int rc;
2158 	const void *value;
2159 	size_t value_len;
2160 	struct spdk_bs_opts opts;
2161 	struct spdk_blob_opts blob_opts;
2162 
2163 	dev = init_dev();
2164 	spdk_bs_opts_init(&opts, sizeof(opts));
2165 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2166 
2167 	/* Initialize a new blob store */
2168 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2169 	poll_threads();
2170 	CU_ASSERT(g_bserrno == 0);
2171 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2172 	bs = g_bs;
2173 
2174 	/* Try to open a blobid that does not exist */
2175 	spdk_bs_open_blob(bs, 0, blob_op_with_handle_complete, NULL);
2176 	poll_threads();
2177 	CU_ASSERT(g_bserrno == -ENOENT);
2178 	CU_ASSERT(g_blob == NULL);
2179 
2180 	/* Create a blob */
2181 	blob = ut_blob_create_and_open(bs, NULL);
2182 	blobid = spdk_blob_get_id(blob);
2183 
2184 	/* Try again to open valid blob but without the upper bit set */
2185 	spdk_bs_open_blob(bs, blobid & 0xFFFFFFFF, blob_op_with_handle_complete, NULL);
2186 	poll_threads();
2187 	CU_ASSERT(g_bserrno == -ENOENT);
2188 	CU_ASSERT(g_blob == NULL);
2189 
2190 	/* Set some xattrs */
2191 	rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
2192 	CU_ASSERT(rc == 0);
2193 
2194 	length = 2345;
2195 	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
2196 	CU_ASSERT(rc == 0);
2197 
2198 	/* Resize the blob */
2199 	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
2200 	poll_threads();
2201 	CU_ASSERT(g_bserrno == 0);
2202 
2203 	spdk_blob_close(blob, blob_op_complete, NULL);
2204 	poll_threads();
2205 	CU_ASSERT(g_bserrno == 0);
2206 	blob = NULL;
2207 	g_blob = NULL;
2208 	g_blobid = SPDK_BLOBID_INVALID;
2209 
2210 	/* Unload the blob store */
2211 	spdk_bs_unload(bs, bs_op_complete, NULL);
2212 	poll_threads();
2213 	CU_ASSERT(g_bserrno == 0);
2214 	g_bs = NULL;
2215 	g_blob = NULL;
2216 	g_blobid = 0;
2217 
2218 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
2219 	CU_ASSERT(super_block->clean == 1);
2220 
2221 	/* Load should fail for device with an unsupported blocklen */
2222 	dev = init_dev();
2223 	dev->blocklen = SPDK_BS_PAGE_SIZE * 2;
2224 	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
2225 	poll_threads();
2226 	CU_ASSERT(g_bserrno == -EINVAL);
2227 
2228 	/* Load should fail when max_md_ops is set to zero */
2229 	dev = init_dev();
2230 	spdk_bs_opts_init(&opts, sizeof(opts));
2231 	opts.max_md_ops = 0;
2232 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2233 	poll_threads();
2234 	CU_ASSERT(g_bserrno == -EINVAL);
2235 
2236 	/* Load should fail when max_channel_ops is set to zero */
2237 	dev = init_dev();
2238 	spdk_bs_opts_init(&opts, sizeof(opts));
2239 	opts.max_channel_ops = 0;
2240 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2241 	poll_threads();
2242 	CU_ASSERT(g_bserrno == -EINVAL);
2243 
2244 	/* Load an existing blob store */
2245 	dev = init_dev();
2246 	spdk_bs_opts_init(&opts, sizeof(opts));
2247 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2248 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2249 	poll_threads();
2250 	CU_ASSERT(g_bserrno == 0);
2251 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2252 	bs = g_bs;
2253 
2254 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
2255 	CU_ASSERT(super_block->clean == 1);
2256 	CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen);
2257 
2258 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
2259 	poll_threads();
2260 	CU_ASSERT(g_bserrno == 0);
2261 	CU_ASSERT(g_blob != NULL);
2262 	blob = g_blob;
2263 
2264 	/* Start a metadata sync; it is not polled to completion here, so the on-disk super block should still read clean == 1 */
2265 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
2266 	CU_ASSERT(super_block->clean == 1);
2267 
2268 	/* Get the xattrs */
2269 	value = NULL;
2270 	rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len);
2271 	CU_ASSERT(rc == 0);
2272 	SPDK_CU_ASSERT_FATAL(value != NULL);
2273 	CU_ASSERT(*(uint64_t *)value == length);
2274 	CU_ASSERT(value_len == 8);
2275 
2276 	rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len);
2277 	CU_ASSERT(rc == -ENOENT);
2278 
2279 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
2280 
2281 	spdk_blob_close(blob, blob_op_complete, NULL);
2282 	poll_threads();
2283 	CU_ASSERT(g_bserrno == 0);
2284 	blob = NULL;
2285 	g_blob = NULL;
2286 
2287 	spdk_bs_unload(bs, bs_op_complete, NULL);
2288 	poll_threads();
2289 	CU_ASSERT(g_bserrno == 0);
2290 	g_bs = NULL;
2291 
2292 	/* Load should fail: bdev size < saved size */
2293 	dev = init_dev();
2294 	dev->blockcnt /= 2;
2295 
2296 	spdk_bs_opts_init(&opts, sizeof(opts));
2297 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2298 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2299 	poll_threads();
2300 
2301 	CU_ASSERT(g_bserrno == -EILSEQ);
2302 
2303 	/* Load should succeed: bdev size > saved size */
2304 	dev = init_dev();
2305 	dev->blockcnt *= 4;
2306 
2307 	spdk_bs_opts_init(&opts, sizeof(opts));
2308 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2309 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2310 	poll_threads();
2311 	CU_ASSERT(g_bserrno == 0);
2312 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2313 	bs = g_bs;
2314 
2315 	CU_ASSERT(g_bserrno == 0);
2316 	spdk_bs_unload(bs, bs_op_complete, NULL);
2317 	poll_threads();
2318 
2319 
2320 	/* Test compatibility mode */
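	/* Older super blocks did not record the blobstore size; clearing it here should make
	 * the load fall back to the device size, which is verified below once the super block
	 * is rewritten. */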
2321 
2322 	dev = init_dev();
2323 	super_block->size = 0;
2324 	super_block->crc = blob_md_page_calc_crc(super_block);
2325 
2326 	spdk_bs_opts_init(&opts, sizeof(opts));
2327 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2328 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2329 	poll_threads();
2330 	CU_ASSERT(g_bserrno == 0);
2331 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2332 	bs = g_bs;
2333 
2334 	/* Create a blob */
2335 	ut_spdk_blob_opts_init(&blob_opts);
2336 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
2337 	poll_threads();
2338 	CU_ASSERT(g_bserrno == 0);
2339 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
2340 
2341 	/* Blobstore should update number of blocks in super_block */
2342 	CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen);
2343 	CU_ASSERT(super_block->clean == 0);
2344 
2345 	spdk_bs_unload(bs, bs_op_complete, NULL);
2346 	poll_threads();
2347 	CU_ASSERT(g_bserrno == 0);
2348 	CU_ASSERT(super_block->clean == 1);
2349 	g_bs = NULL;
2350 
2351 }
2352 
2353 static void
2354 bs_load_pending_removal(void)
2355 {
2356 	struct spdk_blob_store *bs = g_bs;
2357 	struct spdk_blob_opts opts;
2358 	struct spdk_blob *blob, *snapshot;
2359 	spdk_blob_id blobid, snapshotid;
2360 	const void *value;
2361 	size_t value_len;
2362 	int rc;
2363 
2364 	/* Create blob */
2365 	ut_spdk_blob_opts_init(&opts);
2366 	opts.num_clusters = 10;
2367 
2368 	blob = ut_blob_create_and_open(bs, &opts);
2369 	blobid = spdk_blob_get_id(blob);
2370 
2371 	/* Create snapshot */
2372 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
2373 	poll_threads();
2374 	CU_ASSERT(g_bserrno == 0);
2375 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
2376 	snapshotid = g_blobid;
2377 
2378 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
2379 	poll_threads();
2380 	CU_ASSERT(g_bserrno == 0);
2381 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
2382 	snapshot = g_blob;
2383 
2384 	/* Set SNAPSHOT_PENDING_REMOVAL xattr */
2385 	snapshot->md_ro = false;
2386 	rc = blob_set_xattr(snapshot, SNAPSHOT_PENDING_REMOVAL, &blobid, sizeof(spdk_blob_id), true);
2387 	CU_ASSERT(rc == 0);
2388 	snapshot->md_ro = true;
2389 
2390 	spdk_blob_close(snapshot, blob_op_complete, NULL);
2391 	poll_threads();
2392 	CU_ASSERT(g_bserrno == 0);
2393 
2394 	spdk_blob_close(blob, blob_op_complete, NULL);
2395 	poll_threads();
2396 	CU_ASSERT(g_bserrno == 0);
2397 
2398 	/* Reload blobstore */
2399 	ut_bs_reload(&bs, NULL);
2400 
2401 	/* Snapshot should not be removed as blob is still pointing to it */
2402 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
2403 	poll_threads();
2404 	CU_ASSERT(g_bserrno == 0);
2405 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
2406 	snapshot = g_blob;
2407 
2408 	/* SNAPSHOT_PENDING_REMOVAL xattr should be removed during load */
2409 	rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_PENDING_REMOVAL, &value, &value_len);
2410 	CU_ASSERT(rc != 0);
2411 
2412 	/* Set SNAPSHOT_PENDING_REMOVAL xattr again */
2413 	snapshot->md_ro = false;
2414 	rc = blob_set_xattr(snapshot, SNAPSHOT_PENDING_REMOVAL, &blobid, sizeof(spdk_blob_id), true);
2415 	CU_ASSERT(rc == 0);
2416 	snapshot->md_ro = true;
2417 
2418 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
2419 	poll_threads();
2420 	CU_ASSERT(g_bserrno == 0);
2421 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
2422 	blob = g_blob;
2423 
2424 	/* Remove parent_id from blob by removing BLOB_SNAPSHOT xattr */
2425 	blob_remove_xattr(blob, BLOB_SNAPSHOT, true);
2426 
2427 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
2428 	poll_threads();
2429 	CU_ASSERT(g_bserrno == 0);
2430 
2431 	spdk_blob_close(snapshot, blob_op_complete, NULL);
2432 	poll_threads();
2433 	CU_ASSERT(g_bserrno == 0);
2434 
2435 	spdk_blob_close(blob, blob_op_complete, NULL);
2436 	poll_threads();
2437 	CU_ASSERT(g_bserrno == 0);
2438 
2439 	/* Reload blobstore */
2440 	ut_bs_reload(&bs, NULL);
2441 
2442 	/* Snapshot should be removed as blob is not pointing to it anymore */
2443 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
2444 	poll_threads();
2445 	CU_ASSERT(g_bserrno != 0);
2446 }
2447 
2448 static void
2449 bs_load_custom_cluster_size(void)
2450 {
2451 	struct spdk_blob_store *bs;
2452 	struct spdk_bs_dev *dev;
2453 	struct spdk_bs_super_block *super_block;
2454 	struct spdk_bs_opts opts;
2455 	uint32_t custom_cluster_size = 4194304; /* 4MiB */
2456 	uint32_t cluster_sz;
2457 	uint64_t total_clusters;
2458 
2459 	dev = init_dev();
2460 	spdk_bs_opts_init(&opts, sizeof(opts));
2461 	opts.cluster_sz = custom_cluster_size;
2462 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2463 
2464 	/* Initialize a new blob store */
2465 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2466 	poll_threads();
2467 	CU_ASSERT(g_bserrno == 0);
2468 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2469 	bs = g_bs;
2470 	cluster_sz = bs->cluster_sz;
2471 	total_clusters = bs->total_clusters;
2472 
2473 	/* Unload the blob store */
2474 	spdk_bs_unload(bs, bs_op_complete, NULL);
2475 	poll_threads();
2476 	CU_ASSERT(g_bserrno == 0);
2477 	g_bs = NULL;
2478 	g_blob = NULL;
2479 	g_blobid = 0;
2480 
2481 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
2482 	CU_ASSERT(super_block->clean == 1);
2483 
2484 	/* Load an existing blob store */
2485 	dev = init_dev();
2486 	spdk_bs_opts_init(&opts, sizeof(opts));
2487 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2488 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2489 	poll_threads();
2490 	CU_ASSERT(g_bserrno == 0);
2491 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2492 	bs = g_bs;
2493 	/* Compare cluster size and number to one after initialization */
2494 	CU_ASSERT(cluster_sz == bs->cluster_sz);
2495 	CU_ASSERT(total_clusters == bs->total_clusters);
2496 
2497 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
2498 	CU_ASSERT(super_block->clean == 1);
2499 	CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen);
2500 
2501 	spdk_bs_unload(bs, bs_op_complete, NULL);
2502 	poll_threads();
2503 	CU_ASSERT(g_bserrno == 0);
2504 	CU_ASSERT(super_block->clean == 1);
2505 	g_bs = NULL;
2506 }
2507 
2508 static void
2509 bs_type(void)
2510 {
2511 	struct spdk_blob_store *bs;
2512 	struct spdk_bs_dev *dev;
2513 	struct spdk_bs_opts opts;
2514 
2515 	dev = init_dev();
2516 	spdk_bs_opts_init(&opts, sizeof(opts));
2517 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2518 
2519 	/* Initialize a new blob store */
2520 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2521 	poll_threads();
2522 	CU_ASSERT(g_bserrno == 0);
2523 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2524 	bs = g_bs;
2525 
2526 	/* Unload the blob store */
2527 	spdk_bs_unload(bs, bs_op_complete, NULL);
2528 	poll_threads();
2529 	CU_ASSERT(g_bserrno == 0);
2530 	g_bs = NULL;
2531 	g_blob = NULL;
2532 	g_blobid = 0;
2533 
2534 	/* Load a non-existing blobstore type */
2535 	dev = init_dev();
2536 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "NONEXISTING");
2537 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2538 	poll_threads();
2539 	CU_ASSERT(g_bserrno != 0);
2540 
2541 	/* Load with empty blobstore type */
2542 	dev = init_dev();
2543 	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
2544 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2545 	poll_threads();
2546 	CU_ASSERT(g_bserrno == 0);
2547 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2548 	bs = g_bs;
2549 
2550 	spdk_bs_unload(bs, bs_op_complete, NULL);
2551 	poll_threads();
2552 	CU_ASSERT(g_bserrno == 0);
2553 	g_bs = NULL;
2554 
2555 	/* Initialize a new blob store with empty bstype */
2556 	dev = init_dev();
2557 	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
2558 	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
2559 	poll_threads();
2560 	CU_ASSERT(g_bserrno == 0);
2561 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2562 	bs = g_bs;
2563 
2564 	spdk_bs_unload(bs, bs_op_complete, NULL);
2565 	poll_threads();
2566 	CU_ASSERT(g_bserrno == 0);
2567 	g_bs = NULL;
2568 
2569 	/* Load a non-existing blobstore type */
2570 	dev = init_dev();
2571 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "NONEXISTING");
2572 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2573 	poll_threads();
2574 	CU_ASSERT(g_bserrno != 0);
2575 
2576 	/* Load with empty blobstore type */
2577 	dev = init_dev();
2578 	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
2579 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2580 	poll_threads();
2581 	CU_ASSERT(g_bserrno == 0);
2582 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2583 	bs = g_bs;
2584 
2585 	spdk_bs_unload(bs, bs_op_complete, NULL);
2586 	poll_threads();
2587 	CU_ASSERT(g_bserrno == 0);
2588 	g_bs = NULL;
2589 }
2590 
2591 static void
2592 bs_super_block(void)
2593 {
2594 	struct spdk_blob_store *bs;
2595 	struct spdk_bs_dev *dev;
2596 	struct spdk_bs_super_block *super_block;
2597 	struct spdk_bs_opts opts;
2598 	struct spdk_bs_super_block_ver1 super_block_v1;
2599 
2600 	dev = init_dev();
2601 	spdk_bs_opts_init(&opts, sizeof(opts));
2602 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2603 
2604 	/* Initialize a new blob store */
2605 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2606 	poll_threads();
2607 	CU_ASSERT(g_bserrno == 0);
2608 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2609 	bs = g_bs;
2610 
2611 	/* Unload the blob store */
2612 	spdk_bs_unload(bs, bs_op_complete, NULL);
2613 	poll_threads();
2614 	CU_ASSERT(g_bserrno == 0);
2615 	g_bs = NULL;
2616 	g_blob = NULL;
2617 	g_blobid = 0;
2618 
2619 	/* Load an existing blob store with version newer than supported */
2620 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
2621 	super_block->version++;
2622 
2623 	dev = init_dev();
2624 	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
2625 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2626 	poll_threads();
2627 	CU_ASSERT(g_bserrno != 0);
2628 
2629 	/* Create a new blob store with super block version 1 */
2630 	dev = init_dev();
2631 	super_block_v1.version = 1;
2632 	memcpy(super_block_v1.signature, "SPDKBLOB", sizeof(super_block_v1.signature));
2633 	super_block_v1.length = 0x1000;
2634 	super_block_v1.clean = 1;
2635 	super_block_v1.super_blob = 0xFFFFFFFFFFFFFFFF;
2636 	super_block_v1.cluster_size = 0x100000;
2637 	super_block_v1.used_page_mask_start = 0x01;
2638 	super_block_v1.used_page_mask_len = 0x01;
2639 	super_block_v1.used_cluster_mask_start = 0x02;
2640 	super_block_v1.used_cluster_mask_len = 0x01;
2641 	super_block_v1.md_start = 0x03;
2642 	super_block_v1.md_len = 0x40;
2643 	memset(super_block_v1.reserved, 0, 4036);
2644 	super_block_v1.crc = blob_md_page_calc_crc(&super_block_v1);
2645 	memcpy(g_dev_buffer, &super_block_v1, sizeof(struct spdk_bs_super_block_ver1));
2646 
2647 	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
2648 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2649 	poll_threads();
2650 	CU_ASSERT(g_bserrno == 0);
2651 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2652 	bs = g_bs;
2653 
2654 	spdk_bs_unload(bs, bs_op_complete, NULL);
2655 	poll_threads();
2656 	CU_ASSERT(g_bserrno == 0);
2657 	g_bs = NULL;
2658 }
2659 
2660 /*
2661  * Create a blobstore and then unload it.
2662  */
2663 static void
2664 bs_unload(void)
2665 {
2666 	struct spdk_blob_store *bs = g_bs;
2667 	struct spdk_blob *blob;
2668 
2669 	/* Create a blob and open it. */
2670 	blob = ut_blob_create_and_open(bs, NULL);
2671 
2672 	/* Try to unload blobstore, should fail with open blob */
2673 	g_bserrno = -1;
2674 	spdk_bs_unload(bs, bs_op_complete, NULL);
2675 	poll_threads();
2676 	CU_ASSERT(g_bserrno == -EBUSY);
2677 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2678 
2679 	/* Close the blob, then successfully unload blobstore */
2680 	g_bserrno = -1;
2681 	spdk_blob_close(blob, blob_op_complete, NULL);
2682 	poll_threads();
2683 	CU_ASSERT(g_bserrno == 0);
2684 }
2685 
2686 /*
2687  * Create a blobstore with a cluster size different than the default, and ensure it is
2688  *  persisted.
2689  */
2690 static void
2691 bs_cluster_sz(void)
2692 {
2693 	struct spdk_blob_store *bs;
2694 	struct spdk_bs_dev *dev;
2695 	struct spdk_bs_opts opts;
2696 	uint32_t cluster_sz;
2697 
2698 	/* Set cluster size to zero */
2699 	dev = init_dev();
2700 	spdk_bs_opts_init(&opts, sizeof(opts));
2701 	opts.cluster_sz = 0;
2702 
2703 	/* Initialize a new blob store */
2704 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2705 	poll_threads();
2706 	CU_ASSERT(g_bserrno == -EINVAL);
2707 	SPDK_CU_ASSERT_FATAL(g_bs == NULL);
2708 
2709 	/*
2710 	 * Set cluster size to the blobstore page size.
2711 	 * For init to succeed, the cluster size must be at least twice the blobstore page size.
2712 	 */
2713 	dev = init_dev();
2714 	spdk_bs_opts_init(&opts, sizeof(opts));
2715 	opts.cluster_sz = SPDK_BS_PAGE_SIZE;
2716 
2717 	/* Initialize a new blob store */
2718 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2719 	poll_threads();
2720 	CU_ASSERT(g_bserrno == -ENOMEM);
2721 	SPDK_CU_ASSERT_FATAL(g_bs == NULL);
2722 
2723 	/*
2724 	 * Set cluster size to lower than the page size.
2725 	 * For init to succeed, the cluster size must be at least twice the blobstore page size.
2726 	 */
2727 	dev = init_dev();
2728 	spdk_bs_opts_init(&opts, sizeof(opts));
2729 	opts.cluster_sz = SPDK_BS_PAGE_SIZE - 1;
2730 
2731 	/* Initialize a new blob store */
2732 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2733 	poll_threads();
2734 	CU_ASSERT(g_bserrno == -EINVAL);
2735 	SPDK_CU_ASSERT_FATAL(g_bs == NULL);
2736 
2737 	/* Set cluster size to twice the default */
2738 	dev = init_dev();
2739 	spdk_bs_opts_init(&opts, sizeof(opts));
2740 	opts.cluster_sz *= 2;
2741 	cluster_sz = opts.cluster_sz;
2742 
2743 	/* Initialize a new blob store */
2744 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2745 	poll_threads();
2746 	CU_ASSERT(g_bserrno == 0);
2747 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2748 	bs = g_bs;
2749 
2750 	CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz);
2751 
2752 	ut_bs_reload(&bs, &opts);
2753 
2754 	CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz);
2755 
2756 	spdk_bs_unload(bs, bs_op_complete, NULL);
2757 	poll_threads();
2758 	CU_ASSERT(g_bserrno == 0);
2759 	g_bs = NULL;
2760 }
2761 
2762 /*
2763  * Create a blobstore, reload it and ensure total usable cluster count
2764  *  stays the same.
2765  */
2766 static void
2767 bs_usable_clusters(void)
2768 {
2769 	struct spdk_blob_store *bs = g_bs;
2770 	struct spdk_blob *blob;
2771 	uint32_t clusters;
2772 	int i;
2773 
2774 
2775 	clusters = spdk_bs_total_data_cluster_count(bs);
2776 
2777 	ut_bs_reload(&bs, NULL);
2778 
2779 	CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters);
2780 
2781 	/* Create and resize blobs to make sure that the usable cluster count won't change */
2782 	for (i = 0; i < 4; i++) {
2783 		g_bserrno = -1;
2784 		g_blobid = SPDK_BLOBID_INVALID;
2785 		blob = ut_blob_create_and_open(bs, NULL);
2786 
2787 		spdk_blob_resize(blob, 10, blob_op_complete, NULL);
2788 		poll_threads();
2789 		CU_ASSERT(g_bserrno == 0);
2790 
2791 		g_bserrno = -1;
2792 		spdk_blob_close(blob, blob_op_complete, NULL);
2793 		poll_threads();
2794 		CU_ASSERT(g_bserrno == 0);
2795 
2796 		CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters);
2797 	}
2798 
2799 	/* Reload the blob store to make sure that nothing changed */
2800 	ut_bs_reload(&bs, NULL);
2801 
2802 	CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters);
2803 }
2804 
2805 /*
2806  * Test resizing of the metadata blob.  This requires creating enough blobs
2807  *  so that one cluster is not enough to fit the metadata for those blobs.
2808  *  To induce this condition to happen more quickly, we reduce the cluster
2809  *  size to 16KB, which means only 4 4KB blob metadata pages can fit.
2810  */
2811 static void
2812 bs_resize_md(void)
2813 {
2814 	struct spdk_blob_store *bs;
2815 	const int CLUSTER_PAGE_COUNT = 4;
2816 	const int NUM_BLOBS = CLUSTER_PAGE_COUNT * 4;
2817 	struct spdk_bs_dev *dev;
2818 	struct spdk_bs_opts opts;
2819 	struct spdk_blob *blob;
2820 	struct spdk_blob_opts blob_opts;
2821 	uint32_t cluster_sz;
2822 	spdk_blob_id blobids[NUM_BLOBS];
2823 	int i;
2824 
2825 
2826 	dev = init_dev();
2827 	spdk_bs_opts_init(&opts, sizeof(opts));
2828 	opts.cluster_sz = CLUSTER_PAGE_COUNT * 4096;
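	/* 16KB clusters hold only CLUSTER_PAGE_COUNT (4) metadata pages each, so creating
	 * NUM_BLOBS (16) blobs forces the metadata region to grow beyond a single cluster. */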
2829 	cluster_sz = opts.cluster_sz;
2830 
2831 	/* Initialize a new blob store */
2832 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2833 	poll_threads();
2834 	CU_ASSERT(g_bserrno == 0);
2835 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2836 	bs = g_bs;
2837 
2838 	CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz);
2839 
2840 	ut_spdk_blob_opts_init(&blob_opts);
2841 
2842 	for (i = 0; i < NUM_BLOBS; i++) {
2843 		g_bserrno = -1;
2844 		g_blobid = SPDK_BLOBID_INVALID;
2845 		spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
2846 		poll_threads();
2847 		CU_ASSERT(g_bserrno == 0);
2848 		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
2849 		blobids[i] = g_blobid;
2850 	}
2851 
2852 	ut_bs_reload(&bs, &opts);
2853 
2854 	CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz);
2855 
2856 	for (i = 0; i < NUM_BLOBS; i++) {
2857 		g_bserrno = -1;
2858 		g_blob = NULL;
2859 		spdk_bs_open_blob(bs, blobids[i], blob_op_with_handle_complete, NULL);
2860 		poll_threads();
2861 		CU_ASSERT(g_bserrno == 0);
2862 		CU_ASSERT(g_blob != NULL);
2863 		blob = g_blob;
2864 		g_bserrno = -1;
2865 		spdk_blob_close(blob, blob_op_complete, NULL);
2866 		poll_threads();
2867 		CU_ASSERT(g_bserrno == 0);
2868 	}
2869 
2870 	spdk_bs_unload(bs, bs_op_complete, NULL);
2871 	poll_threads();
2872 	CU_ASSERT(g_bserrno == 0);
2873 	g_bs = NULL;
2874 }
2875 
2876 static void
2877 bs_destroy(void)
2878 {
2879 	struct spdk_blob_store *bs;
2880 	struct spdk_bs_dev *dev;
2881 
2882 	/* Initialize a new blob store */
2883 	dev = init_dev();
2884 	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
2885 	poll_threads();
2886 	CU_ASSERT(g_bserrno == 0);
2887 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2888 	bs = g_bs;
2889 
2890 	/* Destroy the blob store */
2891 	g_bserrno = -1;
2892 	spdk_bs_destroy(bs, bs_op_complete, NULL);
2893 	poll_threads();
2894 	CU_ASSERT(g_bserrno == 0);
2895 
2896 	/* Loading a non-existent blob store should fail. */
2897 	g_bs = NULL;
2898 	dev = init_dev();
2899 
2900 	g_bserrno = 0;
2901 	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
2902 	poll_threads();
2903 	CU_ASSERT(g_bserrno != 0);
2904 }
2905 
2906 /* Try to hit all of the corner cases associated with serializing
2907  * a blob to disk
2908  */
2909 static void
2910 blob_serialize_test(void)
2911 {
2912 	struct spdk_bs_dev *dev;
2913 	struct spdk_bs_opts opts;
2914 	struct spdk_blob_store *bs;
2915 	spdk_blob_id blobid[2];
2916 	struct spdk_blob *blob[2];
2917 	uint64_t i;
2918 	char *value;
2919 	int rc;
2920 
2921 	dev = init_dev();
2922 
2923 	/* Initialize a new blobstore with very small clusters */
2924 	spdk_bs_opts_init(&opts, sizeof(opts));
2925 	opts.cluster_sz = dev->blocklen * 8;
2926 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2927 	poll_threads();
2928 	CU_ASSERT(g_bserrno == 0);
2929 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2930 	bs = g_bs;
2931 
2932 	/* Create and open two blobs */
2933 	for (i = 0; i < 2; i++) {
2934 		blob[i] = ut_blob_create_and_open(bs, NULL);
2935 		blobid[i] = spdk_blob_get_id(blob[i]);
2936 
2937 		/* Set a fairly large xattr on both blobs to eat up
2938 		 * metadata space
2939 		 */
2940 		value = calloc(dev->blocklen - 64, sizeof(char));
2941 		SPDK_CU_ASSERT_FATAL(value != NULL);
2942 		memset(value, i, dev->blocklen / 2);
2943 		rc = spdk_blob_set_xattr(blob[i], "name", value, dev->blocklen - 64);
2944 		CU_ASSERT(rc == 0);
2945 		free(value);
2946 	}
2947 
2948 	/* Resize the blobs, alternating 1 cluster at a time.
2949 	 * This thwarts run length encoding and will cause spill
2950 	 * over of the extents.
2951 	 */
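	/* E.g. with clusters handed out alternately, blob[0] ends up with non-contiguous
	 * clusters (and likewise blob[1]), so its extents cannot be collapsed into a single
	 * run-length-encoded descriptor. */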
2952 	for (i = 0; i < 6; i++) {
2953 		spdk_blob_resize(blob[i % 2], (i / 2) + 1, blob_op_complete, NULL);
2954 		poll_threads();
2955 		CU_ASSERT(g_bserrno == 0);
2956 	}
2957 
2958 	for (i = 0; i < 2; i++) {
2959 		spdk_blob_sync_md(blob[i], blob_op_complete, NULL);
2960 		poll_threads();
2961 		CU_ASSERT(g_bserrno == 0);
2962 	}
2963 
2964 	/* Close the blobs */
2965 	for (i = 0; i < 2; i++) {
2966 		spdk_blob_close(blob[i], blob_op_complete, NULL);
2967 		poll_threads();
2968 		CU_ASSERT(g_bserrno == 0);
2969 	}
2970 
2971 	ut_bs_reload(&bs, &opts);
2972 
2973 	for (i = 0; i < 2; i++) {
2974 		blob[i] = NULL;
2975 
2976 		spdk_bs_open_blob(bs, blobid[i], blob_op_with_handle_complete, NULL);
2977 		poll_threads();
2978 		CU_ASSERT(g_bserrno == 0);
2979 		CU_ASSERT(g_blob != NULL);
2980 		blob[i] = g_blob;
2981 
2982 		CU_ASSERT(spdk_blob_get_num_clusters(blob[i]) == 3);
2983 
2984 		spdk_blob_close(blob[i], blob_op_complete, NULL);
2985 		poll_threads();
2986 		CU_ASSERT(g_bserrno == 0);
2987 	}
2988 
2989 	spdk_bs_unload(bs, bs_op_complete, NULL);
2990 	poll_threads();
2991 	CU_ASSERT(g_bserrno == 0);
2992 	g_bs = NULL;
2993 }
2994 
2995 static void
2996 blob_crc(void)
2997 {
2998 	struct spdk_blob_store *bs = g_bs;
2999 	struct spdk_blob *blob;
3000 	spdk_blob_id blobid;
3001 	uint32_t page_num;
3002 	int index;
3003 	struct spdk_blob_md_page *page;
3004 
3005 	blob = ut_blob_create_and_open(bs, NULL);
3006 	blobid = spdk_blob_get_id(blob);
3007 
3008 	spdk_blob_close(blob, blob_op_complete, NULL);
3009 	poll_threads();
3010 	CU_ASSERT(g_bserrno == 0);
3011 
3012 	page_num = bs_blobid_to_page(blobid);
3013 	index = DEV_BUFFER_BLOCKLEN * (bs->md_start + page_num);
3014 	page = (struct spdk_blob_md_page *)&g_dev_buffer[index];
3015 	page->crc = 0;
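	/* Zeroing the stored CRC corrupts the blob's metadata page, so both the open and the
	 * delete below should fail CRC validation with -EINVAL. */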
3016 
3017 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
3018 	poll_threads();
3019 	CU_ASSERT(g_bserrno == -EINVAL);
3020 	CU_ASSERT(g_blob == NULL);
3021 	g_bserrno = 0;
3022 
3023 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
3024 	poll_threads();
3025 	CU_ASSERT(g_bserrno == -EINVAL);
3026 }
3027 
3028 static void
3029 super_block_crc(void)
3030 {
3031 	struct spdk_blob_store *bs;
3032 	struct spdk_bs_dev *dev;
3033 	struct spdk_bs_super_block *super_block;
3034 
3035 	dev = init_dev();
3036 	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
3037 	poll_threads();
3038 	CU_ASSERT(g_bserrno == 0);
3039 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3040 	bs = g_bs;
3041 
3042 	spdk_bs_unload(bs, bs_op_complete, NULL);
3043 	poll_threads();
3044 	CU_ASSERT(g_bserrno == 0);
3045 	g_bs = NULL;
3046 
3047 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
3048 	super_block->crc = 0;
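	/* Corrupt the super block CRC so that the following load fails with -EILSEQ. */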
3049 	dev = init_dev();
3050 
3051 	/* Load an existing blob store */
3052 	g_bserrno = 0;
3053 	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
3054 	poll_threads();
3055 	CU_ASSERT(g_bserrno == -EILSEQ);
3056 }
3057 
3058 /* For blob dirty shutdown test case we do the following sub-test cases:
3059  * 1 Initialize new blob store and create 1 super blob with some xattrs, then we
3060  *   dirty shutdown and reload the blob store and verify the xattrs.
3061  * 2 Resize the blob from 10 clusters to 20 clusters and then dirty shutdown,
3062  *   reload the blob store and verify the clusters number.
3063  * 3 Create the second blob and then dirty shutdown, reload the blob store
3064  *   and verify the second blob.
3065  * 4 Delete the second blob and then dirty shutdown, reload the blob store
3066  *   and verify the second blob is invalid.
3067  * 5 Create the second blob again and also create the third blob, modify the
3068  *   md of second blob which makes the md invalid, and then dirty shutdown,
3069  *   reload the blob store and verify the second blob, it should be invalid, and
3070  *   also verify the third blob, it should be correct.
3071  */
3072 static void
3073 blob_dirty_shutdown(void)
3074 {
3075 	int rc;
3076 	int index;
3077 	struct spdk_blob_store *bs = g_bs;
3078 	spdk_blob_id blobid1, blobid2, blobid3;
3079 	struct spdk_blob *blob = g_blob;
3080 	uint64_t length;
3081 	uint64_t free_clusters;
3082 	const void *value;
3083 	size_t value_len;
3084 	uint32_t page_num;
3085 	struct spdk_blob_md_page *page;
3086 	struct spdk_blob_opts blob_opts;
3087 
3088 	/* Create first blob */
3089 	blobid1 = spdk_blob_get_id(blob);
3090 
3091 	/* Set some xattrs */
3092 	rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
3093 	CU_ASSERT(rc == 0);
3094 
3095 	length = 2345;
3096 	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
3097 	CU_ASSERT(rc == 0);
3098 
3099 	/* Put an xattr that fits exactly in a single page.
3100 	 * This results in additional metadata pages:
3101 	 * the first holds the flags and the smaller xattrs, the second the large xattr,
3102 	 * and the third just the extents.
3103 	 */
3104 	size_t xattr_length = 4072 - sizeof(struct spdk_blob_md_descriptor_xattr) -
3105 			      strlen("large_xattr");
3106 	char *xattr = calloc(xattr_length, sizeof(char));
3107 	SPDK_CU_ASSERT_FATAL(xattr != NULL);
3108 	rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
3109 	free(xattr);
3110 	SPDK_CU_ASSERT_FATAL(rc == 0);
3111 
3112 	/* Resize the blob */
3113 	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
3114 	poll_threads();
3115 	CU_ASSERT(g_bserrno == 0);
3116 
3117 	/* Set the blob as the super blob */
3118 	spdk_bs_set_super(bs, blobid1, blob_op_complete, NULL);
3119 	poll_threads();
3120 	CU_ASSERT(g_bserrno == 0);
3121 
3122 	free_clusters = spdk_bs_free_cluster_count(bs);
3123 
3124 	spdk_blob_close(blob, blob_op_complete, NULL);
3125 	poll_threads();
3126 	CU_ASSERT(g_bserrno == 0);
3127 	blob = NULL;
3128 	g_blob = NULL;
3129 	g_blobid = SPDK_BLOBID_INVALID;
3130 
3131 	ut_bs_dirty_load(&bs, NULL);
3132 
3133 	/* Get the super blob */
3134 	spdk_bs_get_super(bs, blob_op_with_id_complete, NULL);
3135 	poll_threads();
3136 	CU_ASSERT(g_bserrno == 0);
3137 	CU_ASSERT(blobid1 == g_blobid);
3138 
3139 	spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL);
3140 	poll_threads();
3141 	CU_ASSERT(g_bserrno == 0);
3142 	CU_ASSERT(g_blob != NULL);
3143 	blob = g_blob;
3144 
3145 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3146 
3147 	/* Get the xattrs */
3148 	value = NULL;
3149 	rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len);
3150 	CU_ASSERT(rc == 0);
3151 	SPDK_CU_ASSERT_FATAL(value != NULL);
3152 	CU_ASSERT(*(uint64_t *)value == length);
3153 	CU_ASSERT(value_len == 8);
3154 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
3155 
3156 	/* Resize the blob */
3157 	spdk_blob_resize(blob, 20, blob_op_complete, NULL);
3158 	poll_threads();
3159 	CU_ASSERT(g_bserrno == 0);
3160 
3161 	free_clusters = spdk_bs_free_cluster_count(bs);
3162 
3163 	spdk_blob_close(blob, blob_op_complete, NULL);
3164 	poll_threads();
3165 	CU_ASSERT(g_bserrno == 0);
3166 	blob = NULL;
3167 	g_blob = NULL;
3168 	g_blobid = SPDK_BLOBID_INVALID;
3169 
3170 	ut_bs_dirty_load(&bs, NULL);
3171 
3172 	spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL);
3173 	poll_threads();
3174 	CU_ASSERT(g_bserrno == 0);
3175 	CU_ASSERT(g_blob != NULL);
3176 	blob = g_blob;
3177 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 20);
3178 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3179 
3180 	spdk_blob_close(blob, blob_op_complete, NULL);
3181 	poll_threads();
3182 	CU_ASSERT(g_bserrno == 0);
3183 	blob = NULL;
3184 	g_blob = NULL;
3185 	g_blobid = SPDK_BLOBID_INVALID;
3186 
3187 	/* Create second blob */
3188 	blob = ut_blob_create_and_open(bs, NULL);
3189 	blobid2 = spdk_blob_get_id(blob);
3190 
3191 	/* Set some xattrs */
3192 	rc = spdk_blob_set_xattr(blob, "name", "log1.txt", strlen("log1.txt") + 1);
3193 	CU_ASSERT(rc == 0);
3194 
3195 	length = 5432;
3196 	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
3197 	CU_ASSERT(rc == 0);
3198 
3199 	/* Resize the blob */
3200 	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
3201 	poll_threads();
3202 	CU_ASSERT(g_bserrno == 0);
3203 
3204 	free_clusters = spdk_bs_free_cluster_count(bs);
3205 
3206 	spdk_blob_close(blob, blob_op_complete, NULL);
3207 	poll_threads();
3208 	CU_ASSERT(g_bserrno == 0);
3209 	blob = NULL;
3210 	g_blob = NULL;
3211 	g_blobid = SPDK_BLOBID_INVALID;
3212 
3213 	ut_bs_dirty_load(&bs, NULL);
3214 
3215 	spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL);
3216 	poll_threads();
3217 	CU_ASSERT(g_bserrno == 0);
3218 	CU_ASSERT(g_blob != NULL);
3219 	blob = g_blob;
3220 
3221 	/* Get the xattrs */
3222 	value = NULL;
3223 	rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len);
3224 	CU_ASSERT(rc == 0);
3225 	SPDK_CU_ASSERT_FATAL(value != NULL);
3226 	CU_ASSERT(*(uint64_t *)value == length);
3227 	CU_ASSERT(value_len == 8);
3228 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
3229 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3230 
3231 	ut_blob_close_and_delete(bs, blob);
3232 
3233 	free_clusters = spdk_bs_free_cluster_count(bs);
3234 
3235 	ut_bs_dirty_load(&bs, NULL);
3236 
3237 	spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL);
3238 	poll_threads();
3239 	CU_ASSERT(g_bserrno != 0);
3240 	CU_ASSERT(g_blob == NULL);
3241 
3242 	spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL);
3243 	poll_threads();
3244 	CU_ASSERT(g_bserrno == 0);
3245 	CU_ASSERT(g_blob != NULL);
3246 	blob = g_blob;
3247 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3248 	spdk_blob_close(blob, blob_op_complete, NULL);
3249 	poll_threads();
3250 	CU_ASSERT(g_bserrno == 0);
3251 
3252 	ut_bs_reload(&bs, NULL);
3253 
3254 	/* Create second blob */
3255 	ut_spdk_blob_opts_init(&blob_opts);
3256 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
3257 	poll_threads();
3258 	CU_ASSERT(g_bserrno == 0);
3259 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
3260 	blobid2 = g_blobid;
3261 
3262 	/* Create third blob */
3263 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
3264 	poll_threads();
3265 	CU_ASSERT(g_bserrno == 0);
3266 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
3267 	blobid3 = g_blobid;
3268 
3269 	spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL);
3270 	poll_threads();
3271 	CU_ASSERT(g_bserrno == 0);
3272 	CU_ASSERT(g_blob != NULL);
3273 	blob = g_blob;
3274 
3275 	/* Set some xattrs for second blob */
3276 	rc = spdk_blob_set_xattr(blob, "name", "log1.txt", strlen("log1.txt") + 1);
3277 	CU_ASSERT(rc == 0);
3278 
3279 	length = 5432;
3280 	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
3281 	CU_ASSERT(rc == 0);
3282 
3283 	spdk_blob_close(blob, blob_op_complete, NULL);
3284 	poll_threads();
3285 	CU_ASSERT(g_bserrno == 0);
3286 	blob = NULL;
3287 	g_blob = NULL;
3288 	g_blobid = SPDK_BLOBID_INVALID;
3289 
3290 	spdk_bs_open_blob(bs, blobid3, blob_op_with_handle_complete, NULL);
3291 	poll_threads();
3292 	CU_ASSERT(g_bserrno == 0);
3293 	CU_ASSERT(g_blob != NULL);
3294 	blob = g_blob;
3295 
3296 	/* Set some xattrs for third blob */
3297 	rc = spdk_blob_set_xattr(blob, "name", "log2.txt", strlen("log2.txt") + 1);
3298 	CU_ASSERT(rc == 0);
3299 
3300 	length = 5432;
3301 	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
3302 	CU_ASSERT(rc == 0);
3303 
3304 	spdk_blob_close(blob, blob_op_complete, NULL);
3305 	poll_threads();
3306 	CU_ASSERT(g_bserrno == 0);
3307 	blob = NULL;
3308 	g_blob = NULL;
3309 	g_blobid = SPDK_BLOBID_INVALID;
3310 
3311 	/* Mark second blob as invalid */
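	/* Bumping sequence_num to a non-zero value makes this page look like a continuation
	 * page rather than the start of the blob's metadata chain, so recovery on dirty load
	 * should treat the second blob as invalid. */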
3312 	page_num = bs_blobid_to_page(blobid2);
3313 
3314 	index = DEV_BUFFER_BLOCKLEN * (bs->md_start + page_num);
3315 	page = (struct spdk_blob_md_page *)&g_dev_buffer[index];
3316 	page->sequence_num = 1;
3317 	page->crc = blob_md_page_calc_crc(page);
3318 
3319 	free_clusters = spdk_bs_free_cluster_count(bs);
3320 
3321 	ut_bs_dirty_load(&bs, NULL);
3322 
3323 	spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL);
3324 	poll_threads();
3325 	CU_ASSERT(g_bserrno != 0);
3326 	CU_ASSERT(g_blob == NULL);
3327 
3328 	spdk_bs_open_blob(bs, blobid3, blob_op_with_handle_complete, NULL);
3329 	poll_threads();
3330 	CU_ASSERT(g_bserrno == 0);
3331 	CU_ASSERT(g_blob != NULL);
3332 	blob = g_blob;
3333 
3334 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3335 }
3336 
3337 static void
3338 blob_flags(void)
3339 {
3340 	struct spdk_blob_store *bs = g_bs;
3341 	spdk_blob_id blobid_invalid, blobid_data_ro, blobid_md_ro;
3342 	struct spdk_blob *blob_invalid, *blob_data_ro, *blob_md_ro;
3343 	struct spdk_blob_opts blob_opts;
3344 	int rc;
3345 
3346 	/* Create three blobs - one each for testing invalid, data_ro and md_ro flags. */
3347 	blob_invalid = ut_blob_create_and_open(bs, NULL);
3348 	blobid_invalid = spdk_blob_get_id(blob_invalid);
3349 
3350 	blob_data_ro = ut_blob_create_and_open(bs, NULL);
3351 	blobid_data_ro = spdk_blob_get_id(blob_data_ro);
3352 
3353 	ut_spdk_blob_opts_init(&blob_opts);
3354 	blob_opts.clear_method = BLOB_CLEAR_WITH_WRITE_ZEROES;
3355 	blob_md_ro = ut_blob_create_and_open(bs, &blob_opts);
3356 	blobid_md_ro = spdk_blob_get_id(blob_md_ro);
3357 	CU_ASSERT((blob_md_ro->md_ro_flags & SPDK_BLOB_MD_RO_FLAGS_MASK) == BLOB_CLEAR_WITH_WRITE_ZEROES);
3358 
3359 	/* Change the size of blob_data_ro to check if flags are serialized
3360 	 * when the blob has a non-zero number of extents */
3361 	spdk_blob_resize(blob_data_ro, 10, blob_op_complete, NULL);
3362 	poll_threads();
3363 	CU_ASSERT(g_bserrno == 0);
3364 
3365 	/* Set the xattr to check if flags are serialized
3366 	 * when the blob has a non-zero number of xattrs */
3367 	rc = spdk_blob_set_xattr(blob_md_ro, "name", "log.txt", strlen("log.txt") + 1);
3368 	CU_ASSERT(rc == 0);
3369 
3370 	blob_invalid->invalid_flags = (1ULL << 63);
3371 	blob_invalid->state = SPDK_BLOB_STATE_DIRTY;
3372 	blob_data_ro->data_ro_flags = (1ULL << 62);
3373 	blob_data_ro->state = SPDK_BLOB_STATE_DIRTY;
3374 	blob_md_ro->md_ro_flags = (1ULL << 61);
3375 	blob_md_ro->state = SPDK_BLOB_STATE_DIRTY;
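	/* These are flag bits unknown to the current code: on reload, the unknown invalid flag
	 * should make blob_invalid unopenable, the unknown data_ro flag should force
	 * blob_data_ro to be both data and md read-only, and the unknown md_ro flag should
	 * leave blob_md_ro md read-only. */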
3376 
3377 	g_bserrno = -1;
3378 	spdk_blob_sync_md(blob_invalid, blob_op_complete, NULL);
3379 	poll_threads();
3380 	CU_ASSERT(g_bserrno == 0);
3381 	g_bserrno = -1;
3382 	spdk_blob_sync_md(blob_data_ro, blob_op_complete, NULL);
3383 	poll_threads();
3384 	CU_ASSERT(g_bserrno == 0);
3385 	g_bserrno = -1;
3386 	spdk_blob_sync_md(blob_md_ro, blob_op_complete, NULL);
3387 	poll_threads();
3388 	CU_ASSERT(g_bserrno == 0);
3389 
3390 	g_bserrno = -1;
3391 	spdk_blob_close(blob_invalid, blob_op_complete, NULL);
3392 	poll_threads();
3393 	CU_ASSERT(g_bserrno == 0);
3394 	blob_invalid = NULL;
3395 	g_bserrno = -1;
3396 	spdk_blob_close(blob_data_ro, blob_op_complete, NULL);
3397 	poll_threads();
3398 	CU_ASSERT(g_bserrno == 0);
3399 	blob_data_ro = NULL;
3400 	g_bserrno = -1;
3401 	spdk_blob_close(blob_md_ro, blob_op_complete, NULL);
3402 	poll_threads();
3403 	CU_ASSERT(g_bserrno == 0);
3404 	blob_md_ro = NULL;
3405 
3406 	g_blob = NULL;
3407 	g_blobid = SPDK_BLOBID_INVALID;
3408 
3409 	ut_bs_reload(&bs, NULL);
3410 
3411 	g_blob = NULL;
3412 	g_bserrno = 0;
3413 	spdk_bs_open_blob(bs, blobid_invalid, blob_op_with_handle_complete, NULL);
3414 	poll_threads();
3415 	CU_ASSERT(g_bserrno != 0);
3416 	CU_ASSERT(g_blob == NULL);
3417 
3418 	g_blob = NULL;
3419 	g_bserrno = -1;
3420 	spdk_bs_open_blob(bs, blobid_data_ro, blob_op_with_handle_complete, NULL);
3421 	poll_threads();
3422 	CU_ASSERT(g_bserrno == 0);
3423 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
3424 	blob_data_ro = g_blob;
3425 	/* If an unknown data_ro flag was found, the blob should be marked both data and md read-only. */
3426 	CU_ASSERT(blob_data_ro->data_ro == true);
3427 	CU_ASSERT(blob_data_ro->md_ro == true);
3428 	CU_ASSERT(spdk_blob_get_num_clusters(blob_data_ro) == 10);
3429 
3430 	g_blob = NULL;
3431 	g_bserrno = -1;
3432 	spdk_bs_open_blob(bs, blobid_md_ro, blob_op_with_handle_complete, NULL);
3433 	poll_threads();
3434 	CU_ASSERT(g_bserrno == 0);
3435 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
3436 	blob_md_ro = g_blob;
3437 	CU_ASSERT(blob_md_ro->data_ro == false);
3438 	CU_ASSERT(blob_md_ro->md_ro == true);
3439 
3440 	g_bserrno = -1;
3441 	spdk_blob_sync_md(blob_md_ro, blob_op_complete, NULL);
3442 	poll_threads();
3443 	CU_ASSERT(g_bserrno == 0);
3444 
3445 	ut_blob_close_and_delete(bs, blob_data_ro);
3446 	ut_blob_close_and_delete(bs, blob_md_ro);
3447 }
3448 
3449 static void
3450 bs_version(void)
3451 {
3452 	struct spdk_bs_super_block *super;
3453 	struct spdk_blob_store *bs = g_bs;
3454 	struct spdk_bs_dev *dev;
3455 	struct spdk_blob *blob;
3456 	struct spdk_blob_opts blob_opts;
3457 	spdk_blob_id blobid;
3458 
3459 	/* Unload the blob store */
3460 	spdk_bs_unload(bs, bs_op_complete, NULL);
3461 	poll_threads();
3462 	CU_ASSERT(g_bserrno == 0);
3463 	g_bs = NULL;
3464 
3465 	/*
3466 	 * Change the bs version on disk.  This will allow us to
3467 	 *  test that the version does not get modified automatically
3468 	 *  when loading and unloading the blobstore.
3469 	 */
3470 	super = (struct spdk_bs_super_block *)&g_dev_buffer[0];
3471 	CU_ASSERT(super->version == SPDK_BS_VERSION);
3472 	CU_ASSERT(super->clean == 1);
3473 	super->version = 2;
3474 	/*
3475 	 * Version 2 metadata does not have a used blobid mask, so clear
3476 	 *  those fields in the super block and zero the corresponding
3477 	 *  region on "disk".  We will use this to ensure blob IDs are
3478 	 *  correctly reconstructed.
3479 	 */
3480 	memset(&g_dev_buffer[super->used_blobid_mask_start * SPDK_BS_PAGE_SIZE], 0,
3481 	       super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE);
3482 	super->used_blobid_mask_start = 0;
3483 	super->used_blobid_mask_len = 0;
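	/* Recompute the super block CRC after editing it; otherwise the load below
	 * would reject the super block because of a CRC mismatch. */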
3484 	super->crc = blob_md_page_calc_crc(super);
3485 
3486 	/* Load an existing blob store */
3487 	dev = init_dev();
3488 	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
3489 	poll_threads();
3490 	CU_ASSERT(g_bserrno == 0);
3491 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3492 	CU_ASSERT(super->clean == 1);
3493 	bs = g_bs;
3494 
3495 	/*
3496 	 * Create a blob - just to make sure that unloading the blobstore
3497 	 *  results in writing the super block (since metadata pages
3498 	 *  were allocated).
3499 	 */
3500 	ut_spdk_blob_opts_init(&blob_opts);
3501 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
3502 	poll_threads();
3503 	CU_ASSERT(g_bserrno == 0);
3504 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
3505 	blobid = g_blobid;
3506 
3507 	/* Unload the blob store */
3508 	spdk_bs_unload(bs, bs_op_complete, NULL);
3509 	poll_threads();
3510 	CU_ASSERT(g_bserrno == 0);
3511 	g_bs = NULL;
3512 	CU_ASSERT(super->version == 2);
3513 	CU_ASSERT(super->used_blobid_mask_start == 0);
3514 	CU_ASSERT(super->used_blobid_mask_len == 0);
3515 
3516 	dev = init_dev();
3517 	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
3518 	poll_threads();
3519 	CU_ASSERT(g_bserrno == 0);
3520 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3521 	bs = g_bs;
3522 
3523 	g_blob = NULL;
3524 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
3525 	poll_threads();
3526 	CU_ASSERT(g_bserrno == 0);
3527 	CU_ASSERT(g_blob != NULL);
3528 	blob = g_blob;
3529 
3530 	ut_blob_close_and_delete(bs, blob);
3531 
3532 	CU_ASSERT(super->version == 2);
3533 	CU_ASSERT(super->used_blobid_mask_start == 0);
3534 	CU_ASSERT(super->used_blobid_mask_len == 0);
3535 }
3536 
3537 static void
3538 blob_set_xattrs_test(void)
3539 {
3540 	struct spdk_blob_store *bs = g_bs;
3541 	struct spdk_blob *blob;
3542 	struct spdk_blob_opts opts;
3543 	const void *value;
3544 	size_t value_len;
3545 	char *xattr;
3546 	size_t xattr_length;
3547 	int rc;
3548 
3549 	/* Create blob with extra attributes */
3550 	ut_spdk_blob_opts_init(&opts);
3551 
3552 	opts.xattrs.names = g_xattr_names;
3553 	opts.xattrs.get_value = _get_xattr_value;
3554 	opts.xattrs.count = 3;
3555 	opts.xattrs.ctx = &g_ctx;
3556 
3557 	blob = ut_blob_create_and_open(bs, &opts);
3558 
3559 	/* Get the xattrs */
3560 	value = NULL;
3561 
3562 	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len);
3563 	CU_ASSERT(rc == 0);
3564 	SPDK_CU_ASSERT_FATAL(value != NULL);
3565 	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
3566 	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);
3567 
3568 	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len);
3569 	CU_ASSERT(rc == 0);
3570 	SPDK_CU_ASSERT_FATAL(value != NULL);
3571 	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
3572 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);
3573 
3574 	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len);
3575 	CU_ASSERT(rc == 0);
3576 	SPDK_CU_ASSERT_FATAL(value != NULL);
3577 	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
3578 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);
3579 
3580 	/* Try to get non existing attribute */
3581 
3582 	rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len);
3583 	CU_ASSERT(rc == -ENOENT);
3584 
3585 	/* Try xattr exceeding maximum length of descriptor in single page */
3586 	xattr_length = SPDK_BS_MAX_DESC_SIZE - sizeof(struct spdk_blob_md_descriptor_xattr) -
3587 		       strlen("large_xattr") + 1;
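	/* The serialized descriptor (header + name + value) now exceeds SPDK_BS_MAX_DESC_SIZE
	 * by exactly one byte, so spdk_blob_set_xattr() below is expected to fail with -ENOMEM. */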
3588 	xattr = calloc(xattr_length, sizeof(char));
3589 	SPDK_CU_ASSERT_FATAL(xattr != NULL);
3590 	rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
3591 	free(xattr);
3592 	SPDK_CU_ASSERT_FATAL(rc == -ENOMEM);
3593 
3594 	spdk_blob_close(blob, blob_op_complete, NULL);
3595 	poll_threads();
3596 	CU_ASSERT(g_bserrno == 0);
3597 	blob = NULL;
3598 	g_blob = NULL;
3599 	g_blobid = SPDK_BLOBID_INVALID;
3600 
3601 	/* NULL callback */
3602 	ut_spdk_blob_opts_init(&opts);
3603 	opts.xattrs.names = g_xattr_names;
3604 	opts.xattrs.get_value = NULL;
3605 	opts.xattrs.count = 1;
3606 	opts.xattrs.ctx = &g_ctx;
3607 
3608 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
3609 	poll_threads();
3610 	CU_ASSERT(g_bserrno == -EINVAL);
3611 	CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID);
3612 
3613 	/* NULL values */
3614 	ut_spdk_blob_opts_init(&opts);
3615 	opts.xattrs.names = g_xattr_names;
3616 	opts.xattrs.get_value = _get_xattr_value_null;
3617 	opts.xattrs.count = 1;
3618 	opts.xattrs.ctx = NULL;
3619 
3620 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
3621 	poll_threads();
3622 	CU_ASSERT(g_bserrno == -EINVAL);
3623 }
3624 
3625 static void
3626 blob_thin_prov_alloc(void)
3627 {
3628 	struct spdk_blob_store *bs = g_bs;
3629 	struct spdk_blob *blob;
3630 	struct spdk_blob_opts opts;
3631 	spdk_blob_id blobid;
3632 	uint64_t free_clusters;
3633 
3634 	free_clusters = spdk_bs_free_cluster_count(bs);
3635 
3636 	/* Set blob as thin provisioned */
3637 	ut_spdk_blob_opts_init(&opts);
3638 	opts.thin_provision = true;
3639 
3640 	blob = ut_blob_create_and_open(bs, &opts);
3641 	blobid = spdk_blob_get_id(blob);
3642 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3643 
3644 	CU_ASSERT(blob->active.num_clusters == 0);
3645 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
3646 
3647 	/* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */
3648 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
3649 	poll_threads();
3650 	CU_ASSERT(g_bserrno == 0);
3651 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3652 	CU_ASSERT(blob->active.num_clusters == 5);
3653 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
3654 
3655 	/* Grow it to 1TB - still unallocated */
3656 	spdk_blob_resize(blob, 262144, blob_op_complete, NULL);
3657 	poll_threads();
3658 	CU_ASSERT(g_bserrno == 0);
3659 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3660 	CU_ASSERT(blob->active.num_clusters == 262144);
3661 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 262144);
3662 
3663 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
3664 	poll_threads();
3665 	CU_ASSERT(g_bserrno == 0);
3666 	/* Sync must not change anything */
3667 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3668 	CU_ASSERT(blob->active.num_clusters == 262144);
3669 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 262144);
3670 	/* Since no clusters are allocated,
3671 	 * the number of metadata pages is expected to be minimal.
3672 	 */
3673 	CU_ASSERT(blob->active.num_pages == 1);
3674 
3675 	/* Shrink the blob to 3 clusters - still unallocated */
3676 	spdk_blob_resize(blob, 3, blob_op_complete, NULL);
3677 	poll_threads();
3678 	CU_ASSERT(g_bserrno == 0);
3679 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3680 	CU_ASSERT(blob->active.num_clusters == 3);
3681 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 3);
3682 
3683 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
3684 	poll_threads();
3685 	CU_ASSERT(g_bserrno == 0);
3686 	/* Sync must not change anything */
3687 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3688 	CU_ASSERT(blob->active.num_clusters == 3);
3689 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 3);
3690 
3691 	spdk_blob_close(blob, blob_op_complete, NULL);
3692 	poll_threads();
3693 	CU_ASSERT(g_bserrno == 0);
3694 
3695 	ut_bs_reload(&bs, NULL);
3696 
3697 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
3698 	poll_threads();
3699 	CU_ASSERT(g_bserrno == 0);
3700 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
3701 	blob = g_blob;
3702 
3703 	/* Check that clusters allocation and size is still the same */
3704 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3705 	CU_ASSERT(blob->active.num_clusters == 3);
3706 
3707 	ut_blob_close_and_delete(bs, blob);
3708 }
3709 
3710 static void
3711 blob_insert_cluster_msg_test(void)
3712 {
3713 	struct spdk_blob_store *bs = g_bs;
3714 	struct spdk_blob *blob;
3715 	struct spdk_blob_opts opts;
3716 	spdk_blob_id blobid;
3717 	uint64_t free_clusters;
3718 	uint64_t new_cluster = 0;
3719 	uint32_t cluster_num = 3;
3720 	uint32_t extent_page = 0;
3721 
3722 	free_clusters = spdk_bs_free_cluster_count(bs);
3723 
3724 	/* Set blob as thin provisioned */
3725 	ut_spdk_blob_opts_init(&opts);
3726 	opts.thin_provision = true;
3727 	opts.num_clusters = 4;
3728 
3729 	blob = ut_blob_create_and_open(bs, &opts);
3730 	blobid = spdk_blob_get_id(blob);
3731 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3732 
3733 	CU_ASSERT(blob->active.num_clusters == 4);
3734 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 4);
3735 	CU_ASSERT(blob->active.clusters[cluster_num] == 0);
3736 
3737 	/* Allocate a cluster for cluster_num; new_cluster is returned so it can be inserted on the md_thread.
3738 	 * This simulates the behaviour when a cluster is allocated after blob creation,
3739 	 * such as in _spdk_bs_allocate_and_copy_cluster(). */
3740 	bs_allocate_cluster(blob, cluster_num, &new_cluster, &extent_page, false);
3741 	CU_ASSERT(blob->active.clusters[cluster_num] == 0);
3742 
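	/* Publish the allocation: inserting the cluster into the blob's cluster map is
	 * done via a message to the metadata thread, which is what this test exercises. */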
3743 	blob_insert_cluster_on_md_thread(blob, cluster_num, new_cluster, extent_page,
3744 					 blob_op_complete, NULL);
3745 	poll_threads();
3746 
3747 	CU_ASSERT(blob->active.clusters[cluster_num] != 0);
3748 
3749 	spdk_blob_close(blob, blob_op_complete, NULL);
3750 	poll_threads();
3751 	CU_ASSERT(g_bserrno == 0);
3752 
3753 	ut_bs_reload(&bs, NULL);
3754 
3755 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
3756 	poll_threads();
3757 	CU_ASSERT(g_bserrno == 0);
3758 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
3759 	blob = g_blob;
3760 
3761 	CU_ASSERT(blob->active.clusters[cluster_num] != 0);
3762 
3763 	ut_blob_close_and_delete(bs, blob);
3764 }
3765 
3766 static void
3767 blob_thin_prov_rw(void)
3768 {
3769 	static const uint8_t zero[10 * 4096] = { 0 };
3770 	struct spdk_blob_store *bs = g_bs;
3771 	struct spdk_blob *blob, *blob_id0;
3772 	struct spdk_io_channel *channel, *channel_thread1;
3773 	struct spdk_blob_opts opts;
3774 	uint64_t free_clusters;
3775 	uint64_t page_size;
3776 	uint8_t payload_read[10 * 4096];
3777 	uint8_t payload_write[10 * 4096];
3778 	uint64_t write_bytes;
3779 	uint64_t read_bytes;
3780 
3781 	free_clusters = spdk_bs_free_cluster_count(bs);
3782 	page_size = spdk_bs_get_page_size(bs);
3783 
3784 	channel = spdk_bs_alloc_io_channel(bs);
3785 	CU_ASSERT(channel != NULL);
3786 
3787 	ut_spdk_blob_opts_init(&opts);
3788 	opts.thin_provision = true;
3789 
3790 	/* Create and delete a blob at md page 0, so that the next md page allocation
3791 	 * for an extent will reuse that page. */
3792 	blob_id0 = ut_blob_create_and_open(bs, &opts);
3793 	blob = ut_blob_create_and_open(bs, &opts);
3794 	ut_blob_close_and_delete(bs, blob_id0);
3795 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3796 
3797 	CU_ASSERT(blob->active.num_clusters == 0);
3798 
3799 	/* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */
3800 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
3801 	poll_threads();
3802 	CU_ASSERT(g_bserrno == 0);
3803 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3804 	CU_ASSERT(blob->active.num_clusters == 5);
3805 
3806 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
3807 	poll_threads();
3808 	CU_ASSERT(g_bserrno == 0);
3809 	/* Sync must not change anything */
3810 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3811 	CU_ASSERT(blob->active.num_clusters == 5);
3812 
3813 	/* Payload should be all zeros from unallocated clusters */
3814 	memset(payload_read, 0xFF, sizeof(payload_read));
3815 	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
3816 	poll_threads();
3817 	CU_ASSERT(g_bserrno == 0);
3818 	CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0);
3819 
3820 	write_bytes = g_dev_write_bytes;
3821 	read_bytes = g_dev_read_bytes;
3822 
3823 	/* Perform write on thread 1. That will allocate cluster on thread 0 via send_msg */
3824 	set_thread(1);
3825 	channel_thread1 = spdk_bs_alloc_io_channel(bs);
3826 	CU_ASSERT(channel_thread1 != NULL);
3827 	memset(payload_write, 0xE5, sizeof(payload_write));
3828 	spdk_blob_io_write(blob, channel_thread1, payload_write, 4, 10, blob_op_complete, NULL);
3829 	CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs));
3830 	/* Perform write on thread 0. That will also try to allocate a cluster,
3831 	 * but fail because the other thread issued the cluster allocation first. */
3832 	set_thread(0);
3833 	memset(payload_write, 0xE5, sizeof(payload_write));
3834 	spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
3835 	CU_ASSERT(free_clusters - 2 == spdk_bs_free_cluster_count(bs));
3836 	poll_threads();
3837 	CU_ASSERT(g_bserrno == 0);
3838 	CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs));
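	/* Each thread transiently reserved its own cluster for the same range (hence the
	 * "- 2" above); after the md thread processed both insertions, the losing
	 * allocation was released and only one cluster remains in use. */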
3839 	/* For a thin-provisioned blob we expect to write 20 data pages plus one metadata page and
3840 	 * read 0 bytes */
3841 	if (g_use_extent_table) {
3842 		/* Add one more page for EXTENT_PAGE write */
3843 		CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 22);
3844 	} else {
3845 		CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 21);
3846 	}
3847 	CU_ASSERT(g_dev_read_bytes - read_bytes == 0);
3848 
3849 	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
3850 	poll_threads();
3851 	CU_ASSERT(g_bserrno == 0);
3852 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
3853 
3854 	ut_blob_close_and_delete(bs, blob);
3855 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3856 
3857 	set_thread(1);
3858 	spdk_bs_free_io_channel(channel_thread1);
3859 	set_thread(0);
3860 	spdk_bs_free_io_channel(channel);
3861 	poll_threads();
3862 	g_blob = NULL;
3863 	g_blobid = 0;
3864 }
3865 
3866 static void
3867 blob_thin_prov_write_count_io(void)
3868 {
3869 	struct spdk_blob_store *bs;
3870 	struct spdk_blob *blob;
3871 	struct spdk_io_channel *ch;
3872 	struct spdk_bs_dev *dev;
3873 	struct spdk_bs_opts bs_opts;
3874 	struct spdk_blob_opts opts;
3875 	uint64_t free_clusters;
3876 	uint64_t page_size;
3877 	uint8_t payload_write[4096];
3878 	uint64_t write_bytes;
3879 	uint64_t read_bytes;
3880 	const uint32_t CLUSTER_SZ = 16384;
3881 	uint32_t pages_per_cluster;
3882 	uint32_t pages_per_extent_page;
3883 	uint32_t i;
3884 
3885 	/* Use a very small cluster size for this test.  This ensures we need multiple
3886 	 * extent pages to hold all of the clusters even for the relatively small blobs
3887 	 * we are restricted to in the unit tests (i.e. we don't want to allocate multi-GB
3888 	 * buffers).
3889 	 */
3890 	dev = init_dev();
3891 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
3892 	bs_opts.cluster_sz = CLUSTER_SZ;
3893 
3894 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
3895 	poll_threads();
3896 	CU_ASSERT(g_bserrno == 0);
3897 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3898 	bs = g_bs;
3899 
3900 	free_clusters = spdk_bs_free_cluster_count(bs);
3901 	page_size = spdk_bs_get_page_size(bs);
3902 	pages_per_cluster = CLUSTER_SZ / page_size;
3903 	pages_per_extent_page = SPDK_EXTENTS_PER_EP * pages_per_cluster;
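	/* Each extent page tracks SPDK_EXTENTS_PER_EP clusters, so this is the number of
	 * blob pages covered by a single extent page. */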
3904 
3905 	ch = spdk_bs_alloc_io_channel(bs);
3906 	SPDK_CU_ASSERT_FATAL(ch != NULL);
3907 
3908 	ut_spdk_blob_opts_init(&opts);
3909 	opts.thin_provision = true;
3910 
3911 	blob = ut_blob_create_and_open(bs, &opts);
3912 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3913 
3914 	/* Resize the blob so that it will require 8 extent pages to hold all of
3915 	 * the clusters.
3916 	 */
3917 	g_bserrno = -1;
3918 	spdk_blob_resize(blob, SPDK_EXTENTS_PER_EP * 8, blob_op_complete, NULL);
3919 	poll_threads();
3920 	CU_ASSERT(g_bserrno == 0);
3921 
3922 	g_bserrno = -1;
3923 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
3924 	poll_threads();
3925 	CU_ASSERT(g_bserrno == 0);
3926 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3927 	CU_ASSERT(blob->active.num_clusters == SPDK_EXTENTS_PER_EP * 8);
3928 
3929 	memset(payload_write, 0, sizeof(payload_write));
3930 	for (i = 0; i < 8; i++) {
3931 		write_bytes = g_dev_write_bytes;
3932 		read_bytes = g_dev_read_bytes;
3933 
3934 		g_bserrno = -1;
3935 		spdk_blob_io_write(blob, ch, payload_write, pages_per_extent_page * i, 1, blob_op_complete, NULL);
3936 		poll_threads();
3937 		CU_ASSERT(g_bserrno == 0);
3938 		CU_ASSERT(free_clusters - (2 * i + 1) == spdk_bs_free_cluster_count(bs));
3939 
3940 		CU_ASSERT(g_dev_read_bytes == read_bytes);
3941 		if (!g_use_extent_table) {
3942 			/* For legacy metadata, we should have written two pages - one for the
3943 			 * write I/O itself, another for the blob's primary metadata.
3944 			 */
3945 			CU_ASSERT((g_dev_write_bytes - write_bytes) / page_size == 2);
3946 		} else {
3947 			/* For extent table metadata, we should have written three pages - one
3948 			 * for the write I/O, one for the extent page, one for the blob's primary
3949 			 * metadata.
3950 			 */
3951 			CU_ASSERT((g_dev_write_bytes - write_bytes) / page_size == 3);
3952 		}
3953 
3954 		/* The write should have synced the metadata already.  Do another sync here
3955 		 * just to confirm.
3956 		 */
3957 		write_bytes = g_dev_write_bytes;
3958 		read_bytes = g_dev_read_bytes;
3959 
3960 		g_bserrno = -1;
3961 		spdk_blob_sync_md(blob, blob_op_complete, NULL);
3962 		poll_threads();
3963 		CU_ASSERT(g_bserrno == 0);
3964 		CU_ASSERT(free_clusters - (2 * i + 1) == spdk_bs_free_cluster_count(bs));
3965 
3966 		CU_ASSERT(g_dev_read_bytes == read_bytes);
3967 		CU_ASSERT(g_dev_write_bytes == write_bytes);
3968 
3969 		/* Now write to another unallocated cluster that is part of the same extent page. */
3970 		g_bserrno = -1;
3971 		spdk_blob_io_write(blob, ch, payload_write, pages_per_extent_page * i + pages_per_cluster,
3972 				   1, blob_op_complete, NULL);
3973 		poll_threads();
3974 		CU_ASSERT(g_bserrno == 0);
3975 		CU_ASSERT(free_clusters - (2 * i + 2) == spdk_bs_free_cluster_count(bs));
3976 
3977 		CU_ASSERT(g_dev_read_bytes == read_bytes);
3978 		/*
3979 		 * For legacy metadata, we should have written the I/O and the primary metadata page.
3980 		 * For extent table metadata, we should have written the I/O and the extent metadata page.
3981 		 */
3982 		CU_ASSERT((g_dev_write_bytes - write_bytes) / page_size == 2);
3983 	}
3984 
3985 	ut_blob_close_and_delete(bs, blob);
3986 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3987 
3988 	spdk_bs_free_io_channel(ch);
3989 	poll_threads();
3990 	g_blob = NULL;
3991 	g_blobid = 0;
3992 
3993 	spdk_bs_unload(bs, bs_op_complete, NULL);
3994 	poll_threads();
3995 	CU_ASSERT(g_bserrno == 0);
3996 	g_bs = NULL;
3997 }
3998 
3999 static void
4000 blob_thin_prov_rle(void)
4001 {
4002 	static const uint8_t zero[10 * 4096] = { 0 };
4003 	struct spdk_blob_store *bs = g_bs;
4004 	struct spdk_blob *blob;
4005 	struct spdk_io_channel *channel;
4006 	struct spdk_blob_opts opts;
4007 	spdk_blob_id blobid;
4008 	uint64_t free_clusters;
4009 	uint64_t page_size;
4010 	uint8_t payload_read[10 * 4096];
4011 	uint8_t payload_write[10 * 4096];
4012 	uint64_t write_bytes;
4013 	uint64_t read_bytes;
4014 	uint64_t io_unit;
4015 
4016 	free_clusters = spdk_bs_free_cluster_count(bs);
4017 	page_size = spdk_bs_get_page_size(bs);
4018 
4019 	ut_spdk_blob_opts_init(&opts);
4020 	opts.thin_provision = true;
4021 	opts.num_clusters = 5;
4022 
4023 	blob = ut_blob_create_and_open(bs, &opts);
4024 	blobid = spdk_blob_get_id(blob);
4025 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4026 
4027 	channel = spdk_bs_alloc_io_channel(bs);
4028 	CU_ASSERT(channel != NULL);
4029 
4030 	/* Specifically target the second cluster in the blob as the first allocation */
4031 	io_unit = bs_cluster_to_page(bs, 1) * bs_io_unit_per_page(bs);
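	/* Convert cluster index 1 to a page, then to an io_unit offset, so the I/O below
	 * starts at the beginning of the second cluster. */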
4032 
4033 	/* Payload should be all zeros from unallocated clusters */
4034 	memset(payload_read, 0xFF, sizeof(payload_read));
4035 	spdk_blob_io_read(blob, channel, payload_read, io_unit, 10, blob_op_complete, NULL);
4036 	poll_threads();
4037 	CU_ASSERT(g_bserrno == 0);
4038 	CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0);
4039 
4040 	write_bytes = g_dev_write_bytes;
4041 	read_bytes = g_dev_read_bytes;
4042 
4043 	/* Issue write to second cluster in a blob */
4044 	memset(payload_write, 0xE5, sizeof(payload_write));
4045 	spdk_blob_io_write(blob, channel, payload_write, io_unit, 10, blob_op_complete, NULL);
4046 	poll_threads();
4047 	CU_ASSERT(g_bserrno == 0);
4048 	CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs));
4049 	/* For a thin-provisioned blob we expect to write 10 data pages plus one metadata page and
4050 	 * read 0 bytes */
4051 	if (g_use_extent_table) {
4052 		/* Add one more page for EXTENT_PAGE write */
4053 		CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 12);
4054 	} else {
4055 		CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 11);
4056 	}
4057 	CU_ASSERT(g_dev_read_bytes - read_bytes == 0);
4058 
4059 	spdk_blob_io_read(blob, channel, payload_read, io_unit, 10, blob_op_complete, NULL);
4060 	poll_threads();
4061 	CU_ASSERT(g_bserrno == 0);
4062 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
4063 
4064 	spdk_bs_free_io_channel(channel);
4065 	poll_threads();
4066 
4067 	spdk_blob_close(blob, blob_op_complete, NULL);
4068 	poll_threads();
4069 	CU_ASSERT(g_bserrno == 0);
4070 
4071 	ut_bs_reload(&bs, NULL);
4072 
4073 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
4074 	poll_threads();
4075 	CU_ASSERT(g_bserrno == 0);
4076 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4077 	blob = g_blob;
4078 
4079 	channel = spdk_bs_alloc_io_channel(bs);
4080 	CU_ASSERT(channel != NULL);
4081 
4082 	/* Read second cluster after blob reload to confirm data written */
4083 	spdk_blob_io_read(blob, channel, payload_read, io_unit, 10, blob_op_complete, NULL);
4084 	poll_threads();
4085 	CU_ASSERT(g_bserrno == 0);
4086 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
4087 
4088 	spdk_bs_free_io_channel(channel);
4089 	poll_threads();
4090 
4091 	ut_blob_close_and_delete(bs, blob);
4092 }
4093 
4094 static void
4095 blob_thin_prov_rw_iov(void)
4096 {
4097 	static const uint8_t zero[10 * 4096] = { 0 };
4098 	struct spdk_blob_store *bs = g_bs;
4099 	struct spdk_blob *blob;
4100 	struct spdk_io_channel *channel;
4101 	struct spdk_blob_opts opts;
4102 	uint64_t free_clusters;
4103 	uint8_t payload_read[10 * 4096];
4104 	uint8_t payload_write[10 * 4096];
4105 	struct iovec iov_read[3];
4106 	struct iovec iov_write[3];
4107 
4108 	free_clusters = spdk_bs_free_cluster_count(bs);
4109 
4110 	channel = spdk_bs_alloc_io_channel(bs);
4111 	CU_ASSERT(channel != NULL);
4112 
4113 	ut_spdk_blob_opts_init(&opts);
4114 	opts.thin_provision = true;
4115 
4116 	blob = ut_blob_create_and_open(bs, &opts);
4117 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4118 
4119 	CU_ASSERT(blob->active.num_clusters == 0);
4120 
4121 	/* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */
4122 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
4123 	poll_threads();
4124 	CU_ASSERT(g_bserrno == 0);
4125 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4126 	CU_ASSERT(blob->active.num_clusters == 5);
4127 
4128 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
4129 	poll_threads();
4130 	CU_ASSERT(g_bserrno == 0);
4131 	/* Sync must not change anything */
4132 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4133 	CU_ASSERT(blob->active.num_clusters == 5);
4134 
4135 	/* Payload should be all zeros from unallocated clusters */
4136 	memset(payload_read, 0xAA, sizeof(payload_read));
4137 	iov_read[0].iov_base = payload_read;
4138 	iov_read[0].iov_len = 3 * 4096;
4139 	iov_read[1].iov_base = payload_read + 3 * 4096;
4140 	iov_read[1].iov_len = 4 * 4096;
4141 	iov_read[2].iov_base = payload_read + 7 * 4096;
4142 	iov_read[2].iov_len = 3 * 4096;
4143 	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
4144 	poll_threads();
4145 	CU_ASSERT(g_bserrno == 0);
4146 	CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0);
4147 
4148 	memset(payload_write, 0xE5, sizeof(payload_write));
4149 	iov_write[0].iov_base = payload_write;
4150 	iov_write[0].iov_len = 1 * 4096;
4151 	iov_write[1].iov_base = payload_write + 1 * 4096;
4152 	iov_write[1].iov_len = 5 * 4096;
4153 	iov_write[2].iov_base = payload_write + 6 * 4096;
4154 	iov_write[2].iov_len = 4 * 4096;
4155 
4156 	spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
4157 	poll_threads();
4158 	CU_ASSERT(g_bserrno == 0);
4159 
4160 	memset(payload_read, 0xAA, sizeof(payload_read));
4161 	iov_read[0].iov_base = payload_read;
4162 	iov_read[0].iov_len = 3 * 4096;
4163 	iov_read[1].iov_base = payload_read + 3 * 4096;
4164 	iov_read[1].iov_len = 4 * 4096;
4165 	iov_read[2].iov_base = payload_read + 7 * 4096;
4166 	iov_read[2].iov_len = 3 * 4096;
4167 	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
4168 	poll_threads();
4169 	CU_ASSERT(g_bserrno == 0);
4170 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
4171 
4172 	spdk_bs_free_io_channel(channel);
4173 	poll_threads();
4174 
4175 	ut_blob_close_and_delete(bs, blob);
4176 }
4177 
4178 struct iter_ctx {
4179 	int		current_iter;
4180 	spdk_blob_id	blobid[4];
4181 };
4182 
4183 static void
4184 test_iter(void *arg, struct spdk_blob *blob, int bserrno)
4185 {
4186 	struct iter_ctx *iter_ctx = arg;
4187 	spdk_blob_id blobid;
4188 
4189 	CU_ASSERT(bserrno == 0);
4190 	blobid = spdk_blob_get_id(blob);
4191 	CU_ASSERT(blobid == iter_ctx->blobid[iter_ctx->current_iter++]);
4192 }
4193 
4194 static void
4195 bs_load_iter_test(void)
4196 {
4197 	struct spdk_blob_store *bs;
4198 	struct spdk_bs_dev *dev;
4199 	struct iter_ctx iter_ctx = { 0 };
4200 	struct spdk_blob *blob;
4201 	int i, rc;
4202 	struct spdk_bs_opts opts;
4203 
4204 	dev = init_dev();
4205 	spdk_bs_opts_init(&opts, sizeof(opts));
4206 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
4207 
4208 	/* Initialize a new blob store */
4209 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
4210 	poll_threads();
4211 	CU_ASSERT(g_bserrno == 0);
4212 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
4213 	bs = g_bs;
4214 
4215 	for (i = 0; i < 4; i++) {
4216 		blob = ut_blob_create_and_open(bs, NULL);
4217 		iter_ctx.blobid[i] = spdk_blob_get_id(blob);
4218 
4219 		/* Just save the blobid as an xattr for testing purposes. */
4220 		rc = spdk_blob_set_xattr(blob, "blobid", &iter_ctx.blobid[i], sizeof(spdk_blob_id));
4221 		CU_ASSERT(rc == 0);
4222 
4223 		/* Resize the blob */
4224 		spdk_blob_resize(blob, i, blob_op_complete, NULL);
4225 		poll_threads();
4226 		CU_ASSERT(g_bserrno == 0);
4227 
4228 		spdk_blob_close(blob, blob_op_complete, NULL);
4229 		poll_threads();
4230 		CU_ASSERT(g_bserrno == 0);
4231 	}
4232 
4233 	g_bserrno = -1;
4234 	spdk_bs_unload(bs, bs_op_complete, NULL);
4235 	poll_threads();
4236 	CU_ASSERT(g_bserrno == 0);
4237 
4238 	dev = init_dev();
4239 	spdk_bs_opts_init(&opts, sizeof(opts));
4240 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
4241 	opts.iter_cb_fn = test_iter;
4242 	opts.iter_cb_arg = &iter_ctx;
4243 
4244 	/* Test blob iteration during load after a clean shutdown. */
4245 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
4246 	poll_threads();
4247 	CU_ASSERT(g_bserrno == 0);
4248 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
4249 	bs = g_bs;
4250 
4251 	/* Dirty shutdown */
4252 	bs_free(bs);
4253 
4254 	dev = init_dev();
4255 	spdk_bs_opts_init(&opts, sizeof(opts));
4256 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
4257 	opts.iter_cb_fn = test_iter;
4258 	iter_ctx.current_iter = 0;
4259 	opts.iter_cb_arg = &iter_ctx;
4260 
4261 	/* Test blob iteration during load after a dirty shutdown. */
4262 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
4263 	poll_threads();
4264 	CU_ASSERT(g_bserrno == 0);
4265 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
4266 	bs = g_bs;
4267 
4268 	spdk_bs_unload(bs, bs_op_complete, NULL);
4269 	poll_threads();
4270 	CU_ASSERT(g_bserrno == 0);
4271 	g_bs = NULL;
4272 }
4273 
4274 static void
4275 blob_snapshot_rw(void)
4276 {
4277 	static const uint8_t zero[10 * 4096] = { 0 };
4278 	struct spdk_blob_store *bs = g_bs;
4279 	struct spdk_blob *blob, *snapshot;
4280 	struct spdk_io_channel *channel;
4281 	struct spdk_blob_opts opts;
4282 	spdk_blob_id blobid, snapshotid;
4283 	uint64_t free_clusters;
4284 	uint64_t cluster_size;
4285 	uint64_t page_size;
4286 	uint8_t payload_read[10 * 4096];
4287 	uint8_t payload_write[10 * 4096];
4288 	uint64_t write_bytes;
4289 	uint64_t read_bytes;
4290 
4291 	free_clusters = spdk_bs_free_cluster_count(bs);
4292 	cluster_size = spdk_bs_get_cluster_size(bs);
4293 	page_size = spdk_bs_get_page_size(bs);
4294 
4295 	channel = spdk_bs_alloc_io_channel(bs);
4296 	CU_ASSERT(channel != NULL);
4297 
4298 	ut_spdk_blob_opts_init(&opts);
4299 	opts.thin_provision = true;
4300 	opts.num_clusters = 5;
4301 
4302 	blob = ut_blob_create_and_open(bs, &opts);
4303 	blobid = spdk_blob_get_id(blob);
4304 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4305 
4306 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
4307 
4308 	memset(payload_read, 0xFF, sizeof(payload_read));
4309 	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
4310 	poll_threads();
4311 	CU_ASSERT(g_bserrno == 0);
4312 	CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0);
4313 
4314 	memset(payload_write, 0xE5, sizeof(payload_write));
4315 	spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
4316 	poll_threads();
4317 	CU_ASSERT(g_bserrno == 0);
4318 	CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs));
4319 
4320 	/* Create snapshot from blob */
4321 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
4322 	poll_threads();
4323 	CU_ASSERT(g_bserrno == 0);
4324 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
4325 	snapshotid = g_blobid;
4326 
4327 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
4328 	poll_threads();
4329 	CU_ASSERT(g_bserrno == 0);
4330 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4331 	snapshot = g_blob;
4332 	CU_ASSERT(snapshot->data_ro == true);
4333 	CU_ASSERT(snapshot->md_ro == true);
4334 
4335 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5);
4336 
4337 	write_bytes = g_dev_write_bytes;
4338 	read_bytes = g_dev_read_bytes;
4339 
4340 	memset(payload_write, 0xAA, sizeof(payload_write));
4341 	spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
4342 	poll_threads();
4343 	CU_ASSERT(g_bserrno == 0);
4344 	CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs));
4345 
4346 	/* For a clone we need to allocate and copy one cluster, update one page of metadata
4347 	 * and then write 10 pages of payload.
4348 	 */
4349 	if (g_use_extent_table) {
4350 		/* Add one more page for EXTENT_PAGE write */
4351 		CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 12 + cluster_size);
4352 	} else {
4353 		CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 11 + cluster_size);
4354 	}
4355 	CU_ASSERT(g_dev_read_bytes - read_bytes == cluster_size);
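	/* The cluster_size worth of reads is the copy-on-write read of the original
	 * cluster from the backing snapshot before the new payload was written. */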
4356 
4357 	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
4358 	poll_threads();
4359 	CU_ASSERT(g_bserrno == 0);
4360 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
4361 
4362 	/* Data on snapshot should not change after write to clone */
4363 	memset(payload_write, 0xE5, sizeof(payload_write));
4364 	spdk_blob_io_read(snapshot, channel, payload_read, 4, 10, blob_op_complete, NULL);
4365 	poll_threads();
4366 	CU_ASSERT(g_bserrno == 0);
4367 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
4368 
4369 	ut_blob_close_and_delete(bs, blob);
4370 	ut_blob_close_and_delete(bs, snapshot);
4371 
4372 	spdk_bs_free_io_channel(channel);
4373 	poll_threads();
4374 	g_blob = NULL;
4375 	g_blobid = 0;
4376 }
4377 
4378 static void
4379 blob_snapshot_rw_iov(void)
4380 {
4381 	static const uint8_t zero[10 * 4096] = { 0 };
4382 	struct spdk_blob_store *bs = g_bs;
4383 	struct spdk_blob *blob, *snapshot;
4384 	struct spdk_io_channel *channel;
4385 	struct spdk_blob_opts opts;
4386 	spdk_blob_id blobid, snapshotid;
4387 	uint64_t free_clusters;
4388 	uint8_t payload_read[10 * 4096];
4389 	uint8_t payload_write[10 * 4096];
4390 	struct iovec iov_read[3];
4391 	struct iovec iov_write[3];
4392 
4393 	free_clusters = spdk_bs_free_cluster_count(bs);
4394 
4395 	channel = spdk_bs_alloc_io_channel(bs);
4396 	CU_ASSERT(channel != NULL);
4397 
4398 	ut_spdk_blob_opts_init(&opts);
4399 	opts.thin_provision = true;
4400 	opts.num_clusters = 5;
4401 
4402 	blob = ut_blob_create_and_open(bs, &opts);
4403 	blobid = spdk_blob_get_id(blob);
4404 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4405 
4406 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
4407 
4408 	/* Create snapshot from blob */
4409 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
4410 	poll_threads();
4411 	CU_ASSERT(g_bserrno == 0);
4412 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
4413 	snapshotid = g_blobid;
4414 
4415 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
4416 	poll_threads();
4417 	CU_ASSERT(g_bserrno == 0);
4418 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4419 	snapshot = g_blob;
4420 	CU_ASSERT(snapshot->data_ro == true);
4421 	CU_ASSERT(snapshot->md_ro == true);
4422 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5);
4423 
4424 	/* Payload should be all zeros from unallocated clusters */
4425 	memset(payload_read, 0xAA, sizeof(payload_read));
4426 	iov_read[0].iov_base = payload_read;
4427 	iov_read[0].iov_len = 3 * 4096;
4428 	iov_read[1].iov_base = payload_read + 3 * 4096;
4429 	iov_read[1].iov_len = 4 * 4096;
4430 	iov_read[2].iov_base = payload_read + 7 * 4096;
4431 	iov_read[2].iov_len = 3 * 4096;
4432 	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
4433 	poll_threads();
4434 	CU_ASSERT(g_bserrno == 0);
4435 	CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0);
4436 
4437 	memset(payload_write, 0xE5, sizeof(payload_write));
4438 	iov_write[0].iov_base = payload_write;
4439 	iov_write[0].iov_len = 1 * 4096;
4440 	iov_write[1].iov_base = payload_write + 1 * 4096;
4441 	iov_write[1].iov_len = 5 * 4096;
4442 	iov_write[2].iov_base = payload_write + 6 * 4096;
4443 	iov_write[2].iov_len = 4 * 4096;
4444 
4445 	spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
4446 	poll_threads();
4447 	CU_ASSERT(g_bserrno == 0);
4448 
4449 	memset(payload_read, 0xAA, sizeof(payload_read));
4450 	iov_read[0].iov_base = payload_read;
4451 	iov_read[0].iov_len = 3 * 4096;
4452 	iov_read[1].iov_base = payload_read + 3 * 4096;
4453 	iov_read[1].iov_len = 4 * 4096;
4454 	iov_read[2].iov_base = payload_read + 7 * 4096;
4455 	iov_read[2].iov_len = 3 * 4096;
4456 	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
4457 	poll_threads();
4458 	CU_ASSERT(g_bserrno == 0);
4459 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
4460 
4461 	spdk_bs_free_io_channel(channel);
4462 	poll_threads();
4463 
4464 	ut_blob_close_and_delete(bs, blob);
4465 	ut_blob_close_and_delete(bs, snapshot);
4466 }
4467 
4468 /**
4469  * Inflate / decouple parent rw unit tests.
4470  *
4471  * --------------
4472  * original blob:         0         1         2         3         4
4473  *                   ,---------+---------+---------+---------+---------.
4474  *         snapshot  |xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|    -    |
4475  *                   +---------+---------+---------+---------+---------+
4476  *         snapshot2 |    -    |yyyyyyyyy|    -    |yyyyyyyyy|    -    |
4477  *                   +---------+---------+---------+---------+---------+
4478  *         blob      |    -    |zzzzzzzzz|    -    |    -    |    -    |
4479  *                   '---------+---------+---------+---------+---------'
4480  *                   .         .         .         .         .         .
4481  * --------          .         .         .         .         .         .
4482  * inflate:          .         .         .         .         .         .
4483  *                   ,---------+---------+---------+---------+---------.
4484  *         blob      |xxxxxxxxx|zzzzzzzzz|xxxxxxxxx|yyyyyyyyy|000000000|
4485  *                   '---------+---------+---------+---------+---------'
4486  *
4487  *         NOTE: needs to allocate 4 clusters, thin provisioning removed, dependency
4488  *               on snapshot2 and snapshot removed .         .         .
4489  *                   .         .         .         .         .         .
4490  * ----------------  .         .         .         .         .         .
4491  * decouple parent:  .         .         .         .         .         .
4492  *                   ,---------+---------+---------+---------+---------.
4493  *         snapshot  |xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|    -    |
4494  *                   +---------+---------+---------+---------+---------+
4495  *         blob      |    -    |zzzzzzzzz|    -    |yyyyyyyyy|    -    |
4496  *                   '---------+---------+---------+---------+---------'
4497  *
4498  *         NOTE: needs to allocate 1 cluster, 3 clusters unallocated, dependency
4499  *               on snapshot2 removed and on snapshot still exists. Snapshot2
4500  *               should remain a clone of snapshot.
4501  */
4502 static void
4503 _blob_inflate_rw(bool decouple_parent)
4504 {
4505 	struct spdk_blob_store *bs = g_bs;
4506 	struct spdk_blob *blob, *snapshot, *snapshot2;
4507 	struct spdk_io_channel *channel;
4508 	struct spdk_blob_opts opts;
4509 	spdk_blob_id blobid, snapshotid, snapshot2id;
4510 	uint64_t free_clusters;
4511 	uint64_t cluster_size;
4512 
4513 	uint64_t payload_size;
4514 	uint8_t *payload_read;
4515 	uint8_t *payload_write;
4516 	uint8_t *payload_clone;
4517 
4518 	uint64_t pages_per_cluster;
4519 	uint64_t pages_per_payload;
4520 
4521 	int i;
4522 	spdk_blob_id ids[2];
4523 	size_t count;
4524 
4525 	free_clusters = spdk_bs_free_cluster_count(bs);
4526 	cluster_size = spdk_bs_get_cluster_size(bs);
4527 	pages_per_cluster = cluster_size / spdk_bs_get_page_size(bs);
4528 	pages_per_payload = pages_per_cluster * 5;
4529 
4530 	payload_size = cluster_size * 5;
4531 
4532 	payload_read = malloc(payload_size);
4533 	SPDK_CU_ASSERT_FATAL(payload_read != NULL);
4534 
4535 	payload_write = malloc(payload_size);
4536 	SPDK_CU_ASSERT_FATAL(payload_write != NULL);
4537 
4538 	payload_clone = malloc(payload_size);
4539 	SPDK_CU_ASSERT_FATAL(payload_clone != NULL);
4540 
4541 	channel = spdk_bs_alloc_io_channel(bs);
4542 	SPDK_CU_ASSERT_FATAL(channel != NULL);
4543 
4544 	/* Create blob */
4545 	ut_spdk_blob_opts_init(&opts);
4546 	opts.thin_provision = true;
4547 	opts.num_clusters = 5;
4548 
4549 	blob = ut_blob_create_and_open(bs, &opts);
4550 	blobid = spdk_blob_get_id(blob);
4551 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4552 
4553 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
4554 
4555 	/* 1) Initial read should return zeroed payload */
4556 	memset(payload_read, 0xFF, payload_size);
4557 	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload,
4558 			  blob_op_complete, NULL);
4559 	poll_threads();
4560 	CU_ASSERT(g_bserrno == 0);
4561 	CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size));
4562 
4563 	/* Fill whole blob with a pattern, except last cluster (to be sure it
4564 	 * isn't allocated) */
4565 	memset(payload_write, 0xE5, payload_size - cluster_size);
4566 	spdk_blob_io_write(blob, channel, payload_write, 0, pages_per_payload -
4567 			   pages_per_cluster, blob_op_complete, NULL);
4568 	poll_threads();
4569 	CU_ASSERT(g_bserrno == 0);
4570 	CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs));
4571 
4572 	/* 2) Create snapshot from blob (first level) */
4573 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
4574 	poll_threads();
4575 	CU_ASSERT(g_bserrno == 0);
4576 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
4577 	snapshotid = g_blobid;
4578 
4579 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
4580 	poll_threads();
4581 	CU_ASSERT(g_bserrno == 0);
4582 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4583 	snapshot = g_blob;
4584 	CU_ASSERT(snapshot->data_ro == true);
4585 	CU_ASSERT(snapshot->md_ro == true);
4586 
4587 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5);
4588 
4589 	/* Write every second cluster with a pattern.
4590 	 *
4591 	 * Last cluster shouldn't be written, to be sure that neither snapshot
4592 	 * nor clone allocates it.
4593 	 *
4594 	 * payload_clone stores the expected result of reading "blob" at this point and
4595 	 * is used only to check data consistency on the clone before and after
4596 	 * inflation. Initially we fill it with the backing snapshot's pattern
4597 	 * written before.
4598 	 */
4599 	memset(payload_clone, 0xE5, payload_size - cluster_size);
4600 	memset(payload_clone + payload_size - cluster_size, 0x00, cluster_size);
4601 	memset(payload_write, 0xAA, payload_size);
4602 	for (i = 1; i < 5; i += 2) {
4603 		spdk_blob_io_write(blob, channel, payload_write, i * pages_per_cluster,
4604 				   pages_per_cluster, blob_op_complete, NULL);
4605 		poll_threads();
4606 		CU_ASSERT(g_bserrno == 0);
4607 
4608 		/* Update expected result */
4609 		memcpy(payload_clone + (cluster_size * i), payload_write,
4610 		       cluster_size);
4611 	}
4612 	CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs));
4613 
4614 	/* Check data consistency on clone */
4615 	memset(payload_read, 0xFF, payload_size);
4616 	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload,
4617 			  blob_op_complete, NULL);
4618 	poll_threads();
4619 	CU_ASSERT(g_bserrno == 0);
4620 	CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0);
4621 
4622 	/* 3) Create second levels snapshot from blob */
4623 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
4624 	poll_threads();
4625 	CU_ASSERT(g_bserrno == 0);
4626 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
4627 	snapshot2id = g_blobid;
4628 
4629 	spdk_bs_open_blob(bs, snapshot2id, blob_op_with_handle_complete, NULL);
4630 	poll_threads();
4631 	CU_ASSERT(g_bserrno == 0);
4632 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4633 	snapshot2 = g_blob;
4634 	CU_ASSERT(snapshot2->data_ro == true);
4635 	CU_ASSERT(snapshot2->md_ro == true);
4636 
4637 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot2) == 5);
4638 
4639 	CU_ASSERT(snapshot2->parent_id == snapshotid);
4640 
4641 	/* Write one cluster on the top level blob. This cluster (1) covers an
4642 	 * already allocated cluster in snapshot2, so it shouldn't be inflated
4643 	 * at all */
4644 	spdk_blob_io_write(blob, channel, payload_write, pages_per_cluster,
4645 			   pages_per_cluster, blob_op_complete, NULL);
4646 	poll_threads();
4647 	CU_ASSERT(g_bserrno == 0);
4648 
4649 	/* Update expected result */
4650 	memcpy(payload_clone + cluster_size, payload_write, cluster_size);
4651 
4652 	/* Check data consistency on clone */
4653 	memset(payload_read, 0xFF, payload_size);
4654 	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload,
4655 			  blob_op_complete, NULL);
4656 	poll_threads();
4657 	CU_ASSERT(g_bserrno == 0);
4658 	CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0);
4659 
4660 
4661 	/* Close all blobs */
4662 	spdk_blob_close(blob, blob_op_complete, NULL);
4663 	poll_threads();
4664 	CU_ASSERT(g_bserrno == 0);
4665 
4666 	spdk_blob_close(snapshot2, blob_op_complete, NULL);
4667 	poll_threads();
4668 	CU_ASSERT(g_bserrno == 0);
4669 
4670 	spdk_blob_close(snapshot, blob_op_complete, NULL);
4671 	poll_threads();
4672 	CU_ASSERT(g_bserrno == 0);
4673 
4674 	/* Check snapshot-clone relations */
4675 	count = 2;
4676 	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);
4677 	CU_ASSERT(count == 1);
4678 	CU_ASSERT(ids[0] == snapshot2id);
4679 
4680 	count = 2;
4681 	CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0);
4682 	CU_ASSERT(count == 1);
4683 	CU_ASSERT(ids[0] == blobid);
4684 
4685 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshot2id);
4686 
4687 	free_clusters = spdk_bs_free_cluster_count(bs);
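	/* Re-sample the free cluster count so the checks below measure only the clusters
	 * allocated by inflate / decouple parent. */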
4688 	if (!decouple_parent) {
4689 		/* Do full blob inflation */
4690 		spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
4691 		poll_threads();
4692 		CU_ASSERT(g_bserrno == 0);
4693 
4694 		/* All clusters should be inflated (except the one already allocated
4695 		 * in the top level blob) */
4696 		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 4);
4697 
4698 		/* Check if relation tree updated correctly */
4699 		count = 2;
4700 		CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);
4701 
4702 		/* snapshotid has one clone */
4703 		CU_ASSERT(count == 1);
4704 		CU_ASSERT(ids[0] == snapshot2id);
4705 
4706 		/* snapshot2id has no clones */
4707 		count = 2;
4708 		CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0);
4709 		CU_ASSERT(count == 0);
4710 
4711 		CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID);
4712 	} else {
4713 		/* Decouple parent of blob */
4714 		spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
4715 		poll_threads();
4716 		CU_ASSERT(g_bserrno == 0);
4717 
4718 		/* Only one cluster from the parent should be inflated (the second one
4719 		 * is covered by a cluster already written, and thus allocated, on the
4720 		 * top level blob) */
4721 		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 1);
4722 
4723 		/* Check if relation tree updated correctly */
4724 		count = 2;
4725 		CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);
4726 
4727 		/* snapshotid have two clones now */
4728 		/* snapshotid has two clones now */
4729 		CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
4730 		CU_ASSERT(ids[0] == snapshot2id || ids[1] == snapshot2id);
4731 
4732 		/* snapshot2id has no clones */
4733 		count = 2;
4734 		CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0);
4735 		CU_ASSERT(count == 0);
4736 
4737 		CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
4738 	}
4739 
4740 	/* Try to delete snapshot2 (should pass) */
4741 	spdk_bs_delete_blob(bs, snapshot2id, blob_op_complete, NULL);
4742 	poll_threads();
4743 	CU_ASSERT(g_bserrno == 0);
4744 
4745 	/* Try to delete base snapshot */
4746 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
4747 	poll_threads();
4748 	CU_ASSERT(g_bserrno == 0);
4749 
4750 	/* Reopen blob after snapshot deletion */
4751 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
4752 	poll_threads();
4753 	CU_ASSERT(g_bserrno == 0);
4754 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4755 	blob = g_blob;
4756 
4757 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
4758 
4759 	/* Check data consistency on inflated blob */
4760 	memset(payload_read, 0xFF, payload_size);
4761 	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload,
4762 			  blob_op_complete, NULL);
4763 	poll_threads();
4764 	CU_ASSERT(g_bserrno == 0);
4765 	CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0);
4766 
4767 	spdk_bs_free_io_channel(channel);
4768 	poll_threads();
4769 
4770 	free(payload_read);
4771 	free(payload_write);
4772 	free(payload_clone);
4773 
4774 	ut_blob_close_and_delete(bs, blob);
4775 }
4776 
4777 static void
4778 blob_inflate_rw(void)
4779 {
4780 	_blob_inflate_rw(false);
4781 	_blob_inflate_rw(true);
4782 }
4783 
4784 /**
4785  * Snapshot-clones relation test
4786  *
4787  *         snapshot
4788  *            |
4789  *      +-----+-----+
4790  *      |           |
4791  *   blob(ro)   snapshot2
4792  *      |           |
4793  *   clone2      clone
4794  */
4795 static void
4796 blob_relations(void)
4797 {
4798 	struct spdk_blob_store *bs;
4799 	struct spdk_bs_dev *dev;
4800 	struct spdk_bs_opts bs_opts;
4801 	struct spdk_blob_opts opts;
4802 	struct spdk_blob *blob, *snapshot, *snapshot2, *clone, *clone2;
4803 	spdk_blob_id blobid, cloneid, snapshotid, cloneid2, snapshotid2;
4804 	int rc;
4805 	size_t count;
4806 	spdk_blob_id ids[10] = {};
4807 
4808 	dev = init_dev();
4809 	spdk_bs_opts_init(&bs_opts, sizeof(opts));
4810 	snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");
4811 
4812 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
4813 	poll_threads();
4814 	CU_ASSERT(g_bserrno == 0);
4815 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
4816 	bs = g_bs;
4817 
4818 	/* 1. Create blob with 10 clusters */
4819 
4820 	ut_spdk_blob_opts_init(&opts);
4821 	opts.num_clusters = 10;
4822 
4823 	blob = ut_blob_create_and_open(bs, &opts);
4824 	blobid = spdk_blob_get_id(blob);
4825 
4826 	CU_ASSERT(!spdk_blob_is_read_only(blob));
4827 	CU_ASSERT(!spdk_blob_is_snapshot(blob));
4828 	CU_ASSERT(!spdk_blob_is_clone(blob));
4829 	CU_ASSERT(!spdk_blob_is_thin_provisioned(blob));
4830 
4831 	/* blob should not have underlying snapshot nor clones */
4832 	CU_ASSERT(blob->parent_id == SPDK_BLOBID_INVALID);
4833 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID);
4834 	count = SPDK_COUNTOF(ids);
4835 	rc = spdk_blob_get_clones(bs, blobid, ids, &count);
4836 	CU_ASSERT(rc == 0);
4837 	CU_ASSERT(count == 0);
4838 
4839 
4840 	/* 2. Create snapshot */
4841 
4842 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
4843 	poll_threads();
4844 	CU_ASSERT(g_bserrno == 0);
4845 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
4846 	snapshotid = g_blobid;
4847 
4848 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
4849 	poll_threads();
4850 	CU_ASSERT(g_bserrno == 0);
4851 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4852 	snapshot = g_blob;
4853 
4854 	CU_ASSERT(spdk_blob_is_read_only(snapshot));
4855 	CU_ASSERT(spdk_blob_is_snapshot(snapshot));
4856 	CU_ASSERT(!spdk_blob_is_clone(snapshot));
4857 	CU_ASSERT(snapshot->parent_id == SPDK_BLOBID_INVALID);
4858 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid) == SPDK_BLOBID_INVALID);
4859 
4860 	/* Check if original blob is converted to the clone of snapshot */
4861 	CU_ASSERT(!spdk_blob_is_read_only(blob));
4862 	CU_ASSERT(!spdk_blob_is_snapshot(blob));
4863 	CU_ASSERT(spdk_blob_is_clone(blob));
4864 	CU_ASSERT(spdk_blob_is_thin_provisioned(blob));
4865 	CU_ASSERT(blob->parent_id == snapshotid);
4866 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
4867 
4868 	count = SPDK_COUNTOF(ids);
4869 	rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
4870 	CU_ASSERT(rc == 0);
4871 	CU_ASSERT(count == 1);
4872 	CU_ASSERT(ids[0] == blobid);
4873 
4874 
4875 	/* 3. Create clone from snapshot */
4876 
4877 	spdk_bs_create_clone(bs, snapshotid, NULL, blob_op_with_id_complete, NULL);
4878 	poll_threads();
4879 	CU_ASSERT(g_bserrno == 0);
4880 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
4881 	cloneid = g_blobid;
4882 
4883 	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
4884 	poll_threads();
4885 	CU_ASSERT(g_bserrno == 0);
4886 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4887 	clone = g_blob;
4888 
4889 	CU_ASSERT(!spdk_blob_is_read_only(clone));
4890 	CU_ASSERT(!spdk_blob_is_snapshot(clone));
4891 	CU_ASSERT(spdk_blob_is_clone(clone));
4892 	CU_ASSERT(spdk_blob_is_thin_provisioned(clone));
4893 	CU_ASSERT(clone->parent_id == snapshotid);
4894 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid);
4895 
4896 	count = SPDK_COUNTOF(ids);
4897 	rc = spdk_blob_get_clones(bs, cloneid, ids, &count);
4898 	CU_ASSERT(rc == 0);
4899 	CU_ASSERT(count == 0);
4900 
4901 	/* Check if clone is on the snapshot's list */
4902 	count = SPDK_COUNTOF(ids);
4903 	rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
4904 	CU_ASSERT(rc == 0);
4905 	CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
4906 	CU_ASSERT(ids[0] == cloneid || ids[1] == cloneid);
4907 
4908 
4909 	/* 4. Create snapshot of the clone */
4910 
4911 	spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL);
4912 	poll_threads();
4913 	CU_ASSERT(g_bserrno == 0);
4914 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
4915 	snapshotid2 = g_blobid;
4916 
4917 	spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL);
4918 	poll_threads();
4919 	CU_ASSERT(g_bserrno == 0);
4920 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4921 	snapshot2 = g_blob;
4922 
4923 	CU_ASSERT(spdk_blob_is_read_only(snapshot2));
4924 	CU_ASSERT(spdk_blob_is_snapshot(snapshot2));
4925 	CU_ASSERT(spdk_blob_is_clone(snapshot2));
4926 	CU_ASSERT(snapshot2->parent_id == snapshotid);
4927 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid);
4928 
4929 	/* Check if clone is converted to the clone of snapshot2 and snapshot2
4930 	 * is a child of snapshot */
4931 	CU_ASSERT(!spdk_blob_is_read_only(clone));
4932 	CU_ASSERT(!spdk_blob_is_snapshot(clone));
4933 	CU_ASSERT(spdk_blob_is_clone(clone));
4934 	CU_ASSERT(spdk_blob_is_thin_provisioned(clone));
4935 	CU_ASSERT(clone->parent_id == snapshotid2);
4936 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2);
4937 
4938 	count = SPDK_COUNTOF(ids);
4939 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
4940 	CU_ASSERT(rc == 0);
4941 	CU_ASSERT(count == 1);
4942 	CU_ASSERT(ids[0] == cloneid);
4943 
4944 
4945 	/* 5. Try to create clone from read only blob */
4946 
4947 	/* Mark blob as read only */
4948 	spdk_blob_set_read_only(blob);
4949 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
4950 	poll_threads();
4951 	CU_ASSERT(g_bserrno == 0);
4952 
4953 	/* Check if previously created blob is read only clone */
4954 	CU_ASSERT(spdk_blob_is_read_only(blob));
4955 	CU_ASSERT(!spdk_blob_is_snapshot(blob));
4956 	CU_ASSERT(spdk_blob_is_clone(blob));
4957 	CU_ASSERT(spdk_blob_is_thin_provisioned(blob));
4958 
4959 	/* Create clone from read only blob */
4960 	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
4961 	poll_threads();
4962 	CU_ASSERT(g_bserrno == 0);
4963 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
4964 	cloneid2 = g_blobid;
4965 
4966 	spdk_bs_open_blob(bs, cloneid2, blob_op_with_handle_complete, NULL);
4967 	poll_threads();
4968 	CU_ASSERT(g_bserrno == 0);
4969 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4970 	clone2 = g_blob;
4971 
4972 	CU_ASSERT(!spdk_blob_is_read_only(clone2));
4973 	CU_ASSERT(!spdk_blob_is_snapshot(clone2));
4974 	CU_ASSERT(spdk_blob_is_clone(clone2));
4975 	CU_ASSERT(spdk_blob_is_thin_provisioned(clone2));
4976 
4977 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid);
4978 
4979 	count = SPDK_COUNTOF(ids);
4980 	rc = spdk_blob_get_clones(bs, blobid, ids, &count);
4981 	CU_ASSERT(rc == 0);
4983 	CU_ASSERT(count == 1);
4984 	CU_ASSERT(ids[0] == cloneid2);
4985 
4986 	/* Close blobs */
4987 
4988 	spdk_blob_close(clone2, blob_op_complete, NULL);
4989 	poll_threads();
4990 	CU_ASSERT(g_bserrno == 0);
4991 
4992 	spdk_blob_close(blob, blob_op_complete, NULL);
4993 	poll_threads();
4994 	CU_ASSERT(g_bserrno == 0);
4995 
4996 	spdk_blob_close(clone, blob_op_complete, NULL);
4997 	poll_threads();
4998 	CU_ASSERT(g_bserrno == 0);
4999 
5000 	spdk_blob_close(snapshot, blob_op_complete, NULL);
5001 	poll_threads();
5002 	CU_ASSERT(g_bserrno == 0);
5003 
5004 	spdk_blob_close(snapshot2, blob_op_complete, NULL);
5005 	poll_threads();
5006 	CU_ASSERT(g_bserrno == 0);
5007 
5008 	/* Try to delete snapshot with more than 1 clone */
5009 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
5010 	poll_threads();
5011 	CU_ASSERT(g_bserrno != 0);
5012 
5013 	ut_bs_reload(&bs, &bs_opts);
5014 
5015 	/* NULL ids array should return number of clones in count */
5016 	count = SPDK_COUNTOF(ids);
5017 	rc = spdk_blob_get_clones(bs, snapshotid, NULL, &count);
5018 	CU_ASSERT(rc == -ENOMEM);
5019 	CU_ASSERT(count == 2);
5020 
5021 	/* Undersized ids array should return -ENOMEM and the required count */
5022 	count = 1;
5023 	rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
5024 	CU_ASSERT(rc == -ENOMEM);
5025 	CU_ASSERT(count == 2);
5026 
5027 
5028 	/* Verify structure of loaded blob store */
5029 
5030 	/* snapshot */
5031 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid) == SPDK_BLOBID_INVALID);
5032 
5033 	count = SPDK_COUNTOF(ids);
5034 	rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
5035 	CU_ASSERT(rc == 0);
5036 	CU_ASSERT(count == 2);
5037 	CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
5038 	CU_ASSERT(ids[0] == snapshotid2 || ids[1] == snapshotid2);
5039 
5040 	/* blob */
5041 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
5042 	count = SPDK_COUNTOF(ids);
5043 	rc = spdk_blob_get_clones(bs, blobid, ids, &count);
5044 	CU_ASSERT(rc == 0);
5045 	CU_ASSERT(count == 1);
5046 	CU_ASSERT(ids[0] == cloneid2);
5047 
5048 	/* clone */
5049 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2);
5050 	count = SPDK_COUNTOF(ids);
5051 	rc = spdk_blob_get_clones(bs, cloneid, ids, &count);
5052 	CU_ASSERT(rc == 0);
5053 	CU_ASSERT(count == 0);
5054 
5055 	/* snapshot2 */
5056 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid);
5057 	count = SPDK_COUNTOF(ids);
5058 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
5059 	CU_ASSERT(rc == 0);
5060 	CU_ASSERT(count == 1);
5061 	CU_ASSERT(ids[0] == cloneid);
5062 
5063 	/* clone2 */
5064 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid);
5065 	count = SPDK_COUNTOF(ids);
5066 	rc = spdk_blob_get_clones(bs, cloneid2, ids, &count);
5067 	CU_ASSERT(rc == 0);
5068 	CU_ASSERT(count == 0);
5069 
5070 	/* Try to delete a blob that the user should not be able to remove */
5071 
5072 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
5073 	poll_threads();
5074 	CU_ASSERT(g_bserrno != 0);
5075 
5076 	/* Remove all blobs */
5077 
5078 	spdk_bs_delete_blob(bs, cloneid, blob_op_complete, NULL);
5079 	poll_threads();
5080 	CU_ASSERT(g_bserrno == 0);
5081 
5082 	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
5083 	poll_threads();
5084 	CU_ASSERT(g_bserrno == 0);
5085 
5086 	spdk_bs_delete_blob(bs, cloneid2, blob_op_complete, NULL);
5087 	poll_threads();
5088 	CU_ASSERT(g_bserrno == 0);
5089 
5090 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
5091 	poll_threads();
5092 	CU_ASSERT(g_bserrno == 0);
5093 
5094 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
5095 	poll_threads();
5096 	CU_ASSERT(g_bserrno == 0);
5097 
5098 	spdk_bs_unload(bs, bs_op_complete, NULL);
5099 	poll_threads();
5100 	CU_ASSERT(g_bserrno == 0);
5101 
5102 	g_bs = NULL;
5103 }
5104 
5105 /**
5106  * Snapshot-clones relation test 2
5107  *
5108  *         snapshot1
5109  *            |
5110  *         snapshot2
5111  *            |
5112  *      +-----+-----+
5113  *      |           |
5114  *   blob(ro)   snapshot3
5115  *      |           |
5116  *      |       snapshot4
5117  *      |        |     |
5118  *   clone2   clone  clone3
5119  */
5120 static void
5121 blob_relations2(void)
5122 {
5123 	struct spdk_blob_store *bs;
5124 	struct spdk_bs_dev *dev;
5125 	struct spdk_bs_opts bs_opts;
5126 	struct spdk_blob_opts opts;
5127 	struct spdk_blob *blob, *snapshot1, *snapshot2, *snapshot3, *snapshot4, *clone, *clone2;
5128 	spdk_blob_id blobid, snapshotid1, snapshotid2, snapshotid3, snapshotid4, cloneid, cloneid2,
5129 		     cloneid3;
5130 	int rc;
5131 	size_t count;
5132 	spdk_blob_id ids[10] = {};
5133 
5134 	dev = init_dev();
5135 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
5136 	snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");
5137 
5138 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
5139 	poll_threads();
5140 	CU_ASSERT(g_bserrno == 0);
5141 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
5142 	bs = g_bs;
5143 
5144 	/* 1. Create blob with 10 clusters */
5145 
5146 	ut_spdk_blob_opts_init(&opts);
5147 	opts.num_clusters = 10;
5148 
5149 	blob = ut_blob_create_and_open(bs, &opts);
5150 	blobid = spdk_blob_get_id(blob);
5151 
5152 	/* 2. Create snapshot1 */
5153 
5154 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5155 	poll_threads();
5156 	CU_ASSERT(g_bserrno == 0);
5157 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5158 	snapshotid1 = g_blobid;
5159 
5160 	spdk_bs_open_blob(bs, snapshotid1, blob_op_with_handle_complete, NULL);
5161 	poll_threads();
5162 	CU_ASSERT(g_bserrno == 0);
5163 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5164 	snapshot1 = g_blob;
5165 
5166 	CU_ASSERT(snapshot1->parent_id == SPDK_BLOBID_INVALID);
5167 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid1) == SPDK_BLOBID_INVALID);
5168 
5172 	/* Check if blob is the clone of snapshot1 */
5173 	CU_ASSERT(blob->parent_id == snapshotid1);
5174 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid1);
5175 
5176 	count = SPDK_COUNTOF(ids);
5177 	rc = spdk_blob_get_clones(bs, snapshotid1, ids, &count);
5178 	CU_ASSERT(rc == 0);
5179 	CU_ASSERT(count == 1);
5180 	CU_ASSERT(ids[0] == blobid);
5181 
5182 	/* 3. Create another snapshot */
5183 
5184 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5185 	poll_threads();
5186 	CU_ASSERT(g_bserrno == 0);
5187 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5188 	snapshotid2 = g_blobid;
5189 
5190 	spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL);
5191 	poll_threads();
5192 	CU_ASSERT(g_bserrno == 0);
5193 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5194 	snapshot2 = g_blob;
5195 
5196 	CU_ASSERT(spdk_blob_is_clone(snapshot2));
5197 	CU_ASSERT(snapshot2->parent_id == snapshotid1);
5198 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid1);
5199 
5200 	/* Check if snapshot2 is the clone of snapshot1 and blob
5201 	 * is a child of snapshot2 */
5202 	CU_ASSERT(blob->parent_id == snapshotid2);
5203 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid2);
5204 
5205 	count = SPDK_COUNTOF(ids);
5206 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
5207 	CU_ASSERT(rc == 0);
5208 	CU_ASSERT(count == 1);
5209 	CU_ASSERT(ids[0] == blobid);
5210 
5211 	/* 4. Create clone from snapshot */
5212 
5213 	spdk_bs_create_clone(bs, snapshotid2, NULL, blob_op_with_id_complete, NULL);
5214 	poll_threads();
5215 	CU_ASSERT(g_bserrno == 0);
5216 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5217 	cloneid = g_blobid;
5218 
5219 	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
5220 	poll_threads();
5221 	CU_ASSERT(g_bserrno == 0);
5222 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5223 	clone = g_blob;
5224 
5225 	CU_ASSERT(clone->parent_id == snapshotid2);
5226 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2);
5227 
5228 	/* Check if clone is on the snapshot's list */
5229 	count = SPDK_COUNTOF(ids);
5230 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
5231 	CU_ASSERT(rc == 0);
5232 	CU_ASSERT(count == 2);
5233 	CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
5234 	CU_ASSERT(ids[0] == cloneid || ids[1] == cloneid);
5235 
5236 	/* 5. Create snapshot of the clone */
5237 
5238 	spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL);
5239 	poll_threads();
5240 	CU_ASSERT(g_bserrno == 0);
5241 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5242 	snapshotid3 = g_blobid;
5243 
5244 	spdk_bs_open_blob(bs, snapshotid3, blob_op_with_handle_complete, NULL);
5245 	poll_threads();
5246 	CU_ASSERT(g_bserrno == 0);
5247 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5248 	snapshot3 = g_blob;
5249 
5250 	CU_ASSERT(snapshot3->parent_id == snapshotid2);
5251 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid3) == snapshotid2);
5252 
5253 	/* Check if clone is converted to the clone of snapshot3 and snapshot3
5254 	 * is a child of snapshot2 */
5255 	CU_ASSERT(clone->parent_id == snapshotid3);
5256 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3);
5257 
5258 	count = SPDK_COUNTOF(ids);
5259 	rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count);
5260 	CU_ASSERT(rc == 0);
5261 	CU_ASSERT(count == 1);
5262 	CU_ASSERT(ids[0] == cloneid);
5263 
5264 	/* 6. Create another snapshot of the clone */
5265 
5266 	spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL);
5267 	poll_threads();
5268 	CU_ASSERT(g_bserrno == 0);
5269 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5270 	snapshotid4 = g_blobid;
5271 
5272 	spdk_bs_open_blob(bs, snapshotid4, blob_op_with_handle_complete, NULL);
5273 	poll_threads();
5274 	CU_ASSERT(g_bserrno == 0);
5275 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5276 	snapshot4 = g_blob;
5277 
5278 	CU_ASSERT(snapshot4->parent_id == snapshotid3);
5279 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid4) == snapshotid3);
5280 
5281 	/* Check if clone is converted to the clone of snapshot4 and snapshot4
5282 	 * is a child of snapshot3 */
5283 	CU_ASSERT(clone->parent_id == snapshotid4);
5284 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid4);
5285 
5286 	count = SPDK_COUNTOF(ids);
5287 	rc = spdk_blob_get_clones(bs, snapshotid4, ids, &count);
5288 	CU_ASSERT(rc == 0);
5289 	CU_ASSERT(count == 1);
5290 	CU_ASSERT(ids[0] == cloneid);
5291 
5292 	/* 7. Remove snapshot 4 */
5293 
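	/* Deleting a snapshot that has exactly one clone is allowed; the clone is
	 * re-linked to the deleted snapshot's parent, as verified below. */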
5294 	ut_blob_close_and_delete(bs, snapshot4);
5295 
5296 	/* Check if relations are back to state from before creating snapshot 4 */
5297 	CU_ASSERT(clone->parent_id == snapshotid3);
5298 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3);
5299 
5300 	count = SPDK_COUNTOF(ids);
5301 	rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count);
5302 	CU_ASSERT(rc == 0);
5303 	CU_ASSERT(count == 1);
5304 	CU_ASSERT(ids[0] == cloneid);
5305 
5306 	/* 8. Create second clone of snapshot 3 and try to remove snapshot 3 */
5307 
5308 	spdk_bs_create_clone(bs, snapshotid3, NULL, blob_op_with_id_complete, NULL);
5309 	poll_threads();
5310 	CU_ASSERT(g_bserrno == 0);
5311 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5312 	cloneid3 = g_blobid;
5313 
5314 	spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL);
5315 	poll_threads();
5316 	CU_ASSERT(g_bserrno != 0);
5317 
5318 	/* 9. Open snapshot 3 again and try to remove it while clone 3 is closed */
5319 
5320 	spdk_bs_open_blob(bs, snapshotid3, blob_op_with_handle_complete, NULL);
5321 	poll_threads();
5322 	CU_ASSERT(g_bserrno == 0);
5323 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5324 	snapshot3 = g_blob;
5325 
5326 	spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL);
5327 	poll_threads();
5328 	CU_ASSERT(g_bserrno != 0);
5329 
5330 	spdk_blob_close(snapshot3, blob_op_complete, NULL);
5331 	poll_threads();
5332 	CU_ASSERT(g_bserrno == 0);
5333 
5334 	spdk_bs_delete_blob(bs, cloneid3, blob_op_complete, NULL);
5335 	poll_threads();
5336 	CU_ASSERT(g_bserrno == 0);
5337 
5338 	/* 10. Remove snapshot 1 */
5339 
5340 	ut_blob_close_and_delete(bs, snapshot1);
5341 
5342 	/* After removing snapshot1, snapshot2 becomes the root (no parent) and keeps blob and snapshot3 as its clones */
5343 	CU_ASSERT(snapshot2->parent_id == SPDK_BLOBID_INVALID);
5344 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == SPDK_BLOBID_INVALID);
5345 
5346 	count = SPDK_COUNTOF(ids);
5347 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
5348 	CU_ASSERT(rc == 0);
5349 	CU_ASSERT(count == 2);
5350 	CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
5351 	CU_ASSERT(ids[0] == snapshotid3 || ids[1] == snapshotid3);
5352 
5353 	/* 11. Try to create clone from read only blob */
5354 
5355 	/* Mark blob as read only */
5356 	spdk_blob_set_read_only(blob);
5357 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
5358 	poll_threads();
5359 	CU_ASSERT(g_bserrno == 0);
5360 
5361 	/* Create clone from read only blob */
5362 	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5363 	poll_threads();
5364 	CU_ASSERT(g_bserrno == 0);
5365 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5366 	cloneid2 = g_blobid;
5367 
5368 	spdk_bs_open_blob(bs, cloneid2, blob_op_with_handle_complete, NULL);
5369 	poll_threads();
5370 	CU_ASSERT(g_bserrno == 0);
5371 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5372 	clone2 = g_blob;
5373 
5374 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid);
5375 
5376 	count = SPDK_COUNTOF(ids);
5377 	rc = spdk_blob_get_clones(bs, blobid, ids, &count);
5378 	CU_ASSERT(rc == 0);
5379 	CU_ASSERT(count == 1);
5380 	CU_ASSERT(ids[0] == cloneid2);
5381 
5382 	/* Close blobs */
5383 
5384 	spdk_blob_close(clone2, blob_op_complete, NULL);
5385 	poll_threads();
5386 	CU_ASSERT(g_bserrno == 0);
5387 
5388 	spdk_blob_close(blob, blob_op_complete, NULL);
5389 	poll_threads();
5390 	CU_ASSERT(g_bserrno == 0);
5391 
5392 	spdk_blob_close(clone, blob_op_complete, NULL);
5393 	poll_threads();
5394 	CU_ASSERT(g_bserrno == 0);
5395 
5396 	spdk_blob_close(snapshot2, blob_op_complete, NULL);
5397 	poll_threads();
5398 	CU_ASSERT(g_bserrno == 0);
5399 
5400 	spdk_blob_close(snapshot3, blob_op_complete, NULL);
5401 	poll_threads();
5402 	CU_ASSERT(g_bserrno == 0);
5403 
5404 	ut_bs_reload(&bs, &bs_opts);
5405 
5406 	/* Verify structure of loaded blob store */
5407 
5408 	/* snapshot2 */
5409 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == SPDK_BLOBID_INVALID);
5410 
5411 	count = SPDK_COUNTOF(ids);
5412 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
5413 	CU_ASSERT(rc == 0);
5414 	CU_ASSERT(count == 2);
5415 	CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
5416 	CU_ASSERT(ids[0] == snapshotid3 || ids[1] == snapshotid3);
5417 
5418 	/* blob */
5419 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid2);
5420 	count = SPDK_COUNTOF(ids);
5421 	rc = spdk_blob_get_clones(bs, blobid, ids, &count);
5422 	CU_ASSERT(rc == 0);
5423 	CU_ASSERT(count == 1);
5424 	CU_ASSERT(ids[0] == cloneid2);
5425 
5426 	/* clone */
5427 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3);
5428 	count = SPDK_COUNTOF(ids);
5429 	rc = spdk_blob_get_clones(bs, cloneid, ids, &count);
5430 	CU_ASSERT(rc == 0);
5431 	CU_ASSERT(count == 0);
5432 
5433 	/* snapshot3 */
5434 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid3) == snapshotid2);
5435 	count = SPDK_COUNTOF(ids);
5436 	rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count);
5437 	CU_ASSERT(rc == 0);
5438 	CU_ASSERT(count == 1);
5439 	CU_ASSERT(ids[0] == cloneid);
5440 
5441 	/* clone2 */
5442 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid);
5443 	count = SPDK_COUNTOF(ids);
5444 	rc = spdk_blob_get_clones(bs, cloneid2, ids, &count);
5445 	CU_ASSERT(rc == 0);
5446 	CU_ASSERT(count == 0);
5447 
5448 	/* Try to delete all blobs in the worst possible order */
5449 
5450 	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
5451 	poll_threads();
5452 	CU_ASSERT(g_bserrno != 0);
5453 
5454 	spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL);
5455 	poll_threads();
5456 	CU_ASSERT(g_bserrno == 0);
5457 
5458 	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
5459 	poll_threads();
5460 	CU_ASSERT(g_bserrno != 0);
5461 
5462 	spdk_bs_delete_blob(bs, cloneid, blob_op_complete, NULL);
5463 	poll_threads();
5464 	CU_ASSERT(g_bserrno == 0);
5465 
5466 	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
5467 	poll_threads();
5468 	CU_ASSERT(g_bserrno == 0);
5469 
5470 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
5471 	poll_threads();
5472 	CU_ASSERT(g_bserrno == 0);
5473 
5474 	spdk_bs_delete_blob(bs, cloneid2, blob_op_complete, NULL);
5475 	poll_threads();
5476 	CU_ASSERT(g_bserrno == 0);
5477 
5478 	spdk_bs_unload(bs, bs_op_complete, NULL);
5479 	poll_threads();
5480 	CU_ASSERT(g_bserrno == 0);
5481 
5482 	g_bs = NULL;
5483 }
5484 
5485 static void
5486 blobstore_clean_power_failure(void)
5487 {
5488 	struct spdk_blob_store *bs;
5489 	struct spdk_blob *blob;
5490 	struct spdk_power_failure_thresholds thresholds = {};
5491 	bool clean = false;
5492 	struct spdk_bs_super_block *super = (struct spdk_bs_super_block *)&g_dev_buffer[0];
5493 	struct spdk_bs_super_block super_copy = {};
5494 
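	/* Iterate with an increasing power-failure threshold: each pass allows one more
	 * device operation before the simulated power failure, so every failure point in
	 * the md sync path is exercised. The loop ends on the first pass that completes
	 * the sync without hitting a failure. */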
5495 	thresholds.general_threshold = 1;
5496 	while (!clean) {
5497 		/* Create bs and blob */
5498 		suite_blob_setup();
5499 		SPDK_CU_ASSERT_FATAL(g_bs != NULL);
5500 		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5501 		bs = g_bs;
5502 		blob = g_blob;
5503 
5504 		/* The super block should not change for the rest of this UT;
5505 		 * save a copy now and compare it later. */
5506 		memcpy(&super_copy, super, sizeof(struct spdk_bs_super_block));
5507 		SPDK_CU_ASSERT_FATAL(super->clean == 0);
5508 		SPDK_CU_ASSERT_FATAL(bs->clean == 0);
5509 
5510 		/* Force the bs and super block into a clean state and mark the blob
5511 		 * dirty so that the next md sync triggers a blob persist. */
5512 		blob->state = SPDK_BLOB_STATE_DIRTY;
5513 		bs->clean = 1;
5514 		super->clean = 1;
5515 		super->crc = blob_md_page_calc_crc(super);
5516 
5517 		g_bserrno = -1;
5518 		dev_set_power_failure_thresholds(thresholds);
5519 		spdk_blob_sync_md(blob, blob_op_complete, NULL);
5520 		poll_threads();
5521 		dev_reset_power_failure_event();
5522 
5523 		if (g_bserrno == 0) {
5524 			/* After successful md sync, both bs and super block
5525 			 * should be marked as not clean. */
5526 			SPDK_CU_ASSERT_FATAL(bs->clean == 0);
5527 			SPDK_CU_ASSERT_FATAL(super->clean == 0);
5528 			clean = true;
5529 		}
5530 
5531 		/* Depending on the point of failure, super block was either updated or not. */
5532 		super_copy.clean = super->clean;
5533 		super_copy.crc = blob_md_page_calc_crc(&super_copy);
5534 		/* Verify that the rest of the super block remained unchanged. */
5535 		SPDK_CU_ASSERT_FATAL(!memcmp(&super_copy, super, sizeof(struct spdk_bs_super_block)));
5536 
5537 		/* Delete blob and unload bs */
5538 		suite_blob_cleanup();
5539 
5540 		thresholds.general_threshold++;
5541 	}
5542 }
5543 
5544 static void
5545 blob_delete_snapshot_power_failure(void)
5546 {
5547 	struct spdk_bs_dev *dev;
5548 	struct spdk_blob_store *bs;
5549 	struct spdk_blob_opts opts;
5550 	struct spdk_blob *blob, *snapshot;
5551 	struct spdk_power_failure_thresholds thresholds = {};
5552 	spdk_blob_id blobid, snapshotid;
5553 	const void *value;
5554 	size_t value_len;
5555 	size_t count;
5556 	spdk_blob_id ids[3] = {};
5557 	int rc;
5558 	bool deleted = false;
5559 	int delete_snapshot_bserrno = -1;
5560 
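	/* Same escalating-threshold scheme as above: inject the simulated power failure
	 * at every possible point of the snapshot deletion, dirty-load the blobstore and
	 * verify the snapshot/clone relation stays consistent regardless of where the
	 * failure hit. Stop once the deletion completes without a failure. */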
5561 	thresholds.general_threshold = 1;
5562 	while (!deleted) {
5563 		dev = init_dev();
5564 
5565 		spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
5566 		poll_threads();
5567 		CU_ASSERT(g_bserrno == 0);
5568 		SPDK_CU_ASSERT_FATAL(g_bs != NULL);
5569 		bs = g_bs;
5570 
5571 		/* Create blob */
5572 		ut_spdk_blob_opts_init(&opts);
5573 		opts.num_clusters = 10;
5574 
5575 		spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
5576 		poll_threads();
5577 		CU_ASSERT(g_bserrno == 0);
5578 		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5579 		blobid = g_blobid;
5580 
5581 		/* Create snapshot */
5582 		spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5583 		poll_threads();
5584 		CU_ASSERT(g_bserrno == 0);
5585 		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5586 		snapshotid = g_blobid;
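		/* Cluster 0 holds blobstore metadata and the 10-cluster blob occupies the
		 * following clusters, so cluster bit 1 must be allocated while bit 11 must
		 * still be free. */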
5587 		SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1));
5588 		SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11));
5589 
5590 		dev_set_power_failure_thresholds(thresholds);
5591 
5592 		spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
5593 		poll_threads();
5594 		delete_snapshot_bserrno = g_bserrno;
5595 
5596 		/* Do not shut down cleanly. The assumption is that once snapshot deletion
5597 		 * reports success, the changes to both blobs are already persisted. */
5598 		dev_reset_power_failure_event();
5599 		ut_bs_dirty_load(&bs, NULL);
5600 
5601 		SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1));
5602 		SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11));
5603 
5604 		spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
5605 		poll_threads();
5606 		CU_ASSERT(g_bserrno == 0);
5607 		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5608 		blob = g_blob;
5609 		SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == true);
5610 
5611 		spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
5612 		poll_threads();
5613 
5614 		if (g_bserrno == 0) {
5615 			SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5616 			snapshot = g_blob;
5617 			CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
5618 			count = SPDK_COUNTOF(ids);
5619 			rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
5620 			CU_ASSERT(rc == 0);
5621 			CU_ASSERT(count == 1);
5622 			CU_ASSERT(ids[0] == blobid);
5623 			rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_PENDING_REMOVAL, &value, &value_len);
5624 			CU_ASSERT(rc != 0);
5625 			SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(snapshot) == false);
5626 
5627 			spdk_blob_close(snapshot, blob_op_complete, NULL);
5628 			poll_threads();
5629 			CU_ASSERT(g_bserrno == 0);
5630 		} else {
5631 			CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID);
5632 			/* The snapshot might have been left in an unrecoverable state, so it does not open.
5633 			 * The delete may still have applied further changes to the clone after that point.
5634 			 * Keep iterating until the snapshot is gone and the delete call reports success. */
5635 			if (delete_snapshot_bserrno == 0) {
5636 				deleted = true;
5637 			}
5638 		}
5639 
5640 		spdk_blob_close(blob, blob_op_complete, NULL);
5641 		poll_threads();
5642 		CU_ASSERT(g_bserrno == 0);
5643 
5644 		spdk_bs_unload(bs, bs_op_complete, NULL);
5645 		poll_threads();
5646 		CU_ASSERT(g_bserrno == 0);
5647 
5648 		thresholds.general_threshold++;
5649 	}
5650 }
5651 
5652 static void
5653 blob_create_snapshot_power_failure(void)
5654 {
5655 	struct spdk_blob_store *bs = g_bs;
5656 	struct spdk_bs_dev *dev;
5657 	struct spdk_blob_opts opts;
5658 	struct spdk_blob *blob, *snapshot;
5659 	struct spdk_power_failure_thresholds thresholds = {};
5660 	spdk_blob_id blobid, snapshotid;
5661 	const void *value;
5662 	size_t value_len;
5663 	size_t count;
5664 	spdk_blob_id ids[3] = {};
5665 	int rc;
5666 	bool created = false;
5667 	int create_snapshot_bserrno = -1;
5668 
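	/* As in the previous tests, escalate the failure threshold on every pass: inject
	 * the power failure at each step of snapshot creation, dirty-load the blobstore
	 * and check that either the snapshot exists and is fully linked to the blob, or
	 * the blob is left untouched. Stop once creation succeeds with no failure hit. */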
5669 	thresholds.general_threshold = 1;
5670 	while (!created) {
5671 		dev = init_dev();
5672 
5673 		spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
5674 		poll_threads();
5675 		CU_ASSERT(g_bserrno == 0);
5676 		SPDK_CU_ASSERT_FATAL(g_bs != NULL);
5677 		bs = g_bs;
5678 
5679 		/* Create blob */
5680 		ut_spdk_blob_opts_init(&opts);
5681 		opts.num_clusters = 10;
5682 
5683 		spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
5684 		poll_threads();
5685 		CU_ASSERT(g_bserrno == 0);
5686 		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5687 		blobid = g_blobid;
5688 		SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1));
5689 		SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11));
5690 
5691 		dev_set_power_failure_thresholds(thresholds);
5692 
5693 		/* Create snapshot */
5694 		spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5695 		poll_threads();
5696 		create_snapshot_bserrno = g_bserrno;
5697 		snapshotid = g_blobid;
5698 		SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1));
5699 		SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11));
5700 
5701 		/* Do not shut down cleanly. The assumption is that once snapshot creation
5702 		 * reports success, both blobs are power-fail safe. */
5703 		dev_reset_power_failure_event();
5704 		ut_bs_dirty_load(&bs, NULL);
5705 
5706 		SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1));
5707 		SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11));
5708 
5709 		spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
5710 		poll_threads();
5711 		CU_ASSERT(g_bserrno == 0);
5712 		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5713 		blob = g_blob;
5714 
5715 		if (snapshotid != SPDK_BLOBID_INVALID) {
5716 			spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
5717 			poll_threads();
5718 		}
5719 
5720 		if ((snapshotid != SPDK_BLOBID_INVALID) && (g_bserrno == 0)) {
5721 			SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5722 			snapshot = g_blob;
5723 			SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == true);
5724 			SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(snapshot) == false);
5725 			CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
5726 			count = SPDK_COUNTOF(ids);
5727 			rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
5728 			CU_ASSERT(rc == 0);
5729 			CU_ASSERT(count == 1);
5730 			CU_ASSERT(ids[0] == blobid);
5731 			rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_IN_PROGRESS, &value, &value_len);
5732 			CU_ASSERT(rc != 0);
5733 
5734 			spdk_blob_close(snapshot, blob_op_complete, NULL);
5735 			poll_threads();
5736 			CU_ASSERT(g_bserrno == 0);
5737 			if (create_snapshot_bserrno == 0) {
5738 				created = true;
5739 			}
5740 		} else {
5741 			CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID);
5742 			SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == false);
5743 		}
5744 
5745 		spdk_blob_close(blob, blob_op_complete, NULL);
5746 		poll_threads();
5747 		CU_ASSERT(g_bserrno == 0);
5748 
5749 		spdk_bs_unload(bs, bs_op_complete, NULL);
5750 		poll_threads();
5751 		CU_ASSERT(g_bserrno == 0);
5752 
5753 		thresholds.general_threshold++;
5754 	}
5755 }
5756 
5757 static void
5758 test_io_write(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
5759 {
5760 	uint8_t payload_ff[64 * 512];
5761 	uint8_t payload_aa[64 * 512];
5762 	uint8_t payload_00[64 * 512];
5763 	uint8_t *cluster0, *cluster1;
5764 
5765 	memset(payload_ff, 0xFF, sizeof(payload_ff));
5766 	memset(payload_aa, 0xAA, sizeof(payload_aa));
5767 	memset(payload_00, 0x00, sizeof(payload_00));
5768 
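	/* Offsets and lengths below are expressed in 512-byte io_units; with a cluster
	 * size of 4 pages (see blob_io_unit), one cluster spans 32 io_units and the
	 * payload buffers cover two full clusters (64 io_units). */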
5769 	/* Try to perform I/O with io unit = 512 */
5770 	spdk_blob_io_write(blob, channel, payload_ff, 0, 1, blob_op_complete, NULL);
5771 	poll_threads();
5772 	CU_ASSERT(g_bserrno == 0);
5773 
5774 	/* If thin provisioned is set cluster should be allocated now */
5775 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[0] != 0);
5776 	cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen];
5777 
5778 	/* Each character 0-F represents a single 512-byte io_unit filled with that value.
5779 	 * Pages are separated by '|'; a whole bracketed block [...] represents one cluster (4 pages). */
5780 	/* cluster0: [ F000 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
5781 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
5782 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 31 * 512) == 0);
5783 
5784 	/* Verify write with offset on first page */
5785 	spdk_blob_io_write(blob, channel, payload_ff, 2, 1, blob_op_complete, NULL);
5786 	poll_threads();
5787 	CU_ASSERT(g_bserrno == 0);
5788 
5789 	/* cluster0: [ F0F0 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
5790 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
5791 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
5792 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
5793 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
5794 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_00, 28 * 512) == 0);
5795 
5796 	/* Verify write with offset on first page */
5797 	spdk_blob_io_write(blob, channel, payload_ff, 4, 4, blob_op_complete, NULL);
5798 	poll_threads();
5799 
5800 	/* cluster0: [ F0F0 FFFF | 0000 0000 | 0000 0000 | 0000 0000 ] */
5801 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
5802 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
5803 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
5804 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
5805 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 4 * 512) == 0);
5806 	CU_ASSERT(memcmp(cluster0 + 8 * 512, payload_00, 24 * 512) == 0);
5807 
5808 	/* Verify write with offset on second page */
5809 	spdk_blob_io_write(blob, channel, payload_ff, 8, 4, blob_op_complete, NULL);
5810 	poll_threads();
5811 
5812 	/* cluster0: [ F0F0 FFFF | FFFF 0000 | 0000 0000 | 0000 0000 ] */
5813 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
5814 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
5815 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
5816 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
5817 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 8 * 512) == 0);
5818 	CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0);
5819 
5820 	/* Verify write across multiple pages */
5821 	spdk_blob_io_write(blob, channel, payload_aa, 4, 8, blob_op_complete, NULL);
5822 	poll_threads();
5823 
5824 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 0000 ] */
5825 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
5826 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
5827 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
5828 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
5829 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
5830 	CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0);
5831 
5832 	/* Verify write across multiple clusters */
5833 	spdk_blob_io_write(blob, channel, payload_ff, 28, 8, blob_op_complete, NULL);
5834 	poll_threads();
5835 
5836 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0);
5837 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
5838 
5839 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
5840 	 * cluster1: [ FFFF 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
5841 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
5842 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
5843 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
5844 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
5845 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
5846 	CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0);
5847 
5848 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0);
5849 	CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 28 * 512) == 0);
5850 
5851 	/* Verify write to second cluster */
5852 	spdk_blob_io_write(blob, channel, payload_ff, 32 + 12, 2, blob_op_complete, NULL);
5853 	poll_threads();
5854 
5855 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0);
5856 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
5857 
5858 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
5859 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] */
5860 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
5861 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
5862 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
5863 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
5864 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
5865 	CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0);
5866 
5867 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0);
5868 	CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 8 * 512) == 0);
5869 	CU_ASSERT(memcmp(cluster1 + 12 * 512, payload_ff, 2 * 512) == 0);
5870 	CU_ASSERT(memcmp(cluster1 + 14 * 512, payload_00, 18 * 512) == 0);
5871 }
5872 
5873 static void
5874 test_io_read(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
5875 {
5876 	uint8_t payload_read[64 * 512];
5877 	uint8_t payload_ff[64 * 512];
5878 	uint8_t payload_aa[64 * 512];
5879 	uint8_t payload_00[64 * 512];
5880 
5881 	memset(payload_ff, 0xFF, sizeof(payload_ff));
5882 	memset(payload_aa, 0xAA, sizeof(payload_aa));
5883 	memset(payload_00, 0x00, sizeof(payload_00));
5884 
5885 	/* Read only first io unit */
5886 	/* cluster0: [ (F)0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
5887 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
5888 	 * payload_read: F000 0000 | 0000 0000 ... */
5889 	memset(payload_read, 0x00, sizeof(payload_read));
5890 	spdk_blob_io_read(blob, channel, payload_read, 0, 1, blob_op_complete, NULL);
5891 	poll_threads();
5892 	CU_ASSERT(g_bserrno == 0);
5893 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
5894 	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 31 * 512) == 0);
5895 
5896 	/* Read four io_units starting from offset = 2
5897 	 * cluster0: [ F0(F0 AA)AA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
5898 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
5899 	 * payload_read: F0AA 0000 | 0000 0000 ... */
5900 
5901 	memset(payload_read, 0x00, sizeof(payload_read));
5902 	spdk_blob_io_read(blob, channel, payload_read, 2, 4, blob_op_complete, NULL);
5903 	poll_threads();
5904 	CU_ASSERT(g_bserrno == 0);
5905 
5906 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
5907 	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0);
5908 	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_aa, 512) == 0);
5909 	CU_ASSERT(memcmp(payload_read + 3 * 512, payload_aa, 512) == 0);
5910 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0);
5911 
5912 	/* Read eight io_units across multiple pages
5913 	 * cluster0: [ F0F0 (AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ]
5914 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
5915 	 * payload_read: AAAA AAAA | 0000 0000 ... */
5916 	memset(payload_read, 0x00, sizeof(payload_read));
5917 	spdk_blob_io_read(blob, channel, payload_read, 4, 8, blob_op_complete, NULL);
5918 	poll_threads();
5919 	CU_ASSERT(g_bserrno == 0);
5920 
5921 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_aa, 8 * 512) == 0);
5922 	CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0);
5923 
5924 	/* Read eight io_units across multiple clusters
5925 	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 (FFFF ]
5926 	 * cluster1: [ FFFF) 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
5927 	 * payload_read: FFFF FFFF | 0000 0000 ... */
5928 	memset(payload_read, 0x00, sizeof(payload_read));
5929 	spdk_blob_io_read(blob, channel, payload_read, 28, 8, blob_op_complete, NULL);
5930 	poll_threads();
5931 	CU_ASSERT(g_bserrno == 0);
5932 
5933 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 8 * 512) == 0);
5934 	CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0);
5935 
5936 	/* Read four io_units from second cluster
5937 	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
5938 	 * cluster1: [ FFFF 0000 | 00(00 FF)00 | 0000 0000 | 0000 0000 ]
5939 	 * payload_read: 00FF 0000 | 0000 0000 ... */
5940 	memset(payload_read, 0x00, sizeof(payload_read));
5941 	spdk_blob_io_read(blob, channel, payload_read, 32 + 10, 4, blob_op_complete, NULL);
5942 	poll_threads();
5943 	CU_ASSERT(g_bserrno == 0);
5944 
5945 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_00, 2 * 512) == 0);
5946 	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 2 * 512) == 0);
5947 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0);
5948 
5949 	/* Read second cluster
5950 	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
5951 	 * cluster1: [ (FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ]
5952 	 * payload_read: FFFF 0000 | 0000 FF00 ... */
5953 	memset(payload_read, 0x00, sizeof(payload_read));
5954 	spdk_blob_io_read(blob, channel, payload_read, 32, 32, blob_op_complete, NULL);
5955 	poll_threads();
5956 	CU_ASSERT(g_bserrno == 0);
5957 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 4 * 512) == 0);
5958 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 8 * 512) == 0);
5959 	CU_ASSERT(memcmp(payload_read + 12 * 512, payload_ff, 2 * 512) == 0);
5960 	CU_ASSERT(memcmp(payload_read + 14 * 512, payload_00, 18 * 512) == 0);
5961 
5962 	/* Read whole two clusters
5963 	 * cluster0: [ (F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
5964 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ] */
5965 	memset(payload_read, 0x00, sizeof(payload_read));
5966 	spdk_blob_io_read(blob, channel, payload_read, 0, 64, blob_op_complete, NULL);
5967 	poll_threads();
5968 	CU_ASSERT(g_bserrno == 0);
5969 
5970 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
5971 	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0);
5972 	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 512) == 0);
5973 	CU_ASSERT(memcmp(payload_read + 3 * 512, payload_00, 512) == 0);
5974 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_aa, 8 * 512) == 0);
5975 	CU_ASSERT(memcmp(payload_read + 28 * 512, payload_ff, 4 * 512) == 0);
5976 
5977 	CU_ASSERT(memcmp(payload_read + (32 + 0) * 512, payload_ff, 4 * 512) == 0);
5978 	CU_ASSERT(memcmp(payload_read + (32 + 4) * 512, payload_00, 8 * 512) == 0);
5979 	CU_ASSERT(memcmp(payload_read + (32 + 12) * 512, payload_ff, 2 * 512) == 0);
5980 	CU_ASSERT(memcmp(payload_read + (32 + 14) * 512, payload_00, 18 * 512) == 0);
5981 }
5982 
5983 
5984 static void
5985 test_io_unmap(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
5986 {
5987 	uint8_t payload_ff[64 * 512];
5988 	uint8_t payload_aa[64 * 512];
5989 	uint8_t payload_00[64 * 512];
5990 	uint8_t *cluster0, *cluster1;
5991 
5992 	memset(payload_ff, 0xFF, sizeof(payload_ff));
5993 	memset(payload_aa, 0xAA, sizeof(payload_aa));
5994 	memset(payload_00, 0x00, sizeof(payload_00));
5995 
5996 	cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen];
5997 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
5998 
5999 	/* Unmap */
6000 	spdk_blob_io_unmap(blob, channel, 0, 64, blob_op_complete, NULL);
6001 	poll_threads();
6002 
6003 	CU_ASSERT(g_bserrno == 0);
6004 
6005 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_00, 32 * 512) == 0);
6006 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_00, 32 * 512) == 0);
6007 }
6008 
6009 static void
6010 test_io_zeroes(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
6011 {
6012 	uint8_t payload_ff[64 * 512];
6013 	uint8_t payload_aa[64 * 512];
6014 	uint8_t payload_00[64 * 512];
6015 	uint8_t *cluster0, *cluster1;
6016 
6017 	memset(payload_ff, 0xFF, sizeof(payload_ff));
6018 	memset(payload_aa, 0xAA, sizeof(payload_aa));
6019 	memset(payload_00, 0x00, sizeof(payload_00));
6020 
6021 	cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen];
6022 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
6023 
6024 	/* Write zeroes */
6025 	spdk_blob_io_write_zeroes(blob, channel, 0, 64, blob_op_complete, NULL);
6026 	poll_threads();
6027 
6028 	CU_ASSERT(g_bserrno == 0);
6029 
6030 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_00, 32 * 512) == 0);
6031 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_00, 32 * 512) == 0);
6032 }
6033 
6034 
6035 static void
6036 test_iov_write(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
6037 {
6038 	uint8_t payload_ff[64 * 512];
6039 	uint8_t payload_aa[64 * 512];
6040 	uint8_t payload_00[64 * 512];
6041 	uint8_t *cluster0, *cluster1;
6042 	struct iovec iov[4];
6043 
6044 	memset(payload_ff, 0xFF, sizeof(payload_ff));
6045 	memset(payload_aa, 0xAA, sizeof(payload_aa));
6046 	memset(payload_00, 0x00, sizeof(payload_00));
6047 
6048 	/* Try to perform I/O with io unit = 512 */
6049 	iov[0].iov_base = payload_ff;
6050 	iov[0].iov_len = 1 * 512;
6051 	spdk_blob_io_writev(blob, channel, iov, 1, 0, 1, blob_op_complete, NULL);
6052 	poll_threads();
6053 	CU_ASSERT(g_bserrno == 0);
6054 
6055 	/* If thin provisioned is set cluster should be allocated now */
6056 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[0] != 0);
6057 	cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen];
6058 
6059 	/* Each character 0-F represents a single 512-byte io_unit filled with that value.
6060 	 * Pages are separated by '|'; a whole bracketed block [...] represents one cluster (4 pages). */
6061 	/* cluster0: [ F000 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
6062 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6063 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 31 * 512) == 0);
6064 
6065 	/* Verify write with offset on first page */
6066 	iov[0].iov_base = payload_ff;
6067 	iov[0].iov_len = 1 * 512;
6068 	spdk_blob_io_writev(blob, channel, iov, 1, 2, 1, blob_op_complete, NULL);
6069 	poll_threads();
6070 	CU_ASSERT(g_bserrno == 0);
6071 
6072 	/* cluster0: [ F0F0 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
6073 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6074 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6075 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6076 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6077 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_00, 28 * 512) == 0);
6078 
6079 	/* Verify write with offset on first page */
6080 	iov[0].iov_base = payload_ff;
6081 	iov[0].iov_len = 4 * 512;
6082 	spdk_blob_io_writev(blob, channel, iov, 1, 4, 4, blob_op_complete, NULL);
6083 	poll_threads();
6084 
6085 	/* cluster0: [ F0F0 FFFF | 0000 0000 | 0000 0000 | 0000 0000 ] */
6086 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6087 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6088 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6089 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6090 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 4 * 512) == 0);
6091 	CU_ASSERT(memcmp(cluster0 + 8 * 512, payload_00, 24 * 512) == 0);
6092 
6093 	/* Verify write with offset on second page */
6094 	iov[0].iov_base = payload_ff;
6095 	iov[0].iov_len = 4 * 512;
6096 	spdk_blob_io_writev(blob, channel, iov, 1, 8, 4, blob_op_complete, NULL);
6097 	poll_threads();
6098 
6099 	/* cluster0: [ F0F0 FFFF | FFFF 0000 | 0000 0000 | 0000 0000 ] */
6100 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6101 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6102 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6103 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6104 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 8 * 512) == 0);
6105 	CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0);
6106 
6107 	/* Verify write across multiple pages */
6108 	iov[0].iov_base = payload_aa;
6109 	iov[0].iov_len = 8 * 512;
6110 	spdk_blob_io_writev(blob, channel, iov, 1, 4, 8, blob_op_complete, NULL);
6111 	poll_threads();
6112 
6113 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 0000 ] */
6114 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6115 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6116 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6117 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6118 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
6119 	CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0);
6120 
6121 	/* Verify write across multiple clusters */
6122 
6123 	iov[0].iov_base = payload_ff;
6124 	iov[0].iov_len = 8 * 512;
6125 	spdk_blob_io_writev(blob, channel, iov, 1, 28, 8, blob_op_complete, NULL);
6126 	poll_threads();
6127 
6128 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0);
6129 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
6130 
6131 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6132 	 * cluster1: [ FFFF 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
6133 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6134 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6135 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6136 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6137 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
6138 	CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 16 * 512) == 0);
6139 	CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0);
6140 
6141 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0);
6142 	CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 28 * 512) == 0);
6143 
6144 	/* Verify write to second cluster */
6145 
6146 	iov[0].iov_base = payload_ff;
6147 	iov[0].iov_len = 2 * 512;
6148 	spdk_blob_io_writev(blob, channel, iov, 1, 32 + 12, 2, blob_op_complete, NULL);
6149 	poll_threads();
6150 
6151 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0);
6152 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
6153 
6154 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6155 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] */
6156 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6157 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6158 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6159 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6160 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
6161 	CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0);
6162 
6163 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0);
6164 	CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 8 * 512) == 0);
6165 	CU_ASSERT(memcmp(cluster1 + 12 * 512, payload_ff, 2 * 512) == 0);
6166 	CU_ASSERT(memcmp(cluster1 + 14 * 512, payload_00, 18 * 512) == 0);
6167 }
6168 
6169 static void
6170 test_iov_read(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
6171 {
6172 	uint8_t payload_read[64 * 512];
6173 	uint8_t payload_ff[64 * 512];
6174 	uint8_t payload_aa[64 * 512];
6175 	uint8_t payload_00[64 * 512];
6176 	struct iovec iov[4];
6177 
6178 	memset(payload_ff, 0xFF, sizeof(payload_ff));
6179 	memset(payload_aa, 0xAA, sizeof(payload_aa));
6180 	memset(payload_00, 0x00, sizeof(payload_00));
6181 
6182 	/* Read only first io unit */
6183 	/* cluster0: [ (F)0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6184 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
6185 	 * payload_read: F000 0000 | 0000 0000 ... */
6186 	memset(payload_read, 0x00, sizeof(payload_read));
6187 	iov[0].iov_base = payload_read;
6188 	iov[0].iov_len = 1 * 512;
6189 	spdk_blob_io_readv(blob, channel, iov, 1, 0, 1, blob_op_complete, NULL);
6190 	poll_threads();
6191 
6192 	CU_ASSERT(g_bserrno == 0);
6193 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
6194 	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 31 * 512) == 0);
6195 
6196 	/* Read four io_units starting from offset = 2
6197 	 * cluster0: [ F0(F0 AA)AA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6198 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
6199 	 * payload_read: F0AA 0000 | 0000 0000 ... */
6200 
6201 	memset(payload_read, 0x00, sizeof(payload_read));
6202 	iov[0].iov_base = payload_read;
6203 	iov[0].iov_len = 4 * 512;
6204 	spdk_blob_io_readv(blob, channel, iov, 1, 2, 4, blob_op_complete, NULL);
6205 	poll_threads();
6206 	CU_ASSERT(g_bserrno == 0);
6207 
6208 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
6209 	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0);
6210 	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_aa, 512) == 0);
6211 	CU_ASSERT(memcmp(payload_read + 3 * 512, payload_aa, 512) == 0);
6212 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0);
6213 
6214 	/* Read eight io_units across multiple pages
6215 	 * cluster0: [ F0F0 (AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ]
6216 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
6217 	 * payload_read: AAAA AAAA | 0000 0000 ... */
6218 	memset(payload_read, 0x00, sizeof(payload_read));
6219 	iov[0].iov_base = payload_read;
6220 	iov[0].iov_len = 4 * 512;
6221 	iov[1].iov_base = payload_read + 4 * 512;
6222 	iov[1].iov_len = 4 * 512;
6223 	spdk_blob_io_readv(blob, channel, iov, 2, 4, 8, blob_op_complete, NULL);
6224 	poll_threads();
6225 	CU_ASSERT(g_bserrno == 0);
6226 
6227 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_aa, 8 * 512) == 0);
6228 	CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0);
6229 
6230 	/* Read eight io_units across multiple clusters
6231 	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 (FFFF ]
6232 	 * cluster1: [ FFFF) 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
6233 	 * payload_read: FFFF FFFF | 0000 0000 ... */
6234 	memset(payload_read, 0x00, sizeof(payload_read));
6235 	iov[0].iov_base = payload_read;
6236 	iov[0].iov_len = 2 * 512;
6237 	iov[1].iov_base = payload_read + 2 * 512;
6238 	iov[1].iov_len = 2 * 512;
6239 	iov[2].iov_base = payload_read + 4 * 512;
6240 	iov[2].iov_len = 2 * 512;
6241 	iov[3].iov_base = payload_read + 6 * 512;
6242 	iov[3].iov_len = 2 * 512;
6243 	spdk_blob_io_readv(blob, channel, iov, 4, 28, 8, blob_op_complete, NULL);
6244 	poll_threads();
6245 	CU_ASSERT(g_bserrno == 0);
6246 
6247 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 8 * 512) == 0);
6248 	CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0);
6249 
6250 	/* Read four io_units from second cluster
6251 	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6252 	 * cluster1: [ FFFF 0000 | 00(00 FF)00 | 0000 0000 | 0000 0000 ]
6253 	 * payload_read: 00FF 0000 | 0000 0000 ... */
6254 	memset(payload_read, 0x00, sizeof(payload_read));
6255 	iov[0].iov_base = payload_read;
6256 	iov[0].iov_len = 1 * 512;
6257 	iov[1].iov_base = payload_read + 1 * 512;
6258 	iov[1].iov_len = 3 * 512;
6259 	spdk_blob_io_readv(blob, channel, iov, 2, 32 + 10, 4, blob_op_complete, NULL);
6260 	poll_threads();
6261 	CU_ASSERT(g_bserrno == 0);
6262 
6263 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_00, 2 * 512) == 0);
6264 	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 2 * 512) == 0);
6265 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0);
6266 
6267 	/* Read second cluster
6268 	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6269 	 * cluster1: [ (FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ]
6270 	 * payload_read: FFFF 0000 | 0000 FF00 ... */
6271 	memset(payload_read, 0x00, sizeof(payload_read));
6272 	iov[0].iov_base = payload_read;
6273 	iov[0].iov_len = 1 * 512;
6274 	iov[1].iov_base = payload_read + 1 * 512;
6275 	iov[1].iov_len = 2 * 512;
6276 	iov[2].iov_base = payload_read + 3 * 512;
6277 	iov[2].iov_len = 4 * 512;
6278 	iov[3].iov_base = payload_read + 7 * 512;
6279 	iov[3].iov_len = 25 * 512;
6280 	spdk_blob_io_readv(blob, channel, iov, 4, 32, 32, blob_op_complete, NULL);
6281 	poll_threads();
6282 	CU_ASSERT(g_bserrno == 0);
6283 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 4 * 512) == 0);
6284 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 8 * 512) == 0);
6285 	CU_ASSERT(memcmp(payload_read + 12 * 512, payload_ff, 2 * 512) == 0);
6286 	CU_ASSERT(memcmp(payload_read + 14 * 512, payload_00, 18 * 512) == 0);
6287 
6288 	/* Read whole two clusters
6289 	 * cluster0: [ (F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6290 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ] */
6291 	memset(payload_read, 0x00, sizeof(payload_read));
6292 	iov[0].iov_base = payload_read;
6293 	iov[0].iov_len = 1 * 512;
6294 	iov[1].iov_base = payload_read + 1 * 512;
6295 	iov[1].iov_len = 8 * 512;
6296 	iov[2].iov_base = payload_read + 9 * 512;
6297 	iov[2].iov_len = 16 * 512;
6298 	iov[3].iov_base = payload_read + 25 * 512;
6299 	iov[3].iov_len = 39 * 512;
6300 	spdk_blob_io_readv(blob, channel, iov, 4, 0, 64, blob_op_complete, NULL);
6301 	poll_threads();
6302 	CU_ASSERT(g_bserrno == 0);
6303 
6304 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
6305 	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0);
6306 	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 512) == 0);
6307 	CU_ASSERT(memcmp(payload_read + 3 * 512, payload_00, 512) == 0);
6308 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_aa, 8 * 512) == 0);
6309 	CU_ASSERT(memcmp(payload_read + 28 * 512, payload_ff, 4 * 512) == 0);
6310 
6311 	CU_ASSERT(memcmp(payload_read + (32 + 0) * 512, payload_ff, 4 * 512) == 0);
6312 	CU_ASSERT(memcmp(payload_read + (32 + 4) * 512, payload_00, 8 * 512) == 0);
6313 	CU_ASSERT(memcmp(payload_read + (32 + 12) * 512, payload_ff, 2 * 512) == 0);
6314 	CU_ASSERT(memcmp(payload_read + (32 + 14) * 512, payload_00, 18 * 512) == 0);
6315 }
6316 
6317 static void
6318 blob_io_unit(void)
6319 {
6320 	struct spdk_bs_opts bsopts;
6321 	struct spdk_blob_opts opts;
6322 	struct spdk_blob_store *bs;
6323 	struct spdk_bs_dev *dev;
6324 	struct spdk_blob *blob, *snapshot, *clone;
6325 	spdk_blob_id blobid;
6326 	struct spdk_io_channel *channel;
6327 
6328 	/* Create dev with 512 bytes io unit size */
6329 
6330 	spdk_bs_opts_init(&bsopts, sizeof(bsopts));
6331 	bsopts.cluster_sz = SPDK_BS_PAGE_SIZE * 4;	/* 4 pages * 8 io_units/page = 32 io_units per cluster */
6332 	snprintf(bsopts.bstype.bstype, sizeof(bsopts.bstype.bstype), "TESTTYPE");
6333 
6334 	/* Create the dev with a 512-byte block size */
6335 	dev = init_dev();
6336 	dev->blocklen = 512;
6337 	dev->blockcnt =  DEV_BUFFER_SIZE / dev->blocklen;
6338 
6339 	/* Initialize a new blob store */
6340 	spdk_bs_init(dev, &bsopts, bs_op_with_handle_complete, NULL);
6341 	poll_threads();
6342 	CU_ASSERT(g_bserrno == 0);
6343 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
6344 	bs = g_bs;
6345 
6346 	CU_ASSERT(spdk_bs_get_io_unit_size(bs) == 512);
6347 	channel = spdk_bs_alloc_io_channel(bs);
6348 
6349 	/* Create thick provisioned blob */
6350 	ut_spdk_blob_opts_init(&opts);
6351 	opts.thin_provision = false;
6352 	opts.num_clusters = 32;
6353 
6354 	blob = ut_blob_create_and_open(bs, &opts);
6355 	blobid = spdk_blob_get_id(blob);
6356 
6357 	test_io_write(dev, blob, channel);
6358 	test_io_read(dev, blob, channel);
6359 	test_io_zeroes(dev, blob, channel);
6360 
6361 	test_iov_write(dev, blob, channel);
6362 	test_iov_read(dev, blob, channel);
6363 
6364 	test_io_unmap(dev, blob, channel);
6365 
6366 	spdk_blob_close(blob, blob_op_complete, NULL);
6367 	poll_threads();
6368 	CU_ASSERT(g_bserrno == 0);
6369 	blob = NULL;
6370 	g_blob = NULL;
6371 
6372 	/* Create thin provisioned blob */
6373 
6374 	ut_spdk_blob_opts_init(&opts);
6375 	opts.thin_provision = true;
6376 	opts.num_clusters = 32;
6377 
6378 	blob = ut_blob_create_and_open(bs, &opts);
6379 	blobid = spdk_blob_get_id(blob);
6380 
6381 	test_io_write(dev, blob, channel);
6382 	test_io_read(dev, blob, channel);
6383 
6384 	test_io_zeroes(dev, blob, channel);
6385 
6386 	test_iov_write(dev, blob, channel);
6387 	test_iov_read(dev, blob, channel);
6388 
6389 	/* Create snapshot */
6390 
6391 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
6392 	poll_threads();
6393 	CU_ASSERT(g_bserrno == 0);
6394 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
6395 	blobid = g_blobid;
6396 
6397 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
6398 	poll_threads();
6399 	CU_ASSERT(g_bserrno == 0);
6400 	CU_ASSERT(g_blob != NULL);
6401 	snapshot = g_blob;
6402 
6403 	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
6404 	poll_threads();
6405 	CU_ASSERT(g_bserrno == 0);
6406 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
6407 	blobid = g_blobid;
6408 
6409 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
6410 	poll_threads();
6411 	CU_ASSERT(g_bserrno == 0);
6412 	CU_ASSERT(g_blob != NULL);
6413 	clone = g_blob;
6414 
6415 	test_io_read(dev, blob, channel);
6416 	test_io_read(dev, snapshot, channel);
6417 	test_io_read(dev, clone, channel);
6418 
6419 	test_iov_read(dev, blob, channel);
6420 	test_iov_read(dev, snapshot, channel);
6421 	test_iov_read(dev, clone, channel);
6422 
6423 	/* Inflate clone */
6424 
6425 	spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
6426 	poll_threads();
6427 
6428 	CU_ASSERT(g_bserrno == 0);
6429 
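	/* After inflation the clone owns all of its clusters, so it can be read, unmapped, and
	 * rewritten independently of the snapshot it was created from. */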
6430 	test_io_read(dev, clone, channel);
6431 
6432 	test_io_unmap(dev, clone, channel);
6433 
6434 	test_iov_write(dev, clone, channel);
6435 	test_iov_read(dev, clone, channel);
6436 
6437 	spdk_blob_close(blob, blob_op_complete, NULL);
6438 	spdk_blob_close(snapshot, blob_op_complete, NULL);
6439 	spdk_blob_close(clone, blob_op_complete, NULL);
6440 	poll_threads();
6441 	CU_ASSERT(g_bserrno == 0);
6442 	blob = NULL;
6443 	g_blob = NULL;
6444 
6445 	spdk_bs_free_io_channel(channel);
6446 	poll_threads();
6447 
6448 	/* Unload the blob store */
6449 	spdk_bs_unload(bs, bs_op_complete, NULL);
6450 	poll_threads();
6451 	CU_ASSERT(g_bserrno == 0);
6452 	g_bs = NULL;
6453 	g_blob = NULL;
6454 	g_blobid = 0;
6455 }
6456 
6457 static void
6458 blob_io_unit_compatibility(void)
6459 {
6460 	struct spdk_bs_opts bsopts;
6461 	struct spdk_blob_store *bs;
6462 	struct spdk_bs_dev *dev;
6463 	struct spdk_bs_super_block *super;
6464 
6465 	/* Create a dev with a 512-byte io unit size */
6466 
6467 	spdk_bs_opts_init(&bsopts, sizeof(bsopts));
6468 	bsopts.cluster_sz = SPDK_BS_PAGE_SIZE * 4;	/* 4 pages * 8 io_units per page = 32 io_units per cluster */
6469 	snprintf(bsopts.bstype.bstype, sizeof(bsopts.bstype.bstype), "TESTTYPE");
6470 
6471 	/* Set up the dev with a 512-byte block size to get a 512-byte io_unit */
6472 	dev = init_dev();
6473 	dev->blocklen = 512;
6474 	dev->blockcnt =  DEV_BUFFER_SIZE / dev->blocklen;
6475 
6476 	/* Initialize a new blob store */
6477 	spdk_bs_init(dev, &bsopts, bs_op_with_handle_complete, NULL);
6478 	poll_threads();
6479 	CU_ASSERT(g_bserrno == 0);
6480 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
6481 	bs = g_bs;
6482 
6483 	CU_ASSERT(spdk_bs_get_io_unit_size(bs) == 512);
6484 
6485 	/* Unload the blob store */
6486 	spdk_bs_unload(bs, bs_op_complete, NULL);
6487 	poll_threads();
6488 	CU_ASSERT(g_bserrno == 0);
6489 
6490 	/* Modify the super block to look like one written by an older version (io_unit_size unset).
6491 	 * Check that the loaded io unit size then falls back to SPDK_BS_PAGE_SIZE */
6492 	super = (struct spdk_bs_super_block *)&g_dev_buffer[0];
6493 	super->io_unit_size = 0;
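	/* Recompute the super block CRC so the modified super block still passes validation on load */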
6494 	super->crc = blob_md_page_calc_crc(super);
6495 
6496 	dev = init_dev();
6497 	dev->blocklen = 512;
6498 	dev->blockcnt =  DEV_BUFFER_SIZE / dev->blocklen;
6499 
6500 	spdk_bs_load(dev, &bsopts, bs_op_with_handle_complete, NULL);
6501 	poll_threads();
6502 	CU_ASSERT(g_bserrno == 0);
6503 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
6504 	bs = g_bs;
6505 
6506 	CU_ASSERT(spdk_bs_get_io_unit_size(bs) == SPDK_BS_PAGE_SIZE);
6507 
6508 	/* Unload the blob store */
6509 	spdk_bs_unload(bs, bs_op_complete, NULL);
6510 	poll_threads();
6511 	CU_ASSERT(g_bserrno == 0);
6512 
6513 	g_bs = NULL;
6514 	g_blob = NULL;
6515 	g_blobid = 0;
6516 }
6517 
6518 static void
6519 first_sync_complete(void *cb_arg, int bserrno)
6520 {
6521 	struct spdk_blob *blob = cb_arg;
6522 	int rc;
6523 
6524 	CU_ASSERT(bserrno == 0);
6525 	rc = spdk_blob_set_xattr(blob, "sync", "second", strlen("second") + 1);
6526 	CU_ASSERT(rc == 0);
6527 	CU_ASSERT(g_bserrno == -1);
6528 
6529 	/* Keep g_bserrno at -1; only the
6530 	 * second sync completion should set it to 0. */
6531 }
6532 
6533 static void
6534 second_sync_complete(void *cb_arg, int bserrno)
6535 {
6536 	struct spdk_blob *blob = cb_arg;
6537 	const void *value;
6538 	size_t value_len;
6539 	int rc;
6540 
6541 	CU_ASSERT(bserrno == 0);
6542 
6543 	/* Verify that the first sync completion had a chance to execute */
6544 	rc = spdk_blob_get_xattr_value(blob, "sync", &value, &value_len);
6545 	CU_ASSERT(rc == 0);
6546 	SPDK_CU_ASSERT_FATAL(value != NULL);
6547 	CU_ASSERT(value_len == strlen("second") + 1);
6548 	CU_ASSERT_NSTRING_EQUAL_FATAL(value, "second", value_len);
6549 
6550 	CU_ASSERT(g_bserrno == -1);
6551 	g_bserrno = bserrno;
6552 }
6553 
6554 static void
6555 blob_simultaneous_operations(void)
6556 {
6557 	struct spdk_blob_store *bs = g_bs;
6558 	struct spdk_blob_opts opts;
6559 	struct spdk_blob *blob, *snapshot;
6560 	spdk_blob_id blobid, snapshotid;
6561 	struct spdk_io_channel *channel;
6562 	int rc;
6563 
6564 	channel = spdk_bs_alloc_io_channel(bs);
6565 	SPDK_CU_ASSERT_FATAL(channel != NULL);
6566 
6567 	ut_spdk_blob_opts_init(&opts);
6568 	opts.num_clusters = 10;
6569 
6570 	blob = ut_blob_create_and_open(bs, &opts);
6571 	blobid = spdk_blob_get_id(blob);
6572 
6573 	/* Create a snapshot and try to delete the blob at the same time:
6574 	 * - the snapshot should be created successfully
6575 	 * - the delete operation should fail with -EBUSY */
6576 	CU_ASSERT(blob->locked_operation_in_progress == false);
6577 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
6578 	CU_ASSERT(blob->locked_operation_in_progress == true);
6579 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
6580 	CU_ASSERT(blob->locked_operation_in_progress == true);
6581 	/* Deletion failure */
6582 	CU_ASSERT(g_bserrno == -EBUSY);
6583 	poll_threads();
6584 	CU_ASSERT(blob->locked_operation_in_progress == false);
6585 	/* Snapshot creation success */
6586 	CU_ASSERT(g_bserrno == 0);
6587 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
6588 
6589 	snapshotid = g_blobid;
6590 
6591 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
6592 	poll_threads();
6593 	CU_ASSERT(g_bserrno == 0);
6594 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
6595 	snapshot = g_blob;
6596 
6597 	/* Inflate the blob and try to delete it at the same time:
6598 	 * - the blob should be inflated successfully
6599 	 * - the delete operation should fail with -EBUSY */
6600 	CU_ASSERT(blob->locked_operation_in_progress == false);
6601 	spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
6602 	CU_ASSERT(blob->locked_operation_in_progress == true);
6603 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
6604 	CU_ASSERT(blob->locked_operation_in_progress == true);
6605 	/* Deletion failure */
6606 	CU_ASSERT(g_bserrno == -EBUSY);
6607 	poll_threads();
6608 	CU_ASSERT(blob->locked_operation_in_progress == false);
6609 	/* Inflation success */
6610 	CU_ASSERT(g_bserrno == 0);
6611 
6612 	/* Clone the snapshot and try to delete the snapshot at the same time:
6613 	 * - the snapshot should be cloned successfully
6614 	 * - the delete operation should fail with -EBUSY */
6615 	CU_ASSERT(blob->locked_operation_in_progress == false);
6616 	spdk_bs_create_clone(bs, snapshotid, NULL, blob_op_with_id_complete, NULL);
6617 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
6618 	/* Deletion failure */
6619 	CU_ASSERT(g_bserrno == -EBUSY);
6620 	poll_threads();
6621 	CU_ASSERT(blob->locked_operation_in_progress == false);
6622 	/* Clone created */
6623 	CU_ASSERT(g_bserrno == 0);
6624 
6625 	/* Resize the blob and try to delete it at the same time:
6626 	 * - the blob should be resized successfully
6627 	 * - the delete operation should fail with -EBUSY */
6628 	CU_ASSERT(blob->locked_operation_in_progress == false);
6629 	spdk_blob_resize(blob, 50, blob_op_complete, NULL);
6630 	CU_ASSERT(blob->locked_operation_in_progress == true);
6631 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
6632 	CU_ASSERT(blob->locked_operation_in_progress == true);
6633 	/* Deletion failure */
6634 	CU_ASSERT(g_bserrno == -EBUSY);
6635 	poll_threads();
6636 	CU_ASSERT(blob->locked_operation_in_progress == false);
6637 	/* Blob resized successfully */
6638 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
6639 	poll_threads();
6640 	CU_ASSERT(g_bserrno == 0);
6641 
6642 	/* Issue two consecutive blob syncs; neither should fail.
6643 	 * Force each sync to actually do work by marking the blob dirty each time.
6644 	 * Calling spdk_blob_sync_md() alone is not enough to complete the operation,
6645 	 * since the disk I/O it issues only completes once the threads are polled. */
6646 	g_bserrno = -1;
6647 
6648 	rc = spdk_blob_set_xattr(blob, "sync", "first", strlen("first") + 1);
6649 	CU_ASSERT(rc == 0);
6650 	spdk_blob_sync_md(blob, first_sync_complete, blob);
6651 	CU_ASSERT(g_bserrno == -1);
6652 
6653 	spdk_blob_sync_md(blob, second_sync_complete, blob);
6654 	CU_ASSERT(g_bserrno == -1);
6655 
6656 	poll_threads();
6657 	CU_ASSERT(g_bserrno == 0);
6658 
6659 	spdk_bs_free_io_channel(channel);
6660 	poll_threads();
6661 
6662 	ut_blob_close_and_delete(bs, snapshot);
6663 	ut_blob_close_and_delete(bs, blob);
6664 }
6665 
6666 static void
6667 blob_persist_test(void)
6668 {
6669 	struct spdk_blob_store *bs = g_bs;
6670 	struct spdk_blob_opts opts;
6671 	struct spdk_blob *blob;
6672 	spdk_blob_id blobid;
6673 	struct spdk_io_channel *channel;
6674 	char *xattr;
6675 	size_t xattr_length;
6676 	int rc;
6677 	uint32_t page_count_clear, page_count_xattr;
6678 	uint64_t poller_iterations;
6679 	bool run_poller;
6680 
6681 	channel = spdk_bs_alloc_io_channel(bs);
6682 	SPDK_CU_ASSERT_FATAL(channel != NULL);
6683 
6684 	ut_spdk_blob_opts_init(&opts);
6685 	opts.num_clusters = 10;
6686 
6687 	blob = ut_blob_create_and_open(bs, &opts);
6688 	blobid = spdk_blob_get_id(blob);
6689 
6690 	/* Save the number of md pages used after creation of the blob.
6691 	 * The count should return to this value after the xattr is removed. */
6692 	page_count_clear = spdk_bit_array_count_set(bs->used_md_pages);
6693 	SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_clear);
6694 	SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_clear);
6695 
6696 	/* Add an xattr whose descriptor has the maximum length, so the blob metadata no longer fits in a single page. */
6697 	xattr_length = SPDK_BS_MAX_DESC_SIZE - sizeof(struct spdk_blob_md_descriptor_xattr) -
6698 		       strlen("large_xattr");
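	/* The value length fills whatever remains of SPDK_BS_MAX_DESC_SIZE after the xattr descriptor header and the name. */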
6699 	xattr = calloc(xattr_length, sizeof(char));
6700 	SPDK_CU_ASSERT_FATAL(xattr != NULL);
6701 
6702 	rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
6703 	SPDK_CU_ASSERT_FATAL(rc == 0);
6704 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
6705 	poll_threads();
6706 	SPDK_CU_ASSERT_FATAL(g_bserrno == 0);
6707 
6708 	/* Save the amount of md pages used after adding the large xattr */
6709 	page_count_xattr = spdk_bit_array_count_set(bs->used_md_pages);
6710 	SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_xattr);
6711 	SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_xattr);
6712 
6713 	/* Add an xattr to the blob and sync it. While that sync is occurring, remove the xattr and sync again.
6714 	 * Interrupt the first sync after an increasing number of poller iterations on each pass of the loop, until it eventually completes.
6715 	 * The expectation is that after the second sync completes, no xattr is saved in the metadata. */
6716 	poller_iterations = 1;
6717 	run_poller = true;
6718 	while (run_poller) {
6719 		rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
6720 		SPDK_CU_ASSERT_FATAL(rc == 0);
6721 		g_bserrno = -1;
6722 		spdk_blob_sync_md(blob, blob_op_complete, NULL);
6723 		poll_thread_times(0, poller_iterations);
6724 		if (g_bserrno == 0) {
6725 			/* The poller iteration count was high enough for the first sync to complete.
6726 			 * Verify that the blob occupies enough md pages to store the xattr. */
6727 			SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_xattr);
6728 			SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_xattr);
6729 			SPDK_CU_ASSERT_FATAL(spdk_bit_array_count_set(bs->used_md_pages) == page_count_xattr);
6730 			run_poller = false;
6731 		}
6732 		rc = spdk_blob_remove_xattr(blob, "large_xattr");
6733 		SPDK_CU_ASSERT_FATAL(rc == 0);
6734 		spdk_blob_sync_md(blob, blob_op_complete, NULL);
6735 		poll_threads();
6736 		SPDK_CU_ASSERT_FATAL(g_bserrno == 0);
6737 		SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_clear);
6738 		SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_clear);
6739 		SPDK_CU_ASSERT_FATAL(spdk_bit_array_count_set(bs->used_md_pages) == page_count_clear);
6740 
6741 		/* Reload bs and re-open blob to verify that xattr was not persisted. */
6742 		spdk_blob_close(blob, blob_op_complete, NULL);
6743 		poll_threads();
6744 		CU_ASSERT(g_bserrno == 0);
6745 
6746 		ut_bs_reload(&bs, NULL);
6747 
6748 		spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
6749 		poll_threads();
6750 		CU_ASSERT(g_bserrno == 0);
6751 		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
6752 		blob = g_blob;
6753 
6754 		rc = spdk_blob_get_xattr_value(blob, "large_xattr", (const void **)&xattr, &xattr_length);
6755 		SPDK_CU_ASSERT_FATAL(rc == -ENOENT);
6756 
6757 		poller_iterations++;
6758 		/* Stop at a high iteration count to prevent an infinite loop.
6759 		 * This value should be more than enough for the first md sync to complete in any case. */
6760 		SPDK_CU_ASSERT_FATAL(poller_iterations < 50);
6761 	}
6762 
6763 	free(xattr);
6764 
6765 	ut_blob_close_and_delete(bs, blob);
6766 
6767 	spdk_bs_free_io_channel(channel);
6768 	poll_threads();
6769 }
6770 
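/* Initialize a fresh blobstore on a zeroed device; used as the setup step for the blobstore-backed suites. */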
6771 static void
6772 suite_bs_setup(void)
6773 {
6774 	struct spdk_bs_dev *dev;
6775 
6776 	dev = init_dev();
6777 	memset(g_dev_buffer, 0, DEV_BUFFER_SIZE);
6778 	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
6779 	poll_threads();
6780 	CU_ASSERT(g_bserrno == 0);
6781 	CU_ASSERT(g_bs != NULL);
6782 }
6783 
6784 static void
6785 suite_bs_cleanup(void)
6786 {
6787 	spdk_bs_unload(g_bs, bs_op_complete, NULL);
6788 	poll_threads();
6789 	CU_ASSERT(g_bserrno == 0);
6790 	g_bs = NULL;
6791 	memset(g_dev_buffer, 0, DEV_BUFFER_SIZE);
6792 }
6793 
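/* Helper: create a blob using the given opts (or defaults when blob_opts is NULL), open it, and return the opened blob. */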
6794 static struct spdk_blob *
6795 ut_blob_create_and_open(struct spdk_blob_store *bs, struct spdk_blob_opts *blob_opts)
6796 {
6797 	struct spdk_blob *blob;
6798 	struct spdk_blob_opts create_blob_opts;
6799 	spdk_blob_id blobid;
6800 
6801 	if (blob_opts == NULL) {
6802 		ut_spdk_blob_opts_init(&create_blob_opts);
6803 		blob_opts = &create_blob_opts;
6804 	}
6805 
6806 	spdk_bs_create_blob_ext(bs, blob_opts, blob_op_with_id_complete, NULL);
6807 	poll_threads();
6808 	CU_ASSERT(g_bserrno == 0);
6809 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
6810 	blobid = g_blobid;
6811 	g_blobid = -1;
6812 
6813 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
6814 	poll_threads();
6815 	CU_ASSERT(g_bserrno == 0);
6816 	CU_ASSERT(g_blob != NULL);
6817 	blob = g_blob;
6818 
6819 	g_blob = NULL;
6820 	g_bserrno = -1;
6821 
6822 	return blob;
6823 }
6824 
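/* Helper: close the given blob and then delete it from the blobstore. */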
6825 static void
6826 ut_blob_close_and_delete(struct spdk_blob_store *bs, struct spdk_blob *blob)
6827 {
6828 	spdk_blob_id blobid = spdk_blob_get_id(blob);
6829 
6830 	spdk_blob_close(blob, blob_op_complete, NULL);
6831 	poll_threads();
6832 	CU_ASSERT(g_bserrno == 0);
6833 	g_blob = NULL;
6834 
6835 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
6836 	poll_threads();
6837 	CU_ASSERT(g_bserrno == 0);
6838 	g_bserrno = -1;
6839 }
6840 
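/* Setup for the per-blob suite: bring up a blobstore and leave a freshly created blob open in g_blob. */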
6841 static void
6842 suite_blob_setup(void)
6843 {
6844 	suite_bs_setup();
6845 	CU_ASSERT(g_bs != NULL);
6846 
6847 	g_blob = ut_blob_create_and_open(g_bs, NULL);
6848 	CU_ASSERT(g_blob != NULL);
6849 }
6850 
6851 static void
6852 suite_blob_cleanup(void)
6853 {
6854 	ut_blob_close_and_delete(g_bs, g_blob);
6855 	CU_ASSERT(g_blob == NULL);
6856 
6857 	suite_bs_cleanup();
6858 	CU_ASSERT(g_bs == NULL);
6859 }
6860 
6861 int main(int argc, char **argv)
6862 {
6863 	CU_pSuite	suite, suite_bs, suite_blob;
6864 	unsigned int	num_failures;
6865 
6866 	CU_set_error_action(CUEA_ABORT);
6867 	CU_initialize_registry();
6868 
6869 	suite = CU_add_suite("blob", NULL, NULL);
6870 	suite_bs = CU_add_suite_with_setup_and_teardown("blob_bs", NULL, NULL,
6871 			suite_bs_setup, suite_bs_cleanup);
6872 	suite_blob = CU_add_suite_with_setup_and_teardown("blob_blob", NULL, NULL,
6873 			suite_blob_setup, suite_blob_cleanup);
6874 
6875 	CU_ADD_TEST(suite, blob_init);
6876 	CU_ADD_TEST(suite_bs, blob_open);
6877 	CU_ADD_TEST(suite_bs, blob_create);
6878 	CU_ADD_TEST(suite_bs, blob_create_loop);
6879 	CU_ADD_TEST(suite_bs, blob_create_fail);
6880 	CU_ADD_TEST(suite_bs, blob_create_internal);
6881 	CU_ADD_TEST(suite, blob_thin_provision);
6882 	CU_ADD_TEST(suite_bs, blob_snapshot);
6883 	CU_ADD_TEST(suite_bs, blob_clone);
6884 	CU_ADD_TEST(suite_bs, blob_inflate);
6885 	CU_ADD_TEST(suite_bs, blob_delete);
6886 	CU_ADD_TEST(suite_bs, blob_resize_test);
6887 	CU_ADD_TEST(suite, blob_read_only);
6888 	CU_ADD_TEST(suite_bs, channel_ops);
6889 	CU_ADD_TEST(suite_bs, blob_super);
6890 	CU_ADD_TEST(suite_blob, blob_write);
6891 	CU_ADD_TEST(suite_blob, blob_read);
6892 	CU_ADD_TEST(suite_blob, blob_rw_verify);
6893 	CU_ADD_TEST(suite_bs, blob_rw_verify_iov);
6894 	CU_ADD_TEST(suite_blob, blob_rw_verify_iov_nomem);
6895 	CU_ADD_TEST(suite_blob, blob_rw_iov_read_only);
6896 	CU_ADD_TEST(suite_bs, blob_unmap);
6897 	CU_ADD_TEST(suite_bs, blob_iter);
6898 	CU_ADD_TEST(suite_blob, blob_xattr);
6899 	CU_ADD_TEST(suite_bs, blob_parse_md);
6900 	CU_ADD_TEST(suite, bs_load);
6901 	CU_ADD_TEST(suite_bs, bs_load_pending_removal);
6902 	CU_ADD_TEST(suite, bs_load_custom_cluster_size);
6903 	CU_ADD_TEST(suite_bs, bs_unload);
6904 	CU_ADD_TEST(suite, bs_cluster_sz);
6905 	CU_ADD_TEST(suite_bs, bs_usable_clusters);
6906 	CU_ADD_TEST(suite, bs_resize_md);
6907 	CU_ADD_TEST(suite, bs_destroy);
6908 	CU_ADD_TEST(suite, bs_type);
6909 	CU_ADD_TEST(suite, bs_super_block);
6910 	CU_ADD_TEST(suite, blob_serialize_test);
6911 	CU_ADD_TEST(suite_bs, blob_crc);
6912 	CU_ADD_TEST(suite, super_block_crc);
6913 	CU_ADD_TEST(suite_blob, blob_dirty_shutdown);
6914 	CU_ADD_TEST(suite_bs, blob_flags);
6915 	CU_ADD_TEST(suite_bs, bs_version);
6916 	CU_ADD_TEST(suite_bs, blob_set_xattrs_test);
6917 	CU_ADD_TEST(suite_bs, blob_thin_prov_alloc);
6918 	CU_ADD_TEST(suite_bs, blob_insert_cluster_msg_test);
6919 	CU_ADD_TEST(suite_bs, blob_thin_prov_rw);
6920 	CU_ADD_TEST(suite, blob_thin_prov_write_count_io);
6921 	CU_ADD_TEST(suite_bs, blob_thin_prov_rle);
6922 	CU_ADD_TEST(suite_bs, blob_thin_prov_rw_iov);
6923 	CU_ADD_TEST(suite, bs_load_iter_test);
6924 	CU_ADD_TEST(suite_bs, blob_snapshot_rw);
6925 	CU_ADD_TEST(suite_bs, blob_snapshot_rw_iov);
6926 	CU_ADD_TEST(suite, blob_relations);
6927 	CU_ADD_TEST(suite, blob_relations2);
6928 	CU_ADD_TEST(suite, blobstore_clean_power_failure);
6929 	CU_ADD_TEST(suite, blob_delete_snapshot_power_failure);
6930 	CU_ADD_TEST(suite, blob_create_snapshot_power_failure);
6931 	CU_ADD_TEST(suite_bs, blob_inflate_rw);
6932 	CU_ADD_TEST(suite_bs, blob_snapshot_freeze_io);
6933 	CU_ADD_TEST(suite_bs, blob_operation_split_rw);
6934 	CU_ADD_TEST(suite_bs, blob_operation_split_rw_iov);
6935 	CU_ADD_TEST(suite, blob_io_unit);
6936 	CU_ADD_TEST(suite, blob_io_unit_compatibility);
6937 	CU_ADD_TEST(suite_bs, blob_simultaneous_operations);
6938 	CU_ADD_TEST(suite_bs, blob_persist_test);
6939 
6940 	allocate_threads(2);
6941 	set_thread(0);
6942 
6943 	g_dev_buffer = calloc(1, DEV_BUFFER_SIZE);
6944 
6945 	CU_basic_set_mode(CU_BRM_VERBOSE);
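	/* Run the full test list twice: once with g_use_extent_table disabled and once with it
	 * enabled, exercising both blob metadata extent formats. */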
6946 	g_use_extent_table = false;
6947 	CU_basic_run_tests();
6948 	num_failures = CU_get_number_of_failures();
6949 	g_use_extent_table = true;
6950 	CU_basic_run_tests();
6951 	num_failures += CU_get_number_of_failures();
6952 	CU_cleanup_registry();
6953 
6954 	free(g_dev_buffer);
6955 
6956 	free_threads();
6957 
6958 	return num_failures;
6959 }
6960