xref: /spdk/test/unit/lib/blob/blob.c/blob_ut.c (revision f01146ae48b78dc5d62db2e0d8050eeabf369e34)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright (c) Intel Corporation.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include "spdk/stdinc.h"
35 
36 #include "spdk_cunit.h"
37 #include "spdk/blob.h"
38 #include "spdk/string.h"
39 
40 #include "common/lib/ut_multithread.c"
41 #include "../bs_dev_common.c"
42 #include "blob/blobstore.c"
43 #include "blob/request.c"
44 #include "blob/zeroes.c"
45 #include "blob/blob_bs_dev.c"
46 
47 struct spdk_blob_store *g_bs;
48 spdk_blob_id g_blobid;
49 struct spdk_blob *g_blob, *g_blob2;
50 int g_bserrno, g_bserrno2;
51 struct spdk_xattr_names *g_names;
52 int g_done;
53 char *g_xattr_names[] = {"first", "second", "third"};
54 char *g_xattr_values[] = {"one", "two", "three"};
55 uint64_t g_ctx = 1729;
56 bool g_use_extent_table = false;
57 
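/* On-disk layout of version 1 of the blobstore super block (one 4KiB metadata page). */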
58 struct spdk_bs_super_block_ver1 {
59 	uint8_t		signature[8];
60 	uint32_t        version;
61 	uint32_t        length;
62 	uint32_t	clean; /* If there was a clean shutdown, this is 1. */
63 	spdk_blob_id	super_blob;
64 
65 	uint32_t	cluster_size; /* In bytes */
66 
67 	uint32_t	used_page_mask_start; /* Offset from beginning of disk, in pages */
68 	uint32_t	used_page_mask_len; /* Count, in pages */
69 
70 	uint32_t	used_cluster_mask_start; /* Offset from beginning of disk, in pages */
71 	uint32_t	used_cluster_mask_len; /* Count, in pages */
72 
73 	uint32_t	md_start; /* Offset from beginning of disk, in pages */
74 	uint32_t	md_len; /* Count, in pages */
75 
76 	uint8_t		reserved[4036];
77 	uint32_t	crc;
78 } __attribute__((packed));
79 SPDK_STATIC_ASSERT(sizeof(struct spdk_bs_super_block_ver1) == 0x1000, "Invalid super block size");
80 
81 static struct spdk_blob *ut_blob_create_and_open(struct spdk_blob_store *bs,
82 		struct spdk_blob_opts *blob_opts);
83 static void ut_blob_close_and_delete(struct spdk_blob_store *bs, struct spdk_blob *blob);
84 static void suite_blob_setup(void);
85 static void suite_blob_cleanup(void);
86 
87 static void
88 _get_xattr_value(void *arg, const char *name,
89 		 const void **value, size_t *value_len)
90 {
91 	uint64_t i;
92 
93 	SPDK_CU_ASSERT_FATAL(value_len != NULL);
94 	SPDK_CU_ASSERT_FATAL(value != NULL);
95 	CU_ASSERT(arg == &g_ctx);
96 
97 	for (i = 0; i < sizeof(g_xattr_names) / sizeof(g_xattr_names[0]); i++) {
98 		if (!strcmp(name, g_xattr_names[i])) {
99 			*value_len = strlen(g_xattr_values[i]);
100 			*value = g_xattr_values[i];
101 			break;
102 		}
103 	}
104 }
105 
106 static void
107 _get_xattr_value_null(void *arg, const char *name,
108 		      const void **value, size_t *value_len)
109 {
110 	SPDK_CU_ASSERT_FATAL(value_len != NULL);
111 	SPDK_CU_ASSERT_FATAL(value != NULL);
112 	CU_ASSERT(arg == NULL);
113 
114 	*value_len = 0;
115 	*value = NULL;
116 }
117 
118 static int
119 _get_snapshots_count(struct spdk_blob_store *bs)
120 {
121 	struct spdk_blob_list *snapshot = NULL;
122 	int count = 0;
123 
124 	TAILQ_FOREACH(snapshot, &bs->snapshots, link) {
125 		count += 1;
126 	}
127 
128 	return count;
129 }
130 
131 static void
132 ut_spdk_blob_opts_init(struct spdk_blob_opts *opts)
133 {
134 	spdk_blob_opts_init(opts, sizeof(*opts));
135 	opts->use_extent_table = g_use_extent_table;
136 }
137 
138 static void
139 bs_op_complete(void *cb_arg, int bserrno)
140 {
141 	g_bserrno = bserrno;
142 }
143 
144 static void
145 bs_op_with_handle_complete(void *cb_arg, struct spdk_blob_store *bs,
146 			   int bserrno)
147 {
148 	g_bs = bs;
149 	g_bserrno = bserrno;
150 }
151 
152 static void
153 blob_op_complete(void *cb_arg, int bserrno)
154 {
155 	g_bserrno = bserrno;
156 }
157 
158 static void
159 blob_op_with_id_complete(void *cb_arg, spdk_blob_id blobid, int bserrno)
160 {
161 	g_blobid = blobid;
162 	g_bserrno = bserrno;
163 }
164 
165 static void
166 blob_op_with_handle_complete(void *cb_arg, struct spdk_blob *blb, int bserrno)
167 {
168 	g_blob = blb;
169 	g_bserrno = bserrno;
170 }
171 
172 static void
173 blob_op_with_handle_complete2(void *cb_arg, struct spdk_blob *blob, int bserrno)
174 {
175 	if (g_blob == NULL) {
176 		g_blob = blob;
177 		g_bserrno = bserrno;
178 	} else {
179 		g_blob2 = blob;
180 		g_bserrno2 = bserrno;
181 	}
182 }
183 
184 static void
185 ut_bs_reload(struct spdk_blob_store **bs, struct spdk_bs_opts *opts)
186 {
187 	struct spdk_bs_dev *dev;
188 
189 	/* Unload the blob store */
190 	spdk_bs_unload(*bs, bs_op_complete, NULL);
191 	poll_threads();
192 	CU_ASSERT(g_bserrno == 0);
193 
194 	dev = init_dev();
195 	/* Load an existing blob store */
196 	spdk_bs_load(dev, opts, bs_op_with_handle_complete, NULL);
197 	poll_threads();
198 	CU_ASSERT(g_bserrno == 0);
199 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
200 	*bs = g_bs;
201 
202 	g_bserrno = -1;
203 }
204 
205 static void
206 ut_bs_dirty_load(struct spdk_blob_store **bs, struct spdk_bs_opts *opts)
207 {
208 	struct spdk_bs_dev *dev;
209 
210 	/* Dirty shutdown */
211 	bs_free(*bs);
212 
213 	dev = init_dev();
214 	/* Load an existing blob store */
215 	spdk_bs_load(dev, opts, bs_op_with_handle_complete, NULL);
216 	poll_threads();
217 	CU_ASSERT(g_bserrno == 0);
218 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
219 	*bs = g_bs;
220 
221 	g_bserrno = -1;
222 }
223 
224 static void
225 blob_init(void)
226 {
227 	struct spdk_blob_store *bs;
228 	struct spdk_bs_dev *dev;
229 
230 	dev = init_dev();
231 
232 	/* should fail for an unsupported blocklen */
233 	dev->blocklen = 500;
234 	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
235 	poll_threads();
236 	CU_ASSERT(g_bserrno == -EINVAL);
237 
238 	dev = init_dev();
239 	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
240 	poll_threads();
241 	CU_ASSERT(g_bserrno == 0);
242 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
243 	bs = g_bs;
244 
245 	spdk_bs_unload(bs, bs_op_complete, NULL);
246 	poll_threads();
247 	CU_ASSERT(g_bserrno == 0);
248 	g_bs = NULL;
249 }
250 
251 static void
252 blob_super(void)
253 {
254 	struct spdk_blob_store *bs = g_bs;
255 	spdk_blob_id blobid;
256 	struct spdk_blob_opts blob_opts;
257 
258 	/* Get the super blob without having set one */
259 	spdk_bs_get_super(bs, blob_op_with_id_complete, NULL);
260 	poll_threads();
261 	CU_ASSERT(g_bserrno == -ENOENT);
262 	CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID);
263 
264 	/* Create a blob */
265 	ut_spdk_blob_opts_init(&blob_opts);
266 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
267 	poll_threads();
268 	CU_ASSERT(g_bserrno == 0);
269 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
270 	blobid = g_blobid;
271 
272 	/* Set the blob as the super blob */
273 	spdk_bs_set_super(bs, blobid, blob_op_complete, NULL);
274 	poll_threads();
275 	CU_ASSERT(g_bserrno == 0);
276 
277 	/* Get the super blob */
278 	spdk_bs_get_super(bs, blob_op_with_id_complete, NULL);
279 	poll_threads();
280 	CU_ASSERT(g_bserrno == 0);
281 	CU_ASSERT(blobid == g_blobid);
282 }
283 
284 static void
285 blob_open(void)
286 {
287 	struct spdk_blob_store *bs = g_bs;
288 	struct spdk_blob *blob;
289 	struct spdk_blob_opts blob_opts;
290 	spdk_blob_id blobid, blobid2;
291 
292 	ut_spdk_blob_opts_init(&blob_opts);
293 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
294 	poll_threads();
295 	CU_ASSERT(g_bserrno == 0);
296 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
297 	blobid = g_blobid;
298 
299 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
300 	poll_threads();
301 	CU_ASSERT(g_bserrno == 0);
302 	CU_ASSERT(g_blob != NULL);
303 	blob = g_blob;
304 
305 	blobid2 = spdk_blob_get_id(blob);
306 	CU_ASSERT(blobid == blobid2);
307 
308 	/* Try to open the blob again.  It should succeed. */
309 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
310 	poll_threads();
311 	CU_ASSERT(g_bserrno == 0);
312 	CU_ASSERT(blob == g_blob);
313 
314 	spdk_blob_close(blob, blob_op_complete, NULL);
315 	poll_threads();
316 	CU_ASSERT(g_bserrno == 0);
317 
318 	/*
319 	 * Close the blob a second time, releasing the second reference.  This
320 	 *  should succeed.
321 	 */
322 	blob = g_blob;
323 	spdk_blob_close(blob, blob_op_complete, NULL);
324 	poll_threads();
325 	CU_ASSERT(g_bserrno == 0);
326 
327 	/*
328 	 * Try to open the blob again.  It should succeed.  This tests the case
329 	 *  where the blob is opened, closed, then re-opened.
330 	 */
331 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
332 	poll_threads();
333 	CU_ASSERT(g_bserrno == 0);
334 	CU_ASSERT(g_blob != NULL);
335 	blob = g_blob;
336 	spdk_blob_close(blob, blob_op_complete, NULL);
337 	poll_threads();
338 	CU_ASSERT(g_bserrno == 0);
339 
340 	/* Try to open the blob twice in succession.  Both opens should return the same
341 	 * blob object.
342 	 */
343 	g_blob = NULL;
344 	g_blob2 = NULL;
345 	g_bserrno = -1;
346 	g_bserrno2 = -1;
347 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete2, NULL);
348 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete2, NULL);
349 	poll_threads();
350 	CU_ASSERT(g_bserrno == 0);
351 	CU_ASSERT(g_bserrno2 == 0);
352 	CU_ASSERT(g_blob != NULL);
353 	CU_ASSERT(g_blob2 != NULL);
354 	CU_ASSERT(g_blob == g_blob2);
355 
356 	g_bserrno = -1;
357 	spdk_blob_close(g_blob, blob_op_complete, NULL);
358 	poll_threads();
359 	CU_ASSERT(g_bserrno == 0);
360 
361 	ut_blob_close_and_delete(bs, g_blob);
362 }
363 
364 static void
365 blob_create(void)
366 {
367 	struct spdk_blob_store *bs = g_bs;
368 	struct spdk_blob *blob;
369 	struct spdk_blob_opts opts;
370 	spdk_blob_id blobid;
371 
372 	/* Create blob with 10 clusters */
373 
374 	ut_spdk_blob_opts_init(&opts);
375 	opts.num_clusters = 10;
376 
377 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
378 	poll_threads();
379 	CU_ASSERT(g_bserrno == 0);
380 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
381 	blobid = g_blobid;
382 
383 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
384 	poll_threads();
385 	CU_ASSERT(g_bserrno == 0);
386 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
387 	blob = g_blob;
388 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
389 
390 	spdk_blob_close(blob, blob_op_complete, NULL);
391 	poll_threads();
392 	CU_ASSERT(g_bserrno == 0);
393 
394 	/* Create blob with 0 clusters */
395 
396 	ut_spdk_blob_opts_init(&opts);
397 	opts.num_clusters = 0;
398 
399 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
400 	poll_threads();
401 	CU_ASSERT(g_bserrno == 0);
402 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
403 	blobid = g_blobid;
404 
405 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
406 	poll_threads();
407 	CU_ASSERT(g_bserrno == 0);
408 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
409 	blob = g_blob;
410 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
411 
412 	spdk_blob_close(blob, blob_op_complete, NULL);
413 	poll_threads();
414 	CU_ASSERT(g_bserrno == 0);
415 
416 	/* Create blob with default options (opts == NULL) */
417 
418 	spdk_bs_create_blob_ext(bs, NULL, blob_op_with_id_complete, NULL);
419 	poll_threads();
420 	CU_ASSERT(g_bserrno == 0);
421 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
422 	blobid = g_blobid;
423 
424 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
425 	poll_threads();
426 	CU_ASSERT(g_bserrno == 0);
427 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
428 	blob = g_blob;
429 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
430 
431 	spdk_blob_close(blob, blob_op_complete, NULL);
432 	poll_threads();
433 	CU_ASSERT(g_bserrno == 0);
434 
435 	/* Try to create blob with size larger than blobstore */
436 
437 	ut_spdk_blob_opts_init(&opts);
438 	opts.num_clusters = bs->total_clusters + 1;
439 
440 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
441 	poll_threads();
442 	CU_ASSERT(g_bserrno == -ENOSPC);
443 }
444 
445 /*
446  * Create and delete one blob in a loop over and over again.  This helps ensure
447  * that the internal bit masks tracking used clusters and md_pages are maintained
448  * correctly.
449  */
450 static void
451 blob_create_loop(void)
452 {
453 	struct spdk_blob_store *bs = g_bs;
454 	struct spdk_blob_opts opts;
455 	uint32_t i, loop_count;
456 
457 	loop_count = 4 * spdk_max(spdk_bit_array_capacity(bs->used_md_pages),
458 				  spdk_bit_pool_capacity(bs->used_clusters));
459 
460 	for (i = 0; i < loop_count; i++) {
461 		ut_spdk_blob_opts_init(&opts);
462 		opts.num_clusters = 1;
463 		g_bserrno = -1;
464 		g_blobid = SPDK_BLOBID_INVALID;
465 		spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
466 		poll_threads();
467 		CU_ASSERT(g_bserrno == 0);
468 		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
469 		spdk_bs_delete_blob(bs, g_blobid, blob_op_complete, NULL);
470 		poll_threads();
471 		CU_ASSERT(g_bserrno == 0);
472 	}
473 }
474 
475 static void
476 blob_create_fail(void)
477 {
478 	struct spdk_blob_store *bs = g_bs;
479 	struct spdk_blob_opts opts;
480 	spdk_blob_id blobid;
481 	uint32_t used_blobids_count = spdk_bit_array_count_set(bs->used_blobids);
482 	uint32_t used_md_pages_count = spdk_bit_array_count_set(bs->used_md_pages);
483 
484 	/* NULL callback */
485 	ut_spdk_blob_opts_init(&opts);
486 	opts.xattrs.names = g_xattr_names;
487 	opts.xattrs.get_value = NULL;
488 	opts.xattrs.count = 1;
489 	opts.xattrs.ctx = &g_ctx;
490 
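	/* Remember the md page index the create would have used; opening that id below
	 *  should fail with -ENOENT, confirming nothing was left allocated. */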
491 	blobid = spdk_bit_array_find_first_clear(bs->used_md_pages, 0);
492 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
493 	poll_threads();
494 	CU_ASSERT(g_bserrno == -EINVAL);
495 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
496 	CU_ASSERT(spdk_bit_array_count_set(bs->used_blobids) == used_blobids_count);
497 	CU_ASSERT(spdk_bit_array_count_set(bs->used_md_pages) == used_md_pages_count);
498 
499 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
500 	poll_threads();
501 	CU_ASSERT(g_bserrno == -ENOENT);
502 	SPDK_CU_ASSERT_FATAL(g_blob == NULL);
503 
504 	ut_bs_reload(&bs, NULL);
505 	CU_ASSERT(spdk_bit_array_count_set(bs->used_blobids) == used_blobids_count);
506 	CU_ASSERT(spdk_bit_array_count_set(bs->used_md_pages) == used_md_pages_count);
507 
508 	spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL);
509 	poll_threads();
510 	CU_ASSERT(g_blob == NULL);
511 	CU_ASSERT(g_bserrno == -ENOENT);
512 }
513 
514 static void
515 blob_create_internal(void)
516 {
517 	struct spdk_blob_store *bs = g_bs;
518 	struct spdk_blob *blob;
519 	struct spdk_blob_opts opts;
520 	struct spdk_blob_xattr_opts internal_xattrs;
521 	const void *value;
522 	size_t value_len;
523 	spdk_blob_id blobid;
524 	int rc;
525 
526 	/* Create blob with custom xattrs */
527 
528 	ut_spdk_blob_opts_init(&opts);
529 	blob_xattrs_init(&internal_xattrs);
530 	internal_xattrs.count = 3;
531 	internal_xattrs.names = g_xattr_names;
532 	internal_xattrs.get_value = _get_xattr_value;
533 	internal_xattrs.ctx = &g_ctx;
534 
535 	bs_create_blob(bs, &opts, &internal_xattrs, blob_op_with_id_complete, NULL);
536 	poll_threads();
537 	CU_ASSERT(g_bserrno == 0);
538 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
539 	blobid = g_blobid;
540 
541 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
542 	poll_threads();
543 	CU_ASSERT(g_bserrno == 0);
544 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
545 	blob = g_blob;
546 
547 	rc = blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len, true);
548 	CU_ASSERT(rc == 0);
549 	SPDK_CU_ASSERT_FATAL(value != NULL);
550 	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
551 	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);
552 
553 	rc = blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len, true);
554 	CU_ASSERT(rc == 0);
555 	SPDK_CU_ASSERT_FATAL(value != NULL);
556 	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
557 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);
558 
559 	rc = blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len, true);
560 	CU_ASSERT(rc == 0);
561 	SPDK_CU_ASSERT_FATAL(value != NULL);
562 	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
563 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);
564 
565 	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len);
566 	CU_ASSERT(rc != 0);
567 
568 	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len);
569 	CU_ASSERT(rc != 0);
570 
571 	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len);
572 	CU_ASSERT(rc != 0);
573 
574 	spdk_blob_close(blob, blob_op_complete, NULL);
575 	poll_threads();
576 	CU_ASSERT(g_bserrno == 0);
577 
578 	/* Create blob with NULL internal options  */
579 
580 	bs_create_blob(bs, NULL, NULL, blob_op_with_id_complete, NULL);
581 	poll_threads();
582 	CU_ASSERT(g_bserrno == 0);
583 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
584 	blobid = g_blobid;
585 
586 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
587 	poll_threads();
588 	CU_ASSERT(g_bserrno == 0);
589 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
590 	CU_ASSERT(TAILQ_FIRST(&g_blob->xattrs_internal) == NULL);
591 
592 	blob = g_blob;
593 
594 	spdk_blob_close(blob, blob_op_complete, NULL);
595 	poll_threads();
596 	CU_ASSERT(g_bserrno == 0);
597 }
598 
599 static void
600 blob_thin_provision(void)
601 {
602 	struct spdk_blob_store *bs;
603 	struct spdk_bs_dev *dev;
604 	struct spdk_blob *blob;
605 	struct spdk_blob_opts opts;
606 	struct spdk_bs_opts bs_opts;
607 	spdk_blob_id blobid;
608 
609 	dev = init_dev();
610 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
611 	snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");
612 
613 	/* Initialize a new blob store */
614 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
615 	poll_threads();
616 	CU_ASSERT(g_bserrno == 0);
617 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
618 
619 	bs = g_bs;
620 
621 	/* Create blob with thin provisioning enabled */
622 
623 	ut_spdk_blob_opts_init(&opts);
624 	opts.thin_provision = true;
625 	opts.num_clusters = 10;
626 
627 	blob = ut_blob_create_and_open(bs, &opts);
628 	blobid = spdk_blob_get_id(blob);
629 	CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
630 
631 	spdk_blob_close(blob, blob_op_complete, NULL);
632 	CU_ASSERT(g_bserrno == 0);
633 
634 	/* Do not shut down cleanly.  This makes sure that when we load again
635 	 *  and try to recover a valid used_cluster map, the blobstore will
636 	 *  ignore clusters with index 0, since these are unallocated clusters.
637 	 */
638 	ut_bs_dirty_load(&bs, &bs_opts);
639 
640 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
641 	poll_threads();
642 	CU_ASSERT(g_bserrno == 0);
643 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
644 	blob = g_blob;
645 	CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
646 
647 	ut_blob_close_and_delete(bs, blob);
648 
649 	spdk_bs_unload(bs, bs_op_complete, NULL);
650 	poll_threads();
651 	CU_ASSERT(g_bserrno == 0);
652 	g_bs = NULL;
653 }
654 
655 static void
656 blob_snapshot(void)
657 {
658 	struct spdk_blob_store *bs = g_bs;
659 	struct spdk_blob *blob;
660 	struct spdk_blob *snapshot, *snapshot2;
661 	struct spdk_blob_bs_dev *blob_bs_dev;
662 	struct spdk_blob_opts opts;
663 	struct spdk_blob_xattr_opts xattrs;
664 	spdk_blob_id blobid;
665 	spdk_blob_id snapshotid;
666 	spdk_blob_id snapshotid2;
667 	const void *value;
668 	size_t value_len;
669 	int rc;
670 	spdk_blob_id ids[2];
671 	size_t count;
672 
673 	/* Create blob with 10 clusters */
674 	ut_spdk_blob_opts_init(&opts);
675 	opts.num_clusters = 10;
676 
677 	blob = ut_blob_create_and_open(bs, &opts);
678 	blobid = spdk_blob_get_id(blob);
679 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
680 
681 	/* Create snapshot from blob */
682 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 0);
683 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
684 	poll_threads();
685 	CU_ASSERT(g_bserrno == 0);
686 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
687 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
688 	snapshotid = g_blobid;
689 
690 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
691 	poll_threads();
692 	CU_ASSERT(g_bserrno == 0);
693 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
694 	snapshot = g_blob;
695 	CU_ASSERT(snapshot->data_ro == true);
696 	CU_ASSERT(snapshot->md_ro == true);
697 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10);
698 
699 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
700 	CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
701 	CU_ASSERT(spdk_mem_all_zero(blob->active.clusters,
702 				    blob->active.num_clusters * sizeof(blob->active.clusters[0])));
703 
704 	/* Try to create snapshot from clone with xattrs */
705 	xattrs.names = g_xattr_names;
706 	xattrs.get_value = _get_xattr_value;
707 	xattrs.count = 3;
708 	xattrs.ctx = &g_ctx;
709 	spdk_bs_create_snapshot(bs, blobid, &xattrs, blob_op_with_id_complete, NULL);
710 	poll_threads();
711 	CU_ASSERT(g_bserrno == 0);
712 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
713 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 2);
714 	snapshotid2 = g_blobid;
715 
716 	spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL);
717 	CU_ASSERT(g_bserrno == 0);
718 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
719 	snapshot2 = g_blob;
720 	CU_ASSERT(snapshot2->data_ro == true);
721 	CU_ASSERT(snapshot2->md_ro == true);
722 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot2) == 10);
723 
724 	/* Confirm that blob is backed by snapshot2 and snapshot2 is backed by snapshot */
725 	CU_ASSERT(snapshot->back_bs_dev == NULL);
726 	SPDK_CU_ASSERT_FATAL(blob->back_bs_dev != NULL);
727 	SPDK_CU_ASSERT_FATAL(snapshot2->back_bs_dev != NULL);
728 
729 	blob_bs_dev = (struct spdk_blob_bs_dev *)blob->back_bs_dev;
730 	CU_ASSERT(blob_bs_dev->blob == snapshot2);
731 
732 	blob_bs_dev = (struct spdk_blob_bs_dev *)snapshot2->back_bs_dev;
733 	CU_ASSERT(blob_bs_dev->blob == snapshot);
734 
735 	rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[0], &value, &value_len);
736 	CU_ASSERT(rc == 0);
737 	SPDK_CU_ASSERT_FATAL(value != NULL);
738 	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
739 	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);
740 
741 	rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[1], &value, &value_len);
742 	CU_ASSERT(rc == 0);
743 	SPDK_CU_ASSERT_FATAL(value != NULL);
744 	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
745 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);
746 
747 	rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[2], &value, &value_len);
748 	CU_ASSERT(rc == 0);
749 	SPDK_CU_ASSERT_FATAL(value != NULL);
750 	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
751 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);
752 
753 	/* Confirm that blob is clone of snapshot2, and snapshot2 is clone of snapshot */
754 	count = 2;
755 	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0);
756 	CU_ASSERT(count == 1);
757 	CU_ASSERT(ids[0] == blobid);
758 
759 	count = 2;
760 	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);
761 	CU_ASSERT(count == 1);
762 	CU_ASSERT(ids[0] == snapshotid2);
763 
764 	/* Try to create snapshot from snapshot */
765 	spdk_bs_create_snapshot(bs, snapshotid, NULL, blob_op_with_id_complete, NULL);
766 	poll_threads();
767 	CU_ASSERT(g_bserrno == -EINVAL);
768 	CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID);
769 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 2);
770 
771 	/* Delete blob and confirm that it is no longer on snapshot2 clone list */
772 	ut_blob_close_and_delete(bs, blob);
773 	count = 2;
774 	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0);
775 	CU_ASSERT(count == 0);
776 
777 	/* Delete snapshot2 and confirm that it is no longer on snapshot clone list */
778 	ut_blob_close_and_delete(bs, snapshot2);
779 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
780 	count = 2;
781 	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0);
782 	CU_ASSERT(count == 0);
783 
784 	ut_blob_close_and_delete(bs, snapshot);
785 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 0);
786 }
787 
788 static void
789 blob_snapshot_freeze_io(void)
790 {
791 	struct spdk_io_channel *channel;
792 	struct spdk_bs_channel *bs_channel;
793 	struct spdk_blob_store *bs = g_bs;
794 	struct spdk_blob *blob;
795 	struct spdk_blob_opts opts;
796 	spdk_blob_id blobid;
797 	uint32_t num_of_pages = 10;
798 	uint8_t payload_read[num_of_pages * SPDK_BS_PAGE_SIZE];
799 	uint8_t payload_write[num_of_pages * SPDK_BS_PAGE_SIZE];
800 	uint8_t payload_zero[num_of_pages * SPDK_BS_PAGE_SIZE];
801 
802 	memset(payload_write, 0xE5, sizeof(payload_write));
803 	memset(payload_read, 0x00, sizeof(payload_read));
804 	memset(payload_zero, 0x00, sizeof(payload_zero));
805 
806 	/* Test freeze I/O during snapshot */
807 	channel = spdk_bs_alloc_io_channel(bs);
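	/* Grab the internal bs_channel so the test can inspect its queued_io list while I/O is frozen. */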
808 	bs_channel = spdk_io_channel_get_ctx(channel);
809 
810 	/* Create blob with 10 clusters */
811 	ut_spdk_blob_opts_init(&opts);
812 	opts.num_clusters = 10;
813 	opts.thin_provision = false;
814 
815 	blob = ut_blob_create_and_open(bs, &opts);
816 	blobid = spdk_blob_get_id(blob);
817 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
818 
819 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
820 
821 	/* This is implementation specific.
822 	 * Flag 'frozen_io' is set in the bs_snapshot_freeze_cpl callback.
823 	 * Four async I/O operations happen before that. */
824 	poll_thread_times(0, 5);
825 
826 	CU_ASSERT(TAILQ_EMPTY(&bs_channel->queued_io));
827 
828 	/* Blob I/O should be frozen here */
829 	CU_ASSERT(blob->frozen_refcnt == 1);
830 
831 	/* Write to the blob */
832 	spdk_blob_io_write(blob, channel, payload_write, 0, num_of_pages, blob_op_complete, NULL);
833 
834 	/* Verify that I/O is queued */
835 	CU_ASSERT(!TAILQ_EMPTY(&bs_channel->queued_io));
836 	/* Verify that the payload is not yet on disk; the blob's clusters were already handed to the snapshot */
837 	CU_ASSERT(blob->active.clusters[0] == 0);
838 
839 	/* Finish all operations including spdk_bs_create_snapshot */
840 	poll_threads();
841 
842 	/* Verify snapshot */
843 	CU_ASSERT(g_bserrno == 0);
844 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
845 
846 	/* Verify that blob has unset frozen_io */
847 	CU_ASSERT(blob->frozen_refcnt == 0);
848 
849 	/* Verify that postponed I/O completed successfully by comparing payload */
850 	spdk_blob_io_read(blob, channel, payload_read, 0, num_of_pages, blob_op_complete, NULL);
851 	poll_threads();
852 	CU_ASSERT(g_bserrno == 0);
853 	CU_ASSERT(memcmp(payload_write, payload_read, num_of_pages * SPDK_BS_PAGE_SIZE) == 0);
854 
855 	spdk_bs_free_io_channel(channel);
856 	poll_threads();
857 
858 	ut_blob_close_and_delete(bs, blob);
859 }
860 
861 static void
862 blob_clone(void)
863 {
864 	struct spdk_blob_store *bs = g_bs;
865 	struct spdk_blob_opts opts;
866 	struct spdk_blob *blob, *snapshot, *clone;
867 	spdk_blob_id blobid, cloneid, snapshotid;
868 	struct spdk_blob_xattr_opts xattrs;
869 	const void *value;
870 	size_t value_len;
871 	int rc;
872 
873 	/* Create blob with 10 clusters */
874 
875 	ut_spdk_blob_opts_init(&opts);
876 	opts.num_clusters = 10;
877 
878 	blob = ut_blob_create_and_open(bs, &opts);
879 	blobid = spdk_blob_get_id(blob);
880 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
881 
882 	/* Create snapshot */
883 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
884 	poll_threads();
885 	CU_ASSERT(g_bserrno == 0);
886 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
887 	snapshotid = g_blobid;
888 
889 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
890 	poll_threads();
891 	CU_ASSERT(g_bserrno == 0);
892 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
893 	snapshot = g_blob;
894 	CU_ASSERT(snapshot->data_ro == true);
895 	CU_ASSERT(snapshot->md_ro == true);
896 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10);
897 
898 	spdk_blob_close(snapshot, blob_op_complete, NULL);
899 	poll_threads();
900 	CU_ASSERT(g_bserrno == 0);
901 
902 	/* Create clone from snapshot with xattrs */
903 	xattrs.names = g_xattr_names;
904 	xattrs.get_value = _get_xattr_value;
905 	xattrs.count = 3;
906 	xattrs.ctx = &g_ctx;
907 
908 	spdk_bs_create_clone(bs, snapshotid, &xattrs, blob_op_with_id_complete, NULL);
909 	poll_threads();
910 	CU_ASSERT(g_bserrno == 0);
911 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
912 	cloneid = g_blobid;
913 
914 	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
915 	poll_threads();
916 	CU_ASSERT(g_bserrno == 0);
917 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
918 	clone = g_blob;
919 	CU_ASSERT(clone->data_ro == false);
920 	CU_ASSERT(clone->md_ro == false);
921 	CU_ASSERT(spdk_blob_get_num_clusters(clone) == 10);
922 
923 	rc = spdk_blob_get_xattr_value(clone, g_xattr_names[0], &value, &value_len);
924 	CU_ASSERT(rc == 0);
925 	SPDK_CU_ASSERT_FATAL(value != NULL);
926 	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
927 	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);
928 
929 	rc = spdk_blob_get_xattr_value(clone, g_xattr_names[1], &value, &value_len);
930 	CU_ASSERT(rc == 0);
931 	SPDK_CU_ASSERT_FATAL(value != NULL);
932 	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
933 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);
934 
935 	rc = spdk_blob_get_xattr_value(clone, g_xattr_names[2], &value, &value_len);
936 	CU_ASSERT(rc == 0);
937 	SPDK_CU_ASSERT_FATAL(value != NULL);
938 	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
939 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);
940 
941 
942 	spdk_blob_close(clone, blob_op_complete, NULL);
943 	poll_threads();
944 	CU_ASSERT(g_bserrno == 0);
945 
946 	/* Try to create a clone from a blob that is not read-only */
947 	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
948 	poll_threads();
949 	CU_ASSERT(g_bserrno == -EINVAL);
950 	CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID);
951 
952 	/* Mark blob as read only */
953 	spdk_blob_set_read_only(blob);
954 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
955 	poll_threads();
956 	CU_ASSERT(g_bserrno == 0);
957 
958 	/* Create clone from read only blob */
959 	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
960 	poll_threads();
961 	CU_ASSERT(g_bserrno == 0);
962 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
963 	cloneid = g_blobid;
964 
965 	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
966 	poll_threads();
967 	CU_ASSERT(g_bserrno == 0);
968 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
969 	clone = g_blob;
970 	CU_ASSERT(clone->data_ro == false);
971 	CU_ASSERT(clone->md_ro == false);
972 	CU_ASSERT(spdk_blob_get_num_clusters(clone) == 10);
973 
974 	ut_blob_close_and_delete(bs, clone);
975 	ut_blob_close_and_delete(bs, blob);
976 }
977 
978 static void
979 _blob_inflate(bool decouple_parent)
980 {
981 	struct spdk_blob_store *bs = g_bs;
982 	struct spdk_blob_opts opts;
983 	struct spdk_blob *blob, *snapshot;
984 	spdk_blob_id blobid, snapshotid;
985 	struct spdk_io_channel *channel;
986 	uint64_t free_clusters;
987 
988 	channel = spdk_bs_alloc_io_channel(bs);
989 	SPDK_CU_ASSERT_FATAL(channel != NULL);
990 
991 	/* Create blob with 10 clusters */
992 
993 	ut_spdk_blob_opts_init(&opts);
994 	opts.num_clusters = 10;
995 	opts.thin_provision = true;
996 
997 	blob = ut_blob_create_and_open(bs, &opts);
998 	blobid = spdk_blob_get_id(blob);
999 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
1000 	CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == true);
1001 
1002 	/* 1) Blob with no parent */
1003 	if (decouple_parent) {
1004 		/* Decouple parent of blob with no parent (should fail) */
1005 		spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
1006 		poll_threads();
1007 		CU_ASSERT(g_bserrno != 0);
1008 	} else {
1009 		/* Inflating a thin blob with no parent should make it thick */
1010 		spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
1011 		poll_threads();
1012 		CU_ASSERT(g_bserrno == 0);
1013 		CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == false);
1014 	}
1015 
1016 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
1017 	poll_threads();
1018 	CU_ASSERT(g_bserrno == 0);
1019 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
1020 	snapshotid = g_blobid;
1021 
1022 	CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == true);
1023 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
1024 
1025 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
1026 	poll_threads();
1027 	CU_ASSERT(g_bserrno == 0);
1028 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
1029 	snapshot = g_blob;
1030 	CU_ASSERT(snapshot->data_ro == true);
1031 	CU_ASSERT(snapshot->md_ro == true);
1032 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10);
1033 
1034 	spdk_blob_close(snapshot, blob_op_complete, NULL);
1035 	poll_threads();
1036 	CU_ASSERT(g_bserrno == 0);
1037 
1038 	free_clusters = spdk_bs_free_cluster_count(bs);
1039 
1040 	/* 2) Blob with parent */
1041 	if (!decouple_parent) {
1042 		/* Do full blob inflation */
1043 		spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
1044 		poll_threads();
1045 		CU_ASSERT(g_bserrno == 0);
1046 		/* all 10 clusters should be allocated */
1047 		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 10);
1048 	} else {
1049 		/* Decouple parent of blob */
1050 		spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
1051 		poll_threads();
1052 		CU_ASSERT(g_bserrno == 0);
1053 		/* when only parent is removed, none of the clusters should be allocated */
1054 		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters);
1055 	}
1056 
1057 	/* Now, it should be possible to delete snapshot */
1058 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
1059 	poll_threads();
1060 	CU_ASSERT(g_bserrno == 0);
1061 
1062 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
1063 	CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == decouple_parent);
1064 
1065 	spdk_bs_free_io_channel(channel);
1066 	poll_threads();
1067 
1068 	ut_blob_close_and_delete(bs, blob);
1069 }
1070 
1071 static void
1072 blob_inflate(void)
1073 {
1074 	_blob_inflate(false);
1075 	_blob_inflate(true);
1076 }
1077 
1078 static void
1079 blob_delete(void)
1080 {
1081 	struct spdk_blob_store *bs = g_bs;
1082 	struct spdk_blob_opts blob_opts;
1083 	spdk_blob_id blobid;
1084 
1085 	/* Create a blob and then delete it. */
1086 	ut_spdk_blob_opts_init(&blob_opts);
1087 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
1088 	poll_threads();
1089 	CU_ASSERT(g_bserrno == 0);
1090 	CU_ASSERT(g_blobid > 0);
1091 	blobid = g_blobid;
1092 
1093 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
1094 	poll_threads();
1095 	CU_ASSERT(g_bserrno == 0);
1096 
1097 	/* Try to open the blob */
1098 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
1099 	poll_threads();
1100 	CU_ASSERT(g_bserrno == -ENOENT);
1101 }
1102 
1103 static void
1104 blob_resize_test(void)
1105 {
1106 	struct spdk_blob_store *bs = g_bs;
1107 	struct spdk_blob *blob;
1108 	uint64_t free_clusters;
1109 
1110 	free_clusters = spdk_bs_free_cluster_count(bs);
1111 
1112 	blob = ut_blob_create_and_open(bs, NULL);
1113 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
1114 
1115 	/* Confirm that resize fails if blob is marked read-only. */
1116 	blob->md_ro = true;
1117 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
1118 	poll_threads();
1119 	CU_ASSERT(g_bserrno == -EPERM);
1120 	blob->md_ro = false;
1121 
1122 	/* The blob started at 0 clusters. Resize it to be 5. */
1123 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
1124 	poll_threads();
1125 	CU_ASSERT(g_bserrno == 0);
1126 	CU_ASSERT((free_clusters - 5) == spdk_bs_free_cluster_count(bs));
1127 
1128 	/* Shrink the blob to 3 clusters. This will not actually release
1129 	 * the old clusters until the blob is synced.
1130 	 */
1131 	spdk_blob_resize(blob, 3, blob_op_complete, NULL);
1132 	poll_threads();
1133 	CU_ASSERT(g_bserrno == 0);
1134 	/* Verify there are still 5 clusters in use */
1135 	CU_ASSERT((free_clusters - 5) == spdk_bs_free_cluster_count(bs));
1136 
1137 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
1138 	poll_threads();
1139 	CU_ASSERT(g_bserrno == 0);
1140 	/* Now there are only 3 clusters in use */
1141 	CU_ASSERT((free_clusters - 3) == spdk_bs_free_cluster_count(bs));
1142 
1143 	/* Resize the blob to be 10 clusters. Growth takes effect immediately. */
1144 	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
1145 	poll_threads();
1146 	CU_ASSERT(g_bserrno == 0);
1147 	CU_ASSERT((free_clusters - 10) == spdk_bs_free_cluster_count(bs));
1148 
1149 	/* Try to resize the blob to size larger than blobstore. */
1150 	spdk_blob_resize(blob, bs->total_clusters + 1, blob_op_complete, NULL);
1151 	poll_threads();
1152 	CU_ASSERT(g_bserrno == -ENOSPC);
1153 
1154 	ut_blob_close_and_delete(bs, blob);
1155 }
1156 
1157 static void
1158 blob_read_only(void)
1159 {
1160 	struct spdk_blob_store *bs;
1161 	struct spdk_bs_dev *dev;
1162 	struct spdk_blob *blob;
1163 	struct spdk_bs_opts opts;
1164 	spdk_blob_id blobid;
1165 	int rc;
1166 
1167 	dev = init_dev();
1168 	spdk_bs_opts_init(&opts, sizeof(opts));
1169 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
1170 
1171 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
1172 	poll_threads();
1173 	CU_ASSERT(g_bserrno == 0);
1174 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
1175 	bs = g_bs;
1176 
1177 	blob = ut_blob_create_and_open(bs, NULL);
1178 	blobid = spdk_blob_get_id(blob);
1179 
1180 	rc = spdk_blob_set_read_only(blob);
1181 	CU_ASSERT(rc == 0);
1182 
1183 	CU_ASSERT(blob->data_ro == false);
1184 	CU_ASSERT(blob->md_ro == false);
1185 
1186 	spdk_blob_sync_md(blob, bs_op_complete, NULL);
1187 	poll_threads();
1188 
1189 	CU_ASSERT(blob->data_ro == true);
1190 	CU_ASSERT(blob->md_ro == true);
1191 	CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);
1192 
1193 	spdk_blob_close(blob, blob_op_complete, NULL);
1194 	poll_threads();
1195 	CU_ASSERT(g_bserrno == 0);
1196 
1197 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
1198 	poll_threads();
1199 	CU_ASSERT(g_bserrno == 0);
1200 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
1201 	blob = g_blob;
1202 
1203 	CU_ASSERT(blob->data_ro == true);
1204 	CU_ASSERT(blob->md_ro == true);
1205 	CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);
1206 
1207 	spdk_blob_close(blob, blob_op_complete, NULL);
1208 	poll_threads();
1209 	CU_ASSERT(g_bserrno == 0);
1210 
1211 	ut_bs_reload(&bs, &opts);
1212 
1213 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
1214 	poll_threads();
1215 	CU_ASSERT(g_bserrno == 0);
1216 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
1217 	blob = g_blob;
1218 
1219 	CU_ASSERT(blob->data_ro == true);
1220 	CU_ASSERT(blob->md_ro == true);
1221 	CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);
1222 
1223 	ut_blob_close_and_delete(bs, blob);
1224 
1225 	spdk_bs_unload(bs, bs_op_complete, NULL);
1226 	poll_threads();
1227 	CU_ASSERT(g_bserrno == 0);
1228 }
1229 
1230 static void
1231 channel_ops(void)
1232 {
1233 	struct spdk_blob_store *bs = g_bs;
1234 	struct spdk_io_channel *channel;
1235 
1236 	channel = spdk_bs_alloc_io_channel(bs);
1237 	CU_ASSERT(channel != NULL);
1238 
1239 	spdk_bs_free_io_channel(channel);
1240 	poll_threads();
1241 }
1242 
1243 static void
1244 blob_write(void)
1245 {
1246 	struct spdk_blob_store *bs = g_bs;
1247 	struct spdk_blob *blob = g_blob;
1248 	struct spdk_io_channel *channel;
1249 	uint64_t pages_per_cluster;
1250 	uint8_t payload[10 * 4096];
1251 
1252 	pages_per_cluster = spdk_bs_get_cluster_size(bs) / spdk_bs_get_page_size(bs);
1253 
1254 	channel = spdk_bs_alloc_io_channel(bs);
1255 	CU_ASSERT(channel != NULL);
1256 
1257 	/* Write to a blob with 0 size */
1258 	spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL);
1259 	poll_threads();
1260 	CU_ASSERT(g_bserrno == -EINVAL);
1261 
1262 	/* Resize the blob */
1263 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
1264 	poll_threads();
1265 	CU_ASSERT(g_bserrno == 0);
1266 
1267 	/* Confirm that write fails if blob is marked read-only. */
1268 	blob->data_ro = true;
1269 	spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL);
1270 	poll_threads();
1271 	CU_ASSERT(g_bserrno == -EPERM);
1272 	blob->data_ro = false;
1273 
1274 	/* Write to the blob */
1275 	spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL);
1276 	poll_threads();
1277 	CU_ASSERT(g_bserrno == 0);
1278 
1279 	/* Write starting beyond the end */
1280 	spdk_blob_io_write(blob, channel, payload, 5 * pages_per_cluster, 1, blob_op_complete,
1281 			   NULL);
1282 	poll_threads();
1283 	CU_ASSERT(g_bserrno == -EINVAL);
1284 
1285 	/* Write starting at a valid location but going off the end */
1286 	spdk_blob_io_write(blob, channel, payload, 4 * pages_per_cluster, pages_per_cluster + 1,
1287 			   blob_op_complete, NULL);
1288 	poll_threads();
1289 	CU_ASSERT(g_bserrno == -EINVAL);
1290 
1291 	spdk_bs_free_io_channel(channel);
1292 	poll_threads();
1293 }
1294 
1295 static void
1296 blob_read(void)
1297 {
1298 	struct spdk_blob_store *bs = g_bs;
1299 	struct spdk_blob *blob = g_blob;
1300 	struct spdk_io_channel *channel;
1301 	uint64_t pages_per_cluster;
1302 	uint8_t payload[10 * 4096];
1303 
1304 	pages_per_cluster = spdk_bs_get_cluster_size(bs) / spdk_bs_get_page_size(bs);
1305 
1306 	channel = spdk_bs_alloc_io_channel(bs);
1307 	CU_ASSERT(channel != NULL);
1308 
1309 	/* Read from a blob with 0 size */
1310 	spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL);
1311 	poll_threads();
1312 	CU_ASSERT(g_bserrno == -EINVAL);
1313 
1314 	/* Resize the blob */
1315 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
1316 	poll_threads();
1317 	CU_ASSERT(g_bserrno == 0);
1318 
1319 	/* Confirm that read passes if blob is marked read-only. */
1320 	blob->data_ro = true;
1321 	spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL);
1322 	poll_threads();
1323 	CU_ASSERT(g_bserrno == 0);
1324 	blob->data_ro = false;
1325 
1326 	/* Read from the blob */
1327 	spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL);
1328 	poll_threads();
1329 	CU_ASSERT(g_bserrno == 0);
1330 
1331 	/* Read starting beyond the end */
1332 	spdk_blob_io_read(blob, channel, payload, 5 * pages_per_cluster, 1, blob_op_complete,
1333 			  NULL);
1334 	poll_threads();
1335 	CU_ASSERT(g_bserrno == -EINVAL);
1336 
1337 	/* Read starting at a valid location but going off the end */
1338 	spdk_blob_io_read(blob, channel, payload, 4 * pages_per_cluster, pages_per_cluster + 1,
1339 			  blob_op_complete, NULL);
1340 	poll_threads();
1341 	CU_ASSERT(g_bserrno == -EINVAL);
1342 
1343 	spdk_bs_free_io_channel(channel);
1344 	poll_threads();
1345 }
1346 
1347 static void
1348 blob_rw_verify(void)
1349 {
1350 	struct spdk_blob_store *bs = g_bs;
1351 	struct spdk_blob *blob = g_blob;
1352 	struct spdk_io_channel *channel;
1353 	uint8_t payload_read[10 * 4096];
1354 	uint8_t payload_write[10 * 4096];
1355 
1356 	channel = spdk_bs_alloc_io_channel(bs);
1357 	CU_ASSERT(channel != NULL);
1358 
1359 	spdk_blob_resize(blob, 32, blob_op_complete, NULL);
1360 	poll_threads();
1361 	CU_ASSERT(g_bserrno == 0);
1362 
1363 	memset(payload_write, 0xE5, sizeof(payload_write));
1364 	spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
1365 	poll_threads();
1366 	CU_ASSERT(g_bserrno == 0);
1367 
1368 	memset(payload_read, 0x00, sizeof(payload_read));
1369 	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
1370 	poll_threads();
1371 	CU_ASSERT(g_bserrno == 0);
1372 	CU_ASSERT(memcmp(payload_write, payload_read, 4 * 4096) == 0);
1373 
1374 	spdk_bs_free_io_channel(channel);
1375 	poll_threads();
1376 }
1377 
1378 static void
1379 blob_rw_verify_iov(void)
1380 {
1381 	struct spdk_blob_store *bs = g_bs;
1382 	struct spdk_blob *blob;
1383 	struct spdk_io_channel *channel;
1384 	uint8_t payload_read[10 * 4096];
1385 	uint8_t payload_write[10 * 4096];
1386 	struct iovec iov_read[3];
1387 	struct iovec iov_write[3];
1388 	void *buf;
1389 
1390 	channel = spdk_bs_alloc_io_channel(bs);
1391 	CU_ASSERT(channel != NULL);
1392 
1393 	blob = ut_blob_create_and_open(bs, NULL);
1394 
1395 	spdk_blob_resize(blob, 2, blob_op_complete, NULL);
1396 	poll_threads();
1397 	CU_ASSERT(g_bserrno == 0);
1398 
1399 	/*
1400 	 * Manually adjust the offset of the blob's second cluster.  This allows
1401 	 *  us to make sure that the readv/writev code correctly accounts for I/O
1402 	 *  that crosses cluster boundaries.  Start by asserting that the allocated
1403 	 *  clusters are where we expect before modifying the second cluster.
1404 	 */
1405 	CU_ASSERT(blob->active.clusters[0] == 1 * 256);
1406 	CU_ASSERT(blob->active.clusters[1] == 2 * 256);
1407 	blob->active.clusters[1] = 3 * 256;
1408 
1409 	memset(payload_write, 0xE5, sizeof(payload_write));
1410 	iov_write[0].iov_base = payload_write;
1411 	iov_write[0].iov_len = 1 * 4096;
1412 	iov_write[1].iov_base = payload_write + 1 * 4096;
1413 	iov_write[1].iov_len = 5 * 4096;
1414 	iov_write[2].iov_base = payload_write + 6 * 4096;
1415 	iov_write[2].iov_len = 4 * 4096;
1416 	/*
1417 	 * Choose a page offset just before the cluster boundary.  The first 6 pages of payload
1418 	 *  will get written to the first cluster, the last 4 to the second cluster.
1419 	 */
1420 	spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
1421 	poll_threads();
1422 	CU_ASSERT(g_bserrno == 0);
1423 
1424 	memset(payload_read, 0xAA, sizeof(payload_read));
1425 	iov_read[0].iov_base = payload_read;
1426 	iov_read[0].iov_len = 3 * 4096;
1427 	iov_read[1].iov_base = payload_read + 3 * 4096;
1428 	iov_read[1].iov_len = 4 * 4096;
1429 	iov_read[2].iov_base = payload_read + 7 * 4096;
1430 	iov_read[2].iov_len = 3 * 4096;
1431 	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
1432 	poll_threads();
1433 	CU_ASSERT(g_bserrno == 0);
1434 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
1435 
1436 	buf = calloc(1, 256 * 4096);
1437 	SPDK_CU_ASSERT_FATAL(buf != NULL);
1438 	/* Check that cluster 2 on "disk" was not modified. */
1439 	CU_ASSERT(memcmp(buf, &g_dev_buffer[512 * 4096], 256 * 4096) == 0);
1440 	free(buf);
1441 
1442 	spdk_blob_close(blob, blob_op_complete, NULL);
1443 	poll_threads();
1444 	CU_ASSERT(g_bserrno == 0);
1445 
1446 	spdk_bs_free_io_channel(channel);
1447 	poll_threads();
1448 }
1449 
1450 static uint32_t
1451 bs_channel_get_req_count(struct spdk_io_channel *_channel)
1452 {
1453 	struct spdk_bs_channel *channel = spdk_io_channel_get_ctx(_channel);
1454 	struct spdk_bs_request_set *set;
1455 	uint32_t count = 0;
1456 
1457 	TAILQ_FOREACH(set, &channel->reqs, link) {
1458 		count++;
1459 	}
1460 
1461 	return count;
1462 }
1463 
1464 static void
1465 blob_rw_verify_iov_nomem(void)
1466 {
1467 	struct spdk_blob_store *bs = g_bs;
1468 	struct spdk_blob *blob = g_blob;
1469 	struct spdk_io_channel *channel;
1470 	uint8_t payload_write[10 * 4096];
1471 	struct iovec iov_write[3];
1472 	uint32_t req_count;
1473 
1474 	channel = spdk_bs_alloc_io_channel(bs);
1475 	CU_ASSERT(channel != NULL);
1476 
1477 	spdk_blob_resize(blob, 2, blob_op_complete, NULL);
1478 	poll_threads();
1479 	CU_ASSERT(g_bserrno == 0);
1480 
1481 	/*
1482 	 * Choose a page offset just before the cluster boundary.  The first 6 pages of payload
1483 	 *  will get written to the first cluster, the last 4 to the second cluster.
1484 	 */
1485 	iov_write[0].iov_base = payload_write;
1486 	iov_write[0].iov_len = 1 * 4096;
1487 	iov_write[1].iov_base = payload_write + 1 * 4096;
1488 	iov_write[1].iov_len = 5 * 4096;
1489 	iov_write[2].iov_base = payload_write + 6 * 4096;
1490 	iov_write[2].iov_len = 4 * 4096;
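	/* Make the mocked calloc() return NULL so the writev's internal allocation fails with -ENOMEM. */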
1491 	MOCK_SET(calloc, NULL);
1492 	req_count = bs_channel_get_req_count(channel);
1493 	spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
1494 	poll_threads();
1495 	CU_ASSERT(g_bserrno == -ENOMEM);
1496 	CU_ASSERT(req_count == bs_channel_get_req_count(channel));
1497 	MOCK_CLEAR(calloc);
1498 
1499 	spdk_bs_free_io_channel(channel);
1500 	poll_threads();
1501 }
1502 
1503 static void
1504 blob_rw_iov_read_only(void)
1505 {
1506 	struct spdk_blob_store *bs = g_bs;
1507 	struct spdk_blob *blob = g_blob;
1508 	struct spdk_io_channel *channel;
1509 	uint8_t payload_read[4096];
1510 	uint8_t payload_write[4096];
1511 	struct iovec iov_read;
1512 	struct iovec iov_write;
1513 
1514 	channel = spdk_bs_alloc_io_channel(bs);
1515 	CU_ASSERT(channel != NULL);
1516 
1517 	spdk_blob_resize(blob, 2, blob_op_complete, NULL);
1518 	poll_threads();
1519 	CU_ASSERT(g_bserrno == 0);
1520 
1521 	/* Verify that writev fails if the data_ro flag is set. */
1522 	blob->data_ro = true;
1523 	iov_write.iov_base = payload_write;
1524 	iov_write.iov_len = sizeof(payload_write);
1525 	spdk_blob_io_writev(blob, channel, &iov_write, 1, 0, 1, blob_op_complete, NULL);
1526 	poll_threads();
1527 	CU_ASSERT(g_bserrno == -EPERM);
1528 
1529 	/* Verify that reads pass if data_ro flag is set. */
1530 	iov_read.iov_base = payload_read;
1531 	iov_read.iov_len = sizeof(payload_read);
1532 	spdk_blob_io_readv(blob, channel, &iov_read, 1, 0, 1, blob_op_complete, NULL);
1533 	poll_threads();
1534 	CU_ASSERT(g_bserrno == 0);
1535 
1536 	spdk_bs_free_io_channel(channel);
1537 	poll_threads();
1538 }
1539 
1540 static void
1541 _blob_io_read_no_split(struct spdk_blob *blob, struct spdk_io_channel *channel,
1542 		       uint8_t *payload, uint64_t offset, uint64_t length,
1543 		       spdk_blob_op_complete cb_fn, void *cb_arg)
1544 {
1545 	uint64_t i;
1546 	uint8_t *buf;
1547 	uint64_t page_size = spdk_bs_get_page_size(blob->bs);
1548 
1549 	/* To be sure that the operation is NOT split, read one page at a time */
1550 	buf = payload;
1551 	for (i = 0; i < length; i++) {
1552 		spdk_blob_io_read(blob, channel, buf, i + offset, 1, blob_op_complete, NULL);
1553 		poll_threads();
1554 		if (g_bserrno != 0) {
1555 			/* Pass the error code up */
1556 			break;
1557 		}
1558 		buf += page_size;
1559 	}
1560 
1561 	cb_fn(cb_arg, g_bserrno);
1562 }
1563 
1564 static void
1565 _blob_io_write_no_split(struct spdk_blob *blob, struct spdk_io_channel *channel,
1566 			uint8_t *payload, uint64_t offset, uint64_t length,
1567 			spdk_blob_op_complete cb_fn, void *cb_arg)
1568 {
1569 	uint64_t i;
1570 	uint8_t *buf;
1571 	uint64_t page_size = spdk_bs_get_page_size(blob->bs);
1572 
1573 	/* To be sure that the operation is NOT split, write one page at a time */
1574 	buf = payload;
1575 	for (i = 0; i < length; i++) {
1576 		spdk_blob_io_write(blob, channel, buf, i + offset, 1, blob_op_complete, NULL);
1577 		poll_threads();
1578 		if (g_bserrno != 0) {
1579 			/* Pass the error code up */
1580 			break;
1581 		}
1582 		buf += page_size;
1583 	}
1584 
1585 	cb_fn(cb_arg, g_bserrno);
1586 }
1587 
1588 static void
1589 blob_operation_split_rw(void)
1590 {
1591 	struct spdk_blob_store *bs = g_bs;
1592 	struct spdk_blob *blob;
1593 	struct spdk_io_channel *channel;
1594 	struct spdk_blob_opts opts;
1595 	uint64_t cluster_size;
1596 
1597 	uint64_t payload_size;
1598 	uint8_t *payload_read;
1599 	uint8_t *payload_write;
1600 	uint8_t *payload_pattern;
1601 
1602 	uint64_t page_size;
1603 	uint64_t pages_per_cluster;
1604 	uint64_t pages_per_payload;
1605 
1606 	uint64_t i;
1607 
1608 	cluster_size = spdk_bs_get_cluster_size(bs);
1609 	page_size = spdk_bs_get_page_size(bs);
1610 	pages_per_cluster = cluster_size / page_size;
1611 	pages_per_payload = pages_per_cluster * 5;
1612 	payload_size = cluster_size * 5;
1613 
1614 	payload_read = malloc(payload_size);
1615 	SPDK_CU_ASSERT_FATAL(payload_read != NULL);
1616 
1617 	payload_write = malloc(payload_size);
1618 	SPDK_CU_ASSERT_FATAL(payload_write != NULL);
1619 
1620 	payload_pattern = malloc(payload_size);
1621 	SPDK_CU_ASSERT_FATAL(payload_pattern != NULL);
1622 
1623 	/* Prepare a recognizable pattern to write: tag the start of each page with its 1-based index */
1624 	memset(payload_pattern, 0xFF, payload_size);
1625 	for (i = 0; i < pages_per_payload; i++) {
1626 		*((uint64_t *)(payload_pattern + page_size * i)) = (i + 1);
1627 	}
1628 
1629 	channel = spdk_bs_alloc_io_channel(bs);
1630 	SPDK_CU_ASSERT_FATAL(channel != NULL);
1631 
1632 	/* Create blob */
1633 	ut_spdk_blob_opts_init(&opts);
1634 	opts.thin_provision = false;
1635 	opts.num_clusters = 5;
1636 
1637 	blob = ut_blob_create_and_open(bs, &opts);
1638 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
1639 
1640 	/* Initial read should return zeroed payload */
1641 	memset(payload_read, 0xFF, payload_size);
1642 	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
1643 	poll_threads();
1644 	CU_ASSERT(g_bserrno == 0);
1645 	CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size));
1646 
1647 	/* Fill whole blob except last page */
1648 	spdk_blob_io_write(blob, channel, payload_pattern, 0, pages_per_payload - 1,
1649 			   blob_op_complete, NULL);
1650 	poll_threads();
1651 	CU_ASSERT(g_bserrno == 0);
1652 
1653 	/* Write last page with a pattern */
1654 	spdk_blob_io_write(blob, channel, payload_pattern, pages_per_payload - 1, 1,
1655 			   blob_op_complete, NULL);
1656 	poll_threads();
1657 	CU_ASSERT(g_bserrno == 0);
1658 
1659 	/* Read whole blob and check consistency */
1660 	memset(payload_read, 0xFF, payload_size);
1661 	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
1662 	poll_threads();
1663 	CU_ASSERT(g_bserrno == 0);
1664 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size - page_size) == 0);
1665 	CU_ASSERT(memcmp(payload_pattern, payload_read + payload_size - page_size, page_size) == 0);
1666 
1667 	/* Fill whole blob except first page */
1668 	spdk_blob_io_write(blob, channel, payload_pattern, 1, pages_per_payload - 1,
1669 			   blob_op_complete, NULL);
1670 	poll_threads();
1671 	CU_ASSERT(g_bserrno == 0);
1672 
1673 	/* Write first page with a pattern */
1674 	spdk_blob_io_write(blob, channel, payload_pattern, 0, 1,
1675 			   blob_op_complete, NULL);
1676 	poll_threads();
1677 	CU_ASSERT(g_bserrno == 0);
1678 
1679 	/* Read whole blob and check consistency */
1680 	memset(payload_read, 0xFF, payload_size);
1681 	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
1682 	poll_threads();
1683 	CU_ASSERT(g_bserrno == 0);
1684 	CU_ASSERT(memcmp(payload_pattern, payload_read + page_size, payload_size - page_size) == 0);
1685 	CU_ASSERT(memcmp(payload_pattern, payload_read, page_size) == 0);
1686 
1687 
1688 	/* Fill whole blob with a pattern (5 clusters) */
1689 
1690 	/* 1. Read test. */
1691 	_blob_io_write_no_split(blob, channel, payload_pattern, 0, pages_per_payload,
1692 				blob_op_complete, NULL);
1693 	poll_threads();
1694 	CU_ASSERT(g_bserrno == 0);
1695 
1696 	memset(payload_read, 0xFF, payload_size);
1697 	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
1698 	poll_threads();
1699 	poll_threads();
1700 	CU_ASSERT(g_bserrno == 0);
1701 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);
1702 
1703 	/* 2. Write test. */
1704 	spdk_blob_io_write(blob, channel, payload_pattern, 0, pages_per_payload,
1705 			   blob_op_complete, NULL);
1706 	poll_threads();
1707 	CU_ASSERT(g_bserrno == 0);
1708 
1709 	memset(payload_read, 0xFF, payload_size);
1710 	_blob_io_read_no_split(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
1711 	poll_threads();
1712 	CU_ASSERT(g_bserrno == 0);
1713 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);
1714 
1715 	spdk_bs_free_io_channel(channel);
1716 	poll_threads();
1717 
1718 	g_blob = NULL;
1719 	g_blobid = 0;
1720 
1721 	free(payload_read);
1722 	free(payload_write);
1723 	free(payload_pattern);
1724 
1725 	ut_blob_close_and_delete(bs, blob);
1726 }
1727 
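/*
 * Same split read/write coverage as the test above, but issued through
 * spdk_blob_io_readv()/spdk_blob_io_writev() with two-element iovecs whose
 * split points vary from call to call.
 */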
1728 static void
1729 blob_operation_split_rw_iov(void)
1730 {
1731 	struct spdk_blob_store *bs = g_bs;
1732 	struct spdk_blob *blob;
1733 	struct spdk_io_channel *channel;
1734 	struct spdk_blob_opts opts;
1735 	uint64_t cluster_size;
1736 
1737 	uint64_t payload_size;
1738 	uint8_t *payload_read;
1739 	uint8_t *payload_write;
1740 	uint8_t *payload_pattern;
1741 
1742 	uint64_t page_size;
1743 	uint64_t pages_per_cluster;
1744 	uint64_t pages_per_payload;
1745 
1746 	struct iovec iov_read[2];
1747 	struct iovec iov_write[2];
1748 
1749 	uint64_t i, j;
1750 
1751 	cluster_size = spdk_bs_get_cluster_size(bs);
1752 	page_size = spdk_bs_get_page_size(bs);
1753 	pages_per_cluster = cluster_size / page_size;
1754 	pages_per_payload = pages_per_cluster * 5;
1755 	payload_size = cluster_size * 5;
1756 
1757 	payload_read = malloc(payload_size);
1758 	SPDK_CU_ASSERT_FATAL(payload_read != NULL);
1759 
1760 	payload_write = malloc(payload_size);
1761 	SPDK_CU_ASSERT_FATAL(payload_write != NULL);
1762 
1763 	payload_pattern = malloc(payload_size);
1764 	SPDK_CU_ASSERT_FATAL(payload_pattern != NULL);
1765 
1766 	/* Prepare a deterministic pattern to write: set every 64-bit word of page i to i + 1 */
1767 	for (i = 0; i < pages_per_payload; i++) {
1768 		for (j = 0; j < page_size / sizeof(uint64_t); j++) {
1769 			uint64_t *tmp;
1770 
1771 			tmp = (uint64_t *)payload_pattern;
1772 			tmp += ((page_size * i) / sizeof(uint64_t)) + j;
1773 			*tmp = i + 1;
1774 		}
1775 	}
1776 
1777 	channel = spdk_bs_alloc_io_channel(bs);
1778 	SPDK_CU_ASSERT_FATAL(channel != NULL);
1779 
1780 	/* Create blob */
1781 	ut_spdk_blob_opts_init(&opts);
1782 	opts.thin_provision = false;
1783 	opts.num_clusters = 5;
1784 
1785 	blob = ut_blob_create_and_open(bs, &opts);
1786 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
1787 
1788 	/* Initial read should return zeroed payload */
1789 	memset(payload_read, 0xFF, payload_size);
1790 	iov_read[0].iov_base = payload_read;
1791 	iov_read[0].iov_len = cluster_size * 3;
1792 	iov_read[1].iov_base = payload_read + cluster_size * 3;
1793 	iov_read[1].iov_len = cluster_size * 2;
1794 	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL);
1795 	poll_threads();
1796 	CU_ASSERT(g_bserrno == 0);
1797 	CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size));
1798 
1799 	/* The first iov fills the whole blob except the last page; the second iov writes the
1800 	 *  last page with the pattern. */
1801 	iov_write[0].iov_base = payload_pattern;
1802 	iov_write[0].iov_len = payload_size - page_size;
1803 	iov_write[1].iov_base = payload_pattern;
1804 	iov_write[1].iov_len = page_size;
1805 	spdk_blob_io_writev(blob, channel, iov_write, 2, 0, pages_per_payload, blob_op_complete, NULL);
1806 	poll_threads();
1807 	CU_ASSERT(g_bserrno == 0);
1808 
1809 	/* Read whole blob and check consistency */
1810 	memset(payload_read, 0xFF, payload_size);
1811 	iov_read[0].iov_base = payload_read;
1812 	iov_read[0].iov_len = cluster_size * 2;
1813 	iov_read[1].iov_base = payload_read + cluster_size * 2;
1814 	iov_read[1].iov_len = cluster_size * 3;
1815 	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL);
1816 	poll_threads();
1817 	CU_ASSERT(g_bserrno == 0);
1818 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size - page_size) == 0);
1819 	CU_ASSERT(memcmp(payload_pattern, payload_read + payload_size - page_size, page_size) == 0);
1820 
1821 	/* The first iov writes only the first page; the second iov fills the rest of the
1822 	 *  blob (everything except the first page) with the pattern. */
1823 	iov_write[0].iov_base = payload_pattern;
1824 	iov_write[0].iov_len = page_size;
1825 	iov_write[1].iov_base = payload_pattern;
1826 	iov_write[1].iov_len = payload_size - page_size;
1827 	spdk_blob_io_writev(blob, channel, iov_write, 2, 0, pages_per_payload, blob_op_complete, NULL);
1828 	poll_threads();
1829 	CU_ASSERT(g_bserrno == 0);
1830 
1831 	/* Read whole blob and check consistency */
1832 	memset(payload_read, 0xFF, payload_size);
1833 	iov_read[0].iov_base = payload_read;
1834 	iov_read[0].iov_len = cluster_size * 4;
1835 	iov_read[1].iov_base = payload_read + cluster_size * 4;
1836 	iov_read[1].iov_len = cluster_size;
1837 	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL);
1838 	poll_threads();
1839 	CU_ASSERT(g_bserrno == 0);
1840 	CU_ASSERT(memcmp(payload_pattern, payload_read + page_size, payload_size - page_size) == 0);
1841 	CU_ASSERT(memcmp(payload_pattern, payload_read, page_size) == 0);
1842 
1843 
1844 	/* Fill whole blob with a pattern (5 clusters) */
1845 
1846 	/* 1. Read test. */
1847 	_blob_io_write_no_split(blob, channel, payload_pattern, 0, pages_per_payload,
1848 				blob_op_complete, NULL);
1849 	poll_threads();
1850 	CU_ASSERT(g_bserrno == 0);
1851 
1852 	memset(payload_read, 0xFF, payload_size);
1853 	iov_read[0].iov_base = payload_read;
1854 	iov_read[0].iov_len = cluster_size;
1855 	iov_read[1].iov_base = payload_read + cluster_size;
1856 	iov_read[1].iov_len = cluster_size * 4;
1857 	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL);
1858 	poll_threads();
1859 	CU_ASSERT(g_bserrno == 0);
1860 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);
1861 
1862 	/* 2. Write test. */
1863 	iov_write[0].iov_base = payload_read;
1864 	iov_write[0].iov_len = cluster_size * 2;
1865 	iov_write[1].iov_base = payload_read + cluster_size * 2;
1866 	iov_write[1].iov_len = cluster_size * 3;
1867 	spdk_blob_io_writev(blob, channel, iov_write, 2, 0, pages_per_payload, blob_op_complete, NULL);
1868 	poll_threads();
1869 	CU_ASSERT(g_bserrno == 0);
1870 
1871 	memset(payload_read, 0xFF, payload_size);
1872 	_blob_io_read_no_split(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
1873 	poll_threads();
1874 	CU_ASSERT(g_bserrno == 0);
1875 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);
1876 
1877 	spdk_bs_free_io_channel(channel);
1878 	poll_threads();
1879 
1880 	g_blob = NULL;
1881 	g_blobid = 0;
1882 
1883 	free(payload_read);
1884 	free(payload_write);
1885 	free(payload_pattern);
1886 
1887 	ut_blob_close_and_delete(bs, blob);
1888 }
1889 
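/*
 * Verify that resizing a blob down to 0 clusters only unmaps the clusters that
 * are still recorded as allocated in the blob's metadata.
 */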
1890 static void
1891 blob_unmap(void)
1892 {
1893 	struct spdk_blob_store *bs = g_bs;
1894 	struct spdk_blob *blob;
1895 	struct spdk_io_channel *channel;
1896 	struct spdk_blob_opts opts;
1897 	uint8_t payload[4096];
1898 	int i;
1899 
1900 	channel = spdk_bs_alloc_io_channel(bs);
1901 	CU_ASSERT(channel != NULL);
1902 
1903 	ut_spdk_blob_opts_init(&opts);
1904 	opts.num_clusters = 10;
1905 
1906 	blob = ut_blob_create_and_open(bs, &opts);
1907 
1908 	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
1909 	poll_threads();
1910 	CU_ASSERT(g_bserrno == 0);
1911 
1912 	memset(payload, 0, sizeof(payload));
1913 	payload[0] = 0xFF;
1914 
1915 	/*
1916 	 * Set first byte of every cluster to 0xFF.
1917 	 * First cluster on device is reserved so let's start from cluster number 1
1918 	 */
1919 	for (i = 1; i < 11; i++) {
1920 		g_dev_buffer[i * SPDK_BLOB_OPTS_CLUSTER_SZ] = 0xFF;
1921 	}
1922 
1923 	/* Confirm writes */
1924 	for (i = 0; i < 10; i++) {
1925 		payload[0] = 0;
1926 		spdk_blob_io_read(blob, channel, &payload, i * SPDK_BLOB_OPTS_CLUSTER_SZ / 4096, 1,
1927 				  blob_op_complete, NULL);
1928 		poll_threads();
1929 		CU_ASSERT(g_bserrno == 0);
1930 		CU_ASSERT(payload[0] == 0xFF);
1931 	}
1932 
1933 	/* Mark some clusters as unallocated */
1934 	blob->active.clusters[1] = 0;
1935 	blob->active.clusters[2] = 0;
1936 	blob->active.clusters[3] = 0;
1937 	blob->active.clusters[6] = 0;
1938 	blob->active.clusters[8] = 0;
1939 
1940 	/* Unmap clusters by resizing to 0 */
1941 	spdk_blob_resize(blob, 0, blob_op_complete, NULL);
1942 	poll_threads();
1943 	CU_ASSERT(g_bserrno == 0);
1944 
1945 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
1946 	poll_threads();
1947 	CU_ASSERT(g_bserrno == 0);
1948 
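	/*
	 * Blob cluster index n was backed by device cluster n + 1 above, so the
	 * entries cleared in active.clusters (1, 2, 3, 6, 8) correspond to device
	 * clusters 2, 3, 4, 7 and 9.  Those were skipped by the unmap and should
	 * still hold 0xFF; the remaining data clusters should have been cleared.
	 */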
1949 	/* Confirm that only 'allocated' clusters were unmapped */
1950 	for (i = 1; i < 11; i++) {
1951 		switch (i) {
1952 		case 2:
1953 		case 3:
1954 		case 4:
1955 		case 7:
1956 		case 9:
1957 			CU_ASSERT(g_dev_buffer[i * SPDK_BLOB_OPTS_CLUSTER_SZ] == 0xFF);
1958 			break;
1959 		default:
1960 			CU_ASSERT(g_dev_buffer[i * SPDK_BLOB_OPTS_CLUSTER_SZ] == 0);
1961 			break;
1962 		}
1963 	}
1964 
1965 	spdk_bs_free_io_channel(channel);
1966 	poll_threads();
1967 
1968 	ut_blob_close_and_delete(bs, blob);
1969 }
1970 
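/*
 * Iterate over the blobs in a blobstore: first over an empty store, then over a
 * store containing a single blob.
 */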
1971 static void
1972 blob_iter(void)
1973 {
1974 	struct spdk_blob_store *bs = g_bs;
1975 	struct spdk_blob *blob;
1976 	spdk_blob_id blobid;
1977 	struct spdk_blob_opts blob_opts;
1978 
1979 	spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL);
1980 	poll_threads();
1981 	CU_ASSERT(g_blob == NULL);
1982 	CU_ASSERT(g_bserrno == -ENOENT);
1983 
1984 	ut_spdk_blob_opts_init(&blob_opts);
1985 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
1986 	poll_threads();
1987 	CU_ASSERT(g_bserrno == 0);
1988 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
1989 	blobid = g_blobid;
1990 
1991 	spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL);
1992 	poll_threads();
1993 	CU_ASSERT(g_blob != NULL);
1994 	CU_ASSERT(g_bserrno == 0);
1995 	blob = g_blob;
1996 	CU_ASSERT(spdk_blob_get_id(blob) == blobid);
1997 
1998 	spdk_bs_iter_next(bs, blob, blob_op_with_handle_complete, NULL);
1999 	poll_threads();
2000 	CU_ASSERT(g_blob == NULL);
2001 	CU_ASSERT(g_bserrno == -ENOENT);
2002 }
2003 
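/*
 * Exercise the xattr API: set/get/overwrite/remove, md_ro enforcement, name
 * enumeration, and internal xattrs that persist across a blobstore reload.
 */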
2004 static void
2005 blob_xattr(void)
2006 {
2007 	struct spdk_blob_store *bs = g_bs;
2008 	struct spdk_blob *blob = g_blob;
2009 	spdk_blob_id blobid = spdk_blob_get_id(blob);
2010 	uint64_t length;
2011 	int rc;
2012 	const char *name1, *name2;
2013 	const void *value;
2014 	size_t value_len;
2015 	struct spdk_xattr_names *names;
2016 
2017 	/* Test that set_xattr fails if md_ro flag is set. */
2018 	blob->md_ro = true;
2019 	rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
2020 	CU_ASSERT(rc == -EPERM);
2021 
2022 	blob->md_ro = false;
2023 	rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
2024 	CU_ASSERT(rc == 0);
2025 
2026 	length = 2345;
2027 	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
2028 	CU_ASSERT(rc == 0);
2029 
2030 	/* Overwrite "length" xattr. */
2031 	length = 3456;
2032 	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
2033 	CU_ASSERT(rc == 0);
2034 
2035 	/* get_xattr should still work even if md_ro flag is set. */
2036 	value = NULL;
2037 	blob->md_ro = true;
2038 	rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len);
2039 	CU_ASSERT(rc == 0);
2040 	SPDK_CU_ASSERT_FATAL(value != NULL);
2041 	CU_ASSERT(*(uint64_t *)value == length);
2042 	CU_ASSERT(value_len == 8);
2043 	blob->md_ro = false;
2044 
2045 	rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len);
2046 	CU_ASSERT(rc == -ENOENT);
2047 
2048 	names = NULL;
2049 	rc = spdk_blob_get_xattr_names(blob, &names);
2050 	CU_ASSERT(rc == 0);
2051 	SPDK_CU_ASSERT_FATAL(names != NULL);
2052 	CU_ASSERT(spdk_xattr_names_get_count(names) == 2);
2053 	name1 = spdk_xattr_names_get_name(names, 0);
2054 	SPDK_CU_ASSERT_FATAL(name1 != NULL);
2055 	CU_ASSERT(!strcmp(name1, "name") || !strcmp(name1, "length"));
2056 	name2 = spdk_xattr_names_get_name(names, 1);
2057 	SPDK_CU_ASSERT_FATAL(name2 != NULL);
2058 	CU_ASSERT(!strcmp(name2, "name") || !strcmp(name2, "length"));
2059 	CU_ASSERT(strcmp(name1, name2));
2060 	spdk_xattr_names_free(names);
2061 
2062 	/* Confirm that remove_xattr fails if md_ro is set to true. */
2063 	blob->md_ro = true;
2064 	rc = spdk_blob_remove_xattr(blob, "name");
2065 	CU_ASSERT(rc == -EPERM);
2066 
2067 	blob->md_ro = false;
2068 	rc = spdk_blob_remove_xattr(blob, "name");
2069 	CU_ASSERT(rc == 0);
2070 
2071 	rc = spdk_blob_remove_xattr(blob, "foobar");
2072 	CU_ASSERT(rc == -ENOENT);
2073 
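	/*
	 * Internal xattrs are kept separately from the public ones and are only
	 * reachable through blob_set_xattr()/blob_get_xattr_value() with the
	 * internal flag set to true; the public spdk_blob_get_xattr_value() call
	 * must not see them.
	 */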
2074 	/* Set internal xattr */
2075 	length = 7898;
2076 	rc = blob_set_xattr(blob, "internal", &length, sizeof(length), true);
2077 	CU_ASSERT(rc == 0);
2078 	rc = blob_get_xattr_value(blob, "internal", &value, &value_len, true);
2079 	CU_ASSERT(rc == 0);
2080 	CU_ASSERT(*(uint64_t *)value == length);
2081 	/* try to get public xattr with same name */
2082 	rc = spdk_blob_get_xattr_value(blob, "internal", &value, &value_len);
2083 	CU_ASSERT(rc != 0);
2084 	rc = blob_get_xattr_value(blob, "internal", &value, &value_len, false);
2085 	CU_ASSERT(rc != 0);
2086 	/* Check if SPDK_BLOB_INTERNAL_XATTR is set */
2087 	CU_ASSERT((blob->invalid_flags & SPDK_BLOB_INTERNAL_XATTR) ==
2088 		  SPDK_BLOB_INTERNAL_XATTR);
2089 
2090 	spdk_blob_close(blob, blob_op_complete, NULL);
2091 	poll_threads();
2092 
2093 	/* Check if xattrs are persisted */
2094 	ut_bs_reload(&bs, NULL);
2095 
2096 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
2097 	poll_threads();
2098 	CU_ASSERT(g_bserrno == 0);
2099 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
2100 	blob = g_blob;
2101 
2102 	rc = blob_get_xattr_value(blob, "internal", &value, &value_len, true);
2103 	CU_ASSERT(rc == 0);
2104 	CU_ASSERT(*(uint64_t *)value == length);
2105 
2106 	/* Try to get the internal xattr through the public call */
2107 	rc = spdk_blob_get_xattr_value(blob, "internal", &value, &value_len);
2108 	CU_ASSERT(rc != 0);
2109 
2110 	rc = blob_remove_xattr(blob, "internal", true);
2111 	CU_ASSERT(rc == 0);
2112 
2113 	CU_ASSERT((blob->invalid_flags & SPDK_BLOB_INTERNAL_XATTR) == 0);
2114 }
2115 
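/*
 * Force a blob's metadata to span more than one page and verify that the extra
 * metadata pages are released again once the blob is deleted.
 */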
2116 static void
2117 blob_parse_md(void)
2118 {
2119 	struct spdk_blob_store *bs = g_bs;
2120 	struct spdk_blob *blob;
2121 	int rc;
2122 	uint32_t used_pages;
2123 	size_t xattr_length;
2124 	char *xattr;
2125 
2126 	used_pages = spdk_bit_array_count_set(bs->used_md_pages);
2127 	blob = ut_blob_create_and_open(bs, NULL);
2128 
2129 	/* Set a large xattr to force more than 1 page of metadata. */
2130 	xattr_length = SPDK_BS_MAX_DESC_SIZE - sizeof(struct spdk_blob_md_descriptor_xattr) -
2131 		       strlen("large_xattr");
2132 	xattr = calloc(xattr_length, sizeof(char));
2133 	SPDK_CU_ASSERT_FATAL(xattr != NULL);
2134 	rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
2135 	free(xattr);
2136 	SPDK_CU_ASSERT_FATAL(rc == 0);
2137 
2138 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
2139 	poll_threads();
2140 
2141 	/* Delete the blob and verify that the used md page count returns to its value from before the blob was created. */
2142 	SPDK_CU_ASSERT_FATAL(used_pages != spdk_bit_array_count_set(bs->used_md_pages));
2143 	ut_blob_close_and_delete(bs, blob);
2144 	SPDK_CU_ASSERT_FATAL(used_pages == spdk_bit_array_count_set(bs->used_md_pages));
2145 }
2146 
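/*
 * Initialize a blobstore and persist a blob with xattrs, then exercise
 * spdk_bs_load(): rejection of invalid devices and options, reload of a valid
 * store, bdev size mismatches, and the size == 0 compatibility path.
 */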
2147 static void
2148 bs_load(void)
2149 {
2150 	struct spdk_blob_store *bs;
2151 	struct spdk_bs_dev *dev;
2152 	spdk_blob_id blobid;
2153 	struct spdk_blob *blob;
2154 	struct spdk_bs_super_block *super_block;
2155 	uint64_t length;
2156 	int rc;
2157 	const void *value;
2158 	size_t value_len;
2159 	struct spdk_bs_opts opts;
2160 	struct spdk_blob_opts blob_opts;
2161 
2162 	dev = init_dev();
2163 	spdk_bs_opts_init(&opts, sizeof(opts));
2164 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2165 
2166 	/* Initialize a new blob store */
2167 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2168 	poll_threads();
2169 	CU_ASSERT(g_bserrno == 0);
2170 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2171 	bs = g_bs;
2172 
2173 	/* Try to open a blobid that does not exist */
2174 	spdk_bs_open_blob(bs, 0, blob_op_with_handle_complete, NULL);
2175 	poll_threads();
2176 	CU_ASSERT(g_bserrno == -ENOENT);
2177 	CU_ASSERT(g_blob == NULL);
2178 
2179 	/* Create a blob */
2180 	blob = ut_blob_create_and_open(bs, NULL);
2181 	blobid = spdk_blob_get_id(blob);
2182 
2183 	/* Try again to open valid blob but without the upper bit set */
2184 	spdk_bs_open_blob(bs, blobid & 0xFFFFFFFF, blob_op_with_handle_complete, NULL);
2185 	poll_threads();
2186 	CU_ASSERT(g_bserrno == -ENOENT);
2187 	CU_ASSERT(g_blob == NULL);
2188 
2189 	/* Set some xattrs */
2190 	rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
2191 	CU_ASSERT(rc == 0);
2192 
2193 	length = 2345;
2194 	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
2195 	CU_ASSERT(rc == 0);
2196 
2197 	/* Resize the blob */
2198 	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
2199 	poll_threads();
2200 	CU_ASSERT(g_bserrno == 0);
2201 
2202 	spdk_blob_close(blob, blob_op_complete, NULL);
2203 	poll_threads();
2204 	CU_ASSERT(g_bserrno == 0);
2205 	blob = NULL;
2206 	g_blob = NULL;
2207 	g_blobid = SPDK_BLOBID_INVALID;
2208 
2209 	/* Unload the blob store */
2210 	spdk_bs_unload(bs, bs_op_complete, NULL);
2211 	poll_threads();
2212 	CU_ASSERT(g_bserrno == 0);
2213 	g_bs = NULL;
2214 	g_blob = NULL;
2215 	g_blobid = 0;
2216 
2217 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
2218 	CU_ASSERT(super_block->clean == 1);
2219 
2220 	/* Load should fail for device with an unsupported blocklen */
2221 	dev = init_dev();
2222 	dev->blocklen = SPDK_BS_PAGE_SIZE * 2;
2223 	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
2224 	poll_threads();
2225 	CU_ASSERT(g_bserrno == -EINVAL);
2226 
2227 	/* Load should fail when max_md_ops is set to zero */
2228 	dev = init_dev();
2229 	spdk_bs_opts_init(&opts, sizeof(opts));
2230 	opts.max_md_ops = 0;
2231 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2232 	poll_threads();
2233 	CU_ASSERT(g_bserrno == -EINVAL);
2234 
2235 	/* Load should fail when max_channel_ops is set to zero */
2236 	dev = init_dev();
2237 	spdk_bs_opts_init(&opts, sizeof(opts));
2238 	opts.max_channel_ops = 0;
2239 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2240 	poll_threads();
2241 	CU_ASSERT(g_bserrno == -EINVAL);
2242 
2243 	/* Load an existing blob store */
2244 	dev = init_dev();
2245 	spdk_bs_opts_init(&opts, sizeof(opts));
2246 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2247 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2248 	poll_threads();
2249 	CU_ASSERT(g_bserrno == 0);
2250 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2251 	bs = g_bs;
2252 
2253 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
2254 	CU_ASSERT(super_block->clean == 1);
2255 	CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen);
2256 
2257 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
2258 	poll_threads();
2259 	CU_ASSERT(g_bserrno == 0);
2260 	CU_ASSERT(g_blob != NULL);
2261 	blob = g_blob;
2262 
2263 	/* Verify that blobstore is marked dirty after first metadata sync */
2264 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
2265 	CU_ASSERT(super_block->clean == 1);
2266 
2267 	/* Get the xattrs */
2268 	value = NULL;
2269 	rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len);
2270 	CU_ASSERT(rc == 0);
2271 	SPDK_CU_ASSERT_FATAL(value != NULL);
2272 	CU_ASSERT(*(uint64_t *)value == length);
2273 	CU_ASSERT(value_len == 8);
2274 
2275 	rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len);
2276 	CU_ASSERT(rc == -ENOENT);
2277 
2278 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
2279 
2280 	spdk_blob_close(blob, blob_op_complete, NULL);
2281 	poll_threads();
2282 	CU_ASSERT(g_bserrno == 0);
2283 	blob = NULL;
2284 	g_blob = NULL;
2285 
2286 	spdk_bs_unload(bs, bs_op_complete, NULL);
2287 	poll_threads();
2288 	CU_ASSERT(g_bserrno == 0);
2289 	g_bs = NULL;
2290 
2291 	/* Load should fail: bdev size < saved size */
2292 	dev = init_dev();
2293 	dev->blockcnt /= 2;
2294 
2295 	spdk_bs_opts_init(&opts, sizeof(opts));
2296 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2297 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2298 	poll_threads();
2299 
2300 	CU_ASSERT(g_bserrno == -EILSEQ);
2301 
2302 	/* Load should succeed: bdev size > saved size */
2303 	dev = init_dev();
2304 	dev->blockcnt *= 4;
2305 
2306 	spdk_bs_opts_init(&opts, sizeof(opts));
2307 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2308 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2309 	poll_threads();
2310 	CU_ASSERT(g_bserrno == 0);
2311 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2312 	bs = g_bs;
2313 
2314 	CU_ASSERT(g_bserrno == 0);
2315 	spdk_bs_unload(bs, bs_op_complete, NULL);
2316 	poll_threads();
2317 
2318 
2319 	/* Test compatibility mode */
2320 
2321 	dev = init_dev();
2322 	super_block->size = 0;
2323 	super_block->crc = blob_md_page_calc_crc(super_block);
2324 
2325 	spdk_bs_opts_init(&opts, sizeof(opts));
2326 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2327 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2328 	poll_threads();
2329 	CU_ASSERT(g_bserrno == 0);
2330 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2331 	bs = g_bs;
2332 
2333 	/* Create a blob */
2334 	ut_spdk_blob_opts_init(&blob_opts);
2335 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
2336 	poll_threads();
2337 	CU_ASSERT(g_bserrno == 0);
2338 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
2339 
2340 	/* Blobstore should update number of blocks in super_block */
2341 	CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen);
2342 	CU_ASSERT(super_block->clean == 0);
2343 
2344 	spdk_bs_unload(bs, bs_op_complete, NULL);
2345 	poll_threads();
2346 	CU_ASSERT(g_bserrno == 0);
2347 	CU_ASSERT(super_block->clean == 1);
2348 	g_bs = NULL;
2349 
2350 }
2351 
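/*
 * Verify handling of the SNAPSHOT_PENDING_REMOVAL xattr during load: the
 * snapshot is kept (and the xattr dropped) while a blob still references it,
 * and the snapshot is removed once nothing points to it anymore.
 */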
2352 static void
2353 bs_load_pending_removal(void)
2354 {
2355 	struct spdk_blob_store *bs = g_bs;
2356 	struct spdk_blob_opts opts;
2357 	struct spdk_blob *blob, *snapshot;
2358 	spdk_blob_id blobid, snapshotid;
2359 	const void *value;
2360 	size_t value_len;
2361 	int rc;
2362 
2363 	/* Create blob */
2364 	ut_spdk_blob_opts_init(&opts);
2365 	opts.num_clusters = 10;
2366 
2367 	blob = ut_blob_create_and_open(bs, &opts);
2368 	blobid = spdk_blob_get_id(blob);
2369 
2370 	/* Create snapshot */
2371 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
2372 	poll_threads();
2373 	CU_ASSERT(g_bserrno == 0);
2374 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
2375 	snapshotid = g_blobid;
2376 
2377 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
2378 	poll_threads();
2379 	CU_ASSERT(g_bserrno == 0);
2380 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
2381 	snapshot = g_blob;
2382 
2383 	/* Set SNAPSHOT_PENDING_REMOVAL xattr */
2384 	snapshot->md_ro = false;
2385 	rc = blob_set_xattr(snapshot, SNAPSHOT_PENDING_REMOVAL, &blobid, sizeof(spdk_blob_id), true);
2386 	CU_ASSERT(rc == 0);
2387 	snapshot->md_ro = true;
2388 
2389 	spdk_blob_close(snapshot, blob_op_complete, NULL);
2390 	poll_threads();
2391 	CU_ASSERT(g_bserrno == 0);
2392 
2393 	spdk_blob_close(blob, blob_op_complete, NULL);
2394 	poll_threads();
2395 	CU_ASSERT(g_bserrno == 0);
2396 
2397 	/* Reload blobstore */
2398 	ut_bs_reload(&bs, NULL);
2399 
2400 	/* Snapshot should not be removed as blob is still pointing to it */
2401 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
2402 	poll_threads();
2403 	CU_ASSERT(g_bserrno == 0);
2404 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
2405 	snapshot = g_blob;
2406 
2407 	/* SNAPSHOT_PENDING_REMOVAL xattr should be removed during load */
2408 	rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_PENDING_REMOVAL, &value, &value_len);
2409 	CU_ASSERT(rc != 0);
2410 
2411 	/* Set SNAPSHOT_PENDING_REMOVAL xattr again */
2412 	snapshot->md_ro = false;
2413 	rc = blob_set_xattr(snapshot, SNAPSHOT_PENDING_REMOVAL, &blobid, sizeof(spdk_blob_id), true);
2414 	CU_ASSERT(rc == 0);
2415 	snapshot->md_ro = true;
2416 
2417 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
2418 	poll_threads();
2419 	CU_ASSERT(g_bserrno == 0);
2420 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
2421 	blob = g_blob;
2422 
2423 	/* Remove parent_id from blob by removing BLOB_SNAPSHOT xattr */
2424 	blob_remove_xattr(blob, BLOB_SNAPSHOT, true);
2425 
2426 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
2427 	poll_threads();
2428 	CU_ASSERT(g_bserrno == 0);
2429 
2430 	spdk_blob_close(snapshot, blob_op_complete, NULL);
2431 	poll_threads();
2432 	CU_ASSERT(g_bserrno == 0);
2433 
2434 	spdk_blob_close(blob, blob_op_complete, NULL);
2435 	poll_threads();
2436 	CU_ASSERT(g_bserrno == 0);
2437 
2438 	/* Reload blobstore */
2439 	ut_bs_reload(&bs, NULL);
2440 
2441 	/* Snapshot should be removed as blob is not pointing to it anymore */
2442 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
2443 	poll_threads();
2444 	CU_ASSERT(g_bserrno != 0);
2445 }
2446 
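/*
 * Initialize a blobstore with a non-default (4 MiB) cluster size and verify that
 * both the cluster size and the cluster count survive a reload.
 */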
2447 static void
2448 bs_load_custom_cluster_size(void)
2449 {
2450 	struct spdk_blob_store *bs;
2451 	struct spdk_bs_dev *dev;
2452 	struct spdk_bs_super_block *super_block;
2453 	struct spdk_bs_opts opts;
2454 	uint32_t custom_cluster_size = 4194304; /* 4MiB */
2455 	uint32_t cluster_sz;
2456 	uint64_t total_clusters;
2457 
2458 	dev = init_dev();
2459 	spdk_bs_opts_init(&opts, sizeof(opts));
2460 	opts.cluster_sz = custom_cluster_size;
2461 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2462 
2463 	/* Initialize a new blob store */
2464 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2465 	poll_threads();
2466 	CU_ASSERT(g_bserrno == 0);
2467 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2468 	bs = g_bs;
2469 	cluster_sz = bs->cluster_sz;
2470 	total_clusters = bs->total_clusters;
2471 
2472 	/* Unload the blob store */
2473 	spdk_bs_unload(bs, bs_op_complete, NULL);
2474 	poll_threads();
2475 	CU_ASSERT(g_bserrno == 0);
2476 	g_bs = NULL;
2477 	g_blob = NULL;
2478 	g_blobid = 0;
2479 
2480 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
2481 	CU_ASSERT(super_block->clean == 1);
2482 
2483 	/* Load an existing blob store */
2484 	dev = init_dev();
2485 	spdk_bs_opts_init(&opts, sizeof(opts));
2486 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2487 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2488 	poll_threads();
2489 	CU_ASSERT(g_bserrno == 0);
2490 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2491 	bs = g_bs;
2492 	/* Compare the cluster size and cluster count to the values recorded at initialization */
2493 	CU_ASSERT(cluster_sz == bs->cluster_sz);
2494 	CU_ASSERT(total_clusters == bs->total_clusters);
2495 
2496 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
2497 	CU_ASSERT(super_block->clean == 1);
2498 	CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen);
2499 
2500 	spdk_bs_unload(bs, bs_op_complete, NULL);
2501 	poll_threads();
2502 	CU_ASSERT(g_bserrno == 0);
2503 	CU_ASSERT(super_block->clean == 1);
2504 	g_bs = NULL;
2505 }
2506 
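/*
 * Verify bstype matching on load: a non-matching type is rejected, while an
 * empty (zeroed) type is accepted against any store.
 */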
2507 static void
2508 bs_type(void)
2509 {
2510 	struct spdk_blob_store *bs;
2511 	struct spdk_bs_dev *dev;
2512 	struct spdk_bs_opts opts;
2513 
2514 	dev = init_dev();
2515 	spdk_bs_opts_init(&opts, sizeof(opts));
2516 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2517 
2518 	/* Initialize a new blob store */
2519 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2520 	poll_threads();
2521 	CU_ASSERT(g_bserrno == 0);
2522 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2523 	bs = g_bs;
2524 
2525 	/* Unload the blob store */
2526 	spdk_bs_unload(bs, bs_op_complete, NULL);
2527 	poll_threads();
2528 	CU_ASSERT(g_bserrno == 0);
2529 	g_bs = NULL;
2530 	g_blob = NULL;
2531 	g_blobid = 0;
2532 
2533 	/* Load a non-existent blobstore type */
2534 	dev = init_dev();
2535 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "NONEXISTING");
2536 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2537 	poll_threads();
2538 	CU_ASSERT(g_bserrno != 0);
2539 
2540 	/* Load with empty blobstore type */
2541 	dev = init_dev();
2542 	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
2543 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2544 	poll_threads();
2545 	CU_ASSERT(g_bserrno == 0);
2546 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2547 	bs = g_bs;
2548 
2549 	spdk_bs_unload(bs, bs_op_complete, NULL);
2550 	poll_threads();
2551 	CU_ASSERT(g_bserrno == 0);
2552 	g_bs = NULL;
2553 
2554 	/* Initialize a new blob store with empty bstype */
2555 	dev = init_dev();
2556 	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
2557 	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
2558 	poll_threads();
2559 	CU_ASSERT(g_bserrno == 0);
2560 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2561 	bs = g_bs;
2562 
2563 	spdk_bs_unload(bs, bs_op_complete, NULL);
2564 	poll_threads();
2565 	CU_ASSERT(g_bserrno == 0);
2566 	g_bs = NULL;
2567 
2568 	/* Load a non-existent blobstore type */
2569 	dev = init_dev();
2570 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "NONEXISTING");
2571 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2572 	poll_threads();
2573 	CU_ASSERT(g_bserrno != 0);
2574 
2575 	/* Load with empty blobstore type */
2576 	dev = init_dev();
2577 	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
2578 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2579 	poll_threads();
2580 	CU_ASSERT(g_bserrno == 0);
2581 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2582 	bs = g_bs;
2583 
2584 	spdk_bs_unload(bs, bs_op_complete, NULL);
2585 	poll_threads();
2586 	CU_ASSERT(g_bserrno == 0);
2587 	g_bs = NULL;
2588 }
2589 
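/*
 * Reject a super block whose version is newer than supported, and accept a
 * hand-built version 1 super block.
 */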
2590 static void
2591 bs_super_block(void)
2592 {
2593 	struct spdk_blob_store *bs;
2594 	struct spdk_bs_dev *dev;
2595 	struct spdk_bs_super_block *super_block;
2596 	struct spdk_bs_opts opts;
2597 	struct spdk_bs_super_block_ver1 super_block_v1;
2598 
2599 	dev = init_dev();
2600 	spdk_bs_opts_init(&opts, sizeof(opts));
2601 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2602 
2603 	/* Initialize a new blob store */
2604 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2605 	poll_threads();
2606 	CU_ASSERT(g_bserrno == 0);
2607 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2608 	bs = g_bs;
2609 
2610 	/* Unload the blob store */
2611 	spdk_bs_unload(bs, bs_op_complete, NULL);
2612 	poll_threads();
2613 	CU_ASSERT(g_bserrno == 0);
2614 	g_bs = NULL;
2615 	g_blob = NULL;
2616 	g_blobid = 0;
2617 
2618 	/* Load an existing blob store with version newer than supported */
2619 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
2620 	super_block->version++;
2621 
2622 	dev = init_dev();
2623 	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
2624 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2625 	poll_threads();
2626 	CU_ASSERT(g_bserrno != 0);
2627 
2628 	/* Create a new blob store with super block version 1 */
2629 	dev = init_dev();
2630 	super_block_v1.version = 1;
2631 	memcpy(super_block_v1.signature, "SPDKBLOB", sizeof(super_block_v1.signature));
2632 	super_block_v1.length = 0x1000;
2633 	super_block_v1.clean = 1;
2634 	super_block_v1.super_blob = 0xFFFFFFFFFFFFFFFF;
2635 	super_block_v1.cluster_size = 0x100000;
2636 	super_block_v1.used_page_mask_start = 0x01;
2637 	super_block_v1.used_page_mask_len = 0x01;
2638 	super_block_v1.used_cluster_mask_start = 0x02;
2639 	super_block_v1.used_cluster_mask_len = 0x01;
2640 	super_block_v1.md_start = 0x03;
2641 	super_block_v1.md_len = 0x40;
2642 	memset(super_block_v1.reserved, 0, 4036);
2643 	super_block_v1.crc = blob_md_page_calc_crc(&super_block_v1);
2644 	memcpy(g_dev_buffer, &super_block_v1, sizeof(struct spdk_bs_super_block_ver1));
2645 
2646 	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
2647 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2648 	poll_threads();
2649 	CU_ASSERT(g_bserrno == 0);
2650 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2651 	bs = g_bs;
2652 
2653 	spdk_bs_unload(bs, bs_op_complete, NULL);
2654 	poll_threads();
2655 	CU_ASSERT(g_bserrno == 0);
2656 	g_bs = NULL;
2657 }
2658 
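/*
 * Load a blobstore whose super block is marked unclean (clean == 0), forcing the
 * used cluster count to be recovered from metadata instead of the cluster mask.
 */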
2659 static void
2660 bs_test_recover_cluster_count(void)
2661 {
2662 	struct spdk_blob_store *bs;
2663 	struct spdk_bs_dev *dev;
2664 	struct spdk_bs_super_block super_block;
2665 	struct spdk_bs_opts opts;
2666 
2667 	dev = init_dev();
2668 	spdk_bs_opts_init(&opts, sizeof(opts));
2669 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2670 
2671 	super_block.version = 3;
2672 	memcpy(super_block.signature, "SPDKBLOB", sizeof(super_block.signature));
2673 	super_block.length = 0x1000;
2674 	super_block.clean = 0;
2675 	super_block.super_blob = 0xFFFFFFFFFFFFFFFF;
2676 	super_block.cluster_size = 4096;
2677 	super_block.used_page_mask_start = 0x01;
2678 	super_block.used_page_mask_len = 0x01;
2679 	super_block.used_cluster_mask_start = 0x02;
2680 	super_block.used_cluster_mask_len = 0x01;
2681 	super_block.used_blobid_mask_start = 0x03;
2682 	super_block.used_blobid_mask_len = 0x01;
2683 	super_block.md_start = 0x04;
2684 	super_block.md_len = 0x40;
2685 	memset(super_block.bstype.bstype, 0, sizeof(super_block.bstype.bstype));
2686 	super_block.size = dev->blockcnt * dev->blocklen;
2687 	super_block.io_unit_size = 0x1000;
2688 	memset(super_block.reserved, 0, 4000);
2689 	super_block.crc = blob_md_page_calc_crc(&super_block);
2690 	memcpy(g_dev_buffer, &super_block, sizeof(struct spdk_bs_super_block));
2691 
2692 	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
2693 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2694 	poll_threads();
2695 	CU_ASSERT(g_bserrno == 0);
2696 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2697 	bs = g_bs;
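	/*
	 * With a 4096-byte cluster the metadata region occupies one cluster per page,
	 * so after recovery the clusters up to md_start + md_len are accounted as used
	 * and everything beyond them is free.
	 */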
2698 	CU_ASSERT(bs->num_free_clusters == bs->total_clusters - (super_block.md_start +
2699 			super_block.md_len));
2700 
2701 	spdk_bs_unload(bs, bs_op_complete, NULL);
2702 	poll_threads();
2703 	CU_ASSERT(g_bserrno == 0);
2704 	g_bs = NULL;
2705 }
2706 
2707 /*
2708  * Try to unload a blobstore while a blob is still open; the unload should fail
 *  with -EBUSY and only succeed once the blob is closed.
2709  */
2710 static void
2711 bs_unload(void)
2712 {
2713 	struct spdk_blob_store *bs = g_bs;
2714 	struct spdk_blob *blob;
2715 
2716 	/* Create a blob and open it. */
2717 	blob = ut_blob_create_and_open(bs, NULL);
2718 
2719 	/* Try to unload blobstore, should fail with open blob */
2720 	g_bserrno = -1;
2721 	spdk_bs_unload(bs, bs_op_complete, NULL);
2722 	poll_threads();
2723 	CU_ASSERT(g_bserrno == -EBUSY);
2724 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2725 
2726 	/* Close the blob, then successfully unload blobstore */
2727 	g_bserrno = -1;
2728 	spdk_blob_close(blob, blob_op_complete, NULL);
2729 	poll_threads();
2730 	CU_ASSERT(g_bserrno == 0);
2731 }
2732 
2733 /*
2734  * Create a blobstore with a cluster size different than the default, and ensure it is
2735  *  persisted.
2736  */
2737 static void
2738 bs_cluster_sz(void)
2739 {
2740 	struct spdk_blob_store *bs;
2741 	struct spdk_bs_dev *dev;
2742 	struct spdk_bs_opts opts;
2743 	uint32_t cluster_sz;
2744 
2745 	/* Set cluster size to zero */
2746 	dev = init_dev();
2747 	spdk_bs_opts_init(&opts, sizeof(opts));
2748 	opts.cluster_sz = 0;
2749 
2750 	/* Initialize a new blob store */
2751 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2752 	poll_threads();
2753 	CU_ASSERT(g_bserrno == -EINVAL);
2754 	SPDK_CU_ASSERT_FATAL(g_bs == NULL);
2755 
2756 	/*
2757 	 * Set the cluster size equal to the blobstore page size;
2758 	 * to be accepted it must be at least twice the blobstore page size.
2759 	 */
2760 	dev = init_dev();
2761 	spdk_bs_opts_init(&opts, sizeof(opts));
2762 	opts.cluster_sz = SPDK_BS_PAGE_SIZE;
2763 
2764 	/* Initialize a new blob store */
2765 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2766 	poll_threads();
2767 	CU_ASSERT(g_bserrno == -ENOMEM);
2768 	SPDK_CU_ASSERT_FATAL(g_bs == NULL);
2769 
2770 	/*
2771 	 * Set the cluster size lower than the blobstore page size;
2772 	 * again, it must be at least twice the blobstore page size to be accepted.
2773 	 */
2774 	dev = init_dev();
2775 	spdk_bs_opts_init(&opts, sizeof(opts));
2776 	opts.cluster_sz = SPDK_BS_PAGE_SIZE - 1;
2777 
2778 	/* Initialize a new blob store */
2779 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2780 	poll_threads();
2781 	CU_ASSERT(g_bserrno == -EINVAL);
2782 	SPDK_CU_ASSERT_FATAL(g_bs == NULL);
2783 
2784 	/* Set cluster size to twice the default */
2785 	dev = init_dev();
2786 	spdk_bs_opts_init(&opts, sizeof(opts));
2787 	opts.cluster_sz *= 2;
2788 	cluster_sz = opts.cluster_sz;
2789 
2790 	/* Initialize a new blob store */
2791 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2792 	poll_threads();
2793 	CU_ASSERT(g_bserrno == 0);
2794 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2795 	bs = g_bs;
2796 
2797 	CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz);
2798 
2799 	ut_bs_reload(&bs, &opts);
2800 
2801 	CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz);
2802 
2803 	spdk_bs_unload(bs, bs_op_complete, NULL);
2804 	poll_threads();
2805 	CU_ASSERT(g_bserrno == 0);
2806 	g_bs = NULL;
2807 }
2808 
2809 /*
2810  * Create a blobstore, reload it and ensure total usable cluster count
2811  *  stays the same.
2812  */
2813 static void
2814 bs_usable_clusters(void)
2815 {
2816 	struct spdk_blob_store *bs = g_bs;
2817 	struct spdk_blob *blob;
2818 	uint32_t clusters;
2819 	int i;
2820 
2821 
2822 	clusters = spdk_bs_total_data_cluster_count(bs);
2823 
2824 	ut_bs_reload(&bs, NULL);
2825 
2826 	CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters);
2827 
2828 	/* Create and resize blobs to make sure that usable cluster count won't change */
2829 	for (i = 0; i < 4; i++) {
2830 		g_bserrno = -1;
2831 		g_blobid = SPDK_BLOBID_INVALID;
2832 		blob = ut_blob_create_and_open(bs, NULL);
2833 
2834 		spdk_blob_resize(blob, 10, blob_op_complete, NULL);
2835 		poll_threads();
2836 		CU_ASSERT(g_bserrno == 0);
2837 
2838 		g_bserrno = -1;
2839 		spdk_blob_close(blob, blob_op_complete, NULL);
2840 		poll_threads();
2841 		CU_ASSERT(g_bserrno == 0);
2842 
2843 		CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters);
2844 	}
2845 
2846 	/* Reload the blob store to make sure that nothing changed */
2847 	ut_bs_reload(&bs, NULL);
2848 
2849 	CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters);
2850 }
2851 
2852 /*
2853  * Test resizing of the metadata blob.  This requires creating enough blobs
2854  *  so that one cluster is not enough to fit the metadata for those blobs.
2855  *  To induce this condition to happen more quickly, we reduce the cluster
2856  *  size to 16KB, which means only 4 4KB blob metadata pages can fit.
2857  */
2858 static void
2859 bs_resize_md(void)
2860 {
2861 	struct spdk_blob_store *bs;
2862 	const int CLUSTER_PAGE_COUNT = 4;
2863 	const int NUM_BLOBS = CLUSTER_PAGE_COUNT * 4;
2864 	struct spdk_bs_dev *dev;
2865 	struct spdk_bs_opts opts;
2866 	struct spdk_blob *blob;
2867 	struct spdk_blob_opts blob_opts;
2868 	uint32_t cluster_sz;
2869 	spdk_blob_id blobids[NUM_BLOBS];
2870 	int i;
2871 
2872 
2873 	dev = init_dev();
2874 	spdk_bs_opts_init(&opts, sizeof(opts));
2875 	opts.cluster_sz = CLUSTER_PAGE_COUNT * 4096;
2876 	cluster_sz = opts.cluster_sz;
2877 
2878 	/* Initialize a new blob store */
2879 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2880 	poll_threads();
2881 	CU_ASSERT(g_bserrno == 0);
2882 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2883 	bs = g_bs;
2884 
2885 	CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz);
2886 
2887 	ut_spdk_blob_opts_init(&blob_opts);
2888 
2889 	for (i = 0; i < NUM_BLOBS; i++) {
2890 		g_bserrno = -1;
2891 		g_blobid = SPDK_BLOBID_INVALID;
2892 		spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
2893 		poll_threads();
2894 		CU_ASSERT(g_bserrno == 0);
2895 		CU_ASSERT(g_blobid !=  SPDK_BLOBID_INVALID);
2896 		blobids[i] = g_blobid;
2897 	}
2898 
2899 	ut_bs_reload(&bs, &opts);
2900 
2901 	CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz);
2902 
2903 	for (i = 0; i < NUM_BLOBS; i++) {
2904 		g_bserrno = -1;
2905 		g_blob = NULL;
2906 		spdk_bs_open_blob(bs, blobids[i], blob_op_with_handle_complete, NULL);
2907 		poll_threads();
2908 		CU_ASSERT(g_bserrno == 0);
2909 		CU_ASSERT(g_blob !=  NULL);
2910 		blob = g_blob;
2911 		g_bserrno = -1;
2912 		spdk_blob_close(blob, blob_op_complete, NULL);
2913 		poll_threads();
2914 		CU_ASSERT(g_bserrno == 0);
2915 	}
2916 
2917 	spdk_bs_unload(bs, bs_op_complete, NULL);
2918 	poll_threads();
2919 	CU_ASSERT(g_bserrno == 0);
2920 	g_bs = NULL;
2921 }
2922 
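/*
 * Destroy a blobstore and confirm that the device can no longer be loaded as one.
 */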
2923 static void
2924 bs_destroy(void)
2925 {
2926 	struct spdk_blob_store *bs;
2927 	struct spdk_bs_dev *dev;
2928 
2929 	/* Initialize a new blob store */
2930 	dev = init_dev();
2931 	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
2932 	poll_threads();
2933 	CU_ASSERT(g_bserrno == 0);
2934 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2935 	bs = g_bs;
2936 
2937 	/* Destroy the blob store */
2938 	g_bserrno = -1;
2939 	spdk_bs_destroy(bs, bs_op_complete, NULL);
2940 	poll_threads();
2941 	CU_ASSERT(g_bserrno == 0);
2942 
2943 	/* Loading a non-existent blob store should fail. */
2944 	g_bs = NULL;
2945 	dev = init_dev();
2946 
2947 	g_bserrno = 0;
2948 	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
2949 	poll_threads();
2950 	CU_ASSERT(g_bserrno != 0);
2951 }
2952 
2953 /* Try to hit all of the corner cases associated with serializing
2954  * a blob to disk
2955  */
2956 static void
2957 blob_serialize_test(void)
2958 {
2959 	struct spdk_bs_dev *dev;
2960 	struct spdk_bs_opts opts;
2961 	struct spdk_blob_store *bs;
2962 	spdk_blob_id blobid[2];
2963 	struct spdk_blob *blob[2];
2964 	uint64_t i;
2965 	char *value;
2966 	int rc;
2967 
2968 	dev = init_dev();
2969 
2970 	/* Initialize a new blobstore with very small clusters */
2971 	spdk_bs_opts_init(&opts, sizeof(opts));
2972 	opts.cluster_sz = dev->blocklen * 8;
2973 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2974 	poll_threads();
2975 	CU_ASSERT(g_bserrno == 0);
2976 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2977 	bs = g_bs;
2978 
2979 	/* Create and open two blobs */
2980 	for (i = 0; i < 2; i++) {
2981 		blob[i] = ut_blob_create_and_open(bs, NULL);
2982 		blobid[i] = spdk_blob_get_id(blob[i]);
2983 
2984 		/* Set a fairly large xattr on both blobs to eat up
2985 		 * metadata space
2986 		 */
2987 		value = calloc(dev->blocklen - 64, sizeof(char));
2988 		SPDK_CU_ASSERT_FATAL(value != NULL);
2989 		memset(value, i, dev->blocklen / 2);
2990 		rc = spdk_blob_set_xattr(blob[i], "name", value, dev->blocklen - 64);
2991 		CU_ASSERT(rc == 0);
2992 		free(value);
2993 	}
2994 
2995 	/* Resize the blobs, alternating 1 cluster at a time.
2996 	 * This thwarts run length encoding and will cause spill
2997 	 * over of the extents.
2998 	 */
2999 	for (i = 0; i < 6; i++) {
3000 		spdk_blob_resize(blob[i % 2], (i / 2) + 1, blob_op_complete, NULL);
3001 		poll_threads();
3002 		CU_ASSERT(g_bserrno == 0);
3003 	}
3004 
3005 	for (i = 0; i < 2; i++) {
3006 		spdk_blob_sync_md(blob[i], blob_op_complete, NULL);
3007 		poll_threads();
3008 		CU_ASSERT(g_bserrno == 0);
3009 	}
3010 
3011 	/* Close the blobs */
3012 	for (i = 0; i < 2; i++) {
3013 		spdk_blob_close(blob[i], blob_op_complete, NULL);
3014 		poll_threads();
3015 		CU_ASSERT(g_bserrno == 0);
3016 	}
3017 
3018 	ut_bs_reload(&bs, &opts);
3019 
3020 	for (i = 0; i < 2; i++) {
3021 		blob[i] = NULL;
3022 
3023 		spdk_bs_open_blob(bs, blobid[i], blob_op_with_handle_complete, NULL);
3024 		poll_threads();
3025 		CU_ASSERT(g_bserrno == 0);
3026 		CU_ASSERT(g_blob != NULL);
3027 		blob[i] = g_blob;
3028 
3029 		CU_ASSERT(spdk_blob_get_num_clusters(blob[i]) == 3);
3030 
3031 		spdk_blob_close(blob[i], blob_op_complete, NULL);
3032 		poll_threads();
3033 		CU_ASSERT(g_bserrno == 0);
3034 	}
3035 
3036 	spdk_bs_unload(bs, bs_op_complete, NULL);
3037 	poll_threads();
3038 	CU_ASSERT(g_bserrno == 0);
3039 	g_bs = NULL;
3040 }
3041 
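/*
 * Corrupt the CRC of a blob's metadata page and confirm that both opening and
 * deleting the blob fail with -EINVAL.
 */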
3042 static void
3043 blob_crc(void)
3044 {
3045 	struct spdk_blob_store *bs = g_bs;
3046 	struct spdk_blob *blob;
3047 	spdk_blob_id blobid;
3048 	uint32_t page_num;
3049 	int index;
3050 	struct spdk_blob_md_page *page;
3051 
3052 	blob = ut_blob_create_and_open(bs, NULL);
3053 	blobid = spdk_blob_get_id(blob);
3054 
3055 	spdk_blob_close(blob, blob_op_complete, NULL);
3056 	poll_threads();
3057 	CU_ASSERT(g_bserrno == 0);
3058 
3059 	page_num = bs_blobid_to_page(blobid);
3060 	index = DEV_BUFFER_BLOCKLEN * (bs->md_start + page_num);
3061 	page = (struct spdk_blob_md_page *)&g_dev_buffer[index];
3062 	page->crc = 0;
3063 
3064 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
3065 	poll_threads();
3066 	CU_ASSERT(g_bserrno == -EINVAL);
3067 	CU_ASSERT(g_blob == NULL);
3068 	g_bserrno = 0;
3069 
3070 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
3071 	poll_threads();
3072 	CU_ASSERT(g_bserrno == -EINVAL);
3073 }
3074 
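/*
 * Zero out the super block CRC and confirm that loading the blobstore fails with
 * -EILSEQ.
 */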
3075 static void
3076 super_block_crc(void)
3077 {
3078 	struct spdk_blob_store *bs;
3079 	struct spdk_bs_dev *dev;
3080 	struct spdk_bs_super_block *super_block;
3081 
3082 	dev = init_dev();
3083 	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
3084 	poll_threads();
3085 	CU_ASSERT(g_bserrno == 0);
3086 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3087 	bs = g_bs;
3088 
3089 	spdk_bs_unload(bs, bs_op_complete, NULL);
3090 	poll_threads();
3091 	CU_ASSERT(g_bserrno == 0);
3092 	g_bs = NULL;
3093 
3094 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
3095 	super_block->crc = 0;
3096 	dev = init_dev();
3097 
3098 	/* Load an existing blob store */
3099 	g_bserrno = 0;
3100 	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
3101 	poll_threads();
3102 	CU_ASSERT(g_bserrno == -EILSEQ);
3103 }
3104 
3105 /* For the blob dirty shutdown test we run the following sub-cases:
3106  * 1 Initialize a new blob store and create one super blob with some xattrs, then
3107  *   dirty-shutdown, reload the blob store and verify the xattrs.
3108  * 2 Resize the blob from 10 clusters to 20 clusters and then dirty-shutdown,
3109  *   reload the blob store and verify the cluster count.
3110  * 3 Create a second blob and then dirty-shutdown, reload the blob store
3111  *   and verify the second blob.
3112  * 4 Delete the second blob and then dirty-shutdown, reload the blob store
3113  *   and verify that the second blob is invalid.
3114  * 5 Create the second blob again and also create a third blob, corrupt the
3115  *   md of the second blob, and then dirty-shutdown; reload the blob store and
3116  *   verify that the second blob is invalid and that the third blob is
3117  *   still correct.
3118  */
3119 static void
3120 blob_dirty_shutdown(void)
3121 {
3122 	int rc;
3123 	int index;
3124 	struct spdk_blob_store *bs = g_bs;
3125 	spdk_blob_id blobid1, blobid2, blobid3;
3126 	struct spdk_blob *blob = g_blob;
3127 	uint64_t length;
3128 	uint64_t free_clusters;
3129 	const void *value;
3130 	size_t value_len;
3131 	uint32_t page_num;
3132 	struct spdk_blob_md_page *page;
3133 	struct spdk_blob_opts blob_opts;
3134 
3135 	/* Create first blob */
3136 	blobid1 = spdk_blob_get_id(blob);
3137 
3138 	/* Set some xattrs */
3139 	rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
3140 	CU_ASSERT(rc == 0);
3141 
3142 	length = 2345;
3143 	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
3144 	CU_ASSERT(rc == 0);
3145 
3146 	/* Put an xattr that fills exactly one page.
3147 	 * This results in additional pages being added to the MD:
3148 	 * the first holds the flags and the smaller xattrs, the second the large xattr,
3149 	 * and the third just the extents.
3150 	 */
3151 	size_t xattr_length = 4072 - sizeof(struct spdk_blob_md_descriptor_xattr) -
3152 			      strlen("large_xattr");
3153 	char *xattr = calloc(xattr_length, sizeof(char));
3154 	SPDK_CU_ASSERT_FATAL(xattr != NULL);
3155 	rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
3156 	free(xattr);
3157 	SPDK_CU_ASSERT_FATAL(rc == 0);
3158 
3159 	/* Resize the blob */
3160 	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
3161 	poll_threads();
3162 	CU_ASSERT(g_bserrno == 0);
3163 
3164 	/* Set the blob as the super blob */
3165 	spdk_bs_set_super(bs, blobid1, blob_op_complete, NULL);
3166 	poll_threads();
3167 	CU_ASSERT(g_bserrno == 0);
3168 
3169 	free_clusters = spdk_bs_free_cluster_count(bs);
3170 
3171 	spdk_blob_close(blob, blob_op_complete, NULL);
3172 	poll_threads();
3173 	CU_ASSERT(g_bserrno == 0);
3174 	blob = NULL;
3175 	g_blob = NULL;
3176 	g_blobid = SPDK_BLOBID_INVALID;
3177 
3178 	ut_bs_dirty_load(&bs, NULL);
3179 
3180 	/* Get the super blob */
3181 	spdk_bs_get_super(bs, blob_op_with_id_complete, NULL);
3182 	poll_threads();
3183 	CU_ASSERT(g_bserrno == 0);
3184 	CU_ASSERT(blobid1 == g_blobid);
3185 
3186 	spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL);
3187 	poll_threads();
3188 	CU_ASSERT(g_bserrno == 0);
3189 	CU_ASSERT(g_blob != NULL);
3190 	blob = g_blob;
3191 
3192 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3193 
3194 	/* Get the xattrs */
3195 	value = NULL;
3196 	rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len);
3197 	CU_ASSERT(rc == 0);
3198 	SPDK_CU_ASSERT_FATAL(value != NULL);
3199 	CU_ASSERT(*(uint64_t *)value == length);
3200 	CU_ASSERT(value_len == 8);
3201 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
3202 
3203 	/* Resize the blob */
3204 	spdk_blob_resize(blob, 20, blob_op_complete, NULL);
3205 	poll_threads();
3206 	CU_ASSERT(g_bserrno == 0);
3207 
3208 	free_clusters = spdk_bs_free_cluster_count(bs);
3209 
3210 	spdk_blob_close(blob, blob_op_complete, NULL);
3211 	poll_threads();
3212 	CU_ASSERT(g_bserrno == 0);
3213 	blob = NULL;
3214 	g_blob = NULL;
3215 	g_blobid = SPDK_BLOBID_INVALID;
3216 
3217 	ut_bs_dirty_load(&bs, NULL);
3218 
3219 	spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL);
3220 	poll_threads();
3221 	CU_ASSERT(g_bserrno == 0);
3222 	CU_ASSERT(g_blob != NULL);
3223 	blob = g_blob;
3224 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 20);
3225 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3226 
3227 	spdk_blob_close(blob, blob_op_complete, NULL);
3228 	poll_threads();
3229 	CU_ASSERT(g_bserrno == 0);
3230 	blob = NULL;
3231 	g_blob = NULL;
3232 	g_blobid = SPDK_BLOBID_INVALID;
3233 
3234 	/* Create second blob */
3235 	blob = ut_blob_create_and_open(bs, NULL);
3236 	blobid2 = spdk_blob_get_id(blob);
3237 
3238 	/* Set some xattrs */
3239 	rc = spdk_blob_set_xattr(blob, "name", "log1.txt", strlen("log1.txt") + 1);
3240 	CU_ASSERT(rc == 0);
3241 
3242 	length = 5432;
3243 	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
3244 	CU_ASSERT(rc == 0);
3245 
3246 	/* Resize the blob */
3247 	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
3248 	poll_threads();
3249 	CU_ASSERT(g_bserrno == 0);
3250 
3251 	free_clusters = spdk_bs_free_cluster_count(bs);
3252 
3253 	spdk_blob_close(blob, blob_op_complete, NULL);
3254 	poll_threads();
3255 	CU_ASSERT(g_bserrno == 0);
3256 	blob = NULL;
3257 	g_blob = NULL;
3258 	g_blobid = SPDK_BLOBID_INVALID;
3259 
3260 	ut_bs_dirty_load(&bs, NULL);
3261 
3262 	spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL);
3263 	poll_threads();
3264 	CU_ASSERT(g_bserrno == 0);
3265 	CU_ASSERT(g_blob != NULL);
3266 	blob = g_blob;
3267 
3268 	/* Get the xattrs */
3269 	value = NULL;
3270 	rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len);
3271 	CU_ASSERT(rc == 0);
3272 	SPDK_CU_ASSERT_FATAL(value != NULL);
3273 	CU_ASSERT(*(uint64_t *)value == length);
3274 	CU_ASSERT(value_len == 8);
3275 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
3276 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3277 
3278 	ut_blob_close_and_delete(bs, blob);
3279 
3280 	free_clusters = spdk_bs_free_cluster_count(bs);
3281 
3282 	ut_bs_dirty_load(&bs, NULL);
3283 
3284 	spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL);
3285 	poll_threads();
3286 	CU_ASSERT(g_bserrno != 0);
3287 	CU_ASSERT(g_blob == NULL);
3288 
3289 	spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL);
3290 	poll_threads();
3291 	CU_ASSERT(g_bserrno == 0);
3292 	CU_ASSERT(g_blob != NULL);
3293 	blob = g_blob;
3294 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3295 	spdk_blob_close(blob, blob_op_complete, NULL);
3296 	poll_threads();
3297 	CU_ASSERT(g_bserrno == 0);
3298 
3299 	ut_bs_reload(&bs, NULL);
3300 
3301 	/* Create second blob */
3302 	ut_spdk_blob_opts_init(&blob_opts);
3303 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
3304 	poll_threads();
3305 	CU_ASSERT(g_bserrno == 0);
3306 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
3307 	blobid2 = g_blobid;
3308 
3309 	/* Create third blob */
3310 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
3311 	poll_threads();
3312 	CU_ASSERT(g_bserrno == 0);
3313 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
3314 	blobid3 = g_blobid;
3315 
3316 	spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL);
3317 	poll_threads();
3318 	CU_ASSERT(g_bserrno == 0);
3319 	CU_ASSERT(g_blob != NULL);
3320 	blob = g_blob;
3321 
3322 	/* Set some xattrs for second blob */
3323 	rc = spdk_blob_set_xattr(blob, "name", "log1.txt", strlen("log1.txt") + 1);
3324 	CU_ASSERT(rc == 0);
3325 
3326 	length = 5432;
3327 	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
3328 	CU_ASSERT(rc == 0);
3329 
3330 	spdk_blob_close(blob, blob_op_complete, NULL);
3331 	poll_threads();
3332 	CU_ASSERT(g_bserrno == 0);
3333 	blob = NULL;
3334 	g_blob = NULL;
3335 	g_blobid = SPDK_BLOBID_INVALID;
3336 
3337 	spdk_bs_open_blob(bs, blobid3, blob_op_with_handle_complete, NULL);
3338 	poll_threads();
3339 	CU_ASSERT(g_bserrno == 0);
3340 	CU_ASSERT(g_blob != NULL);
3341 	blob = g_blob;
3342 
3343 	/* Set some xattrs for third blob */
3344 	rc = spdk_blob_set_xattr(blob, "name", "log2.txt", strlen("log2.txt") + 1);
3345 	CU_ASSERT(rc == 0);
3346 
3347 	length = 5432;
3348 	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
3349 	CU_ASSERT(rc == 0);
3350 
3351 	spdk_blob_close(blob, blob_op_complete, NULL);
3352 	poll_threads();
3353 	CU_ASSERT(g_bserrno == 0);
3354 	blob = NULL;
3355 	g_blob = NULL;
3356 	g_blobid = SPDK_BLOBID_INVALID;
3357 
3358 	/* Mark the second blob as invalid by giving its first md page a non-zero sequence number */
3359 	page_num = bs_blobid_to_page(blobid2);
3360 
3361 	index = DEV_BUFFER_BLOCKLEN * (bs->md_start + page_num);
3362 	page = (struct spdk_blob_md_page *)&g_dev_buffer[index];
3363 	page->sequence_num = 1;
3364 	page->crc = blob_md_page_calc_crc(page);
3365 
3366 	free_clusters = spdk_bs_free_cluster_count(bs);
3367 
3368 	ut_bs_dirty_load(&bs, NULL);
3369 
3370 	spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL);
3371 	poll_threads();
3372 	CU_ASSERT(g_bserrno != 0);
3373 	CU_ASSERT(g_blob == NULL);
3374 
3375 	spdk_bs_open_blob(bs, blobid3, blob_op_with_handle_complete, NULL);
3376 	poll_threads();
3377 	CU_ASSERT(g_bserrno == 0);
3378 	CU_ASSERT(g_blob != NULL);
3379 	blob = g_blob;
3380 
3381 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3382 }
3383 
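/*
 * Persist unknown invalid, data_ro and md_ro flag bits and verify the behavior
 * after a reload: unknown invalid flags prevent the blob from being opened,
 * unknown data_ro flags make the blob both data and md read-only, and unknown
 * md_ro flags make only the metadata read-only.
 */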
3384 static void
3385 blob_flags(void)
3386 {
3387 	struct spdk_blob_store *bs = g_bs;
3388 	spdk_blob_id blobid_invalid, blobid_data_ro, blobid_md_ro;
3389 	struct spdk_blob *blob_invalid, *blob_data_ro, *blob_md_ro;
3390 	struct spdk_blob_opts blob_opts;
3391 	int rc;
3392 
3393 	/* Create three blobs - one each for testing invalid, data_ro and md_ro flags. */
3394 	blob_invalid = ut_blob_create_and_open(bs, NULL);
3395 	blobid_invalid = spdk_blob_get_id(blob_invalid);
3396 
3397 	blob_data_ro = ut_blob_create_and_open(bs, NULL);
3398 	blobid_data_ro = spdk_blob_get_id(blob_data_ro);
3399 
3400 	ut_spdk_blob_opts_init(&blob_opts);
3401 	blob_opts.clear_method = BLOB_CLEAR_WITH_WRITE_ZEROES;
3402 	blob_md_ro = ut_blob_create_and_open(bs, &blob_opts);
3403 	blobid_md_ro = spdk_blob_get_id(blob_md_ro);
3404 	CU_ASSERT((blob_md_ro->md_ro_flags & SPDK_BLOB_MD_RO_FLAGS_MASK) == BLOB_CLEAR_WITH_WRITE_ZEROES);
3405 
3406 	/* Change the size of blob_data_ro to check if flags are serialized
3407 	 * when the blob has a non-zero number of extents */
3408 	spdk_blob_resize(blob_data_ro, 10, blob_op_complete, NULL);
3409 	poll_threads();
3410 	CU_ASSERT(g_bserrno == 0);
3411 
3412 	/* Set the xattr to check if flags are serialized
3413 	 * when the blob has a non-zero number of xattrs */
3414 	rc = spdk_blob_set_xattr(blob_md_ro, "name", "log.txt", strlen("log.txt") + 1);
3415 	CU_ASSERT(rc == 0);
3416 
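	/* Set an unknown bit in each flags field.  An unknown invalid_flags bit should make
	 * the blob impossible to open, an unknown data_ro_flags bit should force it data and
	 * md read-only, and an unknown md_ro_flags bit should force it md read-only
	 * (all verified after the reload below). */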
3417 	blob_invalid->invalid_flags = (1ULL << 63);
3418 	blob_invalid->state = SPDK_BLOB_STATE_DIRTY;
3419 	blob_data_ro->data_ro_flags = (1ULL << 62);
3420 	blob_data_ro->state = SPDK_BLOB_STATE_DIRTY;
3421 	blob_md_ro->md_ro_flags = (1ULL << 61);
3422 	blob_md_ro->state = SPDK_BLOB_STATE_DIRTY;
3423 
3424 	g_bserrno = -1;
3425 	spdk_blob_sync_md(blob_invalid, blob_op_complete, NULL);
3426 	poll_threads();
3427 	CU_ASSERT(g_bserrno == 0);
3428 	g_bserrno = -1;
3429 	spdk_blob_sync_md(blob_data_ro, blob_op_complete, NULL);
3430 	poll_threads();
3431 	CU_ASSERT(g_bserrno == 0);
3432 	g_bserrno = -1;
3433 	spdk_blob_sync_md(blob_md_ro, blob_op_complete, NULL);
3434 	poll_threads();
3435 	CU_ASSERT(g_bserrno == 0);
3436 
3437 	g_bserrno = -1;
3438 	spdk_blob_close(blob_invalid, blob_op_complete, NULL);
3439 	poll_threads();
3440 	CU_ASSERT(g_bserrno == 0);
3441 	blob_invalid = NULL;
3442 	g_bserrno = -1;
3443 	spdk_blob_close(blob_data_ro, blob_op_complete, NULL);
3444 	poll_threads();
3445 	CU_ASSERT(g_bserrno == 0);
3446 	blob_data_ro = NULL;
3447 	g_bserrno = -1;
3448 	spdk_blob_close(blob_md_ro, blob_op_complete, NULL);
3449 	poll_threads();
3450 	CU_ASSERT(g_bserrno == 0);
3451 	blob_md_ro = NULL;
3452 
3453 	g_blob = NULL;
3454 	g_blobid = SPDK_BLOBID_INVALID;
3455 
3456 	ut_bs_reload(&bs, NULL);
3457 
3458 	g_blob = NULL;
3459 	g_bserrno = 0;
3460 	spdk_bs_open_blob(bs, blobid_invalid, blob_op_with_handle_complete, NULL);
3461 	poll_threads();
3462 	CU_ASSERT(g_bserrno != 0);
3463 	CU_ASSERT(g_blob == NULL);
3464 
3465 	g_blob = NULL;
3466 	g_bserrno = -1;
3467 	spdk_bs_open_blob(bs, blobid_data_ro, blob_op_with_handle_complete, NULL);
3468 	poll_threads();
3469 	CU_ASSERT(g_bserrno == 0);
3470 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
3471 	blob_data_ro = g_blob;
3472 	/* If an unknown data_ro flag was found, the blob should be marked both data and md read-only. */
3473 	CU_ASSERT(blob_data_ro->data_ro == true);
3474 	CU_ASSERT(blob_data_ro->md_ro == true);
3475 	CU_ASSERT(spdk_blob_get_num_clusters(blob_data_ro) == 10);
3476 
3477 	g_blob = NULL;
3478 	g_bserrno = -1;
3479 	spdk_bs_open_blob(bs, blobid_md_ro, blob_op_with_handle_complete, NULL);
3480 	poll_threads();
3481 	CU_ASSERT(g_bserrno == 0);
3482 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
3483 	blob_md_ro = g_blob;
3484 	CU_ASSERT(blob_md_ro->data_ro == false);
3485 	CU_ASSERT(blob_md_ro->md_ro == true);
3486 
3487 	g_bserrno = -1;
3488 	spdk_blob_sync_md(blob_md_ro, blob_op_complete, NULL);
3489 	poll_threads();
3490 	CU_ASSERT(g_bserrno == 0);
3491 
3492 	ut_blob_close_and_delete(bs, blob_data_ro);
3493 	ut_blob_close_and_delete(bs, blob_md_ro);
3494 }
3495 
3496 static void
3497 bs_version(void)
3498 {
3499 	struct spdk_bs_super_block *super;
3500 	struct spdk_blob_store *bs = g_bs;
3501 	struct spdk_bs_dev *dev;
3502 	struct spdk_blob *blob;
3503 	struct spdk_blob_opts blob_opts;
3504 	spdk_blob_id blobid;
3505 
3506 	/* Unload the blob store */
3507 	spdk_bs_unload(bs, bs_op_complete, NULL);
3508 	poll_threads();
3509 	CU_ASSERT(g_bserrno == 0);
3510 	g_bs = NULL;
3511 
3512 	/*
3513 	 * Change the bs version on disk.  This will allow us to
3514 	 *  test that the version does not get modified automatically
3515 	 *  when loading and unloading the blobstore.
3516 	 */
3517 	super = (struct spdk_bs_super_block *)&g_dev_buffer[0];
3518 	CU_ASSERT(super->version == SPDK_BS_VERSION);
3519 	CU_ASSERT(super->clean == 1);
3520 	super->version = 2;
3521 	/*
3522 	 * Version 2 metadata does not have a used blobid mask, so clear
3523 	 *  those fields in the super block and zero the corresponding
3524 	 *  region on "disk".  We will use this to ensure blob IDs are
3525 	 *  correctly reconstructed.
3526 	 */
3527 	memset(&g_dev_buffer[super->used_blobid_mask_start * SPDK_BS_PAGE_SIZE], 0,
3528 	       super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE);
3529 	super->used_blobid_mask_start = 0;
3530 	super->used_blobid_mask_len = 0;
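	/* Recompute the super block CRC so the modified super block still passes
	 * checksum validation when the blob store is loaded below. */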
3531 	super->crc = blob_md_page_calc_crc(super);
3532 
3533 	/* Load an existing blob store */
3534 	dev = init_dev();
3535 	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
3536 	poll_threads();
3537 	CU_ASSERT(g_bserrno == 0);
3538 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3539 	CU_ASSERT(super->clean == 1);
3540 	bs = g_bs;
3541 
3542 	/*
3543 	 * Create a blob - just to make sure that when we unload it
3544 	 *  results in writing the super block (since metadata pages
3545 	 *  were allocated.
3546 	 */
3547 	ut_spdk_blob_opts_init(&blob_opts);
3548 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
3549 	poll_threads();
3550 	CU_ASSERT(g_bserrno == 0);
3551 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
3552 	blobid = g_blobid;
3553 
3554 	/* Unload the blob store */
3555 	spdk_bs_unload(bs, bs_op_complete, NULL);
3556 	poll_threads();
3557 	CU_ASSERT(g_bserrno == 0);
3558 	g_bs = NULL;
3559 	CU_ASSERT(super->version == 2);
3560 	CU_ASSERT(super->used_blobid_mask_start == 0);
3561 	CU_ASSERT(super->used_blobid_mask_len == 0);
3562 
3563 	dev = init_dev();
3564 	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
3565 	poll_threads();
3566 	CU_ASSERT(g_bserrno == 0);
3567 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3568 	bs = g_bs;
3569 
3570 	g_blob = NULL;
3571 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
3572 	poll_threads();
3573 	CU_ASSERT(g_bserrno == 0);
3574 	CU_ASSERT(g_blob != NULL);
3575 	blob = g_blob;
3576 
3577 	ut_blob_close_and_delete(bs, blob);
3578 
3579 	CU_ASSERT(super->version == 2);
3580 	CU_ASSERT(super->used_blobid_mask_start == 0);
3581 	CU_ASSERT(super->used_blobid_mask_len == 0);
3582 }
3583 
3584 static void
3585 blob_set_xattrs_test(void)
3586 {
3587 	struct spdk_blob_store *bs = g_bs;
3588 	struct spdk_blob *blob;
3589 	struct spdk_blob_opts opts;
3590 	const void *value;
3591 	size_t value_len;
3592 	char *xattr;
3593 	size_t xattr_length;
3594 	int rc;
3595 
3596 	/* Create blob with extra attributes */
3597 	ut_spdk_blob_opts_init(&opts);
3598 
3599 	opts.xattrs.names = g_xattr_names;
3600 	opts.xattrs.get_value = _get_xattr_value;
3601 	opts.xattrs.count = 3;
3602 	opts.xattrs.ctx = &g_ctx;
3603 
3604 	blob = ut_blob_create_and_open(bs, &opts);
3605 
3606 	/* Get the xattrs */
3607 	value = NULL;
3608 
3609 	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len);
3610 	CU_ASSERT(rc == 0);
3611 	SPDK_CU_ASSERT_FATAL(value != NULL);
3612 	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
3613 	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);
3614 
3615 	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len);
3616 	CU_ASSERT(rc == 0);
3617 	SPDK_CU_ASSERT_FATAL(value != NULL);
3618 	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
3619 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);
3620 
3621 	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len);
3622 	CU_ASSERT(rc == 0);
3623 	SPDK_CU_ASSERT_FATAL(value != NULL);
3624 	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
3625 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);
3626 
	/* Try to get a non-existing attribute */
3628 
3629 	rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len);
3630 	CU_ASSERT(rc == -ENOENT);
3631 
	/* Try an xattr exceeding the maximum descriptor length in a single page */
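	/* The xattr descriptor (header + name + value) must fit in a single metadata page;
	 * size the value one byte past what fits, so spdk_blob_set_xattr() is expected to
	 * fail with -ENOMEM. */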
3633 	xattr_length = SPDK_BS_MAX_DESC_SIZE - sizeof(struct spdk_blob_md_descriptor_xattr) -
3634 		       strlen("large_xattr") + 1;
3635 	xattr = calloc(xattr_length, sizeof(char));
3636 	SPDK_CU_ASSERT_FATAL(xattr != NULL);
3637 	rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
3638 	free(xattr);
3639 	SPDK_CU_ASSERT_FATAL(rc == -ENOMEM);
3640 
3641 	spdk_blob_close(blob, blob_op_complete, NULL);
3642 	poll_threads();
3643 	CU_ASSERT(g_bserrno == 0);
3644 	blob = NULL;
3645 	g_blob = NULL;
3646 	g_blobid = SPDK_BLOBID_INVALID;
3647 
3648 	/* NULL callback */
3649 	ut_spdk_blob_opts_init(&opts);
3650 	opts.xattrs.names = g_xattr_names;
3651 	opts.xattrs.get_value = NULL;
3652 	opts.xattrs.count = 1;
3653 	opts.xattrs.ctx = &g_ctx;
3654 
3655 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
3656 	poll_threads();
3657 	CU_ASSERT(g_bserrno == -EINVAL);
3658 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
3659 
3660 	/* NULL values */
3661 	ut_spdk_blob_opts_init(&opts);
3662 	opts.xattrs.names = g_xattr_names;
3663 	opts.xattrs.get_value = _get_xattr_value_null;
3664 	opts.xattrs.count = 1;
3665 	opts.xattrs.ctx = NULL;
3666 
3667 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
3668 	poll_threads();
3669 	CU_ASSERT(g_bserrno == -EINVAL);
3670 }
3671 
3672 static void
3673 blob_thin_prov_alloc(void)
3674 {
3675 	struct spdk_blob_store *bs = g_bs;
3676 	struct spdk_blob *blob;
3677 	struct spdk_blob_opts opts;
3678 	spdk_blob_id blobid;
3679 	uint64_t free_clusters;
3680 
3681 	free_clusters = spdk_bs_free_cluster_count(bs);
3682 
3683 	/* Set blob as thin provisioned */
3684 	ut_spdk_blob_opts_init(&opts);
3685 	opts.thin_provision = true;
3686 
3687 	blob = ut_blob_create_and_open(bs, &opts);
3688 	blobid = spdk_blob_get_id(blob);
3689 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3690 
3691 	CU_ASSERT(blob->active.num_clusters == 0);
3692 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
3693 
3694 	/* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */
3695 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
3696 	poll_threads();
3697 	CU_ASSERT(g_bserrno == 0);
3698 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3699 	CU_ASSERT(blob->active.num_clusters == 5);
3700 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
3701 
3702 	/* Grow it to 1TB - still unallocated */
3703 	spdk_blob_resize(blob, 262144, blob_op_complete, NULL);
3704 	poll_threads();
3705 	CU_ASSERT(g_bserrno == 0);
3706 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3707 	CU_ASSERT(blob->active.num_clusters == 262144);
3708 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 262144);
3709 
3710 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
3711 	poll_threads();
3712 	CU_ASSERT(g_bserrno == 0);
3713 	/* Sync must not change anything */
3714 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3715 	CU_ASSERT(blob->active.num_clusters == 262144);
3716 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 262144);
	/* Since no clusters are allocated,
	 * the number of metadata pages is expected to be minimal.
	 */
3720 	CU_ASSERT(blob->active.num_pages == 1);
3721 
3722 	/* Shrink the blob to 3 clusters - still unallocated */
3723 	spdk_blob_resize(blob, 3, blob_op_complete, NULL);
3724 	poll_threads();
3725 	CU_ASSERT(g_bserrno == 0);
3726 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3727 	CU_ASSERT(blob->active.num_clusters == 3);
3728 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 3);
3729 
3730 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
3731 	poll_threads();
3732 	CU_ASSERT(g_bserrno == 0);
3733 	/* Sync must not change anything */
3734 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3735 	CU_ASSERT(blob->active.num_clusters == 3);
3736 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 3);
3737 
3738 	spdk_blob_close(blob, blob_op_complete, NULL);
3739 	poll_threads();
3740 	CU_ASSERT(g_bserrno == 0);
3741 
3742 	ut_bs_reload(&bs, NULL);
3743 
3744 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
3745 	poll_threads();
3746 	CU_ASSERT(g_bserrno == 0);
3747 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
3748 	blob = g_blob;
3749 
3750 	/* Check that clusters allocation and size is still the same */
3751 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3752 	CU_ASSERT(blob->active.num_clusters == 3);
3753 
3754 	ut_blob_close_and_delete(bs, blob);
3755 }
3756 
3757 static void
3758 blob_insert_cluster_msg_test(void)
3759 {
3760 	struct spdk_blob_store *bs = g_bs;
3761 	struct spdk_blob *blob;
3762 	struct spdk_blob_opts opts;
3763 	spdk_blob_id blobid;
3764 	uint64_t free_clusters;
3765 	uint64_t new_cluster = 0;
3766 	uint32_t cluster_num = 3;
3767 	uint32_t extent_page = 0;
3768 
3769 	free_clusters = spdk_bs_free_cluster_count(bs);
3770 
3771 	/* Set blob as thin provisioned */
3772 	ut_spdk_blob_opts_init(&opts);
3773 	opts.thin_provision = true;
3774 	opts.num_clusters = 4;
3775 
3776 	blob = ut_blob_create_and_open(bs, &opts);
3777 	blobid = spdk_blob_get_id(blob);
3778 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3779 
3780 	CU_ASSERT(blob->active.num_clusters == 4);
3781 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 4);
3782 	CU_ASSERT(blob->active.clusters[cluster_num] == 0);
3783 
	/* Specify the cluster_num to allocate; new_cluster is returned so it can be inserted on the md_thread.
	 * This simulates the behaviour when a cluster is allocated after blob creation,
	 * such as in _spdk_bs_allocate_and_copy_cluster(). */
3787 	bs_allocate_cluster(blob, cluster_num, &new_cluster, &extent_page, false);
3788 	CU_ASSERT(blob->active.clusters[cluster_num] == 0);
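	/* blob_insert_cluster_on_md_thread() below sends a message to the md thread to
	 * record the allocation; once it completes, the cluster map entry for cluster_num
	 * should be non-zero. */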
3789 
3790 	blob_insert_cluster_on_md_thread(blob, cluster_num, new_cluster, extent_page,
3791 					 blob_op_complete, NULL);
3792 	poll_threads();
3793 
3794 	CU_ASSERT(blob->active.clusters[cluster_num] != 0);
3795 
3796 	spdk_blob_close(blob, blob_op_complete, NULL);
3797 	poll_threads();
3798 	CU_ASSERT(g_bserrno == 0);
3799 
3800 	ut_bs_reload(&bs, NULL);
3801 
3802 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
3803 	poll_threads();
3804 	CU_ASSERT(g_bserrno == 0);
3805 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
3806 	blob = g_blob;
3807 
3808 	CU_ASSERT(blob->active.clusters[cluster_num] != 0);
3809 
3810 	ut_blob_close_and_delete(bs, blob);
3811 }
3812 
3813 static void
3814 blob_thin_prov_rw(void)
3815 {
3816 	static const uint8_t zero[10 * 4096] = { 0 };
3817 	struct spdk_blob_store *bs = g_bs;
3818 	struct spdk_blob *blob, *blob_id0;
3819 	struct spdk_io_channel *channel, *channel_thread1;
3820 	struct spdk_blob_opts opts;
3821 	uint64_t free_clusters;
3822 	uint64_t page_size;
3823 	uint8_t payload_read[10 * 4096];
3824 	uint8_t payload_write[10 * 4096];
3825 	uint64_t write_bytes;
3826 	uint64_t read_bytes;
3827 
3828 	free_clusters = spdk_bs_free_cluster_count(bs);
3829 	page_size = spdk_bs_get_page_size(bs);
3830 
3831 	channel = spdk_bs_alloc_io_channel(bs);
3832 	CU_ASSERT(channel != NULL);
3833 
3834 	ut_spdk_blob_opts_init(&opts);
3835 	opts.thin_provision = true;
3836 
	/* Create and delete a blob at md page 0, so that the next md page allocation
	 * for an extent will reuse it. */
3839 	blob_id0 = ut_blob_create_and_open(bs, &opts);
3840 	blob = ut_blob_create_and_open(bs, &opts);
3841 	ut_blob_close_and_delete(bs, blob_id0);
3842 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3843 
3844 	CU_ASSERT(blob->active.num_clusters == 0);
3845 
3846 	/* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */
3847 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
3848 	poll_threads();
3849 	CU_ASSERT(g_bserrno == 0);
3850 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3851 	CU_ASSERT(blob->active.num_clusters == 5);
3852 
3853 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
3854 	poll_threads();
3855 	CU_ASSERT(g_bserrno == 0);
3856 	/* Sync must not change anything */
3857 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3858 	CU_ASSERT(blob->active.num_clusters == 5);
3859 
3860 	/* Payload should be all zeros from unallocated clusters */
3861 	memset(payload_read, 0xFF, sizeof(payload_read));
3862 	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
3863 	poll_threads();
3864 	CU_ASSERT(g_bserrno == 0);
3865 	CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0);
3866 
3867 	write_bytes = g_dev_write_bytes;
3868 	read_bytes = g_dev_read_bytes;
3869 
3870 	/* Perform write on thread 1. That will allocate cluster on thread 0 via send_msg */
3871 	set_thread(1);
3872 	channel_thread1 = spdk_bs_alloc_io_channel(bs);
3873 	CU_ASSERT(channel_thread1 != NULL);
3874 	memset(payload_write, 0xE5, sizeof(payload_write));
3875 	spdk_blob_io_write(blob, channel_thread1, payload_write, 4, 10, blob_op_complete, NULL);
3876 	CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs));
	/* Perform a write on thread 0. It will try to allocate a cluster,
	 * but fail because the other thread issued the cluster allocation first. */
3879 	set_thread(0);
3880 	memset(payload_write, 0xE5, sizeof(payload_write));
3881 	spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
3882 	CU_ASSERT(free_clusters - 2 == spdk_bs_free_cluster_count(bs));
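	/* Two clusters are claimed while both writes are in flight; once the md thread
	 * resolves the allocation race, one claim should be released (checked below). */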
3883 	poll_threads();
3884 	CU_ASSERT(g_bserrno == 0);
3885 	CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs));
	/* For a thin-provisioned blob we need to write 20 pages plus one metadata page,
	 * and read 0 bytes */
3888 	if (g_use_extent_table) {
3889 		/* Add one more page for EXTENT_PAGE write */
3890 		CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 22);
3891 	} else {
3892 		CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 21);
3893 	}
3894 	CU_ASSERT(g_dev_read_bytes - read_bytes == 0);
3895 
3896 	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
3897 	poll_threads();
3898 	CU_ASSERT(g_bserrno == 0);
3899 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
3900 
3901 	ut_blob_close_and_delete(bs, blob);
3902 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3903 
3904 	set_thread(1);
3905 	spdk_bs_free_io_channel(channel_thread1);
3906 	set_thread(0);
3907 	spdk_bs_free_io_channel(channel);
3908 	poll_threads();
3909 	g_blob = NULL;
3910 	g_blobid = 0;
3911 }
3912 
3913 static void
3914 blob_thin_prov_write_count_io(void)
3915 {
3916 	struct spdk_blob_store *bs;
3917 	struct spdk_blob *blob;
3918 	struct spdk_io_channel *ch;
3919 	struct spdk_bs_dev *dev;
3920 	struct spdk_bs_opts bs_opts;
3921 	struct spdk_blob_opts opts;
3922 	uint64_t free_clusters;
3923 	uint64_t page_size;
3924 	uint8_t payload_write[4096];
3925 	uint64_t write_bytes;
3926 	uint64_t read_bytes;
3927 	const uint32_t CLUSTER_SZ = 16384;
3928 	uint32_t pages_per_cluster;
3929 	uint32_t pages_per_extent_page;
3930 	uint32_t i;
3931 
	/* Use a very small cluster size for this test.  This ensures we need multiple
	 * extent pages to hold all of the clusters, even for the relatively small blobs
	 * we are restricted to in the unit tests (i.e. we don't want to allocate multi-GB
	 * buffers).
	 */
3937 	dev = init_dev();
3938 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
3939 	bs_opts.cluster_sz = CLUSTER_SZ;
3940 
3941 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
3942 	poll_threads();
3943 	CU_ASSERT(g_bserrno == 0);
3944 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3945 	bs = g_bs;
3946 
3947 	free_clusters = spdk_bs_free_cluster_count(bs);
3948 	page_size = spdk_bs_get_page_size(bs);
3949 	pages_per_cluster = CLUSTER_SZ / page_size;
3950 	pages_per_extent_page = SPDK_EXTENTS_PER_EP * pages_per_cluster;
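	/* pages_per_extent_page is the number of blob pages covered by one extent page,
	 * i.e. SPDK_EXTENTS_PER_EP clusters worth of pages. */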
3951 
3952 	ch = spdk_bs_alloc_io_channel(bs);
3953 	SPDK_CU_ASSERT_FATAL(ch != NULL);
3954 
3955 	ut_spdk_blob_opts_init(&opts);
3956 	opts.thin_provision = true;
3957 
3958 	blob = ut_blob_create_and_open(bs, &opts);
3959 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3960 
3961 	/* Resize the blob so that it will require 8 extent pages to hold all of
3962 	 * the clusters.
3963 	 */
3964 	g_bserrno = -1;
3965 	spdk_blob_resize(blob, SPDK_EXTENTS_PER_EP * 8, blob_op_complete, NULL);
3966 	poll_threads();
3967 	CU_ASSERT(g_bserrno == 0);
3968 
3969 	g_bserrno = -1;
3970 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
3971 	poll_threads();
3972 	CU_ASSERT(g_bserrno == 0);
3973 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3974 	CU_ASSERT(blob->active.num_clusters == SPDK_EXTENTS_PER_EP * 8);
3975 
3976 	memset(payload_write, 0, sizeof(payload_write));
3977 	for (i = 0; i < 8; i++) {
3978 		write_bytes = g_dev_write_bytes;
3979 		read_bytes = g_dev_read_bytes;
3980 
3981 		g_bserrno = -1;
3982 		spdk_blob_io_write(blob, ch, payload_write, pages_per_extent_page * i, 1, blob_op_complete, NULL);
3983 		poll_threads();
3984 		CU_ASSERT(g_bserrno == 0);
3985 		CU_ASSERT(free_clusters - (2 * i + 1) == spdk_bs_free_cluster_count(bs));
3986 
3987 		CU_ASSERT(g_dev_read_bytes == read_bytes);
3988 		if (!g_use_extent_table) {
3989 			/* For legacy metadata, we should have written two pages - one for the
3990 			 * write I/O itself, another for the blob's primary metadata.
3991 			 */
3992 			CU_ASSERT((g_dev_write_bytes - write_bytes) / page_size == 2);
3993 		} else {
3994 			/* For extent table metadata, we should have written three pages - one
3995 			 * for the write I/O, one for the extent page, one for the blob's primary
3996 			 * metadata.
3997 			 */
3998 			CU_ASSERT((g_dev_write_bytes - write_bytes) / page_size == 3);
3999 		}
4000 
4001 		/* The write should have synced the metadata already.  Do another sync here
4002 		 * just to confirm.
4003 		 */
4004 		write_bytes = g_dev_write_bytes;
4005 		read_bytes = g_dev_read_bytes;
4006 
4007 		g_bserrno = -1;
4008 		spdk_blob_sync_md(blob, blob_op_complete, NULL);
4009 		poll_threads();
4010 		CU_ASSERT(g_bserrno == 0);
4011 		CU_ASSERT(free_clusters - (2 * i + 1) == spdk_bs_free_cluster_count(bs));
4012 
4013 		CU_ASSERT(g_dev_read_bytes == read_bytes);
4014 		CU_ASSERT(g_dev_write_bytes == write_bytes);
4015 
4016 		/* Now write to another unallocated cluster that is part of the same extent page. */
4017 		g_bserrno = -1;
4018 		spdk_blob_io_write(blob, ch, payload_write, pages_per_extent_page * i + pages_per_cluster,
4019 				   1, blob_op_complete, NULL);
4020 		poll_threads();
4021 		CU_ASSERT(g_bserrno == 0);
4022 		CU_ASSERT(free_clusters - (2 * i + 2) == spdk_bs_free_cluster_count(bs));
4023 
4024 		CU_ASSERT(g_dev_read_bytes == read_bytes);
4025 		/*
4026 		 * For legacy metadata, we should have written the I/O and the primary metadata page.
4027 		 * For extent table metadata, we should have written the I/O and the extent metadata page.
4028 		 */
4029 		CU_ASSERT((g_dev_write_bytes - write_bytes) / page_size == 2);
4030 	}
4031 
4032 	ut_blob_close_and_delete(bs, blob);
4033 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4034 
4035 	spdk_bs_free_io_channel(ch);
4036 	poll_threads();
4037 	g_blob = NULL;
4038 	g_blobid = 0;
4039 
4040 	spdk_bs_unload(bs, bs_op_complete, NULL);
4041 	poll_threads();
4042 	CU_ASSERT(g_bserrno == 0);
4043 	g_bs = NULL;
4044 }
4045 
4046 static void
4047 blob_thin_prov_rle(void)
4048 {
4049 	static const uint8_t zero[10 * 4096] = { 0 };
4050 	struct spdk_blob_store *bs = g_bs;
4051 	struct spdk_blob *blob;
4052 	struct spdk_io_channel *channel;
4053 	struct spdk_blob_opts opts;
4054 	spdk_blob_id blobid;
4055 	uint64_t free_clusters;
4056 	uint64_t page_size;
4057 	uint8_t payload_read[10 * 4096];
4058 	uint8_t payload_write[10 * 4096];
4059 	uint64_t write_bytes;
4060 	uint64_t read_bytes;
4061 	uint64_t io_unit;
4062 
4063 	free_clusters = spdk_bs_free_cluster_count(bs);
4064 	page_size = spdk_bs_get_page_size(bs);
4065 
4066 	ut_spdk_blob_opts_init(&opts);
4067 	opts.thin_provision = true;
4068 	opts.num_clusters = 5;
4069 
4070 	blob = ut_blob_create_and_open(bs, &opts);
4071 	blobid = spdk_blob_get_id(blob);
4072 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4073 
4074 	channel = spdk_bs_alloc_io_channel(bs);
4075 	CU_ASSERT(channel != NULL);
4076 
	/* Specifically target the second cluster in the blob as the first allocation */
4078 	io_unit = bs_cluster_to_page(bs, 1) * bs_io_unit_per_page(bs);
4079 
4080 	/* Payload should be all zeros from unallocated clusters */
4081 	memset(payload_read, 0xFF, sizeof(payload_read));
4082 	spdk_blob_io_read(blob, channel, payload_read, io_unit, 10, blob_op_complete, NULL);
4083 	poll_threads();
4084 	CU_ASSERT(g_bserrno == 0);
4085 	CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0);
4086 
4087 	write_bytes = g_dev_write_bytes;
4088 	read_bytes = g_dev_read_bytes;
4089 
4090 	/* Issue write to second cluster in a blob */
4091 	memset(payload_write, 0xE5, sizeof(payload_write));
4092 	spdk_blob_io_write(blob, channel, payload_write, io_unit, 10, blob_op_complete, NULL);
4093 	poll_threads();
4094 	CU_ASSERT(g_bserrno == 0);
4095 	CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs));
	/* For a thin-provisioned blob we need to write 10 pages plus one metadata page,
	 * and read 0 bytes */
4098 	if (g_use_extent_table) {
4099 		/* Add one more page for EXTENT_PAGE write */
4100 		CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 12);
4101 	} else {
4102 		CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 11);
4103 	}
4104 	CU_ASSERT(g_dev_read_bytes - read_bytes == 0);
4105 
4106 	spdk_blob_io_read(blob, channel, payload_read, io_unit, 10, blob_op_complete, NULL);
4107 	poll_threads();
4108 	CU_ASSERT(g_bserrno == 0);
4109 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
4110 
4111 	spdk_bs_free_io_channel(channel);
4112 	poll_threads();
4113 
4114 	spdk_blob_close(blob, blob_op_complete, NULL);
4115 	poll_threads();
4116 	CU_ASSERT(g_bserrno == 0);
4117 
4118 	ut_bs_reload(&bs, NULL);
4119 
4120 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
4121 	poll_threads();
4122 	CU_ASSERT(g_bserrno == 0);
4123 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4124 	blob = g_blob;
4125 
4126 	channel = spdk_bs_alloc_io_channel(bs);
4127 	CU_ASSERT(channel != NULL);
4128 
4129 	/* Read second cluster after blob reload to confirm data written */
4130 	spdk_blob_io_read(blob, channel, payload_read, io_unit, 10, blob_op_complete, NULL);
4131 	poll_threads();
4132 	CU_ASSERT(g_bserrno == 0);
4133 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
4134 
4135 	spdk_bs_free_io_channel(channel);
4136 	poll_threads();
4137 
4138 	ut_blob_close_and_delete(bs, blob);
4139 }
4140 
4141 static void
4142 blob_thin_prov_rw_iov(void)
4143 {
4144 	static const uint8_t zero[10 * 4096] = { 0 };
4145 	struct spdk_blob_store *bs = g_bs;
4146 	struct spdk_blob *blob;
4147 	struct spdk_io_channel *channel;
4148 	struct spdk_blob_opts opts;
4149 	uint64_t free_clusters;
4150 	uint8_t payload_read[10 * 4096];
4151 	uint8_t payload_write[10 * 4096];
4152 	struct iovec iov_read[3];
4153 	struct iovec iov_write[3];
4154 
4155 	free_clusters = spdk_bs_free_cluster_count(bs);
4156 
4157 	channel = spdk_bs_alloc_io_channel(bs);
4158 	CU_ASSERT(channel != NULL);
4159 
4160 	ut_spdk_blob_opts_init(&opts);
4161 	opts.thin_provision = true;
4162 
4163 	blob = ut_blob_create_and_open(bs, &opts);
4164 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4165 
4166 	CU_ASSERT(blob->active.num_clusters == 0);
4167 
4168 	/* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */
4169 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
4170 	poll_threads();
4171 	CU_ASSERT(g_bserrno == 0);
4172 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4173 	CU_ASSERT(blob->active.num_clusters == 5);
4174 
4175 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
4176 	poll_threads();
4177 	CU_ASSERT(g_bserrno == 0);
4178 	/* Sync must not change anything */
4179 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4180 	CU_ASSERT(blob->active.num_clusters == 5);
4181 
4182 	/* Payload should be all zeros from unallocated clusters */
4183 	memset(payload_read, 0xAA, sizeof(payload_read));
4184 	iov_read[0].iov_base = payload_read;
4185 	iov_read[0].iov_len = 3 * 4096;
4186 	iov_read[1].iov_base = payload_read + 3 * 4096;
4187 	iov_read[1].iov_len = 4 * 4096;
4188 	iov_read[2].iov_base = payload_read + 7 * 4096;
4189 	iov_read[2].iov_len = 3 * 4096;
4190 	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
4191 	poll_threads();
4192 	CU_ASSERT(g_bserrno == 0);
4193 	CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0);
4194 
4195 	memset(payload_write, 0xE5, sizeof(payload_write));
4196 	iov_write[0].iov_base = payload_write;
4197 	iov_write[0].iov_len = 1 * 4096;
4198 	iov_write[1].iov_base = payload_write + 1 * 4096;
4199 	iov_write[1].iov_len = 5 * 4096;
4200 	iov_write[2].iov_base = payload_write + 6 * 4096;
4201 	iov_write[2].iov_len = 4 * 4096;
4202 
4203 	spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
4204 	poll_threads();
4205 	CU_ASSERT(g_bserrno == 0);
4206 
4207 	memset(payload_read, 0xAA, sizeof(payload_read));
4208 	iov_read[0].iov_base = payload_read;
4209 	iov_read[0].iov_len = 3 * 4096;
4210 	iov_read[1].iov_base = payload_read + 3 * 4096;
4211 	iov_read[1].iov_len = 4 * 4096;
4212 	iov_read[2].iov_base = payload_read + 7 * 4096;
4213 	iov_read[2].iov_len = 3 * 4096;
4214 	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
4215 	poll_threads();
4216 	CU_ASSERT(g_bserrno == 0);
4217 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
4218 
4219 	spdk_bs_free_io_channel(channel);
4220 	poll_threads();
4221 
4222 	ut_blob_close_and_delete(bs, blob);
4223 }
4224 
4225 struct iter_ctx {
4226 	int		current_iter;
4227 	spdk_blob_id	blobid[4];
4228 };
4229 
4230 static void
4231 test_iter(void *arg, struct spdk_blob *blob, int bserrno)
4232 {
4233 	struct iter_ctx *iter_ctx = arg;
4234 	spdk_blob_id blobid;
4235 
4236 	CU_ASSERT(bserrno == 0);
4237 	blobid = spdk_blob_get_id(blob);
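	/* Blobs are expected to be iterated in the order they were created. */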
4238 	CU_ASSERT(blobid == iter_ctx->blobid[iter_ctx->current_iter++]);
4239 }
4240 
4241 static void
4242 bs_load_iter_test(void)
4243 {
4244 	struct spdk_blob_store *bs;
4245 	struct spdk_bs_dev *dev;
4246 	struct iter_ctx iter_ctx = { 0 };
4247 	struct spdk_blob *blob;
4248 	int i, rc;
4249 	struct spdk_bs_opts opts;
4250 
4251 	dev = init_dev();
4252 	spdk_bs_opts_init(&opts, sizeof(opts));
4253 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
4254 
4255 	/* Initialize a new blob store */
4256 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
4257 	poll_threads();
4258 	CU_ASSERT(g_bserrno == 0);
4259 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
4260 	bs = g_bs;
4261 
4262 	for (i = 0; i < 4; i++) {
4263 		blob = ut_blob_create_and_open(bs, NULL);
4264 		iter_ctx.blobid[i] = spdk_blob_get_id(blob);
4265 
4266 		/* Just save the blobid as an xattr for testing purposes. */
4267 		rc = spdk_blob_set_xattr(blob, "blobid", &iter_ctx.blobid[i], sizeof(spdk_blob_id));
4268 		CU_ASSERT(rc == 0);
4269 
4270 		/* Resize the blob */
4271 		spdk_blob_resize(blob, i, blob_op_complete, NULL);
4272 		poll_threads();
4273 		CU_ASSERT(g_bserrno == 0);
4274 
4275 		spdk_blob_close(blob, blob_op_complete, NULL);
4276 		poll_threads();
4277 		CU_ASSERT(g_bserrno == 0);
4278 	}
4279 
4280 	g_bserrno = -1;
4281 	spdk_bs_unload(bs, bs_op_complete, NULL);
4282 	poll_threads();
4283 	CU_ASSERT(g_bserrno == 0);
4284 
4285 	dev = init_dev();
4286 	spdk_bs_opts_init(&opts, sizeof(opts));
4287 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
4288 	opts.iter_cb_fn = test_iter;
4289 	opts.iter_cb_arg = &iter_ctx;
4290 
4291 	/* Test blob iteration during load after a clean shutdown. */
4292 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
4293 	poll_threads();
4294 	CU_ASSERT(g_bserrno == 0);
4295 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
4296 	bs = g_bs;
4297 
4298 	/* Dirty shutdown */
4299 	bs_free(bs);
4300 
4301 	dev = init_dev();
4302 	spdk_bs_opts_init(&opts, sizeof(opts));
4303 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
4304 	opts.iter_cb_fn = test_iter;
4305 	iter_ctx.current_iter = 0;
4306 	opts.iter_cb_arg = &iter_ctx;
4307 
4308 	/* Test blob iteration during load after a dirty shutdown. */
4309 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
4310 	poll_threads();
4311 	CU_ASSERT(g_bserrno == 0);
4312 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
4313 	bs = g_bs;
4314 
4315 	spdk_bs_unload(bs, bs_op_complete, NULL);
4316 	poll_threads();
4317 	CU_ASSERT(g_bserrno == 0);
4318 	g_bs = NULL;
4319 }
4320 
4321 static void
4322 blob_snapshot_rw(void)
4323 {
4324 	static const uint8_t zero[10 * 4096] = { 0 };
4325 	struct spdk_blob_store *bs = g_bs;
4326 	struct spdk_blob *blob, *snapshot;
4327 	struct spdk_io_channel *channel;
4328 	struct spdk_blob_opts opts;
4329 	spdk_blob_id blobid, snapshotid;
4330 	uint64_t free_clusters;
4331 	uint64_t cluster_size;
4332 	uint64_t page_size;
4333 	uint8_t payload_read[10 * 4096];
4334 	uint8_t payload_write[10 * 4096];
4335 	uint64_t write_bytes;
4336 	uint64_t read_bytes;
4337 
4338 	free_clusters = spdk_bs_free_cluster_count(bs);
4339 	cluster_size = spdk_bs_get_cluster_size(bs);
4340 	page_size = spdk_bs_get_page_size(bs);
4341 
4342 	channel = spdk_bs_alloc_io_channel(bs);
4343 	CU_ASSERT(channel != NULL);
4344 
4345 	ut_spdk_blob_opts_init(&opts);
4346 	opts.thin_provision = true;
4347 	opts.num_clusters = 5;
4348 
4349 	blob = ut_blob_create_and_open(bs, &opts);
4350 	blobid = spdk_blob_get_id(blob);
4351 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4352 
4353 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
4354 
4355 	memset(payload_read, 0xFF, sizeof(payload_read));
4356 	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
4357 	poll_threads();
4358 	CU_ASSERT(g_bserrno == 0);
4359 	CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0);
4360 
4361 	memset(payload_write, 0xE5, sizeof(payload_write));
4362 	spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
4363 	poll_threads();
4364 	CU_ASSERT(g_bserrno == 0);
4365 	CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs));
4366 
4367 	/* Create snapshot from blob */
4368 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
4369 	poll_threads();
4370 	CU_ASSERT(g_bserrno == 0);
4371 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
4372 	snapshotid = g_blobid;
4373 
4374 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
4375 	poll_threads();
4376 	CU_ASSERT(g_bserrno == 0);
4377 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4378 	snapshot = g_blob;
4379 	CU_ASSERT(snapshot->data_ro == true);
4380 	CU_ASSERT(snapshot->md_ro == true);
4381 
4382 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5);
4383 
4384 	write_bytes = g_dev_write_bytes;
4385 	read_bytes = g_dev_read_bytes;
4386 
4387 	memset(payload_write, 0xAA, sizeof(payload_write));
4388 	spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
4389 	poll_threads();
4390 	CU_ASSERT(g_bserrno == 0);
4391 	CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs));
4392 
4393 	/* For a clone we need to allocate and copy one cluster, update one page of metadata
4394 	 * and then write 10 pages of payload.
4395 	 */
4396 	if (g_use_extent_table) {
4397 		/* Add one more page for EXTENT_PAGE write */
4398 		CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 12 + cluster_size);
4399 	} else {
4400 		CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 11 + cluster_size);
4401 	}
4402 	CU_ASSERT(g_dev_read_bytes - read_bytes == cluster_size);
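	/* The cluster_size worth of reads accounts for the copy-on-write read of the
	 * original cluster from the backing snapshot. */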
4403 
4404 	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
4405 	poll_threads();
4406 	CU_ASSERT(g_bserrno == 0);
4407 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
4408 
4409 	/* Data on snapshot should not change after write to clone */
4410 	memset(payload_write, 0xE5, sizeof(payload_write));
4411 	spdk_blob_io_read(snapshot, channel, payload_read, 4, 10, blob_op_complete, NULL);
4412 	poll_threads();
4413 	CU_ASSERT(g_bserrno == 0);
4414 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
4415 
4416 	ut_blob_close_and_delete(bs, blob);
4417 	ut_blob_close_and_delete(bs, snapshot);
4418 
4419 	spdk_bs_free_io_channel(channel);
4420 	poll_threads();
4421 	g_blob = NULL;
4422 	g_blobid = 0;
4423 }
4424 
4425 static void
4426 blob_snapshot_rw_iov(void)
4427 {
4428 	static const uint8_t zero[10 * 4096] = { 0 };
4429 	struct spdk_blob_store *bs = g_bs;
4430 	struct spdk_blob *blob, *snapshot;
4431 	struct spdk_io_channel *channel;
4432 	struct spdk_blob_opts opts;
4433 	spdk_blob_id blobid, snapshotid;
4434 	uint64_t free_clusters;
4435 	uint8_t payload_read[10 * 4096];
4436 	uint8_t payload_write[10 * 4096];
4437 	struct iovec iov_read[3];
4438 	struct iovec iov_write[3];
4439 
4440 	free_clusters = spdk_bs_free_cluster_count(bs);
4441 
4442 	channel = spdk_bs_alloc_io_channel(bs);
4443 	CU_ASSERT(channel != NULL);
4444 
4445 	ut_spdk_blob_opts_init(&opts);
4446 	opts.thin_provision = true;
4447 	opts.num_clusters = 5;
4448 
4449 	blob = ut_blob_create_and_open(bs, &opts);
4450 	blobid = spdk_blob_get_id(blob);
4451 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4452 
4453 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
4454 
4455 	/* Create snapshot from blob */
4456 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
4457 	poll_threads();
4458 	CU_ASSERT(g_bserrno == 0);
4459 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
4460 	snapshotid = g_blobid;
4461 
4462 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
4463 	poll_threads();
4464 	CU_ASSERT(g_bserrno == 0);
4465 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4466 	snapshot = g_blob;
4467 	CU_ASSERT(snapshot->data_ro == true);
4468 	CU_ASSERT(snapshot->md_ro == true);
4469 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5);
4470 
4471 	/* Payload should be all zeros from unallocated clusters */
4472 	memset(payload_read, 0xAA, sizeof(payload_read));
4473 	iov_read[0].iov_base = payload_read;
4474 	iov_read[0].iov_len = 3 * 4096;
4475 	iov_read[1].iov_base = payload_read + 3 * 4096;
4476 	iov_read[1].iov_len = 4 * 4096;
4477 	iov_read[2].iov_base = payload_read + 7 * 4096;
4478 	iov_read[2].iov_len = 3 * 4096;
4479 	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
4480 	poll_threads();
4481 	CU_ASSERT(g_bserrno == 0);
4482 	CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0);
4483 
4484 	memset(payload_write, 0xE5, sizeof(payload_write));
4485 	iov_write[0].iov_base = payload_write;
4486 	iov_write[0].iov_len = 1 * 4096;
4487 	iov_write[1].iov_base = payload_write + 1 * 4096;
4488 	iov_write[1].iov_len = 5 * 4096;
4489 	iov_write[2].iov_base = payload_write + 6 * 4096;
4490 	iov_write[2].iov_len = 4 * 4096;
4491 
4492 	spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
4493 	poll_threads();
4494 	CU_ASSERT(g_bserrno == 0);
4495 
4496 	memset(payload_read, 0xAA, sizeof(payload_read));
4497 	iov_read[0].iov_base = payload_read;
4498 	iov_read[0].iov_len = 3 * 4096;
4499 	iov_read[1].iov_base = payload_read + 3 * 4096;
4500 	iov_read[1].iov_len = 4 * 4096;
4501 	iov_read[2].iov_base = payload_read + 7 * 4096;
4502 	iov_read[2].iov_len = 3 * 4096;
4503 	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
4504 	poll_threads();
4505 	CU_ASSERT(g_bserrno == 0);
4506 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
4507 
4508 	spdk_bs_free_io_channel(channel);
4509 	poll_threads();
4510 
4511 	ut_blob_close_and_delete(bs, blob);
4512 	ut_blob_close_and_delete(bs, snapshot);
4513 }
4514 
4515 /**
4516  * Inflate / decouple parent rw unit tests.
4517  *
4518  * --------------
4519  * original blob:         0         1         2         3         4
4520  *                   ,---------+---------+---------+---------+---------.
4521  *         snapshot  |xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|    -    |
4522  *                   +---------+---------+---------+---------+---------+
4523  *         snapshot2 |    -    |yyyyyyyyy|    -    |yyyyyyyyy|    -    |
4524  *                   +---------+---------+---------+---------+---------+
4525  *         blob      |    -    |zzzzzzzzz|    -    |    -    |    -    |
4526  *                   '---------+---------+---------+---------+---------'
4527  *                   .         .         .         .         .         .
4528  * --------          .         .         .         .         .         .
4529  * inflate:          .         .         .         .         .         .
4530  *                   ,---------+---------+---------+---------+---------.
4531  *         blob      |xxxxxxxxx|zzzzzzzzz|xxxxxxxxx|yyyyyyyyy|000000000|
4532  *                   '---------+---------+---------+---------+---------'
4533  *
4534  *         NOTE: needs to allocate 4 clusters, thin provisioning removed, dependency
4535  *               on snapshot2 and snapshot removed .         .         .
4536  *                   .         .         .         .         .         .
4537  * ----------------  .         .         .         .         .         .
4538  * decouple parent:  .         .         .         .         .         .
4539  *                   ,---------+---------+---------+---------+---------.
4540  *         snapshot  |xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|    -    |
4541  *                   +---------+---------+---------+---------+---------+
4542  *         blob      |    -    |zzzzzzzzz|    -    |yyyyyyyyy|    -    |
4543  *                   '---------+---------+---------+---------+---------'
4544  *
4545  *         NOTE: needs to allocate 1 cluster, 3 clusters unallocated, dependency
4546  *               on snapshot2 removed and on snapshot still exists. Snapshot2
4547  *               should remain a clone of snapshot.
4548  */
4549 static void
4550 _blob_inflate_rw(bool decouple_parent)
4551 {
4552 	struct spdk_blob_store *bs = g_bs;
4553 	struct spdk_blob *blob, *snapshot, *snapshot2;
4554 	struct spdk_io_channel *channel;
4555 	struct spdk_blob_opts opts;
4556 	spdk_blob_id blobid, snapshotid, snapshot2id;
4557 	uint64_t free_clusters;
4558 	uint64_t cluster_size;
4559 
4560 	uint64_t payload_size;
4561 	uint8_t *payload_read;
4562 	uint8_t *payload_write;
4563 	uint8_t *payload_clone;
4564 
4565 	uint64_t pages_per_cluster;
4566 	uint64_t pages_per_payload;
4567 
4568 	int i;
4569 	spdk_blob_id ids[2];
4570 	size_t count;
4571 
4572 	free_clusters = spdk_bs_free_cluster_count(bs);
4573 	cluster_size = spdk_bs_get_cluster_size(bs);
4574 	pages_per_cluster = cluster_size / spdk_bs_get_page_size(bs);
4575 	pages_per_payload = pages_per_cluster * 5;
4576 
4577 	payload_size = cluster_size * 5;
4578 
4579 	payload_read = malloc(payload_size);
4580 	SPDK_CU_ASSERT_FATAL(payload_read != NULL);
4581 
4582 	payload_write = malloc(payload_size);
4583 	SPDK_CU_ASSERT_FATAL(payload_write != NULL);
4584 
4585 	payload_clone = malloc(payload_size);
4586 	SPDK_CU_ASSERT_FATAL(payload_clone != NULL);
4587 
4588 	channel = spdk_bs_alloc_io_channel(bs);
4589 	SPDK_CU_ASSERT_FATAL(channel != NULL);
4590 
4591 	/* Create blob */
4592 	ut_spdk_blob_opts_init(&opts);
4593 	opts.thin_provision = true;
4594 	opts.num_clusters = 5;
4595 
4596 	blob = ut_blob_create_and_open(bs, &opts);
4597 	blobid = spdk_blob_get_id(blob);
4598 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4599 
4600 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
4601 
4602 	/* 1) Initial read should return zeroed payload */
4603 	memset(payload_read, 0xFF, payload_size);
4604 	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload,
4605 			  blob_op_complete, NULL);
4606 	poll_threads();
4607 	CU_ASSERT(g_bserrno == 0);
4608 	CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size));
4609 
	/* Fill the whole blob with a pattern, except the last cluster (to be sure it
	 * isn't allocated) */
4612 	memset(payload_write, 0xE5, payload_size - cluster_size);
4613 	spdk_blob_io_write(blob, channel, payload_write, 0, pages_per_payload -
4614 			   pages_per_cluster, blob_op_complete, NULL);
4615 	poll_threads();
4616 	CU_ASSERT(g_bserrno == 0);
4617 	CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs));
4618 
4619 	/* 2) Create snapshot from blob (first level) */
4620 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
4621 	poll_threads();
4622 	CU_ASSERT(g_bserrno == 0);
4623 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
4624 	snapshotid = g_blobid;
4625 
4626 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
4627 	poll_threads();
4628 	CU_ASSERT(g_bserrno == 0);
4629 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4630 	snapshot = g_blob;
4631 	CU_ASSERT(snapshot->data_ro == true);
4632 	CU_ASSERT(snapshot->md_ro == true);
4633 
4634 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5);
4635 
	/* Write every second cluster with a pattern.
	 *
	 * The last cluster shouldn't be written, to be sure that neither the snapshot
	 * nor the clone allocates it.
	 *
	 * payload_clone stores the expected result of reading "blob" at this point and
	 * is used only to check data consistency on the clone before and after
	 * inflation. Initially we fill it with the backing snapshot's pattern
	 * used before.
	 */
4646 	memset(payload_clone, 0xE5, payload_size - cluster_size);
4647 	memset(payload_clone + payload_size - cluster_size, 0x00, cluster_size);
4648 	memset(payload_write, 0xAA, payload_size);
4649 	for (i = 1; i < 5; i += 2) {
4650 		spdk_blob_io_write(blob, channel, payload_write, i * pages_per_cluster,
4651 				   pages_per_cluster, blob_op_complete, NULL);
4652 		poll_threads();
4653 		CU_ASSERT(g_bserrno == 0);
4654 
4655 		/* Update expected result */
4656 		memcpy(payload_clone + (cluster_size * i), payload_write,
4657 		       cluster_size);
4658 	}
4659 	CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs));
4660 
4661 	/* Check data consistency on clone */
4662 	memset(payload_read, 0xFF, payload_size);
4663 	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload,
4664 			  blob_op_complete, NULL);
4665 	poll_threads();
4666 	CU_ASSERT(g_bserrno == 0);
4667 	CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0);
4668 
	/* 3) Create a second level snapshot from the blob */
4670 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
4671 	poll_threads();
4672 	CU_ASSERT(g_bserrno == 0);
4673 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
4674 	snapshot2id = g_blobid;
4675 
4676 	spdk_bs_open_blob(bs, snapshot2id, blob_op_with_handle_complete, NULL);
4677 	poll_threads();
4678 	CU_ASSERT(g_bserrno == 0);
4679 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4680 	snapshot2 = g_blob;
4681 	CU_ASSERT(snapshot2->data_ro == true);
4682 	CU_ASSERT(snapshot2->md_ro == true);
4683 
4684 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot2) == 5);
4685 
4686 	CU_ASSERT(snapshot2->parent_id == snapshotid);
4687 
	/* Write one cluster on the top level blob. This cluster (1) covers an
	 * already allocated cluster in snapshot2, so it shouldn't be inflated
	 * at all */
4691 	spdk_blob_io_write(blob, channel, payload_write, pages_per_cluster,
4692 			   pages_per_cluster, blob_op_complete, NULL);
4693 	poll_threads();
4694 	CU_ASSERT(g_bserrno == 0);
4695 
4696 	/* Update expected result */
4697 	memcpy(payload_clone + cluster_size, payload_write, cluster_size);
4698 
4699 	/* Check data consistency on clone */
4700 	memset(payload_read, 0xFF, payload_size);
4701 	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload,
4702 			  blob_op_complete, NULL);
4703 	poll_threads();
4704 	CU_ASSERT(g_bserrno == 0);
4705 	CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0);
4706 
4707 
4708 	/* Close all blobs */
4709 	spdk_blob_close(blob, blob_op_complete, NULL);
4710 	poll_threads();
4711 	CU_ASSERT(g_bserrno == 0);
4712 
4713 	spdk_blob_close(snapshot2, blob_op_complete, NULL);
4714 	poll_threads();
4715 	CU_ASSERT(g_bserrno == 0);
4716 
4717 	spdk_blob_close(snapshot, blob_op_complete, NULL);
4718 	poll_threads();
4719 	CU_ASSERT(g_bserrno == 0);
4720 
4721 	/* Check snapshot-clone relations */
4722 	count = 2;
4723 	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);
4724 	CU_ASSERT(count == 1);
4725 	CU_ASSERT(ids[0] == snapshot2id);
4726 
4727 	count = 2;
4728 	CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0);
4729 	CU_ASSERT(count == 1);
4730 	CU_ASSERT(ids[0] == blobid);
4731 
4732 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshot2id);
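	/* At this point the chain is blob -> snapshot2 -> snapshot, and the top level
	 * blob should have only cluster 1 allocated. */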
4733 
4734 	free_clusters = spdk_bs_free_cluster_count(bs);
4735 	if (!decouple_parent) {
4736 		/* Do full blob inflation */
4737 		spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
4738 		poll_threads();
4739 		CU_ASSERT(g_bserrno == 0);
4740 
		/* All clusters should be inflated (except the one already allocated
		 * in the top level blob) */
4743 		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 4);
4744 
4745 		/* Check if relation tree updated correctly */
4746 		count = 2;
4747 		CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);
4748 
		/* snapshotid has one clone */
4750 		CU_ASSERT(count == 1);
4751 		CU_ASSERT(ids[0] == snapshot2id);
4752 
		/* snapshot2id has no clones */
4754 		count = 2;
4755 		CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0);
4756 		CU_ASSERT(count == 0);
4757 
4758 		CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID);
4759 	} else {
4760 		/* Decouple parent of blob */
4761 		spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
4762 		poll_threads();
4763 		CU_ASSERT(g_bserrno == 0);
4764 
		/* Only one cluster from the parent should be inflated (the second one
		 * is covered by a cluster written on the top level blob, and
		 * already allocated) */
4768 		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 1);
4769 
4770 		/* Check if relation tree updated correctly */
4771 		count = 2;
4772 		CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);
4773 
		/* snapshotid has two clones now */
4775 		CU_ASSERT(count == 2);
4776 		CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
4777 		CU_ASSERT(ids[0] == snapshot2id || ids[1] == snapshot2id);
4778 
		/* snapshot2id has no clones */
4780 		count = 2;
4781 		CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0);
4782 		CU_ASSERT(count == 0);
4783 
4784 		CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
4785 	}
4786 
4787 	/* Try to delete snapshot2 (should pass) */
4788 	spdk_bs_delete_blob(bs, snapshot2id, blob_op_complete, NULL);
4789 	poll_threads();
4790 	CU_ASSERT(g_bserrno == 0);
4791 
4792 	/* Try to delete base snapshot */
4793 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
4794 	poll_threads();
4795 	CU_ASSERT(g_bserrno == 0);
4796 
4797 	/* Reopen blob after snapshot deletion */
4798 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
4799 	poll_threads();
4800 	CU_ASSERT(g_bserrno == 0);
4801 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4802 	blob = g_blob;
4803 
4804 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
4805 
4806 	/* Check data consistency on inflated blob */
4807 	memset(payload_read, 0xFF, payload_size);
4808 	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload,
4809 			  blob_op_complete, NULL);
4810 	poll_threads();
4811 	CU_ASSERT(g_bserrno == 0);
4812 	CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0);
4813 
4814 	spdk_bs_free_io_channel(channel);
4815 	poll_threads();
4816 
4817 	free(payload_read);
4818 	free(payload_write);
4819 	free(payload_clone);
4820 
4821 	ut_blob_close_and_delete(bs, blob);
4822 }
4823 
4824 static void
4825 blob_inflate_rw(void)
4826 {
4827 	_blob_inflate_rw(false);
4828 	_blob_inflate_rw(true);
4829 }
4830 
4831 /**
4832  * Snapshot-clones relation test
4833  *
4834  *         snapshot
4835  *            |
4836  *      +-----+-----+
4837  *      |           |
4838  *   blob(ro)   snapshot2
4839  *      |           |
4840  *   clone2      clone
4841  */
4842 static void
4843 blob_relations(void)
4844 {
4845 	struct spdk_blob_store *bs;
4846 	struct spdk_bs_dev *dev;
4847 	struct spdk_bs_opts bs_opts;
4848 	struct spdk_blob_opts opts;
4849 	struct spdk_blob *blob, *snapshot, *snapshot2, *clone, *clone2;
4850 	spdk_blob_id blobid, cloneid, snapshotid, cloneid2, snapshotid2;
4851 	int rc;
4852 	size_t count;
4853 	spdk_blob_id ids[10] = {};
4854 
4855 	dev = init_dev();
	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
4857 	snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");
4858 
4859 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
4860 	poll_threads();
4861 	CU_ASSERT(g_bserrno == 0);
4862 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
4863 	bs = g_bs;
4864 
4865 	/* 1. Create blob with 10 clusters */
4866 
4867 	ut_spdk_blob_opts_init(&opts);
4868 	opts.num_clusters = 10;
4869 
4870 	blob = ut_blob_create_and_open(bs, &opts);
4871 	blobid = spdk_blob_get_id(blob);
4872 
4873 	CU_ASSERT(!spdk_blob_is_read_only(blob));
4874 	CU_ASSERT(!spdk_blob_is_snapshot(blob));
4875 	CU_ASSERT(!spdk_blob_is_clone(blob));
4876 	CU_ASSERT(!spdk_blob_is_thin_provisioned(blob));
4877 
	/* blob should have neither an underlying snapshot nor any clones */
4879 	CU_ASSERT(blob->parent_id == SPDK_BLOBID_INVALID);
4880 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID);
4881 	count = SPDK_COUNTOF(ids);
4882 	rc = spdk_blob_get_clones(bs, blobid, ids, &count);
4883 	CU_ASSERT(rc == 0);
4884 	CU_ASSERT(count == 0);
4885 
4886 
4887 	/* 2. Create snapshot */
4888 
4889 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
4890 	poll_threads();
4891 	CU_ASSERT(g_bserrno == 0);
4892 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
4893 	snapshotid = g_blobid;
4894 
4895 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
4896 	poll_threads();
4897 	CU_ASSERT(g_bserrno == 0);
4898 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4899 	snapshot = g_blob;
4900 
4901 	CU_ASSERT(spdk_blob_is_read_only(snapshot));
4902 	CU_ASSERT(spdk_blob_is_snapshot(snapshot));
4903 	CU_ASSERT(!spdk_blob_is_clone(snapshot));
4904 	CU_ASSERT(snapshot->parent_id == SPDK_BLOBID_INVALID);
4905 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid) == SPDK_BLOBID_INVALID);
4906 
	/* Check if the original blob was converted to a clone of the snapshot */
4908 	CU_ASSERT(!spdk_blob_is_read_only(blob));
4909 	CU_ASSERT(!spdk_blob_is_snapshot(blob));
4910 	CU_ASSERT(spdk_blob_is_clone(blob));
4911 	CU_ASSERT(spdk_blob_is_thin_provisioned(blob));
4912 	CU_ASSERT(blob->parent_id == snapshotid);
4913 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
4914 
4915 	count = SPDK_COUNTOF(ids);
4916 	rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
4917 	CU_ASSERT(rc == 0);
4918 	CU_ASSERT(count == 1);
4919 	CU_ASSERT(ids[0] == blobid);
4920 
4921 
4922 	/* 3. Create clone from snapshot */
4923 
4924 	spdk_bs_create_clone(bs, snapshotid, NULL, blob_op_with_id_complete, NULL);
4925 	poll_threads();
4926 	CU_ASSERT(g_bserrno == 0);
4927 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
4928 	cloneid = g_blobid;
4929 
4930 	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
4931 	poll_threads();
4932 	CU_ASSERT(g_bserrno == 0);
4933 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4934 	clone = g_blob;
4935 
4936 	CU_ASSERT(!spdk_blob_is_read_only(clone));
4937 	CU_ASSERT(!spdk_blob_is_snapshot(clone));
4938 	CU_ASSERT(spdk_blob_is_clone(clone));
4939 	CU_ASSERT(spdk_blob_is_thin_provisioned(clone));
4940 	CU_ASSERT(clone->parent_id == snapshotid);
4941 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid);
4942 
4943 	count = SPDK_COUNTOF(ids);
4944 	rc = spdk_blob_get_clones(bs, cloneid, ids, &count);
4945 	CU_ASSERT(rc == 0);
4946 	CU_ASSERT(count == 0);
4947 
4948 	/* Check if clone is on the snapshot's list */
4949 	count = SPDK_COUNTOF(ids);
4950 	rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
4951 	CU_ASSERT(rc == 0);
4952 	CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
4953 	CU_ASSERT(ids[0] == cloneid || ids[1] == cloneid);
4954 
4955 
4956 	/* 4. Create snapshot of the clone */
4957 
4958 	spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL);
4959 	poll_threads();
4960 	CU_ASSERT(g_bserrno == 0);
4961 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
4962 	snapshotid2 = g_blobid;
4963 
4964 	spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL);
4965 	poll_threads();
4966 	CU_ASSERT(g_bserrno == 0);
4967 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4968 	snapshot2 = g_blob;
4969 
4970 	CU_ASSERT(spdk_blob_is_read_only(snapshot2));
4971 	CU_ASSERT(spdk_blob_is_snapshot(snapshot2));
4972 	CU_ASSERT(spdk_blob_is_clone(snapshot2));
4973 	CU_ASSERT(snapshot2->parent_id == snapshotid);
4974 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid);
4975 
4976 	/* Check if clone is converted to the clone of snapshot2 and snapshot2
4977 	 * is a child of snapshot */
4978 	CU_ASSERT(!spdk_blob_is_read_only(clone));
4979 	CU_ASSERT(!spdk_blob_is_snapshot(clone));
4980 	CU_ASSERT(spdk_blob_is_clone(clone));
4981 	CU_ASSERT(spdk_blob_is_thin_provisioned(clone));
4982 	CU_ASSERT(clone->parent_id == snapshotid2);
4983 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2);
4984 
4985 	count = SPDK_COUNTOF(ids);
4986 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
4987 	CU_ASSERT(rc == 0);
4988 	CU_ASSERT(count == 1);
4989 	CU_ASSERT(ids[0] == cloneid);
4990 
4991 
4992 	/* 5. Try to create clone from read only blob */
4993 
4994 	/* Mark blob as read only */
4995 	spdk_blob_set_read_only(blob);
4996 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
4997 	poll_threads();
4998 	CU_ASSERT(g_bserrno == 0);
4999 
5000 	/* Check if previously created blob is read only clone */
5001 	CU_ASSERT(spdk_blob_is_read_only(blob));
5002 	CU_ASSERT(!spdk_blob_is_snapshot(blob));
5003 	CU_ASSERT(spdk_blob_is_clone(blob));
5004 	CU_ASSERT(spdk_blob_is_thin_provisioned(blob));
5005 
5006 	/* Create clone from read only blob */
5007 	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5008 	poll_threads();
5009 	CU_ASSERT(g_bserrno == 0);
5010 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5011 	cloneid2 = g_blobid;
5012 
5013 	spdk_bs_open_blob(bs, cloneid2, blob_op_with_handle_complete, NULL);
5014 	poll_threads();
5015 	CU_ASSERT(g_bserrno == 0);
5016 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5017 	clone2 = g_blob;
5018 
5019 	CU_ASSERT(!spdk_blob_is_read_only(clone2));
5020 	CU_ASSERT(!spdk_blob_is_snapshot(clone2));
5021 	CU_ASSERT(spdk_blob_is_clone(clone2));
5022 	CU_ASSERT(spdk_blob_is_thin_provisioned(clone2));
5023 
5024 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid);
5025 
5026 	count = SPDK_COUNTOF(ids);
5027 	rc = spdk_blob_get_clones(bs, blobid, ids, &count);
5028 	CU_ASSERT(rc == 0);
5029 
5030 	CU_ASSERT(count == 1);
5031 	CU_ASSERT(ids[0] == cloneid2);
5032 
5033 	/* Close blobs */
5034 
5035 	spdk_blob_close(clone2, blob_op_complete, NULL);
5036 	poll_threads();
5037 	CU_ASSERT(g_bserrno == 0);
5038 
5039 	spdk_blob_close(blob, blob_op_complete, NULL);
5040 	poll_threads();
5041 	CU_ASSERT(g_bserrno == 0);
5042 
5043 	spdk_blob_close(clone, blob_op_complete, NULL);
5044 	poll_threads();
5045 	CU_ASSERT(g_bserrno == 0);
5046 
5047 	spdk_blob_close(snapshot, blob_op_complete, NULL);
5048 	poll_threads();
5049 	CU_ASSERT(g_bserrno == 0);
5050 
5051 	spdk_blob_close(snapshot2, blob_op_complete, NULL);
5052 	poll_threads();
5053 	CU_ASSERT(g_bserrno == 0);
5054 
5055 	/* Try to delete snapshot with more than 1 clone */
5056 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
5057 	poll_threads();
5058 	CU_ASSERT(g_bserrno != 0);
5059 
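	/* Reload the blobstore to verify that the snapshot-clone relations were persisted */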
5060 	ut_bs_reload(&bs, &bs_opts);
5061 
5062 	/* NULL ids array should return number of clones in count */
5063 	count = SPDK_COUNTOF(ids);
5064 	rc = spdk_blob_get_clones(bs, snapshotid, NULL, &count);
5065 	CU_ASSERT(rc == -ENOMEM);
5066 	CU_ASSERT(count == 2);
5067 
5068 	/* An ids array that is too small should also return -ENOMEM and the required count */
5069 	count = 1;
5070 	rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
5071 	CU_ASSERT(rc == -ENOMEM);
5072 	CU_ASSERT(count == 2);
5073 
5074 
5075 	/* Verify structure of loaded blob store */
5076 
5077 	/* snapshot */
5078 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid) == SPDK_BLOBID_INVALID);
5079 
5080 	count = SPDK_COUNTOF(ids);
5081 	rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
5082 	CU_ASSERT(rc == 0);
5083 	CU_ASSERT(count == 2);
5084 	CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
5085 	CU_ASSERT(ids[0] == snapshotid2 || ids[1] == snapshotid2);
5086 
5087 	/* blob */
5088 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
5089 	count = SPDK_COUNTOF(ids);
5090 	rc = spdk_blob_get_clones(bs, blobid, ids, &count);
5091 	CU_ASSERT(rc == 0);
5092 	CU_ASSERT(count == 1);
5093 	CU_ASSERT(ids[0] == cloneid2);
5094 
5095 	/* clone */
5096 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2);
5097 	count = SPDK_COUNTOF(ids);
5098 	rc = spdk_blob_get_clones(bs, cloneid, ids, &count);
5099 	CU_ASSERT(rc == 0);
5100 	CU_ASSERT(count == 0);
5101 
5102 	/* snapshot2 */
5103 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid);
5104 	count = SPDK_COUNTOF(ids);
5105 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
5106 	CU_ASSERT(rc == 0);
5107 	CU_ASSERT(count == 1);
5108 	CU_ASSERT(ids[0] == cloneid);
5109 
5110 	/* clone2 */
5111 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid);
5112 	count = SPDK_COUNTOF(ids);
5113 	rc = spdk_blob_get_clones(bs, cloneid2, ids, &count);
5114 	CU_ASSERT(rc == 0);
5115 	CU_ASSERT(count == 0);
5116 
5117 	/* Try to delete a blob that the user should not be able to remove (a snapshot that still has clones) */
5118 
5119 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
5120 	poll_threads();
5121 	CU_ASSERT(g_bserrno != 0);
5122 
5123 	/* Remove all blobs, deleting each one only after all of its clones are gone */
5124 
5125 	spdk_bs_delete_blob(bs, cloneid, blob_op_complete, NULL);
5126 	poll_threads();
5127 	CU_ASSERT(g_bserrno == 0);
5128 
5129 	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
5130 	poll_threads();
5131 	CU_ASSERT(g_bserrno == 0);
5132 
5133 	spdk_bs_delete_blob(bs, cloneid2, blob_op_complete, NULL);
5134 	poll_threads();
5135 	CU_ASSERT(g_bserrno == 0);
5136 
5137 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
5138 	poll_threads();
5139 	CU_ASSERT(g_bserrno == 0);
5140 
5141 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
5142 	poll_threads();
5143 	CU_ASSERT(g_bserrno == 0);
5144 
5145 	spdk_bs_unload(bs, bs_op_complete, NULL);
5146 	poll_threads();
5147 	CU_ASSERT(g_bserrno == 0);
5148 
5149 	g_bs = NULL;
5150 }
5151 
5152 /**
5153  * Snapshot-clones relation test 2
5154  *
5155  *         snapshot1
5156  *            |
5157  *         snapshot2
5158  *            |
5159  *      +-----+-----+
5160  *      |           |
5161  *   blob(ro)   snapshot3
5162  *      |           |
5163  *      |       snapshot4
5164  *      |        |     |
5165  *   clone2   clone  clone3
5166  */
5167 static void
5168 blob_relations2(void)
5169 {
5170 	struct spdk_blob_store *bs;
5171 	struct spdk_bs_dev *dev;
5172 	struct spdk_bs_opts bs_opts;
5173 	struct spdk_blob_opts opts;
5174 	struct spdk_blob *blob, *snapshot1, *snapshot2, *snapshot3, *snapshot4, *clone, *clone2;
5175 	spdk_blob_id blobid, snapshotid1, snapshotid2, snapshotid3, snapshotid4, cloneid, cloneid2,
5176 		     cloneid3;
5177 	int rc;
5178 	size_t count;
5179 	spdk_blob_id ids[10] = {};
5180 
5181 	dev = init_dev();
5182 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
5183 	snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");
5184 
5185 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
5186 	poll_threads();
5187 	CU_ASSERT(g_bserrno == 0);
5188 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
5189 	bs = g_bs;
5190 
5191 	/* 1. Create blob with 10 clusters */
5192 
5193 	ut_spdk_blob_opts_init(&opts);
5194 	opts.num_clusters = 10;
5195 
5196 	blob = ut_blob_create_and_open(bs, &opts);
5197 	blobid = spdk_blob_get_id(blob);
5198 
5199 	/* 2. Create snapshot1 */
5200 
5201 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5202 	poll_threads();
5203 	CU_ASSERT(g_bserrno == 0);
5204 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5205 	snapshotid1 = g_blobid;
5206 
5207 	spdk_bs_open_blob(bs, snapshotid1, blob_op_with_handle_complete, NULL);
5208 	poll_threads();
5209 	CU_ASSERT(g_bserrno == 0);
5210 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5211 	snapshot1 = g_blob;
5212 
5213 	CU_ASSERT(snapshot1->parent_id == SPDK_BLOBID_INVALID);
5214 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid1) == SPDK_BLOBID_INVALID);
5215 
5219 	/* Check if blob is the clone of snapshot1 */
5220 	CU_ASSERT(blob->parent_id == snapshotid1);
5221 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid1);
5222 
5223 	count = SPDK_COUNTOF(ids);
5224 	rc = spdk_blob_get_clones(bs, snapshotid1, ids, &count);
5225 	CU_ASSERT(rc == 0);
5226 	CU_ASSERT(count == 1);
5227 	CU_ASSERT(ids[0] == blobid);
5228 
5229 	/* 3. Create another snapshot */
5230 
5231 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5232 	poll_threads();
5233 	CU_ASSERT(g_bserrno == 0);
5234 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5235 	snapshotid2 = g_blobid;
5236 
5237 	spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL);
5238 	poll_threads();
5239 	CU_ASSERT(g_bserrno == 0);
5240 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5241 	snapshot2 = g_blob;
5242 
5243 	CU_ASSERT(spdk_blob_is_clone(snapshot2));
5244 	CU_ASSERT(snapshot2->parent_id == snapshotid1);
5245 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid1);
5246 
5247 	/* Check if snapshot2 is the clone of snapshot1 and blob
5248 	 * is a child of snapshot2 */
5249 	CU_ASSERT(blob->parent_id == snapshotid2);
5250 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid2);
5251 
5252 	count = SPDK_COUNTOF(ids);
5253 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
5254 	CU_ASSERT(rc == 0);
5255 	CU_ASSERT(count == 1);
5256 	CU_ASSERT(ids[0] == blobid);
5257 
5258 	/* 4. Create clone from snapshot */
5259 
5260 	spdk_bs_create_clone(bs, snapshotid2, NULL, blob_op_with_id_complete, NULL);
5261 	poll_threads();
5262 	CU_ASSERT(g_bserrno == 0);
5263 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5264 	cloneid = g_blobid;
5265 
5266 	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
5267 	poll_threads();
5268 	CU_ASSERT(g_bserrno == 0);
5269 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5270 	clone = g_blob;
5271 
5272 	CU_ASSERT(clone->parent_id == snapshotid2);
5273 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2);
5274 
5275 	/* Check if clone is on the snapshot's list */
5276 	count = SPDK_COUNTOF(ids);
5277 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
5278 	CU_ASSERT(rc == 0);
5279 	CU_ASSERT(count == 2);
5280 	CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
5281 	CU_ASSERT(ids[0] == cloneid || ids[1] == cloneid);
5282 
5283 	/* 5. Create snapshot of the clone */
5284 
5285 	spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL);
5286 	poll_threads();
5287 	CU_ASSERT(g_bserrno == 0);
5288 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5289 	snapshotid3 = g_blobid;
5290 
5291 	spdk_bs_open_blob(bs, snapshotid3, blob_op_with_handle_complete, NULL);
5292 	poll_threads();
5293 	CU_ASSERT(g_bserrno == 0);
5294 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5295 	snapshot3 = g_blob;
5296 
5297 	CU_ASSERT(snapshot3->parent_id == snapshotid2);
5298 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid3) == snapshotid2);
5299 
5300 	/* Check if clone is converted to the clone of snapshot3 and snapshot3
5301 	 * is a child of snapshot2 */
5302 	CU_ASSERT(clone->parent_id == snapshotid3);
5303 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3);
5304 
5305 	count = SPDK_COUNTOF(ids);
5306 	rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count);
5307 	CU_ASSERT(rc == 0);
5308 	CU_ASSERT(count == 1);
5309 	CU_ASSERT(ids[0] == cloneid);
5310 
5311 	/* 6. Create another snapshot of the clone */
5312 
5313 	spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL);
5314 	poll_threads();
5315 	CU_ASSERT(g_bserrno == 0);
5316 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5317 	snapshotid4 = g_blobid;
5318 
5319 	spdk_bs_open_blob(bs, snapshotid4, blob_op_with_handle_complete, NULL);
5320 	poll_threads();
5321 	CU_ASSERT(g_bserrno == 0);
5322 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5323 	snapshot4 = g_blob;
5324 
5325 	CU_ASSERT(snapshot4->parent_id == snapshotid3);
5326 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid4) == snapshotid3);
5327 
5328 	/* Check if clone is converted to the clone of snapshot4 and snapshot4
5329 	 * is a child of snapshot3 */
5330 	CU_ASSERT(clone->parent_id == snapshotid4);
5331 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid4);
5332 
5333 	count = SPDK_COUNTOF(ids);
5334 	rc = spdk_blob_get_clones(bs, snapshotid4, ids, &count);
5335 	CU_ASSERT(rc == 0);
5336 	CU_ASSERT(count == 1);
5337 	CU_ASSERT(ids[0] == cloneid);
5338 
5339 	/* 7. Remove snapshot 4 */
5340 
5341 	ut_blob_close_and_delete(bs, snapshot4);
5342 
5343 	/* Check if relations are back to state from before creating snapshot 4 */
5344 	CU_ASSERT(clone->parent_id == snapshotid3);
5345 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3);
5346 
5347 	count = SPDK_COUNTOF(ids);
5348 	rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count);
5349 	CU_ASSERT(rc == 0);
5350 	CU_ASSERT(count == 1);
5351 	CU_ASSERT(ids[0] == cloneid);
5352 
5353 	/* 8. Create second clone of snapshot 3 and try to remove snapshot 3 */
5354 
5355 	spdk_bs_create_clone(bs, snapshotid3, NULL, blob_op_with_id_complete, NULL);
5356 	poll_threads();
5357 	CU_ASSERT(g_bserrno == 0);
5358 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5359 	cloneid3 = g_blobid;
5360 
5361 	spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL);
5362 	poll_threads();
5363 	CU_ASSERT(g_bserrno != 0);
5364 
5365 	/* 9. Open snapshot 3 again and try to remove it while clone 3 is closed */
5366 
5367 	spdk_bs_open_blob(bs, snapshotid3, blob_op_with_handle_complete, NULL);
5368 	poll_threads();
5369 	CU_ASSERT(g_bserrno == 0);
5370 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5371 	snapshot3 = g_blob;
5372 
5373 	spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL);
5374 	poll_threads();
5375 	CU_ASSERT(g_bserrno != 0);
5376 
5377 	spdk_blob_close(snapshot3, blob_op_complete, NULL);
5378 	poll_threads();
5379 	CU_ASSERT(g_bserrno == 0);
5380 
5381 	spdk_bs_delete_blob(bs, cloneid3, blob_op_complete, NULL);
5382 	poll_threads();
5383 	CU_ASSERT(g_bserrno == 0);
5384 
5385 	/* 10. Remove snapshot 1 */
5386 
5387 	ut_blob_close_and_delete(bs, snapshot1);
5388 
5389 	/* Check relations after removing snapshot 1: snapshot2 becomes the root and keeps both of its clones (blob and snapshot3) */
5390 	CU_ASSERT(snapshot2->parent_id == SPDK_BLOBID_INVALID);
5391 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == SPDK_BLOBID_INVALID);
5392 
5393 	count = SPDK_COUNTOF(ids);
5394 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
5395 	CU_ASSERT(rc == 0);
5396 	CU_ASSERT(count == 2);
5397 	CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
5398 	CU_ASSERT(ids[0] == snapshotid3 || ids[1] == snapshotid3);
5399 
5400 	/* 11. Try to create clone from read only blob */
5401 
5402 	/* Mark blob as read only */
5403 	spdk_blob_set_read_only(blob);
5404 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
5405 	poll_threads();
5406 	CU_ASSERT(g_bserrno == 0);
5407 
5408 	/* Create clone from read only blob */
5409 	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5410 	poll_threads();
5411 	CU_ASSERT(g_bserrno == 0);
5412 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5413 	cloneid2 = g_blobid;
5414 
5415 	spdk_bs_open_blob(bs, cloneid2, blob_op_with_handle_complete, NULL);
5416 	poll_threads();
5417 	CU_ASSERT(g_bserrno == 0);
5418 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5419 	clone2 = g_blob;
5420 
5421 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid);
5422 
5423 	count = SPDK_COUNTOF(ids);
5424 	rc = spdk_blob_get_clones(bs, blobid, ids, &count);
5425 	CU_ASSERT(rc == 0);
5426 	CU_ASSERT(count == 1);
5427 	CU_ASSERT(ids[0] == cloneid2);
5428 
5429 	/* Close blobs */
5430 
5431 	spdk_blob_close(clone2, blob_op_complete, NULL);
5432 	poll_threads();
5433 	CU_ASSERT(g_bserrno == 0);
5434 
5435 	spdk_blob_close(blob, blob_op_complete, NULL);
5436 	poll_threads();
5437 	CU_ASSERT(g_bserrno == 0);
5438 
5439 	spdk_blob_close(clone, blob_op_complete, NULL);
5440 	poll_threads();
5441 	CU_ASSERT(g_bserrno == 0);
5442 
5443 	spdk_blob_close(snapshot2, blob_op_complete, NULL);
5444 	poll_threads();
5445 	CU_ASSERT(g_bserrno == 0);
5446 
5447 	spdk_blob_close(snapshot3, blob_op_complete, NULL);
5448 	poll_threads();
5449 	CU_ASSERT(g_bserrno == 0);
5450 
5451 	ut_bs_reload(&bs, &bs_opts);
5452 
5453 	/* Verify structure of loaded blob store */
5454 
5455 	/* snapshot2 */
5456 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == SPDK_BLOBID_INVALID);
5457 
5458 	count = SPDK_COUNTOF(ids);
5459 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
5460 	CU_ASSERT(rc == 0);
5461 	CU_ASSERT(count == 2);
5462 	CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
5463 	CU_ASSERT(ids[0] == snapshotid3 || ids[1] == snapshotid3);
5464 
5465 	/* blob */
5466 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid2);
5467 	count = SPDK_COUNTOF(ids);
5468 	rc = spdk_blob_get_clones(bs, blobid, ids, &count);
5469 	CU_ASSERT(rc == 0);
5470 	CU_ASSERT(count == 1);
5471 	CU_ASSERT(ids[0] == cloneid2);
5472 
5473 	/* clone */
5474 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3);
5475 	count = SPDK_COUNTOF(ids);
5476 	rc = spdk_blob_get_clones(bs, cloneid, ids, &count);
5477 	CU_ASSERT(rc == 0);
5478 	CU_ASSERT(count == 0);
5479 
5480 	/* snapshot3 */
5481 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid3) == snapshotid2);
5482 	count = SPDK_COUNTOF(ids);
5483 	rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count);
5484 	CU_ASSERT(rc == 0);
5485 	CU_ASSERT(count == 1);
5486 	CU_ASSERT(ids[0] == cloneid);
5487 
5488 	/* clone2 */
5489 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid);
5490 	count = SPDK_COUNTOF(ids);
5491 	rc = spdk_blob_get_clones(bs, cloneid2, ids, &count);
5492 	CU_ASSERT(rc == 0);
5493 	CU_ASSERT(count == 0);
5494 
5495 	/* Try to delete all blobs in the worst possible order */
5496 
5497 	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
5498 	poll_threads();
5499 	CU_ASSERT(g_bserrno != 0);
5500 
5501 	spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL);
5502 	poll_threads();
5503 	CU_ASSERT(g_bserrno == 0);
5504 
5505 	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
5506 	poll_threads();
5507 	CU_ASSERT(g_bserrno != 0);
5508 
5509 	spdk_bs_delete_blob(bs, cloneid, blob_op_complete, NULL);
5510 	poll_threads();
5511 	CU_ASSERT(g_bserrno == 0);
5512 
5513 	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
5514 	poll_threads();
5515 	CU_ASSERT(g_bserrno == 0);
5516 
5517 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
5518 	poll_threads();
5519 	CU_ASSERT(g_bserrno == 0);
5520 
5521 	spdk_bs_delete_blob(bs, cloneid2, blob_op_complete, NULL);
5522 	poll_threads();
5523 	CU_ASSERT(g_bserrno == 0);
5524 
5525 	spdk_bs_unload(bs, bs_op_complete, NULL);
5526 	poll_threads();
5527 	CU_ASSERT(g_bserrno == 0);
5528 
5529 	g_bs = NULL;
5530 }
5531 
5532 /**
5533  * Snapshot-clones relation test 3
5534  *
5535  *         snapshot0
5536  *            |
5537  *         snapshot1
5538  *            |
5539  *         snapshot2
5540  *            |
5541  *           blob
5542  */
5543 static void
5544 blob_relations3(void)
5545 {
5546 	struct spdk_blob_store *bs;
5547 	struct spdk_bs_dev *dev;
5548 	struct spdk_io_channel *channel;
5549 	struct spdk_bs_opts bs_opts;
5550 	struct spdk_blob_opts opts;
5551 	struct spdk_blob *blob;
5552 	spdk_blob_id blobid, snapshotid0, snapshotid1, snapshotid2;
5553 
5554 	dev = init_dev();
5555 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
5556 	snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");
5557 
5558 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
5559 	poll_threads();
5560 	CU_ASSERT(g_bserrno == 0);
5561 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
5562 	bs = g_bs;
5563 
5564 	channel = spdk_bs_alloc_io_channel(bs);
5565 	SPDK_CU_ASSERT_FATAL(channel != NULL);
5566 
5567 	/* 1. Create blob with 10 clusters */
5568 	ut_spdk_blob_opts_init(&opts);
5569 	opts.num_clusters = 10;
5570 
5571 	blob = ut_blob_create_and_open(bs, &opts);
5572 	blobid = spdk_blob_get_id(blob);
5573 
5574 	/* 2. Create snapshot0 */
5575 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5576 	poll_threads();
5577 	CU_ASSERT(g_bserrno == 0);
5578 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5579 	snapshotid0 = g_blobid;
5580 
5581 	/* 3. Create snapshot1 */
5582 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5583 	poll_threads();
5584 	CU_ASSERT(g_bserrno == 0);
5585 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5586 	snapshotid1 = g_blobid;
5587 
5588 	/* 4. Create snapshot2 */
5589 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5590 	poll_threads();
5591 	CU_ASSERT(g_bserrno == 0);
5592 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5593 	snapshotid2 = g_blobid;
5594 
5595 	/* 5. Decouple blob */
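	/* Decoupling copies any clusters allocated in the immediate parent (snapshot2) into the
	 * blob and re-parents the blob to its grandparent (snapshot1). */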
5596 	spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
5597 	poll_threads();
5598 	CU_ASSERT(g_bserrno == 0);
5599 
5600 	/* 6. Decouple snapshot2. Make sure updating md of snapshot2 is possible */
5601 	spdk_bs_blob_decouple_parent(bs, channel, snapshotid2, blob_op_complete, NULL);
5602 	poll_threads();
5603 	CU_ASSERT(g_bserrno == 0);
5604 
5605 	/* 7. Delete blob */
5606 	spdk_blob_close(blob, blob_op_complete, NULL);
5607 	poll_threads();
5608 	CU_ASSERT(g_bserrno == 0);
5609 
5610 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
5611 	poll_threads();
5612 	CU_ASSERT(g_bserrno == 0);
5613 
5614 	/* 8. Delete snapshot2.
5615 	 * If md of snapshot 2 was updated, it should be possible to delete it */
5616 	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
5617 	poll_threads();
5618 	CU_ASSERT(g_bserrno == 0);
5619 
5620 	/* Remove remaining blobs and unload bs */
5621 	spdk_bs_delete_blob(bs, snapshotid1, blob_op_complete, NULL);
5622 	poll_threads();
5623 	CU_ASSERT(g_bserrno == 0);
5624 
5625 	spdk_bs_delete_blob(bs, snapshotid0, blob_op_complete, NULL);
5626 	poll_threads();
5627 	CU_ASSERT(g_bserrno == 0);
5628 
5629 	spdk_bs_free_io_channel(channel);
5630 	poll_threads();
5631 
5632 	spdk_bs_unload(bs, bs_op_complete, NULL);
5633 	poll_threads();
5634 	CU_ASSERT(g_bserrno == 0);
5635 
5636 	g_bs = NULL;
5637 }
5638 
5639 static void
5640 blobstore_clean_power_failure(void)
5641 {
5642 	struct spdk_blob_store *bs;
5643 	struct spdk_blob *blob;
5644 	struct spdk_power_failure_thresholds thresholds = {};
5645 	bool clean = false;
5646 	struct spdk_bs_super_block *super = (struct spdk_bs_super_block *)&g_dev_buffer[0];
5647 	struct spdk_bs_super_block super_copy = {};
5648 
5649 	thresholds.general_threshold = 1;
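	/* Repeat the scenario, injecting the simulated power failure one dev I/O later on
	 * each iteration, until the md sync completes without hitting it. */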
5650 	while (!clean) {
5651 		/* Create bs and blob */
5652 		suite_blob_setup();
5653 		SPDK_CU_ASSERT_FATAL(g_bs != NULL);
5654 		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5655 		bs = g_bs;
5656 		blob = g_blob;
5657 
5658 		/* The super block should not change for the rest of the UT;
5659 		 * save it and compare it later. */
5660 		memcpy(&super_copy, super, sizeof(struct spdk_bs_super_block));
5661 		SPDK_CU_ASSERT_FATAL(super->clean == 0);
5662 		SPDK_CU_ASSERT_FATAL(bs->clean == 0);
5663 
5664 		/* Force the bs/super block into a clean state and mark the blob dirty,
5665 		 * so that the next md sync triggers a blob persist. */
5666 		blob->state = SPDK_BLOB_STATE_DIRTY;
5667 		bs->clean = 1;
5668 		super->clean = 1;
5669 		super->crc = blob_md_page_calc_crc(super);
5670 
5671 		g_bserrno = -1;
5672 		dev_set_power_failure_thresholds(thresholds);
5673 		spdk_blob_sync_md(blob, blob_op_complete, NULL);
5674 		poll_threads();
5675 		dev_reset_power_failure_event();
5676 
5677 		if (g_bserrno == 0) {
5678 			/* After successful md sync, both bs and super block
5679 			 * should be marked as not clean. */
5680 			SPDK_CU_ASSERT_FATAL(bs->clean == 0);
5681 			SPDK_CU_ASSERT_FATAL(super->clean == 0);
5682 			clean = true;
5683 		}
5684 
5685 		/* Depending on the point of failure, super block was either updated or not. */
5686 		super_copy.clean = super->clean;
5687 		super_copy.crc = blob_md_page_calc_crc(&super_copy);
5688 		/* Compare that the values in super block remained unchanged. */
5689 		SPDK_CU_ASSERT_FATAL(!memcmp(&super_copy, super, sizeof(struct spdk_bs_super_block)));
5690 
5691 		/* Delete blob and unload bs */
5692 		suite_blob_cleanup();
5693 
5694 		thresholds.general_threshold++;
5695 	}
5696 }
5697 
5698 static void
5699 blob_delete_snapshot_power_failure(void)
5700 {
5701 	struct spdk_bs_dev *dev;
5702 	struct spdk_blob_store *bs;
5703 	struct spdk_blob_opts opts;
5704 	struct spdk_blob *blob, *snapshot;
5705 	struct spdk_power_failure_thresholds thresholds = {};
5706 	spdk_blob_id blobid, snapshotid;
5707 	const void *value;
5708 	size_t value_len;
5709 	size_t count;
5710 	spdk_blob_id ids[3] = {};
5711 	int rc;
5712 	bool deleted = false;
5713 	int delete_snapshot_bserrno = -1;
5714 
5715 	thresholds.general_threshold = 1;
5716 	while (!deleted) {
5717 		dev = init_dev();
5718 
5719 		spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
5720 		poll_threads();
5721 		CU_ASSERT(g_bserrno == 0);
5722 		SPDK_CU_ASSERT_FATAL(g_bs != NULL);
5723 		bs = g_bs;
5724 
5725 		/* Create blob */
5726 		ut_spdk_blob_opts_init(&opts);
5727 		opts.num_clusters = 10;
5728 
5729 		spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
5730 		poll_threads();
5731 		CU_ASSERT(g_bserrno == 0);
5732 		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5733 		blobid = g_blobid;
5734 
5735 		/* Create snapshot */
5736 		spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5737 		poll_threads();
5738 		CU_ASSERT(g_bserrno == 0);
5739 		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5740 		snapshotid = g_blobid;
5741 		SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1));
5742 		SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11));
5743 
5744 		dev_set_power_failure_thresholds(thresholds);
5745 
5746 		spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
5747 		poll_threads();
5748 		delete_snapshot_bserrno = g_bserrno;
5749 
5750 		/* Do not shut down cleanly. The assumption is that once snapshot deletion
5751 		 * reports success, changes to both blobs should already be persisted. */
5752 		dev_reset_power_failure_event();
5753 		ut_bs_dirty_load(&bs, NULL);
5754 
5755 		SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1));
5756 		SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11));
5757 
5758 		spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
5759 		poll_threads();
5760 		CU_ASSERT(g_bserrno == 0);
5761 		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5762 		blob = g_blob;
5763 		SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == true);
5764 
5765 		spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
5766 		poll_threads();
5767 
5768 		if (g_bserrno == 0) {
5769 			SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5770 			snapshot = g_blob;
5771 			CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
5772 			count = SPDK_COUNTOF(ids);
5773 			rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
5774 			CU_ASSERT(rc == 0);
5775 			CU_ASSERT(count == 1);
5776 			CU_ASSERT(ids[0] == blobid);
5777 			rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_PENDING_REMOVAL, &value, &value_len);
5778 			CU_ASSERT(rc != 0);
5779 			SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(snapshot) == false);
5780 
5781 			spdk_blob_close(snapshot, blob_op_complete, NULL);
5782 			poll_threads();
5783 			CU_ASSERT(g_bserrno == 0);
5784 		} else {
5785 			CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID);
5786 			/* The snapshot might have been left in an unrecoverable state, so it does not open.
5787 			 * Yet the delete might still have performed further changes to the clone after that.
5788 			 * This UT should keep iterating until the snapshot is deleted and the delete call succeeds. */
5789 			if (delete_snapshot_bserrno == 0) {
5790 				deleted = true;
5791 			}
5792 		}
5793 
5794 		spdk_blob_close(blob, blob_op_complete, NULL);
5795 		poll_threads();
5796 		CU_ASSERT(g_bserrno == 0);
5797 
5798 		spdk_bs_unload(bs, bs_op_complete, NULL);
5799 		poll_threads();
5800 		CU_ASSERT(g_bserrno == 0);
5801 
5802 		thresholds.general_threshold++;
5803 	}
5804 }
5805 
5806 static void
5807 blob_create_snapshot_power_failure(void)
5808 {
5809 	struct spdk_blob_store *bs = g_bs;
5810 	struct spdk_bs_dev *dev;
5811 	struct spdk_blob_opts opts;
5812 	struct spdk_blob *blob, *snapshot;
5813 	struct spdk_power_failure_thresholds thresholds = {};
5814 	spdk_blob_id blobid, snapshotid;
5815 	const void *value;
5816 	size_t value_len;
5817 	size_t count;
5818 	spdk_blob_id ids[3] = {};
5819 	int rc;
5820 	bool created = false;
5821 	int create_snapshot_bserrno = -1;
5822 
5823 	thresholds.general_threshold = 1;
5824 	while (!created) {
5825 		dev = init_dev();
5826 
5827 		spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
5828 		poll_threads();
5829 		CU_ASSERT(g_bserrno == 0);
5830 		SPDK_CU_ASSERT_FATAL(g_bs != NULL);
5831 		bs = g_bs;
5832 
5833 		/* Create blob */
5834 		ut_spdk_blob_opts_init(&opts);
5835 		opts.num_clusters = 10;
5836 
5837 		spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
5838 		poll_threads();
5839 		CU_ASSERT(g_bserrno == 0);
5840 		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5841 		blobid = g_blobid;
5842 		SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1));
5843 		SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11));
5844 
5845 		dev_set_power_failure_thresholds(thresholds);
5846 
5847 		/* Create snapshot */
5848 		spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5849 		poll_threads();
5850 		create_snapshot_bserrno = g_bserrno;
5851 		snapshotid = g_blobid;
5852 		SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1));
5853 		SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11));
5854 
5855 		/* Do not shut down cleanly. The assumption is that once snapshot creation
5856 		 * reports success, both blobs should be power-fail safe. */
5857 		dev_reset_power_failure_event();
5858 		ut_bs_dirty_load(&bs, NULL);
5859 
5860 		SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1));
5861 		SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11));
5862 
5863 		spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
5864 		poll_threads();
5865 		CU_ASSERT(g_bserrno == 0);
5866 		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5867 		blob = g_blob;
5868 
5869 		if (snapshotid != SPDK_BLOBID_INVALID) {
5870 			spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
5871 			poll_threads();
5872 		}
5873 
5874 		if ((snapshotid != SPDK_BLOBID_INVALID) && (g_bserrno == 0)) {
5875 			SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5876 			snapshot = g_blob;
5877 			SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == true);
5878 			SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(snapshot) == false);
5879 			CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
5880 			count = SPDK_COUNTOF(ids);
5881 			rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
5882 			CU_ASSERT(rc == 0);
5883 			CU_ASSERT(count == 1);
5884 			CU_ASSERT(ids[0] == blobid);
5885 			rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_IN_PROGRESS, &value, &value_len);
5886 			CU_ASSERT(rc != 0);
5887 
5888 			spdk_blob_close(snapshot, blob_op_complete, NULL);
5889 			poll_threads();
5890 			CU_ASSERT(g_bserrno == 0);
5891 			if (create_snapshot_bserrno == 0) {
5892 				created = true;
5893 			}
5894 		} else {
5895 			CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID);
5896 			SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == false);
5897 		}
5898 
5899 		spdk_blob_close(blob, blob_op_complete, NULL);
5900 		poll_threads();
5901 		CU_ASSERT(g_bserrno == 0);
5902 
5903 		spdk_bs_unload(bs, bs_op_complete, NULL);
5904 		poll_threads();
5905 		CU_ASSERT(g_bserrno == 0);
5906 
5907 		thresholds.general_threshold++;
5908 	}
5909 }
5910 
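/* Fill the first two clusters of the blob with a known pattern via spdk_blob_io_write()
 * and verify the result directly in the backing dev buffer. */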
5911 static void
5912 test_io_write(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
5913 {
5914 	uint8_t payload_ff[64 * 512];
5915 	uint8_t payload_aa[64 * 512];
5916 	uint8_t payload_00[64 * 512];
5917 	uint8_t *cluster0, *cluster1;
5918 
5919 	memset(payload_ff, 0xFF, sizeof(payload_ff));
5920 	memset(payload_aa, 0xAA, sizeof(payload_aa));
5921 	memset(payload_00, 0x00, sizeof(payload_00));
5922 
5923 	/* Try to perform I/O with io unit = 512 */
5924 	spdk_blob_io_write(blob, channel, payload_ff, 0, 1, blob_op_complete, NULL);
5925 	poll_threads();
5926 	CU_ASSERT(g_bserrno == 0);
5927 
5928 	/* If thin provisioning is set, the cluster should be allocated now */
5929 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[0] != 0);
5930 	cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen];
5931 
5932 	/* Each character 0-F symbolizes a single io_unit of 512 bytes filled with that character.
5933 	 * Pages are separated by |. A whole block [...] symbolizes one cluster (containing 4 pages). */
5934 	/* cluster0: [ F000 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
5935 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
5936 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 31 * 512) == 0);
5937 
5938 	/* Verify write with offset on first page */
5939 	spdk_blob_io_write(blob, channel, payload_ff, 2, 1, blob_op_complete, NULL);
5940 	poll_threads();
5941 	CU_ASSERT(g_bserrno == 0);
5942 
5943 	/* cluster0: [ F0F0 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
5944 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
5945 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
5946 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
5947 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
5948 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_00, 28 * 512) == 0);
5949 
5950 	/* Verify write with offset on first page */
5951 	spdk_blob_io_write(blob, channel, payload_ff, 4, 4, blob_op_complete, NULL);
5952 	poll_threads();
5953 
5954 	/* cluster0: [ F0F0 FFFF | 0000 0000 | 0000 0000 | 0000 0000 ] */
5955 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
5956 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
5957 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
5958 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
5959 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 4 * 512) == 0);
5960 	CU_ASSERT(memcmp(cluster0 + 8 * 512, payload_00, 24 * 512) == 0);
5961 
5962 	/* Verify write with offset on second page */
5963 	spdk_blob_io_write(blob, channel, payload_ff, 8, 4, blob_op_complete, NULL);
5964 	poll_threads();
5965 
5966 	/* cluster0: [ F0F0 FFFF | FFFF 0000 | 0000 0000 | 0000 0000 ] */
5967 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
5968 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
5969 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
5970 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
5971 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 8 * 512) == 0);
5972 	CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0);
5973 
5974 	/* Verify write across multiple pages */
5975 	spdk_blob_io_write(blob, channel, payload_aa, 4, 8, blob_op_complete, NULL);
5976 	poll_threads();
5977 
5978 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 0000 ] */
5979 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
5980 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
5981 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
5982 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
5983 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
5984 	CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0);
5985 
5986 	/* Verify write across multiple clusters */
5987 	spdk_blob_io_write(blob, channel, payload_ff, 28, 8, blob_op_complete, NULL);
5988 	poll_threads();
5989 
5990 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0);
5991 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
5992 
5993 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
5994 	 * cluster1: [ FFFF 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
5995 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
5996 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
5997 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
5998 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
5999 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
6000 	CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0);
6001 
6002 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0);
6003 	CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 28 * 512) == 0);
6004 
6005 	/* Verify write to second cluster */
6006 	spdk_blob_io_write(blob, channel, payload_ff, 32 + 12, 2, blob_op_complete, NULL);
6007 	poll_threads();
6008 
6009 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0);
6010 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
6011 
6012 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6013 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] */
6014 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6015 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6016 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6017 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6018 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
6019 	CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0);
6020 
6021 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0);
6022 	CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 8 * 512) == 0);
6023 	CU_ASSERT(memcmp(cluster1 + 12 * 512, payload_ff, 2 * 512) == 0);
6024 	CU_ASSERT(memcmp(cluster1 + 14 * 512, payload_00, 18 * 512) == 0);
6025 }
6026 
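/* Read the pattern left by test_io_write() back through spdk_blob_io_read() at various
 * offsets and lengths and verify it. */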
6027 static void
6028 test_io_read(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
6029 {
6030 	uint8_t payload_read[64 * 512];
6031 	uint8_t payload_ff[64 * 512];
6032 	uint8_t payload_aa[64 * 512];
6033 	uint8_t payload_00[64 * 512];
6034 
6035 	memset(payload_ff, 0xFF, sizeof(payload_ff));
6036 	memset(payload_aa, 0xAA, sizeof(payload_aa));
6037 	memset(payload_00, 0x00, sizeof(payload_00));
6038 
6039 	/* Read only first io unit */
6040 	/* cluster0: [ (F)0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6041 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
6042 	 * payload_read: F000 0000 | 0000 0000 ... */
6043 	memset(payload_read, 0x00, sizeof(payload_read));
6044 	spdk_blob_io_read(blob, channel, payload_read, 0, 1, blob_op_complete, NULL);
6045 	poll_threads();
6046 	CU_ASSERT(g_bserrno == 0);
6047 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
6048 	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 31 * 512) == 0);
6049 
6050 	/* Read four io_units starting from offset = 2
6051 	 * cluster0: [ F0(F0 AA)AA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6052 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
6053 	 * payload_read: F0AA 0000 | 0000 0000 ... */
6054 
6055 	memset(payload_read, 0x00, sizeof(payload_read));
6056 	spdk_blob_io_read(blob, channel, payload_read, 2, 4, blob_op_complete, NULL);
6057 	poll_threads();
6058 	CU_ASSERT(g_bserrno == 0);
6059 
6060 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
6061 	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0);
6062 	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_aa, 512) == 0);
6063 	CU_ASSERT(memcmp(payload_read + 3 * 512, payload_aa, 512) == 0);
6064 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0);
6065 
6066 	/* Read eight io_units across multiple pages
6067 	 * cluster0: [ F0F0 (AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ]
6068 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
6069 	 * payload_read: AAAA AAAA | 0000 0000 ... */
6070 	memset(payload_read, 0x00, sizeof(payload_read));
6071 	spdk_blob_io_read(blob, channel, payload_read, 4, 8, blob_op_complete, NULL);
6072 	poll_threads();
6073 	CU_ASSERT(g_bserrno == 0);
6074 
6075 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_aa, 8 * 512) == 0);
6076 	CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0);
6077 
6078 	/* Read eight io_units across multiple clusters
6079 	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 (FFFF ]
6080 	 * cluster1: [ FFFF) 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
6081 	 * payload_read: FFFF FFFF | 0000 0000 ... */
6082 	memset(payload_read, 0x00, sizeof(payload_read));
6083 	spdk_blob_io_read(blob, channel, payload_read, 28, 8, blob_op_complete, NULL);
6084 	poll_threads();
6085 	CU_ASSERT(g_bserrno == 0);
6086 
6087 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 8 * 512) == 0);
6088 	CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0);
6089 
6090 	/* Read four io_units from second cluster
6091 	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6092 	 * cluster1: [ FFFF 0000 | 00(00 FF)00 | 0000 0000 | 0000 0000 ]
6093 	 * payload_read: 00FF 0000 | 0000 0000 ... */
6094 	memset(payload_read, 0x00, sizeof(payload_read));
6095 	spdk_blob_io_read(blob, channel, payload_read, 32 + 10, 4, blob_op_complete, NULL);
6096 	poll_threads();
6097 	CU_ASSERT(g_bserrno == 0);
6098 
6099 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_00, 2 * 512) == 0);
6100 	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 2 * 512) == 0);
6101 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0);
6102 
6103 	/* Read second cluster
6104 	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6105 	 * cluster1: [ (FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ]
6106 	 * payload_read: FFFF 0000 | 0000 FF00 ... */
6107 	memset(payload_read, 0x00, sizeof(payload_read));
6108 	spdk_blob_io_read(blob, channel, payload_read, 32, 32, blob_op_complete, NULL);
6109 	poll_threads();
6110 	CU_ASSERT(g_bserrno == 0);
6111 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 4 * 512) == 0);
6112 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 8 * 512) == 0);
6113 	CU_ASSERT(memcmp(payload_read + 12 * 512, payload_ff, 2 * 512) == 0);
6114 	CU_ASSERT(memcmp(payload_read + 14 * 512, payload_00, 18 * 512) == 0);
6115 
6116 	/* Read whole two clusters
6117 	 * cluster0: [ (F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6118 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ] */
6119 	memset(payload_read, 0x00, sizeof(payload_read));
6120 	spdk_blob_io_read(blob, channel, payload_read, 0, 64, blob_op_complete, NULL);
6121 	poll_threads();
6122 	CU_ASSERT(g_bserrno == 0);
6123 
6124 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
6125 	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0);
6126 	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 512) == 0);
6127 	CU_ASSERT(memcmp(payload_read + 3 * 512, payload_00, 512) == 0);
6128 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_aa, 8 * 512) == 0);
6129 	CU_ASSERT(memcmp(payload_read + 28 * 512, payload_ff, 4 * 512) == 0);
6130 
6131 	CU_ASSERT(memcmp(payload_read + (32 + 0) * 512, payload_ff, 4 * 512) == 0);
6132 	CU_ASSERT(memcmp(payload_read + (32 + 4) * 512, payload_00, 8 * 512) == 0);
6133 	CU_ASSERT(memcmp(payload_read + (32 + 12) * 512, payload_ff, 2 * 512) == 0);
6134 	CU_ASSERT(memcmp(payload_read + (32 + 14) * 512, payload_00, 18 * 512) == 0);
6135 }
6136 
6137 
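/* Unmap the blob's first two clusters and verify that the backing dev buffer reads back as zeroes. */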
6138 static void
6139 test_io_unmap(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
6140 {
6141 	uint8_t payload_ff[64 * 512];
6142 	uint8_t payload_aa[64 * 512];
6143 	uint8_t payload_00[64 * 512];
6144 	uint8_t *cluster0, *cluster1;
6145 
6146 	memset(payload_ff, 0xFF, sizeof(payload_ff));
6147 	memset(payload_aa, 0xAA, sizeof(payload_aa));
6148 	memset(payload_00, 0x00, sizeof(payload_00));
6149 
6150 	cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen];
6151 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
6152 
6153 	/* Unmap both clusters */
6154 	spdk_blob_io_unmap(blob, channel, 0, 64, blob_op_complete, NULL);
6155 	poll_threads();
6156 
6157 	CU_ASSERT(g_bserrno == 0);
6158 
6159 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_00, 32 * 512) == 0);
6160 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_00, 32 * 512) == 0);
6161 }
6162 
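/* Write zeroes over the blob's first two clusters and verify that the backing dev buffer reads back as zeroes. */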
6163 static void
6164 test_io_zeroes(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
6165 {
6166 	uint8_t payload_ff[64 * 512];
6167 	uint8_t payload_aa[64 * 512];
6168 	uint8_t payload_00[64 * 512];
6169 	uint8_t *cluster0, *cluster1;
6170 
6171 	memset(payload_ff, 0xFF, sizeof(payload_ff));
6172 	memset(payload_aa, 0xAA, sizeof(payload_aa));
6173 	memset(payload_00, 0x00, sizeof(payload_00));
6174 
6175 	cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen];
6176 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
6177 
6178 	/* Write zeroes across both clusters */
6179 	spdk_blob_io_write_zeroes(blob, channel, 0, 64, blob_op_complete, NULL);
6180 	poll_threads();
6181 
6182 	CU_ASSERT(g_bserrno == 0);
6183 
6184 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_00, 32 * 512) == 0);
6185 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_00, 32 * 512) == 0);
6186 }
6187 
6188 
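/* Same write pattern as test_io_write(), issued through the vectored spdk_blob_io_writev() API. */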
6189 static void
6190 test_iov_write(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
6191 {
6192 	uint8_t payload_ff[64 * 512];
6193 	uint8_t payload_aa[64 * 512];
6194 	uint8_t payload_00[64 * 512];
6195 	uint8_t *cluster0, *cluster1;
6196 	struct iovec iov[4];
6197 
6198 	memset(payload_ff, 0xFF, sizeof(payload_ff));
6199 	memset(payload_aa, 0xAA, sizeof(payload_aa));
6200 	memset(payload_00, 0x00, sizeof(payload_00));
6201 
6202 	/* Try to perform I/O with io unit = 512 */
6203 	iov[0].iov_base = payload_ff;
6204 	iov[0].iov_len = 1 * 512;
6205 	spdk_blob_io_writev(blob, channel, iov, 1, 0, 1, blob_op_complete, NULL);
6206 	poll_threads();
6207 	CU_ASSERT(g_bserrno == 0);
6208 
6209 	/* If thin provisioning is set, the cluster should be allocated now */
6210 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[0] != 0);
6211 	cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen];
6212 
6213 	/* Each character 0-F symbolizes a single io_unit of 512 bytes filled with that character.
6214 	 * Pages are separated by |. A whole block [...] symbolizes one cluster (containing 4 pages). */
6215 	/* cluster0: [ F000 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
6216 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6217 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 31 * 512) == 0);
6218 
6219 	/* Verify write with offset on first page */
6220 	iov[0].iov_base = payload_ff;
6221 	iov[0].iov_len = 1 * 512;
6222 	spdk_blob_io_writev(blob, channel, iov, 1, 2, 1, blob_op_complete, NULL);
6223 	poll_threads();
6224 	CU_ASSERT(g_bserrno == 0);
6225 
6226 	/* cluster0: [ F0F0 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
6227 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6228 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6229 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6230 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6231 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_00, 28 * 512) == 0);
6232 
6233 	/* Verify write with offset on first page */
6234 	iov[0].iov_base = payload_ff;
6235 	iov[0].iov_len = 4 * 512;
6236 	spdk_blob_io_writev(blob, channel, iov, 1, 4, 4, blob_op_complete, NULL);
6237 	poll_threads();
6238 
6239 	/* cluster0: [ F0F0 FFFF | 0000 0000 | 0000 0000 | 0000 0000 ] */
6240 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6241 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6242 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6243 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6244 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 4 * 512) == 0);
6245 	CU_ASSERT(memcmp(cluster0 + 8 * 512, payload_00, 24 * 512) == 0);
6246 
6247 	/* Verify write with offset on second page */
6248 	iov[0].iov_base = payload_ff;
6249 	iov[0].iov_len = 4 * 512;
6250 	spdk_blob_io_writev(blob, channel, iov, 1, 8, 4, blob_op_complete, NULL);
6251 	poll_threads();
6252 
6253 	/* cluster0: [ F0F0 FFFF | FFFF 0000 | 0000 0000 | 0000 0000 ] */
6254 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6255 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6256 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6257 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6258 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 8 * 512) == 0);
6259 	CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0);
6260 
6261 	/* Verify write across multiple pages */
6262 	iov[0].iov_base = payload_aa;
6263 	iov[0].iov_len = 8 * 512;
6264 	spdk_blob_io_writev(blob, channel, iov, 1, 4, 8, blob_op_complete, NULL);
6265 	poll_threads();
6266 
6267 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 0000 ] */
6268 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6269 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6270 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6271 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6272 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
6273 	CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0);
6274 
6275 	/* Verify write across multiple clusters */
6276 
6277 	iov[0].iov_base = payload_ff;
6278 	iov[0].iov_len = 8 * 512;
6279 	spdk_blob_io_writev(blob, channel, iov, 1, 28, 8, blob_op_complete, NULL);
6280 	poll_threads();
6281 
6282 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0);
6283 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
6284 
6285 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6286 	 * cluster1: [ FFFF 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
6287 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6288 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6289 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6290 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6291 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
6292 	CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 16 * 512) == 0);
6293 	CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0);
6294 
6295 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0);
6296 	CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 28 * 512) == 0);
6297 
6298 	/* Verify write to second cluster */
6299 
6300 	iov[0].iov_base = payload_ff;
6301 	iov[0].iov_len = 2 * 512;
6302 	spdk_blob_io_writev(blob, channel, iov, 1, 32 + 12, 2, blob_op_complete, NULL);
6303 	poll_threads();
6304 
6305 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0);
6306 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
6307 
6308 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6309 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] */
6310 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6311 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6312 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6313 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6314 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
6315 	CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0);
6316 
6317 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0);
6318 	CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 8 * 512) == 0);
6319 	CU_ASSERT(memcmp(cluster1 + 12 * 512, payload_ff, 2 * 512) == 0);
6320 	CU_ASSERT(memcmp(cluster1 + 14 * 512, payload_00, 18 * 512) == 0);
6321 }
6322 
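/* Same reads as test_io_read(), issued through the vectored spdk_blob_io_readv() API,
 * including reads split across multiple iov entries. */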
6323 static void
6324 test_iov_read(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
6325 {
6326 	uint8_t payload_read[64 * 512];
6327 	uint8_t payload_ff[64 * 512];
6328 	uint8_t payload_aa[64 * 512];
6329 	uint8_t payload_00[64 * 512];
6330 	struct iovec iov[4];
6331 
6332 	memset(payload_ff, 0xFF, sizeof(payload_ff));
6333 	memset(payload_aa, 0xAA, sizeof(payload_aa));
6334 	memset(payload_00, 0x00, sizeof(payload_00));
6335 
6336 	/* Read only first io unit */
6337 	/* cluster0: [ (F)0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6338 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
6339 	 * payload_read: F000 0000 | 0000 0000 ... */
6340 	memset(payload_read, 0x00, sizeof(payload_read));
6341 	iov[0].iov_base = payload_read;
6342 	iov[0].iov_len = 1 * 512;
6343 	spdk_blob_io_readv(blob, channel, iov, 1, 0, 1, blob_op_complete, NULL);
6344 	poll_threads();
6345 
6346 	CU_ASSERT(g_bserrno == 0);
6347 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
6348 	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 31 * 512) == 0);
6349 
6350 	/* Read four io_units starting from offset = 2
6351 	 * cluster0: [ F0(F0 AA)AA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6352 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
6353 	 * payload_read: F0AA 0000 | 0000 0000 ... */
6354 
6355 	memset(payload_read, 0x00, sizeof(payload_read));
6356 	iov[0].iov_base = payload_read;
6357 	iov[0].iov_len = 4 * 512;
6358 	spdk_blob_io_readv(blob, channel, iov, 1, 2, 4, blob_op_complete, NULL);
6359 	poll_threads();
6360 	CU_ASSERT(g_bserrno == 0);
6361 
6362 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
6363 	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0);
6364 	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_aa, 512) == 0);
6365 	CU_ASSERT(memcmp(payload_read + 3 * 512, payload_aa, 512) == 0);
6366 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0);
6367 
6368 	/* Read eight io_units across multiple pages
6369 	 * cluster0: [ F0F0 (AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ]
6370 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
6371 	 * payload_read: AAAA AAAA | 0000 0000 ... */
6372 	memset(payload_read, 0x00, sizeof(payload_read));
6373 	iov[0].iov_base = payload_read;
6374 	iov[0].iov_len = 4 * 512;
6375 	iov[1].iov_base = payload_read + 4 * 512;
6376 	iov[1].iov_len = 4 * 512;
6377 	spdk_blob_io_readv(blob, channel, iov, 2, 4, 8, blob_op_complete, NULL);
6378 	poll_threads();
6379 	CU_ASSERT(g_bserrno == 0);
6380 
6381 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_aa, 8 * 512) == 0);
6382 	CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0);
6383 
6384 	/* Read eight io_units across multiple clusters
6385 	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 (FFFF ]
6386 	 * cluster1: [ FFFF) 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
6387 	 * payload_read: FFFF FFFF | 0000 0000 ... */
6388 	memset(payload_read, 0x00, sizeof(payload_read));
6389 	iov[0].iov_base = payload_read;
6390 	iov[0].iov_len = 2 * 512;
6391 	iov[1].iov_base = payload_read + 2 * 512;
6392 	iov[1].iov_len = 2 * 512;
6393 	iov[2].iov_base = payload_read + 4 * 512;
6394 	iov[2].iov_len = 2 * 512;
6395 	iov[3].iov_base = payload_read + 6 * 512;
6396 	iov[3].iov_len = 2 * 512;
6397 	spdk_blob_io_readv(blob, channel, iov, 4, 28, 8, blob_op_complete, NULL);
6398 	poll_threads();
6399 	CU_ASSERT(g_bserrno == 0);
6400 
6401 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 8 * 512) == 0);
6402 	CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0);
6403 
6404 	/* Read four io_units from second cluster
6405 	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6406 	 * cluster1: [ FFFF 0000 | 00(00 FF)00 | 0000 0000 | 0000 0000 ]
6407 	 * payload_read: 00FF 0000 | 0000 0000 ... */
6408 	memset(payload_read, 0x00, sizeof(payload_read));
6409 	iov[0].iov_base = payload_read;
6410 	iov[0].iov_len = 1 * 512;
6411 	iov[1].iov_base = payload_read + 1 * 512;
6412 	iov[1].iov_len = 3 * 512;
6413 	spdk_blob_io_readv(blob, channel, iov, 2, 32 + 10, 4, blob_op_complete, NULL);
6414 	poll_threads();
6415 	CU_ASSERT(g_bserrno == 0);
6416 
6417 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_00, 2 * 512) == 0);
6418 	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 2 * 512) == 0);
6419 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0);
6420 
6421 	/* Read second cluster
6422 	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6423 	 * cluster1: [ (FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ]
6424 	 * payload_read: FFFF 0000 | 0000 FF00 ... */
6425 	memset(payload_read, 0x00, sizeof(payload_read));
6426 	iov[0].iov_base = payload_read;
6427 	iov[0].iov_len = 1 * 512;
6428 	iov[1].iov_base = payload_read + 1 * 512;
6429 	iov[1].iov_len = 2 * 512;
6430 	iov[2].iov_base = payload_read + 3 * 512;
6431 	iov[2].iov_len = 4 * 512;
6432 	iov[3].iov_base = payload_read + 7 * 512;
6433 	iov[3].iov_len = 25 * 512;
6434 	spdk_blob_io_readv(blob, channel, iov, 4, 32, 32, blob_op_complete, NULL);
6435 	poll_threads();
6436 	CU_ASSERT(g_bserrno == 0);
6437 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 4 * 512) == 0);
6438 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 8 * 512) == 0);
6439 	CU_ASSERT(memcmp(payload_read + 12 * 512, payload_ff, 2 * 512) == 0);
6440 	CU_ASSERT(memcmp(payload_read + 14 * 512, payload_00, 18 * 512) == 0);
6441 
6442 	/* Read whole two clusters
6443 	 * cluster0: [ (F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6444 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ] */
6445 	memset(payload_read, 0x00, sizeof(payload_read));
6446 	iov[0].iov_base = payload_read;
6447 	iov[0].iov_len = 1 * 512;
6448 	iov[1].iov_base = payload_read + 1 * 512;
6449 	iov[1].iov_len = 8 * 512;
6450 	iov[2].iov_base = payload_read + 9 * 512;
6451 	iov[2].iov_len = 16 * 512;
6452 	iov[3].iov_base = payload_read + 25 * 512;
6453 	iov[3].iov_len = 39 * 512;
6454 	spdk_blob_io_readv(blob, channel, iov, 4, 0, 64, blob_op_complete, NULL);
6455 	poll_threads();
6456 	CU_ASSERT(g_bserrno == 0);
6457 
6458 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
6459 	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0);
6460 	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 512) == 0);
6461 	CU_ASSERT(memcmp(payload_read + 3 * 512, payload_00, 512) == 0);
6462 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_aa, 8 * 512) == 0);
6463 	CU_ASSERT(memcmp(payload_read + 28 * 512, payload_ff, 4 * 512) == 0);
6464 
6465 	CU_ASSERT(memcmp(payload_read + (32 + 0) * 512, payload_ff, 4 * 512) == 0);
6466 	CU_ASSERT(memcmp(payload_read + (32 + 4) * 512, payload_00, 8 * 512) == 0);
6467 	CU_ASSERT(memcmp(payload_read + (32 + 12) * 512, payload_ff, 2 * 512) == 0);
6468 	CU_ASSERT(memcmp(payload_read + (32 + 14) * 512, payload_00, 18 * 512) == 0);
6469 }
6470 
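/*
 * Exercise I/O on a blobstore whose io_unit is 512 bytes: run the read/write,
 * zeroes, unmap and iov helpers above against thick- and thin-provisioned
 * blobs, then against a snapshot, a clone, and the clone after inflation.
 */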
6471 static void
6472 blob_io_unit(void)
6473 {
6474 	struct spdk_bs_opts bsopts;
6475 	struct spdk_blob_opts opts;
6476 	struct spdk_blob_store *bs;
6477 	struct spdk_bs_dev *dev;
6478 	struct spdk_blob *blob, *snapshot, *clone;
6479 	spdk_blob_id blobid;
6480 	struct spdk_io_channel *channel;
6481 
6482 	/* Create a dev with a 512-byte io unit size */
6483 
6484 	spdk_bs_opts_init(&bsopts, sizeof(bsopts));
6485 	bsopts.cluster_sz = SPDK_BS_PAGE_SIZE * 4;	/* 4 pages * 8 io_units = 32 io_units per cluster */
6486 	snprintf(bsopts.bstype.bstype, sizeof(bsopts.bstype.bstype), "TESTTYPE");
6487 
6488 	/* Set up the dev with 512-byte blocks */
6489 	dev = init_dev();
6490 	dev->blocklen = 512;
6491 	dev->blockcnt =  DEV_BUFFER_SIZE / dev->blocklen;
6492 
6493 	/* Initialize a new blob store */
6494 	spdk_bs_init(dev, &bsopts, bs_op_with_handle_complete, NULL);
6495 	poll_threads();
6496 	CU_ASSERT(g_bserrno == 0);
6497 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
6498 	bs = g_bs;
6499 
6500 	CU_ASSERT(spdk_bs_get_io_unit_size(bs) == 512);
6501 	channel = spdk_bs_alloc_io_channel(bs);
6502 
6503 	/* Create thick provisioned blob */
6504 	ut_spdk_blob_opts_init(&opts);
6505 	opts.thin_provision = false;
6506 	opts.num_clusters = 32;
6507 
6508 	blob = ut_blob_create_and_open(bs, &opts);
6509 	blobid = spdk_blob_get_id(blob);
6510 
6511 	test_io_write(dev, blob, channel);
6512 	test_io_read(dev, blob, channel);
6513 	test_io_zeroes(dev, blob, channel);
6514 
6515 	test_iov_write(dev, blob, channel);
6516 	test_iov_read(dev, blob, channel);
6517 
6518 	test_io_unmap(dev, blob, channel);
6519 
6520 	spdk_blob_close(blob, blob_op_complete, NULL);
6521 	poll_threads();
6522 	CU_ASSERT(g_bserrno == 0);
6523 	blob = NULL;
6524 	g_blob = NULL;
6525 
6526 	/* Create thin provisioned blob */
6527 
6528 	ut_spdk_blob_opts_init(&opts);
6529 	opts.thin_provision = true;
6530 	opts.num_clusters = 32;
6531 
6532 	blob = ut_blob_create_and_open(bs, &opts);
6533 	blobid = spdk_blob_get_id(blob);
6534 
6535 	test_io_write(dev, blob, channel);
6536 	test_io_read(dev, blob, channel);
6537 
6538 	test_io_zeroes(dev, blob, channel);
6539 
6540 	test_iov_write(dev, blob, channel);
6541 	test_iov_read(dev, blob, channel);
6542 
6543 	/* Create snapshot */
6544 
6545 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
6546 	poll_threads();
6547 	CU_ASSERT(g_bserrno == 0);
6548 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
6549 	blobid = g_blobid;
6550 
6551 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
6552 	poll_threads();
6553 	CU_ASSERT(g_bserrno == 0);
6554 	CU_ASSERT(g_blob != NULL);
6555 	snapshot = g_blob;
6556 
6557 	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
6558 	poll_threads();
6559 	CU_ASSERT(g_bserrno == 0);
6560 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
6561 	blobid = g_blobid;
6562 
6563 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
6564 	poll_threads();
6565 	CU_ASSERT(g_bserrno == 0);
6566 	CU_ASSERT(g_blob != NULL);
6567 	clone = g_blob;
6568 
6569 	test_io_read(dev, blob, channel);
6570 	test_io_read(dev, snapshot, channel);
6571 	test_io_read(dev, clone, channel);
6572 
6573 	test_iov_read(dev, blob, channel);
6574 	test_iov_read(dev, snapshot, channel);
6575 	test_iov_read(dev, clone, channel);
6576 
6577 	/* Inflate clone */
6578 
6579 	spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
6580 	poll_threads();
6581 
6582 	CU_ASSERT(g_bserrno == 0);
6583 
6584 	test_io_read(dev, clone, channel);
6585 
6586 	test_io_unmap(dev, clone, channel);
6587 
6588 	test_iov_write(dev, clone, channel);
6589 	test_iov_read(dev, clone, channel);
6590 
6591 	spdk_blob_close(blob, blob_op_complete, NULL);
6592 	spdk_blob_close(snapshot, blob_op_complete, NULL);
6593 	spdk_blob_close(clone, blob_op_complete, NULL);
6594 	poll_threads();
6595 	CU_ASSERT(g_bserrno == 0);
6596 	blob = NULL;
6597 	g_blob = NULL;
6598 
6599 	spdk_bs_free_io_channel(channel);
6600 	poll_threads();
6601 
6602 	/* Unload the blob store */
6603 	spdk_bs_unload(bs, bs_op_complete, NULL);
6604 	poll_threads();
6605 	CU_ASSERT(g_bserrno == 0);
6606 	g_bs = NULL;
6607 	g_blob = NULL;
6608 	g_blobid = 0;
6609 }
6610 
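/*
 * Verify io_unit size compatibility with older blobstore layouts: when the
 * on-disk super block reports io_unit_size == 0 (as an older version would),
 * the loaded blobstore should fall back to SPDK_BS_PAGE_SIZE.
 */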
6611 static void
6612 blob_io_unit_compatibility(void)
6613 {
6614 	struct spdk_bs_opts bsopts;
6615 	struct spdk_blob_store *bs;
6616 	struct spdk_bs_dev *dev;
6617 	struct spdk_bs_super_block *super;
6618 
6619 	/* Create a dev with a 512-byte io unit size */
6620 
6621 	spdk_bs_opts_init(&bsopts, sizeof(bsopts));
6622 	bsopts.cluster_sz = SPDK_BS_PAGE_SIZE * 4;	/* 4 pages * 8 io_units = 32 io_units per cluster */
6623 	snprintf(bsopts.bstype.bstype, sizeof(bsopts.bstype.bstype), "TESTTYPE");
6624 
6625 	/* Set up the dev with 512-byte blocks */
6626 	dev = init_dev();
6627 	dev->blocklen = 512;
6628 	dev->blockcnt =  DEV_BUFFER_SIZE / dev->blocklen;
6629 
6630 	/* Initialize a new blob store */
6631 	spdk_bs_init(dev, &bsopts, bs_op_with_handle_complete, NULL);
6632 	poll_threads();
6633 	CU_ASSERT(g_bserrno == 0);
6634 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
6635 	bs = g_bs;
6636 
6637 	CU_ASSERT(spdk_bs_get_io_unit_size(bs) == 512);
6638 
6639 	/* Unload the blob store */
6640 	spdk_bs_unload(bs, bs_op_complete, NULL);
6641 	poll_threads();
6642 	CU_ASSERT(g_bserrno == 0);
6643 
6644 	/* Modify the super block to look like an older version.
6645 	 * Check that the loaded io unit size equals SPDK_BS_PAGE_SIZE */
6646 	super = (struct spdk_bs_super_block *)&g_dev_buffer[0];
6647 	super->io_unit_size = 0;
6648 	super->crc = blob_md_page_calc_crc(super);
6649 
6650 	dev = init_dev();
6651 	dev->blocklen = 512;
6652 	dev->blockcnt =  DEV_BUFFER_SIZE / dev->blocklen;
6653 
6654 	spdk_bs_load(dev, &bsopts, bs_op_with_handle_complete, NULL);
6655 	poll_threads();
6656 	CU_ASSERT(g_bserrno == 0);
6657 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
6658 	bs = g_bs;
6659 
6660 	CU_ASSERT(spdk_bs_get_io_unit_size(bs) == SPDK_BS_PAGE_SIZE);
6661 
6662 	/* Unload the blob store */
6663 	spdk_bs_unload(bs, bs_op_complete, NULL);
6664 	poll_threads();
6665 	CU_ASSERT(g_bserrno == 0);
6666 
6667 	g_bs = NULL;
6668 	g_blob = NULL;
6669 	g_blobid = 0;
6670 }
6671 
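/*
 * Completion callbacks used by blob_simultaneous_operations() below to verify
 * that two back-to-back md syncs both complete and do so in order: the first
 * completion sets a new xattr, the second one observes it.
 */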
6672 static void
6673 first_sync_complete(void *cb_arg, int bserrno)
6674 {
6675 	struct spdk_blob *blob = cb_arg;
6676 	int rc;
6677 
6678 	CU_ASSERT(bserrno == 0);
6679 	rc = spdk_blob_set_xattr(blob, "sync", "second", strlen("second") + 1);
6680 	CU_ASSERT(rc == 0);
6681 	CU_ASSERT(g_bserrno == -1);
6682 
6683 	/* Keep g_bserrno at -1; only the
6684 	 * second sync completion should set it to 0. */
6685 }
6686 
6687 static void
6688 second_sync_complete(void *cb_arg, int bserrno)
6689 {
6690 	struct spdk_blob *blob = cb_arg;
6691 	const void *value;
6692 	size_t value_len;
6693 	int rc;
6694 
6695 	CU_ASSERT(bserrno == 0);
6696 
6697 	/* Verify that the first sync completion had a chance to execute */
6698 	rc = spdk_blob_get_xattr_value(blob, "sync", &value, &value_len);
6699 	CU_ASSERT(rc == 0);
6700 	SPDK_CU_ASSERT_FATAL(value != NULL);
6701 	CU_ASSERT(value_len == strlen("second") + 1);
6702 	CU_ASSERT_NSTRING_EQUAL_FATAL(value, "second", value_len);
6703 
6704 	CU_ASSERT(g_bserrno == -1);
6705 	g_bserrno = bserrno;
6706 }
6707 
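/*
 * Start a long-running operation (snapshot, inflate, clone, resize) and issue
 * a delete on the same blob before it completes: the first operation should
 * succeed while the delete fails with -EBUSY. Finish with two overlapping
 * md syncs that must both complete.
 */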
6708 static void
6709 blob_simultaneous_operations(void)
6710 {
6711 	struct spdk_blob_store *bs = g_bs;
6712 	struct spdk_blob_opts opts;
6713 	struct spdk_blob *blob, *snapshot;
6714 	spdk_blob_id blobid, snapshotid;
6715 	struct spdk_io_channel *channel;
6716 	int rc;
6717 
6718 	channel = spdk_bs_alloc_io_channel(bs);
6719 	SPDK_CU_ASSERT_FATAL(channel != NULL);
6720 
6721 	ut_spdk_blob_opts_init(&opts);
6722 	opts.num_clusters = 10;
6723 
6724 	blob = ut_blob_create_and_open(bs, &opts);
6725 	blobid = spdk_blob_get_id(blob);
6726 
6727 	/* Create a snapshot and try to delete the blob at the same time:
6728 	 * - the snapshot should be created successfully
6729 	 * - the delete operation should fail with -EBUSY */
6730 	CU_ASSERT(blob->locked_operation_in_progress == false);
6731 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
6732 	CU_ASSERT(blob->locked_operation_in_progress == true);
6733 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
6734 	CU_ASSERT(blob->locked_operation_in_progress == true);
6735 	/* Deletion failure */
6736 	CU_ASSERT(g_bserrno == -EBUSY);
6737 	poll_threads();
6738 	CU_ASSERT(blob->locked_operation_in_progress == false);
6739 	/* Snapshot creation success */
6740 	CU_ASSERT(g_bserrno == 0);
6741 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
6742 
6743 	snapshotid = g_blobid;
6744 
6745 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
6746 	poll_threads();
6747 	CU_ASSERT(g_bserrno == 0);
6748 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
6749 	snapshot = g_blob;
6750 
6751 	/* Inflate the blob and try to delete it at the same time:
6752 	 * - the blob should be inflated successfully
6753 	 * - the delete operation should fail with -EBUSY */
6754 	CU_ASSERT(blob->locked_operation_in_progress == false);
6755 	spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
6756 	CU_ASSERT(blob->locked_operation_in_progress == true);
6757 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
6758 	CU_ASSERT(blob->locked_operation_in_progress == true);
6759 	/* Deletion failure */
6760 	CU_ASSERT(g_bserrno == -EBUSY);
6761 	poll_threads();
6762 	CU_ASSERT(blob->locked_operation_in_progress == false);
6763 	/* Inflation success */
6764 	CU_ASSERT(g_bserrno == 0);
6765 
6766 	/* Clone the snapshot and try to delete the snapshot at the same time:
6767 	 * - the snapshot should be cloned successfully
6768 	 * - the delete operation should fail with -EBUSY */
6769 	CU_ASSERT(blob->locked_operation_in_progress == false);
6770 	spdk_bs_create_clone(bs, snapshotid, NULL, blob_op_with_id_complete, NULL);
6771 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
6772 	/* Deletion failure */
6773 	CU_ASSERT(g_bserrno == -EBUSY);
6774 	poll_threads();
6775 	CU_ASSERT(blob->locked_operation_in_progress == false);
6776 	/* Clone created */
6777 	CU_ASSERT(g_bserrno == 0);
6778 
6779 	/* Resize the blob and try to delete it at the same time:
6780 	 * - the blob should be resized successfully
6781 	 * - the delete operation should fail with -EBUSY */
6782 	CU_ASSERT(blob->locked_operation_in_progress == false);
6783 	spdk_blob_resize(blob, 50, blob_op_complete, NULL);
6784 	CU_ASSERT(blob->locked_operation_in_progress == true);
6785 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
6786 	CU_ASSERT(blob->locked_operation_in_progress == true);
6787 	/* Deletion failure */
6788 	CU_ASSERT(g_bserrno == -EBUSY);
6789 	poll_threads();
6790 	CU_ASSERT(blob->locked_operation_in_progress == false);
6791 	/* Blob resized successfully */
6792 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
6793 	poll_threads();
6794 	CU_ASSERT(g_bserrno == 0);
6795 
6796 	/* Issue two consecutive blob syncs; neither should fail.
6797 	 * Force each sync to actually occur by marking the blob dirty beforehand.
6798 	 * Merely issuing the sync is not enough to complete the operation,
6799 	 * since disk I/O is required to finish it. */
6800 	g_bserrno = -1;
6801 
6802 	rc = spdk_blob_set_xattr(blob, "sync", "first", strlen("first") + 1);
6803 	CU_ASSERT(rc == 0);
6804 	spdk_blob_sync_md(blob, first_sync_complete, blob);
6805 	CU_ASSERT(g_bserrno == -1);
6806 
6807 	spdk_blob_sync_md(blob, second_sync_complete, blob);
6808 	CU_ASSERT(g_bserrno == -1);
6809 
6810 	poll_threads();
6811 	CU_ASSERT(g_bserrno == 0);
6812 
6813 	spdk_bs_free_io_channel(channel);
6814 	poll_threads();
6815 
6816 	ut_blob_close_and_delete(bs, snapshot);
6817 	ut_blob_close_and_delete(bs, blob);
6818 }
6819 
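/*
 * Persistence of metadata across interrupted syncs: add an oversized xattr,
 * interrupt its md sync after a growing number of poller iterations, remove
 * the xattr and sync again, then reload the blobstore and confirm the xattr
 * never made it to disk.
 */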
6820 static void
6821 blob_persist_test(void)
6822 {
6823 	struct spdk_blob_store *bs = g_bs;
6824 	struct spdk_blob_opts opts;
6825 	struct spdk_blob *blob;
6826 	spdk_blob_id blobid;
6827 	struct spdk_io_channel *channel;
6828 	char *xattr;
6829 	size_t xattr_length;
6830 	int rc;
6831 	uint32_t page_count_clear, page_count_xattr;
6832 	uint64_t poller_iterations;
6833 	bool run_poller;
6834 
6835 	channel = spdk_bs_alloc_io_channel(bs);
6836 	SPDK_CU_ASSERT_FATAL(channel != NULL);
6837 
6838 	ut_spdk_blob_opts_init(&opts);
6839 	opts.num_clusters = 10;
6840 
6841 	blob = ut_blob_create_and_open(bs, &opts);
6842 	blobid = spdk_blob_get_id(blob);
6843 
6844 	/* Save the number of md pages used right after blob creation.
6845 	 * The count should return to this value after the xattr is removed. */
6846 	page_count_clear = spdk_bit_array_count_set(bs->used_md_pages);
6847 	SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_clear);
6848 	SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_clear);
6849 
6850 	/* Add an xattr with the maximum descriptor length so that the metadata exceeds a single page. */
6851 	xattr_length = SPDK_BS_MAX_DESC_SIZE - sizeof(struct spdk_blob_md_descriptor_xattr) -
6852 		       strlen("large_xattr");
6853 	xattr = calloc(xattr_length, sizeof(char));
6854 	SPDK_CU_ASSERT_FATAL(xattr != NULL);
6855 
6856 	rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
6857 	SPDK_CU_ASSERT_FATAL(rc == 0);
6858 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
6859 	poll_threads();
6860 	SPDK_CU_ASSERT_FATAL(g_bserrno == 0);
6861 
6862 	/* Save the amount of md pages used after adding the large xattr */
6863 	page_count_xattr = spdk_bit_array_count_set(bs->used_md_pages);
6864 	SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_xattr);
6865 	SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_xattr);
6866 
6867 	/* Add an xattr to the blob and sync it. While that sync is in flight, remove the xattr and sync again.
6868 	 * Interrupt the first sync after a growing number of poller iterations, until it eventually completes.
6869 	 * The expectation is that after the second sync completes, no xattr is saved in the metadata. */
6870 	poller_iterations = 1;
6871 	run_poller = true;
6872 	while (run_poller) {
6873 		rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
6874 		SPDK_CU_ASSERT_FATAL(rc == 0);
6875 		g_bserrno = -1;
6876 		spdk_blob_sync_md(blob, blob_op_complete, NULL);
6877 		poll_thread_times(0, poller_iterations);
6878 		if (g_bserrno == 0) {
6879 			/* The poller iteration count was high enough for the first sync to complete.
6880 			 * Verify that the blob takes up enough md pages to store the xattr. */
6881 			SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_xattr);
6882 			SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_xattr);
6883 			SPDK_CU_ASSERT_FATAL(spdk_bit_array_count_set(bs->used_md_pages) == page_count_xattr);
6884 			run_poller = false;
6885 		}
6886 		rc = spdk_blob_remove_xattr(blob, "large_xattr");
6887 		SPDK_CU_ASSERT_FATAL(rc == 0);
6888 		spdk_blob_sync_md(blob, blob_op_complete, NULL);
6889 		poll_threads();
6890 		SPDK_CU_ASSERT_FATAL(g_bserrno == 0);
6891 		SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_clear);
6892 		SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_clear);
6893 		SPDK_CU_ASSERT_FATAL(spdk_bit_array_count_set(bs->used_md_pages) == page_count_clear);
6894 
6895 		/* Reload bs and re-open blob to verify that xattr was not persisted. */
6896 		spdk_blob_close(blob, blob_op_complete, NULL);
6897 		poll_threads();
6898 		CU_ASSERT(g_bserrno == 0);
6899 
6900 		ut_bs_reload(&bs, NULL);
6901 
6902 		spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
6903 		poll_threads();
6904 		CU_ASSERT(g_bserrno == 0);
6905 		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
6906 		blob = g_blob;
6907 
6908 		rc = spdk_blob_get_xattr_value(blob, "large_xattr", (const void **)&xattr, &xattr_length);
6909 		SPDK_CU_ASSERT_FATAL(rc == -ENOENT);
6910 
6911 		poller_iterations++;
6912 		/* Stop at a high iteration count to prevent an infinite loop.
6913 		 * This value should be enough for the first md sync to complete in any case. */
6914 		SPDK_CU_ASSERT_FATAL(poller_iterations < 50);
6915 	}
6916 
6917 	free(xattr);
6918 
6919 	ut_blob_close_and_delete(bs, blob);
6920 
6921 	spdk_bs_free_io_channel(channel);
6922 	poll_threads();
6923 }
6924 
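/*
 * Create two snapshots of one blob, then decouple the newer snapshot from its
 * parent and check that it no longer reports a parent and that each of its
 * clusters was copied rather than shared with the first snapshot.
 */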
6925 static void
6926 blob_decouple_snapshot(void)
6927 {
6928 	struct spdk_blob_store *bs = g_bs;
6929 	struct spdk_blob_opts opts;
6930 	struct spdk_blob *blob, *snapshot1, *snapshot2;
6931 	struct spdk_io_channel *channel;
6932 	spdk_blob_id blobid, snapshotid;
6933 	uint64_t cluster;
6934 
6935 	channel = spdk_bs_alloc_io_channel(bs);
6936 	SPDK_CU_ASSERT_FATAL(channel != NULL);
6937 
6938 	ut_spdk_blob_opts_init(&opts);
6939 	opts.num_clusters = 10;
6940 	opts.thin_provision = false;
6941 
6942 	blob = ut_blob_create_and_open(bs, &opts);
6943 	blobid = spdk_blob_get_id(blob);
6944 
6945 	/* Create first snapshot */
6946 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 0);
6947 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
6948 	poll_threads();
6949 	CU_ASSERT(g_bserrno == 0);
6950 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
6951 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
6952 	snapshotid = g_blobid;
6953 
6954 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
6955 	poll_threads();
6956 	CU_ASSERT(g_bserrno == 0);
6957 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
6958 	snapshot1 = g_blob;
6959 
6960 	/* Create the second one */
6961 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
6962 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
6963 	poll_threads();
6964 	CU_ASSERT(g_bserrno == 0);
6965 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
6966 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 2);
6967 	snapshotid = g_blobid;
6968 
6969 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
6970 	poll_threads();
6971 	CU_ASSERT(g_bserrno == 0);
6972 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
6973 	snapshot2 = g_blob;
6974 	CU_ASSERT_EQUAL(spdk_blob_get_parent_snapshot(bs, snapshot2->id), snapshot1->id);
6975 
6976 	/* Now decouple the second snapshot, forcing it to copy the written clusters */
6977 	spdk_bs_blob_decouple_parent(bs, channel, snapshot2->id, blob_op_complete, NULL);
6978 	poll_threads();
6979 	CU_ASSERT(g_bserrno == 0);
6980 
6981 	/* Verify that the snapshot has been decoupled and that the clusters have been copied */
6982 	CU_ASSERT_EQUAL(spdk_blob_get_parent_snapshot(bs, snapshot2->id), SPDK_BLOBID_INVALID);
6983 	for (cluster = 0; cluster < snapshot2->active.num_clusters; ++cluster) {
6984 		CU_ASSERT_NOT_EQUAL(snapshot2->active.clusters[cluster], 0);
6985 		CU_ASSERT_NOT_EQUAL(snapshot2->active.clusters[cluster],
6986 				    snapshot1->active.clusters[cluster]);
6987 	}
6988 
6989 	spdk_bs_free_io_channel(channel);
6990 
6991 	ut_blob_close_and_delete(bs, snapshot2);
6992 	ut_blob_close_and_delete(bs, snapshot1);
6993 	ut_blob_close_and_delete(bs, blob);
6994 	poll_threads();
6995 }
6996 
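/*
 * Suite fixtures: suite_bs_setup/cleanup provide a freshly initialized
 * blobstore in g_bs, and suite_blob_setup/cleanup additionally create and
 * open a blob in g_blob.
 */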
6997 static void
6998 suite_bs_setup(void)
6999 {
7000 	struct spdk_bs_dev *dev;
7001 
7002 	dev = init_dev();
7003 	memset(g_dev_buffer, 0, DEV_BUFFER_SIZE);
7004 	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
7005 	poll_threads();
7006 	CU_ASSERT(g_bserrno == 0);
7007 	CU_ASSERT(g_bs != NULL);
7008 }
7009 
7010 static void
7011 suite_bs_cleanup(void)
7012 {
7013 	spdk_bs_unload(g_bs, bs_op_complete, NULL);
7014 	poll_threads();
7015 	CU_ASSERT(g_bserrno == 0);
7016 	g_bs = NULL;
7017 	memset(g_dev_buffer, 0, DEV_BUFFER_SIZE);
7018 }
7019 
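/*
 * Test helper: create a blob with the given options (or UT defaults when
 * blob_opts is NULL), open it, and return the open handle. Asserts on any
 * failure along the way.
 */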
7020 static struct spdk_blob *
7021 ut_blob_create_and_open(struct spdk_blob_store *bs, struct spdk_blob_opts *blob_opts)
7022 {
7023 	struct spdk_blob *blob;
7024 	struct spdk_blob_opts create_blob_opts;
7025 	spdk_blob_id blobid;
7026 
7027 	if (blob_opts == NULL) {
7028 		ut_spdk_blob_opts_init(&create_blob_opts);
7029 		blob_opts = &create_blob_opts;
7030 	}
7031 
7032 	spdk_bs_create_blob_ext(bs, blob_opts, blob_op_with_id_complete, NULL);
7033 	poll_threads();
7034 	CU_ASSERT(g_bserrno == 0);
7035 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
7036 	blobid = g_blobid;
7037 	g_blobid = -1;
7038 
7039 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
7040 	poll_threads();
7041 	CU_ASSERT(g_bserrno == 0);
7042 	CU_ASSERT(g_blob != NULL);
7043 	blob = g_blob;
7044 
7045 	g_blob = NULL;
7046 	g_bserrno = -1;
7047 
7048 	return blob;
7049 }
7050 
7051 static void
7052 ut_blob_close_and_delete(struct spdk_blob_store *bs, struct spdk_blob *blob)
7053 {
7054 	spdk_blob_id blobid = spdk_blob_get_id(blob);
7055 
7056 	spdk_blob_close(blob, blob_op_complete, NULL);
7057 	poll_threads();
7058 	CU_ASSERT(g_bserrno == 0);
7059 	g_blob = NULL;
7060 
7061 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
7062 	poll_threads();
7063 	CU_ASSERT(g_bserrno == 0);
7064 	g_bserrno = -1;
7065 }
7066 
7067 static void
7068 suite_blob_setup(void)
7069 {
7070 	suite_bs_setup();
7071 	CU_ASSERT(g_bs != NULL);
7072 
7073 	g_blob = ut_blob_create_and_open(g_bs, NULL);
7074 	CU_ASSERT(g_blob != NULL);
7075 }
7076 
7077 static void
7078 suite_blob_cleanup(void)
7079 {
7080 	ut_blob_close_and_delete(g_bs, g_blob);
7081 	CU_ASSERT(g_blob == NULL);
7082 
7083 	suite_bs_cleanup();
7084 	CU_ASSERT(g_bs == NULL);
7085 }
7086 
7087 int main(int argc, char **argv)
7088 {
7089 	CU_pSuite	suite, suite_bs, suite_blob;
7090 	unsigned int	num_failures;
7091 
7092 	CU_set_error_action(CUEA_ABORT);
7093 	CU_initialize_registry();
7094 
7095 	suite = CU_add_suite("blob", NULL, NULL);
7096 	suite_bs = CU_add_suite_with_setup_and_teardown("blob_bs", NULL, NULL,
7097 			suite_bs_setup, suite_bs_cleanup);
7098 	suite_blob = CU_add_suite_with_setup_and_teardown("blob_blob", NULL, NULL,
7099 			suite_blob_setup, suite_blob_cleanup);
7100 
7101 	CU_ADD_TEST(suite, blob_init);
7102 	CU_ADD_TEST(suite_bs, blob_open);
7103 	CU_ADD_TEST(suite_bs, blob_create);
7104 	CU_ADD_TEST(suite_bs, blob_create_loop);
7105 	CU_ADD_TEST(suite_bs, blob_create_fail);
7106 	CU_ADD_TEST(suite_bs, blob_create_internal);
7107 	CU_ADD_TEST(suite, blob_thin_provision);
7108 	CU_ADD_TEST(suite_bs, blob_snapshot);
7109 	CU_ADD_TEST(suite_bs, blob_clone);
7110 	CU_ADD_TEST(suite_bs, blob_inflate);
7111 	CU_ADD_TEST(suite_bs, blob_delete);
7112 	CU_ADD_TEST(suite_bs, blob_resize_test);
7113 	CU_ADD_TEST(suite, blob_read_only);
7114 	CU_ADD_TEST(suite_bs, channel_ops);
7115 	CU_ADD_TEST(suite_bs, blob_super);
7116 	CU_ADD_TEST(suite_blob, blob_write);
7117 	CU_ADD_TEST(suite_blob, blob_read);
7118 	CU_ADD_TEST(suite_blob, blob_rw_verify);
7119 	CU_ADD_TEST(suite_bs, blob_rw_verify_iov);
7120 	CU_ADD_TEST(suite_blob, blob_rw_verify_iov_nomem);
7121 	CU_ADD_TEST(suite_blob, blob_rw_iov_read_only);
7122 	CU_ADD_TEST(suite_bs, blob_unmap);
7123 	CU_ADD_TEST(suite_bs, blob_iter);
7124 	CU_ADD_TEST(suite_blob, blob_xattr);
7125 	CU_ADD_TEST(suite_bs, blob_parse_md);
7126 	CU_ADD_TEST(suite, bs_load);
7127 	CU_ADD_TEST(suite_bs, bs_load_pending_removal);
7128 	CU_ADD_TEST(suite, bs_load_custom_cluster_size);
7129 	CU_ADD_TEST(suite_bs, bs_unload);
7130 	CU_ADD_TEST(suite, bs_cluster_sz);
7131 	CU_ADD_TEST(suite_bs, bs_usable_clusters);
7132 	CU_ADD_TEST(suite, bs_resize_md);
7133 	CU_ADD_TEST(suite, bs_destroy);
7134 	CU_ADD_TEST(suite, bs_type);
7135 	CU_ADD_TEST(suite, bs_super_block);
7136 	CU_ADD_TEST(suite, bs_test_recover_cluster_count);
7137 	CU_ADD_TEST(suite, blob_serialize_test);
7138 	CU_ADD_TEST(suite_bs, blob_crc);
7139 	CU_ADD_TEST(suite, super_block_crc);
7140 	CU_ADD_TEST(suite_blob, blob_dirty_shutdown);
7141 	CU_ADD_TEST(suite_bs, blob_flags);
7142 	CU_ADD_TEST(suite_bs, bs_version);
7143 	CU_ADD_TEST(suite_bs, blob_set_xattrs_test);
7144 	CU_ADD_TEST(suite_bs, blob_thin_prov_alloc);
7145 	CU_ADD_TEST(suite_bs, blob_insert_cluster_msg_test);
7146 	CU_ADD_TEST(suite_bs, blob_thin_prov_rw);
7147 	CU_ADD_TEST(suite, blob_thin_prov_write_count_io);
7148 	CU_ADD_TEST(suite_bs, blob_thin_prov_rle);
7149 	CU_ADD_TEST(suite_bs, blob_thin_prov_rw_iov);
7150 	CU_ADD_TEST(suite, bs_load_iter_test);
7151 	CU_ADD_TEST(suite_bs, blob_snapshot_rw);
7152 	CU_ADD_TEST(suite_bs, blob_snapshot_rw_iov);
7153 	CU_ADD_TEST(suite, blob_relations);
7154 	CU_ADD_TEST(suite, blob_relations2);
7155 	CU_ADD_TEST(suite, blob_relations3);
7156 	CU_ADD_TEST(suite, blobstore_clean_power_failure);
7157 	CU_ADD_TEST(suite, blob_delete_snapshot_power_failure);
7158 	CU_ADD_TEST(suite, blob_create_snapshot_power_failure);
7159 	CU_ADD_TEST(suite_bs, blob_inflate_rw);
7160 	CU_ADD_TEST(suite_bs, blob_snapshot_freeze_io);
7161 	CU_ADD_TEST(suite_bs, blob_operation_split_rw);
7162 	CU_ADD_TEST(suite_bs, blob_operation_split_rw_iov);
7163 	CU_ADD_TEST(suite, blob_io_unit);
7164 	CU_ADD_TEST(suite, blob_io_unit_compatibility);
7165 	CU_ADD_TEST(suite_bs, blob_simultaneous_operations);
7166 	CU_ADD_TEST(suite_bs, blob_persist_test);
7167 	CU_ADD_TEST(suite_bs, blob_decouple_snapshot);
7168 
7169 	allocate_threads(2);
7170 	set_thread(0);
7171 
7172 	g_dev_buffer = calloc(1, DEV_BUFFER_SIZE);
7173 
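	/* Run the full test registry twice: once with g_use_extent_table disabled
	 * and once with it enabled, accumulating the failure counts from both
	 * passes. */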
7174 	CU_basic_set_mode(CU_BRM_VERBOSE);
7175 	g_use_extent_table = false;
7176 	CU_basic_run_tests();
7177 	num_failures = CU_get_number_of_failures();
7178 	g_use_extent_table = true;
7179 	CU_basic_run_tests();
7180 	num_failures += CU_get_number_of_failures();
7181 	CU_cleanup_registry();
7182 
7183 	free(g_dev_buffer);
7184 
7185 	free_threads();
7186 
7187 	return num_failures;
7188 }
7189