1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2017 Intel Corporation.
3  *   All rights reserved.
4  *   Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
5  */
6 
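/*
 * Unit tests for the SPDK blobstore.  The blobstore sources are compiled
 * directly into this test (see the includes below) and all asynchronous
 * operations are driven by the ut_multithread polling framework.
 */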
7 #include "spdk/stdinc.h"
8 
9 #include "spdk_cunit.h"
10 #include "spdk/blob.h"
11 #include "spdk/string.h"
12 
13 #include "common/lib/ut_multithread.c"
14 #include "../bs_dev_common.c"
15 #include "blob/blobstore.c"
16 #include "blob/request.c"
17 #include "blob/zeroes.c"
18 #include "blob/blob_bs_dev.c"
19 
20 struct spdk_blob_store *g_bs;
21 spdk_blob_id g_blobid;
22 struct spdk_blob *g_blob, *g_blob2;
23 int g_bserrno, g_bserrno2;
24 struct spdk_xattr_names *g_names;
25 int g_done;
26 char *g_xattr_names[] = {"first", "second", "third"};
27 char *g_xattr_values[] = {"one", "two", "three"};
28 uint64_t g_ctx = 1729;
29 bool g_use_extent_table = false;
30 
31 struct spdk_bs_super_block_ver1 {
32 	uint8_t		signature[8];
33 	uint32_t        version;
34 	uint32_t        length;
35 	uint32_t	clean; /* If there was a clean shutdown, this is 1. */
36 	spdk_blob_id	super_blob;
37 
38 	uint32_t	cluster_size; /* In bytes */
39 
40 	uint32_t	used_page_mask_start; /* Offset from beginning of disk, in pages */
41 	uint32_t	used_page_mask_len; /* Count, in pages */
42 
43 	uint32_t	used_cluster_mask_start; /* Offset from beginning of disk, in pages */
44 	uint32_t	used_cluster_mask_len; /* Count, in pages */
45 
46 	uint32_t	md_start; /* Offset from beginning of disk, in pages */
47 	uint32_t	md_len; /* Count, in pages */
48 
49 	uint8_t		reserved[4036];
50 	uint32_t	crc;
51 } __attribute__((packed));
52 SPDK_STATIC_ASSERT(sizeof(struct spdk_bs_super_block_ver1) == 0x1000, "Invalid super block size");
53 
54 static struct spdk_blob *ut_blob_create_and_open(struct spdk_blob_store *bs,
55 		struct spdk_blob_opts *blob_opts);
56 static void ut_blob_close_and_delete(struct spdk_blob_store *bs, struct spdk_blob *blob);
57 static void suite_blob_setup(void);
58 static void suite_blob_cleanup(void);
59 
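/* Stub spdk_memory_domain_memzero so the blobstore sources link; the stub
 * simply returns 0. */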
60 DEFINE_STUB(spdk_memory_domain_memzero, int, (struct spdk_memory_domain *src_domain,
61 		void *src_domain_ctx, struct iovec *iov, uint32_t iovcnt, void (*cpl_cb)(void *, int),
62 		void *cpl_cb_arg), 0);
63 
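/* Xattr get_value callback used by the tests: look up "name" in g_xattr_names
 * and return the matching entry from g_xattr_values. */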
64 static void
65 _get_xattr_value(void *arg, const char *name,
66 		 const void **value, size_t *value_len)
67 {
68 	uint64_t i;
69 
70 	SPDK_CU_ASSERT_FATAL(value_len != NULL);
71 	SPDK_CU_ASSERT_FATAL(value != NULL);
72 	CU_ASSERT(arg == &g_ctx);
73 
74 	for (i = 0; i < sizeof(g_xattr_names) / sizeof(g_xattr_names[0]); i++) {
75 		if (!strcmp(name, g_xattr_names[i])) {
76 			*value_len = strlen(g_xattr_values[i]);
77 			*value = g_xattr_values[i];
78 			break;
79 		}
80 	}
81 }
82 
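/* Xattr get_value callback that returns a NULL value of zero length. */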
83 static void
84 _get_xattr_value_null(void *arg, const char *name,
85 		      const void **value, size_t *value_len)
86 {
87 	SPDK_CU_ASSERT_FATAL(value_len != NULL);
88 	SPDK_CU_ASSERT_FATAL(value != NULL);
89 	CU_ASSERT(arg == NULL);
90 
91 	*value_len = 0;
92 	*value = NULL;
93 }
94 
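/* Count the entries currently on the blobstore's snapshot list. */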
95 static int
96 _get_snapshots_count(struct spdk_blob_store *bs)
97 {
98 	struct spdk_blob_list *snapshot = NULL;
99 	int count = 0;
100 
101 	TAILQ_FOREACH(snapshot, &bs->snapshots, link) {
102 		count += 1;
103 	}
104 
105 	return count;
106 }
107 
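/* Initialize blob options with defaults, then apply the extent table setting
 * chosen for this test run. */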
108 static void
109 ut_spdk_blob_opts_init(struct spdk_blob_opts *opts)
110 {
111 	spdk_blob_opts_init(opts, sizeof(*opts));
112 	opts->use_extent_table = g_use_extent_table;
113 }
114 
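/* The completion callbacks below simply record their results in globals
 * (g_bserrno, g_bs, g_blobid, g_blob) for the tests to inspect after polling. */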
115 static void
116 bs_op_complete(void *cb_arg, int bserrno)
117 {
118 	g_bserrno = bserrno;
119 }
120 
121 static void
122 bs_op_with_handle_complete(void *cb_arg, struct spdk_blob_store *bs,
123 			   int bserrno)
124 {
125 	g_bs = bs;
126 	g_bserrno = bserrno;
127 }
128 
129 static void
130 blob_op_complete(void *cb_arg, int bserrno)
131 {
132 	g_bserrno = bserrno;
133 }
134 
135 static void
136 blob_op_with_id_complete(void *cb_arg, spdk_blob_id blobid, int bserrno)
137 {
138 	g_blobid = blobid;
139 	g_bserrno = bserrno;
140 }
141 
142 static void
143 blob_op_with_handle_complete(void *cb_arg, struct spdk_blob *blb, int bserrno)
144 {
145 	g_blob = blb;
146 	g_bserrno = bserrno;
147 }
148 
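/* Completion callback for tests that issue two opens back to back: the first
 * completion is recorded in g_blob/g_bserrno, the second in g_blob2/g_bserrno2. */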
149 static void
150 blob_op_with_handle_complete2(void *cb_arg, struct spdk_blob *blob, int bserrno)
151 {
152 	if (g_blob == NULL) {
153 		g_blob = blob;
154 		g_bserrno = bserrno;
155 	} else {
156 		g_blob2 = blob;
157 		g_bserrno2 = bserrno;
158 	}
159 }
160 
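/* Cleanly unload the blobstore and load it again from the same device. */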
161 static void
162 ut_bs_reload(struct spdk_blob_store **bs, struct spdk_bs_opts *opts)
163 {
164 	struct spdk_bs_dev *dev;
165 
166 	/* Unload the blob store */
167 	spdk_bs_unload(*bs, bs_op_complete, NULL);
168 	poll_threads();
169 	CU_ASSERT(g_bserrno == 0);
170 
171 	dev = init_dev();
172 	/* Load an existing blob store */
173 	spdk_bs_load(dev, opts, bs_op_with_handle_complete, NULL);
174 	poll_threads();
175 	CU_ASSERT(g_bserrno == 0);
176 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
177 	*bs = g_bs;
178 
179 	g_bserrno = -1;
180 }
181 
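/* Simulate a dirty shutdown by freeing the blobstore without unloading it,
 * then load it again so the recovery path runs. */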
182 static void
183 ut_bs_dirty_load(struct spdk_blob_store **bs, struct spdk_bs_opts *opts)
184 {
185 	struct spdk_bs_dev *dev;
186 
187 	/* Dirty shutdown */
188 	bs_free(*bs);
189 
190 	dev = init_dev();
191 	/* Load an existing blob store */
192 	spdk_bs_load(dev, opts, bs_op_with_handle_complete, NULL);
193 	poll_threads();
194 	CU_ASSERT(g_bserrno == 0);
195 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
196 	*bs = g_bs;
197 
198 	g_bserrno = -1;
199 }
200 
201 static void
202 blob_init(void)
203 {
204 	struct spdk_blob_store *bs;
205 	struct spdk_bs_dev *dev;
206 
207 	dev = init_dev();
208 
209 	/* should fail for an unsupported blocklen */
210 	dev->blocklen = 500;
211 	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
212 	poll_threads();
213 	CU_ASSERT(g_bserrno == -EINVAL);
214 
215 	dev = init_dev();
216 	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
217 	poll_threads();
218 	CU_ASSERT(g_bserrno == 0);
219 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
220 	bs = g_bs;
221 
222 	spdk_bs_unload(bs, bs_op_complete, NULL);
223 	poll_threads();
224 	CU_ASSERT(g_bserrno == 0);
225 	g_bs = NULL;
226 }
227 
228 static void
229 blob_super(void)
230 {
231 	struct spdk_blob_store *bs = g_bs;
232 	spdk_blob_id blobid;
233 	struct spdk_blob_opts blob_opts;
234 
235 	/* Get the super blob without having set one */
236 	spdk_bs_get_super(bs, blob_op_with_id_complete, NULL);
237 	poll_threads();
238 	CU_ASSERT(g_bserrno == -ENOENT);
239 	CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID);
240 
241 	/* Create a blob */
242 	ut_spdk_blob_opts_init(&blob_opts);
243 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
244 	poll_threads();
245 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
246 	CU_ASSERT(g_blobid !=  SPDK_BLOBID_INVALID);
247 	blobid = g_blobid;
248 
249 	/* Set the blob as the super blob */
250 	spdk_bs_set_super(bs, blobid, blob_op_complete, NULL);
251 	poll_threads();
252 	CU_ASSERT(g_bserrno == 0);
253 
254 	/* Get the super blob */
255 	spdk_bs_get_super(bs, blob_op_with_id_complete, NULL);
256 	poll_threads();
257 	CU_ASSERT(g_bserrno == 0);
258 	CU_ASSERT(blobid == g_blobid);
259 }
260 
261 static void
262 blob_open(void)
263 {
264 	struct spdk_blob_store *bs = g_bs;
265 	struct spdk_blob *blob;
266 	struct spdk_blob_opts blob_opts;
267 	spdk_blob_id blobid, blobid2;
268 
269 	ut_spdk_blob_opts_init(&blob_opts);
270 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
271 	poll_threads();
272 	CU_ASSERT(g_bserrno == 0);
273 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
274 	blobid = g_blobid;
275 
276 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
277 	poll_threads();
278 	CU_ASSERT(g_bserrno == 0);
279 	CU_ASSERT(g_blob != NULL);
280 	blob = g_blob;
281 
282 	blobid2 = spdk_blob_get_id(blob);
283 	CU_ASSERT(blobid == blobid2);
284 
285 	/* Try to open the blob again.  It should return success. */
286 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
287 	poll_threads();
288 	CU_ASSERT(g_bserrno == 0);
289 	CU_ASSERT(blob == g_blob);
290 
291 	spdk_blob_close(blob, blob_op_complete, NULL);
292 	poll_threads();
293 	CU_ASSERT(g_bserrno == 0);
294 
295 	/*
296 	 * Close the blob a second time, releasing the second reference.  This
297 	 *  should succeed.
298 	 */
299 	blob = g_blob;
300 	spdk_blob_close(blob, blob_op_complete, NULL);
301 	poll_threads();
302 	CU_ASSERT(g_bserrno == 0);
303 
304 	/*
305 	 * Try to open the blob again.  It should succeed.  This tests the case
306 	 *  where the blob is opened, closed, then re-opened.
307 	 */
308 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
309 	poll_threads();
310 	CU_ASSERT(g_bserrno == 0);
311 	CU_ASSERT(g_blob != NULL);
312 	blob = g_blob;
313 	spdk_blob_close(blob, blob_op_complete, NULL);
314 	poll_threads();
315 	CU_ASSERT(g_bserrno == 0);
316 
317 	/* Try to open the blob twice in succession.  This should return the same
318 	 * blob object.
319 	 */
320 	g_blob = NULL;
321 	g_blob2 = NULL;
322 	g_bserrno = -1;
323 	g_bserrno2 = -1;
324 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete2, NULL);
325 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete2, NULL);
326 	poll_threads();
327 	CU_ASSERT(g_bserrno == 0);
328 	CU_ASSERT(g_bserrno2 == 0);
329 	CU_ASSERT(g_blob != NULL);
330 	CU_ASSERT(g_blob2 != NULL);
331 	CU_ASSERT(g_blob == g_blob2);
332 
333 	g_bserrno = -1;
334 	spdk_blob_close(g_blob, blob_op_complete, NULL);
335 	poll_threads();
336 	CU_ASSERT(g_bserrno == 0);
337 
338 	ut_blob_close_and_delete(bs, g_blob);
339 }
340 
341 static void
342 blob_create(void)
343 {
344 	struct spdk_blob_store *bs = g_bs;
345 	struct spdk_blob *blob;
346 	struct spdk_blob_opts opts;
347 	spdk_blob_id blobid;
348 
349 	/* Create blob with 10 clusters */
350 
351 	ut_spdk_blob_opts_init(&opts);
352 	opts.num_clusters = 10;
353 
354 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
355 	poll_threads();
356 	CU_ASSERT(g_bserrno == 0);
357 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
358 	blobid = g_blobid;
359 
360 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
361 	poll_threads();
362 	CU_ASSERT(g_bserrno == 0);
363 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
364 	blob = g_blob;
365 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
366 
367 	spdk_blob_close(blob, blob_op_complete, NULL);
368 	poll_threads();
369 	CU_ASSERT(g_bserrno == 0);
370 
371 	/* Create blob with 0 clusters */
372 
373 	ut_spdk_blob_opts_init(&opts);
374 	opts.num_clusters = 0;
375 
376 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
377 	poll_threads();
378 	CU_ASSERT(g_bserrno == 0);
379 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
380 	blobid = g_blobid;
381 
382 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
383 	poll_threads();
384 	CU_ASSERT(g_bserrno == 0);
385 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
386 	blob = g_blob;
387 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
388 
389 	spdk_blob_close(blob, blob_op_complete, NULL);
390 	poll_threads();
391 	CU_ASSERT(g_bserrno == 0);
392 
393 	/* Create blob with default options (opts == NULL) */
394 
395 	spdk_bs_create_blob_ext(bs, NULL, blob_op_with_id_complete, NULL);
396 	poll_threads();
397 	CU_ASSERT(g_bserrno == 0);
398 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
399 	blobid = g_blobid;
400 
401 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
402 	poll_threads();
403 	CU_ASSERT(g_bserrno == 0);
404 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
405 	blob = g_blob;
406 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
407 
408 	spdk_blob_close(blob, blob_op_complete, NULL);
409 	poll_threads();
410 	CU_ASSERT(g_bserrno == 0);
411 
412 	/* Try to create blob with size larger than blobstore */
413 
414 	ut_spdk_blob_opts_init(&opts);
415 	opts.num_clusters = bs->total_clusters + 1;
416 
417 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
418 	poll_threads();
419 	CU_ASSERT(g_bserrno == -ENOSPC);
420 }
421 
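/* A blob created with zero clusters should report an extent table but have no
 * extent pages allocated. */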
422 static void
423 blob_create_zero_extent(void)
424 {
425 	struct spdk_blob_store *bs = g_bs;
426 	struct spdk_blob *blob;
427 	spdk_blob_id blobid;
428 
429 	/* Create blob with default options (opts == NULL) */
430 	spdk_bs_create_blob_ext(bs, NULL, blob_op_with_id_complete, NULL);
431 	poll_threads();
432 	CU_ASSERT(g_bserrno == 0);
433 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
434 	blobid = g_blobid;
435 
436 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
437 	poll_threads();
438 	CU_ASSERT(g_bserrno == 0);
439 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
440 	blob = g_blob;
441 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
442 	CU_ASSERT(blob->extent_table_found == true);
443 	CU_ASSERT(blob->active.extent_pages_array_size == 0);
444 	CU_ASSERT(blob->active.extent_pages == NULL);
445 
446 	spdk_blob_close(blob, blob_op_complete, NULL);
447 	poll_threads();
448 	CU_ASSERT(g_bserrno == 0);
449 
450 	/* Create blob with NULL internal options */
451 	bs_create_blob(bs, NULL, NULL, blob_op_with_id_complete, NULL);
452 	poll_threads();
453 	CU_ASSERT(g_bserrno == 0);
454 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
455 	blobid = g_blobid;
456 
457 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
458 	poll_threads();
459 	CU_ASSERT(g_bserrno == 0);
460 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
461 	blob = g_blob;
462 	CU_ASSERT(TAILQ_FIRST(&blob->xattrs_internal) == NULL);
463 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
464 	CU_ASSERT(blob->extent_table_found == true);
465 	CU_ASSERT(blob->active.extent_pages_array_size == 0);
466 	CU_ASSERT(blob->active.extent_pages == NULL);
467 
468 	spdk_blob_close(blob, blob_op_complete, NULL);
469 	poll_threads();
470 	CU_ASSERT(g_bserrno == 0);
471 }
472 
473 /*
474  * Create and delete one blob in a loop over and over again.  This helps ensure
475  * that the internal bit masks tracking used clusters and md_pages are being
476  * tracked correctly.
477  */
478 static void
479 blob_create_loop(void)
480 {
481 	struct spdk_blob_store *bs = g_bs;
482 	struct spdk_blob_opts opts;
483 	uint32_t i, loop_count;
484 
485 	loop_count = 4 * spdk_max(spdk_bit_array_capacity(bs->used_md_pages),
486 				  spdk_bit_pool_capacity(bs->used_clusters));
487 
488 	for (i = 0; i < loop_count; i++) {
489 		ut_spdk_blob_opts_init(&opts);
490 		opts.num_clusters = 1;
491 		g_bserrno = -1;
492 		g_blobid = SPDK_BLOBID_INVALID;
493 		spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
494 		poll_threads();
495 		CU_ASSERT(g_bserrno == 0);
496 		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
497 		spdk_bs_delete_blob(bs, g_blobid, blob_op_complete, NULL);
498 		poll_threads();
499 		CU_ASSERT(g_bserrno == 0);
500 	}
501 }
502 
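/* Creating a blob whose xattr options name attributes but provide no get_value
 * callback must fail, without leaking blob IDs or metadata pages. */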
503 static void
504 blob_create_fail(void)
505 {
506 	struct spdk_blob_store *bs = g_bs;
507 	struct spdk_blob_opts opts;
508 	spdk_blob_id blobid;
509 	uint32_t used_blobids_count = spdk_bit_array_count_set(bs->used_blobids);
510 	uint32_t used_md_pages_count = spdk_bit_array_count_set(bs->used_md_pages);
511 
512 	/* NULL callback */
513 	ut_spdk_blob_opts_init(&opts);
514 	opts.xattrs.names = g_xattr_names;
515 	opts.xattrs.get_value = NULL;
516 	opts.xattrs.count = 1;
517 	opts.xattrs.ctx = &g_ctx;
518 
519 	blobid = spdk_bit_array_find_first_clear(bs->used_md_pages, 0);
520 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
521 	poll_threads();
522 	CU_ASSERT(g_bserrno == -EINVAL);
523 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
524 	CU_ASSERT(spdk_bit_array_count_set(bs->used_blobids) == used_blobids_count);
525 	CU_ASSERT(spdk_bit_array_count_set(bs->used_md_pages) == used_md_pages_count);
526 
527 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
528 	poll_threads();
529 	CU_ASSERT(g_bserrno == -ENOENT);
530 	SPDK_CU_ASSERT_FATAL(g_blob == NULL);
531 
532 	ut_bs_reload(&bs, NULL);
533 	CU_ASSERT(spdk_bit_array_count_set(bs->used_blobids) == used_blobids_count);
534 	CU_ASSERT(spdk_bit_array_count_set(bs->used_md_pages) == used_md_pages_count);
535 
536 	spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL);
537 	poll_threads();
538 	CU_ASSERT(g_blob == NULL);
539 	CU_ASSERT(g_bserrno == -ENOENT);
540 }
541 
542 static void
543 blob_create_internal(void)
544 {
545 	struct spdk_blob_store *bs = g_bs;
546 	struct spdk_blob *blob;
547 	struct spdk_blob_opts opts;
548 	struct spdk_blob_xattr_opts internal_xattrs;
549 	const void *value;
550 	size_t value_len;
551 	spdk_blob_id blobid;
552 	int rc;
553 
554 	/* Create blob with custom xattrs */
555 
556 	ut_spdk_blob_opts_init(&opts);
557 	blob_xattrs_init(&internal_xattrs);
558 	internal_xattrs.count = 3;
559 	internal_xattrs.names = g_xattr_names;
560 	internal_xattrs.get_value = _get_xattr_value;
561 	internal_xattrs.ctx = &g_ctx;
562 
563 	bs_create_blob(bs, &opts, &internal_xattrs, blob_op_with_id_complete, NULL);
564 	poll_threads();
565 	CU_ASSERT(g_bserrno == 0);
566 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
567 	blobid = g_blobid;
568 
569 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
570 	poll_threads();
571 	CU_ASSERT(g_bserrno == 0);
572 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
573 	blob = g_blob;
574 
575 	rc = blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len, true);
576 	CU_ASSERT(rc == 0);
577 	SPDK_CU_ASSERT_FATAL(value != NULL);
578 	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
579 	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);
580 
581 	rc = blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len, true);
582 	CU_ASSERT(rc == 0);
583 	SPDK_CU_ASSERT_FATAL(value != NULL);
584 	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
585 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);
586 
587 	rc = blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len, true);
588 	CU_ASSERT(rc == 0);
589 	SPDK_CU_ASSERT_FATAL(value != NULL);
590 	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
591 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);
592 
593 	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len);
594 	CU_ASSERT(rc != 0);
595 
596 	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len);
597 	CU_ASSERT(rc != 0);
598 
599 	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len);
600 	CU_ASSERT(rc != 0);
601 
602 	spdk_blob_close(blob, blob_op_complete, NULL);
603 	poll_threads();
604 	CU_ASSERT(g_bserrno == 0);
605 
606 	/* Create blob with NULL internal options */
607 
608 	bs_create_blob(bs, NULL, NULL, blob_op_with_id_complete, NULL);
609 	poll_threads();
610 	CU_ASSERT(g_bserrno == 0);
611 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
612 	blobid = g_blobid;
613 
614 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
615 	poll_threads();
616 	CU_ASSERT(g_bserrno == 0);
617 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
618 	CU_ASSERT(TAILQ_FIRST(&g_blob->xattrs_internal) == NULL);
619 	CU_ASSERT(spdk_blob_get_num_clusters(g_blob) == 0);
620 
621 	blob = g_blob;
622 
623 	spdk_blob_close(blob, blob_op_complete, NULL);
624 	poll_threads();
625 	CU_ASSERT(g_bserrno == 0);
626 }
627 
628 static void
629 blob_thin_provision(void)
630 {
631 	struct spdk_blob_store *bs;
632 	struct spdk_bs_dev *dev;
633 	struct spdk_blob *blob;
634 	struct spdk_blob_opts opts;
635 	struct spdk_bs_opts bs_opts;
636 	spdk_blob_id blobid;
637 
638 	dev = init_dev();
639 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
640 	snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");
641 
642 	/* Initialize a new blob store */
643 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
644 	poll_threads();
645 	CU_ASSERT(g_bserrno == 0);
646 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
647 
648 	bs = g_bs;
649 
650 	/* Create blob with thin provisioning enabled */
651 
652 	ut_spdk_blob_opts_init(&opts);
653 	opts.thin_provision = true;
654 	opts.num_clusters = 10;
655 
656 	blob = ut_blob_create_and_open(bs, &opts);
657 	blobid = spdk_blob_get_id(blob);
658 	CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
659 	/* In thin provisioning, when num_clusters is set but the extent table is
660 	 * not used, nothing is allocated up front. When the extent table is used,
661 	 * the extent page bookkeeping is allocated. */
662 	if (blob->extent_table_found == true) {
663 		CU_ASSERT(blob->active.extent_pages_array_size > 0);
664 		CU_ASSERT(blob->active.extent_pages != NULL);
665 	} else {
666 		CU_ASSERT(blob->active.extent_pages_array_size == 0);
667 		CU_ASSERT(blob->active.extent_pages == NULL);
668 	}
669 
670 	spdk_blob_close(blob, blob_op_complete, NULL);
671 	CU_ASSERT(g_bserrno == 0);
672 
673 	/* Do not shut down cleanly.  This makes sure that when we load again
674 	 *  and try to recover a valid used_cluster map, that blobstore will
675 	 *  ignore clusters with index 0 since these are unallocated clusters.
676 	 */
677 	ut_bs_dirty_load(&bs, &bs_opts);
678 
679 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
680 	poll_threads();
681 	CU_ASSERT(g_bserrno == 0);
682 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
683 	blob = g_blob;
684 	CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
685 
686 	ut_blob_close_and_delete(bs, blob);
687 
688 	spdk_bs_unload(bs, bs_op_complete, NULL);
689 	poll_threads();
690 	CU_ASSERT(g_bserrno == 0);
691 	g_bs = NULL;
692 }
693 
694 static void
695 blob_snapshot(void)
696 {
697 	struct spdk_blob_store *bs = g_bs;
698 	struct spdk_blob *blob;
699 	struct spdk_blob *snapshot, *snapshot2;
700 	struct spdk_blob_bs_dev *blob_bs_dev;
701 	struct spdk_blob_opts opts;
702 	struct spdk_blob_xattr_opts xattrs;
703 	spdk_blob_id blobid;
704 	spdk_blob_id snapshotid;
705 	spdk_blob_id snapshotid2;
706 	const void *value;
707 	size_t value_len;
708 	int rc;
709 	spdk_blob_id ids[2];
710 	size_t count;
711 
712 	/* Create blob with 10 clusters */
713 	ut_spdk_blob_opts_init(&opts);
714 	opts.num_clusters = 10;
715 
716 	blob = ut_blob_create_and_open(bs, &opts);
717 	blobid = spdk_blob_get_id(blob);
718 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
719 
720 	/* Create snapshot from blob */
721 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 0);
722 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
723 	poll_threads();
724 	CU_ASSERT(g_bserrno == 0);
725 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
726 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
727 	snapshotid = g_blobid;
728 
729 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
730 	poll_threads();
731 	CU_ASSERT(g_bserrno == 0);
732 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
733 	snapshot = g_blob;
734 	CU_ASSERT(snapshot->data_ro == true);
735 	CU_ASSERT(snapshot->md_ro == true);
736 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10);
737 
738 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
739 	CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
740 	CU_ASSERT(spdk_mem_all_zero(blob->active.clusters,
741 				    blob->active.num_clusters * sizeof(blob->active.clusters[0])));
742 
743 	/* Try to create snapshot from clone with xattrs */
744 	xattrs.names = g_xattr_names;
745 	xattrs.get_value = _get_xattr_value;
746 	xattrs.count = 3;
747 	xattrs.ctx = &g_ctx;
748 	spdk_bs_create_snapshot(bs, blobid, &xattrs, blob_op_with_id_complete, NULL);
749 	poll_threads();
750 	CU_ASSERT(g_bserrno == 0);
751 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
752 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 2);
753 	snapshotid2 = g_blobid;
754 
755 	spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL);
756 	CU_ASSERT(g_bserrno == 0);
757 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
758 	snapshot2 = g_blob;
759 	CU_ASSERT(snapshot2->data_ro == true);
760 	CU_ASSERT(snapshot2->md_ro == true);
761 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot2) == 10);
762 
763 	/* Confirm that blob is backed by snapshot2 and snapshot2 is backed by snapshot */
764 	CU_ASSERT(snapshot->back_bs_dev == NULL);
765 	SPDK_CU_ASSERT_FATAL(blob->back_bs_dev != NULL);
766 	SPDK_CU_ASSERT_FATAL(snapshot2->back_bs_dev != NULL);
767 
768 	blob_bs_dev = (struct spdk_blob_bs_dev *)blob->back_bs_dev;
769 	CU_ASSERT(blob_bs_dev->blob == snapshot2);
770 
771 	blob_bs_dev = (struct spdk_blob_bs_dev *)snapshot2->back_bs_dev;
772 	CU_ASSERT(blob_bs_dev->blob == snapshot);
773 
774 	rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[0], &value, &value_len);
775 	CU_ASSERT(rc == 0);
776 	SPDK_CU_ASSERT_FATAL(value != NULL);
777 	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
778 	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);
779 
780 	rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[1], &value, &value_len);
781 	CU_ASSERT(rc == 0);
782 	SPDK_CU_ASSERT_FATAL(value != NULL);
783 	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
784 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);
785 
786 	rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[2], &value, &value_len);
787 	CU_ASSERT(rc == 0);
788 	SPDK_CU_ASSERT_FATAL(value != NULL);
789 	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
790 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);
791 
792 	/* Confirm that blob is clone of snapshot2, and snapshot2 is clone of snapshot */
793 	count = 2;
794 	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0);
795 	CU_ASSERT(count == 1);
796 	CU_ASSERT(ids[0] == blobid);
797 
798 	count = 2;
799 	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);
800 	CU_ASSERT(count == 1);
801 	CU_ASSERT(ids[0] == snapshotid2);
802 
803 	/* Try to create snapshot from snapshot */
804 	spdk_bs_create_snapshot(bs, snapshotid, NULL, blob_op_with_id_complete, NULL);
805 	poll_threads();
806 	CU_ASSERT(g_bserrno == -EINVAL);
807 	CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID);
808 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 2);
809 
810 	/* Delete blob and confirm that it is no longer on snapshot2 clone list */
811 	ut_blob_close_and_delete(bs, blob);
812 	count = 2;
813 	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0);
814 	CU_ASSERT(count == 0);
815 
816 	/* Delete snapshot2 and confirm that it is no longer on snapshot clone list */
817 	ut_blob_close_and_delete(bs, snapshot2);
818 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
819 	count = 2;
820 	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0);
821 	CU_ASSERT(count == 0);
822 
823 	ut_blob_close_and_delete(bs, snapshot);
824 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 0);
825 }
826 
827 static void
828 blob_snapshot_freeze_io(void)
829 {
830 	struct spdk_io_channel *channel;
831 	struct spdk_bs_channel *bs_channel;
832 	struct spdk_blob_store *bs = g_bs;
833 	struct spdk_blob *blob;
834 	struct spdk_blob_opts opts;
835 	spdk_blob_id blobid;
836 	uint32_t num_of_pages = 10;
837 	uint8_t payload_read[num_of_pages * SPDK_BS_PAGE_SIZE];
838 	uint8_t payload_write[num_of_pages * SPDK_BS_PAGE_SIZE];
839 	uint8_t payload_zero[num_of_pages * SPDK_BS_PAGE_SIZE];
840 
841 	memset(payload_write, 0xE5, sizeof(payload_write));
842 	memset(payload_read, 0x00, sizeof(payload_read));
843 	memset(payload_zero, 0x00, sizeof(payload_zero));
844 
845 	/* Test freeze I/O during snapshot */
846 	channel = spdk_bs_alloc_io_channel(bs);
847 	bs_channel = spdk_io_channel_get_ctx(channel);
848 
849 	/* Create blob with 10 clusters */
850 	ut_spdk_blob_opts_init(&opts);
851 	opts.num_clusters = 10;
852 	opts.thin_provision = false;
853 
854 	blob = ut_blob_create_and_open(bs, &opts);
855 	blobid = spdk_blob_get_id(blob);
856 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
857 
858 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
859 
860 	/* This is implementation specific.
861 	 * The blob's frozen_refcnt is raised during the freeze step of snapshot
862 	 * creation; four async I/O operations happen before that step runs. */
863 	poll_thread_times(0, 5);
864 
865 	CU_ASSERT(TAILQ_EMPTY(&bs_channel->queued_io));
866 
867 	/* Blob I/O should be frozen here */
868 	CU_ASSERT(blob->frozen_refcnt == 1);
869 
870 	/* Write to the blob */
871 	spdk_blob_io_write(blob, channel, payload_write, 0, num_of_pages, blob_op_complete, NULL);
872 
873 	/* Verify that I/O is queued */
874 	CU_ASSERT(!TAILQ_EMPTY(&bs_channel->queued_io));
875 	/* Verify that the payload is not written to disk; at this point the blob's clusters have already been switched over to the snapshot */
876 	CU_ASSERT(blob->active.clusters[0] == 0);
877 
878 	/* Finish all operations including spdk_bs_create_snapshot */
879 	poll_threads();
880 
881 	/* Verify snapshot */
882 	CU_ASSERT(g_bserrno == 0);
883 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
884 
885 	/* Verify that the blob is no longer frozen */
886 	CU_ASSERT(blob->frozen_refcnt == 0);
887 
888 	/* Verify that postponed I/O completed successfully by comparing payload */
889 	spdk_blob_io_read(blob, channel, payload_read, 0, num_of_pages, blob_op_complete, NULL);
890 	poll_threads();
891 	CU_ASSERT(g_bserrno == 0);
892 	CU_ASSERT(memcmp(payload_write, payload_read, num_of_pages * SPDK_BS_PAGE_SIZE) == 0);
893 
894 	spdk_bs_free_io_channel(channel);
895 	poll_threads();
896 
897 	ut_blob_close_and_delete(bs, blob);
898 }
899 
900 static void
901 blob_clone(void)
902 {
903 	struct spdk_blob_store *bs = g_bs;
904 	struct spdk_blob_opts opts;
905 	struct spdk_blob *blob, *snapshot, *clone;
906 	spdk_blob_id blobid, cloneid, snapshotid;
907 	struct spdk_blob_xattr_opts xattrs;
908 	const void *value;
909 	size_t value_len;
910 	int rc;
911 
912 	/* Create blob with 10 clusters */
913 
914 	ut_spdk_blob_opts_init(&opts);
915 	opts.num_clusters = 10;
916 
917 	blob = ut_blob_create_and_open(bs, &opts);
918 	blobid = spdk_blob_get_id(blob);
919 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
920 
921 	/* Create snapshot */
922 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
923 	poll_threads();
924 	CU_ASSERT(g_bserrno == 0);
925 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
926 	snapshotid = g_blobid;
927 
928 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
929 	poll_threads();
930 	CU_ASSERT(g_bserrno == 0);
931 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
932 	snapshot = g_blob;
933 	CU_ASSERT(snapshot->data_ro == true);
934 	CU_ASSERT(snapshot->md_ro == true);
935 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10);
936 
937 	spdk_blob_close(snapshot, blob_op_complete, NULL);
938 	poll_threads();
939 	CU_ASSERT(g_bserrno == 0);
940 
941 	/* Create clone from snapshot with xattrs */
942 	xattrs.names = g_xattr_names;
943 	xattrs.get_value = _get_xattr_value;
944 	xattrs.count = 3;
945 	xattrs.ctx = &g_ctx;
946 
947 	spdk_bs_create_clone(bs, snapshotid, &xattrs, blob_op_with_id_complete, NULL);
948 	poll_threads();
949 	CU_ASSERT(g_bserrno == 0);
950 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
951 	cloneid = g_blobid;
952 
953 	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
954 	poll_threads();
955 	CU_ASSERT(g_bserrno == 0);
956 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
957 	clone = g_blob;
958 	CU_ASSERT(clone->data_ro == false);
959 	CU_ASSERT(clone->md_ro == false);
960 	CU_ASSERT(spdk_blob_get_num_clusters(clone) == 10);
961 
962 	rc = spdk_blob_get_xattr_value(clone, g_xattr_names[0], &value, &value_len);
963 	CU_ASSERT(rc == 0);
964 	SPDK_CU_ASSERT_FATAL(value != NULL);
965 	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
966 	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);
967 
968 	rc = spdk_blob_get_xattr_value(clone, g_xattr_names[1], &value, &value_len);
969 	CU_ASSERT(rc == 0);
970 	SPDK_CU_ASSERT_FATAL(value != NULL);
971 	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
972 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);
973 
974 	rc = spdk_blob_get_xattr_value(clone, g_xattr_names[2], &value, &value_len);
975 	CU_ASSERT(rc == 0);
976 	SPDK_CU_ASSERT_FATAL(value != NULL);
977 	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
978 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);
979 
980 
981 	spdk_blob_close(clone, blob_op_complete, NULL);
982 	poll_threads();
983 	CU_ASSERT(g_bserrno == 0);
984 
985 	/* Try to create clone from not read only blob */
986 	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
987 	poll_threads();
988 	CU_ASSERT(g_bserrno == -EINVAL);
989 	CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID);
990 
991 	/* Mark blob as read only */
992 	spdk_blob_set_read_only(blob);
993 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
994 	poll_threads();
995 	CU_ASSERT(g_bserrno == 0);
996 
997 	/* Create clone from read only blob */
998 	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
999 	poll_threads();
1000 	CU_ASSERT(g_bserrno == 0);
1001 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
1002 	cloneid = g_blobid;
1003 
1004 	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
1005 	poll_threads();
1006 	CU_ASSERT(g_bserrno == 0);
1007 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
1008 	clone = g_blob;
1009 	CU_ASSERT(clone->data_ro == false);
1010 	CU_ASSERT(clone->md_ro == false);
1011 	CU_ASSERT(spdk_blob_get_num_clusters(clone) == 10);
1012 
1013 	ut_blob_close_and_delete(bs, clone);
1014 	ut_blob_close_and_delete(bs, blob);
1015 }
1016 
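/* Common body for the inflate tests: when decouple_parent is true the blob is
 * decoupled from its parent via spdk_bs_blob_decouple_parent, otherwise it is
 * fully inflated with spdk_bs_inflate_blob. */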
1017 static void
1018 _blob_inflate(bool decouple_parent)
1019 {
1020 	struct spdk_blob_store *bs = g_bs;
1021 	struct spdk_blob_opts opts;
1022 	struct spdk_blob *blob, *snapshot;
1023 	spdk_blob_id blobid, snapshotid;
1024 	struct spdk_io_channel *channel;
1025 	uint64_t free_clusters;
1026 
1027 	channel = spdk_bs_alloc_io_channel(bs);
1028 	SPDK_CU_ASSERT_FATAL(channel != NULL);
1029 
1030 	/* Create blob with 10 clusters */
1031 
1032 	ut_spdk_blob_opts_init(&opts);
1033 	opts.num_clusters = 10;
1034 	opts.thin_provision = true;
1035 
1036 	blob = ut_blob_create_and_open(bs, &opts);
1037 	blobid = spdk_blob_get_id(blob);
1038 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
1039 	CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == true);
1040 
1041 	/* 1) Blob with no parent */
1042 	if (decouple_parent) {
1043 		/* Decouple parent of blob with no parent (should fail) */
1044 		spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
1045 		poll_threads();
1046 		CU_ASSERT(g_bserrno != 0);
1047 	} else {
1048 		/* Inflating a thin blob with no parent should make it thick */
1049 		spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
1050 		poll_threads();
1051 		CU_ASSERT(g_bserrno == 0);
1052 		CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == false);
1053 	}
1054 
1055 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
1056 	poll_threads();
1057 	CU_ASSERT(g_bserrno == 0);
1058 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
1059 	snapshotid = g_blobid;
1060 
1061 	CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == true);
1062 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
1063 
1064 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
1065 	poll_threads();
1066 	CU_ASSERT(g_bserrno == 0);
1067 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
1068 	snapshot = g_blob;
1069 	CU_ASSERT(snapshot->data_ro == true);
1070 	CU_ASSERT(snapshot->md_ro == true);
1071 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10);
1072 
1073 	spdk_blob_close(snapshot, blob_op_complete, NULL);
1074 	poll_threads();
1075 	CU_ASSERT(g_bserrno == 0);
1076 
1077 	free_clusters = spdk_bs_free_cluster_count(bs);
1078 
1079 	/* 2) Blob with parent */
1080 	if (!decouple_parent) {
1081 		/* Do full blob inflation */
1082 		spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
1083 		poll_threads();
1084 		CU_ASSERT(g_bserrno == 0);
1085 		/* all 10 clusters should be allocated */
1086 		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 10);
1087 	} else {
1088 		/* Decouple parent of blob */
1089 		spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
1090 		poll_threads();
1091 		CU_ASSERT(g_bserrno == 0);
1092 		/* when only parent is removed, none of the clusters should be allocated */
1093 		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters);
1094 	}
1095 
1096 	/* Now, it should be possible to delete snapshot */
1097 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
1098 	poll_threads();
1099 	CU_ASSERT(g_bserrno == 0);
1100 
1101 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
1102 	CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == decouple_parent);
1103 
1104 	spdk_bs_free_io_channel(channel);
1105 	poll_threads();
1106 
1107 	ut_blob_close_and_delete(bs, blob);
1108 }
1109 
1110 static void
1111 blob_inflate(void)
1112 {
1113 	_blob_inflate(false);
1114 	_blob_inflate(true);
1115 }
1116 
1117 static void
1118 blob_delete(void)
1119 {
1120 	struct spdk_blob_store *bs = g_bs;
1121 	struct spdk_blob_opts blob_opts;
1122 	spdk_blob_id blobid;
1123 
1124 	/* Create a blob and then delete it. */
1125 	ut_spdk_blob_opts_init(&blob_opts);
1126 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
1127 	poll_threads();
1128 	CU_ASSERT(g_bserrno == 0);
1129 	CU_ASSERT(g_blobid > 0);
1130 	blobid = g_blobid;
1131 
1132 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
1133 	poll_threads();
1134 	CU_ASSERT(g_bserrno == 0);
1135 
1136 	/* Try to open the blob */
1137 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
1138 	poll_threads();
1139 	CU_ASSERT(g_bserrno == -ENOENT);
1140 }
1141 
1142 static void
1143 blob_resize_test(void)
1144 {
1145 	struct spdk_blob_store *bs = g_bs;
1146 	struct spdk_blob *blob;
1147 	uint64_t free_clusters;
1148 
1149 	free_clusters = spdk_bs_free_cluster_count(bs);
1150 
1151 	blob = ut_blob_create_and_open(bs, NULL);
1152 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
1153 
1154 	/* Confirm that resize fails if blob is marked read-only. */
1155 	blob->md_ro = true;
1156 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
1157 	poll_threads();
1158 	CU_ASSERT(g_bserrno == -EPERM);
1159 	blob->md_ro = false;
1160 
1161 	/* The blob started at 0 clusters. Resize it to be 5. */
1162 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
1163 	poll_threads();
1164 	CU_ASSERT(g_bserrno == 0);
1165 	CU_ASSERT((free_clusters - 5) == spdk_bs_free_cluster_count(bs));
1166 
1167 	/* Shrink the blob to 3 clusters. This will not actually release
1168 	 * the old clusters until the blob is synced.
1169 	 */
1170 	spdk_blob_resize(blob, 3, blob_op_complete, NULL);
1171 	poll_threads();
1172 	CU_ASSERT(g_bserrno == 0);
1173 	/* Verify there are still 5 clusters in use */
1174 	CU_ASSERT((free_clusters - 5) == spdk_bs_free_cluster_count(bs));
1175 
1176 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
1177 	poll_threads();
1178 	CU_ASSERT(g_bserrno == 0);
1179 	/* Now there are only 3 clusters in use */
1180 	CU_ASSERT((free_clusters - 3) == spdk_bs_free_cluster_count(bs));
1181 
1182 	/* Resize the blob to be 10 clusters. Growth takes effect immediately. */
1183 	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
1184 	poll_threads();
1185 	CU_ASSERT(g_bserrno == 0);
1186 	CU_ASSERT((free_clusters - 10) == spdk_bs_free_cluster_count(bs));
1187 
1188 	/* Try to resize the blob to size larger than blobstore. */
1189 	spdk_blob_resize(blob, bs->total_clusters + 1, blob_op_complete, NULL);
1190 	poll_threads();
1191 	CU_ASSERT(g_bserrno == -ENOSPC);
1192 
1193 	ut_blob_close_and_delete(bs, blob);
1194 }
1195 
1196 static void
1197 blob_read_only(void)
1198 {
1199 	struct spdk_blob_store *bs;
1200 	struct spdk_bs_dev *dev;
1201 	struct spdk_blob *blob;
1202 	struct spdk_bs_opts opts;
1203 	spdk_blob_id blobid;
1204 	int rc;
1205 
1206 	dev = init_dev();
1207 	spdk_bs_opts_init(&opts, sizeof(opts));
1208 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
1209 
1210 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
1211 	poll_threads();
1212 	CU_ASSERT(g_bserrno == 0);
1213 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
1214 	bs = g_bs;
1215 
1216 	blob = ut_blob_create_and_open(bs, NULL);
1217 	blobid = spdk_blob_get_id(blob);
1218 
1219 	rc = spdk_blob_set_read_only(blob);
1220 	CU_ASSERT(rc == 0);
1221 
1222 	CU_ASSERT(blob->data_ro == false);
1223 	CU_ASSERT(blob->md_ro == false);
1224 
1225 	spdk_blob_sync_md(blob, bs_op_complete, NULL);
1226 	poll_threads();
1227 
1228 	CU_ASSERT(blob->data_ro == true);
1229 	CU_ASSERT(blob->md_ro == true);
1230 	CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);
1231 
1232 	spdk_blob_close(blob, blob_op_complete, NULL);
1233 	poll_threads();
1234 	CU_ASSERT(g_bserrno == 0);
1235 
1236 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
1237 	poll_threads();
1238 	CU_ASSERT(g_bserrno == 0);
1239 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
1240 	blob = g_blob;
1241 
1242 	CU_ASSERT(blob->data_ro == true);
1243 	CU_ASSERT(blob->md_ro == true);
1244 	CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);
1245 
1246 	spdk_blob_close(blob, blob_op_complete, NULL);
1247 	poll_threads();
1248 	CU_ASSERT(g_bserrno == 0);
1249 
1250 	ut_bs_reload(&bs, &opts);
1251 
1252 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
1253 	poll_threads();
1254 	CU_ASSERT(g_bserrno == 0);
1255 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
1256 	blob = g_blob;
1257 
1258 	CU_ASSERT(blob->data_ro == true);
1259 	CU_ASSERT(blob->md_ro == true);
1260 	CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);
1261 
1262 	ut_blob_close_and_delete(bs, blob);
1263 
1264 	spdk_bs_unload(bs, bs_op_complete, NULL);
1265 	poll_threads();
1266 	CU_ASSERT(g_bserrno == 0);
1267 }
1268 
1269 static void
1270 channel_ops(void)
1271 {
1272 	struct spdk_blob_store *bs = g_bs;
1273 	struct spdk_io_channel *channel;
1274 
1275 	channel = spdk_bs_alloc_io_channel(bs);
1276 	CU_ASSERT(channel != NULL);
1277 
1278 	spdk_bs_free_io_channel(channel);
1279 	poll_threads();
1280 }
1281 
1282 static void
1283 blob_write(void)
1284 {
1285 	struct spdk_blob_store *bs = g_bs;
1286 	struct spdk_blob *blob = g_blob;
1287 	struct spdk_io_channel *channel;
1288 	uint64_t pages_per_cluster;
1289 	uint8_t payload[10 * 4096];
1290 
1291 	pages_per_cluster = spdk_bs_get_cluster_size(bs) / spdk_bs_get_page_size(bs);
1292 
1293 	channel = spdk_bs_alloc_io_channel(bs);
1294 	CU_ASSERT(channel != NULL);
1295 
1296 	/* Write to a blob with 0 size */
1297 	spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL);
1298 	poll_threads();
1299 	CU_ASSERT(g_bserrno == -EINVAL);
1300 
1301 	/* Resize the blob */
1302 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
1303 	poll_threads();
1304 	CU_ASSERT(g_bserrno == 0);
1305 
1306 	/* Confirm that write fails if blob is marked read-only. */
1307 	blob->data_ro = true;
1308 	spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL);
1309 	poll_threads();
1310 	CU_ASSERT(g_bserrno == -EPERM);
1311 	blob->data_ro = false;
1312 
1313 	/* Write to the blob */
1314 	spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL);
1315 	poll_threads();
1316 	CU_ASSERT(g_bserrno == 0);
1317 
1318 	/* Write starting beyond the end */
1319 	spdk_blob_io_write(blob, channel, payload, 5 * pages_per_cluster, 1, blob_op_complete,
1320 			   NULL);
1321 	poll_threads();
1322 	CU_ASSERT(g_bserrno == -EINVAL);
1323 
1324 	/* Write starting at a valid location but going off the end */
1325 	spdk_blob_io_write(blob, channel, payload, 4 * pages_per_cluster, pages_per_cluster + 1,
1326 			   blob_op_complete, NULL);
1327 	poll_threads();
1328 	CU_ASSERT(g_bserrno == -EINVAL);
1329 
1330 	spdk_bs_free_io_channel(channel);
1331 	poll_threads();
1332 }
1333 
1334 static void
1335 blob_read(void)
1336 {
1337 	struct spdk_blob_store *bs = g_bs;
1338 	struct spdk_blob *blob = g_blob;
1339 	struct spdk_io_channel *channel;
1340 	uint64_t pages_per_cluster;
1341 	uint8_t payload[10 * 4096];
1342 
1343 	pages_per_cluster = spdk_bs_get_cluster_size(bs) / spdk_bs_get_page_size(bs);
1344 
1345 	channel = spdk_bs_alloc_io_channel(bs);
1346 	CU_ASSERT(channel != NULL);
1347 
1348 	/* Read from a blob with 0 size */
1349 	spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL);
1350 	poll_threads();
1351 	CU_ASSERT(g_bserrno == -EINVAL);
1352 
1353 	/* Resize the blob */
1354 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
1355 	poll_threads();
1356 	CU_ASSERT(g_bserrno == 0);
1357 
1358 	/* Confirm that read passes if blob is marked read-only. */
1359 	blob->data_ro = true;
1360 	spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL);
1361 	poll_threads();
1362 	CU_ASSERT(g_bserrno == 0);
1363 	blob->data_ro = false;
1364 
1365 	/* Read from the blob */
1366 	spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL);
1367 	poll_threads();
1368 	CU_ASSERT(g_bserrno == 0);
1369 
1370 	/* Read starting beyond the end */
1371 	spdk_blob_io_read(blob, channel, payload, 5 * pages_per_cluster, 1, blob_op_complete,
1372 			  NULL);
1373 	poll_threads();
1374 	CU_ASSERT(g_bserrno == -EINVAL);
1375 
1376 	/* Read starting at a valid location but going off the end */
1377 	spdk_blob_io_read(blob, channel, payload, 4 * pages_per_cluster, pages_per_cluster + 1,
1378 			  blob_op_complete, NULL);
1379 	poll_threads();
1380 	CU_ASSERT(g_bserrno == -EINVAL);
1381 
1382 	spdk_bs_free_io_channel(channel);
1383 	poll_threads();
1384 }
1385 
1386 static void
1387 blob_rw_verify(void)
1388 {
1389 	struct spdk_blob_store *bs = g_bs;
1390 	struct spdk_blob *blob = g_blob;
1391 	struct spdk_io_channel *channel;
1392 	uint8_t payload_read[10 * 4096];
1393 	uint8_t payload_write[10 * 4096];
1394 
1395 	channel = spdk_bs_alloc_io_channel(bs);
1396 	CU_ASSERT(channel != NULL);
1397 
1398 	spdk_blob_resize(blob, 32, blob_op_complete, NULL);
1399 	poll_threads();
1400 	CU_ASSERT(g_bserrno == 0);
1401 
1402 	memset(payload_write, 0xE5, sizeof(payload_write));
1403 	spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
1404 	poll_threads();
1405 	CU_ASSERT(g_bserrno == 0);
1406 
1407 	memset(payload_read, 0x00, sizeof(payload_read));
1408 	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
1409 	poll_threads();
1410 	CU_ASSERT(g_bserrno == 0);
1411 	CU_ASSERT(memcmp(payload_write, payload_read, 4 * 4096) == 0);
1412 
1413 	spdk_bs_free_io_channel(channel);
1414 	poll_threads();
1415 }
1416 
1417 static void
1418 blob_rw_verify_iov(void)
1419 {
1420 	struct spdk_blob_store *bs = g_bs;
1421 	struct spdk_blob *blob;
1422 	struct spdk_io_channel *channel;
1423 	uint8_t payload_read[10 * 4096];
1424 	uint8_t payload_write[10 * 4096];
1425 	struct iovec iov_read[3];
1426 	struct iovec iov_write[3];
1427 	void *buf;
1428 
1429 	channel = spdk_bs_alloc_io_channel(bs);
1430 	CU_ASSERT(channel != NULL);
1431 
1432 	blob = ut_blob_create_and_open(bs, NULL);
1433 
1434 	spdk_blob_resize(blob, 2, blob_op_complete, NULL);
1435 	poll_threads();
1436 	CU_ASSERT(g_bserrno == 0);
1437 
1438 	/*
1439 	 * Manually adjust the offset of the blob's second cluster.  This allows
1440 	 *  us to make sure that the readv/write code correctly accounts for I/O
1441 	 *  that cross cluster boundaries.  Start by asserting that the allocated
1442 	 *  clusters are where we expect before modifying the second cluster.
1443 	 */
1444 	CU_ASSERT(blob->active.clusters[0] == 1 * 256);
1445 	CU_ASSERT(blob->active.clusters[1] == 2 * 256);
1446 	blob->active.clusters[1] = 3 * 256;
1447 
1448 	memset(payload_write, 0xE5, sizeof(payload_write));
1449 	iov_write[0].iov_base = payload_write;
1450 	iov_write[0].iov_len = 1 * 4096;
1451 	iov_write[1].iov_base = payload_write + 1 * 4096;
1452 	iov_write[1].iov_len = 5 * 4096;
1453 	iov_write[2].iov_base = payload_write + 6 * 4096;
1454 	iov_write[2].iov_len = 4 * 4096;
1455 	/*
1456 	 * Choose a page offset just before the cluster boundary.  The first 6 pages of payload
1457 	 *  will get written to the first cluster, the last 4 to the second cluster.
1458 	 */
1459 	spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
1460 	poll_threads();
1461 	CU_ASSERT(g_bserrno == 0);
1462 
1463 	memset(payload_read, 0xAA, sizeof(payload_read));
1464 	iov_read[0].iov_base = payload_read;
1465 	iov_read[0].iov_len = 3 * 4096;
1466 	iov_read[1].iov_base = payload_read + 3 * 4096;
1467 	iov_read[1].iov_len = 4 * 4096;
1468 	iov_read[2].iov_base = payload_read + 7 * 4096;
1469 	iov_read[2].iov_len = 3 * 4096;
1470 	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
1471 	poll_threads();
1472 	CU_ASSERT(g_bserrno == 0);
1473 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
1474 
1475 	buf = calloc(1, 256 * 4096);
1476 	SPDK_CU_ASSERT_FATAL(buf != NULL);
1477 	/* Check that cluster 2 on "disk" was not modified. */
1478 	CU_ASSERT(memcmp(buf, &g_dev_buffer[512 * 4096], 256 * 4096) == 0);
1479 	free(buf);
1480 
1481 	spdk_blob_close(blob, blob_op_complete, NULL);
1482 	poll_threads();
1483 	CU_ASSERT(g_bserrno == 0);
1484 
1485 	spdk_bs_free_io_channel(channel);
1486 	poll_threads();
1487 }
1488 
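/* Count the request sets sitting on the channel's reqs list; the tests compare
 * this before and after a failed operation to catch leaked requests. */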
1489 static uint32_t
1490 bs_channel_get_req_count(struct spdk_io_channel *_channel)
1491 {
1492 	struct spdk_bs_channel *channel = spdk_io_channel_get_ctx(_channel);
1493 	struct spdk_bs_request_set *set;
1494 	uint32_t count = 0;
1495 
1496 	TAILQ_FOREACH(set, &channel->reqs, link) {
1497 		count++;
1498 	}
1499 
1500 	return count;
1501 }
1502 
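/* Force calloc to fail during a vectored write and verify that the request
 * fails with -ENOMEM without leaking any channel request sets. */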
1503 static void
1504 blob_rw_verify_iov_nomem(void)
1505 {
1506 	struct spdk_blob_store *bs = g_bs;
1507 	struct spdk_blob *blob = g_blob;
1508 	struct spdk_io_channel *channel;
1509 	uint8_t payload_write[10 * 4096];
1510 	struct iovec iov_write[3];
1511 	uint32_t req_count;
1512 
1513 	channel = spdk_bs_alloc_io_channel(bs);
1514 	CU_ASSERT(channel != NULL);
1515 
1516 	spdk_blob_resize(blob, 2, blob_op_complete, NULL);
1517 	poll_threads();
1518 	CU_ASSERT(g_bserrno == 0);
1519 
1520 	/*
1521 	 * Choose a page offset just before the cluster boundary.  The first 6 pages of payload
1522 	 *  will get written to the first cluster, the last 4 to the second cluster.
1523 	 */
1524 	iov_write[0].iov_base = payload_write;
1525 	iov_write[0].iov_len = 1 * 4096;
1526 	iov_write[1].iov_base = payload_write + 1 * 4096;
1527 	iov_write[1].iov_len = 5 * 4096;
1528 	iov_write[2].iov_base = payload_write + 6 * 4096;
1529 	iov_write[2].iov_len = 4 * 4096;
1530 	MOCK_SET(calloc, NULL);
1531 	req_count = bs_channel_get_req_count(channel);
1532 	spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
1533 	poll_threads();
1534 	CU_ASSERT(g_bserrno == -ENOMEM);
1535 	CU_ASSERT(req_count == bs_channel_get_req_count(channel));
1536 	MOCK_CLEAR(calloc);
1537 
1538 	spdk_bs_free_io_channel(channel);
1539 	poll_threads();
1540 }
1541 
1542 static void
1543 blob_rw_iov_read_only(void)
1544 {
1545 	struct spdk_blob_store *bs = g_bs;
1546 	struct spdk_blob *blob = g_blob;
1547 	struct spdk_io_channel *channel;
1548 	uint8_t payload_read[4096];
1549 	uint8_t payload_write[4096];
1550 	struct iovec iov_read;
1551 	struct iovec iov_write;
1552 
1553 	channel = spdk_bs_alloc_io_channel(bs);
1554 	CU_ASSERT(channel != NULL);
1555 
1556 	spdk_blob_resize(blob, 2, blob_op_complete, NULL);
1557 	poll_threads();
1558 	CU_ASSERT(g_bserrno == 0);
1559 
1560 	/* Verify that writev failed if read_only flag is set. */
1561 	blob->data_ro = true;
1562 	iov_write.iov_base = payload_write;
1563 	iov_write.iov_len = sizeof(payload_write);
1564 	spdk_blob_io_writev(blob, channel, &iov_write, 1, 0, 1, blob_op_complete, NULL);
1565 	poll_threads();
1566 	CU_ASSERT(g_bserrno == -EPERM);
1567 
1568 	/* Verify that reads pass if data_ro flag is set. */
1569 	iov_read.iov_base = payload_read;
1570 	iov_read.iov_len = sizeof(payload_read);
1571 	spdk_blob_io_readv(blob, channel, &iov_read, 1, 0, 1, blob_op_complete, NULL);
1572 	poll_threads();
1573 	CU_ASSERT(g_bserrno == 0);
1574 
1575 	spdk_bs_free_io_channel(channel);
1576 	poll_threads();
1577 }
1578 
1579 static void
1580 _blob_io_read_no_split(struct spdk_blob *blob, struct spdk_io_channel *channel,
1581 		       uint8_t *payload, uint64_t offset, uint64_t length,
1582 		       spdk_blob_op_complete cb_fn, void *cb_arg)
1583 {
1584 	uint64_t i;
1585 	uint8_t *buf;
1586 	uint64_t page_size = spdk_bs_get_page_size(blob->bs);
1587 
1588 	/* To be sure that the operation is NOT split, read one page at a time */
1589 	buf = payload;
1590 	for (i = 0; i < length; i++) {
1591 		spdk_blob_io_read(blob, channel, buf, i + offset, 1, blob_op_complete, NULL);
1592 		poll_threads();
1593 		if (g_bserrno != 0) {
1594 			/* Pass the error code up */
1595 			break;
1596 		}
1597 		buf += page_size;
1598 	}
1599 
1600 	cb_fn(cb_arg, g_bserrno);
1601 }
1602 
1603 static void
1604 _blob_io_write_no_split(struct spdk_blob *blob, struct spdk_io_channel *channel,
1605 			uint8_t *payload, uint64_t offset, uint64_t length,
1606 			spdk_blob_op_complete cb_fn, void *cb_arg)
1607 {
1608 	uint64_t i;
1609 	uint8_t *buf;
1610 	uint64_t page_size = spdk_bs_get_page_size(blob->bs);
1611 
1612 	/* To be sure that the operation is NOT split, write one page at a time */
1613 	buf = payload;
1614 	for (i = 0; i < length; i++) {
1615 		spdk_blob_io_write(blob, channel, buf, i + offset, 1, blob_op_complete, NULL);
1616 		poll_threads();
1617 		if (g_bserrno != 0) {
1618 			/* Pass the error code up */
1619 			break;
1620 		}
1621 		buf += page_size;
1622 	}
1623 
1624 	cb_fn(cb_arg, g_bserrno);
1625 }
1626 
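/* Exercise reads and writes that span several clusters so the internal request
 * splitting path is covered, and cross-check the results against unsplit
 * single-page I/O. */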
1627 static void
1628 blob_operation_split_rw(void)
1629 {
1630 	struct spdk_blob_store *bs = g_bs;
1631 	struct spdk_blob *blob;
1632 	struct spdk_io_channel *channel;
1633 	struct spdk_blob_opts opts;
1634 	uint64_t cluster_size;
1635 
1636 	uint64_t payload_size;
1637 	uint8_t *payload_read;
1638 	uint8_t *payload_write;
1639 	uint8_t *payload_pattern;
1640 
1641 	uint64_t page_size;
1642 	uint64_t pages_per_cluster;
1643 	uint64_t pages_per_payload;
1644 
1645 	uint64_t i;
1646 
1647 	cluster_size = spdk_bs_get_cluster_size(bs);
1648 	page_size = spdk_bs_get_page_size(bs);
1649 	pages_per_cluster = cluster_size / page_size;
1650 	pages_per_payload = pages_per_cluster * 5;
1651 	payload_size = cluster_size * 5;
1652 
1653 	payload_read = malloc(payload_size);
1654 	SPDK_CU_ASSERT_FATAL(payload_read != NULL);
1655 
1656 	payload_write = malloc(payload_size);
1657 	SPDK_CU_ASSERT_FATAL(payload_write != NULL);
1658 
1659 	payload_pattern = malloc(payload_size);
1660 	SPDK_CU_ASSERT_FATAL(payload_pattern != NULL);
1661 
1662 	/* Prepare a distinctive per-page pattern to write */
1663 	memset(payload_pattern, 0xFF, payload_size);
1664 	for (i = 0; i < pages_per_payload; i++) {
1665 		*((uint64_t *)(payload_pattern + page_size * i)) = (i + 1);
1666 	}
1667 
1668 	channel = spdk_bs_alloc_io_channel(bs);
1669 	SPDK_CU_ASSERT_FATAL(channel != NULL);
1670 
1671 	/* Create blob */
1672 	ut_spdk_blob_opts_init(&opts);
1673 	opts.thin_provision = false;
1674 	opts.num_clusters = 5;
1675 
1676 	blob = ut_blob_create_and_open(bs, &opts);
1677 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
1678 
1679 	/* Initial read should return zeroed payload */
1680 	memset(payload_read, 0xFF, payload_size);
1681 	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
1682 	poll_threads();
1683 	CU_ASSERT(g_bserrno == 0);
1684 	CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size));
1685 
1686 	/* Fill whole blob except last page */
1687 	spdk_blob_io_write(blob, channel, payload_pattern, 0, pages_per_payload - 1,
1688 			   blob_op_complete, NULL);
1689 	poll_threads();
1690 	CU_ASSERT(g_bserrno == 0);
1691 
1692 	/* Write last page with a pattern */
1693 	spdk_blob_io_write(blob, channel, payload_pattern, pages_per_payload - 1, 1,
1694 			   blob_op_complete, NULL);
1695 	poll_threads();
1696 	CU_ASSERT(g_bserrno == 0);
1697 
1698 	/* Read whole blob and check consistency */
1699 	memset(payload_read, 0xFF, payload_size);
1700 	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
1701 	poll_threads();
1702 	CU_ASSERT(g_bserrno == 0);
1703 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size - page_size) == 0);
1704 	CU_ASSERT(memcmp(payload_pattern, payload_read + payload_size - page_size, page_size) == 0);
1705 
1706 	/* Fill whole blob except first page */
1707 	spdk_blob_io_write(blob, channel, payload_pattern, 1, pages_per_payload - 1,
1708 			   blob_op_complete, NULL);
1709 	poll_threads();
1710 	CU_ASSERT(g_bserrno == 0);
1711 
1712 	/* Write first page with a pattern */
1713 	spdk_blob_io_write(blob, channel, payload_pattern, 0, 1,
1714 			   blob_op_complete, NULL);
1715 	poll_threads();
1716 	CU_ASSERT(g_bserrno == 0);
1717 
1718 	/* Read whole blob and check consistency */
1719 	memset(payload_read, 0xFF, payload_size);
1720 	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
1721 	poll_threads();
1722 	CU_ASSERT(g_bserrno == 0);
1723 	CU_ASSERT(memcmp(payload_pattern, payload_read + page_size, payload_size - page_size) == 0);
1724 	CU_ASSERT(memcmp(payload_pattern, payload_read, page_size) == 0);
1725 
1726 
1727 	/* Fill whole blob with a pattern (5 clusters) */
1728 
1729 	/* 1. Read test. */
1730 	_blob_io_write_no_split(blob, channel, payload_pattern, 0, pages_per_payload,
1731 				blob_op_complete, NULL);
1732 	poll_threads();
1733 	CU_ASSERT(g_bserrno == 0);
1734 
1735 	memset(payload_read, 0xFF, payload_size);
1736 	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
1737 	poll_threads();
1738 	poll_threads();
1739 	CU_ASSERT(g_bserrno == 0);
1740 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);
1741 
1742 	/* 2. Write test. */
1743 	spdk_blob_io_write(blob, channel, payload_pattern, 0, pages_per_payload,
1744 			   blob_op_complete, NULL);
1745 	poll_threads();
1746 	CU_ASSERT(g_bserrno == 0);
1747 
1748 	memset(payload_read, 0xFF, payload_size);
1749 	_blob_io_read_no_split(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
1750 	poll_threads();
1751 	CU_ASSERT(g_bserrno == 0);
1752 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);
1753 
1754 	spdk_bs_free_io_channel(channel);
1755 	poll_threads();
1756 
1757 	g_blob = NULL;
1758 	g_blobid = 0;
1759 
1760 	free(payload_read);
1761 	free(payload_write);
1762 	free(payload_pattern);
1763 
1764 	ut_blob_close_and_delete(bs, blob);
1765 }
1766 
1767 static void
1768 blob_operation_split_rw_iov(void)
1769 {
1770 	struct spdk_blob_store *bs = g_bs;
1771 	struct spdk_blob *blob;
1772 	struct spdk_io_channel *channel;
1773 	struct spdk_blob_opts opts;
1774 	uint64_t cluster_size;
1775 
1776 	uint64_t payload_size;
1777 	uint8_t *payload_read;
1778 	uint8_t *payload_write;
1779 	uint8_t *payload_pattern;
1780 
1781 	uint64_t page_size;
1782 	uint64_t pages_per_cluster;
1783 	uint64_t pages_per_payload;
1784 
1785 	struct iovec iov_read[2];
1786 	struct iovec iov_write[2];
1787 
1788 	uint64_t i, j;
1789 
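	/* Same 5-cluster payload as in blob_operation_split_rw, but issued through
	 * readv/writev with two iovecs to exercise splitting of vectored I/O. */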
1790 	cluster_size = spdk_bs_get_cluster_size(bs);
1791 	page_size = spdk_bs_get_page_size(bs);
1792 	pages_per_cluster = cluster_size / page_size;
1793 	pages_per_payload = pages_per_cluster * 5;
1794 	payload_size = cluster_size * 5;
1795 
1796 	payload_read = malloc(payload_size);
1797 	SPDK_CU_ASSERT_FATAL(payload_read != NULL);
1798 
1799 	payload_write = malloc(payload_size);
1800 	SPDK_CU_ASSERT_FATAL(payload_write != NULL);
1801 
1802 	payload_pattern = malloc(payload_size);
1803 	SPDK_CU_ASSERT_FATAL(payload_pattern != NULL);
1804 
1805 	/* Prepare a distinct, non-zero pattern to write */
1806 	for (i = 0; i < pages_per_payload; i++) {
1807 		for (j = 0; j < page_size / sizeof(uint64_t); j++) {
1808 			uint64_t *tmp;
1809 
1810 			tmp = (uint64_t *)payload_pattern;
1811 			tmp += ((page_size * i) / sizeof(uint64_t)) + j;
1812 			*tmp = i + 1;
1813 		}
1814 	}
1815 
1816 	channel = spdk_bs_alloc_io_channel(bs);
1817 	SPDK_CU_ASSERT_FATAL(channel != NULL);
1818 
1819 	/* Create blob */
1820 	ut_spdk_blob_opts_init(&opts);
1821 	opts.thin_provision = false;
1822 	opts.num_clusters = 5;
1823 
1824 	blob = ut_blob_create_and_open(bs, &opts);
1825 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
1826 
1827 	/* Initial read should return zeroed payload */
1828 	memset(payload_read, 0xFF, payload_size);
1829 	iov_read[0].iov_base = payload_read;
1830 	iov_read[0].iov_len = cluster_size * 3;
1831 	iov_read[1].iov_base = payload_read + cluster_size * 3;
1832 	iov_read[1].iov_len = cluster_size * 2;
1833 	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL);
1834 	poll_threads();
1835 	CU_ASSERT(g_bserrno == 0);
1836 	CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size));
1837 
1838 	/* The first iov fills the whole blob except the last page and the second iov writes
1839 	 *  the last page with a pattern. */
1840 	iov_write[0].iov_base = payload_pattern;
1841 	iov_write[0].iov_len = payload_size - page_size;
1842 	iov_write[1].iov_base = payload_pattern;
1843 	iov_write[1].iov_len = page_size;
1844 	spdk_blob_io_writev(blob, channel, iov_write, 2, 0, pages_per_payload, blob_op_complete, NULL);
1845 	poll_threads();
1846 	CU_ASSERT(g_bserrno == 0);
1847 
1848 	/* Read whole blob and check consistency */
1849 	memset(payload_read, 0xFF, payload_size);
1850 	iov_read[0].iov_base = payload_read;
1851 	iov_read[0].iov_len = cluster_size * 2;
1852 	iov_read[1].iov_base = payload_read + cluster_size * 2;
1853 	iov_read[1].iov_len = cluster_size * 3;
1854 	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL);
1855 	poll_threads();
1856 	CU_ASSERT(g_bserrno == 0);
1857 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size - page_size) == 0);
1858 	CU_ASSERT(memcmp(payload_pattern, payload_read + payload_size - page_size, page_size) == 0);
1859 
1860 	/* The first iov writes only the first page and the second iov fills the whole blob
1861 	 *  except the first page with a pattern. */
1862 	iov_write[0].iov_base = payload_pattern;
1863 	iov_write[0].iov_len = page_size;
1864 	iov_write[1].iov_base = payload_pattern;
1865 	iov_write[1].iov_len = payload_size - page_size;
1866 	spdk_blob_io_writev(blob, channel, iov_write, 2, 0, pages_per_payload, blob_op_complete, NULL);
1867 	poll_threads();
1868 	CU_ASSERT(g_bserrno == 0);
1869 
1870 	/* Read whole blob and check consistency */
1871 	memset(payload_read, 0xFF, payload_size);
1872 	iov_read[0].iov_base = payload_read;
1873 	iov_read[0].iov_len = cluster_size * 4;
1874 	iov_read[1].iov_base = payload_read + cluster_size * 4;
1875 	iov_read[1].iov_len = cluster_size;
1876 	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL);
1877 	poll_threads();
1878 	CU_ASSERT(g_bserrno == 0);
1879 	CU_ASSERT(memcmp(payload_pattern, payload_read + page_size, payload_size - page_size) == 0);
1880 	CU_ASSERT(memcmp(payload_pattern, payload_read, page_size) == 0);
1881 
1882 
1883 	/* Fill whole blob with a pattern (5 clusters) */
1884 
1885 	/* 1. Read test. */
1886 	_blob_io_write_no_split(blob, channel, payload_pattern, 0, pages_per_payload,
1887 				blob_op_complete, NULL);
1888 	poll_threads();
1889 	CU_ASSERT(g_bserrno == 0);
1890 
1891 	memset(payload_read, 0xFF, payload_size);
1892 	iov_read[0].iov_base = payload_read;
1893 	iov_read[0].iov_len = cluster_size;
1894 	iov_read[1].iov_base = payload_read + cluster_size;
1895 	iov_read[1].iov_len = cluster_size * 4;
1896 	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL);
1897 	poll_threads();
1898 	CU_ASSERT(g_bserrno == 0);
1899 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);
1900 
1901 	/* 2. Write test. */
1902 	iov_write[0].iov_base = payload_read;
1903 	iov_write[0].iov_len = cluster_size * 2;
1904 	iov_write[1].iov_base = payload_read + cluster_size * 2;
1905 	iov_write[1].iov_len = cluster_size * 3;
1906 	spdk_blob_io_writev(blob, channel, iov_write, 2, 0, pages_per_payload, blob_op_complete, NULL);
1907 	poll_threads();
1908 	CU_ASSERT(g_bserrno == 0);
1909 
1910 	memset(payload_read, 0xFF, payload_size);
1911 	_blob_io_read_no_split(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
1912 	poll_threads();
1913 	CU_ASSERT(g_bserrno == 0);
1914 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);
1915 
1916 	spdk_bs_free_io_channel(channel);
1917 	poll_threads();
1918 
1919 	g_blob = NULL;
1920 	g_blobid = 0;
1921 
1922 	free(payload_read);
1923 	free(payload_write);
1924 	free(payload_pattern);
1925 
1926 	ut_blob_close_and_delete(bs, blob);
1927 }
1928 
1929 static void
1930 blob_unmap(void)
1931 {
1932 	struct spdk_blob_store *bs = g_bs;
1933 	struct spdk_blob *blob;
1934 	struct spdk_io_channel *channel;
1935 	struct spdk_blob_opts opts;
1936 	uint8_t payload[4096];
1937 	int i;
1938 
1939 	channel = spdk_bs_alloc_io_channel(bs);
1940 	CU_ASSERT(channel != NULL);
1941 
1942 	ut_spdk_blob_opts_init(&opts);
1943 	opts.num_clusters = 10;
1944 
1945 	blob = ut_blob_create_and_open(bs, &opts);
1946 
1947 	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
1948 	poll_threads();
1949 	CU_ASSERT(g_bserrno == 0);
1950 
1951 	memset(payload, 0, sizeof(payload));
1952 	payload[0] = 0xFF;
1953 
1954 	/*
1955 	 * Set first byte of every cluster to 0xFF.
1956 	 * The first cluster on the device is reserved, so start from cluster number 1
1957 	 */
1958 	for (i = 1; i < 11; i++) {
1959 		g_dev_buffer[i * SPDK_BLOB_OPTS_CLUSTER_SZ] = 0xFF;
1960 	}
1961 
1962 	/* Confirm writes */
1963 	for (i = 0; i < 10; i++) {
1964 		payload[0] = 0;
1965 		spdk_blob_io_read(blob, channel, &payload, i * SPDK_BLOB_OPTS_CLUSTER_SZ / 4096, 1,
1966 				  blob_op_complete, NULL);
1967 		poll_threads();
1968 		CU_ASSERT(g_bserrno == 0);
1969 		CU_ASSERT(payload[0] == 0xFF);
1970 	}
1971 
1972 	/* Mark some clusters as unallocated */
1973 	blob->active.clusters[1] = 0;
1974 	blob->active.clusters[2] = 0;
1975 	blob->active.clusters[3] = 0;
1976 	blob->active.clusters[6] = 0;
1977 	blob->active.clusters[8] = 0;
1978 
1979 	/* Unmap clusters by resizing to 0 */
1980 	spdk_blob_resize(blob, 0, blob_op_complete, NULL);
1981 	poll_threads();
1982 	CU_ASSERT(g_bserrno == 0);
1983 
1984 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
1985 	poll_threads();
1986 	CU_ASSERT(g_bserrno == 0);
1987 
1988 	/* Confirm that only 'allocated' clusters were unmapped */
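	/* Blob cluster N maps to device cluster N + 1 here (device cluster 0 is reserved),
	 * so the entries cleared above correspond to device clusters 2, 3, 4, 7 and 9. */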
1989 	for (i = 1; i < 11; i++) {
1990 		switch (i) {
1991 		case 2:
1992 		case 3:
1993 		case 4:
1994 		case 7:
1995 		case 9:
1996 			CU_ASSERT(g_dev_buffer[i * SPDK_BLOB_OPTS_CLUSTER_SZ] == 0xFF);
1997 			break;
1998 		default:
1999 			CU_ASSERT(g_dev_buffer[i * SPDK_BLOB_OPTS_CLUSTER_SZ] == 0);
2000 			break;
2001 		}
2002 	}
2003 
2004 	spdk_bs_free_io_channel(channel);
2005 	poll_threads();
2006 
2007 	ut_blob_close_and_delete(bs, blob);
2008 }
2009 
2010 static void
2011 blob_iter(void)
2012 {
2013 	struct spdk_blob_store *bs = g_bs;
2014 	struct spdk_blob *blob;
2015 	spdk_blob_id blobid;
2016 	struct spdk_blob_opts blob_opts;
2017 
2018 	spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL);
2019 	poll_threads();
2020 	CU_ASSERT(g_blob == NULL);
2021 	CU_ASSERT(g_bserrno == -ENOENT);
2022 
2023 	ut_spdk_blob_opts_init(&blob_opts);
2024 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
2025 	poll_threads();
2026 	CU_ASSERT(g_bserrno == 0);
2027 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
2028 	blobid = g_blobid;
2029 
2030 	spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL);
2031 	poll_threads();
2032 	CU_ASSERT(g_blob != NULL);
2033 	CU_ASSERT(g_bserrno == 0);
2034 	blob = g_blob;
2035 	CU_ASSERT(spdk_blob_get_id(blob) == blobid);
2036 
2037 	spdk_bs_iter_next(bs, blob, blob_op_with_handle_complete, NULL);
2038 	poll_threads();
2039 	CU_ASSERT(g_blob == NULL);
2040 	CU_ASSERT(g_bserrno == -ENOENT);
2041 }
2042 
2043 static void
2044 blob_xattr(void)
2045 {
2046 	struct spdk_blob_store *bs = g_bs;
2047 	struct spdk_blob *blob = g_blob;
2048 	spdk_blob_id blobid = spdk_blob_get_id(blob);
2049 	uint64_t length;
2050 	int rc;
2051 	const char *name1, *name2;
2052 	const void *value;
2053 	size_t value_len;
2054 	struct spdk_xattr_names *names;
2055 
2056 	/* Test that set_xattr fails if md_ro flag is set. */
2057 	blob->md_ro = true;
2058 	rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
2059 	CU_ASSERT(rc == -EPERM);
2060 
2061 	blob->md_ro = false;
2062 	rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
2063 	CU_ASSERT(rc == 0);
2064 
2065 	length = 2345;
2066 	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
2067 	CU_ASSERT(rc == 0);
2068 
2069 	/* Overwrite "length" xattr. */
2070 	length = 3456;
2071 	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
2072 	CU_ASSERT(rc == 0);
2073 
2074 	/* get_xattr should still work even if md_ro flag is set. */
2075 	value = NULL;
2076 	blob->md_ro = true;
2077 	rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len);
2078 	CU_ASSERT(rc == 0);
2079 	SPDK_CU_ASSERT_FATAL(value != NULL);
2080 	CU_ASSERT(*(uint64_t *)value == length);
2081 	CU_ASSERT(value_len == 8);
2082 	blob->md_ro = false;
2083 
2084 	rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len);
2085 	CU_ASSERT(rc == -ENOENT);
2086 
2087 	names = NULL;
2088 	rc = spdk_blob_get_xattr_names(blob, &names);
2089 	CU_ASSERT(rc == 0);
2090 	SPDK_CU_ASSERT_FATAL(names != NULL);
2091 	CU_ASSERT(spdk_xattr_names_get_count(names) == 2);
2092 	name1 = spdk_xattr_names_get_name(names, 0);
2093 	SPDK_CU_ASSERT_FATAL(name1 != NULL);
2094 	CU_ASSERT(!strcmp(name1, "name") || !strcmp(name1, "length"));
2095 	name2 = spdk_xattr_names_get_name(names, 1);
2096 	SPDK_CU_ASSERT_FATAL(name2 != NULL);
2097 	CU_ASSERT(!strcmp(name2, "name") || !strcmp(name2, "length"));
2098 	CU_ASSERT(strcmp(name1, name2));
2099 	spdk_xattr_names_free(names);
2100 
2101 	/* Confirm that remove_xattr fails if md_ro is set to true. */
2102 	blob->md_ro = true;
2103 	rc = spdk_blob_remove_xattr(blob, "name");
2104 	CU_ASSERT(rc == -EPERM);
2105 
2106 	blob->md_ro = false;
2107 	rc = spdk_blob_remove_xattr(blob, "name");
2108 	CU_ASSERT(rc == 0);
2109 
2110 	rc = spdk_blob_remove_xattr(blob, "foobar");
2111 	CU_ASSERT(rc == -ENOENT);
2112 
2113 	/* Set internal xattr */
2114 	length = 7898;
2115 	rc = blob_set_xattr(blob, "internal", &length, sizeof(length), true);
2116 	CU_ASSERT(rc == 0);
2117 	rc = blob_get_xattr_value(blob, "internal", &value, &value_len, true);
2118 	CU_ASSERT(rc == 0);
2119 	CU_ASSERT(*(uint64_t *)value == length);
2120 	/* Try to get a public xattr with the same name */
2121 	rc = spdk_blob_get_xattr_value(blob, "internal", &value, &value_len);
2122 	CU_ASSERT(rc != 0);
2123 	rc = blob_get_xattr_value(blob, "internal", &value, &value_len, false);
2124 	CU_ASSERT(rc != 0);
2125 	/* Check if SPDK_BLOB_INTERNAL_XATTR is set */
2126 	CU_ASSERT((blob->invalid_flags & SPDK_BLOB_INTERNAL_XATTR) ==
2127 		  SPDK_BLOB_INTERNAL_XATTR);
2128 
2129 	spdk_blob_close(blob, blob_op_complete, NULL);
2130 	poll_threads();
2131 
2132 	/* Check if xattrs are persisted */
2133 	ut_bs_reload(&bs, NULL);
2134 
2135 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
2136 	poll_threads();
2137 	CU_ASSERT(g_bserrno == 0);
2138 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
2139 	blob = g_blob;
2140 
2141 	rc = blob_get_xattr_value(blob, "internal", &value, &value_len, true);
2142 	CU_ASSERT(rc == 0);
2143 	CU_ASSERT(*(uint64_t *)value == length);
2144 
2145 	/* Try to get the internal xattr through the public call */
2146 	rc = spdk_blob_get_xattr_value(blob, "internal", &value, &value_len);
2147 	CU_ASSERT(rc != 0);
2148 
2149 	rc = blob_remove_xattr(blob, "internal", true);
2150 	CU_ASSERT(rc == 0);
2151 
2152 	CU_ASSERT((blob->invalid_flags & SPDK_BLOB_INTERNAL_XATTR) == 0);
2153 }
2154 
2155 static void
2156 blob_parse_md(void)
2157 {
2158 	struct spdk_blob_store *bs = g_bs;
2159 	struct spdk_blob *blob;
2160 	int rc;
2161 	uint32_t used_pages;
2162 	size_t xattr_length;
2163 	char *xattr;
2164 
2165 	used_pages = spdk_bit_array_count_set(bs->used_md_pages);
2166 	blob = ut_blob_create_and_open(bs, NULL);
2167 
2168 	/* Create a large xattr to force more than 1 page of metadata. */
2169 	xattr_length = SPDK_BS_MAX_DESC_SIZE - sizeof(struct spdk_blob_md_descriptor_xattr) -
2170 		       strlen("large_xattr");
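	/* The descriptor consists of a header, the name and the value, so this value
	 * length makes the xattr descriptor fill SPDK_BS_MAX_DESC_SIZE exactly. */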
2171 	xattr = calloc(xattr_length, sizeof(char));
2172 	SPDK_CU_ASSERT_FATAL(xattr != NULL);
2173 	rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
2174 	free(xattr);
2175 	SPDK_CU_ASSERT_FATAL(rc == 0);
2176 
2177 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
2178 	poll_threads();
2179 
2180 	/* Delete the blob and verify that the number of used md pages returns to what it was before its creation. */
2181 	SPDK_CU_ASSERT_FATAL(used_pages != spdk_bit_array_count_set(bs->used_md_pages));
2182 	ut_blob_close_and_delete(bs, blob);
2183 	SPDK_CU_ASSERT_FATAL(used_pages == spdk_bit_array_count_set(bs->used_md_pages));
2184 }
2185 
2186 static void
2187 bs_load(void)
2188 {
2189 	struct spdk_blob_store *bs;
2190 	struct spdk_bs_dev *dev;
2191 	spdk_blob_id blobid;
2192 	struct spdk_blob *blob;
2193 	struct spdk_bs_super_block *super_block;
2194 	uint64_t length;
2195 	int rc;
2196 	const void *value;
2197 	size_t value_len;
2198 	struct spdk_bs_opts opts;
2199 	struct spdk_blob_opts blob_opts;
2200 
2201 	dev = init_dev();
2202 	spdk_bs_opts_init(&opts, sizeof(opts));
2203 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2204 
2205 	/* Initialize a new blob store */
2206 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2207 	poll_threads();
2208 	CU_ASSERT(g_bserrno == 0);
2209 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2210 	bs = g_bs;
2211 
2212 	/* Try to open a blobid that does not exist */
2213 	spdk_bs_open_blob(bs, 0, blob_op_with_handle_complete, NULL);
2214 	poll_threads();
2215 	CU_ASSERT(g_bserrno == -ENOENT);
2216 	CU_ASSERT(g_blob == NULL);
2217 
2218 	/* Create a blob */
2219 	blob = ut_blob_create_and_open(bs, NULL);
2220 	blobid = spdk_blob_get_id(blob);
2221 
2222 	/* Try again to open a valid blob ID but without the upper bit set */
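	/* The low 32 bits alone are not a valid blob ID, so this open must fail. */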
2223 	spdk_bs_open_blob(bs, blobid & 0xFFFFFFFF, blob_op_with_handle_complete, NULL);
2224 	poll_threads();
2225 	CU_ASSERT(g_bserrno == -ENOENT);
2226 	CU_ASSERT(g_blob == NULL);
2227 
2228 	/* Set some xattrs */
2229 	rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
2230 	CU_ASSERT(rc == 0);
2231 
2232 	length = 2345;
2233 	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
2234 	CU_ASSERT(rc == 0);
2235 
2236 	/* Resize the blob */
2237 	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
2238 	poll_threads();
2239 	CU_ASSERT(g_bserrno == 0);
2240 
2241 	spdk_blob_close(blob, blob_op_complete, NULL);
2242 	poll_threads();
2243 	CU_ASSERT(g_bserrno == 0);
2244 	blob = NULL;
2245 	g_blob = NULL;
2246 	g_blobid = SPDK_BLOBID_INVALID;
2247 
2248 	/* Unload the blob store */
2249 	spdk_bs_unload(bs, bs_op_complete, NULL);
2250 	poll_threads();
2251 	CU_ASSERT(g_bserrno == 0);
2252 	g_bs = NULL;
2253 	g_blob = NULL;
2254 	g_blobid = 0;
2255 
2256 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
2257 	CU_ASSERT(super_block->clean == 1);
2258 
2259 	/* Load should fail for device with an unsupported blocklen */
2260 	dev = init_dev();
2261 	dev->blocklen = SPDK_BS_PAGE_SIZE * 2;
2262 	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
2263 	poll_threads();
2264 	CU_ASSERT(g_bserrno == -EINVAL);
2265 
2266 	/* Load should fail when max_md_ops is set to zero */
2267 	dev = init_dev();
2268 	spdk_bs_opts_init(&opts, sizeof(opts));
2269 	opts.max_md_ops = 0;
2270 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2271 	poll_threads();
2272 	CU_ASSERT(g_bserrno == -EINVAL);
2273 
2274 	/* Load should fail when max_channel_ops is set to zero */
2275 	dev = init_dev();
2276 	spdk_bs_opts_init(&opts, sizeof(opts));
2277 	opts.max_channel_ops = 0;
2278 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2279 	poll_threads();
2280 	CU_ASSERT(g_bserrno == -EINVAL);
2281 
2282 	/* Load an existing blob store */
2283 	dev = init_dev();
2284 	spdk_bs_opts_init(&opts, sizeof(opts));
2285 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2286 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2287 	poll_threads();
2288 	CU_ASSERT(g_bserrno == 0);
2289 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2290 	bs = g_bs;
2291 
2292 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
2293 	CU_ASSERT(super_block->clean == 1);
2294 	CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen);
2295 
2296 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
2297 	poll_threads();
2298 	CU_ASSERT(g_bserrno == 0);
2299 	CU_ASSERT(g_blob != NULL);
2300 	blob = g_blob;
2301 
2302 	/* Verify that blobstore is marked dirty after first metadata sync */
2303 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
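	/* Note: poll_threads() is not called here, so the super block update has not
	 * reached the device yet and it still reads clean below. */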
2304 	CU_ASSERT(super_block->clean == 1);
2305 
2306 	/* Get the xattrs */
2307 	value = NULL;
2308 	rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len);
2309 	CU_ASSERT(rc == 0);
2310 	SPDK_CU_ASSERT_FATAL(value != NULL);
2311 	CU_ASSERT(*(uint64_t *)value == length);
2312 	CU_ASSERT(value_len == 8);
2313 
2314 	rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len);
2315 	CU_ASSERT(rc == -ENOENT);
2316 
2317 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
2318 
2319 	spdk_blob_close(blob, blob_op_complete, NULL);
2320 	poll_threads();
2321 	CU_ASSERT(g_bserrno == 0);
2322 	blob = NULL;
2323 	g_blob = NULL;
2324 
2325 	spdk_bs_unload(bs, bs_op_complete, NULL);
2326 	poll_threads();
2327 	CU_ASSERT(g_bserrno == 0);
2328 	g_bs = NULL;
2329 
2330 	/* Load should fail: bdev size < saved size */
2331 	dev = init_dev();
2332 	dev->blockcnt /= 2;
2333 
2334 	spdk_bs_opts_init(&opts, sizeof(opts));
2335 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2336 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2337 	poll_threads();
2338 
2339 	CU_ASSERT(g_bserrno == -EILSEQ);
2340 
2341 	/* Load should succeed: bdev size > saved size */
2342 	dev = init_dev();
2343 	dev->blockcnt *= 4;
2344 
2345 	spdk_bs_opts_init(&opts, sizeof(opts));
2346 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2347 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2348 	poll_threads();
2349 	CU_ASSERT(g_bserrno == 0);
2350 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2351 	bs = g_bs;
2352 
2353 	CU_ASSERT(g_bserrno == 0);
2354 	spdk_bs_unload(bs, bs_op_complete, NULL);
2355 	poll_threads();
2356 
2357 
2358 	/* Test compatibility mode */
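	/* A size of 0 in the super block mimics an older blobstore that did not record
	 * its size; load should fall back to deriving the size from the device. */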
2359 
2360 	dev = init_dev();
2361 	super_block->size = 0;
2362 	super_block->crc = blob_md_page_calc_crc(super_block);
2363 
2364 	spdk_bs_opts_init(&opts, sizeof(opts));
2365 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2366 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2367 	poll_threads();
2368 	CU_ASSERT(g_bserrno == 0);
2369 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2370 	bs = g_bs;
2371 
2372 	/* Create a blob */
2373 	ut_spdk_blob_opts_init(&blob_opts);
2374 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
2375 	poll_threads();
2376 	CU_ASSERT(g_bserrno == 0);
2377 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
2378 
2379 	/* Blobstore should update number of blocks in super_block */
2380 	CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen);
2381 	CU_ASSERT(super_block->clean == 0);
2382 
2383 	spdk_bs_unload(bs, bs_op_complete, NULL);
2384 	poll_threads();
2385 	CU_ASSERT(g_bserrno == 0);
2386 	CU_ASSERT(super_block->clean == 1);
2387 	g_bs = NULL;
2388 
2389 }
2390 
2391 static void
2392 bs_load_pending_removal(void)
2393 {
2394 	struct spdk_blob_store *bs = g_bs;
2395 	struct spdk_blob_opts opts;
2396 	struct spdk_blob *blob, *snapshot;
2397 	spdk_blob_id blobid, snapshotid;
2398 	const void *value;
2399 	size_t value_len;
2400 	int rc;
2401 
2402 	/* Create blob */
2403 	ut_spdk_blob_opts_init(&opts);
2404 	opts.num_clusters = 10;
2405 
2406 	blob = ut_blob_create_and_open(bs, &opts);
2407 	blobid = spdk_blob_get_id(blob);
2408 
2409 	/* Create snapshot */
2410 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
2411 	poll_threads();
2412 	CU_ASSERT(g_bserrno == 0);
2413 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
2414 	snapshotid = g_blobid;
2415 
2416 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
2417 	poll_threads();
2418 	CU_ASSERT(g_bserrno == 0);
2419 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
2420 	snapshot = g_blob;
2421 
2422 	/* Set SNAPSHOT_PENDING_REMOVAL xattr */
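	/* This emulates a snapshot removal that was interrupted before it completed. */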
2423 	snapshot->md_ro = false;
2424 	rc = blob_set_xattr(snapshot, SNAPSHOT_PENDING_REMOVAL, &blobid, sizeof(spdk_blob_id), true);
2425 	CU_ASSERT(rc == 0);
2426 	snapshot->md_ro = true;
2427 
2428 	spdk_blob_close(snapshot, blob_op_complete, NULL);
2429 	poll_threads();
2430 	CU_ASSERT(g_bserrno == 0);
2431 
2432 	spdk_blob_close(blob, blob_op_complete, NULL);
2433 	poll_threads();
2434 	CU_ASSERT(g_bserrno == 0);
2435 
2436 	/* Reload blobstore */
2437 	ut_bs_reload(&bs, NULL);
2438 
2439 	/* Snapshot should not be removed as blob is still pointing to it */
2440 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
2441 	poll_threads();
2442 	CU_ASSERT(g_bserrno == 0);
2443 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
2444 	snapshot = g_blob;
2445 
2446 	/* SNAPSHOT_PENDING_REMOVAL xattr should be removed during load */
2447 	rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_PENDING_REMOVAL, &value, &value_len);
2448 	CU_ASSERT(rc != 0);
2449 
2450 	/* Set SNAPSHOT_PENDING_REMOVAL xattr again */
2451 	snapshot->md_ro = false;
2452 	rc = blob_set_xattr(snapshot, SNAPSHOT_PENDING_REMOVAL, &blobid, sizeof(spdk_blob_id), true);
2453 	CU_ASSERT(rc == 0);
2454 	snapshot->md_ro = true;
2455 
2456 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
2457 	poll_threads();
2458 	CU_ASSERT(g_bserrno == 0);
2459 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
2460 	blob = g_blob;
2461 
2462 	/* Remove parent_id from blob by removing BLOB_SNAPSHOT xattr */
2463 	blob_remove_xattr(blob, BLOB_SNAPSHOT, true);
2464 
2465 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
2466 	poll_threads();
2467 	CU_ASSERT(g_bserrno == 0);
2468 
2469 	spdk_blob_close(snapshot, blob_op_complete, NULL);
2470 	poll_threads();
2471 	CU_ASSERT(g_bserrno == 0);
2472 
2473 	spdk_blob_close(blob, blob_op_complete, NULL);
2474 	poll_threads();
2475 	CU_ASSERT(g_bserrno == 0);
2476 
2477 	/* Reload blobstore */
2478 	ut_bs_reload(&bs, NULL);
2479 
2480 	/* Snapshot should be removed as blob is not pointing to it anymore */
2481 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
2482 	poll_threads();
2483 	CU_ASSERT(g_bserrno != 0);
2484 }
2485 
2486 static void
2487 bs_load_custom_cluster_size(void)
2488 {
2489 	struct spdk_blob_store *bs;
2490 	struct spdk_bs_dev *dev;
2491 	struct spdk_bs_super_block *super_block;
2492 	struct spdk_bs_opts opts;
2493 	uint32_t custom_cluster_size = 4194304; /* 4MiB */
2494 	uint32_t cluster_sz;
2495 	uint64_t total_clusters;
2496 
2497 	dev = init_dev();
2498 	spdk_bs_opts_init(&opts, sizeof(opts));
2499 	opts.cluster_sz = custom_cluster_size;
2500 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2501 
2502 	/* Initialize a new blob store */
2503 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2504 	poll_threads();
2505 	CU_ASSERT(g_bserrno == 0);
2506 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2507 	bs = g_bs;
2508 	cluster_sz = bs->cluster_sz;
2509 	total_clusters = bs->total_clusters;
2510 
2511 	/* Unload the blob store */
2512 	spdk_bs_unload(bs, bs_op_complete, NULL);
2513 	poll_threads();
2514 	CU_ASSERT(g_bserrno == 0);
2515 	g_bs = NULL;
2516 	g_blob = NULL;
2517 	g_blobid = 0;
2518 
2519 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
2520 	CU_ASSERT(super_block->clean == 1);
2521 
2522 	/* Load an existing blob store */
2523 	dev = init_dev();
2524 	spdk_bs_opts_init(&opts, sizeof(opts));
2525 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2526 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2527 	poll_threads();
2528 	CU_ASSERT(g_bserrno == 0);
2529 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2530 	bs = g_bs;
2531 	/* Compare cluster size and number to one after initialization */
2532 	CU_ASSERT(cluster_sz == bs->cluster_sz);
2533 	CU_ASSERT(total_clusters == bs->total_clusters);
2534 
2535 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
2536 	CU_ASSERT(super_block->clean == 1);
2537 	CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen);
2538 
2539 	spdk_bs_unload(bs, bs_op_complete, NULL);
2540 	poll_threads();
2541 	CU_ASSERT(g_bserrno == 0);
2542 	CU_ASSERT(super_block->clean == 1);
2543 	g_bs = NULL;
2544 }
2545 
2546 static void
2547 bs_load_after_failed_grow(void)
2548 {
2549 	struct spdk_blob_store *bs;
2550 	struct spdk_bs_dev *dev;
2551 	struct spdk_bs_super_block *super_block;
2552 	struct spdk_bs_opts opts;
2553 	struct spdk_bs_md_mask *mask;
2554 	struct spdk_blob_opts blob_opts;
2555 	struct spdk_blob *blob, *snapshot;
2556 	spdk_blob_id blobid, snapshotid;
2557 	uint64_t total_data_clusters;
2558 
2559 	dev = init_dev();
2560 	spdk_bs_opts_init(&opts, sizeof(opts));
2561 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2562 	/*
2563 	 * The bdev_size is 64M, cluster_sz is 1M, so there are 64 clusters. The
2564 	 * blobstore will create 64 md pages by default. We set num_md_pages to 128,
2565 	 * so that the blobstore can later grow to double its size.
2566 	 */
2567 	opts.num_md_pages = 128;
2568 
2569 	/* Initialize a new blob store */
2570 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2571 	poll_threads();
2572 	CU_ASSERT(g_bserrno == 0);
2573 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2574 	bs = g_bs;
2575 
2576 	/* Create blob */
2577 	ut_spdk_blob_opts_init(&blob_opts);
2578 	blob_opts.num_clusters = 10;
2579 
2580 	blob = ut_blob_create_and_open(bs, &blob_opts);
2581 	blobid = spdk_blob_get_id(blob);
2582 
2583 	/* Create snapshot */
2584 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
2585 	poll_threads();
2586 	CU_ASSERT(g_bserrno == 0);
2587 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
2588 	snapshotid = g_blobid;
2589 
2590 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
2591 	poll_threads();
2592 	CU_ASSERT(g_bserrno == 0);
2593 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
2594 	snapshot = g_blob;
2595 
2596 	spdk_blob_close(snapshot, blob_op_complete, NULL);
2597 	poll_threads();
2598 	CU_ASSERT(g_bserrno == 0);
2599 
2600 	spdk_blob_close(blob, blob_op_complete, NULL);
2601 	poll_threads();
2602 	CU_ASSERT(g_bserrno == 0);
2603 
2604 	total_data_clusters = bs->total_data_clusters;
2605 	CU_ASSERT(bs->num_free_clusters + 10 == total_data_clusters);
2606 
2607 	/* Unload the blob store */
2608 	spdk_bs_unload(bs, bs_op_complete, NULL);
2609 	poll_threads();
2610 	CU_ASSERT(g_bserrno == 0);
2611 	g_bs = NULL;
2612 	g_blob = NULL;
2613 	g_blobid = 0;
2614 
2615 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
2616 	CU_ASSERT(super_block->clean == 1);
2617 
2618 	mask = (struct spdk_bs_md_mask *)(g_dev_buffer + super_block->used_cluster_mask_start * 4096);
2619 	CU_ASSERT(mask->type == SPDK_MD_MASK_TYPE_USED_CLUSTERS);
2620 	CU_ASSERT(mask->length == super_block->size / super_block->cluster_size);
2621 
2622 	/*
2623 	 * Change mask->length to emulate this scenario: a spdk_bs_grow failed after it had changed
2624 	 * the used_cluster bitmap length but before it updated the super block.
2625 	 */
2626 	mask->length *= 2;
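	/* The doubled mask length matches the doubled device used below, i.e. exactly
	 * what a completed grow would have written for the new size. */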
2627 
2628 	/* Load an existing blob store */
2629 	dev = init_dev();
2630 	dev->blockcnt *= 2;
2631 	spdk_bs_opts_init(&opts, sizeof(opts));
2632 	opts.clear_method = BS_CLEAR_WITH_NONE;
2633 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2634 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2635 	poll_threads();
2636 	CU_ASSERT(g_bserrno == 0);
2637 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2638 	bs = g_bs;
2639 
2640 	/* Check the capacity is the same as before */
2641 	CU_ASSERT(bs->total_data_clusters == total_data_clusters);
2642 	CU_ASSERT(bs->num_free_clusters + 10 == total_data_clusters);
2643 
2644 	/* Check the blob and the snapshot are still available */
2645 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
2646 	poll_threads();
2647 	CU_ASSERT(g_bserrno == 0);
2648 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
2649 	blob = g_blob;
2650 
2651 	spdk_blob_close(blob, blob_op_complete, NULL);
2652 	poll_threads();
2653 	CU_ASSERT(g_bserrno == 0);
2654 
2655 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
2656 	poll_threads();
2657 	CU_ASSERT(g_bserrno == 0);
2658 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
2659 	snapshot = g_blob;
2660 
2661 	spdk_blob_close(snapshot, blob_op_complete, NULL);
2662 	poll_threads();
2663 	CU_ASSERT(g_bserrno == 0);
2664 
2665 	spdk_bs_unload(bs, bs_op_complete, NULL);
2666 	poll_threads();
2667 	CU_ASSERT(g_bserrno == 0);
2668 	CU_ASSERT(super_block->clean == 1);
2669 	g_bs = NULL;
2670 }
2671 
2672 static void
2673 bs_type(void)
2674 {
2675 	struct spdk_blob_store *bs;
2676 	struct spdk_bs_dev *dev;
2677 	struct spdk_bs_opts opts;
2678 
2679 	dev = init_dev();
2680 	spdk_bs_opts_init(&opts, sizeof(opts));
2681 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2682 
2683 	/* Initialize a new blob store */
2684 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2685 	poll_threads();
2686 	CU_ASSERT(g_bserrno == 0);
2687 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2688 	bs = g_bs;
2689 
2690 	/* Unload the blob store */
2691 	spdk_bs_unload(bs, bs_op_complete, NULL);
2692 	poll_threads();
2693 	CU_ASSERT(g_bserrno == 0);
2694 	g_bs = NULL;
2695 	g_blob = NULL;
2696 	g_blobid = 0;
2697 
2698 	/* Load a non-existent blobstore type */
2699 	dev = init_dev();
2700 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "NONEXISTING");
2701 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2702 	poll_threads();
2703 	CU_ASSERT(g_bserrno != 0);
2704 
2705 	/* Load with empty blobstore type */
2706 	dev = init_dev();
2707 	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
2708 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2709 	poll_threads();
2710 	CU_ASSERT(g_bserrno == 0);
2711 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2712 	bs = g_bs;
2713 
2714 	spdk_bs_unload(bs, bs_op_complete, NULL);
2715 	poll_threads();
2716 	CU_ASSERT(g_bserrno == 0);
2717 	g_bs = NULL;
2718 
2719 	/* Initialize a new blob store with empty bstype */
2720 	dev = init_dev();
2721 	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
2722 	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
2723 	poll_threads();
2724 	CU_ASSERT(g_bserrno == 0);
2725 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2726 	bs = g_bs;
2727 
2728 	spdk_bs_unload(bs, bs_op_complete, NULL);
2729 	poll_threads();
2730 	CU_ASSERT(g_bserrno == 0);
2731 	g_bs = NULL;
2732 
2733 	/* Load a non-existent blobstore type */
2734 	dev = init_dev();
2735 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "NONEXISTING");
2736 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2737 	poll_threads();
2738 	CU_ASSERT(g_bserrno != 0);
2739 
2740 	/* Load with empty blobstore type */
2741 	dev = init_dev();
2742 	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
2743 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2744 	poll_threads();
2745 	CU_ASSERT(g_bserrno == 0);
2746 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2747 	bs = g_bs;
2748 
2749 	spdk_bs_unload(bs, bs_op_complete, NULL);
2750 	poll_threads();
2751 	CU_ASSERT(g_bserrno == 0);
2752 	g_bs = NULL;
2753 }
2754 
2755 static void
2756 bs_super_block(void)
2757 {
2758 	struct spdk_blob_store *bs;
2759 	struct spdk_bs_dev *dev;
2760 	struct spdk_bs_super_block *super_block;
2761 	struct spdk_bs_opts opts;
2762 	struct spdk_bs_super_block_ver1 super_block_v1;
2763 
2764 	dev = init_dev();
2765 	spdk_bs_opts_init(&opts, sizeof(opts));
2766 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2767 
2768 	/* Initialize a new blob store */
2769 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2770 	poll_threads();
2771 	CU_ASSERT(g_bserrno == 0);
2772 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2773 	bs = g_bs;
2774 
2775 	/* Unload the blob store */
2776 	spdk_bs_unload(bs, bs_op_complete, NULL);
2777 	poll_threads();
2778 	CU_ASSERT(g_bserrno == 0);
2779 	g_bs = NULL;
2780 	g_blob = NULL;
2781 	g_blobid = 0;
2782 
2783 	/* Load an existing blob store with version newer than supported */
2784 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
2785 	super_block->version++;
2786 
2787 	dev = init_dev();
2788 	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
2789 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2790 	poll_threads();
2791 	CU_ASSERT(g_bserrno != 0);
2792 
2793 	/* Create a new blob store with super block version 1 */
2794 	dev = init_dev();
2795 	super_block_v1.version = 1;
2796 	memcpy(super_block_v1.signature, "SPDKBLOB", sizeof(super_block_v1.signature));
2797 	super_block_v1.length = 0x1000;
2798 	super_block_v1.clean = 1;
2799 	super_block_v1.super_blob = 0xFFFFFFFFFFFFFFFF;
2800 	super_block_v1.cluster_size = 0x100000;
2801 	super_block_v1.used_page_mask_start = 0x01;
2802 	super_block_v1.used_page_mask_len = 0x01;
2803 	super_block_v1.used_cluster_mask_start = 0x02;
2804 	super_block_v1.used_cluster_mask_len = 0x01;
2805 	super_block_v1.md_start = 0x03;
2806 	super_block_v1.md_len = 0x40;
2807 	memset(super_block_v1.reserved, 0, 4036);
2808 	super_block_v1.crc = blob_md_page_calc_crc(&super_block_v1);
2809 	memcpy(g_dev_buffer, &super_block_v1, sizeof(struct spdk_bs_super_block_ver1));
2810 
2811 	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
2812 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2813 	poll_threads();
2814 	CU_ASSERT(g_bserrno == 0);
2815 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2816 	bs = g_bs;
2817 
2818 	spdk_bs_unload(bs, bs_op_complete, NULL);
2819 	poll_threads();
2820 	CU_ASSERT(g_bserrno == 0);
2821 	g_bs = NULL;
2822 }
2823 
2824 static void
2825 bs_test_recover_cluster_count(void)
2826 {
2827 	struct spdk_blob_store *bs;
2828 	struct spdk_bs_dev *dev;
2829 	struct spdk_bs_super_block super_block;
2830 	struct spdk_bs_opts opts;
2831 
2832 	dev = init_dev();
2833 	spdk_bs_opts_init(&opts, sizeof(opts));
2834 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2835 
2836 	super_block.version = 3;
2837 	memcpy(super_block.signature, "SPDKBLOB", sizeof(super_block.signature));
2838 	super_block.length = 0x1000;
2839 	super_block.clean = 0;
2840 	super_block.super_blob = 0xFFFFFFFFFFFFFFFF;
2841 	super_block.cluster_size = 4096;
2842 	super_block.used_page_mask_start = 0x01;
2843 	super_block.used_page_mask_len = 0x01;
2844 	super_block.used_cluster_mask_start = 0x02;
2845 	super_block.used_cluster_mask_len = 0x01;
2846 	super_block.used_blobid_mask_start = 0x03;
2847 	super_block.used_blobid_mask_len = 0x01;
2848 	super_block.md_start = 0x04;
2849 	super_block.md_len = 0x40;
2850 	memset(super_block.bstype.bstype, 0, sizeof(super_block.bstype.bstype));
2851 	super_block.size = dev->blockcnt * dev->blocklen;
2852 	super_block.io_unit_size = 0x1000;
2853 	memset(super_block.reserved, 0, 4000);
2854 	super_block.crc = blob_md_page_calc_crc(&super_block);
2855 	memcpy(g_dev_buffer, &super_block, sizeof(struct spdk_bs_super_block));
2856 
2857 	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
2858 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2859 	poll_threads();
2860 	CU_ASSERT(g_bserrno == 0);
2861 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2862 	bs = g_bs;
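	/* clean == 0 forces recovery: with the 4096-byte cluster size above each metadata
	 * page occupies its own cluster, so exactly md_start + md_len clusters end up used. */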
2863 	CU_ASSERT(bs->num_free_clusters == bs->total_clusters - (super_block.md_start +
2864 			super_block.md_len));
2865 
2866 	spdk_bs_unload(bs, bs_op_complete, NULL);
2867 	poll_threads();
2868 	CU_ASSERT(g_bserrno == 0);
2869 	g_bs = NULL;
2870 }
2871 
2872 static void
2873 bs_test_grow(void)
2874 {
2875 	struct spdk_blob_store *bs;
2876 	struct spdk_bs_dev *dev;
2877 	struct spdk_bs_super_block super_block;
2878 	struct spdk_bs_opts opts;
2879 	struct spdk_bs_md_mask mask;
2880 	uint64_t bdev_size;
2881 
2882 	dev = init_dev();
2883 	bdev_size = dev->blockcnt * dev->blocklen;
2884 	spdk_bs_opts_init(&opts, sizeof(opts));
2885 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2886 	poll_threads();
2887 	CU_ASSERT(g_bserrno == 0);
2888 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2889 	bs = g_bs;
2890 
2891 	spdk_bs_unload(bs, bs_op_complete, NULL);
2892 	poll_threads();
2893 	CU_ASSERT(g_bserrno == 0);
2894 	g_bs = NULL;
2895 
2896 	/*
2897 	 * To make sure all of the metadata has been written to the disk,
2898 	 * we check g_dev_buffer after spdk_bs_unload.
2899 	 */
2900 	memcpy(&super_block, g_dev_buffer, sizeof(struct spdk_bs_super_block));
2901 	CU_ASSERT(super_block.size == bdev_size);
2902 
2903 	/*
2904 	 * Make sure the used_cluster mask is correct.
2905 	 */
2906 	memcpy(&mask, g_dev_buffer + super_block.used_cluster_mask_start * 4096,
2907 	       sizeof(struct spdk_bs_md_mask));
2908 	CU_ASSERT(mask.type == SPDK_MD_MASK_TYPE_USED_CLUSTERS);
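	/* mask.length is a cluster count: 64 MiB device / 1 MiB default cluster size = 64. */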
2909 	CU_ASSERT(mask.length == bdev_size / (1 * 1024 * 1024));
2910 
2911 	/*
2912 	 * The default dev size is 64M; here we set the dev size to 128M, so the
2913 	 * blobstore will adjust its metadata according to the new size.
2914 	 * The new dev size is larger than the g_dev_buffer size, so we set clear_method
2915 	 * to NONE; otherwise the blobstore would try to clear the dev and write beyond
2916 	 * the end of g_dev_buffer.
2917 	 */
2918 	dev = init_dev();
2919 	dev->blockcnt = (128L * 1024L * 1024L) / dev->blocklen;
2920 	bdev_size = dev->blockcnt * dev->blocklen;
2921 	spdk_bs_opts_init(&opts, sizeof(opts));
2922 	opts.clear_method = BS_CLEAR_WITH_NONE;
2923 	spdk_bs_grow(dev, &opts, bs_op_with_handle_complete, NULL);
2924 	poll_threads();
2925 	CU_ASSERT(g_bserrno == 0);
2926 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2927 	bs = g_bs;
2928 
2929 	/*
2930 	 * After spdk_bs_grow, all metadata has been written to the disk,
2931 	 * so we can check g_dev_buffer now.
2932 	 */
2933 	memcpy(&super_block, g_dev_buffer, sizeof(struct spdk_bs_super_block));
2934 	CU_ASSERT(super_block.size == bdev_size);
2935 
2936 	/*
2937 	 * Make sure the used_cluster mask has been updated according to the bdev size
2938 	 */
2939 	memcpy(&mask, g_dev_buffer + super_block.used_cluster_mask_start * 4096,
2940 	       sizeof(struct spdk_bs_md_mask));
2941 	CU_ASSERT(mask.type == SPDK_MD_MASK_TYPE_USED_CLUSTERS);
2942 	CU_ASSERT(mask.length == bdev_size / (1 * 1024 * 1024));
2943 
2944 	spdk_bs_unload(bs, bs_op_complete, NULL);
2945 	poll_threads();
2946 	CU_ASSERT(g_bserrno == 0);
2947 	g_bs = NULL;
2948 }
2949 
2950 /*
2951  * Create a blobstore and then unload it.
2952  */
2953 static void
2954 bs_unload(void)
2955 {
2956 	struct spdk_blob_store *bs = g_bs;
2957 	struct spdk_blob *blob;
2958 
2959 	/* Create a blob and open it. */
2960 	blob = ut_blob_create_and_open(bs, NULL);
2961 
2962 	/* Try to unload blobstore, should fail with open blob */
2963 	g_bserrno = -1;
2964 	spdk_bs_unload(bs, bs_op_complete, NULL);
2965 	poll_threads();
2966 	CU_ASSERT(g_bserrno == -EBUSY);
2967 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2968 
2969 	/* Close the blob, then successfully unload blobstore */
2970 	g_bserrno = -1;
2971 	spdk_blob_close(blob, blob_op_complete, NULL);
2972 	poll_threads();
2973 	CU_ASSERT(g_bserrno == 0);
2974 }
2975 
2976 /*
2977  * Create a blobstore with a cluster size different than the default, and ensure it is
2978  *  persisted.
2979  */
2980 static void
2981 bs_cluster_sz(void)
2982 {
2983 	struct spdk_blob_store *bs;
2984 	struct spdk_bs_dev *dev;
2985 	struct spdk_bs_opts opts;
2986 	uint32_t cluster_sz;
2987 
2988 	/* Set cluster size to zero */
2989 	dev = init_dev();
2990 	spdk_bs_opts_init(&opts, sizeof(opts));
2991 	opts.cluster_sz = 0;
2992 
2993 	/* Initialize a new blob store */
2994 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2995 	poll_threads();
2996 	CU_ASSERT(g_bserrno == -EINVAL);
2997 	SPDK_CU_ASSERT_FATAL(g_bs == NULL);
2998 
2999 	/*
3000 	 * Set the cluster size equal to the blobstore page size;
3001 	 * to work it is required to be at least twice the blobstore page size.
3002 	 */
3003 	dev = init_dev();
3004 	spdk_bs_opts_init(&opts, sizeof(opts));
3005 	opts.cluster_sz = SPDK_BS_PAGE_SIZE;
3006 
3007 	/* Initialize a new blob store */
3008 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
3009 	poll_threads();
3010 	CU_ASSERT(g_bserrno == -ENOMEM);
3011 	SPDK_CU_ASSERT_FATAL(g_bs == NULL);
3012 
3013 	/*
3014 	 * Set the cluster size lower than the page size;
3015 	 * to work it is required to be at least twice the blobstore page size.
3016 	 */
3017 	dev = init_dev();
3018 	spdk_bs_opts_init(&opts, sizeof(opts));
3019 	opts.cluster_sz = SPDK_BS_PAGE_SIZE - 1;
3020 
3021 	/* Initialize a new blob store */
3022 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
3023 	poll_threads();
3024 	CU_ASSERT(g_bserrno == -EINVAL);
3025 	SPDK_CU_ASSERT_FATAL(g_bs == NULL);
3026 
3027 	/* Set cluster size to twice the default */
3028 	dev = init_dev();
3029 	spdk_bs_opts_init(&opts, sizeof(opts));
3030 	opts.cluster_sz *= 2;
3031 	cluster_sz = opts.cluster_sz;
3032 
3033 	/* Initialize a new blob store */
3034 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
3035 	poll_threads();
3036 	CU_ASSERT(g_bserrno == 0);
3037 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3038 	bs = g_bs;
3039 
3040 	CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz);
3041 
3042 	ut_bs_reload(&bs, &opts);
3043 
3044 	CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz);
3045 
3046 	spdk_bs_unload(bs, bs_op_complete, NULL);
3047 	poll_threads();
3048 	CU_ASSERT(g_bserrno == 0);
3049 	g_bs = NULL;
3050 }
3051 
3052 /*
3053  * Create a blobstore, reload it and ensure total usable cluster count
3054  *  stays the same.
3055  */
3056 static void
3057 bs_usable_clusters(void)
3058 {
3059 	struct spdk_blob_store *bs = g_bs;
3060 	struct spdk_blob *blob;
3061 	uint32_t clusters;
3062 	int i;
3063 
3064 
3065 	clusters = spdk_bs_total_data_cluster_count(bs);
3066 
3067 	ut_bs_reload(&bs, NULL);
3068 
3069 	CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters);
3070 
3071 	/* Create and resize blobs to make sure that the usable cluster count won't change */
3072 	for (i = 0; i < 4; i++) {
3073 		g_bserrno = -1;
3074 		g_blobid = SPDK_BLOBID_INVALID;
3075 		blob = ut_blob_create_and_open(bs, NULL);
3076 
3077 		spdk_blob_resize(blob, 10, blob_op_complete, NULL);
3078 		poll_threads();
3079 		CU_ASSERT(g_bserrno == 0);
3080 
3081 		g_bserrno = -1;
3082 		spdk_blob_close(blob, blob_op_complete, NULL);
3083 		poll_threads();
3084 		CU_ASSERT(g_bserrno == 0);
3085 
3086 		CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters);
3087 	}
3088 
3089 	/* Reload the blob store to make sure that nothing changed */
3090 	ut_bs_reload(&bs, NULL);
3091 
3092 	CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters);
3093 }
3094 
3095 /*
3096  * Test resizing of the metadata blob.  This requires creating enough blobs
3097  *  so that one cluster is not enough to fit the metadata for those blobs.
3098 	 *  To induce this condition more quickly, we reduce the cluster
3099  *  size to 16KB, which means only 4 4KB blob metadata pages can fit.
3100  */
3101 static void
3102 bs_resize_md(void)
3103 {
3104 	struct spdk_blob_store *bs;
3105 	const int CLUSTER_PAGE_COUNT = 4;
3106 	const int NUM_BLOBS = CLUSTER_PAGE_COUNT * 4;
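	/* 16 blobs need at least 16 metadata pages, but one 16KB cluster holds only 4,
	 * so the metadata blob is forced to grow beyond a single cluster. */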
3107 	struct spdk_bs_dev *dev;
3108 	struct spdk_bs_opts opts;
3109 	struct spdk_blob *blob;
3110 	struct spdk_blob_opts blob_opts;
3111 	uint32_t cluster_sz;
3112 	spdk_blob_id blobids[NUM_BLOBS];
3113 	int i;
3114 
3115 
3116 	dev = init_dev();
3117 	spdk_bs_opts_init(&opts, sizeof(opts));
3118 	opts.cluster_sz = CLUSTER_PAGE_COUNT * 4096;
3119 	cluster_sz = opts.cluster_sz;
3120 
3121 	/* Initialize a new blob store */
3122 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
3123 	poll_threads();
3124 	CU_ASSERT(g_bserrno == 0);
3125 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3126 	bs = g_bs;
3127 
3128 	CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz);
3129 
3130 	ut_spdk_blob_opts_init(&blob_opts);
3131 
3132 	for (i = 0; i < NUM_BLOBS; i++) {
3133 		g_bserrno = -1;
3134 		g_blobid = SPDK_BLOBID_INVALID;
3135 		spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
3136 		poll_threads();
3137 		CU_ASSERT(g_bserrno == 0);
3138 		CU_ASSERT(g_blobid !=  SPDK_BLOBID_INVALID);
3139 		blobids[i] = g_blobid;
3140 	}
3141 
3142 	ut_bs_reload(&bs, &opts);
3143 
3144 	CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz);
3145 
3146 	for (i = 0; i < NUM_BLOBS; i++) {
3147 		g_bserrno = -1;
3148 		g_blob = NULL;
3149 		spdk_bs_open_blob(bs, blobids[i], blob_op_with_handle_complete, NULL);
3150 		poll_threads();
3151 		CU_ASSERT(g_bserrno == 0);
3152 		CU_ASSERT(g_blob !=  NULL);
3153 		blob = g_blob;
3154 		g_bserrno = -1;
3155 		spdk_blob_close(blob, blob_op_complete, NULL);
3156 		poll_threads();
3157 		CU_ASSERT(g_bserrno == 0);
3158 	}
3159 
3160 	spdk_bs_unload(bs, bs_op_complete, NULL);
3161 	poll_threads();
3162 	CU_ASSERT(g_bserrno == 0);
3163 	g_bs = NULL;
3164 }
3165 
3166 static void
3167 bs_destroy(void)
3168 {
3169 	struct spdk_blob_store *bs;
3170 	struct spdk_bs_dev *dev;
3171 
3172 	/* Initialize a new blob store */
3173 	dev = init_dev();
3174 	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
3175 	poll_threads();
3176 	CU_ASSERT(g_bserrno == 0);
3177 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3178 	bs = g_bs;
3179 
3180 	/* Destroy the blob store */
3181 	g_bserrno = -1;
3182 	spdk_bs_destroy(bs, bs_op_complete, NULL);
3183 	poll_threads();
3184 	CU_ASSERT(g_bserrno == 0);
3185 
3186 	/* Loading a non-existent blob store should fail. */
3187 	g_bs = NULL;
3188 	dev = init_dev();
3189 
3190 	g_bserrno = 0;
3191 	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
3192 	poll_threads();
3193 	CU_ASSERT(g_bserrno != 0);
3194 }
3195 
3196 /* Try to hit all of the corner cases associated with serializing
3197  * a blob to disk
3198  */
3199 static void
3200 blob_serialize_test(void)
3201 {
3202 	struct spdk_bs_dev *dev;
3203 	struct spdk_bs_opts opts;
3204 	struct spdk_blob_store *bs;
3205 	spdk_blob_id blobid[2];
3206 	struct spdk_blob *blob[2];
3207 	uint64_t i;
3208 	char *value;
3209 	int rc;
3210 
3211 	dev = init_dev();
3212 
3213 	/* Initialize a new blobstore with very small clusters */
3214 	spdk_bs_opts_init(&opts, sizeof(opts));
3215 	opts.cluster_sz = dev->blocklen * 8;
3216 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
3217 	poll_threads();
3218 	CU_ASSERT(g_bserrno == 0);
3219 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3220 	bs = g_bs;
3221 
3222 	/* Create and open two blobs */
3223 	for (i = 0; i < 2; i++) {
3224 		blob[i] = ut_blob_create_and_open(bs, NULL);
3225 		blobid[i] = spdk_blob_get_id(blob[i]);
3226 
3227 		/* Set a fairly large xattr on both blobs to eat up
3228 		 * metadata space
3229 		 */
3230 		value = calloc(dev->blocklen - 64, sizeof(char));
3231 		SPDK_CU_ASSERT_FATAL(value != NULL);
3232 		memset(value, i, dev->blocklen / 2);
3233 		rc = spdk_blob_set_xattr(blob[i], "name", value, dev->blocklen - 64);
3234 		CU_ASSERT(rc == 0);
3235 		free(value);
3236 	}
3237 
3238 	/* Resize the blobs, alternating 1 cluster at a time.
3239 	 * This thwarts run length encoding and will cause the
3240 	 * extents to spill over.
3241 	 */
3242 	for (i = 0; i < 6; i++) {
3243 		spdk_blob_resize(blob[i % 2], (i / 2) + 1, blob_op_complete, NULL);
3244 		poll_threads();
3245 		CU_ASSERT(g_bserrno == 0);
3246 	}
3247 
3248 	for (i = 0; i < 2; i++) {
3249 		spdk_blob_sync_md(blob[i], blob_op_complete, NULL);
3250 		poll_threads();
3251 		CU_ASSERT(g_bserrno == 0);
3252 	}
3253 
3254 	/* Close the blobs */
3255 	for (i = 0; i < 2; i++) {
3256 		spdk_blob_close(blob[i], blob_op_complete, NULL);
3257 		poll_threads();
3258 		CU_ASSERT(g_bserrno == 0);
3259 	}
3260 
3261 	ut_bs_reload(&bs, &opts);
3262 
3263 	for (i = 0; i < 2; i++) {
3264 		blob[i] = NULL;
3265 
3266 		spdk_bs_open_blob(bs, blobid[i], blob_op_with_handle_complete, NULL);
3267 		poll_threads();
3268 		CU_ASSERT(g_bserrno == 0);
3269 		CU_ASSERT(g_blob != NULL);
3270 		blob[i] = g_blob;
3271 
3272 		CU_ASSERT(spdk_blob_get_num_clusters(blob[i]) == 3);
3273 
3274 		spdk_blob_close(blob[i], blob_op_complete, NULL);
3275 		poll_threads();
3276 		CU_ASSERT(g_bserrno == 0);
3277 	}
3278 
3279 	spdk_bs_unload(bs, bs_op_complete, NULL);
3280 	poll_threads();
3281 	CU_ASSERT(g_bserrno == 0);
3282 	g_bs = NULL;
3283 }
3284 
3285 static void
3286 blob_crc(void)
3287 {
3288 	struct spdk_blob_store *bs = g_bs;
3289 	struct spdk_blob *blob;
3290 	spdk_blob_id blobid;
3291 	uint32_t page_num;
3292 	int index;
3293 	struct spdk_blob_md_page *page;
3294 
3295 	blob = ut_blob_create_and_open(bs, NULL);
3296 	blobid = spdk_blob_get_id(blob);
3297 
3298 	spdk_blob_close(blob, blob_op_complete, NULL);
3299 	poll_threads();
3300 	CU_ASSERT(g_bserrno == 0);
3301 
3302 	page_num = bs_blobid_to_page(blobid);
3303 	index = DEV_BUFFER_BLOCKLEN * (bs->md_start + page_num);
3304 	page = (struct spdk_blob_md_page *)&g_dev_buffer[index];
3305 	page->crc = 0;
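	/* Zeroing the stored CRC corrupts the metadata page checksum, so both the open
	 * and the delete below must fail with -EINVAL. */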
3306 
3307 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
3308 	poll_threads();
3309 	CU_ASSERT(g_bserrno == -EINVAL);
3310 	CU_ASSERT(g_blob == NULL);
3311 	g_bserrno = 0;
3312 
3313 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
3314 	poll_threads();
3315 	CU_ASSERT(g_bserrno == -EINVAL);
3316 }
3317 
3318 static void
3319 super_block_crc(void)
3320 {
3321 	struct spdk_blob_store *bs;
3322 	struct spdk_bs_dev *dev;
3323 	struct spdk_bs_super_block *super_block;
3324 
3325 	dev = init_dev();
3326 	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
3327 	poll_threads();
3328 	CU_ASSERT(g_bserrno == 0);
3329 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3330 	bs = g_bs;
3331 
3332 	spdk_bs_unload(bs, bs_op_complete, NULL);
3333 	poll_threads();
3334 	CU_ASSERT(g_bserrno == 0);
3335 	g_bs = NULL;
3336 
3337 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
3338 	super_block->crc = 0;
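	/* A zeroed CRC makes the super block checksum invalid, so the load below must
	 * fail with -EILSEQ. */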
3339 	dev = init_dev();
3340 
3341 	/* Load an existing blob store */
3342 	g_bserrno = 0;
3343 	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
3344 	poll_threads();
3345 	CU_ASSERT(g_bserrno == -EILSEQ);
3346 }
3347 
3348 /* For the blob dirty shutdown test case we run the following sub-test cases:
3349  * 1 Initialize a new blob store and create 1 super blob with some xattrs, then
3350  *   dirty shutdown, reload the blob store and verify the xattrs.
3351  * 2 Resize the blob from 10 clusters to 20 clusters and then dirty shutdown,
3352  *   reload the blob store and verify the cluster count.
3353  * 3 Create a second blob and then dirty shutdown, reload the blob store
3354  *   and verify the second blob.
3355  * 4 Delete the second blob and then dirty shutdown, reload the blob store
3356  *   and verify the second blob is invalid.
3357  * 5 Create the second blob again and also create a third blob, corrupt the
3358  *   md of the second blob so that it becomes invalid, and then dirty shutdown,
3359  *   reload the blob store and verify that the second blob is invalid and that
3360  *   the third blob is correct.
3361  */
3362 static void
3363 blob_dirty_shutdown(void)
3364 {
3365 	int rc;
3366 	int index;
3367 	struct spdk_blob_store *bs = g_bs;
3368 	spdk_blob_id blobid1, blobid2, blobid3;
3369 	struct spdk_blob *blob = g_blob;
3370 	uint64_t length;
3371 	uint64_t free_clusters;
3372 	const void *value;
3373 	size_t value_len;
3374 	uint32_t page_num;
3375 	struct spdk_blob_md_page *page;
3376 	struct spdk_blob_opts blob_opts;
3377 
3378 	/* Create first blob */
3379 	blobid1 = spdk_blob_get_id(blob);
3380 
3381 	/* Set some xattrs */
3382 	rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
3383 	CU_ASSERT(rc == 0);
3384 
3385 	length = 2345;
3386 	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
3387 	CU_ASSERT(rc == 0);
3388 
3389 	/* Put an xattr that fits exactly into a single page.
3390 	 * This results in additional pages being added to the MD:
3391 	 * the first holds the flags and the smaller xattrs, the second the large xattr,
3392 	 * and the third just the extents.
3393 	 */
3394 	size_t xattr_length = 4072 - sizeof(struct spdk_blob_md_descriptor_xattr) -
3395 			      strlen("large_xattr");
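	/* 4072 matches the descriptor area of a 4KB metadata page, so the resulting
	 * xattr descriptor (header + name + value) fills that area exactly. */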
3396 	char *xattr = calloc(xattr_length, sizeof(char));
3397 	SPDK_CU_ASSERT_FATAL(xattr != NULL);
3398 	rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
3399 	free(xattr);
3400 	SPDK_CU_ASSERT_FATAL(rc == 0);
3401 
3402 	/* Resize the blob */
3403 	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
3404 	poll_threads();
3405 	CU_ASSERT(g_bserrno == 0);
3406 
3407 	/* Set the blob as the super blob */
3408 	spdk_bs_set_super(bs, blobid1, blob_op_complete, NULL);
3409 	poll_threads();
3410 	CU_ASSERT(g_bserrno == 0);
3411 
3412 	free_clusters = spdk_bs_free_cluster_count(bs);
3413 
3414 	spdk_blob_close(blob, blob_op_complete, NULL);
3415 	poll_threads();
3416 	CU_ASSERT(g_bserrno == 0);
3417 	blob = NULL;
3418 	g_blob = NULL;
3419 	g_blobid = SPDK_BLOBID_INVALID;
3420 
3421 	ut_bs_dirty_load(&bs, NULL);
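	/* Simulate a dirty shutdown (the blobstore is freed without a clean unload) and
	 * reload it from the device. */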
3422 
3423 	/* Get the super blob */
3424 	spdk_bs_get_super(bs, blob_op_with_id_complete, NULL);
3425 	poll_threads();
3426 	CU_ASSERT(g_bserrno == 0);
3427 	CU_ASSERT(blobid1 == g_blobid);
3428 
3429 	spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL);
3430 	poll_threads();
3431 	CU_ASSERT(g_bserrno == 0);
3432 	CU_ASSERT(g_blob != NULL);
3433 	blob = g_blob;
3434 
3435 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3436 
3437 	/* Get the xattrs */
3438 	value = NULL;
3439 	rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len);
3440 	CU_ASSERT(rc == 0);
3441 	SPDK_CU_ASSERT_FATAL(value != NULL);
3442 	CU_ASSERT(*(uint64_t *)value == length);
3443 	CU_ASSERT(value_len == 8);
3444 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
3445 
3446 	/* Resize the blob */
3447 	spdk_blob_resize(blob, 20, blob_op_complete, NULL);
3448 	poll_threads();
3449 	CU_ASSERT(g_bserrno == 0);
3450 
3451 	free_clusters = spdk_bs_free_cluster_count(bs);
3452 
3453 	spdk_blob_close(blob, blob_op_complete, NULL);
3454 	poll_threads();
3455 	CU_ASSERT(g_bserrno == 0);
3456 	blob = NULL;
3457 	g_blob = NULL;
3458 	g_blobid = SPDK_BLOBID_INVALID;
3459 
3460 	ut_bs_dirty_load(&bs, NULL);
3461 
3462 	spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL);
3463 	poll_threads();
3464 	CU_ASSERT(g_bserrno == 0);
3465 	CU_ASSERT(g_blob != NULL);
3466 	blob = g_blob;
3467 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 20);
3468 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3469 
3470 	spdk_blob_close(blob, blob_op_complete, NULL);
3471 	poll_threads();
3472 	CU_ASSERT(g_bserrno == 0);
3473 	blob = NULL;
3474 	g_blob = NULL;
3475 	g_blobid = SPDK_BLOBID_INVALID;
3476 
3477 	/* Create second blob */
3478 	blob = ut_blob_create_and_open(bs, NULL);
3479 	blobid2 = spdk_blob_get_id(blob);
3480 
3481 	/* Set some xattrs */
3482 	rc = spdk_blob_set_xattr(blob, "name", "log1.txt", strlen("log1.txt") + 1);
3483 	CU_ASSERT(rc == 0);
3484 
3485 	length = 5432;
3486 	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
3487 	CU_ASSERT(rc == 0);
3488 
3489 	/* Resize the blob */
3490 	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
3491 	poll_threads();
3492 	CU_ASSERT(g_bserrno == 0);
3493 
3494 	free_clusters = spdk_bs_free_cluster_count(bs);
3495 
3496 	spdk_blob_close(blob, blob_op_complete, NULL);
3497 	poll_threads();
3498 	CU_ASSERT(g_bserrno == 0);
3499 	blob = NULL;
3500 	g_blob = NULL;
3501 	g_blobid = SPDK_BLOBID_INVALID;
3502 
3503 	ut_bs_dirty_load(&bs, NULL);
3504 
3505 	spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL);
3506 	poll_threads();
3507 	CU_ASSERT(g_bserrno == 0);
3508 	CU_ASSERT(g_blob != NULL);
3509 	blob = g_blob;
3510 
3511 	/* Get the xattrs */
3512 	value = NULL;
3513 	rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len);
3514 	CU_ASSERT(rc == 0);
3515 	SPDK_CU_ASSERT_FATAL(value != NULL);
3516 	CU_ASSERT(*(uint64_t *)value == length);
3517 	CU_ASSERT(value_len == 8);
3518 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
3519 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3520 
3521 	ut_blob_close_and_delete(bs, blob);
3522 
3523 	free_clusters = spdk_bs_free_cluster_count(bs);
3524 
3525 	ut_bs_dirty_load(&bs, NULL);
3526 
3527 	spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL);
3528 	poll_threads();
3529 	CU_ASSERT(g_bserrno != 0);
3530 	CU_ASSERT(g_blob == NULL);
3531 
3532 	spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL);
3533 	poll_threads();
3534 	CU_ASSERT(g_bserrno == 0);
3535 	CU_ASSERT(g_blob != NULL);
3536 	blob = g_blob;
3537 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3538 	spdk_blob_close(blob, blob_op_complete, NULL);
3539 	poll_threads();
3540 	CU_ASSERT(g_bserrno == 0);
3541 
3542 	ut_bs_reload(&bs, NULL);
3543 
3544 	/* Create second blob */
3545 	ut_spdk_blob_opts_init(&blob_opts);
3546 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
3547 	poll_threads();
3548 	CU_ASSERT(g_bserrno == 0);
3549 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
3550 	blobid2 = g_blobid;
3551 
3552 	/* Create third blob */
3553 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
3554 	poll_threads();
3555 	CU_ASSERT(g_bserrno == 0);
3556 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
3557 	blobid3 = g_blobid;
3558 
3559 	spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL);
3560 	poll_threads();
3561 	CU_ASSERT(g_bserrno == 0);
3562 	CU_ASSERT(g_blob != NULL);
3563 	blob = g_blob;
3564 
3565 	/* Set some xattrs for second blob */
3566 	rc = spdk_blob_set_xattr(blob, "name", "log1.txt", strlen("log1.txt") + 1);
3567 	CU_ASSERT(rc == 0);
3568 
3569 	length = 5432;
3570 	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
3571 	CU_ASSERT(rc == 0);
3572 
3573 	spdk_blob_close(blob, blob_op_complete, NULL);
3574 	poll_threads();
3575 	CU_ASSERT(g_bserrno == 0);
3576 	blob = NULL;
3577 	g_blob = NULL;
3578 	g_blobid = SPDK_BLOBID_INVALID;
3579 
3580 	spdk_bs_open_blob(bs, blobid3, blob_op_with_handle_complete, NULL);
3581 	poll_threads();
3582 	CU_ASSERT(g_bserrno == 0);
3583 	CU_ASSERT(g_blob != NULL);
3584 	blob = g_blob;
3585 
3586 	/* Set some xattrs for third blob */
3587 	rc = spdk_blob_set_xattr(blob, "name", "log2.txt", strlen("log2.txt") + 1);
3588 	CU_ASSERT(rc == 0);
3589 
3590 	length = 5432;
3591 	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
3592 	CU_ASSERT(rc == 0);
3593 
3594 	spdk_blob_close(blob, blob_op_complete, NULL);
3595 	poll_threads();
3596 	CU_ASSERT(g_bserrno == 0);
3597 	blob = NULL;
3598 	g_blob = NULL;
3599 	g_blobid = SPDK_BLOBID_INVALID;
3600 
3601 	/* Mark second blob as invalid */
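	/* Bump the sequence number on what should be the first page of blobid2's metadata
	 * chain and recompute the CRC so it still passes the checksum; recovery during the
	 * dirty load below is then expected to treat this blob's metadata as invalid. */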
3602 	page_num = bs_blobid_to_page(blobid2);
3603 
3604 	index = DEV_BUFFER_BLOCKLEN * (bs->md_start + page_num);
3605 	page = (struct spdk_blob_md_page *)&g_dev_buffer[index];
3606 	page->sequence_num = 1;
3607 	page->crc = blob_md_page_calc_crc(page);
3608 
3609 	free_clusters = spdk_bs_free_cluster_count(bs);
3610 
3611 	ut_bs_dirty_load(&bs, NULL);
3612 
3613 	spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL);
3614 	poll_threads();
3615 	CU_ASSERT(g_bserrno != 0);
3616 	CU_ASSERT(g_blob == NULL);
3617 
3618 	spdk_bs_open_blob(bs, blobid3, blob_op_with_handle_complete, NULL);
3619 	poll_threads();
3620 	CU_ASSERT(g_bserrno == 0);
3621 	CU_ASSERT(g_blob != NULL);
3622 	blob = g_blob;
3623 
3624 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3625 }
3626 
3627 static void
3628 blob_flags(void)
3629 {
3630 	struct spdk_blob_store *bs = g_bs;
3631 	spdk_blob_id blobid_invalid, blobid_data_ro, blobid_md_ro;
3632 	struct spdk_blob *blob_invalid, *blob_data_ro, *blob_md_ro;
3633 	struct spdk_blob_opts blob_opts;
3634 	int rc;
3635 
3636 	/* Create three blobs - one each for testing invalid, data_ro and md_ro flags. */
3637 	blob_invalid = ut_blob_create_and_open(bs, NULL);
3638 	blobid_invalid = spdk_blob_get_id(blob_invalid);
3639 
3640 	blob_data_ro = ut_blob_create_and_open(bs, NULL);
3641 	blobid_data_ro = spdk_blob_get_id(blob_data_ro);
3642 
3643 	ut_spdk_blob_opts_init(&blob_opts);
3644 	blob_opts.clear_method = BLOB_CLEAR_WITH_WRITE_ZEROES;
3645 	blob_md_ro = ut_blob_create_and_open(bs, &blob_opts);
3646 	blobid_md_ro = spdk_blob_get_id(blob_md_ro);
3647 	CU_ASSERT((blob_md_ro->md_ro_flags & SPDK_BLOB_MD_RO_FLAGS_MASK) == BLOB_CLEAR_WITH_WRITE_ZEROES);
3648 
3649 	/* Change the size of blob_data_ro to check if flags are serialized
3650 	 * when the blob has a non-zero number of extents */
3651 	spdk_blob_resize(blob_data_ro, 10, blob_op_complete, NULL);
3652 	poll_threads();
3653 	CU_ASSERT(g_bserrno == 0);
3654 
3655 	/* Set the xattr to check if flags are serialized
3656 	 * when the blob has a non-zero number of xattrs */
3657 	rc = spdk_blob_set_xattr(blob_md_ro, "name", "log.txt", strlen("log.txt") + 1);
3658 	CU_ASSERT(rc == 0);
3659 
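	/* Set an unknown (unsupported) bit in each flags category.  After reload, the blob
	 * with an unknown invalid flag should fail to open, the one with an unknown data_ro
	 * flag should open as data and md read-only, and the one with an unknown md_ro flag
	 * should open with md read-only. */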
3660 	blob_invalid->invalid_flags = (1ULL << 63);
3661 	blob_invalid->state = SPDK_BLOB_STATE_DIRTY;
3662 	blob_data_ro->data_ro_flags = (1ULL << 62);
3663 	blob_data_ro->state = SPDK_BLOB_STATE_DIRTY;
3664 	blob_md_ro->md_ro_flags = (1ULL << 61);
3665 	blob_md_ro->state = SPDK_BLOB_STATE_DIRTY;
3666 
3667 	g_bserrno = -1;
3668 	spdk_blob_sync_md(blob_invalid, blob_op_complete, NULL);
3669 	poll_threads();
3670 	CU_ASSERT(g_bserrno == 0);
3671 	g_bserrno = -1;
3672 	spdk_blob_sync_md(blob_data_ro, blob_op_complete, NULL);
3673 	poll_threads();
3674 	CU_ASSERT(g_bserrno == 0);
3675 	g_bserrno = -1;
3676 	spdk_blob_sync_md(blob_md_ro, blob_op_complete, NULL);
3677 	poll_threads();
3678 	CU_ASSERT(g_bserrno == 0);
3679 
3680 	g_bserrno = -1;
3681 	spdk_blob_close(blob_invalid, blob_op_complete, NULL);
3682 	poll_threads();
3683 	CU_ASSERT(g_bserrno == 0);
3684 	blob_invalid = NULL;
3685 	g_bserrno = -1;
3686 	spdk_blob_close(blob_data_ro, blob_op_complete, NULL);
3687 	poll_threads();
3688 	CU_ASSERT(g_bserrno == 0);
3689 	blob_data_ro = NULL;
3690 	g_bserrno = -1;
3691 	spdk_blob_close(blob_md_ro, blob_op_complete, NULL);
3692 	poll_threads();
3693 	CU_ASSERT(g_bserrno == 0);
3694 	blob_md_ro = NULL;
3695 
3696 	g_blob = NULL;
3697 	g_blobid = SPDK_BLOBID_INVALID;
3698 
3699 	ut_bs_reload(&bs, NULL);
3700 
3701 	g_blob = NULL;
3702 	g_bserrno = 0;
3703 	spdk_bs_open_blob(bs, blobid_invalid, blob_op_with_handle_complete, NULL);
3704 	poll_threads();
3705 	CU_ASSERT(g_bserrno != 0);
3706 	CU_ASSERT(g_blob == NULL);
3707 
3708 	g_blob = NULL;
3709 	g_bserrno = -1;
3710 	spdk_bs_open_blob(bs, blobid_data_ro, blob_op_with_handle_complete, NULL);
3711 	poll_threads();
3712 	CU_ASSERT(g_bserrno == 0);
3713 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
3714 	blob_data_ro = g_blob;
3715 	/* If an unknown data_ro flag was found, the blob should be marked both data and md read-only. */
3716 	CU_ASSERT(blob_data_ro->data_ro == true);
3717 	CU_ASSERT(blob_data_ro->md_ro == true);
3718 	CU_ASSERT(spdk_blob_get_num_clusters(blob_data_ro) == 10);
3719 
3720 	g_blob = NULL;
3721 	g_bserrno = -1;
3722 	spdk_bs_open_blob(bs, blobid_md_ro, blob_op_with_handle_complete, NULL);
3723 	poll_threads();
3724 	CU_ASSERT(g_bserrno == 0);
3725 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
3726 	blob_md_ro = g_blob;
3727 	CU_ASSERT(blob_md_ro->data_ro == false);
3728 	CU_ASSERT(blob_md_ro->md_ro == true);
3729 
3730 	g_bserrno = -1;
3731 	spdk_blob_sync_md(blob_md_ro, blob_op_complete, NULL);
3732 	poll_threads();
3733 	CU_ASSERT(g_bserrno == 0);
3734 
3735 	ut_blob_close_and_delete(bs, blob_data_ro);
3736 	ut_blob_close_and_delete(bs, blob_md_ro);
3737 }
3738 
3739 static void
3740 bs_version(void)
3741 {
3742 	struct spdk_bs_super_block *super;
3743 	struct spdk_blob_store *bs = g_bs;
3744 	struct spdk_bs_dev *dev;
3745 	struct spdk_blob *blob;
3746 	struct spdk_blob_opts blob_opts;
3747 	spdk_blob_id blobid;
3748 
3749 	/* Unload the blob store */
3750 	spdk_bs_unload(bs, bs_op_complete, NULL);
3751 	poll_threads();
3752 	CU_ASSERT(g_bserrno == 0);
3753 	g_bs = NULL;
3754 
3755 	/*
3756 	 * Change the bs version on disk.  This will allow us to
3757 	 *  test that the version does not get modified automatically
3758 	 *  when loading and unloading the blobstore.
3759 	 */
3760 	super = (struct spdk_bs_super_block *)&g_dev_buffer[0];
3761 	CU_ASSERT(super->version == SPDK_BS_VERSION);
3762 	CU_ASSERT(super->clean == 1);
3763 	super->version = 2;
3764 	/*
3765 	 * Version 2 metadata does not have a used blobid mask, so clear
3766 	 *  those fields in the super block and zero the corresponding
3767 	 *  region on "disk".  We will use this to ensure blob IDs are
3768 	 *  correctly reconstructed.
3769 	 */
3770 	memset(&g_dev_buffer[super->used_blobid_mask_start * SPDK_BS_PAGE_SIZE], 0,
3771 	       super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE);
3772 	super->used_blobid_mask_start = 0;
3773 	super->used_blobid_mask_len = 0;
3774 	super->crc = blob_md_page_calc_crc(super);
3775 
3776 	/* Load an existing blob store */
3777 	dev = init_dev();
3778 	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
3779 	poll_threads();
3780 	CU_ASSERT(g_bserrno == 0);
3781 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3782 	CU_ASSERT(super->clean == 1);
3783 	bs = g_bs;
3784 
3785 	/*
3786 	 * Create a blob - just to make sure that unloading the blob store
3787 	 *  results in writing the super block (since metadata pages
3788 	 *  were allocated).
3789 	 */
3790 	ut_spdk_blob_opts_init(&blob_opts);
3791 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
3792 	poll_threads();
3793 	CU_ASSERT(g_bserrno == 0);
3794 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
3795 	blobid = g_blobid;
3796 
3797 	/* Unload the blob store */
3798 	spdk_bs_unload(bs, bs_op_complete, NULL);
3799 	poll_threads();
3800 	CU_ASSERT(g_bserrno == 0);
3801 	g_bs = NULL;
3802 	CU_ASSERT(super->version == 2);
3803 	CU_ASSERT(super->used_blobid_mask_start == 0);
3804 	CU_ASSERT(super->used_blobid_mask_len == 0);
3805 
3806 	dev = init_dev();
3807 	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
3808 	poll_threads();
3809 	CU_ASSERT(g_bserrno == 0);
3810 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3811 	bs = g_bs;
3812 
3813 	g_blob = NULL;
3814 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
3815 	poll_threads();
3816 	CU_ASSERT(g_bserrno == 0);
3817 	CU_ASSERT(g_blob != NULL);
3818 	blob = g_blob;
3819 
3820 	ut_blob_close_and_delete(bs, blob);
3821 
3822 	CU_ASSERT(super->version == 2);
3823 	CU_ASSERT(super->used_blobid_mask_start == 0);
3824 	CU_ASSERT(super->used_blobid_mask_len == 0);
3825 }
3826 
3827 static void
3828 blob_set_xattrs_test(void)
3829 {
3830 	struct spdk_blob_store *bs = g_bs;
3831 	struct spdk_blob *blob;
3832 	struct spdk_blob_opts opts;
3833 	const void *value;
3834 	size_t value_len;
3835 	char *xattr;
3836 	size_t xattr_length;
3837 	int rc;
3838 
3839 	/* Create blob with extra attributes */
3840 	ut_spdk_blob_opts_init(&opts);
3841 
3842 	opts.xattrs.names = g_xattr_names;
3843 	opts.xattrs.get_value = _get_xattr_value;
3844 	opts.xattrs.count = 3;
3845 	opts.xattrs.ctx = &g_ctx;
3846 
3847 	blob = ut_blob_create_and_open(bs, &opts);
3848 
3849 	/* Get the xattrs */
3850 	value = NULL;
3851 
3852 	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len);
3853 	CU_ASSERT(rc == 0);
3854 	SPDK_CU_ASSERT_FATAL(value != NULL);
3855 	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
3856 	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);
3857 
3858 	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len);
3859 	CU_ASSERT(rc == 0);
3860 	SPDK_CU_ASSERT_FATAL(value != NULL);
3861 	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
3862 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);
3863 
3864 	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len);
3865 	CU_ASSERT(rc == 0);
3866 	SPDK_CU_ASSERT_FATAL(value != NULL);
3867 	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
3868 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);
3869 
3870 	/* Try to get a non-existing attribute */
3871 
3872 	rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len);
3873 	CU_ASSERT(rc == -ENOENT);
3874 
3875 	/* Try an xattr exceeding the maximum descriptor length in a single page */
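	/* The extra byte makes the xattr descriptor one byte too large to fit in a single
	 * metadata page, so spdk_blob_set_xattr() is expected to fail with -ENOMEM. */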
3876 	xattr_length = SPDK_BS_MAX_DESC_SIZE - sizeof(struct spdk_blob_md_descriptor_xattr) -
3877 		       strlen("large_xattr") + 1;
3878 	xattr = calloc(xattr_length, sizeof(char));
3879 	SPDK_CU_ASSERT_FATAL(xattr != NULL);
3880 	rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
3881 	free(xattr);
3882 	SPDK_CU_ASSERT_FATAL(rc == -ENOMEM);
3883 
3884 	spdk_blob_close(blob, blob_op_complete, NULL);
3885 	poll_threads();
3886 	CU_ASSERT(g_bserrno == 0);
3887 	blob = NULL;
3888 	g_blob = NULL;
3889 	g_blobid = SPDK_BLOBID_INVALID;
3890 
3891 	/* NULL callback */
3892 	ut_spdk_blob_opts_init(&opts);
3893 	opts.xattrs.names = g_xattr_names;
3894 	opts.xattrs.get_value = NULL;
3895 	opts.xattrs.count = 1;
3896 	opts.xattrs.ctx = &g_ctx;
3897 
3898 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
3899 	poll_threads();
3900 	CU_ASSERT(g_bserrno == -EINVAL);
3901 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
3902 
3903 	/* NULL values */
3904 	ut_spdk_blob_opts_init(&opts);
3905 	opts.xattrs.names = g_xattr_names;
3906 	opts.xattrs.get_value = _get_xattr_value_null;
3907 	opts.xattrs.count = 1;
3908 	opts.xattrs.ctx = NULL;
3909 
3910 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
3911 	poll_threads();
3912 	CU_ASSERT(g_bserrno == -EINVAL);
3913 }
3914 
3915 static void
3916 blob_thin_prov_alloc(void)
3917 {
3918 	struct spdk_blob_store *bs = g_bs;
3919 	struct spdk_blob *blob;
3920 	struct spdk_blob_opts opts;
3921 	spdk_blob_id blobid;
3922 	uint64_t free_clusters;
3923 
3924 	free_clusters = spdk_bs_free_cluster_count(bs);
3925 
3926 	/* Set blob as thin provisioned */
3927 	ut_spdk_blob_opts_init(&opts);
3928 	opts.thin_provision = true;
3929 
3930 	blob = ut_blob_create_and_open(bs, &opts);
3931 	blobid = spdk_blob_get_id(blob);
3932 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3933 
3934 	CU_ASSERT(blob->active.num_clusters == 0);
3935 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
3936 
3937 	/* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */
3938 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
3939 	poll_threads();
3940 	CU_ASSERT(g_bserrno == 0);
3941 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3942 	CU_ASSERT(blob->active.num_clusters == 5);
3943 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
3944 
3945 	/* Grow it to 1TB - still unallocated */
3946 	spdk_blob_resize(blob, 262144, blob_op_complete, NULL);
3947 	poll_threads();
3948 	CU_ASSERT(g_bserrno == 0);
3949 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3950 	CU_ASSERT(blob->active.num_clusters == 262144);
3951 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 262144);
3952 
3953 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
3954 	poll_threads();
3955 	CU_ASSERT(g_bserrno == 0);
3956 	/* Sync must not change anything */
3957 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3958 	CU_ASSERT(blob->active.num_clusters == 262144);
3959 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 262144);
3960 	/* Since clusters are not allocated,
3961 	 * the number of metadata pages is expected to be minimal.
3962 	 */
3963 	CU_ASSERT(blob->active.num_pages == 1);
3964 
3965 	/* Shrink the blob to 3 clusters - still unallocated */
3966 	spdk_blob_resize(blob, 3, blob_op_complete, NULL);
3967 	poll_threads();
3968 	CU_ASSERT(g_bserrno == 0);
3969 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3970 	CU_ASSERT(blob->active.num_clusters == 3);
3971 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 3);
3972 
3973 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
3974 	poll_threads();
3975 	CU_ASSERT(g_bserrno == 0);
3976 	/* Sync must not change anything */
3977 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3978 	CU_ASSERT(blob->active.num_clusters == 3);
3979 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 3);
3980 
3981 	spdk_blob_close(blob, blob_op_complete, NULL);
3982 	poll_threads();
3983 	CU_ASSERT(g_bserrno == 0);
3984 
3985 	ut_bs_reload(&bs, NULL);
3986 
3987 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
3988 	poll_threads();
3989 	CU_ASSERT(g_bserrno == 0);
3990 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
3991 	blob = g_blob;
3992 
3993 	/* Check that clusters allocation and size is still the same */
3994 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3995 	CU_ASSERT(blob->active.num_clusters == 3);
3996 
3997 	ut_blob_close_and_delete(bs, blob);
3998 }
3999 
4000 static void
4001 blob_insert_cluster_msg_test(void)
4002 {
4003 	struct spdk_blob_store *bs = g_bs;
4004 	struct spdk_blob *blob;
4005 	struct spdk_blob_opts opts;
4006 	struct spdk_blob_md_page page = {};
4007 	spdk_blob_id blobid;
4008 	uint64_t free_clusters;
4009 	uint64_t new_cluster = 0;
4010 	uint32_t cluster_num = 3;
4011 	uint32_t extent_page = 0;
4012 
4013 	free_clusters = spdk_bs_free_cluster_count(bs);
4014 
4015 	/* Set blob as thin provisioned */
4016 	ut_spdk_blob_opts_init(&opts);
4017 	opts.thin_provision = true;
4018 	opts.num_clusters = 4;
4019 
4020 	blob = ut_blob_create_and_open(bs, &opts);
4021 	blobid = spdk_blob_get_id(blob);
4022 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4023 
4024 	CU_ASSERT(blob->active.num_clusters == 4);
4025 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 4);
4026 	CU_ASSERT(blob->active.clusters[cluster_num] == 0);
4027 
4028 	/* Allocate a cluster for cluster_num here; new_cluster is returned so it can be inserted on the md_thread.
4029 	 * This simulates the behaviour when a cluster is allocated after blob creation,
4030 	 * such as in _spdk_bs_allocate_and_copy_cluster(). */
4031 	spdk_spin_lock(&bs->used_lock);
4032 	bs_allocate_cluster(blob, cluster_num, &new_cluster, &extent_page, false);
4033 	CU_ASSERT(blob->active.clusters[cluster_num] == 0);
4034 	spdk_spin_unlock(&bs->used_lock);
4035 
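	/* At this point the cluster is reserved in the blobstore's used-cluster map, but it
	 * is not visible in the blob's cluster array until the insert message below is
	 * processed on the md thread. */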
4036 	blob_insert_cluster_on_md_thread(blob, cluster_num, new_cluster, extent_page, &page,
4037 					 blob_op_complete, NULL);
4038 	poll_threads();
4039 
4040 	CU_ASSERT(blob->active.clusters[cluster_num] != 0);
4041 
4042 	spdk_blob_close(blob, blob_op_complete, NULL);
4043 	poll_threads();
4044 	CU_ASSERT(g_bserrno == 0);
4045 
4046 	ut_bs_reload(&bs, NULL);
4047 
4048 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
4049 	poll_threads();
4050 	CU_ASSERT(g_bserrno == 0);
4051 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4052 	blob = g_blob;
4053 
4054 	CU_ASSERT(blob->active.clusters[cluster_num] != 0);
4055 
4056 	ut_blob_close_and_delete(bs, blob);
4057 }
4058 
4059 static void
4060 blob_thin_prov_rw(void)
4061 {
4062 	static const uint8_t zero[10 * 4096] = { 0 };
4063 	struct spdk_blob_store *bs = g_bs;
4064 	struct spdk_blob *blob, *blob_id0;
4065 	struct spdk_io_channel *channel, *channel_thread1;
4066 	struct spdk_blob_opts opts;
4067 	uint64_t free_clusters;
4068 	uint64_t page_size;
4069 	uint8_t payload_read[10 * 4096];
4070 	uint8_t payload_write[10 * 4096];
4071 	uint64_t write_bytes;
4072 	uint64_t read_bytes;
4073 
4074 	free_clusters = spdk_bs_free_cluster_count(bs);
4075 	page_size = spdk_bs_get_page_size(bs);
4076 
4077 	channel = spdk_bs_alloc_io_channel(bs);
4078 	CU_ASSERT(channel != NULL);
4079 
4080 	ut_spdk_blob_opts_init(&opts);
4081 	opts.thin_provision = true;
4082 
4083 	/* Create and delete a blob at md page 0, so that the next md page allocation
4084 	 * for an extent will reuse that page. */
4085 	blob_id0 = ut_blob_create_and_open(bs, &opts);
4086 	blob = ut_blob_create_and_open(bs, &opts);
4087 	ut_blob_close_and_delete(bs, blob_id0);
4088 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4089 
4090 	CU_ASSERT(blob->active.num_clusters == 0);
4091 
4092 	/* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */
4093 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
4094 	poll_threads();
4095 	CU_ASSERT(g_bserrno == 0);
4096 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4097 	CU_ASSERT(blob->active.num_clusters == 5);
4098 
4099 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
4100 	poll_threads();
4101 	CU_ASSERT(g_bserrno == 0);
4102 	/* Sync must not change anything */
4103 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4104 	CU_ASSERT(blob->active.num_clusters == 5);
4105 
4106 	/* Payload should be all zeros from unallocated clusters */
4107 	memset(payload_read, 0xFF, sizeof(payload_read));
4108 	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
4109 	poll_threads();
4110 	CU_ASSERT(g_bserrno == 0);
4111 	CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0);
4112 
4113 	write_bytes = g_dev_write_bytes;
4114 	read_bytes = g_dev_read_bytes;
4115 
4116 	/* Perform write on thread 1. That will allocate cluster on thread 0 via send_msg */
4117 	set_thread(1);
4118 	channel_thread1 = spdk_bs_alloc_io_channel(bs);
4119 	CU_ASSERT(channel_thread1 != NULL);
4120 	memset(payload_write, 0xE5, sizeof(payload_write));
4121 	spdk_blob_io_write(blob, channel_thread1, payload_write, 4, 10, blob_op_complete, NULL);
4122 	CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs));
4123 	/* Perform write on thread 0. That will try to allocate cluster,
4124 	 * but fail due to another thread issuing the cluster allocation first. */
4125 	set_thread(0);
4126 	memset(payload_write, 0xE5, sizeof(payload_write));
4127 	spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
4128 	CU_ASSERT(free_clusters - 2 == spdk_bs_free_cluster_count(bs));
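	/* Before polling, each thread has tentatively claimed a cluster, so the free count is
	 * down by two.  Once the md thread resolves the race, the losing allocation is
	 * released and only one cluster remains allocated. */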
4129 	poll_threads();
4130 	CU_ASSERT(g_bserrno == 0);
4131 	CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs));
4132 	/* For a thin-provisioned blob we need to write 20 pages of payload plus one page of
4133 	 * metadata, and read 0 bytes */
4134 	if (g_use_extent_table) {
4135 		/* Add one more page for EXTENT_PAGE write */
4136 		CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 22);
4137 	} else {
4138 		CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 21);
4139 	}
4140 	CU_ASSERT(g_dev_read_bytes - read_bytes == 0);
4141 
4142 	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
4143 	poll_threads();
4144 	CU_ASSERT(g_bserrno == 0);
4145 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
4146 
4147 	ut_blob_close_and_delete(bs, blob);
4148 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4149 
4150 	set_thread(1);
4151 	spdk_bs_free_io_channel(channel_thread1);
4152 	set_thread(0);
4153 	spdk_bs_free_io_channel(channel);
4154 	poll_threads();
4155 	g_blob = NULL;
4156 	g_blobid = 0;
4157 }
4158 
4159 static void
4160 blob_thin_prov_write_count_io(void)
4161 {
4162 	struct spdk_blob_store *bs;
4163 	struct spdk_blob *blob;
4164 	struct spdk_io_channel *ch;
4165 	struct spdk_bs_dev *dev;
4166 	struct spdk_bs_opts bs_opts;
4167 	struct spdk_blob_opts opts;
4168 	uint64_t free_clusters;
4169 	uint64_t page_size;
4170 	uint8_t payload_write[4096];
4171 	uint64_t write_bytes;
4172 	uint64_t read_bytes;
4173 	const uint32_t CLUSTER_SZ = 16384;
4174 	uint32_t pages_per_cluster;
4175 	uint32_t pages_per_extent_page;
4176 	uint32_t i;
4177 
4178 	/* Use a very small cluster size for this test.  This ensures we need multiple
4179 	 * extent pages to hold all of the clusters even for the relatively small blobs that
4180 	 * we are restricted to in the unit tests (i.e. we don't want to allocate multi-GB
4181 	 * buffers).
4182 	 */
4183 	dev = init_dev();
4184 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
4185 	bs_opts.cluster_sz = CLUSTER_SZ;
4186 
4187 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
4188 	poll_threads();
4189 	CU_ASSERT(g_bserrno == 0);
4190 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
4191 	bs = g_bs;
4192 
4193 	free_clusters = spdk_bs_free_cluster_count(bs);
4194 	page_size = spdk_bs_get_page_size(bs);
4195 	pages_per_cluster = CLUSTER_SZ / page_size;
4196 	pages_per_extent_page = SPDK_EXTENTS_PER_EP * pages_per_cluster;
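	/* pages_per_extent_page is the number of blob pages whose clusters are tracked by a
	 * single extent page; writes at offsets that are multiples of it therefore each touch
	 * a different extent page. */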
4197 
4198 	ch = spdk_bs_alloc_io_channel(bs);
4199 	SPDK_CU_ASSERT_FATAL(ch != NULL);
4200 
4201 	ut_spdk_blob_opts_init(&opts);
4202 	opts.thin_provision = true;
4203 
4204 	blob = ut_blob_create_and_open(bs, &opts);
4205 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4206 
4207 	/* Resize the blob so that it will require 8 extent pages to hold all of
4208 	 * the clusters.
4209 	 */
4210 	g_bserrno = -1;
4211 	spdk_blob_resize(blob, SPDK_EXTENTS_PER_EP * 8, blob_op_complete, NULL);
4212 	poll_threads();
4213 	CU_ASSERT(g_bserrno == 0);
4214 
4215 	g_bserrno = -1;
4216 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
4217 	poll_threads();
4218 	CU_ASSERT(g_bserrno == 0);
4219 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4220 	CU_ASSERT(blob->active.num_clusters == SPDK_EXTENTS_PER_EP * 8);
4221 
4222 	memset(payload_write, 0, sizeof(payload_write));
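	/* Each iteration writes to two previously unallocated clusters covered by extent
	 * page i, so the free cluster count drops by two per iteration (hence the 2 * i
	 * terms below). */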
4223 	for (i = 0; i < 8; i++) {
4224 		write_bytes = g_dev_write_bytes;
4225 		read_bytes = g_dev_read_bytes;
4226 
4227 		g_bserrno = -1;
4228 		spdk_blob_io_write(blob, ch, payload_write, pages_per_extent_page * i, 1, blob_op_complete, NULL);
4229 		poll_threads();
4230 		CU_ASSERT(g_bserrno == 0);
4231 		CU_ASSERT(free_clusters - (2 * i + 1) == spdk_bs_free_cluster_count(bs));
4232 
4233 		CU_ASSERT(g_dev_read_bytes == read_bytes);
4234 		if (!g_use_extent_table) {
4235 			/* For legacy metadata, we should have written two pages - one for the
4236 			 * write I/O itself, another for the blob's primary metadata.
4237 			 */
4238 			CU_ASSERT((g_dev_write_bytes - write_bytes) / page_size == 2);
4239 		} else {
4240 			/* For extent table metadata, we should have written three pages - one
4241 			 * for the write I/O, one for the extent page, one for the blob's primary
4242 			 * metadata.
4243 			 */
4244 			CU_ASSERT((g_dev_write_bytes - write_bytes) / page_size == 3);
4245 		}
4246 
4247 		/* The write should have synced the metadata already.  Do another sync here
4248 		 * just to confirm.
4249 		 */
4250 		write_bytes = g_dev_write_bytes;
4251 		read_bytes = g_dev_read_bytes;
4252 
4253 		g_bserrno = -1;
4254 		spdk_blob_sync_md(blob, blob_op_complete, NULL);
4255 		poll_threads();
4256 		CU_ASSERT(g_bserrno == 0);
4257 		CU_ASSERT(free_clusters - (2 * i + 1) == spdk_bs_free_cluster_count(bs));
4258 
4259 		CU_ASSERT(g_dev_read_bytes == read_bytes);
4260 		CU_ASSERT(g_dev_write_bytes == write_bytes);
4261 
4262 		/* Now write to another unallocated cluster that is part of the same extent page. */
4263 		g_bserrno = -1;
4264 		spdk_blob_io_write(blob, ch, payload_write, pages_per_extent_page * i + pages_per_cluster,
4265 				   1, blob_op_complete, NULL);
4266 		poll_threads();
4267 		CU_ASSERT(g_bserrno == 0);
4268 		CU_ASSERT(free_clusters - (2 * i + 2) == spdk_bs_free_cluster_count(bs));
4269 
4270 		CU_ASSERT(g_dev_read_bytes == read_bytes);
4271 		/*
4272 		 * For legacy metadata, we should have written the I/O and the primary metadata page.
4273 		 * For extent table metadata, we should have written the I/O and the extent metadata page.
4274 		 */
4275 		CU_ASSERT((g_dev_write_bytes - write_bytes) / page_size == 2);
4276 	}
4277 
4278 	ut_blob_close_and_delete(bs, blob);
4279 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4280 
4281 	spdk_bs_free_io_channel(ch);
4282 	poll_threads();
4283 	g_blob = NULL;
4284 	g_blobid = 0;
4285 
4286 	spdk_bs_unload(bs, bs_op_complete, NULL);
4287 	poll_threads();
4288 	CU_ASSERT(g_bserrno == 0);
4289 	g_bs = NULL;
4290 }
4291 
4292 static void
4293 blob_thin_prov_rle(void)
4294 {
4295 	static const uint8_t zero[10 * 4096] = { 0 };
4296 	struct spdk_blob_store *bs = g_bs;
4297 	struct spdk_blob *blob;
4298 	struct spdk_io_channel *channel;
4299 	struct spdk_blob_opts opts;
4300 	spdk_blob_id blobid;
4301 	uint64_t free_clusters;
4302 	uint64_t page_size;
4303 	uint8_t payload_read[10 * 4096];
4304 	uint8_t payload_write[10 * 4096];
4305 	uint64_t write_bytes;
4306 	uint64_t read_bytes;
4307 	uint64_t io_unit;
4308 
4309 	free_clusters = spdk_bs_free_cluster_count(bs);
4310 	page_size = spdk_bs_get_page_size(bs);
4311 
4312 	ut_spdk_blob_opts_init(&opts);
4313 	opts.thin_provision = true;
4314 	opts.num_clusters = 5;
4315 
4316 	blob = ut_blob_create_and_open(bs, &opts);
4317 	blobid = spdk_blob_get_id(blob);
4318 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4319 
4320 	channel = spdk_bs_alloc_io_channel(bs);
4321 	CU_ASSERT(channel != NULL);
4322 
4323 	/* Specifically target the second cluster in the blob as the first allocation */
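	/* bs_cluster_to_page() gives the first page of cluster 1 and bs_io_unit_per_page()
	 * converts pages to io units, so io_unit is the first io unit of the blob's second
	 * cluster. */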
4324 	io_unit = bs_cluster_to_page(bs, 1) * bs_io_unit_per_page(bs);
4325 
4326 	/* Payload should be all zeros from unallocated clusters */
4327 	memset(payload_read, 0xFF, sizeof(payload_read));
4328 	spdk_blob_io_read(blob, channel, payload_read, io_unit, 10, blob_op_complete, NULL);
4329 	poll_threads();
4330 	CU_ASSERT(g_bserrno == 0);
4331 	CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0);
4332 
4333 	write_bytes = g_dev_write_bytes;
4334 	read_bytes = g_dev_read_bytes;
4335 
4336 	/* Issue a write to the second cluster in the blob */
4337 	memset(payload_write, 0xE5, sizeof(payload_write));
4338 	spdk_blob_io_write(blob, channel, payload_write, io_unit, 10, blob_op_complete, NULL);
4339 	poll_threads();
4340 	CU_ASSERT(g_bserrno == 0);
4341 	CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs));
4342 	/* For a thin-provisioned blob we need to write 10 pages of payload plus one page of
4343 	 * metadata, and read 0 bytes */
4344 	if (g_use_extent_table) {
4345 		/* Add one more page for EXTENT_PAGE write */
4346 		CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 12);
4347 	} else {
4348 		CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 11);
4349 	}
4350 	CU_ASSERT(g_dev_read_bytes - read_bytes == 0);
4351 
4352 	spdk_blob_io_read(blob, channel, payload_read, io_unit, 10, blob_op_complete, NULL);
4353 	poll_threads();
4354 	CU_ASSERT(g_bserrno == 0);
4355 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
4356 
4357 	spdk_bs_free_io_channel(channel);
4358 	poll_threads();
4359 
4360 	spdk_blob_close(blob, blob_op_complete, NULL);
4361 	poll_threads();
4362 	CU_ASSERT(g_bserrno == 0);
4363 
4364 	ut_bs_reload(&bs, NULL);
4365 
4366 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
4367 	poll_threads();
4368 	CU_ASSERT(g_bserrno == 0);
4369 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4370 	blob = g_blob;
4371 
4372 	channel = spdk_bs_alloc_io_channel(bs);
4373 	CU_ASSERT(channel != NULL);
4374 
4375 	/* Read second cluster after blob reload to confirm data written */
4376 	spdk_blob_io_read(blob, channel, payload_read, io_unit, 10, blob_op_complete, NULL);
4377 	poll_threads();
4378 	CU_ASSERT(g_bserrno == 0);
4379 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
4380 
4381 	spdk_bs_free_io_channel(channel);
4382 	poll_threads();
4383 
4384 	ut_blob_close_and_delete(bs, blob);
4385 }
4386 
4387 static void
4388 blob_thin_prov_rw_iov(void)
4389 {
4390 	static const uint8_t zero[10 * 4096] = { 0 };
4391 	struct spdk_blob_store *bs = g_bs;
4392 	struct spdk_blob *blob;
4393 	struct spdk_io_channel *channel;
4394 	struct spdk_blob_opts opts;
4395 	uint64_t free_clusters;
4396 	uint8_t payload_read[10 * 4096];
4397 	uint8_t payload_write[10 * 4096];
4398 	struct iovec iov_read[3];
4399 	struct iovec iov_write[3];
4400 
4401 	free_clusters = spdk_bs_free_cluster_count(bs);
4402 
4403 	channel = spdk_bs_alloc_io_channel(bs);
4404 	CU_ASSERT(channel != NULL);
4405 
4406 	ut_spdk_blob_opts_init(&opts);
4407 	opts.thin_provision = true;
4408 
4409 	blob = ut_blob_create_and_open(bs, &opts);
4410 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4411 
4412 	CU_ASSERT(blob->active.num_clusters == 0);
4413 
4414 	/* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */
4415 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
4416 	poll_threads();
4417 	CU_ASSERT(g_bserrno == 0);
4418 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4419 	CU_ASSERT(blob->active.num_clusters == 5);
4420 
4421 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
4422 	poll_threads();
4423 	CU_ASSERT(g_bserrno == 0);
4424 	/* Sync must not change anything */
4425 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4426 	CU_ASSERT(blob->active.num_clusters == 5);
4427 
4428 	/* Payload should be all zeros from unallocated clusters */
4429 	memset(payload_read, 0xAA, sizeof(payload_read));
4430 	iov_read[0].iov_base = payload_read;
4431 	iov_read[0].iov_len = 3 * 4096;
4432 	iov_read[1].iov_base = payload_read + 3 * 4096;
4433 	iov_read[1].iov_len = 4 * 4096;
4434 	iov_read[2].iov_base = payload_read + 7 * 4096;
4435 	iov_read[2].iov_len = 3 * 4096;
4436 	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
4437 	poll_threads();
4438 	CU_ASSERT(g_bserrno == 0);
4439 	CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0);
4440 
4441 	memset(payload_write, 0xE5, sizeof(payload_write));
4442 	iov_write[0].iov_base = payload_write;
4443 	iov_write[0].iov_len = 1 * 4096;
4444 	iov_write[1].iov_base = payload_write + 1 * 4096;
4445 	iov_write[1].iov_len = 5 * 4096;
4446 	iov_write[2].iov_base = payload_write + 6 * 4096;
4447 	iov_write[2].iov_len = 4 * 4096;
4448 
4449 	spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
4450 	poll_threads();
4451 	CU_ASSERT(g_bserrno == 0);
4452 
4453 	memset(payload_read, 0xAA, sizeof(payload_read));
4454 	iov_read[0].iov_base = payload_read;
4455 	iov_read[0].iov_len = 3 * 4096;
4456 	iov_read[1].iov_base = payload_read + 3 * 4096;
4457 	iov_read[1].iov_len = 4 * 4096;
4458 	iov_read[2].iov_base = payload_read + 7 * 4096;
4459 	iov_read[2].iov_len = 3 * 4096;
4460 	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
4461 	poll_threads();
4462 	CU_ASSERT(g_bserrno == 0);
4463 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
4464 
4465 	spdk_bs_free_io_channel(channel);
4466 	poll_threads();
4467 
4468 	ut_blob_close_and_delete(bs, blob);
4469 }
4470 
4471 struct iter_ctx {
4472 	int		current_iter;
4473 	spdk_blob_id	blobid[4];
4474 };
4475 
4476 static void
4477 test_iter(void *arg, struct spdk_blob *blob, int bserrno)
4478 {
4479 	struct iter_ctx *iter_ctx = arg;
4480 	spdk_blob_id blobid;
4481 
4482 	CU_ASSERT(bserrno == 0);
4483 	blobid = spdk_blob_get_id(blob);
4484 	CU_ASSERT(blobid == iter_ctx->blobid[iter_ctx->current_iter++]);
4485 }
4486 
4487 static void
4488 bs_load_iter_test(void)
4489 {
4490 	struct spdk_blob_store *bs;
4491 	struct spdk_bs_dev *dev;
4492 	struct iter_ctx iter_ctx = { 0 };
4493 	struct spdk_blob *blob;
4494 	int i, rc;
4495 	struct spdk_bs_opts opts;
4496 
4497 	dev = init_dev();
4498 	spdk_bs_opts_init(&opts, sizeof(opts));
4499 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
4500 
4501 	/* Initialize a new blob store */
4502 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
4503 	poll_threads();
4504 	CU_ASSERT(g_bserrno == 0);
4505 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
4506 	bs = g_bs;
4507 
4508 	for (i = 0; i < 4; i++) {
4509 		blob = ut_blob_create_and_open(bs, NULL);
4510 		iter_ctx.blobid[i] = spdk_blob_get_id(blob);
4511 
4512 		/* Just save the blobid as an xattr for testing purposes. */
4513 		rc = spdk_blob_set_xattr(blob, "blobid", &iter_ctx.blobid[i], sizeof(spdk_blob_id));
4514 		CU_ASSERT(rc == 0);
4515 
4516 		/* Resize the blob */
4517 		spdk_blob_resize(blob, i, blob_op_complete, NULL);
4518 		poll_threads();
4519 		CU_ASSERT(g_bserrno == 0);
4520 
4521 		spdk_blob_close(blob, blob_op_complete, NULL);
4522 		poll_threads();
4523 		CU_ASSERT(g_bserrno == 0);
4524 	}
4525 
4526 	g_bserrno = -1;
4527 	spdk_bs_unload(bs, bs_op_complete, NULL);
4528 	poll_threads();
4529 	CU_ASSERT(g_bserrno == 0);
4530 
4531 	dev = init_dev();
4532 	spdk_bs_opts_init(&opts, sizeof(opts));
4533 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
4534 	opts.iter_cb_fn = test_iter;
4535 	opts.iter_cb_arg = &iter_ctx;
4536 
4537 	/* Test blob iteration during load after a clean shutdown. */
4538 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
4539 	poll_threads();
4540 	CU_ASSERT(g_bserrno == 0);
4541 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
4542 	bs = g_bs;
4543 
4544 	/* Dirty shutdown */
4545 	bs_free(bs);
4546 
4547 	dev = init_dev();
4548 	spdk_bs_opts_init(&opts, sizeof(opts));
4549 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
4550 	opts.iter_cb_fn = test_iter;
4551 	iter_ctx.current_iter = 0;
4552 	opts.iter_cb_arg = &iter_ctx;
4553 
4554 	/* Test blob iteration during load after a dirty shutdown. */
4555 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
4556 	poll_threads();
4557 	CU_ASSERT(g_bserrno == 0);
4558 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
4559 	bs = g_bs;
4560 
4561 	spdk_bs_unload(bs, bs_op_complete, NULL);
4562 	poll_threads();
4563 	CU_ASSERT(g_bserrno == 0);
4564 	g_bs = NULL;
4565 }
4566 
4567 static void
4568 blob_snapshot_rw(void)
4569 {
4570 	static const uint8_t zero[10 * 4096] = { 0 };
4571 	struct spdk_blob_store *bs = g_bs;
4572 	struct spdk_blob *blob, *snapshot;
4573 	struct spdk_io_channel *channel;
4574 	struct spdk_blob_opts opts;
4575 	spdk_blob_id blobid, snapshotid;
4576 	uint64_t free_clusters;
4577 	uint64_t cluster_size;
4578 	uint64_t page_size;
4579 	uint8_t payload_read[10 * 4096];
4580 	uint8_t payload_write[10 * 4096];
4581 	uint64_t write_bytes;
4582 	uint64_t read_bytes;
4583 
4584 	free_clusters = spdk_bs_free_cluster_count(bs);
4585 	cluster_size = spdk_bs_get_cluster_size(bs);
4586 	page_size = spdk_bs_get_page_size(bs);
4587 
4588 	channel = spdk_bs_alloc_io_channel(bs);
4589 	CU_ASSERT(channel != NULL);
4590 
4591 	ut_spdk_blob_opts_init(&opts);
4592 	opts.thin_provision = true;
4593 	opts.num_clusters = 5;
4594 
4595 	blob = ut_blob_create_and_open(bs, &opts);
4596 	blobid = spdk_blob_get_id(blob);
4597 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4598 
4599 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
4600 
4601 	memset(payload_read, 0xFF, sizeof(payload_read));
4602 	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
4603 	poll_threads();
4604 	CU_ASSERT(g_bserrno == 0);
4605 	CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0);
4606 
4607 	memset(payload_write, 0xE5, sizeof(payload_write));
4608 	spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
4609 	poll_threads();
4610 	CU_ASSERT(g_bserrno == 0);
4611 	CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs));
4612 
4613 	/* Create snapshot from blob */
4614 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
4615 	poll_threads();
4616 	CU_ASSERT(g_bserrno == 0);
4617 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
4618 	snapshotid = g_blobid;
4619 
4620 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
4621 	poll_threads();
4622 	CU_ASSERT(g_bserrno == 0);
4623 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4624 	snapshot = g_blob;
4625 	CU_ASSERT(snapshot->data_ro == true);
4626 	CU_ASSERT(snapshot->md_ro == true);
4627 
4628 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5);
4629 
4630 	write_bytes = g_dev_write_bytes;
4631 	read_bytes = g_dev_read_bytes;
4632 
4633 	memset(payload_write, 0xAA, sizeof(payload_write));
4634 	spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
4635 	poll_threads();
4636 	CU_ASSERT(g_bserrno == 0);
4637 	CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs));
4638 
4639 	/* For a clone we need to allocate and copy one cluster, update one page of metadata
4640 	 * and then write 10 pages of payload.
4641 	 */
4642 	if (g_use_extent_table) {
4643 		/* Add one more page for EXTENT_PAGE write */
4644 		CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 12 + cluster_size);
4645 	} else {
4646 		CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 11 + cluster_size);
4647 	}
4648 	CU_ASSERT(g_dev_read_bytes - read_bytes == cluster_size);
4649 
4650 	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
4651 	poll_threads();
4652 	CU_ASSERT(g_bserrno == 0);
4653 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
4654 
4655 	/* Data on snapshot should not change after write to clone */
4656 	memset(payload_write, 0xE5, sizeof(payload_write));
4657 	spdk_blob_io_read(snapshot, channel, payload_read, 4, 10, blob_op_complete, NULL);
4658 	poll_threads();
4659 	CU_ASSERT(g_bserrno == 0);
4660 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
4661 
4662 	ut_blob_close_and_delete(bs, blob);
4663 	ut_blob_close_and_delete(bs, snapshot);
4664 
4665 	spdk_bs_free_io_channel(channel);
4666 	poll_threads();
4667 	g_blob = NULL;
4668 	g_blobid = 0;
4669 }
4670 
4671 static void
4672 blob_snapshot_rw_iov(void)
4673 {
4674 	static const uint8_t zero[10 * 4096] = { 0 };
4675 	struct spdk_blob_store *bs = g_bs;
4676 	struct spdk_blob *blob, *snapshot;
4677 	struct spdk_io_channel *channel;
4678 	struct spdk_blob_opts opts;
4679 	spdk_blob_id blobid, snapshotid;
4680 	uint64_t free_clusters;
4681 	uint8_t payload_read[10 * 4096];
4682 	uint8_t payload_write[10 * 4096];
4683 	struct iovec iov_read[3];
4684 	struct iovec iov_write[3];
4685 
4686 	free_clusters = spdk_bs_free_cluster_count(bs);
4687 
4688 	channel = spdk_bs_alloc_io_channel(bs);
4689 	CU_ASSERT(channel != NULL);
4690 
4691 	ut_spdk_blob_opts_init(&opts);
4692 	opts.thin_provision = true;
4693 	opts.num_clusters = 5;
4694 
4695 	blob = ut_blob_create_and_open(bs, &opts);
4696 	blobid = spdk_blob_get_id(blob);
4697 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4698 
4699 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
4700 
4701 	/* Create snapshot from blob */
4702 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
4703 	poll_threads();
4704 	CU_ASSERT(g_bserrno == 0);
4705 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
4706 	snapshotid = g_blobid;
4707 
4708 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
4709 	poll_threads();
4710 	CU_ASSERT(g_bserrno == 0);
4711 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4712 	snapshot = g_blob;
4713 	CU_ASSERT(snapshot->data_ro == true);
4714 	CU_ASSERT(snapshot->md_ro == true);
4715 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5);
4716 
4717 	/* Payload should be all zeros from unallocated clusters */
4718 	memset(payload_read, 0xAA, sizeof(payload_read));
4719 	iov_read[0].iov_base = payload_read;
4720 	iov_read[0].iov_len = 3 * 4096;
4721 	iov_read[1].iov_base = payload_read + 3 * 4096;
4722 	iov_read[1].iov_len = 4 * 4096;
4723 	iov_read[2].iov_base = payload_read + 7 * 4096;
4724 	iov_read[2].iov_len = 3 * 4096;
4725 	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
4726 	poll_threads();
4727 	CU_ASSERT(g_bserrno == 0);
4728 	CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0);
4729 
4730 	memset(payload_write, 0xE5, sizeof(payload_write));
4731 	iov_write[0].iov_base = payload_write;
4732 	iov_write[0].iov_len = 1 * 4096;
4733 	iov_write[1].iov_base = payload_write + 1 * 4096;
4734 	iov_write[1].iov_len = 5 * 4096;
4735 	iov_write[2].iov_base = payload_write + 6 * 4096;
4736 	iov_write[2].iov_len = 4 * 4096;
4737 
4738 	spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
4739 	poll_threads();
4740 	CU_ASSERT(g_bserrno == 0);
4741 
4742 	memset(payload_read, 0xAA, sizeof(payload_read));
4743 	iov_read[0].iov_base = payload_read;
4744 	iov_read[0].iov_len = 3 * 4096;
4745 	iov_read[1].iov_base = payload_read + 3 * 4096;
4746 	iov_read[1].iov_len = 4 * 4096;
4747 	iov_read[2].iov_base = payload_read + 7 * 4096;
4748 	iov_read[2].iov_len = 3 * 4096;
4749 	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
4750 	poll_threads();
4751 	CU_ASSERT(g_bserrno == 0);
4752 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
4753 
4754 	spdk_bs_free_io_channel(channel);
4755 	poll_threads();
4756 
4757 	ut_blob_close_and_delete(bs, blob);
4758 	ut_blob_close_and_delete(bs, snapshot);
4759 }
4760 
4761 /**
4762  * Inflate / decouple parent rw unit tests.
4763  *
4764  * --------------
4765  * original blob:         0         1         2         3         4
4766  *                   ,---------+---------+---------+---------+---------.
4767  *         snapshot  |xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|    -    |
4768  *                   +---------+---------+---------+---------+---------+
4769  *         snapshot2 |    -    |yyyyyyyyy|    -    |yyyyyyyyy|    -    |
4770  *                   +---------+---------+---------+---------+---------+
4771  *         blob      |    -    |zzzzzzzzz|    -    |    -    |    -    |
4772  *                   '---------+---------+---------+---------+---------'
4773  *                   .         .         .         .         .         .
4774  * --------          .         .         .         .         .         .
4775  * inflate:          .         .         .         .         .         .
4776  *                   ,---------+---------+---------+---------+---------.
4777  *         blob      |xxxxxxxxx|zzzzzzzzz|xxxxxxxxx|yyyyyyyyy|000000000|
4778  *                   '---------+---------+---------+---------+---------'
4779  *
4780  *         NOTE: needs to allocate 4 clusters, thin provisioning removed, dependency
4781  *               on snapshot2 and snapshot removed .         .         .
4782  *                   .         .         .         .         .         .
4783  * ----------------  .         .         .         .         .         .
4784  * decouple parent:  .         .         .         .         .         .
4785  *                   ,---------+---------+---------+---------+---------.
4786  *         snapshot  |xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|    -    |
4787  *                   +---------+---------+---------+---------+---------+
4788  *         blob      |    -    |zzzzzzzzz|    -    |yyyyyyyyy|    -    |
4789  *                   '---------+---------+---------+---------+---------'
4790  *
4791  *         NOTE: needs to allocate 1 cluster, 3 clusters unallocated, dependency
4792  *               on snapshot2 removed and on snapshot still exists. Snapshot2
4793  *               should remain a clone of snapshot.
4794  */
4795 static void
4796 _blob_inflate_rw(bool decouple_parent)
4797 {
4798 	struct spdk_blob_store *bs = g_bs;
4799 	struct spdk_blob *blob, *snapshot, *snapshot2;
4800 	struct spdk_io_channel *channel;
4801 	struct spdk_blob_opts opts;
4802 	spdk_blob_id blobid, snapshotid, snapshot2id;
4803 	uint64_t free_clusters;
4804 	uint64_t cluster_size;
4805 
4806 	uint64_t payload_size;
4807 	uint8_t *payload_read;
4808 	uint8_t *payload_write;
4809 	uint8_t *payload_clone;
4810 
4811 	uint64_t pages_per_cluster;
4812 	uint64_t pages_per_payload;
4813 
4814 	int i;
4815 	spdk_blob_id ids[2];
4816 	size_t count;
4817 
4818 	free_clusters = spdk_bs_free_cluster_count(bs);
4819 	cluster_size = spdk_bs_get_cluster_size(bs);
4820 	pages_per_cluster = cluster_size / spdk_bs_get_page_size(bs);
4821 	pages_per_payload = pages_per_cluster * 5;
4822 
4823 	payload_size = cluster_size * 5;
4824 
4825 	payload_read = malloc(payload_size);
4826 	SPDK_CU_ASSERT_FATAL(payload_read != NULL);
4827 
4828 	payload_write = malloc(payload_size);
4829 	SPDK_CU_ASSERT_FATAL(payload_write != NULL);
4830 
4831 	payload_clone = malloc(payload_size);
4832 	SPDK_CU_ASSERT_FATAL(payload_clone != NULL);
4833 
4834 	channel = spdk_bs_alloc_io_channel(bs);
4835 	SPDK_CU_ASSERT_FATAL(channel != NULL);
4836 
4837 	/* Create blob */
4838 	ut_spdk_blob_opts_init(&opts);
4839 	opts.thin_provision = true;
4840 	opts.num_clusters = 5;
4841 
4842 	blob = ut_blob_create_and_open(bs, &opts);
4843 	blobid = spdk_blob_get_id(blob);
4844 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4845 
4846 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
4847 
4848 	/* 1) Initial read should return zeroed payload */
4849 	memset(payload_read, 0xFF, payload_size);
4850 	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload,
4851 			  blob_op_complete, NULL);
4852 	poll_threads();
4853 	CU_ASSERT(g_bserrno == 0);
4854 	CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size));
4855 
4856 	/* Fill the whole blob with a pattern, except the last cluster (to be sure it
4857 	 * isn't allocated) */
4858 	memset(payload_write, 0xE5, payload_size - cluster_size);
4859 	spdk_blob_io_write(blob, channel, payload_write, 0, pages_per_payload -
4860 			   pages_per_cluster, blob_op_complete, NULL);
4861 	poll_threads();
4862 	CU_ASSERT(g_bserrno == 0);
4863 	CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs));
4864 
4865 	/* 2) Create snapshot from blob (first level) */
4866 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
4867 	poll_threads();
4868 	CU_ASSERT(g_bserrno == 0);
4869 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
4870 	snapshotid = g_blobid;
4871 
4872 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
4873 	poll_threads();
4874 	CU_ASSERT(g_bserrno == 0);
4875 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4876 	snapshot = g_blob;
4877 	CU_ASSERT(snapshot->data_ro == true);
4878 	CU_ASSERT(snapshot->md_ro == true);
4879 
4880 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5);
4881 
4882 	/* Write every second cluster with a pattern.
4883 	 *
4884 	 * The last cluster shouldn't be written, to be sure that neither the
4885 	 * snapshot nor the clone allocates it.
4886 	 *
4887 	 * payload_clone stores the expected result of reading "blob" at this point
4888 	 * and is used only to check data consistency on the clone before and after
4889 	 * inflation. Initially we fill it with the backing snapshot's pattern
4890 	 * used before.
4891 	 */
4892 	memset(payload_clone, 0xE5, payload_size - cluster_size);
4893 	memset(payload_clone + payload_size - cluster_size, 0x00, cluster_size);
4894 	memset(payload_write, 0xAA, payload_size);
4895 	for (i = 1; i < 5; i += 2) {
4896 		spdk_blob_io_write(blob, channel, payload_write, i * pages_per_cluster,
4897 				   pages_per_cluster, blob_op_complete, NULL);
4898 		poll_threads();
4899 		CU_ASSERT(g_bserrno == 0);
4900 
4901 		/* Update expected result */
4902 		memcpy(payload_clone + (cluster_size * i), payload_write,
4903 		       cluster_size);
4904 	}
4905 	CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs));
4906 
4907 	/* Check data consistency on clone */
4908 	memset(payload_read, 0xFF, payload_size);
4909 	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload,
4910 			  blob_op_complete, NULL);
4911 	poll_threads();
4912 	CU_ASSERT(g_bserrno == 0);
4913 	CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0);
4914 
4915 	/* 3) Create a second-level snapshot from the blob */
4916 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
4917 	poll_threads();
4918 	CU_ASSERT(g_bserrno == 0);
4919 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
4920 	snapshot2id = g_blobid;
4921 
4922 	spdk_bs_open_blob(bs, snapshot2id, blob_op_with_handle_complete, NULL);
4923 	poll_threads();
4924 	CU_ASSERT(g_bserrno == 0);
4925 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4926 	snapshot2 = g_blob;
4927 	CU_ASSERT(snapshot2->data_ro == true);
4928 	CU_ASSERT(snapshot2->md_ro == true);
4929 
4930 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot2) == 5);
4931 
4932 	CU_ASSERT(snapshot2->parent_id == snapshotid);
4933 
4934 	/* Write one cluster on the top level blob. This cluster (1) covers an
4935 	 * already allocated cluster in snapshot2, so it shouldn't be inflated
4936 	 * at all */
4937 	spdk_blob_io_write(blob, channel, payload_write, pages_per_cluster,
4938 			   pages_per_cluster, blob_op_complete, NULL);
4939 	poll_threads();
4940 	CU_ASSERT(g_bserrno == 0);
4941 
4942 	/* Update expected result */
4943 	memcpy(payload_clone + cluster_size, payload_write, cluster_size);
4944 
4945 	/* Check data consistency on clone */
4946 	memset(payload_read, 0xFF, payload_size);
4947 	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload,
4948 			  blob_op_complete, NULL);
4949 	poll_threads();
4950 	CU_ASSERT(g_bserrno == 0);
4951 	CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0);
4952 
4953 
4954 	/* Close all blobs */
4955 	spdk_blob_close(blob, blob_op_complete, NULL);
4956 	poll_threads();
4957 	CU_ASSERT(g_bserrno == 0);
4958 
4959 	spdk_blob_close(snapshot2, blob_op_complete, NULL);
4960 	poll_threads();
4961 	CU_ASSERT(g_bserrno == 0);
4962 
4963 	spdk_blob_close(snapshot, blob_op_complete, NULL);
4964 	poll_threads();
4965 	CU_ASSERT(g_bserrno == 0);
4966 
4967 	/* Check snapshot-clone relations */
4968 	count = 2;
4969 	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);
4970 	CU_ASSERT(count == 1);
4971 	CU_ASSERT(ids[0] == snapshot2id);
4972 
4973 	count = 2;
4974 	CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0);
4975 	CU_ASSERT(count == 1);
4976 	CU_ASSERT(ids[0] == blobid);
4977 
4978 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshot2id);
4979 
4980 	free_clusters = spdk_bs_free_cluster_count(bs);
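	/* free_clusters is sampled here so the branches below can assert exactly
	 * how many clusters inflation/decouple_parent ends up allocating. */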
4981 	if (!decouple_parent) {
4982 		/* Do full blob inflation */
4983 		spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
4984 		poll_threads();
4985 		CU_ASSERT(g_bserrno == 0);
4986 
4987 		/* All clusters should be inflated (except the one already allocated
4988 		 * in the top level blob) */
4989 		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 4);
4990 
4991 		/* Check if relation tree updated correctly */
4992 		count = 2;
4993 		CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);
4994 
4995 		/* snapshotid has one clone */
4996 		CU_ASSERT(count == 1);
4997 		CU_ASSERT(ids[0] == snapshot2id);
4998 
4999 		/* snapshot2id has no clones */
5000 		count = 2;
5001 		CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0);
5002 		CU_ASSERT(count == 0);
5003 
5004 		CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID);
5005 	} else {
5006 		/* Decouple parent of blob */
5007 		spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
5008 		poll_threads();
5009 		CU_ASSERT(g_bserrno == 0);
5010 
5011 		/* Only one cluster from the parent should be inflated (the second one
5012 		 * is covered by a cluster written on the top level blob and is
5013 		 * already allocated) */
5014 		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 1);
5015 
5016 		/* Check if relation tree updated correctly */
5017 		count = 2;
5018 		CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);
5019 
5020 		/* snapshotid has two clones now */
5021 		CU_ASSERT(count == 2);
5022 		CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
5023 		CU_ASSERT(ids[0] == snapshot2id || ids[1] == snapshot2id);
5024 
5025 		/* snapshot2id has no clones */
5026 		count = 2;
5027 		CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0);
5028 		CU_ASSERT(count == 0);
5029 
5030 		CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
5031 	}
5032 
5033 	/* Try to delete snapshot2 (should pass) */
5034 	spdk_bs_delete_blob(bs, snapshot2id, blob_op_complete, NULL);
5035 	poll_threads();
5036 	CU_ASSERT(g_bserrno == 0);
5037 
5038 	/* Try to delete base snapshot */
5039 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
5040 	poll_threads();
5041 	CU_ASSERT(g_bserrno == 0);
5042 
5043 	/* Reopen blob after snapshot deletion */
5044 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
5045 	poll_threads();
5046 	CU_ASSERT(g_bserrno == 0);
5047 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5048 	blob = g_blob;
5049 
5050 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
5051 
5052 	/* Check data consistency on inflated blob */
5053 	memset(payload_read, 0xFF, payload_size);
5054 	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload,
5055 			  blob_op_complete, NULL);
5056 	poll_threads();
5057 	CU_ASSERT(g_bserrno == 0);
5058 	CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0);
5059 
5060 	spdk_bs_free_io_channel(channel);
5061 	poll_threads();
5062 
5063 	free(payload_read);
5064 	free(payload_write);
5065 	free(payload_clone);
5066 
5067 	ut_blob_close_and_delete(bs, blob);
5068 }
5069 
5070 static void
5071 blob_inflate_rw(void)
5072 {
5073 	_blob_inflate_rw(false);
5074 	_blob_inflate_rw(true);
5075 }
5076 
5077 /**
5078  * Snapshot-clones relation test
5079  *
5080  *         snapshot
5081  *            |
5082  *      +-----+-----+
5083  *      |           |
5084  *   blob(ro)   snapshot2
5085  *      |           |
5086  *   clone2      clone
5087  */
5088 static void
5089 blob_relations(void)
5090 {
5091 	struct spdk_blob_store *bs;
5092 	struct spdk_bs_dev *dev;
5093 	struct spdk_bs_opts bs_opts;
5094 	struct spdk_blob_opts opts;
5095 	struct spdk_blob *blob, *snapshot, *snapshot2, *clone, *clone2;
5096 	spdk_blob_id blobid, cloneid, snapshotid, cloneid2, snapshotid2;
5097 	int rc;
5098 	size_t count;
5099 	spdk_blob_id ids[10] = {};
5100 
5101 	dev = init_dev();
5102 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
5103 	snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");
5104 
5105 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
5106 	poll_threads();
5107 	CU_ASSERT(g_bserrno == 0);
5108 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
5109 	bs = g_bs;
5110 
5111 	/* 1. Create blob with 10 clusters */
5112 
5113 	ut_spdk_blob_opts_init(&opts);
5114 	opts.num_clusters = 10;
5115 
5116 	blob = ut_blob_create_and_open(bs, &opts);
5117 	blobid = spdk_blob_get_id(blob);
5118 
5119 	CU_ASSERT(!spdk_blob_is_read_only(blob));
5120 	CU_ASSERT(!spdk_blob_is_snapshot(blob));
5121 	CU_ASSERT(!spdk_blob_is_clone(blob));
5122 	CU_ASSERT(!spdk_blob_is_thin_provisioned(blob));
5123 
5124 	/* blob should not have underlying snapshot nor clones */
5125 	CU_ASSERT(blob->parent_id == SPDK_BLOBID_INVALID);
5126 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID);
5127 	count = SPDK_COUNTOF(ids);
5128 	rc = spdk_blob_get_clones(bs, blobid, ids, &count);
5129 	CU_ASSERT(rc == 0);
5130 	CU_ASSERT(count == 0);
5131 
5132 
5133 	/* 2. Create snapshot */
5134 
5135 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5136 	poll_threads();
5137 	CU_ASSERT(g_bserrno == 0);
5138 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5139 	snapshotid = g_blobid;
5140 
5141 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
5142 	poll_threads();
5143 	CU_ASSERT(g_bserrno == 0);
5144 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5145 	snapshot = g_blob;
5146 
5147 	CU_ASSERT(spdk_blob_is_read_only(snapshot));
5148 	CU_ASSERT(spdk_blob_is_snapshot(snapshot));
5149 	CU_ASSERT(!spdk_blob_is_clone(snapshot));
5150 	CU_ASSERT(snapshot->parent_id == SPDK_BLOBID_INVALID);
5151 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid) == SPDK_BLOBID_INVALID);
5152 
5153 	/* Check if original blob is converted to the clone of snapshot */
5154 	CU_ASSERT(!spdk_blob_is_read_only(blob));
5155 	CU_ASSERT(!spdk_blob_is_snapshot(blob));
5156 	CU_ASSERT(spdk_blob_is_clone(blob));
5157 	CU_ASSERT(spdk_blob_is_thin_provisioned(blob));
5158 	CU_ASSERT(blob->parent_id == snapshotid);
5159 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
5160 
5161 	count = SPDK_COUNTOF(ids);
5162 	rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
5163 	CU_ASSERT(rc == 0);
5164 	CU_ASSERT(count == 1);
5165 	CU_ASSERT(ids[0] == blobid);
5166 
5167 
5168 	/* 3. Create clone from snapshot */
5169 
5170 	spdk_bs_create_clone(bs, snapshotid, NULL, blob_op_with_id_complete, NULL);
5171 	poll_threads();
5172 	CU_ASSERT(g_bserrno == 0);
5173 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5174 	cloneid = g_blobid;
5175 
5176 	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
5177 	poll_threads();
5178 	CU_ASSERT(g_bserrno == 0);
5179 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5180 	clone = g_blob;
5181 
5182 	CU_ASSERT(!spdk_blob_is_read_only(clone));
5183 	CU_ASSERT(!spdk_blob_is_snapshot(clone));
5184 	CU_ASSERT(spdk_blob_is_clone(clone));
5185 	CU_ASSERT(spdk_blob_is_thin_provisioned(clone));
5186 	CU_ASSERT(clone->parent_id == snapshotid);
5187 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid);
5188 
5189 	count = SPDK_COUNTOF(ids);
5190 	rc = spdk_blob_get_clones(bs, cloneid, ids, &count);
5191 	CU_ASSERT(rc == 0);
5192 	CU_ASSERT(count == 0);
5193 
5194 	/* Check if clone is on the snapshot's list */
5195 	count = SPDK_COUNTOF(ids);
5196 	rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
5197 	CU_ASSERT(rc == 0);
5198 	CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
5199 	CU_ASSERT(ids[0] == cloneid || ids[1] == cloneid);
5200 
5201 
5202 	/* 4. Create snapshot of the clone */
5203 
5204 	spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL);
5205 	poll_threads();
5206 	CU_ASSERT(g_bserrno == 0);
5207 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5208 	snapshotid2 = g_blobid;
5209 
5210 	spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL);
5211 	poll_threads();
5212 	CU_ASSERT(g_bserrno == 0);
5213 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5214 	snapshot2 = g_blob;
5215 
5216 	CU_ASSERT(spdk_blob_is_read_only(snapshot2));
5217 	CU_ASSERT(spdk_blob_is_snapshot(snapshot2));
5218 	CU_ASSERT(spdk_blob_is_clone(snapshot2));
5219 	CU_ASSERT(snapshot2->parent_id == snapshotid);
5220 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid);
5221 
5222 	/* Check if clone is converted to the clone of snapshot2 and snapshot2
5223 	 * is a child of snapshot */
5224 	CU_ASSERT(!spdk_blob_is_read_only(clone));
5225 	CU_ASSERT(!spdk_blob_is_snapshot(clone));
5226 	CU_ASSERT(spdk_blob_is_clone(clone));
5227 	CU_ASSERT(spdk_blob_is_thin_provisioned(clone));
5228 	CU_ASSERT(clone->parent_id == snapshotid2);
5229 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2);
5230 
5231 	count = SPDK_COUNTOF(ids);
5232 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
5233 	CU_ASSERT(rc == 0);
5234 	CU_ASSERT(count == 1);
5235 	CU_ASSERT(ids[0] == cloneid);
5236 
5237 
5238 	/* 5. Try to create clone from read only blob */
5239 
5240 	/* Mark blob as read only */
5241 	spdk_blob_set_read_only(blob);
5242 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
5243 	poll_threads();
5244 	CU_ASSERT(g_bserrno == 0);
5245 
5246 	/* Check if previously created blob is read only clone */
5247 	CU_ASSERT(spdk_blob_is_read_only(blob));
5248 	CU_ASSERT(!spdk_blob_is_snapshot(blob));
5249 	CU_ASSERT(spdk_blob_is_clone(blob));
5250 	CU_ASSERT(spdk_blob_is_thin_provisioned(blob));
5251 
5252 	/* Create clone from read only blob */
5253 	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5254 	poll_threads();
5255 	CU_ASSERT(g_bserrno == 0);
5256 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5257 	cloneid2 = g_blobid;
5258 
5259 	spdk_bs_open_blob(bs, cloneid2, blob_op_with_handle_complete, NULL);
5260 	poll_threads();
5261 	CU_ASSERT(g_bserrno == 0);
5262 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5263 	clone2 = g_blob;
5264 
5265 	CU_ASSERT(!spdk_blob_is_read_only(clone2));
5266 	CU_ASSERT(!spdk_blob_is_snapshot(clone2));
5267 	CU_ASSERT(spdk_blob_is_clone(clone2));
5268 	CU_ASSERT(spdk_blob_is_thin_provisioned(clone2));
5269 
5270 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid);
5271 
5272 	count = SPDK_COUNTOF(ids);
5273 	rc = spdk_blob_get_clones(bs, blobid, ids, &count);
5274 	CU_ASSERT(rc == 0);
5275 
5276 	CU_ASSERT(count == 1);
5277 	CU_ASSERT(ids[0] == cloneid2);
5278 
5279 	/* Close blobs */
5280 
5281 	spdk_blob_close(clone2, blob_op_complete, NULL);
5282 	poll_threads();
5283 	CU_ASSERT(g_bserrno == 0);
5284 
5285 	spdk_blob_close(blob, blob_op_complete, NULL);
5286 	poll_threads();
5287 	CU_ASSERT(g_bserrno == 0);
5288 
5289 	spdk_blob_close(clone, blob_op_complete, NULL);
5290 	poll_threads();
5291 	CU_ASSERT(g_bserrno == 0);
5292 
5293 	spdk_blob_close(snapshot, blob_op_complete, NULL);
5294 	poll_threads();
5295 	CU_ASSERT(g_bserrno == 0);
5296 
5297 	spdk_blob_close(snapshot2, blob_op_complete, NULL);
5298 	poll_threads();
5299 	CU_ASSERT(g_bserrno == 0);
5300 
5301 	/* Try to delete snapshot with more than 1 clone */
5302 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
5303 	poll_threads();
5304 	CU_ASSERT(g_bserrno != 0);
5305 
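	/* Reload the blobstore to verify that the snapshot-clone relations were persisted */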
5306 	ut_bs_reload(&bs, &bs_opts);
5307 
5308 	/* A NULL ids array should return the number of clones in count */
5309 	count = SPDK_COUNTOF(ids);
5310 	rc = spdk_blob_get_clones(bs, snapshotid, NULL, &count);
5311 	CU_ASSERT(rc == -ENOMEM);
5312 	CU_ASSERT(count == 2);
5313 
5314 	/* incorrect array size */
5315 	count = 1;
5316 	rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
5317 	CU_ASSERT(rc == -ENOMEM);
5318 	CU_ASSERT(count == 2);
5319 
5320 
5321 	/* Verify structure of loaded blob store */
5322 
5323 	/* snapshot */
5324 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid) == SPDK_BLOBID_INVALID);
5325 
5326 	count = SPDK_COUNTOF(ids);
5327 	rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
5328 	CU_ASSERT(rc == 0);
5329 	CU_ASSERT(count == 2);
5330 	CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
5331 	CU_ASSERT(ids[0] == snapshotid2 || ids[1] == snapshotid2);
5332 
5333 	/* blob */
5334 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
5335 	count = SPDK_COUNTOF(ids);
5336 	rc = spdk_blob_get_clones(bs, blobid, ids, &count);
5337 	CU_ASSERT(rc == 0);
5338 	CU_ASSERT(count == 1);
5339 	CU_ASSERT(ids[0] == cloneid2);
5340 
5341 	/* clone */
5342 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2);
5343 	count = SPDK_COUNTOF(ids);
5344 	rc = spdk_blob_get_clones(bs, cloneid, ids, &count);
5345 	CU_ASSERT(rc == 0);
5346 	CU_ASSERT(count == 0);
5347 
5348 	/* snapshot2 */
5349 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid);
5350 	count = SPDK_COUNTOF(ids);
5351 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
5352 	CU_ASSERT(rc == 0);
5353 	CU_ASSERT(count == 1);
5354 	CU_ASSERT(ids[0] == cloneid);
5355 
5356 	/* clone2 */
5357 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid);
5358 	count = SPDK_COUNTOF(ids);
5359 	rc = spdk_blob_get_clones(bs, cloneid2, ids, &count);
5360 	CU_ASSERT(rc == 0);
5361 	CU_ASSERT(count == 0);
5362 
5363 	/* Try to delete a blob that the user should not be able to remove */
5364 
5365 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
5366 	poll_threads();
5367 	CU_ASSERT(g_bserrno != 0);
5368 
5369 	/* Remove all blobs */
5370 
5371 	spdk_bs_delete_blob(bs, cloneid, blob_op_complete, NULL);
5372 	poll_threads();
5373 	CU_ASSERT(g_bserrno == 0);
5374 
5375 	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
5376 	poll_threads();
5377 	CU_ASSERT(g_bserrno == 0);
5378 
5379 	spdk_bs_delete_blob(bs, cloneid2, blob_op_complete, NULL);
5380 	poll_threads();
5381 	CU_ASSERT(g_bserrno == 0);
5382 
5383 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
5384 	poll_threads();
5385 	CU_ASSERT(g_bserrno == 0);
5386 
5387 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
5388 	poll_threads();
5389 	CU_ASSERT(g_bserrno == 0);
5390 
5391 	spdk_bs_unload(bs, bs_op_complete, NULL);
5392 	poll_threads();
5393 	CU_ASSERT(g_bserrno == 0);
5394 
5395 	g_bs = NULL;
5396 }
5397 
5398 /**
5399  * Snapshot-clones relation test 2
5400  *
5401  *         snapshot1
5402  *            |
5403  *         snapshot2
5404  *            |
5405  *      +-----+-----+
5406  *      |           |
5407  *   blob(ro)   snapshot3
5408  *      |           |
5409  *      |       snapshot4
5410  *      |        |     |
5411  *   clone2   clone  clone3
5412  */
5413 static void
5414 blob_relations2(void)
5415 {
5416 	struct spdk_blob_store *bs;
5417 	struct spdk_bs_dev *dev;
5418 	struct spdk_bs_opts bs_opts;
5419 	struct spdk_blob_opts opts;
5420 	struct spdk_blob *blob, *snapshot1, *snapshot2, *snapshot3, *snapshot4, *clone, *clone2;
5421 	spdk_blob_id blobid, snapshotid1, snapshotid2, snapshotid3, snapshotid4, cloneid, cloneid2,
5422 		     cloneid3;
5423 	int rc;
5424 	size_t count;
5425 	spdk_blob_id ids[10] = {};
5426 
5427 	dev = init_dev();
5428 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
5429 	snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");
5430 
5431 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
5432 	poll_threads();
5433 	CU_ASSERT(g_bserrno == 0);
5434 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
5435 	bs = g_bs;
5436 
5437 	/* 1. Create blob with 10 clusters */
5438 
5439 	ut_spdk_blob_opts_init(&opts);
5440 	opts.num_clusters = 10;
5441 
5442 	blob = ut_blob_create_and_open(bs, &opts);
5443 	blobid = spdk_blob_get_id(blob);
5444 
5445 	/* 2. Create snapshot1 */
5446 
5447 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5448 	poll_threads();
5449 	CU_ASSERT(g_bserrno == 0);
5450 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5451 	snapshotid1 = g_blobid;
5452 
5453 	spdk_bs_open_blob(bs, snapshotid1, blob_op_with_handle_complete, NULL);
5454 	poll_threads();
5455 	CU_ASSERT(g_bserrno == 0);
5456 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5457 	snapshot1 = g_blob;
5458 
5459 	CU_ASSERT(snapshot1->parent_id == SPDK_BLOBID_INVALID);
5460 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid1) == SPDK_BLOBID_INVALID);
5461 
5462 	CU_ASSERT(blob->parent_id == snapshotid1);
5463 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid1);
5464 
5465 	/* Check if blob is the clone of snapshot1 */
5466 	CU_ASSERT(blob->parent_id == snapshotid1);
5467 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid1);
5468 
5469 	count = SPDK_COUNTOF(ids);
5470 	rc = spdk_blob_get_clones(bs, snapshotid1, ids, &count);
5471 	CU_ASSERT(rc == 0);
5472 	CU_ASSERT(count == 1);
5473 	CU_ASSERT(ids[0] == blobid);
5474 
5475 	/* 3. Create another snapshot */
5476 
5477 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5478 	poll_threads();
5479 	CU_ASSERT(g_bserrno == 0);
5480 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5481 	snapshotid2 = g_blobid;
5482 
5483 	spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL);
5484 	poll_threads();
5485 	CU_ASSERT(g_bserrno == 0);
5486 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5487 	snapshot2 = g_blob;
5488 
5489 	CU_ASSERT(spdk_blob_is_clone(snapshot2));
5490 	CU_ASSERT(snapshot2->parent_id == snapshotid1);
5491 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid1);
5492 
5493 	/* Check if snapshot2 is the clone of snapshot1 and blob
5494 	 * is a child of snapshot2 */
5495 	CU_ASSERT(blob->parent_id == snapshotid2);
5496 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid2);
5497 
5498 	count = SPDK_COUNTOF(ids);
5499 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
5500 	CU_ASSERT(rc == 0);
5501 	CU_ASSERT(count == 1);
5502 	CU_ASSERT(ids[0] == blobid);
5503 
5504 	/* 4. Create clone from snapshot */
5505 
5506 	spdk_bs_create_clone(bs, snapshotid2, NULL, blob_op_with_id_complete, NULL);
5507 	poll_threads();
5508 	CU_ASSERT(g_bserrno == 0);
5509 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5510 	cloneid = g_blobid;
5511 
5512 	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
5513 	poll_threads();
5514 	CU_ASSERT(g_bserrno == 0);
5515 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5516 	clone = g_blob;
5517 
5518 	CU_ASSERT(clone->parent_id == snapshotid2);
5519 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2);
5520 
5521 	/* Check if clone is on the snapshot's list */
5522 	count = SPDK_COUNTOF(ids);
5523 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
5524 	CU_ASSERT(rc == 0);
5525 	CU_ASSERT(count == 2);
5526 	CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
5527 	CU_ASSERT(ids[0] == cloneid || ids[1] == cloneid);
5528 
5529 	/* 5. Create snapshot of the clone */
5530 
5531 	spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL);
5532 	poll_threads();
5533 	CU_ASSERT(g_bserrno == 0);
5534 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5535 	snapshotid3 = g_blobid;
5536 
5537 	spdk_bs_open_blob(bs, snapshotid3, blob_op_with_handle_complete, NULL);
5538 	poll_threads();
5539 	CU_ASSERT(g_bserrno == 0);
5540 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5541 	snapshot3 = g_blob;
5542 
5543 	CU_ASSERT(snapshot3->parent_id == snapshotid2);
5544 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid3) == snapshotid2);
5545 
5546 	/* Check if clone is converted to the clone of snapshot3 and snapshot3
5547 	 * is a child of snapshot2 */
5548 	CU_ASSERT(clone->parent_id == snapshotid3);
5549 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3);
5550 
5551 	count = SPDK_COUNTOF(ids);
5552 	rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count);
5553 	CU_ASSERT(rc == 0);
5554 	CU_ASSERT(count == 1);
5555 	CU_ASSERT(ids[0] == cloneid);
5556 
5557 	/* 6. Create another snapshot of the clone */
5558 
5559 	spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL);
5560 	poll_threads();
5561 	CU_ASSERT(g_bserrno == 0);
5562 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5563 	snapshotid4 = g_blobid;
5564 
5565 	spdk_bs_open_blob(bs, snapshotid4, blob_op_with_handle_complete, NULL);
5566 	poll_threads();
5567 	CU_ASSERT(g_bserrno == 0);
5568 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5569 	snapshot4 = g_blob;
5570 
5571 	CU_ASSERT(snapshot4->parent_id == snapshotid3);
5572 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid4) == snapshotid3);
5573 
5574 	/* Check if clone is converted to the clone of snapshot4 and snapshot4
5575 	 * is a child of snapshot3 */
5576 	CU_ASSERT(clone->parent_id == snapshotid4);
5577 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid4);
5578 
5579 	count = SPDK_COUNTOF(ids);
5580 	rc = spdk_blob_get_clones(bs, snapshotid4, ids, &count);
5581 	CU_ASSERT(rc == 0);
5582 	CU_ASSERT(count == 1);
5583 	CU_ASSERT(ids[0] == cloneid);
5584 
5585 	/* 7. Remove snapshot 4 */
5586 
5587 	ut_blob_close_and_delete(bs, snapshot4);
5588 
5589 	/* Check if relations are back to state from before creating snapshot 4 */
5590 	CU_ASSERT(clone->parent_id == snapshotid3);
5591 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3);
5592 
5593 	count = SPDK_COUNTOF(ids);
5594 	rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count);
5595 	CU_ASSERT(rc == 0);
5596 	CU_ASSERT(count == 1);
5597 	CU_ASSERT(ids[0] == cloneid);
5598 
5599 	/* 8. Create second clone of snapshot 3 and try to remove snapshot 3 */
5600 
5601 	spdk_bs_create_clone(bs, snapshotid3, NULL, blob_op_with_id_complete, NULL);
5602 	poll_threads();
5603 	CU_ASSERT(g_bserrno == 0);
5604 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5605 	cloneid3 = g_blobid;
5606 
5607 	spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL);
5608 	poll_threads();
5609 	CU_ASSERT(g_bserrno != 0);
5610 
5611 	/* 9. Open snapshot 3 again and try to remove it while clone 3 is closed */
5612 
5613 	spdk_bs_open_blob(bs, snapshotid3, blob_op_with_handle_complete, NULL);
5614 	poll_threads();
5615 	CU_ASSERT(g_bserrno == 0);
5616 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5617 	snapshot3 = g_blob;
5618 
5619 	spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL);
5620 	poll_threads();
5621 	CU_ASSERT(g_bserrno != 0);
5622 
5623 	spdk_blob_close(snapshot3, blob_op_complete, NULL);
5624 	poll_threads();
5625 	CU_ASSERT(g_bserrno == 0);
5626 
5627 	spdk_bs_delete_blob(bs, cloneid3, blob_op_complete, NULL);
5628 	poll_threads();
5629 	CU_ASSERT(g_bserrno == 0);
5630 
5631 	/* 10. Remove snapshot 1 */
5632 
5633 	ut_blob_close_and_delete(bs, snapshot1);
5634 
5635 	/* Check if relations were updated correctly after removing snapshot 1: snapshot2 is now the root of the tree */
5636 	CU_ASSERT(snapshot2->parent_id == SPDK_BLOBID_INVALID);
5637 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == SPDK_BLOBID_INVALID);
5638 
5639 	count = SPDK_COUNTOF(ids);
5640 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
5641 	CU_ASSERT(rc == 0);
5642 	CU_ASSERT(count == 2);
5643 	CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
5644 	CU_ASSERT(ids[0] == snapshotid3 || ids[1] == snapshotid3);
5645 
5646 	/* 11. Try to create clone from read only blob */
5647 
5648 	/* Mark blob as read only */
5649 	spdk_blob_set_read_only(blob);
5650 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
5651 	poll_threads();
5652 	CU_ASSERT(g_bserrno == 0);
5653 
5654 	/* Create clone from read only blob */
5655 	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5656 	poll_threads();
5657 	CU_ASSERT(g_bserrno == 0);
5658 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5659 	cloneid2 = g_blobid;
5660 
5661 	spdk_bs_open_blob(bs, cloneid2, blob_op_with_handle_complete, NULL);
5662 	poll_threads();
5663 	CU_ASSERT(g_bserrno == 0);
5664 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5665 	clone2 = g_blob;
5666 
5667 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid);
5668 
5669 	count = SPDK_COUNTOF(ids);
5670 	rc = spdk_blob_get_clones(bs, blobid, ids, &count);
5671 	CU_ASSERT(rc == 0);
5672 	CU_ASSERT(count == 1);
5673 	CU_ASSERT(ids[0] == cloneid2);
5674 
5675 	/* Close blobs */
5676 
5677 	spdk_blob_close(clone2, blob_op_complete, NULL);
5678 	poll_threads();
5679 	CU_ASSERT(g_bserrno == 0);
5680 
5681 	spdk_blob_close(blob, blob_op_complete, NULL);
5682 	poll_threads();
5683 	CU_ASSERT(g_bserrno == 0);
5684 
5685 	spdk_blob_close(clone, blob_op_complete, NULL);
5686 	poll_threads();
5687 	CU_ASSERT(g_bserrno == 0);
5688 
5689 	spdk_blob_close(snapshot2, blob_op_complete, NULL);
5690 	poll_threads();
5691 	CU_ASSERT(g_bserrno == 0);
5692 
5693 	spdk_blob_close(snapshot3, blob_op_complete, NULL);
5694 	poll_threads();
5695 	CU_ASSERT(g_bserrno == 0);
5696 
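	/* Reload the blobstore to verify that the relations survived persistence */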
5697 	ut_bs_reload(&bs, &bs_opts);
5698 
5699 	/* Verify structure of loaded blob store */
5700 
5701 	/* snapshot2 */
5702 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == SPDK_BLOBID_INVALID);
5703 
5704 	count = SPDK_COUNTOF(ids);
5705 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
5706 	CU_ASSERT(rc == 0);
5707 	CU_ASSERT(count == 2);
5708 	CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
5709 	CU_ASSERT(ids[0] == snapshotid3 || ids[1] == snapshotid3);
5710 
5711 	/* blob */
5712 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid2);
5713 	count = SPDK_COUNTOF(ids);
5714 	rc = spdk_blob_get_clones(bs, blobid, ids, &count);
5715 	CU_ASSERT(rc == 0);
5716 	CU_ASSERT(count == 1);
5717 	CU_ASSERT(ids[0] == cloneid2);
5718 
5719 	/* clone */
5720 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3);
5721 	count = SPDK_COUNTOF(ids);
5722 	rc = spdk_blob_get_clones(bs, cloneid, ids, &count);
5723 	CU_ASSERT(rc == 0);
5724 	CU_ASSERT(count == 0);
5725 
5726 	/* snapshot3 */
5727 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid3) == snapshotid2);
5728 	count = SPDK_COUNTOF(ids);
5729 	rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count);
5730 	CU_ASSERT(rc == 0);
5731 	CU_ASSERT(count == 1);
5732 	CU_ASSERT(ids[0] == cloneid);
5733 
5734 	/* clone2 */
5735 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid);
5736 	count = SPDK_COUNTOF(ids);
5737 	rc = spdk_blob_get_clones(bs, cloneid2, ids, &count);
5738 	CU_ASSERT(rc == 0);
5739 	CU_ASSERT(count == 0);
5740 
5741 	/* Try to delete all blobs in the worst possible order */
5742 
5743 	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
5744 	poll_threads();
5745 	CU_ASSERT(g_bserrno != 0);
5746 
5747 	spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL);
5748 	poll_threads();
5749 	CU_ASSERT(g_bserrno == 0);
5750 
5751 	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
5752 	poll_threads();
5753 	CU_ASSERT(g_bserrno != 0);
5754 
5755 	spdk_bs_delete_blob(bs, cloneid, blob_op_complete, NULL);
5756 	poll_threads();
5757 	CU_ASSERT(g_bserrno == 0);
5758 
5759 	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
5760 	poll_threads();
5761 	CU_ASSERT(g_bserrno == 0);
5762 
5763 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
5764 	poll_threads();
5765 	CU_ASSERT(g_bserrno == 0);
5766 
5767 	spdk_bs_delete_blob(bs, cloneid2, blob_op_complete, NULL);
5768 	poll_threads();
5769 	CU_ASSERT(g_bserrno == 0);
5770 
5771 	spdk_bs_unload(bs, bs_op_complete, NULL);
5772 	poll_threads();
5773 	CU_ASSERT(g_bserrno == 0);
5774 
5775 	g_bs = NULL;
5776 }
5777 
5778 /**
5779  * Snapshot-clones relation test 3
5780  *
5781  *         snapshot0
5782  *            |
5783  *         snapshot1
5784  *            |
5785  *         snapshot2
5786  *            |
5787  *           blob
5788  */
5789 static void
5790 blob_relations3(void)
5791 {
5792 	struct spdk_blob_store *bs;
5793 	struct spdk_bs_dev *dev;
5794 	struct spdk_io_channel *channel;
5795 	struct spdk_bs_opts bs_opts;
5796 	struct spdk_blob_opts opts;
5797 	struct spdk_blob *blob;
5798 	spdk_blob_id blobid, snapshotid0, snapshotid1, snapshotid2;
5799 
5800 	dev = init_dev();
5801 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
5802 	snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");
5803 
5804 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
5805 	poll_threads();
5806 	CU_ASSERT(g_bserrno == 0);
5807 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
5808 	bs = g_bs;
5809 
5810 	channel = spdk_bs_alloc_io_channel(bs);
5811 	SPDK_CU_ASSERT_FATAL(channel != NULL);
5812 
5813 	/* 1. Create blob with 10 clusters */
5814 	ut_spdk_blob_opts_init(&opts);
5815 	opts.num_clusters = 10;
5816 
5817 	blob = ut_blob_create_and_open(bs, &opts);
5818 	blobid = spdk_blob_get_id(blob);
5819 
5820 	/* 2. Create snapshot0 */
5821 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5822 	poll_threads();
5823 	CU_ASSERT(g_bserrno == 0);
5824 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5825 	snapshotid0 = g_blobid;
5826 
5827 	/* 3. Create snapshot1 */
5828 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5829 	poll_threads();
5830 	CU_ASSERT(g_bserrno == 0);
5831 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5832 	snapshotid1 = g_blobid;
5833 
5834 	/* 4. Create snapshot2 */
5835 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5836 	poll_threads();
5837 	CU_ASSERT(g_bserrno == 0);
5838 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5839 	snapshotid2 = g_blobid;
5840 
5841 	/* 5. Decouple blob */
5842 	spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
5843 	poll_threads();
5844 	CU_ASSERT(g_bserrno == 0);
5845 
5846 	/* 6. Decouple snapshot2. Make sure updating md of snapshot2 is possible */
5847 	spdk_bs_blob_decouple_parent(bs, channel, snapshotid2, blob_op_complete, NULL);
5848 	poll_threads();
5849 	CU_ASSERT(g_bserrno == 0);
5850 
5851 	/* 7. Delete blob */
5852 	spdk_blob_close(blob, blob_op_complete, NULL);
5853 	poll_threads();
5854 	CU_ASSERT(g_bserrno == 0);
5855 
5856 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
5857 	poll_threads();
5858 	CU_ASSERT(g_bserrno == 0);
5859 
5860 	/* 8. Delete snapshot2.
5861 	 * If md of snapshot 2 was updated, it should be possible to delete it */
5862 	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
5863 	poll_threads();
5864 	CU_ASSERT(g_bserrno == 0);
5865 
5866 	/* Remove remaining blobs and unload bs */
5867 	spdk_bs_delete_blob(bs, snapshotid1, blob_op_complete, NULL);
5868 	poll_threads();
5869 	CU_ASSERT(g_bserrno == 0);
5870 
5871 	spdk_bs_delete_blob(bs, snapshotid0, blob_op_complete, NULL);
5872 	poll_threads();
5873 	CU_ASSERT(g_bserrno == 0);
5874 
5875 	spdk_bs_free_io_channel(channel);
5876 	poll_threads();
5877 
5878 	spdk_bs_unload(bs, bs_op_complete, NULL);
5879 	poll_threads();
5880 	CU_ASSERT(g_bserrno == 0);
5881 
5882 	g_bs = NULL;
5883 }
5884 
5885 static void
5886 blobstore_clean_power_failure(void)
5887 {
5888 	struct spdk_blob_store *bs;
5889 	struct spdk_blob *blob;
5890 	struct spdk_power_failure_thresholds thresholds = {};
5891 	bool clean = false;
5892 	struct spdk_bs_super_block *super = (struct spdk_bs_super_block *)&g_dev_buffer[0];
5893 	struct spdk_bs_super_block super_copy = {};
5894 
5895 	thresholds.general_threshold = 1;
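	/* Allow one more dev I/O before the simulated power failure on every
	 * iteration, until the md sync finally completes without being interrupted. */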
5896 	while (!clean) {
5897 		/* Create bs and blob */
5898 		suite_blob_setup();
5899 		SPDK_CU_ASSERT_FATAL(g_bs != NULL);
5900 		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5901 		bs = g_bs;
5902 		blob = g_blob;
5903 
5904 		/* The super block should not change for the rest of the UT;
5905 		 * save it now and compare against it later. */
5906 		memcpy(&super_copy, super, sizeof(struct spdk_bs_super_block));
5907 		SPDK_CU_ASSERT_FATAL(super->clean == 0);
5908 		SPDK_CU_ASSERT_FATAL(bs->clean == 0);
5909 
5910 		/* Force the bs/super block into a clean state, and mark the blob
5911 		 * dirty so that the md sync below triggers a blob persist. */
5912 		blob->state = SPDK_BLOB_STATE_DIRTY;
5913 		bs->clean = 1;
5914 		super->clean = 1;
5915 		super->crc = blob_md_page_calc_crc(super);
5916 
5917 		g_bserrno = -1;
5918 		dev_set_power_failure_thresholds(thresholds);
5919 		spdk_blob_sync_md(blob, blob_op_complete, NULL);
5920 		poll_threads();
5921 		dev_reset_power_failure_event();
5922 
5923 		if (g_bserrno == 0) {
5924 			/* After successful md sync, both bs and super block
5925 			 * should be marked as not clean. */
5926 			SPDK_CU_ASSERT_FATAL(bs->clean == 0);
5927 			SPDK_CU_ASSERT_FATAL(super->clean == 0);
5928 			clean = true;
5929 		}
5930 
5931 		/* Depending on the point of failure, super block was either updated or not. */
5932 		super_copy.clean = super->clean;
5933 		super_copy.crc = blob_md_page_calc_crc(&super_copy);
5934 		/* Verify that the rest of the super block values remained unchanged. */
5935 		SPDK_CU_ASSERT_FATAL(!memcmp(&super_copy, super, sizeof(struct spdk_bs_super_block)));
5936 
5937 		/* Delete blob and unload bs */
5938 		suite_blob_cleanup();
5939 
5940 		thresholds.general_threshold++;
5941 	}
5942 }
5943 
5944 static void
5945 blob_delete_snapshot_power_failure(void)
5946 {
5947 	struct spdk_bs_dev *dev;
5948 	struct spdk_blob_store *bs;
5949 	struct spdk_blob_opts opts;
5950 	struct spdk_blob *blob, *snapshot;
5951 	struct spdk_power_failure_thresholds thresholds = {};
5952 	spdk_blob_id blobid, snapshotid;
5953 	const void *value;
5954 	size_t value_len;
5955 	size_t count;
5956 	spdk_blob_id ids[3] = {};
5957 	int rc;
5958 	bool deleted = false;
5959 	int delete_snapshot_bserrno = -1;
5960 
5961 	thresholds.general_threshold = 1;
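	/* Each iteration permits one more dev I/O before the simulated power failure,
	 * until the snapshot delete finally reports success. */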
5962 	while (!deleted) {
5963 		dev = init_dev();
5964 
5965 		spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
5966 		poll_threads();
5967 		CU_ASSERT(g_bserrno == 0);
5968 		SPDK_CU_ASSERT_FATAL(g_bs != NULL);
5969 		bs = g_bs;
5970 
5971 		/* Create blob */
5972 		ut_spdk_blob_opts_init(&opts);
5973 		opts.num_clusters = 10;
5974 
5975 		spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
5976 		poll_threads();
5977 		CU_ASSERT(g_bserrno == 0);
5978 		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5979 		blobid = g_blobid;
5980 
5981 		/* Create snapshot */
5982 		spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5983 		poll_threads();
5984 		CU_ASSERT(g_bserrno == 0);
5985 		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5986 		snapshotid = g_blobid;
5987 		SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1));
5988 		SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11));
5989 
5990 		dev_set_power_failure_thresholds(thresholds);
5991 
5992 		spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
5993 		poll_threads();
5994 		delete_snapshot_bserrno = g_bserrno;
5995 
5996 		/* Do not shut down cleanly. The assumption is that after snapshot deletion
5997 		 * reports success, changes to both blobs are already persisted. */
5998 		dev_reset_power_failure_event();
5999 		ut_bs_dirty_load(&bs, NULL);
6000 
6001 		SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1));
6002 		SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11));
6003 
6004 		spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
6005 		poll_threads();
6006 		CU_ASSERT(g_bserrno == 0);
6007 		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
6008 		blob = g_blob;
6009 		SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == true);
6010 
6011 		spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
6012 		poll_threads();
6013 
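		/* The snapshot still opens after the dirty load, so it must look like a
		 * fully intact snapshot: relations unchanged and no pending-removal
		 * xattr left behind. */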
6014 		if (g_bserrno == 0) {
6015 			SPDK_CU_ASSERT_FATAL(g_blob != NULL);
6016 			snapshot = g_blob;
6017 			CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
6018 			count = SPDK_COUNTOF(ids);
6019 			rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
6020 			CU_ASSERT(rc == 0);
6021 			CU_ASSERT(count == 1);
6022 			CU_ASSERT(ids[0] == blobid);
6023 			rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_PENDING_REMOVAL, &value, &value_len);
6024 			CU_ASSERT(rc != 0);
6025 			SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(snapshot) == false);
6026 
6027 			spdk_blob_close(snapshot, blob_op_complete, NULL);
6028 			poll_threads();
6029 			CU_ASSERT(g_bserrno == 0);
6030 		} else {
6031 			CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID);
6032 			/* The snapshot might have been left in an unrecoverable state, so it does not open.
6033 			 * Yet the delete might perform further changes to the clone after that.
6034 			 * This UT should keep testing until the snapshot is deleted and the delete call succeeds. */
6035 			if (delete_snapshot_bserrno == 0) {
6036 				deleted = true;
6037 			}
6038 		}
6039 
6040 		spdk_blob_close(blob, blob_op_complete, NULL);
6041 		poll_threads();
6042 		CU_ASSERT(g_bserrno == 0);
6043 
6044 		spdk_bs_unload(bs, bs_op_complete, NULL);
6045 		poll_threads();
6046 		CU_ASSERT(g_bserrno == 0);
6047 
6048 		thresholds.general_threshold++;
6049 	}
6050 }
6051 
6052 static void
6053 blob_create_snapshot_power_failure(void)
6054 {
6055 	struct spdk_blob_store *bs = g_bs;
6056 	struct spdk_bs_dev *dev;
6057 	struct spdk_blob_opts opts;
6058 	struct spdk_blob *blob, *snapshot;
6059 	struct spdk_power_failure_thresholds thresholds = {};
6060 	spdk_blob_id blobid, snapshotid;
6061 	const void *value;
6062 	size_t value_len;
6063 	size_t count;
6064 	spdk_blob_id ids[3] = {};
6065 	int rc;
6066 	bool created = false;
6067 	int create_snapshot_bserrno = -1;
6068 
6069 	thresholds.general_threshold = 1;
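	/* Each iteration permits one more dev I/O before the simulated power failure,
	 * until snapshot creation finally reports success. */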
6070 	while (!created) {
6071 		dev = init_dev();
6072 
6073 		spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
6074 		poll_threads();
6075 		CU_ASSERT(g_bserrno == 0);
6076 		SPDK_CU_ASSERT_FATAL(g_bs != NULL);
6077 		bs = g_bs;
6078 
6079 		/* Create blob */
6080 		ut_spdk_blob_opts_init(&opts);
6081 		opts.num_clusters = 10;
6082 
6083 		spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
6084 		poll_threads();
6085 		CU_ASSERT(g_bserrno == 0);
6086 		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
6087 		blobid = g_blobid;
6088 		SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1));
6089 		SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11));
6090 
6091 		dev_set_power_failure_thresholds(thresholds);
6092 
6093 		/* Create snapshot */
6094 		spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
6095 		poll_threads();
6096 		create_snapshot_bserrno = g_bserrno;
6097 		snapshotid = g_blobid;
6098 		SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1));
6099 		SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11));
6100 
6101 		/* Do not shut down cleanly. The assumption is that after snapshot creation
6102 		 * reports success, both blobs should be power-fail safe. */
6103 		dev_reset_power_failure_event();
6104 		ut_bs_dirty_load(&bs, NULL);
6105 
6106 		SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1));
6107 		SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11));
6108 
6109 		spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
6110 		poll_threads();
6111 		CU_ASSERT(g_bserrno == 0);
6112 		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
6113 		blob = g_blob;
6114 
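		/* snapshotid is SPDK_BLOBID_INVALID when the simulated power failure hit
		 * before the snapshot was assigned an id. */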
6115 		if (snapshotid != SPDK_BLOBID_INVALID) {
6116 			spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
6117 			poll_threads();
6118 		}
6119 
6120 		if ((snapshotid != SPDK_BLOBID_INVALID) && (g_bserrno == 0)) {
6121 			SPDK_CU_ASSERT_FATAL(g_blob != NULL);
6122 			snapshot = g_blob;
6123 			SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == true);
6124 			SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(snapshot) == false);
6125 			CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
6126 			count = SPDK_COUNTOF(ids);
6127 			rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
6128 			CU_ASSERT(rc == 0);
6129 			CU_ASSERT(count == 1);
6130 			CU_ASSERT(ids[0] == blobid);
6131 			rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_IN_PROGRESS, &value, &value_len);
6132 			CU_ASSERT(rc != 0);
6133 
6134 			spdk_blob_close(snapshot, blob_op_complete, NULL);
6135 			poll_threads();
6136 			CU_ASSERT(g_bserrno == 0);
6137 			if (create_snapshot_bserrno == 0) {
6138 				created = true;
6139 			}
6140 		} else {
6141 			CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID);
6142 			SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == false);
6143 		}
6144 
6145 		spdk_blob_close(blob, blob_op_complete, NULL);
6146 		poll_threads();
6147 		CU_ASSERT(g_bserrno == 0);
6148 
6149 		spdk_bs_unload(bs, bs_op_complete, NULL);
6150 		poll_threads();
6151 		CU_ASSERT(g_bserrno == 0);
6152 
6153 		thresholds.general_threshold++;
6154 	}
6155 }
6156 
6157 static void
6158 test_io_write(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
6159 {
6160 	uint8_t payload_ff[64 * 512];
6161 	uint8_t payload_aa[64 * 512];
6162 	uint8_t payload_00[64 * 512];
6163 	uint8_t *cluster0, *cluster1;
6164 
6165 	memset(payload_ff, 0xFF, sizeof(payload_ff));
6166 	memset(payload_aa, 0xAA, sizeof(payload_aa));
6167 	memset(payload_00, 0x00, sizeof(payload_00));
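	/* Reference buffers: 64 io_units (512 bytes each) of a single repeated byte,
	 * used both as write payloads and as patterns for the memcmp checks below. */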
6168 
6169 	/* Try to perform I/O with io unit = 512 */
6170 	spdk_blob_io_write(blob, channel, payload_ff, 0, 1, blob_op_complete, NULL);
6171 	poll_threads();
6172 	CU_ASSERT(g_bserrno == 0);
6173 
6174 	/* If thin provisioning is set, the cluster should be allocated now */
6175 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[0] != 0);
6176 	cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen];
6177 
6178 	/* Each character 0-F symbolizes a single io_unit containing a 512-byte block filled with that character.
6179 	 * Pages are separated by '|'. The whole block [...] symbolizes one cluster (containing 4 pages). */
6180 	/* cluster0: [ F000 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
6181 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6182 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 31 * 512) == 0);
6183 
6184 	/* Verify write with offset on first page */
6185 	spdk_blob_io_write(blob, channel, payload_ff, 2, 1, blob_op_complete, NULL);
6186 	poll_threads();
6187 	CU_ASSERT(g_bserrno == 0);
6188 
6189 	/* cluster0: [ F0F0 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
6190 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6191 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6192 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6193 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6194 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_00, 28 * 512) == 0);
6195 
6196 	/* Verify write with offset on first page */
6197 	spdk_blob_io_write(blob, channel, payload_ff, 4, 4, blob_op_complete, NULL);
6198 	poll_threads();
6199 
6200 	/* cluster0: [ F0F0 FFFF | 0000 0000 | 0000 0000 | 0000 0000 ] */
6201 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6202 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6203 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6204 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6205 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 4 * 512) == 0);
6206 	CU_ASSERT(memcmp(cluster0 + 8 * 512, payload_00, 24 * 512) == 0);
6207 
6208 	/* Verify write with offset on second page */
6209 	spdk_blob_io_write(blob, channel, payload_ff, 8, 4, blob_op_complete, NULL);
6210 	poll_threads();
6211 
6212 	/* cluster0: [ F0F0 FFFF | FFFF 0000 | 0000 0000 | 0000 0000 ] */
6213 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6214 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6215 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6216 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6217 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 8 * 512) == 0);
6218 	CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0);
6219 
6220 	/* Verify write across multiple pages */
6221 	spdk_blob_io_write(blob, channel, payload_aa, 4, 8, blob_op_complete, NULL);
6222 	poll_threads();
6223 
6224 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 0000 ] */
6225 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6226 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6227 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6228 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6229 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
6230 	CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0);
6231 
6232 	/* Verify write across multiple clusters */
6233 	spdk_blob_io_write(blob, channel, payload_ff, 28, 8, blob_op_complete, NULL);
6234 	poll_threads();
6235 
6236 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0);
6237 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
6238 
6239 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6240 	 * cluster1: [ FFFF 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
6241 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6242 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6243 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6244 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6245 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
6246 	CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0);
6247 
6248 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0);
6249 	CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 28 * 512) == 0);
6250 
6251 	/* Verify write to second cluster */
6252 	spdk_blob_io_write(blob, channel, payload_ff, 32 + 12, 2, blob_op_complete, NULL);
6253 	poll_threads();
6254 
6255 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0);
6256 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
6257 
6258 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6259 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] */
6260 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6261 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6262 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6263 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6264 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
6265 	CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0);
6266 
6267 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0);
6268 	CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 8 * 512) == 0);
6269 	CU_ASSERT(memcmp(cluster1 + 12 * 512, payload_ff, 2 * 512) == 0);
6270 	CU_ASSERT(memcmp(cluster1 + 14 * 512, payload_00, 18 * 512) == 0);
6271 }
6272 
6273 static void
6274 test_io_read(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
6275 {
6276 	uint8_t payload_read[64 * 512];
6277 	uint8_t payload_ff[64 * 512];
6278 	uint8_t payload_aa[64 * 512];
6279 	uint8_t payload_00[64 * 512];
6280 
6281 	memset(payload_ff, 0xFF, sizeof(payload_ff));
6282 	memset(payload_aa, 0xAA, sizeof(payload_aa));
6283 	memset(payload_00, 0x00, sizeof(payload_00));
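	/* The reference buffers mirror the patterns written by test_io_write(); every
	 * read below is compared against them io_unit by io_unit. */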
6284 
6285 	/* Read only first io unit */
6286 	/* cluster0: [ (F)0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6287 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
6288 	 * payload_read: F000 0000 | 0000 0000 ... */
6289 	memset(payload_read, 0x00, sizeof(payload_read));
6290 	spdk_blob_io_read(blob, channel, payload_read, 0, 1, blob_op_complete, NULL);
6291 	poll_threads();
6292 	CU_ASSERT(g_bserrno == 0);
6293 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
6294 	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 31 * 512) == 0);
6295 
6296 	/* Read four io_units starting from offset = 2
6297 	 * cluster0: [ F0(F0 AA)AA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6298 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
6299 	 * payload_read: F0AA 0000 | 0000 0000 ... */
6300 
6301 	memset(payload_read, 0x00, sizeof(payload_read));
6302 	spdk_blob_io_read(blob, channel, payload_read, 2, 4, blob_op_complete, NULL);
6303 	poll_threads();
6304 	CU_ASSERT(g_bserrno == 0);
6305 
6306 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
6307 	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0);
6308 	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_aa, 512) == 0);
6309 	CU_ASSERT(memcmp(payload_read + 3 * 512, payload_aa, 512) == 0);
6310 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0);
6311 
6312 	/* Read eight io_units across multiple pages
6313 	 * cluster0: [ F0F0 (AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ]
6314 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
6315 	 * payload_read: AAAA AAAA | 0000 0000 ... */
6316 	memset(payload_read, 0x00, sizeof(payload_read));
6317 	spdk_blob_io_read(blob, channel, payload_read, 4, 8, blob_op_complete, NULL);
6318 	poll_threads();
6319 	CU_ASSERT(g_bserrno == 0);
6320 
6321 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_aa, 8 * 512) == 0);
6322 	CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0);
6323 
6324 	/* Read eight io_units across multiple clusters
6325 	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 (FFFF ]
6326 	 * cluster1: [ FFFF) 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
6327 	 * payload_read: FFFF FFFF | 0000 0000 ... */
6328 	memset(payload_read, 0x00, sizeof(payload_read));
6329 	spdk_blob_io_read(blob, channel, payload_read, 28, 8, blob_op_complete, NULL);
6330 	poll_threads();
6331 	CU_ASSERT(g_bserrno == 0);
6332 
6333 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 8 * 512) == 0);
6334 	CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0);
6335 
6336 	/* Read four io_units from second cluster
6337 	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6338 	 * cluster1: [ FFFF 0000 | 00(00 FF)00 | 0000 0000 | 0000 0000 ]
6339 	 * payload_read: 00FF 0000 | 0000 0000 ... */
6340 	memset(payload_read, 0x00, sizeof(payload_read));
6341 	spdk_blob_io_read(blob, channel, payload_read, 32 + 10, 4, blob_op_complete, NULL);
6342 	poll_threads();
6343 	CU_ASSERT(g_bserrno == 0);
6344 
6345 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_00, 2 * 512) == 0);
6346 	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 2 * 512) == 0);
6347 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0);
6348 
6349 	/* Read second cluster
6350 	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6351 	 * cluster1: [ (FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ]
6352 	 * payload_read: FFFF 0000 | 0000 FF00 ... */
6353 	memset(payload_read, 0x00, sizeof(payload_read));
6354 	spdk_blob_io_read(blob, channel, payload_read, 32, 32, blob_op_complete, NULL);
6355 	poll_threads();
6356 	CU_ASSERT(g_bserrno == 0);
6357 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 4 * 512) == 0);
6358 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 8 * 512) == 0);
6359 	CU_ASSERT(memcmp(payload_read + 12 * 512, payload_ff, 2 * 512) == 0);
6360 	CU_ASSERT(memcmp(payload_read + 14 * 512, payload_00, 18 * 512) == 0);
6361 
6362 	/* Read whole two clusters
6363 	 * cluster0: [ (F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6364 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ] */
6365 	memset(payload_read, 0x00, sizeof(payload_read));
6366 	spdk_blob_io_read(blob, channel, payload_read, 0, 64, blob_op_complete, NULL);
6367 	poll_threads();
6368 	CU_ASSERT(g_bserrno == 0);
6369 
6370 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
6371 	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0);
6372 	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 512) == 0);
6373 	CU_ASSERT(memcmp(payload_read + 3 * 512, payload_00, 512) == 0);
6374 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_aa, 8 * 512) == 0);
6375 	CU_ASSERT(memcmp(payload_read + 28 * 512, payload_ff, 4 * 512) == 0);
6376 
6377 	CU_ASSERT(memcmp(payload_read + (32 + 0) * 512, payload_ff, 4 * 512) == 0);
6378 	CU_ASSERT(memcmp(payload_read + (32 + 4) * 512, payload_00, 8 * 512) == 0);
6379 	CU_ASSERT(memcmp(payload_read + (32 + 12) * 512, payload_ff, 2 * 512) == 0);
6380 	CU_ASSERT(memcmp(payload_read + (32 + 14) * 512, payload_00, 18 * 512) == 0);
6381 }
6382 
6383 
6384 static void
6385 test_io_unmap(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
6386 {
6387 	uint8_t payload_ff[64 * 512];
6388 	uint8_t payload_aa[64 * 512];
6389 	uint8_t payload_00[64 * 512];
6390 	uint8_t *cluster0, *cluster1;
6391 
6392 	memset(payload_ff, 0xFF, sizeof(payload_ff));
6393 	memset(payload_aa, 0xAA, sizeof(payload_aa));
6394 	memset(payload_00, 0x00, sizeof(payload_00));
6395 
6396 	cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen];
6397 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
6398 
6399 	/* Unmap */
6400 	spdk_blob_io_unmap(blob, channel, 0, 64, blob_op_complete, NULL);
6401 	poll_threads();
6402 
6403 	CU_ASSERT(g_bserrno == 0);
6404 
6405 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_00, 32 * 512) == 0);
6406 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_00, 32 * 512) == 0);
6407 }
6408 
6409 static void
6410 test_io_zeroes(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
6411 {
6412 	uint8_t payload_ff[64 * 512];
6413 	uint8_t payload_aa[64 * 512];
6414 	uint8_t payload_00[64 * 512];
6415 	uint8_t *cluster0, *cluster1;
6416 
6417 	memset(payload_ff, 0xFF, sizeof(payload_ff));
6418 	memset(payload_aa, 0xAA, sizeof(payload_aa));
6419 	memset(payload_00, 0x00, sizeof(payload_00));
6420 
6421 	cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen];
6422 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
6423 
6424 	/* Write zeroes */
6425 	spdk_blob_io_write_zeroes(blob, channel, 0, 64, blob_op_complete, NULL);
6426 	poll_threads();
6427 
6428 	CU_ASSERT(g_bserrno == 0);
6429 
6430 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_00, 32 * 512) == 0);
6431 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_00, 32 * 512) == 0);
6432 }
6433 
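/* Helper that issues a writev either through the ext API (additionally verifying
 * that the ext io opts are passed through to the bs_dev) or through the regular API. */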
6434 static inline void
6435 test_blob_io_writev(struct spdk_blob *blob, struct spdk_io_channel *channel,
6436 		    struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
6437 		    spdk_blob_op_complete cb_fn, void *cb_arg, struct spdk_blob_ext_io_opts *io_opts)
6438 {
6439 	if (io_opts) {
6440 		g_dev_writev_ext_called = false;
6441 		memset(&g_blob_ext_io_opts, 0, sizeof(g_blob_ext_io_opts));
6442 		spdk_blob_io_writev_ext(blob, channel, iov, iovcnt, offset, length, blob_op_complete, NULL,
6443 					io_opts);
6444 	} else {
6445 		spdk_blob_io_writev(blob, channel, iov, iovcnt, offset, length, blob_op_complete, NULL);
6446 	}
6447 	poll_threads();
6448 	CU_ASSERT(g_bserrno == 0);
6449 	if (io_opts) {
6450 		CU_ASSERT(g_dev_writev_ext_called);
6451 		CU_ASSERT(memcmp(io_opts, &g_blob_ext_io_opts, sizeof(g_blob_ext_io_opts)) == 0);
6452 	}
6453 }
6454 
6455 static void
6456 test_iov_write(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel,
6457 	       bool ext_api)
6458 {
6459 	uint8_t payload_ff[64 * 512];
6460 	uint8_t payload_aa[64 * 512];
6461 	uint8_t payload_00[64 * 512];
6462 	uint8_t *cluster0, *cluster1;
6463 	struct iovec iov[4];
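	/* Dummy memory-domain cookies; test_blob_io_writev() only checks that these
	 * opts reach the bs_dev unchanged when the ext API is used. */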
6464 	struct spdk_blob_ext_io_opts ext_opts = {
6465 		.memory_domain = (struct spdk_memory_domain *)0xfeedbeef,
6466 		.memory_domain_ctx = (void *)0xf00df00d,
6467 		.size = sizeof(struct spdk_blob_ext_io_opts),
6468 		.user_ctx = (void *)123,
6469 	};
6470 
6471 	memset(payload_ff, 0xFF, sizeof(payload_ff));
6472 	memset(payload_aa, 0xAA, sizeof(payload_aa));
6473 	memset(payload_00, 0x00, sizeof(payload_00));
6474 
6475 	/* Try to perform I/O with io unit = 512 */
6476 	iov[0].iov_base = payload_ff;
6477 	iov[0].iov_len = 1 * 512;
6478 
6479 	test_blob_io_writev(blob, channel, iov, 1, 0, 1, blob_op_complete, NULL,
6480 			    ext_api ? &ext_opts : NULL);
6481 
6482 	/* If thin provisioning is set, the cluster should be allocated now */
6483 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[0] != 0);
6484 	cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen];
6485 
6486 	/* Each character 0-F symbolizes a single io_unit containing a 512-byte block filled with that character.
6487 	 * Pages are separated by '|'. The whole block [...] symbolizes one cluster (containing 4 pages). */
6488 	/* cluster0: [ F000 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
6489 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6490 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 31 * 512) == 0);
6491 
6492 	/* Verify write with offset on first page */
6493 	iov[0].iov_base = payload_ff;
6494 	iov[0].iov_len = 1 * 512;
6495 
6496 	test_blob_io_writev(blob, channel, iov, 1, 2, 1, blob_op_complete, NULL,
6497 			    ext_api ? &ext_opts : NULL);
6498 
6499 	/* cluster0: [ F0F0 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
6500 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6501 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6502 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6503 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6504 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_00, 28 * 512) == 0);
6505 
6506 	/* Verify write filling the remainder of the first page */
6507 	iov[0].iov_base = payload_ff;
6508 	iov[0].iov_len = 4 * 512;
6509 	spdk_blob_io_writev(blob, channel, iov, 1, 4, 4, blob_op_complete, NULL);
6510 	poll_threads();
6511 
6512 	/* cluster0: [ F0F0 FFFF | 0000 0000 | 0000 0000 | 0000 0000 ] */
6513 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6514 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6515 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6516 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6517 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 4 * 512) == 0);
6518 	CU_ASSERT(memcmp(cluster0 + 8 * 512, payload_00, 24 * 512) == 0);
6519 
6520 	/* Verify write with offset on second page */
6521 	iov[0].iov_base = payload_ff;
6522 	iov[0].iov_len = 4 * 512;
6523 	spdk_blob_io_writev(blob, channel, iov, 1, 8, 4, blob_op_complete, NULL);
6524 	poll_threads();
6525 
6526 	/* cluster0: [ F0F0 FFFF | FFFF 0000 | 0000 0000 | 0000 0000 ] */
6527 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6528 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6529 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6530 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6531 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 8 * 512) == 0);
6532 	CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0);
6533 
6534 	/* Verify write across multiple pages */
6535 	iov[0].iov_base = payload_aa;
6536 	iov[0].iov_len = 8 * 512;
6537 
6538 	test_blob_io_writev(blob, channel, iov, 1, 4, 8, blob_op_complete, NULL,
6539 			    ext_api ? &ext_opts : NULL);
6540 
6541 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 0000 ] */
6542 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6543 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6544 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6545 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6546 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
6547 	CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0);
6548 
6549 	/* Verify write across multiple clusters */
6550 
6551 	iov[0].iov_base = payload_ff;
6552 	iov[0].iov_len = 8 * 512;
6553 
6554 	test_blob_io_writev(blob, channel, iov, 1, 28, 8, blob_op_complete, NULL,
6555 			    ext_api ? &ext_opts : NULL);
6556 
6557 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0);
6558 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
6559 
6560 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6561 	 * cluster1: [ FFFF 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
6562 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6563 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6564 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6565 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6566 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
6567 	CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 16 * 512) == 0);
6568 	CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0);
6569 
6570 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0);
6571 	CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 28 * 512) == 0);
6572 
6573 	/* Verify write to second cluster */
6574 
6575 	iov[0].iov_base = payload_ff;
6576 	iov[0].iov_len = 2 * 512;
6577 
6578 	test_blob_io_writev(blob, channel, iov, 1, 32 + 12, 2, blob_op_complete, NULL,
6579 			    ext_api ? &ext_opts : NULL);
6580 
6581 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0);
6582 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
6583 
6584 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6585 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] */
6586 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6587 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6588 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6589 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6590 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
6591 	CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0);
6592 
6593 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0);
6594 	CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 8 * 512) == 0);
6595 	CU_ASSERT(memcmp(cluster1 + 12 * 512, payload_ff, 2 * 512) == 0);
6596 	CU_ASSERT(memcmp(cluster1 + 14 * 512, payload_00, 18 * 512) == 0);
6597 }
6598 
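/* Editorial note (added comment): helper mirroring test_blob_io_writev() for reads:
 * dispatches either spdk_blob_io_readv() or spdk_blob_io_readv_ext() depending on io_opts
 * and, for the ext path, checks that the options were propagated to the bs_dev layer. */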
6599 static inline void
6600 test_blob_io_readv(struct spdk_blob *blob, struct spdk_io_channel *channel,
6601 		   struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
6602 		   spdk_blob_op_complete cb_fn, void *cb_arg, struct spdk_blob_ext_io_opts *io_opts)
6603 {
6604 	if (io_opts) {
6605 		g_dev_readv_ext_called = false;
6606 		memset(&g_blob_ext_io_opts, 0, sizeof(g_blob_ext_io_opts));
6607 		spdk_blob_io_readv_ext(blob, channel, iov, iovcnt, offset, length, blob_op_complete, NULL, io_opts);
6608 	} else {
6609 		spdk_blob_io_readv(blob, channel, iov, iovcnt, offset, length, blob_op_complete, NULL);
6610 	}
6611 	poll_threads();
6612 	CU_ASSERT(g_bserrno == 0);
6613 	if (io_opts) {
6614 		CU_ASSERT(g_dev_readv_ext_called);
6615 		CU_ASSERT(memcmp(io_opts, &g_blob_ext_io_opts, sizeof(g_blob_ext_io_opts)) == 0);
6616 	}
6617 }
6618 
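/* Editorial note (added comment): read back the layout produced by test_iov_write() using
 * scattered iovecs of various sizes: single io_unit, multi-page, cross-cluster, and whole
 * two-cluster reads. With ext_api set, the *_ext read variants are exercised. */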
6619 static void
6620 test_iov_read(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel,
6621 	      bool ext_api)
6622 {
6623 	uint8_t payload_read[64 * 512];
6624 	uint8_t payload_ff[64 * 512];
6625 	uint8_t payload_aa[64 * 512];
6626 	uint8_t payload_00[64 * 512];
6627 	struct iovec iov[4];
6628 	struct spdk_blob_ext_io_opts ext_opts = {
6629 		.memory_domain = (struct spdk_memory_domain *)0xfeedbeef,
6630 		.memory_domain_ctx = (void *)0xf00df00d,
6631 		.size = sizeof(struct spdk_blob_ext_io_opts),
6632 		.user_ctx = (void *)123,
6633 	};
6634 
6635 	memset(payload_ff, 0xFF, sizeof(payload_ff));
6636 	memset(payload_aa, 0xAA, sizeof(payload_aa));
6637 	memset(payload_00, 0x00, sizeof(payload_00));
6638 
6639 	/* Read only first io unit */
6640 	/* cluster0: [ (F)0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6641 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
6642 	 * payload_read: F000 0000 | 0000 0000 ... */
6643 	memset(payload_read, 0x00, sizeof(payload_read));
6644 	iov[0].iov_base = payload_read;
6645 	iov[0].iov_len = 1 * 512;
6646 
6647 	test_blob_io_readv(blob, channel, iov, 1, 0, 1, blob_op_complete, NULL, ext_api ? &ext_opts : NULL);
6648 
6649 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
6650 	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 31 * 512) == 0);
6651 
6652 	/* Read four io_units starting from offset = 2
6653 	 * cluster0: [ F0(F0 AA)AA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6654 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
6655 	 * payload_read: F0AA 0000 | 0000 0000 ... */
6656 
6657 	memset(payload_read, 0x00, sizeof(payload_read));
6658 	iov[0].iov_base = payload_read;
6659 	iov[0].iov_len = 4 * 512;
6660 
6661 	test_blob_io_readv(blob, channel, iov, 1, 2, 4, blob_op_complete, NULL, ext_api ? &ext_opts : NULL);
6662 
6663 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
6664 	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0);
6665 	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_aa, 512) == 0);
6666 	CU_ASSERT(memcmp(payload_read + 3 * 512, payload_aa, 512) == 0);
6667 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0);
6668 
6669 	/* Read eight io_units across multiple pages
6670 	 * cluster0: [ F0F0 (AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ]
6671 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
6672 	 * payload_read: AAAA AAAA | 0000 0000 ... */
6673 	memset(payload_read, 0x00, sizeof(payload_read));
6674 	iov[0].iov_base = payload_read;
6675 	iov[0].iov_len = 4 * 512;
6676 	iov[1].iov_base = payload_read + 4 * 512;
6677 	iov[1].iov_len = 4 * 512;
6678 
6679 	test_blob_io_readv(blob, channel, iov, 2, 4, 8, blob_op_complete, NULL, ext_api ? &ext_opts : NULL);
6680 
6681 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_aa, 8 * 512) == 0);
6682 	CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0);
6683 
6684 	/* Read eight io_units across multiple clusters
6685 	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 (FFFF ]
6686 	 * cluster1: [ FFFF) 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
6687 	 * payload_read: FFFF FFFF | 0000 0000 ... */
6688 	memset(payload_read, 0x00, sizeof(payload_read));
6689 	iov[0].iov_base = payload_read;
6690 	iov[0].iov_len = 2 * 512;
6691 	iov[1].iov_base = payload_read + 2 * 512;
6692 	iov[1].iov_len = 2 * 512;
6693 	iov[2].iov_base = payload_read + 4 * 512;
6694 	iov[2].iov_len = 2 * 512;
6695 	iov[3].iov_base = payload_read + 6 * 512;
6696 	iov[3].iov_len = 2 * 512;
6697 
6698 	test_blob_io_readv(blob, channel, iov, 4, 28, 8, blob_op_complete, NULL,
6699 			   ext_api ? &ext_opts : NULL);
6700 
6701 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 8 * 512) == 0);
6702 	CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0);
6703 
6704 	/* Read four io_units from second cluster
6705 	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6706 	 * cluster1: [ FFFF 0000 | 00(00 FF)00 | 0000 0000 | 0000 0000 ]
6707 	 * payload_read: 00FF 0000 | 0000 0000 ... */
6708 	memset(payload_read, 0x00, sizeof(payload_read));
6709 	iov[0].iov_base = payload_read;
6710 	iov[0].iov_len = 1 * 512;
6711 	iov[1].iov_base = payload_read + 1 * 512;
6712 	iov[1].iov_len = 3 * 512;
6713 
6714 	test_blob_io_readv(blob, channel, iov, 2, 32 + 10, 4, blob_op_complete, NULL,
6715 			   ext_api ? &ext_opts : NULL);
6716 
6717 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_00, 2 * 512) == 0);
6718 	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 2 * 512) == 0);
6719 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0);
6720 
6721 	/* Read second cluster
6722 	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6723 	 * cluster1: [ (FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ]
6724 	 * payload_read: FFFF 0000 | 0000 FF00 ... */
6725 	memset(payload_read, 0x00, sizeof(payload_read));
6726 	iov[0].iov_base = payload_read;
6727 	iov[0].iov_len = 1 * 512;
6728 	iov[1].iov_base = payload_read + 1 * 512;
6729 	iov[1].iov_len = 2 * 512;
6730 	iov[2].iov_base = payload_read + 3 * 512;
6731 	iov[2].iov_len = 4 * 512;
6732 	iov[3].iov_base = payload_read + 7 * 512;
6733 	iov[3].iov_len = 25 * 512;
6734 
6735 	test_blob_io_readv(blob, channel, iov, 4, 32, 32, blob_op_complete, NULL,
6736 			   ext_api ? &ext_opts : NULL);
6737 
6738 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 4 * 512) == 0);
6739 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 8 * 512) == 0);
6740 	CU_ASSERT(memcmp(payload_read + 12 * 512, payload_ff, 2 * 512) == 0);
6741 	CU_ASSERT(memcmp(payload_read + 14 * 512, payload_00, 18 * 512) == 0);
6742 
6743 	/* Read whole two clusters
6744 	 * cluster0: [ (F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6745 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ] */
6746 	memset(payload_read, 0x00, sizeof(payload_read));
6747 	iov[0].iov_base = payload_read;
6748 	iov[0].iov_len = 1 * 512;
6749 	iov[1].iov_base = payload_read + 1 * 512;
6750 	iov[1].iov_len = 8 * 512;
6751 	iov[2].iov_base = payload_read + 9 * 512;
6752 	iov[2].iov_len = 16 * 512;
6753 	iov[3].iov_base = payload_read + 25 * 512;
6754 	iov[3].iov_len = 39 * 512;
6755 
6756 	test_blob_io_readv(blob, channel, iov, 4, 0, 64, blob_op_complete, NULL,
6757 			   ext_api ? &ext_opts : NULL);
6758 
6759 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
6760 	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0);
6761 	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 512) == 0);
6762 	CU_ASSERT(memcmp(payload_read + 3 * 512, payload_00, 512) == 0);
6763 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_aa, 8 * 512) == 0);
6764 	CU_ASSERT(memcmp(payload_read + 28 * 512, payload_ff, 4 * 512) == 0);
6765 
6766 	CU_ASSERT(memcmp(payload_read + (32 + 0) * 512, payload_ff, 4 * 512) == 0);
6767 	CU_ASSERT(memcmp(payload_read + (32 + 4) * 512, payload_00, 8 * 512) == 0);
6768 	CU_ASSERT(memcmp(payload_read + (32 + 12) * 512, payload_ff, 2 * 512) == 0);
6769 	CU_ASSERT(memcmp(payload_read + (32 + 14) * 512, payload_00, 18 * 512) == 0);
6770 }
6771 
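/* Editorial note (added comment): end-to-end I/O test on a blob store whose io_unit
 * (512 bytes) is smaller than the metadata page size. Runs the read/write/zeroes/unmap
 * helpers against a thick-provisioned blob, a thin-provisioned blob, its snapshot and
 * clone, and finally against the clone after inflation. */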
6772 static void
6773 blob_io_unit(void)
6774 {
6775 	struct spdk_bs_opts bsopts;
6776 	struct spdk_blob_opts opts;
6777 	struct spdk_blob_store *bs;
6778 	struct spdk_bs_dev *dev;
6779 	struct spdk_blob *blob, *snapshot, *clone;
6780 	spdk_blob_id blobid;
6781 	struct spdk_io_channel *channel;
6782 
6783 	/* Create a dev with a 512-byte io unit size */
6784 
6785 	spdk_bs_opts_init(&bsopts, sizeof(bsopts));
6786 	bsopts.cluster_sz = SPDK_BS_PAGE_SIZE * 4;	/* 4 pages * 8 io_units/page = 32 io_units per cluster */
6787 	snprintf(bsopts.bstype.bstype, sizeof(bsopts.bstype.bstype), "TESTTYPE");
6788 
6789 	/* Set up the backing device with a 512-byte block size */
6790 	dev = init_dev();
6791 	dev->blocklen = 512;
6792 	dev->blockcnt =  DEV_BUFFER_SIZE / dev->blocklen;
6793 
6794 	/* Initialize a new blob store */
6795 	spdk_bs_init(dev, &bsopts, bs_op_with_handle_complete, NULL);
6796 	poll_threads();
6797 	CU_ASSERT(g_bserrno == 0);
6798 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
6799 	bs = g_bs;
6800 
6801 	CU_ASSERT(spdk_bs_get_io_unit_size(bs) == 512);
6802 	channel = spdk_bs_alloc_io_channel(bs);
6803 
6804 	/* Create thick provisioned blob */
6805 	ut_spdk_blob_opts_init(&opts);
6806 	opts.thin_provision = false;
6807 	opts.num_clusters = 32;
6808 
6809 	blob = ut_blob_create_and_open(bs, &opts);
6810 	blobid = spdk_blob_get_id(blob);
6811 
6812 	test_io_write(dev, blob, channel);
6813 	test_io_read(dev, blob, channel);
6814 	test_io_zeroes(dev, blob, channel);
6815 
6816 	test_iov_write(dev, blob, channel, false);
6817 	test_iov_read(dev, blob, channel, false);
6818 	test_io_zeroes(dev, blob, channel);
6819 
6820 	test_iov_write(dev, blob, channel, true);
6821 	test_iov_read(dev, blob, channel, true);
6822 
6823 	test_io_unmap(dev, blob, channel);
6824 
6825 	spdk_blob_close(blob, blob_op_complete, NULL);
6826 	poll_threads();
6827 	CU_ASSERT(g_bserrno == 0);
6828 	blob = NULL;
6829 	g_blob = NULL;
6830 
6831 	/* Create thin provisioned blob */
6832 
6833 	ut_spdk_blob_opts_init(&opts);
6834 	opts.thin_provision = true;
6835 	opts.num_clusters = 32;
6836 
6837 	blob = ut_blob_create_and_open(bs, &opts);
6838 	blobid = spdk_blob_get_id(blob);
6839 
6840 	test_io_write(dev, blob, channel);
6841 	test_io_read(dev, blob, channel);
6842 	test_io_zeroes(dev, blob, channel);
6843 
6844 	test_iov_write(dev, blob, channel, false);
6845 	test_iov_read(dev, blob, channel, false);
6846 	test_io_zeroes(dev, blob, channel);
6847 
6848 	test_iov_write(dev, blob, channel, true);
6849 	test_iov_read(dev, blob, channel, true);
6850 
6851 	/* Create snapshot */
6852 
6853 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
6854 	poll_threads();
6855 	CU_ASSERT(g_bserrno == 0);
6856 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
6857 	blobid = g_blobid;
6858 
6859 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
6860 	poll_threads();
6861 	CU_ASSERT(g_bserrno == 0);
6862 	CU_ASSERT(g_blob != NULL);
6863 	snapshot = g_blob;
6864 
6865 	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
6866 	poll_threads();
6867 	CU_ASSERT(g_bserrno == 0);
6868 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
6869 	blobid = g_blobid;
6870 
6871 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
6872 	poll_threads();
6873 	CU_ASSERT(g_bserrno == 0);
6874 	CU_ASSERT(g_blob != NULL);
6875 	clone = g_blob;
6876 
6877 	test_io_read(dev, blob, channel);
6878 	test_io_read(dev, snapshot, channel);
6879 	test_io_read(dev, clone, channel);
6880 
6881 	test_iov_read(dev, blob, channel, false);
6882 	test_iov_read(dev, snapshot, channel, false);
6883 	test_iov_read(dev, clone, channel, false);
6884 
6885 	test_iov_read(dev, blob, channel, true);
6886 	test_iov_read(dev, snapshot, channel, true);
6887 	test_iov_read(dev, clone, channel, true);
6888 
6889 	/* Inflate clone */
6890 
6891 	spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
6892 	poll_threads();
6893 
6894 	CU_ASSERT(g_bserrno == 0);
6895 
6896 	test_io_read(dev, clone, channel);
6897 
6898 	test_io_unmap(dev, clone, channel);
6899 
6900 	test_iov_write(dev, clone, channel, false);
6901 	test_iov_read(dev, clone, channel, false);
6902 	test_io_unmap(dev, clone, channel);
6903 
6904 	test_iov_write(dev, clone, channel, true);
6905 	test_iov_read(dev, clone, channel, true);
6906 
6907 	spdk_blob_close(blob, blob_op_complete, NULL);
6908 	spdk_blob_close(snapshot, blob_op_complete, NULL);
6909 	spdk_blob_close(clone, blob_op_complete, NULL);
6910 	poll_threads();
6911 	CU_ASSERT(g_bserrno == 0);
6912 	blob = NULL;
6913 	g_blob = NULL;
6914 
6915 	spdk_bs_free_io_channel(channel);
6916 	poll_threads();
6917 
6918 	/* Unload the blob store */
6919 	spdk_bs_unload(bs, bs_op_complete, NULL);
6920 	poll_threads();
6921 	CU_ASSERT(g_bserrno == 0);
6922 	g_bs = NULL;
6923 	g_blob = NULL;
6924 	g_blobid = 0;
6925 }
6926 
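/* Editorial note (added comment): verify io_unit compatibility with older on-disk formats.
 * After initializing a blob store with a 512-byte io_unit, clear io_unit_size in the super
 * block to emulate an older version and confirm that loading falls back to
 * SPDK_BS_PAGE_SIZE as the io_unit size. */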
6927 static void
6928 blob_io_unit_compatibility(void)
6929 {
6930 	struct spdk_bs_opts bsopts;
6931 	struct spdk_blob_store *bs;
6932 	struct spdk_bs_dev *dev;
6933 	struct spdk_bs_super_block *super;
6934 
6935 	/* Create dev with 512 bytes io unit size */
6936 	/* Create a dev with a 512-byte io unit size */
6937 	spdk_bs_opts_init(&bsopts, sizeof(bsopts));
6938 	bsopts.cluster_sz = SPDK_BS_PAGE_SIZE * 4;	/* 4 pages * 8 io_units/page = 32 io_units per cluster */
6939 	snprintf(bsopts.bstype.bstype, sizeof(bsopts.bstype.bstype), "TESTTYPE");
6940 
6941 	/* Set up the backing device with a 512-byte block size */
6942 	dev = init_dev();
6943 	dev->blocklen = 512;
6944 	dev->blockcnt =  DEV_BUFFER_SIZE / dev->blocklen;
6945 
6946 	/* Initialize a new blob store */
6947 	spdk_bs_init(dev, &bsopts, bs_op_with_handle_complete, NULL);
6948 	poll_threads();
6949 	CU_ASSERT(g_bserrno == 0);
6950 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
6951 	bs = g_bs;
6952 
6953 	CU_ASSERT(spdk_bs_get_io_unit_size(bs) == 512);
6954 
6955 	/* Unload the blob store */
6956 	spdk_bs_unload(bs, bs_op_complete, NULL);
6957 	poll_threads();
6958 	CU_ASSERT(g_bserrno == 0);
6959 
6960 	/* Modify the super block to emulate an older version.
6961 	 * Check that the loaded io unit size equals SPDK_BS_PAGE_SIZE. */
6962 	super = (struct spdk_bs_super_block *)&g_dev_buffer[0];
6963 	super->io_unit_size = 0;
6964 	super->crc = blob_md_page_calc_crc(super);
6965 
6966 	dev = init_dev();
6967 	dev->blocklen = 512;
6968 	dev->blockcnt =  DEV_BUFFER_SIZE / dev->blocklen;
6969 
6970 	spdk_bs_load(dev, &bsopts, bs_op_with_handle_complete, NULL);
6971 	poll_threads();
6972 	CU_ASSERT(g_bserrno == 0);
6973 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
6974 	bs = g_bs;
6975 
6976 	CU_ASSERT(spdk_bs_get_io_unit_size(bs) == SPDK_BS_PAGE_SIZE);
6977 
6978 	/* Unload the blob store */
6979 	spdk_bs_unload(bs, bs_op_complete, NULL);
6980 	poll_threads();
6981 	CU_ASSERT(g_bserrno == 0);
6982 
6983 	g_bs = NULL;
6984 	g_blob = NULL;
6985 	g_blobid = 0;
6986 }
6987 
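/* Editorial note (added comment): completion callbacks for the double-sync scenario in
 * blob_simultaneous_operations(). The first completion dirties the blob again by setting
 * the "sync" xattr; only the second completion is allowed to move g_bserrno from -1 to 0. */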
6988 static void
6989 first_sync_complete(void *cb_arg, int bserrno)
6990 {
6991 	struct spdk_blob *blob = cb_arg;
6992 	int rc;
6993 
6994 	CU_ASSERT(bserrno == 0);
6995 	rc = spdk_blob_set_xattr(blob, "sync", "second", strlen("second") + 1);
6996 	CU_ASSERT(rc == 0);
6997 	CU_ASSERT(g_bserrno == -1);
6998 
6999 	/* Keep g_bserrno at -1; only the
7000 	 * second sync completion should set it to 0. */
7001 }
7002 
7003 static void
7004 second_sync_complete(void *cb_arg, int bserrno)
7005 {
7006 	struct spdk_blob *blob = cb_arg;
7007 	const void *value;
7008 	size_t value_len;
7009 	int rc;
7010 
7011 	CU_ASSERT(bserrno == 0);
7012 
7013 	/* Verify that the first sync completion had a chance to execute */
7014 	rc = spdk_blob_get_xattr_value(blob, "sync", &value, &value_len);
7015 	CU_ASSERT(rc == 0);
7016 	SPDK_CU_ASSERT_FATAL(value != NULL);
7017 	CU_ASSERT(value_len == strlen("second") + 1);
7018 	CU_ASSERT_NSTRING_EQUAL_FATAL(value, "second", value_len);
7019 
7020 	CU_ASSERT(g_bserrno == -1);
7021 	g_bserrno = bserrno;
7022 }
7023 
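/* Editorial note (added comment): issue pairs of conflicting operations (snapshot/delete,
 * inflate/delete, clone/delete, resize/delete) and verify that the locked operation wins
 * while the concurrent delete fails with -EBUSY, then check that back-to-back metadata
 * syncs complete in order. */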
7024 static void
7025 blob_simultaneous_operations(void)
7026 {
7027 	struct spdk_blob_store *bs = g_bs;
7028 	struct spdk_blob_opts opts;
7029 	struct spdk_blob *blob, *snapshot;
7030 	spdk_blob_id blobid, snapshotid;
7031 	struct spdk_io_channel *channel;
7032 	int rc;
7033 
7034 	channel = spdk_bs_alloc_io_channel(bs);
7035 	SPDK_CU_ASSERT_FATAL(channel != NULL);
7036 
7037 	ut_spdk_blob_opts_init(&opts);
7038 	opts.num_clusters = 10;
7039 
7040 	blob = ut_blob_create_and_open(bs, &opts);
7041 	blobid = spdk_blob_get_id(blob);
7042 
7043 	/* Create a snapshot and try to delete the blob at the same time:
7044 	 * - the snapshot should be created successfully
7045 	 * - the delete operation should fail with -EBUSY */
7046 	CU_ASSERT(blob->locked_operation_in_progress == false);
7047 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
7048 	CU_ASSERT(blob->locked_operation_in_progress == true);
7049 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
7050 	CU_ASSERT(blob->locked_operation_in_progress == true);
7051 	/* Deletion failure */
7052 	CU_ASSERT(g_bserrno == -EBUSY);
7053 	poll_threads();
7054 	CU_ASSERT(blob->locked_operation_in_progress == false);
7055 	/* Snapshot creation success */
7056 	CU_ASSERT(g_bserrno == 0);
7057 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
7058 
7059 	snapshotid = g_blobid;
7060 
7061 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
7062 	poll_threads();
7063 	CU_ASSERT(g_bserrno == 0);
7064 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
7065 	snapshot = g_blob;
7066 
7067 	/* Inflate the blob and try to delete it at the same time:
7068 	 * - the blob should be inflated successfully
7069 	 * - the delete operation should fail with -EBUSY */
7070 	CU_ASSERT(blob->locked_operation_in_progress == false);
7071 	spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
7072 	CU_ASSERT(blob->locked_operation_in_progress == true);
7073 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
7074 	CU_ASSERT(blob->locked_operation_in_progress == true);
7075 	/* Deletion failure */
7076 	CU_ASSERT(g_bserrno == -EBUSY);
7077 	poll_threads();
7078 	CU_ASSERT(blob->locked_operation_in_progress == false);
7079 	/* Inflation success */
7080 	CU_ASSERT(g_bserrno == 0);
7081 
7082 	/* Clone the snapshot and try to delete the snapshot at the same time:
7083 	 * - the snapshot should be cloned successfully
7084 	 * - the delete operation should fail with -EBUSY */
7085 	CU_ASSERT(blob->locked_operation_in_progress == false);
7086 	spdk_bs_create_clone(bs, snapshotid, NULL, blob_op_with_id_complete, NULL);
7087 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
7088 	/* Deletion failure */
7089 	CU_ASSERT(g_bserrno == -EBUSY);
7090 	poll_threads();
7091 	CU_ASSERT(blob->locked_operation_in_progress == false);
7092 	/* Clone created */
7093 	CU_ASSERT(g_bserrno == 0);
7094 
7095 	/* Resize the blob and try to delete it at the same time:
7096 	 * - the blob should be resized successfully
7097 	 * - the delete operation should fail with -EBUSY */
7098 	CU_ASSERT(blob->locked_operation_in_progress == false);
7099 	spdk_blob_resize(blob, 50, blob_op_complete, NULL);
7100 	CU_ASSERT(blob->locked_operation_in_progress == true);
7101 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
7102 	CU_ASSERT(blob->locked_operation_in_progress == true);
7103 	/* Deletion failure */
7104 	CU_ASSERT(g_bserrno == -EBUSY);
7105 	poll_threads();
7106 	CU_ASSERT(blob->locked_operation_in_progress == false);
7107 	/* Blob resized successfully */
7108 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
7109 	poll_threads();
7110 	CU_ASSERT(g_bserrno == 0);
7111 
7112 	/* Issue two consecutive blob syncs; neither should fail.
7113 	 * Force each sync to actually occur by marking the blob dirty (setting an xattr) each time.
7114 	 * Merely issuing the sync is not enough to complete the operation,
7115 	 * since disk I/O is required to finish it. */
7116 	g_bserrno = -1;
7117 
7118 	rc = spdk_blob_set_xattr(blob, "sync", "first", strlen("first") + 1);
7119 	CU_ASSERT(rc == 0);
7120 	spdk_blob_sync_md(blob, first_sync_complete, blob);
7121 	CU_ASSERT(g_bserrno == -1);
7122 
7123 	spdk_blob_sync_md(blob, second_sync_complete, blob);
7124 	CU_ASSERT(g_bserrno == -1);
7125 
7126 	poll_threads();
7127 	CU_ASSERT(g_bserrno == 0);
7128 
7129 	spdk_bs_free_io_channel(channel);
7130 	poll_threads();
7131 
7132 	ut_blob_close_and_delete(bs, snapshot);
7133 	ut_blob_close_and_delete(bs, blob);
7134 }
7135 
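/* Editorial note (added comment): stress metadata persistence. Repeatedly start a sync of a
 * blob carrying a multi-page xattr, interrupt it after a growing number of poller
 * iterations, remove the xattr and sync again, then reload the blob store to confirm the
 * xattr was never persisted and the md page accounting returns to its original value. */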
7136 static void
7137 blob_persist_test(void)
7138 {
7139 	struct spdk_blob_store *bs = g_bs;
7140 	struct spdk_blob_opts opts;
7141 	struct spdk_blob *blob;
7142 	spdk_blob_id blobid;
7143 	struct spdk_io_channel *channel;
7144 	char *xattr;
7145 	size_t xattr_length;
7146 	int rc;
7147 	uint32_t page_count_clear, page_count_xattr;
7148 	uint64_t poller_iterations;
7149 	bool run_poller;
7150 
7151 	channel = spdk_bs_alloc_io_channel(bs);
7152 	SPDK_CU_ASSERT_FATAL(channel != NULL);
7153 
7154 	ut_spdk_blob_opts_init(&opts);
7155 	opts.num_clusters = 10;
7156 
7157 	blob = ut_blob_create_and_open(bs, &opts);
7158 	blobid = spdk_blob_get_id(blob);
7159 
7160 	/* Save the number of md pages used after creating the blob.
7161 	 * The count should return to this value after the xattr is removed. */
7162 	page_count_clear = spdk_bit_array_count_set(bs->used_md_pages);
7163 	SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_clear);
7164 	SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_clear);
7165 
7166 	/* Add an xattr with the maximum descriptor length so that the metadata exceeds a single page. */
7167 	xattr_length = SPDK_BS_MAX_DESC_SIZE - sizeof(struct spdk_blob_md_descriptor_xattr) -
7168 		       strlen("large_xattr");
7169 	xattr = calloc(xattr_length, sizeof(char));
7170 	SPDK_CU_ASSERT_FATAL(xattr != NULL);
7171 
7172 	rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
7173 	SPDK_CU_ASSERT_FATAL(rc == 0);
7174 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
7175 	poll_threads();
7176 	SPDK_CU_ASSERT_FATAL(g_bserrno == 0);
7177 
7178 	/* Save the amount of md pages used after adding the large xattr */
7179 	page_count_xattr = spdk_bit_array_count_set(bs->used_md_pages);
7180 	SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_xattr);
7181 	SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_xattr);
7182 
7183 	/* Add an xattr to the blob and sync it. While the sync is in progress, remove the xattr and sync again.
7184 	 * Interrupt the first sync after an increasing number of poller iterations, until it eventually completes.
7185 	 * The expectation is that after the second sync completes, no xattr remains in the metadata. */
7186 	poller_iterations = 1;
7187 	run_poller = true;
7188 	while (run_poller) {
7189 		rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
7190 		SPDK_CU_ASSERT_FATAL(rc == 0);
7191 		g_bserrno = -1;
7192 		spdk_blob_sync_md(blob, blob_op_complete, NULL);
7193 		poll_thread_times(0, poller_iterations);
7194 		if (g_bserrno == 0) {
7195 			/* The poller iteration count was high enough for the first sync to complete.
7196 			 * Verify that the blob occupies enough md pages to store the xattr. */
7197 			SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_xattr);
7198 			SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_xattr);
7199 			SPDK_CU_ASSERT_FATAL(spdk_bit_array_count_set(bs->used_md_pages) == page_count_xattr);
7200 			run_poller = false;
7201 		}
7202 		rc = spdk_blob_remove_xattr(blob, "large_xattr");
7203 		SPDK_CU_ASSERT_FATAL(rc == 0);
7204 		spdk_blob_sync_md(blob, blob_op_complete, NULL);
7205 		poll_threads();
7206 		SPDK_CU_ASSERT_FATAL(g_bserrno == 0);
7207 		SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_clear);
7208 		SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_clear);
7209 		SPDK_CU_ASSERT_FATAL(spdk_bit_array_count_set(bs->used_md_pages) == page_count_clear);
7210 
7211 		/* Reload bs and re-open blob to verify that xattr was not persisted. */
7212 		spdk_blob_close(blob, blob_op_complete, NULL);
7213 		poll_threads();
7214 		CU_ASSERT(g_bserrno == 0);
7215 
7216 		ut_bs_reload(&bs, NULL);
7217 
7218 		spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
7219 		poll_threads();
7220 		CU_ASSERT(g_bserrno == 0);
7221 		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
7222 		blob = g_blob;
7223 
7224 		rc = spdk_blob_get_xattr_value(blob, "large_xattr", (const void **)&xattr, &xattr_length);
7225 		SPDK_CU_ASSERT_FATAL(rc == -ENOENT);
7226 
7227 		poller_iterations++;
7228 		/* Stop at a high iteration count to prevent an infinite loop.
7229 		 * This value should be enough for the first md sync to complete in any case. */
7230 		SPDK_CU_ASSERT_FATAL(poller_iterations < 50);
7231 	}
7232 
7233 	free(xattr);
7234 
7235 	ut_blob_close_and_delete(bs, blob);
7236 
7237 	spdk_bs_free_io_channel(channel);
7238 	poll_threads();
7239 }
7240 
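/* Editorial note (added comment): create two snapshots of a thick-provisioned blob,
 * decouple the second one from its parent, and verify that its clusters were copied
 * (allocated and distinct from the first snapshot's clusters). Runs twice to cover both
 * deletion orders. */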
7241 static void
7242 blob_decouple_snapshot(void)
7243 {
7244 	struct spdk_blob_store *bs = g_bs;
7245 	struct spdk_blob_opts opts;
7246 	struct spdk_blob *blob, *snapshot1, *snapshot2;
7247 	struct spdk_io_channel *channel;
7248 	spdk_blob_id blobid, snapshotid;
7249 	uint64_t cluster;
7250 
7251 	for (int delete_snapshot_first = 0; delete_snapshot_first <= 1; delete_snapshot_first++) {
7252 		channel = spdk_bs_alloc_io_channel(bs);
7253 		SPDK_CU_ASSERT_FATAL(channel != NULL);
7254 
7255 		ut_spdk_blob_opts_init(&opts);
7256 		opts.num_clusters = 10;
7257 		opts.thin_provision = false;
7258 
7259 		blob = ut_blob_create_and_open(bs, &opts);
7260 		blobid = spdk_blob_get_id(blob);
7261 
7262 		/* Create first snapshot */
7263 		CU_ASSERT_EQUAL(_get_snapshots_count(bs), 0);
7264 		spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
7265 		poll_threads();
7266 		CU_ASSERT(g_bserrno == 0);
7267 		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
7268 		CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
7269 		snapshotid = g_blobid;
7270 
7271 		spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
7272 		poll_threads();
7273 		CU_ASSERT(g_bserrno == 0);
7274 		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
7275 		snapshot1 = g_blob;
7276 
7277 		/* Create the second one */
7278 		CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
7279 		spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
7280 		poll_threads();
7281 		CU_ASSERT(g_bserrno == 0);
7282 		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
7283 		CU_ASSERT_EQUAL(_get_snapshots_count(bs), 2);
7284 		snapshotid = g_blobid;
7285 
7286 		spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
7287 		poll_threads();
7288 		CU_ASSERT(g_bserrno == 0);
7289 		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
7290 		snapshot2 = g_blob;
7291 		CU_ASSERT_EQUAL(spdk_blob_get_parent_snapshot(bs, snapshot2->id), snapshot1->id);
7292 
7293 		/* Now decouple the second snapshot, forcing it to copy the written clusters */
7294 		spdk_bs_blob_decouple_parent(bs, channel, snapshot2->id, blob_op_complete, NULL);
7295 		poll_threads();
7296 		CU_ASSERT(g_bserrno == 0);
7297 
7298 		/* Verify that the snapshot has been decoupled and that the clusters have been copied */
7299 		CU_ASSERT_EQUAL(spdk_blob_get_parent_snapshot(bs, snapshot2->id), SPDK_BLOBID_INVALID);
7300 		for (cluster = 0; cluster < snapshot2->active.num_clusters; ++cluster) {
7301 			CU_ASSERT_NOT_EQUAL(snapshot2->active.clusters[cluster], 0);
7302 			CU_ASSERT_NOT_EQUAL(snapshot2->active.clusters[cluster],
7303 					    snapshot1->active.clusters[cluster]);
7304 		}
7305 
7306 		spdk_bs_free_io_channel(channel);
7307 
7308 		if (delete_snapshot_first) {
7309 			ut_blob_close_and_delete(bs, snapshot2);
7310 			ut_blob_close_and_delete(bs, snapshot1);
7311 			ut_blob_close_and_delete(bs, blob);
7312 		} else {
7313 			ut_blob_close_and_delete(bs, blob);
7314 			ut_blob_close_and_delete(bs, snapshot2);
7315 			ut_blob_close_and_delete(bs, snapshot1);
7316 		}
7317 		poll_threads();
7318 	}
7319 }
7320 
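/* Editorial note (added comment): verify spdk_blob_get_next_allocated_io_unit() and
 * spdk_blob_get_next_unallocated_io_unit() on a thin-provisioned blob after writing to
 * the first, third, and last of its five clusters. */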
7321 static void
7322 blob_seek_io_unit(void)
7323 {
7324 	struct spdk_blob_store *bs = g_bs;
7325 	struct spdk_blob *blob;
7326 	struct spdk_io_channel *channel;
7327 	struct spdk_blob_opts opts;
7328 	uint64_t free_clusters;
7329 	uint8_t payload[10 * 4096];
7330 	uint64_t offset;
7331 	uint64_t io_unit, io_units_per_cluster;
7332 
7333 	free_clusters = spdk_bs_free_cluster_count(bs);
7334 
7335 	channel = spdk_bs_alloc_io_channel(bs);
7336 	CU_ASSERT(channel != NULL);
7337 
7338 	/* Set blob as thin provisioned */
7339 	ut_spdk_blob_opts_init(&opts);
7340 	opts.thin_provision = true;
7341 
7342 	/* Create a blob */
7343 	blob = ut_blob_create_and_open(bs, &opts);
7344 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
7345 
7346 	io_units_per_cluster = bs_io_units_per_cluster(blob);
7347 
7348 	/* The blob starts with 0 clusters. Resize it to 5 clusters; being thin provisioned, they remain unallocated. */
7349 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
7350 	poll_threads();
7351 	CU_ASSERT(g_bserrno == 0);
7352 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
7353 	CU_ASSERT(blob->active.num_clusters == 5);
7354 
7355 	/* Write at the beginning of first cluster */
7356 	offset = 0;
7357 	spdk_blob_io_write(blob, channel, payload, offset, 1, blob_op_complete, NULL);
7358 	poll_threads();
7359 	CU_ASSERT(g_bserrno == 0);
7360 
7361 	io_unit = spdk_blob_get_next_allocated_io_unit(blob, 0);
7362 	CU_ASSERT(io_unit == offset);
7363 
7364 	io_unit = spdk_blob_get_next_unallocated_io_unit(blob, 0);
7365 	CU_ASSERT(io_unit == io_units_per_cluster);
7366 
7367 	/* Write in the middle of third cluster */
7368 	offset = 2 * io_units_per_cluster + io_units_per_cluster / 2;
7369 	spdk_blob_io_write(blob, channel, payload, offset, 1, blob_op_complete, NULL);
7370 	poll_threads();
7371 	CU_ASSERT(g_bserrno == 0);
7372 
7373 	io_unit = spdk_blob_get_next_allocated_io_unit(blob, io_units_per_cluster);
7374 	CU_ASSERT(io_unit == 2 * io_units_per_cluster);
7375 
7376 	io_unit = spdk_blob_get_next_unallocated_io_unit(blob, 2 * io_units_per_cluster);
7377 	CU_ASSERT(io_unit == 3 * io_units_per_cluster);
7378 
7379 	/* Write at the end of last cluster */
7380 	offset = 5 * io_units_per_cluster - 1;
7381 	spdk_blob_io_write(blob, channel, payload, offset, 1, blob_op_complete, NULL);
7382 	poll_threads();
7383 	CU_ASSERT(g_bserrno == 0);
7384 
7385 	io_unit = spdk_blob_get_next_allocated_io_unit(blob, 3 * io_units_per_cluster);
7386 	CU_ASSERT(io_unit == 4 * io_units_per_cluster);
7387 
7388 	io_unit = spdk_blob_get_next_unallocated_io_unit(blob, 4 * io_units_per_cluster);
7389 	CU_ASSERT(io_unit == UINT64_MAX);
7390 
7391 	spdk_bs_free_io_channel(channel);
7392 	poll_threads();
7393 
7394 	ut_blob_close_and_delete(bs, blob);
7395 }
7396 
7397 static void
7398 suite_bs_setup(void)
7399 {
7400 	struct spdk_bs_dev *dev;
7401 
7402 	dev = init_dev();
7403 	memset(g_dev_buffer, 0, DEV_BUFFER_SIZE);
7404 	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
7405 	poll_threads();
7406 	CU_ASSERT(g_bserrno == 0);
7407 	CU_ASSERT(g_bs != NULL);
7408 }
7409 
7410 static void
7411 suite_bs_cleanup(void)
7412 {
7413 	spdk_bs_unload(g_bs, bs_op_complete, NULL);
7414 	poll_threads();
7415 	CU_ASSERT(g_bserrno == 0);
7416 	g_bs = NULL;
7417 	memset(g_dev_buffer, 0, DEV_BUFFER_SIZE);
7418 }
7419 
7420 static struct spdk_blob *
7421 ut_blob_create_and_open(struct spdk_blob_store *bs, struct spdk_blob_opts *blob_opts)
7422 {
7423 	struct spdk_blob *blob;
7424 	struct spdk_blob_opts create_blob_opts;
7425 	spdk_blob_id blobid;
7426 
7427 	if (blob_opts == NULL) {
7428 		ut_spdk_blob_opts_init(&create_blob_opts);
7429 		blob_opts = &create_blob_opts;
7430 	}
7431 
7432 	spdk_bs_create_blob_ext(bs, blob_opts, blob_op_with_id_complete, NULL);
7433 	poll_threads();
7434 	CU_ASSERT(g_bserrno == 0);
7435 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
7436 	blobid = g_blobid;
7437 	g_blobid = -1;
7438 
7439 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
7440 	poll_threads();
7441 	CU_ASSERT(g_bserrno == 0);
7442 	CU_ASSERT(g_blob != NULL);
7443 	blob = g_blob;
7444 
7445 	g_blob = NULL;
7446 	g_bserrno = -1;
7447 
7448 	return blob;
7449 }
7450 
7451 static void
7452 ut_blob_close_and_delete(struct spdk_blob_store *bs, struct spdk_blob *blob)
7453 {
7454 	spdk_blob_id blobid = spdk_blob_get_id(blob);
7455 
7456 	spdk_blob_close(blob, blob_op_complete, NULL);
7457 	poll_threads();
7458 	CU_ASSERT(g_bserrno == 0);
7459 	g_blob = NULL;
7460 
7461 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
7462 	poll_threads();
7463 	CU_ASSERT(g_bserrno == 0);
7464 	g_bserrno = -1;
7465 }
7466 
7467 static void
7468 suite_blob_setup(void)
7469 {
7470 	suite_bs_setup();
7471 	CU_ASSERT(g_bs != NULL);
7472 
7473 	g_blob = ut_blob_create_and_open(g_bs, NULL);
7474 	CU_ASSERT(g_blob != NULL);
7475 }
7476 
7477 static void
7478 suite_blob_cleanup(void)
7479 {
7480 	ut_blob_close_and_delete(g_bs, g_blob);
7481 	CU_ASSERT(g_blob == NULL);
7482 
7483 	suite_bs_cleanup();
7484 	CU_ASSERT(g_bs == NULL);
7485 }
7486 
7487 int
7488 main(int argc, char **argv)
7489 {
7490 	CU_pSuite	suite, suite_bs, suite_blob;
7491 	unsigned int	num_failures;
7492 
7493 	CU_set_error_action(CUEA_ABORT);
7494 	CU_initialize_registry();
7495 
7496 	suite = CU_add_suite("blob", NULL, NULL);
7497 	suite_bs = CU_add_suite_with_setup_and_teardown("blob_bs", NULL, NULL,
7498 			suite_bs_setup, suite_bs_cleanup);
7499 	suite_blob = CU_add_suite_with_setup_and_teardown("blob_blob", NULL, NULL,
7500 			suite_blob_setup, suite_blob_cleanup);
7501 
7502 	CU_ADD_TEST(suite, blob_init);
7503 	CU_ADD_TEST(suite_bs, blob_open);
7504 	CU_ADD_TEST(suite_bs, blob_create);
7505 	CU_ADD_TEST(suite_bs, blob_create_loop);
7506 	CU_ADD_TEST(suite_bs, blob_create_fail);
7507 	CU_ADD_TEST(suite_bs, blob_create_internal);
7508 	CU_ADD_TEST(suite_bs, blob_create_zero_extent);
7509 	CU_ADD_TEST(suite, blob_thin_provision);
7510 	CU_ADD_TEST(suite_bs, blob_snapshot);
7511 	CU_ADD_TEST(suite_bs, blob_clone);
7512 	CU_ADD_TEST(suite_bs, blob_inflate);
7513 	CU_ADD_TEST(suite_bs, blob_delete);
7514 	CU_ADD_TEST(suite_bs, blob_resize_test);
7515 	CU_ADD_TEST(suite, blob_read_only);
7516 	CU_ADD_TEST(suite_bs, channel_ops);
7517 	CU_ADD_TEST(suite_bs, blob_super);
7518 	CU_ADD_TEST(suite_blob, blob_write);
7519 	CU_ADD_TEST(suite_blob, blob_read);
7520 	CU_ADD_TEST(suite_blob, blob_rw_verify);
7521 	CU_ADD_TEST(suite_bs, blob_rw_verify_iov);
7522 	CU_ADD_TEST(suite_blob, blob_rw_verify_iov_nomem);
7523 	CU_ADD_TEST(suite_blob, blob_rw_iov_read_only);
7524 	CU_ADD_TEST(suite_bs, blob_unmap);
7525 	CU_ADD_TEST(suite_bs, blob_iter);
7526 	CU_ADD_TEST(suite_blob, blob_xattr);
7527 	CU_ADD_TEST(suite_bs, blob_parse_md);
7528 	CU_ADD_TEST(suite, bs_load);
7529 	CU_ADD_TEST(suite_bs, bs_load_pending_removal);
7530 	CU_ADD_TEST(suite, bs_load_custom_cluster_size);
7531 	CU_ADD_TEST(suite, bs_load_after_failed_grow);
7532 	CU_ADD_TEST(suite_bs, bs_unload);
7533 	CU_ADD_TEST(suite, bs_cluster_sz);
7534 	CU_ADD_TEST(suite_bs, bs_usable_clusters);
7535 	CU_ADD_TEST(suite, bs_resize_md);
7536 	CU_ADD_TEST(suite, bs_destroy);
7537 	CU_ADD_TEST(suite, bs_type);
7538 	CU_ADD_TEST(suite, bs_super_block);
7539 	CU_ADD_TEST(suite, bs_test_recover_cluster_count);
7540 	CU_ADD_TEST(suite, bs_test_grow);
7541 	CU_ADD_TEST(suite, blob_serialize_test);
7542 	CU_ADD_TEST(suite_bs, blob_crc);
7543 	CU_ADD_TEST(suite, super_block_crc);
7544 	CU_ADD_TEST(suite_blob, blob_dirty_shutdown);
7545 	CU_ADD_TEST(suite_bs, blob_flags);
7546 	CU_ADD_TEST(suite_bs, bs_version);
7547 	CU_ADD_TEST(suite_bs, blob_set_xattrs_test);
7548 	CU_ADD_TEST(suite_bs, blob_thin_prov_alloc);
7549 	CU_ADD_TEST(suite_bs, blob_insert_cluster_msg_test);
7550 	CU_ADD_TEST(suite_bs, blob_thin_prov_rw);
7551 	CU_ADD_TEST(suite, blob_thin_prov_write_count_io);
7552 	CU_ADD_TEST(suite_bs, blob_thin_prov_rle);
7553 	CU_ADD_TEST(suite_bs, blob_thin_prov_rw_iov);
7554 	CU_ADD_TEST(suite, bs_load_iter_test);
7555 	CU_ADD_TEST(suite_bs, blob_snapshot_rw);
7556 	CU_ADD_TEST(suite_bs, blob_snapshot_rw_iov);
7557 	CU_ADD_TEST(suite, blob_relations);
7558 	CU_ADD_TEST(suite, blob_relations2);
7559 	CU_ADD_TEST(suite, blob_relations3);
7560 	CU_ADD_TEST(suite, blobstore_clean_power_failure);
7561 	CU_ADD_TEST(suite, blob_delete_snapshot_power_failure);
7562 	CU_ADD_TEST(suite, blob_create_snapshot_power_failure);
7563 	CU_ADD_TEST(suite_bs, blob_inflate_rw);
7564 	CU_ADD_TEST(suite_bs, blob_snapshot_freeze_io);
7565 	CU_ADD_TEST(suite_bs, blob_operation_split_rw);
7566 	CU_ADD_TEST(suite_bs, blob_operation_split_rw_iov);
7567 	CU_ADD_TEST(suite, blob_io_unit);
7568 	CU_ADD_TEST(suite, blob_io_unit_compatibility);
7569 	CU_ADD_TEST(suite_bs, blob_simultaneous_operations);
7570 	CU_ADD_TEST(suite_bs, blob_persist_test);
7571 	CU_ADD_TEST(suite_bs, blob_decouple_snapshot);
7572 	CU_ADD_TEST(suite_bs, blob_seek_io_unit);
7573 
7574 	allocate_threads(2);
7575 	set_thread(0);
7576 
7577 	g_dev_buffer = calloc(1, DEV_BUFFER_SIZE);
7578 
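	/* Editorial note (added comment): run the whole registry twice, once with the extent
	 * table feature disabled and once with it enabled, accumulating failures from both passes. */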
7579 	CU_basic_set_mode(CU_BRM_VERBOSE);
7580 	g_use_extent_table = false;
7581 	CU_basic_run_tests();
7582 	num_failures = CU_get_number_of_failures();
7583 	g_use_extent_table = true;
7584 	CU_basic_run_tests();
7585 	num_failures += CU_get_number_of_failures();
7586 	CU_cleanup_registry();
7587 
7588 	free(g_dev_buffer);
7589 
7590 	free_threads();
7591 
7592 	return num_failures;
7593 }
7594