xref: /spdk/test/unit/lib/blob/blob.c/blob_ut.c (revision a0d24145bf3d795cf89adc414320b138fae480ab)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2017 Intel Corporation.
3  *   All rights reserved.
4  *   Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
5  */
6 
7 #include "spdk/stdinc.h"
8 
9 #include "spdk_cunit.h"
10 #include "spdk/blob.h"
11 #include "spdk/string.h"
12 
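/*
 * The blobstore implementation files below are compiled directly into this
 * test binary so the tests can reach internal helpers and structures
 * (e.g. bs_create_blob(), blob_get_xattr_value(), struct spdk_blob fields).
 */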
13 #include "common/lib/ut_multithread.c"
14 #include "../bs_dev_common.c"
15 #include "blob/blobstore.c"
16 #include "blob/request.c"
17 #include "blob/zeroes.c"
18 #include "blob/blob_bs_dev.c"
19 #include "esnap_dev.c"
20 
21 struct spdk_blob_store *g_bs;
22 spdk_blob_id g_blobid;
23 struct spdk_blob *g_blob, *g_blob2;
24 int g_bserrno, g_bserrno2;
25 struct spdk_xattr_names *g_names;
26 int g_done;
27 char *g_xattr_names[] = {"first", "second", "third"};
28 char *g_xattr_values[] = {"one", "two", "three"};
29 uint64_t g_ctx = 1729;
30 bool g_use_extent_table = false;
31 
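/*
 * Local copy of the version-1 on-disk super block layout, presumably kept so
 * tests can hand-craft old-format metadata and exercise the load/upgrade
 * path; the static assert below pins it to the expected 4 KiB size.
 */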
32 struct spdk_bs_super_block_ver1 {
33 	uint8_t		signature[8];
34 	uint32_t        version;
35 	uint32_t        length;
36 	uint32_t	clean; /* If there was a clean shutdown, this is 1. */
37 	spdk_blob_id	super_blob;
38 
39 	uint32_t	cluster_size; /* In bytes */
40 
41 	uint32_t	used_page_mask_start; /* Offset from beginning of disk, in pages */
42 	uint32_t	used_page_mask_len; /* Count, in pages */
43 
44 	uint32_t	used_cluster_mask_start; /* Offset from beginning of disk, in pages */
45 	uint32_t	used_cluster_mask_len; /* Count, in pages */
46 
47 	uint32_t	md_start; /* Offset from beginning of disk, in pages */
48 	uint32_t	md_len; /* Count, in pages */
49 
50 	uint8_t		reserved[4036];
51 	uint32_t	crc;
52 } __attribute__((packed));
53 SPDK_STATIC_ASSERT(sizeof(struct spdk_bs_super_block_ver1) == 0x1000, "Invalid super block size");
54 
55 static struct spdk_blob *ut_blob_create_and_open(struct spdk_blob_store *bs,
56 		struct spdk_blob_opts *blob_opts);
57 static void ut_blob_close_and_delete(struct spdk_blob_store *bs, struct spdk_blob *blob);
58 static void suite_blob_setup(void);
59 static void suite_blob_cleanup(void);
60 
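/*
 * Stub out spdk_memory_domain_memzero(); blobstore code paths that zero
 * memory-domain buffers can run in the unit test, with the stub simply
 * reporting success, roughly as if it were:
 *   int spdk_memory_domain_memzero(...) { return 0; }
 */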
61 DEFINE_STUB(spdk_memory_domain_memzero, int, (struct spdk_memory_domain *src_domain,
62 		void *src_domain_ctx, struct iovec *iov, uint32_t iovcnt, void (*cpl_cb)(void *, int),
63 		void *cpl_cb_arg), 0);
64 
65 static void
66 _get_xattr_value(void *arg, const char *name,
67 		 const void **value, size_t *value_len)
68 {
69 	uint64_t i;
70 
71 	SPDK_CU_ASSERT_FATAL(value_len != NULL);
72 	SPDK_CU_ASSERT_FATAL(value != NULL);
73 	CU_ASSERT(arg == &g_ctx);
74 
75 	for (i = 0; i < SPDK_COUNTOF(g_xattr_names); i++) {
76 		if (!strcmp(name, g_xattr_names[i])) {
77 			*value_len = strlen(g_xattr_values[i]);
78 			*value = g_xattr_values[i];
79 			break;
80 		}
81 	}
82 }
83 
84 static void
85 _get_xattr_value_null(void *arg, const char *name,
86 		      const void **value, size_t *value_len)
87 {
88 	SPDK_CU_ASSERT_FATAL(value_len != NULL);
89 	SPDK_CU_ASSERT_FATAL(value != NULL);
90 	CU_ASSERT(arg == NULL);
91 
92 	*value_len = 0;
93 	*value = NULL;
94 }
95 
96 static int
97 _get_snapshots_count(struct spdk_blob_store *bs)
98 {
99 	struct spdk_blob_list *snapshot = NULL;
100 	int count = 0;
101 
102 	TAILQ_FOREACH(snapshot, &bs->snapshots, link) {
103 		count += 1;
104 	}
105 
106 	return count;
107 }
108 
109 static void
110 ut_spdk_blob_opts_init(struct spdk_blob_opts *opts)
111 {
112 	spdk_blob_opts_init(opts, sizeof(*opts));
113 	opts->use_extent_table = g_use_extent_table;
114 }
115 
116 static void
117 bs_op_complete(void *cb_arg, int bserrno)
118 {
119 	g_bserrno = bserrno;
120 }
121 
122 static void
123 bs_op_with_handle_complete(void *cb_arg, struct spdk_blob_store *bs,
124 			   int bserrno)
125 {
126 	g_bs = bs;
127 	g_bserrno = bserrno;
128 }
129 
130 static void
131 blob_op_complete(void *cb_arg, int bserrno)
132 {
133 	g_bserrno = bserrno;
134 }
135 
136 static void
137 blob_op_with_id_complete(void *cb_arg, spdk_blob_id blobid, int bserrno)
138 {
139 	g_blobid = blobid;
140 	g_bserrno = bserrno;
141 }
142 
143 static void
144 blob_op_with_handle_complete(void *cb_arg, struct spdk_blob *blb, int bserrno)
145 {
146 	g_blob = blb;
147 	g_bserrno = bserrno;
148 }
149 
150 static void
151 blob_op_with_handle_complete2(void *cb_arg, struct spdk_blob *blob, int bserrno)
152 {
153 	if (g_blob == NULL) {
154 		g_blob = blob;
155 		g_bserrno = bserrno;
156 	} else {
157 		g_blob2 = blob;
158 		g_bserrno2 = bserrno;
159 	}
160 }
161 
162 static void
163 ut_bs_reload(struct spdk_blob_store **bs, struct spdk_bs_opts *opts)
164 {
165 	struct spdk_bs_dev *dev;
166 
167 	/* Unload the blob store */
168 	spdk_bs_unload(*bs, bs_op_complete, NULL);
169 	poll_threads();
170 	CU_ASSERT(g_bserrno == 0);
171 
172 	dev = init_dev();
173 	/* Load an existing blob store */
174 	spdk_bs_load(dev, opts, bs_op_with_handle_complete, NULL);
175 	poll_threads();
176 	CU_ASSERT(g_bserrno == 0);
177 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
178 	*bs = g_bs;
179 
180 	g_bserrno = -1;
181 }
182 
183 static void
184 ut_bs_dirty_load(struct spdk_blob_store **bs, struct spdk_bs_opts *opts)
185 {
186 	struct spdk_bs_dev *dev;
187 
188 	/* Dirty shutdown */
189 	bs_free(*bs);
190 
191 	dev = init_dev();
192 	/* Load an existing blob store */
193 	spdk_bs_load(dev, opts, bs_op_with_handle_complete, NULL);
194 	poll_threads();
195 	CU_ASSERT(g_bserrno == 0);
196 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
197 	*bs = g_bs;
198 
199 	g_bserrno = -1;
200 }
201 
202 static void
203 blob_init(void)
204 {
205 	struct spdk_blob_store *bs;
206 	struct spdk_bs_dev *dev;
207 
208 	dev = init_dev();
209 
210 	/* should fail for an unsupported blocklen */
211 	dev->blocklen = 500;
212 	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
213 	poll_threads();
214 	CU_ASSERT(g_bserrno == -EINVAL);
215 
216 	dev = init_dev();
217 	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
218 	poll_threads();
219 	CU_ASSERT(g_bserrno == 0);
220 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
221 	bs = g_bs;
222 
223 	spdk_bs_unload(bs, bs_op_complete, NULL);
224 	poll_threads();
225 	CU_ASSERT(g_bserrno == 0);
226 	g_bs = NULL;
227 }
228 
229 static void
230 blob_super(void)
231 {
232 	struct spdk_blob_store *bs = g_bs;
233 	spdk_blob_id blobid;
234 	struct spdk_blob_opts blob_opts;
235 
236 	/* Get the super blob without having set one */
237 	spdk_bs_get_super(bs, blob_op_with_id_complete, NULL);
238 	poll_threads();
239 	CU_ASSERT(g_bserrno == -ENOENT);
240 	CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID);
241 
242 	/* Create a blob */
243 	ut_spdk_blob_opts_init(&blob_opts);
244 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
245 	poll_threads();
246 	CU_ASSERT(g_bserrno == 0);
247 	CU_ASSERT(g_blobid !=  SPDK_BLOBID_INVALID);
248 	blobid = g_blobid;
249 
250 	/* Set the blob as the super blob */
251 	spdk_bs_set_super(bs, blobid, blob_op_complete, NULL);
252 	poll_threads();
253 	CU_ASSERT(g_bserrno == 0);
254 
255 	/* Get the super blob */
256 	spdk_bs_get_super(bs, blob_op_with_id_complete, NULL);
257 	poll_threads();
258 	CU_ASSERT(g_bserrno == 0);
259 	CU_ASSERT(blobid == g_blobid);
260 }
261 
262 static void
263 blob_open(void)
264 {
265 	struct spdk_blob_store *bs = g_bs;
266 	struct spdk_blob *blob;
267 	struct spdk_blob_opts blob_opts;
268 	spdk_blob_id blobid, blobid2;
269 
270 	ut_spdk_blob_opts_init(&blob_opts);
271 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
272 	poll_threads();
273 	CU_ASSERT(g_bserrno == 0);
274 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
275 	blobid = g_blobid;
276 
277 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
278 	poll_threads();
279 	CU_ASSERT(g_bserrno == 0);
280 	CU_ASSERT(g_blob != NULL);
281 	blob = g_blob;
282 
283 	blobid2 = spdk_blob_get_id(blob);
284 	CU_ASSERT(blobid == blobid2);
285 
286 	/* Try to open the blob again.  It should return success. */
287 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
288 	poll_threads();
289 	CU_ASSERT(g_bserrno == 0);
290 	CU_ASSERT(blob == g_blob);
291 
292 	spdk_blob_close(blob, blob_op_complete, NULL);
293 	poll_threads();
294 	CU_ASSERT(g_bserrno == 0);
295 
296 	/*
297 	 * Close the blob a second time, releasing the second reference.  This
298 	 *  should succeed.
299 	 */
300 	blob = g_blob;
301 	spdk_blob_close(blob, blob_op_complete, NULL);
302 	poll_threads();
303 	CU_ASSERT(g_bserrno == 0);
304 
305 	/*
306 	 * Try to open the blob again.  It should succeed.  This tests the case
307 	 *  where the blob is opened, closed, then opened again.
308 	 */
309 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
310 	poll_threads();
311 	CU_ASSERT(g_bserrno == 0);
312 	CU_ASSERT(g_blob != NULL);
313 	blob = g_blob;
314 	spdk_blob_close(blob, blob_op_complete, NULL);
315 	poll_threads();
316 	CU_ASSERT(g_bserrno == 0);
317 
318 	/* Try to open the blob twice in succession.  This should return the same
319 	 * blob object.
320 	 */
321 	g_blob = NULL;
322 	g_blob2 = NULL;
323 	g_bserrno = -1;
324 	g_bserrno2 = -1;
325 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete2, NULL);
326 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete2, NULL);
327 	poll_threads();
328 	CU_ASSERT(g_bserrno == 0);
329 	CU_ASSERT(g_bserrno2 == 0);
330 	CU_ASSERT(g_blob != NULL);
331 	CU_ASSERT(g_blob2 != NULL);
332 	CU_ASSERT(g_blob == g_blob2);
333 
334 	g_bserrno = -1;
335 	spdk_blob_close(g_blob, blob_op_complete, NULL);
336 	poll_threads();
337 	CU_ASSERT(g_bserrno == 0);
338 
339 	ut_blob_close_and_delete(bs, g_blob);
340 }
341 
342 static void
343 blob_create(void)
344 {
345 	struct spdk_blob_store *bs = g_bs;
346 	struct spdk_blob *blob;
347 	struct spdk_blob_opts opts;
348 	spdk_blob_id blobid;
349 
350 	/* Create blob with 10 clusters */
351 
352 	ut_spdk_blob_opts_init(&opts);
353 	opts.num_clusters = 10;
354 
355 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
356 	poll_threads();
357 	CU_ASSERT(g_bserrno == 0);
358 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
359 	blobid = g_blobid;
360 
361 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
362 	poll_threads();
363 	CU_ASSERT(g_bserrno == 0);
364 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
365 	blob = g_blob;
366 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
367 
368 	spdk_blob_close(blob, blob_op_complete, NULL);
369 	poll_threads();
370 	CU_ASSERT(g_bserrno == 0);
371 
372 	/* Create blob with 0 clusters */
373 
374 	ut_spdk_blob_opts_init(&opts);
375 	opts.num_clusters = 0;
376 
377 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
378 	poll_threads();
379 	CU_ASSERT(g_bserrno == 0);
380 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
381 	blobid = g_blobid;
382 
383 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
384 	poll_threads();
385 	CU_ASSERT(g_bserrno == 0);
386 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
387 	blob = g_blob;
388 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
389 
390 	spdk_blob_close(blob, blob_op_complete, NULL);
391 	poll_threads();
392 	CU_ASSERT(g_bserrno == 0);
393 
394 	/* Create blob with default options (opts == NULL) */
395 
396 	spdk_bs_create_blob_ext(bs, NULL, blob_op_with_id_complete, NULL);
397 	poll_threads();
398 	CU_ASSERT(g_bserrno == 0);
399 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
400 	blobid = g_blobid;
401 
402 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
403 	poll_threads();
404 	CU_ASSERT(g_bserrno == 0);
405 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
406 	blob = g_blob;
407 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
408 
409 	spdk_blob_close(blob, blob_op_complete, NULL);
410 	poll_threads();
411 	CU_ASSERT(g_bserrno == 0);
412 
413 	/* Try to create blob with size larger than blobstore */
414 
415 	ut_spdk_blob_opts_init(&opts);
416 	opts.num_clusters = bs->total_clusters + 1;
417 
418 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
419 	poll_threads();
420 	CU_ASSERT(g_bserrno == -ENOSPC);
421 }
422 
423 static void
424 blob_create_zero_extent(void)
425 {
426 	struct spdk_blob_store *bs = g_bs;
427 	struct spdk_blob *blob;
428 	spdk_blob_id blobid;
429 
430 	/* Create blob with default options (opts == NULL) */
431 	spdk_bs_create_blob_ext(bs, NULL, blob_op_with_id_complete, NULL);
432 	poll_threads();
433 	CU_ASSERT(g_bserrno == 0);
434 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
435 	blobid = g_blobid;
436 
437 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
438 	poll_threads();
439 	CU_ASSERT(g_bserrno == 0);
440 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
441 	blob = g_blob;
442 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
443 	CU_ASSERT(blob->extent_table_found == true);
444 	CU_ASSERT(blob->active.extent_pages_array_size == 0);
445 	CU_ASSERT(blob->active.extent_pages == NULL);
446 
447 	spdk_blob_close(blob, blob_op_complete, NULL);
448 	poll_threads();
449 	CU_ASSERT(g_bserrno == 0);
450 
451 	/* Create blob with NULL internal options  */
452 	bs_create_blob(bs, NULL, NULL, blob_op_with_id_complete, NULL);
453 	poll_threads();
454 	CU_ASSERT(g_bserrno == 0);
455 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
456 	blobid = g_blobid;
457 
458 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
459 	poll_threads();
460 	CU_ASSERT(g_bserrno == 0);
461 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
462 	blob = g_blob;
463 	CU_ASSERT(TAILQ_FIRST(&blob->xattrs_internal) == NULL);
464 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
465 	CU_ASSERT(blob->extent_table_found == true);
466 	CU_ASSERT(blob->active.extent_pages_array_size == 0);
467 	CU_ASSERT(blob->active.extent_pages == NULL);
468 
469 	spdk_blob_close(blob, blob_op_complete, NULL);
470 	poll_threads();
471 	CU_ASSERT(g_bserrno == 0);
472 }
473 
474 /*
475  * Create and delete one blob in a loop over and over again.  This helps ensure
476  * that the internal bit masks tracking used clusters and md_pages are being
477  * tracked correctly.
478  */
479 static void
480 blob_create_loop(void)
481 {
482 	struct spdk_blob_store *bs = g_bs;
483 	struct spdk_blob_opts opts;
484 	uint32_t i, loop_count;
485 
486 	loop_count = 4 * spdk_max(spdk_bit_array_capacity(bs->used_md_pages),
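	/* Cycle through the md-page and cluster tracking pools several times, so
	 * that any entry leaked by a create/delete pair would eventually exhaust
	 * a pool and make the loop fail. */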
487 				  spdk_bit_pool_capacity(bs->used_clusters));
488 
489 	for (i = 0; i < loop_count; i++) {
490 		ut_spdk_blob_opts_init(&opts);
491 		opts.num_clusters = 1;
492 		g_bserrno = -1;
493 		g_blobid = SPDK_BLOBID_INVALID;
494 		spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
495 		poll_threads();
496 		CU_ASSERT(g_bserrno == 0);
497 		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
498 		spdk_bs_delete_blob(bs, g_blobid, blob_op_complete, NULL);
499 		poll_threads();
500 		CU_ASSERT(g_bserrno == 0);
501 	}
502 }
503 
504 static void
505 blob_create_fail(void)
506 {
507 	struct spdk_blob_store *bs = g_bs;
508 	struct spdk_blob_opts opts;
509 	spdk_blob_id blobid;
510 	uint32_t used_blobids_count = spdk_bit_array_count_set(bs->used_blobids);
511 	uint32_t used_md_pages_count = spdk_bit_array_count_set(bs->used_md_pages);
512 
513 	/* NULL callback */
514 	ut_spdk_blob_opts_init(&opts);
515 	opts.xattrs.names = g_xattr_names;
516 	opts.xattrs.get_value = NULL;
517 	opts.xattrs.count = 1;
518 	opts.xattrs.ctx = &g_ctx;
519 
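	/* The next blob would be assigned the first free md page as its id;
	 * remember it so we can check below that the failed create left nothing
	 * behind under that id. */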
520 	blobid = spdk_bit_array_find_first_clear(bs->used_md_pages, 0);
521 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
522 	poll_threads();
523 	CU_ASSERT(g_bserrno == -EINVAL);
524 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
525 	CU_ASSERT(spdk_bit_array_count_set(bs->used_blobids) == used_blobids_count);
526 	CU_ASSERT(spdk_bit_array_count_set(bs->used_md_pages) == used_md_pages_count);
527 
528 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
529 	poll_threads();
530 	CU_ASSERT(g_bserrno == -ENOENT);
531 	SPDK_CU_ASSERT_FATAL(g_blob == NULL);
532 
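	/* Reload and confirm that the failed create left no trace in the
	 * persisted metadata either. */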
533 	ut_bs_reload(&bs, NULL);
534 	CU_ASSERT(spdk_bit_array_count_set(bs->used_blobids) == used_blobids_count);
535 	CU_ASSERT(spdk_bit_array_count_set(bs->used_md_pages) == used_md_pages_count);
536 
537 	spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL);
538 	poll_threads();
539 	CU_ASSERT(g_blob == NULL);
540 	CU_ASSERT(g_bserrno == -ENOENT);
541 }
542 
543 static void
544 blob_create_internal(void)
545 {
546 	struct spdk_blob_store *bs = g_bs;
547 	struct spdk_blob *blob;
548 	struct spdk_blob_opts opts;
549 	struct spdk_blob_xattr_opts internal_xattrs;
550 	const void *value;
551 	size_t value_len;
552 	spdk_blob_id blobid;
553 	int rc;
554 
555 	/* Create blob with custom xattrs */
556 
557 	ut_spdk_blob_opts_init(&opts);
558 	blob_xattrs_init(&internal_xattrs);
559 	internal_xattrs.count = 3;
560 	internal_xattrs.names = g_xattr_names;
561 	internal_xattrs.get_value = _get_xattr_value;
562 	internal_xattrs.ctx = &g_ctx;
563 
564 	bs_create_blob(bs, &opts, &internal_xattrs, blob_op_with_id_complete, NULL);
565 	poll_threads();
566 	CU_ASSERT(g_bserrno == 0);
567 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
568 	blobid = g_blobid;
569 
570 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
571 	poll_threads();
572 	CU_ASSERT(g_bserrno == 0);
573 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
574 	blob = g_blob;
575 
576 	rc = blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len, true);
577 	CU_ASSERT(rc == 0);
578 	SPDK_CU_ASSERT_FATAL(value != NULL);
579 	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
580 	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);
581 
582 	rc = blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len, true);
583 	CU_ASSERT(rc == 0);
584 	SPDK_CU_ASSERT_FATAL(value != NULL);
585 	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
586 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);
587 
588 	rc = blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len, true);
589 	CU_ASSERT(rc == 0);
590 	SPDK_CU_ASSERT_FATAL(value != NULL);
591 	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
592 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);
593 
594 	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len);
595 	CU_ASSERT(rc != 0);
596 
597 	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len);
598 	CU_ASSERT(rc != 0);
599 
600 	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len);
601 	CU_ASSERT(rc != 0);
602 
603 	spdk_blob_close(blob, blob_op_complete, NULL);
604 	poll_threads();
605 	CU_ASSERT(g_bserrno == 0);
606 
607 	/* Create blob with NULL internal options  */
608 
609 	bs_create_blob(bs, NULL, NULL, blob_op_with_id_complete, NULL);
610 	poll_threads();
611 	CU_ASSERT(g_bserrno == 0);
612 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
613 	blobid = g_blobid;
614 
615 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
616 	poll_threads();
617 	CU_ASSERT(g_bserrno == 0);
618 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
619 	CU_ASSERT(TAILQ_FIRST(&g_blob->xattrs_internal) == NULL);
620 	CU_ASSERT(spdk_blob_get_num_clusters(g_blob) == 0);
621 
622 	blob = g_blob;
623 
624 	spdk_blob_close(blob, blob_op_complete, NULL);
625 	poll_threads();
626 	CU_ASSERT(g_bserrno == 0);
627 }
628 
629 static void
630 blob_thin_provision(void)
631 {
632 	struct spdk_blob_store *bs;
633 	struct spdk_bs_dev *dev;
634 	struct spdk_blob *blob;
635 	struct spdk_blob_opts opts;
636 	struct spdk_bs_opts bs_opts;
637 	spdk_blob_id blobid;
638 
639 	dev = init_dev();
640 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
641 	snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");
642 
643 	/* Initialize a new blob store */
644 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
645 	poll_threads();
646 	CU_ASSERT(g_bserrno == 0);
647 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
648 
649 	bs = g_bs;
650 
651 	/* Create blob with thin provisioning enabled */
652 
653 	ut_spdk_blob_opts_init(&opts);
654 	opts.thin_provision = true;
655 	opts.num_clusters = 10;
656 
657 	blob = ut_blob_create_and_open(bs, &opts);
658 	blobid = spdk_blob_get_id(blob);
659 	CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
660 	/* When a thin-provisioned blob is created with num_clusters set, no
661 	 * clusters are allocated. Without the extent table nothing else is
662 	 * allocated either; with it, the extent-page bookkeeping is set up. */
663 	if (blob->extent_table_found == true) {
664 		CU_ASSERT(blob->active.extent_pages_array_size > 0);
665 		CU_ASSERT(blob->active.extent_pages != NULL);
666 	} else {
667 		CU_ASSERT(blob->active.extent_pages_array_size == 0);
668 		CU_ASSERT(blob->active.extent_pages == NULL);
669 	}
670 
671 	spdk_blob_close(blob, blob_op_complete, NULL);
672 	CU_ASSERT(g_bserrno == 0);
673 
674 	/* Do not shut down cleanly.  This makes sure that when we load again
675 	 *  and try to recover a valid used_cluster map, the blobstore will
676 	 *  ignore clusters with index 0 since those are unallocated.
677 	 */
678 	ut_bs_dirty_load(&bs, &bs_opts);
679 
680 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
681 	poll_threads();
682 	CU_ASSERT(g_bserrno == 0);
683 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
684 	blob = g_blob;
685 	CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
686 
687 	ut_blob_close_and_delete(bs, blob);
688 
689 	spdk_bs_unload(bs, bs_op_complete, NULL);
690 	poll_threads();
691 	CU_ASSERT(g_bserrno == 0);
692 	g_bs = NULL;
693 }
694 
695 static void
696 blob_snapshot(void)
697 {
698 	struct spdk_blob_store *bs = g_bs;
699 	struct spdk_blob *blob;
700 	struct spdk_blob *snapshot, *snapshot2;
701 	struct spdk_blob_bs_dev *blob_bs_dev;
702 	struct spdk_blob_opts opts;
703 	struct spdk_blob_xattr_opts xattrs;
704 	spdk_blob_id blobid;
705 	spdk_blob_id snapshotid;
706 	spdk_blob_id snapshotid2;
707 	const void *value;
708 	size_t value_len;
709 	int rc;
710 	spdk_blob_id ids[2];
711 	size_t count;
712 
713 	/* Create blob with 10 clusters */
714 	ut_spdk_blob_opts_init(&opts);
715 	opts.num_clusters = 10;
716 
717 	blob = ut_blob_create_and_open(bs, &opts);
718 	blobid = spdk_blob_get_id(blob);
719 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
720 
721 	/* Create snapshot from blob */
722 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 0);
723 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
724 	poll_threads();
725 	CU_ASSERT(g_bserrno == 0);
726 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
727 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
728 	snapshotid = g_blobid;
729 
730 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
731 	poll_threads();
732 	CU_ASSERT(g_bserrno == 0);
733 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
734 	snapshot = g_blob;
735 	CU_ASSERT(snapshot->data_ro == true);
736 	CU_ASSERT(snapshot->md_ro == true);
737 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10);
738 
739 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
740 	CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
741 	CU_ASSERT(spdk_mem_all_zero(blob->active.clusters,
742 				    blob->active.num_clusters * sizeof(blob->active.clusters[0])));
743 
744 	/* Try to create snapshot from clone with xattrs */
745 	xattrs.names = g_xattr_names;
746 	xattrs.get_value = _get_xattr_value;
747 	xattrs.count = 3;
748 	xattrs.ctx = &g_ctx;
749 	spdk_bs_create_snapshot(bs, blobid, &xattrs, blob_op_with_id_complete, NULL);
750 	poll_threads();
751 	CU_ASSERT(g_bserrno == 0);
752 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
753 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 2);
754 	snapshotid2 = g_blobid;
755 
756 	spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL);
757 	CU_ASSERT(g_bserrno == 0);
758 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
759 	snapshot2 = g_blob;
760 	CU_ASSERT(snapshot2->data_ro == true);
761 	CU_ASSERT(snapshot2->md_ro == true);
762 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot2) == 10);
763 
764 	/* Confirm that blob is backed by snapshot2 and snapshot2 is backed by snapshot */
765 	CU_ASSERT(snapshot->back_bs_dev == NULL);
766 	SPDK_CU_ASSERT_FATAL(blob->back_bs_dev != NULL);
767 	SPDK_CU_ASSERT_FATAL(snapshot2->back_bs_dev != NULL);
768 
769 	blob_bs_dev = (struct spdk_blob_bs_dev *)blob->back_bs_dev;
770 	CU_ASSERT(blob_bs_dev->blob == snapshot2);
771 
772 	blob_bs_dev = (struct spdk_blob_bs_dev *)snapshot2->back_bs_dev;
773 	CU_ASSERT(blob_bs_dev->blob == snapshot);
774 
775 	rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[0], &value, &value_len);
776 	CU_ASSERT(rc == 0);
777 	SPDK_CU_ASSERT_FATAL(value != NULL);
778 	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
779 	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);
780 
781 	rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[1], &value, &value_len);
782 	CU_ASSERT(rc == 0);
783 	SPDK_CU_ASSERT_FATAL(value != NULL);
784 	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
785 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);
786 
787 	rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[2], &value, &value_len);
788 	CU_ASSERT(rc == 0);
789 	SPDK_CU_ASSERT_FATAL(value != NULL);
790 	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
791 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);
792 
793 	/* Confirm that blob is clone of snapshot2, and snapshot2 is clone of snapshot */
794 	count = 2;
795 	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0);
796 	CU_ASSERT(count == 1);
797 	CU_ASSERT(ids[0] == blobid);
798 
799 	count = 2;
800 	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);
801 	CU_ASSERT(count == 1);
802 	CU_ASSERT(ids[0] == snapshotid2);
803 
804 	/* Try to create snapshot from snapshot */
805 	spdk_bs_create_snapshot(bs, snapshotid, NULL, blob_op_with_id_complete, NULL);
806 	poll_threads();
807 	CU_ASSERT(g_bserrno == -EINVAL);
808 	CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID);
809 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 2);
810 
811 	/* Delete blob and confirm that it is no longer on snapshot2 clone list */
812 	ut_blob_close_and_delete(bs, blob);
813 	count = 2;
814 	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0);
815 	CU_ASSERT(count == 0);
816 
817 	/* Delete snapshot2 and confirm that it is no longer on snapshot clone list */
818 	ut_blob_close_and_delete(bs, snapshot2);
819 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
820 	count = 2;
821 	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0);
822 	CU_ASSERT(count == 0);
823 
824 	ut_blob_close_and_delete(bs, snapshot);
825 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 0);
826 }
827 
828 static void
829 blob_snapshot_freeze_io(void)
830 {
831 	struct spdk_io_channel *channel;
832 	struct spdk_bs_channel *bs_channel;
833 	struct spdk_blob_store *bs = g_bs;
834 	struct spdk_blob *blob;
835 	struct spdk_blob_opts opts;
836 	spdk_blob_id blobid;
837 	uint32_t num_of_pages = 10;
838 	uint8_t payload_read[num_of_pages * SPDK_BS_PAGE_SIZE];
839 	uint8_t payload_write[num_of_pages * SPDK_BS_PAGE_SIZE];
840 	uint8_t payload_zero[num_of_pages * SPDK_BS_PAGE_SIZE];
841 
842 	memset(payload_write, 0xE5, sizeof(payload_write));
843 	memset(payload_read, 0x00, sizeof(payload_read));
844 	memset(payload_zero, 0x00, sizeof(payload_zero));
845 
846 	/* Test freeze I/O during snapshot */
847 	channel = spdk_bs_alloc_io_channel(bs);
848 	bs_channel = spdk_io_channel_get_ctx(channel);
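	/* Peek at the internal bs_channel so the test can inspect queued_io directly */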
849 
850 	/* Create blob with 10 clusters */
851 	ut_spdk_blob_opts_init(&opts);
852 	opts.num_clusters = 10;
853 	opts.thin_provision = false;
854 
855 	blob = ut_blob_create_and_open(bs, &opts);
856 	blobid = spdk_blob_get_id(blob);
857 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
858 
859 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
860 
861 	/* This is implementation specific.
862 	 * The blob's frozen_refcnt is bumped by the snapshot freeze completion
863 	 * callback. Four async I/O operations happen before that. */
864 	poll_thread_times(0, 5);
865 
866 	CU_ASSERT(TAILQ_EMPTY(&bs_channel->queued_io));
867 
868 	/* Blob I/O should be frozen here */
869 	CU_ASSERT(blob->frozen_refcnt == 1);
870 
871 	/* Write to the blob */
872 	spdk_blob_io_write(blob, channel, payload_write, 0, num_of_pages, blob_op_complete, NULL);
873 
874 	/* Verify that I/O is queued */
875 	CU_ASSERT(!TAILQ_EMPTY(&bs_channel->queued_io));
876 	/* Verify that the payload is not yet on disk; at this point the blob and snapshot have already swapped cluster maps */
877 	CU_ASSERT(blob->active.clusters[0] == 0);
878 
879 	/* Finish all operations including spdk_bs_create_snapshot */
880 	poll_threads();
881 
882 	/* Verify snapshot */
883 	CU_ASSERT(g_bserrno == 0);
884 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
885 
886 	/* Verify that blob has unset frozen_io */
887 	CU_ASSERT(blob->frozen_refcnt == 0);
888 
889 	/* Verify that postponed I/O completed successfully by comparing payload */
890 	spdk_blob_io_read(blob, channel, payload_read, 0, num_of_pages, blob_op_complete, NULL);
891 	poll_threads();
892 	CU_ASSERT(g_bserrno == 0);
893 	CU_ASSERT(memcmp(payload_write, payload_read, num_of_pages * SPDK_BS_PAGE_SIZE) == 0);
894 
895 	spdk_bs_free_io_channel(channel);
896 	poll_threads();
897 
898 	ut_blob_close_and_delete(bs, blob);
899 }
900 
901 static void
902 blob_clone(void)
903 {
904 	struct spdk_blob_store *bs = g_bs;
905 	struct spdk_blob_opts opts;
906 	struct spdk_blob *blob, *snapshot, *clone;
907 	spdk_blob_id blobid, cloneid, snapshotid;
908 	struct spdk_blob_xattr_opts xattrs;
909 	const void *value;
910 	size_t value_len;
911 	int rc;
912 
913 	/* Create blob with 10 clusters */
914 
915 	ut_spdk_blob_opts_init(&opts);
916 	opts.num_clusters = 10;
917 
918 	blob = ut_blob_create_and_open(bs, &opts);
919 	blobid = spdk_blob_get_id(blob);
920 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
921 
922 	/* Create snapshot */
923 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
924 	poll_threads();
925 	CU_ASSERT(g_bserrno == 0);
926 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
927 	snapshotid = g_blobid;
928 
929 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
930 	poll_threads();
931 	CU_ASSERT(g_bserrno == 0);
932 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
933 	snapshot = g_blob;
934 	CU_ASSERT(snapshot->data_ro == true);
935 	CU_ASSERT(snapshot->md_ro == true);
936 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10);
937 
938 	spdk_blob_close(snapshot, blob_op_complete, NULL);
939 	poll_threads();
940 	CU_ASSERT(g_bserrno == 0);
941 
942 	/* Create clone from snapshot with xattrs */
943 	xattrs.names = g_xattr_names;
944 	xattrs.get_value = _get_xattr_value;
945 	xattrs.count = 3;
946 	xattrs.ctx = &g_ctx;
947 
948 	spdk_bs_create_clone(bs, snapshotid, &xattrs, blob_op_with_id_complete, NULL);
949 	poll_threads();
950 	CU_ASSERT(g_bserrno == 0);
951 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
952 	cloneid = g_blobid;
953 
954 	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
955 	poll_threads();
956 	CU_ASSERT(g_bserrno == 0);
957 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
958 	clone = g_blob;
959 	CU_ASSERT(clone->data_ro == false);
960 	CU_ASSERT(clone->md_ro == false);
961 	CU_ASSERT(spdk_blob_get_num_clusters(clone) == 10);
962 
963 	rc = spdk_blob_get_xattr_value(clone, g_xattr_names[0], &value, &value_len);
964 	CU_ASSERT(rc == 0);
965 	SPDK_CU_ASSERT_FATAL(value != NULL);
966 	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
967 	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);
968 
969 	rc = spdk_blob_get_xattr_value(clone, g_xattr_names[1], &value, &value_len);
970 	CU_ASSERT(rc == 0);
971 	SPDK_CU_ASSERT_FATAL(value != NULL);
972 	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
973 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);
974 
975 	rc = spdk_blob_get_xattr_value(clone, g_xattr_names[2], &value, &value_len);
976 	CU_ASSERT(rc == 0);
977 	SPDK_CU_ASSERT_FATAL(value != NULL);
978 	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
979 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);
980 
981 
982 	spdk_blob_close(clone, blob_op_complete, NULL);
983 	poll_threads();
984 	CU_ASSERT(g_bserrno == 0);
985 
986 	/* Try to create clone from not read only blob */
987 	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
988 	poll_threads();
989 	CU_ASSERT(g_bserrno == -EINVAL);
990 	CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID);
991 
992 	/* Mark blob as read only */
993 	spdk_blob_set_read_only(blob);
994 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
995 	poll_threads();
996 	CU_ASSERT(g_bserrno == 0);
997 
998 	/* Create clone from read only blob */
999 	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
1000 	poll_threads();
1001 	CU_ASSERT(g_bserrno == 0);
1002 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
1003 	cloneid = g_blobid;
1004 
1005 	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
1006 	poll_threads();
1007 	CU_ASSERT(g_bserrno == 0);
1008 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
1009 	clone = g_blob;
1010 	CU_ASSERT(clone->data_ro == false);
1011 	CU_ASSERT(clone->md_ro == false);
1012 	CU_ASSERT(spdk_blob_get_num_clusters(clone) == 10);
1013 
1014 	ut_blob_close_and_delete(bs, clone);
1015 	ut_blob_close_and_delete(bs, blob);
1016 }
1017 
1018 static void
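/*
 * Exercises both spdk_bs_inflate_blob() (allocate and copy every cluster so
 * the blob no longer needs a parent) and spdk_bs_blob_decouple_parent()
 * (copy only the clusters owned by the direct parent, leaving unallocated
 * clusters thin), selected by the decouple_parent flag.
 */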
1019 _blob_inflate(bool decouple_parent)
1020 {
1021 	struct spdk_blob_store *bs = g_bs;
1022 	struct spdk_blob_opts opts;
1023 	struct spdk_blob *blob, *snapshot;
1024 	spdk_blob_id blobid, snapshotid;
1025 	struct spdk_io_channel *channel;
1026 	uint64_t free_clusters;
1027 
1028 	channel = spdk_bs_alloc_io_channel(bs);
1029 	SPDK_CU_ASSERT_FATAL(channel != NULL);
1030 
1031 	/* Create blob with 10 clusters */
1032 
1033 	ut_spdk_blob_opts_init(&opts);
1034 	opts.num_clusters = 10;
1035 	opts.thin_provision = true;
1036 
1037 	blob = ut_blob_create_and_open(bs, &opts);
1038 	blobid = spdk_blob_get_id(blob);
1039 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
1040 	CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == true);
1041 
1042 	/* 1) Blob with no parent */
1043 	if (decouple_parent) {
1044 		/* Decouple parent of blob with no parent (should fail) */
1045 		spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
1046 		poll_threads();
1047 		CU_ASSERT(g_bserrno != 0);
1048 	} else {
1049 		/* Inflating a thin blob with no parent should make it thick */
1050 		spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
1051 		poll_threads();
1052 		CU_ASSERT(g_bserrno == 0);
1053 		CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == false);
1054 	}
1055 
1056 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
1057 	poll_threads();
1058 	CU_ASSERT(g_bserrno == 0);
1059 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
1060 	snapshotid = g_blobid;
1061 
1062 	CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == true);
1063 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
1064 
1065 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
1066 	poll_threads();
1067 	CU_ASSERT(g_bserrno == 0);
1068 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
1069 	snapshot = g_blob;
1070 	CU_ASSERT(snapshot->data_ro == true);
1071 	CU_ASSERT(snapshot->md_ro == true);
1072 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10);
1073 
1074 	spdk_blob_close(snapshot, blob_op_complete, NULL);
1075 	poll_threads();
1076 	CU_ASSERT(g_bserrno == 0);
1077 
1078 	free_clusters = spdk_bs_free_cluster_count(bs);
1079 
1080 	/* 2) Blob with parent */
1081 	if (!decouple_parent) {
1082 		/* Do full blob inflation */
1083 		spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
1084 		poll_threads();
1085 		CU_ASSERT(g_bserrno == 0);
1086 		/* all 10 clusters should be allocated */
1087 		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 10);
1088 	} else {
1089 		/* Decouple parent of blob */
1090 		spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
1091 		poll_threads();
1092 		CU_ASSERT(g_bserrno == 0);
1093 		/* when only parent is removed, none of the clusters should be allocated */
1094 		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters);
1095 	}
1096 
1097 	/* Now, it should be possible to delete snapshot */
1098 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
1099 	poll_threads();
1100 	CU_ASSERT(g_bserrno == 0);
1101 
1102 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
1103 	CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == decouple_parent);
1104 
1105 	spdk_bs_free_io_channel(channel);
1106 	poll_threads();
1107 
1108 	ut_blob_close_and_delete(bs, blob);
1109 }
1110 
1111 static void
1112 blob_inflate(void)
1113 {
1114 	_blob_inflate(false);
1115 	_blob_inflate(true);
1116 }
1117 
1118 static void
1119 blob_delete(void)
1120 {
1121 	struct spdk_blob_store *bs = g_bs;
1122 	struct spdk_blob_opts blob_opts;
1123 	spdk_blob_id blobid;
1124 
1125 	/* Create a blob and then delete it. */
1126 	ut_spdk_blob_opts_init(&blob_opts);
1127 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
1128 	poll_threads();
1129 	CU_ASSERT(g_bserrno == 0);
1130 	CU_ASSERT(g_blobid > 0);
1131 	blobid = g_blobid;
1132 
1133 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
1134 	poll_threads();
1135 	CU_ASSERT(g_bserrno == 0);
1136 
1137 	/* Try to open the blob */
1138 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
1139 	poll_threads();
1140 	CU_ASSERT(g_bserrno == -ENOENT);
1141 }
1142 
1143 static void
1144 blob_resize_test(void)
1145 {
1146 	struct spdk_blob_store *bs = g_bs;
1147 	struct spdk_blob *blob;
1148 	uint64_t free_clusters;
1149 
1150 	free_clusters = spdk_bs_free_cluster_count(bs);
1151 
1152 	blob = ut_blob_create_and_open(bs, NULL);
1153 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
1154 
1155 	/* Confirm that resize fails if blob is marked read-only. */
1156 	blob->md_ro = true;
1157 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
1158 	poll_threads();
1159 	CU_ASSERT(g_bserrno == -EPERM);
1160 	blob->md_ro = false;
1161 
1162 	/* The blob started at 0 clusters. Resize it to be 5. */
1163 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
1164 	poll_threads();
1165 	CU_ASSERT(g_bserrno == 0);
1166 	CU_ASSERT((free_clusters - 5) == spdk_bs_free_cluster_count(bs));
1167 
1168 	/* Shrink the blob to 3 clusters. This will not actually release
1169 	 * the old clusters until the blob is synced.
1170 	 */
1171 	spdk_blob_resize(blob, 3, blob_op_complete, NULL);
1172 	poll_threads();
1173 	CU_ASSERT(g_bserrno == 0);
1174 	/* Verify there are still 5 clusters in use */
1175 	CU_ASSERT((free_clusters - 5) == spdk_bs_free_cluster_count(bs));
1176 
1177 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
1178 	poll_threads();
1179 	CU_ASSERT(g_bserrno == 0);
1180 	/* Now there are only 3 clusters in use */
1181 	CU_ASSERT((free_clusters - 3) == spdk_bs_free_cluster_count(bs));
1182 
1183 	/* Resize the blob to be 10 clusters. Growth takes effect immediately. */
1184 	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
1185 	poll_threads();
1186 	CU_ASSERT(g_bserrno == 0);
1187 	CU_ASSERT((free_clusters - 10) == spdk_bs_free_cluster_count(bs));
1188 
1189 	/* Try to resize the blob to size larger than blobstore. */
1190 	spdk_blob_resize(blob, bs->total_clusters + 1, blob_op_complete, NULL);
1191 	poll_threads();
1192 	CU_ASSERT(g_bserrno == -ENOSPC);
1193 
1194 	ut_blob_close_and_delete(bs, blob);
1195 }
1196 
1197 static void
1198 blob_read_only(void)
1199 {
1200 	struct spdk_blob_store *bs;
1201 	struct spdk_bs_dev *dev;
1202 	struct spdk_blob *blob;
1203 	struct spdk_bs_opts opts;
1204 	spdk_blob_id blobid;
1205 	int rc;
1206 
1207 	dev = init_dev();
1208 	spdk_bs_opts_init(&opts, sizeof(opts));
1209 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
1210 
1211 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
1212 	poll_threads();
1213 	CU_ASSERT(g_bserrno == 0);
1214 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
1215 	bs = g_bs;
1216 
1217 	blob = ut_blob_create_and_open(bs, NULL);
1218 	blobid = spdk_blob_get_id(blob);
1219 
1220 	rc = spdk_blob_set_read_only(blob);
1221 	CU_ASSERT(rc == 0);
1222 
1223 	CU_ASSERT(blob->data_ro == false);
1224 	CU_ASSERT(blob->md_ro == false);
1225 
1226 	spdk_blob_sync_md(blob, bs_op_complete, NULL);
1227 	poll_threads();
1228 
1229 	CU_ASSERT(blob->data_ro == true);
1230 	CU_ASSERT(blob->md_ro == true);
1231 	CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);
1232 
1233 	spdk_blob_close(blob, blob_op_complete, NULL);
1234 	poll_threads();
1235 	CU_ASSERT(g_bserrno == 0);
1236 
1237 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
1238 	poll_threads();
1239 	CU_ASSERT(g_bserrno == 0);
1240 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
1241 	blob = g_blob;
1242 
1243 	CU_ASSERT(blob->data_ro == true);
1244 	CU_ASSERT(blob->md_ro == true);
1245 	CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);
1246 
1247 	spdk_blob_close(blob, blob_op_complete, NULL);
1248 	poll_threads();
1249 	CU_ASSERT(g_bserrno == 0);
1250 
1251 	ut_bs_reload(&bs, &opts);
1252 
1253 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
1254 	poll_threads();
1255 	CU_ASSERT(g_bserrno == 0);
1256 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
1257 	blob = g_blob;
1258 
1259 	CU_ASSERT(blob->data_ro == true);
1260 	CU_ASSERT(blob->md_ro == true);
1261 	CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);
1262 
1263 	ut_blob_close_and_delete(bs, blob);
1264 
1265 	spdk_bs_unload(bs, bs_op_complete, NULL);
1266 	poll_threads();
1267 	CU_ASSERT(g_bserrno == 0);
1268 }
1269 
1270 static void
1271 channel_ops(void)
1272 {
1273 	struct spdk_blob_store *bs = g_bs;
1274 	struct spdk_io_channel *channel;
1275 
1276 	channel = spdk_bs_alloc_io_channel(bs);
1277 	CU_ASSERT(channel != NULL);
1278 
1279 	spdk_bs_free_io_channel(channel);
1280 	poll_threads();
1281 }
1282 
1283 static void
1284 blob_write(void)
1285 {
1286 	struct spdk_blob_store *bs = g_bs;
1287 	struct spdk_blob *blob = g_blob;
1288 	struct spdk_io_channel *channel;
1289 	uint64_t pages_per_cluster;
1290 	uint8_t payload[10 * 4096];
1291 
1292 	pages_per_cluster = spdk_bs_get_cluster_size(bs) / spdk_bs_get_page_size(bs);
1293 
1294 	channel = spdk_bs_alloc_io_channel(bs);
1295 	CU_ASSERT(channel != NULL);
1296 
1297 	/* Write to a blob with 0 size */
1298 	spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL);
1299 	poll_threads();
1300 	CU_ASSERT(g_bserrno == -EINVAL);
1301 
1302 	/* Resize the blob */
1303 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
1304 	poll_threads();
1305 	CU_ASSERT(g_bserrno == 0);
1306 
1307 	/* Confirm that write fails if blob is marked read-only. */
1308 	blob->data_ro = true;
1309 	spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL);
1310 	poll_threads();
1311 	CU_ASSERT(g_bserrno == -EPERM);
1312 	blob->data_ro = false;
1313 
1314 	/* Write to the blob */
1315 	spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL);
1316 	poll_threads();
1317 	CU_ASSERT(g_bserrno == 0);
1318 
1319 	/* Write starting beyond the end */
1320 	spdk_blob_io_write(blob, channel, payload, 5 * pages_per_cluster, 1, blob_op_complete,
1321 			   NULL);
1322 	poll_threads();
1323 	CU_ASSERT(g_bserrno == -EINVAL);
1324 
1325 	/* Write starting at a valid location but going off the end */
1326 	spdk_blob_io_write(blob, channel, payload, 4 * pages_per_cluster, pages_per_cluster + 1,
1327 			   blob_op_complete, NULL);
1328 	poll_threads();
1329 	CU_ASSERT(g_bserrno == -EINVAL);
1330 
1331 	spdk_bs_free_io_channel(channel);
1332 	poll_threads();
1333 }
1334 
1335 static void
1336 blob_read(void)
1337 {
1338 	struct spdk_blob_store *bs = g_bs;
1339 	struct spdk_blob *blob = g_blob;
1340 	struct spdk_io_channel *channel;
1341 	uint64_t pages_per_cluster;
1342 	uint8_t payload[10 * 4096];
1343 
1344 	pages_per_cluster = spdk_bs_get_cluster_size(bs) / spdk_bs_get_page_size(bs);
1345 
1346 	channel = spdk_bs_alloc_io_channel(bs);
1347 	CU_ASSERT(channel != NULL);
1348 
1349 	/* Read from a blob with 0 size */
1350 	spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL);
1351 	poll_threads();
1352 	CU_ASSERT(g_bserrno == -EINVAL);
1353 
1354 	/* Resize the blob */
1355 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
1356 	poll_threads();
1357 	CU_ASSERT(g_bserrno == 0);
1358 
1359 	/* Confirm that read passes if blob is marked read-only. */
1360 	blob->data_ro = true;
1361 	spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL);
1362 	poll_threads();
1363 	CU_ASSERT(g_bserrno == 0);
1364 	blob->data_ro = false;
1365 
1366 	/* Read from the blob */
1367 	spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL);
1368 	poll_threads();
1369 	CU_ASSERT(g_bserrno == 0);
1370 
1371 	/* Read starting beyond the end */
1372 	spdk_blob_io_read(blob, channel, payload, 5 * pages_per_cluster, 1, blob_op_complete,
1373 			  NULL);
1374 	poll_threads();
1375 	CU_ASSERT(g_bserrno == -EINVAL);
1376 
1377 	/* Read starting at a valid location but going off the end */
1378 	spdk_blob_io_read(blob, channel, payload, 4 * pages_per_cluster, pages_per_cluster + 1,
1379 			  blob_op_complete, NULL);
1380 	poll_threads();
1381 	CU_ASSERT(g_bserrno == -EINVAL);
1382 
1383 	spdk_bs_free_io_channel(channel);
1384 	poll_threads();
1385 }
1386 
1387 static void
1388 blob_rw_verify(void)
1389 {
1390 	struct spdk_blob_store *bs = g_bs;
1391 	struct spdk_blob *blob = g_blob;
1392 	struct spdk_io_channel *channel;
1393 	uint8_t payload_read[10 * 4096];
1394 	uint8_t payload_write[10 * 4096];
1395 
1396 	channel = spdk_bs_alloc_io_channel(bs);
1397 	CU_ASSERT(channel != NULL);
1398 
1399 	spdk_blob_resize(blob, 32, blob_op_complete, NULL);
1400 	poll_threads();
1401 	CU_ASSERT(g_bserrno == 0);
1402 
1403 	memset(payload_write, 0xE5, sizeof(payload_write));
1404 	spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
1405 	poll_threads();
1406 	CU_ASSERT(g_bserrno == 0);
1407 
1408 	memset(payload_read, 0x00, sizeof(payload_read));
1409 	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
1410 	poll_threads();
1411 	CU_ASSERT(g_bserrno == 0);
1412 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
1413 
1414 	spdk_bs_free_io_channel(channel);
1415 	poll_threads();
1416 }
1417 
1418 static void
1419 blob_rw_verify_iov(void)
1420 {
1421 	struct spdk_blob_store *bs = g_bs;
1422 	struct spdk_blob *blob;
1423 	struct spdk_io_channel *channel;
1424 	uint8_t payload_read[10 * 4096];
1425 	uint8_t payload_write[10 * 4096];
1426 	struct iovec iov_read[3];
1427 	struct iovec iov_write[3];
1428 	void *buf;
1429 
1430 	channel = spdk_bs_alloc_io_channel(bs);
1431 	CU_ASSERT(channel != NULL);
1432 
1433 	blob = ut_blob_create_and_open(bs, NULL);
1434 
1435 	spdk_blob_resize(blob, 2, blob_op_complete, NULL);
1436 	poll_threads();
1437 	CU_ASSERT(g_bserrno == 0);
1438 
1439 	/*
1440 	 * Manually adjust the offset of the blob's second cluster.  This allows
1441 	 *  us to make sure that the readv/writev code correctly accounts for I/O
1442 	 *  that crosses cluster boundaries.  Start by asserting that the allocated
1443 	 *  clusters are where we expect before modifying the second cluster.
1444 	 */
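	/* With the default 1 MiB cluster and 4 KiB test device blocks, each
	 * cluster spans 256 LBAs, so clusters[] is expected to hold 256 and 512.
	 * Redirecting the second cluster to LBA 768 lets us verify later that
	 * the original LBA-512 cluster was never written. */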
1445 	CU_ASSERT(blob->active.clusters[0] == 1 * 256);
1446 	CU_ASSERT(blob->active.clusters[1] == 2 * 256);
1447 	blob->active.clusters[1] = 3 * 256;
1448 
1449 	memset(payload_write, 0xE5, sizeof(payload_write));
1450 	iov_write[0].iov_base = payload_write;
1451 	iov_write[0].iov_len = 1 * 4096;
1452 	iov_write[1].iov_base = payload_write + 1 * 4096;
1453 	iov_write[1].iov_len = 5 * 4096;
1454 	iov_write[2].iov_base = payload_write + 6 * 4096;
1455 	iov_write[2].iov_len = 4 * 4096;
1456 	/*
1457 	 * Choose a page offset just before the cluster boundary.  The first 6 pages of payload
1458 	 *  will get written to the first cluster, the last 4 to the second cluster.
1459 	 */
1460 	spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
1461 	poll_threads();
1462 	CU_ASSERT(g_bserrno == 0);
1463 
1464 	memset(payload_read, 0xAA, sizeof(payload_read));
1465 	iov_read[0].iov_base = payload_read;
1466 	iov_read[0].iov_len = 3 * 4096;
1467 	iov_read[1].iov_base = payload_read + 3 * 4096;
1468 	iov_read[1].iov_len = 4 * 4096;
1469 	iov_read[2].iov_base = payload_read + 7 * 4096;
1470 	iov_read[2].iov_len = 3 * 4096;
1471 	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
1472 	poll_threads();
1473 	CU_ASSERT(g_bserrno == 0);
1474 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
1475 
1476 	buf = calloc(1, 256 * 4096);
1477 	SPDK_CU_ASSERT_FATAL(buf != NULL);
1478 	/* Check that cluster 2 on "disk" was not modified. */
1479 	CU_ASSERT(memcmp(buf, &g_dev_buffer[512 * 4096], 256 * 4096) == 0);
1480 	free(buf);
1481 
1482 	spdk_blob_close(blob, blob_op_complete, NULL);
1483 	poll_threads();
1484 	CU_ASSERT(g_bserrno == 0);
1485 
1486 	spdk_bs_free_io_channel(channel);
1487 	poll_threads();
1488 }
1489 
1490 static uint32_t
1491 bs_channel_get_req_count(struct spdk_io_channel *_channel)
1492 {
1493 	struct spdk_bs_channel *channel = spdk_io_channel_get_ctx(_channel);
1494 	struct spdk_bs_request_set *set;
1495 	uint32_t count = 0;
1496 
1497 	TAILQ_FOREACH(set, &channel->reqs, link) {
1498 		count++;
1499 	}
1500 
1501 	return count;
1502 }
1503 
1504 static void
1505 blob_rw_verify_iov_nomem(void)
1506 {
1507 	struct spdk_blob_store *bs = g_bs;
1508 	struct spdk_blob *blob = g_blob;
1509 	struct spdk_io_channel *channel;
1510 	uint8_t payload_write[10 * 4096];
1511 	struct iovec iov_write[3];
1512 	uint32_t req_count;
1513 
1514 	channel = spdk_bs_alloc_io_channel(bs);
1515 	CU_ASSERT(channel != NULL);
1516 
1517 	spdk_blob_resize(blob, 2, blob_op_complete, NULL);
1518 	poll_threads();
1519 	CU_ASSERT(g_bserrno == 0);
1520 
1521 	/*
1522 	 * Choose a page offset just before the cluster boundary.  The first 6 pages of payload
1523 	 *  will get written to the first cluster, the last 4 to the second cluster.
1524 	 */
1525 	iov_write[0].iov_base = payload_write;
1526 	iov_write[0].iov_len = 1 * 4096;
1527 	iov_write[1].iov_base = payload_write + 1 * 4096;
1528 	iov_write[1].iov_len = 5 * 4096;
1529 	iov_write[2].iov_base = payload_write + 6 * 4096;
1530 	iov_write[2].iov_len = 4 * 4096;
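	/* Force the mocked calloc() to return NULL so request allocation fails;
	 * the writev must complete with -ENOMEM and leave the channel's request
	 * pool untouched. */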
1531 	MOCK_SET(calloc, NULL);
1532 	req_count = bs_channel_get_req_count(channel);
1533 	spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
1534 	poll_threads();
1535 	CU_ASSERT(g_bserrno == -ENOMEM);
1536 	CU_ASSERT(req_count == bs_channel_get_req_count(channel));
1537 	MOCK_CLEAR(calloc);
1538 
1539 	spdk_bs_free_io_channel(channel);
1540 	poll_threads();
1541 }
1542 
1543 static void
1544 blob_rw_iov_read_only(void)
1545 {
1546 	struct spdk_blob_store *bs = g_bs;
1547 	struct spdk_blob *blob = g_blob;
1548 	struct spdk_io_channel *channel;
1549 	uint8_t payload_read[4096];
1550 	uint8_t payload_write[4096];
1551 	struct iovec iov_read;
1552 	struct iovec iov_write;
1553 
1554 	channel = spdk_bs_alloc_io_channel(bs);
1555 	CU_ASSERT(channel != NULL);
1556 
1557 	spdk_blob_resize(blob, 2, blob_op_complete, NULL);
1558 	poll_threads();
1559 	CU_ASSERT(g_bserrno == 0);
1560 
1561 	/* Verify that writev fails if the data_ro flag is set. */
1562 	blob->data_ro = true;
1563 	iov_write.iov_base = payload_write;
1564 	iov_write.iov_len = sizeof(payload_write);
1565 	spdk_blob_io_writev(blob, channel, &iov_write, 1, 0, 1, blob_op_complete, NULL);
1566 	poll_threads();
1567 	CU_ASSERT(g_bserrno == -EPERM);
1568 
1569 	/* Verify that reads pass if data_ro flag is set. */
1570 	iov_read.iov_base = payload_read;
1571 	iov_read.iov_len = sizeof(payload_read);
1572 	spdk_blob_io_readv(blob, channel, &iov_read, 1, 0, 1, blob_op_complete, NULL);
1573 	poll_threads();
1574 	CU_ASSERT(g_bserrno == 0);
1575 
1576 	spdk_bs_free_io_channel(channel);
1577 	poll_threads();
1578 }
1579 
1580 static void
1581 _blob_io_read_no_split(struct spdk_blob *blob, struct spdk_io_channel *channel,
1582 		       uint8_t *payload, uint64_t offset, uint64_t length,
1583 		       spdk_blob_op_complete cb_fn, void *cb_arg)
1584 {
1585 	uint64_t i;
1586 	uint8_t *buf;
1587 	uint64_t page_size = spdk_bs_get_page_size(blob->bs);
1588 
1589 	/* To be sure that the operation is NOT split, read one page at a time */
1590 	buf = payload;
1591 	for (i = 0; i < length; i++) {
1592 		spdk_blob_io_read(blob, channel, buf, i + offset, 1, blob_op_complete, NULL);
1593 		poll_threads();
1594 		if (g_bserrno != 0) {
1595 			/* Pass the error code up */
1596 			break;
1597 		}
1598 		buf += page_size;
1599 	}
1600 
1601 	cb_fn(cb_arg, g_bserrno);
1602 }
1603 
1604 static void
1605 _blob_io_write_no_split(struct spdk_blob *blob, struct spdk_io_channel *channel,
1606 			uint8_t *payload, uint64_t offset, uint64_t length,
1607 			spdk_blob_op_complete cb_fn, void *cb_arg)
1608 {
1609 	uint64_t i;
1610 	uint8_t *buf;
1611 	uint64_t page_size = spdk_bs_get_page_size(blob->bs);
1612 
1613 	/* To be sure that the operation is NOT split, write one page at a time */
1614 	buf = payload;
1615 	for (i = 0; i < length; i++) {
1616 		spdk_blob_io_write(blob, channel, buf, i + offset, 1, blob_op_complete, NULL);
1617 		poll_threads();
1618 		if (g_bserrno != 0) {
1619 			/* Pass the error code up */
1620 			break;
1621 		}
1622 		buf += page_size;
1623 	}
1624 
1625 	cb_fn(cb_arg, g_bserrno);
1626 }
1627 
1628 static void
1629 blob_operation_split_rw(void)
1630 {
1631 	struct spdk_blob_store *bs = g_bs;
1632 	struct spdk_blob *blob;
1633 	struct spdk_io_channel *channel;
1634 	struct spdk_blob_opts opts;
1635 	uint64_t cluster_size;
1636 
1637 	uint64_t payload_size;
1638 	uint8_t *payload_read;
1639 	uint8_t *payload_write;
1640 	uint8_t *payload_pattern;
1641 
1642 	uint64_t page_size;
1643 	uint64_t pages_per_cluster;
1644 	uint64_t pages_per_payload;
1645 
1646 	uint64_t i;
1647 
1648 	cluster_size = spdk_bs_get_cluster_size(bs);
1649 	page_size = spdk_bs_get_page_size(bs);
1650 	pages_per_cluster = cluster_size / page_size;
1651 	pages_per_payload = pages_per_cluster * 5;
1652 	payload_size = cluster_size * 5;
1653 
1654 	payload_read = malloc(payload_size);
1655 	SPDK_CU_ASSERT_FATAL(payload_read != NULL);
1656 
1657 	payload_write = malloc(payload_size);
1658 	SPDK_CU_ASSERT_FATAL(payload_write != NULL);
1659 
1660 	payload_pattern = malloc(payload_size);
1661 	SPDK_CU_ASSERT_FATAL(payload_pattern != NULL);
1662 
1663 	/* Prepare a distinct per-page pattern to write */
1664 	memset(payload_pattern, 0xFF, payload_size);
1665 	for (i = 0; i < pages_per_payload; i++) {
1666 		*((uint64_t *)(payload_pattern + page_size * i)) = (i + 1);
1667 	}
1668 
1669 	channel = spdk_bs_alloc_io_channel(bs);
1670 	SPDK_CU_ASSERT_FATAL(channel != NULL);
1671 
1672 	/* Create blob */
1673 	ut_spdk_blob_opts_init(&opts);
1674 	opts.thin_provision = false;
1675 	opts.num_clusters = 5;
1676 
1677 	blob = ut_blob_create_and_open(bs, &opts);
1678 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
1679 
1680 	/* Initial read should return zeroed payload */
1681 	memset(payload_read, 0xFF, payload_size);
1682 	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
1683 	poll_threads();
1684 	CU_ASSERT(g_bserrno == 0);
1685 	CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size));
1686 
1687 	/* Fill whole blob except last page */
1688 	spdk_blob_io_write(blob, channel, payload_pattern, 0, pages_per_payload - 1,
1689 			   blob_op_complete, NULL);
1690 	poll_threads();
1691 	CU_ASSERT(g_bserrno == 0);
1692 
1693 	/* Write last page with a pattern */
1694 	spdk_blob_io_write(blob, channel, payload_pattern, pages_per_payload - 1, 1,
1695 			   blob_op_complete, NULL);
1696 	poll_threads();
1697 	CU_ASSERT(g_bserrno == 0);
1698 
1699 	/* Read whole blob and check consistency */
1700 	memset(payload_read, 0xFF, payload_size);
1701 	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
1702 	poll_threads();
1703 	CU_ASSERT(g_bserrno == 0);
1704 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size - page_size) == 0);
1705 	CU_ASSERT(memcmp(payload_pattern, payload_read + payload_size - page_size, page_size) == 0);
1706 
1707 	/* Fill whole blob except first page */
1708 	spdk_blob_io_write(blob, channel, payload_pattern, 1, pages_per_payload - 1,
1709 			   blob_op_complete, NULL);
1710 	poll_threads();
1711 	CU_ASSERT(g_bserrno == 0);
1712 
1713 	/* Write first page with a pattern */
1714 	spdk_blob_io_write(blob, channel, payload_pattern, 0, 1,
1715 			   blob_op_complete, NULL);
1716 	poll_threads();
1717 	CU_ASSERT(g_bserrno == 0);
1718 
1719 	/* Read whole blob and check consistency */
1720 	memset(payload_read, 0xFF, payload_size);
1721 	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
1722 	poll_threads();
1723 	CU_ASSERT(g_bserrno == 0);
1724 	CU_ASSERT(memcmp(payload_pattern, payload_read + page_size, payload_size - page_size) == 0);
1725 	CU_ASSERT(memcmp(payload_pattern, payload_read, page_size) == 0);
1726 
1727 
1728 	/* Fill whole blob with a pattern (5 clusters) */
1729 
1730 	/* 1. Read test. */
1731 	_blob_io_write_no_split(blob, channel, payload_pattern, 0, pages_per_payload,
1732 				blob_op_complete, NULL);
1733 	poll_threads();
1734 	CU_ASSERT(g_bserrno == 0);
1735 
1736 	memset(payload_read, 0xFF, payload_size);
1737 	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
1738 	poll_threads();
1739 	poll_threads();
1740 	CU_ASSERT(g_bserrno == 0);
1741 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);
1742 
1743 	/* 2. Write test. */
1744 	spdk_blob_io_write(blob, channel, payload_pattern, 0, pages_per_payload,
1745 			   blob_op_complete, NULL);
1746 	poll_threads();
1747 	CU_ASSERT(g_bserrno == 0);
1748 
1749 	memset(payload_read, 0xFF, payload_size);
1750 	_blob_io_read_no_split(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
1751 	poll_threads();
1752 	CU_ASSERT(g_bserrno == 0);
1753 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);
1754 
1755 	spdk_bs_free_io_channel(channel);
1756 	poll_threads();
1757 
1758 	g_blob = NULL;
1759 	g_blobid = 0;
1760 
1761 	free(payload_read);
1762 	free(payload_write);
1763 	free(payload_pattern);
1764 
1765 	ut_blob_close_and_delete(bs, blob);
1766 }
1767 
1768 static void
1769 blob_operation_split_rw_iov(void)
1770 {
1771 	struct spdk_blob_store *bs = g_bs;
1772 	struct spdk_blob *blob;
1773 	struct spdk_io_channel *channel;
1774 	struct spdk_blob_opts opts;
1775 	uint64_t cluster_size;
1776 
1777 	uint64_t payload_size;
1778 	uint8_t *payload_read;
1779 	uint8_t *payload_write;
1780 	uint8_t *payload_pattern;
1781 
1782 	uint64_t page_size;
1783 	uint64_t pages_per_cluster;
1784 	uint64_t pages_per_payload;
1785 
1786 	struct iovec iov_read[2];
1787 	struct iovec iov_write[2];
1788 
1789 	uint64_t i, j;
1790 
1791 	cluster_size = spdk_bs_get_cluster_size(bs);
1792 	page_size = spdk_bs_get_page_size(bs);
1793 	pages_per_cluster = cluster_size / page_size;
1794 	pages_per_payload = pages_per_cluster * 5;
1795 	payload_size = cluster_size * 5;
1796 
1797 	payload_read = malloc(payload_size);
1798 	SPDK_CU_ASSERT_FATAL(payload_read != NULL);
1799 
1800 	payload_write = malloc(payload_size);
1801 	SPDK_CU_ASSERT_FATAL(payload_write != NULL);
1802 
1803 	payload_pattern = malloc(payload_size);
1804 	SPDK_CU_ASSERT_FATAL(payload_pattern != NULL);
1805 
1806 	/* Prepare a distinct per-page pattern to write */
1807 	for (i = 0; i < pages_per_payload; i++) {
1808 		for (j = 0; j < page_size / sizeof(uint64_t); j++) {
1809 			uint64_t *tmp;
1810 
1811 			tmp = (uint64_t *)payload_pattern;
1812 			tmp += ((page_size * i) / sizeof(uint64_t)) + j;
1813 			*tmp = i + 1;
1814 		}
1815 	}
1816 
1817 	channel = spdk_bs_alloc_io_channel(bs);
1818 	SPDK_CU_ASSERT_FATAL(channel != NULL);
1819 
1820 	/* Create blob */
1821 	ut_spdk_blob_opts_init(&opts);
1822 	opts.thin_provision = false;
1823 	opts.num_clusters = 5;
1824 
1825 	blob = ut_blob_create_and_open(bs, &opts);
1826 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
1827 
1828 	/* Initial read should return zeroed payload */
1829 	memset(payload_read, 0xFF, payload_size);
1830 	iov_read[0].iov_base = payload_read;
1831 	iov_read[0].iov_len = cluster_size * 3;
1832 	iov_read[1].iov_base = payload_read + cluster_size * 3;
1833 	iov_read[1].iov_len = cluster_size * 2;
1834 	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL);
1835 	poll_threads();
1836 	CU_ASSERT(g_bserrno == 0);
1837 	CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size));
1838 
1839 	/* The first iov fills the whole blob except the last page and the second iov writes
1840 	 *  the last page with the pattern. */
1841 	iov_write[0].iov_base = payload_pattern;
1842 	iov_write[0].iov_len = payload_size - page_size;
1843 	iov_write[1].iov_base = payload_pattern;
1844 	iov_write[1].iov_len = page_size;
1845 	spdk_blob_io_writev(blob, channel, iov_write, 2, 0, pages_per_payload, blob_op_complete, NULL);
1846 	poll_threads();
1847 	CU_ASSERT(g_bserrno == 0);
1848 
1849 	/* Read whole blob and check consistency */
1850 	memset(payload_read, 0xFF, payload_size);
1851 	iov_read[0].iov_base = payload_read;
1852 	iov_read[0].iov_len = cluster_size * 2;
1853 	iov_read[1].iov_base = payload_read + cluster_size * 2;
1854 	iov_read[1].iov_len = cluster_size * 3;
1855 	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL);
1856 	poll_threads();
1857 	CU_ASSERT(g_bserrno == 0);
1858 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size - page_size) == 0);
1859 	CU_ASSERT(memcmp(payload_pattern, payload_read + payload_size - page_size, page_size) == 0);
1860 
1861 	/* The first iov writes only the first page and the second iov fills the whole blob
1862 	 *  except the first page with the pattern. */
1863 	iov_write[0].iov_base = payload_pattern;
1864 	iov_write[0].iov_len = page_size;
1865 	iov_write[1].iov_base = payload_pattern;
1866 	iov_write[1].iov_len = payload_size - page_size;
1867 	spdk_blob_io_writev(blob, channel, iov_write, 2, 0, pages_per_payload, blob_op_complete, NULL);
1868 	poll_threads();
1869 	CU_ASSERT(g_bserrno == 0);
1870 
1871 	/* Read whole blob and check consistency */
1872 	memset(payload_read, 0xFF, payload_size);
1873 	iov_read[0].iov_base = payload_read;
1874 	iov_read[0].iov_len = cluster_size * 4;
1875 	iov_read[1].iov_base = payload_read + cluster_size * 4;
1876 	iov_read[1].iov_len = cluster_size;
1877 	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL);
1878 	poll_threads();
1879 	CU_ASSERT(g_bserrno == 0);
1880 	CU_ASSERT(memcmp(payload_pattern, payload_read + page_size, payload_size - page_size) == 0);
1881 	CU_ASSERT(memcmp(payload_pattern, payload_read, page_size) == 0);
1882 
1883 
1884 	/* Fill whole blob with a pattern (5 clusters) */
1885 
1886 	/* 1. Read test. */
1887 	_blob_io_write_no_split(blob, channel, payload_pattern, 0, pages_per_payload,
1888 				blob_op_complete, NULL);
1889 	poll_threads();
1890 	CU_ASSERT(g_bserrno == 0);
1891 
1892 	memset(payload_read, 0xFF, payload_size);
1893 	iov_read[0].iov_base = payload_read;
1894 	iov_read[0].iov_len = cluster_size;
1895 	iov_read[1].iov_base = payload_read + cluster_size;
1896 	iov_read[1].iov_len = cluster_size * 4;
1897 	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL);
1898 	poll_threads();
1899 	CU_ASSERT(g_bserrno == 0);
1900 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);
1901 
1902 	/* 2. Write test. */
1903 	iov_write[0].iov_base = payload_read;
1904 	iov_write[0].iov_len = cluster_size * 2;
1905 	iov_write[1].iov_base = payload_read + cluster_size * 2;
1906 	iov_write[1].iov_len = cluster_size * 3;
1907 	spdk_blob_io_writev(blob, channel, iov_write, 2, 0, pages_per_payload, blob_op_complete, NULL);
1908 	poll_threads();
1909 	CU_ASSERT(g_bserrno == 0);
1910 
1911 	memset(payload_read, 0xFF, payload_size);
1912 	_blob_io_read_no_split(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
1913 	poll_threads();
1914 	CU_ASSERT(g_bserrno == 0);
1915 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);
1916 
1917 	spdk_bs_free_io_channel(channel);
1918 	poll_threads();
1919 
1920 	g_blob = NULL;
1921 	g_blobid = 0;
1922 
1923 	free(payload_read);
1924 	free(payload_write);
1925 	free(payload_pattern);
1926 
1927 	ut_blob_close_and_delete(bs, blob);
1928 }
1929 
1930 static void
1931 blob_unmap(void)
1932 {
1933 	struct spdk_blob_store *bs = g_bs;
1934 	struct spdk_blob *blob;
1935 	struct spdk_io_channel *channel;
1936 	struct spdk_blob_opts opts;
1937 	uint8_t payload[4096];
1938 	int i;
1939 
1940 	channel = spdk_bs_alloc_io_channel(bs);
1941 	CU_ASSERT(channel != NULL);
1942 
1943 	ut_spdk_blob_opts_init(&opts);
1944 	opts.num_clusters = 10;
1945 
1946 	blob = ut_blob_create_and_open(bs, &opts);
1947 
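	/* Make sure the blob is sized to 10 clusters before writing directly to the device buffer. */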
1948 	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
1949 	poll_threads();
1950 	CU_ASSERT(g_bserrno == 0);
1951 
1952 	memset(payload, 0, sizeof(payload));
1953 	payload[0] = 0xFF;
1954 
1955 	/*
1956 	 * Set the first byte of every cluster to 0xFF by writing directly to the device buffer.
1957 	 * The first cluster on the device is reserved, so start from cluster number 1.
1958 	 */
1959 	for (i = 1; i < 11; i++) {
1960 		g_dev_buffer[i * SPDK_BLOB_OPTS_CLUSTER_SZ] = 0xFF;
1961 	}
1962 
1963 	/* Confirm writes */
1964 	for (i = 0; i < 10; i++) {
1965 		payload[0] = 0;
1966 		spdk_blob_io_read(blob, channel, &payload, i * SPDK_BLOB_OPTS_CLUSTER_SZ / 4096, 1,
1967 				  blob_op_complete, NULL);
1968 		poll_threads();
1969 		CU_ASSERT(g_bserrno == 0);
1970 		CU_ASSERT(payload[0] == 0xFF);
1971 	}
1972 
1973 	/* Mark some clusters as unallocated */
1974 	blob->active.clusters[1] = 0;
1975 	blob->active.clusters[2] = 0;
1976 	blob->active.clusters[3] = 0;
1977 	blob->active.clusters[6] = 0;
1978 	blob->active.clusters[8] = 0;
1979 
1980 	/* Unmap clusters by resizing to 0 */
1981 	spdk_blob_resize(blob, 0, blob_op_complete, NULL);
1982 	poll_threads();
1983 	CU_ASSERT(g_bserrno == 0);
1984 
1985 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
1986 	poll_threads();
1987 	CU_ASSERT(g_bserrno == 0);
1988 
1989 	/* Confirm that only 'allocated' clusters were unmapped */
1990 	for (i = 1; i < 11; i++) {
1991 		switch (i) {
1992 		case 2:
1993 		case 3:
1994 		case 4:
1995 		case 7:
1996 		case 9:
1997 			CU_ASSERT(g_dev_buffer[i * SPDK_BLOB_OPTS_CLUSTER_SZ] == 0xFF);
1998 			break;
1999 		default:
2000 			CU_ASSERT(g_dev_buffer[i * SPDK_BLOB_OPTS_CLUSTER_SZ] == 0);
2001 			break;
2002 		}
2003 	}
2004 
2005 	spdk_bs_free_io_channel(channel);
2006 	poll_threads();
2007 
2008 	ut_blob_close_and_delete(bs, blob);
2009 }
2010 
2011 static void
2012 blob_iter(void)
2013 {
2014 	struct spdk_blob_store *bs = g_bs;
2015 	struct spdk_blob *blob;
2016 	spdk_blob_id blobid;
2017 	struct spdk_blob_opts blob_opts;
2018 
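	/* No blobs exist yet, so the first iteration should report -ENOENT. */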
2019 	spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL);
2020 	poll_threads();
2021 	CU_ASSERT(g_blob == NULL);
2022 	CU_ASSERT(g_bserrno == -ENOENT);
2023 
2024 	ut_spdk_blob_opts_init(&blob_opts);
2025 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
2026 	poll_threads();
2027 	CU_ASSERT(g_bserrno == 0);
2028 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
2029 	blobid = g_blobid;
2030 
2031 	spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL);
2032 	poll_threads();
2033 	CU_ASSERT(g_blob != NULL);
2034 	CU_ASSERT(g_bserrno == 0);
2035 	blob = g_blob;
2036 	CU_ASSERT(spdk_blob_get_id(blob) == blobid);
2037 
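	/* Only one blob exists, so the next iteration should report -ENOENT. */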
2038 	spdk_bs_iter_next(bs, blob, blob_op_with_handle_complete, NULL);
2039 	poll_threads();
2040 	CU_ASSERT(g_blob == NULL);
2041 	CU_ASSERT(g_bserrno == -ENOENT);
2042 }
2043 
2044 static void
2045 blob_xattr(void)
2046 {
2047 	struct spdk_blob_store *bs = g_bs;
2048 	struct spdk_blob *blob = g_blob;
2049 	spdk_blob_id blobid = spdk_blob_get_id(blob);
2050 	uint64_t length;
2051 	int rc;
2052 	const char *name1, *name2;
2053 	const void *value;
2054 	size_t value_len;
2055 	struct spdk_xattr_names *names;
2056 
2057 	/* Test that set_xattr fails if md_ro flag is set. */
2058 	blob->md_ro = true;
2059 	rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
2060 	CU_ASSERT(rc == -EPERM);
2061 
2062 	blob->md_ro = false;
2063 	rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
2064 	CU_ASSERT(rc == 0);
2065 
2066 	length = 2345;
2067 	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
2068 	CU_ASSERT(rc == 0);
2069 
2070 	/* Overwrite "length" xattr. */
2071 	length = 3456;
2072 	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
2073 	CU_ASSERT(rc == 0);
2074 
2075 	/* get_xattr should still work even if md_ro flag is set. */
2076 	value = NULL;
2077 	blob->md_ro = true;
2078 	rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len);
2079 	CU_ASSERT(rc == 0);
2080 	SPDK_CU_ASSERT_FATAL(value != NULL);
2081 	CU_ASSERT(*(uint64_t *)value == length);
2082 	CU_ASSERT(value_len == 8);
2083 	blob->md_ro = false;
2084 
2085 	rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len);
2086 	CU_ASSERT(rc == -ENOENT);
2087 
2088 	names = NULL;
2089 	rc = spdk_blob_get_xattr_names(blob, &names);
2090 	CU_ASSERT(rc == 0);
2091 	SPDK_CU_ASSERT_FATAL(names != NULL);
2092 	CU_ASSERT(spdk_xattr_names_get_count(names) == 2);
2093 	name1 = spdk_xattr_names_get_name(names, 0);
2094 	SPDK_CU_ASSERT_FATAL(name1 != NULL);
2095 	CU_ASSERT(!strcmp(name1, "name") || !strcmp(name1, "length"));
2096 	name2 = spdk_xattr_names_get_name(names, 1);
2097 	SPDK_CU_ASSERT_FATAL(name2 != NULL);
2098 	CU_ASSERT(!strcmp(name2, "name") || !strcmp(name2, "length"));
2099 	CU_ASSERT(strcmp(name1, name2));
2100 	spdk_xattr_names_free(names);
2101 
2102 	/* Confirm that remove_xattr fails if md_ro is set to true. */
2103 	blob->md_ro = true;
2104 	rc = spdk_blob_remove_xattr(blob, "name");
2105 	CU_ASSERT(rc == -EPERM);
2106 
2107 	blob->md_ro = false;
2108 	rc = spdk_blob_remove_xattr(blob, "name");
2109 	CU_ASSERT(rc == 0);
2110 
2111 	rc = spdk_blob_remove_xattr(blob, "foobar");
2112 	CU_ASSERT(rc == -ENOENT);
2113 
2114 	/* Set internal xattr */
2115 	length = 7898;
2116 	rc = blob_set_xattr(blob, "internal", &length, sizeof(length), true);
2117 	CU_ASSERT(rc == 0);
2118 	rc = blob_get_xattr_value(blob, "internal", &value, &value_len, true);
2119 	CU_ASSERT(rc == 0);
2120 	CU_ASSERT(*(uint64_t *)value == length);
2121 	/* try to get public xattr with same name */
2122 	rc = spdk_blob_get_xattr_value(blob, "internal", &value, &value_len);
2123 	CU_ASSERT(rc != 0);
2124 	rc = blob_get_xattr_value(blob, "internal", &value, &value_len, false);
2125 	CU_ASSERT(rc != 0);
2126 	/* Check if SPDK_BLOB_INTERNAL_XATTR is set */
2127 	CU_ASSERT((blob->invalid_flags & SPDK_BLOB_INTERNAL_XATTR) ==
2128 		  SPDK_BLOB_INTERNAL_XATTR);
2129 
2130 	spdk_blob_close(blob, blob_op_complete, NULL);
2131 	poll_threads();
2132 
2133 	/* Check if xattrs are persisted */
2134 	ut_bs_reload(&bs, NULL);
2135 
2136 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
2137 	poll_threads();
2138 	CU_ASSERT(g_bserrno == 0);
2139 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
2140 	blob = g_blob;
2141 
2142 	rc = blob_get_xattr_value(blob, "internal", &value, &value_len, true);
2143 	CU_ASSERT(rc == 0);
2144 	CU_ASSERT(*(uint64_t *)value == length);
2145 
2146 	/* try to get internal xattr through public call */
2147 	rc = spdk_blob_get_xattr_value(blob, "internal", &value, &value_len);
2148 	CU_ASSERT(rc != 0);
2149 
2150 	rc = blob_remove_xattr(blob, "internal", true);
2151 	CU_ASSERT(rc == 0);
2152 
2153 	CU_ASSERT((blob->invalid_flags & SPDK_BLOB_INTERNAL_XATTR) == 0);
2154 }
2155 
2156 static void
2157 blob_parse_md(void)
2158 {
2159 	struct spdk_blob_store *bs = g_bs;
2160 	struct spdk_blob *blob;
2161 	int rc;
2162 	uint32_t used_pages;
2163 	size_t xattr_length;
2164 	char *xattr;
2165 
2166 	used_pages = spdk_bit_array_count_set(bs->used_md_pages);
2167 	blob = ut_blob_create_and_open(bs, NULL);
2168 
2169 	/* Create a large xattr to force more than 1 page of metadata. */
2170 	xattr_length = SPDK_BS_MAX_DESC_SIZE - sizeof(struct spdk_blob_md_descriptor_xattr) -
2171 		       strlen("large_xattr");
2172 	xattr = calloc(xattr_length, sizeof(char));
2173 	SPDK_CU_ASSERT_FATAL(xattr != NULL);
2174 	rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
2175 	free(xattr);
2176 	SPDK_CU_ASSERT_FATAL(rc == 0);
2177 
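	/* Persist the metadata; the large xattr forces allocation of additional md pages. */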
2178 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
2179 	poll_threads();
2180 
2181 	/* Delete the blob and verify that the number of used md pages returns to the value from before its creation. */
2182 	SPDK_CU_ASSERT_FATAL(used_pages != spdk_bit_array_count_set(bs->used_md_pages));
2183 	ut_blob_close_and_delete(bs, blob);
2184 	SPDK_CU_ASSERT_FATAL(used_pages == spdk_bit_array_count_set(bs->used_md_pages));
2185 }
2186 
2187 static void
2188 bs_load(void)
2189 {
2190 	struct spdk_blob_store *bs;
2191 	struct spdk_bs_dev *dev;
2192 	spdk_blob_id blobid;
2193 	struct spdk_blob *blob;
2194 	struct spdk_bs_super_block *super_block;
2195 	uint64_t length;
2196 	int rc;
2197 	const void *value;
2198 	size_t value_len;
2199 	struct spdk_bs_opts opts;
2200 	struct spdk_blob_opts blob_opts;
2201 
2202 	dev = init_dev();
2203 	spdk_bs_opts_init(&opts, sizeof(opts));
2204 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2205 
2206 	/* Initialize a new blob store */
2207 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2208 	poll_threads();
2209 	CU_ASSERT(g_bserrno == 0);
2210 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2211 	bs = g_bs;
2212 
2213 	/* Try to open a blobid that does not exist */
2214 	spdk_bs_open_blob(bs, 0, blob_op_with_handle_complete, NULL);
2215 	poll_threads();
2216 	CU_ASSERT(g_bserrno == -ENOENT);
2217 	CU_ASSERT(g_blob == NULL);
2218 
2219 	/* Create a blob */
2220 	blob = ut_blob_create_and_open(bs, NULL);
2221 	blobid = spdk_blob_get_id(blob);
2222 
2223 	/* Try again to open valid blob but without the upper bit set */
2224 	spdk_bs_open_blob(bs, blobid & 0xFFFFFFFF, blob_op_with_handle_complete, NULL);
2225 	poll_threads();
2226 	CU_ASSERT(g_bserrno == -ENOENT);
2227 	CU_ASSERT(g_blob == NULL);
2228 
2229 	/* Set some xattrs */
2230 	rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
2231 	CU_ASSERT(rc == 0);
2232 
2233 	length = 2345;
2234 	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
2235 	CU_ASSERT(rc == 0);
2236 
2237 	/* Resize the blob */
2238 	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
2239 	poll_threads();
2240 	CU_ASSERT(g_bserrno == 0);
2241 
2242 	spdk_blob_close(blob, blob_op_complete, NULL);
2243 	poll_threads();
2244 	CU_ASSERT(g_bserrno == 0);
2245 	blob = NULL;
2246 	g_blob = NULL;
2247 	g_blobid = SPDK_BLOBID_INVALID;
2248 
2249 	/* Unload the blob store */
2250 	spdk_bs_unload(bs, bs_op_complete, NULL);
2251 	poll_threads();
2252 	CU_ASSERT(g_bserrno == 0);
2253 	g_bs = NULL;
2254 	g_blob = NULL;
2255 	g_blobid = 0;
2256 
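	/* A clean unload should leave the super block marked clean on disk. */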
2257 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
2258 	CU_ASSERT(super_block->clean == 1);
2259 
2260 	/* Load should fail for device with an unsupported blocklen */
2261 	dev = init_dev();
2262 	dev->blocklen = SPDK_BS_PAGE_SIZE * 2;
2263 	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
2264 	poll_threads();
2265 	CU_ASSERT(g_bserrno == -EINVAL);
2266 
2267 	/* Load should fail when max_md_ops is set to zero */
2268 	dev = init_dev();
2269 	spdk_bs_opts_init(&opts, sizeof(opts));
2270 	opts.max_md_ops = 0;
2271 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2272 	poll_threads();
2273 	CU_ASSERT(g_bserrno == -EINVAL);
2274 
2275 	/* Load should fail when max_channel_ops is set to zero */
2276 	dev = init_dev();
2277 	spdk_bs_opts_init(&opts, sizeof(opts));
2278 	opts.max_channel_ops = 0;
2279 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2280 	poll_threads();
2281 	CU_ASSERT(g_bserrno == -EINVAL);
2282 
2283 	/* Load an existing blob store */
2284 	dev = init_dev();
2285 	spdk_bs_opts_init(&opts, sizeof(opts));
2286 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2287 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2288 	poll_threads();
2289 	CU_ASSERT(g_bserrno == 0);
2290 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2291 	bs = g_bs;
2292 
2293 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
2294 	CU_ASSERT(super_block->clean == 1);
2295 	CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen);
2296 
2297 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
2298 	poll_threads();
2299 	CU_ASSERT(g_bserrno == 0);
2300 	CU_ASSERT(g_blob != NULL);
2301 	blob = g_blob;
2302 
2303 	/* Verify that blobstore is marked dirty after first metadata sync */
2304 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
2305 	CU_ASSERT(super_block->clean == 1);
2306 
2307 	/* Get the xattrs */
2308 	value = NULL;
2309 	rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len);
2310 	CU_ASSERT(rc == 0);
2311 	SPDK_CU_ASSERT_FATAL(value != NULL);
2312 	CU_ASSERT(*(uint64_t *)value == length);
2313 	CU_ASSERT(value_len == 8);
2314 
2315 	rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len);
2316 	CU_ASSERT(rc == -ENOENT);
2317 
2318 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
2319 
2320 	spdk_blob_close(blob, blob_op_complete, NULL);
2321 	poll_threads();
2322 	CU_ASSERT(g_bserrno == 0);
2323 	blob = NULL;
2324 	g_blob = NULL;
2325 
2326 	spdk_bs_unload(bs, bs_op_complete, NULL);
2327 	poll_threads();
2328 	CU_ASSERT(g_bserrno == 0);
2329 	g_bs = NULL;
2330 
2331 	/* Load should fail: bdev size < saved size */
2332 	dev = init_dev();
2333 	dev->blockcnt /= 2;
2334 
2335 	spdk_bs_opts_init(&opts, sizeof(opts));
2336 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2337 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2338 	poll_threads();
2339 
2340 	CU_ASSERT(g_bserrno == -EILSEQ);
2341 
2342 	/* Load should succeed: bdev size > saved size */
2343 	dev = init_dev();
2344 	dev->blockcnt *= 4;
2345 
2346 	spdk_bs_opts_init(&opts, sizeof(opts));
2347 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2348 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2349 	poll_threads();
2350 	CU_ASSERT(g_bserrno == 0);
2351 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2352 	bs = g_bs;
2353 
2354 	CU_ASSERT(g_bserrno == 0);
2355 	spdk_bs_unload(bs, bs_op_complete, NULL);
2356 	poll_threads();
2357 
2358 
2359 	/* Test compatibility mode */
2360 
2361 	dev = init_dev();
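	/*
	 * A super block size of 0 emulates one written by an older SPDK version that
	 * did not record the blobstore size; on load, the size should be taken from
	 * the underlying device instead.
	 */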
2362 	super_block->size = 0;
2363 	super_block->crc = blob_md_page_calc_crc(super_block);
2364 
2365 	spdk_bs_opts_init(&opts, sizeof(opts));
2366 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2367 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2368 	poll_threads();
2369 	CU_ASSERT(g_bserrno == 0);
2370 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2371 	bs = g_bs;
2372 
2373 	/* Create a blob */
2374 	ut_spdk_blob_opts_init(&blob_opts);
2375 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
2376 	poll_threads();
2377 	CU_ASSERT(g_bserrno == 0);
2378 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
2379 
2380 	/* Blobstore should update number of blocks in super_block */
2381 	CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen);
2382 	CU_ASSERT(super_block->clean == 0);
2383 
2384 	spdk_bs_unload(bs, bs_op_complete, NULL);
2385 	poll_threads();
2386 	CU_ASSERT(g_bserrno == 0);
2387 	CU_ASSERT(super_block->clean == 1);
2388 	g_bs = NULL;
2389 
2390 }
2391 
2392 static void
2393 bs_load_pending_removal(void)
2394 {
2395 	struct spdk_blob_store *bs = g_bs;
2396 	struct spdk_blob_opts opts;
2397 	struct spdk_blob *blob, *snapshot;
2398 	spdk_blob_id blobid, snapshotid;
2399 	const void *value;
2400 	size_t value_len;
2401 	int rc;
2402 
2403 	/* Create blob */
2404 	ut_spdk_blob_opts_init(&opts);
2405 	opts.num_clusters = 10;
2406 
2407 	blob = ut_blob_create_and_open(bs, &opts);
2408 	blobid = spdk_blob_get_id(blob);
2409 
2410 	/* Create snapshot */
2411 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
2412 	poll_threads();
2413 	CU_ASSERT(g_bserrno == 0);
2414 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
2415 	snapshotid = g_blobid;
2416 
2417 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
2418 	poll_threads();
2419 	CU_ASSERT(g_bserrno == 0);
2420 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
2421 	snapshot = g_blob;
2422 
2423 	/* Set SNAPSHOT_PENDING_REMOVAL xattr */
2424 	snapshot->md_ro = false;
2425 	rc = blob_set_xattr(snapshot, SNAPSHOT_PENDING_REMOVAL, &blobid, sizeof(spdk_blob_id), true);
2426 	CU_ASSERT(rc == 0);
2427 	snapshot->md_ro = true;
2428 
2429 	spdk_blob_close(snapshot, blob_op_complete, NULL);
2430 	poll_threads();
2431 	CU_ASSERT(g_bserrno == 0);
2432 
2433 	spdk_blob_close(blob, blob_op_complete, NULL);
2434 	poll_threads();
2435 	CU_ASSERT(g_bserrno == 0);
2436 
2437 	/* Reload blobstore */
2438 	ut_bs_reload(&bs, NULL);
2439 
2440 	/* Snapshot should not be removed as blob is still pointing to it */
2441 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
2442 	poll_threads();
2443 	CU_ASSERT(g_bserrno == 0);
2444 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
2445 	snapshot = g_blob;
2446 
2447 	/* SNAPSHOT_PENDING_REMOVAL xattr should be removed during load */
2448 	rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_PENDING_REMOVAL, &value, &value_len);
2449 	CU_ASSERT(rc != 0);
2450 
2451 	/* Set SNAPSHOT_PENDING_REMOVAL xattr again */
2452 	snapshot->md_ro = false;
2453 	rc = blob_set_xattr(snapshot, SNAPSHOT_PENDING_REMOVAL, &blobid, sizeof(spdk_blob_id), true);
2454 	CU_ASSERT(rc == 0);
2455 	snapshot->md_ro = true;
2456 
2457 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
2458 	poll_threads();
2459 	CU_ASSERT(g_bserrno == 0);
2460 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
2461 	blob = g_blob;
2462 
2463 	/* Remove parent_id from blob by removing BLOB_SNAPSHOT xattr */
2464 	blob_remove_xattr(blob, BLOB_SNAPSHOT, true);
2465 
2466 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
2467 	poll_threads();
2468 	CU_ASSERT(g_bserrno == 0);
2469 
2470 	spdk_blob_close(snapshot, blob_op_complete, NULL);
2471 	poll_threads();
2472 	CU_ASSERT(g_bserrno == 0);
2473 
2474 	spdk_blob_close(blob, blob_op_complete, NULL);
2475 	poll_threads();
2476 	CU_ASSERT(g_bserrno == 0);
2477 
2478 	/* Reload blobstore */
2479 	ut_bs_reload(&bs, NULL);
2480 
2481 	/* Snapshot should be removed as blob is not pointing to it anymore */
2482 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
2483 	poll_threads();
2484 	CU_ASSERT(g_bserrno != 0);
2485 }
2486 
2487 static void
2488 bs_load_custom_cluster_size(void)
2489 {
2490 	struct spdk_blob_store *bs;
2491 	struct spdk_bs_dev *dev;
2492 	struct spdk_bs_super_block *super_block;
2493 	struct spdk_bs_opts opts;
2494 	uint32_t custom_cluster_size = 4194304; /* 4MiB */
2495 	uint32_t cluster_sz;
2496 	uint64_t total_clusters;
2497 
2498 	dev = init_dev();
2499 	spdk_bs_opts_init(&opts, sizeof(opts));
2500 	opts.cluster_sz = custom_cluster_size;
2501 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2502 
2503 	/* Initialize a new blob store */
2504 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2505 	poll_threads();
2506 	CU_ASSERT(g_bserrno == 0);
2507 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2508 	bs = g_bs;
2509 	cluster_sz = bs->cluster_sz;
2510 	total_clusters = bs->total_clusters;
2511 
2512 	/* Unload the blob store */
2513 	spdk_bs_unload(bs, bs_op_complete, NULL);
2514 	poll_threads();
2515 	CU_ASSERT(g_bserrno == 0);
2516 	g_bs = NULL;
2517 	g_blob = NULL;
2518 	g_blobid = 0;
2519 
2520 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
2521 	CU_ASSERT(super_block->clean == 1);
2522 
2523 	/* Load an existing blob store */
2524 	dev = init_dev();
2525 	spdk_bs_opts_init(&opts, sizeof(opts));
2526 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2527 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2528 	poll_threads();
2529 	CU_ASSERT(g_bserrno == 0);
2530 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2531 	bs = g_bs;
2532 	/* Compare cluster size and count to the values recorded after initialization */
2533 	CU_ASSERT(cluster_sz == bs->cluster_sz);
2534 	CU_ASSERT(total_clusters == bs->total_clusters);
2535 
2536 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
2537 	CU_ASSERT(super_block->clean == 1);
2538 	CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen);
2539 
2540 	spdk_bs_unload(bs, bs_op_complete, NULL);
2541 	poll_threads();
2542 	CU_ASSERT(g_bserrno == 0);
2543 	CU_ASSERT(super_block->clean == 1);
2544 	g_bs = NULL;
2545 }
2546 
2547 static void
2548 bs_load_after_failed_grow(void)
2549 {
2550 	struct spdk_blob_store *bs;
2551 	struct spdk_bs_dev *dev;
2552 	struct spdk_bs_super_block *super_block;
2553 	struct spdk_bs_opts opts;
2554 	struct spdk_bs_md_mask *mask;
2555 	struct spdk_blob_opts blob_opts;
2556 	struct spdk_blob *blob, *snapshot;
2557 	spdk_blob_id blobid, snapshotid;
2558 	uint64_t total_data_clusters;
2559 
2560 	dev = init_dev();
2561 	spdk_bs_opts_init(&opts, sizeof(opts));
2562 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2563 	/*
2564 	 * The bdev_size is 64M and cluster_sz is 1M, so there are 64 clusters. The
2565 	 * blobstore will create 64 md pages by default. We set num_md_pages to 128,
2566 	 * so the blobstore can later grow to double its size.
2567 	 */
2568 	opts.num_md_pages = 128;
2569 
2570 	/* Initialize a new blob store */
2571 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2572 	poll_threads();
2573 	CU_ASSERT(g_bserrno == 0);
2574 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2575 	bs = g_bs;
2576 
2577 	/* Create blob */
2578 	ut_spdk_blob_opts_init(&blob_opts);
2579 	blob_opts.num_clusters = 10;
2580 
2581 	blob = ut_blob_create_and_open(bs, &blob_opts);
2582 	blobid = spdk_blob_get_id(blob);
2583 
2584 	/* Create snapshot */
2585 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
2586 	poll_threads();
2587 	CU_ASSERT(g_bserrno == 0);
2588 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
2589 	snapshotid = g_blobid;
2590 
2591 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
2592 	poll_threads();
2593 	CU_ASSERT(g_bserrno == 0);
2594 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
2595 	snapshot = g_blob;
2596 
2597 	spdk_blob_close(snapshot, blob_op_complete, NULL);
2598 	poll_threads();
2599 	CU_ASSERT(g_bserrno == 0);
2600 
2601 	spdk_blob_close(blob, blob_op_complete, NULL);
2602 	poll_threads();
2603 	CU_ASSERT(g_bserrno == 0);
2604 
2605 	total_data_clusters = bs->total_data_clusters;
2606 	CU_ASSERT(bs->num_free_clusters + 10 == total_data_clusters);
2607 
2608 	/* Unload the blob store */
2609 	spdk_bs_unload(bs, bs_op_complete, NULL);
2610 	poll_threads();
2611 	CU_ASSERT(g_bserrno == 0);
2612 	g_bs = NULL;
2613 	g_blob = NULL;
2614 	g_blobid = 0;
2615 
2616 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
2617 	CU_ASSERT(super_block->clean == 1);
2618 
2619 	mask = (struct spdk_bs_md_mask *)(g_dev_buffer + super_block->used_cluster_mask_start * 4096);
2620 	CU_ASSERT(mask->type == SPDK_MD_MASK_TYPE_USED_CLUSTERS);
2621 	CU_ASSERT(mask->length == super_block->size / super_block->cluster_size);
2622 
2623 	/*
2624 	 * We change mask->length to emulate this scenario: a spdk_bs_grow failed after it had
2625 	 * grown the used_cluster bitmap length, but before it updated the super block.
2626 	 */
2627 	mask->length *= 2;
2628 
2629 	/* Load an existing blob store */
2630 	dev = init_dev();
2631 	dev->blockcnt *= 2;
2632 	spdk_bs_opts_init(&opts, sizeof(opts));
2633 	opts.clear_method = BS_CLEAR_WITH_NONE;
2634 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2635 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2636 	poll_threads();
2637 	CU_ASSERT(g_bserrno == 0);
2638 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2639 	bs = g_bs;
2640 
2641 	/* Check the capacity is the same as before */
2642 	CU_ASSERT(bs->total_data_clusters == total_data_clusters);
2643 	CU_ASSERT(bs->num_free_clusters + 10 == total_data_clusters);
2644 
2645 	/* Check the blob and the snapshot are still available */
2646 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
2647 	poll_threads();
2648 	CU_ASSERT(g_bserrno == 0);
2649 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
2650 	blob = g_blob;
2651 
2652 	spdk_blob_close(blob, blob_op_complete, NULL);
2653 	poll_threads();
2654 	CU_ASSERT(g_bserrno == 0);
2655 
2656 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
2657 	poll_threads();
2658 	CU_ASSERT(g_bserrno == 0);
2659 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
2660 	snapshot = g_blob;
2661 
2662 	spdk_blob_close(snapshot, blob_op_complete, NULL);
2663 	poll_threads();
2664 	CU_ASSERT(g_bserrno == 0);
2665 
2666 	spdk_bs_unload(bs, bs_op_complete, NULL);
2667 	poll_threads();
2668 	CU_ASSERT(g_bserrno == 0);
2669 	CU_ASSERT(super_block->clean == 1);
2670 	g_bs = NULL;
2671 }
2672 
2673 static void
2674 bs_type(void)
2675 {
2676 	struct spdk_blob_store *bs;
2677 	struct spdk_bs_dev *dev;
2678 	struct spdk_bs_opts opts;
2679 
2680 	dev = init_dev();
2681 	spdk_bs_opts_init(&opts, sizeof(opts));
2682 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2683 
2684 	/* Initialize a new blob store */
2685 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2686 	poll_threads();
2687 	CU_ASSERT(g_bserrno == 0);
2688 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2689 	bs = g_bs;
2690 
2691 	/* Unload the blob store */
2692 	spdk_bs_unload(bs, bs_op_complete, NULL);
2693 	poll_threads();
2694 	CU_ASSERT(g_bserrno == 0);
2695 	g_bs = NULL;
2696 	g_blob = NULL;
2697 	g_blobid = 0;
2698 
2699 	/* Load non-existing blobstore type */
2700 	dev = init_dev();
2701 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "NONEXISTING");
2702 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2703 	poll_threads();
2704 	CU_ASSERT(g_bserrno != 0);
2705 
2706 	/* Load with empty blobstore type */
2707 	dev = init_dev();
2708 	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
2709 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2710 	poll_threads();
2711 	CU_ASSERT(g_bserrno == 0);
2712 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2713 	bs = g_bs;
2714 
2715 	spdk_bs_unload(bs, bs_op_complete, NULL);
2716 	poll_threads();
2717 	CU_ASSERT(g_bserrno == 0);
2718 	g_bs = NULL;
2719 
2720 	/* Initialize a new blob store with empty bstype */
2721 	dev = init_dev();
2722 	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
2723 	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
2724 	poll_threads();
2725 	CU_ASSERT(g_bserrno == 0);
2726 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2727 	bs = g_bs;
2728 
2729 	spdk_bs_unload(bs, bs_op_complete, NULL);
2730 	poll_threads();
2731 	CU_ASSERT(g_bserrno == 0);
2732 	g_bs = NULL;
2733 
2734 	/* Load non-existing blobstore type */
2735 	dev = init_dev();
2736 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "NONEXISTING");
2737 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2738 	poll_threads();
2739 	CU_ASSERT(g_bserrno != 0);
2740 
2741 	/* Load with empty blobstore type */
2742 	dev = init_dev();
2743 	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
2744 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2745 	poll_threads();
2746 	CU_ASSERT(g_bserrno == 0);
2747 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2748 	bs = g_bs;
2749 
2750 	spdk_bs_unload(bs, bs_op_complete, NULL);
2751 	poll_threads();
2752 	CU_ASSERT(g_bserrno == 0);
2753 	g_bs = NULL;
2754 }
2755 
2756 static void
2757 bs_super_block(void)
2758 {
2759 	struct spdk_blob_store *bs;
2760 	struct spdk_bs_dev *dev;
2761 	struct spdk_bs_super_block *super_block;
2762 	struct spdk_bs_opts opts;
2763 	struct spdk_bs_super_block_ver1 super_block_v1;
2764 
2765 	dev = init_dev();
2766 	spdk_bs_opts_init(&opts, sizeof(opts));
2767 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2768 
2769 	/* Initialize a new blob store */
2770 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2771 	poll_threads();
2772 	CU_ASSERT(g_bserrno == 0);
2773 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2774 	bs = g_bs;
2775 
2776 	/* Unload the blob store */
2777 	spdk_bs_unload(bs, bs_op_complete, NULL);
2778 	poll_threads();
2779 	CU_ASSERT(g_bserrno == 0);
2780 	g_bs = NULL;
2781 	g_blob = NULL;
2782 	g_blobid = 0;
2783 
2784 	/* Load an existing blob store with version newer than supported */
2785 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
2786 	super_block->version++;
2787 
2788 	dev = init_dev();
2789 	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
2790 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2791 	poll_threads();
2792 	CU_ASSERT(g_bserrno != 0);
2793 
2794 	/* Create a new blob store with super block version 1 */
2795 	dev = init_dev();
2796 	super_block_v1.version = 1;
2797 	memcpy(super_block_v1.signature, "SPDKBLOB", sizeof(super_block_v1.signature));
2798 	super_block_v1.length = 0x1000;
2799 	super_block_v1.clean = 1;
2800 	super_block_v1.super_blob = 0xFFFFFFFFFFFFFFFF;
2801 	super_block_v1.cluster_size = 0x100000;
2802 	super_block_v1.used_page_mask_start = 0x01;
2803 	super_block_v1.used_page_mask_len = 0x01;
2804 	super_block_v1.used_cluster_mask_start = 0x02;
2805 	super_block_v1.used_cluster_mask_len = 0x01;
2806 	super_block_v1.md_start = 0x03;
2807 	super_block_v1.md_len = 0x40;
2808 	memset(super_block_v1.reserved, 0, 4036);
2809 	super_block_v1.crc = blob_md_page_calc_crc(&super_block_v1);
2810 	memcpy(g_dev_buffer, &super_block_v1, sizeof(struct spdk_bs_super_block_ver1));
2811 
2812 	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
2813 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2814 	poll_threads();
2815 	CU_ASSERT(g_bserrno == 0);
2816 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2817 	bs = g_bs;
2818 
2819 	spdk_bs_unload(bs, bs_op_complete, NULL);
2820 	poll_threads();
2821 	CU_ASSERT(g_bserrno == 0);
2822 	g_bs = NULL;
2823 }
2824 
2825 static void
2826 bs_test_recover_cluster_count(void)
2827 {
2828 	struct spdk_blob_store *bs;
2829 	struct spdk_bs_dev *dev;
2830 	struct spdk_bs_super_block super_block;
2831 	struct spdk_bs_opts opts;
2832 
2833 	dev = init_dev();
2834 	spdk_bs_opts_init(&opts, sizeof(opts));
2835 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2836 
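	/* Hand-craft a dirty (clean == 0) super block so that load has to recover the used-cluster count. */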
2837 	super_block.version = 3;
2838 	memcpy(super_block.signature, "SPDKBLOB", sizeof(super_block.signature));
2839 	super_block.length = 0x1000;
2840 	super_block.clean = 0;
2841 	super_block.super_blob = 0xFFFFFFFFFFFFFFFF;
2842 	super_block.cluster_size = 4096;
2843 	super_block.used_page_mask_start = 0x01;
2844 	super_block.used_page_mask_len = 0x01;
2845 	super_block.used_cluster_mask_start = 0x02;
2846 	super_block.used_cluster_mask_len = 0x01;
2847 	super_block.used_blobid_mask_start = 0x03;
2848 	super_block.used_blobid_mask_len = 0x01;
2849 	super_block.md_start = 0x04;
2850 	super_block.md_len = 0x40;
2851 	memset(super_block.bstype.bstype, 0, sizeof(super_block.bstype.bstype));
2852 	super_block.size = dev->blockcnt * dev->blocklen;
2853 	super_block.io_unit_size = 0x1000;
2854 	memset(super_block.reserved, 0, 4000);
2855 	super_block.crc = blob_md_page_calc_crc(&super_block);
2856 	memcpy(g_dev_buffer, &super_block, sizeof(struct spdk_bs_super_block));
2857 
2858 	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
2859 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2860 	poll_threads();
2861 	CU_ASSERT(g_bserrno == 0);
2862 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2863 	bs = g_bs;
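	/*
	 * With a 4KiB cluster size each md page occupies one cluster, so after recovery
	 * only the first (md_start + md_len) clusters should be marked as used.
	 */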
2864 	CU_ASSERT(bs->num_free_clusters == bs->total_clusters - (super_block.md_start +
2865 			super_block.md_len));
2866 
2867 	spdk_bs_unload(bs, bs_op_complete, NULL);
2868 	poll_threads();
2869 	CU_ASSERT(g_bserrno == 0);
2870 	g_bs = NULL;
2871 }
2872 
2873 static void
2874 bs_test_grow(void)
2875 {
2876 	struct spdk_blob_store *bs;
2877 	struct spdk_bs_dev *dev;
2878 	struct spdk_bs_super_block super_block;
2879 	struct spdk_bs_opts opts;
2880 	struct spdk_bs_md_mask mask;
2881 	uint64_t bdev_size;
2882 
2883 	dev = init_dev();
2884 	bdev_size = dev->blockcnt * dev->blocklen;
2885 	spdk_bs_opts_init(&opts, sizeof(opts));
2886 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2887 	poll_threads();
2888 	CU_ASSERT(g_bserrno == 0);
2889 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2890 	bs = g_bs;
2891 
2892 	spdk_bs_unload(bs, bs_op_complete, NULL);
2893 	poll_threads();
2894 	CU_ASSERT(g_bserrno == 0);
2895 	g_bs = NULL;
2896 
2897 	/*
2898 	 * To make sure all the metadata has been written to the disk,
2899 	 * we check g_dev_buffer after spdk_bs_unload.
2900 	 */
2901 	memcpy(&super_block, g_dev_buffer, sizeof(struct spdk_bs_super_block));
2902 	CU_ASSERT(super_block.size == bdev_size);
2903 
2904 	/*
2905 	 * Make sure the used_cluster mask is correct.
2906 	 */
2907 	memcpy(&mask, g_dev_buffer + super_block.used_cluster_mask_start * 4096,
2908 	       sizeof(struct spdk_bs_md_mask));
2909 	CU_ASSERT(mask.type == SPDK_MD_MASK_TYPE_USED_CLUSTERS);
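	/* The default cluster size is 1MiB, so the mask length equals the device size in MiB. */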
2910 	CU_ASSERT(mask.length == bdev_size / (1 * 1024 * 1024));
2911 
2912 	/*
2913 	 * The default dev size is 64M. Here we set the dev size to 128M, so the
2914 	 * blobstore will adjust its metadata according to the new size.
2915 	 * The dev size is larger than the g_dev_buffer size, so we set clear_method
2916 	 * to NONE; otherwise the blobstore would try to clear the dev and write
2917 	 * beyond the end of g_dev_buffer.
2918 	 */
2919 	dev = init_dev();
2920 	dev->blockcnt = (128L * 1024L * 1024L) / dev->blocklen;
2921 	bdev_size = dev->blockcnt * dev->blocklen;
2922 	spdk_bs_opts_init(&opts, sizeof(opts));
2923 	opts.clear_method = BS_CLEAR_WITH_NONE;
2924 	spdk_bs_grow(dev, &opts, bs_op_with_handle_complete, NULL);
2925 	poll_threads();
2926 	CU_ASSERT(g_bserrno == 0);
2927 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2928 	bs = g_bs;
2929 
2930 	/*
2931 	 * After spdk_bs_grow, all metadata has been written to the disk,
2932 	 * so we can check g_dev_buffer now.
2933 	 */
2934 	memcpy(&super_block, g_dev_buffer, sizeof(struct spdk_bs_super_block));
2935 	CU_ASSERT(super_block.size == bdev_size);
2936 
2937 	/*
2938 	 * Make sure the used_cluster mask has been updated according to the bdev size
2939 	 */
2940 	memcpy(&mask, g_dev_buffer + super_block.used_cluster_mask_start * 4096,
2941 	       sizeof(struct spdk_bs_md_mask));
2942 	CU_ASSERT(mask.type == SPDK_MD_MASK_TYPE_USED_CLUSTERS);
2943 	CU_ASSERT(mask.length == bdev_size / (1 * 1024 * 1024));
2944 
2945 	spdk_bs_unload(bs, bs_op_complete, NULL);
2946 	poll_threads();
2947 	CU_ASSERT(g_bserrno == 0);
2948 	g_bs = NULL;
2949 }
2950 
2951 /*
2952  * Create a blobstore and then unload it.
2953  */
2954 static void
2955 bs_unload(void)
2956 {
2957 	struct spdk_blob_store *bs = g_bs;
2958 	struct spdk_blob *blob;
2959 
2960 	/* Create a blob and open it. */
2961 	blob = ut_blob_create_and_open(bs, NULL);
2962 
2963 	/* Try to unload blobstore, should fail with open blob */
2964 	g_bserrno = -1;
2965 	spdk_bs_unload(bs, bs_op_complete, NULL);
2966 	poll_threads();
2967 	CU_ASSERT(g_bserrno == -EBUSY);
2968 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2969 
2970 	/* Close the blob, then successfully unload blobstore */
2971 	g_bserrno = -1;
2972 	spdk_blob_close(blob, blob_op_complete, NULL);
2973 	poll_threads();
2974 	CU_ASSERT(g_bserrno == 0);
2975 }
2976 
2977 /*
2978  * Create a blobstore with a cluster size different than the default, and ensure it is
2979  *  persisted.
2980  */
2981 static void
2982 bs_cluster_sz(void)
2983 {
2984 	struct spdk_blob_store *bs;
2985 	struct spdk_bs_dev *dev;
2986 	struct spdk_bs_opts opts;
2987 	uint32_t cluster_sz;
2988 
2989 	/* Set cluster size to zero */
2990 	dev = init_dev();
2991 	spdk_bs_opts_init(&opts, sizeof(opts));
2992 	opts.cluster_sz = 0;
2993 
2994 	/* Initialize a new blob store */
2995 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2996 	poll_threads();
2997 	CU_ASSERT(g_bserrno == -EINVAL);
2998 	SPDK_CU_ASSERT_FATAL(g_bs == NULL);
2999 
3000 	/*
3001 	 * Set cluster size to the blobstore page size;
3002 	 * to work, it is required to be at least twice the blobstore page size.
3003 	 */
3004 	dev = init_dev();
3005 	spdk_bs_opts_init(&opts, sizeof(opts));
3006 	opts.cluster_sz = SPDK_BS_PAGE_SIZE;
3007 
3008 	/* Initialize a new blob store */
3009 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
3010 	poll_threads();
3011 	CU_ASSERT(g_bserrno == -ENOMEM);
3012 	SPDK_CU_ASSERT_FATAL(g_bs == NULL);
3013 
3014 	/*
3015 	 * Set cluster size to less than the page size;
3016 	 * to work, it is required to be at least twice the blobstore page size.
3017 	 */
3018 	dev = init_dev();
3019 	spdk_bs_opts_init(&opts, sizeof(opts));
3020 	opts.cluster_sz = SPDK_BS_PAGE_SIZE - 1;
3021 
3022 	/* Initialize a new blob store */
3023 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
3024 	poll_threads();
3025 	CU_ASSERT(g_bserrno == -EINVAL);
3026 	SPDK_CU_ASSERT_FATAL(g_bs == NULL);
3027 
3028 	/* Set cluster size to twice the default */
3029 	dev = init_dev();
3030 	spdk_bs_opts_init(&opts, sizeof(opts));
3031 	opts.cluster_sz *= 2;
3032 	cluster_sz = opts.cluster_sz;
3033 
3034 	/* Initialize a new blob store */
3035 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
3036 	poll_threads();
3037 	CU_ASSERT(g_bserrno == 0);
3038 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3039 	bs = g_bs;
3040 
3041 	CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz);
3042 
3043 	ut_bs_reload(&bs, &opts);
3044 
3045 	CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz);
3046 
3047 	spdk_bs_unload(bs, bs_op_complete, NULL);
3048 	poll_threads();
3049 	CU_ASSERT(g_bserrno == 0);
3050 	g_bs = NULL;
3051 }
3052 
3053 /*
3054  * Create a blobstore, reload it and ensure total usable cluster count
3055  *  stays the same.
3056  */
3057 static void
3058 bs_usable_clusters(void)
3059 {
3060 	struct spdk_blob_store *bs = g_bs;
3061 	struct spdk_blob *blob;
3062 	uint32_t clusters;
3063 	int i;
3064 
3065 
3066 	clusters = spdk_bs_total_data_cluster_count(bs);
3067 
3068 	ut_bs_reload(&bs, NULL);
3069 
3070 	CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters);
3071 
3072 	/* Create and resize blobs to make sure that usable cluster count won't change */
3073 	for (i = 0; i < 4; i++) {
3074 		g_bserrno = -1;
3075 		g_blobid = SPDK_BLOBID_INVALID;
3076 		blob = ut_blob_create_and_open(bs, NULL);
3077 
3078 		spdk_blob_resize(blob, 10, blob_op_complete, NULL);
3079 		poll_threads();
3080 		CU_ASSERT(g_bserrno == 0);
3081 
3082 		g_bserrno = -1;
3083 		spdk_blob_close(blob, blob_op_complete, NULL);
3084 		poll_threads();
3085 		CU_ASSERT(g_bserrno == 0);
3086 
3087 		CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters);
3088 	}
3089 
3090 	/* Reload the blob store to make sure that nothing changed */
3091 	ut_bs_reload(&bs, NULL);
3092 
3093 	CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters);
3094 }
3095 
3096 /*
3097  * Test resizing of the metadata blob.  This requires creating enough blobs
3098  *  so that one cluster is not enough to fit the metadata for those blobs.
3099  *  To induce this condition to happen more quickly, we reduce the cluster
3100  *  size to 16KB, which means only four 4KB blob metadata pages can fit in a cluster.
3101  */
3102 static void
3103 bs_resize_md(void)
3104 {
3105 	struct spdk_blob_store *bs;
3106 	const int CLUSTER_PAGE_COUNT = 4;
3107 	const int NUM_BLOBS = CLUSTER_PAGE_COUNT * 4;
3108 	struct spdk_bs_dev *dev;
3109 	struct spdk_bs_opts opts;
3110 	struct spdk_blob *blob;
3111 	struct spdk_blob_opts blob_opts;
3112 	uint32_t cluster_sz;
3113 	spdk_blob_id blobids[NUM_BLOBS];
3114 	int i;
3115 
3116 
3117 	dev = init_dev();
3118 	spdk_bs_opts_init(&opts, sizeof(opts));
3119 	opts.cluster_sz = CLUSTER_PAGE_COUNT * 4096;
3120 	cluster_sz = opts.cluster_sz;
3121 
3122 	/* Initialize a new blob store */
3123 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
3124 	poll_threads();
3125 	CU_ASSERT(g_bserrno == 0);
3126 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3127 	bs = g_bs;
3128 
3129 	CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz);
3130 
3131 	ut_spdk_blob_opts_init(&blob_opts);
3132 
3133 	for (i = 0; i < NUM_BLOBS; i++) {
3134 		g_bserrno = -1;
3135 		g_blobid = SPDK_BLOBID_INVALID;
3136 		spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
3137 		poll_threads();
3138 		CU_ASSERT(g_bserrno == 0);
3139 		CU_ASSERT(g_blobid !=  SPDK_BLOBID_INVALID);
3140 		blobids[i] = g_blobid;
3141 	}
3142 
3143 	ut_bs_reload(&bs, &opts);
3144 
3145 	CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz);
3146 
3147 	for (i = 0; i < NUM_BLOBS; i++) {
3148 		g_bserrno = -1;
3149 		g_blob = NULL;
3150 		spdk_bs_open_blob(bs, blobids[i], blob_op_with_handle_complete, NULL);
3151 		poll_threads();
3152 		CU_ASSERT(g_bserrno == 0);
3153 		CU_ASSERT(g_blob !=  NULL);
3154 		blob = g_blob;
3155 		g_bserrno = -1;
3156 		spdk_blob_close(blob, blob_op_complete, NULL);
3157 		poll_threads();
3158 		CU_ASSERT(g_bserrno == 0);
3159 	}
3160 
3161 	spdk_bs_unload(bs, bs_op_complete, NULL);
3162 	poll_threads();
3163 	CU_ASSERT(g_bserrno == 0);
3164 	g_bs = NULL;
3165 }
3166 
3167 static void
3168 bs_destroy(void)
3169 {
3170 	struct spdk_blob_store *bs;
3171 	struct spdk_bs_dev *dev;
3172 
3173 	/* Initialize a new blob store */
3174 	dev = init_dev();
3175 	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
3176 	poll_threads();
3177 	CU_ASSERT(g_bserrno == 0);
3178 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3179 	bs = g_bs;
3180 
3181 	/* Destroy the blob store */
3182 	g_bserrno = -1;
3183 	spdk_bs_destroy(bs, bs_op_complete, NULL);
3184 	poll_threads();
3185 	CU_ASSERT(g_bserrno == 0);
3186 
3187 	/* Loading a non-existent blob store should fail. */
3188 	g_bs = NULL;
3189 	dev = init_dev();
3190 
3191 	g_bserrno = 0;
3192 	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
3193 	poll_threads();
3194 	CU_ASSERT(g_bserrno != 0);
3195 }
3196 
3197 /* Try to hit all of the corner cases associated with serializing
3198  * a blob to disk
3199  */
3200 static void
3201 blob_serialize_test(void)
3202 {
3203 	struct spdk_bs_dev *dev;
3204 	struct spdk_bs_opts opts;
3205 	struct spdk_blob_store *bs;
3206 	spdk_blob_id blobid[2];
3207 	struct spdk_blob *blob[2];
3208 	uint64_t i;
3209 	char *value;
3210 	int rc;
3211 
3212 	dev = init_dev();
3213 
3214 	/* Initialize a new blobstore with very small clusters */
3215 	spdk_bs_opts_init(&opts, sizeof(opts));
3216 	opts.cluster_sz = dev->blocklen * 8;
3217 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
3218 	poll_threads();
3219 	CU_ASSERT(g_bserrno == 0);
3220 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3221 	bs = g_bs;
3222 
3223 	/* Create and open two blobs */
3224 	for (i = 0; i < 2; i++) {
3225 		blob[i] = ut_blob_create_and_open(bs, NULL);
3226 		blobid[i] = spdk_blob_get_id(blob[i]);
3227 
3228 		/* Set a fairly large xattr on both blobs to eat up
3229 		 * metadata space
3230 		 */
3231 		value = calloc(dev->blocklen - 64, sizeof(char));
3232 		SPDK_CU_ASSERT_FATAL(value != NULL);
3233 		memset(value, i, dev->blocklen / 2);
3234 		rc = spdk_blob_set_xattr(blob[i], "name", value, dev->blocklen - 64);
3235 		CU_ASSERT(rc == 0);
3236 		free(value);
3237 	}
3238 
3239 	/* Resize the blobs, alternating 1 cluster at a time.
3240 	 * This thwarts run-length encoding and will cause the extent
3241 	 * descriptors to spill over onto additional metadata pages.
3242 	 */
3243 	for (i = 0; i < 6; i++) {
3244 		spdk_blob_resize(blob[i % 2], (i / 2) + 1, blob_op_complete, NULL);
3245 		poll_threads();
3246 		CU_ASSERT(g_bserrno == 0);
3247 	}
3248 
3249 	for (i = 0; i < 2; i++) {
3250 		spdk_blob_sync_md(blob[i], blob_op_complete, NULL);
3251 		poll_threads();
3252 		CU_ASSERT(g_bserrno == 0);
3253 	}
3254 
3255 	/* Close the blobs */
3256 	for (i = 0; i < 2; i++) {
3257 		spdk_blob_close(blob[i], blob_op_complete, NULL);
3258 		poll_threads();
3259 		CU_ASSERT(g_bserrno == 0);
3260 	}
3261 
3262 	ut_bs_reload(&bs, &opts);
3263 
3264 	for (i = 0; i < 2; i++) {
3265 		blob[i] = NULL;
3266 
3267 		spdk_bs_open_blob(bs, blobid[i], blob_op_with_handle_complete, NULL);
3268 		poll_threads();
3269 		CU_ASSERT(g_bserrno == 0);
3270 		CU_ASSERT(g_blob != NULL);
3271 		blob[i] = g_blob;
3272 
3273 		CU_ASSERT(spdk_blob_get_num_clusters(blob[i]) == 3);
3274 
3275 		spdk_blob_close(blob[i], blob_op_complete, NULL);
3276 		poll_threads();
3277 		CU_ASSERT(g_bserrno == 0);
3278 	}
3279 
3280 	spdk_bs_unload(bs, bs_op_complete, NULL);
3281 	poll_threads();
3282 	CU_ASSERT(g_bserrno == 0);
3283 	g_bs = NULL;
3284 }
3285 
3286 static void
3287 blob_crc(void)
3288 {
3289 	struct spdk_blob_store *bs = g_bs;
3290 	struct spdk_blob *blob;
3291 	spdk_blob_id blobid;
3292 	uint32_t page_num;
3293 	int index;
3294 	struct spdk_blob_md_page *page;
3295 
3296 	blob = ut_blob_create_and_open(bs, NULL);
3297 	blobid = spdk_blob_get_id(blob);
3298 
3299 	spdk_blob_close(blob, blob_op_complete, NULL);
3300 	poll_threads();
3301 	CU_ASSERT(g_bserrno == 0);
3302 
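	/* Locate the blob's md page on the device and zero its CRC to simulate corruption. */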
3303 	page_num = bs_blobid_to_page(blobid);
3304 	index = DEV_BUFFER_BLOCKLEN * (bs->md_start + page_num);
3305 	page = (struct spdk_blob_md_page *)&g_dev_buffer[index];
3306 	page->crc = 0;
3307 
3308 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
3309 	poll_threads();
3310 	CU_ASSERT(g_bserrno == -EINVAL);
3311 	CU_ASSERT(g_blob == NULL);
3312 	g_bserrno = 0;
3313 
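	/* Deleting the blob must fail as well, since its md page no longer passes the CRC check. */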
3314 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
3315 	poll_threads();
3316 	CU_ASSERT(g_bserrno == -EINVAL);
3317 }
3318 
3319 static void
3320 super_block_crc(void)
3321 {
3322 	struct spdk_blob_store *bs;
3323 	struct spdk_bs_dev *dev;
3324 	struct spdk_bs_super_block *super_block;
3325 
3326 	dev = init_dev();
3327 	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
3328 	poll_threads();
3329 	CU_ASSERT(g_bserrno == 0);
3330 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3331 	bs = g_bs;
3332 
3333 	spdk_bs_unload(bs, bs_op_complete, NULL);
3334 	poll_threads();
3335 	CU_ASSERT(g_bserrno == 0);
3336 	g_bs = NULL;
3337 
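	/* Zero the super block CRC so that the subsequent load fails checksum validation. */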
3338 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
3339 	super_block->crc = 0;
3340 	dev = init_dev();
3341 
3342 	/* Load an existing blob store */
3343 	g_bserrno = 0;
3344 	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
3345 	poll_threads();
3346 	CU_ASSERT(g_bserrno == -EILSEQ);
3347 }
3348 
3349 /* For blob dirty shutdown test case we do the following sub-test cases:
3350  * 1 Initialize new blob store and create 1 super blob with some xattrs, then we
3351  *   dirty shutdown and reload the blob store and verify the xattrs.
3352  * 2 Resize the blob from 10 clusters to 20 clusters and then dirty shutdown,
3353  *   reload the blob store and verify the clusters number.
3354  * 3 Create the second blob and then dirty shutdown, reload the blob store
3355  *   and verify the second blob.
3356  * 4 Delete the second blob and then dirty shutdown, reload the blob store
3357  *   and verify the second blob is invalid.
3358  * 5 Create the second blob again and also create the third blob, modify the
3359  *   md of the second blob so that the md becomes invalid, and then dirty shutdown;
3360  *   reload the blob store and verify that the second blob is invalid and
3361  *   that the third blob is correct.
3362  */
3363 static void
3364 blob_dirty_shutdown(void)
3365 {
3366 	int rc;
3367 	int index;
3368 	struct spdk_blob_store *bs = g_bs;
3369 	spdk_blob_id blobid1, blobid2, blobid3;
3370 	struct spdk_blob *blob = g_blob;
3371 	uint64_t length;
3372 	uint64_t free_clusters;
3373 	const void *value;
3374 	size_t value_len;
3375 	uint32_t page_num;
3376 	struct spdk_blob_md_page *page;
3377 	struct spdk_blob_opts blob_opts;
3378 
3379 	/* Create first blob */
3380 	blobid1 = spdk_blob_get_id(blob);
3381 
3382 	/* Set some xattrs */
3383 	rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
3384 	CU_ASSERT(rc == 0);
3385 
3386 	length = 2345;
3387 	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
3388 	CU_ASSERT(rc == 0);
3389 
3390 	/* Put an xattr that fits exactly in a single page.
3391 	 * This results in additional pages being added to the MD.
3392 	 * The first page holds the flags and the smaller xattrs, the second the large xattr,
3393 	 * and the third just the extents.
3394 	 */
3395 	size_t xattr_length = 4072 - sizeof(struct spdk_blob_md_descriptor_xattr) -
3396 			      strlen("large_xattr");
3397 	char *xattr = calloc(xattr_length, sizeof(char));
3398 	SPDK_CU_ASSERT_FATAL(xattr != NULL);
3399 	rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
3400 	free(xattr);
3401 	SPDK_CU_ASSERT_FATAL(rc == 0);
3402 
3403 	/* Resize the blob */
3404 	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
3405 	poll_threads();
3406 	CU_ASSERT(g_bserrno == 0);
3407 
3408 	/* Set the blob as the super blob */
3409 	spdk_bs_set_super(bs, blobid1, blob_op_complete, NULL);
3410 	poll_threads();
3411 	CU_ASSERT(g_bserrno == 0);
3412 
3413 	free_clusters = spdk_bs_free_cluster_count(bs);
3414 
3415 	spdk_blob_close(blob, blob_op_complete, NULL);
3416 	poll_threads();
3417 	CU_ASSERT(g_bserrno == 0);
3418 	blob = NULL;
3419 	g_blob = NULL;
3420 	g_blobid = SPDK_BLOBID_INVALID;
3421 
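	/* ut_bs_dirty_load() simulates a dirty shutdown: it frees the blobstore
	 * without unloading it and then reloads it from the same device buffer. */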
3422 	ut_bs_dirty_load(&bs, NULL);
3423 
3424 	/* Get the super blob */
3425 	spdk_bs_get_super(bs, blob_op_with_id_complete, NULL);
3426 	poll_threads();
3427 	CU_ASSERT(g_bserrno == 0);
3428 	CU_ASSERT(blobid1 == g_blobid);
3429 
3430 	spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL);
3431 	poll_threads();
3432 	CU_ASSERT(g_bserrno == 0);
3433 	CU_ASSERT(g_blob != NULL);
3434 	blob = g_blob;
3435 
3436 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3437 
3438 	/* Get the xattrs */
3439 	value = NULL;
3440 	rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len);
3441 	CU_ASSERT(rc == 0);
3442 	SPDK_CU_ASSERT_FATAL(value != NULL);
3443 	CU_ASSERT(*(uint64_t *)value == length);
3444 	CU_ASSERT(value_len == 8);
3445 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
3446 
3447 	/* Resize the blob */
3448 	spdk_blob_resize(blob, 20, blob_op_complete, NULL);
3449 	poll_threads();
3450 	CU_ASSERT(g_bserrno == 0);
3451 
3452 	free_clusters = spdk_bs_free_cluster_count(bs);
3453 
3454 	spdk_blob_close(blob, blob_op_complete, NULL);
3455 	poll_threads();
3456 	CU_ASSERT(g_bserrno == 0);
3457 	blob = NULL;
3458 	g_blob = NULL;
3459 	g_blobid = SPDK_BLOBID_INVALID;
3460 
3461 	ut_bs_dirty_load(&bs, NULL);
3462 
3463 	spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL);
3464 	poll_threads();
3465 	CU_ASSERT(g_bserrno == 0);
3466 	CU_ASSERT(g_blob != NULL);
3467 	blob = g_blob;
3468 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 20);
3469 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3470 
3471 	spdk_blob_close(blob, blob_op_complete, NULL);
3472 	poll_threads();
3473 	CU_ASSERT(g_bserrno == 0);
3474 	blob = NULL;
3475 	g_blob = NULL;
3476 	g_blobid = SPDK_BLOBID_INVALID;
3477 
3478 	/* Create second blob */
3479 	blob = ut_blob_create_and_open(bs, NULL);
3480 	blobid2 = spdk_blob_get_id(blob);
3481 
3482 	/* Set some xattrs */
3483 	rc = spdk_blob_set_xattr(blob, "name", "log1.txt", strlen("log1.txt") + 1);
3484 	CU_ASSERT(rc == 0);
3485 
3486 	length = 5432;
3487 	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
3488 	CU_ASSERT(rc == 0);
3489 
3490 	/* Resize the blob */
3491 	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
3492 	poll_threads();
3493 	CU_ASSERT(g_bserrno == 0);
3494 
3495 	free_clusters = spdk_bs_free_cluster_count(bs);
3496 
3497 	spdk_blob_close(blob, blob_op_complete, NULL);
3498 	poll_threads();
3499 	CU_ASSERT(g_bserrno == 0);
3500 	blob = NULL;
3501 	g_blob = NULL;
3502 	g_blobid = SPDK_BLOBID_INVALID;
3503 
3504 	ut_bs_dirty_load(&bs, NULL);
3505 
3506 	spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL);
3507 	poll_threads();
3508 	CU_ASSERT(g_bserrno == 0);
3509 	CU_ASSERT(g_blob != NULL);
3510 	blob = g_blob;
3511 
3512 	/* Get the xattrs */
3513 	value = NULL;
3514 	rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len);
3515 	CU_ASSERT(rc == 0);
3516 	SPDK_CU_ASSERT_FATAL(value != NULL);
3517 	CU_ASSERT(*(uint64_t *)value == length);
3518 	CU_ASSERT(value_len == 8);
3519 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
3520 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3521 
3522 	ut_blob_close_and_delete(bs, blob);
3523 
3524 	free_clusters = spdk_bs_free_cluster_count(bs);
3525 
3526 	ut_bs_dirty_load(&bs, NULL);
3527 
3528 	spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL);
3529 	poll_threads();
3530 	CU_ASSERT(g_bserrno != 0);
3531 	CU_ASSERT(g_blob == NULL);
3532 
3533 	spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL);
3534 	poll_threads();
3535 	CU_ASSERT(g_bserrno == 0);
3536 	CU_ASSERT(g_blob != NULL);
3537 	blob = g_blob;
3538 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3539 	spdk_blob_close(blob, blob_op_complete, NULL);
3540 	poll_threads();
3541 	CU_ASSERT(g_bserrno == 0);
3542 
3543 	ut_bs_reload(&bs, NULL);
3544 
3545 	/* Create second blob */
3546 	ut_spdk_blob_opts_init(&blob_opts);
3547 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
3548 	poll_threads();
3549 	CU_ASSERT(g_bserrno == 0);
3550 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
3551 	blobid2 = g_blobid;
3552 
3553 	/* Create third blob */
3554 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
3555 	poll_threads();
3556 	CU_ASSERT(g_bserrno == 0);
3557 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
3558 	blobid3 = g_blobid;
3559 
3560 	spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL);
3561 	poll_threads();
3562 	CU_ASSERT(g_bserrno == 0);
3563 	CU_ASSERT(g_blob != NULL);
3564 	blob = g_blob;
3565 
3566 	/* Set some xattrs for second blob */
3567 	rc = spdk_blob_set_xattr(blob, "name", "log1.txt", strlen("log1.txt") + 1);
3568 	CU_ASSERT(rc == 0);
3569 
3570 	length = 5432;
3571 	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
3572 	CU_ASSERT(rc == 0);
3573 
3574 	spdk_blob_close(blob, blob_op_complete, NULL);
3575 	poll_threads();
3576 	CU_ASSERT(g_bserrno == 0);
3577 	blob = NULL;
3578 	g_blob = NULL;
3579 	g_blobid = SPDK_BLOBID_INVALID;
3580 
3581 	spdk_bs_open_blob(bs, blobid3, blob_op_with_handle_complete, NULL);
3582 	poll_threads();
3583 	CU_ASSERT(g_bserrno == 0);
3584 	CU_ASSERT(g_blob != NULL);
3585 	blob = g_blob;
3586 
3587 	/* Set some xattrs for third blob */
3588 	rc = spdk_blob_set_xattr(blob, "name", "log2.txt", strlen("log2.txt") + 1);
3589 	CU_ASSERT(rc == 0);
3590 
3591 	length = 5432;
3592 	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
3593 	CU_ASSERT(rc == 0);
3594 
3595 	spdk_blob_close(blob, blob_op_complete, NULL);
3596 	poll_threads();
3597 	CU_ASSERT(g_bserrno == 0);
3598 	blob = NULL;
3599 	g_blob = NULL;
3600 	g_blobid = SPDK_BLOBID_INVALID;
3601 
3602 	/* Mark second blob as invalid */
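	/* Bumping sequence_num on the blob's first metadata page (while keeping the
	 * page CRC valid) corrupts the metadata chain, so opening this blob after the
	 * dirty load below is expected to fail. */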
3603 	page_num = bs_blobid_to_page(blobid2);
3604 
3605 	index = DEV_BUFFER_BLOCKLEN * (bs->md_start + page_num);
3606 	page = (struct spdk_blob_md_page *)&g_dev_buffer[index];
3607 	page->sequence_num = 1;
3608 	page->crc = blob_md_page_calc_crc(page);
3609 
3610 	free_clusters = spdk_bs_free_cluster_count(bs);
3611 
3612 	ut_bs_dirty_load(&bs, NULL);
3613 
3614 	spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL);
3615 	poll_threads();
3616 	CU_ASSERT(g_bserrno != 0);
3617 	CU_ASSERT(g_blob == NULL);
3618 
3619 	spdk_bs_open_blob(bs, blobid3, blob_op_with_handle_complete, NULL);
3620 	poll_threads();
3621 	CU_ASSERT(g_bserrno == 0);
3622 	CU_ASSERT(g_blob != NULL);
3623 	blob = g_blob;
3624 
3625 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3626 }
3627 
3628 static void
3629 blob_flags(void)
3630 {
3631 	struct spdk_blob_store *bs = g_bs;
3632 	spdk_blob_id blobid_invalid, blobid_data_ro, blobid_md_ro;
3633 	struct spdk_blob *blob_invalid, *blob_data_ro, *blob_md_ro;
3634 	struct spdk_blob_opts blob_opts;
3635 	int rc;
3636 
3637 	/* Create three blobs - one each for testing invalid, data_ro and md_ro flags. */
3638 	blob_invalid = ut_blob_create_and_open(bs, NULL);
3639 	blobid_invalid = spdk_blob_get_id(blob_invalid);
3640 
3641 	blob_data_ro = ut_blob_create_and_open(bs, NULL);
3642 	blobid_data_ro = spdk_blob_get_id(blob_data_ro);
3643 
3644 	ut_spdk_blob_opts_init(&blob_opts);
3645 	blob_opts.clear_method = BLOB_CLEAR_WITH_WRITE_ZEROES;
3646 	blob_md_ro = ut_blob_create_and_open(bs, &blob_opts);
3647 	blobid_md_ro = spdk_blob_get_id(blob_md_ro);
3648 	CU_ASSERT((blob_md_ro->md_ro_flags & SPDK_BLOB_MD_RO_FLAGS_MASK) == BLOB_CLEAR_WITH_WRITE_ZEROES);
3649 
3650 	/* Change the size of blob_data_ro to check if flags are serialized
3651 	 * when the blob has a non-zero number of extents */
3652 	spdk_blob_resize(blob_data_ro, 10, blob_op_complete, NULL);
3653 	poll_threads();
3654 	CU_ASSERT(g_bserrno == 0);
3655 
3656 	/* Set an xattr to check if flags are serialized
3657 	 * when the blob has a non-zero number of xattrs */
3658 	rc = spdk_blob_set_xattr(blob_md_ro, "name", "log.txt", strlen("log.txt") + 1);
3659 	CU_ASSERT(rc == 0);
3660 
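	/* Set flag bits that the blobstore does not recognize.  After reload, an unknown
	 * invalid_flags bit must prevent the blob from being opened, an unknown
	 * data_ro_flags bit must force the blob fully read-only, and an unknown
	 * md_ro_flags bit must leave only the metadata read-only. */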
3661 	blob_invalid->invalid_flags = (1ULL << 63);
3662 	blob_invalid->state = SPDK_BLOB_STATE_DIRTY;
3663 	blob_data_ro->data_ro_flags = (1ULL << 62);
3664 	blob_data_ro->state = SPDK_BLOB_STATE_DIRTY;
3665 	blob_md_ro->md_ro_flags = (1ULL << 61);
3666 	blob_md_ro->state = SPDK_BLOB_STATE_DIRTY;
3667 
3668 	g_bserrno = -1;
3669 	spdk_blob_sync_md(blob_invalid, blob_op_complete, NULL);
3670 	poll_threads();
3671 	CU_ASSERT(g_bserrno == 0);
3672 	g_bserrno = -1;
3673 	spdk_blob_sync_md(blob_data_ro, blob_op_complete, NULL);
3674 	poll_threads();
3675 	CU_ASSERT(g_bserrno == 0);
3676 	g_bserrno = -1;
3677 	spdk_blob_sync_md(blob_md_ro, blob_op_complete, NULL);
3678 	poll_threads();
3679 	CU_ASSERT(g_bserrno == 0);
3680 
3681 	g_bserrno = -1;
3682 	spdk_blob_close(blob_invalid, blob_op_complete, NULL);
3683 	poll_threads();
3684 	CU_ASSERT(g_bserrno == 0);
3685 	blob_invalid = NULL;
3686 	g_bserrno = -1;
3687 	spdk_blob_close(blob_data_ro, blob_op_complete, NULL);
3688 	poll_threads();
3689 	CU_ASSERT(g_bserrno == 0);
3690 	blob_data_ro = NULL;
3691 	g_bserrno = -1;
3692 	spdk_blob_close(blob_md_ro, blob_op_complete, NULL);
3693 	poll_threads();
3694 	CU_ASSERT(g_bserrno == 0);
3695 	blob_md_ro = NULL;
3696 
3697 	g_blob = NULL;
3698 	g_blobid = SPDK_BLOBID_INVALID;
3699 
3700 	ut_bs_reload(&bs, NULL);
3701 
3702 	g_blob = NULL;
3703 	g_bserrno = 0;
3704 	spdk_bs_open_blob(bs, blobid_invalid, blob_op_with_handle_complete, NULL);
3705 	poll_threads();
3706 	CU_ASSERT(g_bserrno != 0);
3707 	CU_ASSERT(g_blob == NULL);
3708 
3709 	g_blob = NULL;
3710 	g_bserrno = -1;
3711 	spdk_bs_open_blob(bs, blobid_data_ro, blob_op_with_handle_complete, NULL);
3712 	poll_threads();
3713 	CU_ASSERT(g_bserrno == 0);
3714 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
3715 	blob_data_ro = g_blob;
3716 	/* If an unknown data_ro flag was found, the blob should be marked both data and md read-only. */
3717 	CU_ASSERT(blob_data_ro->data_ro == true);
3718 	CU_ASSERT(blob_data_ro->md_ro == true);
3719 	CU_ASSERT(spdk_blob_get_num_clusters(blob_data_ro) == 10);
3720 
3721 	g_blob = NULL;
3722 	g_bserrno = -1;
3723 	spdk_bs_open_blob(bs, blobid_md_ro, blob_op_with_handle_complete, NULL);
3724 	poll_threads();
3725 	CU_ASSERT(g_bserrno == 0);
3726 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
3727 	blob_md_ro = g_blob;
3728 	CU_ASSERT(blob_md_ro->data_ro == false);
3729 	CU_ASSERT(blob_md_ro->md_ro == true);
3730 
3731 	g_bserrno = -1;
3732 	spdk_blob_sync_md(blob_md_ro, blob_op_complete, NULL);
3733 	poll_threads();
3734 	CU_ASSERT(g_bserrno == 0);
3735 
3736 	ut_blob_close_and_delete(bs, blob_data_ro);
3737 	ut_blob_close_and_delete(bs, blob_md_ro);
3738 }
3739 
3740 static void
3741 bs_version(void)
3742 {
3743 	struct spdk_bs_super_block *super;
3744 	struct spdk_blob_store *bs = g_bs;
3745 	struct spdk_bs_dev *dev;
3746 	struct spdk_blob *blob;
3747 	struct spdk_blob_opts blob_opts;
3748 	spdk_blob_id blobid;
3749 
3750 	/* Unload the blob store */
3751 	spdk_bs_unload(bs, bs_op_complete, NULL);
3752 	poll_threads();
3753 	CU_ASSERT(g_bserrno == 0);
3754 	g_bs = NULL;
3755 
3756 	/*
3757 	 * Change the bs version on disk.  This will allow us to
3758 	 *  test that the version does not get modified automatically
3759 	 *  when loading and unloading the blobstore.
3760 	 */
3761 	super = (struct spdk_bs_super_block *)&g_dev_buffer[0];
3762 	CU_ASSERT(super->version == SPDK_BS_VERSION);
3763 	CU_ASSERT(super->clean == 1);
3764 	super->version = 2;
3765 	/*
3766 	 * Version 2 metadata does not have a used blobid mask, so clear
3767 	 *  those fields in the super block and zero the corresponding
3768 	 *  region on "disk".  We will use this to ensure blob IDs are
3769 	 *  correctly reconstructed.
3770 	 */
3771 	memset(&g_dev_buffer[super->used_blobid_mask_start * SPDK_BS_PAGE_SIZE], 0,
3772 	       super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE);
3773 	super->used_blobid_mask_start = 0;
3774 	super->used_blobid_mask_len = 0;
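	/* Recompute the super block CRC so the modified super block still passes the
	 * CRC check on load (see super_block_crc above). */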
3775 	super->crc = blob_md_page_calc_crc(super);
3776 
3777 	/* Load an existing blob store */
3778 	dev = init_dev();
3779 	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
3780 	poll_threads();
3781 	CU_ASSERT(g_bserrno == 0);
3782 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3783 	CU_ASSERT(super->clean == 1);
3784 	bs = g_bs;
3785 
3786 	/*
3787 	 * Create a blob - just to make sure that unloading the blob store
3788 	 *  results in writing the super block (since metadata pages
3789 	 *  were allocated).
3790 	 */
3791 	ut_spdk_blob_opts_init(&blob_opts);
3792 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
3793 	poll_threads();
3794 	CU_ASSERT(g_bserrno == 0);
3795 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
3796 	blobid = g_blobid;
3797 
3798 	/* Unload the blob store */
3799 	spdk_bs_unload(bs, bs_op_complete, NULL);
3800 	poll_threads();
3801 	CU_ASSERT(g_bserrno == 0);
3802 	g_bs = NULL;
3803 	CU_ASSERT(super->version == 2);
3804 	CU_ASSERT(super->used_blobid_mask_start == 0);
3805 	CU_ASSERT(super->used_blobid_mask_len == 0);
3806 
3807 	dev = init_dev();
3808 	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
3809 	poll_threads();
3810 	CU_ASSERT(g_bserrno == 0);
3811 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3812 	bs = g_bs;
3813 
3814 	g_blob = NULL;
3815 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
3816 	poll_threads();
3817 	CU_ASSERT(g_bserrno == 0);
3818 	CU_ASSERT(g_blob != NULL);
3819 	blob = g_blob;
3820 
3821 	ut_blob_close_and_delete(bs, blob);
3822 
3823 	CU_ASSERT(super->version == 2);
3824 	CU_ASSERT(super->used_blobid_mask_start == 0);
3825 	CU_ASSERT(super->used_blobid_mask_len == 0);
3826 }
3827 
3828 static void
3829 blob_set_xattrs_test(void)
3830 {
3831 	struct spdk_blob_store *bs = g_bs;
3832 	struct spdk_blob *blob;
3833 	struct spdk_blob_opts opts;
3834 	const void *value;
3835 	size_t value_len;
3836 	char *xattr;
3837 	size_t xattr_length;
3838 	int rc;
3839 
3840 	/* Create blob with extra attributes */
3841 	ut_spdk_blob_opts_init(&opts);
3842 
3843 	opts.xattrs.names = g_xattr_names;
3844 	opts.xattrs.get_value = _get_xattr_value;
3845 	opts.xattrs.count = 3;
3846 	opts.xattrs.ctx = &g_ctx;
3847 
3848 	blob = ut_blob_create_and_open(bs, &opts);
3849 
3850 	/* Get the xattrs */
3851 	value = NULL;
3852 
3853 	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len);
3854 	CU_ASSERT(rc == 0);
3855 	SPDK_CU_ASSERT_FATAL(value != NULL);
3856 	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
3857 	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);
3858 
3859 	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len);
3860 	CU_ASSERT(rc == 0);
3861 	SPDK_CU_ASSERT_FATAL(value != NULL);
3862 	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
3863 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);
3864 
3865 	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len);
3866 	CU_ASSERT(rc == 0);
3867 	SPDK_CU_ASSERT_FATAL(value != NULL);
3868 	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
3869 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);
3870 
3871 	/* Try to get a non-existent attribute */
3872 
3873 	rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len);
3874 	CU_ASSERT(rc == -ENOENT);
3875 
3876 	/* Try an xattr exceeding the maximum descriptor length in a single page */
3877 	xattr_length = SPDK_BS_MAX_DESC_SIZE - sizeof(struct spdk_blob_md_descriptor_xattr) -
3878 		       strlen("large_xattr") + 1;
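	/* The "+ 1" makes the xattr descriptor one byte too large to fit in a single
	 * metadata page, so setting it is expected to fail with -ENOMEM. */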
3879 	xattr = calloc(xattr_length, sizeof(char));
3880 	SPDK_CU_ASSERT_FATAL(xattr != NULL);
3881 	rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
3882 	free(xattr);
3883 	SPDK_CU_ASSERT_FATAL(rc == -ENOMEM);
3884 
3885 	spdk_blob_close(blob, blob_op_complete, NULL);
3886 	poll_threads();
3887 	CU_ASSERT(g_bserrno == 0);
3888 	blob = NULL;
3889 	g_blob = NULL;
3890 	g_blobid = SPDK_BLOBID_INVALID;
3891 
3892 	/* NULL callback */
3893 	ut_spdk_blob_opts_init(&opts);
3894 	opts.xattrs.names = g_xattr_names;
3895 	opts.xattrs.get_value = NULL;
3896 	opts.xattrs.count = 1;
3897 	opts.xattrs.ctx = &g_ctx;
3898 
3899 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
3900 	poll_threads();
3901 	CU_ASSERT(g_bserrno == -EINVAL);
3902 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
3903 
3904 	/* NULL values */
3905 	ut_spdk_blob_opts_init(&opts);
3906 	opts.xattrs.names = g_xattr_names;
3907 	opts.xattrs.get_value = _get_xattr_value_null;
3908 	opts.xattrs.count = 1;
3909 	opts.xattrs.ctx = NULL;
3910 
3911 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
3912 	poll_threads();
3913 	CU_ASSERT(g_bserrno == -EINVAL);
3914 }
3915 
3916 static void
3917 blob_thin_prov_alloc(void)
3918 {
3919 	struct spdk_blob_store *bs = g_bs;
3920 	struct spdk_blob *blob;
3921 	struct spdk_blob_opts opts;
3922 	spdk_blob_id blobid;
3923 	uint64_t free_clusters;
3924 
3925 	free_clusters = spdk_bs_free_cluster_count(bs);
3926 
3927 	/* Set blob as thin provisioned */
3928 	ut_spdk_blob_opts_init(&opts);
3929 	opts.thin_provision = true;
3930 
3931 	blob = ut_blob_create_and_open(bs, &opts);
3932 	blobid = spdk_blob_get_id(blob);
3933 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3934 
3935 	CU_ASSERT(blob->active.num_clusters == 0);
3936 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
3937 
3938 	/* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */
3939 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
3940 	poll_threads();
3941 	CU_ASSERT(g_bserrno == 0);
3942 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3943 	CU_ASSERT(blob->active.num_clusters == 5);
3944 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
3945 
3946 	/* Grow it to 1TB - still unallocated */
3947 	spdk_blob_resize(blob, 262144, blob_op_complete, NULL);
3948 	poll_threads();
3949 	CU_ASSERT(g_bserrno == 0);
3950 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3951 	CU_ASSERT(blob->active.num_clusters == 262144);
3952 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 262144);
3953 
3954 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
3955 	poll_threads();
3956 	CU_ASSERT(g_bserrno == 0);
3957 	/* Sync must not change anything */
3958 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3959 	CU_ASSERT(blob->active.num_clusters == 262144);
3960 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 262144);
3961 	/* Since clusters are not allocated,
3962 	 * number of metadata pages is expected to be minimal.
3963 	 */
3964 	CU_ASSERT(blob->active.num_pages == 1);
3965 
3966 	/* Shrink the blob to 3 clusters - still unallocated */
3967 	spdk_blob_resize(blob, 3, blob_op_complete, NULL);
3968 	poll_threads();
3969 	CU_ASSERT(g_bserrno == 0);
3970 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3971 	CU_ASSERT(blob->active.num_clusters == 3);
3972 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 3);
3973 
3974 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
3975 	poll_threads();
3976 	CU_ASSERT(g_bserrno == 0);
3977 	/* Sync must not change anything */
3978 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3979 	CU_ASSERT(blob->active.num_clusters == 3);
3980 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 3);
3981 
3982 	spdk_blob_close(blob, blob_op_complete, NULL);
3983 	poll_threads();
3984 	CU_ASSERT(g_bserrno == 0);
3985 
3986 	ut_bs_reload(&bs, NULL);
3987 
3988 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
3989 	poll_threads();
3990 	CU_ASSERT(g_bserrno == 0);
3991 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
3992 	blob = g_blob;
3993 
3994 	/* Check that clusters allocation and size is still the same */
3995 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3996 	CU_ASSERT(blob->active.num_clusters == 3);
3997 
3998 	ut_blob_close_and_delete(bs, blob);
3999 }
4000 
4001 static void
4002 blob_insert_cluster_msg_test(void)
4003 {
4004 	struct spdk_blob_store *bs = g_bs;
4005 	struct spdk_blob *blob;
4006 	struct spdk_blob_opts opts;
4007 	struct spdk_blob_md_page page = {};
4008 	spdk_blob_id blobid;
4009 	uint64_t free_clusters;
4010 	uint64_t new_cluster = 0;
4011 	uint32_t cluster_num = 3;
4012 	uint32_t extent_page = 0;
4013 
4014 	free_clusters = spdk_bs_free_cluster_count(bs);
4015 
4016 	/* Set blob as thin provisioned */
4017 	ut_spdk_blob_opts_init(&opts);
4018 	opts.thin_provision = true;
4019 	opts.num_clusters = 4;
4020 
4021 	blob = ut_blob_create_and_open(bs, &opts);
4022 	blobid = spdk_blob_get_id(blob);
4023 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4024 
4025 	CU_ASSERT(blob->active.num_clusters == 4);
4026 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 4);
4027 	CU_ASSERT(blob->active.clusters[cluster_num] == 0);
4028 
4029 	/* Allocate a cluster for cluster_num; the returned new_cluster is then inserted on the md_thread.
4030 	 * This simulates the behaviour where a cluster is allocated after blob creation,
4031 	 * such as in _spdk_bs_allocate_and_copy_cluster(). */
4032 	spdk_spin_lock(&bs->used_lock);
4033 	bs_allocate_cluster(blob, cluster_num, &new_cluster, &extent_page, false);
4034 	CU_ASSERT(blob->active.clusters[cluster_num] == 0);
4035 	spdk_spin_unlock(&bs->used_lock);
4036 
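	/* blob_insert_cluster_on_md_thread() updates the blob's cluster map on the
	 * metadata thread; once the message is processed, the cluster slot must be
	 * populated. */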
4037 	blob_insert_cluster_on_md_thread(blob, cluster_num, new_cluster, extent_page, &page,
4038 					 blob_op_complete, NULL);
4039 	poll_threads();
4040 
4041 	CU_ASSERT(blob->active.clusters[cluster_num] != 0);
4042 
4043 	spdk_blob_close(blob, blob_op_complete, NULL);
4044 	poll_threads();
4045 	CU_ASSERT(g_bserrno == 0);
4046 
4047 	ut_bs_reload(&bs, NULL);
4048 
4049 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
4050 	poll_threads();
4051 	CU_ASSERT(g_bserrno == 0);
4052 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4053 	blob = g_blob;
4054 
4055 	CU_ASSERT(blob->active.clusters[cluster_num] != 0);
4056 
4057 	ut_blob_close_and_delete(bs, blob);
4058 }
4059 
4060 static void
4061 blob_thin_prov_rw(void)
4062 {
4063 	static const uint8_t zero[10 * 4096] = { 0 };
4064 	struct spdk_blob_store *bs = g_bs;
4065 	struct spdk_blob *blob, *blob_id0;
4066 	struct spdk_io_channel *channel, *channel_thread1;
4067 	struct spdk_blob_opts opts;
4068 	uint64_t free_clusters;
4069 	uint64_t page_size;
4070 	uint8_t payload_read[10 * 4096];
4071 	uint8_t payload_write[10 * 4096];
4072 	uint64_t write_bytes;
4073 	uint64_t read_bytes;
4074 
4075 	free_clusters = spdk_bs_free_cluster_count(bs);
4076 	page_size = spdk_bs_get_page_size(bs);
4077 
4078 	channel = spdk_bs_alloc_io_channel(bs);
4079 	CU_ASSERT(channel != NULL);
4080 
4081 	ut_spdk_blob_opts_init(&opts);
4082 	opts.thin_provision = true;
4083 
4084 	/* Create and delete a blob at md page 0, so that the next md page allocation
4085 	 * for an extent will use that page. */
4086 	blob_id0 = ut_blob_create_and_open(bs, &opts);
4087 	blob = ut_blob_create_and_open(bs, &opts);
4088 	ut_blob_close_and_delete(bs, blob_id0);
4089 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4090 
4091 	CU_ASSERT(blob->active.num_clusters == 0);
4092 
4093 	/* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */
4094 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
4095 	poll_threads();
4096 	CU_ASSERT(g_bserrno == 0);
4097 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4098 	CU_ASSERT(blob->active.num_clusters == 5);
4099 
4100 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
4101 	poll_threads();
4102 	CU_ASSERT(g_bserrno == 0);
4103 	/* Sync must not change anything */
4104 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4105 	CU_ASSERT(blob->active.num_clusters == 5);
4106 
4107 	/* Payload should be all zeros from unallocated clusters */
4108 	memset(payload_read, 0xFF, sizeof(payload_read));
4109 	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
4110 	poll_threads();
4111 	CU_ASSERT(g_bserrno == 0);
4112 	CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0);
4113 
4114 	write_bytes = g_dev_write_bytes;
4115 	read_bytes = g_dev_read_bytes;
4116 
4117 	/* Perform write on thread 1. That will allocate cluster on thread 0 via send_msg */
4118 	set_thread(1);
4119 	channel_thread1 = spdk_bs_alloc_io_channel(bs);
4120 	CU_ASSERT(channel_thread1 != NULL);
4121 	memset(payload_write, 0xE5, sizeof(payload_write));
4122 	spdk_blob_io_write(blob, channel_thread1, payload_write, 4, 10, blob_op_complete, NULL);
4123 	CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs));
4124 	/* Perform write on thread 0. That will try to allocate cluster,
4125 	 * but fail due to another thread issuing the cluster allocation first. */
4126 	set_thread(0);
4127 	memset(payload_write, 0xE5, sizeof(payload_write));
4128 	spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
4129 	CU_ASSERT(free_clusters - 2 == spdk_bs_free_cluster_count(bs));
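	/* At this point both threads have claimed a cluster for the same region; once
	 * the messages are processed, the thread that lost the race releases its
	 * cluster, so only one allocation remains. */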
4130 	poll_threads();
4131 	CU_ASSERT(g_bserrno == 0);
4132 	CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs));
4133 	/* For a thin-provisioned blob we expect to write 20 data pages plus one metadata page
4134 	 * and read 0 bytes */
4135 	if (g_use_extent_table) {
4136 		/* Add one more page for EXTENT_PAGE write */
4137 		CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 22);
4138 	} else {
4139 		CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 21);
4140 	}
4141 	CU_ASSERT(g_dev_read_bytes - read_bytes == 0);
4142 
4143 	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
4144 	poll_threads();
4145 	CU_ASSERT(g_bserrno == 0);
4146 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
4147 
4148 	ut_blob_close_and_delete(bs, blob);
4149 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4150 
4151 	set_thread(1);
4152 	spdk_bs_free_io_channel(channel_thread1);
4153 	set_thread(0);
4154 	spdk_bs_free_io_channel(channel);
4155 	poll_threads();
4156 	g_blob = NULL;
4157 	g_blobid = 0;
4158 }
4159 
4160 static void
4161 blob_thin_prov_write_count_io(void)
4162 {
4163 	struct spdk_blob_store *bs;
4164 	struct spdk_blob *blob;
4165 	struct spdk_io_channel *ch;
4166 	struct spdk_bs_dev *dev;
4167 	struct spdk_bs_opts bs_opts;
4168 	struct spdk_blob_opts opts;
4169 	uint64_t free_clusters;
4170 	uint64_t page_size;
4171 	uint8_t payload_write[4096];
4172 	uint64_t write_bytes;
4173 	uint64_t read_bytes;
4174 	const uint32_t CLUSTER_SZ = 16384;
4175 	uint32_t pages_per_cluster;
4176 	uint32_t pages_per_extent_page;
4177 	uint32_t i;
4178 
4179 	/* Use a very small cluster size for this test.  This ensures that multiple
4180 	 * extent pages are needed to hold all of the clusters, even for the relatively
4181 	 * small blobs the unit tests are restricted to (i.e. we don't want to allocate
4182 	 * multi-GB buffers).
4183 	 */
4184 	dev = init_dev();
4185 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
4186 	bs_opts.cluster_sz = CLUSTER_SZ;
4187 
4188 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
4189 	poll_threads();
4190 	CU_ASSERT(g_bserrno == 0);
4191 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
4192 	bs = g_bs;
4193 
4194 	free_clusters = spdk_bs_free_cluster_count(bs);
4195 	page_size = spdk_bs_get_page_size(bs);
4196 	pages_per_cluster = CLUSTER_SZ / page_size;
4197 	pages_per_extent_page = SPDK_EXTENTS_PER_EP * pages_per_cluster;
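	/* Each extent page tracks SPDK_EXTENTS_PER_EP clusters, so writes spaced
	 * pages_per_extent_page apart land in different extent pages. */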
4198 
4199 	ch = spdk_bs_alloc_io_channel(bs);
4200 	SPDK_CU_ASSERT_FATAL(ch != NULL);
4201 
4202 	ut_spdk_blob_opts_init(&opts);
4203 	opts.thin_provision = true;
4204 
4205 	blob = ut_blob_create_and_open(bs, &opts);
4206 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4207 
4208 	/* Resize the blob so that it will require 8 extent pages to hold all of
4209 	 * the clusters.
4210 	 */
4211 	g_bserrno = -1;
4212 	spdk_blob_resize(blob, SPDK_EXTENTS_PER_EP * 8, blob_op_complete, NULL);
4213 	poll_threads();
4214 	CU_ASSERT(g_bserrno == 0);
4215 
4216 	g_bserrno = -1;
4217 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
4218 	poll_threads();
4219 	CU_ASSERT(g_bserrno == 0);
4220 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4221 	CU_ASSERT(blob->active.num_clusters == SPDK_EXTENTS_PER_EP * 8);
4222 
4223 	memset(payload_write, 0, sizeof(payload_write));
4224 	for (i = 0; i < 8; i++) {
4225 		write_bytes = g_dev_write_bytes;
4226 		read_bytes = g_dev_read_bytes;
4227 
4228 		g_bserrno = -1;
4229 		spdk_blob_io_write(blob, ch, payload_write, pages_per_extent_page * i, 1, blob_op_complete, NULL);
4230 		poll_threads();
4231 		CU_ASSERT(g_bserrno == 0);
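		/* Each loop iteration allocates two clusters (one per write), so after the
		 * first write of iteration i a total of 2 * i + 1 clusters are in use. */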
4232 		CU_ASSERT(free_clusters - (2 * i + 1) == spdk_bs_free_cluster_count(bs));
4233 
4234 		CU_ASSERT(g_dev_read_bytes == read_bytes);
4235 		if (!g_use_extent_table) {
4236 			/* For legacy metadata, we should have written two pages - one for the
4237 			 * write I/O itself, another for the blob's primary metadata.
4238 			 */
4239 			CU_ASSERT((g_dev_write_bytes - write_bytes) / page_size == 2);
4240 		} else {
4241 			/* For extent table metadata, we should have written three pages - one
4242 			 * for the write I/O, one for the extent page, one for the blob's primary
4243 			 * metadata.
4244 			 */
4245 			CU_ASSERT((g_dev_write_bytes - write_bytes) / page_size == 3);
4246 		}
4247 
4248 		/* The write should have synced the metadata already.  Do another sync here
4249 		 * just to confirm.
4250 		 */
4251 		write_bytes = g_dev_write_bytes;
4252 		read_bytes = g_dev_read_bytes;
4253 
4254 		g_bserrno = -1;
4255 		spdk_blob_sync_md(blob, blob_op_complete, NULL);
4256 		poll_threads();
4257 		CU_ASSERT(g_bserrno == 0);
4258 		CU_ASSERT(free_clusters - (2 * i + 1) == spdk_bs_free_cluster_count(bs));
4259 
4260 		CU_ASSERT(g_dev_read_bytes == read_bytes);
4261 		CU_ASSERT(g_dev_write_bytes == write_bytes);
4262 
4263 		/* Now write to another unallocated cluster that is part of the same extent page. */
4264 		g_bserrno = -1;
4265 		spdk_blob_io_write(blob, ch, payload_write, pages_per_extent_page * i + pages_per_cluster,
4266 				   1, blob_op_complete, NULL);
4267 		poll_threads();
4268 		CU_ASSERT(g_bserrno == 0);
4269 		CU_ASSERT(free_clusters - (2 * i + 2) == spdk_bs_free_cluster_count(bs));
4270 
4271 		CU_ASSERT(g_dev_read_bytes == read_bytes);
4272 		/*
4273 		 * For legacy metadata, we should have written the I/O and the primary metadata page.
4274 		 * For extent table metadata, we should have written the I/O and the extent metadata page.
4275 		 */
4276 		CU_ASSERT((g_dev_write_bytes - write_bytes) / page_size == 2);
4277 	}
4278 
4279 	ut_blob_close_and_delete(bs, blob);
4280 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4281 
4282 	spdk_bs_free_io_channel(ch);
4283 	poll_threads();
4284 	g_blob = NULL;
4285 	g_blobid = 0;
4286 
4287 	spdk_bs_unload(bs, bs_op_complete, NULL);
4288 	poll_threads();
4289 	CU_ASSERT(g_bserrno == 0);
4290 	g_bs = NULL;
4291 }
4292 
4293 static void
4294 blob_thin_prov_rle(void)
4295 {
4296 	static const uint8_t zero[10 * 4096] = { 0 };
4297 	struct spdk_blob_store *bs = g_bs;
4298 	struct spdk_blob *blob;
4299 	struct spdk_io_channel *channel;
4300 	struct spdk_blob_opts opts;
4301 	spdk_blob_id blobid;
4302 	uint64_t free_clusters;
4303 	uint64_t page_size;
4304 	uint8_t payload_read[10 * 4096];
4305 	uint8_t payload_write[10 * 4096];
4306 	uint64_t write_bytes;
4307 	uint64_t read_bytes;
4308 	uint64_t io_unit;
4309 
4310 	free_clusters = spdk_bs_free_cluster_count(bs);
4311 	page_size = spdk_bs_get_page_size(bs);
4312 
4313 	ut_spdk_blob_opts_init(&opts);
4314 	opts.thin_provision = true;
4315 	opts.num_clusters = 5;
4316 
4317 	blob = ut_blob_create_and_open(bs, &opts);
4318 	blobid = spdk_blob_get_id(blob);
4319 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4320 
4321 	channel = spdk_bs_alloc_io_channel(bs);
4322 	CU_ASSERT(channel != NULL);
4323 
4324 	/* Target the second cluster in the blob (converted to an io_unit offset) as the first allocation */
4325 	io_unit = bs_cluster_to_page(bs, 1) * bs_io_unit_per_page(bs);
4326 
4327 	/* Payload should be all zeros from unallocated clusters */
4328 	memset(payload_read, 0xFF, sizeof(payload_read));
4329 	spdk_blob_io_read(blob, channel, payload_read, io_unit, 10, blob_op_complete, NULL);
4330 	poll_threads();
4331 	CU_ASSERT(g_bserrno == 0);
4332 	CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0);
4333 
4334 	write_bytes = g_dev_write_bytes;
4335 	read_bytes = g_dev_read_bytes;
4336 
4337 	/* Issue write to second cluster in a blob */
4338 	memset(payload_write, 0xE5, sizeof(payload_write));
4339 	spdk_blob_io_write(blob, channel, payload_write, io_unit, 10, blob_op_complete, NULL);
4340 	poll_threads();
4341 	CU_ASSERT(g_bserrno == 0);
4342 	CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs));
4343 	/* For a thin-provisioned blob we expect to write 10 data pages plus one metadata page
4344 	 * and read 0 bytes */
4345 	if (g_use_extent_table) {
4346 		/* Add one more page for EXTENT_PAGE write */
4347 		CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 12);
4348 	} else {
4349 		CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 11);
4350 	}
4351 	CU_ASSERT(g_dev_read_bytes - read_bytes == 0);
4352 
4353 	spdk_blob_io_read(blob, channel, payload_read, io_unit, 10, blob_op_complete, NULL);
4354 	poll_threads();
4355 	CU_ASSERT(g_bserrno == 0);
4356 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
4357 
4358 	spdk_bs_free_io_channel(channel);
4359 	poll_threads();
4360 
4361 	spdk_blob_close(blob, blob_op_complete, NULL);
4362 	poll_threads();
4363 	CU_ASSERT(g_bserrno == 0);
4364 
4365 	ut_bs_reload(&bs, NULL);
4366 
4367 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
4368 	poll_threads();
4369 	CU_ASSERT(g_bserrno == 0);
4370 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4371 	blob = g_blob;
4372 
4373 	channel = spdk_bs_alloc_io_channel(bs);
4374 	CU_ASSERT(channel != NULL);
4375 
4376 	/* Read second cluster after blob reload to confirm data written */
4377 	spdk_blob_io_read(blob, channel, payload_read, io_unit, 10, blob_op_complete, NULL);
4378 	poll_threads();
4379 	CU_ASSERT(g_bserrno == 0);
4380 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
4381 
4382 	spdk_bs_free_io_channel(channel);
4383 	poll_threads();
4384 
4385 	ut_blob_close_and_delete(bs, blob);
4386 }
4387 
4388 static void
4389 blob_thin_prov_rw_iov(void)
4390 {
4391 	static const uint8_t zero[10 * 4096] = { 0 };
4392 	struct spdk_blob_store *bs = g_bs;
4393 	struct spdk_blob *blob;
4394 	struct spdk_io_channel *channel;
4395 	struct spdk_blob_opts opts;
4396 	uint64_t free_clusters;
4397 	uint8_t payload_read[10 * 4096];
4398 	uint8_t payload_write[10 * 4096];
4399 	struct iovec iov_read[3];
4400 	struct iovec iov_write[3];
4401 
4402 	free_clusters = spdk_bs_free_cluster_count(bs);
4403 
4404 	channel = spdk_bs_alloc_io_channel(bs);
4405 	CU_ASSERT(channel != NULL);
4406 
4407 	ut_spdk_blob_opts_init(&opts);
4408 	opts.thin_provision = true;
4409 
4410 	blob = ut_blob_create_and_open(bs, &opts);
4411 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4412 
4413 	CU_ASSERT(blob->active.num_clusters == 0);
4414 
4415 	/* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */
4416 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
4417 	poll_threads();
4418 	CU_ASSERT(g_bserrno == 0);
4419 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4420 	CU_ASSERT(blob->active.num_clusters == 5);
4421 
4422 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
4423 	poll_threads();
4424 	CU_ASSERT(g_bserrno == 0);
4425 	/* Sync must not change anything */
4426 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4427 	CU_ASSERT(blob->active.num_clusters == 5);
4428 
4429 	/* Payload should be all zeros from unallocated clusters */
4430 	memset(payload_read, 0xAA, sizeof(payload_read));
4431 	iov_read[0].iov_base = payload_read;
4432 	iov_read[0].iov_len = 3 * 4096;
4433 	iov_read[1].iov_base = payload_read + 3 * 4096;
4434 	iov_read[1].iov_len = 4 * 4096;
4435 	iov_read[2].iov_base = payload_read + 7 * 4096;
4436 	iov_read[2].iov_len = 3 * 4096;
4437 	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
4438 	poll_threads();
4439 	CU_ASSERT(g_bserrno == 0);
4440 	CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0);
4441 
4442 	memset(payload_write, 0xE5, sizeof(payload_write));
4443 	iov_write[0].iov_base = payload_write;
4444 	iov_write[0].iov_len = 1 * 4096;
4445 	iov_write[1].iov_base = payload_write + 1 * 4096;
4446 	iov_write[1].iov_len = 5 * 4096;
4447 	iov_write[2].iov_base = payload_write + 6 * 4096;
4448 	iov_write[2].iov_len = 4 * 4096;
4449 
4450 	spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
4451 	poll_threads();
4452 	CU_ASSERT(g_bserrno == 0);
4453 
4454 	memset(payload_read, 0xAA, sizeof(payload_read));
4455 	iov_read[0].iov_base = payload_read;
4456 	iov_read[0].iov_len = 3 * 4096;
4457 	iov_read[1].iov_base = payload_read + 3 * 4096;
4458 	iov_read[1].iov_len = 4 * 4096;
4459 	iov_read[2].iov_base = payload_read + 7 * 4096;
4460 	iov_read[2].iov_len = 3 * 4096;
4461 	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
4462 	poll_threads();
4463 	CU_ASSERT(g_bserrno == 0);
4464 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
4465 
4466 	spdk_bs_free_io_channel(channel);
4467 	poll_threads();
4468 
4469 	ut_blob_close_and_delete(bs, blob);
4470 }
4471 
4472 struct iter_ctx {
4473 	int		current_iter;
4474 	spdk_blob_id	blobid[4];
4475 };
4476 
4477 static void
4478 test_iter(void *arg, struct spdk_blob *blob, int bserrno)
4479 {
4480 	struct iter_ctx *iter_ctx = arg;
4481 	spdk_blob_id blobid;
4482 
4483 	CU_ASSERT(bserrno == 0);
4484 	blobid = spdk_blob_get_id(blob);
4485 	CU_ASSERT(blobid == iter_ctx->blobid[iter_ctx->current_iter++]);
4486 }
4487 
4488 static void
4489 bs_load_iter_test(void)
4490 {
4491 	struct spdk_blob_store *bs;
4492 	struct spdk_bs_dev *dev;
4493 	struct iter_ctx iter_ctx = { 0 };
4494 	struct spdk_blob *blob;
4495 	int i, rc;
4496 	struct spdk_bs_opts opts;
4497 
4498 	dev = init_dev();
4499 	spdk_bs_opts_init(&opts, sizeof(opts));
4500 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
4501 
4502 	/* Initialize a new blob store */
4503 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
4504 	poll_threads();
4505 	CU_ASSERT(g_bserrno == 0);
4506 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
4507 	bs = g_bs;
4508 
4509 	for (i = 0; i < 4; i++) {
4510 		blob = ut_blob_create_and_open(bs, NULL);
4511 		iter_ctx.blobid[i] = spdk_blob_get_id(blob);
4512 
4513 		/* Just save the blobid as an xattr for testing purposes. */
4514 		rc = spdk_blob_set_xattr(blob, "blobid", &iter_ctx.blobid[i], sizeof(spdk_blob_id));
4515 		CU_ASSERT(rc == 0);
4516 
4517 		/* Resize the blob */
4518 		spdk_blob_resize(blob, i, blob_op_complete, NULL);
4519 		poll_threads();
4520 		CU_ASSERT(g_bserrno == 0);
4521 
4522 		spdk_blob_close(blob, blob_op_complete, NULL);
4523 		poll_threads();
4524 		CU_ASSERT(g_bserrno == 0);
4525 	}
4526 
4527 	g_bserrno = -1;
4528 	spdk_bs_unload(bs, bs_op_complete, NULL);
4529 	poll_threads();
4530 	CU_ASSERT(g_bserrno == 0);
4531 
4532 	dev = init_dev();
4533 	spdk_bs_opts_init(&opts, sizeof(opts));
4534 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
4535 	opts.iter_cb_fn = test_iter;
4536 	opts.iter_cb_arg = &iter_ctx;
4537 
4538 	/* Test blob iteration during load after a clean shutdown. */
4539 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
4540 	poll_threads();
4541 	CU_ASSERT(g_bserrno == 0);
4542 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
4543 	bs = g_bs;
4544 
4545 	/* Dirty shutdown */
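	/* bs_free() tears down the blobstore in memory without unloading it, so the
	 * on-disk super block is never marked clean. */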
4546 	bs_free(bs);
4547 
4548 	dev = init_dev();
4549 	spdk_bs_opts_init(&opts, sizeof(opts));
4550 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
4551 	opts.iter_cb_fn = test_iter;
4552 	iter_ctx.current_iter = 0;
4553 	opts.iter_cb_arg = &iter_ctx;
4554 
4555 	/* Test blob iteration during load after a dirty shutdown. */
4556 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
4557 	poll_threads();
4558 	CU_ASSERT(g_bserrno == 0);
4559 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
4560 	bs = g_bs;
4561 
4562 	spdk_bs_unload(bs, bs_op_complete, NULL);
4563 	poll_threads();
4564 	CU_ASSERT(g_bserrno == 0);
4565 	g_bs = NULL;
4566 }
4567 
4568 static void
4569 blob_snapshot_rw(void)
4570 {
4571 	static const uint8_t zero[10 * 4096] = { 0 };
4572 	struct spdk_blob_store *bs = g_bs;
4573 	struct spdk_blob *blob, *snapshot;
4574 	struct spdk_io_channel *channel;
4575 	struct spdk_blob_opts opts;
4576 	spdk_blob_id blobid, snapshotid;
4577 	uint64_t free_clusters;
4578 	uint64_t cluster_size;
4579 	uint64_t page_size;
4580 	uint8_t payload_read[10 * 4096];
4581 	uint8_t payload_write[10 * 4096];
4582 	uint64_t write_bytes_start;
4583 	uint64_t read_bytes_start;
4584 	uint64_t copy_bytes_start;
4585 	uint64_t write_bytes;
4586 	uint64_t read_bytes;
4587 	uint64_t copy_bytes;
4588 
4589 	free_clusters = spdk_bs_free_cluster_count(bs);
4590 	cluster_size = spdk_bs_get_cluster_size(bs);
4591 	page_size = spdk_bs_get_page_size(bs);
4592 
4593 	channel = spdk_bs_alloc_io_channel(bs);
4594 	CU_ASSERT(channel != NULL);
4595 
4596 	ut_spdk_blob_opts_init(&opts);
4597 	opts.thin_provision = true;
4598 	opts.num_clusters = 5;
4599 
4600 	blob = ut_blob_create_and_open(bs, &opts);
4601 	blobid = spdk_blob_get_id(blob);
4602 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4603 
4604 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
4605 
4606 	memset(payload_read, 0xFF, sizeof(payload_read));
4607 	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
4608 	poll_threads();
4609 	CU_ASSERT(g_bserrno == 0);
4610 	CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0);
4611 
4612 	memset(payload_write, 0xE5, sizeof(payload_write));
4613 	spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
4614 	poll_threads();
4615 	CU_ASSERT(g_bserrno == 0);
4616 	CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs));
4617 
4618 	/* Create snapshot from blob */
4619 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
4620 	poll_threads();
4621 	CU_ASSERT(g_bserrno == 0);
4622 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
4623 	snapshotid = g_blobid;
4624 
4625 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
4626 	poll_threads();
4627 	CU_ASSERT(g_bserrno == 0);
4628 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4629 	snapshot = g_blob;
4630 	CU_ASSERT(snapshot->data_ro == true);
4631 	CU_ASSERT(snapshot->md_ro == true);
4632 
4633 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5);
4634 
4635 	write_bytes_start = g_dev_write_bytes;
4636 	read_bytes_start = g_dev_read_bytes;
4637 	copy_bytes_start = g_dev_copy_bytes;
4638 
4639 	memset(payload_write, 0xAA, sizeof(payload_write));
4640 	spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
4641 	poll_threads();
4642 	CU_ASSERT(g_bserrno == 0);
4643 	CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs));
4644 
4645 	/* For a clone we need to allocate and copy one cluster, update one page of metadata
4646 	 * and then write 10 pages of payload.
4647 	 */
4648 	write_bytes = g_dev_write_bytes - write_bytes_start;
4649 	read_bytes = g_dev_read_bytes - read_bytes_start;
4650 	copy_bytes = g_dev_copy_bytes - copy_bytes_start;
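	/* If the device supports copy offload, the cluster copy is accounted in
	 * copy_bytes rather than in read/write bytes, so add it to both sums below. */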
4651 	if (g_dev_copy_enabled) {
4652 		CU_ASSERT(copy_bytes == cluster_size);
4653 	} else {
4654 		CU_ASSERT(copy_bytes == 0);
4655 	}
4656 	if (g_use_extent_table) {
4657 		/* Add one more page for EXTENT_PAGE write */
4658 		CU_ASSERT(write_bytes + copy_bytes == page_size * 12 + cluster_size);
4659 	} else {
4660 		CU_ASSERT(write_bytes + copy_bytes == page_size * 11 + cluster_size);
4661 	}
4662 	CU_ASSERT(read_bytes + copy_bytes == cluster_size);
4663 
4664 	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
4665 	poll_threads();
4666 	CU_ASSERT(g_bserrno == 0);
4667 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
4668 
4669 	/* Data on snapshot should not change after write to clone */
4670 	memset(payload_write, 0xE5, sizeof(payload_write));
4671 	spdk_blob_io_read(snapshot, channel, payload_read, 4, 10, blob_op_complete, NULL);
4672 	poll_threads();
4673 	CU_ASSERT(g_bserrno == 0);
4674 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
4675 
4676 	ut_blob_close_and_delete(bs, blob);
4677 	ut_blob_close_and_delete(bs, snapshot);
4678 
4679 	spdk_bs_free_io_channel(channel);
4680 	poll_threads();
4681 	g_blob = NULL;
4682 	g_blobid = 0;
4683 }
4684 
4685 static void
4686 blob_snapshot_rw_iov(void)
4687 {
4688 	static const uint8_t zero[10 * 4096] = { 0 };
4689 	struct spdk_blob_store *bs = g_bs;
4690 	struct spdk_blob *blob, *snapshot;
4691 	struct spdk_io_channel *channel;
4692 	struct spdk_blob_opts opts;
4693 	spdk_blob_id blobid, snapshotid;
4694 	uint64_t free_clusters;
4695 	uint8_t payload_read[10 * 4096];
4696 	uint8_t payload_write[10 * 4096];
4697 	struct iovec iov_read[3];
4698 	struct iovec iov_write[3];
4699 
4700 	free_clusters = spdk_bs_free_cluster_count(bs);
4701 
4702 	channel = spdk_bs_alloc_io_channel(bs);
4703 	CU_ASSERT(channel != NULL);
4704 
4705 	ut_spdk_blob_opts_init(&opts);
4706 	opts.thin_provision = true;
4707 	opts.num_clusters = 5;
4708 
4709 	blob = ut_blob_create_and_open(bs, &opts);
4710 	blobid = spdk_blob_get_id(blob);
4711 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4712 
4713 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
4714 
4715 	/* Create snapshot from blob */
4716 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
4717 	poll_threads();
4718 	CU_ASSERT(g_bserrno == 0);
4719 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
4720 	snapshotid = g_blobid;
4721 
4722 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
4723 	poll_threads();
4724 	CU_ASSERT(g_bserrno == 0);
4725 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4726 	snapshot = g_blob;
4727 	CU_ASSERT(snapshot->data_ro == true);
4728 	CU_ASSERT(snapshot->md_ro == true);
4729 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5);
4730 
4731 	/* Payload should be all zeros from unallocated clusters */
4732 	memset(payload_read, 0xAA, sizeof(payload_read));
4733 	iov_read[0].iov_base = payload_read;
4734 	iov_read[0].iov_len = 3 * 4096;
4735 	iov_read[1].iov_base = payload_read + 3 * 4096;
4736 	iov_read[1].iov_len = 4 * 4096;
4737 	iov_read[2].iov_base = payload_read + 7 * 4096;
4738 	iov_read[2].iov_len = 3 * 4096;
4739 	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
4740 	poll_threads();
4741 	CU_ASSERT(g_bserrno == 0);
4742 	CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0);
4743 
4744 	memset(payload_write, 0xE5, sizeof(payload_write));
4745 	iov_write[0].iov_base = payload_write;
4746 	iov_write[0].iov_len = 1 * 4096;
4747 	iov_write[1].iov_base = payload_write + 1 * 4096;
4748 	iov_write[1].iov_len = 5 * 4096;
4749 	iov_write[2].iov_base = payload_write + 6 * 4096;
4750 	iov_write[2].iov_len = 4 * 4096;
4751 
4752 	spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
4753 	poll_threads();
4754 	CU_ASSERT(g_bserrno == 0);
4755 
4756 	memset(payload_read, 0xAA, sizeof(payload_read));
4757 	iov_read[0].iov_base = payload_read;
4758 	iov_read[0].iov_len = 3 * 4096;
4759 	iov_read[1].iov_base = payload_read + 3 * 4096;
4760 	iov_read[1].iov_len = 4 * 4096;
4761 	iov_read[2].iov_base = payload_read + 7 * 4096;
4762 	iov_read[2].iov_len = 3 * 4096;
4763 	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
4764 	poll_threads();
4765 	CU_ASSERT(g_bserrno == 0);
4766 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
4767 
4768 	spdk_bs_free_io_channel(channel);
4769 	poll_threads();
4770 
4771 	ut_blob_close_and_delete(bs, blob);
4772 	ut_blob_close_and_delete(bs, snapshot);
4773 }
4774 
4775 /**
4776  * Inflate / decouple parent rw unit tests.
4777  *
4778  * --------------
4779  * original blob:         0         1         2         3         4
4780  *                   ,---------+---------+---------+---------+---------.
4781  *         snapshot  |xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|    -    |
4782  *                   +---------+---------+---------+---------+---------+
4783  *         snapshot2 |    -    |yyyyyyyyy|    -    |yyyyyyyyy|    -    |
4784  *                   +---------+---------+---------+---------+---------+
4785  *         blob      |    -    |zzzzzzzzz|    -    |    -    |    -    |
4786  *                   '---------+---------+---------+---------+---------'
4787  *                   .         .         .         .         .         .
4788  * --------          .         .         .         .         .         .
4789  * inflate:          .         .         .         .         .         .
4790  *                   ,---------+---------+---------+---------+---------.
4791  *         blob      |xxxxxxxxx|zzzzzzzzz|xxxxxxxxx|yyyyyyyyy|000000000|
4792  *                   '---------+---------+---------+---------+---------'
4793  *
4794  *         NOTE: needs to allocate 4 clusters, thin provisioning removed, dependency
4795  *               on snapshot2 and snapshot removed .         .         .
4796  *                   .         .         .         .         .         .
4797  * ----------------  .         .         .         .         .         .
4798  * decouple parent:  .         .         .         .         .         .
4799  *                   ,---------+---------+---------+---------+---------.
4800  *         snapshot  |xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|    -    |
4801  *                   +---------+---------+---------+---------+---------+
4802  *         blob      |    -    |zzzzzzzzz|    -    |yyyyyyyyy|    -    |
4803  *                   '---------+---------+---------+---------+---------'
4804  *
4805  *         NOTE: needs to allocate 1 cluster, 3 clusters unallocated, dependency
4806  *               on snapshot2 removed and on snapshot still exists. Snapshot2
4807  *               should remain a clone of snapshot.
4808  */
4809 static void
4810 _blob_inflate_rw(bool decouple_parent)
4811 {
4812 	struct spdk_blob_store *bs = g_bs;
4813 	struct spdk_blob *blob, *snapshot, *snapshot2;
4814 	struct spdk_io_channel *channel;
4815 	struct spdk_blob_opts opts;
4816 	spdk_blob_id blobid, snapshotid, snapshot2id;
4817 	uint64_t free_clusters;
4818 	uint64_t cluster_size;
4819 
4820 	uint64_t payload_size;
4821 	uint8_t *payload_read;
4822 	uint8_t *payload_write;
4823 	uint8_t *payload_clone;
4824 
4825 	uint64_t pages_per_cluster;
4826 	uint64_t pages_per_payload;
4827 
4828 	int i;
4829 	spdk_blob_id ids[2];
4830 	size_t count;
4831 
4832 	free_clusters = spdk_bs_free_cluster_count(bs);
4833 	cluster_size = spdk_bs_get_cluster_size(bs);
4834 	pages_per_cluster = cluster_size / spdk_bs_get_page_size(bs);
4835 	pages_per_payload = pages_per_cluster * 5;
4836 
4837 	payload_size = cluster_size * 5;
4838 
4839 	payload_read = malloc(payload_size);
4840 	SPDK_CU_ASSERT_FATAL(payload_read != NULL);
4841 
4842 	payload_write = malloc(payload_size);
4843 	SPDK_CU_ASSERT_FATAL(payload_write != NULL);
4844 
4845 	payload_clone = malloc(payload_size);
4846 	SPDK_CU_ASSERT_FATAL(payload_clone != NULL);
4847 
4848 	channel = spdk_bs_alloc_io_channel(bs);
4849 	SPDK_CU_ASSERT_FATAL(channel != NULL);
4850 
4851 	/* Create blob */
4852 	ut_spdk_blob_opts_init(&opts);
4853 	opts.thin_provision = true;
4854 	opts.num_clusters = 5;
4855 
4856 	blob = ut_blob_create_and_open(bs, &opts);
4857 	blobid = spdk_blob_get_id(blob);
4858 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4859 
4860 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
4861 
4862 	/* 1) Initial read should return zeroed payload */
4863 	memset(payload_read, 0xFF, payload_size);
4864 	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload,
4865 			  blob_op_complete, NULL);
4866 	poll_threads();
4867 	CU_ASSERT(g_bserrno == 0);
4868 	CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size));
4869 
4870 	/* Fill whole blob with a pattern, except last cluster (to be sure it
4871 	 * isn't allocated) */
4872 	memset(payload_write, 0xE5, payload_size - cluster_size);
4873 	spdk_blob_io_write(blob, channel, payload_write, 0, pages_per_payload -
4874 			   pages_per_cluster, blob_op_complete, NULL);
4875 	poll_threads();
4876 	CU_ASSERT(g_bserrno == 0);
4877 	CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs));
4878 
4879 	/* 2) Create snapshot from blob (first level) */
4880 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
4881 	poll_threads();
4882 	CU_ASSERT(g_bserrno == 0);
4883 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
4884 	snapshotid = g_blobid;
4885 
4886 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
4887 	poll_threads();
4888 	CU_ASSERT(g_bserrno == 0);
4889 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4890 	snapshot = g_blob;
4891 	CU_ASSERT(snapshot->data_ro == true);
4892 	CU_ASSERT(snapshot->md_ro == true);
4893 
4894 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5);
4895 
4896 	/* Write every second cluster with a pattern.
4897 	 *
4898 	 * The last cluster shouldn't be written, to be sure that neither the snapshot
4899 	 * nor the clone allocates it.
4900 	 *
4901 	 * payload_clone stores the expected result of reading "blob" at this point and
4902 	 * is used only to check data consistency on the clone before and after
4903 	 * inflation. Initially we fill it with the backing snapshot's pattern
4904 	 * used before.
4905 	 */
4906 	memset(payload_clone, 0xE5, payload_size - cluster_size);
4907 	memset(payload_clone + payload_size - cluster_size, 0x00, cluster_size);
4908 	memset(payload_write, 0xAA, payload_size);
4909 	for (i = 1; i < 5; i += 2) {
4910 		spdk_blob_io_write(blob, channel, payload_write, i * pages_per_cluster,
4911 				   pages_per_cluster, blob_op_complete, NULL);
4912 		poll_threads();
4913 		CU_ASSERT(g_bserrno == 0);
4914 
4915 		/* Update expected result */
4916 		memcpy(payload_clone + (cluster_size * i), payload_write,
4917 		       cluster_size);
4918 	}
4919 	CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs));
4920 
4921 	/* Check data consistency on clone */
4922 	memset(payload_read, 0xFF, payload_size);
4923 	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload,
4924 			  blob_op_complete, NULL);
4925 	poll_threads();
4926 	CU_ASSERT(g_bserrno == 0);
4927 	CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0);
4928 
4929 	/* 3) Create a second-level snapshot from the blob */
4930 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
4931 	poll_threads();
4932 	CU_ASSERT(g_bserrno == 0);
4933 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
4934 	snapshot2id = g_blobid;
4935 
4936 	spdk_bs_open_blob(bs, snapshot2id, blob_op_with_handle_complete, NULL);
4937 	poll_threads();
4938 	CU_ASSERT(g_bserrno == 0);
4939 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4940 	snapshot2 = g_blob;
4941 	CU_ASSERT(snapshot2->data_ro == true);
4942 	CU_ASSERT(snapshot2->md_ro == true);
4943 
4944 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot2) == 5);
4945 
4946 	CU_ASSERT(snapshot2->parent_id == snapshotid);
4947 
4948 	/* Write one cluster on the top level blob. This cluster (1) covers an
4949 	 * already allocated cluster in snapshot2, so it shouldn't be inflated
4950 	 * at all */
4951 	spdk_blob_io_write(blob, channel, payload_write, pages_per_cluster,
4952 			   pages_per_cluster, blob_op_complete, NULL);
4953 	poll_threads();
4954 	CU_ASSERT(g_bserrno == 0);
4955 
4956 	/* Update expected result */
4957 	memcpy(payload_clone + cluster_size, payload_write, cluster_size);
4958 
4959 	/* Check data consistency on clone */
4960 	memset(payload_read, 0xFF, payload_size);
4961 	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload,
4962 			  blob_op_complete, NULL);
4963 	poll_threads();
4964 	CU_ASSERT(g_bserrno == 0);
4965 	CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0);
4966 
4967 
4968 	/* Close all blobs */
4969 	spdk_blob_close(blob, blob_op_complete, NULL);
4970 	poll_threads();
4971 	CU_ASSERT(g_bserrno == 0);
4972 
4973 	spdk_blob_close(snapshot2, blob_op_complete, NULL);
4974 	poll_threads();
4975 	CU_ASSERT(g_bserrno == 0);
4976 
4977 	spdk_blob_close(snapshot, blob_op_complete, NULL);
4978 	poll_threads();
4979 	CU_ASSERT(g_bserrno == 0);
4980 
4981 	/* Check snapshot-clone relations */
4982 	count = 2;
4983 	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);
4984 	CU_ASSERT(count == 1);
4985 	CU_ASSERT(ids[0] == snapshot2id);
4986 
4987 	count = 2;
4988 	CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0);
4989 	CU_ASSERT(count == 1);
4990 	CU_ASSERT(ids[0] == blobid);
4991 
4992 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshot2id);
4993 
4994 	free_clusters = spdk_bs_free_cluster_count(bs);
4995 	if (!decouple_parent) {
4996 		/* Do full blob inflation */
4997 		spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
4998 		poll_threads();
4999 		CU_ASSERT(g_bserrno == 0);
5000 
5001 		/* All clusters should be inflated (except the one already allocated
5002 		 * in the top level blob) */
5003 		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 4);
5004 
5005 		/* Check if relation tree updated correctly */
5006 		count = 2;
5007 		CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);
5008 
5009 		/* snapshotid has one clone */
5010 		CU_ASSERT(count == 1);
5011 		CU_ASSERT(ids[0] == snapshot2id);
5012 
5013 		/* snapshot2id has no clones */
5014 		count = 2;
5015 		CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0);
5016 		CU_ASSERT(count == 0);
5017 
5018 		CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID);
5019 	} else {
5020 		/* Decouple parent of blob */
5021 		spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
5022 		poll_threads();
5023 		CU_ASSERT(g_bserrno == 0);
5024 
5025 		/* Only one cluster from the parent should be inflated (the second
5026 		 * one is covered by the cluster written on the top level blob and
5027 		 * already allocated) */
5028 		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 1);
5029 
5030 		/* Check if relation tree updated correctly */
5031 		count = 2;
5032 		CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);
5033 
5034 		/* snapshotid has two clones now */
5035 		CU_ASSERT(count == 2);
5036 		CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
5037 		CU_ASSERT(ids[0] == snapshot2id || ids[1] == snapshot2id);
5038 
5039 		/* snapshot2id has no clones */
5040 		count = 2;
5041 		CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0);
5042 		CU_ASSERT(count == 0);
5043 
5044 		CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
5045 	}
5046 
5047 	/* Try to delete snapshot2 (should pass) */
5048 	spdk_bs_delete_blob(bs, snapshot2id, blob_op_complete, NULL);
5049 	poll_threads();
5050 	CU_ASSERT(g_bserrno == 0);
5051 
5052 	/* Try to delete base snapshot */
5053 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
5054 	poll_threads();
5055 	CU_ASSERT(g_bserrno == 0);
5056 
5057 	/* Reopen blob after snapshot deletion */
5058 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
5059 	poll_threads();
5060 	CU_ASSERT(g_bserrno == 0);
5061 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5062 	blob = g_blob;
5063 
5064 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
5065 
5066 	/* Check data consistency on inflated blob */
5067 	memset(payload_read, 0xFF, payload_size);
5068 	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload,
5069 			  blob_op_complete, NULL);
5070 	poll_threads();
5071 	CU_ASSERT(g_bserrno == 0);
5072 	CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0);
5073 
5074 	spdk_bs_free_io_channel(channel);
5075 	poll_threads();
5076 
5077 	free(payload_read);
5078 	free(payload_write);
5079 	free(payload_clone);
5080 
5081 	ut_blob_close_and_delete(bs, blob);
5082 }
5083 
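/* Exercise the read/write inflation scenario twice: full inflation first,
 * then decouple-parent only. */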
5084 static void
5085 blob_inflate_rw(void)
5086 {
5087 	_blob_inflate_rw(false);
5088 	_blob_inflate_rw(true);
5089 }
5090 
5091 /**
5092  * Snapshot-clones relation test
5093  *
5094  *         snapshot
5095  *            |
5096  *      +-----+-----+
5097  *      |           |
5098  *   blob(ro)   snapshot2
5099  *      |           |
5100  *   clone2      clone
5101  */
5102 static void
5103 blob_relations(void)
5104 {
5105 	struct spdk_blob_store *bs;
5106 	struct spdk_bs_dev *dev;
5107 	struct spdk_bs_opts bs_opts;
5108 	struct spdk_blob_opts opts;
5109 	struct spdk_blob *blob, *snapshot, *snapshot2, *clone, *clone2;
5110 	spdk_blob_id blobid, cloneid, snapshotid, cloneid2, snapshotid2;
5111 	int rc;
5112 	size_t count;
5113 	spdk_blob_id ids[10] = {};
5114 
5115 	dev = init_dev();
5116 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
5117 	snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");
5118 
5119 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
5120 	poll_threads();
5121 	CU_ASSERT(g_bserrno == 0);
5122 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
5123 	bs = g_bs;
5124 
5125 	/* 1. Create blob with 10 clusters */
5126 
5127 	ut_spdk_blob_opts_init(&opts);
5128 	opts.num_clusters = 10;
5129 
5130 	blob = ut_blob_create_and_open(bs, &opts);
5131 	blobid = spdk_blob_get_id(blob);
5132 
5133 	CU_ASSERT(!spdk_blob_is_read_only(blob));
5134 	CU_ASSERT(!spdk_blob_is_snapshot(blob));
5135 	CU_ASSERT(!spdk_blob_is_clone(blob));
5136 	CU_ASSERT(!spdk_blob_is_thin_provisioned(blob));
5137 
5138 	/* blob should not have underlying snapshot nor clones */
5139 	CU_ASSERT(blob->parent_id == SPDK_BLOBID_INVALID);
5140 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID);
5141 	count = SPDK_COUNTOF(ids);
5142 	rc = spdk_blob_get_clones(bs, blobid, ids, &count);
5143 	CU_ASSERT(rc == 0);
5144 	CU_ASSERT(count == 0);
5145 
5146 
5147 	/* 2. Create snapshot */
5148 
5149 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5150 	poll_threads();
5151 	CU_ASSERT(g_bserrno == 0);
5152 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5153 	snapshotid = g_blobid;
5154 
5155 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
5156 	poll_threads();
5157 	CU_ASSERT(g_bserrno == 0);
5158 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5159 	snapshot = g_blob;
5160 
5161 	CU_ASSERT(spdk_blob_is_read_only(snapshot));
5162 	CU_ASSERT(spdk_blob_is_snapshot(snapshot));
5163 	CU_ASSERT(!spdk_blob_is_clone(snapshot));
5164 	CU_ASSERT(snapshot->parent_id == SPDK_BLOBID_INVALID);
5165 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid) == SPDK_BLOBID_INVALID);
5166 
5167 	/* Check if original blob is converted to the clone of snapshot */
5168 	CU_ASSERT(!spdk_blob_is_read_only(blob));
5169 	CU_ASSERT(!spdk_blob_is_snapshot(blob));
5170 	CU_ASSERT(spdk_blob_is_clone(blob));
5171 	CU_ASSERT(spdk_blob_is_thin_provisioned(blob));
5172 	CU_ASSERT(blob->parent_id == snapshotid);
5173 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
5174 
5175 	count = SPDK_COUNTOF(ids);
5176 	rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
5177 	CU_ASSERT(rc == 0);
5178 	CU_ASSERT(count == 1);
5179 	CU_ASSERT(ids[0] == blobid);
5180 
5181 
5182 	/* 3. Create clone from snapshot */
5183 
5184 	spdk_bs_create_clone(bs, snapshotid, NULL, blob_op_with_id_complete, NULL);
5185 	poll_threads();
5186 	CU_ASSERT(g_bserrno == 0);
5187 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5188 	cloneid = g_blobid;
5189 
5190 	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
5191 	poll_threads();
5192 	CU_ASSERT(g_bserrno == 0);
5193 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5194 	clone = g_blob;
5195 
5196 	CU_ASSERT(!spdk_blob_is_read_only(clone));
5197 	CU_ASSERT(!spdk_blob_is_snapshot(clone));
5198 	CU_ASSERT(spdk_blob_is_clone(clone));
5199 	CU_ASSERT(spdk_blob_is_thin_provisioned(clone));
5200 	CU_ASSERT(clone->parent_id == snapshotid);
5201 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid);
5202 
5203 	count = SPDK_COUNTOF(ids);
5204 	rc = spdk_blob_get_clones(bs, cloneid, ids, &count);
5205 	CU_ASSERT(rc == 0);
5206 	CU_ASSERT(count == 0);
5207 
5208 	/* Check if clone is on the snapshot's list */
5209 	count = SPDK_COUNTOF(ids);
5210 	rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
5211 	CU_ASSERT(rc == 0);
5212 	CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
5213 	CU_ASSERT(ids[0] == cloneid || ids[1] == cloneid);
5214 
5215 
5216 	/* 4. Create snapshot of the clone */
5217 
5218 	spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL);
5219 	poll_threads();
5220 	CU_ASSERT(g_bserrno == 0);
5221 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5222 	snapshotid2 = g_blobid;
5223 
5224 	spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL);
5225 	poll_threads();
5226 	CU_ASSERT(g_bserrno == 0);
5227 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5228 	snapshot2 = g_blob;
5229 
5230 	CU_ASSERT(spdk_blob_is_read_only(snapshot2));
5231 	CU_ASSERT(spdk_blob_is_snapshot(snapshot2));
5232 	CU_ASSERT(spdk_blob_is_clone(snapshot2));
5233 	CU_ASSERT(snapshot2->parent_id == snapshotid);
5234 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid);
5235 
5236 	/* Check if clone is converted to the clone of snapshot2 and snapshot2
5237 	 * is a child of snapshot */
5238 	CU_ASSERT(!spdk_blob_is_read_only(clone));
5239 	CU_ASSERT(!spdk_blob_is_snapshot(clone));
5240 	CU_ASSERT(spdk_blob_is_clone(clone));
5241 	CU_ASSERT(spdk_blob_is_thin_provisioned(clone));
5242 	CU_ASSERT(clone->parent_id == snapshotid2);
5243 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2);
5244 
5245 	count = SPDK_COUNTOF(ids);
5246 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
5247 	CU_ASSERT(rc == 0);
5248 	CU_ASSERT(count == 1);
5249 	CU_ASSERT(ids[0] == cloneid);
5250 
5251 
5252 	/* 5. Try to create clone from read only blob */
5253 
5254 	/* Mark blob as read only */
5255 	spdk_blob_set_read_only(blob);
5256 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
5257 	poll_threads();
5258 	CU_ASSERT(g_bserrno == 0);
5259 
5260 	/* Check if previously created blob is read only clone */
5261 	CU_ASSERT(spdk_blob_is_read_only(blob));
5262 	CU_ASSERT(!spdk_blob_is_snapshot(blob));
5263 	CU_ASSERT(spdk_blob_is_clone(blob));
5264 	CU_ASSERT(spdk_blob_is_thin_provisioned(blob));
5265 
5266 	/* Create clone from read only blob */
5267 	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5268 	poll_threads();
5269 	CU_ASSERT(g_bserrno == 0);
5270 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5271 	cloneid2 = g_blobid;
5272 
5273 	spdk_bs_open_blob(bs, cloneid2, blob_op_with_handle_complete, NULL);
5274 	poll_threads();
5275 	CU_ASSERT(g_bserrno == 0);
5276 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5277 	clone2 = g_blob;
5278 
5279 	CU_ASSERT(!spdk_blob_is_read_only(clone2));
5280 	CU_ASSERT(!spdk_blob_is_snapshot(clone2));
5281 	CU_ASSERT(spdk_blob_is_clone(clone2));
5282 	CU_ASSERT(spdk_blob_is_thin_provisioned(clone2));
5283 
5284 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid);
5285 
5286 	count = SPDK_COUNTOF(ids);
5287 	rc = spdk_blob_get_clones(bs, blobid, ids, &count);
5288 	CU_ASSERT(rc == 0);
5289 
5290 	CU_ASSERT(count == 1);
5291 	CU_ASSERT(ids[0] == cloneid2);
5292 
5293 	/* Close blobs */
5294 
5295 	spdk_blob_close(clone2, blob_op_complete, NULL);
5296 	poll_threads();
5297 	CU_ASSERT(g_bserrno == 0);
5298 
5299 	spdk_blob_close(blob, blob_op_complete, NULL);
5300 	poll_threads();
5301 	CU_ASSERT(g_bserrno == 0);
5302 
5303 	spdk_blob_close(clone, blob_op_complete, NULL);
5304 	poll_threads();
5305 	CU_ASSERT(g_bserrno == 0);
5306 
5307 	spdk_blob_close(snapshot, blob_op_complete, NULL);
5308 	poll_threads();
5309 	CU_ASSERT(g_bserrno == 0);
5310 
5311 	spdk_blob_close(snapshot2, blob_op_complete, NULL);
5312 	poll_threads();
5313 	CU_ASSERT(g_bserrno == 0);
5314 
5315 	/* Try to delete snapshot with more than 1 clone */
5316 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
5317 	poll_threads();
5318 	CU_ASSERT(g_bserrno != 0);
5319 
5320 	ut_bs_reload(&bs, &bs_opts);
5321 
5322 	/* A NULL ids array should fail with -ENOMEM but still return the number of clones in count */
5323 	count = SPDK_COUNTOF(ids);
5324 	rc = spdk_blob_get_clones(bs, snapshotid, NULL, &count);
5325 	CU_ASSERT(rc == -ENOMEM);
5326 	CU_ASSERT(count == 2);
5327 
5328 	/* An ids array that is too small should also fail with -ENOMEM and report the required count */
5329 	count = 1;
5330 	rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
5331 	CU_ASSERT(rc == -ENOMEM);
5332 	CU_ASSERT(count == 2);
5333 
5334 
5335 	/* Verify structure of loaded blob store */
5336 
5337 	/* snapshot */
5338 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid) == SPDK_BLOBID_INVALID);
5339 
5340 	count = SPDK_COUNTOF(ids);
5341 	rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
5342 	CU_ASSERT(rc == 0);
5343 	CU_ASSERT(count == 2);
5344 	CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
5345 	CU_ASSERT(ids[0] == snapshotid2 || ids[1] == snapshotid2);
5346 
5347 	/* blob */
5348 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
5349 	count = SPDK_COUNTOF(ids);
5350 	rc = spdk_blob_get_clones(bs, blobid, ids, &count);
5351 	CU_ASSERT(rc == 0);
5352 	CU_ASSERT(count == 1);
5353 	CU_ASSERT(ids[0] == cloneid2);
5354 
5355 	/* clone */
5356 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2);
5357 	count = SPDK_COUNTOF(ids);
5358 	rc = spdk_blob_get_clones(bs, cloneid, ids, &count);
5359 	CU_ASSERT(rc == 0);
5360 	CU_ASSERT(count == 0);
5361 
5362 	/* snapshot2 */
5363 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid);
5364 	count = SPDK_COUNTOF(ids);
5365 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
5366 	CU_ASSERT(rc == 0);
5367 	CU_ASSERT(count == 1);
5368 	CU_ASSERT(ids[0] == cloneid);
5369 
5370 	/* clone2 */
5371 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid);
5372 	count = SPDK_COUNTOF(ids);
5373 	rc = spdk_blob_get_clones(bs, cloneid2, ids, &count);
5374 	CU_ASSERT(rc == 0);
5375 	CU_ASSERT(count == 0);
5376 
5377 	/* Try to delete blob that user should not be able to remove */
5378 
5379 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
5380 	poll_threads();
5381 	CU_ASSERT(g_bserrno != 0);
5382 
5383 	/* Remove all blobs */
5384 
5385 	spdk_bs_delete_blob(bs, cloneid, blob_op_complete, NULL);
5386 	poll_threads();
5387 	CU_ASSERT(g_bserrno == 0);
5388 
5389 	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
5390 	poll_threads();
5391 	CU_ASSERT(g_bserrno == 0);
5392 
5393 	spdk_bs_delete_blob(bs, cloneid2, blob_op_complete, NULL);
5394 	poll_threads();
5395 	CU_ASSERT(g_bserrno == 0);
5396 
5397 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
5398 	poll_threads();
5399 	CU_ASSERT(g_bserrno == 0);
5400 
5401 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
5402 	poll_threads();
5403 	CU_ASSERT(g_bserrno == 0);
5404 
5405 	spdk_bs_unload(bs, bs_op_complete, NULL);
5406 	poll_threads();
5407 	CU_ASSERT(g_bserrno == 0);
5408 
5409 	g_bs = NULL;
5410 }
5411 
5412 /**
5413  * Snapshot-clones relation test 2
5414  *
5415  *         snapshot1
5416  *            |
5417  *         snapshot2
5418  *            |
5419  *      +-----+-----+
5420  *      |           |
5421  *   blob(ro)   snapshot3
5422  *      |           |
5423  *      |       snapshot4
5424  *      |        |     |
5425  *   clone2   clone  clone3
5426  */
5427 static void
5428 blob_relations2(void)
5429 {
5430 	struct spdk_blob_store *bs;
5431 	struct spdk_bs_dev *dev;
5432 	struct spdk_bs_opts bs_opts;
5433 	struct spdk_blob_opts opts;
5434 	struct spdk_blob *blob, *snapshot1, *snapshot2, *snapshot3, *snapshot4, *clone, *clone2;
5435 	spdk_blob_id blobid, snapshotid1, snapshotid2, snapshotid3, snapshotid4, cloneid, cloneid2,
5436 		     cloneid3;
5437 	int rc;
5438 	size_t count;
5439 	spdk_blob_id ids[10] = {};
5440 
5441 	dev = init_dev();
5442 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
5443 	snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");
5444 
5445 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
5446 	poll_threads();
5447 	CU_ASSERT(g_bserrno == 0);
5448 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
5449 	bs = g_bs;
5450 
5451 	/* 1. Create blob with 10 clusters */
5452 
5453 	ut_spdk_blob_opts_init(&opts);
5454 	opts.num_clusters = 10;
5455 
5456 	blob = ut_blob_create_and_open(bs, &opts);
5457 	blobid = spdk_blob_get_id(blob);
5458 
5459 	/* 2. Create snapshot1 */
5460 
5461 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5462 	poll_threads();
5463 	CU_ASSERT(g_bserrno == 0);
5464 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5465 	snapshotid1 = g_blobid;
5466 
5467 	spdk_bs_open_blob(bs, snapshotid1, blob_op_with_handle_complete, NULL);
5468 	poll_threads();
5469 	CU_ASSERT(g_bserrno == 0);
5470 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5471 	snapshot1 = g_blob;
5472 
5473 	CU_ASSERT(snapshot1->parent_id == SPDK_BLOBID_INVALID);
5474 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid1) == SPDK_BLOBID_INVALID);
5475 
5476 	CU_ASSERT(blob->parent_id == snapshotid1);
5477 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid1);
5478 
5479 	/* Check if blob is the clone of snapshot1 */
5480 	CU_ASSERT(blob->parent_id == snapshotid1);
5481 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid1);
5482 
5483 	count = SPDK_COUNTOF(ids);
5484 	rc = spdk_blob_get_clones(bs, snapshotid1, ids, &count);
5485 	CU_ASSERT(rc == 0);
5486 	CU_ASSERT(count == 1);
5487 	CU_ASSERT(ids[0] == blobid);
5488 
5489 	/* 3. Create another snapshot */
5490 
5491 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5492 	poll_threads();
5493 	CU_ASSERT(g_bserrno == 0);
5494 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5495 	snapshotid2 = g_blobid;
5496 
5497 	spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL);
5498 	poll_threads();
5499 	CU_ASSERT(g_bserrno == 0);
5500 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5501 	snapshot2 = g_blob;
5502 
5503 	CU_ASSERT(spdk_blob_is_clone(snapshot2));
5504 	CU_ASSERT(snapshot2->parent_id == snapshotid1);
5505 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid1);
5506 
5507 	/* Check if snapshot2 is the clone of snapshot1 and blob
5508 	 * is a child of snapshot2 */
5509 	CU_ASSERT(blob->parent_id == snapshotid2);
5510 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid2);
5511 
5512 	count = SPDK_COUNTOF(ids);
5513 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
5514 	CU_ASSERT(rc == 0);
5515 	CU_ASSERT(count == 1);
5516 	CU_ASSERT(ids[0] == blobid);
5517 
5518 	/* 4. Create clone from snapshot */
5519 
5520 	spdk_bs_create_clone(bs, snapshotid2, NULL, blob_op_with_id_complete, NULL);
5521 	poll_threads();
5522 	CU_ASSERT(g_bserrno == 0);
5523 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5524 	cloneid = g_blobid;
5525 
5526 	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
5527 	poll_threads();
5528 	CU_ASSERT(g_bserrno == 0);
5529 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5530 	clone = g_blob;
5531 
5532 	CU_ASSERT(clone->parent_id == snapshotid2);
5533 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2);
5534 
5535 	/* Check if clone is on the snapshot's list */
5536 	count = SPDK_COUNTOF(ids);
5537 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
5538 	CU_ASSERT(rc == 0);
5539 	CU_ASSERT(count == 2);
5540 	CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
5541 	CU_ASSERT(ids[0] == cloneid || ids[1] == cloneid);
5542 
5543 	/* 5. Create snapshot of the clone */
5544 
5545 	spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL);
5546 	poll_threads();
5547 	CU_ASSERT(g_bserrno == 0);
5548 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5549 	snapshotid3 = g_blobid;
5550 
5551 	spdk_bs_open_blob(bs, snapshotid3, blob_op_with_handle_complete, NULL);
5552 	poll_threads();
5553 	CU_ASSERT(g_bserrno == 0);
5554 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5555 	snapshot3 = g_blob;
5556 
5557 	CU_ASSERT(snapshot3->parent_id == snapshotid2);
5558 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid3) == snapshotid2);
5559 
5560 	/* Check if clone is converted to the clone of snapshot3 and snapshot3
5561 	 * is a child of snapshot2 */
5562 	CU_ASSERT(clone->parent_id == snapshotid3);
5563 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3);
5564 
5565 	count = SPDK_COUNTOF(ids);
5566 	rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count);
5567 	CU_ASSERT(rc == 0);
5568 	CU_ASSERT(count == 1);
5569 	CU_ASSERT(ids[0] == cloneid);
5570 
5571 	/* 6. Create another snapshot of the clone */
5572 
5573 	spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL);
5574 	poll_threads();
5575 	CU_ASSERT(g_bserrno == 0);
5576 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5577 	snapshotid4 = g_blobid;
5578 
5579 	spdk_bs_open_blob(bs, snapshotid4, blob_op_with_handle_complete, NULL);
5580 	poll_threads();
5581 	CU_ASSERT(g_bserrno == 0);
5582 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5583 	snapshot4 = g_blob;
5584 
5585 	CU_ASSERT(snapshot4->parent_id == snapshotid3);
5586 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid4) == snapshotid3);
5587 
5588 	/* Check if clone is converted to the clone of snapshot4 and snapshot4
5589 	 * is a child of snapshot3 */
5590 	CU_ASSERT(clone->parent_id == snapshotid4);
5591 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid4);
5592 
5593 	count = SPDK_COUNTOF(ids);
5594 	rc = spdk_blob_get_clones(bs, snapshotid4, ids, &count);
5595 	CU_ASSERT(rc == 0);
5596 	CU_ASSERT(count == 1);
5597 	CU_ASSERT(ids[0] == cloneid);
5598 
5599 	/* 7. Remove snapshot 4 */
5600 
5601 	ut_blob_close_and_delete(bs, snapshot4);
5602 
5603 	/* Check if relations are back to state from before creating snapshot 4 */
5604 	CU_ASSERT(clone->parent_id == snapshotid3);
5605 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3);
5606 
5607 	count = SPDK_COUNTOF(ids);
5608 	rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count);
5609 	CU_ASSERT(rc == 0);
5610 	CU_ASSERT(count == 1);
5611 	CU_ASSERT(ids[0] == cloneid);
5612 
5613 	/* 8. Create second clone of snapshot 3 and try to remove snapshot 3 */
5614 
5615 	spdk_bs_create_clone(bs, snapshotid3, NULL, blob_op_with_id_complete, NULL);
5616 	poll_threads();
5617 	CU_ASSERT(g_bserrno == 0);
5618 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5619 	cloneid3 = g_blobid;
5620 
5621 	spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL);
5622 	poll_threads();
5623 	CU_ASSERT(g_bserrno != 0);
5624 
5625 	/* 9. Open snapshot 3 again and try to remove it while clone 3 is closed */
5626 
5627 	spdk_bs_open_blob(bs, snapshotid3, blob_op_with_handle_complete, NULL);
5628 	poll_threads();
5629 	CU_ASSERT(g_bserrno == 0);
5630 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5631 	snapshot3 = g_blob;
5632 
5633 	spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL);
5634 	poll_threads();
5635 	CU_ASSERT(g_bserrno != 0);
5636 
5637 	spdk_blob_close(snapshot3, blob_op_complete, NULL);
5638 	poll_threads();
5639 	CU_ASSERT(g_bserrno == 0);
5640 
5641 	spdk_bs_delete_blob(bs, cloneid3, blob_op_complete, NULL);
5642 	poll_threads();
5643 	CU_ASSERT(g_bserrno == 0);
5644 
5645 	/* 10. Remove snapshot 1 */
5646 
5647 	ut_blob_close_and_delete(bs, snapshot1);
5648 
5649 	/* Check if relations were updated correctly after removing snapshot 1:
	 * snapshot2 is now the root and keeps both of its clones */
5650 	CU_ASSERT(snapshot2->parent_id == SPDK_BLOBID_INVALID);
5651 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == SPDK_BLOBID_INVALID);
5652 
5653 	count = SPDK_COUNTOF(ids);
5654 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
5655 	CU_ASSERT(rc == 0);
5656 	CU_ASSERT(count == 2);
5657 	CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
5658 	CU_ASSERT(ids[0] == snapshotid3 || ids[1] == snapshotid3);
5659 
5660 	/* 11. Try to create clone from read only blob */
5661 
5662 	/* Mark blob as read only */
5663 	spdk_blob_set_read_only(blob);
5664 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
5665 	poll_threads();
5666 	CU_ASSERT(g_bserrno == 0);
5667 
5668 	/* Create clone from read only blob */
5669 	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5670 	poll_threads();
5671 	CU_ASSERT(g_bserrno == 0);
5672 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5673 	cloneid2 = g_blobid;
5674 
5675 	spdk_bs_open_blob(bs, cloneid2, blob_op_with_handle_complete, NULL);
5676 	poll_threads();
5677 	CU_ASSERT(g_bserrno == 0);
5678 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5679 	clone2 = g_blob;
5680 
5681 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid);
5682 
5683 	count = SPDK_COUNTOF(ids);
5684 	rc = spdk_blob_get_clones(bs, blobid, ids, &count);
5685 	CU_ASSERT(rc == 0);
5686 	CU_ASSERT(count == 1);
5687 	CU_ASSERT(ids[0] == cloneid2);
5688 
5689 	/* Close blobs */
5690 
5691 	spdk_blob_close(clone2, blob_op_complete, NULL);
5692 	poll_threads();
5693 	CU_ASSERT(g_bserrno == 0);
5694 
5695 	spdk_blob_close(blob, blob_op_complete, NULL);
5696 	poll_threads();
5697 	CU_ASSERT(g_bserrno == 0);
5698 
5699 	spdk_blob_close(clone, blob_op_complete, NULL);
5700 	poll_threads();
5701 	CU_ASSERT(g_bserrno == 0);
5702 
5703 	spdk_blob_close(snapshot2, blob_op_complete, NULL);
5704 	poll_threads();
5705 	CU_ASSERT(g_bserrno == 0);
5706 
5707 	spdk_blob_close(snapshot3, blob_op_complete, NULL);
5708 	poll_threads();
5709 	CU_ASSERT(g_bserrno == 0);
5710 
5711 	ut_bs_reload(&bs, &bs_opts);
5712 
5713 	/* Verify structure of loaded blob store */
5714 
5715 	/* snapshot2 */
5716 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == SPDK_BLOBID_INVALID);
5717 
5718 	count = SPDK_COUNTOF(ids);
5719 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
5720 	CU_ASSERT(rc == 0);
5721 	CU_ASSERT(count == 2);
5722 	CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
5723 	CU_ASSERT(ids[0] == snapshotid3 || ids[1] == snapshotid3);
5724 
5725 	/* blob */
5726 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid2);
5727 	count = SPDK_COUNTOF(ids);
5728 	rc = spdk_blob_get_clones(bs, blobid, ids, &count);
5729 	CU_ASSERT(rc == 0);
5730 	CU_ASSERT(count == 1);
5731 	CU_ASSERT(ids[0] == cloneid2);
5732 
5733 	/* clone */
5734 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3);
5735 	count = SPDK_COUNTOF(ids);
5736 	rc = spdk_blob_get_clones(bs, cloneid, ids, &count);
5737 	CU_ASSERT(rc == 0);
5738 	CU_ASSERT(count == 0);
5739 
5740 	/* snapshot3 */
5741 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid3) == snapshotid2);
5742 	count = SPDK_COUNTOF(ids);
5743 	rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count);
5744 	CU_ASSERT(rc == 0);
5745 	CU_ASSERT(count == 1);
5746 	CU_ASSERT(ids[0] == cloneid);
5747 
5748 	/* clone2 */
5749 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid);
5750 	count = SPDK_COUNTOF(ids);
5751 	rc = spdk_blob_get_clones(bs, cloneid2, ids, &count);
5752 	CU_ASSERT(rc == 0);
5753 	CU_ASSERT(count == 0);
5754 
5755 	/* Try to delete all blobs in the worst possible order */
5756 
5757 	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
5758 	poll_threads();
5759 	CU_ASSERT(g_bserrno != 0);
5760 
5761 	spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL);
5762 	poll_threads();
5763 	CU_ASSERT(g_bserrno == 0);
5764 
5765 	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
5766 	poll_threads();
5767 	CU_ASSERT(g_bserrno != 0);
5768 
5769 	spdk_bs_delete_blob(bs, cloneid, blob_op_complete, NULL);
5770 	poll_threads();
5771 	CU_ASSERT(g_bserrno == 0);
5772 
5773 	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
5774 	poll_threads();
5775 	CU_ASSERT(g_bserrno == 0);
5776 
5777 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
5778 	poll_threads();
5779 	CU_ASSERT(g_bserrno == 0);
5780 
5781 	spdk_bs_delete_blob(bs, cloneid2, blob_op_complete, NULL);
5782 	poll_threads();
5783 	CU_ASSERT(g_bserrno == 0);
5784 
5785 	spdk_bs_unload(bs, bs_op_complete, NULL);
5786 	poll_threads();
5787 	CU_ASSERT(g_bserrno == 0);
5788 
5789 	g_bs = NULL;
5790 }
5791 
5792 /**
5793  * Snapshot-clones relation test 3
5794  *
5795  *         snapshot0
5796  *            |
5797  *         snapshot1
5798  *            |
5799  *         snapshot2
5800  *            |
5801  *           blob
5802  */
5803 static void
5804 blob_relations3(void)
5805 {
5806 	struct spdk_blob_store *bs;
5807 	struct spdk_bs_dev *dev;
5808 	struct spdk_io_channel *channel;
5809 	struct spdk_bs_opts bs_opts;
5810 	struct spdk_blob_opts opts;
5811 	struct spdk_blob *blob;
5812 	spdk_blob_id blobid, snapshotid0, snapshotid1, snapshotid2;
5813 
5814 	dev = init_dev();
5815 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
5816 	snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");
5817 
5818 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
5819 	poll_threads();
5820 	CU_ASSERT(g_bserrno == 0);
5821 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
5822 	bs = g_bs;
5823 
5824 	channel = spdk_bs_alloc_io_channel(bs);
5825 	SPDK_CU_ASSERT_FATAL(channel != NULL);
5826 
5827 	/* 1. Create blob with 10 clusters */
5828 	ut_spdk_blob_opts_init(&opts);
5829 	opts.num_clusters = 10;
5830 
5831 	blob = ut_blob_create_and_open(bs, &opts);
5832 	blobid = spdk_blob_get_id(blob);
5833 
5834 	/* 2. Create snapshot0 */
5835 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5836 	poll_threads();
5837 	CU_ASSERT(g_bserrno == 0);
5838 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5839 	snapshotid0 = g_blobid;
5840 
5841 	/* 3. Create snapshot1 */
5842 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5843 	poll_threads();
5844 	CU_ASSERT(g_bserrno == 0);
5845 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5846 	snapshotid1 = g_blobid;
5847 
5848 	/* 4. Create snapshot2 */
5849 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5850 	poll_threads();
5851 	CU_ASSERT(g_bserrno == 0);
5852 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5853 	snapshotid2 = g_blobid;
5854 
5855 	/* 5. Decouple blob */
5856 	spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
5857 	poll_threads();
5858 	CU_ASSERT(g_bserrno == 0);
5859 
5860 	/* 6. Decouple snapshot2. Make sure updating md of snapshot2 is possible */
5861 	spdk_bs_blob_decouple_parent(bs, channel, snapshotid2, blob_op_complete, NULL);
5862 	poll_threads();
5863 	CU_ASSERT(g_bserrno == 0);
5864 
5865 	/* 7. Delete blob */
5866 	spdk_blob_close(blob, blob_op_complete, NULL);
5867 	poll_threads();
5868 	CU_ASSERT(g_bserrno == 0);
5869 
5870 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
5871 	poll_threads();
5872 	CU_ASSERT(g_bserrno == 0);
5873 
5874 	/* 8. Delete snapshot2.
5875 	 * If md of snapshot 2 was updated, it should be possible to delete it */
5876 	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
5877 	poll_threads();
5878 	CU_ASSERT(g_bserrno == 0);
5879 
5880 	/* Remove remaining blobs and unload bs */
5881 	spdk_bs_delete_blob(bs, snapshotid1, blob_op_complete, NULL);
5882 	poll_threads();
5883 	CU_ASSERT(g_bserrno == 0);
5884 
5885 	spdk_bs_delete_blob(bs, snapshotid0, blob_op_complete, NULL);
5886 	poll_threads();
5887 	CU_ASSERT(g_bserrno == 0);
5888 
5889 	spdk_bs_free_io_channel(channel);
5890 	poll_threads();
5891 
5892 	spdk_bs_unload(bs, bs_op_complete, NULL);
5893 	poll_threads();
5894 	CU_ASSERT(g_bserrno == 0);
5895 
5896 	g_bs = NULL;
5897 }
5898 
5899 static void
5900 blobstore_clean_power_failure(void)
5901 {
5902 	struct spdk_blob_store *bs;
5903 	struct spdk_blob *blob;
5904 	struct spdk_power_failure_thresholds thresholds = {};
5905 	bool clean = false;
5906 	struct spdk_bs_super_block *super = (struct spdk_bs_super_block *)&g_dev_buffer[0];
5907 	struct spdk_bs_super_block super_copy = {};
5908 
5909 	thresholds.general_threshold = 1;
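	/* Allow one device operation before the simulated power failure and keep
	 * increasing the threshold each iteration until the md sync succeeds. */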
5910 	while (!clean) {
5911 		/* Create bs and blob */
5912 		suite_blob_setup();
5913 		SPDK_CU_ASSERT_FATAL(g_bs != NULL);
5914 		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5915 		bs = g_bs;
5916 		blob = g_blob;
5917 
5918 		/* The super block should not change for the rest of the UT,
5919 		 * so save it and compare later. */
5920 		memcpy(&super_copy, super, sizeof(struct spdk_bs_super_block));
5921 		SPDK_CU_ASSERT_FATAL(super->clean == 0);
5922 		SPDK_CU_ASSERT_FATAL(bs->clean == 0);
5923 
5924 		/* Force the bs/super block into a clean state and mark the blob
5925 		 * dirty to force a blob persist. */
5926 		blob->state = SPDK_BLOB_STATE_DIRTY;
5927 		bs->clean = 1;
5928 		super->clean = 1;
5929 		super->crc = blob_md_page_calc_crc(super);
5930 
5931 		g_bserrno = -1;
5932 		dev_set_power_failure_thresholds(thresholds);
5933 		spdk_blob_sync_md(blob, blob_op_complete, NULL);
5934 		poll_threads();
5935 		dev_reset_power_failure_event();
5936 
5937 		if (g_bserrno == 0) {
5938 			/* After successful md sync, both bs and super block
5939 			 * should be marked as not clean. */
5940 			SPDK_CU_ASSERT_FATAL(bs->clean == 0);
5941 			SPDK_CU_ASSERT_FATAL(super->clean == 0);
5942 			clean = true;
5943 		}
5944 
5945 		/* Depending on the point of failure, super block was either updated or not. */
5946 		super_copy.clean = super->clean;
5947 		super_copy.crc = blob_md_page_calc_crc(&super_copy);
5948 		/* Compare that the values in super block remained unchanged. */
5949 		SPDK_CU_ASSERT_FATAL(!memcmp(&super_copy, super, sizeof(struct spdk_bs_super_block)));
5950 
5951 		/* Delete blob and unload bs */
5952 		suite_blob_cleanup();
5953 
5954 		thresholds.general_threshold++;
5955 	}
5956 }
5957 
5958 static void
5959 blob_delete_snapshot_power_failure(void)
5960 {
5961 	struct spdk_bs_dev *dev;
5962 	struct spdk_blob_store *bs;
5963 	struct spdk_blob_opts opts;
5964 	struct spdk_blob *blob, *snapshot;
5965 	struct spdk_power_failure_thresholds thresholds = {};
5966 	spdk_blob_id blobid, snapshotid;
5967 	const void *value;
5968 	size_t value_len;
5969 	size_t count;
5970 	spdk_blob_id ids[3] = {};
5971 	int rc;
5972 	bool deleted = false;
5973 	int delete_snapshot_bserrno = -1;
5974 
5975 	thresholds.general_threshold = 1;
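	/* Retry the snapshot deletion with a progressively later simulated power
	 * failure, rebuilding the blobstore each iteration, until the delete call
	 * succeeds. */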
5976 	while (!deleted) {
5977 		dev = init_dev();
5978 
5979 		spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
5980 		poll_threads();
5981 		CU_ASSERT(g_bserrno == 0);
5982 		SPDK_CU_ASSERT_FATAL(g_bs != NULL);
5983 		bs = g_bs;
5984 
5985 		/* Create blob */
5986 		ut_spdk_blob_opts_init(&opts);
5987 		opts.num_clusters = 10;
5988 
5989 		spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
5990 		poll_threads();
5991 		CU_ASSERT(g_bserrno == 0);
5992 		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5993 		blobid = g_blobid;
5994 
5995 		/* Create snapshot */
5996 		spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5997 		poll_threads();
5998 		CU_ASSERT(g_bserrno == 0);
5999 		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
6000 		snapshotid = g_blobid;
6001 		SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1));
6002 		SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11));
6003 
6004 		dev_set_power_failure_thresholds(thresholds);
6005 
6006 		spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
6007 		poll_threads();
6008 		delete_snapshot_bserrno = g_bserrno;
6009 
6010 		/* Do not shut down cleanly. The assumption is that once snapshot deletion
6011 		 * reports success, changes to both blobs should already be persisted. */
6012 		dev_reset_power_failure_event();
6013 		ut_bs_dirty_load(&bs, NULL);
6014 
6015 		SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1));
6016 		SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11));
6017 
6018 		spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
6019 		poll_threads();
6020 		CU_ASSERT(g_bserrno == 0);
6021 		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
6022 		blob = g_blob;
6023 		SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == true);
6024 
6025 		spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
6026 		poll_threads();
6027 
6028 		if (g_bserrno == 0) {
6029 			SPDK_CU_ASSERT_FATAL(g_blob != NULL);
6030 			snapshot = g_blob;
6031 			CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
6032 			count = SPDK_COUNTOF(ids);
6033 			rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
6034 			CU_ASSERT(rc == 0);
6035 			CU_ASSERT(count == 1);
6036 			CU_ASSERT(ids[0] == blobid);
6037 			rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_PENDING_REMOVAL, &value, &value_len);
6038 			CU_ASSERT(rc != 0);
6039 			SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(snapshot) == false);
6040 
6041 			spdk_blob_close(snapshot, blob_op_complete, NULL);
6042 			poll_threads();
6043 			CU_ASSERT(g_bserrno == 0);
6044 		} else {
6045 			CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID);
6046 			/* Snapshot might have been left in an unrecoverable state, so it does not open.
6047 			 * Yet delete might perform further changes to the clone after that.
6048 			 * This UT should keep iterating until the snapshot is deleted and the delete call succeeds. */
6049 			if (delete_snapshot_bserrno == 0) {
6050 				deleted = true;
6051 			}
6052 		}
6053 
6054 		spdk_blob_close(blob, blob_op_complete, NULL);
6055 		poll_threads();
6056 		CU_ASSERT(g_bserrno == 0);
6057 
6058 		spdk_bs_unload(bs, bs_op_complete, NULL);
6059 		poll_threads();
6060 		CU_ASSERT(g_bserrno == 0);
6061 
6062 		thresholds.general_threshold++;
6063 	}
6064 }
6065 
6066 static void
6067 blob_create_snapshot_power_failure(void)
6068 {
6069 	struct spdk_blob_store *bs = g_bs;
6070 	struct spdk_bs_dev *dev;
6071 	struct spdk_blob_opts opts;
6072 	struct spdk_blob *blob, *snapshot;
6073 	struct spdk_power_failure_thresholds thresholds = {};
6074 	spdk_blob_id blobid, snapshotid;
6075 	const void *value;
6076 	size_t value_len;
6077 	size_t count;
6078 	spdk_blob_id ids[3] = {};
6079 	int rc;
6080 	bool created = false;
6081 	int create_snapshot_bserrno = -1;
6082 
6083 	thresholds.general_threshold = 1;
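	/* Retry snapshot creation with a progressively later simulated power
	 * failure, rebuilding the blobstore each iteration, until the creation
	 * succeeds and the snapshot is verified after a dirty reload. */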
6084 	while (!created) {
6085 		dev = init_dev();
6086 
6087 		spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
6088 		poll_threads();
6089 		CU_ASSERT(g_bserrno == 0);
6090 		SPDK_CU_ASSERT_FATAL(g_bs != NULL);
6091 		bs = g_bs;
6092 
6093 		/* Create blob */
6094 		ut_spdk_blob_opts_init(&opts);
6095 		opts.num_clusters = 10;
6096 
6097 		spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
6098 		poll_threads();
6099 		CU_ASSERT(g_bserrno == 0);
6100 		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
6101 		blobid = g_blobid;
6102 		SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1));
6103 		SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11));
6104 
6105 		dev_set_power_failure_thresholds(thresholds);
6106 
6107 		/* Create snapshot */
6108 		spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
6109 		poll_threads();
6110 		create_snapshot_bserrno = g_bserrno;
6111 		snapshotid = g_blobid;
6112 		SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1));
6113 		SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11));
6114 
6115 		/* Do not shut down cleanly. The assumption is that once snapshot creation
6116 		 * reports success, both blobs should be power-fail safe. */
6117 		dev_reset_power_failure_event();
6118 		ut_bs_dirty_load(&bs, NULL);
6119 
6120 		SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1));
6121 		SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11));
6122 
6123 		spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
6124 		poll_threads();
6125 		CU_ASSERT(g_bserrno == 0);
6126 		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
6127 		blob = g_blob;
6128 
6129 		if (snapshotid != SPDK_BLOBID_INVALID) {
6130 			spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
6131 			poll_threads();
6132 		}
6133 
6134 		if ((snapshotid != SPDK_BLOBID_INVALID) && (g_bserrno == 0)) {
6135 			SPDK_CU_ASSERT_FATAL(g_blob != NULL);
6136 			snapshot = g_blob;
6137 			SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == true);
6138 			SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(snapshot) == false);
6139 			CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
6140 			count = SPDK_COUNTOF(ids);
6141 			rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
6142 			CU_ASSERT(rc == 0);
6143 			CU_ASSERT(count == 1);
6144 			CU_ASSERT(ids[0] == blobid);
6145 			rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_IN_PROGRESS, &value, &value_len);
6146 			CU_ASSERT(rc != 0);
6147 
6148 			spdk_blob_close(snapshot, blob_op_complete, NULL);
6149 			poll_threads();
6150 			CU_ASSERT(g_bserrno == 0);
6151 			if (create_snapshot_bserrno == 0) {
6152 				created = true;
6153 			}
6154 		} else {
6155 			CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID);
6156 			SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == false);
6157 		}
6158 
6159 		spdk_blob_close(blob, blob_op_complete, NULL);
6160 		poll_threads();
6161 		CU_ASSERT(g_bserrno == 0);
6162 
6163 		spdk_bs_unload(bs, bs_op_complete, NULL);
6164 		poll_threads();
6165 		CU_ASSERT(g_bserrno == 0);
6166 
6167 		thresholds.general_threshold++;
6168 	}
6169 }
6170 
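/* Issue writes with io_unit = 512 at various offsets and lengths and verify
 * the resulting on-disk cluster contents directly in g_dev_buffer. */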
6171 static void
6172 test_io_write(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
6173 {
6174 	uint8_t payload_ff[64 * 512];
6175 	uint8_t payload_aa[64 * 512];
6176 	uint8_t payload_00[64 * 512];
6177 	uint8_t *cluster0, *cluster1;
6178 
6179 	memset(payload_ff, 0xFF, sizeof(payload_ff));
6180 	memset(payload_aa, 0xAA, sizeof(payload_aa));
6181 	memset(payload_00, 0x00, sizeof(payload_00));
6182 
6183 	/* Try to perform I/O with io unit = 512 */
6184 	spdk_blob_io_write(blob, channel, payload_ff, 0, 1, blob_op_complete, NULL);
6185 	poll_threads();
6186 	CU_ASSERT(g_bserrno == 0);
6187 
6188 	/* If thin provisioning is set, the cluster should be allocated now */
6189 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[0] != 0);
6190 	cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen];
6191 
6192 	/* Each character 0-F symbolizes a single io_unit containing a 512-byte block filled with that character.
6193 	 * Pages are separated by |. The whole block [...] symbolizes one cluster (containing 4 pages). */
6194 	/* cluster0: [ F000 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
6195 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6196 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 31 * 512) == 0);
6197 
6198 	/* Verify write with offset on first page */
6199 	spdk_blob_io_write(blob, channel, payload_ff, 2, 1, blob_op_complete, NULL);
6200 	poll_threads();
6201 	CU_ASSERT(g_bserrno == 0);
6202 
6203 	/* cluster0: [ F0F0 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
6204 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6205 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6206 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6207 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6208 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_00, 28 * 512) == 0);
6209 
6210 	/* Verify write with offset on first page */
6211 	spdk_blob_io_write(blob, channel, payload_ff, 4, 4, blob_op_complete, NULL);
6212 	poll_threads();
6213 
6214 	/* cluster0: [ F0F0 FFFF | 0000 0000 | 0000 0000 | 0000 0000 ] */
6215 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6216 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6217 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6218 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6219 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 4 * 512) == 0);
6220 	CU_ASSERT(memcmp(cluster0 + 8 * 512, payload_00, 24 * 512) == 0);
6221 
6222 	/* Verify write with offset on second page */
6223 	spdk_blob_io_write(blob, channel, payload_ff, 8, 4, blob_op_complete, NULL);
6224 	poll_threads();
6225 
6226 	/* cluster0: [ F0F0 FFFF | FFFF 0000 | 0000 0000 | 0000 0000 ] */
6227 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6228 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6229 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6230 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6231 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 8 * 512) == 0);
6232 	CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0);
6233 
6234 	/* Verify write across multiple pages */
6235 	spdk_blob_io_write(blob, channel, payload_aa, 4, 8, blob_op_complete, NULL);
6236 	poll_threads();
6237 
6238 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 0000 ] */
6239 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6240 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6241 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6242 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6243 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
6244 	CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0);
6245 
6246 	/* Verify write across multiple clusters */
6247 	spdk_blob_io_write(blob, channel, payload_ff, 28, 8, blob_op_complete, NULL);
6248 	poll_threads();
6249 
6250 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0);
6251 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
6252 
6253 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6254 	 * cluster1: [ FFFF 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
6255 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6256 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6257 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6258 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6259 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
6260 	CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0);
6261 
6262 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0);
6263 	CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 28 * 512) == 0);
6264 
6265 	/* Verify write to second cluster */
6266 	spdk_blob_io_write(blob, channel, payload_ff, 32 + 12, 2, blob_op_complete, NULL);
6267 	poll_threads();
6268 
6269 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0);
6270 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
6271 
6272 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6273 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] */
6274 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6275 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6276 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6277 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6278 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
6279 	CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0);
6280 
6281 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0);
6282 	CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 8 * 512) == 0);
6283 	CU_ASSERT(memcmp(cluster1 + 12 * 512, payload_ff, 2 * 512) == 0);
6284 	CU_ASSERT(memcmp(cluster1 + 14 * 512, payload_00, 18 * 512) == 0);
6285 }
6286 
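/* Read back the pattern written by test_io_write() at various offsets and
 * lengths, including reads that span both clusters. */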
6287 static void
6288 test_io_read(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
6289 {
6290 	uint8_t payload_read[64 * 512];
6291 	uint8_t payload_ff[64 * 512];
6292 	uint8_t payload_aa[64 * 512];
6293 	uint8_t payload_00[64 * 512];
6294 
6295 	memset(payload_ff, 0xFF, sizeof(payload_ff));
6296 	memset(payload_aa, 0xAA, sizeof(payload_aa));
6297 	memset(payload_00, 0x00, sizeof(payload_00));
6298 
6299 	/* Read only the first io unit */
6300 	/* cluster0: [ (F)0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6301 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
6302 	 * payload_read: F000 0000 | 0000 0000 ... */
6303 	memset(payload_read, 0x00, sizeof(payload_read));
6304 	spdk_blob_io_read(blob, channel, payload_read, 0, 1, blob_op_complete, NULL);
6305 	poll_threads();
6306 	CU_ASSERT(g_bserrno == 0);
6307 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
6308 	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 31 * 512) == 0);
6309 
6310 	/* Read four io_units starting from offset = 2
6311 	 * cluster0: [ F0(F0 AA)AA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6312 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
6313 	 * payload_read: F0AA 0000 | 0000 0000 ... */
6314 
6315 	memset(payload_read, 0x00, sizeof(payload_read));
6316 	spdk_blob_io_read(blob, channel, payload_read, 2, 4, blob_op_complete, NULL);
6317 	poll_threads();
6318 	CU_ASSERT(g_bserrno == 0);
6319 
6320 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
6321 	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0);
6322 	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_aa, 512) == 0);
6323 	CU_ASSERT(memcmp(payload_read + 3 * 512, payload_aa, 512) == 0);
6324 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0);
6325 
6326 	/* Read eight io_units across multiple pages
6327 	 * cluster0: [ F0F0 (AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ]
6328 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
6329 	 * payload_read: AAAA AAAA | 0000 0000 ... */
6330 	memset(payload_read, 0x00, sizeof(payload_read));
6331 	spdk_blob_io_read(blob, channel, payload_read, 4, 8, blob_op_complete, NULL);
6332 	poll_threads();
6333 	CU_ASSERT(g_bserrno == 0);
6334 
6335 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_aa, 8 * 512) == 0);
6336 	CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0);
6337 
6338 	/* Read eight io_units across multiple clusters
6339 	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 (FFFF ]
6340 	 * cluster1: [ FFFF) 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
6341 	 * payload_read: FFFF FFFF | 0000 0000 ... */
6342 	memset(payload_read, 0x00, sizeof(payload_read));
6343 	spdk_blob_io_read(blob, channel, payload_read, 28, 8, blob_op_complete, NULL);
6344 	poll_threads();
6345 	CU_ASSERT(g_bserrno == 0);
6346 
6347 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 8 * 512) == 0);
6348 	CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0);
6349 
6350 	/* Read four io_units from second cluster
6351 	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6352 	 * cluster1: [ FFFF 0000 | 00(00 FF)00 | 0000 0000 | 0000 0000 ]
6353 	 * payload_read: 00FF 0000 | 0000 0000 ... */
6354 	memset(payload_read, 0x00, sizeof(payload_read));
6355 	spdk_blob_io_read(blob, channel, payload_read, 32 + 10, 4, blob_op_complete, NULL);
6356 	poll_threads();
6357 	CU_ASSERT(g_bserrno == 0);
6358 
6359 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_00, 2 * 512) == 0);
6360 	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 2 * 512) == 0);
6361 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0);
6362 
6363 	/* Read second cluster
6364 	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6365 	 * cluster1: [ (FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ]
6366 	 * payload_read: FFFF 0000 | 0000 FF00 ... */
6367 	memset(payload_read, 0x00, sizeof(payload_read));
6368 	spdk_blob_io_read(blob, channel, payload_read, 32, 32, blob_op_complete, NULL);
6369 	poll_threads();
6370 	CU_ASSERT(g_bserrno == 0);
6371 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 4 * 512) == 0);
6372 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 8 * 512) == 0);
6373 	CU_ASSERT(memcmp(payload_read + 12 * 512, payload_ff, 2 * 512) == 0);
6374 	CU_ASSERT(memcmp(payload_read + 14 * 512, payload_00, 18 * 512) == 0);
6375 
6376 	/* Read whole two clusters
6377 	 * cluster0: [ (F0F0 AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ]
6378 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ] */
6379 	memset(payload_read, 0x00, sizeof(payload_read));
6380 	spdk_blob_io_read(blob, channel, payload_read, 0, 64, blob_op_complete, NULL);
6381 	poll_threads();
6382 	CU_ASSERT(g_bserrno == 0);
6383 
6384 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
6385 	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0);
6386 	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 512) == 0);
6387 	CU_ASSERT(memcmp(payload_read + 3 * 512, payload_00, 512) == 0);
6388 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_aa, 8 * 512) == 0);
6389 	CU_ASSERT(memcmp(payload_read + 28 * 512, payload_ff, 4 * 512) == 0);
6390 
6391 	CU_ASSERT(memcmp(payload_read + (32 + 0) * 512, payload_ff, 4 * 512) == 0);
6392 	CU_ASSERT(memcmp(payload_read + (32 + 4) * 512, payload_00, 8 * 512) == 0);
6393 	CU_ASSERT(memcmp(payload_read + (32 + 12) * 512, payload_ff, 2 * 512) == 0);
6394 	CU_ASSERT(memcmp(payload_read + (32 + 14) * 512, payload_00, 18 * 512) == 0);
6395 }
6396 
6397 
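/* Unmap both clusters and verify the backing device bytes read back as zeroes. */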
6398 static void
6399 test_io_unmap(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
6400 {
6401 	uint8_t payload_ff[64 * 512];
6402 	uint8_t payload_aa[64 * 512];
6403 	uint8_t payload_00[64 * 512];
6404 	uint8_t *cluster0, *cluster1;
6405 
6406 	memset(payload_ff, 0xFF, sizeof(payload_ff));
6407 	memset(payload_aa, 0xAA, sizeof(payload_aa));
6408 	memset(payload_00, 0x00, sizeof(payload_00));
6409 
6410 	cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen];
6411 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
6412 
6413 	/* Unmap */
6414 	spdk_blob_io_unmap(blob, channel, 0, 64, blob_op_complete, NULL);
6415 	poll_threads();
6416 
6417 	CU_ASSERT(g_bserrno == 0);
6418 
6419 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_00, 32 * 512) == 0);
6420 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_00, 32 * 512) == 0);
6421 }
6422 
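/* Write zeroes across both clusters and verify the backing device bytes are cleared. */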
6423 static void
6424 test_io_zeroes(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
6425 {
6426 	uint8_t payload_ff[64 * 512];
6427 	uint8_t payload_aa[64 * 512];
6428 	uint8_t payload_00[64 * 512];
6429 	uint8_t *cluster0, *cluster1;
6430 
6431 	memset(payload_ff, 0xFF, sizeof(payload_ff));
6432 	memset(payload_aa, 0xAA, sizeof(payload_aa));
6433 	memset(payload_00, 0x00, sizeof(payload_00));
6434 
6435 	cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen];
6436 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
6437 
6438 	/* Write zeroes */
6439 	spdk_blob_io_write_zeroes(blob, channel, 0, 64, blob_op_complete, NULL);
6440 	poll_threads();
6441 
6442 	CU_ASSERT(g_bserrno == 0);
6443 
6444 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_00, 32 * 512) == 0);
6445 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_00, 32 * 512) == 0);
6446 }
6447 
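/* Helper that issues either spdk_blob_io_writev() or, when io_opts is provided,
 * spdk_blob_io_writev_ext(), and verifies that the ext opts were passed through
 * to the dev layer. */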
6448 static inline void
6449 test_blob_io_writev(struct spdk_blob *blob, struct spdk_io_channel *channel,
6450 		    struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
6451 		    spdk_blob_op_complete cb_fn, void *cb_arg, struct spdk_blob_ext_io_opts *io_opts)
6452 {
6453 	if (io_opts) {
6454 		g_dev_writev_ext_called = false;
6455 		memset(&g_blob_ext_io_opts, 0, sizeof(g_blob_ext_io_opts));
6456 		spdk_blob_io_writev_ext(blob, channel, iov, iovcnt, offset, length, blob_op_complete, NULL,
6457 					io_opts);
6458 	} else {
6459 		spdk_blob_io_writev(blob, channel, iov, iovcnt, offset, length, blob_op_complete, NULL);
6460 	}
6461 	poll_threads();
6462 	CU_ASSERT(g_bserrno == 0);
6463 	if (io_opts) {
6464 		CU_ASSERT(g_dev_writev_ext_called);
6465 		CU_ASSERT(memcmp(io_opts, &g_blob_ext_io_opts, sizeof(g_blob_ext_io_opts)) == 0);
6466 	}
6467 }
6468 
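/* Same coverage as test_io_write(), but using iovec-based writes; with ext_api
 * set, the writev_ext path is exercised via test_blob_io_writev(). */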
6469 static void
6470 test_iov_write(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel,
6471 	       bool ext_api)
6472 {
6473 	uint8_t payload_ff[64 * 512];
6474 	uint8_t payload_aa[64 * 512];
6475 	uint8_t payload_00[64 * 512];
6476 	uint8_t *cluster0, *cluster1;
6477 	struct iovec iov[4];
6478 	struct spdk_blob_ext_io_opts ext_opts = {
6479 		.memory_domain = (struct spdk_memory_domain *)0xfeedbeef,
6480 		.memory_domain_ctx = (void *)0xf00df00d,
6481 		.size = sizeof(struct spdk_blob_ext_io_opts),
6482 		.user_ctx = (void *)123,
6483 	};
6484 
6485 	memset(payload_ff, 0xFF, sizeof(payload_ff));
6486 	memset(payload_aa, 0xAA, sizeof(payload_aa));
6487 	memset(payload_00, 0x00, sizeof(payload_00));
6488 
6489 	/* Try to perform I/O with io unit = 512 */
6490 	iov[0].iov_base = payload_ff;
6491 	iov[0].iov_len = 1 * 512;
6492 
6493 	test_blob_io_writev(blob, channel, iov, 1, 0, 1, blob_op_complete, NULL,
6494 			    ext_api ? &ext_opts : NULL);
6495 
6496 	/* If thin provisioning is set, the cluster should be allocated now */
6497 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[0] != 0);
6498 	cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen];
6499 
6500 	/* Each character 0-F symbolizes a single io_unit containing a 512-byte block filled with that character.
6501 	 * Each page is separated by |. The whole block [...] symbolizes one cluster (containing 4 pages). */
6502 	/* cluster0: [ F000 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
6503 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6504 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 31 * 512) == 0);
6505 
6506 	/* Verify write with offset on first page */
6507 	iov[0].iov_base = payload_ff;
6508 	iov[0].iov_len = 1 * 512;
6509 
6510 	test_blob_io_writev(blob, channel, iov, 1, 2, 1, blob_op_complete, NULL,
6511 			    ext_api ? &ext_opts : NULL);
6512 
6513 	/* cluster0: [ F0F0 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
6514 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6515 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6516 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6517 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6518 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_00, 28 * 512) == 0);
6519 
6520 	/* Verify write covering the rest of the first page */
6521 	iov[0].iov_base = payload_ff;
6522 	iov[0].iov_len = 4 * 512;
6523 	spdk_blob_io_writev(blob, channel, iov, 1, 4, 4, blob_op_complete, NULL);
6524 	poll_threads();
6525 
6526 	/* cluster0: [ F0F0 FFFF | 0000 0000 | 0000 0000 | 0000 0000 ] */
6527 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6528 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6529 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6530 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6531 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 4 * 512) == 0);
6532 	CU_ASSERT(memcmp(cluster0 + 8 * 512, payload_00, 24 * 512) == 0);
6533 
6534 	/* Verify write with offset on second page */
6535 	iov[0].iov_base = payload_ff;
6536 	iov[0].iov_len = 4 * 512;
6537 	spdk_blob_io_writev(blob, channel, iov, 1, 8, 4, blob_op_complete, NULL);
6538 	poll_threads();
6539 
6540 	/* cluster0: [ F0F0 FFFF | FFFF 0000 | 0000 0000 | 0000 0000 ] */
6541 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6542 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6543 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6544 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6545 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 8 * 512) == 0);
6546 	CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0);
6547 
6548 	/* Verify write across multiple pages */
6549 	iov[0].iov_base = payload_aa;
6550 	iov[0].iov_len = 8 * 512;
6551 
6552 	test_blob_io_writev(blob, channel, iov, 1, 4, 8, blob_op_complete, NULL,
6553 			    ext_api ? &ext_opts : NULL);
6554 
6555 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 0000 ] */
6556 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6557 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6558 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6559 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6560 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
6561 	CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0);
6562 
6563 	/* Verify write across multiple clusters */
6564 
6565 	iov[0].iov_base = payload_ff;
6566 	iov[0].iov_len = 8 * 512;
6567 
6568 	test_blob_io_writev(blob, channel, iov, 1, 28, 8, blob_op_complete, NULL,
6569 			    ext_api ? &ext_opts : NULL);
6570 
6571 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0);
6572 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
6573 
6574 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6575 	 * cluster1: [ FFFF 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
6576 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6577 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6578 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6579 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6580 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
6581 	CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 16 * 512) == 0);
6582 	CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0);
6583 
6584 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0);
6585 	CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 28 * 512) == 0);
6586 
6587 	/* Verify write to second cluster */
6588 
6589 	iov[0].iov_base = payload_ff;
6590 	iov[0].iov_len = 2 * 512;
6591 
6592 	test_blob_io_writev(blob, channel, iov, 1, 32 + 12, 2, blob_op_complete, NULL,
6593 			    ext_api ? &ext_opts : NULL);
6594 
6595 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0);
6596 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
6597 
6598 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6599 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] */
6600 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6601 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6602 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6603 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6604 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
6605 	CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0);
6606 
6607 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0);
6608 	CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 8 * 512) == 0);
6609 	CU_ASSERT(memcmp(cluster1 + 12 * 512, payload_ff, 2 * 512) == 0);
6610 	CU_ASSERT(memcmp(cluster1 + 14 * 512, payload_00, 18 * 512) == 0);
6611 }
6612 
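/*
 * Helper that issues a vectored read through either spdk_blob_io_readv() or
 * spdk_blob_io_readv_ext(), polls for completion, and, when ext opts are given,
 * verifies that the readv_ext path was taken and the opts reached the bs_dev.
 */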
6613 static inline void
6614 test_blob_io_readv(struct spdk_blob *blob, struct spdk_io_channel *channel,
6615 		   struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
6616 		   spdk_blob_op_complete cb_fn, void *cb_arg, struct spdk_blob_ext_io_opts *io_opts)
6617 {
6618 	if (io_opts) {
6619 		g_dev_readv_ext_called = false;
6620 		memset(&g_blob_ext_io_opts, 0, sizeof(g_blob_ext_io_opts));
6621 		spdk_blob_io_readv_ext(blob, channel, iov, iovcnt, offset, length, blob_op_complete, NULL, io_opts);
6622 	} else {
6623 		spdk_blob_io_readv(blob, channel, iov, iovcnt, offset, length, blob_op_complete, NULL);
6624 	}
6625 	poll_threads();
6626 	CU_ASSERT(g_bserrno == 0);
6627 	if (io_opts) {
6628 		CU_ASSERT(g_dev_readv_ext_called);
6629 		CU_ASSERT(memcmp(io_opts, &g_blob_ext_io_opts, sizeof(g_blob_ext_io_opts)) == 0);
6630 	}
6631 }
6632 
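/*
 * Exercise vectored reads at 512-byte io_unit granularity, optionally via the
 * _ext API, against the data pattern written by test_iov_write().
 */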
6633 static void
6634 test_iov_read(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel,
6635 	      bool ext_api)
6636 {
6637 	uint8_t payload_read[64 * 512];
6638 	uint8_t payload_ff[64 * 512];
6639 	uint8_t payload_aa[64 * 512];
6640 	uint8_t payload_00[64 * 512];
6641 	struct iovec iov[4];
6642 	struct spdk_blob_ext_io_opts ext_opts = {
6643 		.memory_domain = (struct spdk_memory_domain *)0xfeedbeef,
6644 		.memory_domain_ctx = (void *)0xf00df00d,
6645 		.size = sizeof(struct spdk_blob_ext_io_opts),
6646 		.user_ctx = (void *)123,
6647 	};
6648 
6649 	memset(payload_ff, 0xFF, sizeof(payload_ff));
6650 	memset(payload_aa, 0xAA, sizeof(payload_aa));
6651 	memset(payload_00, 0x00, sizeof(payload_00));
6652 
6653 	/* Read only first io unit */
6654 	/* cluster0: [ (F)0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6655 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
6656 	 * payload_read: F000 0000 | 0000 0000 ... */
6657 	memset(payload_read, 0x00, sizeof(payload_read));
6658 	iov[0].iov_base = payload_read;
6659 	iov[0].iov_len = 1 * 512;
6660 
6661 	test_blob_io_readv(blob, channel, iov, 1, 0, 1, blob_op_complete, NULL, ext_api ? &ext_opts : NULL);
6662 
6663 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
6664 	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 31 * 512) == 0);
6665 
6666 	/* Read four io_units starting from offset = 2
6667 	 * cluster0: [ F0(F0 AA)AA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6668 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
6669 	 * payload_read: F0AA 0000 | 0000 0000 ... */
6670 
6671 	memset(payload_read, 0x00, sizeof(payload_read));
6672 	iov[0].iov_base = payload_read;
6673 	iov[0].iov_len = 4 * 512;
6674 
6675 	test_blob_io_readv(blob, channel, iov, 1, 2, 4, blob_op_complete, NULL, ext_api ? &ext_opts : NULL);
6676 
6677 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
6678 	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0);
6679 	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_aa, 512) == 0);
6680 	CU_ASSERT(memcmp(payload_read + 3 * 512, payload_aa, 512) == 0);
6681 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0);
6682 
6683 	/* Read eight io_units across multiple pages
6684 	 * cluster0: [ F0F0 (AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ]
6685 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
6686 	 * payload_read: AAAA AAAA | 0000 0000 ... */
6687 	memset(payload_read, 0x00, sizeof(payload_read));
6688 	iov[0].iov_base = payload_read;
6689 	iov[0].iov_len = 4 * 512;
6690 	iov[1].iov_base = payload_read + 4 * 512;
6691 	iov[1].iov_len = 4 * 512;
6692 
6693 	test_blob_io_readv(blob, channel, iov, 2, 4, 8, blob_op_complete, NULL, ext_api ? &ext_opts : NULL);
6694 
6695 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_aa, 8 * 512) == 0);
6696 	CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0);
6697 
6698 	/* Read eight io_units across multiple clusters
6699 	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 (FFFF ]
6700 	 * cluster1: [ FFFF) 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
6701 	 * payload_read: FFFF FFFF | 0000 0000 ... */
6702 	memset(payload_read, 0x00, sizeof(payload_read));
6703 	iov[0].iov_base = payload_read;
6704 	iov[0].iov_len = 2 * 512;
6705 	iov[1].iov_base = payload_read + 2 * 512;
6706 	iov[1].iov_len = 2 * 512;
6707 	iov[2].iov_base = payload_read + 4 * 512;
6708 	iov[2].iov_len = 2 * 512;
6709 	iov[3].iov_base = payload_read + 6 * 512;
6710 	iov[3].iov_len = 2 * 512;
6711 
6712 	test_blob_io_readv(blob, channel, iov, 4, 28, 8, blob_op_complete, NULL,
6713 			   ext_api ? &ext_opts : NULL);
6714 
6715 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 8 * 512) == 0);
6716 	CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0);
6717 
6718 	/* Read four io_units from second cluster
6719 	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6720 	 * cluster1: [ FFFF 0000 | 00(00 FF)00 | 0000 0000 | 0000 0000 ]
6721 	 * payload_read: 00FF 0000 | 0000 0000 ... */
6722 	memset(payload_read, 0x00, sizeof(payload_read));
6723 	iov[0].iov_base = payload_read;
6724 	iov[0].iov_len = 1 * 512;
6725 	iov[1].iov_base = payload_read + 1 * 512;
6726 	iov[1].iov_len = 3 * 512;
6727 
6728 	test_blob_io_readv(blob, channel, iov, 2, 32 + 10, 4, blob_op_complete, NULL,
6729 			   ext_api ? &ext_opts : NULL);
6730 
6731 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_00, 2 * 512) == 0);
6732 	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 2 * 512) == 0);
6733 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0);
6734 
6735 	/* Read second cluster
6736 	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6737 	 * cluster1: [ (FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ]
6738 	 * payload_read: FFFF 0000 | 0000 FF00 ... */
6739 	memset(payload_read, 0x00, sizeof(payload_read));
6740 	iov[0].iov_base = payload_read;
6741 	iov[0].iov_len = 1 * 512;
6742 	iov[1].iov_base = payload_read + 1 * 512;
6743 	iov[1].iov_len = 2 * 512;
6744 	iov[2].iov_base = payload_read + 3 * 512;
6745 	iov[2].iov_len = 4 * 512;
6746 	iov[3].iov_base = payload_read + 7 * 512;
6747 	iov[3].iov_len = 25 * 512;
6748 
6749 	test_blob_io_readv(blob, channel, iov, 4, 32, 32, blob_op_complete, NULL,
6750 			   ext_api ? &ext_opts : NULL);
6751 
6752 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 4 * 512) == 0);
6753 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 8 * 512) == 0);
6754 	CU_ASSERT(memcmp(payload_read + 12 * 512, payload_ff, 2 * 512) == 0);
6755 	CU_ASSERT(memcmp(payload_read + 14 * 512, payload_00, 18 * 512) == 0);
6756 
6757 	/* Read whole two clusters
6758 	 * cluster0: [ (F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6759 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ] */
6760 	memset(payload_read, 0x00, sizeof(payload_read));
6761 	iov[0].iov_base = payload_read;
6762 	iov[0].iov_len = 1 * 512;
6763 	iov[1].iov_base = payload_read + 1 * 512;
6764 	iov[1].iov_len = 8 * 512;
6765 	iov[2].iov_base = payload_read + 9 * 512;
6766 	iov[2].iov_len = 16 * 512;
6767 	iov[3].iov_base = payload_read + 25 * 512;
6768 	iov[3].iov_len = 39 * 512;
6769 
6770 	test_blob_io_readv(blob, channel, iov, 4, 0, 64, blob_op_complete, NULL,
6771 			   ext_api ? &ext_opts : NULL);
6772 
6773 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
6774 	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0);
6775 	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 512) == 0);
6776 	CU_ASSERT(memcmp(payload_read + 3 * 512, payload_00, 512) == 0);
6777 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_aa, 8 * 512) == 0);
6778 	CU_ASSERT(memcmp(payload_read + 28 * 512, payload_ff, 4 * 512) == 0);
6779 
6780 	CU_ASSERT(memcmp(payload_read + (32 + 0) * 512, payload_ff, 4 * 512) == 0);
6781 	CU_ASSERT(memcmp(payload_read + (32 + 4) * 512, payload_00, 8 * 512) == 0);
6782 	CU_ASSERT(memcmp(payload_read + (32 + 12) * 512, payload_ff, 2 * 512) == 0);
6783 	CU_ASSERT(memcmp(payload_read + (32 + 14) * 512, payload_00, 18 * 512) == 0);
6784 }
6785 
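/*
 * Verify blob I/O on a blobstore with a 512-byte io unit, covering thick and
 * thin provisioned blobs as well as snapshots, clones and an inflated clone.
 */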
6786 static void
6787 blob_io_unit(void)
6788 {
6789 	struct spdk_bs_opts bsopts;
6790 	struct spdk_blob_opts opts;
6791 	struct spdk_blob_store *bs;
6792 	struct spdk_bs_dev *dev;
6793 	struct spdk_blob *blob, *snapshot, *clone;
6794 	spdk_blob_id blobid;
6795 	struct spdk_io_channel *channel;
6796 
6797 	/* Create dev with 512 bytes io unit size */
6798 
6799 	spdk_bs_opts_init(&bsopts, sizeof(bsopts));
6800 	bsopts.cluster_sz = SPDK_BS_PAGE_SIZE * 4;	/* 8 * 4 = 32 io_unit */
6801 	snprintf(bsopts.bstype.bstype, sizeof(bsopts.bstype.bstype), "TESTTYPE");
6802 
6803 	/* Set up the dev with a 512-byte block size */
6804 	dev = init_dev();
6805 	dev->blocklen = 512;
6806 	dev->blockcnt =  DEV_BUFFER_SIZE / dev->blocklen;
6807 
6808 	/* Initialize a new blob store */
6809 	spdk_bs_init(dev, &bsopts, bs_op_with_handle_complete, NULL);
6810 	poll_threads();
6811 	CU_ASSERT(g_bserrno == 0);
6812 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
6813 	bs = g_bs;
6814 
6815 	CU_ASSERT(spdk_bs_get_io_unit_size(bs) == 512);
6816 	channel = spdk_bs_alloc_io_channel(bs);
6817 
6818 	/* Create thick provisioned blob */
6819 	ut_spdk_blob_opts_init(&opts);
6820 	opts.thin_provision = false;
6821 	opts.num_clusters = 32;
6822 
6823 	blob = ut_blob_create_and_open(bs, &opts);
6824 	blobid = spdk_blob_get_id(blob);
6825 
6826 	test_io_write(dev, blob, channel);
6827 	test_io_read(dev, blob, channel);
6828 	test_io_zeroes(dev, blob, channel);
6829 
6830 	test_iov_write(dev, blob, channel, false);
6831 	test_iov_read(dev, blob, channel, false);
6832 	test_io_zeroes(dev, blob, channel);
6833 
6834 	test_iov_write(dev, blob, channel, true);
6835 	test_iov_read(dev, blob, channel, true);
6836 
6837 	test_io_unmap(dev, blob, channel);
6838 
6839 	spdk_blob_close(blob, blob_op_complete, NULL);
6840 	poll_threads();
6841 	CU_ASSERT(g_bserrno == 0);
6842 	blob = NULL;
6843 	g_blob = NULL;
6844 
6845 	/* Create thin provisioned blob */
6846 
6847 	ut_spdk_blob_opts_init(&opts);
6848 	opts.thin_provision = true;
6849 	opts.num_clusters = 32;
6850 
6851 	blob = ut_blob_create_and_open(bs, &opts);
6852 	blobid = spdk_blob_get_id(blob);
6853 
6854 	test_io_write(dev, blob, channel);
6855 	test_io_read(dev, blob, channel);
6856 	test_io_zeroes(dev, blob, channel);
6857 
6858 	test_iov_write(dev, blob, channel, false);
6859 	test_iov_read(dev, blob, channel, false);
6860 	test_io_zeroes(dev, blob, channel);
6861 
6862 	test_iov_write(dev, blob, channel, true);
6863 	test_iov_read(dev, blob, channel, true);
6864 
6865 	/* Create snapshot */
6866 
6867 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
6868 	poll_threads();
6869 	CU_ASSERT(g_bserrno == 0);
6870 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
6871 	blobid = g_blobid;
6872 
6873 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
6874 	poll_threads();
6875 	CU_ASSERT(g_bserrno == 0);
6876 	CU_ASSERT(g_blob != NULL);
6877 	snapshot = g_blob;
6878 
6879 	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
6880 	poll_threads();
6881 	CU_ASSERT(g_bserrno == 0);
6882 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
6883 	blobid = g_blobid;
6884 
6885 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
6886 	poll_threads();
6887 	CU_ASSERT(g_bserrno == 0);
6888 	CU_ASSERT(g_blob != NULL);
6889 	clone = g_blob;
6890 
6891 	test_io_read(dev, blob, channel);
6892 	test_io_read(dev, snapshot, channel);
6893 	test_io_read(dev, clone, channel);
6894 
6895 	test_iov_read(dev, blob, channel, false);
6896 	test_iov_read(dev, snapshot, channel, false);
6897 	test_iov_read(dev, clone, channel, false);
6898 
6899 	test_iov_read(dev, blob, channel, true);
6900 	test_iov_read(dev, snapshot, channel, true);
6901 	test_iov_read(dev, clone, channel, true);
6902 
6903 	/* Inflate clone */
6904 
6905 	spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
6906 	poll_threads();
6907 
6908 	CU_ASSERT(g_bserrno == 0);
6909 
6910 	test_io_read(dev, clone, channel);
6911 
6912 	test_io_unmap(dev, clone, channel);
6913 
6914 	test_iov_write(dev, clone, channel, false);
6915 	test_iov_read(dev, clone, channel, false);
6916 	test_io_unmap(dev, clone, channel);
6917 
6918 	test_iov_write(dev, clone, channel, true);
6919 	test_iov_read(dev, clone, channel, true);
6920 
6921 	spdk_blob_close(blob, blob_op_complete, NULL);
6922 	spdk_blob_close(snapshot, blob_op_complete, NULL);
6923 	spdk_blob_close(clone, blob_op_complete, NULL);
6924 	poll_threads();
6925 	CU_ASSERT(g_bserrno == 0);
6926 	blob = NULL;
6927 	g_blob = NULL;
6928 
6929 	spdk_bs_free_io_channel(channel);
6930 	poll_threads();
6931 
6932 	/* Unload the blob store */
6933 	spdk_bs_unload(bs, bs_op_complete, NULL);
6934 	poll_threads();
6935 	CU_ASSERT(g_bserrno == 0);
6936 	g_bs = NULL;
6937 	g_blob = NULL;
6938 	g_blobid = 0;
6939 }
6940 
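/*
 * Verify that a blobstore whose super block lacks an io_unit_size (as written
 * by older versions) loads with the io unit size defaulting to SPDK_BS_PAGE_SIZE.
 */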
6941 static void
6942 blob_io_unit_compatibility(void)
6943 {
6944 	struct spdk_bs_opts bsopts;
6945 	struct spdk_blob_store *bs;
6946 	struct spdk_bs_dev *dev;
6947 	struct spdk_bs_super_block *super;
6948 
6949 	/* Create dev with 512 bytes io unit size */
6950 
6951 	spdk_bs_opts_init(&bsopts, sizeof(bsopts));
6952 	bsopts.cluster_sz = SPDK_BS_PAGE_SIZE * 4;	/* 8 * 4 = 32 io_unit */
6953 	snprintf(bsopts.bstype.bstype, sizeof(bsopts.bstype.bstype), "TESTTYPE");
6954 
6955 	/* Set up the dev with a 512-byte block size */
6956 	dev = init_dev();
6957 	dev->blocklen = 512;
6958 	dev->blockcnt =  DEV_BUFFER_SIZE / dev->blocklen;
6959 
6960 	/* Initialize a new blob store */
6961 	spdk_bs_init(dev, &bsopts, bs_op_with_handle_complete, NULL);
6962 	poll_threads();
6963 	CU_ASSERT(g_bserrno == 0);
6964 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
6965 	bs = g_bs;
6966 
6967 	CU_ASSERT(spdk_bs_get_io_unit_size(bs) == 512);
6968 
6969 	/* Unload the blob store */
6970 	spdk_bs_unload(bs, bs_op_complete, NULL);
6971 	poll_threads();
6972 	CU_ASSERT(g_bserrno == 0);
6973 
6974 	/* Modify the super block to mimic an older version.
6975 	 * Check that the loaded io unit size equals SPDK_BS_PAGE_SIZE */
6976 	super = (struct spdk_bs_super_block *)&g_dev_buffer[0];
6977 	super->io_unit_size = 0;
6978 	super->crc = blob_md_page_calc_crc(super);
6979 
6980 	dev = init_dev();
6981 	dev->blocklen = 512;
6982 	dev->blockcnt =  DEV_BUFFER_SIZE / dev->blocklen;
6983 
6984 	spdk_bs_load(dev, &bsopts, bs_op_with_handle_complete, NULL);
6985 	poll_threads();
6986 	CU_ASSERT(g_bserrno == 0);
6987 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
6988 	bs = g_bs;
6989 
6990 	CU_ASSERT(spdk_bs_get_io_unit_size(bs) == SPDK_BS_PAGE_SIZE);
6991 
6992 	/* Unload the blob store */
6993 	spdk_bs_unload(bs, bs_op_complete, NULL);
6994 	poll_threads();
6995 	CU_ASSERT(g_bserrno == 0);
6996 
6997 	g_bs = NULL;
6998 	g_blob = NULL;
6999 	g_blobid = 0;
7000 }
7001 
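/*
 * Completion callbacks used by blob_simultaneous_operations() to verify that two
 * outstanding md syncs complete in order: the first completion re-dirties the blob,
 * the second verifies that update and only then sets g_bserrno.
 */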
7002 static void
7003 first_sync_complete(void *cb_arg, int bserrno)
7004 {
7005 	struct spdk_blob *blob = cb_arg;
7006 	int rc;
7007 
7008 	CU_ASSERT(bserrno == 0);
7009 	rc = spdk_blob_set_xattr(blob, "sync", "second", strlen("second") + 1);
7010 	CU_ASSERT(rc == 0);
7011 	CU_ASSERT(g_bserrno == -1);
7012 
7013 	/* Keep g_bserrno at -1; only the
7014 	 * second sync completion should set it to 0. */
7015 }
7016 
7017 static void
7018 second_sync_complete(void *cb_arg, int bserrno)
7019 {
7020 	struct spdk_blob *blob = cb_arg;
7021 	const void *value;
7022 	size_t value_len;
7023 	int rc;
7024 
7025 	CU_ASSERT(bserrno == 0);
7026 
7027 	/* Verify that the first sync completion had a chance to execute */
7028 	rc = spdk_blob_get_xattr_value(blob, "sync", &value, &value_len);
7029 	CU_ASSERT(rc == 0);
7030 	SPDK_CU_ASSERT_FATAL(value != NULL);
7031 	CU_ASSERT(value_len == strlen("second") + 1);
7032 	CU_ASSERT_NSTRING_EQUAL_FATAL(value, "second", value_len);
7033 
7034 	CU_ASSERT(g_bserrno == -1);
7035 	g_bserrno = bserrno;
7036 }
7037 
7038 static void
7039 blob_simultaneous_operations(void)
7040 {
7041 	struct spdk_blob_store *bs = g_bs;
7042 	struct spdk_blob_opts opts;
7043 	struct spdk_blob *blob, *snapshot;
7044 	spdk_blob_id blobid, snapshotid;
7045 	struct spdk_io_channel *channel;
7046 	int rc;
7047 
7048 	channel = spdk_bs_alloc_io_channel(bs);
7049 	SPDK_CU_ASSERT_FATAL(channel != NULL);
7050 
7051 	ut_spdk_blob_opts_init(&opts);
7052 	opts.num_clusters = 10;
7053 
7054 	blob = ut_blob_create_and_open(bs, &opts);
7055 	blobid = spdk_blob_get_id(blob);
7056 
7057 	/* Create a snapshot and try to remove the blob at the same time:
7058 	 * - snapshot should be created successfully
7059 	 * - delete operation should fail with -EBUSY */
7060 	CU_ASSERT(blob->locked_operation_in_progress == false);
7061 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
7062 	CU_ASSERT(blob->locked_operation_in_progress == true);
7063 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
7064 	CU_ASSERT(blob->locked_operation_in_progress == true);
7065 	/* Deletion failure */
7066 	CU_ASSERT(g_bserrno == -EBUSY);
7067 	poll_threads();
7068 	CU_ASSERT(blob->locked_operation_in_progress == false);
7069 	/* Snapshot creation success */
7070 	CU_ASSERT(g_bserrno == 0);
7071 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
7072 
7073 	snapshotid = g_blobid;
7074 
7075 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
7076 	poll_threads();
7077 	CU_ASSERT(g_bserrno == 0);
7078 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
7079 	snapshot = g_blob;
7080 
7081 	/* Inflate the blob and try to remove it at the same time:
7082 	 * - blob should be inflated successfully
7083 	 * - delete operation should fail with -EBUSY */
7084 	CU_ASSERT(blob->locked_operation_in_progress == false);
7085 	spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
7086 	CU_ASSERT(blob->locked_operation_in_progress == true);
7087 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
7088 	CU_ASSERT(blob->locked_operation_in_progress == true);
7089 	/* Deletion failure */
7090 	CU_ASSERT(g_bserrno == -EBUSY);
7091 	poll_threads();
7092 	CU_ASSERT(blob->locked_operation_in_progress == false);
7093 	/* Inflation success */
7094 	CU_ASSERT(g_bserrno == 0);
7095 
7096 	/* Clone the snapshot and try to remove the snapshot at the same time:
7097 	 * - snapshot should be cloned successfully
7098 	 * - delete operation should fail with -EBUSY */
7099 	CU_ASSERT(blob->locked_operation_in_progress == false);
7100 	spdk_bs_create_clone(bs, snapshotid, NULL, blob_op_with_id_complete, NULL);
7101 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
7102 	/* Deletion failure */
7103 	CU_ASSERT(g_bserrno == -EBUSY);
7104 	poll_threads();
7105 	CU_ASSERT(blob->locked_operation_in_progress == false);
7106 	/* Clone created */
7107 	CU_ASSERT(g_bserrno == 0);
7108 
7109 	/* Resize the blob and try to remove it at the same time:
7110 	 * - blob should be resized successfully
7111 	 * - delete operation should fail with -EBUSY */
7112 	CU_ASSERT(blob->locked_operation_in_progress == false);
7113 	spdk_blob_resize(blob, 50, blob_op_complete, NULL);
7114 	CU_ASSERT(blob->locked_operation_in_progress == true);
7115 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
7116 	CU_ASSERT(blob->locked_operation_in_progress == true);
7117 	/* Deletion failure */
7118 	CU_ASSERT(g_bserrno == -EBUSY);
7119 	poll_threads();
7120 	CU_ASSERT(blob->locked_operation_in_progress == false);
7121 	/* Blob resized successfully */
	CU_ASSERT(g_bserrno == 0);
7122 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
7123 	poll_threads();
7124 	CU_ASSERT(g_bserrno == 0);
7125 
7126 	/* Issue two consecutive blob syncs; neither should fail.
7127 	 * Force each sync to actually occur by marking the blob dirty beforehand.
7128 	 * Merely calling spdk_blob_sync_md() is not enough to complete the operation,
7129 	 * since disk I/O is required to complete it. */
7130 	g_bserrno = -1;
7131 
7132 	rc = spdk_blob_set_xattr(blob, "sync", "first", strlen("first") + 1);
7133 	CU_ASSERT(rc == 0);
7134 	spdk_blob_sync_md(blob, first_sync_complete, blob);
7135 	CU_ASSERT(g_bserrno == -1);
7136 
7137 	spdk_blob_sync_md(blob, second_sync_complete, blob);
7138 	CU_ASSERT(g_bserrno == -1);
7139 
7140 	poll_threads();
7141 	CU_ASSERT(g_bserrno == 0);
7142 
7143 	spdk_bs_free_io_channel(channel);
7144 	poll_threads();
7145 
7146 	ut_blob_close_and_delete(bs, snapshot);
7147 	ut_blob_close_and_delete(bs, blob);
7148 }
7149 
7150 static void
7151 blob_persist_test(void)
7152 {
7153 	struct spdk_blob_store *bs = g_bs;
7154 	struct spdk_blob_opts opts;
7155 	struct spdk_blob *blob;
7156 	spdk_blob_id blobid;
7157 	struct spdk_io_channel *channel;
7158 	char *xattr;
7159 	size_t xattr_length;
7160 	int rc;
7161 	uint32_t page_count_clear, page_count_xattr;
7162 	uint64_t poller_iterations;
7163 	bool run_poller;
7164 
7165 	channel = spdk_bs_alloc_io_channel(bs);
7166 	SPDK_CU_ASSERT_FATAL(channel != NULL);
7167 
7168 	ut_spdk_blob_opts_init(&opts);
7169 	opts.num_clusters = 10;
7170 
7171 	blob = ut_blob_create_and_open(bs, &opts);
7172 	blobid = spdk_blob_get_id(blob);
7173 
7174 	/* Save the number of md pages used after creation of the blob.
7175 	 * This count should be restored after the xattr is removed. */
7176 	page_count_clear = spdk_bit_array_count_set(bs->used_md_pages);
7177 	SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_clear);
7178 	SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_clear);
7179 
7180 	/* Add xattr with maximum length of descriptor to exceed single metadata page. */
7181 	xattr_length = SPDK_BS_MAX_DESC_SIZE - sizeof(struct spdk_blob_md_descriptor_xattr) -
7182 		       strlen("large_xattr");
7183 	xattr = calloc(xattr_length, sizeof(char));
7184 	SPDK_CU_ASSERT_FATAL(xattr != NULL);
7185 
7186 	rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
7187 	SPDK_CU_ASSERT_FATAL(rc == 0);
7188 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
7189 	poll_threads();
7190 	SPDK_CU_ASSERT_FATAL(g_bserrno == 0);
7191 
7192 	/* Save the number of md pages used after adding the large xattr */
7193 	page_count_xattr = spdk_bit_array_count_set(bs->used_md_pages);
7194 	SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_xattr);
7195 	SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_xattr);
7196 
7197 	/* Add an xattr to the blob and sync it. While the sync is in progress, remove the xattr and sync again.
7198 	 * Repeat with an increasing number of poller iterations for the first sync until it manages to complete.
7199 	 * The expectation is that after the second sync completes, no xattr is saved in the metadata. */
7200 	poller_iterations = 1;
7201 	run_poller = true;
7202 	while (run_poller) {
7203 		rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
7204 		SPDK_CU_ASSERT_FATAL(rc == 0);
7205 		g_bserrno = -1;
7206 		spdk_blob_sync_md(blob, blob_op_complete, NULL);
7207 		poll_thread_times(0, poller_iterations);
7208 		if (g_bserrno == 0) {
7209 			/* Poller iteration count was high enough for first sync to complete.
7210 			 * Verify that blob takes up enough of md_pages to store the xattr. */
7211 			SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_xattr);
7212 			SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_xattr);
7213 			SPDK_CU_ASSERT_FATAL(spdk_bit_array_count_set(bs->used_md_pages) == page_count_xattr);
7214 			run_poller = false;
7215 		}
7216 		rc = spdk_blob_remove_xattr(blob, "large_xattr");
7217 		SPDK_CU_ASSERT_FATAL(rc == 0);
7218 		spdk_blob_sync_md(blob, blob_op_complete, NULL);
7219 		poll_threads();
7220 		SPDK_CU_ASSERT_FATAL(g_bserrno == 0);
7221 		SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_clear);
7222 		SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_clear);
7223 		SPDK_CU_ASSERT_FATAL(spdk_bit_array_count_set(bs->used_md_pages) == page_count_clear);
7224 
7225 		/* Reload bs and re-open blob to verify that xattr was not persisted. */
7226 		spdk_blob_close(blob, blob_op_complete, NULL);
7227 		poll_threads();
7228 		CU_ASSERT(g_bserrno == 0);
7229 
7230 		ut_bs_reload(&bs, NULL);
7231 
7232 		spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
7233 		poll_threads();
7234 		CU_ASSERT(g_bserrno == 0);
7235 		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
7236 		blob = g_blob;
7237 
7238 		rc = spdk_blob_get_xattr_value(blob, "large_xattr", (const void **)&xattr, &xattr_length);
7239 		SPDK_CU_ASSERT_FATAL(rc == -ENOENT);
7240 
7241 		poller_iterations++;
7242 		/* Stop at a high iteration count to prevent an infinite loop.
7243 		 * This value should be enough for the first md sync to complete in any case. */
7244 		SPDK_CU_ASSERT_FATAL(poller_iterations < 50);
7245 	}
7246 
7247 	free(xattr);
7248 
7249 	ut_blob_close_and_delete(bs, blob);
7250 
7251 	spdk_bs_free_io_channel(channel);
7252 	poll_threads();
7253 }
7254 
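/*
 * Create two snapshots of a blob, decouple the newer snapshot from its parent and
 * verify that its clusters were copied; repeated for both blob/snapshot deletion orders.
 */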
7255 static void
7256 blob_decouple_snapshot(void)
7257 {
7258 	struct spdk_blob_store *bs = g_bs;
7259 	struct spdk_blob_opts opts;
7260 	struct spdk_blob *blob, *snapshot1, *snapshot2;
7261 	struct spdk_io_channel *channel;
7262 	spdk_blob_id blobid, snapshotid;
7263 	uint64_t cluster;
7264 
7265 	for (int delete_snapshot_first = 0; delete_snapshot_first <= 1; delete_snapshot_first++) {
7266 		channel = spdk_bs_alloc_io_channel(bs);
7267 		SPDK_CU_ASSERT_FATAL(channel != NULL);
7268 
7269 		ut_spdk_blob_opts_init(&opts);
7270 		opts.num_clusters = 10;
7271 		opts.thin_provision = false;
7272 
7273 		blob = ut_blob_create_and_open(bs, &opts);
7274 		blobid = spdk_blob_get_id(blob);
7275 
7276 		/* Create first snapshot */
7277 		CU_ASSERT_EQUAL(_get_snapshots_count(bs), 0);
7278 		spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
7279 		poll_threads();
7280 		CU_ASSERT(g_bserrno == 0);
7281 		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
7282 		CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
7283 		snapshotid = g_blobid;
7284 
7285 		spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
7286 		poll_threads();
7287 		CU_ASSERT(g_bserrno == 0);
7288 		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
7289 		snapshot1 = g_blob;
7290 
7291 		/* Create the second one */
7292 		CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
7293 		spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
7294 		poll_threads();
7295 		CU_ASSERT(g_bserrno == 0);
7296 		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
7297 		CU_ASSERT_EQUAL(_get_snapshots_count(bs), 2);
7298 		snapshotid = g_blobid;
7299 
7300 		spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
7301 		poll_threads();
7302 		CU_ASSERT(g_bserrno == 0);
7303 		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
7304 		snapshot2 = g_blob;
7305 		CU_ASSERT_EQUAL(spdk_blob_get_parent_snapshot(bs, snapshot2->id), snapshot1->id);
7306 
7307 		/* Now decouple the second snapshot forcing it to copy the written clusters */
7308 		spdk_bs_blob_decouple_parent(bs, channel, snapshot2->id, blob_op_complete, NULL);
7309 		poll_threads();
7310 		CU_ASSERT(g_bserrno == 0);
7311 
7312 		/* Verify that the snapshot has been decoupled and that the clusters have been copied */
7313 		CU_ASSERT_EQUAL(spdk_blob_get_parent_snapshot(bs, snapshot2->id), SPDK_BLOBID_INVALID);
7314 		for (cluster = 0; cluster < snapshot2->active.num_clusters; ++cluster) {
7315 			CU_ASSERT_NOT_EQUAL(snapshot2->active.clusters[cluster], 0);
7316 			CU_ASSERT_NOT_EQUAL(snapshot2->active.clusters[cluster],
7317 					    snapshot1->active.clusters[cluster]);
7318 		}
7319 
7320 		spdk_bs_free_io_channel(channel);
7321 
7322 		if (delete_snapshot_first) {
7323 			ut_blob_close_and_delete(bs, snapshot2);
7324 			ut_blob_close_and_delete(bs, snapshot1);
7325 			ut_blob_close_and_delete(bs, blob);
7326 		} else {
7327 			ut_blob_close_and_delete(bs, blob);
7328 			ut_blob_close_and_delete(bs, snapshot2);
7329 			ut_blob_close_and_delete(bs, snapshot1);
7330 		}
7331 		poll_threads();
7332 	}
7333 }
7334 
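/*
 * Verify spdk_blob_get_next_allocated_io_unit() and
 * spdk_blob_get_next_unallocated_io_unit() on a thin provisioned blob with
 * clusters allocated only where writes landed.
 */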
7335 static void
7336 blob_seek_io_unit(void)
7337 {
7338 	struct spdk_blob_store *bs = g_bs;
7339 	struct spdk_blob *blob;
7340 	struct spdk_io_channel *channel;
7341 	struct spdk_blob_opts opts;
7342 	uint64_t free_clusters;
7343 	uint8_t payload[10 * 4096];
7344 	uint64_t offset;
7345 	uint64_t io_unit, io_units_per_cluster;
7346 
7347 	free_clusters = spdk_bs_free_cluster_count(bs);
7348 
7349 	channel = spdk_bs_alloc_io_channel(bs);
7350 	CU_ASSERT(channel != NULL);
7351 
7352 	/* Set blob as thin provisioned */
7353 	ut_spdk_blob_opts_init(&opts);
7354 	opts.thin_provision = true;
7355 
7356 	/* Create a blob */
7357 	blob = ut_blob_create_and_open(bs, &opts);
7358 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
7359 
7360 	io_units_per_cluster = bs_io_units_per_cluster(blob);
7361 
7362 	/* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */
7363 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
7364 	poll_threads();
7365 	CU_ASSERT(g_bserrno == 0);
7366 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
7367 	CU_ASSERT(blob->active.num_clusters == 5);
7368 
7369 	/* Write at the beginning of first cluster */
7370 	offset = 0;
7371 	spdk_blob_io_write(blob, channel, payload, offset, 1, blob_op_complete, NULL);
7372 	poll_threads();
7373 	CU_ASSERT(g_bserrno == 0);
7374 
7375 	io_unit = spdk_blob_get_next_allocated_io_unit(blob, 0);
7376 	CU_ASSERT(io_unit == offset);
7377 
7378 	io_unit = spdk_blob_get_next_unallocated_io_unit(blob, 0);
7379 	CU_ASSERT(io_unit == io_units_per_cluster);
7380 
7381 	/* Write in the middle of third cluster */
7382 	offset = 2 * io_units_per_cluster + io_units_per_cluster / 2;
7383 	spdk_blob_io_write(blob, channel, payload, offset, 1, blob_op_complete, NULL);
7384 	poll_threads();
7385 	CU_ASSERT(g_bserrno == 0);
7386 
7387 	io_unit = spdk_blob_get_next_allocated_io_unit(blob, io_units_per_cluster);
7388 	CU_ASSERT(io_unit == 2 * io_units_per_cluster);
7389 
7390 	io_unit = spdk_blob_get_next_unallocated_io_unit(blob, 2 * io_units_per_cluster);
7391 	CU_ASSERT(io_unit == 3 * io_units_per_cluster);
7392 
7393 	/* Write at the end of last cluster */
7394 	offset = 5 * io_units_per_cluster - 1;
7395 	spdk_blob_io_write(blob, channel, payload, offset, 1, blob_op_complete, NULL);
7396 	poll_threads();
7397 	CU_ASSERT(g_bserrno == 0);
7398 
7399 	io_unit = spdk_blob_get_next_allocated_io_unit(blob, 3 * io_units_per_cluster);
7400 	CU_ASSERT(io_unit == 4 * io_units_per_cluster);
7401 
7402 	io_unit = spdk_blob_get_next_unallocated_io_unit(blob, 4 * io_units_per_cluster);
7403 	CU_ASSERT(io_unit == UINT64_MAX);
7404 
7405 	spdk_bs_free_io_channel(channel);
7406 	poll_threads();
7407 
7408 	ut_blob_close_and_delete(bs, blob);
7409 }
7410 
7411 static void
7412 blob_esnap_create(void)
7413 {
7414 	struct spdk_blob_store	*bs = g_bs;
7415 	struct spdk_bs_opts	bs_opts;
7416 	struct ut_esnap_opts	esnap_opts;
7417 	struct spdk_blob_opts	opts;
7418 	struct spdk_blob	*blob;
7419 	uint32_t		cluster_sz, block_sz;
7420 	const uint32_t		esnap_num_clusters = 4;
7421 	uint64_t		esnap_num_blocks;
7422 	uint32_t		sz;
7423 	spdk_blob_id		blobid;
7424 	uint32_t		bs_ctx_count;
7425 
7426 	cluster_sz = spdk_bs_get_cluster_size(bs);
7427 	block_sz = spdk_bs_get_io_unit_size(bs);
7428 	esnap_num_blocks = cluster_sz * esnap_num_clusters / block_sz;
7429 
7430 	/* Create a normal blob and verify it is not an esnap clone. */
7431 	ut_spdk_blob_opts_init(&opts);
7432 	blob = ut_blob_create_and_open(bs, &opts);
7433 	CU_ASSERT(!blob_is_esnap_clone(blob));
7434 	ut_blob_close_and_delete(bs, blob);
7435 
7436 	/* Create an esnap clone blob then verify it is an esnap clone and has the right size */
7437 	ut_spdk_blob_opts_init(&opts);
7438 	ut_esnap_opts_init(block_sz, esnap_num_blocks, __func__, &esnap_opts);
7439 	opts.esnap_id = &esnap_opts;
7440 	opts.esnap_id_len = sizeof(esnap_opts);
7441 	opts.num_clusters = esnap_num_clusters;
7442 	blob = ut_blob_create_and_open(bs, &opts);
7443 	SPDK_CU_ASSERT_FATAL(blob_is_esnap_clone(blob));
7444 	sz = spdk_blob_get_num_clusters(blob);
7445 	CU_ASSERT(sz == esnap_num_clusters);
7446 	ut_blob_close_and_delete(bs, blob);
7447 
7448 	/* Create an esnap clone without the size and verify it can be grown */
7449 	ut_spdk_blob_opts_init(&opts);
7450 	ut_esnap_opts_init(block_sz, esnap_num_blocks, __func__, &esnap_opts);
7451 	opts.esnap_id = &esnap_opts;
7452 	opts.esnap_id_len = sizeof(esnap_opts);
7453 	blob = ut_blob_create_and_open(bs, &opts);
7454 	SPDK_CU_ASSERT_FATAL(blob_is_esnap_clone(blob));
7455 	sz = spdk_blob_get_num_clusters(blob);
7456 	CU_ASSERT(sz == 0);
7457 	spdk_blob_resize(blob, 1, blob_op_complete, NULL);
7458 	poll_threads();
7459 	CU_ASSERT(g_bserrno == 0);
7460 	sz = spdk_blob_get_num_clusters(blob);
7461 	CU_ASSERT(sz == 1);
7462 	spdk_blob_resize(blob, esnap_num_clusters, blob_op_complete, NULL);
7463 	poll_threads();
7464 	CU_ASSERT(g_bserrno == 0);
7465 	sz = spdk_blob_get_num_clusters(blob);
7466 	CU_ASSERT(sz == esnap_num_clusters);
7467 	spdk_blob_resize(blob, esnap_num_clusters + 1, blob_op_complete, NULL);
7468 	poll_threads();
7469 	CU_ASSERT(g_bserrno == 0);
7470 	sz = spdk_blob_get_num_clusters(blob);
7471 	CU_ASSERT(sz == esnap_num_clusters + 1);
7472 
7473 	/* Reload the blobstore and be sure that the blob can be opened. */
7474 	blobid = spdk_blob_get_id(blob);
7475 	spdk_blob_close(blob, blob_op_complete, NULL);
7476 	poll_threads();
7477 	CU_ASSERT(g_bserrno == 0);
7478 	g_blob = NULL;
7479 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
7480 	bs_opts.esnap_bs_dev_create = ut_esnap_create;
7481 	ut_bs_reload(&bs, &bs_opts);
7482 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
7483 	poll_threads();
7484 	CU_ASSERT(g_bserrno == 0);
7485 	CU_ASSERT(g_blob != NULL);
7486 	blob = g_blob;
7487 	SPDK_CU_ASSERT_FATAL(blob_is_esnap_clone(blob));
7488 	sz = spdk_blob_get_num_clusters(blob);
7489 	CU_ASSERT(sz == esnap_num_clusters + 1);
7490 
7491 	/* Reload the blobstore without esnap_bs_dev_create: should fail to open blob. */
7492 	spdk_blob_close(blob, blob_op_complete, NULL);
7493 	poll_threads();
7494 	CU_ASSERT(g_bserrno == 0);
7495 	g_blob = NULL;
7496 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
7497 	ut_bs_reload(&bs, &bs_opts);
7498 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
7499 	poll_threads();
7500 	CU_ASSERT(g_bserrno != 0);
7501 	CU_ASSERT(g_blob == NULL);
7502 
7503 	/* Reload the blobstore with ctx set and verify it is passed to the esnap create callback */
7504 	bs_ctx_count = 0;
7505 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
7506 	bs_opts.esnap_bs_dev_create = ut_esnap_create_with_count;
7507 	bs_opts.esnap_ctx = &bs_ctx_count;
7508 	ut_bs_reload(&bs, &bs_opts);
7509 	/* Loading the blobstore triggers the esnap to be loaded */
7510 	CU_ASSERT(bs_ctx_count == 1);
7511 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
7512 	poll_threads();
7513 	CU_ASSERT(g_bserrno == 0);
7514 	CU_ASSERT(g_blob != NULL);
7515 	/* Opening the blob also triggers the esnap to be loaded */
7516 	CU_ASSERT(bs_ctx_count == 2);
7517 	blob = g_blob;
7518 	SPDK_CU_ASSERT_FATAL(blob_is_esnap_clone(blob));
7519 	sz = spdk_blob_get_num_clusters(blob);
7520 	CU_ASSERT(sz == esnap_num_clusters + 1);
7521 	spdk_blob_close(blob, blob_op_complete, NULL);
7522 	poll_threads();
7523 	CU_ASSERT(g_bserrno == 0);
7524 	g_blob = NULL;
7525 }
7526 
7527 static void
7528 freeze_done(void *cb_arg, int bserrno)
7529 {
7530 	uint32_t *freeze_cnt = cb_arg;
7531 
7532 	CU_ASSERT(bserrno == 0);
7533 	(*freeze_cnt)++;
7534 }
7535 
7536 static void
7537 unfreeze_done(void *cb_arg, int bserrno)
7538 {
7539 	uint32_t *unfreeze_cnt = cb_arg;
7540 
7541 	CU_ASSERT(bserrno == 0);
7542 	(*unfreeze_cnt)++;
7543 }
7544 
7545 static void
7546 blob_nested_freezes(void)
7547 {
7548 	struct spdk_blob_store *bs = g_bs;
7549 	struct spdk_blob *blob;
7550 	struct spdk_io_channel *channel[2];
7551 	struct spdk_blob_opts opts;
7552 	uint32_t freeze_cnt, unfreeze_cnt;
7553 	int i;
7554 
7555 	for (i = 0; i < 2; i++) {
7556 		set_thread(i);
7557 		channel[i] = spdk_bs_alloc_io_channel(bs);
7558 		SPDK_CU_ASSERT_FATAL(channel[i] != NULL);
7559 	}
7560 
7561 	set_thread(0);
7562 
7563 	ut_spdk_blob_opts_init(&opts);
7564 	blob = ut_blob_create_and_open(bs, &opts);
7565 
7566 	/* First just test a single freeze/unfreeze. */
7567 	freeze_cnt = 0;
7568 	unfreeze_cnt = 0;
7569 	CU_ASSERT(blob->frozen_refcnt == 0);
7570 	blob_freeze_io(blob, freeze_done, &freeze_cnt);
7571 	CU_ASSERT(blob->frozen_refcnt == 1);
7572 	CU_ASSERT(freeze_cnt == 0);
7573 	poll_threads();
7574 	CU_ASSERT(freeze_cnt == 1);
7575 	blob_unfreeze_io(blob, unfreeze_done, &unfreeze_cnt);
7576 	CU_ASSERT(blob->frozen_refcnt == 0);
7577 	CU_ASSERT(unfreeze_cnt == 0);
7578 	poll_threads();
7579 	CU_ASSERT(unfreeze_cnt == 1);
7580 
7581 	/* Now nest multiple freeze/unfreeze operations.  We should
7582 	 * expect a callback for each operation, but only after
7583 	 * the threads have been polled to ensure a for_each_channel()
7584 	 * was executed.
7585 	 */
7586 	freeze_cnt = 0;
7587 	unfreeze_cnt = 0;
7588 	CU_ASSERT(blob->frozen_refcnt == 0);
7589 	blob_freeze_io(blob, freeze_done, &freeze_cnt);
7590 	CU_ASSERT(blob->frozen_refcnt == 1);
7591 	CU_ASSERT(freeze_cnt == 0);
7592 	blob_freeze_io(blob, freeze_done, &freeze_cnt);
7593 	CU_ASSERT(blob->frozen_refcnt == 2);
7594 	CU_ASSERT(freeze_cnt == 0);
7595 	poll_threads();
7596 	CU_ASSERT(freeze_cnt == 2);
7597 	blob_unfreeze_io(blob, unfreeze_done, &unfreeze_cnt);
7598 	CU_ASSERT(blob->frozen_refcnt == 1);
7599 	CU_ASSERT(unfreeze_cnt == 0);
7600 	blob_unfreeze_io(blob, unfreeze_done, &unfreeze_cnt);
7601 	CU_ASSERT(blob->frozen_refcnt == 0);
7602 	CU_ASSERT(unfreeze_cnt == 0);
7603 	poll_threads();
7604 	CU_ASSERT(unfreeze_cnt == 2);
7605 
7606 	for (i = 0; i < 2; i++) {
7607 		set_thread(i);
7608 		spdk_bs_free_io_channel(channel[i]);
7609 	}
7610 	set_thread(0);
7611 	ut_blob_close_and_delete(bs, blob);
7612 
7613 	poll_threads();
7614 	g_blob = NULL;
7615 	g_blobid = 0;
7616 }
7617 
7618 static void
7619 blob_ext_md_pages(void)
7620 {
7621 	struct spdk_blob_store *bs;
7622 	struct spdk_bs_dev *dev;
7623 	struct spdk_blob *blob;
7624 	struct spdk_blob_opts opts;
7625 	struct spdk_bs_opts bs_opts;
7626 	uint64_t free_clusters;
7627 
7628 	dev = init_dev();
7629 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
7630 	snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");
7631 	/* Issue #2932 was a bug in how we use bs_allocate_cluster() during resize.
7632 	 * It requires num_md_pages that is much smaller than the number of clusters.
7633 	 * Make sure we can create a blob that uses all of the free clusters.
7634 	 */
7635 	bs_opts.cluster_sz = 65536;
7636 	bs_opts.num_md_pages = 16;
7637 
7638 	/* Initialize a new blob store */
7639 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
7640 	poll_threads();
7641 	CU_ASSERT(g_bserrno == 0);
7642 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
7643 	bs = g_bs;
7644 
7645 	free_clusters = spdk_bs_free_cluster_count(bs);
7646 
7647 	ut_spdk_blob_opts_init(&opts);
7648 	opts.num_clusters = free_clusters;
7649 
7650 	blob = ut_blob_create_and_open(bs, &opts);
7651 	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
7652 	CU_ASSERT(g_bserrno == 0);
7653 
7654 	spdk_bs_unload(bs, bs_op_complete, NULL);
7655 	poll_threads();
7656 	CU_ASSERT(g_bserrno == 0);
7657 	g_bs = NULL;
7658 }
7659 
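/* Suite setup: zero the backing device and initialize a fresh blobstore with
 * default options (also reused by suite_blob_setup()). */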
7660 static void
7661 suite_bs_setup(void)
7662 {
7663 	struct spdk_bs_dev *dev;
7664 
7665 	dev = init_dev();
7666 	memset(g_dev_buffer, 0, DEV_BUFFER_SIZE);
7667 	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
7668 	poll_threads();
7669 	CU_ASSERT(g_bserrno == 0);
7670 	CU_ASSERT(g_bs != NULL);
7671 }
7672 
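/* Setup for the "blob_esnap_bs" suite: blobstore with 16 KiB clusters and the
 * ut_esnap_create callback registered for external snapshot devices. */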
7673 static void
7674 suite_esnap_bs_setup(void)
7675 {
7676 	struct spdk_bs_dev	*dev;
7677 	struct spdk_bs_opts	bs_opts;
7678 
7679 	dev = init_dev();
7680 	memset(g_dev_buffer, 0, DEV_BUFFER_SIZE);
7681 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
7682 	bs_opts.cluster_sz = 16 * 1024;
7683 	bs_opts.esnap_bs_dev_create = ut_esnap_create;
7684 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
7685 	poll_threads();
7686 	CU_ASSERT(g_bserrno == 0);
7687 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
7688 }
7689 
7690 static void
7691 suite_bs_cleanup(void)
7692 {
7693 	spdk_bs_unload(g_bs, bs_op_complete, NULL);
7694 	poll_threads();
7695 	CU_ASSERT(g_bserrno == 0);
7696 	g_bs = NULL;
7697 	memset(g_dev_buffer, 0, DEV_BUFFER_SIZE);
7698 }
7699 
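/* Create a blob (using default ut opts when blob_opts is NULL), open it and return
 * the open handle; g_blob and g_bserrno are reset before returning. */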
7700 static struct spdk_blob *
7701 ut_blob_create_and_open(struct spdk_blob_store *bs, struct spdk_blob_opts *blob_opts)
7702 {
7703 	struct spdk_blob *blob;
7704 	struct spdk_blob_opts create_blob_opts;
7705 	spdk_blob_id blobid;
7706 
7707 	if (blob_opts == NULL) {
7708 		ut_spdk_blob_opts_init(&create_blob_opts);
7709 		blob_opts = &create_blob_opts;
7710 	}
7711 
7712 	spdk_bs_create_blob_ext(bs, blob_opts, blob_op_with_id_complete, NULL);
7713 	poll_threads();
7714 	CU_ASSERT(g_bserrno == 0);
7715 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
7716 	blobid = g_blobid;
7717 	g_blobid = -1;
7718 
7719 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
7720 	poll_threads();
7721 	CU_ASSERT(g_bserrno == 0);
7722 	CU_ASSERT(g_blob != NULL);
7723 	blob = g_blob;
7724 
7725 	g_blob = NULL;
7726 	g_bserrno = -1;
7727 
7728 	return blob;
7729 }
7730 
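/* Close an open blob and then delete it from the blobstore, asserting that both
 * operations succeed. */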
7731 static void
7732 ut_blob_close_and_delete(struct spdk_blob_store *bs, struct spdk_blob *blob)
7733 {
7734 	spdk_blob_id blobid = spdk_blob_get_id(blob);
7735 
7736 	spdk_blob_close(blob, blob_op_complete, NULL);
7737 	poll_threads();
7738 	CU_ASSERT(g_bserrno == 0);
7739 	g_blob = NULL;
7740 
7741 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
7742 	poll_threads();
7743 	CU_ASSERT(g_bserrno == 0);
7744 	g_bserrno = -1;
7745 }
7746 
7747 static void
7748 suite_blob_setup(void)
7749 {
7750 	suite_bs_setup();
7751 	CU_ASSERT(g_bs != NULL);
7752 
7753 	g_blob = ut_blob_create_and_open(g_bs, NULL);
7754 	CU_ASSERT(g_blob != NULL);
7755 }
7756 
7757 static void
7758 suite_blob_cleanup(void)
7759 {
7760 	ut_blob_close_and_delete(g_bs, g_blob);
7761 	CU_ASSERT(g_blob == NULL);
7762 
7763 	suite_bs_cleanup();
7764 	CU_ASSERT(g_bs == NULL);
7765 }
7766 
7767 int
7768 main(int argc, char **argv)
7769 {
7770 	CU_pSuite	suite, suite_bs, suite_blob, suite_esnap_bs;
7771 	unsigned int	num_failures;
7772 
7773 	CU_set_error_action(CUEA_ABORT);
7774 	CU_initialize_registry();
7775 
7776 	suite = CU_add_suite("blob", NULL, NULL);
7777 	suite_bs = CU_add_suite_with_setup_and_teardown("blob_bs", NULL, NULL,
7778 			suite_bs_setup, suite_bs_cleanup);
7779 	suite_blob = CU_add_suite_with_setup_and_teardown("blob_blob", NULL, NULL,
7780 			suite_blob_setup, suite_blob_cleanup);
7781 	suite_esnap_bs = CU_add_suite_with_setup_and_teardown("blob_esnap_bs", NULL, NULL,
7782 			 suite_esnap_bs_setup,
7783 			 suite_bs_cleanup);
7784 
7785 	CU_ADD_TEST(suite, blob_init);
7786 	CU_ADD_TEST(suite_bs, blob_open);
7787 	CU_ADD_TEST(suite_bs, blob_create);
7788 	CU_ADD_TEST(suite_bs, blob_create_loop);
7789 	CU_ADD_TEST(suite_bs, blob_create_fail);
7790 	CU_ADD_TEST(suite_bs, blob_create_internal);
7791 	CU_ADD_TEST(suite_bs, blob_create_zero_extent);
7792 	CU_ADD_TEST(suite, blob_thin_provision);
7793 	CU_ADD_TEST(suite_bs, blob_snapshot);
7794 	CU_ADD_TEST(suite_bs, blob_clone);
7795 	CU_ADD_TEST(suite_bs, blob_inflate);
7796 	CU_ADD_TEST(suite_bs, blob_delete);
7797 	CU_ADD_TEST(suite_bs, blob_resize_test);
7798 	CU_ADD_TEST(suite, blob_read_only);
7799 	CU_ADD_TEST(suite_bs, channel_ops);
7800 	CU_ADD_TEST(suite_bs, blob_super);
7801 	CU_ADD_TEST(suite_blob, blob_write);
7802 	CU_ADD_TEST(suite_blob, blob_read);
7803 	CU_ADD_TEST(suite_blob, blob_rw_verify);
7804 	CU_ADD_TEST(suite_bs, blob_rw_verify_iov);
7805 	CU_ADD_TEST(suite_blob, blob_rw_verify_iov_nomem);
7806 	CU_ADD_TEST(suite_blob, blob_rw_iov_read_only);
7807 	CU_ADD_TEST(suite_bs, blob_unmap);
7808 	CU_ADD_TEST(suite_bs, blob_iter);
7809 	CU_ADD_TEST(suite_blob, blob_xattr);
7810 	CU_ADD_TEST(suite_bs, blob_parse_md);
7811 	CU_ADD_TEST(suite, bs_load);
7812 	CU_ADD_TEST(suite_bs, bs_load_pending_removal);
7813 	CU_ADD_TEST(suite, bs_load_custom_cluster_size);
7814 	CU_ADD_TEST(suite, bs_load_after_failed_grow);
7815 	CU_ADD_TEST(suite_bs, bs_unload);
7816 	CU_ADD_TEST(suite, bs_cluster_sz);
7817 	CU_ADD_TEST(suite_bs, bs_usable_clusters);
7818 	CU_ADD_TEST(suite, bs_resize_md);
7819 	CU_ADD_TEST(suite, bs_destroy);
7820 	CU_ADD_TEST(suite, bs_type);
7821 	CU_ADD_TEST(suite, bs_super_block);
7822 	CU_ADD_TEST(suite, bs_test_recover_cluster_count);
7823 	CU_ADD_TEST(suite, bs_test_grow);
7824 	CU_ADD_TEST(suite, blob_serialize_test);
7825 	CU_ADD_TEST(suite_bs, blob_crc);
7826 	CU_ADD_TEST(suite, super_block_crc);
7827 	CU_ADD_TEST(suite_blob, blob_dirty_shutdown);
7828 	CU_ADD_TEST(suite_bs, blob_flags);
7829 	CU_ADD_TEST(suite_bs, bs_version);
7830 	CU_ADD_TEST(suite_bs, blob_set_xattrs_test);
7831 	CU_ADD_TEST(suite_bs, blob_thin_prov_alloc);
7832 	CU_ADD_TEST(suite_bs, blob_insert_cluster_msg_test);
7833 	CU_ADD_TEST(suite_bs, blob_thin_prov_rw);
7834 	CU_ADD_TEST(suite, blob_thin_prov_write_count_io);
7835 	CU_ADD_TEST(suite_bs, blob_thin_prov_rle);
7836 	CU_ADD_TEST(suite_bs, blob_thin_prov_rw_iov);
7837 	CU_ADD_TEST(suite, bs_load_iter_test);
7838 	CU_ADD_TEST(suite_bs, blob_snapshot_rw);
7839 	CU_ADD_TEST(suite_bs, blob_snapshot_rw_iov);
7840 	CU_ADD_TEST(suite, blob_relations);
7841 	CU_ADD_TEST(suite, blob_relations2);
7842 	CU_ADD_TEST(suite, blob_relations3);
7843 	CU_ADD_TEST(suite, blobstore_clean_power_failure);
7844 	CU_ADD_TEST(suite, blob_delete_snapshot_power_failure);
7845 	CU_ADD_TEST(suite, blob_create_snapshot_power_failure);
7846 	CU_ADD_TEST(suite_bs, blob_inflate_rw);
7847 	CU_ADD_TEST(suite_bs, blob_snapshot_freeze_io);
7848 	CU_ADD_TEST(suite_bs, blob_operation_split_rw);
7849 	CU_ADD_TEST(suite_bs, blob_operation_split_rw_iov);
7850 	CU_ADD_TEST(suite, blob_io_unit);
7851 	CU_ADD_TEST(suite, blob_io_unit_compatibility);
7852 	CU_ADD_TEST(suite_bs, blob_simultaneous_operations);
7853 	CU_ADD_TEST(suite_bs, blob_persist_test);
7854 	CU_ADD_TEST(suite_bs, blob_decouple_snapshot);
7855 	CU_ADD_TEST(suite_bs, blob_seek_io_unit);
7856 	CU_ADD_TEST(suite_esnap_bs, blob_esnap_create);
7857 	CU_ADD_TEST(suite_bs, blob_nested_freezes);
7858 	CU_ADD_TEST(suite, blob_ext_md_pages);
7859 
7860 	allocate_threads(2);
7861 	set_thread(0);
7862 
7863 	g_dev_buffer = calloc(1, DEV_BUFFER_SIZE);
7864 
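	/* Run the whole registry four times: with and without extent tables, first with
	 * dev copy support (g_dev_copy_enabled) disabled and then with it enabled. */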
7865 	g_dev_copy_enabled = false;
7866 	CU_basic_set_mode(CU_BRM_VERBOSE);
7867 	g_use_extent_table = false;
7868 	CU_basic_run_tests();
7869 	num_failures = CU_get_number_of_failures();
7870 	g_use_extent_table = true;
7871 	CU_basic_run_tests();
7872 	num_failures += CU_get_number_of_failures();
7873 
7874 	g_dev_copy_enabled = true;
7875 	CU_basic_set_mode(CU_BRM_VERBOSE);
7876 	g_use_extent_table = false;
7877 	CU_basic_run_tests();
7878 	num_failures += CU_get_number_of_failures();
7879 	g_use_extent_table = true;
7880 	CU_basic_run_tests();
7881 	num_failures += CU_get_number_of_failures();
7882 	CU_cleanup_registry();
7883 
7884 	free(g_dev_buffer);
7885 
7886 	free_threads();
7887 
7888 	return num_failures;
7889 }
7890