xref: /spdk/test/unit/lib/blob/blob.c/blob_ut.c (revision b47cee6c96303f16c01318884c2969cf855c49e9)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2017 Intel Corporation.
3  *   All rights reserved.
4  *   Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
5  */
6 
7 #include "spdk/stdinc.h"
8 
9 #include "spdk_cunit.h"
10 #include "spdk/blob.h"
11 #include "spdk/string.h"
12 
13 #include "common/lib/ut_multithread.c"
14 #include "../bs_dev_common.c"
15 #include "thread/thread.c"
16 #include "blob/blobstore.c"
17 #include "blob/request.c"
18 #include "blob/zeroes.c"
19 #include "blob/blob_bs_dev.c"
20 #include "esnap_dev.c"
21 
22 struct spdk_blob_store *g_bs;
23 spdk_blob_id g_blobid;
24 struct spdk_blob *g_blob, *g_blob2;
25 int g_bserrno, g_bserrno2;
26 struct spdk_xattr_names *g_names;
27 int g_done;
28 char *g_xattr_names[] = {"first", "second", "third"};
29 char *g_xattr_values[] = {"one", "two", "three"};
30 uint64_t g_ctx = 1729;
31 bool g_use_extent_table = false;
32 
33 struct spdk_bs_super_block_ver1 {
34 	uint8_t		signature[8];
35 	uint32_t        version;
36 	uint32_t        length;
37 	uint32_t	clean; /* If there was a clean shutdown, this is 1. */
38 	spdk_blob_id	super_blob;
39 
40 	uint32_t	cluster_size; /* In bytes */
41 
42 	uint32_t	used_page_mask_start; /* Offset from beginning of disk, in pages */
43 	uint32_t	used_page_mask_len; /* Count, in pages */
44 
45 	uint32_t	used_cluster_mask_start; /* Offset from beginning of disk, in pages */
46 	uint32_t	used_cluster_mask_len; /* Count, in pages */
47 
48 	uint32_t	md_start; /* Offset from beginning of disk, in pages */
49 	uint32_t	md_len; /* Count, in pages */
50 
51 	uint8_t		reserved[4036];
52 	uint32_t	crc;
53 } __attribute__((packed));
54 SPDK_STATIC_ASSERT(sizeof(struct spdk_bs_super_block_ver1) == 0x1000, "Invalid super block size");
55 
56 static struct spdk_blob *ut_blob_create_and_open(struct spdk_blob_store *bs,
57 		struct spdk_blob_opts *blob_opts);
58 static void ut_blob_close_and_delete(struct spdk_blob_store *bs, struct spdk_blob *blob);
59 static void suite_blob_setup(void);
60 static void suite_blob_cleanup(void);
61 
62 DEFINE_STUB(spdk_memory_domain_memzero, int, (struct spdk_memory_domain *src_domain,
63 		void *src_domain_ctx, struct iovec *iov, uint32_t iovcnt, void (*cpl_cb)(void *, int),
64 		void *cpl_cb_arg), 0);
65 
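/*
 * Xattr get_value callback used throughout these tests: look up the requested
 * name in g_xattr_names and return the matching entry from g_xattr_values.
 */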
66 static void
67 _get_xattr_value(void *arg, const char *name,
68 		 const void **value, size_t *value_len)
69 {
70 	uint64_t i;
71 
72 	SPDK_CU_ASSERT_FATAL(value_len != NULL);
73 	SPDK_CU_ASSERT_FATAL(value != NULL);
74 	CU_ASSERT(arg == &g_ctx);
75 
76 	for (i = 0; i < sizeof(g_xattr_names) / sizeof(g_xattr_names[0]); i++) {
77 		if (!strcmp(name, g_xattr_names[i])) {
78 			*value_len = strlen(g_xattr_values[i]);
79 			*value = g_xattr_values[i];
80 			break;
81 		}
82 	}
83 }
84 
85 static void
86 _get_xattr_value_null(void *arg, const char *name,
87 		      const void **value, size_t *value_len)
88 {
89 	SPDK_CU_ASSERT_FATAL(value_len != NULL);
90 	SPDK_CU_ASSERT_FATAL(value != NULL);
91 	CU_ASSERT(arg == NULL);
92 
93 	*value_len = 0;
94 	*value = NULL;
95 }
96 
97 static int
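/* Walk the blobstore's snapshot list and return the number of entries. */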
98 _get_snapshots_count(struct spdk_blob_store *bs)
99 {
100 	struct spdk_blob_list *snapshot = NULL;
101 	int count = 0;
102 
103 	TAILQ_FOREACH(snapshot, &bs->snapshots, link) {
104 		count += 1;
105 	}
106 
107 	return count;
108 }
109 
110 static void
111 ut_spdk_blob_opts_init(struct spdk_blob_opts *opts)
112 {
113 	spdk_blob_opts_init(opts, sizeof(*opts));
114 	opts->use_extent_table = g_use_extent_table;
115 }
116 
117 static void
118 bs_op_complete(void *cb_arg, int bserrno)
119 {
120 	g_bserrno = bserrno;
121 }
122 
123 static void
124 bs_op_with_handle_complete(void *cb_arg, struct spdk_blob_store *bs,
125 			   int bserrno)
126 {
127 	g_bs = bs;
128 	g_bserrno = bserrno;
129 }
130 
131 static void
132 blob_op_complete(void *cb_arg, int bserrno)
133 {
134 	g_bserrno = bserrno;
135 }
136 
137 static void
138 blob_op_with_id_complete(void *cb_arg, spdk_blob_id blobid, int bserrno)
139 {
140 	g_blobid = blobid;
141 	g_bserrno = bserrno;
142 }
143 
144 static void
145 blob_op_with_handle_complete(void *cb_arg, struct spdk_blob *blb, int bserrno)
146 {
147 	g_blob = blb;
148 	g_bserrno = bserrno;
149 }
150 
151 static void
152 blob_op_with_handle_complete2(void *cb_arg, struct spdk_blob *blob, int bserrno)
153 {
154 	if (g_blob == NULL) {
155 		g_blob = blob;
156 		g_bserrno = bserrno;
157 	} else {
158 		g_blob2 = blob;
159 		g_bserrno2 = bserrno;
160 	}
161 }
162 
163 static void
164 ut_bs_reload(struct spdk_blob_store **bs, struct spdk_bs_opts *opts)
165 {
166 	struct spdk_bs_dev *dev;
167 
168 	/* Unload the blob store */
169 	spdk_bs_unload(*bs, bs_op_complete, NULL);
170 	poll_threads();
171 	CU_ASSERT(g_bserrno == 0);
172 
173 	dev = init_dev();
174 	/* Load an existing blob store */
175 	spdk_bs_load(dev, opts, bs_op_with_handle_complete, NULL);
176 	poll_threads();
177 	CU_ASSERT(g_bserrno == 0);
178 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
179 	*bs = g_bs;
180 
181 	g_bserrno = -1;
182 }
183 
184 static void
185 ut_bs_dirty_load(struct spdk_blob_store **bs, struct spdk_bs_opts *opts)
186 {
187 	struct spdk_bs_dev *dev;
188 
189 	/* Dirty shutdown */
190 	bs_free(*bs);
191 
192 	dev = init_dev();
193 	/* Load an existing blob store */
194 	spdk_bs_load(dev, opts, bs_op_with_handle_complete, NULL);
195 	poll_threads();
196 	CU_ASSERT(g_bserrno == 0);
197 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
198 	*bs = g_bs;
199 
200 	g_bserrno = -1;
201 }
202 
203 static void
204 blob_init(void)
205 {
206 	struct spdk_blob_store *bs;
207 	struct spdk_bs_dev *dev;
208 
209 	dev = init_dev();
210 
211 	/* should fail for an unsupported blocklen */
212 	dev->blocklen = 500;
213 	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
214 	poll_threads();
215 	CU_ASSERT(g_bserrno == -EINVAL);
216 
217 	dev = init_dev();
218 	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
219 	poll_threads();
220 	CU_ASSERT(g_bserrno == 0);
221 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
222 	bs = g_bs;
223 
224 	spdk_bs_unload(bs, bs_op_complete, NULL);
225 	poll_threads();
226 	CU_ASSERT(g_bserrno == 0);
227 	g_bs = NULL;
228 }
229 
230 static void
231 blob_super(void)
232 {
233 	struct spdk_blob_store *bs = g_bs;
234 	spdk_blob_id blobid;
235 	struct spdk_blob_opts blob_opts;
236 
237 	/* Get the super blob without having set one */
238 	spdk_bs_get_super(bs, blob_op_with_id_complete, NULL);
239 	poll_threads();
240 	CU_ASSERT(g_bserrno == -ENOENT);
241 	CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID);
242 
243 	/* Create a blob */
244 	ut_spdk_blob_opts_init(&blob_opts);
245 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
246 	poll_threads();
247 	CU_ASSERT(g_bserrno == 0);
248 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
249 	blobid = g_blobid;
250 
251 	/* Set the blob as the super blob */
252 	spdk_bs_set_super(bs, blobid, blob_op_complete, NULL);
253 	poll_threads();
254 	CU_ASSERT(g_bserrno == 0);
255 
256 	/* Get the super blob */
257 	spdk_bs_get_super(bs, blob_op_with_id_complete, NULL);
258 	poll_threads();
259 	CU_ASSERT(g_bserrno == 0);
260 	CU_ASSERT(blobid == g_blobid);
261 }
262 
263 static void
264 blob_open(void)
265 {
266 	struct spdk_blob_store *bs = g_bs;
267 	struct spdk_blob *blob;
268 	struct spdk_blob_opts blob_opts;
269 	spdk_blob_id blobid, blobid2;
270 
271 	ut_spdk_blob_opts_init(&blob_opts);
272 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
273 	poll_threads();
274 	CU_ASSERT(g_bserrno == 0);
275 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
276 	blobid = g_blobid;
277 
278 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
279 	poll_threads();
280 	CU_ASSERT(g_bserrno == 0);
281 	CU_ASSERT(g_blob != NULL);
282 	blob = g_blob;
283 
284 	blobid2 = spdk_blob_get_id(blob);
285 	CU_ASSERT(blobid == blobid2);
286 
287 	/* Try to open the blob again.  It should return success. */
288 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
289 	poll_threads();
290 	CU_ASSERT(g_bserrno == 0);
291 	CU_ASSERT(blob == g_blob);
292 
293 	spdk_blob_close(blob, blob_op_complete, NULL);
294 	poll_threads();
295 	CU_ASSERT(g_bserrno == 0);
296 
297 	/*
298 	 * Close the blob a second time, releasing the second reference.  This
299 	 *  should succeed.
300 	 */
301 	blob = g_blob;
302 	spdk_blob_close(blob, blob_op_complete, NULL);
303 	poll_threads();
304 	CU_ASSERT(g_bserrno == 0);
305 
306 	/*
307 	 * Try to open the blob again.  It should succeed.  This tests the case
308 	 *  where the blob is opened, closed, then re-opened again.
309 	 */
310 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
311 	poll_threads();
312 	CU_ASSERT(g_bserrno == 0);
313 	CU_ASSERT(g_blob != NULL);
314 	blob = g_blob;
315 	spdk_blob_close(blob, blob_op_complete, NULL);
316 	poll_threads();
317 	CU_ASSERT(g_bserrno == 0);
318 
319 	/* Try to open the blob twice in succession.  This should return the same
320 	 * blob object.
321 	 */
322 	g_blob = NULL;
323 	g_blob2 = NULL;
324 	g_bserrno = -1;
325 	g_bserrno2 = -1;
326 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete2, NULL);
327 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete2, NULL);
328 	poll_threads();
329 	CU_ASSERT(g_bserrno == 0);
330 	CU_ASSERT(g_bserrno2 == 0);
331 	CU_ASSERT(g_blob != NULL);
332 	CU_ASSERT(g_blob2 != NULL);
333 	CU_ASSERT(g_blob == g_blob2);
334 
335 	g_bserrno = -1;
336 	spdk_blob_close(g_blob, blob_op_complete, NULL);
337 	poll_threads();
338 	CU_ASSERT(g_bserrno == 0);
339 
340 	ut_blob_close_and_delete(bs, g_blob);
341 }
342 
343 static void
344 blob_create(void)
345 {
346 	struct spdk_blob_store *bs = g_bs;
347 	struct spdk_blob *blob;
348 	struct spdk_blob_opts opts;
349 	spdk_blob_id blobid;
350 
351 	/* Create blob with 10 clusters */
352 
353 	ut_spdk_blob_opts_init(&opts);
354 	opts.num_clusters = 10;
355 
356 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
357 	poll_threads();
358 	CU_ASSERT(g_bserrno == 0);
359 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
360 	blobid = g_blobid;
361 
362 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
363 	poll_threads();
364 	CU_ASSERT(g_bserrno == 0);
365 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
366 	blob = g_blob;
367 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
368 
369 	spdk_blob_close(blob, blob_op_complete, NULL);
370 	poll_threads();
371 	CU_ASSERT(g_bserrno == 0);
372 
373 	/* Create blob with 0 clusters */
374 
375 	ut_spdk_blob_opts_init(&opts);
376 	opts.num_clusters = 0;
377 
378 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
379 	poll_threads();
380 	CU_ASSERT(g_bserrno == 0);
381 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
382 	blobid = g_blobid;
383 
384 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
385 	poll_threads();
386 	CU_ASSERT(g_bserrno == 0);
387 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
388 	blob = g_blob;
389 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
390 
391 	spdk_blob_close(blob, blob_op_complete, NULL);
392 	poll_threads();
393 	CU_ASSERT(g_bserrno == 0);
394 
395 	/* Create blob with default options (opts == NULL) */
396 
397 	spdk_bs_create_blob_ext(bs, NULL, blob_op_with_id_complete, NULL);
398 	poll_threads();
399 	CU_ASSERT(g_bserrno == 0);
400 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
401 	blobid = g_blobid;
402 
403 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
404 	poll_threads();
405 	CU_ASSERT(g_bserrno == 0);
406 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
407 	blob = g_blob;
408 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
409 
410 	spdk_blob_close(blob, blob_op_complete, NULL);
411 	poll_threads();
412 	CU_ASSERT(g_bserrno == 0);
413 
414 	/* Try to create a blob with a size larger than the blobstore */
415 
416 	ut_spdk_blob_opts_init(&opts);
417 	opts.num_clusters = bs->total_clusters + 1;
418 
419 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
420 	poll_threads();
421 	CU_ASSERT(g_bserrno == -ENOSPC);
422 }
423 
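/*
 * Create zero-length blobs (with default options and with NULL internal
 * options) and verify that an extent table is recorded but no extent pages
 * are allocated.
 */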
424 static void
425 blob_create_zero_extent(void)
426 {
427 	struct spdk_blob_store *bs = g_bs;
428 	struct spdk_blob *blob;
429 	spdk_blob_id blobid;
430 
431 	/* Create blob with default options (opts == NULL) */
432 	spdk_bs_create_blob_ext(bs, NULL, blob_op_with_id_complete, NULL);
433 	poll_threads();
434 	CU_ASSERT(g_bserrno == 0);
435 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
436 	blobid = g_blobid;
437 
438 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
439 	poll_threads();
440 	CU_ASSERT(g_bserrno == 0);
441 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
442 	blob = g_blob;
443 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
444 	CU_ASSERT(blob->extent_table_found == true);
445 	CU_ASSERT(blob->active.extent_pages_array_size == 0);
446 	CU_ASSERT(blob->active.extent_pages == NULL);
447 
448 	spdk_blob_close(blob, blob_op_complete, NULL);
449 	poll_threads();
450 	CU_ASSERT(g_bserrno == 0);
451 
452 	/* Create blob with NULL internal options */
453 	bs_create_blob(bs, NULL, NULL, blob_op_with_id_complete, NULL);
454 	poll_threads();
455 	CU_ASSERT(g_bserrno == 0);
456 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
457 	blobid = g_blobid;
458 
459 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
460 	poll_threads();
461 	CU_ASSERT(g_bserrno == 0);
462 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
463 	blob = g_blob;
464 	CU_ASSERT(TAILQ_FIRST(&blob->xattrs_internal) == NULL);
465 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
466 	CU_ASSERT(blob->extent_table_found == true);
467 	CU_ASSERT(blob->active.extent_pages_array_size == 0);
468 	CU_ASSERT(blob->active.extent_pages == NULL);
469 
470 	spdk_blob_close(blob, blob_op_complete, NULL);
471 	poll_threads();
472 	CU_ASSERT(g_bserrno == 0);
473 }
474 
475 /*
476  * Create and delete one blob in a loop over and over again.  This helps ensure
477  * that the internal bit masks tracking used clusters and md_pages are
478  * maintained correctly.
479  */
480 static void
481 blob_create_loop(void)
482 {
483 	struct spdk_blob_store *bs = g_bs;
484 	struct spdk_blob_opts opts;
485 	uint32_t i, loop_count;
486 
487 	loop_count = 4 * spdk_max(spdk_bit_array_capacity(bs->used_md_pages),
488 				  spdk_bit_pool_capacity(bs->used_clusters));
489 
490 	for (i = 0; i < loop_count; i++) {
491 		ut_spdk_blob_opts_init(&opts);
492 		opts.num_clusters = 1;
493 		g_bserrno = -1;
494 		g_blobid = SPDK_BLOBID_INVALID;
495 		spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
496 		poll_threads();
497 		CU_ASSERT(g_bserrno == 0);
498 		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
499 		spdk_bs_delete_blob(bs, g_blobid, blob_op_complete, NULL);
500 		poll_threads();
501 		CU_ASSERT(g_bserrno == 0);
502 	}
503 }
504 
505 static void
506 blob_create_fail(void)
507 {
508 	struct spdk_blob_store *bs = g_bs;
509 	struct spdk_blob_opts opts;
510 	spdk_blob_id blobid;
511 	uint32_t used_blobids_count = spdk_bit_array_count_set(bs->used_blobids);
512 	uint32_t used_md_pages_count = spdk_bit_array_count_set(bs->used_md_pages);
513 
514 	/* NULL callback */
515 	ut_spdk_blob_opts_init(&opts);
516 	opts.xattrs.names = g_xattr_names;
517 	opts.xattrs.get_value = NULL;
518 	opts.xattrs.count = 1;
519 	opts.xattrs.ctx = &g_ctx;
520 
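	/* Grab the first free md page index; after the failed create it must still
	 * be free and opening it as a blob id must fail with -ENOENT. */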
521 	blobid = spdk_bit_array_find_first_clear(bs->used_md_pages, 0);
522 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
523 	poll_threads();
524 	CU_ASSERT(g_bserrno == -EINVAL);
525 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
526 	CU_ASSERT(spdk_bit_array_count_set(bs->used_blobids) == used_blobids_count);
527 	CU_ASSERT(spdk_bit_array_count_set(bs->used_md_pages) == used_md_pages_count);
528 
529 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
530 	poll_threads();
531 	CU_ASSERT(g_bserrno == -ENOENT);
532 	SPDK_CU_ASSERT_FATAL(g_blob == NULL);
533 
534 	ut_bs_reload(&bs, NULL);
535 	CU_ASSERT(spdk_bit_array_count_set(bs->used_blobids) == used_blobids_count);
536 	CU_ASSERT(spdk_bit_array_count_set(bs->used_md_pages) == used_md_pages_count);
537 
538 	spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL);
539 	poll_threads();
540 	CU_ASSERT(g_blob == NULL);
541 	CU_ASSERT(g_bserrno == -ENOENT);
542 }
543 
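/*
 * Create a blob with internal xattrs and verify they are only visible through
 * the internal getter, not through spdk_blob_get_xattr_value().
 */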
544 static void
545 blob_create_internal(void)
546 {
547 	struct spdk_blob_store *bs = g_bs;
548 	struct spdk_blob *blob;
549 	struct spdk_blob_opts opts;
550 	struct spdk_blob_xattr_opts internal_xattrs;
551 	const void *value;
552 	size_t value_len;
553 	spdk_blob_id blobid;
554 	int rc;
555 
556 	/* Create blob with custom xattrs */
557 
558 	ut_spdk_blob_opts_init(&opts);
559 	blob_xattrs_init(&internal_xattrs);
560 	internal_xattrs.count = 3;
561 	internal_xattrs.names = g_xattr_names;
562 	internal_xattrs.get_value = _get_xattr_value;
563 	internal_xattrs.ctx = &g_ctx;
564 
565 	bs_create_blob(bs, &opts, &internal_xattrs, blob_op_with_id_complete, NULL);
566 	poll_threads();
567 	CU_ASSERT(g_bserrno == 0);
568 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
569 	blobid = g_blobid;
570 
571 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
572 	poll_threads();
573 	CU_ASSERT(g_bserrno == 0);
574 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
575 	blob = g_blob;
576 
577 	rc = blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len, true);
578 	CU_ASSERT(rc == 0);
579 	SPDK_CU_ASSERT_FATAL(value != NULL);
580 	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
581 	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);
582 
583 	rc = blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len, true);
584 	CU_ASSERT(rc == 0);
585 	SPDK_CU_ASSERT_FATAL(value != NULL);
586 	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
587 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);
588 
589 	rc = blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len, true);
590 	CU_ASSERT(rc == 0);
591 	SPDK_CU_ASSERT_FATAL(value != NULL);
592 	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
593 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);
594 
595 	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len);
596 	CU_ASSERT(rc != 0);
597 
598 	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len);
599 	CU_ASSERT(rc != 0);
600 
601 	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len);
602 	CU_ASSERT(rc != 0);
603 
604 	spdk_blob_close(blob, blob_op_complete, NULL);
605 	poll_threads();
606 	CU_ASSERT(g_bserrno == 0);
607 
608 	/* Create blob with NULL internal options */
609 
610 	bs_create_blob(bs, NULL, NULL, blob_op_with_id_complete, NULL);
611 	poll_threads();
612 	CU_ASSERT(g_bserrno == 0);
613 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
614 	blobid = g_blobid;
615 
616 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
617 	poll_threads();
618 	CU_ASSERT(g_bserrno == 0);
619 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
620 	CU_ASSERT(TAILQ_FIRST(&g_blob->xattrs_internal) == NULL);
621 	CU_ASSERT(spdk_blob_get_num_clusters(g_blob) == 0);
622 
623 	blob = g_blob;
624 
625 	spdk_blob_close(blob, blob_op_complete, NULL);
626 	poll_threads();
627 	CU_ASSERT(g_bserrno == 0);
628 }
629 
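/*
 * Create a thin provisioned blob on a fresh blobstore, verify that no clusters
 * are allocated up front, and check that the thin provisioning flag survives a
 * dirty shutdown followed by a reload.
 */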
630 static void
631 blob_thin_provision(void)
632 {
633 	struct spdk_blob_store *bs;
634 	struct spdk_bs_dev *dev;
635 	struct spdk_blob *blob;
636 	struct spdk_blob_opts opts;
637 	struct spdk_bs_opts bs_opts;
638 	spdk_blob_id blobid;
639 
640 	dev = init_dev();
641 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
642 	snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");
643 
644 	/* Initialize a new blob store */
645 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
646 	poll_threads();
647 	CU_ASSERT(g_bserrno == 0);
648 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
649 
650 	bs = g_bs;
651 
652 	/* Create blob with thin provisioning enabled */
653 
654 	ut_spdk_blob_opts_init(&opts);
655 	opts.thin_provision = true;
656 	opts.num_clusters = 10;
657 
658 	blob = ut_blob_create_and_open(bs, &opts);
659 	blobid = spdk_blob_get_id(blob);
660 	CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
661 	/* In thin provisioning, when num_clusters is set, no clusters are
662 	 * allocated up front. If the extent table is used, the extent pages
663 	 * that track them are still allocated. */
664 	if (blob->extent_table_found == true) {
665 		CU_ASSERT(blob->active.extent_pages_array_size > 0);
666 		CU_ASSERT(blob->active.extent_pages != NULL);
667 	} else {
668 		CU_ASSERT(blob->active.extent_pages_array_size == 0);
669 		CU_ASSERT(blob->active.extent_pages == NULL);
670 	}
671 
672 	spdk_blob_close(blob, blob_op_complete, NULL);
673 	CU_ASSERT(g_bserrno == 0);
674 
675 	/* Do not shut down cleanly.  This makes sure that when we load again
676 	 *  and try to recover a valid used_cluster map, the blobstore will
677 	 *  ignore clusters with index 0 since these are unallocated clusters.
678 	 */
679 	ut_bs_dirty_load(&bs, &bs_opts);
680 
681 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
682 	poll_threads();
683 	CU_ASSERT(g_bserrno == 0);
684 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
685 	blob = g_blob;
686 	CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
687 
688 	ut_blob_close_and_delete(bs, blob);
689 
690 	spdk_bs_unload(bs, bs_op_complete, NULL);
691 	poll_threads();
692 	CU_ASSERT(g_bserrno == 0);
693 	g_bs = NULL;
694 }
695 
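/*
 * Snapshot a blob, then snapshot the resulting clone, and verify the
 * back_bs_dev chain, the clone lists and the xattrs.  Also verify that taking
 * a snapshot of a snapshot fails with -EINVAL.
 */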
696 static void
697 blob_snapshot(void)
698 {
699 	struct spdk_blob_store *bs = g_bs;
700 	struct spdk_blob *blob;
701 	struct spdk_blob *snapshot, *snapshot2;
702 	struct spdk_blob_bs_dev *blob_bs_dev;
703 	struct spdk_blob_opts opts;
704 	struct spdk_blob_xattr_opts xattrs;
705 	spdk_blob_id blobid;
706 	spdk_blob_id snapshotid;
707 	spdk_blob_id snapshotid2;
708 	const void *value;
709 	size_t value_len;
710 	int rc;
711 	spdk_blob_id ids[2];
712 	size_t count;
713 
714 	/* Create blob with 10 clusters */
715 	ut_spdk_blob_opts_init(&opts);
716 	opts.num_clusters = 10;
717 
718 	blob = ut_blob_create_and_open(bs, &opts);
719 	blobid = spdk_blob_get_id(blob);
720 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
721 
722 	/* Create snapshot from blob */
723 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 0);
724 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
725 	poll_threads();
726 	CU_ASSERT(g_bserrno == 0);
727 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
728 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
729 	snapshotid = g_blobid;
730 
731 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
732 	poll_threads();
733 	CU_ASSERT(g_bserrno == 0);
734 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
735 	snapshot = g_blob;
736 	CU_ASSERT(snapshot->data_ro == true);
737 	CU_ASSERT(snapshot->md_ro == true);
738 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10);
739 
740 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
741 	CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
742 	CU_ASSERT(spdk_mem_all_zero(blob->active.clusters,
743 				    blob->active.num_clusters * sizeof(blob->active.clusters[0])));
744 
745 	/* Try to create snapshot from clone with xattrs */
746 	xattrs.names = g_xattr_names;
747 	xattrs.get_value = _get_xattr_value;
748 	xattrs.count = 3;
749 	xattrs.ctx = &g_ctx;
750 	spdk_bs_create_snapshot(bs, blobid, &xattrs, blob_op_with_id_complete, NULL);
751 	poll_threads();
752 	CU_ASSERT(g_bserrno == 0);
753 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
754 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 2);
755 	snapshotid2 = g_blobid;
756 
757 	spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL);
758 	CU_ASSERT(g_bserrno == 0);
759 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
760 	snapshot2 = g_blob;
761 	CU_ASSERT(snapshot2->data_ro == true);
762 	CU_ASSERT(snapshot2->md_ro == true);
763 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot2) == 10);
764 
765 	/* Confirm that blob is backed by snapshot2 and snapshot2 is backed by snapshot */
766 	CU_ASSERT(snapshot->back_bs_dev == NULL);
767 	SPDK_CU_ASSERT_FATAL(blob->back_bs_dev != NULL);
768 	SPDK_CU_ASSERT_FATAL(snapshot2->back_bs_dev != NULL);
769 
770 	blob_bs_dev = (struct spdk_blob_bs_dev *)blob->back_bs_dev;
771 	CU_ASSERT(blob_bs_dev->blob == snapshot2);
772 
773 	blob_bs_dev = (struct spdk_blob_bs_dev *)snapshot2->back_bs_dev;
774 	CU_ASSERT(blob_bs_dev->blob == snapshot);
775 
776 	rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[0], &value, &value_len);
777 	CU_ASSERT(rc == 0);
778 	SPDK_CU_ASSERT_FATAL(value != NULL);
779 	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
780 	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);
781 
782 	rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[1], &value, &value_len);
783 	CU_ASSERT(rc == 0);
784 	SPDK_CU_ASSERT_FATAL(value != NULL);
785 	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
786 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);
787 
788 	rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[2], &value, &value_len);
789 	CU_ASSERT(rc == 0);
790 	SPDK_CU_ASSERT_FATAL(value != NULL);
791 	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
792 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);
793 
794 	/* Confirm that blob is clone of snapshot2, and snapshot2 is clone of snapshot */
795 	count = 2;
796 	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0);
797 	CU_ASSERT(count == 1);
798 	CU_ASSERT(ids[0] == blobid);
799 
800 	count = 2;
801 	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);
802 	CU_ASSERT(count == 1);
803 	CU_ASSERT(ids[0] == snapshotid2);
804 
805 	/* Try to create snapshot from snapshot */
806 	spdk_bs_create_snapshot(bs, snapshotid, NULL, blob_op_with_id_complete, NULL);
807 	poll_threads();
808 	CU_ASSERT(g_bserrno == -EINVAL);
809 	CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID);
810 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 2);
811 
812 	/* Delete blob and confirm that it is no longer on snapshot2 clone list */
813 	ut_blob_close_and_delete(bs, blob);
814 	count = 2;
815 	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0);
816 	CU_ASSERT(count == 0);
817 
818 	/* Delete snapshot2 and confirm that it is no longer on snapshot clone list */
819 	ut_blob_close_and_delete(bs, snapshot2);
820 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
821 	count = 2;
822 	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0);
823 	CU_ASSERT(count == 0);
824 
825 	ut_blob_close_and_delete(bs, snapshot);
826 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 0);
827 }
828 
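/*
 * Verify that blob I/O issued while a snapshot is being created is queued
 * (frozen) on the channel and completes correctly once the snapshot finishes.
 */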
829 static void
830 blob_snapshot_freeze_io(void)
831 {
832 	struct spdk_io_channel *channel;
833 	struct spdk_bs_channel *bs_channel;
834 	struct spdk_blob_store *bs = g_bs;
835 	struct spdk_blob *blob;
836 	struct spdk_blob_opts opts;
837 	spdk_blob_id blobid;
838 	uint32_t num_of_pages = 10;
839 	uint8_t payload_read[num_of_pages * SPDK_BS_PAGE_SIZE];
840 	uint8_t payload_write[num_of_pages * SPDK_BS_PAGE_SIZE];
841 	uint8_t payload_zero[num_of_pages * SPDK_BS_PAGE_SIZE];
842 
843 	memset(payload_write, 0xE5, sizeof(payload_write));
844 	memset(payload_read, 0x00, sizeof(payload_read));
845 	memset(payload_zero, 0x00, sizeof(payload_zero));
846 
847 	/* Test freeze I/O during snapshot */
848 	channel = spdk_bs_alloc_io_channel(bs);
849 	bs_channel = spdk_io_channel_get_ctx(channel);
850 
851 	/* Create blob with 10 clusters */
852 	ut_spdk_blob_opts_init(&opts);
853 	opts.num_clusters = 10;
854 	opts.thin_provision = false;
855 
856 	blob = ut_blob_create_and_open(bs, &opts);
857 	blobid = spdk_blob_get_id(blob);
858 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
859 
860 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
861 
862 	/* This is implementation specific.
863 	 * The 'frozen_refcnt' counter is raised in the bs_snapshot_freeze_cpl callback path.
864 	 * Four async I/O operations happen before that. */
865 	poll_thread_times(0, 5);
866 
867 	CU_ASSERT(TAILQ_EMPTY(&bs_channel->queued_io));
868 
869 	/* Blob I/O should be frozen here */
870 	CU_ASSERT(blob->frozen_refcnt == 1);
871 
872 	/* Write to the blob */
873 	spdk_blob_io_write(blob, channel, payload_write, 0, num_of_pages, blob_op_complete, NULL);
874 
875 	/* Verify that I/O is queued */
876 	CU_ASSERT(!TAILQ_EMPTY(&bs_channel->queued_io));
877 	/* Verify that the payload is not written to disk; at this point the blob's clusters have already been handed off to the snapshot */
878 	CU_ASSERT(blob->active.clusters[0] == 0);
879 
880 	/* Finish all operations including spdk_bs_create_snapshot */
881 	poll_threads();
882 
883 	/* Verify snapshot */
884 	CU_ASSERT(g_bserrno == 0);
885 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
886 
887 	/* Verify that the blob is no longer frozen */
888 	CU_ASSERT(blob->frozen_refcnt == 0);
889 
890 	/* Verify that postponed I/O completed successfully by comparing payload */
891 	spdk_blob_io_read(blob, channel, payload_read, 0, num_of_pages, blob_op_complete, NULL);
892 	poll_threads();
893 	CU_ASSERT(g_bserrno == 0);
894 	CU_ASSERT(memcmp(payload_write, payload_read, num_of_pages * SPDK_BS_PAGE_SIZE) == 0);
895 
896 	spdk_bs_free_io_channel(channel);
897 	poll_threads();
898 
899 	ut_blob_close_and_delete(bs, blob);
900 }
901 
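/*
 * Create clones from a snapshot (with xattrs) and from a read-only blob, and
 * verify that cloning a writable blob fails with -EINVAL.
 */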
902 static void
903 blob_clone(void)
904 {
905 	struct spdk_blob_store *bs = g_bs;
906 	struct spdk_blob_opts opts;
907 	struct spdk_blob *blob, *snapshot, *clone;
908 	spdk_blob_id blobid, cloneid, snapshotid;
909 	struct spdk_blob_xattr_opts xattrs;
910 	const void *value;
911 	size_t value_len;
912 	int rc;
913 
914 	/* Create blob with 10 clusters */
915 
916 	ut_spdk_blob_opts_init(&opts);
917 	opts.num_clusters = 10;
918 
919 	blob = ut_blob_create_and_open(bs, &opts);
920 	blobid = spdk_blob_get_id(blob);
921 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
922 
923 	/* Create snapshot */
924 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
925 	poll_threads();
926 	CU_ASSERT(g_bserrno == 0);
927 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
928 	snapshotid = g_blobid;
929 
930 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
931 	poll_threads();
932 	CU_ASSERT(g_bserrno == 0);
933 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
934 	snapshot = g_blob;
935 	CU_ASSERT(snapshot->data_ro == true);
936 	CU_ASSERT(snapshot->md_ro == true);
937 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10);
938 
939 	spdk_blob_close(snapshot, blob_op_complete, NULL);
940 	poll_threads();
941 	CU_ASSERT(g_bserrno == 0);
942 
943 	/* Create clone from snapshot with xattrs */
944 	xattrs.names = g_xattr_names;
945 	xattrs.get_value = _get_xattr_value;
946 	xattrs.count = 3;
947 	xattrs.ctx = &g_ctx;
948 
949 	spdk_bs_create_clone(bs, snapshotid, &xattrs, blob_op_with_id_complete, NULL);
950 	poll_threads();
951 	CU_ASSERT(g_bserrno == 0);
952 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
953 	cloneid = g_blobid;
954 
955 	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
956 	poll_threads();
957 	CU_ASSERT(g_bserrno == 0);
958 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
959 	clone = g_blob;
960 	CU_ASSERT(clone->data_ro == false);
961 	CU_ASSERT(clone->md_ro == false);
962 	CU_ASSERT(spdk_blob_get_num_clusters(clone) == 10);
963 
964 	rc = spdk_blob_get_xattr_value(clone, g_xattr_names[0], &value, &value_len);
965 	CU_ASSERT(rc == 0);
966 	SPDK_CU_ASSERT_FATAL(value != NULL);
967 	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
968 	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);
969 
970 	rc = spdk_blob_get_xattr_value(clone, g_xattr_names[1], &value, &value_len);
971 	CU_ASSERT(rc == 0);
972 	SPDK_CU_ASSERT_FATAL(value != NULL);
973 	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
974 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);
975 
976 	rc = spdk_blob_get_xattr_value(clone, g_xattr_names[2], &value, &value_len);
977 	CU_ASSERT(rc == 0);
978 	SPDK_CU_ASSERT_FATAL(value != NULL);
979 	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
980 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);
981 
982 
983 	spdk_blob_close(clone, blob_op_complete, NULL);
984 	poll_threads();
985 	CU_ASSERT(g_bserrno == 0);
986 
987 	/* Try to create a clone from a blob that is not read-only */
988 	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
989 	poll_threads();
990 	CU_ASSERT(g_bserrno == -EINVAL);
991 	CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID);
992 
993 	/* Mark blob as read only */
994 	spdk_blob_set_read_only(blob);
995 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
996 	poll_threads();
997 	CU_ASSERT(g_bserrno == 0);
998 
999 	/* Create clone from read only blob */
1000 	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
1001 	poll_threads();
1002 	CU_ASSERT(g_bserrno == 0);
1003 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
1004 	cloneid = g_blobid;
1005 
1006 	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
1007 	poll_threads();
1008 	CU_ASSERT(g_bserrno == 0);
1009 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
1010 	clone = g_blob;
1011 	CU_ASSERT(clone->data_ro == false);
1012 	CU_ASSERT(clone->md_ro == false);
1013 	CU_ASSERT(spdk_blob_get_num_clusters(clone) == 10);
1014 
1015 	ut_blob_close_and_delete(bs, clone);
1016 	ut_blob_close_and_delete(bs, blob);
1017 }
1018 
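/*
 * Shared body for blob_inflate(): full inflation allocates all of the clone's
 * clusters, while decouple_parent only detaches the parent and leaves the blob
 * thin provisioned.  Either way, the snapshot can be deleted afterwards.
 */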
1019 static void
1020 _blob_inflate(bool decouple_parent)
1021 {
1022 	struct spdk_blob_store *bs = g_bs;
1023 	struct spdk_blob_opts opts;
1024 	struct spdk_blob *blob, *snapshot;
1025 	spdk_blob_id blobid, snapshotid;
1026 	struct spdk_io_channel *channel;
1027 	uint64_t free_clusters;
1028 
1029 	channel = spdk_bs_alloc_io_channel(bs);
1030 	SPDK_CU_ASSERT_FATAL(channel != NULL);
1031 
1032 	/* Create blob with 10 clusters */
1033 
1034 	ut_spdk_blob_opts_init(&opts);
1035 	opts.num_clusters = 10;
1036 	opts.thin_provision = true;
1037 
1038 	blob = ut_blob_create_and_open(bs, &opts);
1039 	blobid = spdk_blob_get_id(blob);
1040 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
1041 	CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == true);
1042 
1043 	/* 1) Blob with no parent */
1044 	if (decouple_parent) {
1045 		/* Decouple parent of blob with no parent (should fail) */
1046 		spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
1047 		poll_threads();
1048 		CU_ASSERT(g_bserrno != 0);
1049 	} else {
1050 		/* Inflating a thin blob with no parent should make it thick */
1051 		spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
1052 		poll_threads();
1053 		CU_ASSERT(g_bserrno == 0);
1054 		CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == false);
1055 	}
1056 
1057 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
1058 	poll_threads();
1059 	CU_ASSERT(g_bserrno == 0);
1060 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
1061 	snapshotid = g_blobid;
1062 
1063 	CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == true);
1064 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
1065 
1066 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
1067 	poll_threads();
1068 	CU_ASSERT(g_bserrno == 0);
1069 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
1070 	snapshot = g_blob;
1071 	CU_ASSERT(snapshot->data_ro == true);
1072 	CU_ASSERT(snapshot->md_ro == true);
1073 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10);
1074 
1075 	spdk_blob_close(snapshot, blob_op_complete, NULL);
1076 	poll_threads();
1077 	CU_ASSERT(g_bserrno == 0);
1078 
1079 	free_clusters = spdk_bs_free_cluster_count(bs);
1080 
1081 	/* 2) Blob with parent */
1082 	if (!decouple_parent) {
1083 		/* Do full blob inflation */
1084 		spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
1085 		poll_threads();
1086 		CU_ASSERT(g_bserrno == 0);
1087 		/* all 10 clusters should be allocated */
1088 		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 10);
1089 	} else {
1090 		/* Decouple parent of blob */
1091 		spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
1092 		poll_threads();
1093 		CU_ASSERT(g_bserrno == 0);
1094 		/* when only the parent is removed, none of the clusters should be allocated */
1095 		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters);
1096 	}
1097 
1098 	/* Now, it should be possible to delete the snapshot */
1099 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
1100 	poll_threads();
1101 	CU_ASSERT(g_bserrno == 0);
1102 
1103 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
1104 	CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == decouple_parent);
1105 
1106 	spdk_bs_free_io_channel(channel);
1107 	poll_threads();
1108 
1109 	ut_blob_close_and_delete(bs, blob);
1110 }
1111 
1112 static void
1113 blob_inflate(void)
1114 {
1115 	_blob_inflate(false);
1116 	_blob_inflate(true);
1117 }
1118 
1119 static void
1120 blob_delete(void)
1121 {
1122 	struct spdk_blob_store *bs = g_bs;
1123 	struct spdk_blob_opts blob_opts;
1124 	spdk_blob_id blobid;
1125 
1126 	/* Create a blob and then delete it. */
1127 	ut_spdk_blob_opts_init(&blob_opts);
1128 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
1129 	poll_threads();
1130 	CU_ASSERT(g_bserrno == 0);
1131 	CU_ASSERT(g_blobid > 0);
1132 	blobid = g_blobid;
1133 
1134 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
1135 	poll_threads();
1136 	CU_ASSERT(g_bserrno == 0);
1137 
1138 	/* Try to open the blob */
1139 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
1140 	poll_threads();
1141 	CU_ASSERT(g_bserrno == -ENOENT);
1142 }
1143 
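/*
 * Resize a blob up and down.  Growth reserves clusters immediately, shrinking
 * releases them only after the metadata is synced, resizing a read-only blob
 * fails with -EPERM, and growing past the blobstore capacity fails with -ENOSPC.
 */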
1144 static void
1145 blob_resize_test(void)
1146 {
1147 	struct spdk_blob_store *bs = g_bs;
1148 	struct spdk_blob *blob;
1149 	uint64_t free_clusters;
1150 
1151 	free_clusters = spdk_bs_free_cluster_count(bs);
1152 
1153 	blob = ut_blob_create_and_open(bs, NULL);
1154 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
1155 
1156 	/* Confirm that resize fails if blob is marked read-only. */
1157 	blob->md_ro = true;
1158 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
1159 	poll_threads();
1160 	CU_ASSERT(g_bserrno == -EPERM);
1161 	blob->md_ro = false;
1162 
1163 	/* The blob started at 0 clusters. Resize it to be 5. */
1164 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
1165 	poll_threads();
1166 	CU_ASSERT(g_bserrno == 0);
1167 	CU_ASSERT((free_clusters - 5) == spdk_bs_free_cluster_count(bs));
1168 
1169 	/* Shrink the blob to 3 clusters. This will not actually release
1170 	 * the old clusters until the blob is synced.
1171 	 */
1172 	spdk_blob_resize(blob, 3, blob_op_complete, NULL);
1173 	poll_threads();
1174 	CU_ASSERT(g_bserrno == 0);
1175 	/* Verify there are still 5 clusters in use */
1176 	CU_ASSERT((free_clusters - 5) == spdk_bs_free_cluster_count(bs));
1177 
1178 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
1179 	poll_threads();
1180 	CU_ASSERT(g_bserrno == 0);
1181 	/* Now there are only 3 clusters in use */
1182 	CU_ASSERT((free_clusters - 3) == spdk_bs_free_cluster_count(bs));
1183 
1184 	/* Resize the blob to be 10 clusters. Growth takes effect immediately. */
1185 	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
1186 	poll_threads();
1187 	CU_ASSERT(g_bserrno == 0);
1188 	CU_ASSERT((free_clusters - 10) == spdk_bs_free_cluster_count(bs));
1189 
1190 	/* Try to resize the blob to a size larger than the blobstore. */
1191 	spdk_blob_resize(blob, bs->total_clusters + 1, blob_op_complete, NULL);
1192 	poll_threads();
1193 	CU_ASSERT(g_bserrno == -ENOSPC);
1194 
1195 	ut_blob_close_and_delete(bs, blob);
1196 }
1197 
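/*
 * Mark a blob read-only and verify that the flag takes effect after md sync
 * and persists across close/reopen and a blobstore reload.
 */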
1198 static void
1199 blob_read_only(void)
1200 {
1201 	struct spdk_blob_store *bs;
1202 	struct spdk_bs_dev *dev;
1203 	struct spdk_blob *blob;
1204 	struct spdk_bs_opts opts;
1205 	spdk_blob_id blobid;
1206 	int rc;
1207 
1208 	dev = init_dev();
1209 	spdk_bs_opts_init(&opts, sizeof(opts));
1210 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
1211 
1212 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
1213 	poll_threads();
1214 	CU_ASSERT(g_bserrno == 0);
1215 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
1216 	bs = g_bs;
1217 
1218 	blob = ut_blob_create_and_open(bs, NULL);
1219 	blobid = spdk_blob_get_id(blob);
1220 
1221 	rc = spdk_blob_set_read_only(blob);
1222 	CU_ASSERT(rc == 0);
1223 
1224 	CU_ASSERT(blob->data_ro == false);
1225 	CU_ASSERT(blob->md_ro == false);
1226 
1227 	spdk_blob_sync_md(blob, bs_op_complete, NULL);
1228 	poll_threads();
1229 
1230 	CU_ASSERT(blob->data_ro == true);
1231 	CU_ASSERT(blob->md_ro == true);
1232 	CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);
1233 
1234 	spdk_blob_close(blob, blob_op_complete, NULL);
1235 	poll_threads();
1236 	CU_ASSERT(g_bserrno == 0);
1237 
1238 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
1239 	poll_threads();
1240 	CU_ASSERT(g_bserrno == 0);
1241 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
1242 	blob = g_blob;
1243 
1244 	CU_ASSERT(blob->data_ro == true);
1245 	CU_ASSERT(blob->md_ro == true);
1246 	CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);
1247 
1248 	spdk_blob_close(blob, blob_op_complete, NULL);
1249 	poll_threads();
1250 	CU_ASSERT(g_bserrno == 0);
1251 
1252 	ut_bs_reload(&bs, &opts);
1253 
1254 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
1255 	poll_threads();
1256 	CU_ASSERT(g_bserrno == 0);
1257 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
1258 	blob = g_blob;
1259 
1260 	CU_ASSERT(blob->data_ro == true);
1261 	CU_ASSERT(blob->md_ro == true);
1262 	CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);
1263 
1264 	ut_blob_close_and_delete(bs, blob);
1265 
1266 	spdk_bs_unload(bs, bs_op_complete, NULL);
1267 	poll_threads();
1268 	CU_ASSERT(g_bserrno == 0);
1269 }
1270 
1271 static void
1272 channel_ops(void)
1273 {
1274 	struct spdk_blob_store *bs = g_bs;
1275 	struct spdk_io_channel *channel;
1276 
1277 	channel = spdk_bs_alloc_io_channel(bs);
1278 	CU_ASSERT(channel != NULL);
1279 
1280 	spdk_bs_free_io_channel(channel);
1281 	poll_threads();
1282 }
1283 
1284 static void
1285 blob_write(void)
1286 {
1287 	struct spdk_blob_store *bs = g_bs;
1288 	struct spdk_blob *blob = g_blob;
1289 	struct spdk_io_channel *channel;
1290 	uint64_t pages_per_cluster;
1291 	uint8_t payload[10 * 4096];
1292 
1293 	pages_per_cluster = spdk_bs_get_cluster_size(bs) / spdk_bs_get_page_size(bs);
1294 
1295 	channel = spdk_bs_alloc_io_channel(bs);
1296 	CU_ASSERT(channel != NULL);
1297 
1298 	/* Write to a blob with 0 size */
1299 	spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL);
1300 	poll_threads();
1301 	CU_ASSERT(g_bserrno == -EINVAL);
1302 
1303 	/* Resize the blob */
1304 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
1305 	poll_threads();
1306 	CU_ASSERT(g_bserrno == 0);
1307 
1308 	/* Confirm that write fails if blob is marked read-only. */
1309 	blob->data_ro = true;
1310 	spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL);
1311 	poll_threads();
1312 	CU_ASSERT(g_bserrno == -EPERM);
1313 	blob->data_ro = false;
1314 
1315 	/* Write to the blob */
1316 	spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL);
1317 	poll_threads();
1318 	CU_ASSERT(g_bserrno == 0);
1319 
1320 	/* Write starting beyond the end */
1321 	spdk_blob_io_write(blob, channel, payload, 5 * pages_per_cluster, 1, blob_op_complete,
1322 			   NULL);
1323 	poll_threads();
1324 	CU_ASSERT(g_bserrno == -EINVAL);
1325 
1326 	/* Write starting at a valid location but going off the end */
1327 	spdk_blob_io_write(blob, channel, payload, 4 * pages_per_cluster, pages_per_cluster + 1,
1328 			   blob_op_complete, NULL);
1329 	poll_threads();
1330 	CU_ASSERT(g_bserrno == -EINVAL);
1331 
1332 	spdk_bs_free_io_channel(channel);
1333 	poll_threads();
1334 }
1335 
1336 static void
1337 blob_read(void)
1338 {
1339 	struct spdk_blob_store *bs = g_bs;
1340 	struct spdk_blob *blob = g_blob;
1341 	struct spdk_io_channel *channel;
1342 	uint64_t pages_per_cluster;
1343 	uint8_t payload[10 * 4096];
1344 
1345 	pages_per_cluster = spdk_bs_get_cluster_size(bs) / spdk_bs_get_page_size(bs);
1346 
1347 	channel = spdk_bs_alloc_io_channel(bs);
1348 	CU_ASSERT(channel != NULL);
1349 
1350 	/* Read from a blob with 0 size */
1351 	spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL);
1352 	poll_threads();
1353 	CU_ASSERT(g_bserrno == -EINVAL);
1354 
1355 	/* Resize the blob */
1356 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
1357 	poll_threads();
1358 	CU_ASSERT(g_bserrno == 0);
1359 
1360 	/* Confirm that read passes if blob is marked read-only. */
1361 	blob->data_ro = true;
1362 	spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL);
1363 	poll_threads();
1364 	CU_ASSERT(g_bserrno == 0);
1365 	blob->data_ro = false;
1366 
1367 	/* Read from the blob */
1368 	spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL);
1369 	poll_threads();
1370 	CU_ASSERT(g_bserrno == 0);
1371 
1372 	/* Read starting beyond the end */
1373 	spdk_blob_io_read(blob, channel, payload, 5 * pages_per_cluster, 1, blob_op_complete,
1374 			  NULL);
1375 	poll_threads();
1376 	CU_ASSERT(g_bserrno == -EINVAL);
1377 
1378 	/* Read starting at a valid location but going off the end */
1379 	spdk_blob_io_read(blob, channel, payload, 4 * pages_per_cluster, pages_per_cluster + 1,
1380 			  blob_op_complete, NULL);
1381 	poll_threads();
1382 	CU_ASSERT(g_bserrno == -EINVAL);
1383 
1384 	spdk_bs_free_io_channel(channel);
1385 	poll_threads();
1386 }
1387 
1388 static void
1389 blob_rw_verify(void)
1390 {
1391 	struct spdk_blob_store *bs = g_bs;
1392 	struct spdk_blob *blob = g_blob;
1393 	struct spdk_io_channel *channel;
1394 	uint8_t payload_read[10 * 4096];
1395 	uint8_t payload_write[10 * 4096];
1396 
1397 	channel = spdk_bs_alloc_io_channel(bs);
1398 	CU_ASSERT(channel != NULL);
1399 
1400 	spdk_blob_resize(blob, 32, blob_op_complete, NULL);
1401 	poll_threads();
1402 	CU_ASSERT(g_bserrno == 0);
1403 
1404 	memset(payload_write, 0xE5, sizeof(payload_write));
1405 	spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
1406 	poll_threads();
1407 	CU_ASSERT(g_bserrno == 0);
1408 
1409 	memset(payload_read, 0x00, sizeof(payload_read));
1410 	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
1411 	poll_threads();
1412 	CU_ASSERT(g_bserrno == 0);
1413 	CU_ASSERT(memcmp(payload_write, payload_read, 4 * 4096) == 0);
1414 
1415 	spdk_bs_free_io_channel(channel);
1416 	poll_threads();
1417 }
1418 
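/*
 * Writev/readv a payload that straddles a cluster boundary, with the second
 * cluster deliberately remapped, to catch errors in cluster offset translation.
 */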
1419 static void
1420 blob_rw_verify_iov(void)
1421 {
1422 	struct spdk_blob_store *bs = g_bs;
1423 	struct spdk_blob *blob;
1424 	struct spdk_io_channel *channel;
1425 	uint8_t payload_read[10 * 4096];
1426 	uint8_t payload_write[10 * 4096];
1427 	struct iovec iov_read[3];
1428 	struct iovec iov_write[3];
1429 	void *buf;
1430 
1431 	channel = spdk_bs_alloc_io_channel(bs);
1432 	CU_ASSERT(channel != NULL);
1433 
1434 	blob = ut_blob_create_and_open(bs, NULL);
1435 
1436 	spdk_blob_resize(blob, 2, blob_op_complete, NULL);
1437 	poll_threads();
1438 	CU_ASSERT(g_bserrno == 0);
1439 
1440 	/*
1441 	 * Manually adjust the offset of the blob's second cluster.  This allows
1442 	 *  us to make sure that the readv/writev code correctly accounts for I/O
1443 	 *  that crosses cluster boundaries.  Start by asserting that the allocated
1444 	 *  clusters are where we expect before modifying the second cluster.
1445 	 */
1446 	CU_ASSERT(blob->active.clusters[0] == 1 * 256);
1447 	CU_ASSERT(blob->active.clusters[1] == 2 * 256);
1448 	blob->active.clusters[1] = 3 * 256;
1449 
1450 	memset(payload_write, 0xE5, sizeof(payload_write));
1451 	iov_write[0].iov_base = payload_write;
1452 	iov_write[0].iov_len = 1 * 4096;
1453 	iov_write[1].iov_base = payload_write + 1 * 4096;
1454 	iov_write[1].iov_len = 5 * 4096;
1455 	iov_write[2].iov_base = payload_write + 6 * 4096;
1456 	iov_write[2].iov_len = 4 * 4096;
1457 	/*
1458 	 * Choose a page offset just before the cluster boundary.  The first 6 pages of payload
1459 	 *  will get written to the first cluster, the last 4 to the second cluster.
1460 	 */
1461 	spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
1462 	poll_threads();
1463 	CU_ASSERT(g_bserrno == 0);
1464 
1465 	memset(payload_read, 0xAA, sizeof(payload_read));
1466 	iov_read[0].iov_base = payload_read;
1467 	iov_read[0].iov_len = 3 * 4096;
1468 	iov_read[1].iov_base = payload_read + 3 * 4096;
1469 	iov_read[1].iov_len = 4 * 4096;
1470 	iov_read[2].iov_base = payload_read + 7 * 4096;
1471 	iov_read[2].iov_len = 3 * 4096;
1472 	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
1473 	poll_threads();
1474 	CU_ASSERT(g_bserrno == 0);
1475 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
1476 
1477 	buf = calloc(1, 256 * 4096);
1478 	SPDK_CU_ASSERT_FATAL(buf != NULL);
1479 	/* Check that cluster 2 on "disk" was not modified. */
1480 	CU_ASSERT(memcmp(buf, &g_dev_buffer[512 * 4096], 256 * 4096) == 0);
1481 	free(buf);
1482 
1483 	spdk_blob_close(blob, blob_op_complete, NULL);
1484 	poll_threads();
1485 	CU_ASSERT(g_bserrno == 0);
1486 
1487 	spdk_bs_free_io_channel(channel);
1488 	poll_threads();
1489 }
1490 
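/* Count the request sets currently available on the bs channel's free list (channel->reqs). */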
1491 static uint32_t
1492 bs_channel_get_req_count(struct spdk_io_channel *_channel)
1493 {
1494 	struct spdk_bs_channel *channel = spdk_io_channel_get_ctx(_channel);
1495 	struct spdk_bs_request_set *set;
1496 	uint32_t count = 0;
1497 
1498 	TAILQ_FOREACH(set, &channel->reqs, link) {
1499 		count++;
1500 	}
1501 
1502 	return count;
1503 }
1504 
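/*
 * Force allocation failures during a writev and verify that the request fails
 * with -ENOMEM without leaking a request set from the channel.
 */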
1505 static void
1506 blob_rw_verify_iov_nomem(void)
1507 {
1508 	struct spdk_blob_store *bs = g_bs;
1509 	struct spdk_blob *blob = g_blob;
1510 	struct spdk_io_channel *channel;
1511 	uint8_t payload_write[10 * 4096];
1512 	struct iovec iov_write[3];
1513 	uint32_t req_count;
1514 
1515 	channel = spdk_bs_alloc_io_channel(bs);
1516 	CU_ASSERT(channel != NULL);
1517 
1518 	spdk_blob_resize(blob, 2, blob_op_complete, NULL);
1519 	poll_threads();
1520 	CU_ASSERT(g_bserrno == 0);
1521 
1522 	/*
1523 	 * Choose a page offset just before the cluster boundary.  The first 6 pages of payload
1524 	 *  will get written to the first cluster, the last 4 to the second cluster.
1525 	 */
1526 	iov_write[0].iov_base = payload_write;
1527 	iov_write[0].iov_len = 1 * 4096;
1528 	iov_write[1].iov_base = payload_write + 1 * 4096;
1529 	iov_write[1].iov_len = 5 * 4096;
1530 	iov_write[2].iov_base = payload_write + 6 * 4096;
1531 	iov_write[2].iov_len = 4 * 4096;
1532 	MOCK_SET(calloc, NULL);
1533 	req_count = bs_channel_get_req_count(channel);
1534 	spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
1535 	poll_threads();
1536 	CU_ASSERT(g_bserrno == -ENOMEM);
1537 	CU_ASSERT(req_count == bs_channel_get_req_count(channel));
1538 	MOCK_CLEAR(calloc);
1539 
1540 	spdk_bs_free_io_channel(channel);
1541 	poll_threads();
1542 }
1543 
1544 static void
1545 blob_rw_iov_read_only(void)
1546 {
1547 	struct spdk_blob_store *bs = g_bs;
1548 	struct spdk_blob *blob = g_blob;
1549 	struct spdk_io_channel *channel;
1550 	uint8_t payload_read[4096];
1551 	uint8_t payload_write[4096];
1552 	struct iovec iov_read;
1553 	struct iovec iov_write;
1554 
1555 	channel = spdk_bs_alloc_io_channel(bs);
1556 	CU_ASSERT(channel != NULL);
1557 
1558 	spdk_blob_resize(blob, 2, blob_op_complete, NULL);
1559 	poll_threads();
1560 	CU_ASSERT(g_bserrno == 0);
1561 
1562 	/* Verify that writev fails if the data_ro flag is set. */
1563 	blob->data_ro = true;
1564 	iov_write.iov_base = payload_write;
1565 	iov_write.iov_len = sizeof(payload_write);
1566 	spdk_blob_io_writev(blob, channel, &iov_write, 1, 0, 1, blob_op_complete, NULL);
1567 	poll_threads();
1568 	CU_ASSERT(g_bserrno == -EPERM);
1569 
1570 	/* Verify that reads pass if data_ro flag is set. */
1571 	iov_read.iov_base = payload_read;
1572 	iov_read.iov_len = sizeof(payload_read);
1573 	spdk_blob_io_readv(blob, channel, &iov_read, 1, 0, 1, blob_op_complete, NULL);
1574 	poll_threads();
1575 	CU_ASSERT(g_bserrno == 0);
1576 
1577 	spdk_bs_free_io_channel(channel);
1578 	poll_threads();
1579 }
1580 
1581 static void
1582 _blob_io_read_no_split(struct spdk_blob *blob, struct spdk_io_channel *channel,
1583 		       uint8_t *payload, uint64_t offset, uint64_t length,
1584 		       spdk_blob_op_complete cb_fn, void *cb_arg)
1585 {
1586 	uint64_t i;
1587 	uint8_t *buf;
1588 	uint64_t page_size = spdk_bs_get_page_size(blob->bs);
1589 
1590 	/* To be sure that the operation is NOT split, read one page at a time */
1591 	buf = payload;
1592 	for (i = 0; i < length; i++) {
1593 		spdk_blob_io_read(blob, channel, buf, i + offset, 1, blob_op_complete, NULL);
1594 		poll_threads();
1595 		if (g_bserrno != 0) {
1596 			/* Pass the error code up */
1597 			break;
1598 		}
1599 		buf += page_size;
1600 	}
1601 
1602 	cb_fn(cb_arg, g_bserrno);
1603 }
1604 
1605 static void
1606 _blob_io_write_no_split(struct spdk_blob *blob, struct spdk_io_channel *channel,
1607 			uint8_t *payload, uint64_t offset, uint64_t length,
1608 			spdk_blob_op_complete cb_fn, void *cb_arg)
1609 {
1610 	uint64_t i;
1611 	uint8_t *buf;
1612 	uint64_t page_size = spdk_bs_get_page_size(blob->bs);
1613 
1614 	/* To be sure that the operation is NOT split, write one page at a time */
1615 	buf = payload;
1616 	for (i = 0; i < length; i++) {
1617 		spdk_blob_io_write(blob, channel, buf, i + offset, 1, blob_op_complete, NULL);
1618 		poll_threads();
1619 		if (g_bserrno != 0) {
1620 			/* Pass the error code up */
1621 			break;
1622 		}
1623 		buf += page_size;
1624 	}
1625 
1626 	cb_fn(cb_arg, g_bserrno);
1627 }
1628 
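/*
 * Verify that reads and writes spanning multiple clusters are split correctly
 * by comparing them against the single-page (unsplit) helpers above.
 */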
1629 static void
1630 blob_operation_split_rw(void)
1631 {
1632 	struct spdk_blob_store *bs = g_bs;
1633 	struct spdk_blob *blob;
1634 	struct spdk_io_channel *channel;
1635 	struct spdk_blob_opts opts;
1636 	uint64_t cluster_size;
1637 
1638 	uint64_t payload_size;
1639 	uint8_t *payload_read;
1640 	uint8_t *payload_write;
1641 	uint8_t *payload_pattern;
1642 
1643 	uint64_t page_size;
1644 	uint64_t pages_per_cluster;
1645 	uint64_t pages_per_payload;
1646 
1647 	uint64_t i;
1648 
1649 	cluster_size = spdk_bs_get_cluster_size(bs);
1650 	page_size = spdk_bs_get_page_size(bs);
1651 	pages_per_cluster = cluster_size / page_size;
1652 	pages_per_payload = pages_per_cluster * 5;
1653 	payload_size = cluster_size * 5;
1654 
1655 	payload_read = malloc(payload_size);
1656 	SPDK_CU_ASSERT_FATAL(payload_read != NULL);
1657 
1658 	payload_write = malloc(payload_size);
1659 	SPDK_CU_ASSERT_FATAL(payload_write != NULL);
1660 
1661 	payload_pattern = malloc(payload_size);
1662 	SPDK_CU_ASSERT_FATAL(payload_pattern != NULL);
1663 
1664 	/* Prepare a distinct, non-zero pattern to write */
1665 	memset(payload_pattern, 0xFF, payload_size);
1666 	for (i = 0; i < pages_per_payload; i++) {
1667 		*((uint64_t *)(payload_pattern + page_size * i)) = (i + 1);
1668 	}
1669 
1670 	channel = spdk_bs_alloc_io_channel(bs);
1671 	SPDK_CU_ASSERT_FATAL(channel != NULL);
1672 
1673 	/* Create blob */
1674 	ut_spdk_blob_opts_init(&opts);
1675 	opts.thin_provision = false;
1676 	opts.num_clusters = 5;
1677 
1678 	blob = ut_blob_create_and_open(bs, &opts);
1679 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
1680 
1681 	/* Initial read should return zeroed payload */
1682 	memset(payload_read, 0xFF, payload_size);
1683 	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
1684 	poll_threads();
1685 	CU_ASSERT(g_bserrno == 0);
1686 	CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size));
1687 
1688 	/* Fill whole blob except last page */
1689 	spdk_blob_io_write(blob, channel, payload_pattern, 0, pages_per_payload - 1,
1690 			   blob_op_complete, NULL);
1691 	poll_threads();
1692 	CU_ASSERT(g_bserrno == 0);
1693 
1694 	/* Write last page with a pattern */
1695 	spdk_blob_io_write(blob, channel, payload_pattern, pages_per_payload - 1, 1,
1696 			   blob_op_complete, NULL);
1697 	poll_threads();
1698 	CU_ASSERT(g_bserrno == 0);
1699 
1700 	/* Read whole blob and check consistency */
1701 	memset(payload_read, 0xFF, payload_size);
1702 	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
1703 	poll_threads();
1704 	CU_ASSERT(g_bserrno == 0);
1705 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size - page_size) == 0);
1706 	CU_ASSERT(memcmp(payload_pattern, payload_read + payload_size - page_size, page_size) == 0);
1707 
1708 	/* Fill whole blob except first page */
1709 	spdk_blob_io_write(blob, channel, payload_pattern, 1, pages_per_payload - 1,
1710 			   blob_op_complete, NULL);
1711 	poll_threads();
1712 	CU_ASSERT(g_bserrno == 0);
1713 
1714 	/* Write first page with a pattern */
1715 	spdk_blob_io_write(blob, channel, payload_pattern, 0, 1,
1716 			   blob_op_complete, NULL);
1717 	poll_threads();
1718 	CU_ASSERT(g_bserrno == 0);
1719 
1720 	/* Read whole blob and check consistency */
1721 	memset(payload_read, 0xFF, payload_size);
1722 	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
1723 	poll_threads();
1724 	CU_ASSERT(g_bserrno == 0);
1725 	CU_ASSERT(memcmp(payload_pattern, payload_read + page_size, payload_size - page_size) == 0);
1726 	CU_ASSERT(memcmp(payload_pattern, payload_read, page_size) == 0);
1727 
1728 
1729 	/* Fill whole blob with a pattern (5 clusters) */
1730 
1731 	/* 1. Read test. */
1732 	_blob_io_write_no_split(blob, channel, payload_pattern, 0, pages_per_payload,
1733 				blob_op_complete, NULL);
1734 	poll_threads();
1735 	CU_ASSERT(g_bserrno == 0);
1736 
1737 	memset(payload_read, 0xFF, payload_size);
1738 	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
1739 	poll_threads();
1740 	poll_threads();
1741 	CU_ASSERT(g_bserrno == 0);
1742 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);
1743 
1744 	/* 2. Write test. */
1745 	spdk_blob_io_write(blob, channel, payload_pattern, 0, pages_per_payload,
1746 			   blob_op_complete, NULL);
1747 	poll_threads();
1748 	CU_ASSERT(g_bserrno == 0);
1749 
1750 	memset(payload_read, 0xFF, payload_size);
1751 	_blob_io_read_no_split(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
1752 	poll_threads();
1753 	CU_ASSERT(g_bserrno == 0);
1754 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);
1755 
1756 	spdk_bs_free_io_channel(channel);
1757 	poll_threads();
1758 
1759 	g_blob = NULL;
1760 	g_blobid = 0;
1761 
1762 	free(payload_read);
1763 	free(payload_write);
1764 	free(payload_pattern);
1765 
1766 	ut_blob_close_and_delete(bs, blob);
1767 }
1768 
1769 static void
1770 blob_operation_split_rw_iov(void)
1771 {
1772 	struct spdk_blob_store *bs = g_bs;
1773 	struct spdk_blob *blob;
1774 	struct spdk_io_channel *channel;
1775 	struct spdk_blob_opts opts;
1776 	uint64_t cluster_size;
1777 
1778 	uint64_t payload_size;
1779 	uint8_t *payload_read;
1780 	uint8_t *payload_write;
1781 	uint8_t *payload_pattern;
1782 
1783 	uint64_t page_size;
1784 	uint64_t pages_per_cluster;
1785 	uint64_t pages_per_payload;
1786 
1787 	struct iovec iov_read[2];
1788 	struct iovec iov_write[2];
1789 
1790 	uint64_t i, j;
1791 
1792 	cluster_size = spdk_bs_get_cluster_size(bs);
1793 	page_size = spdk_bs_get_page_size(bs);
1794 	pages_per_cluster = cluster_size / page_size;
1795 	pages_per_payload = pages_per_cluster * 5;
1796 	payload_size = cluster_size * 5;
1797 
1798 	payload_read = malloc(payload_size);
1799 	SPDK_CU_ASSERT_FATAL(payload_read != NULL);
1800 
1801 	payload_write = malloc(payload_size);
1802 	SPDK_CU_ASSERT_FATAL(payload_write != NULL);
1803 
1804 	payload_pattern = malloc(payload_size);
1805 	SPDK_CU_ASSERT_FATAL(payload_pattern != NULL);
1806 
1807 	/* Prepare random pattern to write */
1808 	for (i = 0; i < pages_per_payload; i++) {
1809 		for (j = 0; j < page_size / sizeof(uint64_t); j++) {
1810 			uint64_t *tmp;
1811 
1812 			tmp = (uint64_t *)payload_pattern;
1813 			tmp += ((page_size * i) / sizeof(uint64_t)) + j;
1814 			*tmp = i + 1;
1815 		}
1816 	}
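	/* Unlike the previous test, here every 64-bit word of page i is set to i + 1, so the
	 * full length of each page carries its page number rather than just the first word.
	 */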
1817 
1818 	channel = spdk_bs_alloc_io_channel(bs);
1819 	SPDK_CU_ASSERT_FATAL(channel != NULL);
1820 
1821 	/* Create blob */
1822 	ut_spdk_blob_opts_init(&opts);
1823 	opts.thin_provision = false;
1824 	opts.num_clusters = 5;
1825 
1826 	blob = ut_blob_create_and_open(bs, &opts);
1827 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
1828 
1829 	/* Initial read should return zeroed payload */
1830 	memset(payload_read, 0xFF, payload_size);
1831 	iov_read[0].iov_base = payload_read;
1832 	iov_read[0].iov_len = cluster_size * 3;
1833 	iov_read[1].iov_base = payload_read + cluster_size * 3;
1834 	iov_read[1].iov_len = cluster_size * 2;
1835 	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL);
1836 	poll_threads();
1837 	CU_ASSERT(g_bserrno == 0);
1838 	CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size));
1839 
1840 	/* The first iov fills the whole blob except the last page and the second iov writes
1841 	 *  the last page with a pattern. */
1842 	iov_write[0].iov_base = payload_pattern;
1843 	iov_write[0].iov_len = payload_size - page_size;
1844 	iov_write[1].iov_base = payload_pattern;
1845 	iov_write[1].iov_len = page_size;
1846 	spdk_blob_io_writev(blob, channel, iov_write, 2, 0, pages_per_payload, blob_op_complete, NULL);
1847 	poll_threads();
1848 	CU_ASSERT(g_bserrno == 0);
1849 
1850 	/* Read whole blob and check consistency */
1851 	memset(payload_read, 0xFF, payload_size);
1852 	iov_read[0].iov_base = payload_read;
1853 	iov_read[0].iov_len = cluster_size * 2;
1854 	iov_read[1].iov_base = payload_read + cluster_size * 2;
1855 	iov_read[1].iov_len = cluster_size * 3;
1856 	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL);
1857 	poll_threads();
1858 	CU_ASSERT(g_bserrno == 0);
1859 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size - page_size) == 0);
1860 	CU_ASSERT(memcmp(payload_pattern, payload_read + payload_size - page_size, page_size) == 0);
1861 
1862 	/* The first iov fills only the first page and the second iov writes the whole blob
1863 	 *  except the first page with a pattern. */
1864 	iov_write[0].iov_base = payload_pattern;
1865 	iov_write[0].iov_len = page_size;
1866 	iov_write[1].iov_base = payload_pattern;
1867 	iov_write[1].iov_len = payload_size - page_size;
1868 	spdk_blob_io_writev(blob, channel, iov_write, 2, 0, pages_per_payload, blob_op_complete, NULL);
1869 	poll_threads();
1870 	CU_ASSERT(g_bserrno == 0);
1871 
1872 	/* Read whole blob and check consistency */
1873 	memset(payload_read, 0xFF, payload_size);
1874 	iov_read[0].iov_base = payload_read;
1875 	iov_read[0].iov_len = cluster_size * 4;
1876 	iov_read[1].iov_base = payload_read + cluster_size * 4;
1877 	iov_read[1].iov_len = cluster_size;
1878 	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL);
1879 	poll_threads();
1880 	CU_ASSERT(g_bserrno == 0);
1881 	CU_ASSERT(memcmp(payload_pattern, payload_read + page_size, payload_size - page_size) == 0);
1882 	CU_ASSERT(memcmp(payload_pattern, payload_read, page_size) == 0);
1883 
1884 
1885 	/* Fill whole blob with a pattern (5 clusters) */
1886 
1887 	/* 1. Read test. */
1888 	_blob_io_write_no_split(blob, channel, payload_pattern, 0, pages_per_payload,
1889 				blob_op_complete, NULL);
1890 	poll_threads();
1891 	CU_ASSERT(g_bserrno == 0);
1892 
1893 	memset(payload_read, 0xFF, payload_size);
1894 	iov_read[0].iov_base = payload_read;
1895 	iov_read[0].iov_len = cluster_size;
1896 	iov_read[1].iov_base = payload_read + cluster_size;
1897 	iov_read[1].iov_len = cluster_size * 4;
1898 	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL);
1899 	poll_threads();
1900 	CU_ASSERT(g_bserrno == 0);
1901 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);
1902 
1903 	/* 2. Write test. */
1904 	iov_write[0].iov_base = payload_read;
1905 	iov_write[0].iov_len = cluster_size * 2;
1906 	iov_write[1].iov_base = payload_read + cluster_size * 2;
1907 	iov_write[1].iov_len = cluster_size * 3;
1908 	spdk_blob_io_writev(blob, channel, iov_write, 2, 0, pages_per_payload, blob_op_complete, NULL);
1909 	poll_threads();
1910 	CU_ASSERT(g_bserrno == 0);
1911 
1912 	memset(payload_read, 0xFF, payload_size);
1913 	_blob_io_read_no_split(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
1914 	poll_threads();
1915 	CU_ASSERT(g_bserrno == 0);
1916 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);
1917 
1918 	spdk_bs_free_io_channel(channel);
1919 	poll_threads();
1920 
1921 	g_blob = NULL;
1922 	g_blobid = 0;
1923 
1924 	free(payload_read);
1925 	free(payload_write);
1926 	free(payload_pattern);
1927 
1928 	ut_blob_close_and_delete(bs, blob);
1929 }
1930 
1931 static void
1932 blob_unmap(void)
1933 {
1934 	struct spdk_blob_store *bs = g_bs;
1935 	struct spdk_blob *blob;
1936 	struct spdk_io_channel *channel;
1937 	struct spdk_blob_opts opts;
1938 	uint8_t payload[4096];
1939 	int i;
1940 
1941 	channel = spdk_bs_alloc_io_channel(bs);
1942 	CU_ASSERT(channel != NULL);
1943 
1944 	ut_spdk_blob_opts_init(&opts);
1945 	opts.num_clusters = 10;
1946 
1947 	blob = ut_blob_create_and_open(bs, &opts);
1948 
1949 	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
1950 	poll_threads();
1951 	CU_ASSERT(g_bserrno == 0);
1952 
1953 	memset(payload, 0, sizeof(payload));
1954 	payload[0] = 0xFF;
1955 
1956 	/*
1957 	 * Set first byte of every cluster to 0xFF.
1958 	 * First cluster on device is reserved so let's start from cluster number 1
1959 	 */
1960 	for (i = 1; i < 11; i++) {
1961 		g_dev_buffer[i * SPDK_BLOB_OPTS_CLUSTER_SZ] = 0xFF;
1962 	}
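	/* This assumes the blob's clusters were allocated contiguously starting at device
	 * cluster 1 (device cluster 0 being the reserved metadata cluster mentioned above),
	 * i.e. device cluster i backs blob cluster i - 1; the reads below rely on the same
	 * mapping.
	 */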
1963 
1964 	/* Confirm writes */
1965 	for (i = 0; i < 10; i++) {
1966 		payload[0] = 0;
1967 		spdk_blob_io_read(blob, channel, &payload, i * SPDK_BLOB_OPTS_CLUSTER_SZ / 4096, 1,
1968 				  blob_op_complete, NULL);
1969 		poll_threads();
1970 		CU_ASSERT(g_bserrno == 0);
1971 		CU_ASSERT(payload[0] == 0xFF);
1972 	}
1973 
1974 	/* Mark some clusters as unallocated */
1975 	blob->active.clusters[1] = 0;
1976 	blob->active.clusters[2] = 0;
1977 	blob->active.clusters[3] = 0;
1978 	blob->active.clusters[6] = 0;
1979 	blob->active.clusters[8] = 0;
1980 
1981 	/* Unmap clusters by resizing to 0 */
1982 	spdk_blob_resize(blob, 0, blob_op_complete, NULL);
1983 	poll_threads();
1984 	CU_ASSERT(g_bserrno == 0);
1985 
1986 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
1987 	poll_threads();
1988 	CU_ASSERT(g_bserrno == 0);
1989 
1990 	/* Confirm that only 'allocated' clusters were unmapped */
1991 	for (i = 1; i < 11; i++) {
1992 		switch (i) {
1993 		case 2:
1994 		case 3:
1995 		case 4:
1996 		case 7:
1997 		case 9:
1998 			CU_ASSERT(g_dev_buffer[i * SPDK_BLOB_OPTS_CLUSTER_SZ] == 0xFF);
1999 			break;
2000 		default:
2001 			CU_ASSERT(g_dev_buffer[i * SPDK_BLOB_OPTS_CLUSTER_SZ] == 0);
2002 			break;
2003 		}
2004 	}
2005 
2006 	spdk_bs_free_io_channel(channel);
2007 	poll_threads();
2008 
2009 	ut_blob_close_and_delete(bs, blob);
2010 }
2011 
2012 static void
2013 blob_iter(void)
2014 {
2015 	struct spdk_blob_store *bs = g_bs;
2016 	struct spdk_blob *blob;
2017 	spdk_blob_id blobid;
2018 	struct spdk_blob_opts blob_opts;
2019 
2020 	spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL);
2021 	poll_threads();
2022 	CU_ASSERT(g_blob == NULL);
2023 	CU_ASSERT(g_bserrno == -ENOENT);
2024 
2025 	ut_spdk_blob_opts_init(&blob_opts);
2026 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
2027 	poll_threads();
2028 	CU_ASSERT(g_bserrno == 0);
2029 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
2030 	blobid = g_blobid;
2031 
2032 	spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL);
2033 	poll_threads();
2034 	CU_ASSERT(g_blob != NULL);
2035 	CU_ASSERT(g_bserrno == 0);
2036 	blob = g_blob;
2037 	CU_ASSERT(spdk_blob_get_id(blob) == blobid);
2038 
2039 	spdk_bs_iter_next(bs, blob, blob_op_with_handle_complete, NULL);
2040 	poll_threads();
2041 	CU_ASSERT(g_blob == NULL);
2042 	CU_ASSERT(g_bserrno == -ENOENT);
2043 }
2044 
2045 static void
2046 blob_xattr(void)
2047 {
2048 	struct spdk_blob_store *bs = g_bs;
2049 	struct spdk_blob *blob = g_blob;
2050 	spdk_blob_id blobid = spdk_blob_get_id(blob);
2051 	uint64_t length;
2052 	int rc;
2053 	const char *name1, *name2;
2054 	const void *value;
2055 	size_t value_len;
2056 	struct spdk_xattr_names *names;
2057 
2058 	/* Test that set_xattr fails if md_ro flag is set. */
2059 	blob->md_ro = true;
2060 	rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
2061 	CU_ASSERT(rc == -EPERM);
2062 
2063 	blob->md_ro = false;
2064 	rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
2065 	CU_ASSERT(rc == 0);
2066 
2067 	length = 2345;
2068 	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
2069 	CU_ASSERT(rc == 0);
2070 
2071 	/* Overwrite "length" xattr. */
2072 	length = 3456;
2073 	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
2074 	CU_ASSERT(rc == 0);
2075 
2076 	/* get_xattr should still work even if md_ro flag is set. */
2077 	value = NULL;
2078 	blob->md_ro = true;
2079 	rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len);
2080 	CU_ASSERT(rc == 0);
2081 	SPDK_CU_ASSERT_FATAL(value != NULL);
2082 	CU_ASSERT(*(uint64_t *)value == length);
2083 	CU_ASSERT(value_len == 8);
2084 	blob->md_ro = false;
2085 
2086 	rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len);
2087 	CU_ASSERT(rc == -ENOENT);
2088 
2089 	names = NULL;
2090 	rc = spdk_blob_get_xattr_names(blob, &names);
2091 	CU_ASSERT(rc == 0);
2092 	SPDK_CU_ASSERT_FATAL(names != NULL);
2093 	CU_ASSERT(spdk_xattr_names_get_count(names) == 2);
2094 	name1 = spdk_xattr_names_get_name(names, 0);
2095 	SPDK_CU_ASSERT_FATAL(name1 != NULL);
2096 	CU_ASSERT(!strcmp(name1, "name") || !strcmp(name1, "length"));
2097 	name2 = spdk_xattr_names_get_name(names, 1);
2098 	SPDK_CU_ASSERT_FATAL(name2 != NULL);
2099 	CU_ASSERT(!strcmp(name2, "name") || !strcmp(name2, "length"));
2100 	CU_ASSERT(strcmp(name1, name2));
2101 	spdk_xattr_names_free(names);
2102 
2103 	/* Confirm that remove_xattr fails if md_ro is set to true. */
2104 	blob->md_ro = true;
2105 	rc = spdk_blob_remove_xattr(blob, "name");
2106 	CU_ASSERT(rc == -EPERM);
2107 
2108 	blob->md_ro = false;
2109 	rc = spdk_blob_remove_xattr(blob, "name");
2110 	CU_ASSERT(rc == 0);
2111 
2112 	rc = spdk_blob_remove_xattr(blob, "foobar");
2113 	CU_ASSERT(rc == -ENOENT);
2114 
2115 	/* Set internal xattr */
2116 	length = 7898;
2117 	rc = blob_set_xattr(blob, "internal", &length, sizeof(length), true);
2118 	CU_ASSERT(rc == 0);
2119 	rc = blob_get_xattr_value(blob, "internal", &value, &value_len, true);
2120 	CU_ASSERT(rc == 0);
2121 	CU_ASSERT(*(uint64_t *)value == length);
2122 	/* try to get public xattr with same name */
2123 	rc = spdk_blob_get_xattr_value(blob, "internal", &value, &value_len);
2124 	CU_ASSERT(rc != 0);
2125 	rc = blob_get_xattr_value(blob, "internal", &value, &value_len, false);
2126 	CU_ASSERT(rc != 0);
2127 	/* Check if SPDK_BLOB_INTERNAL_XATTR is set */
2128 	CU_ASSERT((blob->invalid_flags & SPDK_BLOB_INTERNAL_XATTR) ==
2129 		  SPDK_BLOB_INTERNAL_XATTR);
2130 
2131 	spdk_blob_close(blob, blob_op_complete, NULL);
2132 	poll_threads();
2133 
2134 	/* Check if xattrs are persisted */
2135 	ut_bs_reload(&bs, NULL);
2136 
2137 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
2138 	poll_threads();
2139 	CU_ASSERT(g_bserrno == 0);
2140 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
2141 	blob = g_blob;
2142 
2143 	rc = blob_get_xattr_value(blob, "internal", &value, &value_len, true);
2144 	CU_ASSERT(rc == 0);
2145 	CU_ASSERT(*(uint64_t *)value == length);
2146 
2147 	/* try to get internal xattr through public call */
2148 	rc = spdk_blob_get_xattr_value(blob, "internal", &value, &value_len);
2149 	CU_ASSERT(rc != 0);
2150 
2151 	rc = blob_remove_xattr(blob, "internal", true);
2152 	CU_ASSERT(rc == 0);
2153 
2154 	CU_ASSERT((blob->invalid_flags & SPDK_BLOB_INTERNAL_XATTR) == 0);
2155 }
2156 
2157 static void
2158 blob_parse_md(void)
2159 {
2160 	struct spdk_blob_store *bs = g_bs;
2161 	struct spdk_blob *blob;
2162 	int rc;
2163 	uint32_t used_pages;
2164 	size_t xattr_length;
2165 	char *xattr;
2166 
2167 	used_pages = spdk_bit_array_count_set(bs->used_md_pages);
2168 	blob = ut_blob_create_and_open(bs, NULL);
2169 
2170 	/* Create a large xattr to force more than 1 page of metadata. */
2171 	xattr_length = SPDK_BS_MAX_DESC_SIZE - sizeof(struct spdk_blob_md_descriptor_xattr) -
2172 		       strlen("large_xattr");
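	/* The value is sized so that descriptor header + name + value add up to
	 * SPDK_BS_MAX_DESC_SIZE, i.e. the xattr alone fills the descriptor area of one
	 * metadata page, which is what forces this blob's metadata onto a second page.
	 */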
2173 	xattr = calloc(xattr_length, sizeof(char));
2174 	SPDK_CU_ASSERT_FATAL(xattr != NULL);
2175 	rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
2176 	free(xattr);
2177 	SPDK_CU_ASSERT_FATAL(rc == 0);
2178 
2179 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
2180 	poll_threads();
2181 
2182 	/* Delete the blob and verify that the used md page count returns to what it was before its creation. */
2183 	SPDK_CU_ASSERT_FATAL(used_pages != spdk_bit_array_count_set(bs->used_md_pages));
2184 	ut_blob_close_and_delete(bs, blob);
2185 	SPDK_CU_ASSERT_FATAL(used_pages == spdk_bit_array_count_set(bs->used_md_pages));
2186 }
2187 
2188 static void
2189 bs_load(void)
2190 {
2191 	struct spdk_blob_store *bs;
2192 	struct spdk_bs_dev *dev;
2193 	spdk_blob_id blobid;
2194 	struct spdk_blob *blob;
2195 	struct spdk_bs_super_block *super_block;
2196 	uint64_t length;
2197 	int rc;
2198 	const void *value;
2199 	size_t value_len;
2200 	struct spdk_bs_opts opts;
2201 	struct spdk_blob_opts blob_opts;
2202 
2203 	dev = init_dev();
2204 	spdk_bs_opts_init(&opts, sizeof(opts));
2205 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2206 
2207 	/* Initialize a new blob store */
2208 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2209 	poll_threads();
2210 	CU_ASSERT(g_bserrno == 0);
2211 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2212 	bs = g_bs;
2213 
2214 	/* Try to open a blobid that does not exist */
2215 	spdk_bs_open_blob(bs, 0, blob_op_with_handle_complete, NULL);
2216 	poll_threads();
2217 	CU_ASSERT(g_bserrno == -ENOENT);
2218 	CU_ASSERT(g_blob == NULL);
2219 
2220 	/* Create a blob */
2221 	blob = ut_blob_create_and_open(bs, NULL);
2222 	blobid = spdk_blob_get_id(blob);
2223 
2224 	/* Try again to open valid blob but without the upper bit set */
2225 	spdk_bs_open_blob(bs, blobid & 0xFFFFFFFF, blob_op_with_handle_complete, NULL);
2226 	poll_threads();
2227 	CU_ASSERT(g_bserrno == -ENOENT);
2228 	CU_ASSERT(g_blob == NULL);
2229 
2230 	/* Set some xattrs */
2231 	rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
2232 	CU_ASSERT(rc == 0);
2233 
2234 	length = 2345;
2235 	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
2236 	CU_ASSERT(rc == 0);
2237 
2238 	/* Resize the blob */
2239 	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
2240 	poll_threads();
2241 	CU_ASSERT(g_bserrno == 0);
2242 
2243 	spdk_blob_close(blob, blob_op_complete, NULL);
2244 	poll_threads();
2245 	CU_ASSERT(g_bserrno == 0);
2246 	blob = NULL;
2247 	g_blob = NULL;
2248 	g_blobid = SPDK_BLOBID_INVALID;
2249 
2250 	/* Unload the blob store */
2251 	spdk_bs_unload(bs, bs_op_complete, NULL);
2252 	poll_threads();
2253 	CU_ASSERT(g_bserrno == 0);
2254 	g_bs = NULL;
2255 	g_blob = NULL;
2256 	g_blobid = 0;
2257 
2258 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
2259 	CU_ASSERT(super_block->clean == 1);
2260 
2261 	/* Load should fail for device with an unsupported blocklen */
2262 	dev = init_dev();
2263 	dev->blocklen = SPDK_BS_PAGE_SIZE * 2;
2264 	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
2265 	poll_threads();
2266 	CU_ASSERT(g_bserrno == -EINVAL);
2267 
2268 	/* Load should fail when max_md_ops is set to zero */
2269 	dev = init_dev();
2270 	spdk_bs_opts_init(&opts, sizeof(opts));
2271 	opts.max_md_ops = 0;
2272 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2273 	poll_threads();
2274 	CU_ASSERT(g_bserrno == -EINVAL);
2275 
2276 	/* Load should fail when max_channel_ops is set to zero */
2277 	dev = init_dev();
2278 	spdk_bs_opts_init(&opts, sizeof(opts));
2279 	opts.max_channel_ops = 0;
2280 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2281 	poll_threads();
2282 	CU_ASSERT(g_bserrno == -EINVAL);
2283 
2284 	/* Load an existing blob store */
2285 	dev = init_dev();
2286 	spdk_bs_opts_init(&opts, sizeof(opts));
2287 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2288 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2289 	poll_threads();
2290 	CU_ASSERT(g_bserrno == 0);
2291 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2292 	bs = g_bs;
2293 
2294 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
2295 	CU_ASSERT(super_block->clean == 1);
2296 	CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen);
2297 
2298 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
2299 	poll_threads();
2300 	CU_ASSERT(g_bserrno == 0);
2301 	CU_ASSERT(g_blob != NULL);
2302 	blob = g_blob;
2303 
2304 	/* Verify that blobstore is marked dirty after first metadata sync */
2305 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
2306 	CU_ASSERT(super_block->clean == 1);
2307 
2308 	/* Get the xattrs */
2309 	value = NULL;
2310 	rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len);
2311 	CU_ASSERT(rc == 0);
2312 	SPDK_CU_ASSERT_FATAL(value != NULL);
2313 	CU_ASSERT(*(uint64_t *)value == length);
2314 	CU_ASSERT(value_len == 8);
2315 
2316 	rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len);
2317 	CU_ASSERT(rc == -ENOENT);
2318 
2319 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
2320 
2321 	spdk_blob_close(blob, blob_op_complete, NULL);
2322 	poll_threads();
2323 	CU_ASSERT(g_bserrno == 0);
2324 	blob = NULL;
2325 	g_blob = NULL;
2326 
2327 	spdk_bs_unload(bs, bs_op_complete, NULL);
2328 	poll_threads();
2329 	CU_ASSERT(g_bserrno == 0);
2330 	g_bs = NULL;
2331 
2332 	/* Load should fail: bdev size < saved size */
2333 	dev = init_dev();
2334 	dev->blockcnt /= 2;
2335 
2336 	spdk_bs_opts_init(&opts, sizeof(opts));
2337 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2338 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2339 	poll_threads();
2340 
2341 	CU_ASSERT(g_bserrno == -EILSEQ);
2342 
2343 	/* Load should succeed: bdev size > saved size */
2344 	dev = init_dev();
2345 	dev->blockcnt *= 4;
2346 
2347 	spdk_bs_opts_init(&opts, sizeof(opts));
2348 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2349 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2350 	poll_threads();
2351 	CU_ASSERT(g_bserrno == 0);
2352 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2353 	bs = g_bs;
2354 
2355 	CU_ASSERT(g_bserrno == 0);
2356 	spdk_bs_unload(bs, bs_op_complete, NULL);
2357 	poll_threads();
2358 
2359 
2360 	/* Test compatibility mode */
2361 
2362 	dev = init_dev();
2363 	super_block->size = 0;
2364 	super_block->crc = blob_md_page_calc_crc(super_block);
2365 
2366 	spdk_bs_opts_init(&opts, sizeof(opts));
2367 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2368 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2369 	poll_threads();
2370 	CU_ASSERT(g_bserrno == 0);
2371 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2372 	bs = g_bs;
2373 
2374 	/* Create a blob */
2375 	ut_spdk_blob_opts_init(&blob_opts);
2376 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
2377 	poll_threads();
2378 	CU_ASSERT(g_bserrno == 0);
2379 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
2380 
2381 	/* Blobstore should update number of blocks in super_block */
2382 	CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen);
2383 	CU_ASSERT(super_block->clean == 0);
2384 
2385 	spdk_bs_unload(bs, bs_op_complete, NULL);
2386 	poll_threads();
2387 	CU_ASSERT(g_bserrno == 0);
2388 	CU_ASSERT(super_block->clean == 1);
2389 	g_bs = NULL;
2390 
2391 }
2392 
2393 static void
2394 bs_load_pending_removal(void)
2395 {
2396 	struct spdk_blob_store *bs = g_bs;
2397 	struct spdk_blob_opts opts;
2398 	struct spdk_blob *blob, *snapshot;
2399 	spdk_blob_id blobid, snapshotid;
2400 	const void *value;
2401 	size_t value_len;
2402 	int rc;
2403 
2404 	/* Create blob */
2405 	ut_spdk_blob_opts_init(&opts);
2406 	opts.num_clusters = 10;
2407 
2408 	blob = ut_blob_create_and_open(bs, &opts);
2409 	blobid = spdk_blob_get_id(blob);
2410 
2411 	/* Create snapshot */
2412 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
2413 	poll_threads();
2414 	CU_ASSERT(g_bserrno == 0);
2415 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
2416 	snapshotid = g_blobid;
2417 
2418 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
2419 	poll_threads();
2420 	CU_ASSERT(g_bserrno == 0);
2421 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
2422 	snapshot = g_blob;
2423 
2424 	/* Set SNAPSHOT_PENDING_REMOVAL xattr */
2425 	snapshot->md_ro = false;
2426 	rc = blob_set_xattr(snapshot, SNAPSHOT_PENDING_REMOVAL, &blobid, sizeof(spdk_blob_id), true);
2427 	CU_ASSERT(rc == 0);
2428 	snapshot->md_ro = true;
2429 
2430 	spdk_blob_close(snapshot, blob_op_complete, NULL);
2431 	poll_threads();
2432 	CU_ASSERT(g_bserrno == 0);
2433 
2434 	spdk_blob_close(blob, blob_op_complete, NULL);
2435 	poll_threads();
2436 	CU_ASSERT(g_bserrno == 0);
2437 
2438 	/* Reload blobstore */
2439 	ut_bs_reload(&bs, NULL);
2440 
2441 	/* Snapshot should not be removed as blob is still pointing to it */
2442 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
2443 	poll_threads();
2444 	CU_ASSERT(g_bserrno == 0);
2445 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
2446 	snapshot = g_blob;
2447 
2448 	/* SNAPSHOT_PENDING_REMOVAL xattr should be removed during load */
2449 	rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_PENDING_REMOVAL, &value, &value_len);
2450 	CU_ASSERT(rc != 0);
2451 
2452 	/* Set SNAPSHOT_PENDING_REMOVAL xattr again */
2453 	snapshot->md_ro = false;
2454 	rc = blob_set_xattr(snapshot, SNAPSHOT_PENDING_REMOVAL, &blobid, sizeof(spdk_blob_id), true);
2455 	CU_ASSERT(rc == 0);
2456 	snapshot->md_ro = true;
2457 
2458 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
2459 	poll_threads();
2460 	CU_ASSERT(g_bserrno == 0);
2461 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
2462 	blob = g_blob;
2463 
2464 	/* Remove parent_id from blob by removing BLOB_SNAPSHOT xattr */
2465 	blob_remove_xattr(blob, BLOB_SNAPSHOT, true);
2466 
2467 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
2468 	poll_threads();
2469 	CU_ASSERT(g_bserrno == 0);
2470 
2471 	spdk_blob_close(snapshot, blob_op_complete, NULL);
2472 	poll_threads();
2473 	CU_ASSERT(g_bserrno == 0);
2474 
2475 	spdk_blob_close(blob, blob_op_complete, NULL);
2476 	poll_threads();
2477 	CU_ASSERT(g_bserrno == 0);
2478 
2479 	/* Reload blobstore */
2480 	ut_bs_reload(&bs, NULL);
2481 
2482 	/* Snapshot should be removed as blob is not pointing to it anymore */
2483 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
2484 	poll_threads();
2485 	CU_ASSERT(g_bserrno != 0);
2486 }
2487 
2488 static void
2489 bs_load_custom_cluster_size(void)
2490 {
2491 	struct spdk_blob_store *bs;
2492 	struct spdk_bs_dev *dev;
2493 	struct spdk_bs_super_block *super_block;
2494 	struct spdk_bs_opts opts;
2495 	uint32_t custom_cluster_size = 4194304; /* 4MiB */
2496 	uint32_t cluster_sz;
2497 	uint64_t total_clusters;
2498 
2499 	dev = init_dev();
2500 	spdk_bs_opts_init(&opts, sizeof(opts));
2501 	opts.cluster_sz = custom_cluster_size;
2502 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2503 
2504 	/* Initialize a new blob store */
2505 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2506 	poll_threads();
2507 	CU_ASSERT(g_bserrno == 0);
2508 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2509 	bs = g_bs;
2510 	cluster_sz = bs->cluster_sz;
2511 	total_clusters = bs->total_clusters;
2512 
2513 	/* Unload the blob store */
2514 	spdk_bs_unload(bs, bs_op_complete, NULL);
2515 	poll_threads();
2516 	CU_ASSERT(g_bserrno == 0);
2517 	g_bs = NULL;
2518 	g_blob = NULL;
2519 	g_blobid = 0;
2520 
2521 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
2522 	CU_ASSERT(super_block->clean == 1);
2523 
2524 	/* Load an existing blob store */
2525 	dev = init_dev();
2526 	spdk_bs_opts_init(&opts, sizeof(opts));
2527 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2528 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2529 	poll_threads();
2530 	CU_ASSERT(g_bserrno == 0);
2531 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2532 	bs = g_bs;
2533 	/* Compare cluster size and cluster count to the values from initialization */
2534 	CU_ASSERT(cluster_sz == bs->cluster_sz);
2535 	CU_ASSERT(total_clusters == bs->total_clusters);
2536 
2537 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
2538 	CU_ASSERT(super_block->clean == 1);
2539 	CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen);
2540 
2541 	spdk_bs_unload(bs, bs_op_complete, NULL);
2542 	poll_threads();
2543 	CU_ASSERT(g_bserrno == 0);
2544 	CU_ASSERT(super_block->clean == 1);
2545 	g_bs = NULL;
2546 }
2547 
2548 static void
2549 bs_load_after_failed_grow(void)
2550 {
2551 	struct spdk_blob_store *bs;
2552 	struct spdk_bs_dev *dev;
2553 	struct spdk_bs_super_block *super_block;
2554 	struct spdk_bs_opts opts;
2555 	struct spdk_bs_md_mask *mask;
2556 	struct spdk_blob_opts blob_opts;
2557 	struct spdk_blob *blob, *snapshot;
2558 	spdk_blob_id blobid, snapshotid;
2559 	uint64_t total_data_clusters;
2560 
2561 	dev = init_dev();
2562 	spdk_bs_opts_init(&opts, sizeof(opts));
2563 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2564 	/*
2565 	 * The bdev_size is 64M, cluster_sz is 1M, so there are 64 clusters. The
2566 	 * blobstore will create 64 md pages by default. We set num_md_pages to 128,
2567 	 * so the blobstore can grow to double its size.
2568 	 */
2569 	opts.num_md_pages = 128;
2570 
2571 	/* Initialize a new blob store */
2572 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2573 	poll_threads();
2574 	CU_ASSERT(g_bserrno == 0);
2575 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2576 	bs = g_bs;
2577 
2578 	/* Create blob */
2579 	ut_spdk_blob_opts_init(&blob_opts);
2580 	blob_opts.num_clusters = 10;
2581 
2582 	blob = ut_blob_create_and_open(bs, &blob_opts);
2583 	blobid = spdk_blob_get_id(blob);
2584 
2585 	/* Create snapshot */
2586 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
2587 	poll_threads();
2588 	CU_ASSERT(g_bserrno == 0);
2589 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
2590 	snapshotid = g_blobid;
2591 
2592 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
2593 	poll_threads();
2594 	CU_ASSERT(g_bserrno == 0);
2595 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
2596 	snapshot = g_blob;
2597 
2598 	spdk_blob_close(snapshot, blob_op_complete, NULL);
2599 	poll_threads();
2600 	CU_ASSERT(g_bserrno == 0);
2601 
2602 	spdk_blob_close(blob, blob_op_complete, NULL);
2603 	poll_threads();
2604 	CU_ASSERT(g_bserrno == 0);
2605 
2606 	total_data_clusters = bs->total_data_clusters;
2607 	CU_ASSERT(bs->num_free_clusters + 10 == total_data_clusters);
2608 
2609 	/* Unload the blob store */
2610 	spdk_bs_unload(bs, bs_op_complete, NULL);
2611 	poll_threads();
2612 	CU_ASSERT(g_bserrno == 0);
2613 	g_bs = NULL;
2614 	g_blob = NULL;
2615 	g_blobid = 0;
2616 
2617 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
2618 	CU_ASSERT(super_block->clean == 1);
2619 
2620 	mask = (struct spdk_bs_md_mask *)(g_dev_buffer + super_block->used_cluster_mask_start * 4096);
2621 	CU_ASSERT(mask->type == SPDK_MD_MASK_TYPE_USED_CLUSTERS);
2622 	CU_ASSERT(mask->length == super_block->size / super_block->cluster_size);
2623 
2624 	/*
2625 	 * We change mask->length to emulate this scenario: a spdk_bs_grow failed after it changed
2626 	 * the used_cluster bitmap length, but before it updated the super block.
2627 	 */
2628 	mask->length *= 2;
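	/* Concretely, given the 64M dev with 1M clusters above, mask->length goes from 64 to
	 * 128, the value a completed grow to the doubled dev would have written, while
	 * super_block->size still describes the original 64M device.
	 */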
2629 
2630 	/* Load an existing blob store */
2631 	dev = init_dev();
2632 	dev->blockcnt *= 2;
2633 	spdk_bs_opts_init(&opts, sizeof(opts));
2634 	opts.clear_method = BS_CLEAR_WITH_NONE;
2635 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2636 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2637 	poll_threads();
2638 	CU_ASSERT(g_bserrno == 0);
2639 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2640 	bs = g_bs;
2641 
2642 	/* Check the capacity is the same as before */
2643 	CU_ASSERT(bs->total_data_clusters == total_data_clusters);
2644 	CU_ASSERT(bs->num_free_clusters + 10 == total_data_clusters);
2645 
2646 	/* Check the blob and the snapshot are still available */
2647 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
2648 	poll_threads();
2649 	CU_ASSERT(g_bserrno == 0);
2650 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
2651 	blob = g_blob;
2652 
2653 	spdk_blob_close(blob, blob_op_complete, NULL);
2654 	poll_threads();
2655 	CU_ASSERT(g_bserrno == 0);
2656 
2657 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
2658 	poll_threads();
2659 	CU_ASSERT(g_bserrno == 0);
2660 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
2661 	snapshot = g_blob;
2662 
2663 	spdk_blob_close(snapshot, blob_op_complete, NULL);
2664 	poll_threads();
2665 	CU_ASSERT(g_bserrno == 0);
2666 
2667 	spdk_bs_unload(bs, bs_op_complete, NULL);
2668 	poll_threads();
2669 	CU_ASSERT(g_bserrno == 0);
2670 	CU_ASSERT(super_block->clean == 1);
2671 	g_bs = NULL;
2672 }
2673 
2674 static void
2675 bs_type(void)
2676 {
2677 	struct spdk_blob_store *bs;
2678 	struct spdk_bs_dev *dev;
2679 	struct spdk_bs_opts opts;
2680 
2681 	dev = init_dev();
2682 	spdk_bs_opts_init(&opts, sizeof(opts));
2683 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2684 
2685 	/* Initialize a new blob store */
2686 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2687 	poll_threads();
2688 	CU_ASSERT(g_bserrno == 0);
2689 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2690 	bs = g_bs;
2691 
2692 	/* Unload the blob store */
2693 	spdk_bs_unload(bs, bs_op_complete, NULL);
2694 	poll_threads();
2695 	CU_ASSERT(g_bserrno == 0);
2696 	g_bs = NULL;
2697 	g_blob = NULL;
2698 	g_blobid = 0;
2699 
2700 	/* Load non-existent blobstore type */
2701 	dev = init_dev();
2702 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "NONEXISTING");
2703 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2704 	poll_threads();
2705 	CU_ASSERT(g_bserrno != 0);
2706 
2707 	/* Load with empty blobstore type */
2708 	dev = init_dev();
2709 	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
2710 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2711 	poll_threads();
2712 	CU_ASSERT(g_bserrno == 0);
2713 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2714 	bs = g_bs;
2715 
2716 	spdk_bs_unload(bs, bs_op_complete, NULL);
2717 	poll_threads();
2718 	CU_ASSERT(g_bserrno == 0);
2719 	g_bs = NULL;
2720 
2721 	/* Initialize a new blob store with empty bstype */
2722 	dev = init_dev();
2723 	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
2724 	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
2725 	poll_threads();
2726 	CU_ASSERT(g_bserrno == 0);
2727 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2728 	bs = g_bs;
2729 
2730 	spdk_bs_unload(bs, bs_op_complete, NULL);
2731 	poll_threads();
2732 	CU_ASSERT(g_bserrno == 0);
2733 	g_bs = NULL;
2734 
2735 	/* Load non-existent blobstore type */
2736 	dev = init_dev();
2737 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "NONEXISTING");
2738 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2739 	poll_threads();
2740 	CU_ASSERT(g_bserrno != 0);
2741 
2742 	/* Load with empty blobstore type */
2743 	dev = init_dev();
2744 	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
2745 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2746 	poll_threads();
2747 	CU_ASSERT(g_bserrno == 0);
2748 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2749 	bs = g_bs;
2750 
2751 	spdk_bs_unload(bs, bs_op_complete, NULL);
2752 	poll_threads();
2753 	CU_ASSERT(g_bserrno == 0);
2754 	g_bs = NULL;
2755 }
2756 
2757 static void
2758 bs_super_block(void)
2759 {
2760 	struct spdk_blob_store *bs;
2761 	struct spdk_bs_dev *dev;
2762 	struct spdk_bs_super_block *super_block;
2763 	struct spdk_bs_opts opts;
2764 	struct spdk_bs_super_block_ver1 super_block_v1;
2765 
2766 	dev = init_dev();
2767 	spdk_bs_opts_init(&opts, sizeof(opts));
2768 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2769 
2770 	/* Initialize a new blob store */
2771 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2772 	poll_threads();
2773 	CU_ASSERT(g_bserrno == 0);
2774 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2775 	bs = g_bs;
2776 
2777 	/* Unload the blob store */
2778 	spdk_bs_unload(bs, bs_op_complete, NULL);
2779 	poll_threads();
2780 	CU_ASSERT(g_bserrno == 0);
2781 	g_bs = NULL;
2782 	g_blob = NULL;
2783 	g_blobid = 0;
2784 
2785 	/* Load an existing blob store with version newer than supported */
2786 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
2787 	super_block->version++;
2788 
2789 	dev = init_dev();
2790 	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
2791 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2792 	poll_threads();
2793 	CU_ASSERT(g_bserrno != 0);
2794 
2795 	/* Create a new blob store with super block version 1 */
2796 	dev = init_dev();
2797 	super_block_v1.version = 1;
2798 	memcpy(super_block_v1.signature, "SPDKBLOB", sizeof(super_block_v1.signature));
2799 	super_block_v1.length = 0x1000;
2800 	super_block_v1.clean = 1;
2801 	super_block_v1.super_blob = 0xFFFFFFFFFFFFFFFF;
2802 	super_block_v1.cluster_size = 0x100000;
2803 	super_block_v1.used_page_mask_start = 0x01;
2804 	super_block_v1.used_page_mask_len = 0x01;
2805 	super_block_v1.used_cluster_mask_start = 0x02;
2806 	super_block_v1.used_cluster_mask_len = 0x01;
2807 	super_block_v1.md_start = 0x03;
2808 	super_block_v1.md_len = 0x40;
2809 	memset(super_block_v1.reserved, 0, 4036);
2810 	super_block_v1.crc = blob_md_page_calc_crc(&super_block_v1);
2811 	memcpy(g_dev_buffer, &super_block_v1, sizeof(struct spdk_bs_super_block_ver1));
2812 
2813 	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
2814 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2815 	poll_threads();
2816 	CU_ASSERT(g_bserrno == 0);
2817 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2818 	bs = g_bs;
2819 
2820 	spdk_bs_unload(bs, bs_op_complete, NULL);
2821 	poll_threads();
2822 	CU_ASSERT(g_bserrno == 0);
2823 	g_bs = NULL;
2824 }
2825 
2826 static void
2827 bs_test_recover_cluster_count(void)
2828 {
2829 	struct spdk_blob_store *bs;
2830 	struct spdk_bs_dev *dev;
2831 	struct spdk_bs_super_block super_block;
2832 	struct spdk_bs_opts opts;
2833 
2834 	dev = init_dev();
2835 	spdk_bs_opts_init(&opts, sizeof(opts));
2836 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2837 
2838 	super_block.version = 3;
2839 	memcpy(super_block.signature, "SPDKBLOB", sizeof(super_block.signature));
2840 	super_block.length = 0x1000;
2841 	super_block.clean = 0;
2842 	super_block.super_blob = 0xFFFFFFFFFFFFFFFF;
2843 	super_block.cluster_size = 4096;
2844 	super_block.used_page_mask_start = 0x01;
2845 	super_block.used_page_mask_len = 0x01;
2846 	super_block.used_cluster_mask_start = 0x02;
2847 	super_block.used_cluster_mask_len = 0x01;
2848 	super_block.used_blobid_mask_start = 0x03;
2849 	super_block.used_blobid_mask_len = 0x01;
2850 	super_block.md_start = 0x04;
2851 	super_block.md_len = 0x40;
2852 	memset(super_block.bstype.bstype, 0, sizeof(super_block.bstype.bstype));
2853 	super_block.size = dev->blockcnt * dev->blocklen;
2854 	super_block.io_unit_size = 0x1000;
2855 	memset(super_block.reserved, 0, 4000);
2856 	super_block.crc = blob_md_page_calc_crc(&super_block);
2857 	memcpy(g_dev_buffer, &super_block, sizeof(struct spdk_bs_super_block));
2858 
2859 	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
2860 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2861 	poll_threads();
2862 	CU_ASSERT(g_bserrno == 0);
2863 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2864 	bs = g_bs;
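	/* Sanity check on the recovered cluster count: with cluster_size equal to the
	 * 4096-byte md page size, md pages and clusters coincide one-to-one, so recovery
	 * should treat the metadata region (md_start + md_len = 0x04 + 0x40 = 68 clusters)
	 * as used and leave the rest free.
	 */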
2865 	CU_ASSERT(bs->num_free_clusters == bs->total_clusters - (super_block.md_start +
2866 			super_block.md_len));
2867 
2868 	spdk_bs_unload(bs, bs_op_complete, NULL);
2869 	poll_threads();
2870 	CU_ASSERT(g_bserrno == 0);
2871 	g_bs = NULL;
2872 }
2873 
2874 static void
2875 bs_test_grow(void)
2876 {
2877 	struct spdk_blob_store *bs;
2878 	struct spdk_bs_dev *dev;
2879 	struct spdk_bs_super_block super_block;
2880 	struct spdk_bs_opts opts;
2881 	struct spdk_bs_md_mask mask;
2882 	uint64_t bdev_size;
2883 
2884 	dev = init_dev();
2885 	bdev_size = dev->blockcnt * dev->blocklen;
2886 	spdk_bs_opts_init(&opts, sizeof(opts));
2887 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2888 	poll_threads();
2889 	CU_ASSERT(g_bserrno == 0);
2890 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2891 	bs = g_bs;
2892 
2893 	spdk_bs_unload(bs, bs_op_complete, NULL);
2894 	poll_threads();
2895 	CU_ASSERT(g_bserrno == 0);
2896 	g_bs = NULL;
2897 
2898 	/*
2899 	 * To make sure all the metadata is written to the disk,
2900 	 * we check g_dev_buffer after spdk_bs_unload.
2901 	 */
2902 	memcpy(&super_block, g_dev_buffer, sizeof(struct spdk_bs_super_block));
2903 	CU_ASSERT(super_block.size == bdev_size);
2904 
2905 	/*
2906 	 * Make sure the used_cluster mask is correct.
2907 	 */
2908 	memcpy(&mask, g_dev_buffer + super_block.used_cluster_mask_start * 4096,
2909 	       sizeof(struct spdk_bs_md_mask));
2910 	CU_ASSERT(mask.type == SPDK_MD_MASK_TYPE_USED_CLUSTERS);
2911 	CU_ASSERT(mask.length == bdev_size / (1 * 1024 * 1024));
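	/* The 1M divisor above is the default cluster size, so mask.length is simply the
	 * cluster count: 64 for the default 64M dev here, and 128 after the grow below.
	 */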
2912 
2913 	/*
2914 	 * The default dev size is 64M; here we set the dev size to 128M, so
2915 	 * the blobstore will adjust the metadata according to the new size.
2916 	 * The new dev size is larger than the g_dev_buffer size, so we set clear_method
2917 	 * to NONE; otherwise the blobstore would try to clear the dev and write beyond
2918 	 * the end of g_dev_buffer.
2919 	 */
2920 	dev = init_dev();
2921 	dev->blockcnt = (128L * 1024L * 1024L) / dev->blocklen;
2922 	bdev_size = dev->blockcnt * dev->blocklen;
2923 	spdk_bs_opts_init(&opts, sizeof(opts));
2924 	opts.clear_method = BS_CLEAR_WITH_NONE;
2925 	spdk_bs_grow(dev, &opts, bs_op_with_handle_complete, NULL);
2926 	poll_threads();
2927 	CU_ASSERT(g_bserrno == 0);
2928 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2929 	bs = g_bs;
2930 
2931 	/*
2932 	 * After spdk_bs_grow, all metadata has been written to the disk,
2933 	 * so we can check g_dev_buffer now.
2934 	 */
2935 	memcpy(&super_block, g_dev_buffer, sizeof(struct spdk_bs_super_block));
2936 	CU_ASSERT(super_block.size == bdev_size);
2937 
2938 	/*
2939 	 * Make sure the used_cluster mask has been updated according to the bdev size
2940 	 */
2941 	memcpy(&mask, g_dev_buffer + super_block.used_cluster_mask_start * 4096,
2942 	       sizeof(struct spdk_bs_md_mask));
2943 	CU_ASSERT(mask.type == SPDK_MD_MASK_TYPE_USED_CLUSTERS);
2944 	CU_ASSERT(mask.length == bdev_size / (1 * 1024 * 1024));
2945 
2946 	spdk_bs_unload(bs, bs_op_complete, NULL);
2947 	poll_threads();
2948 	CU_ASSERT(g_bserrno == 0);
2949 	g_bs = NULL;
2950 }
2951 
2952 /*
2953  * Create a blobstore and then unload it.
2954  */
2955 static void
2956 bs_unload(void)
2957 {
2958 	struct spdk_blob_store *bs = g_bs;
2959 	struct spdk_blob *blob;
2960 
2961 	/* Create a blob and open it. */
2962 	blob = ut_blob_create_and_open(bs, NULL);
2963 
2964 	/* Try to unload blobstore, should fail with open blob */
2965 	g_bserrno = -1;
2966 	spdk_bs_unload(bs, bs_op_complete, NULL);
2967 	poll_threads();
2968 	CU_ASSERT(g_bserrno == -EBUSY);
2969 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2970 
2971 	/* Close the blob, then successfully unload blobstore */
2972 	g_bserrno = -1;
2973 	spdk_blob_close(blob, blob_op_complete, NULL);
2974 	poll_threads();
2975 	CU_ASSERT(g_bserrno == 0);
2976 }
2977 
2978 /*
2979  * Create a blobstore with a cluster size different than the default, and ensure it is
2980  *  persisted.
2981  */
2982 static void
2983 bs_cluster_sz(void)
2984 {
2985 	struct spdk_blob_store *bs;
2986 	struct spdk_bs_dev *dev;
2987 	struct spdk_bs_opts opts;
2988 	uint32_t cluster_sz;
2989 
2990 	/* Set cluster size to zero */
2991 	dev = init_dev();
2992 	spdk_bs_opts_init(&opts, sizeof(opts));
2993 	opts.cluster_sz = 0;
2994 
2995 	/* Initialize a new blob store */
2996 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2997 	poll_threads();
2998 	CU_ASSERT(g_bserrno == -EINVAL);
2999 	SPDK_CU_ASSERT_FATAL(g_bs == NULL);
3000 
3001 	/*
3002 	 * Set cluster size to the blobstore page size;
3003 	 * to work, it must be at least twice the blobstore page size.
3004 	 */
3005 	dev = init_dev();
3006 	spdk_bs_opts_init(&opts, sizeof(opts));
3007 	opts.cluster_sz = SPDK_BS_PAGE_SIZE;
3008 
3009 	/* Initialize a new blob store */
3010 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
3011 	poll_threads();
3012 	CU_ASSERT(g_bserrno == -ENOMEM);
3013 	SPDK_CU_ASSERT_FATAL(g_bs == NULL);
3014 
3015 	/*
3016 	 * Set cluster size to less than the page size;
3017 	 * to work, it must be at least twice the blobstore page size.
3018 	 */
3019 	dev = init_dev();
3020 	spdk_bs_opts_init(&opts, sizeof(opts));
3021 	opts.cluster_sz = SPDK_BS_PAGE_SIZE - 1;
3022 
3023 	/* Initialize a new blob store */
3024 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
3025 	poll_threads();
3026 	CU_ASSERT(g_bserrno == -EINVAL);
3027 	SPDK_CU_ASSERT_FATAL(g_bs == NULL);
3028 
3029 	/* Set cluster size to twice the default */
3030 	dev = init_dev();
3031 	spdk_bs_opts_init(&opts, sizeof(opts));
3032 	opts.cluster_sz *= 2;
3033 	cluster_sz = opts.cluster_sz;
3034 
3035 	/* Initialize a new blob store */
3036 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
3037 	poll_threads();
3038 	CU_ASSERT(g_bserrno == 0);
3039 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3040 	bs = g_bs;
3041 
3042 	CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz);
3043 
3044 	ut_bs_reload(&bs, &opts);
3045 
3046 	CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz);
3047 
3048 	spdk_bs_unload(bs, bs_op_complete, NULL);
3049 	poll_threads();
3050 	CU_ASSERT(g_bserrno == 0);
3051 	g_bs = NULL;
3052 }
3053 
3054 /*
3055  * Create a blobstore, reload it and ensure total usable cluster count
3056  *  stays the same.
3057  */
3058 static void
3059 bs_usable_clusters(void)
3060 {
3061 	struct spdk_blob_store *bs = g_bs;
3062 	struct spdk_blob *blob;
3063 	uint32_t clusters;
3064 	int i;
3065 
3066 
3067 	clusters = spdk_bs_total_data_cluster_count(bs);
3068 
3069 	ut_bs_reload(&bs, NULL);
3070 
3071 	CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters);
3072 
3073 	/* Create and resize blobs to make sure that the usable cluster count won't change */
3074 	for (i = 0; i < 4; i++) {
3075 		g_bserrno = -1;
3076 		g_blobid = SPDK_BLOBID_INVALID;
3077 		blob = ut_blob_create_and_open(bs, NULL);
3078 
3079 		spdk_blob_resize(blob, 10, blob_op_complete, NULL);
3080 		poll_threads();
3081 		CU_ASSERT(g_bserrno == 0);
3082 
3083 		g_bserrno = -1;
3084 		spdk_blob_close(blob, blob_op_complete, NULL);
3085 		poll_threads();
3086 		CU_ASSERT(g_bserrno == 0);
3087 
3088 		CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters);
3089 	}
3090 
3091 	/* Reload the blob store to make sure that nothing changed */
3092 	ut_bs_reload(&bs, NULL);
3093 
3094 	CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters);
3095 }
3096 
3097 /*
3098  * Test resizing of the metadata blob.  This requires creating enough blobs
3099  *  so that one cluster is not enough to fit the metadata for those blobs.
3100  *  To induce this condition to happen more quickly, we reduce the cluster
3101  *  size to 16KB, which means only 4 4KB blob metadata pages can fit.
3102  */
3103 static void
3104 bs_resize_md(void)
3105 {
3106 	struct spdk_blob_store *bs;
3107 	const int CLUSTER_PAGE_COUNT = 4;
3108 	const int NUM_BLOBS = CLUSTER_PAGE_COUNT * 4;
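	/* That is, 16KB clusters (4 pages of 4096 bytes) and 16 blobs; each blob needs at
	 * least one 4KB metadata page, so their metadata cannot fit in a single cluster and
	 * the metadata blob has to be resized.
	 */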
3109 	struct spdk_bs_dev *dev;
3110 	struct spdk_bs_opts opts;
3111 	struct spdk_blob *blob;
3112 	struct spdk_blob_opts blob_opts;
3113 	uint32_t cluster_sz;
3114 	spdk_blob_id blobids[NUM_BLOBS];
3115 	int i;
3116 
3117 
3118 	dev = init_dev();
3119 	spdk_bs_opts_init(&opts, sizeof(opts));
3120 	opts.cluster_sz = CLUSTER_PAGE_COUNT * 4096;
3121 	cluster_sz = opts.cluster_sz;
3122 
3123 	/* Initialize a new blob store */
3124 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
3125 	poll_threads();
3126 	CU_ASSERT(g_bserrno == 0);
3127 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3128 	bs = g_bs;
3129 
3130 	CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz);
3131 
3132 	ut_spdk_blob_opts_init(&blob_opts);
3133 
3134 	for (i = 0; i < NUM_BLOBS; i++) {
3135 		g_bserrno = -1;
3136 		g_blobid = SPDK_BLOBID_INVALID;
3137 		spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
3138 		poll_threads();
3139 		CU_ASSERT(g_bserrno == 0);
3140 		CU_ASSERT(g_blobid !=  SPDK_BLOBID_INVALID);
3141 		blobids[i] = g_blobid;
3142 	}
3143 
3144 	ut_bs_reload(&bs, &opts);
3145 
3146 	CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz);
3147 
3148 	for (i = 0; i < NUM_BLOBS; i++) {
3149 		g_bserrno = -1;
3150 		g_blob = NULL;
3151 		spdk_bs_open_blob(bs, blobids[i], blob_op_with_handle_complete, NULL);
3152 		poll_threads();
3153 		CU_ASSERT(g_bserrno == 0);
3154 		CU_ASSERT(g_blob !=  NULL);
3155 		blob = g_blob;
3156 		g_bserrno = -1;
3157 		spdk_blob_close(blob, blob_op_complete, NULL);
3158 		poll_threads();
3159 		CU_ASSERT(g_bserrno == 0);
3160 	}
3161 
3162 	spdk_bs_unload(bs, bs_op_complete, NULL);
3163 	poll_threads();
3164 	CU_ASSERT(g_bserrno == 0);
3165 	g_bs = NULL;
3166 }
3167 
3168 static void
3169 bs_destroy(void)
3170 {
3171 	struct spdk_blob_store *bs;
3172 	struct spdk_bs_dev *dev;
3173 
3174 	/* Initialize a new blob store */
3175 	dev = init_dev();
3176 	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
3177 	poll_threads();
3178 	CU_ASSERT(g_bserrno == 0);
3179 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3180 	bs = g_bs;
3181 
3182 	/* Destroy the blob store */
3183 	g_bserrno = -1;
3184 	spdk_bs_destroy(bs, bs_op_complete, NULL);
3185 	poll_threads();
3186 	CU_ASSERT(g_bserrno == 0);
3187 
3188 	/* Loading a non-existent blob store should fail. */
3189 	g_bs = NULL;
3190 	dev = init_dev();
3191 
3192 	g_bserrno = 0;
3193 	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
3194 	poll_threads();
3195 	CU_ASSERT(g_bserrno != 0);
3196 }
3197 
3198 /* Try to hit all of the corner cases associated with serializing
3199  * a blob to disk
3200  */
3201 static void
3202 blob_serialize_test(void)
3203 {
3204 	struct spdk_bs_dev *dev;
3205 	struct spdk_bs_opts opts;
3206 	struct spdk_blob_store *bs;
3207 	spdk_blob_id blobid[2];
3208 	struct spdk_blob *blob[2];
3209 	uint64_t i;
3210 	char *value;
3211 	int rc;
3212 
3213 	dev = init_dev();
3214 
3215 	/* Initialize a new blobstore with very small clusters */
3216 	spdk_bs_opts_init(&opts, sizeof(opts));
3217 	opts.cluster_sz = dev->blocklen * 8;
3218 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
3219 	poll_threads();
3220 	CU_ASSERT(g_bserrno == 0);
3221 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3222 	bs = g_bs;
3223 
3224 	/* Create and open two blobs */
3225 	for (i = 0; i < 2; i++) {
3226 		blob[i] = ut_blob_create_and_open(bs, NULL);
3227 		blobid[i] = spdk_blob_get_id(blob[i]);
3228 
3229 		/* Set a fairly large xattr on both blobs to eat up
3230 		 * metadata space
3231 		 */
3232 		value = calloc(dev->blocklen - 64, sizeof(char));
3233 		SPDK_CU_ASSERT_FATAL(value != NULL);
3234 		memset(value, i, dev->blocklen / 2);
3235 		rc = spdk_blob_set_xattr(blob[i], "name", value, dev->blocklen - 64);
3236 		CU_ASSERT(rc == 0);
3237 		free(value);
3238 	}
3239 
3240 	/* Resize the blobs, alternating 1 cluster at a time.
3241 	 * This thwarts run-length encoding and will cause the
3242 	 * extents to spill over.
3243 	 */
3244 	for (i = 0; i < 6; i++) {
3245 		spdk_blob_resize(blob[i % 2], (i / 2) + 1, blob_op_complete, NULL);
3246 		poll_threads();
3247 		CU_ASSERT(g_bserrno == 0);
3248 	}
3249 
3250 	for (i = 0; i < 2; i++) {
3251 		spdk_blob_sync_md(blob[i], blob_op_complete, NULL);
3252 		poll_threads();
3253 		CU_ASSERT(g_bserrno == 0);
3254 	}
3255 
3256 	/* Close the blobs */
3257 	for (i = 0; i < 2; i++) {
3258 		spdk_blob_close(blob[i], blob_op_complete, NULL);
3259 		poll_threads();
3260 		CU_ASSERT(g_bserrno == 0);
3261 	}
3262 
3263 	ut_bs_reload(&bs, &opts);
3264 
3265 	for (i = 0; i < 2; i++) {
3266 		blob[i] = NULL;
3267 
3268 		spdk_bs_open_blob(bs, blobid[i], blob_op_with_handle_complete, NULL);
3269 		poll_threads();
3270 		CU_ASSERT(g_bserrno == 0);
3271 		CU_ASSERT(g_blob != NULL);
3272 		blob[i] = g_blob;
3273 
3274 		CU_ASSERT(spdk_blob_get_num_clusters(blob[i]) == 3);
3275 
3276 		spdk_blob_close(blob[i], blob_op_complete, NULL);
3277 		poll_threads();
3278 		CU_ASSERT(g_bserrno == 0);
3279 	}
3280 
3281 	spdk_bs_unload(bs, bs_op_complete, NULL);
3282 	poll_threads();
3283 	CU_ASSERT(g_bserrno == 0);
3284 	g_bs = NULL;
3285 }
3286 
3287 static void
3288 blob_crc(void)
3289 {
3290 	struct spdk_blob_store *bs = g_bs;
3291 	struct spdk_blob *blob;
3292 	spdk_blob_id blobid;
3293 	uint32_t page_num;
3294 	int index;
3295 	struct spdk_blob_md_page *page;
3296 
3297 	blob = ut_blob_create_and_open(bs, NULL);
3298 	blobid = spdk_blob_get_id(blob);
3299 
3300 	spdk_blob_close(blob, blob_op_complete, NULL);
3301 	poll_threads();
3302 	CU_ASSERT(g_bserrno == 0);
3303 
3304 	page_num = bs_blobid_to_page(blobid);
3305 	index = DEV_BUFFER_BLOCKLEN * (bs->md_start + page_num);
3306 	page = (struct spdk_blob_md_page *)&g_dev_buffer[index];
3307 	page->crc = 0;
3308 
3309 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
3310 	poll_threads();
3311 	CU_ASSERT(g_bserrno == -EINVAL);
3312 	CU_ASSERT(g_blob == NULL);
3313 	g_bserrno = 0;
3314 
3315 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
3316 	poll_threads();
3317 	CU_ASSERT(g_bserrno == -EINVAL);
3318 }
3319 
3320 static void
3321 super_block_crc(void)
3322 {
3323 	struct spdk_blob_store *bs;
3324 	struct spdk_bs_dev *dev;
3325 	struct spdk_bs_super_block *super_block;
3326 
3327 	dev = init_dev();
3328 	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
3329 	poll_threads();
3330 	CU_ASSERT(g_bserrno == 0);
3331 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3332 	bs = g_bs;
3333 
3334 	spdk_bs_unload(bs, bs_op_complete, NULL);
3335 	poll_threads();
3336 	CU_ASSERT(g_bserrno == 0);
3337 	g_bs = NULL;
3338 
3339 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
3340 	super_block->crc = 0;
3341 	dev = init_dev();
3342 
3343 	/* Load an existing blob store */
3344 	g_bserrno = 0;
3345 	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
3346 	poll_threads();
3347 	CU_ASSERT(g_bserrno == -EILSEQ);
3348 }
3349 
3350 /* For blob dirty shutdown test case we do the following sub-test cases:
3351  * 1 Initialize new blob store and create 1 super blob with some xattrs, then we
3352  *   dirty shutdown and reload the blob store and verify the xattrs.
3353  * 2 Resize the blob from 10 clusters to 20 clusters and then dirty shutdown,
3354  *   reload the blob store and verify the cluster count.
3355  * 3 Create the second blob and then dirty shutdown, reload the blob store
3356  *   and verify the second blob.
3357  * 4 Delete the second blob and then dirty shutdown, reload the blob store
3358  *   and verify the second blob is invalid.
3359  * 5 Create the second blob again and also create the third blob, modify the
3360  *   md of the second blob so that the md becomes invalid, and then dirty shutdown,
3361  *   reload the blob store and verify the second blob, which should be invalid, and also
3362  *   verify the third blob, which should be correct.
3363  */
3364 static void
3365 blob_dirty_shutdown(void)
3366 {
3367 	int rc;
3368 	int index;
3369 	struct spdk_blob_store *bs = g_bs;
3370 	spdk_blob_id blobid1, blobid2, blobid3;
3371 	struct spdk_blob *blob = g_blob;
3372 	uint64_t length;
3373 	uint64_t free_clusters;
3374 	const void *value;
3375 	size_t value_len;
3376 	uint32_t page_num;
3377 	struct spdk_blob_md_page *page;
3378 	struct spdk_blob_opts blob_opts;
3379 
3380 	/* Create first blob */
3381 	blobid1 = spdk_blob_get_id(blob);
3382 
3383 	/* Set some xattrs */
3384 	rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
3385 	CU_ASSERT(rc == 0);
3386 
3387 	length = 2345;
3388 	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
3389 	CU_ASSERT(rc == 0);
3390 
3391 	/* Put an xattr that fits exactly in a single page.
3392 	 * This results in additional pages being added to the MD.
3393 	 * The first holds the flags and the smaller xattrs, the second the large xattr,
3394 	 * and the third just the extents.
3395 	 */
3396 	size_t xattr_length = 4072 - sizeof(struct spdk_blob_md_descriptor_xattr) -
3397 			      strlen("large_xattr");
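	/* 4072 is presumably the per-page descriptor capacity, playing the same role as
	 * SPDK_BS_MAX_DESC_SIZE in blob_parse_md() above, so descriptor header + name + value
	 * fill one metadata page exactly.
	 */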
3398 	char *xattr = calloc(xattr_length, sizeof(char));
3399 	SPDK_CU_ASSERT_FATAL(xattr != NULL);
3400 	rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
3401 	free(xattr);
3402 	SPDK_CU_ASSERT_FATAL(rc == 0);
3403 
3404 	/* Resize the blob */
3405 	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
3406 	poll_threads();
3407 	CU_ASSERT(g_bserrno == 0);
3408 
3409 	/* Set the blob as the super blob */
3410 	spdk_bs_set_super(bs, blobid1, blob_op_complete, NULL);
3411 	poll_threads();
3412 	CU_ASSERT(g_bserrno == 0);
3413 
3414 	free_clusters = spdk_bs_free_cluster_count(bs);
3415 
3416 	spdk_blob_close(blob, blob_op_complete, NULL);
3417 	poll_threads();
3418 	CU_ASSERT(g_bserrno == 0);
3419 	blob = NULL;
3420 	g_blob = NULL;
3421 	g_blobid = SPDK_BLOBID_INVALID;
3422 
3423 	ut_bs_dirty_load(&bs, NULL);
3424 
3425 	/* Get the super blob */
3426 	spdk_bs_get_super(bs, blob_op_with_id_complete, NULL);
3427 	poll_threads();
3428 	CU_ASSERT(g_bserrno == 0);
3429 	CU_ASSERT(blobid1 == g_blobid);
3430 
3431 	spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL);
3432 	poll_threads();
3433 	CU_ASSERT(g_bserrno == 0);
3434 	CU_ASSERT(g_blob != NULL);
3435 	blob = g_blob;
3436 
3437 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3438 
3439 	/* Get the xattrs */
3440 	value = NULL;
3441 	rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len);
3442 	CU_ASSERT(rc == 0);
3443 	SPDK_CU_ASSERT_FATAL(value != NULL);
3444 	CU_ASSERT(*(uint64_t *)value == length);
3445 	CU_ASSERT(value_len == 8);
3446 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
3447 
3448 	/* Resize the blob */
3449 	spdk_blob_resize(blob, 20, blob_op_complete, NULL);
3450 	poll_threads();
3451 	CU_ASSERT(g_bserrno == 0);
3452 
3453 	free_clusters = spdk_bs_free_cluster_count(bs);
3454 
3455 	spdk_blob_close(blob, blob_op_complete, NULL);
3456 	poll_threads();
3457 	CU_ASSERT(g_bserrno == 0);
3458 	blob = NULL;
3459 	g_blob = NULL;
3460 	g_blobid = SPDK_BLOBID_INVALID;
3461 
3462 	ut_bs_dirty_load(&bs, NULL);
3463 
3464 	spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL);
3465 	poll_threads();
3466 	CU_ASSERT(g_bserrno == 0);
3467 	CU_ASSERT(g_blob != NULL);
3468 	blob = g_blob;
3469 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 20);
3470 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3471 
3472 	spdk_blob_close(blob, blob_op_complete, NULL);
3473 	poll_threads();
3474 	CU_ASSERT(g_bserrno == 0);
3475 	blob = NULL;
3476 	g_blob = NULL;
3477 	g_blobid = SPDK_BLOBID_INVALID;
3478 
3479 	/* Create second blob */
3480 	blob = ut_blob_create_and_open(bs, NULL);
3481 	blobid2 = spdk_blob_get_id(blob);
3482 
3483 	/* Set some xattrs */
3484 	rc = spdk_blob_set_xattr(blob, "name", "log1.txt", strlen("log1.txt") + 1);
3485 	CU_ASSERT(rc == 0);
3486 
3487 	length = 5432;
3488 	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
3489 	CU_ASSERT(rc == 0);
3490 
3491 	/* Resize the blob */
3492 	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
3493 	poll_threads();
3494 	CU_ASSERT(g_bserrno == 0);
3495 
3496 	free_clusters = spdk_bs_free_cluster_count(bs);
3497 
3498 	spdk_blob_close(blob, blob_op_complete, NULL);
3499 	poll_threads();
3500 	CU_ASSERT(g_bserrno == 0);
3501 	blob = NULL;
3502 	g_blob = NULL;
3503 	g_blobid = SPDK_BLOBID_INVALID;
3504 
3505 	ut_bs_dirty_load(&bs, NULL);
3506 
3507 	spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL);
3508 	poll_threads();
3509 	CU_ASSERT(g_bserrno == 0);
3510 	CU_ASSERT(g_blob != NULL);
3511 	blob = g_blob;
3512 
3513 	/* Get the xattrs */
3514 	value = NULL;
3515 	rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len);
3516 	CU_ASSERT(rc == 0);
3517 	SPDK_CU_ASSERT_FATAL(value != NULL);
3518 	CU_ASSERT(*(uint64_t *)value == length);
3519 	CU_ASSERT(value_len == 8);
3520 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
3521 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3522 
3523 	ut_blob_close_and_delete(bs, blob);
3524 
3525 	free_clusters = spdk_bs_free_cluster_count(bs);
3526 
3527 	ut_bs_dirty_load(&bs, NULL);
3528 
3529 	spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL);
3530 	poll_threads();
3531 	CU_ASSERT(g_bserrno != 0);
3532 	CU_ASSERT(g_blob == NULL);
3533 
3534 	spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL);
3535 	poll_threads();
3536 	CU_ASSERT(g_bserrno == 0);
3537 	CU_ASSERT(g_blob != NULL);
3538 	blob = g_blob;
3539 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3540 	spdk_blob_close(blob, blob_op_complete, NULL);
3541 	poll_threads();
3542 	CU_ASSERT(g_bserrno == 0);
3543 
3544 	ut_bs_reload(&bs, NULL);
3545 
3546 	/* Create second blob */
3547 	ut_spdk_blob_opts_init(&blob_opts);
3548 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
3549 	poll_threads();
3550 	CU_ASSERT(g_bserrno == 0);
3551 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
3552 	blobid2 = g_blobid;
3553 
3554 	/* Create third blob */
3555 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
3556 	poll_threads();
3557 	CU_ASSERT(g_bserrno == 0);
3558 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
3559 	blobid3 = g_blobid;
3560 
3561 	spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL);
3562 	poll_threads();
3563 	CU_ASSERT(g_bserrno == 0);
3564 	CU_ASSERT(g_blob != NULL);
3565 	blob = g_blob;
3566 
3567 	/* Set some xattrs for second blob */
3568 	rc = spdk_blob_set_xattr(blob, "name", "log1.txt", strlen("log1.txt") + 1);
3569 	CU_ASSERT(rc == 0);
3570 
3571 	length = 5432;
3572 	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
3573 	CU_ASSERT(rc == 0);
3574 
3575 	spdk_blob_close(blob, blob_op_complete, NULL);
3576 	poll_threads();
3577 	CU_ASSERT(g_bserrno == 0);
3578 	blob = NULL;
3579 	g_blob = NULL;
3580 	g_blobid = SPDK_BLOBID_INVALID;
3581 
3582 	spdk_bs_open_blob(bs, blobid3, blob_op_with_handle_complete, NULL);
3583 	poll_threads();
3584 	CU_ASSERT(g_bserrno == 0);
3585 	CU_ASSERT(g_blob != NULL);
3586 	blob = g_blob;
3587 
3588 	/* Set some xattrs for third blob */
3589 	rc = spdk_blob_set_xattr(blob, "name", "log2.txt", strlen("log2.txt") + 1);
3590 	CU_ASSERT(rc == 0);
3591 
3592 	length = 5432;
3593 	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
3594 	CU_ASSERT(rc == 0);
3595 
3596 	spdk_blob_close(blob, blob_op_complete, NULL);
3597 	poll_threads();
3598 	CU_ASSERT(g_bserrno == 0);
3599 	blob = NULL;
3600 	g_blob = NULL;
3601 	g_blobid = SPDK_BLOBID_INVALID;
3602 
3603 	/* Mark second blob as invalid */
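	/* Corrupt the first metadata page of the second blob in place: a blob's metadata
	 * chain is expected to start with sequence_num == 0, so bumping it here (and
	 * recomputing the CRC so this is not just a checksum error) should make only this
	 * blob fail to open after the dirty load below, while blobid3 stays intact. */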
3604 	page_num = bs_blobid_to_page(blobid2);
3605 
3606 	index = DEV_BUFFER_BLOCKLEN * (bs->md_start + page_num);
3607 	page = (struct spdk_blob_md_page *)&g_dev_buffer[index];
3608 	page->sequence_num = 1;
3609 	page->crc = blob_md_page_calc_crc(page);
3610 
3611 	free_clusters = spdk_bs_free_cluster_count(bs);
3612 
3613 	ut_bs_dirty_load(&bs, NULL);
3614 
3615 	spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL);
3616 	poll_threads();
3617 	CU_ASSERT(g_bserrno != 0);
3618 	CU_ASSERT(g_blob == NULL);
3619 
3620 	spdk_bs_open_blob(bs, blobid3, blob_op_with_handle_complete, NULL);
3621 	poll_threads();
3622 	CU_ASSERT(g_bserrno == 0);
3623 	CU_ASSERT(g_blob != NULL);
3624 	blob = g_blob;
3625 
3626 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3627 }
3628 
3629 static void
3630 blob_flags(void)
3631 {
3632 	struct spdk_blob_store *bs = g_bs;
3633 	spdk_blob_id blobid_invalid, blobid_data_ro, blobid_md_ro;
3634 	struct spdk_blob *blob_invalid, *blob_data_ro, *blob_md_ro;
3635 	struct spdk_blob_opts blob_opts;
3636 	int rc;
3637 
3638 	/* Create three blobs - one each for testing invalid, data_ro and md_ro flags. */
3639 	blob_invalid = ut_blob_create_and_open(bs, NULL);
3640 	blobid_invalid = spdk_blob_get_id(blob_invalid);
3641 
3642 	blob_data_ro = ut_blob_create_and_open(bs, NULL);
3643 	blobid_data_ro = spdk_blob_get_id(blob_data_ro);
3644 
3645 	ut_spdk_blob_opts_init(&blob_opts);
3646 	blob_opts.clear_method = BLOB_CLEAR_WITH_WRITE_ZEROES;
3647 	blob_md_ro = ut_blob_create_and_open(bs, &blob_opts);
3648 	blobid_md_ro = spdk_blob_get_id(blob_md_ro);
3649 	CU_ASSERT((blob_md_ro->md_ro_flags & SPDK_BLOB_MD_RO_FLAGS_MASK) == BLOB_CLEAR_WITH_WRITE_ZEROES);
3650 
3651 	/* Change the size of blob_data_ro to check that flags are serialized
3652 	 * when the blob has a non-zero number of extents */
3653 	spdk_blob_resize(blob_data_ro, 10, blob_op_complete, NULL);
3654 	poll_threads();
3655 	CU_ASSERT(g_bserrno == 0);
3656 
3657 	/* Set the xattr to check that flags are serialized
3658 	 * when the blob has a non-zero number of xattrs */
3659 	rc = spdk_blob_set_xattr(blob_md_ro, "name", "log.txt", strlen("log.txt") + 1);
3660 	CU_ASSERT(rc == 0);
3661 
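	/* Set high flag bits that the current code does not know about, emulating metadata
	 * written by a newer version: an unknown invalid flag should make the blob
	 * unopenable, an unknown data_ro flag should force data+md read-only, and an
	 * unknown md_ro flag should force md read-only, as verified after the reload. */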
3662 	blob_invalid->invalid_flags = (1ULL << 63);
3663 	blob_invalid->state = SPDK_BLOB_STATE_DIRTY;
3664 	blob_data_ro->data_ro_flags = (1ULL << 62);
3665 	blob_data_ro->state = SPDK_BLOB_STATE_DIRTY;
3666 	blob_md_ro->md_ro_flags = (1ULL << 61);
3667 	blob_md_ro->state = SPDK_BLOB_STATE_DIRTY;
3668 
3669 	g_bserrno = -1;
3670 	spdk_blob_sync_md(blob_invalid, blob_op_complete, NULL);
3671 	poll_threads();
3672 	CU_ASSERT(g_bserrno == 0);
3673 	g_bserrno = -1;
3674 	spdk_blob_sync_md(blob_data_ro, blob_op_complete, NULL);
3675 	poll_threads();
3676 	CU_ASSERT(g_bserrno == 0);
3677 	g_bserrno = -1;
3678 	spdk_blob_sync_md(blob_md_ro, blob_op_complete, NULL);
3679 	poll_threads();
3680 	CU_ASSERT(g_bserrno == 0);
3681 
3682 	g_bserrno = -1;
3683 	spdk_blob_close(blob_invalid, blob_op_complete, NULL);
3684 	poll_threads();
3685 	CU_ASSERT(g_bserrno == 0);
3686 	blob_invalid = NULL;
3687 	g_bserrno = -1;
3688 	spdk_blob_close(blob_data_ro, blob_op_complete, NULL);
3689 	poll_threads();
3690 	CU_ASSERT(g_bserrno == 0);
3691 	blob_data_ro = NULL;
3692 	g_bserrno = -1;
3693 	spdk_blob_close(blob_md_ro, blob_op_complete, NULL);
3694 	poll_threads();
3695 	CU_ASSERT(g_bserrno == 0);
3696 	blob_md_ro = NULL;
3697 
3698 	g_blob = NULL;
3699 	g_blobid = SPDK_BLOBID_INVALID;
3700 
3701 	ut_bs_reload(&bs, NULL);
3702 
3703 	g_blob = NULL;
3704 	g_bserrno = 0;
3705 	spdk_bs_open_blob(bs, blobid_invalid, blob_op_with_handle_complete, NULL);
3706 	poll_threads();
3707 	CU_ASSERT(g_bserrno != 0);
3708 	CU_ASSERT(g_blob == NULL);
3709 
3710 	g_blob = NULL;
3711 	g_bserrno = -1;
3712 	spdk_bs_open_blob(bs, blobid_data_ro, blob_op_with_handle_complete, NULL);
3713 	poll_threads();
3714 	CU_ASSERT(g_bserrno == 0);
3715 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
3716 	blob_data_ro = g_blob;
3717 	/* If an unknown data_ro flag was found, the blob should be marked both data and md read-only. */
3718 	CU_ASSERT(blob_data_ro->data_ro == true);
3719 	CU_ASSERT(blob_data_ro->md_ro == true);
3720 	CU_ASSERT(spdk_blob_get_num_clusters(blob_data_ro) == 10);
3721 
3722 	g_blob = NULL;
3723 	g_bserrno = -1;
3724 	spdk_bs_open_blob(bs, blobid_md_ro, blob_op_with_handle_complete, NULL);
3725 	poll_threads();
3726 	CU_ASSERT(g_bserrno == 0);
3727 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
3728 	blob_md_ro = g_blob;
3729 	CU_ASSERT(blob_md_ro->data_ro == false);
3730 	CU_ASSERT(blob_md_ro->md_ro == true);
3731 
3732 	g_bserrno = -1;
3733 	spdk_blob_sync_md(blob_md_ro, blob_op_complete, NULL);
3734 	poll_threads();
3735 	CU_ASSERT(g_bserrno == 0);
3736 
3737 	ut_blob_close_and_delete(bs, blob_data_ro);
3738 	ut_blob_close_and_delete(bs, blob_md_ro);
3739 }
3740 
3741 static void
3742 bs_version(void)
3743 {
3744 	struct spdk_bs_super_block *super;
3745 	struct spdk_blob_store *bs = g_bs;
3746 	struct spdk_bs_dev *dev;
3747 	struct spdk_blob *blob;
3748 	struct spdk_blob_opts blob_opts;
3749 	spdk_blob_id blobid;
3750 
3751 	/* Unload the blob store */
3752 	spdk_bs_unload(bs, bs_op_complete, NULL);
3753 	poll_threads();
3754 	CU_ASSERT(g_bserrno == 0);
3755 	g_bs = NULL;
3756 
3757 	/*
3758 	 * Change the bs version on disk.  This will allow us to
3759 	 *  test that the version does not get modified automatically
3760 	 *  when loading and unloading the blobstore.
3761 	 */
3762 	super = (struct spdk_bs_super_block *)&g_dev_buffer[0];
3763 	CU_ASSERT(super->version == SPDK_BS_VERSION);
3764 	CU_ASSERT(super->clean == 1);
3765 	super->version = 2;
3766 	/*
3767 	 * Version 2 metadata does not have a used blobid mask, so clear
3768 	 *  those fields in the super block and zero the corresponding
3769 	 *  region on "disk".  We will use this to ensure blob IDs are
3770 	 *  correctly reconstructed.
3771 	 */
3772 	memset(&g_dev_buffer[super->used_blobid_mask_start * SPDK_BS_PAGE_SIZE], 0,
3773 	       super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE);
3774 	super->used_blobid_mask_start = 0;
3775 	super->used_blobid_mask_len = 0;
3776 	super->crc = blob_md_page_calc_crc(super);
3777 
3778 	/* Load an existing blob store */
3779 	dev = init_dev();
3780 	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
3781 	poll_threads();
3782 	CU_ASSERT(g_bserrno == 0);
3783 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3784 	CU_ASSERT(super->clean == 1);
3785 	bs = g_bs;
3786 
3787 	/*
3788 	 * Create a blob - just to make sure that unloading it
3789 	 *  results in writing the super block (since metadata pages
3790 	 *  were allocated).
3791 	 */
3792 	ut_spdk_blob_opts_init(&blob_opts);
3793 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
3794 	poll_threads();
3795 	CU_ASSERT(g_bserrno == 0);
3796 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
3797 	blobid = g_blobid;
3798 
3799 	/* Unload the blob store */
3800 	spdk_bs_unload(bs, bs_op_complete, NULL);
3801 	poll_threads();
3802 	CU_ASSERT(g_bserrno == 0);
3803 	g_bs = NULL;
3804 	CU_ASSERT(super->version == 2);
3805 	CU_ASSERT(super->used_blobid_mask_start == 0);
3806 	CU_ASSERT(super->used_blobid_mask_len == 0);
3807 
3808 	dev = init_dev();
3809 	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
3810 	poll_threads();
3811 	CU_ASSERT(g_bserrno == 0);
3812 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3813 	bs = g_bs;
3814 
3815 	g_blob = NULL;
3816 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
3817 	poll_threads();
3818 	CU_ASSERT(g_bserrno == 0);
3819 	CU_ASSERT(g_blob != NULL);
3820 	blob = g_blob;
3821 
3822 	ut_blob_close_and_delete(bs, blob);
3823 
3824 	CU_ASSERT(super->version == 2);
3825 	CU_ASSERT(super->used_blobid_mask_start == 0);
3826 	CU_ASSERT(super->used_blobid_mask_len == 0);
3827 }
3828 
3829 static void
3830 blob_set_xattrs_test(void)
3831 {
3832 	struct spdk_blob_store *bs = g_bs;
3833 	struct spdk_blob *blob;
3834 	struct spdk_blob_opts opts;
3835 	const void *value;
3836 	size_t value_len;
3837 	char *xattr;
3838 	size_t xattr_length;
3839 	int rc;
3840 
3841 	/* Create blob with extra attributes */
3842 	ut_spdk_blob_opts_init(&opts);
3843 
3844 	opts.xattrs.names = g_xattr_names;
3845 	opts.xattrs.get_value = _get_xattr_value;
3846 	opts.xattrs.count = 3;
3847 	opts.xattrs.ctx = &g_ctx;
3848 
3849 	blob = ut_blob_create_and_open(bs, &opts);
3850 
3851 	/* Get the xattrs */
3852 	value = NULL;
3853 
3854 	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len);
3855 	CU_ASSERT(rc == 0);
3856 	SPDK_CU_ASSERT_FATAL(value != NULL);
3857 	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
3858 	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);
3859 
3860 	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len);
3861 	CU_ASSERT(rc == 0);
3862 	SPDK_CU_ASSERT_FATAL(value != NULL);
3863 	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
3864 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);
3865 
3866 	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len);
3867 	CU_ASSERT(rc == 0);
3868 	SPDK_CU_ASSERT_FATAL(value != NULL);
3869 	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
3870 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);
3871 
3872 	/* Try to get non existing attribute */
3873 	/* Try to get a non-existent attribute */
3874 	rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len);
3875 	CU_ASSERT(rc == -ENOENT);
3876 
3877 	/* Try an xattr exceeding the maximum descriptor length in a single page */
3878 	xattr_length = SPDK_BS_MAX_DESC_SIZE - sizeof(struct spdk_blob_md_descriptor_xattr) -
3879 		       strlen("large_xattr") + 1;
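	/* One byte over the single-page descriptor limit, so spdk_blob_set_xattr() is
	 * expected to reject it with -ENOMEM below. */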
3880 	xattr = calloc(xattr_length, sizeof(char));
3881 	SPDK_CU_ASSERT_FATAL(xattr != NULL);
3882 	rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
3883 	free(xattr);
3884 	SPDK_CU_ASSERT_FATAL(rc == -ENOMEM);
3885 
3886 	spdk_blob_close(blob, blob_op_complete, NULL);
3887 	poll_threads();
3888 	CU_ASSERT(g_bserrno == 0);
3889 	blob = NULL;
3890 	g_blob = NULL;
3891 	g_blobid = SPDK_BLOBID_INVALID;
3892 
3893 	/* NULL callback */
3894 	ut_spdk_blob_opts_init(&opts);
3895 	opts.xattrs.names = g_xattr_names;
3896 	opts.xattrs.get_value = NULL;
3897 	opts.xattrs.count = 1;
3898 	opts.xattrs.ctx = &g_ctx;
3899 
3900 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
3901 	poll_threads();
3902 	CU_ASSERT(g_bserrno == -EINVAL);
3903 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
3904 
3905 	/* NULL values */
3906 	ut_spdk_blob_opts_init(&opts);
3907 	opts.xattrs.names = g_xattr_names;
3908 	opts.xattrs.get_value = _get_xattr_value_null;
3909 	opts.xattrs.count = 1;
3910 	opts.xattrs.ctx = NULL;
3911 
3912 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
3913 	poll_threads();
3914 	CU_ASSERT(g_bserrno == -EINVAL);
3915 }
3916 
3917 static void
3918 blob_thin_prov_alloc(void)
3919 {
3920 	struct spdk_blob_store *bs = g_bs;
3921 	struct spdk_blob *blob;
3922 	struct spdk_blob_opts opts;
3923 	spdk_blob_id blobid;
3924 	uint64_t free_clusters;
3925 
3926 	free_clusters = spdk_bs_free_cluster_count(bs);
3927 
3928 	/* Set blob as thin provisioned */
3929 	ut_spdk_blob_opts_init(&opts);
3930 	opts.thin_provision = true;
3931 
3932 	blob = ut_blob_create_and_open(bs, &opts);
3933 	blobid = spdk_blob_get_id(blob);
3934 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3935 
3936 	CU_ASSERT(blob->active.num_clusters == 0);
3937 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
3938 
3939 	/* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */
3940 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
3941 	poll_threads();
3942 	CU_ASSERT(g_bserrno == 0);
3943 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3944 	CU_ASSERT(blob->active.num_clusters == 5);
3945 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
3946 
3947 	/* Grow it to 1TB - still unallocated */
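	/* 262144 clusters corresponds to 1 TiB, assuming the default 4 MiB cluster size. */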
3948 	spdk_blob_resize(blob, 262144, blob_op_complete, NULL);
3949 	poll_threads();
3950 	CU_ASSERT(g_bserrno == 0);
3951 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3952 	CU_ASSERT(blob->active.num_clusters == 262144);
3953 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 262144);
3954 
3955 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
3956 	poll_threads();
3957 	CU_ASSERT(g_bserrno == 0);
3958 	/* Sync must not change anything */
3959 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3960 	CU_ASSERT(blob->active.num_clusters == 262144);
3961 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 262144);
3962 	/* Since clusters are not allocated,
3963 	 * number of metadata pages is expected to be minimal.
3964 	 */
3965 	CU_ASSERT(blob->active.num_pages == 1);
3966 
3967 	/* Shrink the blob to 3 clusters - still unallocated */
3968 	spdk_blob_resize(blob, 3, blob_op_complete, NULL);
3969 	poll_threads();
3970 	CU_ASSERT(g_bserrno == 0);
3971 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3972 	CU_ASSERT(blob->active.num_clusters == 3);
3973 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 3);
3974 
3975 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
3976 	poll_threads();
3977 	CU_ASSERT(g_bserrno == 0);
3978 	/* Sync must not change anything */
3979 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3980 	CU_ASSERT(blob->active.num_clusters == 3);
3981 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 3);
3982 
3983 	spdk_blob_close(blob, blob_op_complete, NULL);
3984 	poll_threads();
3985 	CU_ASSERT(g_bserrno == 0);
3986 
3987 	ut_bs_reload(&bs, NULL);
3988 
3989 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
3990 	poll_threads();
3991 	CU_ASSERT(g_bserrno == 0);
3992 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
3993 	blob = g_blob;
3994 
3995 	/* Check that clusters allocation and size is still the same */
3996 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3997 	CU_ASSERT(blob->active.num_clusters == 3);
3998 
3999 	ut_blob_close_and_delete(bs, blob);
4000 }
4001 
4002 static void
4003 blob_insert_cluster_msg_test(void)
4004 {
4005 	struct spdk_blob_store *bs = g_bs;
4006 	struct spdk_blob *blob;
4007 	struct spdk_blob_opts opts;
4008 	struct spdk_blob_md_page page = {};
4009 	spdk_blob_id blobid;
4010 	uint64_t free_clusters;
4011 	uint64_t new_cluster = 0;
4012 	uint32_t cluster_num = 3;
4013 	uint32_t extent_page = 0;
4014 
4015 	free_clusters = spdk_bs_free_cluster_count(bs);
4016 
4017 	/* Set blob as thin provisioned */
4018 	ut_spdk_blob_opts_init(&opts);
4019 	opts.thin_provision = true;
4020 	opts.num_clusters = 4;
4021 
4022 	blob = ut_blob_create_and_open(bs, &opts);
4023 	blobid = spdk_blob_get_id(blob);
4024 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4025 
4026 	CU_ASSERT(blob->active.num_clusters == 4);
4027 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 4);
4028 	CU_ASSERT(blob->active.clusters[cluster_num] == 0);
4029 
4030 	/* Specify cluster_num to allocate; new_cluster will be returned and then inserted on the md_thread.
4031 	 * This simulates the behaviour when a cluster is allocated after blob creation,
4032 	 * e.g. in _spdk_bs_allocate_and_copy_cluster(). */
4033 	spdk_spin_lock(&bs->used_lock);
4034 	bs_allocate_cluster(blob, cluster_num, &new_cluster, &extent_page, false);
4035 	CU_ASSERT(blob->active.clusters[cluster_num] == 0);
4036 	spdk_spin_unlock(&bs->used_lock);
4037 
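	/* bs_allocate_cluster() only claims the cluster in the blobstore's allocation
	 * bitmap; the blob's cluster map is updated by the md-thread message below, which
	 * is why active.clusters[cluster_num] is still 0 at this point. */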
4038 	blob_insert_cluster_on_md_thread(blob, cluster_num, new_cluster, extent_page, &page,
4039 					 blob_op_complete, NULL);
4040 	poll_threads();
4041 
4042 	CU_ASSERT(blob->active.clusters[cluster_num] != 0);
4043 
4044 	spdk_blob_close(blob, blob_op_complete, NULL);
4045 	poll_threads();
4046 	CU_ASSERT(g_bserrno == 0);
4047 
4048 	ut_bs_reload(&bs, NULL);
4049 
4050 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
4051 	poll_threads();
4052 	CU_ASSERT(g_bserrno == 0);
4053 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4054 	blob = g_blob;
4055 
4056 	CU_ASSERT(blob->active.clusters[cluster_num] != 0);
4057 
4058 	ut_blob_close_and_delete(bs, blob);
4059 }
4060 
4061 static void
4062 blob_thin_prov_rw(void)
4063 {
4064 	static const uint8_t zero[10 * 4096] = { 0 };
4065 	struct spdk_blob_store *bs = g_bs;
4066 	struct spdk_blob *blob, *blob_id0;
4067 	struct spdk_io_channel *channel, *channel_thread1;
4068 	struct spdk_blob_opts opts;
4069 	uint64_t free_clusters;
4070 	uint64_t page_size;
4071 	uint8_t payload_read[10 * 4096];
4072 	uint8_t payload_write[10 * 4096];
4073 	uint64_t write_bytes;
4074 	uint64_t read_bytes;
4075 
4076 	free_clusters = spdk_bs_free_cluster_count(bs);
4077 	page_size = spdk_bs_get_page_size(bs);
4078 
4079 	channel = spdk_bs_alloc_io_channel(bs);
4080 	CU_ASSERT(channel != NULL);
4081 
4082 	ut_spdk_blob_opts_init(&opts);
4083 	opts.thin_provision = true;
4084 
4085 	/* Create and delete a blob at md page 0, so that the next md page allocation
4086 	 * for an extent will reuse it. */
4087 	blob_id0 = ut_blob_create_and_open(bs, &opts);
4088 	blob = ut_blob_create_and_open(bs, &opts);
4089 	ut_blob_close_and_delete(bs, blob_id0);
4090 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4091 
4092 	CU_ASSERT(blob->active.num_clusters == 0);
4093 
4094 	/* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */
4095 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
4096 	poll_threads();
4097 	CU_ASSERT(g_bserrno == 0);
4098 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4099 	CU_ASSERT(blob->active.num_clusters == 5);
4100 
4101 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
4102 	poll_threads();
4103 	CU_ASSERT(g_bserrno == 0);
4104 	/* Sync must not change anything */
4105 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4106 	CU_ASSERT(blob->active.num_clusters == 5);
4107 
4108 	/* Payload should be all zeros from unallocated clusters */
4109 	memset(payload_read, 0xFF, sizeof(payload_read));
4110 	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
4111 	poll_threads();
4112 	CU_ASSERT(g_bserrno == 0);
4113 	CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0);
4114 
4115 	write_bytes = g_dev_write_bytes;
4116 	read_bytes = g_dev_read_bytes;
4117 
4118 	/* Perform a write on thread 1. That will allocate a cluster on thread 0 via send_msg */
4119 	set_thread(1);
4120 	channel_thread1 = spdk_bs_alloc_io_channel(bs);
4121 	CU_ASSERT(channel_thread1 != NULL);
4122 	memset(payload_write, 0xE5, sizeof(payload_write));
4123 	spdk_blob_io_write(blob, channel_thread1, payload_write, 4, 10, blob_op_complete, NULL);
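	/* The cluster is claimed at submission time, before the write completes, hence the
	 * free cluster count already drops by one here. */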
4124 	CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs));
4125 	/* Perform a write on thread 0. That will try to allocate a cluster,
4126 	 * but fail because the other thread issued the cluster allocation first. */
4127 	set_thread(0);
4128 	memset(payload_write, 0xE5, sizeof(payload_write));
4129 	spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
4130 	CU_ASSERT(free_clusters - 2 == spdk_bs_free_cluster_count(bs));
4131 	poll_threads();
4132 	CU_ASSERT(g_bserrno == 0);
4133 	CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs));
4134 	/* For a thin-provisioned blob we need to write 20 pages of payload (two 10-page
4135 	 * writes) plus one metadata page, and read 0 bytes */
4136 	if (g_use_extent_table) {
4137 		/* Add one more page for EXTENT_PAGE write */
4138 		CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 22);
4139 	} else {
4140 		CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 21);
4141 	}
4142 	CU_ASSERT(g_dev_read_bytes - read_bytes == 0);
4143 
4144 	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
4145 	poll_threads();
4146 	CU_ASSERT(g_bserrno == 0);
4147 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
4148 
4149 	ut_blob_close_and_delete(bs, blob);
4150 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4151 
4152 	set_thread(1);
4153 	spdk_bs_free_io_channel(channel_thread1);
4154 	set_thread(0);
4155 	spdk_bs_free_io_channel(channel);
4156 	poll_threads();
4157 	g_blob = NULL;
4158 	g_blobid = 0;
4159 }
4160 
4161 static void
4162 blob_thin_prov_write_count_io(void)
4163 {
4164 	struct spdk_blob_store *bs;
4165 	struct spdk_blob *blob;
4166 	struct spdk_io_channel *ch;
4167 	struct spdk_bs_dev *dev;
4168 	struct spdk_bs_opts bs_opts;
4169 	struct spdk_blob_opts opts;
4170 	uint64_t free_clusters;
4171 	uint64_t page_size;
4172 	uint8_t payload_write[4096];
4173 	uint64_t write_bytes;
4174 	uint64_t read_bytes;
4175 	const uint32_t CLUSTER_SZ = 16384;
4176 	uint32_t pages_per_cluster;
4177 	uint32_t pages_per_extent_page;
4178 	uint32_t i;
4179 
4180 	/* Use a very small cluster size for this test.  This ensures we need multiple
4181 	 * extent pages to hold all of the clusters even for relatively small blobs like
4182 	 * we are restricted to for the unit tests (i.e. we don't want to allocate multi-GB
4183 	 * buffers).
4184 	 */
4185 	dev = init_dev();
4186 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
4187 	bs_opts.cluster_sz = CLUSTER_SZ;
4188 
4189 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
4190 	poll_threads();
4191 	CU_ASSERT(g_bserrno == 0);
4192 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
4193 	bs = g_bs;
4194 
4195 	free_clusters = spdk_bs_free_cluster_count(bs);
4196 	page_size = spdk_bs_get_page_size(bs);
4197 	pages_per_cluster = CLUSTER_SZ / page_size;
4198 	pages_per_extent_page = SPDK_EXTENTS_PER_EP * pages_per_cluster;
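	/* Each extent page tracks SPDK_EXTENTS_PER_EP clusters, so an offset of
	 * pages_per_extent_page * i below lands in the first cluster covered by the
	 * i-th extent page. */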
4199 
4200 	ch = spdk_bs_alloc_io_channel(bs);
4201 	SPDK_CU_ASSERT_FATAL(ch != NULL);
4202 
4203 	ut_spdk_blob_opts_init(&opts);
4204 	opts.thin_provision = true;
4205 
4206 	blob = ut_blob_create_and_open(bs, &opts);
4207 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4208 
4209 	/* Resize the blob so that it will require 8 extent pages to hold all of
4210 	 * the clusters.
4211 	 */
4212 	g_bserrno = -1;
4213 	spdk_blob_resize(blob, SPDK_EXTENTS_PER_EP * 8, blob_op_complete, NULL);
4214 	poll_threads();
4215 	CU_ASSERT(g_bserrno == 0);
4216 
4217 	g_bserrno = -1;
4218 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
4219 	poll_threads();
4220 	CU_ASSERT(g_bserrno == 0);
4221 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4222 	CU_ASSERT(blob->active.num_clusters == SPDK_EXTENTS_PER_EP * 8);
4223 
4224 	memset(payload_write, 0, sizeof(payload_write));
4225 	for (i = 0; i < 8; i++) {
4226 		write_bytes = g_dev_write_bytes;
4227 		read_bytes = g_dev_read_bytes;
4228 
4229 		g_bserrno = -1;
4230 		spdk_blob_io_write(blob, ch, payload_write, pages_per_extent_page * i, 1, blob_op_complete, NULL);
4231 		poll_threads();
4232 		CU_ASSERT(g_bserrno == 0);
4233 		CU_ASSERT(free_clusters - (2 * i + 1) == spdk_bs_free_cluster_count(bs));
4234 
4235 		CU_ASSERT(g_dev_read_bytes == read_bytes);
4236 		if (!g_use_extent_table) {
4237 			/* For legacy metadata, we should have written two pages - one for the
4238 			 * write I/O itself, another for the blob's primary metadata.
4239 			 */
4240 			CU_ASSERT((g_dev_write_bytes - write_bytes) / page_size == 2);
4241 		} else {
4242 			/* For extent table metadata, we should have written three pages - one
4243 			 * for the write I/O, one for the extent page, one for the blob's primary
4244 			 * metadata.
4245 			 */
4246 			CU_ASSERT((g_dev_write_bytes - write_bytes) / page_size == 3);
4247 		}
4248 
4249 		/* The write should have synced the metadata already.  Do another sync here
4250 		 * just to confirm.
4251 		 */
4252 		write_bytes = g_dev_write_bytes;
4253 		read_bytes = g_dev_read_bytes;
4254 
4255 		g_bserrno = -1;
4256 		spdk_blob_sync_md(blob, blob_op_complete, NULL);
4257 		poll_threads();
4258 		CU_ASSERT(g_bserrno == 0);
4259 		CU_ASSERT(free_clusters - (2 * i + 1) == spdk_bs_free_cluster_count(bs));
4260 
4261 		CU_ASSERT(g_dev_read_bytes == read_bytes);
4262 		CU_ASSERT(g_dev_write_bytes == write_bytes);
4263 
4264 		/* Now write to another unallocated cluster that is part of the same extent page. */
4265 		g_bserrno = -1;
4266 		spdk_blob_io_write(blob, ch, payload_write, pages_per_extent_page * i + pages_per_cluster,
4267 				   1, blob_op_complete, NULL);
4268 		poll_threads();
4269 		CU_ASSERT(g_bserrno == 0);
4270 		CU_ASSERT(free_clusters - (2 * i + 2) == spdk_bs_free_cluster_count(bs));
4271 
4272 		CU_ASSERT(g_dev_read_bytes == read_bytes);
4273 		/*
4274 		 * For legacy metadata, we should have written the I/O and the primary metadata page.
4275 		 * For extent table metadata, we should have written the I/O and the extent metadata page.
4276 		 */
4277 		CU_ASSERT((g_dev_write_bytes - write_bytes) / page_size == 2);
4278 	}
4279 
4280 	ut_blob_close_and_delete(bs, blob);
4281 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4282 
4283 	spdk_bs_free_io_channel(ch);
4284 	poll_threads();
4285 	g_blob = NULL;
4286 	g_blobid = 0;
4287 
4288 	spdk_bs_unload(bs, bs_op_complete, NULL);
4289 	poll_threads();
4290 	CU_ASSERT(g_bserrno == 0);
4291 	g_bs = NULL;
4292 }
4293 
4294 static void
4295 blob_thin_prov_rle(void)
4296 {
4297 	static const uint8_t zero[10 * 4096] = { 0 };
4298 	struct spdk_blob_store *bs = g_bs;
4299 	struct spdk_blob *blob;
4300 	struct spdk_io_channel *channel;
4301 	struct spdk_blob_opts opts;
4302 	spdk_blob_id blobid;
4303 	uint64_t free_clusters;
4304 	uint64_t page_size;
4305 	uint8_t payload_read[10 * 4096];
4306 	uint8_t payload_write[10 * 4096];
4307 	uint64_t write_bytes;
4308 	uint64_t read_bytes;
4309 	uint64_t io_unit;
4310 
4311 	free_clusters = spdk_bs_free_cluster_count(bs);
4312 	page_size = spdk_bs_get_page_size(bs);
4313 
4314 	ut_spdk_blob_opts_init(&opts);
4315 	opts.thin_provision = true;
4316 	opts.num_clusters = 5;
4317 
4318 	blob = ut_blob_create_and_open(bs, &opts);
4319 	blobid = spdk_blob_get_id(blob);
4320 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4321 
4322 	channel = spdk_bs_alloc_io_channel(bs);
4323 	CU_ASSERT(channel != NULL);
4324 
4325 	/* Specifically target the second cluster in the blob as the first allocation */
4326 	io_unit = bs_cluster_to_page(bs, 1) * bs_io_unit_per_page(bs);
4327 
4328 	/* Payload should be all zeros from unallocated clusters */
4329 	memset(payload_read, 0xFF, sizeof(payload_read));
4330 	spdk_blob_io_read(blob, channel, payload_read, io_unit, 10, blob_op_complete, NULL);
4331 	poll_threads();
4332 	CU_ASSERT(g_bserrno == 0);
4333 	CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0);
4334 
4335 	write_bytes = g_dev_write_bytes;
4336 	read_bytes = g_dev_read_bytes;
4337 
4338 	/* Issue write to second cluster in a blob */
4339 	memset(payload_write, 0xE5, sizeof(payload_write));
4340 	spdk_blob_io_write(blob, channel, payload_write, io_unit, 10, blob_op_complete, NULL);
4341 	poll_threads();
4342 	CU_ASSERT(g_bserrno == 0);
4343 	CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs));
4344 	/* For a thin-provisioned blob we need to write 10 pages of payload plus one
4345 	 * metadata page, and read 0 bytes */
4346 	if (g_use_extent_table) {
4347 		/* Add one more page for EXTENT_PAGE write */
4348 		CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 12);
4349 	} else {
4350 		CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 11);
4351 	}
4352 	CU_ASSERT(g_dev_read_bytes - read_bytes == 0);
4353 
4354 	spdk_blob_io_read(blob, channel, payload_read, io_unit, 10, blob_op_complete, NULL);
4355 	poll_threads();
4356 	CU_ASSERT(g_bserrno == 0);
4357 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
4358 
4359 	spdk_bs_free_io_channel(channel);
4360 	poll_threads();
4361 
4362 	spdk_blob_close(blob, blob_op_complete, NULL);
4363 	poll_threads();
4364 	CU_ASSERT(g_bserrno == 0);
4365 
4366 	ut_bs_reload(&bs, NULL);
4367 
4368 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
4369 	poll_threads();
4370 	CU_ASSERT(g_bserrno == 0);
4371 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4372 	blob = g_blob;
4373 
4374 	channel = spdk_bs_alloc_io_channel(bs);
4375 	CU_ASSERT(channel != NULL);
4376 
4377 	/* Read second cluster after blob reload to confirm data written */
4378 	spdk_blob_io_read(blob, channel, payload_read, io_unit, 10, blob_op_complete, NULL);
4379 	poll_threads();
4380 	CU_ASSERT(g_bserrno == 0);
4381 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
4382 
4383 	spdk_bs_free_io_channel(channel);
4384 	poll_threads();
4385 
4386 	ut_blob_close_and_delete(bs, blob);
4387 }
4388 
4389 static void
4390 blob_thin_prov_rw_iov(void)
4391 {
4392 	static const uint8_t zero[10 * 4096] = { 0 };
4393 	struct spdk_blob_store *bs = g_bs;
4394 	struct spdk_blob *blob;
4395 	struct spdk_io_channel *channel;
4396 	struct spdk_blob_opts opts;
4397 	uint64_t free_clusters;
4398 	uint8_t payload_read[10 * 4096];
4399 	uint8_t payload_write[10 * 4096];
4400 	struct iovec iov_read[3];
4401 	struct iovec iov_write[3];
4402 
4403 	free_clusters = spdk_bs_free_cluster_count(bs);
4404 
4405 	channel = spdk_bs_alloc_io_channel(bs);
4406 	CU_ASSERT(channel != NULL);
4407 
4408 	ut_spdk_blob_opts_init(&opts);
4409 	opts.thin_provision = true;
4410 
4411 	blob = ut_blob_create_and_open(bs, &opts);
4412 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4413 
4414 	CU_ASSERT(blob->active.num_clusters == 0);
4415 
4416 	/* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */
4417 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
4418 	poll_threads();
4419 	CU_ASSERT(g_bserrno == 0);
4420 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4421 	CU_ASSERT(blob->active.num_clusters == 5);
4422 
4423 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
4424 	poll_threads();
4425 	CU_ASSERT(g_bserrno == 0);
4426 	/* Sync must not change anything */
4427 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4428 	CU_ASSERT(blob->active.num_clusters == 5);
4429 
4430 	/* Payload should be all zeros from unallocated clusters */
4431 	memset(payload_read, 0xAA, sizeof(payload_read));
4432 	iov_read[0].iov_base = payload_read;
4433 	iov_read[0].iov_len = 3 * 4096;
4434 	iov_read[1].iov_base = payload_read + 3 * 4096;
4435 	iov_read[1].iov_len = 4 * 4096;
4436 	iov_read[2].iov_base = payload_read + 7 * 4096;
4437 	iov_read[2].iov_len = 3 * 4096;
4438 	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
4439 	poll_threads();
4440 	CU_ASSERT(g_bserrno == 0);
4441 	CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0);
4442 
4443 	memset(payload_write, 0xE5, sizeof(payload_write));
4444 	iov_write[0].iov_base = payload_write;
4445 	iov_write[0].iov_len = 1 * 4096;
4446 	iov_write[1].iov_base = payload_write + 1 * 4096;
4447 	iov_write[1].iov_len = 5 * 4096;
4448 	iov_write[2].iov_base = payload_write + 6 * 4096;
4449 	iov_write[2].iov_len = 4 * 4096;
4450 
4451 	spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
4452 	poll_threads();
4453 	CU_ASSERT(g_bserrno == 0);
4454 
4455 	memset(payload_read, 0xAA, sizeof(payload_read));
4456 	iov_read[0].iov_base = payload_read;
4457 	iov_read[0].iov_len = 3 * 4096;
4458 	iov_read[1].iov_base = payload_read + 3 * 4096;
4459 	iov_read[1].iov_len = 4 * 4096;
4460 	iov_read[2].iov_base = payload_read + 7 * 4096;
4461 	iov_read[2].iov_len = 3 * 4096;
4462 	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
4463 	poll_threads();
4464 	CU_ASSERT(g_bserrno == 0);
4465 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
4466 
4467 	spdk_bs_free_io_channel(channel);
4468 	poll_threads();
4469 
4470 	ut_blob_close_and_delete(bs, blob);
4471 }
4472 
4473 struct iter_ctx {
4474 	int		current_iter;
4475 	spdk_blob_id	blobid[4];
4476 };
4477 
4478 static void
4479 test_iter(void *arg, struct spdk_blob *blob, int bserrno)
4480 {
4481 	struct iter_ctx *iter_ctx = arg;
4482 	spdk_blob_id blobid;
4483 
4484 	CU_ASSERT(bserrno == 0);
4485 	blobid = spdk_blob_get_id(blob);
4486 	CU_ASSERT(blobid == iter_ctx->blobid[iter_ctx->current_iter++]);
4487 }
4488 
4489 static void
4490 bs_load_iter_test(void)
4491 {
4492 	struct spdk_blob_store *bs;
4493 	struct spdk_bs_dev *dev;
4494 	struct iter_ctx iter_ctx = { 0 };
4495 	struct spdk_blob *blob;
4496 	int i, rc;
4497 	struct spdk_bs_opts opts;
4498 
4499 	dev = init_dev();
4500 	spdk_bs_opts_init(&opts, sizeof(opts));
4501 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
4502 
4503 	/* Initialize a new blob store */
4504 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
4505 	poll_threads();
4506 	CU_ASSERT(g_bserrno == 0);
4507 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
4508 	bs = g_bs;
4509 
4510 	for (i = 0; i < 4; i++) {
4511 		blob = ut_blob_create_and_open(bs, NULL);
4512 		iter_ctx.blobid[i] = spdk_blob_get_id(blob);
4513 
4514 		/* Just save the blobid as an xattr for testing purposes. */
4515 		rc = spdk_blob_set_xattr(blob, "blobid", &iter_ctx.blobid[i], sizeof(spdk_blob_id));
4516 		CU_ASSERT(rc == 0);
4517 
4518 		/* Resize the blob */
4519 		spdk_blob_resize(blob, i, blob_op_complete, NULL);
4520 		poll_threads();
4521 		CU_ASSERT(g_bserrno == 0);
4522 
4523 		spdk_blob_close(blob, blob_op_complete, NULL);
4524 		poll_threads();
4525 		CU_ASSERT(g_bserrno == 0);
4526 	}
4527 
4528 	g_bserrno = -1;
4529 	spdk_bs_unload(bs, bs_op_complete, NULL);
4530 	poll_threads();
4531 	CU_ASSERT(g_bserrno == 0);
4532 
4533 	dev = init_dev();
4534 	spdk_bs_opts_init(&opts, sizeof(opts));
4535 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
4536 	opts.iter_cb_fn = test_iter;
4537 	opts.iter_cb_arg = &iter_ctx;
4538 
4539 	/* Test blob iteration during load after a clean shutdown. */
4540 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
4541 	poll_threads();
4542 	CU_ASSERT(g_bserrno == 0);
4543 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
4544 	bs = g_bs;
4545 
4546 	/* Dirty shutdown */
4547 	bs_free(bs);
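	/* bs_free() tears down the in-memory blob store without writing the clean-shutdown
	 * super block, so the next load is expected to take the dirty-load (recovery) path. */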
4548 
4549 	dev = init_dev();
4550 	spdk_bs_opts_init(&opts, sizeof(opts));
4551 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
4552 	opts.iter_cb_fn = test_iter;
4553 	iter_ctx.current_iter = 0;
4554 	opts.iter_cb_arg = &iter_ctx;
4555 
4556 	/* Test blob iteration during load after a dirty shutdown. */
4557 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
4558 	poll_threads();
4559 	CU_ASSERT(g_bserrno == 0);
4560 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
4561 	bs = g_bs;
4562 
4563 	spdk_bs_unload(bs, bs_op_complete, NULL);
4564 	poll_threads();
4565 	CU_ASSERT(g_bserrno == 0);
4566 	g_bs = NULL;
4567 }
4568 
4569 static void
4570 blob_snapshot_rw(void)
4571 {
4572 	static const uint8_t zero[10 * 4096] = { 0 };
4573 	struct spdk_blob_store *bs = g_bs;
4574 	struct spdk_blob *blob, *snapshot;
4575 	struct spdk_io_channel *channel;
4576 	struct spdk_blob_opts opts;
4577 	spdk_blob_id blobid, snapshotid;
4578 	uint64_t free_clusters;
4579 	uint64_t cluster_size;
4580 	uint64_t page_size;
4581 	uint8_t payload_read[10 * 4096];
4582 	uint8_t payload_write[10 * 4096];
4583 	uint64_t write_bytes_start;
4584 	uint64_t read_bytes_start;
4585 	uint64_t copy_bytes_start;
4586 	uint64_t write_bytes;
4587 	uint64_t read_bytes;
4588 	uint64_t copy_bytes;
4589 
4590 	free_clusters = spdk_bs_free_cluster_count(bs);
4591 	cluster_size = spdk_bs_get_cluster_size(bs);
4592 	page_size = spdk_bs_get_page_size(bs);
4593 
4594 	channel = spdk_bs_alloc_io_channel(bs);
4595 	CU_ASSERT(channel != NULL);
4596 
4597 	ut_spdk_blob_opts_init(&opts);
4598 	opts.thin_provision = true;
4599 	opts.num_clusters = 5;
4600 
4601 	blob = ut_blob_create_and_open(bs, &opts);
4602 	blobid = spdk_blob_get_id(blob);
4603 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4604 
4605 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
4606 
4607 	memset(payload_read, 0xFF, sizeof(payload_read));
4608 	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
4609 	poll_threads();
4610 	CU_ASSERT(g_bserrno == 0);
4611 	CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0);
4612 
4613 	memset(payload_write, 0xE5, sizeof(payload_write));
4614 	spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
4615 	poll_threads();
4616 	CU_ASSERT(g_bserrno == 0);
4617 	CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs));
4618 
4619 	/* Create snapshot from blob */
4620 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
4621 	poll_threads();
4622 	CU_ASSERT(g_bserrno == 0);
4623 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
4624 	snapshotid = g_blobid;
4625 
4626 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
4627 	poll_threads();
4628 	CU_ASSERT(g_bserrno == 0);
4629 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4630 	snapshot = g_blob;
4631 	CU_ASSERT(snapshot->data_ro == true);
4632 	CU_ASSERT(snapshot->md_ro == true);
4633 
4634 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5);
4635 
4636 	write_bytes_start = g_dev_write_bytes;
4637 	read_bytes_start = g_dev_read_bytes;
4638 	copy_bytes_start = g_dev_copy_bytes;
4639 
4640 	memset(payload_write, 0xAA, sizeof(payload_write));
4641 	spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
4642 	poll_threads();
4643 	CU_ASSERT(g_bserrno == 0);
4644 	CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs));
4645 
4646 	/* For a clone we need to allocate and copy one cluster, update one page of metadata
4647 	 * and then write 10 pages of payload.
4648 	 */
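	/* When the backing dev supports copy offload (g_dev_copy_enabled), the cluster copy
	 * is accounted in g_dev_copy_bytes rather than in read and write bytes, which is
	 * why copy_bytes is added to both sides of the assertions below. */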
4649 	write_bytes = g_dev_write_bytes - write_bytes_start;
4650 	read_bytes = g_dev_read_bytes - read_bytes_start;
4651 	copy_bytes = g_dev_copy_bytes - copy_bytes_start;
4652 	if (g_dev_copy_enabled) {
4653 		CU_ASSERT(copy_bytes == cluster_size);
4654 	} else {
4655 		CU_ASSERT(copy_bytes == 0);
4656 	}
4657 	if (g_use_extent_table) {
4658 		/* Add one more page for EXTENT_PAGE write */
4659 		CU_ASSERT(write_bytes + copy_bytes == page_size * 12 + cluster_size);
4660 	} else {
4661 		CU_ASSERT(write_bytes + copy_bytes == page_size * 11 + cluster_size);
4662 	}
4663 	CU_ASSERT(read_bytes + copy_bytes == cluster_size);
4664 
4665 	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
4666 	poll_threads();
4667 	CU_ASSERT(g_bserrno == 0);
4668 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
4669 
4670 	/* Data on snapshot should not change after write to clone */
4671 	memset(payload_write, 0xE5, sizeof(payload_write));
4672 	spdk_blob_io_read(snapshot, channel, payload_read, 4, 10, blob_op_complete, NULL);
4673 	poll_threads();
4674 	CU_ASSERT(g_bserrno == 0);
4675 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
4676 
4677 	ut_blob_close_and_delete(bs, blob);
4678 	ut_blob_close_and_delete(bs, snapshot);
4679 
4680 	spdk_bs_free_io_channel(channel);
4681 	poll_threads();
4682 	g_blob = NULL;
4683 	g_blobid = 0;
4684 }
4685 
4686 static void
4687 blob_snapshot_rw_iov(void)
4688 {
4689 	static const uint8_t zero[10 * 4096] = { 0 };
4690 	struct spdk_blob_store *bs = g_bs;
4691 	struct spdk_blob *blob, *snapshot;
4692 	struct spdk_io_channel *channel;
4693 	struct spdk_blob_opts opts;
4694 	spdk_blob_id blobid, snapshotid;
4695 	uint64_t free_clusters;
4696 	uint8_t payload_read[10 * 4096];
4697 	uint8_t payload_write[10 * 4096];
4698 	struct iovec iov_read[3];
4699 	struct iovec iov_write[3];
4700 
4701 	free_clusters = spdk_bs_free_cluster_count(bs);
4702 
4703 	channel = spdk_bs_alloc_io_channel(bs);
4704 	CU_ASSERT(channel != NULL);
4705 
4706 	ut_spdk_blob_opts_init(&opts);
4707 	opts.thin_provision = true;
4708 	opts.num_clusters = 5;
4709 
4710 	blob = ut_blob_create_and_open(bs, &opts);
4711 	blobid = spdk_blob_get_id(blob);
4712 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4713 
4714 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
4715 
4716 	/* Create snapshot from blob */
4717 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
4718 	poll_threads();
4719 	CU_ASSERT(g_bserrno == 0);
4720 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
4721 	snapshotid = g_blobid;
4722 
4723 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
4724 	poll_threads();
4725 	CU_ASSERT(g_bserrno == 0);
4726 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4727 	snapshot = g_blob;
4728 	CU_ASSERT(snapshot->data_ro == true);
4729 	CU_ASSERT(snapshot->md_ro == true);
4730 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5);
4731 
4732 	/* Payload should be all zeros from unallocated clusters */
4733 	memset(payload_read, 0xAA, sizeof(payload_read));
4734 	iov_read[0].iov_base = payload_read;
4735 	iov_read[0].iov_len = 3 * 4096;
4736 	iov_read[1].iov_base = payload_read + 3 * 4096;
4737 	iov_read[1].iov_len = 4 * 4096;
4738 	iov_read[2].iov_base = payload_read + 7 * 4096;
4739 	iov_read[2].iov_len = 3 * 4096;
4740 	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
4741 	poll_threads();
4742 	CU_ASSERT(g_bserrno == 0);
4743 	CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0);
4744 
4745 	memset(payload_write, 0xE5, sizeof(payload_write));
4746 	iov_write[0].iov_base = payload_write;
4747 	iov_write[0].iov_len = 1 * 4096;
4748 	iov_write[1].iov_base = payload_write + 1 * 4096;
4749 	iov_write[1].iov_len = 5 * 4096;
4750 	iov_write[2].iov_base = payload_write + 6 * 4096;
4751 	iov_write[2].iov_len = 4 * 4096;
4752 
4753 	spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
4754 	poll_threads();
4755 	CU_ASSERT(g_bserrno == 0);
4756 
4757 	memset(payload_read, 0xAA, sizeof(payload_read));
4758 	iov_read[0].iov_base = payload_read;
4759 	iov_read[0].iov_len = 3 * 4096;
4760 	iov_read[1].iov_base = payload_read + 3 * 4096;
4761 	iov_read[1].iov_len = 4 * 4096;
4762 	iov_read[2].iov_base = payload_read + 7 * 4096;
4763 	iov_read[2].iov_len = 3 * 4096;
4764 	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
4765 	poll_threads();
4766 	CU_ASSERT(g_bserrno == 0);
4767 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
4768 
4769 	spdk_bs_free_io_channel(channel);
4770 	poll_threads();
4771 
4772 	ut_blob_close_and_delete(bs, blob);
4773 	ut_blob_close_and_delete(bs, snapshot);
4774 }
4775 
4776 /**
4777  * Inflate / decouple parent rw unit tests.
4778  *
4779  * --------------
4780  * original blob:         0         1         2         3         4
4781  *                   ,---------+---------+---------+---------+---------.
4782  *         snapshot  |xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|    -    |
4783  *                   +---------+---------+---------+---------+---------+
4784  *         snapshot2 |    -    |yyyyyyyyy|    -    |yyyyyyyyy|    -    |
4785  *                   +---------+---------+---------+---------+---------+
4786  *         blob      |    -    |zzzzzzzzz|    -    |    -    |    -    |
4787  *                   '---------+---------+---------+---------+---------'
4788  *                   .         .         .         .         .         .
4789  * --------          .         .         .         .         .         .
4790  * inflate:          .         .         .         .         .         .
4791  *                   ,---------+---------+---------+---------+---------.
4792  *         blob      |xxxxxxxxx|zzzzzzzzz|xxxxxxxxx|yyyyyyyyy|000000000|
4793  *                   '---------+---------+---------+---------+---------'
4794  *
4795  *         NOTE: needs to allocate 4 clusters, thin provisioning removed, dependency
4796  *               on snapshot2 and snapshot removed .         .         .
4797  *                   .         .         .         .         .         .
4798  * ----------------  .         .         .         .         .         .
4799  * decouple parent:  .         .         .         .         .         .
4800  *                   ,---------+---------+---------+---------+---------.
4801  *         snapshot  |xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|    -    |
4802  *                   +---------+---------+---------+---------+---------+
4803  *         blob      |    -    |zzzzzzzzz|    -    |yyyyyyyyy|    -    |
4804  *                   '---------+---------+---------+---------+---------'
4805  *
4806  *         NOTE: needs to allocate 1 cluster, 3 clusters unallocated, dependency
4807  *               on snapshot2 removed and on snapshot still exists. Snapshot2
4808  *               should remain a clone of snapshot.
4809  */
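/* Both variants are exercised by the helper below: decouple_parent == false runs
 * spdk_bs_inflate_blob(), decouple_parent == true runs spdk_bs_blob_decouple_parent(). */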
4810 static void
4811 _blob_inflate_rw(bool decouple_parent)
4812 {
4813 	struct spdk_blob_store *bs = g_bs;
4814 	struct spdk_blob *blob, *snapshot, *snapshot2;
4815 	struct spdk_io_channel *channel;
4816 	struct spdk_blob_opts opts;
4817 	spdk_blob_id blobid, snapshotid, snapshot2id;
4818 	uint64_t free_clusters;
4819 	uint64_t cluster_size;
4820 
4821 	uint64_t payload_size;
4822 	uint8_t *payload_read;
4823 	uint8_t *payload_write;
4824 	uint8_t *payload_clone;
4825 
4826 	uint64_t pages_per_cluster;
4827 	uint64_t pages_per_payload;
4828 
4829 	int i;
4830 	spdk_blob_id ids[2];
4831 	size_t count;
4832 
4833 	free_clusters = spdk_bs_free_cluster_count(bs);
4834 	cluster_size = spdk_bs_get_cluster_size(bs);
4835 	pages_per_cluster = cluster_size / spdk_bs_get_page_size(bs);
4836 	pages_per_payload = pages_per_cluster * 5;
4837 
4838 	payload_size = cluster_size * 5;
4839 
4840 	payload_read = malloc(payload_size);
4841 	SPDK_CU_ASSERT_FATAL(payload_read != NULL);
4842 
4843 	payload_write = malloc(payload_size);
4844 	SPDK_CU_ASSERT_FATAL(payload_write != NULL);
4845 
4846 	payload_clone = malloc(payload_size);
4847 	SPDK_CU_ASSERT_FATAL(payload_clone != NULL);
4848 
4849 	channel = spdk_bs_alloc_io_channel(bs);
4850 	SPDK_CU_ASSERT_FATAL(channel != NULL);
4851 
4852 	/* Create blob */
4853 	ut_spdk_blob_opts_init(&opts);
4854 	opts.thin_provision = true;
4855 	opts.num_clusters = 5;
4856 
4857 	blob = ut_blob_create_and_open(bs, &opts);
4858 	blobid = spdk_blob_get_id(blob);
4859 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4860 
4861 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
4862 
4863 	/* 1) Initial read should return zeroed payload */
4864 	memset(payload_read, 0xFF, payload_size);
4865 	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload,
4866 			  blob_op_complete, NULL);
4867 	poll_threads();
4868 	CU_ASSERT(g_bserrno == 0);
4869 	CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size));
4870 
4871 	/* Fill the whole blob with a pattern, except the last cluster (to be sure it
4872 	 * isn't allocated) */
4873 	memset(payload_write, 0xE5, payload_size - cluster_size);
4874 	spdk_blob_io_write(blob, channel, payload_write, 0, pages_per_payload -
4875 			   pages_per_cluster, blob_op_complete, NULL);
4876 	poll_threads();
4877 	CU_ASSERT(g_bserrno == 0);
4878 	CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs));
4879 
4880 	/* 2) Create snapshot from blob (first level) */
4881 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
4882 	poll_threads();
4883 	CU_ASSERT(g_bserrno == 0);
4884 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
4885 	snapshotid = g_blobid;
4886 
4887 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
4888 	poll_threads();
4889 	CU_ASSERT(g_bserrno == 0);
4890 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4891 	snapshot = g_blob;
4892 	CU_ASSERT(snapshot->data_ro == true);
4893 	CU_ASSERT(snapshot->md_ro == true);
4894 
4895 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5);
4896 
4897 	/* Write every second cluster with a pattern.
4898 	 *
4899 	 * The last cluster shouldn't be written, to be sure that neither the snapshot
4900 	 * nor the clone allocates it.
4901 	 *
4902 	 * payload_clone stores the expected result of reading "blob" at this point and
4903 	 * is used only to check data consistency on the clone before and after
4904 	 * inflation. Initially we fill it with the backing snapshot's pattern
4905 	 * used before.
4906 	 */
4907 	memset(payload_clone, 0xE5, payload_size - cluster_size);
4908 	memset(payload_clone + payload_size - cluster_size, 0x00, cluster_size);
4909 	memset(payload_write, 0xAA, payload_size);
4910 	for (i = 1; i < 5; i += 2) {
4911 		spdk_blob_io_write(blob, channel, payload_write, i * pages_per_cluster,
4912 				   pages_per_cluster, blob_op_complete, NULL);
4913 		poll_threads();
4914 		CU_ASSERT(g_bserrno == 0);
4915 
4916 		/* Update expected result */
4917 		memcpy(payload_clone + (cluster_size * i), payload_write,
4918 		       cluster_size);
4919 	}
4920 	CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs));
4921 
4922 	/* Check data consistency on clone */
4923 	memset(payload_read, 0xFF, payload_size);
4924 	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload,
4925 			  blob_op_complete, NULL);
4926 	poll_threads();
4927 	CU_ASSERT(g_bserrno == 0);
4928 	CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0);
4929 
4930 	/* 3) Create a second-level snapshot from the blob */
4931 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
4932 	poll_threads();
4933 	CU_ASSERT(g_bserrno == 0);
4934 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
4935 	snapshot2id = g_blobid;
4936 
4937 	spdk_bs_open_blob(bs, snapshot2id, blob_op_with_handle_complete, NULL);
4938 	poll_threads();
4939 	CU_ASSERT(g_bserrno == 0);
4940 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4941 	snapshot2 = g_blob;
4942 	CU_ASSERT(snapshot2->data_ro == true);
4943 	CU_ASSERT(snapshot2->md_ro == true);
4944 
4945 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot2) == 5);
4946 
4947 	CU_ASSERT(snapshot2->parent_id == snapshotid);
4948 
4949 	/* Write one cluster on the top level blob. This cluster (1) covers an
4950 	 * already allocated cluster in snapshot2, so it shouldn't be inflated
4951 	 * at all */
4952 	spdk_blob_io_write(blob, channel, payload_write, pages_per_cluster,
4953 			   pages_per_cluster, blob_op_complete, NULL);
4954 	poll_threads();
4955 	CU_ASSERT(g_bserrno == 0);
4956 
4957 	/* Update expected result */
4958 	memcpy(payload_clone + cluster_size, payload_write, cluster_size);
4959 
4960 	/* Check data consistency on clone */
4961 	memset(payload_read, 0xFF, payload_size);
4962 	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload,
4963 			  blob_op_complete, NULL);
4964 	poll_threads();
4965 	CU_ASSERT(g_bserrno == 0);
4966 	CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0);
4967 
4968 
4969 	/* Close all blobs */
4970 	spdk_blob_close(blob, blob_op_complete, NULL);
4971 	poll_threads();
4972 	CU_ASSERT(g_bserrno == 0);
4973 
4974 	spdk_blob_close(snapshot2, blob_op_complete, NULL);
4975 	poll_threads();
4976 	CU_ASSERT(g_bserrno == 0);
4977 
4978 	spdk_blob_close(snapshot, blob_op_complete, NULL);
4979 	poll_threads();
4980 	CU_ASSERT(g_bserrno == 0);
4981 
4982 	/* Check snapshot-clone relations */
4983 	count = 2;
4984 	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);
4985 	CU_ASSERT(count == 1);
4986 	CU_ASSERT(ids[0] == snapshot2id);
4987 
4988 	count = 2;
4989 	CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0);
4990 	CU_ASSERT(count == 1);
4991 	CU_ASSERT(ids[0] == blobid);
4992 
4993 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshot2id);
4994 
4995 	free_clusters = spdk_bs_free_cluster_count(bs);
4996 	if (!decouple_parent) {
4997 		/* Do full blob inflation */
4998 		spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
4999 		poll_threads();
5000 		CU_ASSERT(g_bserrno == 0);
5001 
5002 		/* All clusters should be inflated (except the one already
5003 		 * allocated in the top level blob). */
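		/* Full inflation allocates the four remaining clusters (0, 2, 3 and 4);
		 * cluster 1 was already allocated by the write issued after snapshot2
		 * was taken, hence "free_clusters - 4". */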
5004 		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 4);
5005 
5006 		/* Check if relation tree updated correctly */
5007 		count = 2;
5008 		CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);
5009 
5010 		/* snapshotid has one clone */
5011 		CU_ASSERT(count == 1);
5012 		CU_ASSERT(ids[0] == snapshot2id);
5013 
5014 		/* snapshot2id has no clones */
5015 		count = 2;
5016 		CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0);
5017 		CU_ASSERT(count == 0);
5018 
5019 		CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID);
5020 	} else {
5021 		/* Decouple parent of blob */
5022 		spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
5023 		poll_threads();
5024 		CU_ASSERT(g_bserrno == 0);
5025 
5026 		/* Only one cluster from the parent should be inflated (the second
5027 		 * one is covered by a cluster already written to, and allocated on,
5028 		 * the top level blob). */
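		/* Decoupling copies only cluster 3 from snapshot2 (cluster 1 is already
		 * allocated in the blob) and re-parents the blob to the base snapshot,
		 * which is why snapshotid gains a second clone below. */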
5029 		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 1);
5030 
5031 		/* Check if relation tree updated correctly */
5032 		count = 2;
5033 		CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);
5034 
5035 		/* snapshotid has two clones now */
5036 		CU_ASSERT(count == 2);
5037 		CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
5038 		CU_ASSERT(ids[0] == snapshot2id || ids[1] == snapshot2id);
5039 
5040 		/* snapshot2id has no clones */
5041 		count = 2;
5042 		CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0);
5043 		CU_ASSERT(count == 0);
5044 
5045 		CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
5046 	}
5047 
5048 	/* Try to delete snapshot2 (should pass) */
5049 	spdk_bs_delete_blob(bs, snapshot2id, blob_op_complete, NULL);
5050 	poll_threads();
5051 	CU_ASSERT(g_bserrno == 0);
5052 
5053 	/* Try to delete base snapshot */
5054 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
5055 	poll_threads();
5056 	CU_ASSERT(g_bserrno == 0);
5057 
5058 	/* Reopen blob after snapshot deletion */
5059 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
5060 	poll_threads();
5061 	CU_ASSERT(g_bserrno == 0);
5062 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5063 	blob = g_blob;
5064 
5065 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
5066 
5067 	/* Check data consistency on inflated blob */
5068 	memset(payload_read, 0xFF, payload_size);
5069 	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload,
5070 			  blob_op_complete, NULL);
5071 	poll_threads();
5072 	CU_ASSERT(g_bserrno == 0);
5073 	CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0);
5074 
5075 	spdk_bs_free_io_channel(channel);
5076 	poll_threads();
5077 
5078 	free(payload_read);
5079 	free(payload_write);
5080 	free(payload_clone);
5081 
5082 	ut_blob_close_and_delete(bs, blob);
5083 }
5084 
5085 static void
5086 blob_inflate_rw(void)
5087 {
5088 	_blob_inflate_rw(false);
5089 	_blob_inflate_rw(true);
5090 }
5091 
5092 /**
5093  * Snapshot-clones relation test
5094  *
5095  *         snapshot
5096  *            |
5097  *      +-----+-----+
5098  *      |           |
5099  *   blob(ro)   snapshot2
5100  *      |           |
5101  *   clone2      clone
5102  */
5103 static void
5104 blob_relations(void)
5105 {
5106 	struct spdk_blob_store *bs;
5107 	struct spdk_bs_dev *dev;
5108 	struct spdk_bs_opts bs_opts;
5109 	struct spdk_blob_opts opts;
5110 	struct spdk_blob *blob, *snapshot, *snapshot2, *clone, *clone2;
5111 	spdk_blob_id blobid, cloneid, snapshotid, cloneid2, snapshotid2;
5112 	int rc;
5113 	size_t count;
5114 	spdk_blob_id ids[10] = {};
5115 
5116 	dev = init_dev();
5117 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
5118 	snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");
5119 
5120 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
5121 	poll_threads();
5122 	CU_ASSERT(g_bserrno == 0);
5123 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
5124 	bs = g_bs;
5125 
5126 	/* 1. Create blob with 10 clusters */
5127 
5128 	ut_spdk_blob_opts_init(&opts);
5129 	opts.num_clusters = 10;
5130 
5131 	blob = ut_blob_create_and_open(bs, &opts);
5132 	blobid = spdk_blob_get_id(blob);
5133 
5134 	CU_ASSERT(!spdk_blob_is_read_only(blob));
5135 	CU_ASSERT(!spdk_blob_is_snapshot(blob));
5136 	CU_ASSERT(!spdk_blob_is_clone(blob));
5137 	CU_ASSERT(!spdk_blob_is_thin_provisioned(blob));
5138 
5139 	/* blob should have neither an underlying snapshot nor any clones */
5140 	CU_ASSERT(blob->parent_id == SPDK_BLOBID_INVALID);
5141 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID);
5142 	count = SPDK_COUNTOF(ids);
5143 	rc = spdk_blob_get_clones(bs, blobid, ids, &count);
5144 	CU_ASSERT(rc == 0);
5145 	CU_ASSERT(count == 0);
5146 
5147 
5148 	/* 2. Create snapshot */
5149 
5150 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5151 	poll_threads();
5152 	CU_ASSERT(g_bserrno == 0);
5153 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5154 	snapshotid = g_blobid;
5155 
5156 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
5157 	poll_threads();
5158 	CU_ASSERT(g_bserrno == 0);
5159 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5160 	snapshot = g_blob;
5161 
5162 	CU_ASSERT(spdk_blob_is_read_only(snapshot));
5163 	CU_ASSERT(spdk_blob_is_snapshot(snapshot));
5164 	CU_ASSERT(!spdk_blob_is_clone(snapshot));
5165 	CU_ASSERT(snapshot->parent_id == SPDK_BLOBID_INVALID);
5166 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid) == SPDK_BLOBID_INVALID);
5167 
5168 	/* Check if the original blob was converted into a clone of the snapshot */
5169 	CU_ASSERT(!spdk_blob_is_read_only(blob));
5170 	CU_ASSERT(!spdk_blob_is_snapshot(blob));
5171 	CU_ASSERT(spdk_blob_is_clone(blob));
5172 	CU_ASSERT(spdk_blob_is_thin_provisioned(blob));
5173 	CU_ASSERT(blob->parent_id == snapshotid);
5174 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
5175 
5176 	count = SPDK_COUNTOF(ids);
5177 	rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
5178 	CU_ASSERT(rc == 0);
5179 	CU_ASSERT(count == 1);
5180 	CU_ASSERT(ids[0] == blobid);
5181 
5182 
5183 	/* 3. Create clone from snapshot */
5184 
5185 	spdk_bs_create_clone(bs, snapshotid, NULL, blob_op_with_id_complete, NULL);
5186 	poll_threads();
5187 	CU_ASSERT(g_bserrno == 0);
5188 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5189 	cloneid = g_blobid;
5190 
5191 	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
5192 	poll_threads();
5193 	CU_ASSERT(g_bserrno == 0);
5194 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5195 	clone = g_blob;
5196 
5197 	CU_ASSERT(!spdk_blob_is_read_only(clone));
5198 	CU_ASSERT(!spdk_blob_is_snapshot(clone));
5199 	CU_ASSERT(spdk_blob_is_clone(clone));
5200 	CU_ASSERT(spdk_blob_is_thin_provisioned(clone));
5201 	CU_ASSERT(clone->parent_id == snapshotid);
5202 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid);
5203 
5204 	count = SPDK_COUNTOF(ids);
5205 	rc = spdk_blob_get_clones(bs, cloneid, ids, &count);
5206 	CU_ASSERT(rc == 0);
5207 	CU_ASSERT(count == 0);
5208 
5209 	/* Check if clone is on the snapshot's list */
5210 	count = SPDK_COUNTOF(ids);
5211 	rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
5212 	CU_ASSERT(rc == 0);
5213 	CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
5214 	CU_ASSERT(ids[0] == cloneid || ids[1] == cloneid);
5215 
5216 
5217 	/* 4. Create snapshot of the clone */
5218 
5219 	spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL);
5220 	poll_threads();
5221 	CU_ASSERT(g_bserrno == 0);
5222 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5223 	snapshotid2 = g_blobid;
5224 
5225 	spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL);
5226 	poll_threads();
5227 	CU_ASSERT(g_bserrno == 0);
5228 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5229 	snapshot2 = g_blob;
5230 
5231 	CU_ASSERT(spdk_blob_is_read_only(snapshot2));
5232 	CU_ASSERT(spdk_blob_is_snapshot(snapshot2));
5233 	CU_ASSERT(spdk_blob_is_clone(snapshot2));
5234 	CU_ASSERT(snapshot2->parent_id == snapshotid);
5235 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid);
5236 
5237 	/* Check if clone is converted to the clone of snapshot2 and snapshot2
5238 	 * is a child of snapshot */
5239 	CU_ASSERT(!spdk_blob_is_read_only(clone));
5240 	CU_ASSERT(!spdk_blob_is_snapshot(clone));
5241 	CU_ASSERT(spdk_blob_is_clone(clone));
5242 	CU_ASSERT(spdk_blob_is_thin_provisioned(clone));
5243 	CU_ASSERT(clone->parent_id == snapshotid2);
5244 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2);
5245 
5246 	count = SPDK_COUNTOF(ids);
5247 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
5248 	CU_ASSERT(rc == 0);
5249 	CU_ASSERT(count == 1);
5250 	CU_ASSERT(ids[0] == cloneid);
5251 
5252 
5253 	/* 5. Try to create clone from read only blob */
5254 
5255 	/* Mark blob as read only */
5256 	spdk_blob_set_read_only(blob);
5257 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
5258 	poll_threads();
5259 	CU_ASSERT(g_bserrno == 0);
5260 
5261 	/* Check if the previously created blob is a read-only clone */
5262 	CU_ASSERT(spdk_blob_is_read_only(blob));
5263 	CU_ASSERT(!spdk_blob_is_snapshot(blob));
5264 	CU_ASSERT(spdk_blob_is_clone(blob));
5265 	CU_ASSERT(spdk_blob_is_thin_provisioned(blob));
5266 
5267 	/* Create clone from read only blob */
5268 	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5269 	poll_threads();
5270 	CU_ASSERT(g_bserrno == 0);
5271 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5272 	cloneid2 = g_blobid;
5273 
5274 	spdk_bs_open_blob(bs, cloneid2, blob_op_with_handle_complete, NULL);
5275 	poll_threads();
5276 	CU_ASSERT(g_bserrno == 0);
5277 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5278 	clone2 = g_blob;
5279 
5280 	CU_ASSERT(!spdk_blob_is_read_only(clone2));
5281 	CU_ASSERT(!spdk_blob_is_snapshot(clone2));
5282 	CU_ASSERT(spdk_blob_is_clone(clone2));
5283 	CU_ASSERT(spdk_blob_is_thin_provisioned(clone2));
5284 
5285 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid);
5286 
5287 	count = SPDK_COUNTOF(ids);
5288 	rc = spdk_blob_get_clones(bs, blobid, ids, &count);
5289 	CU_ASSERT(rc == 0);
5290 
5291 	CU_ASSERT(count == 1);
5292 	CU_ASSERT(ids[0] == cloneid2);
5293 
5294 	/* Close blobs */
5295 
5296 	spdk_blob_close(clone2, blob_op_complete, NULL);
5297 	poll_threads();
5298 	CU_ASSERT(g_bserrno == 0);
5299 
5300 	spdk_blob_close(blob, blob_op_complete, NULL);
5301 	poll_threads();
5302 	CU_ASSERT(g_bserrno == 0);
5303 
5304 	spdk_blob_close(clone, blob_op_complete, NULL);
5305 	poll_threads();
5306 	CU_ASSERT(g_bserrno == 0);
5307 
5308 	spdk_blob_close(snapshot, blob_op_complete, NULL);
5309 	poll_threads();
5310 	CU_ASSERT(g_bserrno == 0);
5311 
5312 	spdk_blob_close(snapshot2, blob_op_complete, NULL);
5313 	poll_threads();
5314 	CU_ASSERT(g_bserrno == 0);
5315 
5316 	/* Try to delete snapshot with more than 1 clone */
5317 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
5318 	poll_threads();
5319 	CU_ASSERT(g_bserrno != 0);
5320 
5321 	ut_bs_reload(&bs, &bs_opts);
5322 
5323 	/* A NULL ids array should return the number of clones in count */
5324 	count = SPDK_COUNTOF(ids);
5325 	rc = spdk_blob_get_clones(bs, snapshotid, NULL, &count);
5326 	CU_ASSERT(rc == -ENOMEM);
5327 	CU_ASSERT(count == 2);
5328 
5329 	/* incorrect array size */
5330 	count = 1;
5331 	rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
5332 	CU_ASSERT(rc == -ENOMEM);
5333 	CU_ASSERT(count == 2);
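	/* In both cases spdk_blob_get_clones() reports -ENOMEM and sets count to the
	 * required array size. Typical caller pattern (illustrative sketch only, not
	 * part of this test):
	 *
	 *   size_t n = 0;
	 *   spdk_blob_get_clones(bs, snapshotid, NULL, &n);   // -ENOMEM, n = required size
	 *   spdk_blob_id *buf = calloc(n, sizeof(*buf));
	 *   spdk_blob_get_clones(bs, snapshotid, buf, &n);    // fills buf with n clone ids
	 */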
5334 
5335 
5336 	/* Verify structure of loaded blob store */
5337 
5338 	/* snapshot */
5339 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid) == SPDK_BLOBID_INVALID);
5340 
5341 	count = SPDK_COUNTOF(ids);
5342 	rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
5343 	CU_ASSERT(rc == 0);
5344 	CU_ASSERT(count == 2);
5345 	CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
5346 	CU_ASSERT(ids[0] == snapshotid2 || ids[1] == snapshotid2);
5347 
5348 	/* blob */
5349 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
5350 	count = SPDK_COUNTOF(ids);
5351 	rc = spdk_blob_get_clones(bs, blobid, ids, &count);
5352 	CU_ASSERT(rc == 0);
5353 	CU_ASSERT(count == 1);
5354 	CU_ASSERT(ids[0] == cloneid2);
5355 
5356 	/* clone */
5357 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2);
5358 	count = SPDK_COUNTOF(ids);
5359 	rc = spdk_blob_get_clones(bs, cloneid, ids, &count);
5360 	CU_ASSERT(rc == 0);
5361 	CU_ASSERT(count == 0);
5362 
5363 	/* snapshot2 */
5364 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid);
5365 	count = SPDK_COUNTOF(ids);
5366 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
5367 	CU_ASSERT(rc == 0);
5368 	CU_ASSERT(count == 1);
5369 	CU_ASSERT(ids[0] == cloneid);
5370 
5371 	/* clone2 */
5372 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid);
5373 	count = SPDK_COUNTOF(ids);
5374 	rc = spdk_blob_get_clones(bs, cloneid2, ids, &count);
5375 	CU_ASSERT(rc == 0);
5376 	CU_ASSERT(count == 0);
5377 
5378 	/* Try to delete a blob that the user should not be able to remove */
5379 
5380 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
5381 	poll_threads();
5382 	CU_ASSERT(g_bserrno != 0);
5383 
5384 	/* Remove all blobs */
5385 
5386 	spdk_bs_delete_blob(bs, cloneid, blob_op_complete, NULL);
5387 	poll_threads();
5388 	CU_ASSERT(g_bserrno == 0);
5389 
5390 	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
5391 	poll_threads();
5392 	CU_ASSERT(g_bserrno == 0);
5393 
5394 	spdk_bs_delete_blob(bs, cloneid2, blob_op_complete, NULL);
5395 	poll_threads();
5396 	CU_ASSERT(g_bserrno == 0);
5397 
5398 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
5399 	poll_threads();
5400 	CU_ASSERT(g_bserrno == 0);
5401 
5402 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
5403 	poll_threads();
5404 	CU_ASSERT(g_bserrno == 0);
5405 
5406 	spdk_bs_unload(bs, bs_op_complete, NULL);
5407 	poll_threads();
5408 	CU_ASSERT(g_bserrno == 0);
5409 
5410 	g_bs = NULL;
5411 }
5412 
5413 /**
5414  * Snapshot-clones relation test 2
5415  *
5416  *         snapshot1
5417  *            |
5418  *         snapshot2
5419  *            |
5420  *      +-----+-----+
5421  *      |           |
5422  *   blob(ro)   snapshot3
5423  *      |           |
5424  *      |       snapshot4
5425  *      |        |     |
5426  *   clone2   clone  clone3
5427  */
5428 static void
5429 blob_relations2(void)
5430 {
5431 	struct spdk_blob_store *bs;
5432 	struct spdk_bs_dev *dev;
5433 	struct spdk_bs_opts bs_opts;
5434 	struct spdk_blob_opts opts;
5435 	struct spdk_blob *blob, *snapshot1, *snapshot2, *snapshot3, *snapshot4, *clone, *clone2;
5436 	spdk_blob_id blobid, snapshotid1, snapshotid2, snapshotid3, snapshotid4, cloneid, cloneid2,
5437 		     cloneid3;
5438 	int rc;
5439 	size_t count;
5440 	spdk_blob_id ids[10] = {};
5441 
5442 	dev = init_dev();
5443 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
5444 	snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");
5445 
5446 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
5447 	poll_threads();
5448 	CU_ASSERT(g_bserrno == 0);
5449 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
5450 	bs = g_bs;
5451 
5452 	/* 1. Create blob with 10 clusters */
5453 
5454 	ut_spdk_blob_opts_init(&opts);
5455 	opts.num_clusters = 10;
5456 
5457 	blob = ut_blob_create_and_open(bs, &opts);
5458 	blobid = spdk_blob_get_id(blob);
5459 
5460 	/* 2. Create snapshot1 */
5461 
5462 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5463 	poll_threads();
5464 	CU_ASSERT(g_bserrno == 0);
5465 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5466 	snapshotid1 = g_blobid;
5467 
5468 	spdk_bs_open_blob(bs, snapshotid1, blob_op_with_handle_complete, NULL);
5469 	poll_threads();
5470 	CU_ASSERT(g_bserrno == 0);
5471 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5472 	snapshot1 = g_blob;
5473 
5474 	CU_ASSERT(snapshot1->parent_id == SPDK_BLOBID_INVALID);
5475 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid1) == SPDK_BLOBID_INVALID);
5476 
5477 	CU_ASSERT(blob->parent_id == snapshotid1);
5478 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid1);
5479 
5480 	/* Check if blob is the clone of snapshot1 */
5481 	CU_ASSERT(blob->parent_id == snapshotid1);
5482 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid1);
5483 
5484 	count = SPDK_COUNTOF(ids);
5485 	rc = spdk_blob_get_clones(bs, snapshotid1, ids, &count);
5486 	CU_ASSERT(rc == 0);
5487 	CU_ASSERT(count == 1);
5488 	CU_ASSERT(ids[0] == blobid);
5489 
5490 	/* 3. Create another snapshot */
5491 
5492 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5493 	poll_threads();
5494 	CU_ASSERT(g_bserrno == 0);
5495 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5496 	snapshotid2 = g_blobid;
5497 
5498 	spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL);
5499 	poll_threads();
5500 	CU_ASSERT(g_bserrno == 0);
5501 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5502 	snapshot2 = g_blob;
5503 
5504 	CU_ASSERT(spdk_blob_is_clone(snapshot2));
5505 	CU_ASSERT(snapshot2->parent_id == snapshotid1);
5506 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid1);
5507 
5508 	/* Check if snapshot2 is the clone of snapshot1 and blob
5509 	 * is a child of snapshot2 */
5510 	CU_ASSERT(blob->parent_id == snapshotid2);
5511 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid2);
5512 
5513 	count = SPDK_COUNTOF(ids);
5514 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
5515 	CU_ASSERT(rc == 0);
5516 	CU_ASSERT(count == 1);
5517 	CU_ASSERT(ids[0] == blobid);
5518 
5519 	/* 4. Create clone from snapshot */
5520 
5521 	spdk_bs_create_clone(bs, snapshotid2, NULL, blob_op_with_id_complete, NULL);
5522 	poll_threads();
5523 	CU_ASSERT(g_bserrno == 0);
5524 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5525 	cloneid = g_blobid;
5526 
5527 	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
5528 	poll_threads();
5529 	CU_ASSERT(g_bserrno == 0);
5530 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5531 	clone = g_blob;
5532 
5533 	CU_ASSERT(clone->parent_id == snapshotid2);
5534 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2);
5535 
5536 	/* Check if clone is on the snapshot's list */
5537 	count = SPDK_COUNTOF(ids);
5538 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
5539 	CU_ASSERT(rc == 0);
5540 	CU_ASSERT(count == 2);
5541 	CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
5542 	CU_ASSERT(ids[0] == cloneid || ids[1] == cloneid);
5543 
5544 	/* 5. Create snapshot of the clone */
5545 
5546 	spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL);
5547 	poll_threads();
5548 	CU_ASSERT(g_bserrno == 0);
5549 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5550 	snapshotid3 = g_blobid;
5551 
5552 	spdk_bs_open_blob(bs, snapshotid3, blob_op_with_handle_complete, NULL);
5553 	poll_threads();
5554 	CU_ASSERT(g_bserrno == 0);
5555 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5556 	snapshot3 = g_blob;
5557 
5558 	CU_ASSERT(snapshot3->parent_id == snapshotid2);
5559 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid3) == snapshotid2);
5560 
5561 	/* Check if clone is converted to the clone of snapshot3 and snapshot3
5562 	 * is a child of snapshot2 */
5563 	CU_ASSERT(clone->parent_id == snapshotid3);
5564 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3);
5565 
5566 	count = SPDK_COUNTOF(ids);
5567 	rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count);
5568 	CU_ASSERT(rc == 0);
5569 	CU_ASSERT(count == 1);
5570 	CU_ASSERT(ids[0] == cloneid);
5571 
5572 	/* 6. Create another snapshot of the clone */
5573 
5574 	spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL);
5575 	poll_threads();
5576 	CU_ASSERT(g_bserrno == 0);
5577 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5578 	snapshotid4 = g_blobid;
5579 
5580 	spdk_bs_open_blob(bs, snapshotid4, blob_op_with_handle_complete, NULL);
5581 	poll_threads();
5582 	CU_ASSERT(g_bserrno == 0);
5583 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5584 	snapshot4 = g_blob;
5585 
5586 	CU_ASSERT(snapshot4->parent_id == snapshotid3);
5587 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid4) == snapshotid3);
5588 
5589 	/* Check if clone is converted to the clone of snapshot4 and snapshot4
5590 	 * is a child of snapshot3 */
5591 	CU_ASSERT(clone->parent_id == snapshotid4);
5592 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid4);
5593 
5594 	count = SPDK_COUNTOF(ids);
5595 	rc = spdk_blob_get_clones(bs, snapshotid4, ids, &count);
5596 	CU_ASSERT(rc == 0);
5597 	CU_ASSERT(count == 1);
5598 	CU_ASSERT(ids[0] == cloneid);
5599 
5600 	/* 7. Remove snapshot 4 */
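	/* Deleting a snapshot that has exactly one clone is allowed; that clone is
	 * re-parented to the deleted snapshot's own parent (snapshot3 here). */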
5601 
5602 	ut_blob_close_and_delete(bs, snapshot4);
5603 
5604 	/* Check if relations are back to state from before creating snapshot 4 */
5605 	CU_ASSERT(clone->parent_id == snapshotid3);
5606 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3);
5607 
5608 	count = SPDK_COUNTOF(ids);
5609 	rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count);
5610 	CU_ASSERT(rc == 0);
5611 	CU_ASSERT(count == 1);
5612 	CU_ASSERT(ids[0] == cloneid);
5613 
5614 	/* 8. Create second clone of snapshot 3 and try to remove snapshot 3 */
5615 
5616 	spdk_bs_create_clone(bs, snapshotid3, NULL, blob_op_with_id_complete, NULL);
5617 	poll_threads();
5618 	CU_ASSERT(g_bserrno == 0);
5619 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5620 	cloneid3 = g_blobid;
5621 
5622 	spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL);
5623 	poll_threads();
5624 	CU_ASSERT(g_bserrno != 0);
5625 
5626 	/* 9. Open snapshot 3 again and try to remove it while clone 3 is closed */
5627 
5628 	spdk_bs_open_blob(bs, snapshotid3, blob_op_with_handle_complete, NULL);
5629 	poll_threads();
5630 	CU_ASSERT(g_bserrno == 0);
5631 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5632 	snapshot3 = g_blob;
5633 
5634 	spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL);
5635 	poll_threads();
5636 	CU_ASSERT(g_bserrno != 0);
5637 
5638 	spdk_blob_close(snapshot3, blob_op_complete, NULL);
5639 	poll_threads();
5640 	CU_ASSERT(g_bserrno == 0);
5641 
5642 	spdk_bs_delete_blob(bs, cloneid3, blob_op_complete, NULL);
5643 	poll_threads();
5644 	CU_ASSERT(g_bserrno == 0);
5645 
5646 	/* 10. Remove snapshot 1 */
5647 
5648 	ut_blob_close_and_delete(bs, snapshot1);
5649 
5650 	/* Check that snapshot2 is now the root of the tree: it has no parent and keeps both of its clones */
5651 	CU_ASSERT(snapshot2->parent_id == SPDK_BLOBID_INVALID);
5652 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == SPDK_BLOBID_INVALID);
5653 
5654 	count = SPDK_COUNTOF(ids);
5655 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
5656 	CU_ASSERT(rc == 0);
5657 	CU_ASSERT(count == 2);
5658 	CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
5659 	CU_ASSERT(ids[0] == snapshotid3 || ids[1] == snapshotid3);
5660 
5661 	/* 11. Try to create clone from read only blob */
5662 
5663 	/* Mark blob as read only */
5664 	spdk_blob_set_read_only(blob);
5665 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
5666 	poll_threads();
5667 	CU_ASSERT(g_bserrno == 0);
5668 
5669 	/* Create clone from read only blob */
5670 	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5671 	poll_threads();
5672 	CU_ASSERT(g_bserrno == 0);
5673 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5674 	cloneid2 = g_blobid;
5675 
5676 	spdk_bs_open_blob(bs, cloneid2, blob_op_with_handle_complete, NULL);
5677 	poll_threads();
5678 	CU_ASSERT(g_bserrno == 0);
5679 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5680 	clone2 = g_blob;
5681 
5682 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid);
5683 
5684 	count = SPDK_COUNTOF(ids);
5685 	rc = spdk_blob_get_clones(bs, blobid, ids, &count);
5686 	CU_ASSERT(rc == 0);
5687 	CU_ASSERT(count == 1);
5688 	CU_ASSERT(ids[0] == cloneid2);
5689 
5690 	/* Close blobs */
5691 
5692 	spdk_blob_close(clone2, blob_op_complete, NULL);
5693 	poll_threads();
5694 	CU_ASSERT(g_bserrno == 0);
5695 
5696 	spdk_blob_close(blob, blob_op_complete, NULL);
5697 	poll_threads();
5698 	CU_ASSERT(g_bserrno == 0);
5699 
5700 	spdk_blob_close(clone, blob_op_complete, NULL);
5701 	poll_threads();
5702 	CU_ASSERT(g_bserrno == 0);
5703 
5704 	spdk_blob_close(snapshot2, blob_op_complete, NULL);
5705 	poll_threads();
5706 	CU_ASSERT(g_bserrno == 0);
5707 
5708 	spdk_blob_close(snapshot3, blob_op_complete, NULL);
5709 	poll_threads();
5710 	CU_ASSERT(g_bserrno == 0);
5711 
5712 	ut_bs_reload(&bs, &bs_opts);
5713 
5714 	/* Verify structure of loaded blob store */
5715 
5716 	/* snapshot2 */
5717 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == SPDK_BLOBID_INVALID);
5718 
5719 	count = SPDK_COUNTOF(ids);
5720 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
5721 	CU_ASSERT(rc == 0);
5722 	CU_ASSERT(count == 2);
5723 	CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
5724 	CU_ASSERT(ids[0] == snapshotid3 || ids[1] == snapshotid3);
5725 
5726 	/* blob */
5727 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid2);
5728 	count = SPDK_COUNTOF(ids);
5729 	rc = spdk_blob_get_clones(bs, blobid, ids, &count);
5730 	CU_ASSERT(rc == 0);
5731 	CU_ASSERT(count == 1);
5732 	CU_ASSERT(ids[0] == cloneid2);
5733 
5734 	/* clone */
5735 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3);
5736 	count = SPDK_COUNTOF(ids);
5737 	rc = spdk_blob_get_clones(bs, cloneid, ids, &count);
5738 	CU_ASSERT(rc == 0);
5739 	CU_ASSERT(count == 0);
5740 
5741 	/* snapshot3 */
5742 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid3) == snapshotid2);
5743 	count = SPDK_COUNTOF(ids);
5744 	rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count);
5745 	CU_ASSERT(rc == 0);
5746 	CU_ASSERT(count == 1);
5747 	CU_ASSERT(ids[0] == cloneid);
5748 
5749 	/* clone2 */
5750 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid);
5751 	count = SPDK_COUNTOF(ids);
5752 	rc = spdk_blob_get_clones(bs, cloneid2, ids, &count);
5753 	CU_ASSERT(rc == 0);
5754 	CU_ASSERT(count == 0);
5755 
5756 	/* Try to delete all blobs in the worst possible order */
5757 
5758 	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
5759 	poll_threads();
5760 	CU_ASSERT(g_bserrno != 0);
5761 
5762 	spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL);
5763 	poll_threads();
5764 	CU_ASSERT(g_bserrno == 0);
5765 
5766 	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
5767 	poll_threads();
5768 	CU_ASSERT(g_bserrno != 0);
5769 
5770 	spdk_bs_delete_blob(bs, cloneid, blob_op_complete, NULL);
5771 	poll_threads();
5772 	CU_ASSERT(g_bserrno == 0);
5773 
5774 	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
5775 	poll_threads();
5776 	CU_ASSERT(g_bserrno == 0);
5777 
5778 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
5779 	poll_threads();
5780 	CU_ASSERT(g_bserrno == 0);
5781 
5782 	spdk_bs_delete_blob(bs, cloneid2, blob_op_complete, NULL);
5783 	poll_threads();
5784 	CU_ASSERT(g_bserrno == 0);
5785 
5786 	spdk_bs_unload(bs, bs_op_complete, NULL);
5787 	poll_threads();
5788 	CU_ASSERT(g_bserrno == 0);
5789 
5790 	g_bs = NULL;
5791 }
5792 
5793 /**
5794  * Snapshot-clones relation test 3
5795  *
5796  *         snapshot0
5797  *            |
5798  *         snapshot1
5799  *            |
5800  *         snapshot2
5801  *            |
5802  *           blob
5803  */
5804 static void
5805 blob_relations3(void)
5806 {
5807 	struct spdk_blob_store *bs;
5808 	struct spdk_bs_dev *dev;
5809 	struct spdk_io_channel *channel;
5810 	struct spdk_bs_opts bs_opts;
5811 	struct spdk_blob_opts opts;
5812 	struct spdk_blob *blob;
5813 	spdk_blob_id blobid, snapshotid0, snapshotid1, snapshotid2;
5814 
5815 	dev = init_dev();
5816 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
5817 	snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");
5818 
5819 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
5820 	poll_threads();
5821 	CU_ASSERT(g_bserrno == 0);
5822 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
5823 	bs = g_bs;
5824 
5825 	channel = spdk_bs_alloc_io_channel(bs);
5826 	SPDK_CU_ASSERT_FATAL(channel != NULL);
5827 
5828 	/* 1. Create blob with 10 clusters */
5829 	ut_spdk_blob_opts_init(&opts);
5830 	opts.num_clusters = 10;
5831 
5832 	blob = ut_blob_create_and_open(bs, &opts);
5833 	blobid = spdk_blob_get_id(blob);
5834 
5835 	/* 2. Create snapshot0 */
5836 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5837 	poll_threads();
5838 	CU_ASSERT(g_bserrno == 0);
5839 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5840 	snapshotid0 = g_blobid;
5841 
5842 	/* 3. Create snapshot1 */
5843 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5844 	poll_threads();
5845 	CU_ASSERT(g_bserrno == 0);
5846 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5847 	snapshotid1 = g_blobid;
5848 
5849 	/* 4. Create snapshot2 */
5850 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5851 	poll_threads();
5852 	CU_ASSERT(g_bserrno == 0);
5853 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5854 	snapshotid2 = g_blobid;
5855 
5856 	/* 5. Decouple blob */
5857 	spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
5858 	poll_threads();
5859 	CU_ASSERT(g_bserrno == 0);
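	/* snapshot2 has no clusters allocated (they all belong to snapshot0), so
	 * nothing is copied; decoupling simply re-parents the blob from snapshot2
	 * to snapshot1. */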
5860 
5861 	/* 6. Decouple snapshot2. Make sure updating md of snapshot2 is possible */
5862 	spdk_bs_blob_decouple_parent(bs, channel, snapshotid2, blob_op_complete, NULL);
5863 	poll_threads();
5864 	CU_ASSERT(g_bserrno == 0);
5865 
5866 	/* 7. Delete blob */
5867 	spdk_blob_close(blob, blob_op_complete, NULL);
5868 	poll_threads();
5869 	CU_ASSERT(g_bserrno == 0);
5870 
5871 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
5872 	poll_threads();
5873 	CU_ASSERT(g_bserrno == 0);
5874 
5875 	/* 8. Delete snapshot2.
5876 	 * If md of snapshot 2 was updated, it should be possible to delete it */
5877 	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
5878 	poll_threads();
5879 	CU_ASSERT(g_bserrno == 0);
5880 
5881 	/* Remove remaining blobs and unload bs */
5882 	spdk_bs_delete_blob(bs, snapshotid1, blob_op_complete, NULL);
5883 	poll_threads();
5884 	CU_ASSERT(g_bserrno == 0);
5885 
5886 	spdk_bs_delete_blob(bs, snapshotid0, blob_op_complete, NULL);
5887 	poll_threads();
5888 	CU_ASSERT(g_bserrno == 0);
5889 
5890 	spdk_bs_free_io_channel(channel);
5891 	poll_threads();
5892 
5893 	spdk_bs_unload(bs, bs_op_complete, NULL);
5894 	poll_threads();
5895 	CU_ASSERT(g_bserrno == 0);
5896 
5897 	g_bs = NULL;
5898 }
5899 
5900 static void
5901 blobstore_clean_power_failure(void)
5902 {
5903 	struct spdk_blob_store *bs;
5904 	struct spdk_blob *blob;
5905 	struct spdk_power_failure_thresholds thresholds = {};
5906 	bool clean = false;
5907 	struct spdk_bs_super_block *super = (struct spdk_bs_super_block *)&g_dev_buffer[0];
5908 	struct spdk_bs_super_block super_copy = {};
5909 
5910 	thresholds.general_threshold = 1;
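	/* Power-failure sweep: each iteration lets the backing device complete one
	 * more operation before the simulated failure triggers, until the md sync
	 * below finally completes with bs and super block marked not clean. */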
5911 	while (!clean) {
5912 		/* Create bs and blob */
5913 		suite_blob_setup();
5914 		SPDK_CU_ASSERT_FATAL(g_bs != NULL);
5915 		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5916 		bs = g_bs;
5917 		blob = g_blob;
5918 
5919 		/* The super block should not change for the rest of the UT;
5920 		 * save it and compare later. */
5921 		memcpy(&super_copy, super, sizeof(struct spdk_bs_super_block));
5922 		SPDK_CU_ASSERT_FATAL(super->clean == 0);
5923 		SPDK_CU_ASSERT_FATAL(bs->clean == 0);
5924 
5925 		/* Force the bs/super block into a clean state and mark the blob
5926 		 * dirty, so that the md sync below triggers a blob persist. */
5927 		blob->state = SPDK_BLOB_STATE_DIRTY;
5928 		bs->clean = 1;
5929 		super->clean = 1;
5930 		super->crc = blob_md_page_calc_crc(super);
5931 
5932 		g_bserrno = -1;
5933 		dev_set_power_failure_thresholds(thresholds);
5934 		spdk_blob_sync_md(blob, blob_op_complete, NULL);
5935 		poll_threads();
5936 		dev_reset_power_failure_event();
5937 
5938 		if (g_bserrno == 0) {
5939 			/* After successful md sync, both bs and super block
5940 			 * should be marked as not clean. */
5941 			SPDK_CU_ASSERT_FATAL(bs->clean == 0);
5942 			SPDK_CU_ASSERT_FATAL(super->clean == 0);
5943 			clean = true;
5944 		}
5945 
5946 		/* Depending on the point of failure, super block was either updated or not. */
5947 		super_copy.clean = super->clean;
5948 		super_copy.crc = blob_md_page_calc_crc(&super_copy);
5949 		/* Compare that the values in super block remained unchanged. */
5950 		SPDK_CU_ASSERT_FATAL(!memcmp(&super_copy, super, sizeof(struct spdk_bs_super_block)));
5951 
5952 		/* Delete blob and unload bs */
5953 		suite_blob_cleanup();
5954 
5955 		thresholds.general_threshold++;
5956 	}
5957 }
5958 
5959 static void
5960 blob_delete_snapshot_power_failure(void)
5961 {
5962 	struct spdk_bs_dev *dev;
5963 	struct spdk_blob_store *bs;
5964 	struct spdk_blob_opts opts;
5965 	struct spdk_blob *blob, *snapshot;
5966 	struct spdk_power_failure_thresholds thresholds = {};
5967 	spdk_blob_id blobid, snapshotid;
5968 	const void *value;
5969 	size_t value_len;
5970 	size_t count;
5971 	spdk_blob_id ids[3] = {};
5972 	int rc;
5973 	bool deleted = false;
5974 	int delete_snapshot_bserrno = -1;
5975 
5976 	thresholds.general_threshold = 1;
5977 	while (!deleted) {
5978 		dev = init_dev();
5979 
5980 		spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
5981 		poll_threads();
5982 		CU_ASSERT(g_bserrno == 0);
5983 		SPDK_CU_ASSERT_FATAL(g_bs != NULL);
5984 		bs = g_bs;
5985 
5986 		/* Create blob */
5987 		ut_spdk_blob_opts_init(&opts);
5988 		opts.num_clusters = 10;
5989 
5990 		spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
5991 		poll_threads();
5992 		CU_ASSERT(g_bserrno == 0);
5993 		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5994 		blobid = g_blobid;
5995 
5996 		/* Create snapshot */
5997 		spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5998 		poll_threads();
5999 		CU_ASSERT(g_bserrno == 0);
6000 		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
6001 		snapshotid = g_blobid;
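		/* The blob's 10 clusters occupy indices 1..10 of the used_clusters pool
		 * (index 0 is used by the blobstore itself), so index 11 must stay free. */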
6002 		SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1));
6003 		SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11));
6004 
6005 		dev_set_power_failure_thresholds(thresholds);
6006 
6007 		spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
6008 		poll_threads();
6009 		delete_snapshot_bserrno = g_bserrno;
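		/* The deletion may have been interrupted at an arbitrary point by the
		 * simulated power failure; its status is recorded so the loop can stop
		 * once a complete, successful deletion has been observed. */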
6010 
6011 		/* Do not shut down cleanly. The assumption is that once snapshot deletion
6012 		 * reports success, changes to both blobs are already persisted. */
6013 		dev_reset_power_failure_event();
6014 		ut_bs_dirty_load(&bs, NULL);
6015 
6016 		SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1));
6017 		SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11));
6018 
6019 		spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
6020 		poll_threads();
6021 		CU_ASSERT(g_bserrno == 0);
6022 		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
6023 		blob = g_blob;
6024 		SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == true);
6025 
6026 		spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
6027 		poll_threads();
6028 
6029 		if (g_bserrno == 0) {
6030 			SPDK_CU_ASSERT_FATAL(g_blob != NULL);
6031 			snapshot = g_blob;
6032 			CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
6033 			count = SPDK_COUNTOF(ids);
6034 			rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
6035 			CU_ASSERT(rc == 0);
6036 			CU_ASSERT(count == 1);
6037 			CU_ASSERT(ids[0] == blobid);
6038 			rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_PENDING_REMOVAL, &value, &value_len);
6039 			CU_ASSERT(rc != 0);
6040 			SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(snapshot) == false);
6041 
6042 			spdk_blob_close(snapshot, blob_op_complete, NULL);
6043 			poll_threads();
6044 			CU_ASSERT(g_bserrno == 0);
6045 		} else {
6046 			CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID);
6047 			/* The snapshot might have been left in an unrecoverable state, so it does not open.
6048 			 * Yet the delete might still perform further changes to the clone after that point.
6049 			 * This UT keeps iterating until the snapshot is deleted and the delete call succeeds. */
6050 			if (delete_snapshot_bserrno == 0) {
6051 				deleted = true;
6052 			}
6053 		}
6054 
6055 		spdk_blob_close(blob, blob_op_complete, NULL);
6056 		poll_threads();
6057 		CU_ASSERT(g_bserrno == 0);
6058 
6059 		spdk_bs_unload(bs, bs_op_complete, NULL);
6060 		poll_threads();
6061 		CU_ASSERT(g_bserrno == 0);
6062 
6063 		thresholds.general_threshold++;
6064 	}
6065 }
6066 
6067 static void
6068 blob_create_snapshot_power_failure(void)
6069 {
6070 	struct spdk_blob_store *bs = g_bs;
6071 	struct spdk_bs_dev *dev;
6072 	struct spdk_blob_opts opts;
6073 	struct spdk_blob *blob, *snapshot;
6074 	struct spdk_power_failure_thresholds thresholds = {};
6075 	spdk_blob_id blobid, snapshotid;
6076 	const void *value;
6077 	size_t value_len;
6078 	size_t count;
6079 	spdk_blob_id ids[3] = {};
6080 	int rc;
6081 	bool created = false;
6082 	int create_snapshot_bserrno = -1;
6083 
6084 	thresholds.general_threshold = 1;
6085 	while (!created) {
6086 		dev = init_dev();
6087 
6088 		spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
6089 		poll_threads();
6090 		CU_ASSERT(g_bserrno == 0);
6091 		SPDK_CU_ASSERT_FATAL(g_bs != NULL);
6092 		bs = g_bs;
6093 
6094 		/* Create blob */
6095 		ut_spdk_blob_opts_init(&opts);
6096 		opts.num_clusters = 10;
6097 
6098 		spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
6099 		poll_threads();
6100 		CU_ASSERT(g_bserrno == 0);
6101 		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
6102 		blobid = g_blobid;
6103 		SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1));
6104 		SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11));
6105 
6106 		dev_set_power_failure_thresholds(thresholds);
6107 
6108 		/* Create snapshot */
6109 		spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
6110 		poll_threads();
6111 		create_snapshot_bserrno = g_bserrno;
6112 		snapshotid = g_blobid;
6113 		SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1));
6114 		SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11));
6115 
6116 		/* Do not shut down cleanly. The assumption is that once snapshot creation
6117 		 * reports success, both blobs are power-fail safe. */
6118 		dev_reset_power_failure_event();
6119 		ut_bs_dirty_load(&bs, NULL);
6120 
6121 		SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1));
6122 		SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11));
6123 
6124 		spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
6125 		poll_threads();
6126 		CU_ASSERT(g_bserrno == 0);
6127 		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
6128 		blob = g_blob;
6129 
6130 		if (snapshotid != SPDK_BLOBID_INVALID) {
6131 			spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
6132 			poll_threads();
6133 		}
6134 
6135 		if ((snapshotid != SPDK_BLOBID_INVALID) && (g_bserrno == 0)) {
6136 			SPDK_CU_ASSERT_FATAL(g_blob != NULL);
6137 			snapshot = g_blob;
6138 			SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == true);
6139 			SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(snapshot) == false);
6140 			CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
6141 			count = SPDK_COUNTOF(ids);
6142 			rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
6143 			CU_ASSERT(rc == 0);
6144 			CU_ASSERT(count == 1);
6145 			CU_ASSERT(ids[0] == blobid);
6146 			rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_IN_PROGRESS, &value, &value_len);
6147 			CU_ASSERT(rc != 0);
6148 
6149 			spdk_blob_close(snapshot, blob_op_complete, NULL);
6150 			poll_threads();
6151 			CU_ASSERT(g_bserrno == 0);
6152 			if (create_snapshot_bserrno == 0) {
6153 				created = true;
6154 			}
6155 		} else {
6156 			CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID);
6157 			SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == false);
6158 		}
6159 
6160 		spdk_blob_close(blob, blob_op_complete, NULL);
6161 		poll_threads();
6162 		CU_ASSERT(g_bserrno == 0);
6163 
6164 		spdk_bs_unload(bs, bs_op_complete, NULL);
6165 		poll_threads();
6166 		CU_ASSERT(g_bserrno == 0);
6167 
6168 		thresholds.general_threshold++;
6169 	}
6170 }
6171 
6172 static void
6173 test_io_write(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
6174 {
6175 	uint8_t payload_ff[64 * 512];
6176 	uint8_t payload_aa[64 * 512];
6177 	uint8_t payload_00[64 * 512];
6178 	uint8_t *cluster0, *cluster1;
6179 
6180 	memset(payload_ff, 0xFF, sizeof(payload_ff));
6181 	memset(payload_aa, 0xAA, sizeof(payload_aa));
6182 	memset(payload_00, 0x00, sizeof(payload_00));
6183 
6184 	/* Try to perform I/O with io unit = 512 */
6185 	spdk_blob_io_write(blob, channel, payload_ff, 0, 1, blob_op_complete, NULL);
6186 	poll_threads();
6187 	CU_ASSERT(g_bserrno == 0);
6188 
6189 	/* If thin provisioning is set, the cluster should be allocated now */
6190 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[0] != 0);
6191 	cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen];
6192 
6193 	/* Each character 0-F symbolizes a single io_unit containing a 512-byte block filled with that
6194 	 * character. Pages are separated by '|'. The whole block [...] symbolizes one cluster (4 pages). */
6195 	/* cluster0: [ F000 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
6196 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6197 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 31 * 512) == 0);
6198 
6199 	/* Verify write with offset on first page */
6200 	spdk_blob_io_write(blob, channel, payload_ff, 2, 1, blob_op_complete, NULL);
6201 	poll_threads();
6202 	CU_ASSERT(g_bserrno == 0);
6203 
6204 	/* cluster0: [ F0F0 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
6205 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6206 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6207 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6208 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6209 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_00, 28 * 512) == 0);
6210 
6211 	/* Verify a 4 io_unit write at an offset inside the first page */
6212 	spdk_blob_io_write(blob, channel, payload_ff, 4, 4, blob_op_complete, NULL);
6213 	poll_threads();
6214 
6215 	/* cluster0: [ F0F0 FFFF | 0000 0000 | 0000 0000 | 0000 0000 ] */
6216 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6217 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6218 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6219 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6220 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 4 * 512) == 0);
6221 	CU_ASSERT(memcmp(cluster0 + 8 * 512, payload_00, 24 * 512) == 0);
6222 
6223 	/* Verify write with offset on second page */
6224 	spdk_blob_io_write(blob, channel, payload_ff, 8, 4, blob_op_complete, NULL);
6225 	poll_threads();
6226 
6227 	/* cluster0: [ F0F0 FFFF | FFFF 0000 | 0000 0000 | 0000 0000 ] */
6228 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6229 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6230 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6231 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6232 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 8 * 512) == 0);
6233 	CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0);
6234 
6235 	/* Verify write across multiple pages */
6236 	spdk_blob_io_write(blob, channel, payload_aa, 4, 8, blob_op_complete, NULL);
6237 	poll_threads();
6238 
6239 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 0000 ] */
6240 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6241 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6242 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6243 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6244 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
6245 	CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0);
6246 
6247 	/* Verify write across multiple clusters */
6248 	spdk_blob_io_write(blob, channel, payload_ff, 28, 8, blob_op_complete, NULL);
6249 	poll_threads();
6250 
6251 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0);
6252 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
6253 
6254 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6255 	 * cluster1: [ FFFF 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
6256 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6257 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6258 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6259 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6260 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
6261 	CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0);
6262 
6263 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0);
6264 	CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 28 * 512) == 0);
6265 
6266 	/* Verify write to second cluster */
6267 	spdk_blob_io_write(blob, channel, payload_ff, 32 + 12, 2, blob_op_complete, NULL);
6268 	poll_threads();
6269 
6270 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0);
6271 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
6272 
6273 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6274 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] */
6275 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6276 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6277 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6278 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6279 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
6280 	CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0);
6281 
6282 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0);
6283 	CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 8 * 512) == 0);
6284 	CU_ASSERT(memcmp(cluster1 + 12 * 512, payload_ff, 2 * 512) == 0);
6285 	CU_ASSERT(memcmp(cluster1 + 14 * 512, payload_00, 18 * 512) == 0);
6286 }
6287 
6288 static void
6289 test_io_read(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
6290 {
6291 	uint8_t payload_read[64 * 512];
6292 	uint8_t payload_ff[64 * 512];
6293 	uint8_t payload_aa[64 * 512];
6294 	uint8_t payload_00[64 * 512];
6295 
6296 	memset(payload_ff, 0xFF, sizeof(payload_ff));
6297 	memset(payload_aa, 0xAA, sizeof(payload_aa));
6298 	memset(payload_00, 0x00, sizeof(payload_00));
6299 
6300 	/* Read only first io unit */
6301 	/* cluster0: [ (F)0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6302 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
6303 	 * payload_read: F000 0000 | 0000 0000 ... */
6304 	memset(payload_read, 0x00, sizeof(payload_read));
6305 	spdk_blob_io_read(blob, channel, payload_read, 0, 1, blob_op_complete, NULL);
6306 	poll_threads();
6307 	CU_ASSERT(g_bserrno == 0);
6308 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
6309 	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 31 * 512) == 0);
6310 
6311 	/* Read four io_units starting from offset = 2
6312 	 * cluster0: [ F0(F0 AA)AA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6313 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
6314 	 * payload_read: F0AA 0000 | 0000 0000 ... */
6315 
6316 	memset(payload_read, 0x00, sizeof(payload_read));
6317 	spdk_blob_io_read(blob, channel, payload_read, 2, 4, blob_op_complete, NULL);
6318 	poll_threads();
6319 	CU_ASSERT(g_bserrno == 0);
6320 
6321 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
6322 	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0);
6323 	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_aa, 512) == 0);
6324 	CU_ASSERT(memcmp(payload_read + 3 * 512, payload_aa, 512) == 0);
6325 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0);
6326 
6327 	/* Read eight io_units across multiple pages
6328 	 * cluster0: [ F0F0 (AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ]
6329 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
6330 	 * payload_read: AAAA AAAA | 0000 0000 ... */
6331 	memset(payload_read, 0x00, sizeof(payload_read));
6332 	spdk_blob_io_read(blob, channel, payload_read, 4, 8, blob_op_complete, NULL);
6333 	poll_threads();
6334 	CU_ASSERT(g_bserrno == 0);
6335 
6336 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_aa, 8 * 512) == 0);
6337 	CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0);
6338 
6339 	/* Read eight io_units across multiple clusters
6340 	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 (FFFF ]
6341 	 * cluster1: [ FFFF) 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
6342 	 * payload_read: FFFF FFFF | 0000 0000 ... */
6343 	memset(payload_read, 0x00, sizeof(payload_read));
6344 	spdk_blob_io_read(blob, channel, payload_read, 28, 8, blob_op_complete, NULL);
6345 	poll_threads();
6346 	CU_ASSERT(g_bserrno == 0);
6347 
6348 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 8 * 512) == 0);
6349 	CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0);
6350 
6351 	/* Read four io_units from second cluster
6352 	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6353 	 * cluster1: [ FFFF 0000 | 00(00 FF)00 | 0000 0000 | 0000 0000 ]
6354 	 * payload_read: 00FF 0000 | 0000 0000 ... */
6355 	memset(payload_read, 0x00, sizeof(payload_read));
6356 	spdk_blob_io_read(blob, channel, payload_read, 32 + 10, 4, blob_op_complete, NULL);
6357 	poll_threads();
6358 	CU_ASSERT(g_bserrno == 0);
6359 
6360 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_00, 2 * 512) == 0);
6361 	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 2 * 512) == 0);
6362 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0);
6363 
6364 	/* Read second cluster
6365 	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6366 	 * cluster1: [ (FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ]
6367 	 * payload_read: FFFF 0000 | 0000 FF00 ... */
6368 	memset(payload_read, 0x00, sizeof(payload_read));
6369 	spdk_blob_io_read(blob, channel, payload_read, 32, 32, blob_op_complete, NULL);
6370 	poll_threads();
6371 	CU_ASSERT(g_bserrno == 0);
6372 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 4 * 512) == 0);
6373 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 8 * 512) == 0);
6374 	CU_ASSERT(memcmp(payload_read + 12 * 512, payload_ff, 2 * 512) == 0);
6375 	CU_ASSERT(memcmp(payload_read + 14 * 512, payload_00, 18 * 512) == 0);
6376 
6377 	/* Read whole two clusters
6378 	 * cluster0: [ (F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6379 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ] */
6380 	memset(payload_read, 0x00, sizeof(payload_read));
6381 	spdk_blob_io_read(blob, channel, payload_read, 0, 64, blob_op_complete, NULL);
6382 	poll_threads();
6383 	CU_ASSERT(g_bserrno == 0);
6384 
6385 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
6386 	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0);
6387 	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 512) == 0);
6388 	CU_ASSERT(memcmp(payload_read + 3 * 512, payload_00, 512) == 0);
6389 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_aa, 8 * 512) == 0);
6390 	CU_ASSERT(memcmp(payload_read + 28 * 512, payload_ff, 4 * 512) == 0);
6391 
6392 	CU_ASSERT(memcmp(payload_read + (32 + 0) * 512, payload_ff, 4 * 512) == 0);
6393 	CU_ASSERT(memcmp(payload_read + (32 + 4) * 512, payload_00, 8 * 512) == 0);
6394 	CU_ASSERT(memcmp(payload_read + (32 + 12) * 512, payload_ff, 2 * 512) == 0);
6395 	CU_ASSERT(memcmp(payload_read + (32 + 14) * 512, payload_00, 18 * 512) == 0);
6396 }
6397 
6398 
6399 static void
6400 test_io_unmap(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
6401 {
6402 	uint8_t payload_ff[64 * 512];
6403 	uint8_t payload_aa[64 * 512];
6404 	uint8_t payload_00[64 * 512];
6405 	uint8_t *cluster0, *cluster1;
6406 
6407 	memset(payload_ff, 0xFF, sizeof(payload_ff));
6408 	memset(payload_aa, 0xAA, sizeof(payload_aa));
6409 	memset(payload_00, 0x00, sizeof(payload_00));
6410 
6411 	cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen];
6412 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
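	/* The UT backing device zeroes unmapped ranges, so after unmapping the whole
	 * blob both clusters should read back as all zeroes. */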
6413 
6414 	/* Unmap */
6415 	spdk_blob_io_unmap(blob, channel, 0, 64, blob_op_complete, NULL);
6416 	poll_threads();
6417 
6418 	CU_ASSERT(g_bserrno == 0);
6419 
6420 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_00, 32 * 512) == 0);
6421 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_00, 32 * 512) == 0);
6422 }
6423 
6424 static void
6425 test_io_zeroes(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
6426 {
6427 	uint8_t payload_ff[64 * 512];
6428 	uint8_t payload_aa[64 * 512];
6429 	uint8_t payload_00[64 * 512];
6430 	uint8_t *cluster0, *cluster1;
6431 
6432 	memset(payload_ff, 0xFF, sizeof(payload_ff));
6433 	memset(payload_aa, 0xAA, sizeof(payload_aa));
6434 	memset(payload_00, 0x00, sizeof(payload_00));
6435 
6436 	cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen];
6437 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
6438 
6439 	/* Write zeroes */
6440 	spdk_blob_io_write_zeroes(blob, channel, 0, 64, blob_op_complete, NULL);
6441 	poll_threads();
6442 
6443 	CU_ASSERT(g_bserrno == 0);
6444 
6445 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_00, 32 * 512) == 0);
6446 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_00, 32 * 512) == 0);
6447 }
6448 
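/* Helper that performs a vectored write through either spdk_blob_io_writev() or,
 * when ext I/O opts are supplied, spdk_blob_io_writev_ext(), verifying in the
 * latter case that the opts were propagated down to the bs_dev layer. */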
6449 static inline void
6450 test_blob_io_writev(struct spdk_blob *blob, struct spdk_io_channel *channel,
6451 		    struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
6452 		    spdk_blob_op_complete cb_fn, void *cb_arg, struct spdk_blob_ext_io_opts *io_opts)
6453 {
6454 	if (io_opts) {
6455 		g_dev_writev_ext_called = false;
6456 		memset(&g_blob_ext_io_opts, 0, sizeof(g_blob_ext_io_opts));
6457 		spdk_blob_io_writev_ext(blob, channel, iov, iovcnt, offset, length, blob_op_complete, NULL,
6458 					io_opts);
6459 	} else {
6460 		spdk_blob_io_writev(blob, channel, iov, iovcnt, offset, length, blob_op_complete, NULL);
6461 	}
6462 	poll_threads();
6463 	CU_ASSERT(g_bserrno == 0);
6464 	if (io_opts) {
6465 		CU_ASSERT(g_dev_writev_ext_called);
6466 		CU_ASSERT(memcmp(io_opts, &g_blob_ext_io_opts, sizeof(g_blob_ext_io_opts)) == 0);
6467 	}
6468 }
6469 
6470 static void
6471 test_iov_write(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel,
6472 	       bool ext_api)
6473 {
6474 	uint8_t payload_ff[64 * 512];
6475 	uint8_t payload_aa[64 * 512];
6476 	uint8_t payload_00[64 * 512];
6477 	uint8_t *cluster0, *cluster1;
6478 	struct iovec iov[4];
6479 	struct spdk_blob_ext_io_opts ext_opts = {
6480 		.memory_domain = (struct spdk_memory_domain *)0xfeedbeef,
6481 		.memory_domain_ctx = (void *)0xf00df00d,
6482 		.size = sizeof(struct spdk_blob_ext_io_opts),
6483 		.user_ctx = (void *)123,
6484 	};
6485 
6486 	memset(payload_ff, 0xFF, sizeof(payload_ff));
6487 	memset(payload_aa, 0xAA, sizeof(payload_aa));
6488 	memset(payload_00, 0x00, sizeof(payload_00));
6489 
6490 	/* Try to perform I/O with io unit = 512 */
6491 	iov[0].iov_base = payload_ff;
6492 	iov[0].iov_len = 1 * 512;
6493 
6494 	test_blob_io_writev(blob, channel, iov, 1, 0, 1, blob_op_complete, NULL,
6495 			    ext_api ? &ext_opts : NULL);
6496 
6497 	/* If thin provisioning is set, the cluster should be allocated now */
6498 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[0] != 0);
6499 	cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen];
6500 
6501 	/* Each character 0-F symbolizes a single io_unit containing a 512-byte block filled with that character.
6502 	 * Pages are separated by |. The whole block [...] symbolizes one cluster (containing 4 pages). */
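	/* Here the io unit is 512 bytes and the cluster is 4 pages of SPDK_BS_PAGE_SIZE (4096) bytes,
	 * so one page holds 8 io_units and one cluster holds 32: io_units 0-31 map to cluster0 and
	 * 32-63 to cluster1 (hence offsets such as 32 + 12 further below). */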
6503 	/* cluster0: [ F000 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
6504 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6505 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 31 * 512) == 0);
6506 
6507 	/* Verify write with offset on first page */
6508 	iov[0].iov_base = payload_ff;
6509 	iov[0].iov_len = 1 * 512;
6510 
6511 	test_blob_io_writev(blob, channel, iov, 1, 2, 1, blob_op_complete, NULL,
6512 			    ext_api ? &ext_opts : NULL);
6513 
6514 	/* cluster0: [ F0F0 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
6515 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6516 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6517 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6518 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6519 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_00, 28 * 512) == 0);
6520 
6521 	/* Verify a 4-io_unit write at offset 4, still within the first page */
6522 	iov[0].iov_base = payload_ff;
6523 	iov[0].iov_len = 4 * 512;
6524 	spdk_blob_io_writev(blob, channel, iov, 1, 4, 4, blob_op_complete, NULL);
6525 	poll_threads();
6526 
6527 	/* cluster0: [ F0F0 FFFF | 0000 0000 | 0000 0000 | 0000 0000 ] */
6528 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6529 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6530 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6531 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6532 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 4 * 512) == 0);
6533 	CU_ASSERT(memcmp(cluster0 + 8 * 512, payload_00, 24 * 512) == 0);
6534 
6535 	/* Verify write with offset on second page */
6536 	iov[0].iov_base = payload_ff;
6537 	iov[0].iov_len = 4 * 512;
6538 	spdk_blob_io_writev(blob, channel, iov, 1, 8, 4, blob_op_complete, NULL);
6539 	poll_threads();
6540 
6541 	/* cluster0: [ F0F0 FFFF | FFFF 0000 | 0000 0000 | 0000 0000 ] */
6542 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6543 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6544 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6545 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6546 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 8 * 512) == 0);
6547 	CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0);
6548 
6549 	/* Verify write across multiple pages */
6550 	iov[0].iov_base = payload_aa;
6551 	iov[0].iov_len = 8 * 512;
6552 
6553 	test_blob_io_writev(blob, channel, iov, 1, 4, 8, blob_op_complete, NULL,
6554 			    ext_api ? &ext_opts : NULL);
6555 
6556 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 0000 ] */
6557 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6558 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6559 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6560 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6561 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
6562 	CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0);
6563 
6564 	/* Verify write across multiple clusters */
6565 
6566 	iov[0].iov_base = payload_ff;
6567 	iov[0].iov_len = 8 * 512;
6568 
6569 	test_blob_io_writev(blob, channel, iov, 1, 28, 8, blob_op_complete, NULL,
6570 			    ext_api ? &ext_opts : NULL);
6571 
6572 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0);
6573 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
6574 
6575 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6576 	 * cluster1: [ FFFF 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
6577 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6578 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6579 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6580 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6581 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
6582 	CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 16 * 512) == 0);
6583 	CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0);
6584 
6585 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0);
6586 	CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 28 * 512) == 0);
6587 
6588 	/* Verify write to second cluster */
6589 
6590 	iov[0].iov_base = payload_ff;
6591 	iov[0].iov_len = 2 * 512;
6592 
6593 	test_blob_io_writev(blob, channel, iov, 1, 32 + 12, 2, blob_op_complete, NULL,
6594 			    ext_api ? &ext_opts : NULL);
6595 
6596 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0);
6597 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
6598 
6599 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6600 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] */
6601 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6602 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6603 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6604 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6605 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
6606 	CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0);
6607 
6608 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0);
6609 	CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 8 * 512) == 0);
6610 	CU_ASSERT(memcmp(cluster1 + 12 * 512, payload_ff, 2 * 512) == 0);
6611 	CU_ASSERT(memcmp(cluster1 + 14 * 512, payload_00, 18 * 512) == 0);
6612 }
6613 
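/*
 * Read-side counterpart of test_blob_io_writev(): routes through spdk_blob_io_readv_ext() when
 * io_opts is provided and verifies that the options reached the bs_dev, otherwise it uses
 * spdk_blob_io_readv().
 */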
6614 static inline void
6615 test_blob_io_readv(struct spdk_blob *blob, struct spdk_io_channel *channel,
6616 		   struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
6617 		   spdk_blob_op_complete cb_fn, void *cb_arg, struct spdk_blob_ext_io_opts *io_opts)
6618 {
6619 	if (io_opts) {
6620 		g_dev_readv_ext_called = false;
6621 		memset(&g_blob_ext_io_opts, 0, sizeof(g_blob_ext_io_opts));
6622 		spdk_blob_io_readv_ext(blob, channel, iov, iovcnt, offset, length, blob_op_complete, NULL, io_opts);
6623 	} else {
6624 		spdk_blob_io_readv(blob, channel, iov, iovcnt, offset, length, blob_op_complete, NULL);
6625 	}
6626 	poll_threads();
6627 	CU_ASSERT(g_bserrno == 0);
6628 	if (io_opts) {
6629 		CU_ASSERT(g_dev_readv_ext_called);
6630 		CU_ASSERT(memcmp(io_opts, &g_blob_ext_io_opts, sizeof(g_blob_ext_io_opts)) == 0);
6631 	}
6632 }
6633 
6634 static void
6635 test_iov_read(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel,
6636 	      bool ext_api)
6637 {
6638 	uint8_t payload_read[64 * 512];
6639 	uint8_t payload_ff[64 * 512];
6640 	uint8_t payload_aa[64 * 512];
6641 	uint8_t payload_00[64 * 512];
6642 	struct iovec iov[4];
6643 	struct spdk_blob_ext_io_opts ext_opts = {
6644 		.memory_domain = (struct spdk_memory_domain *)0xfeedbeef,
6645 		.memory_domain_ctx = (void *)0xf00df00d,
6646 		.size = sizeof(struct spdk_blob_ext_io_opts),
6647 		.user_ctx = (void *)123,
6648 	};
6649 
6650 	memset(payload_ff, 0xFF, sizeof(payload_ff));
6651 	memset(payload_aa, 0xAA, sizeof(payload_aa));
6652 	memset(payload_00, 0x00, sizeof(payload_00));
6653 
6654 	/* Read only first io unit */
6655 	/* cluster0: [ (F)0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6656 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
6657 	 * payload_read: F000 0000 | 0000 0000 ... */
6658 	memset(payload_read, 0x00, sizeof(payload_read));
6659 	iov[0].iov_base = payload_read;
6660 	iov[0].iov_len = 1 * 512;
6661 
6662 	test_blob_io_readv(blob, channel, iov, 1, 0, 1, blob_op_complete, NULL, ext_api ? &ext_opts : NULL);
6663 
6664 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
6665 	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 31 * 512) == 0);
6666 
6667 	/* Read four io_units starting from offset = 2
6668 	 * cluster0: [ F0(F0 AA)AA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6669 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
6670 	 * payload_read: F0AA 0000 | 0000 0000 ... */
6671 
6672 	memset(payload_read, 0x00, sizeof(payload_read));
6673 	iov[0].iov_base = payload_read;
6674 	iov[0].iov_len = 4 * 512;
6675 
6676 	test_blob_io_readv(blob, channel, iov, 1, 2, 4, blob_op_complete, NULL, ext_api ? &ext_opts : NULL);
6677 
6678 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
6679 	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0);
6680 	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_aa, 512) == 0);
6681 	CU_ASSERT(memcmp(payload_read + 3 * 512, payload_aa, 512) == 0);
6682 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0);
6683 
6684 	/* Read eight io_units across multiple pages
6685 	 * cluster0: [ F0F0 (AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ]
6686 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
6687 	 * payload_read: AAAA AAAA | 0000 0000 ... */
6688 	memset(payload_read, 0x00, sizeof(payload_read));
6689 	iov[0].iov_base = payload_read;
6690 	iov[0].iov_len = 4 * 512;
6691 	iov[1].iov_base = payload_read + 4 * 512;
6692 	iov[1].iov_len = 4 * 512;
6693 
6694 	test_blob_io_readv(blob, channel, iov, 2, 4, 8, blob_op_complete, NULL, ext_api ? &ext_opts : NULL);
6695 
6696 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_aa, 8 * 512) == 0);
6697 	CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0);
6698 
6699 	/* Read eight io_units across multiple clusters
6700 	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 (FFFF ]
6701 	 * cluster1: [ FFFF) 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
6702 	 * payload_read: FFFF FFFF | 0000 0000 ... */
6703 	memset(payload_read, 0x00, sizeof(payload_read));
6704 	iov[0].iov_base = payload_read;
6705 	iov[0].iov_len = 2 * 512;
6706 	iov[1].iov_base = payload_read + 2 * 512;
6707 	iov[1].iov_len = 2 * 512;
6708 	iov[2].iov_base = payload_read + 4 * 512;
6709 	iov[2].iov_len = 2 * 512;
6710 	iov[3].iov_base = payload_read + 6 * 512;
6711 	iov[3].iov_len = 2 * 512;
6712 
6713 	test_blob_io_readv(blob, channel, iov, 4, 28, 8, blob_op_complete, NULL,
6714 			   ext_api ? &ext_opts : NULL);
6715 
6716 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 8 * 512) == 0);
6717 	CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0);
6718 
6719 	/* Read four io_units from second cluster
6720 	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6721 	 * cluster1: [ FFFF 0000 | 00(00 FF)00 | 0000 0000 | 0000 0000 ]
6722 	 * payload_read: 00FF 0000 | 0000 0000 ... */
6723 	memset(payload_read, 0x00, sizeof(payload_read));
6724 	iov[0].iov_base = payload_read;
6725 	iov[0].iov_len = 1 * 512;
6726 	iov[1].iov_base = payload_read + 1 * 512;
6727 	iov[1].iov_len = 3 * 512;
6728 
6729 	test_blob_io_readv(blob, channel, iov, 2, 32 + 10, 4, blob_op_complete, NULL,
6730 			   ext_api ? &ext_opts : NULL);
6731 
6732 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_00, 2 * 512) == 0);
6733 	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 2 * 512) == 0);
6734 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0);
6735 
6736 	/* Read second cluster
6737 	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6738 	 * cluster1: [ (FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ]
6739 	 * payload_read: FFFF 0000 | 0000 FF00 ... */
6740 	memset(payload_read, 0x00, sizeof(payload_read));
6741 	iov[0].iov_base = payload_read;
6742 	iov[0].iov_len = 1 * 512;
6743 	iov[1].iov_base = payload_read + 1 * 512;
6744 	iov[1].iov_len = 2 * 512;
6745 	iov[2].iov_base = payload_read + 3 * 512;
6746 	iov[2].iov_len = 4 * 512;
6747 	iov[3].iov_base = payload_read + 7 * 512;
6748 	iov[3].iov_len = 25 * 512;
6749 
6750 	test_blob_io_readv(blob, channel, iov, 4, 32, 32, blob_op_complete, NULL,
6751 			   ext_api ? &ext_opts : NULL);
6752 
6753 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 4 * 512) == 0);
6754 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 8 * 512) == 0);
6755 	CU_ASSERT(memcmp(payload_read + 12 * 512, payload_ff, 2 * 512) == 0);
6756 	CU_ASSERT(memcmp(payload_read + 14 * 512, payload_00, 18 * 512) == 0);
6757 
6758 	/* Read whole two clusters
6759 	 * cluster0: [ (F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6760 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ] */
6761 	memset(payload_read, 0x00, sizeof(payload_read));
6762 	iov[0].iov_base = payload_read;
6763 	iov[0].iov_len = 1 * 512;
6764 	iov[1].iov_base = payload_read + 1 * 512;
6765 	iov[1].iov_len = 8 * 512;
6766 	iov[2].iov_base = payload_read + 9 * 512;
6767 	iov[2].iov_len = 16 * 512;
6768 	iov[3].iov_base = payload_read + 25 * 512;
6769 	iov[3].iov_len = 39 * 512;
6770 
6771 	test_blob_io_readv(blob, channel, iov, 4, 0, 64, blob_op_complete, NULL,
6772 			   ext_api ? &ext_opts : NULL);
6773 
6774 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
6775 	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0);
6776 	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 512) == 0);
6777 	CU_ASSERT(memcmp(payload_read + 3 * 512, payload_00, 512) == 0);
6778 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_aa, 8 * 512) == 0);
6779 	CU_ASSERT(memcmp(payload_read + 28 * 512, payload_ff, 4 * 512) == 0);
6780 
6781 	CU_ASSERT(memcmp(payload_read + (32 + 0) * 512, payload_ff, 4 * 512) == 0);
6782 	CU_ASSERT(memcmp(payload_read + (32 + 4) * 512, payload_00, 8 * 512) == 0);
6783 	CU_ASSERT(memcmp(payload_read + (32 + 12) * 512, payload_ff, 2 * 512) == 0);
6784 	CU_ASSERT(memcmp(payload_read + (32 + 14) * 512, payload_00, 18 * 512) == 0);
6785 }
6786 
6787 static void
6788 blob_io_unit(void)
6789 {
6790 	struct spdk_bs_opts bsopts;
6791 	struct spdk_blob_opts opts;
6792 	struct spdk_blob_store *bs;
6793 	struct spdk_bs_dev *dev;
6794 	struct spdk_blob *blob, *snapshot, *clone;
6795 	spdk_blob_id blobid;
6796 	struct spdk_io_channel *channel;
6797 
6798 	/* Create dev with 512 bytes io unit size */
6799 
6800 	spdk_bs_opts_init(&bsopts, sizeof(bsopts));
6801 	bsopts.cluster_sz = SPDK_BS_PAGE_SIZE * 4;	/* 4 pages * 8 io_units = 32 io_units per cluster */
6802 	snprintf(bsopts.bstype.bstype, sizeof(bsopts.bstype.bstype), "TESTTYPE");
6803 
6804 	/* Create the device; a 512-byte io unit is supported, so the init below succeeds */
6805 	dev = init_dev();
6806 	dev->blocklen = 512;
6807 	dev->blockcnt =  DEV_BUFFER_SIZE / dev->blocklen;
6808 
6809 	/* Initialize a new blob store */
6810 	spdk_bs_init(dev, &bsopts, bs_op_with_handle_complete, NULL);
6811 	poll_threads();
6812 	CU_ASSERT(g_bserrno == 0);
6813 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
6814 	bs = g_bs;
6815 
6816 	CU_ASSERT(spdk_bs_get_io_unit_size(bs) == 512);
6817 	channel = spdk_bs_alloc_io_channel(bs);
6818 
6819 	/* Create thick provisioned blob */
6820 	ut_spdk_blob_opts_init(&opts);
6821 	opts.thin_provision = false;
6822 	opts.num_clusters = 32;
6823 
6824 	blob = ut_blob_create_and_open(bs, &opts);
6825 	blobid = spdk_blob_get_id(blob);
6826 
6827 	test_io_write(dev, blob, channel);
6828 	test_io_read(dev, blob, channel);
6829 	test_io_zeroes(dev, blob, channel);
6830 
6831 	test_iov_write(dev, blob, channel, false);
6832 	test_iov_read(dev, blob, channel, false);
6833 	test_io_zeroes(dev, blob, channel);
6834 
6835 	test_iov_write(dev, blob, channel, true);
6836 	test_iov_read(dev, blob, channel, true);
6837 
6838 	test_io_unmap(dev, blob, channel);
6839 
6840 	spdk_blob_close(blob, blob_op_complete, NULL);
6841 	poll_threads();
6842 	CU_ASSERT(g_bserrno == 0);
6843 	blob = NULL;
6844 	g_blob = NULL;
6845 
6846 	/* Create thin provisioned blob */
6847 
6848 	ut_spdk_blob_opts_init(&opts);
6849 	opts.thin_provision = true;
6850 	opts.num_clusters = 32;
6851 
6852 	blob = ut_blob_create_and_open(bs, &opts);
6853 	blobid = spdk_blob_get_id(blob);
6854 
6855 	test_io_write(dev, blob, channel);
6856 	test_io_read(dev, blob, channel);
6857 	test_io_zeroes(dev, blob, channel);
6858 
6859 	test_iov_write(dev, blob, channel, false);
6860 	test_iov_read(dev, blob, channel, false);
6861 	test_io_zeroes(dev, blob, channel);
6862 
6863 	test_iov_write(dev, blob, channel, true);
6864 	test_iov_read(dev, blob, channel, true);
6865 
6866 	/* Create snapshot */
6867 
6868 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
6869 	poll_threads();
6870 	CU_ASSERT(g_bserrno == 0);
6871 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
6872 	blobid = g_blobid;
6873 
6874 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
6875 	poll_threads();
6876 	CU_ASSERT(g_bserrno == 0);
6877 	CU_ASSERT(g_blob != NULL);
6878 	snapshot = g_blob;
6879 
6880 	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
6881 	poll_threads();
6882 	CU_ASSERT(g_bserrno == 0);
6883 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
6884 	blobid = g_blobid;
6885 
6886 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
6887 	poll_threads();
6888 	CU_ASSERT(g_bserrno == 0);
6889 	CU_ASSERT(g_blob != NULL);
6890 	clone = g_blob;
6891 
6892 	test_io_read(dev, blob, channel);
6893 	test_io_read(dev, snapshot, channel);
6894 	test_io_read(dev, clone, channel);
6895 
6896 	test_iov_read(dev, blob, channel, false);
6897 	test_iov_read(dev, snapshot, channel, false);
6898 	test_iov_read(dev, clone, channel, false);
6899 
6900 	test_iov_read(dev, blob, channel, true);
6901 	test_iov_read(dev, snapshot, channel, true);
6902 	test_iov_read(dev, clone, channel, true);
6903 
6904 	/* Inflate clone */
6905 
6906 	spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
6907 	poll_threads();
6908 
6909 	CU_ASSERT(g_bserrno == 0);
6910 
6911 	test_io_read(dev, clone, channel);
6912 
6913 	test_io_unmap(dev, clone, channel);
6914 
6915 	test_iov_write(dev, clone, channel, false);
6916 	test_iov_read(dev, clone, channel, false);
6917 	test_io_unmap(dev, clone, channel);
6918 
6919 	test_iov_write(dev, clone, channel, true);
6920 	test_iov_read(dev, clone, channel, true);
6921 
6922 	spdk_blob_close(blob, blob_op_complete, NULL);
6923 	spdk_blob_close(snapshot, blob_op_complete, NULL);
6924 	spdk_blob_close(clone, blob_op_complete, NULL);
6925 	poll_threads();
6926 	CU_ASSERT(g_bserrno == 0);
6927 	blob = NULL;
6928 	g_blob = NULL;
6929 
6930 	spdk_bs_free_io_channel(channel);
6931 	poll_threads();
6932 
6933 	/* Unload the blob store */
6934 	spdk_bs_unload(bs, bs_op_complete, NULL);
6935 	poll_threads();
6936 	CU_ASSERT(g_bserrno == 0);
6937 	g_bs = NULL;
6938 	g_blob = NULL;
6939 	g_blobid = 0;
6940 }
6941 
6942 static void
6943 blob_io_unit_compatibility(void)
6944 {
6945 	struct spdk_bs_opts bsopts;
6946 	struct spdk_blob_store *bs;
6947 	struct spdk_bs_dev *dev;
6948 	struct spdk_bs_super_block *super;
6949 
6950 	/* Create dev with 512 bytes io unit size */
6951 
6952 	spdk_bs_opts_init(&bsopts, sizeof(bsopts));
6953 	bsopts.cluster_sz = SPDK_BS_PAGE_SIZE * 4;	/* 4 pages * 8 io_units = 32 io_units per cluster */
6954 	snprintf(bsopts.bstype.bstype, sizeof(bsopts.bstype.bstype), "TESTTYPE");
6955 
6956 	/* Create the device; a 512-byte io unit is supported, so the init below succeeds */
6957 	dev = init_dev();
6958 	dev->blocklen = 512;
6959 	dev->blockcnt =  DEV_BUFFER_SIZE / dev->blocklen;
6960 
6961 	/* Initialize a new blob store */
6962 	spdk_bs_init(dev, &bsopts, bs_op_with_handle_complete, NULL);
6963 	poll_threads();
6964 	CU_ASSERT(g_bserrno == 0);
6965 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
6966 	bs = g_bs;
6967 
6968 	CU_ASSERT(spdk_bs_get_io_unit_size(bs) == 512);
6969 
6970 	/* Unload the blob store */
6971 	spdk_bs_unload(bs, bs_op_complete, NULL);
6972 	poll_threads();
6973 	CU_ASSERT(g_bserrno == 0);
6974 
6975 	/* Modify the super block to look like an older version (io_unit_size unset)
6976 	 * and check that the loaded io unit size defaults to SPDK_BS_PAGE_SIZE. */
6977 	super = (struct spdk_bs_super_block *)&g_dev_buffer[0];
6978 	super->io_unit_size = 0;
6979 	super->crc = blob_md_page_calc_crc(super);
6980 
6981 	dev = init_dev();
6982 	dev->blocklen = 512;
6983 	dev->blockcnt =  DEV_BUFFER_SIZE / dev->blocklen;
6984 
6985 	spdk_bs_load(dev, &bsopts, bs_op_with_handle_complete, NULL);
6986 	poll_threads();
6987 	CU_ASSERT(g_bserrno == 0);
6988 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
6989 	bs = g_bs;
6990 
6991 	CU_ASSERT(spdk_bs_get_io_unit_size(bs) == SPDK_BS_PAGE_SIZE);
6992 
6993 	/* Unload the blob store */
6994 	spdk_bs_unload(bs, bs_op_complete, NULL);
6995 	poll_threads();
6996 	CU_ASSERT(g_bserrno == 0);
6997 
6998 	g_bs = NULL;
6999 	g_blob = NULL;
7000 	g_blobid = 0;
7001 }
7002 
7003 static void
7004 first_sync_complete(void *cb_arg, int bserrno)
7005 {
7006 	struct spdk_blob *blob = cb_arg;
7007 	int rc;
7008 
7009 	CU_ASSERT(bserrno == 0);
7010 	rc = spdk_blob_set_xattr(blob, "sync", "second", strlen("second") + 1);
7011 	CU_ASSERT(rc == 0);
7012 	CU_ASSERT(g_bserrno == -1);
7013 
7014 	/* Keep g_bserrno at -1; only the
7015 	 * second sync completion should set it to 0. */
7016 }
7017 
7018 static void
7019 second_sync_complete(void *cb_arg, int bserrno)
7020 {
7021 	struct spdk_blob *blob = cb_arg;
7022 	const void *value;
7023 	size_t value_len;
7024 	int rc;
7025 
7026 	CU_ASSERT(bserrno == 0);
7027 
7028 	/* Verify that the first sync completion had a chance to execute */
7029 	rc = spdk_blob_get_xattr_value(blob, "sync", &value, &value_len);
7030 	CU_ASSERT(rc == 0);
7031 	SPDK_CU_ASSERT_FATAL(value != NULL);
7032 	CU_ASSERT(value_len == strlen("second") + 1);
7033 	CU_ASSERT_NSTRING_EQUAL_FATAL(value, "second", value_len);
7034 
7035 	CU_ASSERT(g_bserrno == -1);
7036 	g_bserrno = bserrno;
7037 }
7038 
7039 static void
7040 blob_simultaneous_operations(void)
7041 {
7042 	struct spdk_blob_store *bs = g_bs;
7043 	struct spdk_blob_opts opts;
7044 	struct spdk_blob *blob, *snapshot;
7045 	spdk_blob_id blobid, snapshotid;
7046 	struct spdk_io_channel *channel;
7047 	int rc;
7048 
7049 	channel = spdk_bs_alloc_io_channel(bs);
7050 	SPDK_CU_ASSERT_FATAL(channel != NULL);
7051 
7052 	ut_spdk_blob_opts_init(&opts);
7053 	opts.num_clusters = 10;
7054 
7055 	blob = ut_blob_create_and_open(bs, &opts);
7056 	blobid = spdk_blob_get_id(blob);
7057 
7058 	/* Create a snapshot and try to delete the blob at the same time:
7059 	 * - the snapshot should be created successfully
7060 	 * - the delete operation should fail with -EBUSY */
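	/* The delete fails even before poll_threads() because spdk_bs_create_snapshot() has already
	 * marked the blob with locked_operation_in_progress; the snapshot itself only completes once
	 * the threads are polled, which also releases the lock. */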
7061 	CU_ASSERT(blob->locked_operation_in_progress == false);
7062 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
7063 	CU_ASSERT(blob->locked_operation_in_progress == true);
7064 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
7065 	CU_ASSERT(blob->locked_operation_in_progress == true);
7066 	/* Deletion failure */
7067 	CU_ASSERT(g_bserrno == -EBUSY);
7068 	poll_threads();
7069 	CU_ASSERT(blob->locked_operation_in_progress == false);
7070 	/* Snapshot creation success */
7071 	CU_ASSERT(g_bserrno == 0);
7072 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
7073 
7074 	snapshotid = g_blobid;
7075 
7076 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
7077 	poll_threads();
7078 	CU_ASSERT(g_bserrno == 0);
7079 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
7080 	snapshot = g_blob;
7081 
7082 	/* Inflate the blob and try to delete it at the same time:
7083 	 * - the blob should be inflated successfully
7084 	 * - the delete operation should fail with -EBUSY */
7085 	CU_ASSERT(blob->locked_operation_in_progress == false);
7086 	spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
7087 	CU_ASSERT(blob->locked_operation_in_progress == true);
7088 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
7089 	CU_ASSERT(blob->locked_operation_in_progress == true);
7090 	/* Deletion failure */
7091 	CU_ASSERT(g_bserrno == -EBUSY);
7092 	poll_threads();
7093 	CU_ASSERT(blob->locked_operation_in_progress == false);
7094 	/* Inflation success */
7095 	CU_ASSERT(g_bserrno == 0);
7096 
7097 	/* Clone the snapshot and try to delete the snapshot at the same time:
7098 	 * - the snapshot should be cloned successfully
7099 	 * - the delete operation should fail with -EBUSY */
7100 	CU_ASSERT(blob->locked_operation_in_progress == false);
7101 	spdk_bs_create_clone(bs, snapshotid, NULL, blob_op_with_id_complete, NULL);
7102 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
7103 	/* Deletion failure */
7104 	CU_ASSERT(g_bserrno == -EBUSY);
7105 	poll_threads();
7106 	CU_ASSERT(blob->locked_operation_in_progress == false);
7107 	/* Clone created */
7108 	CU_ASSERT(g_bserrno == 0);
7109 
7110 	/* Resize the blob and try to delete it at the same time:
7111 	 * - the blob should be resized successfully
7112 	 * - the delete operation should fail with -EBUSY */
7113 	CU_ASSERT(blob->locked_operation_in_progress == false);
7114 	spdk_blob_resize(blob, 50, blob_op_complete, NULL);
7115 	CU_ASSERT(blob->locked_operation_in_progress == true);
7116 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
7117 	CU_ASSERT(blob->locked_operation_in_progress == true);
7118 	/* Deletion failure */
7119 	CU_ASSERT(g_bserrno == -EBUSY);
7120 	poll_threads();
7121 	CU_ASSERT(blob->locked_operation_in_progress == false);
7122 	/* Blob resized successfully */
7123 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
7124 	poll_threads();
7125 	CU_ASSERT(g_bserrno == 0);
7126 
7127 	/* Issue two consecutive blob syncs; neither should fail.
7128 	 * Force each sync to actually occur by dirtying the blob (setting an xattr) each time.
7129 	 * Merely starting the sync is not enough to complete the operation,
7130 	 * since disk I/O is required to complete it. */
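	/* Callback ordering (see first_sync_complete()/second_sync_complete() above): the first
	 * completion sets the "sync" xattr to "second" and leaves g_bserrno at -1; the second
	 * completion verifies that value and only then sets g_bserrno, so g_bserrno == 0 after
	 * poll_threads() proves both callbacks ran, in order. */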
7131 	g_bserrno = -1;
7132 
7133 	rc = spdk_blob_set_xattr(blob, "sync", "first", strlen("first") + 1);
7134 	CU_ASSERT(rc == 0);
7135 	spdk_blob_sync_md(blob, first_sync_complete, blob);
7136 	CU_ASSERT(g_bserrno == -1);
7137 
7138 	spdk_blob_sync_md(blob, second_sync_complete, blob);
7139 	CU_ASSERT(g_bserrno == -1);
7140 
7141 	poll_threads();
7142 	CU_ASSERT(g_bserrno == 0);
7143 
7144 	spdk_bs_free_io_channel(channel);
7145 	poll_threads();
7146 
7147 	ut_blob_close_and_delete(bs, snapshot);
7148 	ut_blob_close_and_delete(bs, blob);
7149 }
7150 
7151 static void
7152 blob_persist_test(void)
7153 {
7154 	struct spdk_blob_store *bs = g_bs;
7155 	struct spdk_blob_opts opts;
7156 	struct spdk_blob *blob;
7157 	spdk_blob_id blobid;
7158 	struct spdk_io_channel *channel;
7159 	char *xattr;
7160 	size_t xattr_length;
7161 	int rc;
7162 	uint32_t page_count_clear, page_count_xattr;
7163 	uint64_t poller_iterations;
7164 	bool run_poller;
7165 
7166 	channel = spdk_bs_alloc_io_channel(bs);
7167 	SPDK_CU_ASSERT_FATAL(channel != NULL);
7168 
7169 	ut_spdk_blob_opts_init(&opts);
7170 	opts.num_clusters = 10;
7171 
7172 	blob = ut_blob_create_and_open(bs, &opts);
7173 	blobid = spdk_blob_get_id(blob);
7174 
7175 	/* Save the number of md pages used after creating the blob.
7176 	 * It should be the same again after the xattr is removed. */
7177 	page_count_clear = spdk_bit_array_count_set(bs->used_md_pages);
7178 	SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_clear);
7179 	SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_clear);
7180 
7181 	/* Add an xattr with the maximum descriptor length, so the metadata no longer fits in a single page. */
7182 	xattr_length = SPDK_BS_MAX_DESC_SIZE - sizeof(struct spdk_blob_md_descriptor_xattr) -
7183 		       strlen("large_xattr");
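	/* I.e. the value length is chosen so that descriptor header + name + value add up to
	 * SPDK_BS_MAX_DESC_SIZE, the largest single descriptor allowed; a descriptor that large
	 * cannot share a page with the blob's other metadata, so an extra md page gets allocated
	 * (compare page_count_xattr with page_count_clear below). */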
7184 	xattr = calloc(xattr_length, sizeof(char));
7185 	SPDK_CU_ASSERT_FATAL(xattr != NULL);
7186 
7187 	rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
7188 	SPDK_CU_ASSERT_FATAL(rc == 0);
7189 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
7190 	poll_threads();
7191 	SPDK_CU_ASSERT_FATAL(g_bserrno == 0);
7192 
7193 	/* Save the number of md pages used after adding the large xattr */
7194 	page_count_xattr = spdk_bit_array_count_set(bs->used_md_pages);
7195 	SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_xattr);
7196 	SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_xattr);
7197 
7198 	/* Set the xattr on the blob and sync it. While that sync is in progress, remove the xattr and sync again.
7199 	 * Interrupt the first sync after an increasing number of poller iterations, until it eventually succeeds.
7200 	 * The expectation is that after the second sync completes, no xattr is saved in the metadata. */
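	/* Loop mechanics: each iteration sets the xattr, starts a sync, and runs only
	 * poller_iterations steps on thread 0, so the first sync may still be in flight when the
	 * xattr is removed and a second, fully polled sync is issued. The md page accounting and the
	 * blobstore reload below then confirm the xattr never made it to the persisted metadata. */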
7201 	poller_iterations = 1;
7202 	run_poller = true;
7203 	while (run_poller) {
7204 		rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
7205 		SPDK_CU_ASSERT_FATAL(rc == 0);
7206 		g_bserrno = -1;
7207 		spdk_blob_sync_md(blob, blob_op_complete, NULL);
7208 		poll_thread_times(0, poller_iterations);
7209 		if (g_bserrno == 0) {
7210 			/* The poller iteration count was high enough for the first sync to complete.
7211 			 * Verify that the blob occupies enough md pages to store the xattr. */
7212 			SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_xattr);
7213 			SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_xattr);
7214 			SPDK_CU_ASSERT_FATAL(spdk_bit_array_count_set(bs->used_md_pages) == page_count_xattr);
7215 			run_poller = false;
7216 		}
7217 		rc = spdk_blob_remove_xattr(blob, "large_xattr");
7218 		SPDK_CU_ASSERT_FATAL(rc == 0);
7219 		spdk_blob_sync_md(blob, blob_op_complete, NULL);
7220 		poll_threads();
7221 		SPDK_CU_ASSERT_FATAL(g_bserrno == 0);
7222 		SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_clear);
7223 		SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_clear);
7224 		SPDK_CU_ASSERT_FATAL(spdk_bit_array_count_set(bs->used_md_pages) == page_count_clear);
7225 
7226 		/* Reload bs and re-open blob to verify that xattr was not persisted. */
7227 		spdk_blob_close(blob, blob_op_complete, NULL);
7228 		poll_threads();
7229 		CU_ASSERT(g_bserrno == 0);
7230 
7231 		ut_bs_reload(&bs, NULL);
7232 
7233 		spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
7234 		poll_threads();
7235 		CU_ASSERT(g_bserrno == 0);
7236 		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
7237 		blob = g_blob;
7238 
7239 		rc = spdk_blob_get_xattr_value(blob, "large_xattr", (const void **)&xattr, &xattr_length);
7240 		SPDK_CU_ASSERT_FATAL(rc == -ENOENT);
7241 
7242 		poller_iterations++;
7243 		/* Stop at a high iteration count to prevent an infinite loop.
7244 		 * This value should be enough for the first md sync to complete in any case. */
7245 		SPDK_CU_ASSERT_FATAL(poller_iterations < 50);
7246 	}
7247 
7248 	free(xattr);
7249 
7250 	ut_blob_close_and_delete(bs, blob);
7251 
7252 	spdk_bs_free_io_channel(channel);
7253 	poll_threads();
7254 }
7255 
7256 static void
7257 blob_decouple_snapshot(void)
7258 {
7259 	struct spdk_blob_store *bs = g_bs;
7260 	struct spdk_blob_opts opts;
7261 	struct spdk_blob *blob, *snapshot1, *snapshot2;
7262 	struct spdk_io_channel *channel;
7263 	spdk_blob_id blobid, snapshotid;
7264 	uint64_t cluster;
7265 
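	/* Run the whole scenario twice: once deleting the snapshots before the blob and once deleting
	 * the blob first (see the if/else at the bottom of the loop). */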
7266 	for (int delete_snapshot_first = 0; delete_snapshot_first <= 1; delete_snapshot_first++) {
7267 		channel = spdk_bs_alloc_io_channel(bs);
7268 		SPDK_CU_ASSERT_FATAL(channel != NULL);
7269 
7270 		ut_spdk_blob_opts_init(&opts);
7271 		opts.num_clusters = 10;
7272 		opts.thin_provision = false;
7273 
7274 		blob = ut_blob_create_and_open(bs, &opts);
7275 		blobid = spdk_blob_get_id(blob);
7276 
7277 		/* Create first snapshot */
7278 		CU_ASSERT_EQUAL(_get_snapshots_count(bs), 0);
7279 		spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
7280 		poll_threads();
7281 		CU_ASSERT(g_bserrno == 0);
7282 		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
7283 		CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
7284 		snapshotid = g_blobid;
7285 
7286 		spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
7287 		poll_threads();
7288 		CU_ASSERT(g_bserrno == 0);
7289 		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
7290 		snapshot1 = g_blob;
7291 
7292 		/* Create the second one */
7293 		CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
7294 		spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
7295 		poll_threads();
7296 		CU_ASSERT(g_bserrno == 0);
7297 		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
7298 		CU_ASSERT_EQUAL(_get_snapshots_count(bs), 2);
7299 		snapshotid = g_blobid;
7300 
7301 		spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
7302 		poll_threads();
7303 		CU_ASSERT(g_bserrno == 0);
7304 		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
7305 		snapshot2 = g_blob;
7306 		CU_ASSERT_EQUAL(spdk_blob_get_parent_snapshot(bs, snapshot2->id), snapshot1->id);
7307 
7308 		/* Now decouple the second snapshot, forcing it to copy its parent's allocated clusters */
7309 		spdk_bs_blob_decouple_parent(bs, channel, snapshot2->id, blob_op_complete, NULL);
7310 		poll_threads();
7311 		CU_ASSERT(g_bserrno == 0);
7312 
7313 		/* Verify that the snapshot has been decoupled and that the clusters have been copied */
7314 		CU_ASSERT_EQUAL(spdk_blob_get_parent_snapshot(bs, snapshot2->id), SPDK_BLOBID_INVALID);
7315 		for (cluster = 0; cluster < snapshot2->active.num_clusters; ++cluster) {
7316 			CU_ASSERT_NOT_EQUAL(snapshot2->active.clusters[cluster], 0);
7317 			CU_ASSERT_NOT_EQUAL(snapshot2->active.clusters[cluster],
7318 					    snapshot1->active.clusters[cluster]);
7319 		}
7320 
7321 		spdk_bs_free_io_channel(channel);
7322 
7323 		if (delete_snapshot_first) {
7324 			ut_blob_close_and_delete(bs, snapshot2);
7325 			ut_blob_close_and_delete(bs, snapshot1);
7326 			ut_blob_close_and_delete(bs, blob);
7327 		} else {
7328 			ut_blob_close_and_delete(bs, blob);
7329 			ut_blob_close_and_delete(bs, snapshot2);
7330 			ut_blob_close_and_delete(bs, snapshot1);
7331 		}
7332 		poll_threads();
7333 	}
7334 }
7335 
7336 static void
7337 blob_seek_io_unit(void)
7338 {
7339 	struct spdk_blob_store *bs = g_bs;
7340 	struct spdk_blob *blob;
7341 	struct spdk_io_channel *channel;
7342 	struct spdk_blob_opts opts;
7343 	uint64_t free_clusters;
7344 	uint8_t payload[10 * 4096];
7345 	uint64_t offset;
7346 	uint64_t io_unit, io_units_per_cluster;
7347 
7348 	free_clusters = spdk_bs_free_cluster_count(bs);
7349 
7350 	channel = spdk_bs_alloc_io_channel(bs);
7351 	CU_ASSERT(channel != NULL);
7352 
7353 	/* Set blob as thin provisioned */
7354 	ut_spdk_blob_opts_init(&opts);
7355 	opts.thin_provision = true;
7356 
7357 	/* Create a blob */
7358 	blob = ut_blob_create_and_open(bs, &opts);
7359 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
7360 
7361 	io_units_per_cluster = bs_io_units_per_cluster(blob);
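	/* Seek semantics exercised below: spdk_blob_get_next_allocated_io_unit() returns the first
	 * io unit at or after the given offset that lies in an allocated cluster, while
	 * spdk_blob_get_next_unallocated_io_unit() returns the first io unit in an unallocated
	 * cluster, or UINT64_MAX if no such cluster remains before the end of the blob. */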
7362 
7363 	/* The blob started with 0 clusters. Resize it to 5 clusters; being thin provisioned, they stay unallocated. */
7364 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
7365 	poll_threads();
7366 	CU_ASSERT(g_bserrno == 0);
7367 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
7368 	CU_ASSERT(blob->active.num_clusters == 5);
7369 
7370 	/* Write at the beginning of first cluster */
7371 	offset = 0;
7372 	spdk_blob_io_write(blob, channel, payload, offset, 1, blob_op_complete, NULL);
7373 	poll_threads();
7374 	CU_ASSERT(g_bserrno == 0);
7375 
7376 	io_unit = spdk_blob_get_next_allocated_io_unit(blob, 0);
7377 	CU_ASSERT(io_unit == offset);
7378 
7379 	io_unit = spdk_blob_get_next_unallocated_io_unit(blob, 0);
7380 	CU_ASSERT(io_unit == io_units_per_cluster);
7381 
7382 	/* Write in the middle of third cluster */
7383 	offset = 2 * io_units_per_cluster + io_units_per_cluster / 2;
7384 	spdk_blob_io_write(blob, channel, payload, offset, 1, blob_op_complete, NULL);
7385 	poll_threads();
7386 	CU_ASSERT(g_bserrno == 0);
7387 
7388 	io_unit = spdk_blob_get_next_allocated_io_unit(blob, io_units_per_cluster);
7389 	CU_ASSERT(io_unit == 2 * io_units_per_cluster);
7390 
7391 	io_unit = spdk_blob_get_next_unallocated_io_unit(blob, 2 * io_units_per_cluster);
7392 	CU_ASSERT(io_unit == 3 * io_units_per_cluster);
7393 
7394 	/* Write at the end of last cluster */
7395 	offset = 5 * io_units_per_cluster - 1;
7396 	spdk_blob_io_write(blob, channel, payload, offset, 1, blob_op_complete, NULL);
7397 	poll_threads();
7398 	CU_ASSERT(g_bserrno == 0);
7399 
7400 	io_unit = spdk_blob_get_next_allocated_io_unit(blob, 3 * io_units_per_cluster);
7401 	CU_ASSERT(io_unit == 4 * io_units_per_cluster);
7402 
7403 	io_unit = spdk_blob_get_next_unallocated_io_unit(blob, 4 * io_units_per_cluster);
7404 	CU_ASSERT(io_unit == UINT64_MAX);
7405 
7406 	spdk_bs_free_io_channel(channel);
7407 	poll_threads();
7408 
7409 	ut_blob_close_and_delete(bs, blob);
7410 }
7411 
7412 static void
7413 blob_esnap_create(void)
7414 {
7415 	struct spdk_blob_store	*bs = g_bs;
7416 	struct spdk_bs_opts	bs_opts;
7417 	struct ut_esnap_opts	esnap_opts;
7418 	struct spdk_blob_opts	opts;
7419 	struct spdk_blob_open_opts open_opts;
7420 	struct spdk_blob	*blob;
7421 	uint32_t		cluster_sz, block_sz;
7422 	const uint32_t		esnap_num_clusters = 4;
7423 	uint64_t		esnap_num_blocks;
7424 	uint32_t		sz;
7425 	spdk_blob_id		blobid;
7426 	uint32_t		bs_ctx_count, blob_ctx_count;
7427 
7428 	cluster_sz = spdk_bs_get_cluster_size(bs);
7429 	block_sz = spdk_bs_get_io_unit_size(bs);
7430 	esnap_num_blocks = cluster_sz * esnap_num_clusters / block_sz;
7431 
7432 	/* Create a normal blob and verify it is not an esnap clone. */
7433 	ut_spdk_blob_opts_init(&opts);
7434 	blob = ut_blob_create_and_open(bs, &opts);
7435 	CU_ASSERT(!spdk_blob_is_esnap_clone(blob));
7436 	ut_blob_close_and_delete(bs, blob);
7437 
7438 	/* Create an esnap clone blob then verify it is an esnap clone and has the right size */
7439 	ut_spdk_blob_opts_init(&opts);
7440 	ut_esnap_opts_init(block_sz, esnap_num_blocks, __func__, NULL, &esnap_opts);
7441 	opts.esnap_id = &esnap_opts;
7442 	opts.esnap_id_len = sizeof(esnap_opts);
7443 	opts.num_clusters = esnap_num_clusters;
7444 	blob = ut_blob_create_and_open(bs, &opts);
7445 	SPDK_CU_ASSERT_FATAL(blob != NULL);
7446 	SPDK_CU_ASSERT_FATAL(spdk_blob_is_esnap_clone(blob));
7447 	SPDK_CU_ASSERT_FATAL(blob_is_esnap_clone(blob));
7448 	sz = spdk_blob_get_num_clusters(blob);
7449 	CU_ASSERT(sz == esnap_num_clusters);
7450 	ut_blob_close_and_delete(bs, blob);
7451 
7452 	/* Create an esnap clone without specifying a size and verify it can be grown */
7453 	ut_spdk_blob_opts_init(&opts);
7454 	ut_esnap_opts_init(block_sz, esnap_num_blocks, __func__, NULL, &esnap_opts);
7455 	opts.esnap_id = &esnap_opts;
7456 	opts.esnap_id_len = sizeof(esnap_opts);
7457 	blob = ut_blob_create_and_open(bs, &opts);
7458 	SPDK_CU_ASSERT_FATAL(spdk_blob_is_esnap_clone(blob));
7459 	sz = spdk_blob_get_num_clusters(blob);
7460 	CU_ASSERT(sz == 0);
7461 	spdk_blob_resize(blob, 1, blob_op_complete, NULL);
7462 	poll_threads();
7463 	CU_ASSERT(g_bserrno == 0);
7464 	sz = spdk_blob_get_num_clusters(blob);
7465 	CU_ASSERT(sz == 1);
7466 	spdk_blob_resize(blob, esnap_num_clusters, blob_op_complete, NULL);
7467 	poll_threads();
7468 	CU_ASSERT(g_bserrno == 0);
7469 	sz = spdk_blob_get_num_clusters(blob);
7470 	CU_ASSERT(sz == esnap_num_clusters);
7471 	spdk_blob_resize(blob, esnap_num_clusters + 1, blob_op_complete, NULL);
7472 	poll_threads();
7473 	CU_ASSERT(g_bserrno == 0);
7474 	sz = spdk_blob_get_num_clusters(blob);
7475 	CU_ASSERT(sz == esnap_num_clusters + 1);
7476 
7477 	/* Reload the blobstore and be sure that the blob can be opened. */
7478 	blobid = spdk_blob_get_id(blob);
7479 	spdk_blob_close(blob, blob_op_complete, NULL);
7480 	poll_threads();
7481 	CU_ASSERT(g_bserrno == 0);
7482 	g_blob = NULL;
7483 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
7484 	bs_opts.esnap_bs_dev_create = ut_esnap_create;
7485 	ut_bs_reload(&bs, &bs_opts);
7486 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
7487 	poll_threads();
7488 	CU_ASSERT(g_bserrno == 0);
7489 	CU_ASSERT(g_blob != NULL);
7490 	blob = g_blob;
7491 	SPDK_CU_ASSERT_FATAL(spdk_blob_is_esnap_clone(blob));
7492 	sz = spdk_blob_get_num_clusters(blob);
7493 	CU_ASSERT(sz == esnap_num_clusters + 1);
7494 
7495 	/* Reload the blobstore without esnap_bs_dev_create: should fail to open blob. */
7496 	spdk_blob_close(blob, blob_op_complete, NULL);
7497 	poll_threads();
7498 	CU_ASSERT(g_bserrno == 0);
7499 	g_blob = NULL;
7500 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
7501 	ut_bs_reload(&bs, &bs_opts);
7502 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
7503 	poll_threads();
7504 	CU_ASSERT(g_bserrno != 0);
7505 	CU_ASSERT(g_blob == NULL);
7506 
7507 	/* Reload the blobstore with ctx set and verify it is passed to the esnap create callback */
7508 	bs_ctx_count = 0;
7509 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
7510 	bs_opts.esnap_bs_dev_create = ut_esnap_create_with_count;
7511 	bs_opts.esnap_ctx = &bs_ctx_count;
7512 	ut_bs_reload(&bs, &bs_opts);
7513 	/* Loading the blobstore triggers the esnap to be loaded */
7514 	CU_ASSERT(bs_ctx_count == 1);
7515 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
7516 	poll_threads();
7517 	CU_ASSERT(g_bserrno == 0);
7518 	CU_ASSERT(g_blob != NULL);
7519 	/* Opening the blob also triggers the esnap to be loaded */
7520 	CU_ASSERT(bs_ctx_count == 2);
7521 	blob = g_blob;
7522 	SPDK_CU_ASSERT_FATAL(spdk_blob_is_esnap_clone(blob));
7523 	sz = spdk_blob_get_num_clusters(blob);
7524 	CU_ASSERT(sz == esnap_num_clusters + 1);
7525 	spdk_blob_close(blob, blob_op_complete, NULL);
7526 	poll_threads();
7527 	CU_ASSERT(g_bserrno == 0);
7528 	g_blob = NULL;
7529 	/* If open_opts.esnap_ctx is set it is passed to the esnap create callback */
7530 	blob_ctx_count = 0;
7531 	spdk_blob_open_opts_init(&open_opts, sizeof(open_opts));
7532 	open_opts.esnap_ctx = &blob_ctx_count;
7533 	spdk_bs_open_blob_ext(bs, blobid, &open_opts, blob_op_with_handle_complete, NULL);
7534 	poll_threads();
7535 	blob = g_blob;
7536 	CU_ASSERT(bs_ctx_count == 3);
7537 	CU_ASSERT(blob_ctx_count == 1);
7538 	spdk_blob_close(blob, blob_op_complete, NULL);
7539 	poll_threads();
7540 	CU_ASSERT(g_bserrno == 0);
7541 	g_blob = NULL;
7542 }
7543 
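/*
 * Read back the first "size" bytes of the blob (callers use offset 0) in chunks of "readsize"
 * bytes using the I/O path named by "how" ("read", "readv" or "readv_ext") and check each chunk
 * against the pattern that ut_esnap_content_is_correct() expects for this blob. Returns false on
 * the first mismatch or I/O error so the caller's CU_ASSERT() points at the failing combination.
 */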
7544 static bool
7545 blob_esnap_verify_contents(struct spdk_blob *blob, struct spdk_io_channel *ch,
7546 			   uint64_t offset, uint64_t size, uint32_t readsize, const char *how)
7547 {
7548 	const uint32_t	bs_blksz = blob->bs->io_unit_size;
7549 	const uint32_t	esnap_blksz = blob->back_bs_dev->blocklen;
7550 	const uint32_t	start_blk = offset / bs_blksz;
7551 	const uint32_t	num_blocks = spdk_max(size, readsize) / bs_blksz;
7552 	const uint32_t	blocks_per_read = spdk_min(size, readsize) / bs_blksz;
7553 	uint32_t	blob_block;
7554 	struct iovec	iov;
7555 	uint8_t		buf[spdk_min(size, readsize)];
7556 	bool		block_ok;
7557 
7558 	SPDK_CU_ASSERT_FATAL(offset % bs_blksz == 0);
7559 	SPDK_CU_ASSERT_FATAL(size % bs_blksz == 0);
7560 	SPDK_CU_ASSERT_FATAL(readsize % bs_blksz == 0);
7561 
7562 	memset(buf, 0, readsize);
7563 	iov.iov_base = buf;
7564 	iov.iov_len = readsize;
7565 	for (blob_block = start_blk; blob_block < num_blocks; blob_block += blocks_per_read) {
7566 		if (strcmp(how, "read") == 0) {
7567 			spdk_blob_io_read(blob, ch, buf, blob_block, blocks_per_read,
7568 					  bs_op_complete, NULL);
7569 		} else if (strcmp(how, "readv") == 0) {
7570 			spdk_blob_io_readv(blob, ch, &iov, 1, blob_block, blocks_per_read,
7571 					   bs_op_complete, NULL);
7572 		} else if (strcmp(how, "readv_ext") == 0) {
7573 			/*
7574 			 * This is currently pointless. NULL ext_opts leads to dev->readv(), not
7575 			 * dev->readv_ext().
7576 			 */
7577 			spdk_blob_io_readv_ext(blob, ch, &iov, 1, blob_block, blocks_per_read,
7578 					       bs_op_complete, NULL, NULL);
7579 		} else {
7580 			abort();
7581 		}
7582 		poll_threads();
7583 		CU_ASSERT(g_bserrno == 0);
7584 		if (g_bserrno != 0) {
7585 			return false;
7586 		}
7587 		block_ok = ut_esnap_content_is_correct(buf, blocks_per_read * bs_blksz, blob->id,
7588 						       blob_block * bs_blksz, esnap_blksz);
7589 		CU_ASSERT(block_ok);
7590 		if (!block_ok) {
7591 			return false;
7592 		}
7593 	}
7594 
7595 	return true;
7596 }
7597 
7598 static void
7599 blob_esnap_io_size(uint32_t bs_blksz, uint32_t esnap_blksz)
7600 {
7601 	struct spdk_bs_dev	*dev;
7602 	struct spdk_blob_store	*bs;
7603 	struct spdk_bs_opts	bsopts;
7604 	struct spdk_blob_opts	opts;
7605 	struct ut_esnap_opts	esnap_opts;
7606 	struct spdk_blob	*blob;
7607 	const uint32_t		cluster_sz = 16 * 1024;
7608 	const uint64_t		esnap_num_clusters = 4;
7609 	const uint32_t		esnap_sz = cluster_sz * esnap_num_clusters;
7610 	const uint64_t		esnap_num_blocks = esnap_sz / esnap_blksz;
7611 	const uint64_t		blob_num_blocks = esnap_sz / bs_blksz;
7612 	uint32_t		block;
7613 	struct spdk_io_channel	*bs_ch;
7614 
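	/* The esnap spans esnap_num_clusters (4) clusters of cluster_sz (16 KiB), i.e. 64 KiB;
	 * esnap_num_blocks and blob_num_blocks express that same size in esnap-device blocks and in
	 * blobstore io units respectively, so the per-block loop below touches every blob io unit. */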
7615 	spdk_bs_opts_init(&bsopts, sizeof(bsopts));
7616 	bsopts.cluster_sz = cluster_sz;
7617 	bsopts.esnap_bs_dev_create = ut_esnap_create;
7618 
7619 	/* Create device with desired block size */
7620 	dev = init_dev();
7621 	dev->blocklen = bs_blksz;
7622 	dev->blockcnt = DEV_BUFFER_SIZE / dev->blocklen;
7623 
7624 	/* Initialize a new blob store */
7625 	spdk_bs_init(dev, &bsopts, bs_op_with_handle_complete, NULL);
7626 	poll_threads();
7627 	CU_ASSERT(g_bserrno == 0);
7628 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
7629 	SPDK_CU_ASSERT_FATAL(g_bs->io_unit_size == bs_blksz);
7630 	bs = g_bs;
7631 
7632 	bs_ch = spdk_bs_alloc_io_channel(bs);
7633 	SPDK_CU_ASSERT_FATAL(bs_ch != NULL);
7634 
7635 	/* Create and open the esnap clone */
7636 	ut_spdk_blob_opts_init(&opts);
7637 	ut_esnap_opts_init(esnap_blksz, esnap_num_blocks, __func__, NULL, &esnap_opts);
7638 	opts.esnap_id = &esnap_opts;
7639 	opts.esnap_id_len = sizeof(esnap_opts);
7640 	opts.num_clusters = esnap_num_clusters;
7641 	blob = ut_blob_create_and_open(bs, &opts);
7642 	SPDK_CU_ASSERT_FATAL(blob != NULL);
7643 
7644 	/* Verify that large reads return the content of the esnap device */
7645 	CU_ASSERT(blob_esnap_verify_contents(blob, bs_ch, 0, esnap_sz, esnap_sz, "read"));
7646 	CU_ASSERT(blob_esnap_verify_contents(blob, bs_ch, 0, esnap_sz, esnap_sz, "readv"));
7647 	CU_ASSERT(blob_esnap_verify_contents(blob, bs_ch, 0, esnap_sz, esnap_sz, "readv_ext"));
7648 	/* Verify that small reads return the content of the esnap device */
7649 	CU_ASSERT(blob_esnap_verify_contents(blob, bs_ch, 0, esnap_sz, bs_blksz, "read"));
7650 	CU_ASSERT(blob_esnap_verify_contents(blob, bs_ch, 0, esnap_sz, bs_blksz, "readv"));
7651 	CU_ASSERT(blob_esnap_verify_contents(blob, bs_ch, 0, esnap_sz, bs_blksz, "readv_ext"));
7652 
7653 	/* Write one blob block at a time; verify that the surrounding blocks are OK */
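	/* Each written block carries a pattern derived from a fake blob id (0xfedcba90) and its LBA
	 * via union ut_word, while blocks that have not been written yet must still return the esnap
	 * pattern keyed by the real blob->id. That is why the "before"/"current" checks below pass
	 * word.f.blob_id and the "following block" check passes blob->id. */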
7654 	for (block = 0; block < blob_num_blocks; block++) {
7655 		char		buf[bs_blksz];
7656 		union ut_word	word;
7657 
7658 		word.f.blob_id = 0xfedcba90;
7659 		word.f.lba = block;
7660 		ut_memset8(buf, word.num, bs_blksz);
7661 
7662 		spdk_blob_io_write(blob, bs_ch, buf, block, 1, bs_op_complete, NULL);
7663 		poll_threads();
7664 		CU_ASSERT(g_bserrno == 0);
7665 		if (g_bserrno != 0) {
7666 			break;
7667 		}
7668 
7669 		/* Read and verify the block before the current block */
7670 		if (block != 0) {
7671 			spdk_blob_io_read(blob, bs_ch, buf, block - 1, 1, bs_op_complete, NULL);
7672 			poll_threads();
7673 			CU_ASSERT(g_bserrno == 0);
7674 			if (g_bserrno != 0) {
7675 				break;
7676 			}
7677 			CU_ASSERT(ut_esnap_content_is_correct(buf, bs_blksz, word.f.blob_id,
7678 							      (block - 1) * bs_blksz, bs_blksz));
7679 		}
7680 
7681 		/* Read and verify the current block */
7682 		spdk_blob_io_read(blob, bs_ch, buf, block, 1, bs_op_complete, NULL);
7683 		poll_threads();
7684 		CU_ASSERT(g_bserrno == 0);
7685 		if (g_bserrno != 0) {
7686 			break;
7687 		}
7688 		CU_ASSERT(ut_esnap_content_is_correct(buf, bs_blksz, word.f.blob_id,
7689 						      block * bs_blksz, bs_blksz));
7690 
7691 		/* Check the block that follows */
7692 		if (block + 1 < blob_num_blocks) {
7693 			g_bserrno = 0xbad;
7694 			spdk_blob_io_read(blob, bs_ch, buf, block + 1, 1, bs_op_complete, NULL);
7695 			poll_threads();
7696 			CU_ASSERT(g_bserrno == 0);
7697 			if (g_bserrno != 0) {
7698 				break;
7699 			}
7700 			CU_ASSERT(ut_esnap_content_is_correct(buf, bs_blksz, blob->id,
7701 							      (block + 1) * bs_blksz,
7702 							      esnap_blksz));
7703 		}
7704 	}
7705 
7706 	/* Clean up */
7707 	spdk_bs_free_io_channel(bs_ch);
7708 	g_bserrno = 0xbad;
7709 	spdk_blob_close(blob, blob_op_complete, NULL);
7710 	poll_threads();
7711 	CU_ASSERT(g_bserrno == 0);
7712 	spdk_bs_unload(g_bs, bs_op_complete, NULL);
7713 	poll_threads();
7714 	CU_ASSERT(g_bserrno == 0);
7715 	g_bs = NULL;
7716 	memset(g_dev_buffer, 0, DEV_BUFFER_SIZE);
7717 }
7718 
7719 static void
7720 blob_esnap_io_4096_4096(void)
7721 {
7722 	blob_esnap_io_size(4096, 4096);
7723 }
7724 
7725 static void
7726 blob_esnap_io_512_512(void)
7727 {
7728 	blob_esnap_io_size(512, 512);
7729 }
7730 
7731 static void
7732 blob_esnap_io_4096_512(void)
7733 {
7734 	blob_esnap_io_size(4096, 512);
7735 }
7736 
7737 static void
7738 blob_esnap_io_512_4096(void)
7739 {
7740 	struct spdk_bs_dev	*dev;
7741 	struct spdk_blob_store	*bs;
7742 	struct spdk_bs_opts	bs_opts;
7743 	struct spdk_blob_opts	blob_opts;
7744 	struct ut_esnap_opts	esnap_opts;
7745 	uint64_t		cluster_sz = 16 * 1024;
7746 	uint32_t		bs_blksz = 512;
7747 	uint32_t		esnap_blksz = 4096;
7748 	uint64_t		esnap_num_blocks = 64;
7749 	spdk_blob_id		blobid;
7750 
7751 	/* Create device with desired block size */
7752 	dev = init_dev();
7753 	dev->blocklen = bs_blksz;
7754 	dev->blockcnt = DEV_BUFFER_SIZE / dev->blocklen;
7755 
7756 	/* Initialize a new blob store */
7757 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
7758 	bs_opts.cluster_sz = cluster_sz;
7759 	bs_opts.esnap_bs_dev_create = ut_esnap_create;
7760 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
7761 	poll_threads();
7762 	CU_ASSERT(g_bserrno == 0);
7763 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
7764 	SPDK_CU_ASSERT_FATAL(g_bs->io_unit_size == bs_blksz);
7765 	bs = g_bs;
7766 
7767 	/* Try to create and open the esnap clone. Create should succeed, open should fail. */
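	/* The esnap block size (4096) is larger than the blobstore io unit (512), a combination the
	 * blobstore rejects when opening the esnap clone, hence the -EINVAL below. (The reverse
	 * combination is covered by blob_esnap_io_4096_512 above and works.) */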
7768 	ut_spdk_blob_opts_init(&blob_opts);
7769 	ut_esnap_opts_init(esnap_blksz, esnap_num_blocks, __func__, NULL, &esnap_opts);
7770 	blob_opts.esnap_id = &esnap_opts;
7771 	blob_opts.esnap_id_len = sizeof(esnap_opts);
7772 	blob_opts.num_clusters = esnap_num_blocks * esnap_blksz / bs_blksz;
7773 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
7774 	poll_threads();
7775 	CU_ASSERT(g_bserrno == 0);
7776 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
7777 	blobid = g_blobid;
7778 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
7779 	poll_threads();
7780 	CU_ASSERT(g_bserrno == -EINVAL);
7781 	CU_ASSERT(g_blob == NULL);
7782 
7783 	/* Clean up */
7784 	spdk_bs_unload(bs, bs_op_complete, NULL);
7785 	poll_threads();
7786 	CU_ASSERT(g_bserrno == 0);
7787 	g_bs = NULL;
7788 	memset(g_dev_buffer, 0, DEV_BUFFER_SIZE);
7789 }
7790 
7791 static void
7792 blob_esnap_thread_add_remove(void)
7793 {
7794 	struct spdk_blob_store	*bs = g_bs;
7795 	struct spdk_blob_opts	opts;
7796 	struct ut_esnap_opts	ut_esnap_opts;
7797 	struct spdk_blob	*blob;
7798 	struct ut_esnap_dev	*ut_dev;
7799 	spdk_blob_id		blobid;
7800 	uint64_t		start_thread = g_ut_thread_id;
7801 	bool			destroyed = false;
7802 	struct spdk_io_channel	*ch0, *ch1;
7803 	struct ut_esnap_channel	*ut_ch0, *ut_ch1;
7804 	const uint32_t		blocklen = bs->io_unit_size;
7805 	char			buf[blocklen * 4];
7806 
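	/* Exercise lazy, per-thread creation of the esnap bs_dev channels: they are only created on
	 * the first read issued on a given thread and are destroyed when that thread's blobstore
	 * channel, or the blob itself, is released. */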
7807 	SPDK_CU_ASSERT_FATAL(g_ut_num_threads > 1);
7808 	set_thread(0);
7809 
7810 	/* Create the esnap clone */
7811 	ut_esnap_opts_init(blocklen, 2048, "add_remove_1", &destroyed, &ut_esnap_opts);
7812 	ut_spdk_blob_opts_init(&opts);
7813 	opts.esnap_id = &ut_esnap_opts;
7814 	opts.esnap_id_len = sizeof(ut_esnap_opts);
7815 	opts.num_clusters = 10;
7816 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
7817 	poll_threads();
7818 	CU_ASSERT(g_bserrno == 0);
7819 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
7820 	blobid = g_blobid;
7821 
7822 	/* Open the blob. No channels should be allocated yet. */
7823 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
7824 	poll_threads();
7825 	CU_ASSERT(g_bserrno == 0);
7826 	CU_ASSERT(g_blob != NULL);
7827 	blob = g_blob;
7828 	ut_dev = (struct ut_esnap_dev *)blob->back_bs_dev;
7829 	CU_ASSERT(ut_dev != NULL);
7830 	CU_ASSERT(ut_dev->num_channels == 0);
7831 
7832 	/* Allocate a bs channel on thread 0. The esnap channel is created lazily on the first read. */
7833 	ch0 = spdk_bs_alloc_io_channel(bs);
7834 	CU_ASSERT(ch0 != NULL);
7835 	ut_ch0 = ut_esnap_get_io_channel(ch0, blobid);
7836 	CU_ASSERT(ut_ch0 == NULL);
7837 	CU_ASSERT(ut_dev->num_channels == 0);
7838 	spdk_blob_io_read(blob, ch0, buf, 0, 1, bs_op_complete, NULL);
7839 	poll_threads();
7840 	CU_ASSERT(g_bserrno == 0);
7841 	CU_ASSERT(ut_dev->num_channels == 1);
7842 	ut_ch0 = ut_esnap_get_io_channel(ch0, blobid);
7843 	CU_ASSERT(ut_ch0 != NULL);
7844 	CU_ASSERT(ut_ch0->blocks_read == 1);
7845 
7846 	/* Allocate a bs channel on thread 1 and verify the esnap channel's lazy creation there too. */
7847 	set_thread(1);
7848 	ch1 = spdk_bs_alloc_io_channel(bs);
7849 	CU_ASSERT(ch1 != NULL);
7850 	ut_ch1 = ut_esnap_get_io_channel(ch1, blobid);
7851 	CU_ASSERT(ut_ch1 == NULL);
7852 	CU_ASSERT(ut_dev->num_channels == 1);
7853 	spdk_blob_io_read(blob, ch1, buf, 0, 4, bs_op_complete, NULL);
7854 	poll_threads();
7855 	CU_ASSERT(g_bserrno == 0);
7856 	CU_ASSERT(ut_dev->num_channels == 2);
7857 	ut_ch1 = ut_esnap_get_io_channel(ch1, blobid);
7858 	CU_ASSERT(ut_ch1 != NULL);
7859 	CU_ASSERT(ut_ch1->blocks_read == 4);
7860 
7861 	/* Close the channel on thread 0 and verify the bs_dev channel is also gone. */
7862 	set_thread(0);
7863 	spdk_bs_free_io_channel(ch0);
7864 	poll_threads();
7865 	CU_ASSERT(ut_dev->num_channels == 1);
7866 
7867 	/* Close the blob. There is no outstanding IO so it should close right away. */
7868 	g_bserrno = 0xbad;
7869 	spdk_blob_close(blob, blob_op_complete, NULL);
7870 	poll_threads();
7871 	CU_ASSERT(g_bserrno == 0);
7872 	CU_ASSERT(destroyed);
7873 
7874 	/* The esnap channel for the blob should be gone now too. */
7875 	ut_ch1 = ut_esnap_get_io_channel(ch1, blobid);
7876 	CU_ASSERT(ut_ch1 == NULL);
7877 
7878 	/* Clean up */
7879 	set_thread(1);
7880 	spdk_bs_free_io_channel(ch1);
7881 	set_thread(start_thread);
7882 }
7883 
7884 static void
7885 freeze_done(void *cb_arg, int bserrno)
7886 {
7887 	uint32_t *freeze_cnt = cb_arg;
7888 
7889 	CU_ASSERT(bserrno == 0);
7890 	(*freeze_cnt)++;
7891 }
7892 
7893 static void
7894 unfreeze_done(void *cb_arg, int bserrno)
7895 {
7896 	uint32_t *unfreeze_cnt = cb_arg;
7897 
7898 	CU_ASSERT(bserrno == 0);
7899 	(*unfreeze_cnt)++;
7900 }
7901 
7902 static void
7903 blob_nested_freezes(void)
7904 {
7905 	struct spdk_blob_store *bs = g_bs;
7906 	struct spdk_blob *blob;
7907 	struct spdk_io_channel *channel[2];
7908 	struct spdk_blob_opts opts;
7909 	uint32_t freeze_cnt, unfreeze_cnt;
7910 	int i;
7911 
7912 	for (i = 0; i < 2; i++) {
7913 		set_thread(i);
7914 		channel[i] = spdk_bs_alloc_io_channel(bs);
7915 		SPDK_CU_ASSERT_FATAL(channel[i] != NULL);
7916 	}
7917 
7918 	set_thread(0);
7919 
7920 	ut_spdk_blob_opts_init(&opts);
7921 	blob = ut_blob_create_and_open(bs, &opts);
7922 
7923 	/* First just test a single freeze/unfreeze. */
7924 	freeze_cnt = 0;
7925 	unfreeze_cnt = 0;
7926 	CU_ASSERT(blob->frozen_refcnt == 0);
7927 	blob_freeze_io(blob, freeze_done, &freeze_cnt);
7928 	CU_ASSERT(blob->frozen_refcnt == 1);
7929 	CU_ASSERT(freeze_cnt == 0);
7930 	poll_threads();
7931 	CU_ASSERT(freeze_cnt == 1);
7932 	blob_unfreeze_io(blob, unfreeze_done, &unfreeze_cnt);
7933 	CU_ASSERT(blob->frozen_refcnt == 0);
7934 	CU_ASSERT(unfreeze_cnt == 0);
7935 	poll_threads();
7936 	CU_ASSERT(unfreeze_cnt == 1);
7937 
7938 	/* Now nest multiple freeze/unfreeze operations.  We should
7939 	 * expect a callback for each operation, but only after
7940 	 * the threads have been polled to ensure a for_each_channel()
7941 	 * was executed.
7942 	 */
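	/* blob_freeze_io()/blob_unfreeze_io() adjust frozen_refcnt synchronously, but each completion
	 * only fires after a for_each_channel() pass over both threads' channels, so the counters
	 * stay at 0 until poll_threads() runs. */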
7943 	freeze_cnt = 0;
7944 	unfreeze_cnt = 0;
7945 	CU_ASSERT(blob->frozen_refcnt == 0);
7946 	blob_freeze_io(blob, freeze_done, &freeze_cnt);
7947 	CU_ASSERT(blob->frozen_refcnt == 1);
7948 	CU_ASSERT(freeze_cnt == 0);
7949 	blob_freeze_io(blob, freeze_done, &freeze_cnt);
7950 	CU_ASSERT(blob->frozen_refcnt == 2);
7951 	CU_ASSERT(freeze_cnt == 0);
7952 	poll_threads();
7953 	CU_ASSERT(freeze_cnt == 2);
7954 	blob_unfreeze_io(blob, unfreeze_done, &unfreeze_cnt);
7955 	CU_ASSERT(blob->frozen_refcnt == 1);
7956 	CU_ASSERT(unfreeze_cnt == 0);
7957 	blob_unfreeze_io(blob, unfreeze_done, &unfreeze_cnt);
7958 	CU_ASSERT(blob->frozen_refcnt == 0);
7959 	CU_ASSERT(unfreeze_cnt == 0);
7960 	poll_threads();
7961 	CU_ASSERT(unfreeze_cnt == 2);
7962 
7963 	for (i = 0; i < 2; i++) {
7964 		set_thread(i);
7965 		spdk_bs_free_io_channel(channel[i]);
7966 	}
7967 	set_thread(0);
7968 	ut_blob_close_and_delete(bs, blob);
7969 
7970 	poll_threads();
7971 	g_blob = NULL;
7972 	g_blobid = 0;
7973 }
7974 
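/*
 * Regression test for issue #2932: create a blobstore whose metadata region
 * is tiny compared to the number of clusters, then create a blob that
 * consumes every free cluster.
 */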
7975 static void
7976 blob_ext_md_pages(void)
7977 {
7978 	struct spdk_blob_store *bs;
7979 	struct spdk_bs_dev *dev;
7980 	struct spdk_blob *blob;
7981 	struct spdk_blob_opts opts;
7982 	struct spdk_bs_opts bs_opts;
7983 	uint64_t free_clusters;
7984 
7985 	dev = init_dev();
7986 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
7987 	snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");
	/* Issue #2932 was a bug in how we use bs_allocate_cluster() during resize.
	 * Reproducing it requires a num_md_pages value that is much smaller than
	 * the number of clusters. Make sure we can create a blob that uses all of
	 * the free clusters.
	 */
7992 	bs_opts.cluster_sz = 65536;
7993 	bs_opts.num_md_pages = 16;
7994 
7995 	/* Initialize a new blob store */
7996 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
7997 	poll_threads();
7998 	CU_ASSERT(g_bserrno == 0);
7999 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
8000 	bs = g_bs;
8001 
8002 	free_clusters = spdk_bs_free_cluster_count(bs);
8003 
8004 	ut_spdk_blob_opts_init(&opts);
8005 	opts.num_clusters = free_clusters;
8006 
8007 	blob = ut_blob_create_and_open(bs, &opts);
	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
8010 
8011 	spdk_bs_unload(bs, bs_op_complete, NULL);
8012 	poll_threads();
8013 	CU_ASSERT(g_bserrno == 0);
8014 	g_bs = NULL;
8015 }
8016 
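/* Create a fresh blobstore on a zeroed UT device with default options and
 * store it in g_bs. Used as the fixture for the "blob_bs" suite and called
 * from suite_blob_setup().
 */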
8017 static void
8018 suite_bs_setup(void)
8019 {
8020 	struct spdk_bs_dev *dev;
8021 
8022 	dev = init_dev();
8023 	memset(g_dev_buffer, 0, DEV_BUFFER_SIZE);
8024 	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
8025 	poll_threads();
8026 	CU_ASSERT(g_bserrno == 0);
8027 	CU_ASSERT(g_bs != NULL);
8028 }
8029 
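/* Like suite_bs_setup(), but with a 16 KiB cluster size and an esnap bs_dev
 * creation callback so tests that use external snapshot (esnap) clones can
 * run against this blobstore.
 */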
8030 static void
8031 suite_esnap_bs_setup(void)
8032 {
8033 	struct spdk_bs_dev	*dev;
8034 	struct spdk_bs_opts	bs_opts;
8035 
8036 	dev = init_dev();
8037 	memset(g_dev_buffer, 0, DEV_BUFFER_SIZE);
8038 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
8039 	bs_opts.cluster_sz = 16 * 1024;
8040 	bs_opts.esnap_bs_dev_create = ut_esnap_create;
8041 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
8042 	poll_threads();
8043 	CU_ASSERT(g_bserrno == 0);
8044 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
8045 }
8046 
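/* Unload the blobstore created by the suite setup (if any) and zero the UT
 * device buffer so the next test starts from a clean device.
 */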
8047 static void
8048 suite_bs_cleanup(void)
8049 {
8050 	if (g_bs != NULL) {
8051 		spdk_bs_unload(g_bs, bs_op_complete, NULL);
8052 		poll_threads();
8053 		CU_ASSERT(g_bserrno == 0);
8054 		g_bs = NULL;
8055 	}
8056 	memset(g_dev_buffer, 0, DEV_BUFFER_SIZE);
8057 }
8058 
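/* Helper: create a blob with the given options (UT defaults when blob_opts is
 * NULL), open it, and return the open handle. Asserts on any failure.
 */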
8059 static struct spdk_blob *
8060 ut_blob_create_and_open(struct spdk_blob_store *bs, struct spdk_blob_opts *blob_opts)
8061 {
8062 	struct spdk_blob *blob;
8063 	struct spdk_blob_opts create_blob_opts;
8064 	spdk_blob_id blobid;
8065 
8066 	if (blob_opts == NULL) {
8067 		ut_spdk_blob_opts_init(&create_blob_opts);
8068 		blob_opts = &create_blob_opts;
8069 	}
8070 
8071 	spdk_bs_create_blob_ext(bs, blob_opts, blob_op_with_id_complete, NULL);
8072 	poll_threads();
8073 	CU_ASSERT(g_bserrno == 0);
8074 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
8075 	blobid = g_blobid;
8076 	g_blobid = -1;
8077 
8078 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
8079 	poll_threads();
8080 	CU_ASSERT(g_bserrno == 0);
8081 	CU_ASSERT(g_blob != NULL);
8082 	blob = g_blob;
8083 
8084 	g_blob = NULL;
8085 	g_bserrno = -1;
8086 
8087 	return blob;
8088 }
8089 
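/* Helper: close the given blob and then delete it from the blobstore,
 * asserting that both steps succeed.
 */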
8090 static void
8091 ut_blob_close_and_delete(struct spdk_blob_store *bs, struct spdk_blob *blob)
8092 {
8093 	spdk_blob_id blobid = spdk_blob_get_id(blob);
8094 
8095 	spdk_blob_close(blob, blob_op_complete, NULL);
8096 	poll_threads();
8097 	CU_ASSERT(g_bserrno == 0);
8098 	g_blob = NULL;
8099 
8100 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
8101 	poll_threads();
8102 	CU_ASSERT(g_bserrno == 0);
8103 	g_bserrno = -1;
8104 }
8105 
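/* Fixture for the "blob_blob" suite: set up a blobstore and leave a single
 * open blob in g_blob for the test to use.
 */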
8106 static void
8107 suite_blob_setup(void)
8108 {
8109 	suite_bs_setup();
8110 	CU_ASSERT(g_bs != NULL);
8111 
8112 	g_blob = ut_blob_create_and_open(g_bs, NULL);
8113 	CU_ASSERT(g_blob != NULL);
8114 }
8115 
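/* Tear down the blob and blobstore created by suite_blob_setup(). */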
8116 static void
8117 suite_blob_cleanup(void)
8118 {
8119 	ut_blob_close_and_delete(g_bs, g_blob);
8120 	CU_ASSERT(g_blob == NULL);
8121 
8122 	suite_bs_cleanup();
8123 	CU_ASSERT(g_bs == NULL);
8124 }
8125 
8126 int
8127 main(int argc, char **argv)
8128 {
8129 	CU_pSuite	suite, suite_bs, suite_blob, suite_esnap_bs;
8130 	unsigned int	num_failures;
8131 
8132 	CU_set_error_action(CUEA_ABORT);
8133 	CU_initialize_registry();
8134 
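	/*
	 * Four suites: "blob" runs tests without fixtures, "blob_bs" wraps each
	 * test with a fresh blobstore, "blob_blob" additionally provides an open
	 * blob in g_blob, and "blob_esnap_bs" uses a blobstore configured for
	 * external snapshot (esnap) clones.
	 */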
8135 	suite = CU_add_suite("blob", NULL, NULL);
8136 	suite_bs = CU_add_suite_with_setup_and_teardown("blob_bs", NULL, NULL,
8137 			suite_bs_setup, suite_bs_cleanup);
8138 	suite_blob = CU_add_suite_with_setup_and_teardown("blob_blob", NULL, NULL,
8139 			suite_blob_setup, suite_blob_cleanup);
8140 	suite_esnap_bs = CU_add_suite_with_setup_and_teardown("blob_esnap_bs", NULL, NULL,
8141 			 suite_esnap_bs_setup,
8142 			 suite_bs_cleanup);
8143 
8144 	CU_ADD_TEST(suite, blob_init);
8145 	CU_ADD_TEST(suite_bs, blob_open);
8146 	CU_ADD_TEST(suite_bs, blob_create);
8147 	CU_ADD_TEST(suite_bs, blob_create_loop);
8148 	CU_ADD_TEST(suite_bs, blob_create_fail);
8149 	CU_ADD_TEST(suite_bs, blob_create_internal);
8150 	CU_ADD_TEST(suite_bs, blob_create_zero_extent);
8151 	CU_ADD_TEST(suite, blob_thin_provision);
8152 	CU_ADD_TEST(suite_bs, blob_snapshot);
8153 	CU_ADD_TEST(suite_bs, blob_clone);
8154 	CU_ADD_TEST(suite_bs, blob_inflate);
8155 	CU_ADD_TEST(suite_bs, blob_delete);
8156 	CU_ADD_TEST(suite_bs, blob_resize_test);
8157 	CU_ADD_TEST(suite, blob_read_only);
8158 	CU_ADD_TEST(suite_bs, channel_ops);
8159 	CU_ADD_TEST(suite_bs, blob_super);
8160 	CU_ADD_TEST(suite_blob, blob_write);
8161 	CU_ADD_TEST(suite_blob, blob_read);
8162 	CU_ADD_TEST(suite_blob, blob_rw_verify);
8163 	CU_ADD_TEST(suite_bs, blob_rw_verify_iov);
8164 	CU_ADD_TEST(suite_blob, blob_rw_verify_iov_nomem);
8165 	CU_ADD_TEST(suite_blob, blob_rw_iov_read_only);
8166 	CU_ADD_TEST(suite_bs, blob_unmap);
8167 	CU_ADD_TEST(suite_bs, blob_iter);
8168 	CU_ADD_TEST(suite_blob, blob_xattr);
8169 	CU_ADD_TEST(suite_bs, blob_parse_md);
8170 	CU_ADD_TEST(suite, bs_load);
8171 	CU_ADD_TEST(suite_bs, bs_load_pending_removal);
8172 	CU_ADD_TEST(suite, bs_load_custom_cluster_size);
8173 	CU_ADD_TEST(suite, bs_load_after_failed_grow);
8174 	CU_ADD_TEST(suite_bs, bs_unload);
8175 	CU_ADD_TEST(suite, bs_cluster_sz);
8176 	CU_ADD_TEST(suite_bs, bs_usable_clusters);
8177 	CU_ADD_TEST(suite, bs_resize_md);
8178 	CU_ADD_TEST(suite, bs_destroy);
8179 	CU_ADD_TEST(suite, bs_type);
8180 	CU_ADD_TEST(suite, bs_super_block);
8181 	CU_ADD_TEST(suite, bs_test_recover_cluster_count);
8182 	CU_ADD_TEST(suite, bs_test_grow);
8183 	CU_ADD_TEST(suite, blob_serialize_test);
8184 	CU_ADD_TEST(suite_bs, blob_crc);
8185 	CU_ADD_TEST(suite, super_block_crc);
8186 	CU_ADD_TEST(suite_blob, blob_dirty_shutdown);
8187 	CU_ADD_TEST(suite_bs, blob_flags);
8188 	CU_ADD_TEST(suite_bs, bs_version);
8189 	CU_ADD_TEST(suite_bs, blob_set_xattrs_test);
8190 	CU_ADD_TEST(suite_bs, blob_thin_prov_alloc);
8191 	CU_ADD_TEST(suite_bs, blob_insert_cluster_msg_test);
8192 	CU_ADD_TEST(suite_bs, blob_thin_prov_rw);
8193 	CU_ADD_TEST(suite, blob_thin_prov_write_count_io);
8194 	CU_ADD_TEST(suite_bs, blob_thin_prov_rle);
8195 	CU_ADD_TEST(suite_bs, blob_thin_prov_rw_iov);
8196 	CU_ADD_TEST(suite, bs_load_iter_test);
8197 	CU_ADD_TEST(suite_bs, blob_snapshot_rw);
8198 	CU_ADD_TEST(suite_bs, blob_snapshot_rw_iov);
8199 	CU_ADD_TEST(suite, blob_relations);
8200 	CU_ADD_TEST(suite, blob_relations2);
8201 	CU_ADD_TEST(suite, blob_relations3);
8202 	CU_ADD_TEST(suite, blobstore_clean_power_failure);
8203 	CU_ADD_TEST(suite, blob_delete_snapshot_power_failure);
8204 	CU_ADD_TEST(suite, blob_create_snapshot_power_failure);
8205 	CU_ADD_TEST(suite_bs, blob_inflate_rw);
8206 	CU_ADD_TEST(suite_bs, blob_snapshot_freeze_io);
8207 	CU_ADD_TEST(suite_bs, blob_operation_split_rw);
8208 	CU_ADD_TEST(suite_bs, blob_operation_split_rw_iov);
8209 	CU_ADD_TEST(suite, blob_io_unit);
8210 	CU_ADD_TEST(suite, blob_io_unit_compatibility);
8211 	CU_ADD_TEST(suite_bs, blob_simultaneous_operations);
8212 	CU_ADD_TEST(suite_bs, blob_persist_test);
8213 	CU_ADD_TEST(suite_bs, blob_decouple_snapshot);
8214 	CU_ADD_TEST(suite_bs, blob_seek_io_unit);
8215 	CU_ADD_TEST(suite_esnap_bs, blob_esnap_create);
8216 	CU_ADD_TEST(suite_bs, blob_nested_freezes);
8217 	CU_ADD_TEST(suite, blob_ext_md_pages);
8218 	CU_ADD_TEST(suite, blob_esnap_io_4096_4096);
8219 	CU_ADD_TEST(suite, blob_esnap_io_512_512);
8220 	CU_ADD_TEST(suite, blob_esnap_io_4096_512);
8221 	CU_ADD_TEST(suite, blob_esnap_io_512_4096);
8222 	CU_ADD_TEST(suite_esnap_bs, blob_esnap_thread_add_remove);
8223 
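	/* The tests in this file assume two UT threads and start on thread 0. */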
8224 	allocate_threads(2);
8225 	set_thread(0);
8226 
8227 	g_dev_buffer = calloc(1, DEV_BUFFER_SIZE);
8228 
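	/*
	 * Run the full registry four times: with the extent table metadata format
	 * disabled and enabled, each with the UT dev copy support
	 * (g_dev_copy_enabled) off and on. Failures from every run accumulate
	 * into num_failures.
	 */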
8229 	g_dev_copy_enabled = false;
8230 	CU_basic_set_mode(CU_BRM_VERBOSE);
8231 	g_use_extent_table = false;
8232 	CU_basic_run_tests();
8233 	num_failures = CU_get_number_of_failures();
8234 	g_use_extent_table = true;
8235 	CU_basic_run_tests();
8236 	num_failures += CU_get_number_of_failures();
8237 
8238 	g_dev_copy_enabled = true;
8239 	CU_basic_set_mode(CU_BRM_VERBOSE);
8240 	g_use_extent_table = false;
8241 	CU_basic_run_tests();
	num_failures += CU_get_number_of_failures();
8243 	g_use_extent_table = true;
8244 	CU_basic_run_tests();
8245 	num_failures += CU_get_number_of_failures();
8246 	CU_cleanup_registry();
8247 
8248 	free(g_dev_buffer);
8249 
8250 	free_threads();
8251 
8252 	return num_failures;
8253 }
8254