1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (c) Intel Corporation.
3  *   All rights reserved.
4  *   Copyright (c) 2021-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
5  */
6 
7 #include "spdk/stdinc.h"
8 
9 #include "spdk_cunit.h"
10 #include "spdk/blob.h"
11 #include "spdk/string.h"
12 
13 #include "common/lib/ut_multithread.c"
14 #include "../bs_dev_common.c"
15 #include "blob/blobstore.c"
16 #include "blob/request.c"
17 #include "blob/zeroes.c"
18 #include "blob/blob_bs_dev.c"
19 
20 struct spdk_blob_store *g_bs;
21 spdk_blob_id g_blobid;
22 struct spdk_blob *g_blob, *g_blob2;
23 int g_bserrno, g_bserrno2;
24 struct spdk_xattr_names *g_names;
25 int g_done;
26 char *g_xattr_names[] = {"first", "second", "third"};
27 char *g_xattr_values[] = {"one", "two", "three"};
28 uint64_t g_ctx = 1729;
29 bool g_use_extent_table = false;
30 
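/*
 * On-disk layout of the version 1 super block (one 4 KiB metadata page).
 * Declared locally so tests can work with the raw v1 format directly.
 */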
31 struct spdk_bs_super_block_ver1 {
32 	uint8_t		signature[8];
33 	uint32_t        version;
34 	uint32_t        length;
35 	uint32_t	clean; /* If there was a clean shutdown, this is 1. */
36 	spdk_blob_id	super_blob;
37 
38 	uint32_t	cluster_size; /* In bytes */
39 
40 	uint32_t	used_page_mask_start; /* Offset from beginning of disk, in pages */
41 	uint32_t	used_page_mask_len; /* Count, in pages */
42 
43 	uint32_t	used_cluster_mask_start; /* Offset from beginning of disk, in pages */
44 	uint32_t	used_cluster_mask_len; /* Count, in pages */
45 
46 	uint32_t	md_start; /* Offset from beginning of disk, in pages */
47 	uint32_t	md_len; /* Count, in pages */
48 
49 	uint8_t		reserved[4036];
50 	uint32_t	crc;
51 } __attribute__((packed));
52 SPDK_STATIC_ASSERT(sizeof(struct spdk_bs_super_block_ver1) == 0x1000, "Invalid super block size");
53 
54 static struct spdk_blob *ut_blob_create_and_open(struct spdk_blob_store *bs,
55 		struct spdk_blob_opts *blob_opts);
56 static void ut_blob_close_and_delete(struct spdk_blob_store *bs, struct spdk_blob *blob);
57 static void suite_blob_setup(void);
58 static void suite_blob_cleanup(void);
59 
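/* These tests never exercise memory domains, so stub the memzero helper to simply report success. */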
60 DEFINE_STUB(spdk_memory_domain_memzero, int, (struct spdk_memory_domain *src_domain,
61 		void *src_domain_ctx, struct iovec *iov, uint32_t iovcnt, void (*cpl_cb)(void *, int),
62 		void *cpl_cb_arg), 0);
63 
64 static void
65 _get_xattr_value(void *arg, const char *name,
66 		 const void **value, size_t *value_len)
67 {
68 	uint64_t i;
69 
70 	SPDK_CU_ASSERT_FATAL(value_len != NULL);
71 	SPDK_CU_ASSERT_FATAL(value != NULL);
72 	CU_ASSERT(arg == &g_ctx);
73 
74 	for (i = 0; i < sizeof(g_xattr_names) / sizeof(g_xattr_names[0]); i++) {
75 		if (!strcmp(name, g_xattr_names[i])) {
76 			*value_len = strlen(g_xattr_values[i]);
77 			*value = g_xattr_values[i];
78 			break;
79 		}
80 	}
81 }
82 
83 static void
84 _get_xattr_value_null(void *arg, const char *name,
85 		      const void **value, size_t *value_len)
86 {
87 	SPDK_CU_ASSERT_FATAL(value_len != NULL);
88 	SPDK_CU_ASSERT_FATAL(value != NULL);
89 	CU_ASSERT(arg == NULL);
90 
91 	*value_len = 0;
92 	*value = NULL;
93 }
94 
95 static int
96 _get_snapshots_count(struct spdk_blob_store *bs)
97 {
98 	struct spdk_blob_list *snapshot = NULL;
99 	int count = 0;
100 
101 	TAILQ_FOREACH(snapshot, &bs->snapshots, link) {
102 		count += 1;
103 	}
104 
105 	return count;
106 }
107 
108 static void
109 ut_spdk_blob_opts_init(struct spdk_blob_opts *opts)
110 {
111 	spdk_blob_opts_init(opts, sizeof(*opts));
112 	opts->use_extent_table = g_use_extent_table;
113 }
114 
115 static void
116 bs_op_complete(void *cb_arg, int bserrno)
117 {
118 	g_bserrno = bserrno;
119 }
120 
121 static void
122 bs_op_with_handle_complete(void *cb_arg, struct spdk_blob_store *bs,
123 			   int bserrno)
124 {
125 	g_bs = bs;
126 	g_bserrno = bserrno;
127 }
128 
129 static void
130 blob_op_complete(void *cb_arg, int bserrno)
131 {
132 	g_bserrno = bserrno;
133 }
134 
135 static void
136 blob_op_with_id_complete(void *cb_arg, spdk_blob_id blobid, int bserrno)
137 {
138 	g_blobid = blobid;
139 	g_bserrno = bserrno;
140 }
141 
142 static void
143 blob_op_with_handle_complete(void *cb_arg, struct spdk_blob *blb, int bserrno)
144 {
145 	g_blob = blb;
146 	g_bserrno = bserrno;
147 }
148 
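/* Completion callback used when two blob opens are in flight at once: the first
 * completion fills g_blob/g_bserrno, the second fills g_blob2/g_bserrno2. */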
149 static void
150 blob_op_with_handle_complete2(void *cb_arg, struct spdk_blob *blob, int bserrno)
151 {
152 	if (g_blob == NULL) {
153 		g_blob = blob;
154 		g_bserrno = bserrno;
155 	} else {
156 		g_blob2 = blob;
157 		g_bserrno2 = bserrno;
158 	}
159 }
160 
161 static void
162 ut_bs_reload(struct spdk_blob_store **bs, struct spdk_bs_opts *opts)
163 {
164 	struct spdk_bs_dev *dev;
165 
166 	/* Unload the blob store */
167 	spdk_bs_unload(*bs, bs_op_complete, NULL);
168 	poll_threads();
169 	CU_ASSERT(g_bserrno == 0);
170 
171 	dev = init_dev();
172 	/* Load an existing blob store */
173 	spdk_bs_load(dev, opts, bs_op_with_handle_complete, NULL);
174 	poll_threads();
175 	CU_ASSERT(g_bserrno == 0);
176 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
177 	*bs = g_bs;
178 
179 	g_bserrno = -1;
180 }
181 
182 static void
183 ut_bs_dirty_load(struct spdk_blob_store **bs, struct spdk_bs_opts *opts)
184 {
185 	struct spdk_bs_dev *dev;
186 
187 	/* Dirty shutdown: free the in-memory blobstore without unloading it, leaving the on-disk super block marked unclean */
188 	bs_free(*bs);
189 
190 	dev = init_dev();
191 	/* Load an existing blob store */
192 	spdk_bs_load(dev, opts, bs_op_with_handle_complete, NULL);
193 	poll_threads();
194 	CU_ASSERT(g_bserrno == 0);
195 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
196 	*bs = g_bs;
197 
198 	g_bserrno = -1;
199 }
200 
201 static void
202 blob_init(void)
203 {
204 	struct spdk_blob_store *bs;
205 	struct spdk_bs_dev *dev;
206 
207 	dev = init_dev();
208 
209 	/* should fail for an unsupported blocklen */
210 	dev->blocklen = 500;
211 	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
212 	poll_threads();
213 	CU_ASSERT(g_bserrno == -EINVAL);
214 
215 	dev = init_dev();
216 	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
217 	poll_threads();
218 	CU_ASSERT(g_bserrno == 0);
219 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
220 	bs = g_bs;
221 
222 	spdk_bs_unload(bs, bs_op_complete, NULL);
223 	poll_threads();
224 	CU_ASSERT(g_bserrno == 0);
225 	g_bs = NULL;
226 }
227 
228 static void
229 blob_super(void)
230 {
231 	struct spdk_blob_store *bs = g_bs;
232 	spdk_blob_id blobid;
233 	struct spdk_blob_opts blob_opts;
234 
235 	/* Get the super blob without having set one */
236 	spdk_bs_get_super(bs, blob_op_with_id_complete, NULL);
237 	poll_threads();
238 	CU_ASSERT(g_bserrno == -ENOENT);
239 	CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID);
240 
241 	/* Create a blob */
242 	ut_spdk_blob_opts_init(&blob_opts);
243 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
244 	poll_threads();
245 	CU_ASSERT(g_bserrno == 0);
246 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
247 	blobid = g_blobid;
248 
249 	/* Set the blob as the super blob */
250 	spdk_bs_set_super(bs, blobid, blob_op_complete, NULL);
251 	poll_threads();
252 	CU_ASSERT(g_bserrno == 0);
253 
254 	/* Get the super blob */
255 	spdk_bs_get_super(bs, blob_op_with_id_complete, NULL);
256 	poll_threads();
257 	CU_ASSERT(g_bserrno == 0);
258 	CU_ASSERT(blobid == g_blobid);
259 }
260 
261 static void
262 blob_open(void)
263 {
264 	struct spdk_blob_store *bs = g_bs;
265 	struct spdk_blob *blob;
266 	struct spdk_blob_opts blob_opts;
267 	spdk_blob_id blobid, blobid2;
268 
269 	ut_spdk_blob_opts_init(&blob_opts);
270 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
271 	poll_threads();
272 	CU_ASSERT(g_bserrno == 0);
273 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
274 	blobid = g_blobid;
275 
276 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
277 	poll_threads();
278 	CU_ASSERT(g_bserrno == 0);
279 	CU_ASSERT(g_blob != NULL);
280 	blob = g_blob;
281 
282 	blobid2 = spdk_blob_get_id(blob);
283 	CU_ASSERT(blobid == blobid2);
284 
285 	/* Try to open the blob again.  It should succeed. */
286 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
287 	poll_threads();
288 	CU_ASSERT(g_bserrno == 0);
289 	CU_ASSERT(blob == g_blob);
290 
291 	spdk_blob_close(blob, blob_op_complete, NULL);
292 	poll_threads();
293 	CU_ASSERT(g_bserrno == 0);
294 
295 	/*
296 	 * Close the blob a second time, releasing the second reference.  This
297 	 *  should succeed.
298 	 */
299 	blob = g_blob;
300 	spdk_blob_close(blob, blob_op_complete, NULL);
301 	poll_threads();
302 	CU_ASSERT(g_bserrno == 0);
303 
304 	/*
305 	 * Try to open the blob again.  It should succeed.  This tests the case
306 	 *  where the blob is opened, closed, then re-opened.
307 	 */
308 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
309 	poll_threads();
310 	CU_ASSERT(g_bserrno == 0);
311 	CU_ASSERT(g_blob != NULL);
312 	blob = g_blob;
313 	spdk_blob_close(blob, blob_op_complete, NULL);
314 	poll_threads();
315 	CU_ASSERT(g_bserrno == 0);
316 
317 	/* Try to open the blob twice in succession.  This should return the same
318 	 * blob object.
319 	 */
320 	g_blob = NULL;
321 	g_blob2 = NULL;
322 	g_bserrno = -1;
323 	g_bserrno2 = -1;
324 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete2, NULL);
325 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete2, NULL);
326 	poll_threads();
327 	CU_ASSERT(g_bserrno == 0);
328 	CU_ASSERT(g_bserrno2 == 0);
329 	CU_ASSERT(g_blob != NULL);
330 	CU_ASSERT(g_blob2 != NULL);
331 	CU_ASSERT(g_blob == g_blob2);
332 
333 	g_bserrno = -1;
334 	spdk_blob_close(g_blob, blob_op_complete, NULL);
335 	poll_threads();
336 	CU_ASSERT(g_bserrno == 0);
337 
338 	ut_blob_close_and_delete(bs, g_blob);
339 }
340 
341 static void
342 blob_create(void)
343 {
344 	struct spdk_blob_store *bs = g_bs;
345 	struct spdk_blob *blob;
346 	struct spdk_blob_opts opts;
347 	spdk_blob_id blobid;
348 
349 	/* Create blob with 10 clusters */
350 
351 	ut_spdk_blob_opts_init(&opts);
352 	opts.num_clusters = 10;
353 
354 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
355 	poll_threads();
356 	CU_ASSERT(g_bserrno == 0);
357 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
358 	blobid = g_blobid;
359 
360 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
361 	poll_threads();
362 	CU_ASSERT(g_bserrno == 0);
363 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
364 	blob = g_blob;
365 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
366 
367 	spdk_blob_close(blob, blob_op_complete, NULL);
368 	poll_threads();
369 	CU_ASSERT(g_bserrno == 0);
370 
371 	/* Create blob with 0 clusters */
372 
373 	ut_spdk_blob_opts_init(&opts);
374 	opts.num_clusters = 0;
375 
376 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
377 	poll_threads();
378 	CU_ASSERT(g_bserrno == 0);
379 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
380 	blobid = g_blobid;
381 
382 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
383 	poll_threads();
384 	CU_ASSERT(g_bserrno == 0);
385 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
386 	blob = g_blob;
387 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
388 
389 	spdk_blob_close(blob, blob_op_complete, NULL);
390 	poll_threads();
391 	CU_ASSERT(g_bserrno == 0);
392 
393 	/* Create blob with default options (opts == NULL) */
394 
395 	spdk_bs_create_blob_ext(bs, NULL, blob_op_with_id_complete, NULL);
396 	poll_threads();
397 	CU_ASSERT(g_bserrno == 0);
398 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
399 	blobid = g_blobid;
400 
401 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
402 	poll_threads();
403 	CU_ASSERT(g_bserrno == 0);
404 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
405 	blob = g_blob;
406 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
407 
408 	spdk_blob_close(blob, blob_op_complete, NULL);
409 	poll_threads();
410 	CU_ASSERT(g_bserrno == 0);
411 
412 	/* Try to create a blob with a size larger than the blobstore */
413 
414 	ut_spdk_blob_opts_init(&opts);
415 	opts.num_clusters = bs->total_clusters + 1;
416 
417 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
418 	poll_threads();
419 	CU_ASSERT(g_bserrno == -ENOSPC);
420 }
421 
422 static void
423 blob_create_zero_extent(void)
424 {
425 	struct spdk_blob_store *bs = g_bs;
426 	struct spdk_blob *blob;
427 	spdk_blob_id blobid;
428 
429 	/* Create blob with default options (opts == NULL) */
430 	spdk_bs_create_blob_ext(bs, NULL, blob_op_with_id_complete, NULL);
431 	poll_threads();
432 	CU_ASSERT(g_bserrno == 0);
433 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
434 	blobid = g_blobid;
435 
436 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
437 	poll_threads();
438 	CU_ASSERT(g_bserrno == 0);
439 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
440 	blob = g_blob;
441 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
442 	CU_ASSERT(blob->extent_table_found == true);
443 	CU_ASSERT(blob->active.extent_pages_array_size == 0);
444 	CU_ASSERT(blob->active.extent_pages == NULL);
445 
446 	spdk_blob_close(blob, blob_op_complete, NULL);
447 	poll_threads();
448 	CU_ASSERT(g_bserrno == 0);
449 
450 	/* Create blob with NULL internal options */
451 	bs_create_blob(bs, NULL, NULL, blob_op_with_id_complete, NULL);
452 	poll_threads();
453 	CU_ASSERT(g_bserrno == 0);
454 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
455 	blobid = g_blobid;
456 
457 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
458 	poll_threads();
459 	CU_ASSERT(g_bserrno == 0);
460 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
461 	blob = g_blob;
462 	CU_ASSERT(TAILQ_FIRST(&blob->xattrs_internal) == NULL);
463 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
464 	CU_ASSERT(blob->extent_table_found == true);
465 	CU_ASSERT(blob->active.extent_pages_array_size == 0);
466 	CU_ASSERT(blob->active.extent_pages == NULL);
467 
468 	spdk_blob_close(blob, blob_op_complete, NULL);
469 	poll_threads();
470 	CU_ASSERT(g_bserrno == 0);
471 }
472 
473 /*
474  * Create and delete one blob in a loop over and over again.  This helps ensure
475  * that the internal bit masks tracking used clusters and md_pages are being
476  * tracked correctly.
477  */
478 static void
479 blob_create_loop(void)
480 {
481 	struct spdk_blob_store *bs = g_bs;
482 	struct spdk_blob_opts opts;
483 	uint32_t i, loop_count;
484 
485 	loop_count = 4 * spdk_max(spdk_bit_array_capacity(bs->used_md_pages),
486 				  spdk_bit_pool_capacity(bs->used_clusters));
487 
488 	for (i = 0; i < loop_count; i++) {
489 		ut_spdk_blob_opts_init(&opts);
490 		opts.num_clusters = 1;
491 		g_bserrno = -1;
492 		g_blobid = SPDK_BLOBID_INVALID;
493 		spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
494 		poll_threads();
495 		CU_ASSERT(g_bserrno == 0);
496 		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
497 		spdk_bs_delete_blob(bs, g_blobid, blob_op_complete, NULL);
498 		poll_threads();
499 		CU_ASSERT(g_bserrno == 0);
500 	}
501 }
502 
503 static void
504 blob_create_fail(void)
505 {
506 	struct spdk_blob_store *bs = g_bs;
507 	struct spdk_blob_opts opts;
508 	spdk_blob_id blobid;
509 	uint32_t used_blobids_count = spdk_bit_array_count_set(bs->used_blobids);
510 	uint32_t used_md_pages_count = spdk_bit_array_count_set(bs->used_md_pages);
511 
512 	/* NULL callback */
513 	ut_spdk_blob_opts_init(&opts);
514 	opts.xattrs.names = g_xattr_names;
515 	opts.xattrs.get_value = NULL;
516 	opts.xattrs.count = 1;
517 	opts.xattrs.ctx = &g_ctx;
518 
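	/* The next blob created would claim the first clear bit in used_md_pages,
	 * so remember it and verify below that the failed create left nothing behind. */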
519 	blobid = spdk_bit_array_find_first_clear(bs->used_md_pages, 0);
520 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
521 	poll_threads();
522 	CU_ASSERT(g_bserrno == -EINVAL);
523 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
524 	CU_ASSERT(spdk_bit_array_count_set(bs->used_blobids) == used_blobids_count);
525 	CU_ASSERT(spdk_bit_array_count_set(bs->used_md_pages) == used_md_pages_count);
526 
527 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
528 	poll_threads();
529 	CU_ASSERT(g_bserrno == -ENOENT);
530 	SPDK_CU_ASSERT_FATAL(g_blob == NULL);
531 
532 	ut_bs_reload(&bs, NULL);
533 	CU_ASSERT(spdk_bit_array_count_set(bs->used_blobids) == used_blobids_count);
534 	CU_ASSERT(spdk_bit_array_count_set(bs->used_md_pages) == used_md_pages_count);
535 
536 	spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL);
537 	poll_threads();
538 	CU_ASSERT(g_blob == NULL);
539 	CU_ASSERT(g_bserrno == -ENOENT);
540 }
541 
542 static void
543 blob_create_internal(void)
544 {
545 	struct spdk_blob_store *bs = g_bs;
546 	struct spdk_blob *blob;
547 	struct spdk_blob_opts opts;
548 	struct spdk_blob_xattr_opts internal_xattrs;
549 	const void *value;
550 	size_t value_len;
551 	spdk_blob_id blobid;
552 	int rc;
553 
554 	/* Create blob with custom xattrs */
555 	/* Create blob with custom internal xattrs */
556 	ut_spdk_blob_opts_init(&opts);
557 	blob_xattrs_init(&internal_xattrs);
558 	internal_xattrs.count = 3;
559 	internal_xattrs.names = g_xattr_names;
560 	internal_xattrs.get_value = _get_xattr_value;
561 	internal_xattrs.ctx = &g_ctx;
562 
563 	bs_create_blob(bs, &opts, &internal_xattrs, blob_op_with_id_complete, NULL);
564 	poll_threads();
565 	CU_ASSERT(g_bserrno == 0);
566 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
567 	blobid = g_blobid;
568 
569 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
570 	poll_threads();
571 	CU_ASSERT(g_bserrno == 0);
572 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
573 	blob = g_blob;
574 
575 	rc = blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len, true);
576 	CU_ASSERT(rc == 0);
577 	SPDK_CU_ASSERT_FATAL(value != NULL);
578 	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
579 	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);
580 
581 	rc = blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len, true);
582 	CU_ASSERT(rc == 0);
583 	SPDK_CU_ASSERT_FATAL(value != NULL);
584 	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
585 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);
586 
587 	rc = blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len, true);
588 	CU_ASSERT(rc == 0);
589 	SPDK_CU_ASSERT_FATAL(value != NULL);
590 	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
591 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);
592 
593 	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len);
594 	CU_ASSERT(rc != 0);
595 
596 	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len);
597 	CU_ASSERT(rc != 0);
598 
599 	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len);
600 	CU_ASSERT(rc != 0);
601 
602 	spdk_blob_close(blob, blob_op_complete, NULL);
603 	poll_threads();
604 	CU_ASSERT(g_bserrno == 0);
605 
606 	/* Create blob with NULL internal options */
607 
608 	bs_create_blob(bs, NULL, NULL, blob_op_with_id_complete, NULL);
609 	poll_threads();
610 	CU_ASSERT(g_bserrno == 0);
611 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
612 	blobid = g_blobid;
613 
614 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
615 	poll_threads();
616 	CU_ASSERT(g_bserrno == 0);
617 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
618 	CU_ASSERT(TAILQ_FIRST(&g_blob->xattrs_internal) == NULL);
619 	CU_ASSERT(spdk_blob_get_num_clusters(g_blob) == 0);
620 
621 	blob = g_blob;
622 
623 	spdk_blob_close(blob, blob_op_complete, NULL);
624 	poll_threads();
625 	CU_ASSERT(g_bserrno == 0);
626 }
627 
628 static void
629 blob_thin_provision(void)
630 {
631 	struct spdk_blob_store *bs;
632 	struct spdk_bs_dev *dev;
633 	struct spdk_blob *blob;
634 	struct spdk_blob_opts opts;
635 	struct spdk_bs_opts bs_opts;
636 	spdk_blob_id blobid;
637 
638 	dev = init_dev();
639 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
640 	snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");
641 
642 	/* Initialize a new blob store */
643 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
644 	poll_threads();
645 	CU_ASSERT(g_bserrno == 0);
646 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
647 
648 	bs = g_bs;
649 
650 	/* Create blob with thin provisioning enabled */
651 
652 	ut_spdk_blob_opts_init(&opts);
653 	opts.thin_provision = true;
654 	opts.num_clusters = 10;
655 
656 	blob = ut_blob_create_and_open(bs, &opts);
657 	blobid = spdk_blob_get_id(blob);
658 	CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
659 	/* With thin provisioning and num_clusters set, no clusters are allocated
660 	 * up front. Without the extent table nothing else is allocated either;
661 	 * with the extent table, the extent page array is allocated. */
662 	if (blob->extent_table_found == true) {
663 		CU_ASSERT(blob->active.extent_pages_array_size > 0);
664 		CU_ASSERT(blob->active.extent_pages != NULL);
665 	} else {
666 		CU_ASSERT(blob->active.extent_pages_array_size == 0);
667 		CU_ASSERT(blob->active.extent_pages == NULL);
668 	}
669 
670 	spdk_blob_close(blob, blob_op_complete, NULL);
671 	CU_ASSERT(g_bserrno == 0);
672 
673 	/* Do not shut down cleanly.  This makes sure that when we load again
674 	 *  and try to recover a valid used_cluster map, that blobstore will
675 	 *  ignore clusters with index 0 since these are unallocated clusters.
676 	 */
677 	ut_bs_dirty_load(&bs, &bs_opts);
678 
679 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
680 	poll_threads();
681 	CU_ASSERT(g_bserrno == 0);
682 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
683 	blob = g_blob;
684 	CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
685 
686 	ut_blob_close_and_delete(bs, blob);
687 
688 	spdk_bs_unload(bs, bs_op_complete, NULL);
689 	poll_threads();
690 	CU_ASSERT(g_bserrno == 0);
691 	g_bs = NULL;
692 }
693 
694 static void
695 blob_snapshot(void)
696 {
697 	struct spdk_blob_store *bs = g_bs;
698 	struct spdk_blob *blob;
699 	struct spdk_blob *snapshot, *snapshot2;
700 	struct spdk_blob_bs_dev *blob_bs_dev;
701 	struct spdk_blob_opts opts;
702 	struct spdk_blob_xattr_opts xattrs;
703 	spdk_blob_id blobid;
704 	spdk_blob_id snapshotid;
705 	spdk_blob_id snapshotid2;
706 	const void *value;
707 	size_t value_len;
708 	int rc;
709 	spdk_blob_id ids[2];
710 	size_t count;
711 
712 	/* Create blob with 10 clusters */
713 	ut_spdk_blob_opts_init(&opts);
714 	opts.num_clusters = 10;
715 
716 	blob = ut_blob_create_and_open(bs, &opts);
717 	blobid = spdk_blob_get_id(blob);
718 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
719 
720 	/* Create snapshot from blob */
721 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 0);
722 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
723 	poll_threads();
724 	CU_ASSERT(g_bserrno == 0);
725 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
726 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
727 	snapshotid = g_blobid;
728 
729 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
730 	poll_threads();
731 	CU_ASSERT(g_bserrno == 0);
732 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
733 	snapshot = g_blob;
734 	CU_ASSERT(snapshot->data_ro == true);
735 	CU_ASSERT(snapshot->md_ro == true);
736 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10);
737 
738 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
739 	CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
740 	CU_ASSERT(spdk_mem_all_zero(blob->active.clusters,
741 				    blob->active.num_clusters * sizeof(blob->active.clusters[0])));
742 
743 	/* Create a second snapshot, this time from the clone (the original blob), with xattrs */
744 	xattrs.names = g_xattr_names;
745 	xattrs.get_value = _get_xattr_value;
746 	xattrs.count = 3;
747 	xattrs.ctx = &g_ctx;
748 	spdk_bs_create_snapshot(bs, blobid, &xattrs, blob_op_with_id_complete, NULL);
749 	poll_threads();
750 	CU_ASSERT(g_bserrno == 0);
751 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
752 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 2);
753 	snapshotid2 = g_blobid;
754 
755 	spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL);
	poll_threads();
756 	CU_ASSERT(g_bserrno == 0);
757 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
758 	snapshot2 = g_blob;
759 	CU_ASSERT(snapshot2->data_ro == true);
760 	CU_ASSERT(snapshot2->md_ro == true);
761 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot2) == 10);
762 
763 	/* Confirm that blob is backed by snapshot2 and snapshot2 is backed by snapshot */
764 	CU_ASSERT(snapshot->back_bs_dev == NULL);
765 	SPDK_CU_ASSERT_FATAL(blob->back_bs_dev != NULL);
766 	SPDK_CU_ASSERT_FATAL(snapshot2->back_bs_dev != NULL);
767 
768 	blob_bs_dev = (struct spdk_blob_bs_dev *)blob->back_bs_dev;
769 	CU_ASSERT(blob_bs_dev->blob == snapshot2);
770 
771 	blob_bs_dev = (struct spdk_blob_bs_dev *)snapshot2->back_bs_dev;
772 	CU_ASSERT(blob_bs_dev->blob == snapshot);
773 
774 	rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[0], &value, &value_len);
775 	CU_ASSERT(rc == 0);
776 	SPDK_CU_ASSERT_FATAL(value != NULL);
777 	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
778 	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);
779 
780 	rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[1], &value, &value_len);
781 	CU_ASSERT(rc == 0);
782 	SPDK_CU_ASSERT_FATAL(value != NULL);
783 	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
784 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);
785 
786 	rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[2], &value, &value_len);
787 	CU_ASSERT(rc == 0);
788 	SPDK_CU_ASSERT_FATAL(value != NULL);
789 	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
790 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);
791 
792 	/* Confirm that blob is clone of snapshot2, and snapshot2 is clone of snapshot */
793 	count = 2;
794 	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0);
795 	CU_ASSERT(count == 1);
796 	CU_ASSERT(ids[0] == blobid);
797 
798 	count = 2;
799 	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);
800 	CU_ASSERT(count == 1);
801 	CU_ASSERT(ids[0] == snapshotid2);
802 
803 	/* Try to create snapshot from snapshot */
804 	spdk_bs_create_snapshot(bs, snapshotid, NULL, blob_op_with_id_complete, NULL);
805 	poll_threads();
806 	CU_ASSERT(g_bserrno == -EINVAL);
807 	CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID);
808 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 2);
809 
810 	/* Delete blob and confirm that it is no longer on snapshot2 clone list */
811 	ut_blob_close_and_delete(bs, blob);
812 	count = 2;
813 	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0);
814 	CU_ASSERT(count == 0);
815 
816 	/* Delete snapshot2 and confirm that it is no longer on snapshot clone list */
817 	ut_blob_close_and_delete(bs, snapshot2);
818 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
819 	count = 2;
820 	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0);
821 	CU_ASSERT(count == 0);
822 
823 	ut_blob_close_and_delete(bs, snapshot);
824 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 0);
825 }
826 
827 static void
828 blob_snapshot_freeze_io(void)
829 {
830 	struct spdk_io_channel *channel;
831 	struct spdk_bs_channel *bs_channel;
832 	struct spdk_blob_store *bs = g_bs;
833 	struct spdk_blob *blob;
834 	struct spdk_blob_opts opts;
835 	spdk_blob_id blobid;
836 	uint32_t num_of_pages = 10;
837 	uint8_t payload_read[num_of_pages * SPDK_BS_PAGE_SIZE];
838 	uint8_t payload_write[num_of_pages * SPDK_BS_PAGE_SIZE];
839 	uint8_t payload_zero[num_of_pages * SPDK_BS_PAGE_SIZE];
840 
841 	memset(payload_write, 0xE5, sizeof(payload_write));
842 	memset(payload_read, 0x00, sizeof(payload_read));
843 	memset(payload_zero, 0x00, sizeof(payload_zero));
844 
845 	/* Test freeze I/O during snapshot */
846 	channel = spdk_bs_alloc_io_channel(bs);
847 	bs_channel = spdk_io_channel_get_ctx(channel);
848 
849 	/* Create blob with 10 clusters */
850 	ut_spdk_blob_opts_init(&opts);
851 	opts.num_clusters = 10;
852 	opts.thin_provision = false;
853 
854 	blob = ut_blob_create_and_open(bs, &opts);
855 	blobid = spdk_blob_get_id(blob);
856 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
857 
858 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
859 
860 	/* This is implementation specific.
861 	 * Flag 'frozen_io' is set in _spdk_bs_snapshot_freeze_cpl callback.
862 	 * Four async I/O operations happen before that. */
863 	poll_thread_times(0, 5);
864 
865 	CU_ASSERT(TAILQ_EMPTY(&bs_channel->queued_io));
866 
867 	/* Blob I/O should be frozen here */
868 	CU_ASSERT(blob->frozen_refcnt == 1);
869 
870 	/* Write to the blob */
871 	spdk_blob_io_write(blob, channel, payload_write, 0, num_of_pages, blob_op_complete, NULL);
872 
873 	/* Verify that I/O is queued */
874 	CU_ASSERT(!TAILQ_EMPTY(&bs_channel->queued_io));
875 	/* Verify that the payload is not written to disk; at this point the cluster map has already been handed over to the snapshot */
876 	CU_ASSERT(blob->active.clusters[0] == 0);
877 
878 	/* Finish all operations including spdk_bs_create_snapshot */
879 	poll_threads();
880 
881 	/* Verify snapshot */
882 	CU_ASSERT(g_bserrno == 0);
883 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
884 
885 	/* Verify that the blob is no longer frozen */
886 	CU_ASSERT(blob->frozen_refcnt == 0);
887 
888 	/* Verify that postponed I/O completed successfully by comparing payload */
889 	spdk_blob_io_read(blob, channel, payload_read, 0, num_of_pages, blob_op_complete, NULL);
890 	poll_threads();
891 	CU_ASSERT(g_bserrno == 0);
892 	CU_ASSERT(memcmp(payload_write, payload_read, num_of_pages * SPDK_BS_PAGE_SIZE) == 0);
893 
894 	spdk_bs_free_io_channel(channel);
895 	poll_threads();
896 
897 	ut_blob_close_and_delete(bs, blob);
898 }
899 
900 static void
901 blob_clone(void)
902 {
903 	struct spdk_blob_store *bs = g_bs;
904 	struct spdk_blob_opts opts;
905 	struct spdk_blob *blob, *snapshot, *clone;
906 	spdk_blob_id blobid, cloneid, snapshotid;
907 	struct spdk_blob_xattr_opts xattrs;
908 	const void *value;
909 	size_t value_len;
910 	int rc;
911 
912 	/* Create blob with 10 clusters */
913 
914 	ut_spdk_blob_opts_init(&opts);
915 	opts.num_clusters = 10;
916 
917 	blob = ut_blob_create_and_open(bs, &opts);
918 	blobid = spdk_blob_get_id(blob);
919 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
920 
921 	/* Create snapshot */
922 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
923 	poll_threads();
924 	CU_ASSERT(g_bserrno == 0);
925 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
926 	snapshotid = g_blobid;
927 
928 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
929 	poll_threads();
930 	CU_ASSERT(g_bserrno == 0);
931 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
932 	snapshot = g_blob;
933 	CU_ASSERT(snapshot->data_ro == true);
934 	CU_ASSERT(snapshot->md_ro == true);
935 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10);
936 
937 	spdk_blob_close(snapshot, blob_op_complete, NULL);
938 	poll_threads();
939 	CU_ASSERT(g_bserrno == 0);
940 
941 	/* Create clone from snapshot with xattrs */
942 	xattrs.names = g_xattr_names;
943 	xattrs.get_value = _get_xattr_value;
944 	xattrs.count = 3;
945 	xattrs.ctx = &g_ctx;
946 
947 	spdk_bs_create_clone(bs, snapshotid, &xattrs, blob_op_with_id_complete, NULL);
948 	poll_threads();
949 	CU_ASSERT(g_bserrno == 0);
950 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
951 	cloneid = g_blobid;
952 
953 	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
954 	poll_threads();
955 	CU_ASSERT(g_bserrno == 0);
956 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
957 	clone = g_blob;
958 	CU_ASSERT(clone->data_ro == false);
959 	CU_ASSERT(clone->md_ro == false);
960 	CU_ASSERT(spdk_blob_get_num_clusters(clone) == 10);
961 
962 	rc = spdk_blob_get_xattr_value(clone, g_xattr_names[0], &value, &value_len);
963 	CU_ASSERT(rc == 0);
964 	SPDK_CU_ASSERT_FATAL(value != NULL);
965 	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
966 	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);
967 
968 	rc = spdk_blob_get_xattr_value(clone, g_xattr_names[1], &value, &value_len);
969 	CU_ASSERT(rc == 0);
970 	SPDK_CU_ASSERT_FATAL(value != NULL);
971 	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
972 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);
973 
974 	rc = spdk_blob_get_xattr_value(clone, g_xattr_names[2], &value, &value_len);
975 	CU_ASSERT(rc == 0);
976 	SPDK_CU_ASSERT_FATAL(value != NULL);
977 	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
978 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);
979 
980 
981 	spdk_blob_close(clone, blob_op_complete, NULL);
982 	poll_threads();
983 	CU_ASSERT(g_bserrno == 0);
984 
985 	/* Try to create a clone from a blob that is not read-only (should fail) */
986 	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
987 	poll_threads();
988 	CU_ASSERT(g_bserrno == -EINVAL);
989 	CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID);
990 
991 	/* Mark blob as read only */
992 	spdk_blob_set_read_only(blob);
993 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
994 	poll_threads();
995 	CU_ASSERT(g_bserrno == 0);
996 
997 	/* Create clone from read only blob */
998 	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
999 	poll_threads();
1000 	CU_ASSERT(g_bserrno == 0);
1001 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
1002 	cloneid = g_blobid;
1003 
1004 	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
1005 	poll_threads();
1006 	CU_ASSERT(g_bserrno == 0);
1007 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
1008 	clone = g_blob;
1009 	CU_ASSERT(clone->data_ro == false);
1010 	CU_ASSERT(clone->md_ro == false);
1011 	CU_ASSERT(spdk_blob_get_num_clusters(clone) == 10);
1012 
1013 	ut_blob_close_and_delete(bs, clone);
1014 	ut_blob_close_and_delete(bs, blob);
1015 }
1016 
1017 static void
1018 _blob_inflate(bool decouple_parent)
1019 {
1020 	struct spdk_blob_store *bs = g_bs;
1021 	struct spdk_blob_opts opts;
1022 	struct spdk_blob *blob, *snapshot;
1023 	spdk_blob_id blobid, snapshotid;
1024 	struct spdk_io_channel *channel;
1025 	uint64_t free_clusters;
1026 
1027 	channel = spdk_bs_alloc_io_channel(bs);
1028 	SPDK_CU_ASSERT_FATAL(channel != NULL);
1029 
1030 	/* Create blob with 10 clusters */
1031 
1032 	ut_spdk_blob_opts_init(&opts);
1033 	opts.num_clusters = 10;
1034 	opts.thin_provision = true;
1035 
1036 	blob = ut_blob_create_and_open(bs, &opts);
1037 	blobid = spdk_blob_get_id(blob);
1038 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
1039 	CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == true);
1040 
1041 	/* 1) Blob with no parent */
1042 	if (decouple_parent) {
1043 		/* Decouple parent of blob with no parent (should fail) */
1044 		spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
1045 		poll_threads();
1046 		CU_ASSERT(g_bserrno != 0);
1047 	} else {
1048 		/* Inflating a thin blob with no parent should make it thick-provisioned */
1049 		spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
1050 		poll_threads();
1051 		CU_ASSERT(g_bserrno == 0);
1052 		CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == false);
1053 	}
1054 
1055 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
1056 	poll_threads();
1057 	CU_ASSERT(g_bserrno == 0);
1058 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
1059 	snapshotid = g_blobid;
1060 
1061 	CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == true);
1062 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
1063 
1064 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
1065 	poll_threads();
1066 	CU_ASSERT(g_bserrno == 0);
1067 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
1068 	snapshot = g_blob;
1069 	CU_ASSERT(snapshot->data_ro == true);
1070 	CU_ASSERT(snapshot->md_ro == true);
1071 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10);
1072 
1073 	spdk_blob_close(snapshot, blob_op_complete, NULL);
1074 	poll_threads();
1075 	CU_ASSERT(g_bserrno == 0);
1076 
1077 	free_clusters = spdk_bs_free_cluster_count(bs);
1078 
1079 	/* 2) Blob with parent */
1080 	if (!decouple_parent) {
1081 		/* Do full blob inflation */
1082 		spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
1083 		poll_threads();
1084 		CU_ASSERT(g_bserrno == 0);
1085 		/* all 10 clusters should be allocated */
1086 		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 10);
1087 	} else {
1088 		/* Decouple parent of blob */
1089 		spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
1090 		poll_threads();
1091 		CU_ASSERT(g_bserrno == 0);
1092 		/* when only parent is removed, none of the clusters should be allocated */
1093 		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters);
1094 	}
1095 
1096 	/* Now, it should be possible to delete snapshot */
1097 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
1098 	poll_threads();
1099 	CU_ASSERT(g_bserrno == 0);
1100 
1101 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
1102 	CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == decouple_parent);
1103 
1104 	spdk_bs_free_io_channel(channel);
1105 	poll_threads();
1106 
1107 	ut_blob_close_and_delete(bs, blob);
1108 }
1109 
1110 static void
1111 blob_inflate(void)
1112 {
1113 	_blob_inflate(false);
1114 	_blob_inflate(true);
1115 }
1116 
1117 static void
1118 blob_delete(void)
1119 {
1120 	struct spdk_blob_store *bs = g_bs;
1121 	struct spdk_blob_opts blob_opts;
1122 	spdk_blob_id blobid;
1123 
1124 	/* Create a blob and then delete it. */
1125 	ut_spdk_blob_opts_init(&blob_opts);
1126 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
1127 	poll_threads();
1128 	CU_ASSERT(g_bserrno == 0);
1129 	CU_ASSERT(g_blobid > 0);
1130 	blobid = g_blobid;
1131 
1132 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
1133 	poll_threads();
1134 	CU_ASSERT(g_bserrno == 0);
1135 
1136 	/* Try to open the blob */
1137 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
1138 	poll_threads();
1139 	CU_ASSERT(g_bserrno == -ENOENT);
1140 }
1141 
1142 static void
1143 blob_resize_test(void)
1144 {
1145 	struct spdk_blob_store *bs = g_bs;
1146 	struct spdk_blob *blob;
1147 	uint64_t free_clusters;
1148 
1149 	free_clusters = spdk_bs_free_cluster_count(bs);
1150 
1151 	blob = ut_blob_create_and_open(bs, NULL);
1152 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
1153 
1154 	/* Confirm that resize fails if blob is marked read-only. */
1155 	blob->md_ro = true;
1156 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
1157 	poll_threads();
1158 	CU_ASSERT(g_bserrno == -EPERM);
1159 	blob->md_ro = false;
1160 
1161 	/* The blob started at 0 clusters. Resize it to be 5. */
1162 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
1163 	poll_threads();
1164 	CU_ASSERT(g_bserrno == 0);
1165 	CU_ASSERT((free_clusters - 5) == spdk_bs_free_cluster_count(bs));
1166 
1167 	/* Shrink the blob to 3 clusters. This will not actually release
1168 	 * the old clusters until the blob is synced.
1169 	 */
1170 	spdk_blob_resize(blob, 3, blob_op_complete, NULL);
1171 	poll_threads();
1172 	CU_ASSERT(g_bserrno == 0);
1173 	/* Verify there are still 5 clusters in use */
1174 	CU_ASSERT((free_clusters - 5) == spdk_bs_free_cluster_count(bs));
1175 
1176 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
1177 	poll_threads();
1178 	CU_ASSERT(g_bserrno == 0);
1179 	/* Now there are only 3 clusters in use */
1180 	CU_ASSERT((free_clusters - 3) == spdk_bs_free_cluster_count(bs));
1181 
1182 	/* Resize the blob to be 10 clusters. Growth takes effect immediately. */
1183 	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
1184 	poll_threads();
1185 	CU_ASSERT(g_bserrno == 0);
1186 	CU_ASSERT((free_clusters - 10) == spdk_bs_free_cluster_count(bs));
1187 
1188 	/* Try to resize the blob to a size larger than the blobstore. */
1189 	spdk_blob_resize(blob, bs->total_clusters + 1, blob_op_complete, NULL);
1190 	poll_threads();
1191 	CU_ASSERT(g_bserrno == -ENOSPC);
1192 
1193 	ut_blob_close_and_delete(bs, blob);
1194 }
1195 
1196 static void
1197 blob_read_only(void)
1198 {
1199 	struct spdk_blob_store *bs;
1200 	struct spdk_bs_dev *dev;
1201 	struct spdk_blob *blob;
1202 	struct spdk_bs_opts opts;
1203 	spdk_blob_id blobid;
1204 	int rc;
1205 
1206 	dev = init_dev();
1207 	spdk_bs_opts_init(&opts, sizeof(opts));
1208 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
1209 
1210 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
1211 	poll_threads();
1212 	CU_ASSERT(g_bserrno == 0);
1213 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
1214 	bs = g_bs;
1215 
1216 	blob = ut_blob_create_and_open(bs, NULL);
1217 	blobid = spdk_blob_get_id(blob);
1218 
1219 	rc = spdk_blob_set_read_only(blob);
1220 	CU_ASSERT(rc == 0);
1221 
1222 	CU_ASSERT(blob->data_ro == false);
1223 	CU_ASSERT(blob->md_ro == false);
1224 
1225 	spdk_blob_sync_md(blob, bs_op_complete, NULL);
1226 	poll_threads();
1227 
1228 	CU_ASSERT(blob->data_ro == true);
1229 	CU_ASSERT(blob->md_ro == true);
1230 	CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);
1231 
1232 	spdk_blob_close(blob, blob_op_complete, NULL);
1233 	poll_threads();
1234 	CU_ASSERT(g_bserrno == 0);
1235 
1236 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
1237 	poll_threads();
1238 	CU_ASSERT(g_bserrno == 0);
1239 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
1240 	blob = g_blob;
1241 
1242 	CU_ASSERT(blob->data_ro == true);
1243 	CU_ASSERT(blob->md_ro == true);
1244 	CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);
1245 
1246 	spdk_blob_close(blob, blob_op_complete, NULL);
1247 	poll_threads();
1248 	CU_ASSERT(g_bserrno == 0);
1249 
1250 	ut_bs_reload(&bs, &opts);
1251 
1252 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
1253 	poll_threads();
1254 	CU_ASSERT(g_bserrno == 0);
1255 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
1256 	blob = g_blob;
1257 
1258 	CU_ASSERT(blob->data_ro == true);
1259 	CU_ASSERT(blob->md_ro == true);
1260 	CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);
1261 
1262 	ut_blob_close_and_delete(bs, blob);
1263 
1264 	spdk_bs_unload(bs, bs_op_complete, NULL);
1265 	poll_threads();
1266 	CU_ASSERT(g_bserrno == 0);
1267 }
1268 
1269 static void
1270 channel_ops(void)
1271 {
1272 	struct spdk_blob_store *bs = g_bs;
1273 	struct spdk_io_channel *channel;
1274 
1275 	channel = spdk_bs_alloc_io_channel(bs);
1276 	CU_ASSERT(channel != NULL);
1277 
1278 	spdk_bs_free_io_channel(channel);
1279 	poll_threads();
1280 }
1281 
1282 static void
1283 blob_write(void)
1284 {
1285 	struct spdk_blob_store *bs = g_bs;
1286 	struct spdk_blob *blob = g_blob;
1287 	struct spdk_io_channel *channel;
1288 	uint64_t pages_per_cluster;
1289 	uint8_t payload[10 * 4096];
1290 
1291 	pages_per_cluster = spdk_bs_get_cluster_size(bs) / spdk_bs_get_page_size(bs);
1292 
1293 	channel = spdk_bs_alloc_io_channel(bs);
1294 	CU_ASSERT(channel != NULL);
1295 
1296 	/* Write to a blob with 0 size */
1297 	spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL);
1298 	poll_threads();
1299 	CU_ASSERT(g_bserrno == -EINVAL);
1300 
1301 	/* Resize the blob */
1302 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
1303 	poll_threads();
1304 	CU_ASSERT(g_bserrno == 0);
1305 
1306 	/* Confirm that write fails if blob is marked read-only. */
1307 	blob->data_ro = true;
1308 	spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL);
1309 	poll_threads();
1310 	CU_ASSERT(g_bserrno == -EPERM);
1311 	blob->data_ro = false;
1312 
1313 	/* Write to the blob */
1314 	spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL);
1315 	poll_threads();
1316 	CU_ASSERT(g_bserrno == 0);
1317 
1318 	/* Write starting beyond the end */
1319 	spdk_blob_io_write(blob, channel, payload, 5 * pages_per_cluster, 1, blob_op_complete,
1320 			   NULL);
1321 	poll_threads();
1322 	CU_ASSERT(g_bserrno == -EINVAL);
1323 
1324 	/* Write starting at a valid location but going off the end */
1325 	spdk_blob_io_write(blob, channel, payload, 4 * pages_per_cluster, pages_per_cluster + 1,
1326 			   blob_op_complete, NULL);
1327 	poll_threads();
1328 	CU_ASSERT(g_bserrno == -EINVAL);
1329 
1330 	spdk_bs_free_io_channel(channel);
1331 	poll_threads();
1332 }
1333 
1334 static void
1335 blob_read(void)
1336 {
1337 	struct spdk_blob_store *bs = g_bs;
1338 	struct spdk_blob *blob = g_blob;
1339 	struct spdk_io_channel *channel;
1340 	uint64_t pages_per_cluster;
1341 	uint8_t payload[10 * 4096];
1342 
1343 	pages_per_cluster = spdk_bs_get_cluster_size(bs) / spdk_bs_get_page_size(bs);
1344 
1345 	channel = spdk_bs_alloc_io_channel(bs);
1346 	CU_ASSERT(channel != NULL);
1347 
1348 	/* Read from a blob with 0 size */
1349 	spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL);
1350 	poll_threads();
1351 	CU_ASSERT(g_bserrno == -EINVAL);
1352 
1353 	/* Resize the blob */
1354 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
1355 	poll_threads();
1356 	CU_ASSERT(g_bserrno == 0);
1357 
1358 	/* Confirm that read passes if blob is marked read-only. */
1359 	blob->data_ro = true;
1360 	spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL);
1361 	poll_threads();
1362 	CU_ASSERT(g_bserrno == 0);
1363 	blob->data_ro = false;
1364 
1365 	/* Read from the blob */
1366 	spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL);
1367 	poll_threads();
1368 	CU_ASSERT(g_bserrno == 0);
1369 
1370 	/* Read starting beyond the end */
1371 	spdk_blob_io_read(blob, channel, payload, 5 * pages_per_cluster, 1, blob_op_complete,
1372 			  NULL);
1373 	poll_threads();
1374 	CU_ASSERT(g_bserrno == -EINVAL);
1375 
1376 	/* Read starting at a valid location but going off the end */
1377 	spdk_blob_io_read(blob, channel, payload, 4 * pages_per_cluster, pages_per_cluster + 1,
1378 			  blob_op_complete, NULL);
1379 	poll_threads();
1380 	CU_ASSERT(g_bserrno == -EINVAL);
1381 
1382 	spdk_bs_free_io_channel(channel);
1383 	poll_threads();
1384 }
1385 
1386 static void
1387 blob_rw_verify(void)
1388 {
1389 	struct spdk_blob_store *bs = g_bs;
1390 	struct spdk_blob *blob = g_blob;
1391 	struct spdk_io_channel *channel;
1392 	uint8_t payload_read[10 * 4096];
1393 	uint8_t payload_write[10 * 4096];
1394 
1395 	channel = spdk_bs_alloc_io_channel(bs);
1396 	CU_ASSERT(channel != NULL);
1397 
1398 	spdk_blob_resize(blob, 32, blob_op_complete, NULL);
1399 	poll_threads();
1400 	CU_ASSERT(g_bserrno == 0);
1401 
1402 	memset(payload_write, 0xE5, sizeof(payload_write));
1403 	spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
1404 	poll_threads();
1405 	CU_ASSERT(g_bserrno == 0);
1406 
1407 	memset(payload_read, 0x00, sizeof(payload_read));
1408 	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
1409 	poll_threads();
1410 	CU_ASSERT(g_bserrno == 0);
1411 	CU_ASSERT(memcmp(payload_write, payload_read, 4 * 4096) == 0);
1412 
1413 	spdk_bs_free_io_channel(channel);
1414 	poll_threads();
1415 }
1416 
1417 static void
1418 blob_rw_verify_iov(void)
1419 {
1420 	struct spdk_blob_store *bs = g_bs;
1421 	struct spdk_blob *blob;
1422 	struct spdk_io_channel *channel;
1423 	uint8_t payload_read[10 * 4096];
1424 	uint8_t payload_write[10 * 4096];
1425 	struct iovec iov_read[3];
1426 	struct iovec iov_write[3];
1427 	void *buf;
1428 
1429 	channel = spdk_bs_alloc_io_channel(bs);
1430 	CU_ASSERT(channel != NULL);
1431 
1432 	blob = ut_blob_create_and_open(bs, NULL);
1433 
1434 	spdk_blob_resize(blob, 2, blob_op_complete, NULL);
1435 	poll_threads();
1436 	CU_ASSERT(g_bserrno == 0);
1437 
1438 	/*
1439 	 * Manually adjust the offset of the blob's second cluster.  This allows
1440 	 *  us to make sure that the readv/write code correctly accounts for I/O
1441 	 *  that cross cluster boundaries.  Start by asserting that the allocated
1442 	 *  clusters are where we expect before modifying the second cluster.
1443 	 */
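	/* active.clusters[] holds each cluster's starting LBA; with 4 KiB blocks and
	 * 1 MiB clusters, cluster N starts at LBA N * 256. */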
1444 	CU_ASSERT(blob->active.clusters[0] == 1 * 256);
1445 	CU_ASSERT(blob->active.clusters[1] == 2 * 256);
1446 	blob->active.clusters[1] = 3 * 256;
1447 
1448 	memset(payload_write, 0xE5, sizeof(payload_write));
1449 	iov_write[0].iov_base = payload_write;
1450 	iov_write[0].iov_len = 1 * 4096;
1451 	iov_write[1].iov_base = payload_write + 1 * 4096;
1452 	iov_write[1].iov_len = 5 * 4096;
1453 	iov_write[2].iov_base = payload_write + 6 * 4096;
1454 	iov_write[2].iov_len = 4 * 4096;
1455 	/*
1456 	 * Choose a page offset just before the cluster boundary.  The first 6 pages of payload
1457 	 *  will get written to the first cluster, the last 4 to the second cluster.
1458 	 */
1459 	spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
1460 	poll_threads();
1461 	CU_ASSERT(g_bserrno == 0);
1462 
1463 	memset(payload_read, 0xAA, sizeof(payload_read));
1464 	iov_read[0].iov_base = payload_read;
1465 	iov_read[0].iov_len = 3 * 4096;
1466 	iov_read[1].iov_base = payload_read + 3 * 4096;
1467 	iov_read[1].iov_len = 4 * 4096;
1468 	iov_read[2].iov_base = payload_read + 7 * 4096;
1469 	iov_read[2].iov_len = 3 * 4096;
1470 	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
1471 	poll_threads();
1472 	CU_ASSERT(g_bserrno == 0);
1473 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
1474 
1475 	buf = calloc(1, 256 * 4096);
1476 	SPDK_CU_ASSERT_FATAL(buf != NULL);
1477 	/* Check that cluster 2 on "disk" was not modified. */
1478 	CU_ASSERT(memcmp(buf, &g_dev_buffer[512 * 4096], 256 * 4096) == 0);
1479 	free(buf);
1480 
1481 	spdk_blob_close(blob, blob_op_complete, NULL);
1482 	poll_threads();
1483 	CU_ASSERT(g_bserrno == 0);
1484 
1485 	spdk_bs_free_io_channel(channel);
1486 	poll_threads();
1487 }
1488 
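/* Count the request sets on the channel's free list.  Used to verify that a
 * failed I/O returns its request set to the pool instead of leaking it. */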
1489 static uint32_t
1490 bs_channel_get_req_count(struct spdk_io_channel *_channel)
1491 {
1492 	struct spdk_bs_channel *channel = spdk_io_channel_get_ctx(_channel);
1493 	struct spdk_bs_request_set *set;
1494 	uint32_t count = 0;
1495 
1496 	TAILQ_FOREACH(set, &channel->reqs, link) {
1497 		count++;
1498 	}
1499 
1500 	return count;
1501 }
1502 
1503 static void
1504 blob_rw_verify_iov_nomem(void)
1505 {
1506 	struct spdk_blob_store *bs = g_bs;
1507 	struct spdk_blob *blob = g_blob;
1508 	struct spdk_io_channel *channel;
1509 	uint8_t payload_write[10 * 4096];
1510 	struct iovec iov_write[3];
1511 	uint32_t req_count;
1512 
1513 	channel = spdk_bs_alloc_io_channel(bs);
1514 	CU_ASSERT(channel != NULL);
1515 
1516 	spdk_blob_resize(blob, 2, blob_op_complete, NULL);
1517 	poll_threads();
1518 	CU_ASSERT(g_bserrno == 0);
1519 
1520 	/*
1521 	 * Choose a page offset just before the cluster boundary.  The first 6 pages of payload
1522 	 *  will get written to the first cluster, the last 4 to the second cluster.
1523 	 */
1524 	iov_write[0].iov_base = payload_write;
1525 	iov_write[0].iov_len = 1 * 4096;
1526 	iov_write[1].iov_base = payload_write + 1 * 4096;
1527 	iov_write[1].iov_len = 5 * 4096;
1528 	iov_write[2].iov_base = payload_write + 6 * 4096;
1529 	iov_write[2].iov_len = 4 * 4096;
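	/* Force calloc() to fail so the cluster-spanning writev cannot allocate and
	 * completes with -ENOMEM; the request count check verifies nothing leaked. */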
1530 	MOCK_SET(calloc, NULL);
1531 	req_count = bs_channel_get_req_count(channel);
1532 	spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
1533 	poll_threads();
1534 	CU_ASSERT(g_bserrno == -ENOMEM);
1535 	CU_ASSERT(req_count == bs_channel_get_req_count(channel));
1536 	MOCK_CLEAR(calloc);
1537 
1538 	spdk_bs_free_io_channel(channel);
1539 	poll_threads();
1540 }
1541 
1542 static void
1543 blob_rw_iov_read_only(void)
1544 {
1545 	struct spdk_blob_store *bs = g_bs;
1546 	struct spdk_blob *blob = g_blob;
1547 	struct spdk_io_channel *channel;
1548 	uint8_t payload_read[4096];
1549 	uint8_t payload_write[4096];
1550 	struct iovec iov_read;
1551 	struct iovec iov_write;
1552 
1553 	channel = spdk_bs_alloc_io_channel(bs);
1554 	CU_ASSERT(channel != NULL);
1555 
1556 	spdk_blob_resize(blob, 2, blob_op_complete, NULL);
1557 	poll_threads();
1558 	CU_ASSERT(g_bserrno == 0);
1559 
1560 	/* Verify that writev fails if the data_ro flag is set. */
1561 	blob->data_ro = true;
1562 	iov_write.iov_base = payload_write;
1563 	iov_write.iov_len = sizeof(payload_write);
1564 	spdk_blob_io_writev(blob, channel, &iov_write, 1, 0, 1, blob_op_complete, NULL);
1565 	poll_threads();
1566 	CU_ASSERT(g_bserrno == -EPERM);
1567 
1568 	/* Verify that reads pass if data_ro flag is set. */
1569 	iov_read.iov_base = payload_read;
1570 	iov_read.iov_len = sizeof(payload_read);
1571 	spdk_blob_io_readv(blob, channel, &iov_read, 1, 0, 1, blob_op_complete, NULL);
1572 	poll_threads();
1573 	CU_ASSERT(g_bserrno == 0);
1574 
1575 	spdk_bs_free_io_channel(channel);
1576 	poll_threads();
1577 }
1578 
1579 static void
1580 _blob_io_read_no_split(struct spdk_blob *blob, struct spdk_io_channel *channel,
1581 		       uint8_t *payload, uint64_t offset, uint64_t length,
1582 		       spdk_blob_op_complete cb_fn, void *cb_arg)
1583 {
1584 	uint64_t i;
1585 	uint8_t *buf;
1586 	uint64_t page_size = spdk_bs_get_page_size(blob->bs);
1587 
1588 	/* To be sure that the operation is NOT split, read one page at a time */
1589 	buf = payload;
1590 	for (i = 0; i < length; i++) {
1591 		spdk_blob_io_read(blob, channel, buf, i + offset, 1, blob_op_complete, NULL);
1592 		poll_threads();
1593 		if (g_bserrno != 0) {
1594 			/* Pass the error code up */
1595 			break;
1596 		}
1597 		buf += page_size;
1598 	}
1599 
1600 	cb_fn(cb_arg, g_bserrno);
1601 }
1602 
1603 static void
1604 _blob_io_write_no_split(struct spdk_blob *blob, struct spdk_io_channel *channel,
1605 			uint8_t *payload, uint64_t offset, uint64_t length,
1606 			spdk_blob_op_complete cb_fn, void *cb_arg)
1607 {
1608 	uint64_t i;
1609 	uint8_t *buf;
1610 	uint64_t page_size = spdk_bs_get_page_size(blob->bs);
1611 
1612 	/* To be sure that the operation is NOT split, write one page at a time */
1613 	buf = payload;
1614 	for (i = 0; i < length; i++) {
1615 		spdk_blob_io_write(blob, channel, buf, i + offset, 1, blob_op_complete, NULL);
1616 		poll_threads();
1617 		if (g_bserrno != 0) {
1618 			/* Pass the error code up */
1619 			break;
1620 		}
1621 		buf += page_size;
1622 	}
1623 
1624 	cb_fn(cb_arg, g_bserrno);
1625 }
1626 
1627 static void
1628 blob_operation_split_rw(void)
1629 {
1630 	struct spdk_blob_store *bs = g_bs;
1631 	struct spdk_blob *blob;
1632 	struct spdk_io_channel *channel;
1633 	struct spdk_blob_opts opts;
1634 	uint64_t cluster_size;
1635 
1636 	uint64_t payload_size;
1637 	uint8_t *payload_read;
1638 	uint8_t *payload_write;
1639 	uint8_t *payload_pattern;
1640 
1641 	uint64_t page_size;
1642 	uint64_t pages_per_cluster;
1643 	uint64_t pages_per_payload;
1644 
1645 	uint64_t i;
1646 
1647 	cluster_size = spdk_bs_get_cluster_size(bs);
1648 	page_size = spdk_bs_get_page_size(bs);
1649 	pages_per_cluster = cluster_size / page_size;
1650 	pages_per_payload = pages_per_cluster * 5;
1651 	payload_size = cluster_size * 5;
1652 
1653 	payload_read = malloc(payload_size);
1654 	SPDK_CU_ASSERT_FATAL(payload_read != NULL);
1655 
1656 	payload_write = malloc(payload_size);
1657 	SPDK_CU_ASSERT_FATAL(payload_write != NULL);
1658 
1659 	payload_pattern = malloc(payload_size);
1660 	SPDK_CU_ASSERT_FATAL(payload_pattern != NULL);
1661 
1662 	/* Prepare a recognizable pattern to write: 0xFF fill, with the first 8 bytes of each page set to its 1-based page index */
1663 	memset(payload_pattern, 0xFF, payload_size);
1664 	for (i = 0; i < pages_per_payload; i++) {
1665 		*((uint64_t *)(payload_pattern + page_size * i)) = (i + 1);
1666 	}
1667 
1668 	channel = spdk_bs_alloc_io_channel(bs);
1669 	SPDK_CU_ASSERT_FATAL(channel != NULL);
1670 
1671 	/* Create blob */
1672 	ut_spdk_blob_opts_init(&opts);
1673 	opts.thin_provision = false;
1674 	opts.num_clusters = 5;
1675 
1676 	blob = ut_blob_create_and_open(bs, &opts);
1677 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
1678 
1679 	/* Initial read should return zeroed payload */
1680 	memset(payload_read, 0xFF, payload_size);
1681 	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
1682 	poll_threads();
1683 	CU_ASSERT(g_bserrno == 0);
1684 	CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size));
1685 
1686 	/* Fill whole blob except last page */
1687 	spdk_blob_io_write(blob, channel, payload_pattern, 0, pages_per_payload - 1,
1688 			   blob_op_complete, NULL);
1689 	poll_threads();
1690 	CU_ASSERT(g_bserrno == 0);
1691 
1692 	/* Write last page with a pattern */
1693 	spdk_blob_io_write(blob, channel, payload_pattern, pages_per_payload - 1, 1,
1694 			   blob_op_complete, NULL);
1695 	poll_threads();
1696 	CU_ASSERT(g_bserrno == 0);
1697 
1698 	/* Read whole blob and check consistency */
1699 	memset(payload_read, 0xFF, payload_size);
1700 	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
1701 	poll_threads();
1702 	CU_ASSERT(g_bserrno == 0);
1703 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size - page_size) == 0);
1704 	CU_ASSERT(memcmp(payload_pattern, payload_read + payload_size - page_size, page_size) == 0);
1705 
1706 	/* Fill whole blob except first page */
1707 	spdk_blob_io_write(blob, channel, payload_pattern, 1, pages_per_payload - 1,
1708 			   blob_op_complete, NULL);
1709 	poll_threads();
1710 	CU_ASSERT(g_bserrno == 0);
1711 
1712 	/* Write first page with a pattern */
1713 	spdk_blob_io_write(blob, channel, payload_pattern, 0, 1,
1714 			   blob_op_complete, NULL);
1715 	poll_threads();
1716 	CU_ASSERT(g_bserrno == 0);
1717 
1718 	/* Read whole blob and check consistency */
1719 	memset(payload_read, 0xFF, payload_size);
1720 	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
1721 	poll_threads();
1722 	CU_ASSERT(g_bserrno == 0);
1723 	CU_ASSERT(memcmp(payload_pattern, payload_read + page_size, payload_size - page_size) == 0);
1724 	CU_ASSERT(memcmp(payload_pattern, payload_read, page_size) == 0);
1725 
1726 
1727 	/* Fill whole blob with a pattern (5 clusters) */
1728 
1729 	/* 1. Read test. */
1730 	_blob_io_write_no_split(blob, channel, payload_pattern, 0, pages_per_payload,
1731 				blob_op_complete, NULL);
1732 	poll_threads();
1733 	CU_ASSERT(g_bserrno == 0);
1734 
1735 	memset(payload_read, 0xFF, payload_size);
1736 	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
1737 	poll_threads();
1738 	poll_threads();
1739 	CU_ASSERT(g_bserrno == 0);
1740 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);
1741 
1742 	/* 2. Write test. */
1743 	spdk_blob_io_write(blob, channel, payload_pattern, 0, pages_per_payload,
1744 			   blob_op_complete, NULL);
1745 	poll_threads();
1746 	CU_ASSERT(g_bserrno == 0);
1747 
1748 	memset(payload_read, 0xFF, payload_size);
1749 	_blob_io_read_no_split(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
1750 	poll_threads();
1751 	CU_ASSERT(g_bserrno == 0);
1752 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);
1753 
1754 	spdk_bs_free_io_channel(channel);
1755 	poll_threads();
1756 
1757 	g_blob = NULL;
1758 	g_blobid = 0;
1759 
1760 	free(payload_read);
1761 	free(payload_write);
1762 	free(payload_pattern);
1763 
1764 	ut_blob_close_and_delete(bs, blob);
1765 }
1766 
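/*
 * Same as blob_operation_split_rw, but using the vectored readv/writev API
 *  with two iovecs of varying sizes.
 */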
1767 static void
1768 blob_operation_split_rw_iov(void)
1769 {
1770 	struct spdk_blob_store *bs = g_bs;
1771 	struct spdk_blob *blob;
1772 	struct spdk_io_channel *channel;
1773 	struct spdk_blob_opts opts;
1774 	uint64_t cluster_size;
1775 
1776 	uint64_t payload_size;
1777 	uint8_t *payload_read;
1778 	uint8_t *payload_write;
1779 	uint8_t *payload_pattern;
1780 
1781 	uint64_t page_size;
1782 	uint64_t pages_per_cluster;
1783 	uint64_t pages_per_payload;
1784 
1785 	struct iovec iov_read[2];
1786 	struct iovec iov_write[2];
1787 
1788 	uint64_t i, j;
1789 
1790 	cluster_size = spdk_bs_get_cluster_size(bs);
1791 	page_size = spdk_bs_get_page_size(bs);
1792 	pages_per_cluster = cluster_size / page_size;
1793 	pages_per_payload = pages_per_cluster * 5;
1794 	payload_size = cluster_size * 5;
1795 
1796 	payload_read = malloc(payload_size);
1797 	SPDK_CU_ASSERT_FATAL(payload_read != NULL);
1798 
1799 	payload_write = malloc(payload_size);
1800 	SPDK_CU_ASSERT_FATAL(payload_write != NULL);
1801 
1802 	payload_pattern = malloc(payload_size);
1803 	SPDK_CU_ASSERT_FATAL(payload_pattern != NULL);
1804 
1805 	/* Prepare a distinct per-page pattern to write */
1806 	for (i = 0; i < pages_per_payload; i++) {
1807 		for (j = 0; j < page_size / sizeof(uint64_t); j++) {
1808 			uint64_t *tmp;
1809 
1810 			tmp = (uint64_t *)payload_pattern;
1811 			tmp += ((page_size * i) / sizeof(uint64_t)) + j;
1812 			*tmp = i + 1;
1813 		}
1814 	}
1815 
1816 	channel = spdk_bs_alloc_io_channel(bs);
1817 	SPDK_CU_ASSERT_FATAL(channel != NULL);
1818 
1819 	/* Create blob */
1820 	ut_spdk_blob_opts_init(&opts);
1821 	opts.thin_provision = false;
1822 	opts.num_clusters = 5;
1823 
1824 	blob = ut_blob_create_and_open(bs, &opts);
1825 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
1826 
1827 	/* Initial read should return zeroed payload */
1828 	memset(payload_read, 0xFF, payload_size);
1829 	iov_read[0].iov_base = payload_read;
1830 	iov_read[0].iov_len = cluster_size * 3;
1831 	iov_read[1].iov_base = payload_read + cluster_size * 3;
1832 	iov_read[1].iov_len = cluster_size * 2;
1833 	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL);
1834 	poll_threads();
1835 	CU_ASSERT(g_bserrno == 0);
1836 	CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size));
1837 
1838 	/* The first iov fills the whole blob except the last page and the second iov writes
1839 	 *  the last page with a pattern. */
1840 	iov_write[0].iov_base = payload_pattern;
1841 	iov_write[0].iov_len = payload_size - page_size;
1842 	iov_write[1].iov_base = payload_pattern;
1843 	iov_write[1].iov_len = page_size;
1844 	spdk_blob_io_writev(blob, channel, iov_write, 2, 0, pages_per_payload, blob_op_complete, NULL);
1845 	poll_threads();
1846 	CU_ASSERT(g_bserrno == 0);
1847 
1848 	/* Read whole blob and check consistency */
1849 	memset(payload_read, 0xFF, payload_size);
1850 	iov_read[0].iov_base = payload_read;
1851 	iov_read[0].iov_len = cluster_size * 2;
1852 	iov_read[1].iov_base = payload_read + cluster_size * 2;
1853 	iov_read[1].iov_len = cluster_size * 3;
1854 	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL);
1855 	poll_threads();
1856 	CU_ASSERT(g_bserrno == 0);
1857 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size - page_size) == 0);
1858 	CU_ASSERT(memcmp(payload_pattern, payload_read + payload_size - page_size, page_size) == 0);
1859 
1860 	/* The first iov writes only the first page and the second iov fills the whole blob
1861 	 *  except the first page with a pattern. */
1862 	iov_write[0].iov_base = payload_pattern;
1863 	iov_write[0].iov_len = page_size;
1864 	iov_write[1].iov_base = payload_pattern;
1865 	iov_write[1].iov_len = payload_size - page_size;
1866 	spdk_blob_io_writev(blob, channel, iov_write, 2, 0, pages_per_payload, blob_op_complete, NULL);
1867 	poll_threads();
1868 	CU_ASSERT(g_bserrno == 0);
1869 
1870 	/* Read whole blob and check consistency */
1871 	memset(payload_read, 0xFF, payload_size);
1872 	iov_read[0].iov_base = payload_read;
1873 	iov_read[0].iov_len = cluster_size * 4;
1874 	iov_read[1].iov_base = payload_read + cluster_size * 4;
1875 	iov_read[1].iov_len = cluster_size;
1876 	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL);
1877 	poll_threads();
1878 	CU_ASSERT(g_bserrno == 0);
1879 	CU_ASSERT(memcmp(payload_pattern, payload_read + page_size, payload_size - page_size) == 0);
1880 	CU_ASSERT(memcmp(payload_pattern, payload_read, page_size) == 0);
1881 
1882 
1883 	/* Fill whole blob with a pattern (5 clusters) */
1884 
1885 	/* 1. Read test. */
1886 	_blob_io_write_no_split(blob, channel, payload_pattern, 0, pages_per_payload,
1887 				blob_op_complete, NULL);
1888 	poll_threads();
1889 	CU_ASSERT(g_bserrno == 0);
1890 
1891 	memset(payload_read, 0xFF, payload_size);
1892 	iov_read[0].iov_base = payload_read;
1893 	iov_read[0].iov_len = cluster_size;
1894 	iov_read[1].iov_base = payload_read + cluster_size;
1895 	iov_read[1].iov_len = cluster_size * 4;
1896 	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, pages_per_payload, blob_op_complete, NULL);
1897 	poll_threads();
1898 	CU_ASSERT(g_bserrno == 0);
1899 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);
1900 
1901 	/* 2. Write test. */
1902 	iov_write[0].iov_base = payload_read;
1903 	iov_write[0].iov_len = cluster_size * 2;
1904 	iov_write[1].iov_base = payload_read + cluster_size * 2;
1905 	iov_write[1].iov_len = cluster_size * 3;
1906 	spdk_blob_io_writev(blob, channel, iov_write, 2, 0, pages_per_payload, blob_op_complete, NULL);
1907 	poll_threads();
1908 	CU_ASSERT(g_bserrno == 0);
1909 
1910 	memset(payload_read, 0xFF, payload_size);
1911 	_blob_io_read_no_split(blob, channel, payload_read, 0, pages_per_payload, blob_op_complete, NULL);
1912 	poll_threads();
1913 	CU_ASSERT(g_bserrno == 0);
1914 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);
1915 
1916 	spdk_bs_free_io_channel(channel);
1917 	poll_threads();
1918 
1919 	g_blob = NULL;
1920 	g_blobid = 0;
1921 
1922 	free(payload_read);
1923 	free(payload_write);
1924 	free(payload_pattern);
1925 
1926 	ut_blob_close_and_delete(bs, blob);
1927 }
1928 
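/*
 * Verify that resizing a blob to 0 unmaps only the clusters that are still
 *  marked as allocated; clusters cleared in the cluster map are left untouched
 *  on the device.
 */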
1929 static void
1930 blob_unmap(void)
1931 {
1932 	struct spdk_blob_store *bs = g_bs;
1933 	struct spdk_blob *blob;
1934 	struct spdk_io_channel *channel;
1935 	struct spdk_blob_opts opts;
1936 	uint8_t payload[4096];
1937 	int i;
1938 
1939 	channel = spdk_bs_alloc_io_channel(bs);
1940 	CU_ASSERT(channel != NULL);
1941 
1942 	ut_spdk_blob_opts_init(&opts);
1943 	opts.num_clusters = 10;
1944 
1945 	blob = ut_blob_create_and_open(bs, &opts);
1946 
1947 	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
1948 	poll_threads();
1949 	CU_ASSERT(g_bserrno == 0);
1950 
1951 	memset(payload, 0, sizeof(payload));
1952 	payload[0] = 0xFF;
1953 
1954 	/*
1955 	 * Set the first byte of every cluster to 0xFF directly in the device buffer.
1956 	 * The first cluster on the device is reserved, so start from cluster number 1.
1957 	 */
1958 	for (i = 1; i < 11; i++) {
1959 		g_dev_buffer[i * SPDK_BLOB_OPTS_CLUSTER_SZ] = 0xFF;
1960 	}
1961 
1962 	/* Confirm that the data is visible through blob reads */
1963 	for (i = 0; i < 10; i++) {
1964 		payload[0] = 0;
1965 		spdk_blob_io_read(blob, channel, &payload, i * SPDK_BLOB_OPTS_CLUSTER_SZ / 4096, 1,
1966 				  blob_op_complete, NULL);
1967 		poll_threads();
1968 		CU_ASSERT(g_bserrno == 0);
1969 		CU_ASSERT(payload[0] == 0xFF);
1970 	}
1971 
1972 	/* Mark some clusters as unallocated */
1973 	blob->active.clusters[1] = 0;
1974 	blob->active.clusters[2] = 0;
1975 	blob->active.clusters[3] = 0;
1976 	blob->active.clusters[6] = 0;
1977 	blob->active.clusters[8] = 0;
1978 
1979 	/* Unmap clusters by resizing to 0 */
1980 	spdk_blob_resize(blob, 0, blob_op_complete, NULL);
1981 	poll_threads();
1982 	CU_ASSERT(g_bserrno == 0);
1983 
1984 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
1985 	poll_threads();
1986 	CU_ASSERT(g_bserrno == 0);
1987 
1988 	/* Confirm that only 'allocated' clusters were unmapped */
1989 	for (i = 1; i < 11; i++) {
1990 		switch (i) {
1991 		case 2:
1992 		case 3:
1993 		case 4:
1994 		case 7:
1995 		case 9:
1996 			CU_ASSERT(g_dev_buffer[i * SPDK_BLOB_OPTS_CLUSTER_SZ] == 0xFF);
1997 			break;
1998 		default:
1999 			CU_ASSERT(g_dev_buffer[i * SPDK_BLOB_OPTS_CLUSTER_SZ] == 0);
2000 			break;
2001 		}
2002 	}
2003 
2004 	spdk_bs_free_io_channel(channel);
2005 	poll_threads();
2006 
2007 	ut_blob_close_and_delete(bs, blob);
2008 }
2009 
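/*
 * Iterate over the blobs in the blobstore: an empty store returns -ENOENT,
 *  a store with one blob returns that blob and then -ENOENT.
 */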
2010 static void
2011 blob_iter(void)
2012 {
2013 	struct spdk_blob_store *bs = g_bs;
2014 	struct spdk_blob *blob;
2015 	spdk_blob_id blobid;
2016 	struct spdk_blob_opts blob_opts;
2017 
2018 	spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL);
2019 	poll_threads();
2020 	CU_ASSERT(g_blob == NULL);
2021 	CU_ASSERT(g_bserrno == -ENOENT);
2022 
2023 	ut_spdk_blob_opts_init(&blob_opts);
2024 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
2025 	poll_threads();
2026 	CU_ASSERT(g_bserrno == 0);
2027 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
2028 	blobid = g_blobid;
2029 
2030 	spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL);
2031 	poll_threads();
2032 	CU_ASSERT(g_blob != NULL);
2033 	CU_ASSERT(g_bserrno == 0);
2034 	blob = g_blob;
2035 	CU_ASSERT(spdk_blob_get_id(blob) == blobid);
2036 
2037 	spdk_bs_iter_next(bs, blob, blob_op_with_handle_complete, NULL);
2038 	poll_threads();
2039 	CU_ASSERT(g_blob == NULL);
2040 	CU_ASSERT(g_bserrno == -ENOENT);
2041 }
2042 
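/*
 * Exercise the public and internal xattr APIs: set/get/remove, md_ro enforcement,
 *  name enumeration, and persistence of an internal xattr across a reload.
 */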
2043 static void
2044 blob_xattr(void)
2045 {
2046 	struct spdk_blob_store *bs = g_bs;
2047 	struct spdk_blob *blob = g_blob;
2048 	spdk_blob_id blobid = spdk_blob_get_id(blob);
2049 	uint64_t length;
2050 	int rc;
2051 	const char *name1, *name2;
2052 	const void *value;
2053 	size_t value_len;
2054 	struct spdk_xattr_names *names;
2055 
2056 	/* Test that set_xattr fails if md_ro flag is set. */
2057 	blob->md_ro = true;
2058 	rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
2059 	CU_ASSERT(rc == -EPERM);
2060 
2061 	blob->md_ro = false;
2062 	rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
2063 	CU_ASSERT(rc == 0);
2064 
2065 	length = 2345;
2066 	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
2067 	CU_ASSERT(rc == 0);
2068 
2069 	/* Overwrite "length" xattr. */
2070 	length = 3456;
2071 	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
2072 	CU_ASSERT(rc == 0);
2073 
2074 	/* get_xattr should still work even if md_ro flag is set. */
2075 	value = NULL;
2076 	blob->md_ro = true;
2077 	rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len);
2078 	CU_ASSERT(rc == 0);
2079 	SPDK_CU_ASSERT_FATAL(value != NULL);
2080 	CU_ASSERT(*(uint64_t *)value == length);
2081 	CU_ASSERT(value_len == 8);
2082 	blob->md_ro = false;
2083 
2084 	rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len);
2085 	CU_ASSERT(rc == -ENOENT);
2086 
2087 	names = NULL;
2088 	rc = spdk_blob_get_xattr_names(blob, &names);
2089 	CU_ASSERT(rc == 0);
2090 	SPDK_CU_ASSERT_FATAL(names != NULL);
2091 	CU_ASSERT(spdk_xattr_names_get_count(names) == 2);
2092 	name1 = spdk_xattr_names_get_name(names, 0);
2093 	SPDK_CU_ASSERT_FATAL(name1 != NULL);
2094 	CU_ASSERT(!strcmp(name1, "name") || !strcmp(name1, "length"));
2095 	name2 = spdk_xattr_names_get_name(names, 1);
2096 	SPDK_CU_ASSERT_FATAL(name2 != NULL);
2097 	CU_ASSERT(!strcmp(name2, "name") || !strcmp(name2, "length"));
2098 	CU_ASSERT(strcmp(name1, name2));
2099 	spdk_xattr_names_free(names);
2100 
2101 	/* Confirm that remove_xattr fails if md_ro is set to true. */
2102 	blob->md_ro = true;
2103 	rc = spdk_blob_remove_xattr(blob, "name");
2104 	CU_ASSERT(rc == -EPERM);
2105 
2106 	blob->md_ro = false;
2107 	rc = spdk_blob_remove_xattr(blob, "name");
2108 	CU_ASSERT(rc == 0);
2109 
2110 	rc = spdk_blob_remove_xattr(blob, "foobar");
2111 	CU_ASSERT(rc == -ENOENT);
2112 
2113 	/* Set internal xattr */
2114 	length = 7898;
2115 	rc = blob_set_xattr(blob, "internal", &length, sizeof(length), true);
2116 	CU_ASSERT(rc == 0);
2117 	rc = blob_get_xattr_value(blob, "internal", &value, &value_len, true);
2118 	CU_ASSERT(rc == 0);
2119 	CU_ASSERT(*(uint64_t *)value == length);
2120 	/* Try to get a public xattr with the same name */
2121 	rc = spdk_blob_get_xattr_value(blob, "internal", &value, &value_len);
2122 	CU_ASSERT(rc != 0);
2123 	rc = blob_get_xattr_value(blob, "internal", &value, &value_len, false);
2124 	CU_ASSERT(rc != 0);
2125 	/* Check if SPDK_BLOB_INTERNAL_XATTR is set */
2126 	CU_ASSERT((blob->invalid_flags & SPDK_BLOB_INTERNAL_XATTR) ==
2127 		  SPDK_BLOB_INTERNAL_XATTR);
2128 
2129 	spdk_blob_close(blob, blob_op_complete, NULL);
2130 	poll_threads();
2131 
2132 	/* Check if xattrs are persisted */
2133 	ut_bs_reload(&bs, NULL);
2134 
2135 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
2136 	poll_threads();
2137 	CU_ASSERT(g_bserrno == 0);
2138 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
2139 	blob = g_blob;
2140 
2141 	rc = blob_get_xattr_value(blob, "internal", &value, &value_len, true);
2142 	CU_ASSERT(rc == 0);
2143 	CU_ASSERT(*(uint64_t *)value == length);
2144 
2145 	/* Try to get the internal xattr through the public call */
2146 	rc = spdk_blob_get_xattr_value(blob, "internal", &value, &value_len);
2147 	CU_ASSERT(rc != 0);
2148 
2149 	rc = blob_remove_xattr(blob, "internal", true);
2150 	CU_ASSERT(rc == 0);
2151 
2152 	CU_ASSERT((blob->invalid_flags & SPDK_BLOB_INTERNAL_XATTR) == 0);
2153 }
2154 
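/*
 * Use a large xattr to force blob metadata onto more than one page and verify
 *  that all metadata pages are released when the blob is deleted.
 */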
2155 static void
2156 blob_parse_md(void)
2157 {
2158 	struct spdk_blob_store *bs = g_bs;
2159 	struct spdk_blob *blob;
2160 	int rc;
2161 	uint32_t used_pages;
2162 	size_t xattr_length;
2163 	char *xattr;
2164 
2165 	used_pages = spdk_bit_array_count_set(bs->used_md_pages);
2166 	blob = ut_blob_create_and_open(bs, NULL);
2167 
2168 	/* Create a large xattr to force more than 1 page of metadata. */
2169 	xattr_length = SPDK_BS_MAX_DESC_SIZE - sizeof(struct spdk_blob_md_descriptor_xattr) -
2170 		       strlen("large_xattr");
2171 	xattr = calloc(xattr_length, sizeof(char));
2172 	SPDK_CU_ASSERT_FATAL(xattr != NULL);
2173 	rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
2174 	free(xattr);
2175 	SPDK_CU_ASSERT_FATAL(rc == 0);
2176 
2177 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
2178 	poll_threads();
2179 
2180 	/* Delete the blob and verify that the number of used md pages returns to what it was before its creation. */
2181 	SPDK_CU_ASSERT_FATAL(used_pages != spdk_bit_array_count_set(bs->used_md_pages));
2182 	ut_blob_close_and_delete(bs, blob);
2183 	SPDK_CU_ASSERT_FATAL(used_pages == spdk_bit_array_count_set(bs->used_md_pages));
2184 }
2185 
2186 static void
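/*
 * Initialize a blobstore, create a blob with xattrs, then exercise various load
 *  paths: invalid options, mismatched device sizes and a size == 0 super block
 *  (compatibility mode).
 */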
2187 bs_load(void)
2188 {
2189 	struct spdk_blob_store *bs;
2190 	struct spdk_bs_dev *dev;
2191 	spdk_blob_id blobid;
2192 	struct spdk_blob *blob;
2193 	struct spdk_bs_super_block *super_block;
2194 	uint64_t length;
2195 	int rc;
2196 	const void *value;
2197 	size_t value_len;
2198 	struct spdk_bs_opts opts;
2199 	struct spdk_blob_opts blob_opts;
2200 
2201 	dev = init_dev();
2202 	spdk_bs_opts_init(&opts, sizeof(opts));
2203 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2204 
2205 	/* Initialize a new blob store */
2206 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2207 	poll_threads();
2208 	CU_ASSERT(g_bserrno == 0);
2209 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2210 	bs = g_bs;
2211 
2212 	/* Try to open a blobid that does not exist */
2213 	spdk_bs_open_blob(bs, 0, blob_op_with_handle_complete, NULL);
2214 	poll_threads();
2215 	CU_ASSERT(g_bserrno == -ENOENT);
2216 	CU_ASSERT(g_blob == NULL);
2217 
2218 	/* Create a blob */
2219 	blob = ut_blob_create_and_open(bs, NULL);
2220 	blobid = spdk_blob_get_id(blob);
2221 
2222 	/* Try again to open valid blob but without the upper bit set */
2223 	spdk_bs_open_blob(bs, blobid & 0xFFFFFFFF, blob_op_with_handle_complete, NULL);
2224 	poll_threads();
2225 	CU_ASSERT(g_bserrno == -ENOENT);
2226 	CU_ASSERT(g_blob == NULL);
2227 
2228 	/* Set some xattrs */
2229 	rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
2230 	CU_ASSERT(rc == 0);
2231 
2232 	length = 2345;
2233 	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
2234 	CU_ASSERT(rc == 0);
2235 
2236 	/* Resize the blob */
2237 	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
2238 	poll_threads();
2239 	CU_ASSERT(g_bserrno == 0);
2240 
2241 	spdk_blob_close(blob, blob_op_complete, NULL);
2242 	poll_threads();
2243 	CU_ASSERT(g_bserrno == 0);
2244 	blob = NULL;
2245 	g_blob = NULL;
2246 	g_blobid = SPDK_BLOBID_INVALID;
2247 
2248 	/* Unload the blob store */
2249 	spdk_bs_unload(bs, bs_op_complete, NULL);
2250 	poll_threads();
2251 	CU_ASSERT(g_bserrno == 0);
2252 	g_bs = NULL;
2253 	g_blob = NULL;
2254 	g_blobid = 0;
2255 
2256 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
2257 	CU_ASSERT(super_block->clean == 1);
2258 
2259 	/* Load should fail for device with an unsupported blocklen */
2260 	dev = init_dev();
2261 	dev->blocklen = SPDK_BS_PAGE_SIZE * 2;
2262 	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
2263 	poll_threads();
2264 	CU_ASSERT(g_bserrno == -EINVAL);
2265 
2266 	/* Load should fail when max_md_ops is set to zero */
2267 	dev = init_dev();
2268 	spdk_bs_opts_init(&opts, sizeof(opts));
2269 	opts.max_md_ops = 0;
2270 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2271 	poll_threads();
2272 	CU_ASSERT(g_bserrno == -EINVAL);
2273 
2274 	/* Load should fail when max_channel_ops is set to zero */
2275 	dev = init_dev();
2276 	spdk_bs_opts_init(&opts, sizeof(opts));
2277 	opts.max_channel_ops = 0;
2278 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2279 	poll_threads();
2280 	CU_ASSERT(g_bserrno == -EINVAL);
2281 
2282 	/* Load an existing blob store */
2283 	dev = init_dev();
2284 	spdk_bs_opts_init(&opts, sizeof(opts));
2285 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2286 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2287 	poll_threads();
2288 	CU_ASSERT(g_bserrno == 0);
2289 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2290 	bs = g_bs;
2291 
2292 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
2293 	CU_ASSERT(super_block->clean == 1);
2294 	CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen);
2295 
2296 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
2297 	poll_threads();
2298 	CU_ASSERT(g_bserrno == 0);
2299 	CU_ASSERT(g_blob != NULL);
2300 	blob = g_blob;
2301 
2302 	/* Verify that blobstore is marked dirty after first metadata sync */
2303 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
2304 	CU_ASSERT(super_block->clean == 1);
2305 
2306 	/* Get the xattrs */
2307 	value = NULL;
2308 	rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len);
2309 	CU_ASSERT(rc == 0);
2310 	SPDK_CU_ASSERT_FATAL(value != NULL);
2311 	CU_ASSERT(*(uint64_t *)value == length);
2312 	CU_ASSERT(value_len == 8);
2313 
2314 	rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len);
2315 	CU_ASSERT(rc == -ENOENT);
2316 
2317 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
2318 
2319 	spdk_blob_close(blob, blob_op_complete, NULL);
2320 	poll_threads();
2321 	CU_ASSERT(g_bserrno == 0);
2322 	blob = NULL;
2323 	g_blob = NULL;
2324 
2325 	spdk_bs_unload(bs, bs_op_complete, NULL);
2326 	poll_threads();
2327 	CU_ASSERT(g_bserrno == 0);
2328 	g_bs = NULL;
2329 
2330 	/* Load should fail: bdev size < saved size */
2331 	dev = init_dev();
2332 	dev->blockcnt /= 2;
2333 
2334 	spdk_bs_opts_init(&opts, sizeof(opts));
2335 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2336 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2337 	poll_threads();
2338 
2339 	CU_ASSERT(g_bserrno == -EILSEQ);
2340 
2341 	/* Load should succeed: bdev size > saved size */
2342 	dev = init_dev();
2343 	dev->blockcnt *= 4;
2344 
2345 	spdk_bs_opts_init(&opts, sizeof(opts));
2346 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2347 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2348 	poll_threads();
2349 	CU_ASSERT(g_bserrno == 0);
2350 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2351 	bs = g_bs;
2352 
2353 	CU_ASSERT(g_bserrno == 0);
2354 	spdk_bs_unload(bs, bs_op_complete, NULL);
2355 	poll_threads();
2356 
2357 
2358 	/* Test compatibility mode */
2359 
2360 	dev = init_dev();
2361 	super_block->size = 0;
2362 	super_block->crc = blob_md_page_calc_crc(super_block);
2363 
2364 	spdk_bs_opts_init(&opts, sizeof(opts));
2365 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2366 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2367 	poll_threads();
2368 	CU_ASSERT(g_bserrno == 0);
2369 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2370 	bs = g_bs;
2371 
2372 	/* Create a blob */
2373 	ut_spdk_blob_opts_init(&blob_opts);
2374 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
2375 	poll_threads();
2376 	CU_ASSERT(g_bserrno == 0);
2377 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
2378 
2379 	/* Blobstore should update the size in super_block */
2380 	CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen);
2381 	CU_ASSERT(super_block->clean == 0);
2382 
2383 	spdk_bs_unload(bs, bs_op_complete, NULL);
2384 	poll_threads();
2385 	CU_ASSERT(g_bserrno == 0);
2386 	CU_ASSERT(super_block->clean == 1);
2387 	g_bs = NULL;
2388 
2389 }
2390 
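/*
 * Verify SNAPSHOT_PENDING_REMOVAL handling on load: the xattr is cleared while a
 *  clone still references the snapshot, and the snapshot is deleted once the
 *  clone's BLOB_SNAPSHOT reference is removed.
 */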
2391 static void
2392 bs_load_pending_removal(void)
2393 {
2394 	struct spdk_blob_store *bs = g_bs;
2395 	struct spdk_blob_opts opts;
2396 	struct spdk_blob *blob, *snapshot;
2397 	spdk_blob_id blobid, snapshotid;
2398 	const void *value;
2399 	size_t value_len;
2400 	int rc;
2401 
2402 	/* Create blob */
2403 	ut_spdk_blob_opts_init(&opts);
2404 	opts.num_clusters = 10;
2405 
2406 	blob = ut_blob_create_and_open(bs, &opts);
2407 	blobid = spdk_blob_get_id(blob);
2408 
2409 	/* Create snapshot */
2410 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
2411 	poll_threads();
2412 	CU_ASSERT(g_bserrno == 0);
2413 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
2414 	snapshotid = g_blobid;
2415 
2416 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
2417 	poll_threads();
2418 	CU_ASSERT(g_bserrno == 0);
2419 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
2420 	snapshot = g_blob;
2421 
2422 	/* Set SNAPSHOT_PENDING_REMOVAL xattr */
2423 	snapshot->md_ro = false;
2424 	rc = blob_set_xattr(snapshot, SNAPSHOT_PENDING_REMOVAL, &blobid, sizeof(spdk_blob_id), true);
2425 	CU_ASSERT(rc == 0);
2426 	snapshot->md_ro = true;
2427 
2428 	spdk_blob_close(snapshot, blob_op_complete, NULL);
2429 	poll_threads();
2430 	CU_ASSERT(g_bserrno == 0);
2431 
2432 	spdk_blob_close(blob, blob_op_complete, NULL);
2433 	poll_threads();
2434 	CU_ASSERT(g_bserrno == 0);
2435 
2436 	/* Reload blobstore */
2437 	ut_bs_reload(&bs, NULL);
2438 
2439 	/* Snapshot should not be removed as blob is still pointing to it */
2440 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
2441 	poll_threads();
2442 	CU_ASSERT(g_bserrno == 0);
2443 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
2444 	snapshot = g_blob;
2445 
2446 	/* SNAPSHOT_PENDING_REMOVAL xattr should be removed during load */
2447 	rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_PENDING_REMOVAL, &value, &value_len);
2448 	CU_ASSERT(rc != 0);
2449 
2450 	/* Set SNAPSHOT_PENDING_REMOVAL xattr again */
2451 	snapshot->md_ro = false;
2452 	rc = blob_set_xattr(snapshot, SNAPSHOT_PENDING_REMOVAL, &blobid, sizeof(spdk_blob_id), true);
2453 	CU_ASSERT(rc == 0);
2454 	snapshot->md_ro = true;
2455 
2456 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
2457 	poll_threads();
2458 	CU_ASSERT(g_bserrno == 0);
2459 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
2460 	blob = g_blob;
2461 
2462 	/* Remove parent_id from blob by removing BLOB_SNAPSHOT xattr */
2463 	blob_remove_xattr(blob, BLOB_SNAPSHOT, true);
2464 
2465 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
2466 	poll_threads();
2467 	CU_ASSERT(g_bserrno == 0);
2468 
2469 	spdk_blob_close(snapshot, blob_op_complete, NULL);
2470 	poll_threads();
2471 	CU_ASSERT(g_bserrno == 0);
2472 
2473 	spdk_blob_close(blob, blob_op_complete, NULL);
2474 	poll_threads();
2475 	CU_ASSERT(g_bserrno == 0);
2476 
2477 	/* Reload blobstore */
2478 	ut_bs_reload(&bs, NULL);
2479 
2480 	/* Snapshot should be removed as blob is not pointing to it anymore */
2481 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
2482 	poll_threads();
2483 	CU_ASSERT(g_bserrno != 0);
2484 }
2485 
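/*
 * Initialize a blobstore with a non-default cluster size and verify that the
 *  cluster size and total cluster count persist across unload and load.
 */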
2486 static void
2487 bs_load_custom_cluster_size(void)
2488 {
2489 	struct spdk_blob_store *bs;
2490 	struct spdk_bs_dev *dev;
2491 	struct spdk_bs_super_block *super_block;
2492 	struct spdk_bs_opts opts;
2493 	uint32_t custom_cluster_size = 4194304; /* 4MiB */
2494 	uint32_t cluster_sz;
2495 	uint64_t total_clusters;
2496 
2497 	dev = init_dev();
2498 	spdk_bs_opts_init(&opts, sizeof(opts));
2499 	opts.cluster_sz = custom_cluster_size;
2500 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2501 
2502 	/* Initialize a new blob store */
2503 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2504 	poll_threads();
2505 	CU_ASSERT(g_bserrno == 0);
2506 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2507 	bs = g_bs;
2508 	cluster_sz = bs->cluster_sz;
2509 	total_clusters = bs->total_clusters;
2510 
2511 	/* Unload the blob store */
2512 	spdk_bs_unload(bs, bs_op_complete, NULL);
2513 	poll_threads();
2514 	CU_ASSERT(g_bserrno == 0);
2515 	g_bs = NULL;
2516 	g_blob = NULL;
2517 	g_blobid = 0;
2518 
2519 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
2520 	CU_ASSERT(super_block->clean == 1);
2521 
2522 	/* Load an existing blob store */
2523 	dev = init_dev();
2524 	spdk_bs_opts_init(&opts, sizeof(opts));
2525 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2526 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2527 	poll_threads();
2528 	CU_ASSERT(g_bserrno == 0);
2529 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2530 	bs = g_bs;
2531 	/* Compare cluster size and count to the values recorded after initialization */
2532 	CU_ASSERT(cluster_sz == bs->cluster_sz);
2533 	CU_ASSERT(total_clusters == bs->total_clusters);
2534 
2535 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
2536 	CU_ASSERT(super_block->clean == 1);
2537 	CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen);
2538 
2539 	spdk_bs_unload(bs, bs_op_complete, NULL);
2540 	poll_threads();
2541 	CU_ASSERT(g_bserrno == 0);
2542 	CU_ASSERT(super_block->clean == 1);
2543 	g_bs = NULL;
2544 }
2545 
2546 static void
2547 bs_load_after_failed_grow(void)
2548 {
2549 	struct spdk_blob_store *bs;
2550 	struct spdk_bs_dev *dev;
2551 	struct spdk_bs_super_block *super_block;
2552 	struct spdk_bs_opts opts;
2553 	struct spdk_bs_md_mask *mask;
2554 	struct spdk_blob_opts blob_opts;
2555 	struct spdk_blob *blob, *snapshot;
2556 	spdk_blob_id blobid, snapshotid;
2557 	uint64_t total_data_clusters;
2558 
2559 	dev = init_dev();
2560 	spdk_bs_opts_init(&opts, sizeof(opts));
2561 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2562 	/*
2563 	 * The bdev_size is 64M, cluster_sz is 1M, so there are 64 clusters. The
2564 	 * blobstore will create 64 md pages by default. We set num_md_pages to 128,
2565 	 * so the blobstore can grow to double its size.
2566 	 */
2567 	opts.num_md_pages = 128;
2568 
2569 	/* Initialize a new blob store */
2570 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2571 	poll_threads();
2572 	CU_ASSERT(g_bserrno == 0);
2573 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2574 	bs = g_bs;
2575 
2576 	/* Create blob */
2577 	ut_spdk_blob_opts_init(&blob_opts);
2578 	blob_opts.num_clusters = 10;
2579 
2580 	blob = ut_blob_create_and_open(bs, &blob_opts);
2581 	blobid = spdk_blob_get_id(blob);
2582 
2583 	/* Create snapshot */
2584 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
2585 	poll_threads();
2586 	CU_ASSERT(g_bserrno == 0);
2587 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
2588 	snapshotid = g_blobid;
2589 
2590 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
2591 	poll_threads();
2592 	CU_ASSERT(g_bserrno == 0);
2593 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
2594 	snapshot = g_blob;
2595 
2596 	spdk_blob_close(snapshot, blob_op_complete, NULL);
2597 	poll_threads();
2598 	CU_ASSERT(g_bserrno == 0);
2599 
2600 	spdk_blob_close(blob, blob_op_complete, NULL);
2601 	poll_threads();
2602 	CU_ASSERT(g_bserrno == 0);
2603 
2604 	total_data_clusters = bs->total_data_clusters;
2605 	CU_ASSERT(bs->num_free_clusters + 10 == total_data_clusters);
2606 
2607 	/* Unload the blob store */
2608 	spdk_bs_unload(bs, bs_op_complete, NULL);
2609 	poll_threads();
2610 	CU_ASSERT(g_bserrno == 0);
2611 	g_bs = NULL;
2612 	g_blob = NULL;
2613 	g_blobid = 0;
2614 
2615 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
2616 	CU_ASSERT(super_block->clean == 1);
2617 
2618 	mask = (struct spdk_bs_md_mask *)(g_dev_buffer + super_block->used_cluster_mask_start * 4096);
2619 	CU_ASSERT(mask->type == SPDK_MD_MASK_TYPE_USED_CLUSTERS);
2620 	CU_ASSERT(mask->length == super_block->size / super_block->cluster_size);
2621 
2622 	/*
2623 	 * We change the mask->length to emulate this scenario: A spdk_bs_grow failed after it changed
2624 	 * the used_cluster bitmap length, but it didn't change the super block yet.
2625 	 */
2626 	mask->length *= 2;
2627 
2628 	/* Load an existing blob store */
2629 	dev = init_dev();
2630 	dev->blockcnt *= 2;
2631 	spdk_bs_opts_init(&opts, sizeof(opts));
2632 	opts.clear_method = BS_CLEAR_WITH_NONE;
2633 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2634 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2635 	poll_threads();
2636 	CU_ASSERT(g_bserrno == 0);
2637 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2638 	bs = g_bs;
2639 
2640 	/* Check the capacity is the same as before */
2641 	CU_ASSERT(bs->total_data_clusters == total_data_clusters);
2642 	CU_ASSERT(bs->num_free_clusters + 10 == total_data_clusters);
2643 
2644 	/* Check the blob and the snapshot are still available */
2645 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
2646 	poll_threads();
2647 	CU_ASSERT(g_bserrno == 0);
2648 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
2649 	blob = g_blob;
2650 
2651 	spdk_blob_close(blob, blob_op_complete, NULL);
2652 	poll_threads();
2653 	CU_ASSERT(g_bserrno == 0);
2654 
2655 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
2656 	poll_threads();
2657 	CU_ASSERT(g_bserrno == 0);
2658 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
2659 	snapshot = g_blob;
2660 
2661 	spdk_blob_close(snapshot, blob_op_complete, NULL);
2662 	poll_threads();
2663 	CU_ASSERT(g_bserrno == 0);
2664 
2665 	spdk_bs_unload(bs, bs_op_complete, NULL);
2666 	poll_threads();
2667 	CU_ASSERT(g_bserrno == 0);
2668 	CU_ASSERT(super_block->clean == 1);
2669 	g_bs = NULL;
2670 }
2671 
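/*
 * Verify bstype matching on load: a non-matching type fails while an empty
 *  (zeroed) type loads successfully.
 */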
2672 static void
2673 bs_type(void)
2674 {
2675 	struct spdk_blob_store *bs;
2676 	struct spdk_bs_dev *dev;
2677 	struct spdk_bs_opts opts;
2678 
2679 	dev = init_dev();
2680 	spdk_bs_opts_init(&opts, sizeof(opts));
2681 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2682 
2683 	/* Initialize a new blob store */
2684 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2685 	poll_threads();
2686 	CU_ASSERT(g_bserrno == 0);
2687 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2688 	bs = g_bs;
2689 
2690 	/* Unload the blob store */
2691 	spdk_bs_unload(bs, bs_op_complete, NULL);
2692 	poll_threads();
2693 	CU_ASSERT(g_bserrno == 0);
2694 	g_bs = NULL;
2695 	g_blob = NULL;
2696 	g_blobid = 0;
2697 
2698 	/* Load with a non-existent blobstore type */
2699 	dev = init_dev();
2700 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "NONEXISTING");
2701 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2702 	poll_threads();
2703 	CU_ASSERT(g_bserrno != 0);
2704 
2705 	/* Load with empty blobstore type */
2706 	dev = init_dev();
2707 	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
2708 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2709 	poll_threads();
2710 	CU_ASSERT(g_bserrno == 0);
2711 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2712 	bs = g_bs;
2713 
2714 	spdk_bs_unload(bs, bs_op_complete, NULL);
2715 	poll_threads();
2716 	CU_ASSERT(g_bserrno == 0);
2717 	g_bs = NULL;
2718 
2719 	/* Initialize a new blob store with empty bstype */
2720 	dev = init_dev();
2721 	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
2722 	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
2723 	poll_threads();
2724 	CU_ASSERT(g_bserrno == 0);
2725 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2726 	bs = g_bs;
2727 
2728 	spdk_bs_unload(bs, bs_op_complete, NULL);
2729 	poll_threads();
2730 	CU_ASSERT(g_bserrno == 0);
2731 	g_bs = NULL;
2732 
2733 	/* Load with a non-existent blobstore type */
2734 	dev = init_dev();
2735 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "NONEXISTING");
2736 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2737 	poll_threads();
2738 	CU_ASSERT(g_bserrno != 0);
2739 
2740 	/* Load with empty blobstore type */
2741 	dev = init_dev();
2742 	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
2743 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2744 	poll_threads();
2745 	CU_ASSERT(g_bserrno == 0);
2746 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2747 	bs = g_bs;
2748 
2749 	spdk_bs_unload(bs, bs_op_complete, NULL);
2750 	poll_threads();
2751 	CU_ASSERT(g_bserrno == 0);
2752 	g_bs = NULL;
2753 }
2754 
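/*
 * Verify super block version handling: loading a version newer than supported
 *  fails, while a hand-crafted version 1 super block still loads.
 */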
2755 static void
2756 bs_super_block(void)
2757 {
2758 	struct spdk_blob_store *bs;
2759 	struct spdk_bs_dev *dev;
2760 	struct spdk_bs_super_block *super_block;
2761 	struct spdk_bs_opts opts;
2762 	struct spdk_bs_super_block_ver1 super_block_v1;
2763 
2764 	dev = init_dev();
2765 	spdk_bs_opts_init(&opts, sizeof(opts));
2766 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2767 
2768 	/* Initialize a new blob store */
2769 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2770 	poll_threads();
2771 	CU_ASSERT(g_bserrno == 0);
2772 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2773 	bs = g_bs;
2774 
2775 	/* Unload the blob store */
2776 	spdk_bs_unload(bs, bs_op_complete, NULL);
2777 	poll_threads();
2778 	CU_ASSERT(g_bserrno == 0);
2779 	g_bs = NULL;
2780 	g_blob = NULL;
2781 	g_blobid = 0;
2782 
2783 	/* Load an existing blob store with version newer than supported */
2784 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
2785 	super_block->version++;
2786 
2787 	dev = init_dev();
2788 	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
2789 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2790 	poll_threads();
2791 	CU_ASSERT(g_bserrno != 0);
2792 
2793 	/* Create a new blob store with super block version 1 */
2794 	dev = init_dev();
2795 	super_block_v1.version = 1;
2796 	memcpy(super_block_v1.signature, "SPDKBLOB", sizeof(super_block_v1.signature));
2797 	super_block_v1.length = 0x1000;
2798 	super_block_v1.clean = 1;
2799 	super_block_v1.super_blob = 0xFFFFFFFFFFFFFFFF;
2800 	super_block_v1.cluster_size = 0x100000;
2801 	super_block_v1.used_page_mask_start = 0x01;
2802 	super_block_v1.used_page_mask_len = 0x01;
2803 	super_block_v1.used_cluster_mask_start = 0x02;
2804 	super_block_v1.used_cluster_mask_len = 0x01;
2805 	super_block_v1.md_start = 0x03;
2806 	super_block_v1.md_len = 0x40;
2807 	memset(super_block_v1.reserved, 0, 4036);
2808 	super_block_v1.crc = blob_md_page_calc_crc(&super_block_v1);
2809 	memcpy(g_dev_buffer, &super_block_v1, sizeof(struct spdk_bs_super_block_ver1));
2810 
2811 	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
2812 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2813 	poll_threads();
2814 	CU_ASSERT(g_bserrno == 0);
2815 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2816 	bs = g_bs;
2817 
2818 	spdk_bs_unload(bs, bs_op_complete, NULL);
2819 	poll_threads();
2820 	CU_ASSERT(g_bserrno == 0);
2821 	g_bs = NULL;
2822 }
2823 
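/*
 * Load a hand-crafted super block that is marked not clean and verify that the
 *  free cluster count is recovered from the metadata region.
 */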
2824 static void
2825 bs_test_recover_cluster_count(void)
2826 {
2827 	struct spdk_blob_store *bs;
2828 	struct spdk_bs_dev *dev;
2829 	struct spdk_bs_super_block super_block;
2830 	struct spdk_bs_opts opts;
2831 
2832 	dev = init_dev();
2833 	spdk_bs_opts_init(&opts, sizeof(opts));
2834 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2835 
2836 	super_block.version = 3;
2837 	memcpy(super_block.signature, "SPDKBLOB", sizeof(super_block.signature));
2838 	super_block.length = 0x1000;
2839 	super_block.clean = 0;
2840 	super_block.super_blob = 0xFFFFFFFFFFFFFFFF;
2841 	super_block.cluster_size = 4096;
2842 	super_block.used_page_mask_start = 0x01;
2843 	super_block.used_page_mask_len = 0x01;
2844 	super_block.used_cluster_mask_start = 0x02;
2845 	super_block.used_cluster_mask_len = 0x01;
2846 	super_block.used_blobid_mask_start = 0x03;
2847 	super_block.used_blobid_mask_len = 0x01;
2848 	super_block.md_start = 0x04;
2849 	super_block.md_len = 0x40;
2850 	memset(super_block.bstype.bstype, 0, sizeof(super_block.bstype.bstype));
2851 	super_block.size = dev->blockcnt * dev->blocklen;
2852 	super_block.io_unit_size = 0x1000;
2853 	memset(super_block.reserved, 0, 4000);
2854 	super_block.crc = blob_md_page_calc_crc(&super_block);
2855 	memcpy(g_dev_buffer, &super_block, sizeof(struct spdk_bs_super_block));
2856 
2857 	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
2858 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2859 	poll_threads();
2860 	CU_ASSERT(g_bserrno == 0);
2861 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2862 	bs = g_bs;
2863 	CU_ASSERT(bs->num_free_clusters == bs->total_clusters - (super_block.md_start +
2864 			super_block.md_len));
2865 
2866 	spdk_bs_unload(bs, bs_op_complete, NULL);
2867 	poll_threads();
2868 	CU_ASSERT(g_bserrno == 0);
2869 	g_bs = NULL;
2870 }
2871 
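/*
 * Verify spdk_bs_grow: after growing the underlying device, the super block size
 *  and the used_cluster mask length reflect the new device size.
 */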
2872 static void
2873 bs_test_grow(void)
2874 {
2875 	struct spdk_blob_store *bs;
2876 	struct spdk_bs_dev *dev;
2877 	struct spdk_bs_super_block super_block;
2878 	struct spdk_bs_opts opts;
2879 	struct spdk_bs_md_mask mask;
2880 	uint64_t bdev_size;
2881 
2882 	dev = init_dev();
2883 	bdev_size = dev->blockcnt * dev->blocklen;
2884 	spdk_bs_opts_init(&opts, sizeof(opts));
2885 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2886 	poll_threads();
2887 	CU_ASSERT(g_bserrno == 0);
2888 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2889 	bs = g_bs;
2890 
2891 	spdk_bs_unload(bs, bs_op_complete, NULL);
2892 	poll_threads();
2893 	CU_ASSERT(g_bserrno == 0);
2894 	g_bs = NULL;
2895 
2896 	/*
2897 	 * To make sure all the metadata is written to the disk,
2898 	 * we check g_dev_buffer after spdk_bs_unload.
2899 	 */
2900 	memcpy(&super_block, g_dev_buffer, sizeof(struct spdk_bs_super_block));
2901 	CU_ASSERT(super_block.size == bdev_size);
2902 
2903 	/*
2904 	 * Make sure the used_cluster mask is correct.
2905 	 */
2906 	memcpy(&mask, g_dev_buffer + super_block.used_cluster_mask_start * 4096,
2907 	       sizeof(struct spdk_bs_md_mask));
2908 	CU_ASSERT(mask.type == SPDK_MD_MASK_TYPE_USED_CLUSTERS);
2909 	CU_ASSERT(mask.length == bdev_size / (1 * 1024 * 1024));
2910 
2911 	/*
2912 	 * The default dev size is 64M, here we set the dev size to 128M,
2913 	 * then the blobstore will adjust the metadata according to the new size.
2914 	 * The dev size is larger than the g_dev_buffer size, so we set clear_method
2915 	 * to NONE, or the blobstore will try to clear the dev and will write beyond
2916 	 * the end of g_dev_buffer.
2917 	 */
2918 	dev = init_dev();
2919 	dev->blockcnt = (128L * 1024L * 1024L) / dev->blocklen;
2920 	bdev_size = dev->blockcnt * dev->blocklen;
2921 	spdk_bs_opts_init(&opts, sizeof(opts));
2922 	opts.clear_method = BS_CLEAR_WITH_NONE;
2923 	spdk_bs_grow(dev, &opts, bs_op_with_handle_complete, NULL);
2924 	poll_threads();
2925 	CU_ASSERT(g_bserrno == 0);
2926 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2927 	bs = g_bs;
2928 
2929 	/*
2930 	 * After spdk_bs_grow, all metadata are updated to the disk.
2931 	 * So we can check g_dev_buffer now.
2932 	 */
2933 	memcpy(&super_block, g_dev_buffer, sizeof(struct spdk_bs_super_block));
2934 	CU_ASSERT(super_block.size == bdev_size);
2935 
2936 	/*
2937 	 * Make sure the used_cluster mask has been updated according to the bdev size
2938 	 */
2939 	memcpy(&mask, g_dev_buffer + super_block.used_cluster_mask_start * 4096,
2940 	       sizeof(struct spdk_bs_md_mask));
2941 	CU_ASSERT(mask.type == SPDK_MD_MASK_TYPE_USED_CLUSTERS);
2942 	CU_ASSERT(mask.length == bdev_size / (1 * 1024 * 1024));
2943 
2944 	spdk_bs_unload(bs, bs_op_complete, NULL);
2945 	poll_threads();
2946 	CU_ASSERT(g_bserrno == 0);
2947 	g_bs = NULL;
2948 }
2949 
2950 /*
2951  * Unloading the blobstore should fail while a blob is still open.
2952  */
2953 static void
2954 bs_unload(void)
2955 {
2956 	struct spdk_blob_store *bs = g_bs;
2957 	struct spdk_blob *blob;
2958 
2959 	/* Create a blob and open it. */
2960 	blob = ut_blob_create_and_open(bs, NULL);
2961 
2962 	/* Try to unload blobstore, should fail with open blob */
2963 	g_bserrno = -1;
2964 	spdk_bs_unload(bs, bs_op_complete, NULL);
2965 	poll_threads();
2966 	CU_ASSERT(g_bserrno == -EBUSY);
2967 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2968 
2969 	/* Close the blob, then successfully unload blobstore */
2970 	g_bserrno = -1;
2971 	spdk_blob_close(blob, blob_op_complete, NULL);
2972 	poll_threads();
2973 	CU_ASSERT(g_bserrno == 0);
2974 }
2975 
2976 /*
2977  * Create a blobstore with a cluster size different than the default, and ensure it is
2978  *  persisted.
2979  */
2980 static void
2981 bs_cluster_sz(void)
2982 {
2983 	struct spdk_blob_store *bs;
2984 	struct spdk_bs_dev *dev;
2985 	struct spdk_bs_opts opts;
2986 	uint32_t cluster_sz;
2987 
2988 	/* Set cluster size to zero */
2989 	dev = init_dev();
2990 	spdk_bs_opts_init(&opts, sizeof(opts));
2991 	opts.cluster_sz = 0;
2992 
2993 	/* Initialize a new blob store */
2994 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2995 	poll_threads();
2996 	CU_ASSERT(g_bserrno == -EINVAL);
2997 	SPDK_CU_ASSERT_FATAL(g_bs == NULL);
2998 
2999 	/*
3000 	 * Set cluster size to the blobstore page size;
3001 	 * to work, it is required to be at least twice the blobstore page size.
3002 	 */
3003 	dev = init_dev();
3004 	spdk_bs_opts_init(&opts, sizeof(opts));
3005 	opts.cluster_sz = SPDK_BS_PAGE_SIZE;
3006 
3007 	/* Initialize a new blob store */
3008 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
3009 	poll_threads();
3010 	CU_ASSERT(g_bserrno == -ENOMEM);
3011 	SPDK_CU_ASSERT_FATAL(g_bs == NULL);
3012 
3013 	/*
3014 	 * Set cluster size to lower than the page size;
3015 	 * to work, it is required to be at least twice the blobstore page size.
3016 	 */
3017 	dev = init_dev();
3018 	spdk_bs_opts_init(&opts, sizeof(opts));
3019 	opts.cluster_sz = SPDK_BS_PAGE_SIZE - 1;
3020 
3021 	/* Initialize a new blob store */
3022 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
3023 	poll_threads();
3024 	CU_ASSERT(g_bserrno == -EINVAL);
3025 	SPDK_CU_ASSERT_FATAL(g_bs == NULL);
3026 
3027 	/* Set cluster size to twice the default */
3028 	dev = init_dev();
3029 	spdk_bs_opts_init(&opts, sizeof(opts));
3030 	opts.cluster_sz *= 2;
3031 	cluster_sz = opts.cluster_sz;
3032 
3033 	/* Initialize a new blob store */
3034 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
3035 	poll_threads();
3036 	CU_ASSERT(g_bserrno == 0);
3037 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3038 	bs = g_bs;
3039 
3040 	CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz);
3041 
3042 	ut_bs_reload(&bs, &opts);
3043 
3044 	CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz);
3045 
3046 	spdk_bs_unload(bs, bs_op_complete, NULL);
3047 	poll_threads();
3048 	CU_ASSERT(g_bserrno == 0);
3049 	g_bs = NULL;
3050 }
3051 
3052 /*
3053  * Create a blobstore, reload it and ensure total usable cluster count
3054  *  stays the same.
3055  */
3056 static void
3057 bs_usable_clusters(void)
3058 {
3059 	struct spdk_blob_store *bs = g_bs;
3060 	struct spdk_blob *blob;
3061 	uint32_t clusters;
3062 	int i;
3063 
3064 
3065 	clusters = spdk_bs_total_data_cluster_count(bs);
3066 
3067 	ut_bs_reload(&bs, NULL);
3068 
3069 	CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters);
3070 
3071 	/* Create and resize blobs to make sure that the usable cluster count won't change */
3072 	for (i = 0; i < 4; i++) {
3073 		g_bserrno = -1;
3074 		g_blobid = SPDK_BLOBID_INVALID;
3075 		blob = ut_blob_create_and_open(bs, NULL);
3076 
3077 		spdk_blob_resize(blob, 10, blob_op_complete, NULL);
3078 		poll_threads();
3079 		CU_ASSERT(g_bserrno == 0);
3080 
3081 		g_bserrno = -1;
3082 		spdk_blob_close(blob, blob_op_complete, NULL);
3083 		poll_threads();
3084 		CU_ASSERT(g_bserrno == 0);
3085 
3086 		CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters);
3087 	}
3088 
3089 	/* Reload the blob store to make sure that nothing changed */
3090 	ut_bs_reload(&bs, NULL);
3091 
3092 	CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters);
3093 }
3094 
3095 /*
3096  * Test resizing of the metadata blob.  This requires creating enough blobs
3097  *  so that one cluster is not enough to fit the metadata for those blobs.
3098  *  To induce this condition to happen more quickly, we reduce the cluster
3099  *  size to 16KB, which means only 4 4KB blob metadata pages can fit.
3100  */
3101 static void
3102 bs_resize_md(void)
3103 {
3104 	struct spdk_blob_store *bs;
3105 	const int CLUSTER_PAGE_COUNT = 4;
3106 	const int NUM_BLOBS = CLUSTER_PAGE_COUNT * 4;
3107 	struct spdk_bs_dev *dev;
3108 	struct spdk_bs_opts opts;
3109 	struct spdk_blob *blob;
3110 	struct spdk_blob_opts blob_opts;
3111 	uint32_t cluster_sz;
3112 	spdk_blob_id blobids[NUM_BLOBS];
3113 	int i;
3114 
3115 
3116 	dev = init_dev();
3117 	spdk_bs_opts_init(&opts, sizeof(opts));
3118 	opts.cluster_sz = CLUSTER_PAGE_COUNT * 4096;
3119 	cluster_sz = opts.cluster_sz;
3120 
3121 	/* Initialize a new blob store */
3122 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
3123 	poll_threads();
3124 	CU_ASSERT(g_bserrno == 0);
3125 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3126 	bs = g_bs;
3127 
3128 	CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz);
3129 
3130 	ut_spdk_blob_opts_init(&blob_opts);
3131 
3132 	for (i = 0; i < NUM_BLOBS; i++) {
3133 		g_bserrno = -1;
3134 		g_blobid = SPDK_BLOBID_INVALID;
3135 		spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
3136 		poll_threads();
3137 		CU_ASSERT(g_bserrno == 0);
3138 		CU_ASSERT(g_blobid !=  SPDK_BLOBID_INVALID);
3139 		blobids[i] = g_blobid;
3140 	}
3141 
3142 	ut_bs_reload(&bs, &opts);
3143 
3144 	CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz);
3145 
3146 	for (i = 0; i < NUM_BLOBS; i++) {
3147 		g_bserrno = -1;
3148 		g_blob = NULL;
3149 		spdk_bs_open_blob(bs, blobids[i], blob_op_with_handle_complete, NULL);
3150 		poll_threads();
3151 		CU_ASSERT(g_bserrno == 0);
3152 		CU_ASSERT(g_blob !=  NULL);
3153 		blob = g_blob;
3154 		g_bserrno = -1;
3155 		spdk_blob_close(blob, blob_op_complete, NULL);
3156 		poll_threads();
3157 		CU_ASSERT(g_bserrno == 0);
3158 	}
3159 
3160 	spdk_bs_unload(bs, bs_op_complete, NULL);
3161 	poll_threads();
3162 	CU_ASSERT(g_bserrno == 0);
3163 	g_bs = NULL;
3164 }
3165 
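/*
 * Destroy a blobstore and verify that a subsequent load of the same device fails.
 */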
3166 static void
3167 bs_destroy(void)
3168 {
3169 	struct spdk_blob_store *bs;
3170 	struct spdk_bs_dev *dev;
3171 
3172 	/* Initialize a new blob store */
3173 	dev = init_dev();
3174 	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
3175 	poll_threads();
3176 	CU_ASSERT(g_bserrno == 0);
3177 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3178 	bs = g_bs;
3179 
3180 	/* Destroy the blob store */
3181 	g_bserrno = -1;
3182 	spdk_bs_destroy(bs, bs_op_complete, NULL);
3183 	poll_threads();
3184 	CU_ASSERT(g_bserrno == 0);
3185 
3186 	/* Loading a non-existent blob store should fail. */
3187 	g_bs = NULL;
3188 	dev = init_dev();
3189 
3190 	g_bserrno = 0;
3191 	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
3192 	poll_threads();
3193 	CU_ASSERT(g_bserrno != 0);
3194 }
3195 
3196 /* Try to hit all of the corner cases associated with serializing
3197  * a blob to disk
3198  */
3199 static void
3200 blob_serialize_test(void)
3201 {
3202 	struct spdk_bs_dev *dev;
3203 	struct spdk_bs_opts opts;
3204 	struct spdk_blob_store *bs;
3205 	spdk_blob_id blobid[2];
3206 	struct spdk_blob *blob[2];
3207 	uint64_t i;
3208 	char *value;
3209 	int rc;
3210 
3211 	dev = init_dev();
3212 
3213 	/* Initialize a new blobstore with very small clusters */
3214 	spdk_bs_opts_init(&opts, sizeof(opts));
3215 	opts.cluster_sz = dev->blocklen * 8;
3216 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
3217 	poll_threads();
3218 	CU_ASSERT(g_bserrno == 0);
3219 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3220 	bs = g_bs;
3221 
3222 	/* Create and open two blobs */
3223 	for (i = 0; i < 2; i++) {
3224 		blob[i] = ut_blob_create_and_open(bs, NULL);
3225 		blobid[i] = spdk_blob_get_id(blob[i]);
3226 
3227 		/* Set a fairly large xattr on both blobs to eat up
3228 		 * metadata space
3229 		 */
3230 		value = calloc(dev->blocklen - 64, sizeof(char));
3231 		SPDK_CU_ASSERT_FATAL(value != NULL);
3232 		memset(value, i, dev->blocklen / 2);
3233 		rc = spdk_blob_set_xattr(blob[i], "name", value, dev->blocklen - 64);
3234 		CU_ASSERT(rc == 0);
3235 		free(value);
3236 	}
3237 
3238 	/* Resize the blobs, alternating 1 cluster at a time.
3239 	 * This thwarts run length encoding and will cause spill
3240 	 * over of the extents.
3241 	 */
3242 	for (i = 0; i < 6; i++) {
3243 		spdk_blob_resize(blob[i % 2], (i / 2) + 1, blob_op_complete, NULL);
3244 		poll_threads();
3245 		CU_ASSERT(g_bserrno == 0);
3246 	}
3247 
3248 	for (i = 0; i < 2; i++) {
3249 		spdk_blob_sync_md(blob[i], blob_op_complete, NULL);
3250 		poll_threads();
3251 		CU_ASSERT(g_bserrno == 0);
3252 	}
3253 
3254 	/* Close the blobs */
3255 	for (i = 0; i < 2; i++) {
3256 		spdk_blob_close(blob[i], blob_op_complete, NULL);
3257 		poll_threads();
3258 		CU_ASSERT(g_bserrno == 0);
3259 	}
3260 
3261 	ut_bs_reload(&bs, &opts);
3262 
3263 	for (i = 0; i < 2; i++) {
3264 		blob[i] = NULL;
3265 
3266 		spdk_bs_open_blob(bs, blobid[i], blob_op_with_handle_complete, NULL);
3267 		poll_threads();
3268 		CU_ASSERT(g_bserrno == 0);
3269 		CU_ASSERT(g_blob != NULL);
3270 		blob[i] = g_blob;
3271 
3272 		CU_ASSERT(spdk_blob_get_num_clusters(blob[i]) == 3);
3273 
3274 		spdk_blob_close(blob[i], blob_op_complete, NULL);
3275 		poll_threads();
3276 		CU_ASSERT(g_bserrno == 0);
3277 	}
3278 
3279 	spdk_bs_unload(bs, bs_op_complete, NULL);
3280 	poll_threads();
3281 	CU_ASSERT(g_bserrno == 0);
3282 	g_bs = NULL;
3283 }
3284 
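/*
 * Corrupt the CRC of a blob's metadata page on disk and verify that both opening
 *  and deleting the blob fail with -EINVAL.
 */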
3285 static void
3286 blob_crc(void)
3287 {
3288 	struct spdk_blob_store *bs = g_bs;
3289 	struct spdk_blob *blob;
3290 	spdk_blob_id blobid;
3291 	uint32_t page_num;
3292 	int index;
3293 	struct spdk_blob_md_page *page;
3294 
3295 	blob = ut_blob_create_and_open(bs, NULL);
3296 	blobid = spdk_blob_get_id(blob);
3297 
3298 	spdk_blob_close(blob, blob_op_complete, NULL);
3299 	poll_threads();
3300 	CU_ASSERT(g_bserrno == 0);
3301 
3302 	page_num = bs_blobid_to_page(blobid);
3303 	index = DEV_BUFFER_BLOCKLEN * (bs->md_start + page_num);
3304 	page = (struct spdk_blob_md_page *)&g_dev_buffer[index];
3305 	page->crc = 0;
3306 
3307 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
3308 	poll_threads();
3309 	CU_ASSERT(g_bserrno == -EINVAL);
3310 	CU_ASSERT(g_blob == NULL);
3311 	g_bserrno = 0;
3312 
3313 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
3314 	poll_threads();
3315 	CU_ASSERT(g_bserrno == -EINVAL);
3316 }
3317 
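/*
 * Zero the super block CRC on disk and verify that loading the blobstore fails
 *  with -EILSEQ.
 */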
3318 static void
3319 super_block_crc(void)
3320 {
3321 	struct spdk_blob_store *bs;
3322 	struct spdk_bs_dev *dev;
3323 	struct spdk_bs_super_block *super_block;
3324 
3325 	dev = init_dev();
3326 	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
3327 	poll_threads();
3328 	CU_ASSERT(g_bserrno == 0);
3329 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3330 	bs = g_bs;
3331 
3332 	spdk_bs_unload(bs, bs_op_complete, NULL);
3333 	poll_threads();
3334 	CU_ASSERT(g_bserrno == 0);
3335 	g_bs = NULL;
3336 
3337 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
3338 	super_block->crc = 0;
3339 	dev = init_dev();
3340 
3341 	/* Load an existing blob store */
3342 	g_bserrno = 0;
3343 	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
3344 	poll_threads();
3345 	CU_ASSERT(g_bserrno == -EILSEQ);
3346 }
3347 
3348 /* For the blob dirty shutdown test case we run the following sub-test cases:
3349  * 1 Initialize a new blob store and create 1 super blob with some xattrs, then
3350  *   dirty shutdown, reload the blob store and verify the xattrs.
3351  * 2 Resize the blob from 10 clusters to 20 clusters, then dirty shutdown,
3352  *   reload the blob store and verify the cluster count.
3353  * 3 Create a second blob, then dirty shutdown, reload the blob store
3354  *   and verify the second blob.
3355  * 4 Delete the second blob, then dirty shutdown, reload the blob store
3356  *   and verify the second blob is invalid.
3357  * 5 Create the second blob again and also create a third blob, corrupt the
3358  *   md of the second blob so that it becomes invalid, then dirty shutdown,
3359  *   reload the blob store and verify that the second blob is invalid and
3360  *   the third blob is correct.
3361  */
3362 static void
3363 blob_dirty_shutdown(void)
3364 {
3365 	int rc;
3366 	int index;
3367 	struct spdk_blob_store *bs = g_bs;
3368 	spdk_blob_id blobid1, blobid2, blobid3;
3369 	struct spdk_blob *blob = g_blob;
3370 	uint64_t length;
3371 	uint64_t free_clusters;
3372 	const void *value;
3373 	size_t value_len;
3374 	uint32_t page_num;
3375 	struct spdk_blob_md_page *page;
3376 	struct spdk_blob_opts blob_opts;
3377 
3378 	/* Create first blob */
3379 	blobid1 = spdk_blob_get_id(blob);
3380 
3381 	/* Set some xattrs */
3382 	rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
3383 	CU_ASSERT(rc == 0);
3384 
3385 	length = 2345;
3386 	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
3387 	CU_ASSERT(rc == 0);
3388 
3389 	/* Put an xattr that fits exactly in a single page.
3390 	 * This results in additional pages being added to the MD.
3391 	 * The first holds the flags and the smaller xattr, the second the large xattr,
3392 	 * and the third just the extents.
3393 	 */
3394 	size_t xattr_length = 4072 - sizeof(struct spdk_blob_md_descriptor_xattr) -
3395 			      strlen("large_xattr");
3396 	char *xattr = calloc(xattr_length, sizeof(char));
3397 	SPDK_CU_ASSERT_FATAL(xattr != NULL);
3398 	rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
3399 	free(xattr);
3400 	SPDK_CU_ASSERT_FATAL(rc == 0);
3401 
3402 	/* Resize the blob */
3403 	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
3404 	poll_threads();
3405 	CU_ASSERT(g_bserrno == 0);
3406 
3407 	/* Set the blob as the super blob */
3408 	spdk_bs_set_super(bs, blobid1, blob_op_complete, NULL);
3409 	poll_threads();
3410 	CU_ASSERT(g_bserrno == 0);
3411 
3412 	free_clusters = spdk_bs_free_cluster_count(bs);
3413 
3414 	spdk_blob_close(blob, blob_op_complete, NULL);
3415 	poll_threads();
3416 	CU_ASSERT(g_bserrno == 0);
3417 	blob = NULL;
3418 	g_blob = NULL;
3419 	g_blobid = SPDK_BLOBID_INVALID;
3420 
3421 	ut_bs_dirty_load(&bs, NULL);
3422 
3423 	/* Get the super blob */
3424 	spdk_bs_get_super(bs, blob_op_with_id_complete, NULL);
3425 	poll_threads();
3426 	CU_ASSERT(g_bserrno == 0);
3427 	CU_ASSERT(blobid1 == g_blobid);
3428 
3429 	spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL);
3430 	poll_threads();
3431 	CU_ASSERT(g_bserrno == 0);
3432 	CU_ASSERT(g_blob != NULL);
3433 	blob = g_blob;
3434 
3435 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3436 
3437 	/* Get the xattrs */
3438 	value = NULL;
3439 	rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len);
3440 	CU_ASSERT(rc == 0);
3441 	SPDK_CU_ASSERT_FATAL(value != NULL);
3442 	CU_ASSERT(*(uint64_t *)value == length);
3443 	CU_ASSERT(value_len == 8);
3444 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
3445 
3446 	/* Resize the blob */
3447 	spdk_blob_resize(blob, 20, blob_op_complete, NULL);
3448 	poll_threads();
3449 	CU_ASSERT(g_bserrno == 0);
3450 
3451 	free_clusters = spdk_bs_free_cluster_count(bs);
3452 
3453 	spdk_blob_close(blob, blob_op_complete, NULL);
3454 	poll_threads();
3455 	CU_ASSERT(g_bserrno == 0);
3456 	blob = NULL;
3457 	g_blob = NULL;
3458 	g_blobid = SPDK_BLOBID_INVALID;
3459 
3460 	ut_bs_dirty_load(&bs, NULL);
3461 
3462 	spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL);
3463 	poll_threads();
3464 	CU_ASSERT(g_bserrno == 0);
3465 	CU_ASSERT(g_blob != NULL);
3466 	blob = g_blob;
3467 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 20);
3468 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3469 
3470 	spdk_blob_close(blob, blob_op_complete, NULL);
3471 	poll_threads();
3472 	CU_ASSERT(g_bserrno == 0);
3473 	blob = NULL;
3474 	g_blob = NULL;
3475 	g_blobid = SPDK_BLOBID_INVALID;
3476 
3477 	/* Create second blob */
3478 	blob = ut_blob_create_and_open(bs, NULL);
3479 	blobid2 = spdk_blob_get_id(blob);
3480 
3481 	/* Set some xattrs */
3482 	rc = spdk_blob_set_xattr(blob, "name", "log1.txt", strlen("log1.txt") + 1);
3483 	CU_ASSERT(rc == 0);
3484 
3485 	length = 5432;
3486 	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
3487 	CU_ASSERT(rc == 0);
3488 
3489 	/* Resize the blob */
3490 	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
3491 	poll_threads();
3492 	CU_ASSERT(g_bserrno == 0);
3493 
3494 	free_clusters = spdk_bs_free_cluster_count(bs);
3495 
3496 	spdk_blob_close(blob, blob_op_complete, NULL);
3497 	poll_threads();
3498 	CU_ASSERT(g_bserrno == 0);
3499 	blob = NULL;
3500 	g_blob = NULL;
3501 	g_blobid = SPDK_BLOBID_INVALID;
3502 
3503 	ut_bs_dirty_load(&bs, NULL);
3504 
3505 	spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL);
3506 	poll_threads();
3507 	CU_ASSERT(g_bserrno == 0);
3508 	CU_ASSERT(g_blob != NULL);
3509 	blob = g_blob;
3510 
3511 	/* Get the xattrs */
3512 	value = NULL;
3513 	rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len);
3514 	CU_ASSERT(rc == 0);
3515 	SPDK_CU_ASSERT_FATAL(value != NULL);
3516 	CU_ASSERT(*(uint64_t *)value == length);
3517 	CU_ASSERT(value_len == 8);
3518 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
3519 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3520 
3521 	ut_blob_close_and_delete(bs, blob);
3522 
3523 	free_clusters = spdk_bs_free_cluster_count(bs);
3524 
3525 	ut_bs_dirty_load(&bs, NULL);
3526 
3527 	spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL);
3528 	poll_threads();
3529 	CU_ASSERT(g_bserrno != 0);
3530 	CU_ASSERT(g_blob == NULL);
3531 
3532 	spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL);
3533 	poll_threads();
3534 	CU_ASSERT(g_bserrno == 0);
3535 	CU_ASSERT(g_blob != NULL);
3536 	blob = g_blob;
3537 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3538 	spdk_blob_close(blob, blob_op_complete, NULL);
3539 	poll_threads();
3540 	CU_ASSERT(g_bserrno == 0);
3541 
3542 	ut_bs_reload(&bs, NULL);
3543 
3544 	/* Create second blob */
3545 	ut_spdk_blob_opts_init(&blob_opts);
3546 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
3547 	poll_threads();
3548 	CU_ASSERT(g_bserrno == 0);
3549 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
3550 	blobid2 = g_blobid;
3551 
3552 	/* Create third blob */
3553 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
3554 	poll_threads();
3555 	CU_ASSERT(g_bserrno == 0);
3556 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
3557 	blobid3 = g_blobid;
3558 
3559 	spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL);
3560 	poll_threads();
3561 	CU_ASSERT(g_bserrno == 0);
3562 	CU_ASSERT(g_blob != NULL);
3563 	blob = g_blob;
3564 
3565 	/* Set some xattrs for second blob */
3566 	rc = spdk_blob_set_xattr(blob, "name", "log1.txt", strlen("log1.txt") + 1);
3567 	CU_ASSERT(rc == 0);
3568 
3569 	length = 5432;
3570 	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
3571 	CU_ASSERT(rc == 0);
3572 
3573 	spdk_blob_close(blob, blob_op_complete, NULL);
3574 	poll_threads();
3575 	CU_ASSERT(g_bserrno == 0);
3576 	blob = NULL;
3577 	g_blob = NULL;
3578 	g_blobid = SPDK_BLOBID_INVALID;
3579 
3580 	spdk_bs_open_blob(bs, blobid3, blob_op_with_handle_complete, NULL);
3581 	poll_threads();
3582 	CU_ASSERT(g_bserrno == 0);
3583 	CU_ASSERT(g_blob != NULL);
3584 	blob = g_blob;
3585 
3586 	/* Set some xattrs for third blob */
3587 	rc = spdk_blob_set_xattr(blob, "name", "log2.txt", strlen("log2.txt") + 1);
3588 	CU_ASSERT(rc == 0);
3589 
3590 	length = 5432;
3591 	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
3592 	CU_ASSERT(rc == 0);
3593 
3594 	spdk_blob_close(blob, blob_op_complete, NULL);
3595 	poll_threads();
3596 	CU_ASSERT(g_bserrno == 0);
3597 	blob = NULL;
3598 	g_blob = NULL;
3599 	g_blobid = SPDK_BLOBID_INVALID;
3600 
3601 	/* Mark second blob as invalid */
3602 	page_num = bs_blobid_to_page(blobid2);
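	/* Overwrite the blob's first metadata page with sequence_num = 1 (keeping the
	 * CRC valid), which is assumed to make it look like a non-head page of a
	 * metadata chain, so the blob can no longer be opened after the dirty reload.
	 */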
3603 
3604 	index = DEV_BUFFER_BLOCKLEN * (bs->md_start + page_num);
3605 	page = (struct spdk_blob_md_page *)&g_dev_buffer[index];
3606 	page->sequence_num = 1;
3607 	page->crc = blob_md_page_calc_crc(page);
3608 
3609 	free_clusters = spdk_bs_free_cluster_count(bs);
3610 
3611 	ut_bs_dirty_load(&bs, NULL);
3612 
3613 	spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL);
3614 	poll_threads();
3615 	CU_ASSERT(g_bserrno != 0);
3616 	CU_ASSERT(g_blob == NULL);
3617 
3618 	spdk_bs_open_blob(bs, blobid3, blob_op_with_handle_complete, NULL);
3619 	poll_threads();
3620 	CU_ASSERT(g_bserrno == 0);
3621 	CU_ASSERT(g_blob != NULL);
3622 	blob = g_blob;
3623 
3624 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3625 }
3626 
3627 static void
3628 blob_flags(void)
3629 {
3630 	struct spdk_blob_store *bs = g_bs;
3631 	spdk_blob_id blobid_invalid, blobid_data_ro, blobid_md_ro;
3632 	struct spdk_blob *blob_invalid, *blob_data_ro, *blob_md_ro;
3633 	struct spdk_blob_opts blob_opts;
3634 	int rc;
3635 
3636 	/* Create three blobs - one each for testing invalid, data_ro and md_ro flags. */
3637 	blob_invalid = ut_blob_create_and_open(bs, NULL);
3638 	blobid_invalid = spdk_blob_get_id(blob_invalid);
3639 
3640 	blob_data_ro = ut_blob_create_and_open(bs, NULL);
3641 	blobid_data_ro = spdk_blob_get_id(blob_data_ro);
3642 
3643 	ut_spdk_blob_opts_init(&blob_opts);
3644 	blob_opts.clear_method = BLOB_CLEAR_WITH_WRITE_ZEROES;
3645 	blob_md_ro = ut_blob_create_and_open(bs, &blob_opts);
3646 	blobid_md_ro = spdk_blob_get_id(blob_md_ro);
3647 	CU_ASSERT((blob_md_ro->md_ro_flags & SPDK_BLOB_MD_RO_FLAGS_MASK) == BLOB_CLEAR_WITH_WRITE_ZEROES);
3648 
3649 	/* Change the size of blob_data_ro to check if flags are serialized
3650 	 * when the blob has a non-zero number of extents */
3651 	spdk_blob_resize(blob_data_ro, 10, blob_op_complete, NULL);
3652 	poll_threads();
3653 	CU_ASSERT(g_bserrno == 0);
3654 
3655 	/* Set the xattr to check if flags are serialized
3656 	 * when the blob has a non-zero number of xattrs */
3657 	rc = spdk_blob_set_xattr(blob_md_ro, "name", "log.txt", strlen("log.txt") + 1);
3658 	CU_ASSERT(rc == 0);
3659 
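	/* Set a single high bit (63/62/61) in each flags field. These bits are assumed
	 * not to be part of the corresponding masks of flags known to this code, so
	 * after reload they are treated as unknown flags: an unknown invalid_flag fails
	 * the open, an unknown data_ro_flag forces data and md read-only, and an
	 * unknown md_ro_flag forces md read-only.
	 */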
3660 	blob_invalid->invalid_flags = (1ULL << 63);
3661 	blob_invalid->state = SPDK_BLOB_STATE_DIRTY;
3662 	blob_data_ro->data_ro_flags = (1ULL << 62);
3663 	blob_data_ro->state = SPDK_BLOB_STATE_DIRTY;
3664 	blob_md_ro->md_ro_flags = (1ULL << 61);
3665 	blob_md_ro->state = SPDK_BLOB_STATE_DIRTY;
3666 
3667 	g_bserrno = -1;
3668 	spdk_blob_sync_md(blob_invalid, blob_op_complete, NULL);
3669 	poll_threads();
3670 	CU_ASSERT(g_bserrno == 0);
3671 	g_bserrno = -1;
3672 	spdk_blob_sync_md(blob_data_ro, blob_op_complete, NULL);
3673 	poll_threads();
3674 	CU_ASSERT(g_bserrno == 0);
3675 	g_bserrno = -1;
3676 	spdk_blob_sync_md(blob_md_ro, blob_op_complete, NULL);
3677 	poll_threads();
3678 	CU_ASSERT(g_bserrno == 0);
3679 
3680 	g_bserrno = -1;
3681 	spdk_blob_close(blob_invalid, blob_op_complete, NULL);
3682 	poll_threads();
3683 	CU_ASSERT(g_bserrno == 0);
3684 	blob_invalid = NULL;
3685 	g_bserrno = -1;
3686 	spdk_blob_close(blob_data_ro, blob_op_complete, NULL);
3687 	poll_threads();
3688 	CU_ASSERT(g_bserrno == 0);
3689 	blob_data_ro = NULL;
3690 	g_bserrno = -1;
3691 	spdk_blob_close(blob_md_ro, blob_op_complete, NULL);
3692 	poll_threads();
3693 	CU_ASSERT(g_bserrno == 0);
3694 	blob_md_ro = NULL;
3695 
3696 	g_blob = NULL;
3697 	g_blobid = SPDK_BLOBID_INVALID;
3698 
3699 	ut_bs_reload(&bs, NULL);
3700 
3701 	g_blob = NULL;
3702 	g_bserrno = 0;
3703 	spdk_bs_open_blob(bs, blobid_invalid, blob_op_with_handle_complete, NULL);
3704 	poll_threads();
3705 	CU_ASSERT(g_bserrno != 0);
3706 	CU_ASSERT(g_blob == NULL);
3707 
3708 	g_blob = NULL;
3709 	g_bserrno = -1;
3710 	spdk_bs_open_blob(bs, blobid_data_ro, blob_op_with_handle_complete, NULL);
3711 	poll_threads();
3712 	CU_ASSERT(g_bserrno == 0);
3713 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
3714 	blob_data_ro = g_blob;
3715 	/* If an unknown data_ro flag was found, the blob should be marked both data and md read-only. */
3716 	CU_ASSERT(blob_data_ro->data_ro == true);
3717 	CU_ASSERT(blob_data_ro->md_ro == true);
3718 	CU_ASSERT(spdk_blob_get_num_clusters(blob_data_ro) == 10);
3719 
3720 	g_blob = NULL;
3721 	g_bserrno = -1;
3722 	spdk_bs_open_blob(bs, blobid_md_ro, blob_op_with_handle_complete, NULL);
3723 	poll_threads();
3724 	CU_ASSERT(g_bserrno == 0);
3725 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
3726 	blob_md_ro = g_blob;
3727 	CU_ASSERT(blob_md_ro->data_ro == false);
3728 	CU_ASSERT(blob_md_ro->md_ro == true);
3729 
3730 	g_bserrno = -1;
3731 	spdk_blob_sync_md(blob_md_ro, blob_op_complete, NULL);
3732 	poll_threads();
3733 	CU_ASSERT(g_bserrno == 0);
3734 
3735 	ut_blob_close_and_delete(bs, blob_data_ro);
3736 	ut_blob_close_and_delete(bs, blob_md_ro);
3737 }
3738 
3739 static void
3740 bs_version(void)
3741 {
3742 	struct spdk_bs_super_block *super;
3743 	struct spdk_blob_store *bs = g_bs;
3744 	struct spdk_bs_dev *dev;
3745 	struct spdk_blob *blob;
3746 	struct spdk_blob_opts blob_opts;
3747 	spdk_blob_id blobid;
3748 
3749 	/* Unload the blob store */
3750 	spdk_bs_unload(bs, bs_op_complete, NULL);
3751 	poll_threads();
3752 	CU_ASSERT(g_bserrno == 0);
3753 	g_bs = NULL;
3754 
3755 	/*
3756 	 * Change the bs version on disk.  This will allow us to
3757 	 *  test that the version does not get modified automatically
3758 	 *  when loading and unloading the blobstore.
3759 	 */
3760 	super = (struct spdk_bs_super_block *)&g_dev_buffer[0];
3761 	CU_ASSERT(super->version == SPDK_BS_VERSION);
3762 	CU_ASSERT(super->clean == 1);
3763 	super->version = 2;
3764 	/*
3765 	 * Version 2 metadata does not have a used blobid mask, so clear
3766 	 *  those fields in the super block and zero the corresponding
3767 	 *  region on "disk".  We will use this to ensure blob IDs are
3768 	 *  correctly reconstructed.
3769 	 */
3770 	memset(&g_dev_buffer[super->used_blobid_mask_start * SPDK_BS_PAGE_SIZE], 0,
3771 	       super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE);
3772 	super->used_blobid_mask_start = 0;
3773 	super->used_blobid_mask_len = 0;
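	/* Recompute the super block CRC after the edits above; otherwise the load below
	 * would fail the CRC check (as exercised by super_block_crc()).
	 */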
3774 	super->crc = blob_md_page_calc_crc(super);
3775 
3776 	/* Load an existing blob store */
3777 	dev = init_dev();
3778 	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
3779 	poll_threads();
3780 	CU_ASSERT(g_bserrno == 0);
3781 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3782 	CU_ASSERT(super->clean == 1);
3783 	bs = g_bs;
3784 
3785 	/*
3786 	 * Create a blob - just to make sure that unloading it later
3787 	 *  results in writing the super block (since metadata pages
3788 	 *  were allocated).
3789 	 */
3790 	ut_spdk_blob_opts_init(&blob_opts);
3791 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
3792 	poll_threads();
3793 	CU_ASSERT(g_bserrno == 0);
3794 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
3795 	blobid = g_blobid;
3796 
3797 	/* Unload the blob store */
3798 	spdk_bs_unload(bs, bs_op_complete, NULL);
3799 	poll_threads();
3800 	CU_ASSERT(g_bserrno == 0);
3801 	g_bs = NULL;
3802 	CU_ASSERT(super->version == 2);
3803 	CU_ASSERT(super->used_blobid_mask_start == 0);
3804 	CU_ASSERT(super->used_blobid_mask_len == 0);
3805 
3806 	dev = init_dev();
3807 	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
3808 	poll_threads();
3809 	CU_ASSERT(g_bserrno == 0);
3810 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3811 	bs = g_bs;
3812 
3813 	g_blob = NULL;
3814 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
3815 	poll_threads();
3816 	CU_ASSERT(g_bserrno == 0);
3817 	CU_ASSERT(g_blob != NULL);
3818 	blob = g_blob;
3819 
3820 	ut_blob_close_and_delete(bs, blob);
3821 
3822 	CU_ASSERT(super->version == 2);
3823 	CU_ASSERT(super->used_blobid_mask_start == 0);
3824 	CU_ASSERT(super->used_blobid_mask_len == 0);
3825 }
3826 
3827 static void
3828 blob_set_xattrs_test(void)
3829 {
3830 	struct spdk_blob_store *bs = g_bs;
3831 	struct spdk_blob *blob;
3832 	struct spdk_blob_opts opts;
3833 	const void *value;
3834 	size_t value_len;
3835 	char *xattr;
3836 	size_t xattr_length;
3837 	int rc;
3838 
3839 	/* Create blob with extra attributes */
3840 	ut_spdk_blob_opts_init(&opts);
3841 
3842 	opts.xattrs.names = g_xattr_names;
3843 	opts.xattrs.get_value = _get_xattr_value;
3844 	opts.xattrs.count = 3;
3845 	opts.xattrs.ctx = &g_ctx;
3846 
3847 	blob = ut_blob_create_and_open(bs, &opts);
3848 
3849 	/* Get the xattrs */
3850 	value = NULL;
3851 
3852 	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len);
3853 	CU_ASSERT(rc == 0);
3854 	SPDK_CU_ASSERT_FATAL(value != NULL);
3855 	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
3856 	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);
3857 
3858 	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len);
3859 	CU_ASSERT(rc == 0);
3860 	SPDK_CU_ASSERT_FATAL(value != NULL);
3861 	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
3862 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);
3863 
3864 	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len);
3865 	CU_ASSERT(rc == 0);
3866 	SPDK_CU_ASSERT_FATAL(value != NULL);
3867 	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
3868 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);
3869 
3870 	/* Try to get a non-existent attribute */
3871 
3872 	rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len);
3873 	CU_ASSERT(rc == -ENOENT);
3874 
3875 	/* Try an xattr exceeding the maximum descriptor length that fits in a single page */
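	/* SPDK_BS_MAX_DESC_SIZE is assumed to be the descriptor area of a single
	 * metadata page, so the extra byte makes the xattr descriptor too large to fit
	 * and spdk_blob_set_xattr() is expected to fail with -ENOMEM.
	 */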
3876 	xattr_length = SPDK_BS_MAX_DESC_SIZE - sizeof(struct spdk_blob_md_descriptor_xattr) -
3877 		       strlen("large_xattr") + 1;
3878 	xattr = calloc(xattr_length, sizeof(char));
3879 	SPDK_CU_ASSERT_FATAL(xattr != NULL);
3880 	rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
3881 	free(xattr);
3882 	SPDK_CU_ASSERT_FATAL(rc == -ENOMEM);
3883 
3884 	spdk_blob_close(blob, blob_op_complete, NULL);
3885 	poll_threads();
3886 	CU_ASSERT(g_bserrno == 0);
3887 	blob = NULL;
3888 	g_blob = NULL;
3889 	g_blobid = SPDK_BLOBID_INVALID;
3890 
3891 	/* NULL callback */
3892 	ut_spdk_blob_opts_init(&opts);
3893 	opts.xattrs.names = g_xattr_names;
3894 	opts.xattrs.get_value = NULL;
3895 	opts.xattrs.count = 1;
3896 	opts.xattrs.ctx = &g_ctx;
3897 
3898 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
3899 	poll_threads();
3900 	CU_ASSERT(g_bserrno == -EINVAL);
3901 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
3902 
3903 	/* NULL values */
3904 	ut_spdk_blob_opts_init(&opts);
3905 	opts.xattrs.names = g_xattr_names;
3906 	opts.xattrs.get_value = _get_xattr_value_null;
3907 	opts.xattrs.count = 1;
3908 	opts.xattrs.ctx = NULL;
3909 
3910 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
3911 	poll_threads();
3912 	CU_ASSERT(g_bserrno == -EINVAL);
3913 }
3914 
3915 static void
3916 blob_thin_prov_alloc(void)
3917 {
3918 	struct spdk_blob_store *bs = g_bs;
3919 	struct spdk_blob *blob;
3920 	struct spdk_blob_opts opts;
3921 	spdk_blob_id blobid;
3922 	uint64_t free_clusters;
3923 
3924 	free_clusters = spdk_bs_free_cluster_count(bs);
3925 
3926 	/* Set blob as thin provisioned */
3927 	ut_spdk_blob_opts_init(&opts);
3928 	opts.thin_provision = true;
3929 
3930 	blob = ut_blob_create_and_open(bs, &opts);
3931 	blobid = spdk_blob_get_id(blob);
3932 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3933 
3934 	CU_ASSERT(blob->active.num_clusters == 0);
3935 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
3936 
3937 	/* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */
3938 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
3939 	poll_threads();
3940 	CU_ASSERT(g_bserrno == 0);
3941 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3942 	CU_ASSERT(blob->active.num_clusters == 5);
3943 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
3944 
3945 	/* Grow it to 1TB - still unallocated */
3946 	spdk_blob_resize(blob, 262144, blob_op_complete, NULL);
3947 	poll_threads();
3948 	CU_ASSERT(g_bserrno == 0);
3949 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3950 	CU_ASSERT(blob->active.num_clusters == 262144);
3951 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 262144);
3952 
3953 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
3954 	poll_threads();
3955 	CU_ASSERT(g_bserrno == 0);
3956 	/* Sync must not change anything */
3957 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3958 	CU_ASSERT(blob->active.num_clusters == 262144);
3959 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 262144);
3960 	/* Since clusters are not allocated,
3961 	 * number of metadata pages is expected to be minimal.
3962 	 */
3963 	CU_ASSERT(blob->active.num_pages == 1);
3964 
3965 	/* Shrink the blob to 3 clusters - still unallocated */
3966 	spdk_blob_resize(blob, 3, blob_op_complete, NULL);
3967 	poll_threads();
3968 	CU_ASSERT(g_bserrno == 0);
3969 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3970 	CU_ASSERT(blob->active.num_clusters == 3);
3971 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 3);
3972 
3973 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
3974 	poll_threads();
3975 	CU_ASSERT(g_bserrno == 0);
3976 	/* Sync must not change anything */
3977 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3978 	CU_ASSERT(blob->active.num_clusters == 3);
3979 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 3);
3980 
3981 	spdk_blob_close(blob, blob_op_complete, NULL);
3982 	poll_threads();
3983 	CU_ASSERT(g_bserrno == 0);
3984 
3985 	ut_bs_reload(&bs, NULL);
3986 
3987 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
3988 	poll_threads();
3989 	CU_ASSERT(g_bserrno == 0);
3990 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
3991 	blob = g_blob;
3992 
3993 	/* Check that clusters allocation and size is still the same */
3994 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
3995 	CU_ASSERT(blob->active.num_clusters == 3);
3996 
3997 	ut_blob_close_and_delete(bs, blob);
3998 }
3999 
4000 static void
4001 blob_insert_cluster_msg_test(void)
4002 {
4003 	struct spdk_blob_store *bs = g_bs;
4004 	struct spdk_blob *blob;
4005 	struct spdk_blob_opts opts;
4006 	struct spdk_blob_md_page page = {};
4007 	spdk_blob_id blobid;
4008 	uint64_t free_clusters;
4009 	uint64_t new_cluster = 0;
4010 	uint32_t cluster_num = 3;
4011 	uint32_t extent_page = 0;
4012 
4013 	free_clusters = spdk_bs_free_cluster_count(bs);
4014 
4015 	/* Set blob as thin provisioned */
4016 	ut_spdk_blob_opts_init(&opts);
4017 	opts.thin_provision = true;
4018 	opts.num_clusters = 4;
4019 
4020 	blob = ut_blob_create_and_open(bs, &opts);
4021 	blobid = spdk_blob_get_id(blob);
4022 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4023 
4024 	CU_ASSERT(blob->active.num_clusters == 4);
4025 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 4);
4026 	CU_ASSERT(blob->active.clusters[cluster_num] == 0);
4027 
4028 	/* Specify the cluster_num to allocate; new_cluster is returned so it can be
4029 	 * inserted on the md_thread. This simulates the behaviour when a cluster is
4030 	 * allocated after blob creation, e.g. by _spdk_bs_allocate_and_copy_cluster(). */
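	/* bs_allocate_cluster() with its final argument false is assumed to only claim
	 * new_cluster in the blobstore's allocation bitmask without updating the blob's
	 * cluster map, which is why clusters[cluster_num] stays 0 here;
	 * blob_insert_cluster_on_md_thread() then performs the insertion and md sync on
	 * the metadata thread.
	 */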
4031 	bs_allocate_cluster(blob, cluster_num, &new_cluster, &extent_page, false);
4032 	CU_ASSERT(blob->active.clusters[cluster_num] == 0);
4033 
4034 	blob_insert_cluster_on_md_thread(blob, cluster_num, new_cluster, extent_page, &page,
4035 					 blob_op_complete, NULL);
4036 	poll_threads();
4037 
4038 	CU_ASSERT(blob->active.clusters[cluster_num] != 0);
4039 
4040 	spdk_blob_close(blob, blob_op_complete, NULL);
4041 	poll_threads();
4042 	CU_ASSERT(g_bserrno == 0);
4043 
4044 	ut_bs_reload(&bs, NULL);
4045 
4046 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
4047 	poll_threads();
4048 	CU_ASSERT(g_bserrno == 0);
4049 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4050 	blob = g_blob;
4051 
4052 	CU_ASSERT(blob->active.clusters[cluster_num] != 0);
4053 
4054 	ut_blob_close_and_delete(bs, blob);
4055 }
4056 
4057 static void
4058 blob_thin_prov_rw(void)
4059 {
4060 	static const uint8_t zero[10 * 4096] = { 0 };
4061 	struct spdk_blob_store *bs = g_bs;
4062 	struct spdk_blob *blob, *blob_id0;
4063 	struct spdk_io_channel *channel, *channel_thread1;
4064 	struct spdk_blob_opts opts;
4065 	uint64_t free_clusters;
4066 	uint64_t page_size;
4067 	uint8_t payload_read[10 * 4096];
4068 	uint8_t payload_write[10 * 4096];
4069 	uint64_t write_bytes;
4070 	uint64_t read_bytes;
4071 
4072 	free_clusters = spdk_bs_free_cluster_count(bs);
4073 	page_size = spdk_bs_get_page_size(bs);
4074 
4075 	channel = spdk_bs_alloc_io_channel(bs);
4076 	CU_ASSERT(channel != NULL);
4077 
4078 	ut_spdk_blob_opts_init(&opts);
4079 	opts.thin_provision = true;
4080 
4081 	/* Create and delete a blob at md page 0, so that the next md page allocation
4082 	 * for an extent will reuse it. */
4083 	blob_id0 = ut_blob_create_and_open(bs, &opts);
4084 	blob = ut_blob_create_and_open(bs, &opts);
4085 	ut_blob_close_and_delete(bs, blob_id0);
4086 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4087 
4088 	CU_ASSERT(blob->active.num_clusters == 0);
4089 
4090 	/* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */
4091 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
4092 	poll_threads();
4093 	CU_ASSERT(g_bserrno == 0);
4094 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4095 	CU_ASSERT(blob->active.num_clusters == 5);
4096 
4097 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
4098 	poll_threads();
4099 	CU_ASSERT(g_bserrno == 0);
4100 	/* Sync must not change anything */
4101 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4102 	CU_ASSERT(blob->active.num_clusters == 5);
4103 
4104 	/* Payload should be all zeros from unallocated clusters */
4105 	memset(payload_read, 0xFF, sizeof(payload_read));
4106 	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
4107 	poll_threads();
4108 	CU_ASSERT(g_bserrno == 0);
4109 	CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0);
4110 
4111 	write_bytes = g_dev_write_bytes;
4112 	read_bytes = g_dev_read_bytes;
4113 
4114 	/* Perform write on thread 1. That will allocate cluster on thread 0 via send_msg */
4115 	set_thread(1);
4116 	channel_thread1 = spdk_bs_alloc_io_channel(bs);
4117 	CU_ASSERT(channel_thread1 != NULL);
4118 	memset(payload_write, 0xE5, sizeof(payload_write));
4119 	spdk_blob_io_write(blob, channel_thread1, payload_write, 4, 10, blob_op_complete, NULL);
4120 	CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs));
4121 	/* Perform write on thread 0. That will try to allocate cluster,
4122 	 * but fail due to another thread issuing the cluster allocation first. */
4123 	set_thread(0);
4124 	memset(payload_write, 0xE5, sizeof(payload_write));
4125 	spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
4126 	CU_ASSERT(free_clusters - 2 == spdk_bs_free_cluster_count(bs));
4127 	poll_threads();
4128 	CU_ASSERT(g_bserrno == 0);
4129 	CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs));
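	/* The free count dropped by 2 while both writes were in flight (each thread
	 * speculatively claimed a cluster), but the losing claim is presumably released
	 * once the md thread finds the cluster already inserted, leaving a net
	 * allocation of one cluster.
	 */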
4130 	/* For a thin-provisioned blob we need to write 20 pages of payload plus one
4131 	 * metadata page, and read 0 bytes */
4132 	if (g_use_extent_table) {
4133 		/* Add one more page for EXTENT_PAGE write */
4134 		CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 22);
4135 	} else {
4136 		CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 21);
4137 	}
4138 	CU_ASSERT(g_dev_read_bytes - read_bytes == 0);
4139 
4140 	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
4141 	poll_threads();
4142 	CU_ASSERT(g_bserrno == 0);
4143 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
4144 
4145 	ut_blob_close_and_delete(bs, blob);
4146 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4147 
4148 	set_thread(1);
4149 	spdk_bs_free_io_channel(channel_thread1);
4150 	set_thread(0);
4151 	spdk_bs_free_io_channel(channel);
4152 	poll_threads();
4153 	g_blob = NULL;
4154 	g_blobid = 0;
4155 }
4156 
4157 static void
4158 blob_thin_prov_write_count_io(void)
4159 {
4160 	struct spdk_blob_store *bs;
4161 	struct spdk_blob *blob;
4162 	struct spdk_io_channel *ch;
4163 	struct spdk_bs_dev *dev;
4164 	struct spdk_bs_opts bs_opts;
4165 	struct spdk_blob_opts opts;
4166 	uint64_t free_clusters;
4167 	uint64_t page_size;
4168 	uint8_t payload_write[4096];
4169 	uint64_t write_bytes;
4170 	uint64_t read_bytes;
4171 	const uint32_t CLUSTER_SZ = 16384;
4172 	uint32_t pages_per_cluster;
4173 	uint32_t pages_per_extent_page;
4174 	uint32_t i;
4175 
4176 	/* Use a very small cluster size for this test.  This ensures we need multiple
4177 	 * extent pages to hold all of the clusters even for relatively small blobs like
4178 	 * we are restricted to for the unit tests (i.e. we don't want to allocate multi-GB
4179 	 * buffers).
4180 	 */
4181 	dev = init_dev();
4182 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
4183 	bs_opts.cluster_sz = CLUSTER_SZ;
4184 
4185 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
4186 	poll_threads();
4187 	CU_ASSERT(g_bserrno == 0);
4188 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
4189 	bs = g_bs;
4190 
4191 	free_clusters = spdk_bs_free_cluster_count(bs);
4192 	page_size = spdk_bs_get_page_size(bs);
4193 	pages_per_cluster = CLUSTER_SZ / page_size;
4194 	pages_per_extent_page = SPDK_EXTENTS_PER_EP * pages_per_cluster;
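	/* SPDK_EXTENTS_PER_EP is assumed to be the number of cluster entries in one
	 * extent page, so pages_per_extent_page is the number of blob pages covered by
	 * the clusters of a single extent page; writes at i * pages_per_extent_page
	 * below therefore land in the first cluster described by extent page i.
	 */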
4195 
4196 	ch = spdk_bs_alloc_io_channel(bs);
4197 	SPDK_CU_ASSERT_FATAL(ch != NULL);
4198 
4199 	ut_spdk_blob_opts_init(&opts);
4200 	opts.thin_provision = true;
4201 
4202 	blob = ut_blob_create_and_open(bs, &opts);
4203 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4204 
4205 	/* Resize the blob so that it will require 8 extent pages to hold all of
4206 	 * the clusters.
4207 	 */
4208 	g_bserrno = -1;
4209 	spdk_blob_resize(blob, SPDK_EXTENTS_PER_EP * 8, blob_op_complete, NULL);
4210 	poll_threads();
4211 	CU_ASSERT(g_bserrno == 0);
4212 
4213 	g_bserrno = -1;
4214 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
4215 	poll_threads();
4216 	CU_ASSERT(g_bserrno == 0);
4217 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4218 	CU_ASSERT(blob->active.num_clusters == SPDK_EXTENTS_PER_EP * 8);
4219 
4220 	memset(payload_write, 0, sizeof(payload_write));
4221 	for (i = 0; i < 8; i++) {
4222 		write_bytes = g_dev_write_bytes;
4223 		read_bytes = g_dev_read_bytes;
4224 
4225 		g_bserrno = -1;
4226 		spdk_blob_io_write(blob, ch, payload_write, pages_per_extent_page * i, 1, blob_op_complete, NULL);
4227 		poll_threads();
4228 		CU_ASSERT(g_bserrno == 0);
4229 		CU_ASSERT(free_clusters - (2 * i + 1) == spdk_bs_free_cluster_count(bs));
4230 
4231 		CU_ASSERT(g_dev_read_bytes == read_bytes);
4232 		if (!g_use_extent_table) {
4233 			/* For legacy metadata, we should have written two pages - one for the
4234 			 * write I/O itself, another for the blob's primary metadata.
4235 			 */
4236 			CU_ASSERT((g_dev_write_bytes - write_bytes) / page_size == 2);
4237 		} else {
4238 			/* For extent table metadata, we should have written three pages - one
4239 			 * for the write I/O, one for the extent page, one for the blob's primary
4240 			 * metadata.
4241 			 */
4242 			CU_ASSERT((g_dev_write_bytes - write_bytes) / page_size == 3);
4243 		}
4244 
4245 		/* The write should have synced the metadata already.  Do another sync here
4246 		 * just to confirm.
4247 		 */
4248 		write_bytes = g_dev_write_bytes;
4249 		read_bytes = g_dev_read_bytes;
4250 
4251 		g_bserrno = -1;
4252 		spdk_blob_sync_md(blob, blob_op_complete, NULL);
4253 		poll_threads();
4254 		CU_ASSERT(g_bserrno == 0);
4255 		CU_ASSERT(free_clusters - (2 * i + 1) == spdk_bs_free_cluster_count(bs));
4256 
4257 		CU_ASSERT(g_dev_read_bytes == read_bytes);
4258 		CU_ASSERT(g_dev_write_bytes == write_bytes);
4259 
4260 		/* Now write to another unallocated cluster that is part of the same extent page. */
4261 		g_bserrno = -1;
4262 		spdk_blob_io_write(blob, ch, payload_write, pages_per_extent_page * i + pages_per_cluster,
4263 				   1, blob_op_complete, NULL);
4264 		poll_threads();
4265 		CU_ASSERT(g_bserrno == 0);
4266 		CU_ASSERT(free_clusters - (2 * i + 2) == spdk_bs_free_cluster_count(bs));
4267 
4268 		CU_ASSERT(g_dev_read_bytes == read_bytes);
4269 		/*
4270 		 * For legacy metadata, we should have written the I/O and the primary metadata page.
4271 		 * For extent table metadata, we should have written the I/O and the extent metadata page.
4272 		 */
4273 		CU_ASSERT((g_dev_write_bytes - write_bytes) / page_size == 2);
4274 	}
4275 
4276 	ut_blob_close_and_delete(bs, blob);
4277 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4278 
4279 	spdk_bs_free_io_channel(ch);
4280 	poll_threads();
4281 	g_blob = NULL;
4282 	g_blobid = 0;
4283 
4284 	spdk_bs_unload(bs, bs_op_complete, NULL);
4285 	poll_threads();
4286 	CU_ASSERT(g_bserrno == 0);
4287 	g_bs = NULL;
4288 }
4289 
4290 static void
4291 blob_thin_prov_rle(void)
4292 {
4293 	static const uint8_t zero[10 * 4096] = { 0 };
4294 	struct spdk_blob_store *bs = g_bs;
4295 	struct spdk_blob *blob;
4296 	struct spdk_io_channel *channel;
4297 	struct spdk_blob_opts opts;
4298 	spdk_blob_id blobid;
4299 	uint64_t free_clusters;
4300 	uint64_t page_size;
4301 	uint8_t payload_read[10 * 4096];
4302 	uint8_t payload_write[10 * 4096];
4303 	uint64_t write_bytes;
4304 	uint64_t read_bytes;
4305 	uint64_t io_unit;
4306 
4307 	free_clusters = spdk_bs_free_cluster_count(bs);
4308 	page_size = spdk_bs_get_page_size(bs);
4309 
4310 	ut_spdk_blob_opts_init(&opts);
4311 	opts.thin_provision = true;
4312 	opts.num_clusters = 5;
4313 
4314 	blob = ut_blob_create_and_open(bs, &opts);
4315 	blobid = spdk_blob_get_id(blob);
4316 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4317 
4318 	channel = spdk_bs_alloc_io_channel(bs);
4319 	CU_ASSERT(channel != NULL);
4320 
4321 	/* Specifically target the second cluster in the blob as the first allocation */
4322 	io_unit = bs_cluster_to_page(bs, 1) * bs_io_unit_per_page(bs);
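	/* bs_cluster_to_page() converts cluster index 1 to its first page and
	 * bs_io_unit_per_page() scales that to io_units, so the I/O below starts
	 * exactly at the beginning of the blob's second cluster.
	 */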
4323 
4324 	/* Payload should be all zeros from unallocated clusters */
4325 	memset(payload_read, 0xFF, sizeof(payload_read));
4326 	spdk_blob_io_read(blob, channel, payload_read, io_unit, 10, blob_op_complete, NULL);
4327 	poll_threads();
4328 	CU_ASSERT(g_bserrno == 0);
4329 	CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0);
4330 
4331 	write_bytes = g_dev_write_bytes;
4332 	read_bytes = g_dev_read_bytes;
4333 
4334 	/* Issue write to second cluster in a blob */
4335 	memset(payload_write, 0xE5, sizeof(payload_write));
4336 	spdk_blob_io_write(blob, channel, payload_write, io_unit, 10, blob_op_complete, NULL);
4337 	poll_threads();
4338 	CU_ASSERT(g_bserrno == 0);
4339 	CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs));
4340 	/* For a thin-provisioned blob we need to write 10 pages of payload plus one
4341 	 * metadata page, and read 0 bytes */
4342 	if (g_use_extent_table) {
4343 		/* Add one more page for EXTENT_PAGE write */
4344 		CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 12);
4345 	} else {
4346 		CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 11);
4347 	}
4348 	CU_ASSERT(g_dev_read_bytes - read_bytes == 0);
4349 
4350 	spdk_blob_io_read(blob, channel, payload_read, io_unit, 10, blob_op_complete, NULL);
4351 	poll_threads();
4352 	CU_ASSERT(g_bserrno == 0);
4353 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
4354 
4355 	spdk_bs_free_io_channel(channel);
4356 	poll_threads();
4357 
4358 	spdk_blob_close(blob, blob_op_complete, NULL);
4359 	poll_threads();
4360 	CU_ASSERT(g_bserrno == 0);
4361 
4362 	ut_bs_reload(&bs, NULL);
4363 
4364 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
4365 	poll_threads();
4366 	CU_ASSERT(g_bserrno == 0);
4367 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4368 	blob = g_blob;
4369 
4370 	channel = spdk_bs_alloc_io_channel(bs);
4371 	CU_ASSERT(channel != NULL);
4372 
4373 	/* Read second cluster after blob reload to confirm data written */
4374 	spdk_blob_io_read(blob, channel, payload_read, io_unit, 10, blob_op_complete, NULL);
4375 	poll_threads();
4376 	CU_ASSERT(g_bserrno == 0);
4377 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
4378 
4379 	spdk_bs_free_io_channel(channel);
4380 	poll_threads();
4381 
4382 	ut_blob_close_and_delete(bs, blob);
4383 }
4384 
4385 static void
4386 blob_thin_prov_rw_iov(void)
4387 {
4388 	static const uint8_t zero[10 * 4096] = { 0 };
4389 	struct spdk_blob_store *bs = g_bs;
4390 	struct spdk_blob *blob;
4391 	struct spdk_io_channel *channel;
4392 	struct spdk_blob_opts opts;
4393 	uint64_t free_clusters;
4394 	uint8_t payload_read[10 * 4096];
4395 	uint8_t payload_write[10 * 4096];
4396 	struct iovec iov_read[3];
4397 	struct iovec iov_write[3];
4398 
4399 	free_clusters = spdk_bs_free_cluster_count(bs);
4400 
4401 	channel = spdk_bs_alloc_io_channel(bs);
4402 	CU_ASSERT(channel != NULL);
4403 
4404 	ut_spdk_blob_opts_init(&opts);
4405 	opts.thin_provision = true;
4406 
4407 	blob = ut_blob_create_and_open(bs, &opts);
4408 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4409 
4410 	CU_ASSERT(blob->active.num_clusters == 0);
4411 
4412 	/* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */
4413 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
4414 	poll_threads();
4415 	CU_ASSERT(g_bserrno == 0);
4416 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4417 	CU_ASSERT(blob->active.num_clusters == 5);
4418 
4419 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
4420 	poll_threads();
4421 	CU_ASSERT(g_bserrno == 0);
4422 	/* Sync must not change anything */
4423 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4424 	CU_ASSERT(blob->active.num_clusters == 5);
4425 
4426 	/* Payload should be all zeros from unallocated clusters */
4427 	memset(payload_read, 0xAA, sizeof(payload_read));
4428 	iov_read[0].iov_base = payload_read;
4429 	iov_read[0].iov_len = 3 * 4096;
4430 	iov_read[1].iov_base = payload_read + 3 * 4096;
4431 	iov_read[1].iov_len = 4 * 4096;
4432 	iov_read[2].iov_base = payload_read + 7 * 4096;
4433 	iov_read[2].iov_len = 3 * 4096;
4434 	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
4435 	poll_threads();
4436 	CU_ASSERT(g_bserrno == 0);
4437 	CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0);
4438 
4439 	memset(payload_write, 0xE5, sizeof(payload_write));
4440 	iov_write[0].iov_base = payload_write;
4441 	iov_write[0].iov_len = 1 * 4096;
4442 	iov_write[1].iov_base = payload_write + 1 * 4096;
4443 	iov_write[1].iov_len = 5 * 4096;
4444 	iov_write[2].iov_base = payload_write + 6 * 4096;
4445 	iov_write[2].iov_len = 4 * 4096;
4446 
4447 	spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
4448 	poll_threads();
4449 	CU_ASSERT(g_bserrno == 0);
4450 
4451 	memset(payload_read, 0xAA, sizeof(payload_read));
4452 	iov_read[0].iov_base = payload_read;
4453 	iov_read[0].iov_len = 3 * 4096;
4454 	iov_read[1].iov_base = payload_read + 3 * 4096;
4455 	iov_read[1].iov_len = 4 * 4096;
4456 	iov_read[2].iov_base = payload_read + 7 * 4096;
4457 	iov_read[2].iov_len = 3 * 4096;
4458 	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
4459 	poll_threads();
4460 	CU_ASSERT(g_bserrno == 0);
4461 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
4462 
4463 	spdk_bs_free_io_channel(channel);
4464 	poll_threads();
4465 
4466 	ut_blob_close_and_delete(bs, blob);
4467 }
4468 
4469 struct iter_ctx {
4470 	int		current_iter;
4471 	spdk_blob_id	blobid[4];
4472 };
4473 
4474 static void
4475 test_iter(void *arg, struct spdk_blob *blob, int bserrno)
4476 {
4477 	struct iter_ctx *iter_ctx = arg;
4478 	spdk_blob_id blobid;
4479 
4480 	CU_ASSERT(bserrno == 0);
4481 	blobid = spdk_blob_get_id(blob);
4482 	CU_ASSERT(blobid == iter_ctx->blobid[iter_ctx->current_iter++]);
4483 }
4484 
4485 static void
4486 bs_load_iter_test(void)
4487 {
4488 	struct spdk_blob_store *bs;
4489 	struct spdk_bs_dev *dev;
4490 	struct iter_ctx iter_ctx = { 0 };
4491 	struct spdk_blob *blob;
4492 	int i, rc;
4493 	struct spdk_bs_opts opts;
4494 
4495 	dev = init_dev();
4496 	spdk_bs_opts_init(&opts, sizeof(opts));
4497 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
4498 
4499 	/* Initialize a new blob store */
4500 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
4501 	poll_threads();
4502 	CU_ASSERT(g_bserrno == 0);
4503 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
4504 	bs = g_bs;
4505 
4506 	for (i = 0; i < 4; i++) {
4507 		blob = ut_blob_create_and_open(bs, NULL);
4508 		iter_ctx.blobid[i] = spdk_blob_get_id(blob);
4509 
4510 		/* Just save the blobid as an xattr for testing purposes. */
4511 		rc = spdk_blob_set_xattr(blob, "blobid", &iter_ctx.blobid[i], sizeof(spdk_blob_id));
4512 		CU_ASSERT(rc == 0);
4513 
4514 		/* Resize the blob */
4515 		spdk_blob_resize(blob, i, blob_op_complete, NULL);
4516 		poll_threads();
4517 		CU_ASSERT(g_bserrno == 0);
4518 
4519 		spdk_blob_close(blob, blob_op_complete, NULL);
4520 		poll_threads();
4521 		CU_ASSERT(g_bserrno == 0);
4522 	}
4523 
4524 	g_bserrno = -1;
4525 	spdk_bs_unload(bs, bs_op_complete, NULL);
4526 	poll_threads();
4527 	CU_ASSERT(g_bserrno == 0);
4528 
4529 	dev = init_dev();
4530 	spdk_bs_opts_init(&opts, sizeof(opts));
4531 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
4532 	opts.iter_cb_fn = test_iter;
4533 	opts.iter_cb_arg = &iter_ctx;
4534 
4535 	/* Test blob iteration during load after a clean shutdown. */
4536 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
4537 	poll_threads();
4538 	CU_ASSERT(g_bserrno == 0);
4539 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
4540 	bs = g_bs;
4541 
4542 	/* Dirty shutdown */
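	/* bs_free() tears down the in-memory blobstore without unloading it, so the
	 * clean flag is never written back and the following load presumably has to
	 * reconstruct the metadata while still invoking the iterate callback for every
	 * blob.
	 */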
4543 	bs_free(bs);
4544 
4545 	dev = init_dev();
4546 	spdk_bs_opts_init(&opts, sizeof(opts));
4547 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
4548 	opts.iter_cb_fn = test_iter;
4549 	iter_ctx.current_iter = 0;
4550 	opts.iter_cb_arg = &iter_ctx;
4551 
4552 	/* Test blob iteration during load after a dirty shutdown. */
4553 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
4554 	poll_threads();
4555 	CU_ASSERT(g_bserrno == 0);
4556 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
4557 	bs = g_bs;
4558 
4559 	spdk_bs_unload(bs, bs_op_complete, NULL);
4560 	poll_threads();
4561 	CU_ASSERT(g_bserrno == 0);
4562 	g_bs = NULL;
4563 }
4564 
4565 static void
4566 blob_snapshot_rw(void)
4567 {
4568 	static const uint8_t zero[10 * 4096] = { 0 };
4569 	struct spdk_blob_store *bs = g_bs;
4570 	struct spdk_blob *blob, *snapshot;
4571 	struct spdk_io_channel *channel;
4572 	struct spdk_blob_opts opts;
4573 	spdk_blob_id blobid, snapshotid;
4574 	uint64_t free_clusters;
4575 	uint64_t cluster_size;
4576 	uint64_t page_size;
4577 	uint8_t payload_read[10 * 4096];
4578 	uint8_t payload_write[10 * 4096];
4579 	uint64_t write_bytes;
4580 	uint64_t read_bytes;
4581 
4582 	free_clusters = spdk_bs_free_cluster_count(bs);
4583 	cluster_size = spdk_bs_get_cluster_size(bs);
4584 	page_size = spdk_bs_get_page_size(bs);
4585 
4586 	channel = spdk_bs_alloc_io_channel(bs);
4587 	CU_ASSERT(channel != NULL);
4588 
4589 	ut_spdk_blob_opts_init(&opts);
4590 	opts.thin_provision = true;
4591 	opts.num_clusters = 5;
4592 
4593 	blob = ut_blob_create_and_open(bs, &opts);
4594 	blobid = spdk_blob_get_id(blob);
4595 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4596 
4597 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
4598 
4599 	memset(payload_read, 0xFF, sizeof(payload_read));
4600 	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
4601 	poll_threads();
4602 	CU_ASSERT(g_bserrno == 0);
4603 	CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0);
4604 
4605 	memset(payload_write, 0xE5, sizeof(payload_write));
4606 	spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
4607 	poll_threads();
4608 	CU_ASSERT(g_bserrno == 0);
4609 	CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs));
4610 
4611 	/* Create snapshot from blob */
4612 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
4613 	poll_threads();
4614 	CU_ASSERT(g_bserrno == 0);
4615 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
4616 	snapshotid = g_blobid;
4617 
4618 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
4619 	poll_threads();
4620 	CU_ASSERT(g_bserrno == 0);
4621 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4622 	snapshot = g_blob;
4623 	CU_ASSERT(snapshot->data_ro == true);
4624 	CU_ASSERT(snapshot->md_ro == true);
4625 
4626 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5);
4627 
4628 	write_bytes = g_dev_write_bytes;
4629 	read_bytes = g_dev_read_bytes;
4630 
4631 	memset(payload_write, 0xAA, sizeof(payload_write));
4632 	spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
4633 	poll_threads();
4634 	CU_ASSERT(g_bserrno == 0);
4635 	CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs));
4636 
4637 	/* For a clone we need to allocate and copy one cluster, update one page of metadata
4638 	 * and then write 10 pages of payload.
4639 	 */
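	/* The extra cluster_size of device reads checked below comes from the
	 * copy-on-write read of the original cluster from the backing snapshot.
	 */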
4640 	if (g_use_extent_table) {
4641 		/* Add one more page for EXTENT_PAGE write */
4642 		CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 12 + cluster_size);
4643 	} else {
4644 		CU_ASSERT(g_dev_write_bytes - write_bytes == page_size * 11 + cluster_size);
4645 	}
4646 	CU_ASSERT(g_dev_read_bytes - read_bytes == cluster_size);
4647 
4648 	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
4649 	poll_threads();
4650 	CU_ASSERT(g_bserrno == 0);
4651 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
4652 
4653 	/* Data on snapshot should not change after write to clone */
4654 	memset(payload_write, 0xE5, sizeof(payload_write));
4655 	spdk_blob_io_read(snapshot, channel, payload_read, 4, 10, blob_op_complete, NULL);
4656 	poll_threads();
4657 	CU_ASSERT(g_bserrno == 0);
4658 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
4659 
4660 	ut_blob_close_and_delete(bs, blob);
4661 	ut_blob_close_and_delete(bs, snapshot);
4662 
4663 	spdk_bs_free_io_channel(channel);
4664 	poll_threads();
4665 	g_blob = NULL;
4666 	g_blobid = 0;
4667 }
4668 
4669 static void
4670 blob_snapshot_rw_iov(void)
4671 {
4672 	static const uint8_t zero[10 * 4096] = { 0 };
4673 	struct spdk_blob_store *bs = g_bs;
4674 	struct spdk_blob *blob, *snapshot;
4675 	struct spdk_io_channel *channel;
4676 	struct spdk_blob_opts opts;
4677 	spdk_blob_id blobid, snapshotid;
4678 	uint64_t free_clusters;
4679 	uint8_t payload_read[10 * 4096];
4680 	uint8_t payload_write[10 * 4096];
4681 	struct iovec iov_read[3];
4682 	struct iovec iov_write[3];
4683 
4684 	free_clusters = spdk_bs_free_cluster_count(bs);
4685 
4686 	channel = spdk_bs_alloc_io_channel(bs);
4687 	CU_ASSERT(channel != NULL);
4688 
4689 	ut_spdk_blob_opts_init(&opts);
4690 	opts.thin_provision = true;
4691 	opts.num_clusters = 5;
4692 
4693 	blob = ut_blob_create_and_open(bs, &opts);
4694 	blobid = spdk_blob_get_id(blob);
4695 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4696 
4697 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
4698 
4699 	/* Create snapshot from blob */
4700 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
4701 	poll_threads();
4702 	CU_ASSERT(g_bserrno == 0);
4703 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
4704 	snapshotid = g_blobid;
4705 
4706 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
4707 	poll_threads();
4708 	CU_ASSERT(g_bserrno == 0);
4709 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4710 	snapshot = g_blob;
4711 	CU_ASSERT(snapshot->data_ro == true);
4712 	CU_ASSERT(snapshot->md_ro == true);
4713 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5);
4714 
4715 	/* Payload should be all zeros from unallocated clusters */
4716 	memset(payload_read, 0xAA, sizeof(payload_read));
4717 	iov_read[0].iov_base = payload_read;
4718 	iov_read[0].iov_len = 3 * 4096;
4719 	iov_read[1].iov_base = payload_read + 3 * 4096;
4720 	iov_read[1].iov_len = 4 * 4096;
4721 	iov_read[2].iov_base = payload_read + 7 * 4096;
4722 	iov_read[2].iov_len = 3 * 4096;
4723 	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
4724 	poll_threads();
4725 	CU_ASSERT(g_bserrno == 0);
4726 	CU_ASSERT(memcmp(zero, payload_read, 10 * 4096) == 0);
4727 
4728 	memset(payload_write, 0xE5, sizeof(payload_write));
4729 	iov_write[0].iov_base = payload_write;
4730 	iov_write[0].iov_len = 1 * 4096;
4731 	iov_write[1].iov_base = payload_write + 1 * 4096;
4732 	iov_write[1].iov_len = 5 * 4096;
4733 	iov_write[2].iov_base = payload_write + 6 * 4096;
4734 	iov_write[2].iov_len = 4 * 4096;
4735 
4736 	spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
4737 	poll_threads();
4738 	CU_ASSERT(g_bserrno == 0);
4739 
4740 	memset(payload_read, 0xAA, sizeof(payload_read));
4741 	iov_read[0].iov_base = payload_read;
4742 	iov_read[0].iov_len = 3 * 4096;
4743 	iov_read[1].iov_base = payload_read + 3 * 4096;
4744 	iov_read[1].iov_len = 4 * 4096;
4745 	iov_read[2].iov_base = payload_read + 7 * 4096;
4746 	iov_read[2].iov_len = 3 * 4096;
4747 	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
4748 	poll_threads();
4749 	CU_ASSERT(g_bserrno == 0);
4750 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * 4096) == 0);
4751 
4752 	spdk_bs_free_io_channel(channel);
4753 	poll_threads();
4754 
4755 	ut_blob_close_and_delete(bs, blob);
4756 	ut_blob_close_and_delete(bs, snapshot);
4757 }
4758 
4759 /**
4760  * Inflate / decouple parent rw unit tests.
4761  *
4762  * --------------
4763  * original blob:         0         1         2         3         4
4764  *                   ,---------+---------+---------+---------+---------.
4765  *         snapshot  |xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|    -    |
4766  *                   +---------+---------+---------+---------+---------+
4767  *         snapshot2 |    -    |yyyyyyyyy|    -    |yyyyyyyyy|    -    |
4768  *                   +---------+---------+---------+---------+---------+
4769  *         blob      |    -    |zzzzzzzzz|    -    |    -    |    -    |
4770  *                   '---------+---------+---------+---------+---------'
4771  *                   .         .         .         .         .         .
4772  * --------          .         .         .         .         .         .
4773  * inflate:          .         .         .         .         .         .
4774  *                   ,---------+---------+---------+---------+---------.
4775  *         blob      |xxxxxxxxx|zzzzzzzzz|xxxxxxxxx|yyyyyyyyy|000000000|
4776  *                   '---------+---------+---------+---------+---------'
4777  *
4778  *         NOTE: needs to allocate 4 clusters, thin provisioning removed, dependency
4779  *               on snapshot2 and snapshot removed .         .         .
4780  *                   .         .         .         .         .         .
4781  * ----------------  .         .         .         .         .         .
4782  * decouple parent:  .         .         .         .         .         .
4783  *                   ,---------+---------+---------+---------+---------.
4784  *         snapshot  |xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|    -    |
4785  *                   +---------+---------+---------+---------+---------+
4786  *         blob      |    -    |zzzzzzzzz|    -    |yyyyyyyyy|    -    |
4787  *                   '---------+---------+---------+---------+---------'
4788  *
4789  *         NOTE: needs to allocate 1 cluster, 3 clusters unallocated, dependency
4790  *               on snapshot2 removed and on snapshot still exists. Snapshot2
4791  *               should remain a clone of snapshot.
4792  */
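/* Both paths are exercised by _blob_inflate_rw() below, essentially:
 *
 *     if (!decouple_parent) {
 *         spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
 *     } else {
 *         spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
 *     }
 *     poll_threads();
 *
 * Inflate copies (or zero-fills) every backing cluster and drops the parent
 * entirely, while decouple parent copies only the clusters allocated in the
 * immediate parent and leaves the blob as a clone of its grandparent (here:
 * snapshot).
 */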
4793 static void
4794 _blob_inflate_rw(bool decouple_parent)
4795 {
4796 	struct spdk_blob_store *bs = g_bs;
4797 	struct spdk_blob *blob, *snapshot, *snapshot2;
4798 	struct spdk_io_channel *channel;
4799 	struct spdk_blob_opts opts;
4800 	spdk_blob_id blobid, snapshotid, snapshot2id;
4801 	uint64_t free_clusters;
4802 	uint64_t cluster_size;
4803 
4804 	uint64_t payload_size;
4805 	uint8_t *payload_read;
4806 	uint8_t *payload_write;
4807 	uint8_t *payload_clone;
4808 
4809 	uint64_t pages_per_cluster;
4810 	uint64_t pages_per_payload;
4811 
4812 	int i;
4813 	spdk_blob_id ids[2];
4814 	size_t count;
4815 
4816 	free_clusters = spdk_bs_free_cluster_count(bs);
4817 	cluster_size = spdk_bs_get_cluster_size(bs);
4818 	pages_per_cluster = cluster_size / spdk_bs_get_page_size(bs);
4819 	pages_per_payload = pages_per_cluster * 5;
4820 
4821 	payload_size = cluster_size * 5;
4822 
4823 	payload_read = malloc(payload_size);
4824 	SPDK_CU_ASSERT_FATAL(payload_read != NULL);
4825 
4826 	payload_write = malloc(payload_size);
4827 	SPDK_CU_ASSERT_FATAL(payload_write != NULL);
4828 
4829 	payload_clone = malloc(payload_size);
4830 	SPDK_CU_ASSERT_FATAL(payload_clone != NULL);
4831 
4832 	channel = spdk_bs_alloc_io_channel(bs);
4833 	SPDK_CU_ASSERT_FATAL(channel != NULL);
4834 
4835 	/* Create blob */
4836 	ut_spdk_blob_opts_init(&opts);
4837 	opts.thin_provision = true;
4838 	opts.num_clusters = 5;
4839 
4840 	blob = ut_blob_create_and_open(bs, &opts);
4841 	blobid = spdk_blob_get_id(blob);
4842 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4843 
4844 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
4845 
4846 	/* 1) Initial read should return zeroed payload */
4847 	memset(payload_read, 0xFF, payload_size);
4848 	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload,
4849 			  blob_op_complete, NULL);
4850 	poll_threads();
4851 	CU_ASSERT(g_bserrno == 0);
4852 	CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size));
4853 
4854 	/* Fill the whole blob with a pattern, except the last cluster (to be sure it
4855 	 * isn't allocated) */
4856 	memset(payload_write, 0xE5, payload_size - cluster_size);
4857 	spdk_blob_io_write(blob, channel, payload_write, 0, pages_per_payload -
4858 			   pages_per_cluster, blob_op_complete, NULL);
4859 	poll_threads();
4860 	CU_ASSERT(g_bserrno == 0);
4861 	CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs));
4862 
4863 	/* 2) Create snapshot from blob (first level) */
4864 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
4865 	poll_threads();
4866 	CU_ASSERT(g_bserrno == 0);
4867 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
4868 	snapshotid = g_blobid;
4869 
4870 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
4871 	poll_threads();
4872 	CU_ASSERT(g_bserrno == 0);
4873 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4874 	snapshot = g_blob;
4875 	CU_ASSERT(snapshot->data_ro == true);
4876 	CU_ASSERT(snapshot->md_ro == true);
4877 
4878 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5);
4879 
4880 	/* Write every second cluster with a pattern.
4881 	 *
4882 	 * The last cluster shouldn't be written, to be sure that neither the snapshot
4883 	 * nor the clone allocates it.
4884 	 *
4885 	 * payload_clone stores the expected result of a read from "blob" at this point
4886 	 * and is used only to check data consistency on the clone before and after
4887 	 * inflation. Initially we fill it with the backing snapshot's pattern written
4888 	 * above.
4889 	 */
4890 	memset(payload_clone, 0xE5, payload_size - cluster_size);
4891 	memset(payload_clone + payload_size - cluster_size, 0x00, cluster_size);
4892 	memset(payload_write, 0xAA, payload_size);
4893 	for (i = 1; i < 5; i += 2) {
4894 		spdk_blob_io_write(blob, channel, payload_write, i * pages_per_cluster,
4895 				   pages_per_cluster, blob_op_complete, NULL);
4896 		poll_threads();
4897 		CU_ASSERT(g_bserrno == 0);
4898 
4899 		/* Update expected result */
4900 		memcpy(payload_clone + (cluster_size * i), payload_write,
4901 		       cluster_size);
4902 	}
4903 	CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs));
4904 
4905 	/* Check data consistency on clone */
4906 	memset(payload_read, 0xFF, payload_size);
4907 	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload,
4908 			  blob_op_complete, NULL);
4909 	poll_threads();
4910 	CU_ASSERT(g_bserrno == 0);
4911 	CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0);
4912 
4913 	/* 3) Create second levels snapshot from blob */
4914 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
4915 	poll_threads();
4916 	CU_ASSERT(g_bserrno == 0);
4917 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
4918 	snapshot2id = g_blobid;
4919 
4920 	spdk_bs_open_blob(bs, snapshot2id, blob_op_with_handle_complete, NULL);
4921 	poll_threads();
4922 	CU_ASSERT(g_bserrno == 0);
4923 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4924 	snapshot2 = g_blob;
4925 	CU_ASSERT(snapshot2->data_ro == true);
4926 	CU_ASSERT(snapshot2->md_ro == true);
4927 
4928 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot2) == 5);
4929 
4930 	CU_ASSERT(snapshot2->parent_id == snapshotid);
4931 
4932 	/* Write one cluster on the top level blob. This cluster (1) covers a cluster
4933 	 * already allocated in snapshot2, so it shouldn't be inflated
4934 	 * at all */
4935 	spdk_blob_io_write(blob, channel, payload_write, pages_per_cluster,
4936 			   pages_per_cluster, blob_op_complete, NULL);
4937 	poll_threads();
4938 	CU_ASSERT(g_bserrno == 0);
4939 
4940 	/* Update expected result */
4941 	memcpy(payload_clone + cluster_size, payload_write, cluster_size);
4942 
4943 	/* Check data consistency on clone */
4944 	memset(payload_read, 0xFF, payload_size);
4945 	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload,
4946 			  blob_op_complete, NULL);
4947 	poll_threads();
4948 	CU_ASSERT(g_bserrno == 0);
4949 	CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0);
4950 
4951 
4952 	/* Close all blobs */
4953 	spdk_blob_close(blob, blob_op_complete, NULL);
4954 	poll_threads();
4955 	CU_ASSERT(g_bserrno == 0);
4956 
4957 	spdk_blob_close(snapshot2, blob_op_complete, NULL);
4958 	poll_threads();
4959 	CU_ASSERT(g_bserrno == 0);
4960 
4961 	spdk_blob_close(snapshot, blob_op_complete, NULL);
4962 	poll_threads();
4963 	CU_ASSERT(g_bserrno == 0);
4964 
4965 	/* Check snapshot-clone relations */
4966 	count = 2;
4967 	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);
4968 	CU_ASSERT(count == 1);
4969 	CU_ASSERT(ids[0] == snapshot2id);
4970 
4971 	count = 2;
4972 	CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0);
4973 	CU_ASSERT(count == 1);
4974 	CU_ASSERT(ids[0] == blobid);
4975 
4976 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshot2id);
4977 
4978 	free_clusters = spdk_bs_free_cluster_count(bs);
4979 	if (!decouple_parent) {
4980 		/* Do full blob inflation */
4981 		spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
4982 		poll_threads();
4983 		CU_ASSERT(g_bserrno == 0);
4984 
4985 		/* All clusters should be inflated (except the one already
4986 		 * allocated in the top level blob) */
4987 		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 4);
4988 
4989 		/* Check if relation tree updated correctly */
4990 		count = 2;
4991 		CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);
4992 
4993 		/* snapshotid has one clone */
4994 		CU_ASSERT(count == 1);
4995 		CU_ASSERT(ids[0] == snapshot2id);
4996 
4997 		/* snapshot2id has no clones */
4998 		count = 2;
4999 		CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0);
5000 		CU_ASSERT(count == 0);
5001 
5002 		CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID);
5003 	} else {
5004 		/* Decouple parent of blob */
5005 		spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
5006 		poll_threads();
5007 		CU_ASSERT(g_bserrno == 0);
5008 
5009 		/* Only one cluster from the parent should be inflated (the second
5010 		 * one is covered by a cluster written on the top level blob and
5011 		 * already allocated) */
5012 		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 1);
5013 
5014 		/* Check if relation tree updated correctly */
5015 		count = 2;
5016 		CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);
5017 
5018 		/* snapshotid has two clones now */
5019 		CU_ASSERT(count == 2);
5020 		CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
5021 		CU_ASSERT(ids[0] == snapshot2id || ids[1] == snapshot2id);
5022 
5023 		/* snapshot2id has no clones */
5024 		count = 2;
5025 		CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0);
5026 		CU_ASSERT(count == 0);
5027 
5028 		CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
5029 	}
5030 
5031 	/* Try to delete snapshot2 (should pass) */
5032 	spdk_bs_delete_blob(bs, snapshot2id, blob_op_complete, NULL);
5033 	poll_threads();
5034 	CU_ASSERT(g_bserrno == 0);
5035 
5036 	/* Try to delete base snapshot */
5037 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
5038 	poll_threads();
5039 	CU_ASSERT(g_bserrno == 0);
5040 
5041 	/* Reopen blob after snapshot deletion */
5042 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
5043 	poll_threads();
5044 	CU_ASSERT(g_bserrno == 0);
5045 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5046 	blob = g_blob;
5047 
5048 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
5049 
5050 	/* Check data consistency on inflated blob */
5051 	memset(payload_read, 0xFF, payload_size);
5052 	spdk_blob_io_read(blob, channel, payload_read, 0, pages_per_payload,
5053 			  blob_op_complete, NULL);
5054 	poll_threads();
5055 	CU_ASSERT(g_bserrno == 0);
5056 	CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0);
5057 
5058 	spdk_bs_free_io_channel(channel);
5059 	poll_threads();
5060 
5061 	free(payload_read);
5062 	free(payload_write);
5063 	free(payload_clone);
5064 
5065 	ut_blob_close_and_delete(bs, blob);
5066 }
5067 
5068 static void
5069 blob_inflate_rw(void)
5070 {
5071 	_blob_inflate_rw(false);
5072 	_blob_inflate_rw(true);
5073 }
5074 
5075 /**
5076  * Snapshot-clones relation test
5077  *
5078  *         snapshot
5079  *            |
5080  *      +-----+-----+
5081  *      |           |
5082  *   blob(ro)   snapshot2
5083  *      |           |
5084  *   clone2      clone
5085  */
5086 static void
5087 blob_relations(void)
5088 {
5089 	struct spdk_blob_store *bs;
5090 	struct spdk_bs_dev *dev;
5091 	struct spdk_bs_opts bs_opts;
5092 	struct spdk_blob_opts opts;
5093 	struct spdk_blob *blob, *snapshot, *snapshot2, *clone, *clone2;
5094 	spdk_blob_id blobid, cloneid, snapshotid, cloneid2, snapshotid2;
5095 	int rc;
5096 	size_t count;
5097 	spdk_blob_id ids[10] = {};
5098 
5099 	dev = init_dev();
5100 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
5101 	snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");
5102 
5103 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
5104 	poll_threads();
5105 	CU_ASSERT(g_bserrno == 0);
5106 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
5107 	bs = g_bs;
5108 
5109 	/* 1. Create blob with 10 clusters */
5110 
5111 	ut_spdk_blob_opts_init(&opts);
5112 	opts.num_clusters = 10;
5113 
5114 	blob = ut_blob_create_and_open(bs, &opts);
5115 	blobid = spdk_blob_get_id(blob);
5116 
5117 	CU_ASSERT(!spdk_blob_is_read_only(blob));
5118 	CU_ASSERT(!spdk_blob_is_snapshot(blob));
5119 	CU_ASSERT(!spdk_blob_is_clone(blob));
5120 	CU_ASSERT(!spdk_blob_is_thin_provisioned(blob));
5121 
5122 	/* blob should not have underlying snapshot nor clones */
5123 	CU_ASSERT(blob->parent_id == SPDK_BLOBID_INVALID);
5124 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID);
5125 	count = SPDK_COUNTOF(ids);
5126 	rc = spdk_blob_get_clones(bs, blobid, ids, &count);
5127 	CU_ASSERT(rc == 0);
5128 	CU_ASSERT(count == 0);
5129 
5130 
5131 	/* 2. Create snapshot */
5132 
5133 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5134 	poll_threads();
5135 	CU_ASSERT(g_bserrno == 0);
5136 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5137 	snapshotid = g_blobid;
5138 
5139 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
5140 	poll_threads();
5141 	CU_ASSERT(g_bserrno == 0);
5142 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5143 	snapshot = g_blob;
5144 
5145 	CU_ASSERT(spdk_blob_is_read_only(snapshot));
5146 	CU_ASSERT(spdk_blob_is_snapshot(snapshot));
5147 	CU_ASSERT(!spdk_blob_is_clone(snapshot));
5148 	CU_ASSERT(snapshot->parent_id == SPDK_BLOBID_INVALID);
5149 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid) == SPDK_BLOBID_INVALID);
5150 
5151 	/* Check if original blob is converted to the clone of snapshot */
5152 	CU_ASSERT(!spdk_blob_is_read_only(blob));
5153 	CU_ASSERT(!spdk_blob_is_snapshot(blob));
5154 	CU_ASSERT(spdk_blob_is_clone(blob));
5155 	CU_ASSERT(spdk_blob_is_thin_provisioned(blob));
5156 	CU_ASSERT(blob->parent_id == snapshotid);
5157 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
5158 
5159 	count = SPDK_COUNTOF(ids);
5160 	rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
5161 	CU_ASSERT(rc == 0);
5162 	CU_ASSERT(count == 1);
5163 	CU_ASSERT(ids[0] == blobid);
5164 
5165 
5166 	/* 3. Create clone from snapshot */
5167 
5168 	spdk_bs_create_clone(bs, snapshotid, NULL, blob_op_with_id_complete, NULL);
5169 	poll_threads();
5170 	CU_ASSERT(g_bserrno == 0);
5171 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5172 	cloneid = g_blobid;
5173 
5174 	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
5175 	poll_threads();
5176 	CU_ASSERT(g_bserrno == 0);
5177 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5178 	clone = g_blob;
5179 
5180 	CU_ASSERT(!spdk_blob_is_read_only(clone));
5181 	CU_ASSERT(!spdk_blob_is_snapshot(clone));
5182 	CU_ASSERT(spdk_blob_is_clone(clone));
5183 	CU_ASSERT(spdk_blob_is_thin_provisioned(clone));
5184 	CU_ASSERT(clone->parent_id == snapshotid);
5185 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid);
5186 
5187 	count = SPDK_COUNTOF(ids);
5188 	rc = spdk_blob_get_clones(bs, cloneid, ids, &count);
5189 	CU_ASSERT(rc == 0);
5190 	CU_ASSERT(count == 0);
5191 
5192 	/* Check if clone is on the snapshot's list */
5193 	count = SPDK_COUNTOF(ids);
5194 	rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
5195 	CU_ASSERT(rc == 0);
5196 	CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
5197 	CU_ASSERT(ids[0] == cloneid || ids[1] == cloneid);
5198 
5199 
5200 	/* 4. Create snapshot of the clone */
5201 
5202 	spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL);
5203 	poll_threads();
5204 	CU_ASSERT(g_bserrno == 0);
5205 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5206 	snapshotid2 = g_blobid;
5207 
5208 	spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL);
5209 	poll_threads();
5210 	CU_ASSERT(g_bserrno == 0);
5211 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5212 	snapshot2 = g_blob;
5213 
5214 	CU_ASSERT(spdk_blob_is_read_only(snapshot2));
5215 	CU_ASSERT(spdk_blob_is_snapshot(snapshot2));
5216 	CU_ASSERT(spdk_blob_is_clone(snapshot2));
5217 	CU_ASSERT(snapshot2->parent_id == snapshotid);
5218 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid);
5219 
5220 	/* Check if clone is converted to the clone of snapshot2 and snapshot2
5221 	 * is a child of snapshot */
5222 	CU_ASSERT(!spdk_blob_is_read_only(clone));
5223 	CU_ASSERT(!spdk_blob_is_snapshot(clone));
5224 	CU_ASSERT(spdk_blob_is_clone(clone));
5225 	CU_ASSERT(spdk_blob_is_thin_provisioned(clone));
5226 	CU_ASSERT(clone->parent_id == snapshotid2);
5227 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2);
5228 
5229 	count = SPDK_COUNTOF(ids);
5230 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
5231 	CU_ASSERT(rc == 0);
5232 	CU_ASSERT(count == 1);
5233 	CU_ASSERT(ids[0] == cloneid);
5234 
5235 
5236 	/* 5. Try to create clone from read only blob */
5237 
5238 	/* Mark blob as read only */
5239 	spdk_blob_set_read_only(blob);
5240 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
5241 	poll_threads();
5242 	CU_ASSERT(g_bserrno == 0);
5243 
5244 	/* Check if previously created blob is read only clone */
5245 	CU_ASSERT(spdk_blob_is_read_only(blob));
5246 	CU_ASSERT(!spdk_blob_is_snapshot(blob));
5247 	CU_ASSERT(spdk_blob_is_clone(blob));
5248 	CU_ASSERT(spdk_blob_is_thin_provisioned(blob));
5249 
5250 	/* Create clone from read only blob */
5251 	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5252 	poll_threads();
5253 	CU_ASSERT(g_bserrno == 0);
5254 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5255 	cloneid2 = g_blobid;
5256 
5257 	spdk_bs_open_blob(bs, cloneid2, blob_op_with_handle_complete, NULL);
5258 	poll_threads();
5259 	CU_ASSERT(g_bserrno == 0);
5260 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5261 	clone2 = g_blob;
5262 
5263 	CU_ASSERT(!spdk_blob_is_read_only(clone2));
5264 	CU_ASSERT(!spdk_blob_is_snapshot(clone2));
5265 	CU_ASSERT(spdk_blob_is_clone(clone2));
5266 	CU_ASSERT(spdk_blob_is_thin_provisioned(clone2));
5267 
5268 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid);
5269 
5270 	count = SPDK_COUNTOF(ids);
5271 	rc = spdk_blob_get_clones(bs, blobid, ids, &count);
5272 	CU_ASSERT(rc == 0);
5273 
5274 	CU_ASSERT(count == 1);
5275 	CU_ASSERT(ids[0] == cloneid2);
5276 
5277 	/* Close blobs */
5278 
5279 	spdk_blob_close(clone2, blob_op_complete, NULL);
5280 	poll_threads();
5281 	CU_ASSERT(g_bserrno == 0);
5282 
5283 	spdk_blob_close(blob, blob_op_complete, NULL);
5284 	poll_threads();
5285 	CU_ASSERT(g_bserrno == 0);
5286 
5287 	spdk_blob_close(clone, blob_op_complete, NULL);
5288 	poll_threads();
5289 	CU_ASSERT(g_bserrno == 0);
5290 
5291 	spdk_blob_close(snapshot, blob_op_complete, NULL);
5292 	poll_threads();
5293 	CU_ASSERT(g_bserrno == 0);
5294 
5295 	spdk_blob_close(snapshot2, blob_op_complete, NULL);
5296 	poll_threads();
5297 	CU_ASSERT(g_bserrno == 0);
5298 
5299 	/* Try to delete snapshot with more than 1 clone */
5300 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
5301 	poll_threads();
5302 	CU_ASSERT(g_bserrno != 0);
5303 
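	/* Reload the blobstore to verify that the snapshot-clone relations are rebuilt from persisted metadata */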
5304 	ut_bs_reload(&bs, &bs_opts);
5305 
5306 	/* NULL ids array should return number of clones in count */
5307 	count = SPDK_COUNTOF(ids);
5308 	rc = spdk_blob_get_clones(bs, snapshotid, NULL, &count);
5309 	CU_ASSERT(rc == -ENOMEM);
5310 	CU_ASSERT(count == 2);
5311 
5312 	/* incorrect array size */
5313 	count = 1;
5314 	rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
5315 	CU_ASSERT(rc == -ENOMEM);
5316 	CU_ASSERT(count == 2);
5317 
5318 
5319 	/* Verify structure of loaded blob store */
5320 
5321 	/* snapshot */
5322 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid) == SPDK_BLOBID_INVALID);
5323 
5324 	count = SPDK_COUNTOF(ids);
5325 	rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
5326 	CU_ASSERT(rc == 0);
5327 	CU_ASSERT(count == 2);
5328 	CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
5329 	CU_ASSERT(ids[0] == snapshotid2 || ids[1] == snapshotid2);
5330 
5331 	/* blob */
5332 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
5333 	count = SPDK_COUNTOF(ids);
5334 	rc = spdk_blob_get_clones(bs, blobid, ids, &count);
5335 	CU_ASSERT(rc == 0);
5336 	CU_ASSERT(count == 1);
5337 	CU_ASSERT(ids[0] == cloneid2);
5338 
5339 	/* clone */
5340 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2);
5341 	count = SPDK_COUNTOF(ids);
5342 	rc = spdk_blob_get_clones(bs, cloneid, ids, &count);
5343 	CU_ASSERT(rc == 0);
5344 	CU_ASSERT(count == 0);
5345 
5346 	/* snapshot2 */
5347 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid);
5348 	count = SPDK_COUNTOF(ids);
5349 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
5350 	CU_ASSERT(rc == 0);
5351 	CU_ASSERT(count == 1);
5352 	CU_ASSERT(ids[0] == cloneid);
5353 
5354 	/* clone2 */
5355 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid);
5356 	count = SPDK_COUNTOF(ids);
5357 	rc = spdk_blob_get_clones(bs, cloneid2, ids, &count);
5358 	CU_ASSERT(rc == 0);
5359 	CU_ASSERT(count == 0);
5360 
5361 	/* Try to delete a blob that the user should not be able to remove */
5362 
5363 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
5364 	poll_threads();
5365 	CU_ASSERT(g_bserrno != 0);
5366 
5367 	/* Remove all blobs */
5368 
5369 	spdk_bs_delete_blob(bs, cloneid, blob_op_complete, NULL);
5370 	poll_threads();
5371 	CU_ASSERT(g_bserrno == 0);
5372 
5373 	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
5374 	poll_threads();
5375 	CU_ASSERT(g_bserrno == 0);
5376 
5377 	spdk_bs_delete_blob(bs, cloneid2, blob_op_complete, NULL);
5378 	poll_threads();
5379 	CU_ASSERT(g_bserrno == 0);
5380 
5381 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
5382 	poll_threads();
5383 	CU_ASSERT(g_bserrno == 0);
5384 
5385 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
5386 	poll_threads();
5387 	CU_ASSERT(g_bserrno == 0);
5388 
5389 	spdk_bs_unload(bs, bs_op_complete, NULL);
5390 	poll_threads();
5391 	CU_ASSERT(g_bserrno == 0);
5392 
5393 	g_bs = NULL;
5394 }
5395 
5396 /**
5397  * Snapshot-clones relation test 2
5398  *
5399  *         snapshot1
5400  *            |
5401  *         snapshot2
5402  *            |
5403  *      +-----+-----+
5404  *      |           |
5405  *   blob(ro)   snapshot3
5406  *      |           |
5407  *      |       snapshot4
5408  *      |        |     |
5409  *   clone2   clone  clone3
5410  */
5411 static void
5412 blob_relations2(void)
5413 {
5414 	struct spdk_blob_store *bs;
5415 	struct spdk_bs_dev *dev;
5416 	struct spdk_bs_opts bs_opts;
5417 	struct spdk_blob_opts opts;
5418 	struct spdk_blob *blob, *snapshot1, *snapshot2, *snapshot3, *snapshot4, *clone, *clone2;
5419 	spdk_blob_id blobid, snapshotid1, snapshotid2, snapshotid3, snapshotid4, cloneid, cloneid2,
5420 		     cloneid3;
5421 	int rc;
5422 	size_t count;
5423 	spdk_blob_id ids[10] = {};
5424 
5425 	dev = init_dev();
5426 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
5427 	snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");
5428 
5429 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
5430 	poll_threads();
5431 	CU_ASSERT(g_bserrno == 0);
5432 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
5433 	bs = g_bs;
5434 
5435 	/* 1. Create blob with 10 clusters */
5436 
5437 	ut_spdk_blob_opts_init(&opts);
5438 	opts.num_clusters = 10;
5439 
5440 	blob = ut_blob_create_and_open(bs, &opts);
5441 	blobid = spdk_blob_get_id(blob);
5442 
5443 	/* 2. Create snapshot1 */
5444 
5445 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5446 	poll_threads();
5447 	CU_ASSERT(g_bserrno == 0);
5448 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5449 	snapshotid1 = g_blobid;
5450 
5451 	spdk_bs_open_blob(bs, snapshotid1, blob_op_with_handle_complete, NULL);
5452 	poll_threads();
5453 	CU_ASSERT(g_bserrno == 0);
5454 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5455 	snapshot1 = g_blob;
5456 
5457 	CU_ASSERT(snapshot1->parent_id == SPDK_BLOBID_INVALID);
5458 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid1) == SPDK_BLOBID_INVALID);
5459 
5460 	CU_ASSERT(blob->parent_id == snapshotid1);
5461 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid1);
5462 
5463 	/* Check if blob is the clone of snapshot1 */
5464 	CU_ASSERT(blob->parent_id == snapshotid1);
5465 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid1);
5466 
5467 	count = SPDK_COUNTOF(ids);
5468 	rc = spdk_blob_get_clones(bs, snapshotid1, ids, &count);
5469 	CU_ASSERT(rc == 0);
5470 	CU_ASSERT(count == 1);
5471 	CU_ASSERT(ids[0] == blobid);
5472 
5473 	/* 3. Create another snapshot */
5474 
5475 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5476 	poll_threads();
5477 	CU_ASSERT(g_bserrno == 0);
5478 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5479 	snapshotid2 = g_blobid;
5480 
5481 	spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL);
5482 	poll_threads();
5483 	CU_ASSERT(g_bserrno == 0);
5484 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5485 	snapshot2 = g_blob;
5486 
5487 	CU_ASSERT(spdk_blob_is_clone(snapshot2));
5488 	CU_ASSERT(snapshot2->parent_id == snapshotid1);
5489 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid1);
5490 
5491 	/* Check if snapshot2 is the clone of snapshot1 and blob
5492 	 * is a child of snapshot2 */
5493 	CU_ASSERT(blob->parent_id == snapshotid2);
5494 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid2);
5495 
5496 	count = SPDK_COUNTOF(ids);
5497 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
5498 	CU_ASSERT(rc == 0);
5499 	CU_ASSERT(count == 1);
5500 	CU_ASSERT(ids[0] == blobid);
5501 
5502 	/* 4. Create clone from snapshot */
5503 
5504 	spdk_bs_create_clone(bs, snapshotid2, NULL, blob_op_with_id_complete, NULL);
5505 	poll_threads();
5506 	CU_ASSERT(g_bserrno == 0);
5507 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5508 	cloneid = g_blobid;
5509 
5510 	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
5511 	poll_threads();
5512 	CU_ASSERT(g_bserrno == 0);
5513 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5514 	clone = g_blob;
5515 
5516 	CU_ASSERT(clone->parent_id == snapshotid2);
5517 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2);
5518 
5519 	/* Check if clone is on the snapshot's list */
5520 	count = SPDK_COUNTOF(ids);
5521 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
5522 	CU_ASSERT(rc == 0);
5523 	CU_ASSERT(count == 2);
5524 	CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
5525 	CU_ASSERT(ids[0] == cloneid || ids[1] == cloneid);
5526 
5527 	/* 5. Create snapshot of the clone */
5528 
5529 	spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL);
5530 	poll_threads();
5531 	CU_ASSERT(g_bserrno == 0);
5532 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5533 	snapshotid3 = g_blobid;
5534 
5535 	spdk_bs_open_blob(bs, snapshotid3, blob_op_with_handle_complete, NULL);
5536 	poll_threads();
5537 	CU_ASSERT(g_bserrno == 0);
5538 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5539 	snapshot3 = g_blob;
5540 
5541 	CU_ASSERT(snapshot3->parent_id == snapshotid2);
5542 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid3) == snapshotid2);
5543 
5544 	/* Check if clone is converted to the clone of snapshot3 and snapshot3
5545 	 * is a child of snapshot2 */
5546 	CU_ASSERT(clone->parent_id == snapshotid3);
5547 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3);
5548 
5549 	count = SPDK_COUNTOF(ids);
5550 	rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count);
5551 	CU_ASSERT(rc == 0);
5552 	CU_ASSERT(count == 1);
5553 	CU_ASSERT(ids[0] == cloneid);
5554 
5555 	/* 6. Create another snapshot of the clone */
5556 
5557 	spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL);
5558 	poll_threads();
5559 	CU_ASSERT(g_bserrno == 0);
5560 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5561 	snapshotid4 = g_blobid;
5562 
5563 	spdk_bs_open_blob(bs, snapshotid4, blob_op_with_handle_complete, NULL);
5564 	poll_threads();
5565 	CU_ASSERT(g_bserrno == 0);
5566 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5567 	snapshot4 = g_blob;
5568 
5569 	CU_ASSERT(snapshot4->parent_id == snapshotid3);
5570 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid4) == snapshotid3);
5571 
5572 	/* Check if clone is converted to the clone of snapshot4 and snapshot4
5573 	 * is a child of snapshot3 */
5574 	CU_ASSERT(clone->parent_id == snapshotid4);
5575 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid4);
5576 
5577 	count = SPDK_COUNTOF(ids);
5578 	rc = spdk_blob_get_clones(bs, snapshotid4, ids, &count);
5579 	CU_ASSERT(rc == 0);
5580 	CU_ASSERT(count == 1);
5581 	CU_ASSERT(ids[0] == cloneid);
5582 
5583 	/* 7. Remove snapshot 4 */
5584 
5585 	ut_blob_close_and_delete(bs, snapshot4);
5586 
5587 	/* Check if relations are back to the state from before creating snapshot 4 */
5588 	CU_ASSERT(clone->parent_id == snapshotid3);
5589 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3);
5590 
5591 	count = SPDK_COUNTOF(ids);
5592 	rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count);
5593 	CU_ASSERT(rc == 0);
5594 	CU_ASSERT(count == 1);
5595 	CU_ASSERT(ids[0] == cloneid);
5596 
5597 	/* 8. Create second clone of snapshot 3 and try to remove snapshot 3 */
5598 
5599 	spdk_bs_create_clone(bs, snapshotid3, NULL, blob_op_with_id_complete, NULL);
5600 	poll_threads();
5601 	CU_ASSERT(g_bserrno == 0);
5602 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5603 	cloneid3 = g_blobid;
5604 
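	/* snapshot3 now has two clones (clone and clone3), so deleting it must fail */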
5605 	spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL);
5606 	poll_threads();
5607 	CU_ASSERT(g_bserrno != 0);
5608 
5609 	/* 9. Open snapshot 3 again and try to remove it while clone 3 is closed */
5610 
5611 	spdk_bs_open_blob(bs, snapshotid3, blob_op_with_handle_complete, NULL);
5612 	poll_threads();
5613 	CU_ASSERT(g_bserrno == 0);
5614 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5615 	snapshot3 = g_blob;
5616 
5617 	spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL);
5618 	poll_threads();
5619 	CU_ASSERT(g_bserrno != 0);
5620 
5621 	spdk_blob_close(snapshot3, blob_op_complete, NULL);
5622 	poll_threads();
5623 	CU_ASSERT(g_bserrno == 0);
5624 
5625 	spdk_bs_delete_blob(bs, cloneid3, blob_op_complete, NULL);
5626 	poll_threads();
5627 	CU_ASSERT(g_bserrno == 0);
5628 
5629 	/* 10. Remove snapshot 1 */
5630 
5631 	ut_blob_close_and_delete(bs, snapshot1);
5632 
5633 	/* Check relations after removing snapshot 1: snapshot2 becomes the root (no parent) and keeps both of its clones */
5634 	CU_ASSERT(snapshot2->parent_id == SPDK_BLOBID_INVALID);
5635 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == SPDK_BLOBID_INVALID);
5636 
5637 	count = SPDK_COUNTOF(ids);
5638 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
5639 	CU_ASSERT(rc == 0);
5640 	CU_ASSERT(count == 2);
5641 	CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
5642 	CU_ASSERT(ids[0] == snapshotid3 || ids[1] == snapshotid3);
5643 
5644 	/* 11. Try to create clone from read only blob */
5645 
5646 	/* Mark blob as read only */
5647 	spdk_blob_set_read_only(blob);
5648 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
5649 	poll_threads();
5650 	CU_ASSERT(g_bserrno == 0);
5651 
5652 	/* Create clone from read only blob */
5653 	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5654 	poll_threads();
5655 	CU_ASSERT(g_bserrno == 0);
5656 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5657 	cloneid2 = g_blobid;
5658 
5659 	spdk_bs_open_blob(bs, cloneid2, blob_op_with_handle_complete, NULL);
5660 	poll_threads();
5661 	CU_ASSERT(g_bserrno == 0);
5662 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5663 	clone2 = g_blob;
5664 
5665 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid);
5666 
5667 	count = SPDK_COUNTOF(ids);
5668 	rc = spdk_blob_get_clones(bs, blobid, ids, &count);
5669 	CU_ASSERT(rc == 0);
5670 	CU_ASSERT(count == 1);
5671 	CU_ASSERT(ids[0] == cloneid2);
5672 
5673 	/* Close blobs */
5674 
5675 	spdk_blob_close(clone2, blob_op_complete, NULL);
5676 	poll_threads();
5677 	CU_ASSERT(g_bserrno == 0);
5678 
5679 	spdk_blob_close(blob, blob_op_complete, NULL);
5680 	poll_threads();
5681 	CU_ASSERT(g_bserrno == 0);
5682 
5683 	spdk_blob_close(clone, blob_op_complete, NULL);
5684 	poll_threads();
5685 	CU_ASSERT(g_bserrno == 0);
5686 
5687 	spdk_blob_close(snapshot2, blob_op_complete, NULL);
5688 	poll_threads();
5689 	CU_ASSERT(g_bserrno == 0);
5690 
5691 	spdk_blob_close(snapshot3, blob_op_complete, NULL);
5692 	poll_threads();
5693 	CU_ASSERT(g_bserrno == 0);
5694 
5695 	ut_bs_reload(&bs, &bs_opts);
5696 
5697 	/* Verify structure of loaded blob store */
5698 
5699 	/* snapshot2 */
5700 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == SPDK_BLOBID_INVALID);
5701 
5702 	count = SPDK_COUNTOF(ids);
5703 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
5704 	CU_ASSERT(rc == 0);
5705 	CU_ASSERT(count == 2);
5706 	CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
5707 	CU_ASSERT(ids[0] == snapshotid3 || ids[1] == snapshotid3);
5708 
5709 	/* blob */
5710 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid2);
5711 	count = SPDK_COUNTOF(ids);
5712 	rc = spdk_blob_get_clones(bs, blobid, ids, &count);
5713 	CU_ASSERT(rc == 0);
5714 	CU_ASSERT(count == 1);
5715 	CU_ASSERT(ids[0] == cloneid2);
5716 
5717 	/* clone */
5718 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3);
5719 	count = SPDK_COUNTOF(ids);
5720 	rc = spdk_blob_get_clones(bs, cloneid, ids, &count);
5721 	CU_ASSERT(rc == 0);
5722 	CU_ASSERT(count == 0);
5723 
5724 	/* snapshot3 */
5725 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid3) == snapshotid2);
5726 	count = SPDK_COUNTOF(ids);
5727 	rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count);
5728 	CU_ASSERT(rc == 0);
5729 	CU_ASSERT(count == 1);
5730 	CU_ASSERT(ids[0] == cloneid);
5731 
5732 	/* clone2 */
5733 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid);
5734 	count = SPDK_COUNTOF(ids);
5735 	rc = spdk_blob_get_clones(bs, cloneid2, ids, &count);
5736 	CU_ASSERT(rc == 0);
5737 	CU_ASSERT(count == 0);
5738 
5739 	/* Try to delete all blobs in the worst possible order */
5740 
5741 	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
5742 	poll_threads();
5743 	CU_ASSERT(g_bserrno != 0);
5744 
5745 	spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL);
5746 	poll_threads();
5747 	CU_ASSERT(g_bserrno == 0);
5748 
5749 	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
5750 	poll_threads();
5751 	CU_ASSERT(g_bserrno != 0);
5752 
5753 	spdk_bs_delete_blob(bs, cloneid, blob_op_complete, NULL);
5754 	poll_threads();
5755 	CU_ASSERT(g_bserrno == 0);
5756 
5757 	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
5758 	poll_threads();
5759 	CU_ASSERT(g_bserrno == 0);
5760 
5761 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
5762 	poll_threads();
5763 	CU_ASSERT(g_bserrno == 0);
5764 
5765 	spdk_bs_delete_blob(bs, cloneid2, blob_op_complete, NULL);
5766 	poll_threads();
5767 	CU_ASSERT(g_bserrno == 0);
5768 
5769 	spdk_bs_unload(bs, bs_op_complete, NULL);
5770 	poll_threads();
5771 	CU_ASSERT(g_bserrno == 0);
5772 
5773 	g_bs = NULL;
5774 }
5775 
5776 /**
5777  * Snapshot-clones relation test 3
5778  *
5779  *         snapshot0
5780  *            |
5781  *         snapshot1
5782  *            |
5783  *         snapshot2
5784  *            |
5785  *           blob
5786  */
5787 static void
5788 blob_relations3(void)
5789 {
5790 	struct spdk_blob_store *bs;
5791 	struct spdk_bs_dev *dev;
5792 	struct spdk_io_channel *channel;
5793 	struct spdk_bs_opts bs_opts;
5794 	struct spdk_blob_opts opts;
5795 	struct spdk_blob *blob;
5796 	spdk_blob_id blobid, snapshotid0, snapshotid1, snapshotid2;
5797 
5798 	dev = init_dev();
5799 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
5800 	snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");
5801 
5802 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
5803 	poll_threads();
5804 	CU_ASSERT(g_bserrno == 0);
5805 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
5806 	bs = g_bs;
5807 
5808 	channel = spdk_bs_alloc_io_channel(bs);
5809 	SPDK_CU_ASSERT_FATAL(channel != NULL);
5810 
5811 	/* 1. Create blob with 10 clusters */
5812 	ut_spdk_blob_opts_init(&opts);
5813 	opts.num_clusters = 10;
5814 
5815 	blob = ut_blob_create_and_open(bs, &opts);
5816 	blobid = spdk_blob_get_id(blob);
5817 
5818 	/* 2. Create snapshot0 */
5819 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5820 	poll_threads();
5821 	CU_ASSERT(g_bserrno == 0);
5822 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5823 	snapshotid0 = g_blobid;
5824 
5825 	/* 3. Create snapshot1 */
5826 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5827 	poll_threads();
5828 	CU_ASSERT(g_bserrno == 0);
5829 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5830 	snapshotid1 = g_blobid;
5831 
5832 	/* 4. Create snapshot2 */
5833 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5834 	poll_threads();
5835 	CU_ASSERT(g_bserrno == 0);
5836 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5837 	snapshotid2 = g_blobid;
5838 
5839 	/* 5. Decouple blob */
5840 	spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
5841 	poll_threads();
5842 	CU_ASSERT(g_bserrno == 0);
5843 
5844 	/* 6. Decouple snapshot2. Make sure updating md of snapshot2 is possible */
5845 	spdk_bs_blob_decouple_parent(bs, channel, snapshotid2, blob_op_complete, NULL);
5846 	poll_threads();
5847 	CU_ASSERT(g_bserrno == 0);
5848 
5849 	/* 7. Delete blob */
5850 	spdk_blob_close(blob, blob_op_complete, NULL);
5851 	poll_threads();
5852 	CU_ASSERT(g_bserrno == 0);
5853 
5854 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
5855 	poll_threads();
5856 	CU_ASSERT(g_bserrno == 0);
5857 
5858 	/* 8. Delete snapshot2.
5859 	 * If md of snapshot 2 was updated, it should be possible to delete it */
5860 	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
5861 	poll_threads();
5862 	CU_ASSERT(g_bserrno == 0);
5863 
5864 	/* Remove remaining blobs and unload bs */
5865 	spdk_bs_delete_blob(bs, snapshotid1, blob_op_complete, NULL);
5866 	poll_threads();
5867 	CU_ASSERT(g_bserrno == 0);
5868 
5869 	spdk_bs_delete_blob(bs, snapshotid0, blob_op_complete, NULL);
5870 	poll_threads();
5871 	CU_ASSERT(g_bserrno == 0);
5872 
5873 	spdk_bs_free_io_channel(channel);
5874 	poll_threads();
5875 
5876 	spdk_bs_unload(bs, bs_op_complete, NULL);
5877 	poll_threads();
5878 	CU_ASSERT(g_bserrno == 0);
5879 
5880 	g_bs = NULL;
5881 }
5882 
5883 static void
5884 blobstore_clean_power_failure(void)
5885 {
5886 	struct spdk_blob_store *bs;
5887 	struct spdk_blob *blob;
5888 	struct spdk_power_failure_thresholds thresholds = {};
5889 	bool clean = false;
5890 	struct spdk_bs_super_block *super = (struct spdk_bs_super_block *)&g_dev_buffer[0];
5891 	struct spdk_bs_super_block super_copy = {};
5892 
5893 	thresholds.general_threshold = 1;
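	/* Retry with an increasing power failure threshold until the blob md sync finally succeeds */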
5894 	while (!clean) {
5895 		/* Create bs and blob */
5896 		suite_blob_setup();
5897 		SPDK_CU_ASSERT_FATAL(g_bs != NULL);
5898 		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5899 		bs = g_bs;
5900 		blob = g_blob;
5901 
5902 		/* The super block should not change for the rest of the UT;
5903 		 * save it and compare later. */
5904 		memcpy(&super_copy, super, sizeof(struct spdk_bs_super_block));
5905 		SPDK_CU_ASSERT_FATAL(super->clean == 0);
5906 		SPDK_CU_ASSERT_FATAL(bs->clean == 0);
5907 
5908 		/* Force the bs/super block into a clean state and mark the blob
5909 		 * dirty to force a blob persist. */
5910 		blob->state = SPDK_BLOB_STATE_DIRTY;
5911 		bs->clean = 1;
5912 		super->clean = 1;
5913 		super->crc = blob_md_page_calc_crc(super);
5914 
5915 		g_bserrno = -1;
5916 		dev_set_power_failure_thresholds(thresholds);
5917 		spdk_blob_sync_md(blob, blob_op_complete, NULL);
5918 		poll_threads();
5919 		dev_reset_power_failure_event();
5920 
5921 		if (g_bserrno == 0) {
5922 			/* After successful md sync, both bs and super block
5923 			 * should be marked as not clean. */
5924 			SPDK_CU_ASSERT_FATAL(bs->clean == 0);
5925 			SPDK_CU_ASSERT_FATAL(super->clean == 0);
5926 			clean = true;
5927 		}
5928 
5929 		/* Depending on the point of failure, super block was either updated or not. */
5930 		super_copy.clean = super->clean;
5931 		super_copy.crc = blob_md_page_calc_crc(&super_copy);
5932 		/* Compare that the values in super block remained unchanged. */
5933 		SPDK_CU_ASSERT_FATAL(!memcmp(&super_copy, super, sizeof(struct spdk_bs_super_block)));
5934 
5935 		/* Delete blob and unload bs */
5936 		suite_blob_cleanup();
5937 
5938 		thresholds.general_threshold++;
5939 	}
5940 }
5941 
5942 static void
5943 blob_delete_snapshot_power_failure(void)
5944 {
5945 	struct spdk_bs_dev *dev;
5946 	struct spdk_blob_store *bs;
5947 	struct spdk_blob_opts opts;
5948 	struct spdk_blob *blob, *snapshot;
5949 	struct spdk_power_failure_thresholds thresholds = {};
5950 	spdk_blob_id blobid, snapshotid;
5951 	const void *value;
5952 	size_t value_len;
5953 	size_t count;
5954 	spdk_blob_id ids[3] = {};
5955 	int rc;
5956 	bool deleted = false;
5957 	int delete_snapshot_bserrno = -1;
5958 
5959 	thresholds.general_threshold = 1;
5960 	while (!deleted) {
5961 		dev = init_dev();
5962 
5963 		spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
5964 		poll_threads();
5965 		CU_ASSERT(g_bserrno == 0);
5966 		SPDK_CU_ASSERT_FATAL(g_bs != NULL);
5967 		bs = g_bs;
5968 
5969 		/* Create blob */
5970 		ut_spdk_blob_opts_init(&opts);
5971 		opts.num_clusters = 10;
5972 
5973 		spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
5974 		poll_threads();
5975 		CU_ASSERT(g_bserrno == 0);
5976 		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5977 		blobid = g_blobid;
5978 
5979 		/* Create snapshot */
5980 		spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5981 		poll_threads();
5982 		CU_ASSERT(g_bserrno == 0);
5983 		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5984 		snapshotid = g_blobid;
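		/* The first cluster is reserved for blobstore metadata, so the blob's 10 data
		 * clusters should occupy clusters 1-10, leaving cluster 11 unallocated. */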
5985 		SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1));
5986 		SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11));
5987 
5988 		dev_set_power_failure_thresholds(thresholds);
5989 
5990 		spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
5991 		poll_threads();
5992 		delete_snapshot_bserrno = g_bserrno;
5993 
5994 		/* Do not shut down cleanly. The assumption is that after snapshot deletion
5995 		 * reports success, changes to both blobs should already be persisted. */
5996 		dev_reset_power_failure_event();
5997 		ut_bs_dirty_load(&bs, NULL);
5998 
5999 		SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1));
6000 		SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11));
6001 
6002 		spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
6003 		poll_threads();
6004 		CU_ASSERT(g_bserrno == 0);
6005 		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
6006 		blob = g_blob;
6007 		SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == true);
6008 
6009 		spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
6010 		poll_threads();
6011 
6012 		if (g_bserrno == 0) {
6013 			SPDK_CU_ASSERT_FATAL(g_blob != NULL);
6014 			snapshot = g_blob;
6015 			CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
6016 			count = SPDK_COUNTOF(ids);
6017 			rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
6018 			CU_ASSERT(rc == 0);
6019 			CU_ASSERT(count == 1);
6020 			CU_ASSERT(ids[0] == blobid);
6021 			rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_PENDING_REMOVAL, &value, &value_len);
6022 			CU_ASSERT(rc != 0);
6023 			SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(snapshot) == false);
6024 
6025 			spdk_blob_close(snapshot, blob_op_complete, NULL);
6026 			poll_threads();
6027 			CU_ASSERT(g_bserrno == 0);
6028 		} else {
6029 			/* The snapshot might have been left in an unrecoverable state, so it does not open.
6030 			 * Yet the delete might still perform further changes to the clone after that.
6031 			 * This UT should keep iterating until the snapshot is deleted and the delete call succeeds. */
6032 			 * This UT should test until snapshot is deleted and delete call succeeds. */
6033 			if (delete_snapshot_bserrno == 0) {
6034 				deleted = true;
6035 			}
6036 		}
6037 
6038 		spdk_blob_close(blob, blob_op_complete, NULL);
6039 		poll_threads();
6040 		CU_ASSERT(g_bserrno == 0);
6041 
6042 		spdk_bs_unload(bs, bs_op_complete, NULL);
6043 		poll_threads();
6044 		CU_ASSERT(g_bserrno == 0);
6045 
6046 		thresholds.general_threshold++;
6047 	}
6048 }
6049 
6050 static void
6051 blob_create_snapshot_power_failure(void)
6052 {
6053 	struct spdk_blob_store *bs = g_bs;
6054 	struct spdk_bs_dev *dev;
6055 	struct spdk_blob_opts opts;
6056 	struct spdk_blob *blob, *snapshot;
6057 	struct spdk_power_failure_thresholds thresholds = {};
6058 	spdk_blob_id blobid, snapshotid;
6059 	const void *value;
6060 	size_t value_len;
6061 	size_t count;
6062 	spdk_blob_id ids[3] = {};
6063 	int rc;
6064 	bool created = false;
6065 	int create_snapshot_bserrno = -1;
6066 
6067 	thresholds.general_threshold = 1;
6068 	while (!created) {
6069 		dev = init_dev();
6070 
6071 		spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
6072 		poll_threads();
6073 		CU_ASSERT(g_bserrno == 0);
6074 		SPDK_CU_ASSERT_FATAL(g_bs != NULL);
6075 		bs = g_bs;
6076 
6077 		/* Create blob */
6078 		ut_spdk_blob_opts_init(&opts);
6079 		opts.num_clusters = 10;
6080 
6081 		spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
6082 		poll_threads();
6083 		CU_ASSERT(g_bserrno == 0);
6084 		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
6085 		blobid = g_blobid;
6086 		SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1));
6087 		SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11));
6088 
6089 		dev_set_power_failure_thresholds(thresholds);
6090 
6091 		/* Create snapshot */
6092 		spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
6093 		poll_threads();
6094 		create_snapshot_bserrno = g_bserrno;
6095 		snapshotid = g_blobid;
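		/* If the simulated failure hit before the callback got a valid id,
		 * snapshotid may still be SPDK_BLOBID_INVALID; both outcomes are handled below. */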
6096 		SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1));
6097 		SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11));
6098 
6099 		/* Do not shut down cleanly. The assumption is that after snapshot creation
6100 		 * reports success, both blobs should be power-fail safe. */
6101 		dev_reset_power_failure_event();
6102 		ut_bs_dirty_load(&bs, NULL);
6103 
6104 		SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, 1));
6105 		SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, 11));
6106 
6107 		spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
6108 		poll_threads();
6109 		CU_ASSERT(g_bserrno == 0);
6110 		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
6111 		blob = g_blob;
6112 
6113 		if (snapshotid != SPDK_BLOBID_INVALID) {
6114 			spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
6115 			poll_threads();
6116 		}
6117 
6118 		if ((snapshotid != SPDK_BLOBID_INVALID) && (g_bserrno == 0)) {
6119 			SPDK_CU_ASSERT_FATAL(g_blob != NULL);
6120 			snapshot = g_blob;
6121 			SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == true);
6122 			SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(snapshot) == false);
6123 			CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
6124 			count = SPDK_COUNTOF(ids);
6125 			rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
6126 			CU_ASSERT(rc == 0);
6127 			CU_ASSERT(count == 1);
6128 			CU_ASSERT(ids[0] == blobid);
6129 			rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_IN_PROGRESS, &value, &value_len);
6130 			CU_ASSERT(rc != 0);
6131 
6132 			spdk_blob_close(snapshot, blob_op_complete, NULL);
6133 			poll_threads();
6134 			CU_ASSERT(g_bserrno == 0);
6135 			if (create_snapshot_bserrno == 0) {
6136 				created = true;
6137 			}
6138 		} else {
6139 			CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID);
6140 			SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == false);
6141 		}
6142 
6143 		spdk_blob_close(blob, blob_op_complete, NULL);
6144 		poll_threads();
6145 		CU_ASSERT(g_bserrno == 0);
6146 
6147 		spdk_bs_unload(bs, bs_op_complete, NULL);
6148 		poll_threads();
6149 		CU_ASSERT(g_bserrno == 0);
6150 
6151 		thresholds.general_threshold++;
6152 	}
6153 }
6154 
6155 static void
6156 test_io_write(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
6157 {
6158 	uint8_t payload_ff[64 * 512];
6159 	uint8_t payload_aa[64 * 512];
6160 	uint8_t payload_00[64 * 512];
6161 	uint8_t *cluster0, *cluster1;
6162 
6163 	memset(payload_ff, 0xFF, sizeof(payload_ff));
6164 	memset(payload_aa, 0xAA, sizeof(payload_aa));
6165 	memset(payload_00, 0x00, sizeof(payload_00));
6166 
6167 	/* Try to perform I/O with io unit = 512 */
6168 	spdk_blob_io_write(blob, channel, payload_ff, 0, 1, blob_op_complete, NULL);
6169 	poll_threads();
6170 	CU_ASSERT(g_bserrno == 0);
6171 
6172 	/* If thin provisioning is set, the cluster should be allocated now */
6173 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[0] != 0);
6174 	cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen];
6175 
6176 	/* Each character 0-F symbolizes a single io_unit containing a 512-byte block filled with that character.
6177 	 * Each page is separated by |. A whole block [...] symbolizes one cluster (containing 4 pages). */
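	/* In these diagrams a page is 8 io_units (8 * 512 = 4096 bytes) and a cluster is
	 * 4 pages, i.e. 32 io_units, so io_unit 32 is the first io_unit of cluster1. */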
6178 	/* cluster0: [ F000 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
6179 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6180 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 31 * 512) == 0);
6181 
6182 	/* Verify write with offset on first page */
6183 	spdk_blob_io_write(blob, channel, payload_ff, 2, 1, blob_op_complete, NULL);
6184 	poll_threads();
6185 	CU_ASSERT(g_bserrno == 0);
6186 
6187 	/* cluster0: [ F0F0 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
6188 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6189 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6190 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6191 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6192 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_00, 28 * 512) == 0);
6193 
6194 	/* Verify write with offset on first page */
6195 	spdk_blob_io_write(blob, channel, payload_ff, 4, 4, blob_op_complete, NULL);
6196 	poll_threads();
6197 
6198 	/* cluster0: [ F0F0 FFFF | 0000 0000 | 0000 0000 | 0000 0000 ] */
6199 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6200 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6201 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6202 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6203 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 4 * 512) == 0);
6204 	CU_ASSERT(memcmp(cluster0 + 8 * 512, payload_00, 24 * 512) == 0);
6205 
6206 	/* Verify write with offset on second page */
6207 	spdk_blob_io_write(blob, channel, payload_ff, 8, 4, blob_op_complete, NULL);
6208 	poll_threads();
6209 
6210 	/* cluster0: [ F0F0 FFFF | FFFF 0000 | 0000 0000 | 0000 0000 ] */
6211 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6212 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6213 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6214 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6215 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 8 * 512) == 0);
6216 	CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0);
6217 
6218 	/* Verify write across multiple pages */
6219 	spdk_blob_io_write(blob, channel, payload_aa, 4, 8, blob_op_complete, NULL);
6220 	poll_threads();
6221 
6222 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 0000 ] */
6223 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6224 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6225 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6226 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6227 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
6228 	CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0);
6229 
6230 	/* Verify write across multiple clusters */
6231 	spdk_blob_io_write(blob, channel, payload_ff, 28, 8, blob_op_complete, NULL);
6232 	poll_threads();
6233 
6234 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0);
6235 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
6236 
6237 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6238 	 * cluster1: [ FFFF 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
6239 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6240 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6241 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6242 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6243 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
6244 	CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0);
6245 
6246 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0);
6247 	CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 28 * 512) == 0);
6248 
6249 	/* Verify write to second cluster */
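	/* io_unit 32 is the start of cluster1, so offset 32 + 12 lands at io_units 12-13 of the second cluster */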
6250 	spdk_blob_io_write(blob, channel, payload_ff, 32 + 12, 2, blob_op_complete, NULL);
6251 	poll_threads();
6252 
6253 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0);
6254 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
6255 
6256 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6257 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] */
6258 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6259 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6260 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6261 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6262 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
6263 	CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0);
6264 
6265 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0);
6266 	CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 8 * 512) == 0);
6267 	CU_ASSERT(memcmp(cluster1 + 12 * 512, payload_ff, 2 * 512) == 0);
6268 	CU_ASSERT(memcmp(cluster1 + 14 * 512, payload_00, 18 * 512) == 0);
6269 }
6270 
6271 static void
6272 test_io_read(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
6273 {
6274 	uint8_t payload_read[64 * 512];
6275 	uint8_t payload_ff[64 * 512];
6276 	uint8_t payload_aa[64 * 512];
6277 	uint8_t payload_00[64 * 512];
6278 
6279 	memset(payload_ff, 0xFF, sizeof(payload_ff));
6280 	memset(payload_aa, 0xAA, sizeof(payload_aa));
6281 	memset(payload_00, 0x00, sizeof(payload_00));
6282 
6283 	/* Read only first io unit */
6284 	/* cluster0: [ (F)0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6285 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
6286 	 * payload_read: F000 0000 | 0000 0000 ... */
6287 	memset(payload_read, 0x00, sizeof(payload_read));
6288 	spdk_blob_io_read(blob, channel, payload_read, 0, 1, blob_op_complete, NULL);
6289 	poll_threads();
6290 	CU_ASSERT(g_bserrno == 0);
6291 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
6292 	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 31 * 512) == 0);
6293 
6294 	/* Read four io_units starting from offset = 2
6295 	 * cluster0: [ F0(F0 AA)AA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6296 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
6297 	 * payload_read: F0AA 0000 | 0000 0000 ... */
6298 
6299 	memset(payload_read, 0x00, sizeof(payload_read));
6300 	spdk_blob_io_read(blob, channel, payload_read, 2, 4, blob_op_complete, NULL);
6301 	poll_threads();
6302 	CU_ASSERT(g_bserrno == 0);
6303 
6304 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
6305 	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0);
6306 	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_aa, 512) == 0);
6307 	CU_ASSERT(memcmp(payload_read + 3 * 512, payload_aa, 512) == 0);
6308 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0);
6309 
6310 	/* Read eight io_units across multiple pages
6311 	 * cluster0: [ F0F0 (AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ]
6312 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
6313 	 * payload_read: AAAA AAAA | 0000 0000 ... */
6314 	memset(payload_read, 0x00, sizeof(payload_read));
6315 	spdk_blob_io_read(blob, channel, payload_read, 4, 8, blob_op_complete, NULL);
6316 	poll_threads();
6317 	CU_ASSERT(g_bserrno == 0);
6318 
6319 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_aa, 8 * 512) == 0);
6320 	CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0);
6321 
6322 	/* Read eight io_units across multiple clusters
6323 	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 (FFFF ]
6324 	 * cluster1: [ FFFF) 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
6325 	 * payload_read: FFFF FFFF | 0000 0000 ... */
6326 	memset(payload_read, 0x00, sizeof(payload_read));
6327 	spdk_blob_io_read(blob, channel, payload_read, 28, 8, blob_op_complete, NULL);
6328 	poll_threads();
6329 	CU_ASSERT(g_bserrno == 0);
6330 
6331 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 8 * 512) == 0);
6332 	CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0);
6333 
6334 	/* Read four io_units from second cluster
6335 	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6336 	 * cluster1: [ FFFF 0000 | 00(00 FF)00 | 0000 0000 | 0000 0000 ]
6337 	 * payload_read: 00FF 0000 | 0000 0000 ... */
6338 	memset(payload_read, 0x00, sizeof(payload_read));
6339 	spdk_blob_io_read(blob, channel, payload_read, 32 + 10, 4, blob_op_complete, NULL);
6340 	poll_threads();
6341 	CU_ASSERT(g_bserrno == 0);
6342 
6343 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_00, 2 * 512) == 0);
6344 	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 2 * 512) == 0);
6345 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0);
6346 
6347 	/* Read second cluster
6348 	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6349 	 * cluster1: [ (FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ]
6350 	 * payload_read: FFFF 0000 | 0000 FF00 ... */
6351 	memset(payload_read, 0x00, sizeof(payload_read));
6352 	spdk_blob_io_read(blob, channel, payload_read, 32, 32, blob_op_complete, NULL);
6353 	poll_threads();
6354 	CU_ASSERT(g_bserrno == 0);
6355 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 4 * 512) == 0);
6356 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 8 * 512) == 0);
6357 	CU_ASSERT(memcmp(payload_read + 12 * 512, payload_ff, 2 * 512) == 0);
6358 	CU_ASSERT(memcmp(payload_read + 14 * 512, payload_00, 18 * 512) == 0);
6359 
6360 	/* Read two whole clusters
6361 	 * cluster0: [ (F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6362 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ] */
6363 	memset(payload_read, 0x00, sizeof(payload_read));
6364 	spdk_blob_io_read(blob, channel, payload_read, 0, 64, blob_op_complete, NULL);
6365 	poll_threads();
6366 	CU_ASSERT(g_bserrno == 0);
6367 
6368 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
6369 	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0);
6370 	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 512) == 0);
6371 	CU_ASSERT(memcmp(payload_read + 3 * 512, payload_00, 512) == 0);
6372 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_aa, 8 * 512) == 0);
6373 	CU_ASSERT(memcmp(payload_read + 28 * 512, payload_ff, 4 * 512) == 0);
6374 
6375 	CU_ASSERT(memcmp(payload_read + (32 + 0) * 512, payload_ff, 4 * 512) == 0);
6376 	CU_ASSERT(memcmp(payload_read + (32 + 4) * 512, payload_00, 8 * 512) == 0);
6377 	CU_ASSERT(memcmp(payload_read + (32 + 12) * 512, payload_ff, 2 * 512) == 0);
6378 	CU_ASSERT(memcmp(payload_read + (32 + 14) * 512, payload_00, 18 * 512) == 0);
6379 }
6380 
6381 
6382 static void
6383 test_io_unmap(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
6384 {
6385 	uint8_t payload_ff[64 * 512];
6386 	uint8_t payload_aa[64 * 512];
6387 	uint8_t payload_00[64 * 512];
6388 	uint8_t *cluster0, *cluster1;
6389 
6390 	memset(payload_ff, 0xFF, sizeof(payload_ff));
6391 	memset(payload_aa, 0xAA, sizeof(payload_aa));
6392 	memset(payload_00, 0x00, sizeof(payload_00));
6393 
6394 	cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen];
6395 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
6396 
6397 	/* Unmap */
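	/* The UT backing dev zero-fills unmapped ranges in g_dev_buffer, so both clusters
	 * should read back as zeroes afterwards. */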
6398 	spdk_blob_io_unmap(blob, channel, 0, 64, blob_op_complete, NULL);
6399 	poll_threads();
6400 
6401 	CU_ASSERT(g_bserrno == 0);
6402 
6403 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_00, 32 * 512) == 0);
6404 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_00, 32 * 512) == 0);
6405 }
6406 
6407 static void
6408 test_io_zeroes(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
6409 {
6410 	uint8_t payload_ff[64 * 512];
6411 	uint8_t payload_aa[64 * 512];
6412 	uint8_t payload_00[64 * 512];
6413 	uint8_t *cluster0, *cluster1;
6414 
6415 	memset(payload_ff, 0xFF, sizeof(payload_ff));
6416 	memset(payload_aa, 0xAA, sizeof(payload_aa));
6417 	memset(payload_00, 0x00, sizeof(payload_00));
6418 
6419 	cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen];
6420 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
6421 
6422 	/* Write zeroes across both clusters */
6423 	spdk_blob_io_write_zeroes(blob, channel, 0, 64, blob_op_complete, NULL);
6424 	poll_threads();
6425 
6426 	CU_ASSERT(g_bserrno == 0);
6427 
6428 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_00, 32 * 512) == 0);
6429 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_00, 32 * 512) == 0);
6430 }
6431 
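/* Helper for test_iov_write(): when io_opts is provided it issues the write through the
 * ext API and verifies the backing dev saw those opts; otherwise it uses the regular API.
 * Note that cb_fn and cb_arg are accepted for symmetry but the helper always completes
 * through blob_op_complete. */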
6432 static inline void
6433 test_blob_io_writev(struct spdk_blob *blob, struct spdk_io_channel *channel,
6434 		    struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
6435 		    spdk_blob_op_complete cb_fn, void *cb_arg, struct spdk_blob_ext_io_opts *io_opts)
6436 {
6437 	if (io_opts) {
6438 		g_dev_writev_ext_called = false;
6439 		memset(&g_blob_ext_io_opts, 0, sizeof(g_blob_ext_io_opts));
6440 		spdk_blob_io_writev_ext(blob, channel, iov, iovcnt, offset, length, blob_op_complete, NULL,
6441 					io_opts);
6442 	} else {
6443 		spdk_blob_io_writev(blob, channel, iov, iovcnt, offset, length, blob_op_complete, NULL);
6444 	}
6445 	poll_threads();
6446 	CU_ASSERT(g_bserrno == 0);
6447 	if (io_opts) {
6448 		CU_ASSERT(g_dev_writev_ext_called);
6449 		CU_ASSERT(memcmp(io_opts, &g_blob_ext_io_opts, sizeof(g_blob_ext_io_opts)) == 0);
6450 	}
6451 }
6452 
6453 static void
6454 test_iov_write(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel,
6455 	       bool ext_api)
6456 {
6457 	uint8_t payload_ff[64 * 512];
6458 	uint8_t payload_aa[64 * 512];
6459 	uint8_t payload_00[64 * 512];
6460 	uint8_t *cluster0, *cluster1;
6461 	struct iovec iov[4];
6462 	struct spdk_blob_ext_io_opts ext_opts = {
6463 		.memory_domain = (struct spdk_memory_domain *)0xfeedbeef,
6464 		.memory_domain_ctx = (void *)0xf00df00d,
6465 		.size = sizeof(struct spdk_blob_ext_io_opts),
6466 		.user_ctx = (void *)123,
6467 	};
6468 
6469 	memset(payload_ff, 0xFF, sizeof(payload_ff));
6470 	memset(payload_aa, 0xAA, sizeof(payload_aa));
6471 	memset(payload_00, 0x00, sizeof(payload_00));
6472 
6473 	/* Try to perform I/O with io unit = 512 */
6474 	iov[0].iov_base = payload_ff;
6475 	iov[0].iov_len = 1 * 512;
6476 
6477 	test_blob_io_writev(blob, channel, iov, 1, 0, 1, blob_op_complete, NULL,
6478 			    ext_api ? &ext_opts : NULL);
6479 
6480 	/* If thin provisioning is set, the cluster should be allocated now */
6481 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[0] != 0);
6482 	cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen];
6483 
6484 	/* Each character 0-F symbolizes a single io_unit containing a 512-byte block filled with that character.
6485 	 * Each page is separated by |. A whole block [...] symbolizes one cluster (containing 4 pages). */
6486 	/* cluster0: [ F000 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
6487 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6488 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 31 * 512) == 0);
6489 
6490 	/* Verify write with offset on first page */
6491 	iov[0].iov_base = payload_ff;
6492 	iov[0].iov_len = 1 * 512;
6493 
6494 	test_blob_io_writev(blob, channel, iov, 1, 2, 1, blob_op_complete, NULL,
6495 			    ext_api ? &ext_opts : NULL);
6496 
6497 	/* cluster0: [ F0F0 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
6498 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6499 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6500 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6501 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6502 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_00, 28 * 512) == 0);
6503 
6504 	/* Verify a four-io_unit write at offset 4, still within the first page */
6505 	iov[0].iov_base = payload_ff;
6506 	iov[0].iov_len = 4 * 512;
6507 	spdk_blob_io_writev(blob, channel, iov, 1, 4, 4, blob_op_complete, NULL);
6508 	poll_threads();
6509 
6510 	/* cluster0: [ F0F0 FFFF | 0000 0000 | 0000 0000 | 0000 0000 ] */
6511 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6512 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6513 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6514 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6515 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 4 * 512) == 0);
6516 	CU_ASSERT(memcmp(cluster0 + 8 * 512, payload_00, 24 * 512) == 0);
6517 
6518 	/* Verify write with offset on second page */
6519 	iov[0].iov_base = payload_ff;
6520 	iov[0].iov_len = 4 * 512;
6521 	spdk_blob_io_writev(blob, channel, iov, 1, 8, 4, blob_op_complete, NULL);
6522 	poll_threads();
6523 
6524 	/* cluster0: [ F0F0 FFFF | FFFF 0000 | 0000 0000 | 0000 0000 ] */
6525 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6526 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6527 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6528 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6529 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 8 * 512) == 0);
6530 	CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0);
6531 
6532 	/* Verify write across multiple pages */
6533 	iov[0].iov_base = payload_aa;
6534 	iov[0].iov_len = 8 * 512;
6535 
6536 	test_blob_io_writev(blob, channel, iov, 1, 4, 8, blob_op_complete, NULL,
6537 			    ext_api ? &ext_opts : NULL);
6538 
6539 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 0000 ] */
6540 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6541 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6542 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6543 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6544 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
6545 	CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 20 * 512) == 0);
6546 
6547 	/* Verify write across multiple clusters */
6548 
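	/* Offset 28, length 8: io_units 28-31 land in cluster0 and 32-35 in cluster1, so
	 * this single writev crosses the cluster boundary and, for a thin-provisioned blob,
	 * forces allocation of cluster1, which is asserted just below. */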
6549 	iov[0].iov_base = payload_ff;
6550 	iov[0].iov_len = 8 * 512;
6551 
6552 	test_blob_io_writev(blob, channel, iov, 1, 28, 8, blob_op_complete, NULL,
6553 			    ext_api ? &ext_opts : NULL);
6554 
6555 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0);
6556 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
6557 
6558 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6559 	 * cluster1: [ FFFF 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
6560 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6561 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6562 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6563 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6564 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
6565 	CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, 16 * 512) == 0);
6566 	CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0);
6567 
6568 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0);
6569 	CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 28 * 512) == 0);
6570 
6571 	/* Verify write to second cluster */
6572 
6573 	iov[0].iov_base = payload_ff;
6574 	iov[0].iov_len = 2 * 512;
6575 
6576 	test_blob_io_writev(blob, channel, iov, 1, 32 + 12, 2, blob_op_complete, NULL,
6577 			    ext_api ? &ext_opts : NULL);
6578 
6579 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0);
6580 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
6581 
6582 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6583 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] */
6584 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6585 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6586 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6587 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6588 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
6589 	CU_ASSERT(memcmp(cluster0 + 28 * 512, payload_ff, 4 * 512) == 0);
6590 
6591 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0);
6592 	CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 8 * 512) == 0);
6593 	CU_ASSERT(memcmp(cluster1 + 12 * 512, payload_ff, 2 * 512) == 0);
6594 	CU_ASSERT(memcmp(cluster1 + 14 * 512, payload_00, 18 * 512) == 0);
6595 }
6596 
6597 static inline void
6598 test_blob_io_readv(struct spdk_blob *blob, struct spdk_io_channel *channel,
6599 		   struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
6600 		   spdk_blob_op_complete cb_fn, void *cb_arg, struct spdk_blob_ext_io_opts *io_opts)
6601 {
6602 	if (io_opts) {
6603 		g_dev_readv_ext_called = false;
6604 		memset(&g_blob_ext_io_opts, 0, sizeof(g_blob_ext_io_opts));
6605 		spdk_blob_io_readv_ext(blob, channel, iov, iovcnt, offset, length, blob_op_complete, NULL, io_opts);
6606 	} else {
6607 		spdk_blob_io_readv(blob, channel, iov, iovcnt, offset, length, blob_op_complete, NULL);
6608 	}
6609 	poll_threads();
6610 	CU_ASSERT(g_bserrno == 0);
6611 	if (io_opts) {
6612 		CU_ASSERT(g_dev_readv_ext_called);
6613 		CU_ASSERT(memcmp(io_opts, &g_blob_ext_io_opts, sizeof(g_blob_ext_io_opts)) == 0);
6614 	}
6615 }
6616 
6617 static void
6618 test_iov_read(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel,
6619 	      bool ext_api)
6620 {
6621 	uint8_t payload_read[64 * 512];
6622 	uint8_t payload_ff[64 * 512];
6623 	uint8_t payload_aa[64 * 512];
6624 	uint8_t payload_00[64 * 512];
6625 	struct iovec iov[4];
6626 	struct spdk_blob_ext_io_opts ext_opts = {
6627 		.memory_domain = (struct spdk_memory_domain *)0xfeedbeef,
6628 		.memory_domain_ctx = (void *)0xf00df00d,
6629 		.size = sizeof(struct spdk_blob_ext_io_opts),
6630 		.user_ctx = (void *)123,
6631 	};
6632 
6633 	memset(payload_ff, 0xFF, sizeof(payload_ff));
6634 	memset(payload_aa, 0xAA, sizeof(payload_aa));
6635 	memset(payload_00, 0x00, sizeof(payload_00));
6636 
6637 	/* Read only first io unit */
6638 	/* Read only the first io unit
6639 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
6640 	 * payload_read: F000 0000 | 0000 0000 ... */
6641 	memset(payload_read, 0x00, sizeof(payload_read));
6642 	iov[0].iov_base = payload_read;
6643 	iov[0].iov_len = 1 * 512;
6644 
6645 	test_blob_io_readv(blob, channel, iov, 1, 0, 1, blob_op_complete, NULL, ext_api ? &ext_opts : NULL);
6646 
6647 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
6648 	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 31 * 512) == 0);
6649 
6650 	/* Read four io_units starting from offset = 2
6651 	 * cluster0: [ F0(F0 AA)AA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6652 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
6653 	 * payload_read: F0AA 0000 | 0000 0000 ... */
6654 
6655 	memset(payload_read, 0x00, sizeof(payload_read));
6656 	iov[0].iov_base = payload_read;
6657 	iov[0].iov_len = 4 * 512;
6658 
6659 	test_blob_io_readv(blob, channel, iov, 1, 2, 4, blob_op_complete, NULL, ext_api ? &ext_opts : NULL);
6660 
6661 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
6662 	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0);
6663 	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_aa, 512) == 0);
6664 	CU_ASSERT(memcmp(payload_read + 3 * 512, payload_aa, 512) == 0);
6665 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0);
6666 
6667 	/* Read eight io_units across multiple pages
6668 	 * cluster0: [ F0F0 (AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ]
6669 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
6670 	 * payload_read: AAAA AAAA | 0000 0000 ... */
6671 	memset(payload_read, 0x00, sizeof(payload_read));
6672 	iov[0].iov_base = payload_read;
6673 	iov[0].iov_len = 4 * 512;
6674 	iov[1].iov_base = payload_read + 4 * 512;
6675 	iov[1].iov_len = 4 * 512;
6676 
6677 	test_blob_io_readv(blob, channel, iov, 2, 4, 8, blob_op_complete, NULL, ext_api ? &ext_opts : NULL);
6678 
6679 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_aa, 8 * 512) == 0);
6680 	CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0);
6681 
6682 	/* Read eight io_units across multiple clusters
6683 	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 (FFFF ]
6684 	 * cluster1: [ FFFF) 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
6685 	 * payload_read: FFFF FFFF | 0000 0000 ... */
6686 	memset(payload_read, 0x00, sizeof(payload_read));
6687 	iov[0].iov_base = payload_read;
6688 	iov[0].iov_len = 2 * 512;
6689 	iov[1].iov_base = payload_read + 2 * 512;
6690 	iov[1].iov_len = 2 * 512;
6691 	iov[2].iov_base = payload_read + 4 * 512;
6692 	iov[2].iov_len = 2 * 512;
6693 	iov[3].iov_base = payload_read + 6 * 512;
6694 	iov[3].iov_len = 2 * 512;
6695 
6696 	test_blob_io_readv(blob, channel, iov, 4, 28, 8, blob_op_complete, NULL,
6697 			   ext_api ? &ext_opts : NULL);
6698 
6699 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 8 * 512) == 0);
6700 	CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, 24 * 512) == 0);
6701 
6702 	/* Read four io_units from second cluster
6703 	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6704 	 * cluster1: [ FFFF 0000 | 00(00 FF)00 | 0000 0000 | 0000 0000 ]
6705 	 * payload_read: 00FF 0000 | 0000 0000 ... */
6706 	memset(payload_read, 0x00, sizeof(payload_read));
6707 	iov[0].iov_base = payload_read;
6708 	iov[0].iov_len = 1 * 512;
6709 	iov[1].iov_base = payload_read + 1 * 512;
6710 	iov[1].iov_len = 3 * 512;
6711 
6712 	test_blob_io_readv(blob, channel, iov, 2, 32 + 10, 4, blob_op_complete, NULL,
6713 			   ext_api ? &ext_opts : NULL);
6714 
6715 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_00, 2 * 512) == 0);
6716 	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 2 * 512) == 0);
6717 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 28 * 512) == 0);
6718 
6719 	/* Read second cluster
6720 	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6721 	 * cluster1: [ (FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ]
6722 	 * payload_read: FFFF 0000 | 0000 FF00 ... */
6723 	memset(payload_read, 0x00, sizeof(payload_read));
6724 	iov[0].iov_base = payload_read;
6725 	iov[0].iov_len = 1 * 512;
6726 	iov[1].iov_base = payload_read + 1 * 512;
6727 	iov[1].iov_len = 2 * 512;
6728 	iov[2].iov_base = payload_read + 3 * 512;
6729 	iov[2].iov_len = 4 * 512;
6730 	iov[3].iov_base = payload_read + 7 * 512;
6731 	iov[3].iov_len = 25 * 512;
6732 
6733 	test_blob_io_readv(blob, channel, iov, 4, 32, 32, blob_op_complete, NULL,
6734 			   ext_api ? &ext_opts : NULL);
6735 
6736 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 4 * 512) == 0);
6737 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 8 * 512) == 0);
6738 	CU_ASSERT(memcmp(payload_read + 12 * 512, payload_ff, 2 * 512) == 0);
6739 	CU_ASSERT(memcmp(payload_read + 14 * 512, payload_00, 18 * 512) == 0);
6740 
6741 	/* Read both clusters in full
6742 	 * cluster0: [ (F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6743 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ] */
6744 	memset(payload_read, 0x00, sizeof(payload_read));
6745 	iov[0].iov_base = payload_read;
6746 	iov[0].iov_len = 1 * 512;
6747 	iov[1].iov_base = payload_read + 1 * 512;
6748 	iov[1].iov_len = 8 * 512;
6749 	iov[2].iov_base = payload_read + 9 * 512;
6750 	iov[2].iov_len = 16 * 512;
6751 	iov[3].iov_base = payload_read + 25 * 512;
6752 	iov[3].iov_len = 39 * 512;
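	/* The iovec lengths sum to 1 + 8 + 16 + 39 = 64 io_units, exactly the two
	 * 32-io_unit clusters read below. */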
6753 
6754 	test_blob_io_readv(blob, channel, iov, 4, 0, 64, blob_op_complete, NULL,
6755 			   ext_api ? &ext_opts : NULL);
6756 
6757 	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
6758 	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0);
6759 	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 512) == 0);
6760 	CU_ASSERT(memcmp(payload_read + 3 * 512, payload_00, 512) == 0);
6761 	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_aa, 8 * 512) == 0);
6762 	CU_ASSERT(memcmp(payload_read + 28 * 512, payload_ff, 4 * 512) == 0);
6763 
6764 	CU_ASSERT(memcmp(payload_read + (32 + 0) * 512, payload_ff, 4 * 512) == 0);
6765 	CU_ASSERT(memcmp(payload_read + (32 + 4) * 512, payload_00, 8 * 512) == 0);
6766 	CU_ASSERT(memcmp(payload_read + (32 + 12) * 512, payload_ff, 2 * 512) == 0);
6767 	CU_ASSERT(memcmp(payload_read + (32 + 14) * 512, payload_00, 18 * 512) == 0);
6768 }
6769 
6770 static void
6771 blob_io_unit(void)
6772 {
6773 	struct spdk_bs_opts bsopts;
6774 	struct spdk_blob_opts opts;
6775 	struct spdk_blob_store *bs;
6776 	struct spdk_bs_dev *dev;
6777 	struct spdk_blob *blob, *snapshot, *clone;
6778 	spdk_blob_id blobid;
6779 	struct spdk_io_channel *channel;
6780 
6781 	/* Create dev with 512 bytes io unit size */
6782 
6783 	spdk_bs_opts_init(&bsopts, sizeof(bsopts));
6784 	bsopts.cluster_sz = SPDK_BS_PAGE_SIZE * 4;	/* 4 pages * 8 io_units per page = 32 io_units */
6785 	snprintf(bsopts.bstype.bstype, sizeof(bsopts.bstype.bstype), "TESTTYPE");
6786 
6787 	/* Set up the dev with a 512-byte io_unit, smaller than the 4 KiB metadata page */
6788 	dev = init_dev();
6789 	dev->blocklen = 512;
6790 	dev->blockcnt =  DEV_BUFFER_SIZE / dev->blocklen;
6791 
6792 	/* Initialize a new blob store */
6793 	spdk_bs_init(dev, &bsopts, bs_op_with_handle_complete, NULL);
6794 	poll_threads();
6795 	CU_ASSERT(g_bserrno == 0);
6796 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
6797 	bs = g_bs;
6798 
6799 	CU_ASSERT(spdk_bs_get_io_unit_size(bs) == 512);
6800 	channel = spdk_bs_alloc_io_channel(bs);
6801 
6802 	/* Create thick provisioned blob */
6803 	ut_spdk_blob_opts_init(&opts);
6804 	opts.thin_provision = false;
6805 	opts.num_clusters = 32;
6806 
6807 	blob = ut_blob_create_and_open(bs, &opts);
6808 	blobid = spdk_blob_get_id(blob);
6809 
6810 	test_io_write(dev, blob, channel);
6811 	test_io_read(dev, blob, channel);
6812 	test_io_zeroes(dev, blob, channel);
6813 
6814 	test_iov_write(dev, blob, channel, false);
6815 	test_iov_read(dev, blob, channel, false);
6816 	test_io_zeroes(dev, blob, channel);
6817 
6818 	test_iov_write(dev, blob, channel, true);
6819 	test_iov_read(dev, blob, channel, true);
6820 
6821 	test_io_unmap(dev, blob, channel);
6822 
6823 	spdk_blob_close(blob, blob_op_complete, NULL);
6824 	poll_threads();
6825 	CU_ASSERT(g_bserrno == 0);
6826 	blob = NULL;
6827 	g_blob = NULL;
6828 
6829 	/* Create thin provisioned blob */
6830 
6831 	ut_spdk_blob_opts_init(&opts);
6832 	opts.thin_provision = true;
6833 	opts.num_clusters = 32;
6834 
6835 	blob = ut_blob_create_and_open(bs, &opts);
6836 	blobid = spdk_blob_get_id(blob);
6837 
6838 	test_io_write(dev, blob, channel);
6839 	test_io_read(dev, blob, channel);
6840 	test_io_zeroes(dev, blob, channel);
6841 
6842 	test_iov_write(dev, blob, channel, false);
6843 	test_iov_read(dev, blob, channel, false);
6844 	test_io_zeroes(dev, blob, channel);
6845 
6846 	test_iov_write(dev, blob, channel, true);
6847 	test_iov_read(dev, blob, channel, true);
6848 
6849 	/* Create snapshot */
6850 
6851 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
6852 	poll_threads();
6853 	CU_ASSERT(g_bserrno == 0);
6854 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
6855 	blobid = g_blobid;
6856 
6857 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
6858 	poll_threads();
6859 	CU_ASSERT(g_bserrno == 0);
6860 	CU_ASSERT(g_blob != NULL);
6861 	snapshot = g_blob;
6862 
6863 	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
6864 	poll_threads();
6865 	CU_ASSERT(g_bserrno == 0);
6866 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
6867 	blobid = g_blobid;
6868 
6869 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
6870 	poll_threads();
6871 	CU_ASSERT(g_bserrno == 0);
6872 	CU_ASSERT(g_blob != NULL);
6873 	clone = g_blob;
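	/* After the snapshot, both the original blob and the clone are thin and backed by
	 * the snapshot, so all three are expected to read back identical data below. */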
6874 
6875 	test_io_read(dev, blob, channel);
6876 	test_io_read(dev, snapshot, channel);
6877 	test_io_read(dev, clone, channel);
6878 
6879 	test_iov_read(dev, blob, channel, false);
6880 	test_iov_read(dev, snapshot, channel, false);
6881 	test_iov_read(dev, clone, channel, false);
6882 
6883 	test_iov_read(dev, blob, channel, true);
6884 	test_iov_read(dev, snapshot, channel, true);
6885 	test_iov_read(dev, clone, channel, true);
6886 
6887 	/* Inflate clone */
6888 
6889 	spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
6890 	poll_threads();
6891 
6892 	CU_ASSERT(g_bserrno == 0);
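	/* Inflation allocates clusters for the clone and copies the data down from its
	 * parent, so the reads, unmaps and writes below operate on the clone's own
	 * clusters without touching the snapshot. */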
6893 
6894 	test_io_read(dev, clone, channel);
6895 
6896 	test_io_unmap(dev, clone, channel);
6897 
6898 	test_iov_write(dev, clone, channel, false);
6899 	test_iov_read(dev, clone, channel, false);
6900 	test_io_unmap(dev, clone, channel);
6901 
6902 	test_iov_write(dev, clone, channel, true);
6903 	test_iov_read(dev, clone, channel, true);
6904 
6905 	spdk_blob_close(blob, blob_op_complete, NULL);
6906 	spdk_blob_close(snapshot, blob_op_complete, NULL);
6907 	spdk_blob_close(clone, blob_op_complete, NULL);
6908 	poll_threads();
6909 	CU_ASSERT(g_bserrno == 0);
6910 	blob = NULL;
6911 	g_blob = NULL;
6912 
6913 	spdk_bs_free_io_channel(channel);
6914 	poll_threads();
6915 
6916 	/* Unload the blob store */
6917 	spdk_bs_unload(bs, bs_op_complete, NULL);
6918 	poll_threads();
6919 	CU_ASSERT(g_bserrno == 0);
6920 	g_bs = NULL;
6921 	g_blob = NULL;
6922 	g_blobid = 0;
6923 }
6924 
6925 static void
6926 blob_io_unit_compatibility(void)
6927 {
6928 	struct spdk_bs_opts bsopts;
6929 	struct spdk_blob_store *bs;
6930 	struct spdk_bs_dev *dev;
6931 	struct spdk_bs_super_block *super;
6932 
6933 	/* Create dev with 512 bytes io unit size */
6934 
6935 	spdk_bs_opts_init(&bsopts, sizeof(bsopts));
6936 	bsopts.cluster_sz = SPDK_BS_PAGE_SIZE * 4;	/* 4 pages * 8 io_units per page = 32 io_units */
6937 	snprintf(bsopts.bstype.bstype, sizeof(bsopts.bstype.bstype), "TESTTYPE");
6938 
6939 	/* Set up the dev with a 512-byte io_unit, smaller than the 4 KiB metadata page */
6940 	dev = init_dev();
6941 	dev->blocklen = 512;
6942 	dev->blockcnt =  DEV_BUFFER_SIZE / dev->blocklen;
6943 
6944 	/* Initialize a new blob store */
6945 	spdk_bs_init(dev, &bsopts, bs_op_with_handle_complete, NULL);
6946 	poll_threads();
6947 	CU_ASSERT(g_bserrno == 0);
6948 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
6949 	bs = g_bs;
6950 
6951 	CU_ASSERT(spdk_bs_get_io_unit_size(bs) == 512);
6952 
6953 	/* Unload the blob store */
6954 	spdk_bs_unload(bs, bs_op_complete, NULL);
6955 	poll_threads();
6956 	CU_ASSERT(g_bserrno == 0);
6957 
6958 	/* Modify the super block to look like an older version.
6959 	 * Check that the loaded io unit size then equals SPDK_BS_PAGE_SIZE. */
6960 	super = (struct spdk_bs_super_block *)&g_dev_buffer[0];
6961 	super->io_unit_size = 0;
6962 	super->crc = blob_md_page_calc_crc(super);
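	/* A zeroed io_unit_size mimics a super block written before that field existed;
	 * the CRC has to be recomputed afterwards, or the device would fail super block
	 * validation at load time. */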
6963 
6964 	dev = init_dev();
6965 	dev->blocklen = 512;
6966 	dev->blockcnt =  DEV_BUFFER_SIZE / dev->blocklen;
6967 
6968 	spdk_bs_load(dev, &bsopts, bs_op_with_handle_complete, NULL);
6969 	poll_threads();
6970 	CU_ASSERT(g_bserrno == 0);
6971 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
6972 	bs = g_bs;
6973 
6974 	CU_ASSERT(spdk_bs_get_io_unit_size(bs) == SPDK_BS_PAGE_SIZE);
6975 
6976 	/* Unload the blob store */
6977 	spdk_bs_unload(bs, bs_op_complete, NULL);
6978 	poll_threads();
6979 	CU_ASSERT(g_bserrno == 0);
6980 
6981 	g_bs = NULL;
6982 	g_blob = NULL;
6983 	g_blobid = 0;
6984 }
6985 
6986 static void
6987 first_sync_complete(void *cb_arg, int bserrno)
6988 {
6989 	struct spdk_blob *blob = cb_arg;
6990 	int rc;
6991 
6992 	CU_ASSERT(bserrno == 0);
6993 	rc = spdk_blob_set_xattr(blob, "sync", "second", strlen("second") + 1);
6994 	CU_ASSERT(rc == 0);
6995 	CU_ASSERT(g_bserrno == -1);
6996 
6997 	/* Keep g_bserrno at -1; only the
6998 	 * second sync completion should set it to 0. */
6999 }
7000 
7001 static void
7002 second_sync_complete(void *cb_arg, int bserrno)
7003 {
7004 	struct spdk_blob *blob = cb_arg;
7005 	const void *value;
7006 	size_t value_len;
7007 	int rc;
7008 
7009 	CU_ASSERT(bserrno == 0);
7010 
7011 	/* Verify that the first sync completion had a chance to execute */
7012 	rc = spdk_blob_get_xattr_value(blob, "sync", &value, &value_len);
7013 	CU_ASSERT(rc == 0);
7014 	SPDK_CU_ASSERT_FATAL(value != NULL);
7015 	CU_ASSERT(value_len == strlen("second") + 1);
7016 	CU_ASSERT_NSTRING_EQUAL_FATAL(value, "second", value_len);
7017 
7018 	CU_ASSERT(g_bserrno == -1);
7019 	g_bserrno = bserrno;
7020 }
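/* The two completions above implement the double-sync check used in
 * blob_simultaneous_operations(): first_sync_complete() dirties the blob again by
 * setting an xattr from inside the first sync's completion, so the second sync has
 * real work to do, and second_sync_complete() verifies that the completions ran in
 * order before finally publishing the status in g_bserrno. */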
7021 
7022 static void
7023 blob_simultaneous_operations(void)
7024 {
7025 	struct spdk_blob_store *bs = g_bs;
7026 	struct spdk_blob_opts opts;
7027 	struct spdk_blob *blob, *snapshot;
7028 	spdk_blob_id blobid, snapshotid;
7029 	struct spdk_io_channel *channel;
7030 	int rc;
7031 
7032 	channel = spdk_bs_alloc_io_channel(bs);
7033 	SPDK_CU_ASSERT_FATAL(channel != NULL);
7034 
7035 	ut_spdk_blob_opts_init(&opts);
7036 	opts.num_clusters = 10;
7037 
7038 	blob = ut_blob_create_and_open(bs, &opts);
7039 	blobid = spdk_blob_get_id(blob);
7040 
7041 	/* Create a snapshot and try to delete the blob at the same time:
7042 	 * - the snapshot should be created successfully
7043 	 * - the delete operation should fail with -EBUSY */
7044 	CU_ASSERT(blob->locked_operation_in_progress == false);
7045 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
7046 	CU_ASSERT(blob->locked_operation_in_progress == true);
7047 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
7048 	CU_ASSERT(blob->locked_operation_in_progress == true);
7049 	/* Deletion failure */
7050 	CU_ASSERT(g_bserrno == -EBUSY);
7051 	poll_threads();
7052 	CU_ASSERT(blob->locked_operation_in_progress == false);
7053 	/* Snapshot creation success */
7054 	CU_ASSERT(g_bserrno == 0);
7055 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
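	/* The delete fails with -EBUSY synchronously, before poll_threads(), because the
	 * snapshot operation already marked the blob with locked_operation_in_progress;
	 * the same pattern is repeated below for inflate, clone and resize. */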
7056 
7057 	snapshotid = g_blobid;
7058 
7059 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
7060 	poll_threads();
7061 	CU_ASSERT(g_bserrno == 0);
7062 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
7063 	snapshot = g_blob;
7064 
7065 	/* Inflate the blob and try to delete it at the same time:
7066 	 * - the blob should be inflated successfully
7067 	 * - the delete operation should fail with -EBUSY */
7068 	CU_ASSERT(blob->locked_operation_in_progress == false);
7069 	spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
7070 	CU_ASSERT(blob->locked_operation_in_progress == true);
7071 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
7072 	CU_ASSERT(blob->locked_operation_in_progress == true);
7073 	/* Deletion failure */
7074 	CU_ASSERT(g_bserrno == -EBUSY);
7075 	poll_threads();
7076 	CU_ASSERT(blob->locked_operation_in_progress == false);
7077 	/* Inflation success */
7078 	CU_ASSERT(g_bserrno == 0);
7079 
7080 	/* Clone the snapshot and try to delete the snapshot at the same time:
7081 	 * - the snapshot should be cloned successfully
7082 	 * - the delete operation should fail with -EBUSY */
7083 	CU_ASSERT(blob->locked_operation_in_progress == false);
7084 	spdk_bs_create_clone(bs, snapshotid, NULL, blob_op_with_id_complete, NULL);
7085 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
7086 	/* Deletion failure */
7087 	CU_ASSERT(g_bserrno == -EBUSY);
7088 	poll_threads();
7089 	CU_ASSERT(blob->locked_operation_in_progress == false);
7090 	/* Clone created */
7091 	CU_ASSERT(g_bserrno == 0);
7092 
7093 	/* Resize the blob and try to delete it at the same time:
7094 	 * - the blob should be resized successfully
7095 	 * - the delete operation should fail with -EBUSY */
7096 	CU_ASSERT(blob->locked_operation_in_progress == false);
7097 	spdk_blob_resize(blob, 50, blob_op_complete, NULL);
7098 	CU_ASSERT(blob->locked_operation_in_progress == true);
7099 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
7100 	CU_ASSERT(blob->locked_operation_in_progress == true);
7101 	/* Deletion failure */
7102 	CU_ASSERT(g_bserrno == -EBUSY);
7103 	poll_threads();
7104 	CU_ASSERT(blob->locked_operation_in_progress == false);
7105 	/* Blob resized successfully */
7106 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
7107 	poll_threads();
7108 	CU_ASSERT(g_bserrno == 0);
7109 
7110 	/* Issue two consecutive blob syncs; neither should fail.
7111 	 * Force each sync to actually occur by marking the blob dirty beforehand.
7112 	 * Merely calling sync is not enough to complete the operation,
7113 	 * since disk I/O is required and only finishes once the pollers run. */
7114 	g_bserrno = -1;
7115 
7116 	rc = spdk_blob_set_xattr(blob, "sync", "first", strlen("first") + 1);
7117 	CU_ASSERT(rc == 0);
7118 	spdk_blob_sync_md(blob, first_sync_complete, blob);
7119 	CU_ASSERT(g_bserrno == -1);
7120 
7121 	spdk_blob_sync_md(blob, second_sync_complete, blob);
7122 	CU_ASSERT(g_bserrno == -1);
7123 
7124 	poll_threads();
7125 	CU_ASSERT(g_bserrno == 0);
7126 
7127 	spdk_bs_free_io_channel(channel);
7128 	poll_threads();
7129 
7130 	ut_blob_close_and_delete(bs, snapshot);
7131 	ut_blob_close_and_delete(bs, blob);
7132 }
7133 
7134 static void
7135 blob_persist_test(void)
7136 {
7137 	struct spdk_blob_store *bs = g_bs;
7138 	struct spdk_blob_opts opts;
7139 	struct spdk_blob *blob;
7140 	spdk_blob_id blobid;
7141 	struct spdk_io_channel *channel;
7142 	char *xattr;
7143 	size_t xattr_length;
7144 	int rc;
7145 	uint32_t page_count_clear, page_count_xattr;
7146 	uint64_t poller_iterations;
7147 	bool run_poller;
7148 
7149 	channel = spdk_bs_alloc_io_channel(bs);
7150 	SPDK_CU_ASSERT_FATAL(channel != NULL);
7151 
7152 	ut_spdk_blob_opts_init(&opts);
7153 	opts.num_clusters = 10;
7154 
7155 	blob = ut_blob_create_and_open(bs, &opts);
7156 	blobid = spdk_blob_get_id(blob);
7157 
7158 	/* Save the number of md pages in use right after blob creation.
7159 	 * The count should return to this value once the xattr is removed again. */
7160 	page_count_clear = spdk_bit_array_count_set(bs->used_md_pages);
7161 	SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_clear);
7162 	SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_clear);
7163 
7164 	/* Add an xattr with the maximum descriptor length, so the metadata exceeds a single page. */
7165 	xattr_length = SPDK_BS_MAX_DESC_SIZE - sizeof(struct spdk_blob_md_descriptor_xattr) -
7166 		       strlen("large_xattr");
7167 	xattr = calloc(xattr_length, sizeof(char));
7168 	SPDK_CU_ASSERT_FATAL(xattr != NULL);
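	/* With a descriptor of maximum size, the blob's metadata is expected to spill into
	 * an additional md page, so page_count_xattr measured below should be larger than
	 * page_count_clear. */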
7169 
7170 	rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
7171 	SPDK_CU_ASSERT_FATAL(rc == 0);
7172 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
7173 	poll_threads();
7174 	SPDK_CU_ASSERT_FATAL(g_bserrno == 0);
7175 
7176 	/* Save the number of md pages in use after adding the large xattr */
7177 	page_count_xattr = spdk_bit_array_count_set(bs->used_md_pages);
7178 	SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_xattr);
7179 	SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_xattr);
7180 
7181 	/* Add the xattr to the blob and sync it. While the sync is in flight, remove the xattr and sync again.
7182 	 * Interrupt the first sync after an increasing number of poller iterations on each pass, until it succeeds.
7183 	 * The expectation is that once the second sync completes, no xattr is left in the metadata. */
7184 	poller_iterations = 1;
7185 	run_poller = true;
7186 	while (run_poller) {
7187 		rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
7188 		SPDK_CU_ASSERT_FATAL(rc == 0);
7189 		g_bserrno = -1;
7190 		spdk_blob_sync_md(blob, blob_op_complete, NULL);
7191 		poll_thread_times(0, poller_iterations);
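		/* poll_thread_times() (from the ut_multithread helpers) runs only a bounded
		 * number of poller iterations on thread 0, so for small counts the first sync
		 * is still in flight when the xattr is removed below - exactly the race this
		 * test is after. */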
7192 		if (g_bserrno == 0) {
7193 			/* The poller iteration count was high enough for the first sync to complete.
7194 			 * Verify that the blob occupies enough md pages to store the xattr. */
7195 			SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_xattr);
7196 			SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_xattr);
7197 			SPDK_CU_ASSERT_FATAL(spdk_bit_array_count_set(bs->used_md_pages) == page_count_xattr);
7198 			run_poller = false;
7199 		}
7200 		rc = spdk_blob_remove_xattr(blob, "large_xattr");
7201 		SPDK_CU_ASSERT_FATAL(rc == 0);
7202 		spdk_blob_sync_md(blob, blob_op_complete, NULL);
7203 		poll_threads();
7204 		SPDK_CU_ASSERT_FATAL(g_bserrno == 0);
7205 		SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_clear);
7206 		SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_clear);
7207 		SPDK_CU_ASSERT_FATAL(spdk_bit_array_count_set(bs->used_md_pages) == page_count_clear);
7208 
7209 		/* Reload bs and re-open blob to verify that xattr was not persisted. */
7210 		spdk_blob_close(blob, blob_op_complete, NULL);
7211 		poll_threads();
7212 		CU_ASSERT(g_bserrno == 0);
7213 
7214 		ut_bs_reload(&bs, NULL);
7215 
7216 		spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
7217 		poll_threads();
7218 		CU_ASSERT(g_bserrno == 0);
7219 		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
7220 		blob = g_blob;
7221 
7222 		rc = spdk_blob_get_xattr_value(blob, "large_xattr", (const void **)&xattr, &xattr_length);
7223 		SPDK_CU_ASSERT_FATAL(rc == -ENOENT);
7224 
7225 		poller_iterations++;
7226 		/* Stop at a high iteration count to prevent an infinite loop.
7227 		 * This many iterations should be enough for the first md sync to complete in any case. */
7228 		SPDK_CU_ASSERT_FATAL(poller_iterations < 50);
7229 	}
7230 
7231 	free(xattr);
7232 
7233 	ut_blob_close_and_delete(bs, blob);
7234 
7235 	spdk_bs_free_io_channel(channel);
7236 	poll_threads();
7237 }
7238 
7239 static void
7240 blob_decouple_snapshot(void)
7241 {
7242 	struct spdk_blob_store *bs = g_bs;
7243 	struct spdk_blob_opts opts;
7244 	struct spdk_blob *blob, *snapshot1, *snapshot2;
7245 	struct spdk_io_channel *channel;
7246 	spdk_blob_id blobid, snapshotid;
7247 	uint64_t cluster;
7248 
7249 	channel = spdk_bs_alloc_io_channel(bs);
7250 	SPDK_CU_ASSERT_FATAL(channel != NULL);
7251 
7252 	ut_spdk_blob_opts_init(&opts);
7253 	opts.num_clusters = 10;
7254 	opts.thin_provision = false;
7255 
7256 	blob = ut_blob_create_and_open(bs, &opts);
7257 	blobid = spdk_blob_get_id(blob);
7258 
7259 	/* Create first snapshot */
7260 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 0);
7261 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
7262 	poll_threads();
7263 	CU_ASSERT(g_bserrno == 0);
7264 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
7265 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
7266 	snapshotid = g_blobid;
7267 
7268 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
7269 	poll_threads();
7270 	CU_ASSERT(g_bserrno == 0);
7271 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
7272 	snapshot1 = g_blob;
7273 
7274 	/* Create the second one */
7275 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
7276 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
7277 	poll_threads();
7278 	CU_ASSERT(g_bserrno == 0);
7279 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
7280 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 2);
7281 	snapshotid = g_blobid;
7282 
7283 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
7284 	poll_threads();
7285 	CU_ASSERT(g_bserrno == 0);
7286 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
7287 	snapshot2 = g_blob;
7288 	CU_ASSERT_EQUAL(spdk_blob_get_parent_snapshot(bs, snapshot2->id), snapshot1->id);
7289 
7290 	/* Now decouple the second snapshot, forcing it to copy the written clusters */
7291 	spdk_bs_blob_decouple_parent(bs, channel, snapshot2->id, blob_op_complete, NULL);
7292 	poll_threads();
7293 	CU_ASSERT(g_bserrno == 0);
7294 
7295 	/* Verify that the snapshot has been decoupled and that the clusters have been copied */
7296 	CU_ASSERT_EQUAL(spdk_blob_get_parent_snapshot(bs, snapshot2->id), SPDK_BLOBID_INVALID);
7297 	for (cluster = 0; cluster < snapshot2->active.num_clusters; ++cluster) {
7298 		CU_ASSERT_NOT_EQUAL(snapshot2->active.clusters[cluster], 0);
7299 		CU_ASSERT_NOT_EQUAL(snapshot2->active.clusters[cluster],
7300 				    snapshot1->active.clusters[cluster]);
7301 	}
7302 
7303 	spdk_bs_free_io_channel(channel);
7304 
7305 	ut_blob_close_and_delete(bs, snapshot2);
7306 	ut_blob_close_and_delete(bs, snapshot1);
7307 	ut_blob_close_and_delete(bs, blob);
7308 	poll_threads();
7309 }
7310 
7311 static void
7312 suite_bs_setup(void)
7313 {
7314 	struct spdk_bs_dev *dev;
7315 
7316 	dev = init_dev();
7317 	memset(g_dev_buffer, 0, DEV_BUFFER_SIZE);
7318 	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
7319 	poll_threads();
7320 	CU_ASSERT(g_bserrno == 0);
7321 	CU_ASSERT(g_bs != NULL);
7322 }
7323 
7324 static void
7325 suite_bs_cleanup(void)
7326 {
7327 	spdk_bs_unload(g_bs, bs_op_complete, NULL);
7328 	poll_threads();
7329 	CU_ASSERT(g_bserrno == 0);
7330 	g_bs = NULL;
7331 	memset(g_dev_buffer, 0, DEV_BUFFER_SIZE);
7332 }
7333 
7334 static struct spdk_blob *
7335 ut_blob_create_and_open(struct spdk_blob_store *bs, struct spdk_blob_opts *blob_opts)
7336 {
7337 	struct spdk_blob *blob;
7338 	struct spdk_blob_opts create_blob_opts;
7339 	spdk_blob_id blobid;
7340 
7341 	if (blob_opts == NULL) {
7342 		ut_spdk_blob_opts_init(&create_blob_opts);
7343 		blob_opts = &create_blob_opts;
7344 	}
7345 
7346 	spdk_bs_create_blob_ext(bs, blob_opts, blob_op_with_id_complete, NULL);
7347 	poll_threads();
7348 	CU_ASSERT(g_bserrno == 0);
7349 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
7350 	blobid = g_blobid;
7351 	g_blobid = -1;
7352 
7353 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
7354 	poll_threads();
7355 	CU_ASSERT(g_bserrno == 0);
7356 	CU_ASSERT(g_blob != NULL);
7357 	blob = g_blob;
7358 
7359 	g_blob = NULL;
7360 	g_bserrno = -1;
7361 
7362 	return blob;
7363 }
7364 
7365 static void
7366 ut_blob_close_and_delete(struct spdk_blob_store *bs, struct spdk_blob *blob)
7367 {
7368 	spdk_blob_id blobid = spdk_blob_get_id(blob);
7369 
7370 	spdk_blob_close(blob, blob_op_complete, NULL);
7371 	poll_threads();
7372 	CU_ASSERT(g_bserrno == 0);
7373 	g_blob = NULL;
7374 
7375 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
7376 	poll_threads();
7377 	CU_ASSERT(g_bserrno == 0);
7378 	g_bserrno = -1;
7379 }
7380 
7381 static void
7382 suite_blob_setup(void)
7383 {
7384 	suite_bs_setup();
7385 	CU_ASSERT(g_bs != NULL);
7386 
7387 	g_blob = ut_blob_create_and_open(g_bs, NULL);
7388 	CU_ASSERT(g_blob != NULL);
7389 }
7390 
7391 static void
7392 suite_blob_cleanup(void)
7393 {
7394 	ut_blob_close_and_delete(g_bs, g_blob);
7395 	CU_ASSERT(g_blob == NULL);
7396 
7397 	suite_bs_cleanup();
7398 	CU_ASSERT(g_bs == NULL);
7399 }
7400 
7401 int
7402 main(int argc, char **argv)
7403 {
7404 	CU_pSuite	suite, suite_bs, suite_blob;
7405 	unsigned int	num_failures;
7406 
7407 	CU_set_error_action(CUEA_ABORT);
7408 	CU_initialize_registry();
7409 
7410 	suite = CU_add_suite("blob", NULL, NULL);
7411 	suite_bs = CU_add_suite_with_setup_and_teardown("blob_bs", NULL, NULL,
7412 			suite_bs_setup, suite_bs_cleanup);
7413 	suite_blob = CU_add_suite_with_setup_and_teardown("blob_blob", NULL, NULL,
7414 			suite_blob_setup, suite_blob_cleanup);
7415 
7416 	CU_ADD_TEST(suite, blob_init);
7417 	CU_ADD_TEST(suite_bs, blob_open);
7418 	CU_ADD_TEST(suite_bs, blob_create);
7419 	CU_ADD_TEST(suite_bs, blob_create_loop);
7420 	CU_ADD_TEST(suite_bs, blob_create_fail);
7421 	CU_ADD_TEST(suite_bs, blob_create_internal);
7422 	CU_ADD_TEST(suite_bs, blob_create_zero_extent);
7423 	CU_ADD_TEST(suite, blob_thin_provision);
7424 	CU_ADD_TEST(suite_bs, blob_snapshot);
7425 	CU_ADD_TEST(suite_bs, blob_clone);
7426 	CU_ADD_TEST(suite_bs, blob_inflate);
7427 	CU_ADD_TEST(suite_bs, blob_delete);
7428 	CU_ADD_TEST(suite_bs, blob_resize_test);
7429 	CU_ADD_TEST(suite, blob_read_only);
7430 	CU_ADD_TEST(suite_bs, channel_ops);
7431 	CU_ADD_TEST(suite_bs, blob_super);
7432 	CU_ADD_TEST(suite_blob, blob_write);
7433 	CU_ADD_TEST(suite_blob, blob_read);
7434 	CU_ADD_TEST(suite_blob, blob_rw_verify);
7435 	CU_ADD_TEST(suite_bs, blob_rw_verify_iov);
7436 	CU_ADD_TEST(suite_blob, blob_rw_verify_iov_nomem);
7437 	CU_ADD_TEST(suite_blob, blob_rw_iov_read_only);
7438 	CU_ADD_TEST(suite_bs, blob_unmap);
7439 	CU_ADD_TEST(suite_bs, blob_iter);
7440 	CU_ADD_TEST(suite_blob, blob_xattr);
7441 	CU_ADD_TEST(suite_bs, blob_parse_md);
7442 	CU_ADD_TEST(suite, bs_load);
7443 	CU_ADD_TEST(suite_bs, bs_load_pending_removal);
7444 	CU_ADD_TEST(suite, bs_load_custom_cluster_size);
7445 	CU_ADD_TEST(suite, bs_load_after_failed_grow);
7446 	CU_ADD_TEST(suite_bs, bs_unload);
7447 	CU_ADD_TEST(suite, bs_cluster_sz);
7448 	CU_ADD_TEST(suite_bs, bs_usable_clusters);
7449 	CU_ADD_TEST(suite, bs_resize_md);
7450 	CU_ADD_TEST(suite, bs_destroy);
7451 	CU_ADD_TEST(suite, bs_type);
7452 	CU_ADD_TEST(suite, bs_super_block);
7453 	CU_ADD_TEST(suite, bs_test_recover_cluster_count);
7454 	CU_ADD_TEST(suite, bs_test_grow);
7455 	CU_ADD_TEST(suite, blob_serialize_test);
7456 	CU_ADD_TEST(suite_bs, blob_crc);
7457 	CU_ADD_TEST(suite, super_block_crc);
7458 	CU_ADD_TEST(suite_blob, blob_dirty_shutdown);
7459 	CU_ADD_TEST(suite_bs, blob_flags);
7460 	CU_ADD_TEST(suite_bs, bs_version);
7461 	CU_ADD_TEST(suite_bs, blob_set_xattrs_test);
7462 	CU_ADD_TEST(suite_bs, blob_thin_prov_alloc);
7463 	CU_ADD_TEST(suite_bs, blob_insert_cluster_msg_test);
7464 	CU_ADD_TEST(suite_bs, blob_thin_prov_rw);
7465 	CU_ADD_TEST(suite, blob_thin_prov_write_count_io);
7466 	CU_ADD_TEST(suite_bs, blob_thin_prov_rle);
7467 	CU_ADD_TEST(suite_bs, blob_thin_prov_rw_iov);
7468 	CU_ADD_TEST(suite, bs_load_iter_test);
7469 	CU_ADD_TEST(suite_bs, blob_snapshot_rw);
7470 	CU_ADD_TEST(suite_bs, blob_snapshot_rw_iov);
7471 	CU_ADD_TEST(suite, blob_relations);
7472 	CU_ADD_TEST(suite, blob_relations2);
7473 	CU_ADD_TEST(suite, blob_relations3);
7474 	CU_ADD_TEST(suite, blobstore_clean_power_failure);
7475 	CU_ADD_TEST(suite, blob_delete_snapshot_power_failure);
7476 	CU_ADD_TEST(suite, blob_create_snapshot_power_failure);
7477 	CU_ADD_TEST(suite_bs, blob_inflate_rw);
7478 	CU_ADD_TEST(suite_bs, blob_snapshot_freeze_io);
7479 	CU_ADD_TEST(suite_bs, blob_operation_split_rw);
7480 	CU_ADD_TEST(suite_bs, blob_operation_split_rw_iov);
7481 	CU_ADD_TEST(suite, blob_io_unit);
7482 	CU_ADD_TEST(suite, blob_io_unit_compatibility);
7483 	CU_ADD_TEST(suite_bs, blob_simultaneous_operations);
7484 	CU_ADD_TEST(suite_bs, blob_persist_test);
7485 	CU_ADD_TEST(suite_bs, blob_decouple_snapshot);
7486 
7487 	allocate_threads(2);
7488 	set_thread(0);
7489 
7490 	g_dev_buffer = calloc(1, DEV_BUFFER_SIZE);
7491 
7492 	CU_basic_set_mode(CU_BRM_VERBOSE);
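	/* Run the whole registry twice: once with extent tables disabled and once enabled,
	 * so both cluster-map serialization formats are exercised by every test. */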
7493 	g_use_extent_table = false;
7494 	CU_basic_run_tests();
7495 	num_failures = CU_get_number_of_failures();
7496 	g_use_extent_table = true;
7497 	CU_basic_run_tests();
7498 	num_failures += CU_get_number_of_failures();
7499 	CU_cleanup_registry();
7500 
7501 	free(g_dev_buffer);
7502 
7503 	free_threads();
7504 
7505 	return num_failures;
7506 }
7507