xref: /spdk/test/unit/lib/blob/blob.c/blob_ut.c (revision 3299bf6d5a3e5dbc4fcda51ed9a15d0f870476b6)
1 /*   SPDX-License-Identifier: BSD-3-Clause
2  *   Copyright (C) 2017 Intel Corporation.
3  *   All rights reserved.
4  *   Copyright (c) 2021-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
5  */
6 
7 #include "spdk/stdinc.h"
8 
9 #include "spdk_internal/cunit.h"
10 #include "spdk/blob.h"
11 #include "spdk/string.h"
12 
13 #include "common/lib/ut_multithread.c"
14 #include "../bs_dev_common.c"
15 #include "thread/thread.c"
16 #include "ext_dev.c"
17 #include "blob/blobstore.c"
18 #include "blob/request.c"
19 #include "blob/zeroes.c"
20 #include "blob/blob_bs_dev.c"
21 #include "esnap_dev.c"
22 #define BLOCKLEN DEV_BUFFER_BLOCKLEN
23 
/* Global state shared between async completion callbacks and test bodies.
 * Tests kick off an operation, poll_threads(), then inspect these globals
 * to observe the operation's result. */
struct spdk_blob_store *g_bs;           /* store reported by bs_op_with_handle_complete */
spdk_blob_id g_blobid;                  /* id reported by blob_op_with_id_complete */
struct spdk_blob *g_blob, *g_blob2;     /* handles reported by *_with_handle completions */
int g_bserrno, g_bserrno2;              /* status reported by the last completion(s) */
struct spdk_xattr_names *g_names;
int g_done;
/* Parallel name/value tables consumed by the xattr get_value callbacks below. */
char *g_xattr_names[] = {"first", "second", "third"};
char *g_xattr_values[] = {"one", "two", "three"};
uint64_t g_ctx = 1729;                  /* opaque ctx target; value itself is arbitrary */
bool g_use_extent_table = false;        /* toggled per suite to cover both md formats */
uint64_t g_copied_clusters_count = 0;   /* updated by blob_shallow_copy_status_cb */
35 
/* On-disk super block layout for blobstore metadata format version 1.
 * Presumably kept here so tests can craft a down-rev super block and exercise
 * version handling on load — confirm against the tests using it.
 * The layout must occupy exactly one 4 KiB page (see the static assert). */
struct spdk_bs_super_block_ver1 {
	uint8_t		signature[8];
	uint32_t        version;
	uint32_t        length;
	uint32_t	clean; /* If there was a clean shutdown, this is 1. */
	spdk_blob_id	super_blob;

	uint32_t	cluster_size; /* In bytes */

	uint32_t	used_page_mask_start; /* Offset from beginning of disk, in pages */
	uint32_t	used_page_mask_len; /* Count, in pages */

	uint32_t	used_cluster_mask_start; /* Offset from beginning of disk, in pages */
	uint32_t	used_cluster_mask_len; /* Count, in pages */

	uint32_t	md_start; /* Offset from beginning of disk, in pages */
	uint32_t	md_len; /* Count, in pages */

	/* Pads the struct (including crc) to exactly 4096 bytes. */
	uint8_t		reserved[4036];
	uint32_t	crc;
} __attribute__((packed));
SPDK_STATIC_ASSERT(sizeof(struct spdk_bs_super_block_ver1) == 0x1000, "Invalid super block size");
58 
59 static struct spdk_blob *ut_blob_create_and_open(struct spdk_blob_store *bs,
60 		struct spdk_blob_opts *blob_opts);
61 static void ut_blob_close_and_delete(struct spdk_blob_store *bs, struct spdk_blob *blob);
62 static void suite_blob_setup(void);
63 static void suite_blob_cleanup(void);
64 
65 DEFINE_STUB(spdk_memory_domain_memzero, int, (struct spdk_memory_domain *src_domain,
66 		void *src_domain_ctx, struct iovec *iov, uint32_t iovcnt, void (*cpl_cb)(void *, int),
67 		void *cpl_cb_arg), 0);
68 
69 static bool
70 is_esnap_clone(struct spdk_blob *_blob, const void *id, size_t id_len)
71 {
72 	const void *val = NULL;
73 	size_t len = 0;
74 	bool c0, c1, c2, c3;
75 
76 	CU_ASSERT(blob_get_xattr_value(_blob, BLOB_EXTERNAL_SNAPSHOT_ID, &val, &len,
77 				       true) == 0);
78 	CU_ASSERT((c0 = (len == id_len)));
79 	CU_ASSERT((c1 = (val != NULL && memcmp(val, id, len) == 0)));
80 	CU_ASSERT((c2 = !!(_blob->invalid_flags & SPDK_BLOB_EXTERNAL_SNAPSHOT)));
81 	CU_ASSERT((c3 = (_blob->parent_id == SPDK_BLOBID_EXTERNAL_SNAPSHOT)));
82 
83 	return c0 && c1 && c2 && c3;
84 }
85 
86 static bool
87 is_not_esnap_clone(struct spdk_blob *_blob)
88 {
89 	const void *val = NULL;
90 	size_t len = 0;
91 	bool c1, c2, c3, c4;
92 
93 	CU_ASSERT((c1 = (blob_get_xattr_value(_blob, BLOB_EXTERNAL_SNAPSHOT_ID, &val, &len,
94 					      true) == -ENOENT)));
95 	CU_ASSERT((c2 = (val == NULL)));
96 	CU_ASSERT((c3 = ((_blob->invalid_flags & SPDK_BLOB_EXTERNAL_SNAPSHOT) == 0)));
97 	CU_ASSERT((c4 = (_blob->parent_id != SPDK_BLOBID_EXTERNAL_SNAPSHOT)));
98 
99 	return c1 && c2 && c3 && c4;
100 }
101 
102 #define UT_ASSERT_IS_ESNAP_CLONE(_blob, _id, _len) CU_ASSERT(is_esnap_clone(_blob, _id, _len))
103 #define UT_ASSERT_IS_NOT_ESNAP_CLONE(_blob) CU_ASSERT(is_not_esnap_clone(_blob))
104 
105 static void
106 _get_xattr_value(void *arg, const char *name,
107 		 const void **value, size_t *value_len)
108 {
109 	uint64_t i;
110 
111 	SPDK_CU_ASSERT_FATAL(value_len != NULL);
112 	SPDK_CU_ASSERT_FATAL(value != NULL);
113 	CU_ASSERT(arg == &g_ctx);
114 
115 	for (i = 0; i < sizeof(g_xattr_names); i++) {
116 		if (!strcmp(name, g_xattr_names[i])) {
117 			*value_len = strlen(g_xattr_values[i]);
118 			*value = g_xattr_values[i];
119 			break;
120 		}
121 	}
122 }
123 
/*
 * xattr get_value callback that reports a present-but-empty value: NULL
 * pointer, zero length.  Used to exercise empty-xattr handling.
 */
static void
_get_xattr_value_null(void *arg, const char *name,
		      const void **value, size_t *value_len)
{
	SPDK_CU_ASSERT_FATAL(value_len != NULL);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(arg == NULL);

	*value = NULL;
	*value_len = 0;
}
135 
136 static int
137 _get_snapshots_count(struct spdk_blob_store *bs)
138 {
139 	struct spdk_blob_list *snapshot = NULL;
140 	int count = 0;
141 
142 	TAILQ_FOREACH(snapshot, &bs->snapshots, link) {
143 		count += 1;
144 	}
145 
146 	return count;
147 }
148 
149 static void
150 ut_spdk_blob_opts_init(struct spdk_blob_opts *opts)
151 {
152 	spdk_blob_opts_init(opts, sizeof(*opts));
153 	opts->use_extent_table = g_use_extent_table;
154 }
155 
156 static void
157 bs_op_complete(void *cb_arg, int bserrno)
158 {
159 	g_bserrno = bserrno;
160 }
161 
162 static void
163 bs_op_with_handle_complete(void *cb_arg, struct spdk_blob_store *bs,
164 			   int bserrno)
165 {
166 	g_bs = bs;
167 	g_bserrno = bserrno;
168 }
169 
170 static void
171 blob_op_complete(void *cb_arg, int bserrno)
172 {
173 	if (cb_arg != NULL) {
174 		int *errp = cb_arg;
175 
176 		*errp = bserrno;
177 	}
178 	g_bserrno = bserrno;
179 }
180 
181 static void
182 blob_op_with_id_complete(void *cb_arg, spdk_blob_id blobid, int bserrno)
183 {
184 	g_blobid = blobid;
185 	g_bserrno = bserrno;
186 }
187 
188 static void
189 blob_op_with_handle_complete(void *cb_arg, struct spdk_blob *blb, int bserrno)
190 {
191 	g_blob = blb;
192 	g_bserrno = bserrno;
193 }
194 
195 static void
196 blob_op_with_handle_complete2(void *cb_arg, struct spdk_blob *blob, int bserrno)
197 {
198 	if (g_blob == NULL) {
199 		g_blob = blob;
200 		g_bserrno = bserrno;
201 	} else {
202 		g_blob2 = blob;
203 		g_bserrno2 = bserrno;
204 	}
205 }
206 
207 static void
208 blob_shallow_copy_status_cb(uint64_t copied_clusters, void *cb_arg)
209 {
210 	g_copied_clusters_count = copied_clusters;
211 }
212 
213 static void
214 ut_bs_reload(struct spdk_blob_store **bs, struct spdk_bs_opts *opts)
215 {
216 	struct spdk_bs_dev *dev;
217 
218 	/* Unload the blob store */
219 	spdk_bs_unload(*bs, bs_op_complete, NULL);
220 	poll_threads();
221 	CU_ASSERT(g_bserrno == 0);
222 
223 	dev = init_dev();
224 	/* Load an existing blob store */
225 	spdk_bs_load(dev, opts, bs_op_with_handle_complete, NULL);
226 	poll_threads();
227 	CU_ASSERT(g_bserrno == 0);
228 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
229 	*bs = g_bs;
230 
231 	g_bserrno = -1;
232 }
233 
234 static void
235 ut_bs_dirty_load(struct spdk_blob_store **bs, struct spdk_bs_opts *opts)
236 {
237 	struct spdk_bs_dev *dev;
238 
239 	/* Dirty shutdown */
240 	bs_free(*bs);
241 
242 	dev = init_dev();
243 	/* Load an existing blob store */
244 	spdk_bs_load(dev, opts, bs_op_with_handle_complete, NULL);
245 	poll_threads();
246 	CU_ASSERT(g_bserrno == 0);
247 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
248 	*bs = g_bs;
249 
250 	g_bserrno = -1;
251 }
252 
253 static void
254 blob_init(void)
255 {
256 	struct spdk_blob_store *bs;
257 	struct spdk_bs_dev *dev;
258 
259 	dev = init_dev();
260 
261 	/* should fail for an unsupported blocklen */
262 	dev->blocklen = 500;
263 	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
264 	poll_threads();
265 	CU_ASSERT(g_bserrno == -EINVAL);
266 
267 	dev = init_dev();
268 	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
269 	poll_threads();
270 	CU_ASSERT(g_bserrno == 0);
271 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
272 	bs = g_bs;
273 
274 	spdk_bs_unload(bs, bs_op_complete, NULL);
275 	poll_threads();
276 	CU_ASSERT(g_bserrno == 0);
277 	g_bs = NULL;
278 }
279 
/*
 * Super-blob bookkeeping: getting the super blob before one is set fails with
 * -ENOENT; after spdk_bs_set_super(), spdk_bs_get_super() returns the same id.
 */
static void
blob_super(void)
{
	struct spdk_blob_store *bs = g_bs;
	spdk_blob_id blobid;
	struct spdk_blob_opts blob_opts;

	/* Get the super blob without having set one */
	spdk_bs_get_super(bs, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOENT);
	CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID);

	/* Create a blob */
	ut_spdk_blob_opts_init(&blob_opts);
	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid !=  SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	/* Set the blob as the super blob */
	spdk_bs_set_super(bs, blobid, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Get the super blob and verify it is the one we just set */
	spdk_bs_get_super(bs, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(blobid == g_blobid);
}
312 
/*
 * Open/close reference counting: repeated opens of the same blob return the
 * same handle, each open takes a reference, and each close releases one.
 */
static void
blob_open(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_blob_opts blob_opts;
	spdk_blob_id blobid, blobid2;

	ut_spdk_blob_opts_init(&blob_opts);
	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;

	/* The handle must report the id it was opened with */
	blobid2 = spdk_blob_get_id(blob);
	CU_ASSERT(blobid == blobid2);

	/* Try to open file again.  It should return success. */
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(blob == g_blob);

	/* Release the first of the two references */
	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/*
	 * Close the file a second time, releasing the second reference.  This
	 *  should succeed.
	 */
	blob = g_blob;
	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/*
	 * Try to open file again.  It should succeed.  This tests the case
	 *  where the file is opened, closed, then re-opened again.
	 */
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;
	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Try to open file twice in succession.  This should return the same
	 * blob object.
	 */
	g_blob = NULL;
	g_blob2 = NULL;
	g_bserrno = -1;
	g_bserrno2 = -1;
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete2, NULL);
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete2, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_bserrno2 == 0);
	CU_ASSERT(g_blob != NULL);
	CU_ASSERT(g_blob2 != NULL);
	CU_ASSERT(g_blob == g_blob2);

	/* Two opens took two references: drop one here, the last one (plus the
	 * delete) is handled by ut_blob_close_and_delete() below. */
	g_bserrno = -1;
	spdk_blob_close(g_blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	ut_blob_close_and_delete(bs, g_blob);
}
392 
/*
 * Blob creation variants: explicit cluster count, zero clusters, NULL (default)
 * options, and an over-sized request that must fail with -ENOSPC.
 */
static void
blob_create(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_blob_opts opts;
	spdk_blob_id blobid;

	/* Create blob with 10 clusters */

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;

	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;
	/* Not thin-provisioned, so all 10 clusters are allocated up front */
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 10);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Create blob with 0 clusters */

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 0;

	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Create blob with default options (opts == NULL) */

	spdk_bs_create_blob_ext(bs, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;
	/* Default num_clusters is 0 */
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Try to create blob with size larger than blobstore */

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = bs->total_clusters + 1;

	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOSPC);
}
476 
/*
 * A zero-cluster blob created with the extent table enabled must end up with
 * an empty extent page array (no pages allocated), both through the public
 * create path and through bs_create_blob() with NULL internal options.
 */
static void
blob_create_zero_extent(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	spdk_blob_id blobid;

	/* Create blob with default options (opts == NULL) */
	spdk_bs_create_blob_ext(bs, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
	/* Extent table is present but holds no pages for a zero-size blob */
	CU_ASSERT(blob->extent_table_found == true);
	CU_ASSERT(blob->active.extent_pages_array_size == 0);
	CU_ASSERT(blob->active.extent_pages == NULL);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Create blob with NULL internal options  */
	bs_create_blob(bs, NULL, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;
	/* No internal xattrs were requested, so none may be present */
	CU_ASSERT(TAILQ_FIRST(&blob->xattrs_internal) == NULL);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
	CU_ASSERT(blob->extent_table_found == true);
	CU_ASSERT(blob->active.extent_pages_array_size == 0);
	CU_ASSERT(blob->active.extent_pages == NULL);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
}
527 
528 /*
529  * Create and delete one blob in a loop over and over again.  This helps ensure
530  * that the internal bit masks tracking used clusters and md_pages are being
531  * tracked correctly.
532  */
533 static void
534 blob_create_loop(void)
535 {
536 	struct spdk_blob_store *bs = g_bs;
537 	struct spdk_blob_opts opts;
538 	uint32_t i, loop_count;
539 
540 	loop_count = 4 * spdk_max(spdk_bit_array_capacity(bs->used_md_pages),
541 				  spdk_bit_pool_capacity(bs->used_clusters));
542 
543 	for (i = 0; i < loop_count; i++) {
544 		ut_spdk_blob_opts_init(&opts);
545 		opts.num_clusters = 1;
546 		g_bserrno = -1;
547 		g_blobid = SPDK_BLOBID_INVALID;
548 		spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
549 		poll_threads();
550 		CU_ASSERT(g_bserrno == 0);
551 		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
552 		spdk_bs_delete_blob(bs, g_blobid, blob_op_complete, NULL);
553 		poll_threads();
554 		CU_ASSERT(g_bserrno == 0);
555 	}
556 }
557 
/*
 * A create request with xattr names but a NULL get_value callback must fail
 * with -EINVAL and must not leak any md pages or blob ids — verified both
 * immediately and across a clean reload.
 */
static void
blob_create_fail(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts opts;
	spdk_blob_id blobid;
	uint32_t used_blobids_count = spdk_bit_array_count_set(bs->used_blobids);
	uint32_t used_md_pages_count = spdk_bit_array_count_set(bs->used_md_pages);

	/* NULL callback */
	ut_spdk_blob_opts_init(&opts);
	opts.xattrs.names = g_xattr_names;
	opts.xattrs.get_value = NULL;
	opts.xattrs.count = 1;
	opts.xattrs.ctx = &g_ctx;

	/* Predict the md page the failed create would have used, so we can
	 * prove below that no blob exists at that id. */
	blobid = spdk_bit_array_find_first_clear(bs->used_md_pages, 0);
	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	CU_ASSERT(spdk_bit_array_count_set(bs->used_blobids) == used_blobids_count);
	CU_ASSERT(spdk_bit_array_count_set(bs->used_md_pages) == used_md_pages_count);

	/* The predicted id must not be openable */
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOENT);
	SPDK_CU_ASSERT_FATAL(g_blob == NULL);

	/* Nothing about the failed create may persist across a reload */
	ut_bs_reload(&bs, NULL);
	CU_ASSERT(spdk_bit_array_count_set(bs->used_blobids) == used_blobids_count);
	CU_ASSERT(spdk_bit_array_count_set(bs->used_md_pages) == used_md_pages_count);

	spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_blob == NULL);
	CU_ASSERT(g_bserrno == -ENOENT);
}
596 
/*
 * Internal xattrs set at create time must be readable through the internal
 * getter (blob_get_xattr_value with internal=true) but invisible to the
 * public spdk_blob_get_xattr_value() API.
 */
static void
blob_create_internal(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_blob_opts opts;
	struct spdk_blob_xattr_opts internal_xattrs;
	const void *value;
	size_t value_len;
	spdk_blob_id blobid;
	int rc;

	/* Create blob with custom xattrs */

	ut_spdk_blob_opts_init(&opts);
	blob_xattrs_init(&internal_xattrs);
	internal_xattrs.count = 3;
	internal_xattrs.names = g_xattr_names;
	internal_xattrs.get_value = _get_xattr_value;
	internal_xattrs.ctx = &g_ctx;

	bs_create_blob(bs, &opts, &internal_xattrs, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;

	/* All three internal xattrs must be readable internally */
	rc = blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len, true);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);

	rc = blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len, true);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);

	rc = blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len, true);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);

	/* ...but hidden from the public xattr API */
	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len);
	CU_ASSERT(rc != 0);

	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len);
	CU_ASSERT(rc != 0);

	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len);
	CU_ASSERT(rc != 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Create blob with NULL internal options  */

	bs_create_blob(bs, NULL, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	CU_ASSERT(TAILQ_FIRST(&g_blob->xattrs_internal) == NULL);
	CU_ASSERT(spdk_blob_get_num_clusters(g_blob) == 0);

	blob = g_blob;

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
}
682 
/*
 * Thin-provisioned blobs allocate no clusters at create time, and a dirty
 * shutdown followed by a recovering load must preserve the thin-provisioned,
 * zero-allocation state.
 */
static void
blob_thin_provision(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct spdk_blob *blob;
	struct spdk_blob_opts opts;
	struct spdk_bs_opts bs_opts;
	spdk_blob_id blobid;

	dev = init_dev();
	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
	snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");

	/* Initialize a new blob store */
	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);

	bs = g_bs;

	/* Create blob with thin provisioning enabled */

	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = true;
	opts.num_clusters = 10;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);
	CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);
	/* In thin provisioning with num_clusters is set, if not using the
	 * extent table, there is no allocation. If extent table is used,
	 * there is related allocation happened. */
	if (blob->extent_table_found == true) {
		CU_ASSERT(blob->active.extent_pages_array_size > 0);
		CU_ASSERT(blob->active.extent_pages != NULL);
	} else {
		CU_ASSERT(blob->active.extent_pages_array_size == 0);
		CU_ASSERT(blob->active.extent_pages == NULL);
	}

	/* NOTE(review): no poll_threads() between this close and the assert, so
	 * g_bserrno may still hold the previous operation's status; the close
	 * presumably completes inside ut_bs_dirty_load() below — confirm this
	 * is intentional. */
	spdk_blob_close(blob, blob_op_complete, NULL);
	CU_ASSERT(g_bserrno == 0);

	/* Do not shut down cleanly.  This makes sure that when we load again
	 *  and try to recover a valid used_cluster map, that blobstore will
	 *  ignore clusters with index 0 since these are unallocated clusters.
	 */
	ut_bs_dirty_load(&bs, &bs_opts);

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;
	CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);

	ut_blob_close_and_delete(bs, blob);

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
}
750 
/*
 * Snapshot chains: snapshotting a blob turns it into a thin-provisioned clone
 * of the snapshot; a second snapshot inserts between blob and first snapshot;
 * xattrs are applied to the new snapshot; snapshotting a snapshot fails; and
 * clone lists shrink as blobs/snapshots are deleted.
 */
static void
blob_snapshot(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_blob *snapshot, *snapshot2;
	struct spdk_blob_bs_dev *blob_bs_dev;
	struct spdk_blob_opts opts;
	struct spdk_blob_xattr_opts xattrs;
	spdk_blob_id blobid;
	spdk_blob_id snapshotid;
	spdk_blob_id snapshotid2;
	const void *value;
	size_t value_len;
	int rc;
	spdk_blob_id ids[2];
	size_t count;

	/* Create blob with 10 clusters */
	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);

	/* Create snapshot from blob */
	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 0);
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
	snapshotid = g_blobid;

	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot = g_blob;
	/* The snapshot inherits the clusters and becomes read-only */
	CU_ASSERT(snapshot->data_ro == true);
	CU_ASSERT(snapshot->md_ro == true);
	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(snapshot) == 10);

	/* The original blob is now a thin-provisioned clone with no clusters */
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);
	CU_ASSERT(blob->invalid_flags & SPDK_BLOB_THIN_PROV);
	CU_ASSERT(spdk_mem_all_zero(blob->active.clusters,
				    blob->active.num_clusters * sizeof(blob->active.clusters[0])));

	/* Try to create snapshot from clone with xattrs */
	xattrs.names = g_xattr_names;
	xattrs.get_value = _get_xattr_value;
	xattrs.count = 3;
	xattrs.ctx = &g_ctx;
	spdk_bs_create_snapshot(bs, blobid, &xattrs, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 2);
	snapshotid2 = g_blobid;

	/* NOTE(review): no poll_threads() between this open and the asserts;
	 * presumably the UT backing device completes md reads inline so the
	 * open finishes synchronously — confirm. */
	spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL);
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot2 = g_blob;
	CU_ASSERT(snapshot2->data_ro == true);
	CU_ASSERT(snapshot2->md_ro == true);
	CU_ASSERT(spdk_blob_get_num_clusters(snapshot2) == 10);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(snapshot2) == 0);

	/* Confirm that blob is backed by snapshot2 and snapshot2 is backed by snapshot */
	CU_ASSERT(snapshot->back_bs_dev == NULL);
	SPDK_CU_ASSERT_FATAL(blob->back_bs_dev != NULL);
	SPDK_CU_ASSERT_FATAL(snapshot2->back_bs_dev != NULL);

	blob_bs_dev = (struct spdk_blob_bs_dev *)blob->back_bs_dev;
	CU_ASSERT(blob_bs_dev->blob == snapshot2);

	blob_bs_dev = (struct spdk_blob_bs_dev *)snapshot2->back_bs_dev;
	CU_ASSERT(blob_bs_dev->blob == snapshot);

	/* The xattrs passed at snapshot creation must be present on snapshot2 */
	rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[0], &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);

	rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[1], &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);

	rc = spdk_blob_get_xattr_value(snapshot2, g_xattr_names[2], &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);

	/* Confirm that blob is clone of snapshot2, and snapshot2 is clone of snapshot */
	count = 2;
	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0);
	CU_ASSERT(count == 1);
	CU_ASSERT(ids[0] == blobid);

	count = 2;
	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);
	CU_ASSERT(count == 1);
	CU_ASSERT(ids[0] == snapshotid2);

	/* Try to create snapshot from snapshot */
	spdk_bs_create_snapshot(bs, snapshotid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);
	CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID);
	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 2);

	/* Delete blob and confirm that it is no longer on snapshot2 clone list */
	ut_blob_close_and_delete(bs, blob);
	count = 2;
	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0);
	CU_ASSERT(count == 0);

	/* Delete snapshot2 and confirm that it is no longer on snapshot clone list */
	ut_blob_close_and_delete(bs, snapshot2);
	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
	count = 2;
	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &count) == 0);
	CU_ASSERT(count == 0);

	ut_blob_close_and_delete(bs, snapshot);
	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 0);
}
886 
/*
 * I/O issued while a snapshot is in progress must be queued (blob frozen),
 * then replayed after the snapshot completes, landing in the new clone so the
 * written payload reads back intact.
 */
static void
blob_snapshot_freeze_io(void)
{
	struct spdk_io_channel *channel;
	struct spdk_bs_channel *bs_channel;
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_blob_opts opts;
	spdk_blob_id blobid;
	uint32_t num_of_pages = 10;
	uint8_t payload_read[num_of_pages * BLOCKLEN];
	uint8_t payload_write[num_of_pages * BLOCKLEN];
	uint8_t payload_zero[num_of_pages * BLOCKLEN];

	memset(payload_write, 0xE5, sizeof(payload_write));
	memset(payload_read, 0x00, sizeof(payload_read));
	memset(payload_zero, 0x00, sizeof(payload_zero));

	/* Test freeze I/O during snapshot */
	channel = spdk_bs_alloc_io_channel(bs);
	bs_channel = spdk_io_channel_get_ctx(channel);

	/* Create blob with 10 clusters */
	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;
	opts.thin_provision = false;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);

	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);

	/* This is implementation specific.
	 * Flag 'frozen_io' is set in _spdk_bs_snapshot_freeze_cpl callback.
	 * Four async I/O operations happen before that. */
	poll_thread_times(0, 5);

	CU_ASSERT(TAILQ_EMPTY(&bs_channel->queued_io));

	/* Blob I/O should be frozen here */
	CU_ASSERT(blob->frozen_refcnt == 1);

	/* Write to the blob */
	spdk_blob_io_write(blob, channel, payload_write, 0, num_of_pages, blob_op_complete, NULL);

	/* Verify that I/O is queued */
	CU_ASSERT(!TAILQ_EMPTY(&bs_channel->queued_io));
	/* Verify that payload is not written to disk, at this point the blobs already switched */
	CU_ASSERT(blob->active.clusters[0] == 0);

	/* Finish all operations including spdk_bs_create_snapshot */
	poll_threads();

	/* Verify snapshot */
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);

	/* Verify that blob has unset frozen_io */
	CU_ASSERT(blob->frozen_refcnt == 0);

	/* Verify that postponed I/O completed successfully by comparing payload */
	spdk_blob_io_read(blob, channel, payload_read, 0, num_of_pages, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_write, payload_read, num_of_pages * BLOCKLEN) == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();

	ut_blob_close_and_delete(bs, blob);
}
959 
/* Exercise snapshot/clone creation: a clone can only be created from a
 * read-only blob (a snapshot, or a blob explicitly marked read-only), and
 * xattrs supplied at clone-creation time must be retrievable on the clone. */
static void
blob_clone(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts opts;
	struct spdk_blob *blob, *snapshot, *clone;
	spdk_blob_id blobid, cloneid, snapshotid;
	struct spdk_blob_xattr_opts xattrs;
	const void *value;
	size_t value_len;
	int rc;

	/* Create blob with 10 clusters */

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);

	/* Create snapshot */
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snapshotid = g_blobid;

	/* A snapshot is created with both data and metadata read-only and keeps
	 * the origin blob's logical size. */
	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot = g_blob;
	CU_ASSERT(snapshot->data_ro == true);
	CU_ASSERT(snapshot->md_ro == true);
	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10);

	spdk_blob_close(snapshot, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Create clone from snapshot with xattrs */
	xattrs.names = g_xattr_names;
	xattrs.get_value = _get_xattr_value;
	xattrs.count = 3;
	xattrs.ctx = &g_ctx;

	spdk_bs_create_clone(bs, snapshotid, &xattrs, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	cloneid = g_blobid;

	/* The clone is writable and thin: same logical size as the snapshot but
	 * with no clusters allocated yet. */
	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	clone = g_blob;
	CU_ASSERT(clone->data_ro == false);
	CU_ASSERT(clone->md_ro == false);
	CU_ASSERT(spdk_blob_get_num_clusters(clone) == 10);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(clone) == 0);

	/* All three xattrs passed at creation must be stored on the clone. */
	rc = spdk_blob_get_xattr_value(clone, g_xattr_names[0], &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);

	rc = spdk_blob_get_xattr_value(clone, g_xattr_names[1], &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);

	rc = spdk_blob_get_xattr_value(clone, g_xattr_names[2], &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);


	spdk_blob_close(clone, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Try to create clone from not read only blob */
	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);
	CU_ASSERT(g_blobid == SPDK_BLOBID_INVALID);

	/* Mark blob as read only */
	spdk_blob_set_read_only(blob);
	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Create clone from read only blob */
	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	cloneid = g_blobid;

	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	clone = g_blob;
	CU_ASSERT(clone->data_ro == false);
	CU_ASSERT(clone->md_ro == false);
	CU_ASSERT(spdk_blob_get_num_clusters(clone) == 10);

	ut_blob_close_and_delete(bs, clone);
	ut_blob_close_and_delete(bs, blob);
}
1077 
/* Shared scenario for blob inflation.
 *
 * decouple_parent == false: spdk_bs_inflate_blob() is expected to allocate
 * all clusters and make the blob thick.
 * decouple_parent == true: spdk_bs_blob_decouple_parent() is expected to
 * remove the parent relationship without allocating any clusters.
 * In both cases, the parent snapshot must become deletable afterwards. */
static void
_blob_inflate(bool decouple_parent)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts opts;
	struct spdk_blob *blob, *snapshot;
	spdk_blob_id blobid, snapshotid;
	struct spdk_io_channel *channel;
	uint64_t free_clusters;

	channel = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(channel != NULL);

	/* Create blob with 10 clusters */

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;
	opts.thin_provision = true;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
	CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == true);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);

	/* 1) Blob with no parent */
	if (decouple_parent) {
		/* Decouple parent of blob with no parent (should fail) */
		spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno != 0);
	} else {
		/* Inflate of thin blob with no parent should made it thick */
		spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == false);
		CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 10);
	}

	/* Take a snapshot so the blob gains a parent for part 2. */
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snapshotid = g_blobid;

	/* After snapshotting, the origin blob becomes thin again. */
	CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == true);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);

	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot = g_blob;
	CU_ASSERT(snapshot->data_ro == true);
	CU_ASSERT(snapshot->md_ro == true);
	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 10);

	spdk_blob_close(snapshot, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	free_clusters = spdk_bs_free_cluster_count(bs);

	/* 2) Blob with parent */
	if (!decouple_parent) {
		/* Do full blob inflation */
		spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		/* all 10 clusters should be allocated */
		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 10);
		CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 10);
	} else {
		/* Decouple parent of blob */
		spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		/* when only parent is removed, none of the clusters should be allocated */
		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters);
		CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);
	}

	/* Now, it should be possible to delete snapshot */
	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
	CU_ASSERT(spdk_blob_is_thin_provisioned(blob) == decouple_parent);

	spdk_bs_free_io_channel(channel);
	poll_threads();

	ut_blob_close_and_delete(bs, blob);
}
1174 
1175 static void
1176 blob_inflate(void)
1177 {
1178 	_blob_inflate(false);
1179 	_blob_inflate(true);
1180 }
1181 
1182 static void
1183 blob_delete(void)
1184 {
1185 	struct spdk_blob_store *bs = g_bs;
1186 	struct spdk_blob_opts blob_opts;
1187 	spdk_blob_id blobid;
1188 
1189 	/* Create a blob and then delete it. */
1190 	ut_spdk_blob_opts_init(&blob_opts);
1191 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
1192 	poll_threads();
1193 	CU_ASSERT(g_bserrno == 0);
1194 	CU_ASSERT(g_blobid > 0);
1195 	blobid = g_blobid;
1196 
1197 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
1198 	poll_threads();
1199 	CU_ASSERT(g_bserrno == 0);
1200 
1201 	/* Try to open the blob */
1202 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
1203 	poll_threads();
1204 	CU_ASSERT(g_bserrno == -ENOENT);
1205 }
1206 
/* Verify blob resize semantics: resize is rejected while md_ro is set,
 * growth allocates clusters immediately (thick blob), shrinking releases
 * clusters only after a metadata sync, and growing beyond the blobstore
 * capacity fails with -ENOSPC. */
static void
blob_resize_test(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	uint64_t free_clusters;

	free_clusters = spdk_bs_free_cluster_count(bs);

	/* A newly created blob starts with zero clusters allocated. */
	blob = ut_blob_create_and_open(bs, NULL);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);

	/* Confirm that resize fails if blob is marked read-only. */
	blob->md_ro = true;
	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EPERM);
	blob->md_ro = false;

	/* The blob started at 0 clusters. Resize it to be 5. */
	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT((free_clusters - 5) == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 5);

	/* Shrink the blob to 3 clusters. This will not actually release
	 * the old clusters until the blob is synced.
	 */
	spdk_blob_resize(blob, 3, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	/* Verify there are still 5 clusters in use */
	CU_ASSERT((free_clusters - 5) == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 3);

	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	/* Now there are only 3 clusters in use */
	CU_ASSERT((free_clusters - 3) == spdk_bs_free_cluster_count(bs));

	/* Resize the blob to be 10 clusters. Growth takes effect immediately. */
	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT((free_clusters - 10) == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 10);

	/* Try to resize the blob to size larger than blobstore. */
	spdk_blob_resize(blob, bs->total_clusters + 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOSPC);

	ut_blob_close_and_delete(bs, blob);
}
1264 
1265 static void
1266 blob_resize_thin_test(void)
1267 {
1268 	struct spdk_blob_store *bs = g_bs;
1269 	struct spdk_blob *blob;
1270 	struct spdk_blob_opts opts;
1271 	struct spdk_io_channel *blob_ch;
1272 	uint64_t free_clusters;
1273 	uint64_t io_units_per_cluster;
1274 	uint64_t offset;
1275 	uint8_t buf1[DEV_BUFFER_BLOCKLEN];
1276 
1277 	free_clusters = spdk_bs_free_cluster_count(bs);
1278 
1279 	blob_ch = spdk_bs_alloc_io_channel(bs);
1280 	SPDK_CU_ASSERT_FATAL(blob_ch != NULL);
1281 
1282 	/* Create blob with thin provisioning enabled */
1283 	ut_spdk_blob_opts_init(&opts);
1284 	opts.thin_provision = true;
1285 	opts.num_clusters = 0;
1286 
1287 	blob = ut_blob_create_and_open(bs, &opts);
1288 	CU_ASSERT((free_clusters) == spdk_bs_free_cluster_count(bs));
1289 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);
1290 	io_units_per_cluster = bs_io_units_per_cluster(blob);
1291 
1292 	/* The blob started at 0 clusters. Resize it to be 6. */
1293 	spdk_blob_resize(blob, 6, blob_op_complete, NULL);
1294 	poll_threads();
1295 	CU_ASSERT(g_bserrno == 0);
1296 	CU_ASSERT((free_clusters) == spdk_bs_free_cluster_count(bs));
1297 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);
1298 
1299 	/* Write on cluster 0,2,4 and 5 of blob */
1300 	for (offset = 0; offset < io_units_per_cluster; offset++) {
1301 		spdk_blob_io_write(blob, blob_ch, buf1, offset, 1, blob_op_complete, NULL);
1302 		poll_threads();
1303 		CU_ASSERT(g_bserrno == 0);
1304 	}
1305 	for (offset = 2 * io_units_per_cluster; offset < 3 * io_units_per_cluster; offset++) {
1306 		spdk_blob_io_write(blob, blob_ch, buf1, offset, 1, blob_op_complete, NULL);
1307 		poll_threads();
1308 		CU_ASSERT(g_bserrno == 0);
1309 	}
1310 	for (offset = 4 * io_units_per_cluster; offset < 5 * io_units_per_cluster; offset++) {
1311 		spdk_blob_io_write(blob, blob_ch, buf1, offset, 1, blob_op_complete, NULL);
1312 		poll_threads();
1313 		CU_ASSERT(g_bserrno == 0);
1314 	}
1315 	for (offset = 5 * io_units_per_cluster; offset < 6 * io_units_per_cluster; offset++) {
1316 		spdk_blob_io_write(blob, blob_ch, buf1, offset, 1, blob_op_complete, NULL);
1317 		poll_threads();
1318 		CU_ASSERT(g_bserrno == 0);
1319 	}
1320 
1321 	/* Check allocated clusters after write */
1322 	CU_ASSERT((free_clusters - 4) == spdk_bs_free_cluster_count(bs));
1323 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 4);
1324 
1325 	/* Shrink the blob to 2 clusters. This will not actually release
1326 	 * the old clusters until the blob is synced.
1327 	 */
1328 	spdk_blob_resize(blob, 2, blob_op_complete, NULL);
1329 	poll_threads();
1330 	CU_ASSERT(g_bserrno == 0);
1331 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 2);
1332 	CU_ASSERT((free_clusters - 4) == spdk_bs_free_cluster_count(bs));
1333 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 1);
1334 
1335 	/* Sync blob: 4 clusters were truncated but only 3 of them was allocated */
1336 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
1337 	poll_threads();
1338 	CU_ASSERT(g_bserrno == 0);
1339 	CU_ASSERT((free_clusters - 1) == spdk_bs_free_cluster_count(bs));
1340 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 2);
1341 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 1);
1342 
1343 	spdk_bs_free_io_channel(blob_ch);
1344 	ut_blob_close_and_delete(bs, blob);
1345 }
1346 
1347 static void
1348 blob_read_only(void)
1349 {
1350 	struct spdk_blob_store *bs;
1351 	struct spdk_bs_dev *dev;
1352 	struct spdk_blob *blob;
1353 	struct spdk_bs_opts opts;
1354 	spdk_blob_id blobid;
1355 	int rc;
1356 
1357 	dev = init_dev();
1358 	spdk_bs_opts_init(&opts, sizeof(opts));
1359 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
1360 
1361 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
1362 	poll_threads();
1363 	CU_ASSERT(g_bserrno == 0);
1364 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
1365 	bs = g_bs;
1366 
1367 	blob = ut_blob_create_and_open(bs, NULL);
1368 	blobid = spdk_blob_get_id(blob);
1369 
1370 	rc = spdk_blob_set_read_only(blob);
1371 	CU_ASSERT(rc == 0);
1372 
1373 	CU_ASSERT(blob->data_ro == false);
1374 	CU_ASSERT(blob->md_ro == false);
1375 
1376 	spdk_blob_sync_md(blob, bs_op_complete, NULL);
1377 	poll_threads();
1378 
1379 	CU_ASSERT(blob->data_ro == true);
1380 	CU_ASSERT(blob->md_ro == true);
1381 	CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);
1382 
1383 	spdk_blob_close(blob, blob_op_complete, NULL);
1384 	poll_threads();
1385 	CU_ASSERT(g_bserrno == 0);
1386 
1387 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
1388 	poll_threads();
1389 	CU_ASSERT(g_bserrno == 0);
1390 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
1391 	blob = g_blob;
1392 
1393 	CU_ASSERT(blob->data_ro == true);
1394 	CU_ASSERT(blob->md_ro == true);
1395 	CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);
1396 
1397 	spdk_blob_close(blob, blob_op_complete, NULL);
1398 	poll_threads();
1399 	CU_ASSERT(g_bserrno == 0);
1400 
1401 	ut_bs_reload(&bs, &opts);
1402 
1403 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
1404 	poll_threads();
1405 	CU_ASSERT(g_bserrno == 0);
1406 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
1407 	blob = g_blob;
1408 
1409 	CU_ASSERT(blob->data_ro == true);
1410 	CU_ASSERT(blob->md_ro == true);
1411 	CU_ASSERT(blob->data_ro_flags & SPDK_BLOB_READ_ONLY);
1412 
1413 	ut_blob_close_and_delete(bs, blob);
1414 
1415 	spdk_bs_unload(bs, bs_op_complete, NULL);
1416 	poll_threads();
1417 	CU_ASSERT(g_bserrno == 0);
1418 }
1419 
1420 static void
1421 channel_ops(void)
1422 {
1423 	struct spdk_blob_store *bs = g_bs;
1424 	struct spdk_io_channel *channel;
1425 
1426 	channel = spdk_bs_alloc_io_channel(bs);
1427 	CU_ASSERT(channel != NULL);
1428 
1429 	spdk_bs_free_io_channel(channel);
1430 	poll_threads();
1431 }
1432 
/* Verify spdk_blob_io_write() validation: writes to a zero-sized blob,
 * writes past the end, and writes to a data-read-only blob must fail;
 * an in-range write on a resized blob succeeds. */
static void
blob_write(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob = g_blob;
	struct spdk_io_channel *channel;
	uint64_t io_units_per_cluster;
	uint8_t payload[10 * BLOCKLEN];

	io_units_per_cluster = spdk_bs_get_cluster_size(bs) / spdk_bs_get_io_unit_size(bs);

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	/* Write to a blob with 0 size */
	spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Resize the blob */
	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Confirm that write fails if blob is marked read-only. */
	blob->data_ro = true;
	spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EPERM);
	blob->data_ro = false;

	/* Write to the blob */
	spdk_blob_io_write(blob, channel, payload, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Write starting beyond the end */
	spdk_blob_io_write(blob, channel, payload, 5 * io_units_per_cluster, 1, blob_op_complete,
			   NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Write starting at a valid location but going off the end */
	spdk_blob_io_write(blob, channel, payload, 4 * io_units_per_cluster, io_units_per_cluster + 1,
			   blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}
1484 
/* Verify spdk_blob_io_read() validation: reads from a zero-sized blob and
 * reads past the end must fail; reads succeed on a resized blob, including
 * while the data_ro flag is set (read-only blocks writes, not reads). */
static void
blob_read(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob = g_blob;
	struct spdk_io_channel *channel;
	uint64_t io_units_per_cluster;
	uint8_t payload[10 * BLOCKLEN];

	io_units_per_cluster = spdk_bs_get_cluster_size(bs) / spdk_bs_get_io_unit_size(bs);

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	/* Read from a blob with 0 size */
	spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Resize the blob */
	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Confirm that read passes if blob is marked read-only. */
	blob->data_ro = true;
	spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	blob->data_ro = false;

	/* Read from the blob */
	spdk_blob_io_read(blob, channel, payload, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Read starting beyond the end */
	spdk_blob_io_read(blob, channel, payload, 5 * io_units_per_cluster, 1, blob_op_complete,
			  NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Read starting at a valid location but going off the end */
	spdk_blob_io_read(blob, channel, payload, 4 * io_units_per_cluster, io_units_per_cluster + 1,
			  blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}
1536 
1537 static void
1538 blob_rw_verify(void)
1539 {
1540 	struct spdk_blob_store *bs = g_bs;
1541 	struct spdk_blob *blob = g_blob;
1542 	struct spdk_io_channel *channel;
1543 	uint8_t payload_read[10 * BLOCKLEN];
1544 	uint8_t payload_write[10 * BLOCKLEN];
1545 
1546 	channel = spdk_bs_alloc_io_channel(bs);
1547 	CU_ASSERT(channel != NULL);
1548 
1549 	spdk_blob_resize(blob, 32, blob_op_complete, NULL);
1550 	poll_threads();
1551 	CU_ASSERT(g_bserrno == 0);
1552 
1553 	memset(payload_write, 0xE5, sizeof(payload_write));
1554 	spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
1555 	poll_threads();
1556 	CU_ASSERT(g_bserrno == 0);
1557 
1558 	memset(payload_read, 0x00, sizeof(payload_read));
1559 	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
1560 	poll_threads();
1561 	CU_ASSERT(g_bserrno == 0);
1562 	CU_ASSERT(memcmp(payload_write, payload_read, 4 * BLOCKLEN) == 0);
1563 
1564 	spdk_bs_free_io_channel(channel);
1565 	poll_threads();
1566 }
1567 
/* Verify vectored I/O (readv/writev) across a cluster boundary.  The
 * blob's second cluster is manually remapped so the test can also prove
 * that the I/O landed in the remapped cluster and not the original one. */
static void
blob_rw_verify_iov(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_io_channel *channel;
	uint8_t payload_read[10 * BLOCKLEN];
	uint8_t payload_write[10 * BLOCKLEN];
	struct iovec iov_read[3];
	struct iovec iov_write[3];
	void *buf;
	uint32_t first_data_cluster = FIRST_DATA_CLUSTER(bs);

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	blob = ut_blob_create_and_open(bs, NULL);

	spdk_blob_resize(blob, 2, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/*
	 * Manually adjust the offset of the blob's second cluster.  This allows
	 *  us to make sure that the readv/write code correctly accounts for I/O
	 *  that cross cluster boundaries.  Start by asserting that the allocated
	 *  clusters are where we expect before modifying the second cluster.
	 */
	/* NOTE: 256 here is the cluster size expressed in device blocks for the
	 * test dev (cluster_size / BLOCKLEN) — keep in sync with bs_dev_common. */
	CU_ASSERT(blob->active.clusters[0] == first_data_cluster * 256);
	CU_ASSERT(blob->active.clusters[1] == (first_data_cluster + 1) * 256);
	blob->active.clusters[1] = (first_data_cluster + 2) * 256;

	memset(payload_write, 0xE5, sizeof(payload_write));
	iov_write[0].iov_base = payload_write;
	iov_write[0].iov_len = 1 * BLOCKLEN;
	iov_write[1].iov_base = payload_write + 1 * BLOCKLEN;
	iov_write[1].iov_len = 5 * BLOCKLEN;
	iov_write[2].iov_base = payload_write + 6 * BLOCKLEN;
	iov_write[2].iov_len = 4 * BLOCKLEN;
	/*
	 * Choose a page offset just before the cluster boundary.  The first 6 pages of payload
	 *  will get written to the first cluster, the last 4 to the second cluster.
	 */
	spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Read back with a different iovec layout to exercise scatter-gather. */
	memset(payload_read, 0xAA, sizeof(payload_read));
	iov_read[0].iov_base = payload_read;
	iov_read[0].iov_len = 3 * BLOCKLEN;
	iov_read[1].iov_base = payload_read + 3 * BLOCKLEN;
	iov_read[1].iov_len = 4 * BLOCKLEN;
	iov_read[2].iov_base = payload_read + 7 * BLOCKLEN;
	iov_read[2].iov_len = 3 * BLOCKLEN;
	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_write, payload_read, 10 * BLOCKLEN) == 0);

	buf = calloc(1, 256 * BLOCKLEN);
	SPDK_CU_ASSERT_FATAL(buf != NULL);
	/* Check that cluster 2 on "disk" was not modified. */
	CU_ASSERT(memcmp(buf, &g_dev_buffer[(first_data_cluster + 1) * 256 * BLOCKLEN],
			 256 * BLOCKLEN) == 0);
	free(buf);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}
1641 
1642 static uint32_t
1643 bs_channel_get_req_count(struct spdk_io_channel *_channel)
1644 {
1645 	struct spdk_bs_channel *channel = spdk_io_channel_get_ctx(_channel);
1646 	struct spdk_bs_request_set *set;
1647 	uint32_t count = 0;
1648 
1649 	TAILQ_FOREACH(set, &channel->reqs, link) {
1650 		count++;
1651 	}
1652 
1653 	return count;
1654 }
1655 
/* Verify that a vectored write fails cleanly with -ENOMEM when request
 * allocation fails, and that no request objects leak from the channel's
 * request list in the process. */
static void
blob_rw_verify_iov_nomem(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob = g_blob;
	struct spdk_io_channel *channel;
	uint8_t payload_write[10 * BLOCKLEN];
	struct iovec iov_write[3];
	uint32_t req_count;

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	spdk_blob_resize(blob, 2, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/*
	 * Choose a page offset just before the cluster boundary.  The first 6 pages of payload
	 *  will get written to the first cluster, the last 4 to the second cluster.
	 */
	iov_write[0].iov_base = payload_write;
	iov_write[0].iov_len = 1 * BLOCKLEN;
	iov_write[1].iov_base = payload_write + 1 * BLOCKLEN;
	iov_write[1].iov_len = 5 * BLOCKLEN;
	iov_write[2].iov_base = payload_write + 6 * BLOCKLEN;
	iov_write[2].iov_len = 4 * BLOCKLEN;
	/* Force calloc() to fail so the split-I/O path hits its allocation
	 * failure branch. */
	MOCK_SET(calloc, NULL);
	req_count = bs_channel_get_req_count(channel);
	spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOMEM);
	/* The request count must be unchanged — nothing leaked. */
	CU_ASSERT(req_count == bs_channel_get_req_count(channel));
	MOCK_CLEAR(calloc);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}
1694 
1695 static void
1696 blob_rw_iov_read_only(void)
1697 {
1698 	struct spdk_blob_store *bs = g_bs;
1699 	struct spdk_blob *blob = g_blob;
1700 	struct spdk_io_channel *channel;
1701 	uint8_t payload_read[BLOCKLEN];
1702 	uint8_t payload_write[BLOCKLEN];
1703 	struct iovec iov_read;
1704 	struct iovec iov_write;
1705 
1706 	channel = spdk_bs_alloc_io_channel(bs);
1707 	CU_ASSERT(channel != NULL);
1708 
1709 	spdk_blob_resize(blob, 2, blob_op_complete, NULL);
1710 	poll_threads();
1711 	CU_ASSERT(g_bserrno == 0);
1712 
1713 	/* Verify that writev failed if read_only flag is set. */
1714 	blob->data_ro = true;
1715 	iov_write.iov_base = payload_write;
1716 	iov_write.iov_len = sizeof(payload_write);
1717 	spdk_blob_io_writev(blob, channel, &iov_write, 1, 0, 1, blob_op_complete, NULL);
1718 	poll_threads();
1719 	CU_ASSERT(g_bserrno == -EPERM);
1720 
1721 	/* Verify that reads pass if data_ro flag is set. */
1722 	iov_read.iov_base = payload_read;
1723 	iov_read.iov_len = sizeof(payload_read);
1724 	spdk_blob_io_readv(blob, channel, &iov_read, 1, 0, 1, blob_op_complete, NULL);
1725 	poll_threads();
1726 	CU_ASSERT(g_bserrno == 0);
1727 
1728 	spdk_bs_free_io_channel(channel);
1729 	poll_threads();
1730 }
1731 
1732 static void
1733 _blob_io_read_no_split(struct spdk_blob *blob, struct spdk_io_channel *channel,
1734 		       uint8_t *payload, uint64_t offset, uint64_t length,
1735 		       spdk_blob_op_complete cb_fn, void *cb_arg)
1736 {
1737 	uint64_t i;
1738 	uint8_t *buf;
1739 	uint64_t io_unit_size = spdk_bs_get_io_unit_size(blob->bs);
1740 
1741 	/* To be sure that operation is NOT split, read one io_unit at the time */
1742 	buf = payload;
1743 	for (i = 0; i < length; i++) {
1744 		spdk_blob_io_read(blob, channel, buf, i + offset, 1, blob_op_complete, NULL);
1745 		poll_threads();
1746 		if (g_bserrno != 0) {
1747 			/* Pass the error code up */
1748 			break;
1749 		}
1750 		buf += io_unit_size;
1751 	}
1752 
1753 	cb_fn(cb_arg, g_bserrno);
1754 }
1755 
1756 static void
1757 _blob_io_write_no_split(struct spdk_blob *blob, struct spdk_io_channel *channel,
1758 			uint8_t *payload, uint64_t offset, uint64_t length,
1759 			spdk_blob_op_complete cb_fn, void *cb_arg)
1760 {
1761 	uint64_t i;
1762 	uint8_t *buf;
1763 	uint64_t io_unit_size = spdk_bs_get_io_unit_size(blob->bs);
1764 
1765 	/* To be sure that operation is NOT split, write one io_unit at the time */
1766 	buf = payload;
1767 	for (i = 0; i < length; i++) {
1768 		spdk_blob_io_write(blob, channel, buf, i + offset, 1, blob_op_complete, NULL);
1769 		poll_threads();
1770 		if (g_bserrno != 0) {
1771 			/* Pass the error code up */
1772 			break;
1773 		}
1774 		buf += io_unit_size;
1775 	}
1776 
1777 	cb_fn(cb_arg, g_bserrno);
1778 }
1779 
1780 static void
1781 blob_operation_split_rw(void)
1782 {
1783 	struct spdk_blob_store *bs = g_bs;
1784 	struct spdk_blob *blob;
1785 	struct spdk_io_channel *channel;
1786 	struct spdk_blob_opts opts;
1787 	uint64_t cluster_size;
1788 
1789 	uint64_t payload_size;
1790 	uint8_t *payload_read;
1791 	uint8_t *payload_write;
1792 	uint8_t *payload_pattern;
1793 
1794 	uint64_t io_unit_size;
1795 	uint64_t io_units_per_cluster;
1796 	uint64_t io_units_per_payload;
1797 
1798 	uint64_t i;
1799 
1800 	cluster_size = spdk_bs_get_cluster_size(bs);
1801 	io_unit_size = spdk_bs_get_io_unit_size(bs);
1802 	io_units_per_cluster = cluster_size / io_unit_size;
1803 	io_units_per_payload = io_units_per_cluster * 5;
1804 	payload_size = cluster_size * 5;
1805 
1806 	payload_read = malloc(payload_size);
1807 	SPDK_CU_ASSERT_FATAL(payload_read != NULL);
1808 
1809 	payload_write = malloc(payload_size);
1810 	SPDK_CU_ASSERT_FATAL(payload_write != NULL);
1811 
1812 	payload_pattern = malloc(payload_size);
1813 	SPDK_CU_ASSERT_FATAL(payload_pattern != NULL);
1814 
1815 	/* Prepare random pattern to write */
1816 	memset(payload_pattern, 0xFF, payload_size);
1817 	for (i = 0; i < io_units_per_payload; i++) {
1818 		*((uint64_t *)(payload_pattern + io_unit_size * i)) = (i + 1);
1819 	}
1820 
1821 	channel = spdk_bs_alloc_io_channel(bs);
1822 	SPDK_CU_ASSERT_FATAL(channel != NULL);
1823 
1824 	/* Create blob */
1825 	ut_spdk_blob_opts_init(&opts);
1826 	opts.thin_provision = false;
1827 	opts.num_clusters = 5;
1828 
1829 	blob = ut_blob_create_and_open(bs, &opts);
1830 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
1831 
1832 	/* Initial read should return zeroed payload */
1833 	memset(payload_read, 0xFF, payload_size);
1834 	spdk_blob_io_read(blob, channel, payload_read, 0, io_units_per_payload, blob_op_complete, NULL);
1835 	poll_threads();
1836 	CU_ASSERT(g_bserrno == 0);
1837 	CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size));
1838 
1839 	/* Fill whole blob except last page */
1840 	spdk_blob_io_write(blob, channel, payload_pattern, 0, io_units_per_payload - 1,
1841 			   blob_op_complete, NULL);
1842 	poll_threads();
1843 	CU_ASSERT(g_bserrno == 0);
1844 
1845 	/* Write last page with a pattern */
1846 	spdk_blob_io_write(blob, channel, payload_pattern, io_units_per_payload - 1, 1,
1847 			   blob_op_complete, NULL);
1848 	poll_threads();
1849 	CU_ASSERT(g_bserrno == 0);
1850 
1851 	/* Read whole blob and check consistency */
1852 	memset(payload_read, 0xFF, payload_size);
1853 	spdk_blob_io_read(blob, channel, payload_read, 0, io_units_per_payload, blob_op_complete, NULL);
1854 	poll_threads();
1855 	CU_ASSERT(g_bserrno == 0);
1856 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size - io_unit_size) == 0);
1857 	CU_ASSERT(memcmp(payload_pattern, payload_read + payload_size - io_unit_size, io_unit_size) == 0);
1858 
1859 	/* Fill whole blob except first page */
1860 	spdk_blob_io_write(blob, channel, payload_pattern, 1, io_units_per_payload - 1,
1861 			   blob_op_complete, NULL);
1862 	poll_threads();
1863 	CU_ASSERT(g_bserrno == 0);
1864 
1865 	/* Write first page with a pattern */
1866 	spdk_blob_io_write(blob, channel, payload_pattern, 0, 1,
1867 			   blob_op_complete, NULL);
1868 	poll_threads();
1869 	CU_ASSERT(g_bserrno == 0);
1870 
1871 	/* Read whole blob and check consistency */
1872 	memset(payload_read, 0xFF, payload_size);
1873 	spdk_blob_io_read(blob, channel, payload_read, 0, io_units_per_payload, blob_op_complete, NULL);
1874 	poll_threads();
1875 	CU_ASSERT(g_bserrno == 0);
1876 	CU_ASSERT(memcmp(payload_pattern, payload_read + io_unit_size, payload_size - io_unit_size) == 0);
1877 	CU_ASSERT(memcmp(payload_pattern, payload_read, io_unit_size) == 0);
1878 
1879 
1880 	/* Fill whole blob with a pattern (5 clusters) */
1881 
1882 	/* 1. Read test. */
1883 	_blob_io_write_no_split(blob, channel, payload_pattern, 0, io_units_per_payload,
1884 				blob_op_complete, NULL);
1885 	poll_threads();
1886 	CU_ASSERT(g_bserrno == 0);
1887 
1888 	memset(payload_read, 0xFF, payload_size);
1889 	spdk_blob_io_read(blob, channel, payload_read, 0, io_units_per_payload, blob_op_complete, NULL);
1890 	poll_threads();
1891 	poll_threads();
1892 	CU_ASSERT(g_bserrno == 0);
1893 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);
1894 
1895 	/* 2. Write test. */
1896 	spdk_blob_io_write(blob, channel, payload_pattern, 0, io_units_per_payload,
1897 			   blob_op_complete, NULL);
1898 	poll_threads();
1899 	CU_ASSERT(g_bserrno == 0);
1900 
1901 	memset(payload_read, 0xFF, payload_size);
1902 	_blob_io_read_no_split(blob, channel, payload_read, 0, io_units_per_payload, blob_op_complete,
1903 			       NULL);
1904 	poll_threads();
1905 	CU_ASSERT(g_bserrno == 0);
1906 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);
1907 
1908 	spdk_bs_free_io_channel(channel);
1909 	poll_threads();
1910 
1911 	g_blob = NULL;
1912 	g_blobid = 0;
1913 
1914 	free(payload_read);
1915 	free(payload_write);
1916 	free(payload_pattern);
1917 
1918 	ut_blob_close_and_delete(bs, blob);
1919 }
1920 
1921 static void
1922 blob_operation_split_rw_iov(void)
1923 {
1924 	struct spdk_blob_store *bs = g_bs;
1925 	struct spdk_blob *blob;
1926 	struct spdk_io_channel *channel;
1927 	struct spdk_blob_opts opts;
1928 	uint64_t cluster_size;
1929 
1930 	uint64_t payload_size;
1931 	uint8_t *payload_read;
1932 	uint8_t *payload_write;
1933 	uint8_t *payload_pattern;
1934 
1935 	uint64_t io_unit_size;
1936 	uint64_t io_units_per_cluster;
1937 	uint64_t io_units_per_payload;
1938 
1939 	struct iovec iov_read[2];
1940 	struct iovec iov_write[2];
1941 
1942 	uint64_t i, j;
1943 
1944 	cluster_size = spdk_bs_get_cluster_size(bs);
1945 	io_unit_size = spdk_bs_get_io_unit_size(bs);
1946 	io_units_per_cluster = cluster_size / io_unit_size;
1947 	io_units_per_payload = io_units_per_cluster * 5;
1948 	payload_size = cluster_size * 5;
1949 
1950 	payload_read = malloc(payload_size);
1951 	SPDK_CU_ASSERT_FATAL(payload_read != NULL);
1952 
1953 	payload_write = malloc(payload_size);
1954 	SPDK_CU_ASSERT_FATAL(payload_write != NULL);
1955 
1956 	payload_pattern = malloc(payload_size);
1957 	SPDK_CU_ASSERT_FATAL(payload_pattern != NULL);
1958 
1959 	/* Prepare random pattern to write */
1960 	for (i = 0; i < io_units_per_payload; i++) {
1961 		for (j = 0; j < io_unit_size / sizeof(uint64_t); j++) {
1962 			uint64_t *tmp;
1963 
1964 			tmp = (uint64_t *)payload_pattern;
1965 			tmp += ((io_unit_size * i) / sizeof(uint64_t)) + j;
1966 			*tmp = i + 1;
1967 		}
1968 	}
1969 
1970 	channel = spdk_bs_alloc_io_channel(bs);
1971 	SPDK_CU_ASSERT_FATAL(channel != NULL);
1972 
1973 	/* Create blob */
1974 	ut_spdk_blob_opts_init(&opts);
1975 	opts.thin_provision = false;
1976 	opts.num_clusters = 5;
1977 
1978 	blob = ut_blob_create_and_open(bs, &opts);
1979 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
1980 
1981 	/* Initial read should return zeroes payload */
1982 	memset(payload_read, 0xFF, payload_size);
1983 	iov_read[0].iov_base = payload_read;
1984 	iov_read[0].iov_len = cluster_size * 3;
1985 	iov_read[1].iov_base = payload_read + cluster_size * 3;
1986 	iov_read[1].iov_len = cluster_size * 2;
1987 	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, io_units_per_payload, blob_op_complete, NULL);
1988 	poll_threads();
1989 	CU_ASSERT(g_bserrno == 0);
1990 	CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size));
1991 
1992 	/* First of iovs fills whole blob except last io_unit and second of iovs writes last io_unit
1993 	 *  with a pattern. */
1994 	iov_write[0].iov_base = payload_pattern;
1995 	iov_write[0].iov_len = payload_size - io_unit_size;
1996 	iov_write[1].iov_base = payload_pattern;
1997 	iov_write[1].iov_len = io_unit_size;
1998 	spdk_blob_io_writev(blob, channel, iov_write, 2, 0, io_units_per_payload, blob_op_complete, NULL);
1999 	poll_threads();
2000 	CU_ASSERT(g_bserrno == 0);
2001 
2002 	/* Read whole blob and check consistency */
2003 	memset(payload_read, 0xFF, payload_size);
2004 	iov_read[0].iov_base = payload_read;
2005 	iov_read[0].iov_len = cluster_size * 2;
2006 	iov_read[1].iov_base = payload_read + cluster_size * 2;
2007 	iov_read[1].iov_len = cluster_size * 3;
2008 	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, io_units_per_payload, blob_op_complete, NULL);
2009 	poll_threads();
2010 	CU_ASSERT(g_bserrno == 0);
2011 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size - io_unit_size) == 0);
2012 	CU_ASSERT(memcmp(payload_pattern, payload_read + payload_size - io_unit_size, io_unit_size) == 0);
2013 
2014 	/* First of iovs fills only first io_unit and second of iovs writes whole blob except
2015 	 *  first io_unit with a pattern. */
2016 	iov_write[0].iov_base = payload_pattern;
2017 	iov_write[0].iov_len = io_unit_size;
2018 	iov_write[1].iov_base = payload_pattern;
2019 	iov_write[1].iov_len = payload_size - io_unit_size;
2020 	spdk_blob_io_writev(blob, channel, iov_write, 2, 0, io_units_per_payload, blob_op_complete, NULL);
2021 	poll_threads();
2022 	CU_ASSERT(g_bserrno == 0);
2023 
2024 	/* Read whole blob and check consistency */
2025 	memset(payload_read, 0xFF, payload_size);
2026 	iov_read[0].iov_base = payload_read;
2027 	iov_read[0].iov_len = cluster_size * 4;
2028 	iov_read[1].iov_base = payload_read + cluster_size * 4;
2029 	iov_read[1].iov_len = cluster_size;
2030 	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, io_units_per_payload, blob_op_complete, NULL);
2031 	poll_threads();
2032 	CU_ASSERT(g_bserrno == 0);
2033 	CU_ASSERT(memcmp(payload_pattern, payload_read + io_unit_size, payload_size - io_unit_size) == 0);
2034 	CU_ASSERT(memcmp(payload_pattern, payload_read, io_unit_size) == 0);
2035 
2036 
2037 	/* Fill whole blob with a pattern (5 clusters) */
2038 
2039 	/* 1. Read test. */
2040 	_blob_io_write_no_split(blob, channel, payload_pattern, 0, io_units_per_payload,
2041 				blob_op_complete, NULL);
2042 	poll_threads();
2043 	CU_ASSERT(g_bserrno == 0);
2044 
2045 	memset(payload_read, 0xFF, payload_size);
2046 	iov_read[0].iov_base = payload_read;
2047 	iov_read[0].iov_len = cluster_size;
2048 	iov_read[1].iov_base = payload_read + cluster_size;
2049 	iov_read[1].iov_len = cluster_size * 4;
2050 	spdk_blob_io_readv(blob, channel, iov_read, 2, 0, io_units_per_payload, blob_op_complete, NULL);
2051 	poll_threads();
2052 	CU_ASSERT(g_bserrno == 0);
2053 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);
2054 
2055 	/* 2. Write test. */
2056 	iov_write[0].iov_base = payload_read;
2057 	iov_write[0].iov_len = cluster_size * 2;
2058 	iov_write[1].iov_base = payload_read + cluster_size * 2;
2059 	iov_write[1].iov_len = cluster_size * 3;
2060 	spdk_blob_io_writev(blob, channel, iov_write, 2, 0, io_units_per_payload, blob_op_complete, NULL);
2061 	poll_threads();
2062 	CU_ASSERT(g_bserrno == 0);
2063 
2064 	memset(payload_read, 0xFF, payload_size);
2065 	_blob_io_read_no_split(blob, channel, payload_read, 0, io_units_per_payload, blob_op_complete,
2066 			       NULL);
2067 	poll_threads();
2068 	CU_ASSERT(g_bserrno == 0);
2069 	CU_ASSERT(memcmp(payload_pattern, payload_read, payload_size) == 0);
2070 
2071 	spdk_bs_free_io_channel(channel);
2072 	poll_threads();
2073 
2074 	g_blob = NULL;
2075 	g_blobid = 0;
2076 
2077 	free(payload_read);
2078 	free(payload_write);
2079 	free(payload_pattern);
2080 
2081 	ut_blob_close_and_delete(bs, blob);
2082 }
2083 
2084 static void
2085 blob_unmap(void)
2086 {
2087 	struct spdk_blob_store *bs = g_bs;
2088 	struct spdk_blob *blob;
2089 	struct spdk_io_channel *channel;
2090 	struct spdk_blob_opts opts;
2091 	uint8_t payload[BLOCKLEN];
2092 	uint32_t first_data_cluster = FIRST_DATA_CLUSTER(bs);
2093 	int i;
2094 
2095 	channel = spdk_bs_alloc_io_channel(bs);
2096 	CU_ASSERT(channel != NULL);
2097 
2098 	ut_spdk_blob_opts_init(&opts);
2099 	opts.num_clusters = 10;
2100 
2101 	blob = ut_blob_create_and_open(bs, &opts);
2102 
2103 	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
2104 	poll_threads();
2105 	CU_ASSERT(g_bserrno == 0);
2106 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 10);
2107 
2108 	memset(payload, 0, sizeof(payload));
2109 	payload[0] = 0xFF;
2110 
2111 	/*
2112 	 * Set first byte of every cluster to 0xFF.
2113 	 */
2114 	for (i = 0; i < 10; i++) {
2115 		g_dev_buffer[(first_data_cluster + i) * SPDK_BLOB_OPTS_CLUSTER_SZ] = 0xFF;
2116 	}
2117 
2118 	/* Confirm writes */
2119 	for (i = 0; i < 10; i++) {
2120 		payload[0] = 0;
2121 		spdk_blob_io_read(blob, channel, &payload, i * SPDK_BLOB_OPTS_CLUSTER_SZ / BLOCKLEN, 1,
2122 				  blob_op_complete, NULL);
2123 		poll_threads();
2124 		CU_ASSERT(g_bserrno == 0);
2125 		CU_ASSERT(payload[0] == 0xFF);
2126 	}
2127 
2128 	/* Mark some clusters as unallocated */
2129 	blob->active.clusters[1] = 0;
2130 	blob->active.clusters[2] = 0;
2131 	blob->active.clusters[3] = 0;
2132 	blob->active.clusters[6] = 0;
2133 	blob->active.clusters[8] = 0;
2134 	blob->active.num_allocated_clusters -= 5;
2135 
2136 	/* Unmap clusters by resizing to 0 */
2137 	spdk_blob_resize(blob, 0, blob_op_complete, NULL);
2138 	poll_threads();
2139 	CU_ASSERT(g_bserrno == 0);
2140 
2141 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
2142 	poll_threads();
2143 	CU_ASSERT(g_bserrno == 0);
2144 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);
2145 
2146 	/* Confirm that only 'allocated' clusters were unmapped */
2147 	for (i = 0; i < 10; i++) {
2148 		switch (i) {
2149 		case 1:
2150 		case 2:
2151 		case 3:
2152 		case 6:
2153 		case 8:
2154 			CU_ASSERT(g_dev_buffer[(first_data_cluster + i) * SPDK_BLOB_OPTS_CLUSTER_SZ] == 0xFF);
2155 			break;
2156 		default:
2157 			CU_ASSERT(g_dev_buffer[(first_data_cluster + i) * SPDK_BLOB_OPTS_CLUSTER_SZ] == 0);
2158 			break;
2159 		}
2160 	}
2161 
2162 	spdk_bs_free_io_channel(channel);
2163 	poll_threads();
2164 
2165 	ut_blob_close_and_delete(bs, blob);
2166 }
2167 
2168 static void
2169 blob_iter(void)
2170 {
2171 	struct spdk_blob_store *bs = g_bs;
2172 	struct spdk_blob *blob;
2173 	spdk_blob_id blobid;
2174 	struct spdk_blob_opts blob_opts;
2175 
2176 	spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL);
2177 	poll_threads();
2178 	CU_ASSERT(g_blob == NULL);
2179 	CU_ASSERT(g_bserrno == -ENOENT);
2180 
2181 	ut_spdk_blob_opts_init(&blob_opts);
2182 	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
2183 	poll_threads();
2184 	CU_ASSERT(g_bserrno == 0);
2185 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
2186 	blobid = g_blobid;
2187 
2188 	spdk_bs_iter_first(bs, blob_op_with_handle_complete, NULL);
2189 	poll_threads();
2190 	CU_ASSERT(g_blob != NULL);
2191 	CU_ASSERT(g_bserrno == 0);
2192 	blob = g_blob;
2193 	CU_ASSERT(spdk_blob_get_id(blob) == blobid);
2194 
2195 	spdk_bs_iter_next(bs, blob, blob_op_with_handle_complete, NULL);
2196 	poll_threads();
2197 	CU_ASSERT(g_blob == NULL);
2198 	CU_ASSERT(g_bserrno == -ENOENT);
2199 }
2200 
/*
 * Exercise the extended-attribute API: md_ro enforcement for set/remove,
 * overwrite semantics, name enumeration, the internal-xattr namespace
 * (separate from the public one), and persistence of internal xattrs
 * across a blobstore reload.
 */
static void
blob_xattr(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob = g_blob;
	spdk_blob_id blobid = spdk_blob_get_id(blob);
	uint64_t length;
	int rc;
	const char *name1, *name2;
	const void *value;
	size_t value_len;
	struct spdk_xattr_names *names;

	/* Test that set_xattr fails if md_ro flag is set. */
	blob->md_ro = true;
	rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
	CU_ASSERT(rc == -EPERM);

	blob->md_ro = false;
	rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
	CU_ASSERT(rc == 0);

	length = 2345;
	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
	CU_ASSERT(rc == 0);

	/* Overwrite "length" xattr. */
	length = 3456;
	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
	CU_ASSERT(rc == 0);

	/* get_xattr should still work even if md_ro flag is set. */
	value = NULL;
	blob->md_ro = true;
	rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(*(uint64_t *)value == length);
	CU_ASSERT(value_len == 8);
	blob->md_ro = false;

	/* Looking up an xattr that was never set must fail. */
	rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len);
	CU_ASSERT(rc == -ENOENT);

	/* Enumerate names: exactly the two we set, in either order. */
	names = NULL;
	rc = spdk_blob_get_xattr_names(blob, &names);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(names != NULL);
	CU_ASSERT(spdk_xattr_names_get_count(names) == 2);
	name1 = spdk_xattr_names_get_name(names, 0);
	SPDK_CU_ASSERT_FATAL(name1 != NULL);
	CU_ASSERT(!strcmp(name1, "name") || !strcmp(name1, "length"));
	name2 = spdk_xattr_names_get_name(names, 1);
	SPDK_CU_ASSERT_FATAL(name2 != NULL);
	CU_ASSERT(!strcmp(name2, "name") || !strcmp(name2, "length"));
	CU_ASSERT(strcmp(name1, name2));
	spdk_xattr_names_free(names);

	/* Confirm that remove_xattr fails if md_ro is set to true. */
	blob->md_ro = true;
	rc = spdk_blob_remove_xattr(blob, "name");
	CU_ASSERT(rc == -EPERM);

	blob->md_ro = false;
	rc = spdk_blob_remove_xattr(blob, "name");
	CU_ASSERT(rc == 0);

	/* Removing a nonexistent xattr must fail cleanly. */
	rc = spdk_blob_remove_xattr(blob, "foobar");
	CU_ASSERT(rc == -ENOENT);

	/* Set internal xattr (internal=true): lives in a namespace separate from
	 * the public xattrs and is only reachable via the internal accessors. */
	length = 7898;
	rc = blob_set_xattr(blob, "internal", &length, sizeof(length), true);
	CU_ASSERT(rc == 0);
	rc = blob_get_xattr_value(blob, "internal", &value, &value_len, true);
	CU_ASSERT(rc == 0);
	CU_ASSERT(*(uint64_t *)value == length);
	/* try to get public xattr with same name */
	rc = spdk_blob_get_xattr_value(blob, "internal", &value, &value_len);
	CU_ASSERT(rc != 0);
	rc = blob_get_xattr_value(blob, "internal", &value, &value_len, false);
	CU_ASSERT(rc != 0);
	/* Check if SPDK_BLOB_INTERNAL_XATTR is set */
	CU_ASSERT((blob->invalid_flags & SPDK_BLOB_INTERNAL_XATTR) ==
		  SPDK_BLOB_INTERNAL_XATTR);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();

	/* Check if xattrs are persisted */
	ut_bs_reload(&bs, NULL);

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;

	/* The internal xattr must have survived the reload. */
	rc = blob_get_xattr_value(blob, "internal", &value, &value_len, true);
	CU_ASSERT(rc == 0);
	CU_ASSERT(*(uint64_t *)value == length);

	/* try to get internal xattr through public call */
	rc = spdk_blob_get_xattr_value(blob, "internal", &value, &value_len);
	CU_ASSERT(rc != 0);

	/* Removing the last internal xattr should clear the flag as well. */
	rc = blob_remove_xattr(blob, "internal", true);
	CU_ASSERT(rc == 0);

	CU_ASSERT((blob->invalid_flags & SPDK_BLOB_INTERNAL_XATTR) == 0);
}
2312 
2313 static void
2314 blob_parse_md(void)
2315 {
2316 	struct spdk_blob_store *bs = g_bs;
2317 	struct spdk_blob *blob;
2318 	int rc;
2319 	uint32_t used_pages;
2320 	size_t xattr_length;
2321 	char *xattr;
2322 
2323 	used_pages = spdk_bit_array_count_set(bs->used_md_pages);
2324 	blob = ut_blob_create_and_open(bs, NULL);
2325 
2326 	/* Create large extent to force more than 1 page of metadata. */
2327 	xattr_length = SPDK_BS_MAX_DESC_SIZE - sizeof(struct spdk_blob_md_descriptor_xattr) -
2328 		       strlen("large_xattr");
2329 	xattr = calloc(xattr_length, sizeof(char));
2330 	SPDK_CU_ASSERT_FATAL(xattr != NULL);
2331 	rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
2332 	free(xattr);
2333 	SPDK_CU_ASSERT_FATAL(rc == 0);
2334 
2335 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
2336 	poll_threads();
2337 
2338 	/* Delete the blob and verify that number of pages returned to before its creation. */
2339 	SPDK_CU_ASSERT_FATAL(used_pages != spdk_bit_array_count_set(bs->used_md_pages));
2340 	ut_blob_close_and_delete(bs, blob);
2341 	SPDK_CU_ASSERT_FATAL(used_pages == spdk_bit_array_count_set(bs->used_md_pages));
2342 }
2343 
/*
 * End-to-end init/unload/load cycle for the blobstore: persists a blob with
 * xattrs, then exercises the load path's validation (bad blocklen, zero
 * max_md_ops/max_channel_ops, shrunken/grown bdev, bstype matching) and the
 * size==0 compatibility mode of older super blocks.
 */
static void
bs_load(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	spdk_blob_id blobid;
	struct spdk_blob *blob;
	struct spdk_bs_super_block *super_block;
	uint64_t length;
	int rc;
	const void *value;
	size_t value_len;
	struct spdk_bs_opts opts;
	struct spdk_blob_opts blob_opts;

	dev = init_dev();
	spdk_bs_opts_init(&opts, sizeof(opts));
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");

	/* Initialize a new blob store */
	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	/* Try to open a blobid that does not exist */
	spdk_bs_open_blob(bs, 0, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOENT);
	CU_ASSERT(g_blob == NULL);

	/* Create a blob */
	blob = ut_blob_create_and_open(bs, NULL);
	blobid = spdk_blob_get_id(blob);

	/* Try again to open valid blob but without the upper bit set */
	spdk_bs_open_blob(bs, blobid & 0xFFFFFFFF, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOENT);
	CU_ASSERT(g_blob == NULL);

	/* Set some xattrs that will be checked again after reload. */
	rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
	CU_ASSERT(rc == 0);

	length = 2345;
	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
	CU_ASSERT(rc == 0);

	/* Resize the blob */
	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	blob = NULL;
	g_blob = NULL;
	g_blobid = SPDK_BLOBID_INVALID;

	/* Unload the blob store */
	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
	g_blob = NULL;
	g_blobid = 0;

	/* A clean unload must leave clean == 1 in the on-disk super block. */
	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
	CU_ASSERT(super_block->clean == 1);

	/* Load should fail for device with an unsupported blocklen */
	dev = init_dev();
	dev->blocklen = g_phys_blocklen * 2;
	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Load should fail when max_md_ops is set to zero */
	dev = init_dev();
	spdk_bs_opts_init(&opts, sizeof(opts));
	opts.max_md_ops = 0;
	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Load should fail when max_channel_ops is set to zero */
	dev = init_dev();
	spdk_bs_opts_init(&opts, sizeof(opts));
	opts.max_channel_ops = 0;
	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Load an existing blob store */
	dev = init_dev();
	spdk_bs_opts_init(&opts, sizeof(opts));
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
	CU_ASSERT(super_block->clean == 1);
	CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen);

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;

	/* Verify that blobstore is marked dirty after first metadata sync */
	/* NOTE(review): there is no poll_threads() before this assertion, so the
	 * on-disk super block is inspected before the sync completes and clean is
	 * still 1 here - confirm this ordering is the intent of the check. */
	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	CU_ASSERT(super_block->clean == 1);

	/* Get the xattrs persisted before the reload. */
	value = NULL;
	rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(*(uint64_t *)value == length);
	CU_ASSERT(value_len == 8);

	rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len);
	CU_ASSERT(rc == -ENOENT);

	/* The resize to 10 clusters must also have been persisted. */
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 10);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	blob = NULL;
	g_blob = NULL;

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;

	/* Load should fail: bdev size < saved size */
	dev = init_dev();
	dev->blockcnt /= 2;

	spdk_bs_opts_init(&opts, sizeof(opts));
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();

	CU_ASSERT(g_bserrno == -EILSEQ);

	/* Load should succeed: bdev size > saved size */
	dev = init_dev();
	dev->blockcnt *= 4;

	spdk_bs_opts_init(&opts, sizeof(opts));
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	CU_ASSERT(g_bserrno == 0);
	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();


	/* Test compatibility mode: size == 0 in the super block means an older
	 * on-disk layout; the load path must accept it. */

	dev = init_dev();
	super_block->size = 0;
	super_block->crc = blob_md_page_calc_crc(super_block);

	spdk_bs_opts_init(&opts, sizeof(opts));
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	/* Create a blob */
	ut_spdk_blob_opts_init(&blob_opts);
	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);

	/* Blobstore should update number of blocks in super_block */
	CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen);
	CU_ASSERT(super_block->clean == 0);

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(super_block->clean == 1);
	g_bs = NULL;

}
2549 
/*
 * Verify handling of the SNAPSHOT_PENDING_REMOVAL marker across reloads:
 * a snapshot flagged as pending removal must survive the reload while a
 * clone still references it (the marker is cleared), and must be deleted
 * on reload once the clone drops its BLOB_SNAPSHOT reference.
 */
static void
bs_load_pending_removal(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts opts;
	struct spdk_blob *blob, *snapshot;
	spdk_blob_id blobid, snapshotid;
	const void *value;
	size_t value_len;
	int rc;

	/* Create blob */
	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);

	/* Create snapshot */
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snapshotid = g_blobid;

	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot = g_blob;

	/* Set SNAPSHOT_PENDING_REMOVAL xattr; md_ro is dropped temporarily
	 * because snapshots are metadata read-only. */
	snapshot->md_ro = false;
	rc = blob_set_xattr(snapshot, SNAPSHOT_PENDING_REMOVAL, &blobid, sizeof(spdk_blob_id), true);
	CU_ASSERT(rc == 0);
	snapshot->md_ro = true;

	spdk_blob_close(snapshot, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Reload blobstore */
	ut_bs_reload(&bs, NULL);

	/* Snapshot should not be removed as blob is still pointing to it */
	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot = g_blob;

	/* SNAPSHOT_PENDING_REMOVAL xattr should be removed during load */
	rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_PENDING_REMOVAL, &value, &value_len);
	CU_ASSERT(rc != 0);

	/* Set SNAPSHOT_PENDING_REMOVAL xattr again */
	snapshot->md_ro = false;
	rc = blob_set_xattr(snapshot, SNAPSHOT_PENDING_REMOVAL, &blobid, sizeof(spdk_blob_id), true);
	CU_ASSERT(rc == 0);
	snapshot->md_ro = true;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;

	/* Remove parent_id from blob by removing BLOB_SNAPSHOT xattr */
	blob_remove_xattr(blob, BLOB_SNAPSHOT, true);

	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_blob_close(snapshot, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Reload blobstore */
	ut_bs_reload(&bs, NULL);

	/* Snapshot should be removed as blob is not pointing to it anymore */
	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno != 0);
}
2644 
2645 static void
2646 bs_load_custom_cluster_size(void)
2647 {
2648 	struct spdk_blob_store *bs;
2649 	struct spdk_bs_dev *dev;
2650 	struct spdk_bs_super_block *super_block;
2651 	struct spdk_bs_opts opts;
2652 	uint32_t custom_cluster_size = 4194304; /* 4MiB */
2653 	uint32_t cluster_sz;
2654 	uint64_t total_clusters;
2655 
2656 	dev = init_dev();
2657 	spdk_bs_opts_init(&opts, sizeof(opts));
2658 	opts.cluster_sz = custom_cluster_size;
2659 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2660 
2661 	/* Initialize a new blob store */
2662 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2663 	poll_threads();
2664 	CU_ASSERT(g_bserrno == 0);
2665 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2666 	bs = g_bs;
2667 	cluster_sz = bs->cluster_sz;
2668 	total_clusters = bs->total_clusters;
2669 
2670 	/* Unload the blob store */
2671 	spdk_bs_unload(bs, bs_op_complete, NULL);
2672 	poll_threads();
2673 	CU_ASSERT(g_bserrno == 0);
2674 	g_bs = NULL;
2675 	g_blob = NULL;
2676 	g_blobid = 0;
2677 
2678 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
2679 	CU_ASSERT(super_block->clean == 1);
2680 
2681 	/* Load an existing blob store */
2682 	dev = init_dev();
2683 	spdk_bs_opts_init(&opts, sizeof(opts));
2684 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2685 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2686 	poll_threads();
2687 	CU_ASSERT(g_bserrno == 0);
2688 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2689 	bs = g_bs;
2690 	/* Compare cluster size and number to one after initialization */
2691 	CU_ASSERT(cluster_sz == bs->cluster_sz);
2692 	CU_ASSERT(total_clusters == bs->total_clusters);
2693 
2694 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
2695 	CU_ASSERT(super_block->clean == 1);
2696 	CU_ASSERT(super_block->size == dev->blockcnt * dev->blocklen);
2697 
2698 	spdk_bs_unload(bs, bs_op_complete, NULL);
2699 	poll_threads();
2700 	CU_ASSERT(g_bserrno == 0);
2701 	CU_ASSERT(super_block->clean == 1);
2702 	g_bs = NULL;
2703 }
2704 
/*
 * Simulate a spdk_bs_grow that crashed mid-way: the on-disk used_cluster
 * mask was already doubled but the super block was not updated. A
 * subsequent load on a larger device must recover, keep the original
 * capacity, and leave the existing blob and snapshot intact.
 */
static void
bs_load_after_failed_grow(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct spdk_bs_super_block *super_block;
	struct spdk_bs_opts opts;
	struct spdk_bs_md_mask *mask;
	struct spdk_blob_opts blob_opts;
	struct spdk_blob *blob, *snapshot;
	spdk_blob_id blobid, snapshotid;
	uint64_t total_data_clusters;

	dev = init_dev();
	spdk_bs_opts_init(&opts, sizeof(opts));
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
	/*
	 * The bdev_size is 64M, cluster_sz is 1M, so there are 64 clusters. The
	 * blobstore will create 64 md pages by default. We set num_md_pages to 128,
	 * thus the blobstore could grow to the double size.
	 */
	opts.num_md_pages = 128;

	/* Initialize a new blob store */
	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	/* Create blob */
	ut_spdk_blob_opts_init(&blob_opts);
	blob_opts.num_clusters = 10;

	blob = ut_blob_create_and_open(bs, &blob_opts);
	blobid = spdk_blob_get_id(blob);

	/* Create snapshot */
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snapshotid = g_blobid;

	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot = g_blob;

	spdk_blob_close(snapshot, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* 10 clusters are consumed by the blob's data. */
	total_data_clusters = bs->total_data_clusters;
	CU_ASSERT(bs->num_free_clusters + 10 == total_data_clusters);

	/* Unload the blob store */
	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
	g_blob = NULL;
	g_blobid = 0;

	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
	CU_ASSERT(super_block->clean == 1);

	/* Locate the used_cluster mask directly in the on-disk image. */
	mask = (struct spdk_bs_md_mask *)(g_dev_buffer + super_block->used_cluster_mask_start *
					  g_phys_blocklen);
	CU_ASSERT(mask->type == SPDK_MD_MASK_TYPE_USED_CLUSTERS);
	CU_ASSERT(mask->length == super_block->size / super_block->cluster_size);

	/*
	 * We change the mask->length to emulate this scenario: A spdk_bs_grow failed after it changed
	 * the used_cluster bitmap length, but it didn't change the super block yet.
	 */
	mask->length *= 2;

	/* Load an existing blob store on a device twice the size. */
	dev = init_dev();
	dev->blockcnt *= 2;
	spdk_bs_opts_init(&opts, sizeof(opts));
	opts.clear_method = BS_CLEAR_WITH_NONE;
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	/* Check the capacity is the same as before */
	CU_ASSERT(bs->total_data_clusters == total_data_clusters);
	CU_ASSERT(bs->num_free_clusters + 10 == total_data_clusters);

	/* Check the blob and the snapshot are still available */
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot = g_blob;

	spdk_blob_close(snapshot, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(super_block->clean == 1);
	g_bs = NULL;
}
2831 
2832 static void
2833 bs_type(void)
2834 {
2835 	struct spdk_blob_store *bs;
2836 	struct spdk_bs_dev *dev;
2837 	struct spdk_bs_opts opts;
2838 
2839 	dev = init_dev();
2840 	spdk_bs_opts_init(&opts, sizeof(opts));
2841 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
2842 
2843 	/* Initialize a new blob store */
2844 	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
2845 	poll_threads();
2846 	CU_ASSERT(g_bserrno == 0);
2847 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2848 	bs = g_bs;
2849 
2850 	/* Unload the blob store */
2851 	spdk_bs_unload(bs, bs_op_complete, NULL);
2852 	poll_threads();
2853 	CU_ASSERT(g_bserrno == 0);
2854 	g_bs = NULL;
2855 	g_blob = NULL;
2856 	g_blobid = 0;
2857 
2858 	/* Load non existing blobstore type */
2859 	dev = init_dev();
2860 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "NONEXISTING");
2861 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2862 	poll_threads();
2863 	CU_ASSERT(g_bserrno != 0);
2864 
2865 	/* Load with empty blobstore type */
2866 	dev = init_dev();
2867 	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
2868 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2869 	poll_threads();
2870 	CU_ASSERT(g_bserrno == 0);
2871 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2872 	bs = g_bs;
2873 
2874 	spdk_bs_unload(bs, bs_op_complete, NULL);
2875 	poll_threads();
2876 	CU_ASSERT(g_bserrno == 0);
2877 	g_bs = NULL;
2878 
2879 	/* Initialize a new blob store with empty bstype */
2880 	dev = init_dev();
2881 	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
2882 	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
2883 	poll_threads();
2884 	CU_ASSERT(g_bserrno == 0);
2885 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2886 	bs = g_bs;
2887 
2888 	spdk_bs_unload(bs, bs_op_complete, NULL);
2889 	poll_threads();
2890 	CU_ASSERT(g_bserrno == 0);
2891 	g_bs = NULL;
2892 
2893 	/* Load non existing blobstore type */
2894 	dev = init_dev();
2895 	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "NONEXISTING");
2896 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2897 	poll_threads();
2898 	CU_ASSERT(g_bserrno != 0);
2899 
2900 	/* Load with empty blobstore type */
2901 	dev = init_dev();
2902 	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
2903 	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
2904 	poll_threads();
2905 	CU_ASSERT(g_bserrno == 0);
2906 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
2907 	bs = g_bs;
2908 
2909 	spdk_bs_unload(bs, bs_op_complete, NULL);
2910 	poll_threads();
2911 	CU_ASSERT(g_bserrno == 0);
2912 	g_bs = NULL;
2913 }
2914 
/*
 * Verify super block version handling: a store whose on-disk version is
 * newer than supported must fail to load, while a hand-crafted version 1
 * super block must still load successfully (backward compatibility).
 */
static void
bs_super_block(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct spdk_bs_super_block *super_block;
	struct spdk_bs_opts opts;
	struct spdk_bs_super_block_ver1 super_block_v1;

	dev = init_dev();
	spdk_bs_opts_init(&opts, sizeof(opts));
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");

	/* Initialize a new blob store */
	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	/* Unload the blob store */
	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
	g_blob = NULL;
	g_blobid = 0;

	/* Load an existing blob store with version newer than supported */
	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
	super_block->version++;

	dev = init_dev();
	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno != 0);

	/* Create a new blob store with super block version 1.
	 * The v1 super block is written directly into the device buffer,
	 * bypassing spdk_bs_init, to emulate a store created by an older SPDK. */
	dev = init_dev();
	super_block_v1.version = 1;
	memcpy(super_block_v1.signature, "SPDKBLOB", sizeof(super_block_v1.signature));
	super_block_v1.length = 0x1000;
	super_block_v1.clean = 1;
	super_block_v1.super_blob = 0xFFFFFFFFFFFFFFFF;
	super_block_v1.cluster_size = 0x100000;
	super_block_v1.used_page_mask_start = 0x01;
	super_block_v1.used_page_mask_len = 0x01;
	super_block_v1.used_cluster_mask_start = 0x02;
	super_block_v1.used_cluster_mask_len = 0x01;
	super_block_v1.md_start = 0x03;
	super_block_v1.md_len = 0x40;
	memset(super_block_v1.reserved, 0, 4036); /* 4036 == sizeof(super_block_v1.reserved) */
	super_block_v1.crc = blob_md_page_calc_crc(&super_block_v1);
	memcpy(g_dev_buffer, &super_block_v1, sizeof(struct spdk_bs_super_block_ver1));

	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
}
2983 
/*
 * Hand-craft a super block marked dirty (clean == 0) directly in the device
 * buffer, then load it.  The load path must recover the used-cluster count
 * from on-disk metadata rather than trusting the mask regions.
 */
static void
bs_test_recover_cluster_count(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct spdk_bs_super_block super_block;
	struct spdk_bs_opts opts;

	dev = init_dev();
	spdk_bs_opts_init(&opts, sizeof(opts));
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");

	super_block.version = 3;
	memcpy(super_block.signature, "SPDKBLOB", sizeof(super_block.signature));
	super_block.length = 0x1000;
	super_block.clean = 0; /* dirty shutdown: forces recovery on load */
	super_block.super_blob = 0xFFFFFFFFFFFFFFFF;
	/* Cluster size equals one metadata page, so the regions below
	 * occupy one cluster per page. */
	super_block.cluster_size = g_phys_blocklen;
	super_block.used_page_mask_start = 0x01;
	super_block.used_page_mask_len = 0x01;
	super_block.used_cluster_mask_start = 0x02;
	super_block.used_cluster_mask_len = 0x01;
	super_block.used_blobid_mask_start = 0x03;
	super_block.used_blobid_mask_len = 0x01;
	super_block.md_start = 0x04;
	super_block.md_len = 0x40;
	memset(super_block.bstype.bstype, 0, sizeof(super_block.bstype.bstype));
	super_block.size = dev->blockcnt * dev->blocklen;
	super_block.io_unit_size = 0x1000;
	memset(super_block.reserved, 0, 4000);
	super_block.crc = blob_md_page_calc_crc(&super_block);
	memcpy(g_dev_buffer, &super_block, sizeof(struct spdk_bs_super_block));

	memset(opts.bstype.bstype, 0, sizeof(opts.bstype.bstype));
	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;
	/* After recovery, everything up to the end of the metadata region
	 * (md_start + md_len clusters) is accounted as used. */
	CU_ASSERT(bs->num_free_clusters == bs->total_clusters - (super_block.md_start +
			super_block.md_len));

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
}
3031 
/*
 * Helper for bs_grow_live(): initialize a blobstore, bump the dev size to
 * new_blockcnt and run spdk_bs_grow_live().  Verify that the super block
 * and the used_cluster mask on "disk" reflect the new size across unload
 * and reload, and that a grow with no size change also succeeds.
 */
static void
bs_grow_live_size(uint64_t new_blockcnt)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct spdk_bs_super_block super_block;
	struct spdk_bs_opts opts;
	struct spdk_bs_md_mask mask;
	uint64_t bdev_size;
	uint64_t total_data_clusters;

	/*
	 * Further down the test the dev size will be larger than the g_dev_buffer size,
	 * so we set clear_method to NONE, or the blobstore will try to clear the dev and
	 * will write beyond the end of g_dev_buffer.
	 */
	dev = init_dev();
	spdk_bs_opts_init(&opts, sizeof(opts));
	opts.clear_method = BS_CLEAR_WITH_NONE;
	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	/*
	 * Set the dev size according to the new_blockcnt,
	 * then the blobstore will adjust the metadata according to the new size.
	 */
	dev->blockcnt = new_blockcnt;
	bdev_size = dev->blockcnt * dev->blocklen;
	spdk_bs_grow_live(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	total_data_clusters = spdk_bs_total_data_cluster_count(bs);

	/* Make sure the super block is updated. */
	memcpy(&super_block, g_dev_buffer, sizeof(struct spdk_bs_super_block));
	CU_ASSERT(super_block.size == bdev_size);
	CU_ASSERT(super_block.clean == 0);
	/* The used_cluster mask is not written out until first spdk_bs_unload. */
	memcpy(&mask, g_dev_buffer + super_block.used_cluster_mask_start * g_phys_blocklen,
	       sizeof(struct spdk_bs_md_mask));
	CU_ASSERT(mask.type == 0);
	CU_ASSERT(mask.length == 0);

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;

	/* Make sure all metadata is correct, super block and used_cluster mask. */
	memcpy(&super_block, g_dev_buffer, sizeof(struct spdk_bs_super_block));
	CU_ASSERT(super_block.size == bdev_size);
	CU_ASSERT(super_block.clean == 1);
	memcpy(&mask, g_dev_buffer + super_block.used_cluster_mask_start * g_phys_blocklen,
	       sizeof(struct spdk_bs_md_mask));
	CU_ASSERT(mask.type == SPDK_MD_MASK_TYPE_USED_CLUSTERS);
	/* One mask bit per 1 MiB cluster (the default cluster size is not
	 * overridden by opts here). */
	CU_ASSERT(mask.length == bdev_size / (1 * 1024 * 1024));

	/* Load blobstore and check the cluster counts again. */
	dev = init_dev();
	dev->blockcnt = new_blockcnt;
	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	CU_ASSERT(super_block.clean == 1);
	bs = g_bs;
	CU_ASSERT(total_data_clusters == spdk_bs_total_data_cluster_count(bs));

	/* Perform grow without change in size, expected pass. */
	spdk_bs_grow_live(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(total_data_clusters == spdk_bs_total_data_cluster_count(bs));
	memcpy(&super_block, g_dev_buffer, sizeof(struct spdk_bs_super_block));
	CU_ASSERT(super_block.size == bdev_size);
	CU_ASSERT(super_block.clean == 1);

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
}
3117 
3118 static void
3119 bs_grow_live(void)
3120 {
3121 	/* No change expected */
3122 	bs_grow_live_size(DEV_BUFFER_BLOCKCNT);
3123 
3124 	/* Size slightly increased, but not enough to increase cluster count */
3125 	bs_grow_live_size(DEV_BUFFER_BLOCKCNT + 1);
3126 
3127 	/* Size doubled, increasing the cluster count */
3128 	bs_grow_live_size(DEV_BUFFER_BLOCKCNT * 2);
3129 }
3130 
/*
 * Negative tests for spdk_bs_grow_live(): shrinking the dev must fail with
 * -EILSEQ, and growing past the space reserved for the used_cluster mask
 * must fail with -ENOSPC.  Neither attempt may alter the blobstore.
 */
static void
bs_grow_live_no_space(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct spdk_bs_super_block super_block;
	struct spdk_bs_opts opts;
	struct spdk_bs_md_mask mask;
	uint64_t bdev_size_init;
	uint64_t total_data_clusters, max_clusters;

	/*
	 * Further down the test the dev size will be larger than the g_dev_buffer size,
	 * so we set clear_method to NONE, or the blobstore will try to clear the dev and
	 * will write beyond the end of g_dev_buffer.
	 */
	dev = init_dev();
	bdev_size_init = dev->blockcnt * dev->blocklen;
	spdk_bs_opts_init(&opts, sizeof(opts));
	opts.clear_method = BS_CLEAR_WITH_NONE;
	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;
	total_data_clusters = spdk_bs_total_data_cluster_count(bs);

	/*
	 * The default dev size is 64M, here we set the dev size to 32M,
	 * expecting EILSEQ due to super_block validation and no change in blobstore.
	 */
	dev->blockcnt = (32L * 1024L * 1024L) / dev->blocklen;
	spdk_bs_grow_live(bs, bs_op_complete, NULL);
	poll_threads();
	/* This error code comes from bs_super_validate() */
	CU_ASSERT(g_bserrno == -EILSEQ);
	CU_ASSERT(total_data_clusters == spdk_bs_total_data_cluster_count(bs));
	memcpy(&super_block, g_dev_buffer, sizeof(struct spdk_bs_super_block));
	CU_ASSERT(super_block.size == bdev_size_init);

	/*
	 * Blobstore in this test has only space for single md_page for used_clusters,
	 * which fits 1 bit per cluster minus the md header.
	 *
	 * Dev size is increased to exceed the reserved space for the used_cluster_mask
	 * in the metadata, expecting ENOSPC and no change in blobstore.
	 */
	max_clusters = (spdk_bs_get_page_size(bs) - sizeof(struct spdk_bs_md_mask)) * 8;
	max_clusters += 1;
	dev->blockcnt = (max_clusters * spdk_bs_get_cluster_size(bs)) / dev->blocklen;
	spdk_bs_grow_live(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -ENOSPC);
	CU_ASSERT(total_data_clusters == spdk_bs_total_data_cluster_count(bs));
	memcpy(&super_block, g_dev_buffer, sizeof(struct spdk_bs_super_block));
	CU_ASSERT(super_block.size == bdev_size_init);

	/*
	 * No change should have occurred for the duration of the test,
	 * unload blobstore and check metadata.
	 */
	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;

	/* Make sure all metadata is correct, super block and used_cluster mask. */
	memcpy(&super_block, g_dev_buffer, sizeof(struct spdk_bs_super_block));
	CU_ASSERT(super_block.size == bdev_size_init);
	CU_ASSERT(super_block.clean == 1);
	memcpy(&mask, g_dev_buffer + super_block.used_cluster_mask_start * g_phys_blocklen,
	       sizeof(struct spdk_bs_md_mask));
	CU_ASSERT(mask.type == SPDK_MD_MASK_TYPE_USED_CLUSTERS);
	/* One mask bit per 1 MiB cluster (default cluster size in this test) */
	CU_ASSERT(mask.length == bdev_size_init / (1 * 1024 * 1024));

	/* Load blobstore and check the cluster counts again. */
	dev = init_dev();
	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;
	CU_ASSERT(total_data_clusters == spdk_bs_total_data_cluster_count(bs));

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
}
3220 
/*
 * Verify offline grow via spdk_bs_grow(): create a 64M blobstore, reopen it
 * with a 128M dev and check that the super block size and the used_cluster
 * mask on "disk" are updated accordingly.
 */
static void
bs_test_grow(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct spdk_bs_super_block super_block;
	struct spdk_bs_opts opts;
	struct spdk_bs_md_mask mask;
	uint64_t bdev_size;

	dev = init_dev();
	bdev_size = dev->blockcnt * dev->blocklen;
	spdk_bs_opts_init(&opts, sizeof(opts));
	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;

	/*
	 * To make sure all the metadata are updated to the disk,
	 * we check the g_dev_buffer after spdk_bs_unload.
	 */
	memcpy(&super_block, g_dev_buffer, sizeof(struct spdk_bs_super_block));
	CU_ASSERT(super_block.size == bdev_size);

	/*
	 * Make sure the used_cluster mask is correct.
	 */
	memcpy(&mask, g_dev_buffer + super_block.used_cluster_mask_start * g_phys_blocklen,
	       sizeof(struct spdk_bs_md_mask));
	CU_ASSERT(mask.type == SPDK_MD_MASK_TYPE_USED_CLUSTERS);
	/* One mask bit per 1 MiB cluster (default cluster size) */
	CU_ASSERT(mask.length == bdev_size / (1 * 1024 * 1024));

	/*
	 * The default dev size is 64M, here we set the dev size to 128M,
	 * then the blobstore will adjust the metadata according to the new size.
	 * The dev size is larger than the g_dev_buffer size, so we set clear_method
	 * to NONE, or the blobstore will try to clear the dev and will write beyond
	 * the end of g_dev_buffer.
	 */
	dev = init_dev();
	dev->blockcnt = (128L * 1024L * 1024L) / dev->blocklen;
	bdev_size = dev->blockcnt * dev->blocklen;
	spdk_bs_opts_init(&opts, sizeof(opts));
	opts.clear_method = BS_CLEAR_WITH_NONE;
	spdk_bs_grow(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	/*
	 * After spdk_bs_grow, all metadata are updated to the disk.
	 * So we can check g_dev_buffer now.
	 */
	memcpy(&super_block, g_dev_buffer, sizeof(struct spdk_bs_super_block));
	CU_ASSERT(super_block.size == bdev_size);

	/*
	 * Make sure the used_cluster mask has been updated according to the bdev size
	 */
	memcpy(&mask, g_dev_buffer + super_block.used_cluster_mask_start * g_phys_blocklen,
	       sizeof(struct spdk_bs_md_mask));
	CU_ASSERT(mask.type == SPDK_MD_MASK_TYPE_USED_CLUSTERS);
	CU_ASSERT(mask.length == bdev_size / (1 * 1024 * 1024));

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
}
3298 
3299 /*
3300  * Create a blobstore and then unload it.
3301  */
3302 static void
3303 bs_unload(void)
3304 {
3305 	struct spdk_blob_store *bs = g_bs;
3306 	struct spdk_blob *blob;
3307 
3308 	/* Create a blob and open it. */
3309 	blob = ut_blob_create_and_open(bs, NULL);
3310 
3311 	/* Try to unload blobstore, should fail with open blob */
3312 	g_bserrno = -1;
3313 	spdk_bs_unload(bs, bs_op_complete, NULL);
3314 	poll_threads();
3315 	CU_ASSERT(g_bserrno == -EBUSY);
3316 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3317 
3318 	/* Close the blob, then successfully unload blobstore */
3319 	g_bserrno = -1;
3320 	spdk_blob_close(blob, blob_op_complete, NULL);
3321 	poll_threads();
3322 	CU_ASSERT(g_bserrno == 0);
3323 }
3324 
3325 /*
3326  * Create a blobstore with a cluster size different than the default, and ensure it is
3327  *  persisted.
3328  */
static void
bs_cluster_sz(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct spdk_bs_opts opts;
	uint32_t cluster_sz;

	/* Set cluster size to zero */
	dev = init_dev();
	spdk_bs_opts_init(&opts, sizeof(opts));
	opts.cluster_sz = 0;

	/* Initialize a new blob store */
	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);
	SPDK_CU_ASSERT_FATAL(g_bs == NULL);

	/*
	 * Set cluster size to blobstore page size,
	 * to work it is required to be at least twice the blobstore page size.
	 */
	dev = init_dev();
	spdk_bs_opts_init(&opts, sizeof(opts));
	opts.cluster_sz = g_phys_blocklen;

	/* Initialize a new blob store */
	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	/* NOTE: this case is rejected with -ENOMEM, not -EINVAL */
	CU_ASSERT(g_bserrno == -ENOMEM);
	SPDK_CU_ASSERT_FATAL(g_bs == NULL);

	/*
	 * Set cluster size to lower than page size,
	 * to work it is required to be at least twice the blobstore page size.
	 */
	dev = init_dev();
	spdk_bs_opts_init(&opts, sizeof(opts));
	opts.cluster_sz = g_phys_blocklen - 1;

	/* Initialize a new blob store */
	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);
	SPDK_CU_ASSERT_FATAL(g_bs == NULL);

	/* Set cluster size to twice the default */
	dev = init_dev();
	spdk_bs_opts_init(&opts, sizeof(opts));
	opts.cluster_sz *= 2;
	cluster_sz = opts.cluster_sz;

	/* Initialize a new blob store */
	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz);

	/* The non-default cluster size must survive a reload */
	ut_bs_reload(&bs, &opts);

	CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz);

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
}
3400 
3401 /*
3402  * Create a blobstore, reload it and ensure total usable cluster count
3403  *  stays the same.
3404  */
3405 static void
3406 bs_usable_clusters(void)
3407 {
3408 	struct spdk_blob_store *bs = g_bs;
3409 	struct spdk_blob *blob;
3410 	uint32_t clusters;
3411 	int i;
3412 
3413 
3414 	clusters = spdk_bs_total_data_cluster_count(bs);
3415 
3416 	ut_bs_reload(&bs, NULL);
3417 
3418 	CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters);
3419 
3420 	/* Create and resize blobs to make sure that usable cluster count won't change */
3421 	for (i = 0; i < 4; i++) {
3422 		g_bserrno = -1;
3423 		g_blobid = SPDK_BLOBID_INVALID;
3424 		blob = ut_blob_create_and_open(bs, NULL);
3425 
3426 		spdk_blob_resize(blob, 10, blob_op_complete, NULL);
3427 		poll_threads();
3428 		CU_ASSERT(g_bserrno == 0);
3429 
3430 		g_bserrno = -1;
3431 		spdk_blob_close(blob, blob_op_complete, NULL);
3432 		poll_threads();
3433 		CU_ASSERT(g_bserrno == 0);
3434 
3435 		CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters);
3436 	}
3437 
3438 	/* Reload the blob store to make sure that nothing changed */
3439 	ut_bs_reload(&bs, NULL);
3440 
3441 	CU_ASSERT(spdk_bs_total_data_cluster_count(bs) == clusters);
3442 }
3443 
3444 /*
3445  * Test resizing of the metadata blob.  This requires creating enough blobs
3446  *  so that one cluster is not enough to fit the metadata for those blobs.
3447  *  To induce this condition to happen more quickly, we reduce the cluster
3448  *  size to 16KB, which means only 4 4KB blob metadata pages can fit.
3449  */
static void
bs_resize_md(void)
{
	struct spdk_blob_store *bs;
	const int CLUSTER_PAGE_COUNT = 4;
	const int NUM_BLOBS = CLUSTER_PAGE_COUNT * 4;
	struct spdk_bs_dev *dev;
	struct spdk_bs_opts opts;
	struct spdk_blob *blob;
	struct spdk_blob_opts blob_opts;
	uint32_t cluster_sz;
	spdk_blob_id blobids[NUM_BLOBS];
	int i;


	dev = init_dev();
	spdk_bs_opts_init(&opts, sizeof(opts));
	/* 16KB clusters: only CLUSTER_PAGE_COUNT (4) metadata pages fit per cluster */
	opts.cluster_sz = CLUSTER_PAGE_COUNT * g_phys_blocklen;
	cluster_sz = opts.cluster_sz;

	/* Initialize a new blob store */
	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz);

	ut_spdk_blob_opts_init(&blob_opts);

	/* Create enough blobs that their metadata pages cannot fit into a
	 * single cluster, forcing the metadata blob to be resized. */
	for (i = 0; i < NUM_BLOBS; i++) {
		g_bserrno = -1;
		g_blobid = SPDK_BLOBID_INVALID;
		spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(g_blobid !=  SPDK_BLOBID_INVALID);
		blobids[i] = g_blobid;
	}

	ut_bs_reload(&bs, &opts);

	CU_ASSERT(spdk_bs_get_cluster_size(bs) == cluster_sz);

	/* Every blob must still be present and openable after the reload. */
	for (i = 0; i < NUM_BLOBS; i++) {
		g_bserrno = -1;
		g_blob = NULL;
		spdk_bs_open_blob(bs, blobids[i], blob_op_with_handle_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(g_blob !=  NULL);
		blob = g_blob;
		g_bserrno = -1;
		spdk_blob_close(blob, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
	}

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
}
3514 
3515 static void
3516 bs_destroy(void)
3517 {
3518 	struct spdk_blob_store *bs;
3519 	struct spdk_bs_dev *dev;
3520 
3521 	/* Initialize a new blob store */
3522 	dev = init_dev();
3523 	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
3524 	poll_threads();
3525 	CU_ASSERT(g_bserrno == 0);
3526 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3527 	bs = g_bs;
3528 
3529 	/* Destroy the blob store */
3530 	g_bserrno = -1;
3531 	spdk_bs_destroy(bs, bs_op_complete, NULL);
3532 	poll_threads();
3533 	CU_ASSERT(g_bserrno == 0);
3534 
3535 	/* Loading an non-existent blob store should fail. */
3536 	g_bs = NULL;
3537 	dev = init_dev();
3538 
3539 	g_bserrno = 0;
3540 	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
3541 	poll_threads();
3542 	CU_ASSERT(g_bserrno != 0);
3543 }
3544 
3545 /* Try to hit all of the corner cases associated with serializing
3546  * a blob to disk
3547  */
static void
blob_serialize_test(void)
{
	struct spdk_bs_dev *dev;
	struct spdk_bs_opts opts;
	struct spdk_blob_store *bs;
	spdk_blob_id blobid[2];
	struct spdk_blob *blob[2];
	uint64_t i;
	char *value;
	int rc;

	dev = init_dev();

	/* Initialize a new blobstore with very small clusters */
	spdk_bs_opts_init(&opts, sizeof(opts));
	opts.cluster_sz = dev->blocklen * 8;	/* 8 blocks per cluster */
	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	/* Create and open two blobs */
	for (i = 0; i < 2; i++) {
		blob[i] = ut_blob_create_and_open(bs, NULL);
		blobid[i] = spdk_blob_get_id(blob[i]);

		/* Set a fairly large xattr on both blobs to eat up
		 * metadata space
		 */
		value = calloc(dev->blocklen - 64, sizeof(char));
		SPDK_CU_ASSERT_FATAL(value != NULL);
		memset(value, i, dev->blocklen / 2);
		rc = spdk_blob_set_xattr(blob[i], "name", value, dev->blocklen - 64);
		CU_ASSERT(rc == 0);
		free(value);
	}

	/* Resize the blobs, alternating 1 cluster at a time.
	 * This thwarts run length encoding and will cause spill
	 * over of the extents.
	 */
	for (i = 0; i < 6; i++) {
		spdk_blob_resize(blob[i % 2], (i / 2) + 1, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
	}

	/* Persist both blobs' metadata to disk */
	for (i = 0; i < 2; i++) {
		spdk_blob_sync_md(blob[i], blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
	}

	/* Close the blobs */
	for (i = 0; i < 2; i++) {
		spdk_blob_close(blob[i], blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
	}

	ut_bs_reload(&bs, &opts);

	/* After reload, both blobs must deserialize with the sizes set above */
	for (i = 0; i < 2; i++) {
		blob[i] = NULL;

		spdk_bs_open_blob(bs, blobid[i], blob_op_with_handle_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(g_blob != NULL);
		blob[i] = g_blob;

		/* Each blob was resized three times by one cluster each */
		CU_ASSERT(spdk_blob_get_num_clusters(blob[i]) == 3);

		spdk_blob_close(blob[i], blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
	}

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
}
3633 
3634 static void
3635 blob_crc(void)
3636 {
3637 	struct spdk_blob_store *bs = g_bs;
3638 	struct spdk_blob *blob;
3639 	spdk_blob_id blobid;
3640 	uint32_t page_num;
3641 	int index;
3642 	struct spdk_blob_md_page *page;
3643 
3644 	blob = ut_blob_create_and_open(bs, NULL);
3645 	blobid = spdk_blob_get_id(blob);
3646 
3647 	spdk_blob_close(blob, blob_op_complete, NULL);
3648 	poll_threads();
3649 	CU_ASSERT(g_bserrno == 0);
3650 
3651 	page_num = bs_blobid_to_page(blobid);
3652 	index = g_phys_blocklen * (bs->md_start + page_num);
3653 	page = (struct spdk_blob_md_page *)&g_dev_buffer[index];
3654 	page->crc = 0;
3655 
3656 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
3657 	poll_threads();
3658 	CU_ASSERT(g_bserrno == -EINVAL);
3659 	CU_ASSERT(g_blob == NULL);
3660 	g_bserrno = 0;
3661 
3662 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
3663 	poll_threads();
3664 	CU_ASSERT(g_bserrno == -EINVAL);
3665 }
3666 
3667 static void
3668 super_block_crc(void)
3669 {
3670 	struct spdk_blob_store *bs;
3671 	struct spdk_bs_dev *dev;
3672 	struct spdk_bs_super_block *super_block;
3673 
3674 	dev = init_dev();
3675 	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
3676 	poll_threads();
3677 	CU_ASSERT(g_bserrno == 0);
3678 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
3679 	bs = g_bs;
3680 
3681 	spdk_bs_unload(bs, bs_op_complete, NULL);
3682 	poll_threads();
3683 	CU_ASSERT(g_bserrno == 0);
3684 	g_bs = NULL;
3685 
3686 	super_block = (struct spdk_bs_super_block *)g_dev_buffer;
3687 	super_block->crc = 0;
3688 	dev = init_dev();
3689 
3690 	/* Load an existing blob store */
3691 	g_bserrno = 0;
3692 	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
3693 	poll_threads();
3694 	CU_ASSERT(g_bserrno == -EILSEQ);
3695 }
3696 
3697 /* For blob dirty shutdown test case we do the following sub-test cases:
3698  * 1 Initialize new blob store and create 1 super blob with some xattrs, then we
3699  *   dirty shutdown and reload the blob store and verify the xattrs.
3700  * 2 Resize the blob from 10 clusters to 20 clusters and then dirty shutdown,
3701  *   reload the blob store and verify the clusters number.
3702  * 3 Create the second blob and then dirty shutdown, reload the blob store
3703  *   and verify the second blob.
3704  * 4 Delete the second blob and then dirty shutdown, reload the blob store
3705  *   and verify the second blob is invalid.
 * 5 Create the second blob again and also create the third blob, modify the
 *   md of the second blob to make it invalid, then dirty shutdown and reload
 *   the blob store; verify the second blob (it should be invalid) and the
 *   third blob (it should be correct).
3710  */
/* Exercise blobstore recovery after an unclean (dirty) shutdown.  Each call to
 * ut_bs_dirty_load() reloads the blobstore without a clean unload, so the
 * on-disk "clean" flag is not set and the load path must reconstruct state;
 * the assertions that follow each reload verify that blob metadata (xattrs,
 * cluster counts, super-blob id, deletions) survived recovery.
 */
static void
blob_dirty_shutdown(void)
{
	int rc;
	int index;
	struct spdk_blob_store *bs = g_bs;
	spdk_blob_id blobid1, blobid2, blobid3;
	struct spdk_blob *blob = g_blob;
	uint64_t length;
	uint64_t free_clusters;
	const void *value;
	size_t value_len;
	uint32_t page_num;
	struct spdk_blob_md_page *page;
	struct spdk_blob_opts blob_opts;

	/* Create first blob */
	blobid1 = spdk_blob_get_id(blob);

	/* Set some xattrs */
	rc = spdk_blob_set_xattr(blob, "name", "log.txt", strlen("log.txt") + 1);
	CU_ASSERT(rc == 0);

	length = 2345;
	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
	CU_ASSERT(rc == 0);

	/* Put xattr that fits exactly single page.
	 * This results in adding additional pages to MD.
	 * First is flags and smaller xattr, second the large xattr,
	 * third are just the extents.
	 */
	size_t xattr_length = 4072 - sizeof(struct spdk_blob_md_descriptor_xattr) -
			      strlen("large_xattr");
	char *xattr = calloc(xattr_length, sizeof(char));
	SPDK_CU_ASSERT_FATAL(xattr != NULL);
	rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
	free(xattr);
	SPDK_CU_ASSERT_FATAL(rc == 0);

	/* Resize the blob */
	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Set the blob as the super blob */
	spdk_bs_set_super(bs, blobid1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Snapshot the free-cluster count so it can be compared after reload. */
	free_clusters = spdk_bs_free_cluster_count(bs);

	/* Closing the blob persists its metadata; the dirty load below skips
	 * the clean-unload path. */
	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	blob = NULL;
	g_blob = NULL;
	g_blobid = SPDK_BLOBID_INVALID;

	ut_bs_dirty_load(&bs, NULL);

	/* Get the super blob */
	spdk_bs_get_super(bs, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(blobid1 == g_blobid);

	spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;

	/* Recovery must not leak or double-count clusters. */
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));

	/* Get the xattrs */
	value = NULL;
	rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(*(uint64_t *)value == length);
	CU_ASSERT(value_len == 8);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);

	/* Resize the blob */
	spdk_blob_resize(blob, 20, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	free_clusters = spdk_bs_free_cluster_count(bs);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	blob = NULL;
	g_blob = NULL;
	g_blobid = SPDK_BLOBID_INVALID;

	/* Dirty reload #2: the new size (20 clusters) must survive. */
	ut_bs_dirty_load(&bs, NULL);

	spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 20);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	blob = NULL;
	g_blob = NULL;
	g_blobid = SPDK_BLOBID_INVALID;

	/* Create second blob */
	blob = ut_blob_create_and_open(bs, NULL);
	blobid2 = spdk_blob_get_id(blob);

	/* Set some xattrs */
	rc = spdk_blob_set_xattr(blob, "name", "log1.txt", strlen("log1.txt") + 1);
	CU_ASSERT(rc == 0);

	length = 5432;
	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
	CU_ASSERT(rc == 0);

	/* Resize the blob */
	spdk_blob_resize(blob, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	free_clusters = spdk_bs_free_cluster_count(bs);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	blob = NULL;
	g_blob = NULL;
	g_blobid = SPDK_BLOBID_INVALID;

	/* Dirty reload #3: second blob must be recovered intact. */
	ut_bs_dirty_load(&bs, NULL);

	spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;

	/* Get the xattrs */
	value = NULL;
	rc = spdk_blob_get_xattr_value(blob, "length", &value, &value_len);
	CU_ASSERT(rc == 0);
	SPDK_CU_ASSERT_FATAL(value != NULL);
	CU_ASSERT(*(uint64_t *)value == length);
	CU_ASSERT(value_len == 8);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));

	/* Delete the second blob, then verify the deletion survives a dirty
	 * reload (open must fail). */
	ut_blob_close_and_delete(bs, blob);

	free_clusters = spdk_bs_free_cluster_count(bs);

	ut_bs_dirty_load(&bs, NULL);

	spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno != 0);
	CU_ASSERT(g_blob == NULL);

	/* The first blob must still be openable after the failed open above. */
	spdk_bs_open_blob(bs, blobid1, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Clean reload before the corruption scenario below. */
	ut_bs_reload(&bs, NULL);

	/* Create second blob */
	ut_spdk_blob_opts_init(&blob_opts);
	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid2 = g_blobid;

	/* Create third blob */
	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid3 = g_blobid;

	spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;

	/* Set some xattrs for second blob */
	rc = spdk_blob_set_xattr(blob, "name", "log1.txt", strlen("log1.txt") + 1);
	CU_ASSERT(rc == 0);

	length = 5432;
	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
	CU_ASSERT(rc == 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	blob = NULL;
	g_blob = NULL;
	g_blobid = SPDK_BLOBID_INVALID;

	spdk_bs_open_blob(bs, blobid3, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;

	/* Set some xattrs for third blob */
	rc = spdk_blob_set_xattr(blob, "name", "log2.txt", strlen("log2.txt") + 1);
	CU_ASSERT(rc == 0);

	length = 5432;
	rc = spdk_blob_set_xattr(blob, "length", &length, sizeof(length));
	CU_ASSERT(rc == 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	blob = NULL;
	g_blob = NULL;
	g_blobid = SPDK_BLOBID_INVALID;

	/* Mark second blob as invalid: patch its first metadata page directly
	 * in the simulated device buffer.  Setting sequence_num to a non-zero
	 * value (with the crc recomputed so the page itself still checksums)
	 * leaves an inconsistent md chain, which recovery should reject. */
	page_num = bs_blobid_to_page(blobid2);

	index = g_phys_blocklen * (bs->md_start + page_num);
	page = (struct spdk_blob_md_page *)&g_dev_buffer[index];
	page->sequence_num = 1;
	page->crc = blob_md_page_calc_crc(page);

	free_clusters = spdk_bs_free_cluster_count(bs);

	ut_bs_dirty_load(&bs, NULL);

	/* Second blob's md was corrupted above, so the open must fail... */
	spdk_bs_open_blob(bs, blobid2, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno != 0);
	CU_ASSERT(g_blob == NULL);

	/* ...while the third blob must remain fully usable. */
	spdk_bs_open_blob(bs, blobid3, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;

	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
}
3975 
/* Verify forward-compatibility handling of unknown per-blob flag bits.
 * Three blobs each get an unknown bit set in one of the three flag masks
 * (invalid_flags, data_ro_flags, md_ro_flags).  After a reload:
 *   - an unknown invalid flag must make the blob unopenable,
 *   - an unknown data_ro flag must force the blob data- and md-read-only,
 *   - an unknown md_ro flag must force the blob md-read-only (data writable).
 */
static void
blob_flags(void)
{
	struct spdk_blob_store *bs = g_bs;
	spdk_blob_id blobid_invalid, blobid_data_ro, blobid_md_ro;
	struct spdk_blob *blob_invalid, *blob_data_ro, *blob_md_ro;
	struct spdk_blob_opts blob_opts;
	int rc;

	/* Create three blobs - one each for testing invalid, data_ro and md_ro flags. */
	blob_invalid = ut_blob_create_and_open(bs, NULL);
	blobid_invalid = spdk_blob_get_id(blob_invalid);

	blob_data_ro = ut_blob_create_and_open(bs, NULL);
	blobid_data_ro = spdk_blob_get_id(blob_data_ro);

	/* clear_method is persisted in md_ro_flags; confirm it landed there. */
	ut_spdk_blob_opts_init(&blob_opts);
	blob_opts.clear_method = BLOB_CLEAR_WITH_WRITE_ZEROES;
	blob_md_ro = ut_blob_create_and_open(bs, &blob_opts);
	blobid_md_ro = spdk_blob_get_id(blob_md_ro);
	CU_ASSERT((blob_md_ro->md_ro_flags & SPDK_BLOB_MD_RO_FLAGS_MASK) == BLOB_CLEAR_WITH_WRITE_ZEROES);

	/* Change the size of blob_data_ro to check if flags are serialized
	 * when blob has non zero number of extents */
	spdk_blob_resize(blob_data_ro, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Set the xattr to check if flags are serialized
	 * when blob has non zero number of xattrs */
	rc = spdk_blob_set_xattr(blob_md_ro, "name", "log.txt", strlen("log.txt") + 1);
	CU_ASSERT(rc == 0);

	/* Set a high, unassigned bit in each flag mask to emulate metadata
	 * written by a hypothetical future version. */
	blob_invalid->invalid_flags = (1ULL << 63);
	blob_invalid->state = SPDK_BLOB_STATE_DIRTY;
	blob_data_ro->data_ro_flags = (1ULL << 62);
	blob_data_ro->state = SPDK_BLOB_STATE_DIRTY;
	blob_md_ro->md_ro_flags = (1ULL << 61);
	blob_md_ro->state = SPDK_BLOB_STATE_DIRTY;

	/* Persist the doctored flags to "disk". */
	g_bserrno = -1;
	spdk_blob_sync_md(blob_invalid, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bserrno = -1;
	spdk_blob_sync_md(blob_data_ro, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bserrno = -1;
	spdk_blob_sync_md(blob_md_ro, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	g_bserrno = -1;
	spdk_blob_close(blob_invalid, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	blob_invalid = NULL;
	g_bserrno = -1;
	spdk_blob_close(blob_data_ro, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	blob_data_ro = NULL;
	g_bserrno = -1;
	spdk_blob_close(blob_md_ro, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	blob_md_ro = NULL;

	g_blob = NULL;
	g_blobid = SPDK_BLOBID_INVALID;

	ut_bs_reload(&bs, NULL);

	/* Unknown invalid flag: the blob must refuse to open. */
	g_blob = NULL;
	g_bserrno = 0;
	spdk_bs_open_blob(bs, blobid_invalid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno != 0);
	CU_ASSERT(g_blob == NULL);

	g_blob = NULL;
	g_bserrno = -1;
	spdk_bs_open_blob(bs, blobid_data_ro, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob_data_ro = g_blob;
	/* If an unknown data_ro flag was found, the blob should be marked both data and md read-only. */
	CU_ASSERT(blob_data_ro->data_ro == true);
	CU_ASSERT(blob_data_ro->md_ro == true);
	CU_ASSERT(spdk_blob_get_num_clusters(blob_data_ro) == 10);

	/* Unknown md_ro flag: metadata read-only, data still writable. */
	g_blob = NULL;
	g_bserrno = -1;
	spdk_bs_open_blob(bs, blobid_md_ro, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob_md_ro = g_blob;
	CU_ASSERT(blob_md_ro->data_ro == false);
	CU_ASSERT(blob_md_ro->md_ro == true);

	/* Syncing md on an md_ro blob should succeed (it is a no-op write). */
	g_bserrno = -1;
	spdk_blob_sync_md(blob_md_ro, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	ut_blob_close_and_delete(bs, blob_data_ro);
	ut_blob_close_and_delete(bs, blob_md_ro);
}
4087 
/* Verify that loading a blobstore with an older on-disk version (v2) works
 * and that load/unload cycles never silently upgrade the version field.
 * Version 2 predates the used-blobid mask, so that mask is zeroed out and
 * blob IDs must be reconstructed from metadata on load.
 */
static void
bs_version(void)
{
	struct spdk_bs_super_block *super;
	struct spdk_blob_store *bs = g_bs;
	struct spdk_bs_dev *dev;
	struct spdk_blob *blob;
	struct spdk_blob_opts blob_opts;
	spdk_blob_id blobid;

	/* Unload the blob store */
	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;

	/*
	 * Change the bs version on disk.  This will allow us to
	 *  test that the version does not get modified automatically
	 *  when loading and unloading the blobstore.
	 */
	/* The super block lives at offset 0 of the simulated device. */
	super = (struct spdk_bs_super_block *)&g_dev_buffer[0];
	CU_ASSERT(super->version == SPDK_BS_VERSION);
	CU_ASSERT(super->clean == 1);
	super->version = 2;
	/*
	 * Version 2 metadata does not have a used blobid mask, so clear
	 *  those fields in the super block and zero the corresponding
	 *  region on "disk".  We will use this to ensure blob IDs are
	 *  correctly reconstructed.
	 */
	memset(&g_dev_buffer[super->used_blobid_mask_start * SPDK_BS_PAGE_SIZE], 0,
	       super->used_blobid_mask_len * SPDK_BS_PAGE_SIZE);
	super->used_blobid_mask_start = 0;
	super->used_blobid_mask_len = 0;
	/* Recompute the crc so the doctored super block still validates. */
	super->crc = blob_md_page_calc_crc(super);

	/* Load an existing blob store */
	dev = init_dev();
	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	CU_ASSERT(super->clean == 1);
	bs = g_bs;

	/*
	 * Create a blob - just to make sure that when we unload it
	 *  results in writing the super block (since metadata pages
	 *  were allocated.
	 */
	ut_spdk_blob_opts_init(&blob_opts);
	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	/* Unload the blob store */
	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
	/* Unload must not bump the version or resurrect the blobid mask. */
	CU_ASSERT(super->version == 2);
	CU_ASSERT(super->used_blobid_mask_start == 0);
	CU_ASSERT(super->used_blobid_mask_len == 0);

	dev = init_dev();
	spdk_bs_load(dev, NULL, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	/* The blob created above must be findable without the blobid mask. */
	g_blob = NULL;
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;

	ut_blob_close_and_delete(bs, blob);

	CU_ASSERT(super->version == 2);
	CU_ASSERT(super->used_blobid_mask_start == 0);
	CU_ASSERT(super->used_blobid_mask_len == 0);
}
4175 
4176 static void
4177 blob_set_xattrs_test(void)
4178 {
4179 	struct spdk_blob_store *bs = g_bs;
4180 	struct spdk_blob *blob;
4181 	struct spdk_blob_opts opts;
4182 	const void *value;
4183 	size_t value_len;
4184 	char *xattr;
4185 	size_t xattr_length;
4186 	int rc;
4187 
4188 	/* Create blob with extra attributes */
4189 	ut_spdk_blob_opts_init(&opts);
4190 
4191 	opts.xattrs.names = g_xattr_names;
4192 	opts.xattrs.get_value = _get_xattr_value;
4193 	opts.xattrs.count = 3;
4194 	opts.xattrs.ctx = &g_ctx;
4195 
4196 	blob = ut_blob_create_and_open(bs, &opts);
4197 
4198 	/* Get the xattrs */
4199 	value = NULL;
4200 
4201 	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[0], &value, &value_len);
4202 	CU_ASSERT(rc == 0);
4203 	SPDK_CU_ASSERT_FATAL(value != NULL);
4204 	CU_ASSERT(value_len == strlen(g_xattr_values[0]));
4205 	CU_ASSERT_NSTRING_EQUAL_FATAL(value, g_xattr_values[0], value_len);
4206 
4207 	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[1], &value, &value_len);
4208 	CU_ASSERT(rc == 0);
4209 	SPDK_CU_ASSERT_FATAL(value != NULL);
4210 	CU_ASSERT(value_len == strlen(g_xattr_values[1]));
4211 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[1], value_len);
4212 
4213 	rc = spdk_blob_get_xattr_value(blob, g_xattr_names[2], &value, &value_len);
4214 	CU_ASSERT(rc == 0);
4215 	SPDK_CU_ASSERT_FATAL(value != NULL);
4216 	CU_ASSERT(value_len == strlen(g_xattr_values[2]));
4217 	CU_ASSERT_NSTRING_EQUAL((char *)value, g_xattr_values[2], value_len);
4218 
4219 	/* Try to get non existing attribute */
4220 
4221 	rc = spdk_blob_get_xattr_value(blob, "foobar", &value, &value_len);
4222 	CU_ASSERT(rc == -ENOENT);
4223 
4224 	/* Try xattr exceeding maximum length of descriptor in single page */
4225 	xattr_length = SPDK_BS_MAX_DESC_SIZE - sizeof(struct spdk_blob_md_descriptor_xattr) -
4226 		       strlen("large_xattr") + 1;
4227 	xattr = calloc(xattr_length, sizeof(char));
4228 	SPDK_CU_ASSERT_FATAL(xattr != NULL);
4229 	rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
4230 	free(xattr);
4231 	SPDK_CU_ASSERT_FATAL(rc == -ENOMEM);
4232 
4233 	spdk_blob_close(blob, blob_op_complete, NULL);
4234 	poll_threads();
4235 	CU_ASSERT(g_bserrno == 0);
4236 	blob = NULL;
4237 	g_blob = NULL;
4238 	g_blobid = SPDK_BLOBID_INVALID;
4239 
4240 	/* NULL callback */
4241 	ut_spdk_blob_opts_init(&opts);
4242 	opts.xattrs.names = g_xattr_names;
4243 	opts.xattrs.get_value = NULL;
4244 	opts.xattrs.count = 1;
4245 	opts.xattrs.ctx = &g_ctx;
4246 
4247 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
4248 	poll_threads();
4249 	CU_ASSERT(g_bserrno == -EINVAL);
4250 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
4251 
4252 	/* NULL values */
4253 	ut_spdk_blob_opts_init(&opts);
4254 	opts.xattrs.names = g_xattr_names;
4255 	opts.xattrs.get_value = _get_xattr_value_null;
4256 	opts.xattrs.count = 1;
4257 	opts.xattrs.ctx = NULL;
4258 
4259 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
4260 	poll_threads();
4261 	CU_ASSERT(g_bserrno == -EINVAL);
4262 }
4263 
/* Verify that resizing a thin-provisioned blob never allocates clusters:
 * grow to 5, then 262144 (1 TiB at the default cluster size), shrink to 3 —
 * the free-cluster count must stay constant throughout, md sync must not
 * change anything, and the logical size must survive a clean reload.
 */
static void
blob_thin_prov_alloc(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_blob_opts opts;
	spdk_blob_id blobid;
	uint64_t free_clusters;

	free_clusters = spdk_bs_free_cluster_count(bs);

	/* Set blob as thin provisioned */
	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = true;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);
	/* Creation of a thin blob must not consume any clusters. */
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));

	CU_ASSERT(blob->active.num_clusters == 0);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 0);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);

	/* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */
	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(blob->active.num_clusters == 5);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);

	/* Grow it to 1TB - still unallocated */
	spdk_blob_resize(blob, 262144, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(blob->active.num_clusters == 262144);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 262144);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);

	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	/* Sync must not change anything */
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(blob->active.num_clusters == 262144);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 262144);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);
	/* Since clusters are not allocated,
	 * number of metadata pages is expected to be minimal.
	 */
	CU_ASSERT(blob->active.num_pages == 1);

	/* Shrink the blob to 3 clusters - still unallocated */
	spdk_blob_resize(blob, 3, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(blob->active.num_clusters == 3);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 3);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);

	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	/* Sync must not change anything */
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(blob->active.num_clusters == 3);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 3);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	ut_bs_reload(&bs, NULL);

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;

	/* Check that clusters allocation and size is still the same */
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(blob->active.num_clusters == 3);

	ut_blob_close_and_delete(bs, blob);
}
4354 
/* Exercise the internal path that inserts an already-allocated cluster into a
 * blob's metadata on the md thread (blob_insert_cluster_on_md_thread), which
 * models a cluster allocated lazily after blob creation.  The insertion must
 * land in active.clusters[] and persist across a clean reload.
 */
static void
blob_insert_cluster_msg_test(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_blob_opts opts;
	/* For now, even if md_page_size is > 4KB, we still only use the first
	 * 4KB of it. The rest is left unused. Future changes may allow using the
	 * rest of the md_page, but that will require more extensive changes since
	 * then the struct spdk_blob_md_page cannot be used directly (since some
	 * fields such as crc would have variable placement in the struct).
	 */
	struct {
		struct spdk_blob_md_page page;
		uint8_t pad[DEV_MAX_PHYS_BLOCKLEN - sizeof(struct spdk_blob_md_page)];
	} md = {};
	spdk_blob_id blobid;
	uint64_t free_clusters;
	uint64_t new_cluster = 0;
	uint32_t cluster_num = 3;
	uint32_t extent_page = 0;

	free_clusters = spdk_bs_free_cluster_count(bs);

	/* Set blob as thin provisioned */
	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = true;
	opts.num_clusters = 4;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));

	CU_ASSERT(blob->active.num_clusters == 4);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 4);
	/* Target slot starts out unallocated (LBA 0). */
	CU_ASSERT(blob->active.clusters[cluster_num] == 0);

	/* Specify cluster_num to allocate and new_cluster will be returned to insert on md_thread.
	 * This is to simulate behaviour when cluster is allocated after blob creation.
	 * Such as _spdk_bs_allocate_and_copy_cluster(). */
	/* bs_allocate_cluster requires the used_lock; note it does NOT update
	 * the blob's cluster table yet - that is the md-thread insert's job. */
	spdk_spin_lock(&bs->used_lock);
	bs_allocate_cluster(blob, cluster_num, &new_cluster, &extent_page, false);
	CU_ASSERT(blob->active.clusters[cluster_num] == 0);
	spdk_spin_unlock(&bs->used_lock);

	blob_insert_cluster_on_md_thread(blob, cluster_num, new_cluster, extent_page, &md.page,
					 blob_op_complete, NULL);
	poll_threads();

	CU_ASSERT(blob->active.clusters[cluster_num] != 0);

	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	ut_bs_reload(&bs, NULL);

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;

	/* The inserted cluster must have been persisted with the md. */
	CU_ASSERT(blob->active.clusters[cluster_num] != 0);

	ut_blob_close_and_delete(bs, blob);
}
4422 
/* Read/write behavior of a thin-provisioned blob, including a two-thread race
 * on first-write cluster allocation: reads of unallocated clusters return
 * zeroes; concurrent writes from two threads to the same region must end up
 * consuming only one cluster once the losing allocation is rolled back.
 */
static void
blob_thin_prov_rw(void)
{
	static const uint8_t zero[10 * BLOCKLEN] = { 0 };
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob, *blob_id0;
	struct spdk_io_channel *channel, *channel_thread1;
	struct spdk_blob_opts opts;
	uint64_t free_clusters;
	uint64_t io_unit_size;
	uint8_t payload_read[10 * BLOCKLEN];
	uint8_t payload_write[10 * BLOCKLEN];
	uint64_t write_bytes;
	uint64_t read_bytes;
	uint64_t expected_bytes;

	free_clusters = spdk_bs_free_cluster_count(bs);
	io_unit_size = spdk_bs_get_io_unit_size(bs);

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = true;

	/* Create and delete blob at md page 0, so that next md page allocation
	 * for extent will use that. */
	blob_id0 = ut_blob_create_and_open(bs, &opts);
	blob = ut_blob_create_and_open(bs, &opts);
	ut_blob_close_and_delete(bs, blob_id0);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));

	CU_ASSERT(blob->active.num_clusters == 0);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);

	/* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */
	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(blob->active.num_clusters == 5);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);

	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	/* Sync must not change anything */
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(blob->active.num_clusters == 5);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);

	/* Payload should be all zeros from unallocated clusters */
	memset(payload_read, 0xFF, sizeof(payload_read));
	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(zero, payload_read, 10 * BLOCKLEN) == 0);

	/* Snapshot device byte counters to measure exactly what the writes cost. */
	write_bytes = g_dev_write_bytes;
	read_bytes = g_dev_read_bytes;

	/* Perform write on thread 1. That will allocate cluster on thread 0 via send_msg */
	set_thread(1);
	channel_thread1 = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel_thread1 != NULL);
	memset(payload_write, 0xE5, sizeof(payload_write));
	spdk_blob_io_write(blob, channel_thread1, payload_write, 4, 10, blob_op_complete, NULL);
	/* Cluster is claimed eagerly before the md-thread insert completes. */
	CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs));
	/* Perform write on thread 0. That will try to allocate cluster,
	 * but fail due to another thread issuing the cluster allocation first. */
	set_thread(0);
	memset(payload_write, 0xE5, sizeof(payload_write));
	spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
	/* Both in-flight writes hold a claimed cluster at this point... */
	CU_ASSERT(free_clusters - 2 == spdk_bs_free_cluster_count(bs));
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	/* ...but the losing allocation is released, leaving exactly one used. */
	CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 1);
	/* For thin-provisioned blob we need to write 20 io_units plus one page metadata and
	 * read 0 bytes */
	expected_bytes = 20 * io_unit_size + spdk_bs_get_page_size(bs);
	if (g_use_extent_table) {
		/* Add one more page for EXTENT_PAGE write */
		expected_bytes += spdk_bs_get_page_size(bs);
	}
	CU_ASSERT(g_dev_write_bytes - write_bytes == expected_bytes);
	CU_ASSERT(g_dev_read_bytes - read_bytes == 0);

	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_write, payload_read, 10 * BLOCKLEN) == 0);

	ut_blob_close_and_delete(bs, blob);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));

	/* Channels must be freed on the thread that allocated them. */
	set_thread(1);
	spdk_bs_free_io_channel(channel_thread1);
	set_thread(0);
	spdk_bs_free_io_channel(channel);
	poll_threads();
	g_blob = NULL;
	g_blobid = 0;
}
4527 
/* Verify the exact number of device bytes written when first-writes to a
 * thin-provisioned blob allocate clusters, using a deliberately tiny cluster
 * size so the blob spans 8 extent pages.  Per iteration: a first write to a
 * new extent page costs the I/O plus md page(s); a second write within the
 * same extent page costs the I/O plus one md page; a whole-cluster unmap
 * frees the cluster; a redundant sync writes nothing.
 */
static void
blob_thin_prov_write_count_io(void)
{
	struct spdk_blob_store *bs;
	struct spdk_blob *blob;
	struct spdk_io_channel *ch;
	struct spdk_bs_dev *dev;
	struct spdk_bs_opts bs_opts;
	struct spdk_blob_opts opts;
	uint64_t free_clusters;
	uint64_t io_unit_size;
	uint8_t payload_write[BLOCKLEN];
	uint64_t write_bytes;
	uint64_t read_bytes;
	uint64_t expected_bytes;
	const uint32_t CLUSTER_SZ = g_phys_blocklen * 4;
	uint32_t io_units_per_cluster;
	uint32_t io_units_per_extent_page;
	uint32_t i;

	/* Use a very small cluster size for this test.  This ensures we need multiple
	 * extent pages to hold all of the clusters even for relatively small blobs like
	 * we are restricted to for the unit tests (i.e. we don't want to allocate multi-GB
	 * buffers).
	 */
	dev = init_dev();
	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
	bs_opts.cluster_sz = CLUSTER_SZ;

	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	free_clusters = spdk_bs_free_cluster_count(bs);
	io_unit_size = spdk_bs_get_io_unit_size(bs);
	io_units_per_cluster = CLUSTER_SZ / io_unit_size;
	io_units_per_extent_page = SPDK_EXTENTS_PER_EP * io_units_per_cluster;

	ch = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(ch != NULL);

	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = true;

	blob = ut_blob_create_and_open(bs, &opts);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));

	/* Resize the blob so that it will require 8 extent pages to hold all of
	 * the clusters.
	 */
	g_bserrno = -1;
	spdk_blob_resize(blob, SPDK_EXTENTS_PER_EP * 8, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	g_bserrno = -1;
	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(blob->active.num_clusters == SPDK_EXTENTS_PER_EP * 8);

	/* Each iteration targets a different extent page; each leaves the blob
	 * with 2 * (i + 1) allocated clusters. */
	memset(payload_write, 0, sizeof(payload_write));
	for (i = 0; i < 8; i++) {
		write_bytes = g_dev_write_bytes;
		read_bytes = g_dev_read_bytes;

		/* First write into extent page i - allocates one cluster. */
		g_bserrno = -1;
		spdk_blob_io_write(blob, ch, payload_write, io_units_per_extent_page * i, 1, blob_op_complete,
				   NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(free_clusters - (2 * i + 1) == spdk_bs_free_cluster_count(bs));

		CU_ASSERT(g_dev_read_bytes == read_bytes);
		if (!g_use_extent_table) {
			/* For legacy metadata, we should have written the io_unit for
			 * the write I/O, plus the blob's primary metadata page
			 */
			expected_bytes = io_unit_size + spdk_bs_get_page_size(bs);
		} else {
			/* For extent table metadata, we should have written the io_unit for
			 * the write I/O, plus 2 metadata pages - the extent page and the
			 * blob's primary metadata page
			 */
			expected_bytes = io_unit_size + 2 * spdk_bs_get_page_size(bs);
		}
		CU_ASSERT((g_dev_write_bytes - write_bytes) == expected_bytes);

		/* The write should have synced the metadata already.  Do another sync here
		 * just to confirm.
		 */
		write_bytes = g_dev_write_bytes;
		read_bytes = g_dev_read_bytes;

		g_bserrno = -1;
		spdk_blob_sync_md(blob, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(free_clusters - (2 * i + 1) == spdk_bs_free_cluster_count(bs));
		CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 2 * i + 1);

		/* Redundant sync must not touch the device at all. */
		CU_ASSERT(g_dev_read_bytes == read_bytes);
		CU_ASSERT(g_dev_write_bytes == write_bytes);

		/* Now write to another unallocated cluster that is part of the same extent page. */
		g_bserrno = -1;
		spdk_blob_io_write(blob, ch, payload_write, io_units_per_extent_page * i + io_units_per_cluster,
				   1, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(free_clusters - (2 * i + 2) == spdk_bs_free_cluster_count(bs));
		CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 2 * i + 2);

		CU_ASSERT(g_dev_read_bytes == read_bytes);
		/*
		 * For legacy metadata, we should have written the I/O and the primary metadata page.
		 * For extent table metadata, we should have written the I/O and the extent metadata page.
		 */
		expected_bytes = io_unit_size + spdk_bs_get_page_size(bs);
		CU_ASSERT((g_dev_write_bytes - write_bytes) == expected_bytes);

		/* Send unmap aligned to the whole cluster - should free it up */
		g_bserrno = -1;
		spdk_blob_io_unmap(blob, ch, io_units_per_extent_page * i, io_units_per_cluster, blob_op_complete,
				   NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(free_clusters - (2 * i + 1) == spdk_bs_free_cluster_count(bs));

		/* Write back to the freed cluster */
		g_bserrno = -1;
		spdk_blob_io_write(blob, ch, payload_write, io_units_per_extent_page * i, 1, blob_op_complete,
				   NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(free_clusters - (2 * i + 2) == spdk_bs_free_cluster_count(bs));
	}

	ut_blob_close_and_delete(bs, blob);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));

	spdk_bs_free_io_channel(ch);
	poll_threads();
	g_blob = NULL;
	g_blobid = 0;

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
}
4682 
4683 static void
4684 blob_thin_prov_unmap_cluster(void)
4685 {
4686 	struct spdk_blob_store *bs;
4687 	struct spdk_blob *blob, *snapshot;
4688 	struct spdk_io_channel *ch;
4689 	struct spdk_bs_dev *dev;
4690 	struct spdk_bs_opts bs_opts;
4691 	struct spdk_blob_opts opts;
4692 	uint64_t free_clusters;
4693 	uint64_t io_unit_size;
4694 	uint8_t payload_write[BLOCKLEN];
4695 	uint8_t payload_read[BLOCKLEN];
4696 	const uint32_t CLUSTER_COUNT = 3;
4697 	uint32_t io_units_per_cluster;
4698 	spdk_blob_id blobid, snapshotid;
4699 	uint32_t i;
4700 	int err;
4701 
4702 	/* Use a very large cluster size for this test. Check how the unmap/release cluster code path behaves when
4703 	 * clusters are fully used.
4704 	 */
4705 	dev = init_dev();
4706 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
4707 	bs_opts.cluster_sz = dev->blocklen * dev->blockcnt / (CLUSTER_COUNT + 1);
4708 
4709 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
4710 	poll_threads();
4711 	CU_ASSERT(g_bserrno == 0);
4712 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
4713 	bs = g_bs;
4714 
4715 	free_clusters = spdk_bs_free_cluster_count(bs);
4716 	io_unit_size = spdk_bs_get_io_unit_size(bs);
4717 	io_units_per_cluster = bs_opts.cluster_sz / io_unit_size;
4718 
4719 	ch = spdk_bs_alloc_io_channel(bs);
4720 	SPDK_CU_ASSERT_FATAL(ch != NULL);
4721 
4722 	ut_spdk_blob_opts_init(&opts);
4723 	opts.thin_provision = true;
4724 
4725 	blob = ut_blob_create_and_open(bs, &opts);
4726 	CU_ASSERT(free_clusters == CLUSTER_COUNT);
4727 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4728 	blobid = spdk_blob_get_id(blob);
4729 
4730 	g_bserrno = -1;
4731 	spdk_blob_resize(blob, CLUSTER_COUNT, blob_op_complete, NULL);
4732 	poll_threads();
4733 	CU_ASSERT(g_bserrno == 0);
4734 
4735 	g_bserrno = -1;
4736 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
4737 	poll_threads();
4738 	CU_ASSERT(g_bserrno == 0);
4739 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4740 	CU_ASSERT(blob->active.num_clusters == CLUSTER_COUNT);
4741 
4742 	/* Fill all clusters */
4743 	for (i = 0; i < CLUSTER_COUNT; i++) {
4744 		memset(payload_write, i + 1, sizeof(payload_write));
4745 		g_bserrno = -1;
4746 		spdk_blob_io_write(blob, ch, payload_write, io_units_per_cluster * i, 1, blob_op_complete, NULL);
4747 		poll_threads();
4748 		CU_ASSERT(g_bserrno == 0);
4749 		CU_ASSERT(free_clusters - (i + 1) == spdk_bs_free_cluster_count(bs));
4750 	}
4751 	CU_ASSERT(0 == spdk_bs_free_cluster_count(bs));
4752 
4753 	/* Unmap one whole cluster */
4754 	g_bserrno = -1;
4755 	spdk_blob_io_unmap(blob, ch, io_units_per_cluster, io_units_per_cluster, blob_op_complete, NULL);
4756 	poll_threads();
4757 	CU_ASSERT(g_bserrno == 0);
4758 	CU_ASSERT(1 == spdk_bs_free_cluster_count(bs));
4759 
4760 	/* Verify the data read from the cluster is zeroed out */
4761 	memset(payload_write, 0, sizeof(payload_write));
4762 	spdk_blob_io_read(blob, ch, payload_read, io_units_per_cluster, 1, blob_op_complete, NULL);
4763 	poll_threads();
4764 	CU_ASSERT(g_bserrno == 0);
4765 	CU_ASSERT(memcmp(payload_write, payload_read, BLOCKLEN) == 0);
4766 
4767 	/* Fill the same cluster with data */
4768 	memset(payload_write, 3, sizeof(payload_write));
4769 	g_bserrno = -1;
4770 	spdk_blob_io_write(blob, ch, payload_write, io_units_per_cluster, 1, blob_op_complete, NULL);
4771 	poll_threads();
4772 	CU_ASSERT(g_bserrno == 0);
4773 	CU_ASSERT(0 == spdk_bs_free_cluster_count(bs));
4774 
4775 	/* Verify the data read from the cluster has the expected data */
4776 	spdk_blob_io_read(blob, ch, payload_read, io_units_per_cluster, 1, blob_op_complete, NULL);
4777 	poll_threads();
4778 	CU_ASSERT(g_bserrno == 0);
4779 	CU_ASSERT(memcmp(payload_write, payload_read, BLOCKLEN) == 0);
4780 
4781 	/* Send an unaligned unmap that ecompasses one whole cluster */
4782 	g_bserrno = -1;
4783 	spdk_blob_io_unmap(blob, ch, io_units_per_cluster - 1, io_units_per_cluster + 2, blob_op_complete,
4784 			   NULL);
4785 	poll_threads();
4786 	CU_ASSERT(g_bserrno == 0);
4787 	CU_ASSERT(1 == spdk_bs_free_cluster_count(bs));
4788 
4789 	/* Verify the data read from the cluster is zeroed out */
4790 	g_bserrno = -1;
4791 	memset(payload_write, 0, sizeof(payload_write));
4792 	spdk_blob_io_read(blob, ch, payload_read, io_units_per_cluster, 1, blob_op_complete, NULL);
4793 	poll_threads();
4794 	CU_ASSERT(g_bserrno == 0);
4795 	CU_ASSERT(memcmp(payload_write, payload_read, BLOCKLEN) == 0);
4796 
4797 	/* Send a simultaneous unmap with a write to an unallocated area -
4798 	 * check that writes don't claim the currently unmapped cluster */
4799 	g_bserrno = -1;
4800 	memset(payload_write, 7, sizeof(payload_write));
4801 	spdk_blob_io_unmap(blob, ch, 0, io_units_per_cluster, blob_op_complete, NULL);
4802 	spdk_blob_io_write(blob, ch, payload_write, io_units_per_cluster, 1, blob_op_complete, NULL);
4803 	poll_threads();
4804 	CU_ASSERT(g_bserrno == 0);
4805 	CU_ASSERT(1 == spdk_bs_free_cluster_count(bs));
4806 
4807 	/* Verify the contents of written sector */
4808 	g_bserrno = -1;
4809 	spdk_blob_io_read(blob, ch, payload_read, io_units_per_cluster, 1, blob_op_complete, NULL);
4810 	poll_threads();
4811 	CU_ASSERT(g_bserrno == 0);
4812 	CU_ASSERT(memcmp(payload_write, payload_read, BLOCKLEN) == 0);
4813 
4814 	/* Verify the contents of unmapped sector */
4815 	g_bserrno = -1;
4816 	memset(payload_write, 0, sizeof(payload_write));
4817 	spdk_blob_io_read(blob, ch, payload_read, 0, 1, blob_op_complete, NULL);
4818 	poll_threads();
4819 	CU_ASSERT(g_bserrno == 0);
4820 	CU_ASSERT(memcmp(payload_write, payload_read, BLOCKLEN) == 0);
4821 
4822 	/* Make sure clusters are not freed until the unmap to the drive is done */
4823 	g_bserrno = -1;
4824 	memset(payload_write, 7, sizeof(payload_write));
4825 	spdk_blob_io_write(blob, ch, payload_write, 0, 1, blob_op_complete, NULL);
4826 	poll_threads();
4827 	CU_ASSERT(g_bserrno == 0);
4828 	CU_ASSERT(0 == spdk_bs_free_cluster_count(bs));
4829 
4830 	g_bserrno = -1;
4831 	spdk_blob_io_unmap(blob, ch, 0, io_units_per_cluster, blob_op_complete, NULL);
4832 	while (memcmp(payload_write, &g_dev_buffer[BLOCKLEN * io_units_per_cluster], BLOCKLEN) == 0) {
4833 		CU_ASSERT(0 == spdk_bs_free_cluster_count(bs));
4834 		poll_thread_times(0, 1);
4835 	}
4836 	poll_threads();
4837 	CU_ASSERT(g_bserrno == 0);
4838 	CU_ASSERT(1 == spdk_bs_free_cluster_count(bs));
4839 
4840 	/* Issue #3358 had a bug with concurrent trims to the same cluster causing an assert, check for regressions.
4841 	 * Send three concurrent unmaps to the same cluster.
4842 	 */
4843 	g_bserrno = -1;
4844 	memset(payload_write, 7, sizeof(payload_write));
4845 	spdk_blob_io_write(blob, ch, payload_write, 0, 1, blob_op_complete, NULL);
4846 	poll_threads();
4847 	CU_ASSERT(g_bserrno == 0);
4848 	CU_ASSERT(0 == spdk_bs_free_cluster_count(bs));
4849 
4850 	g_bserrno = -1;
4851 	err = -1;
4852 	spdk_blob_io_unmap(blob, ch, 0, io_units_per_cluster, blob_op_complete, NULL);
4853 	spdk_blob_io_unmap(blob, ch, 0, io_units_per_cluster, blob_op_complete, NULL);
4854 	spdk_blob_io_unmap(blob, ch, 0, io_units_per_cluster, blob_op_complete, &err);
4855 	poll_threads();
4856 	CU_ASSERT(g_bserrno == 0);
4857 	CU_ASSERT(err == 0);
4858 	CU_ASSERT(1 == spdk_bs_free_cluster_count(bs));
4859 
4860 	/* Test thin-provisioned blob that is backed */
4861 	spdk_blob_resize(blob, 1, blob_op_complete, NULL);
4862 	poll_threads();
4863 	CU_ASSERT(g_bserrno == 0);
4864 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
4865 	poll_threads();
4866 	CU_ASSERT(g_bserrno == 0);
4867 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4868 
4869 	g_bserrno = -1;
4870 	memset(payload_write, 1, sizeof(payload_write));
4871 	spdk_blob_io_write(blob, ch, payload_write, 0, 1, blob_op_complete, NULL);
4872 	poll_threads();
4873 	CU_ASSERT(g_bserrno == 0);
4874 	CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs));
4875 
4876 	/* Create a snapshot */
4877 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 0);
4878 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
4879 	poll_threads();
4880 	CU_ASSERT(g_bserrno == 0);
4881 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
4882 	CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
4883 	snapshotid = g_blobid;
4884 	CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs));
4885 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
4886 	poll_threads();
4887 	CU_ASSERT(g_bserrno == 0);
4888 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
4889 	snapshot = g_blob;
4890 
4891 	/* Write data to blob, it will alloc new cluster */
4892 	g_bserrno = -1;
4893 	memset(payload_write, 2, sizeof(payload_write));
4894 	spdk_blob_io_write(blob, ch, payload_write, 0, 1, blob_op_complete, NULL);
4895 	poll_threads();
4896 	CU_ASSERT(g_bserrno == 0);
4897 	CU_ASSERT(free_clusters - 2 == spdk_bs_free_cluster_count(bs));
4898 
4899 	/* Unmap one whole cluster, but do not release this cluster */
4900 	g_bserrno = -1;
4901 	spdk_blob_io_unmap(blob, ch, 0, io_units_per_cluster, blob_op_complete, NULL);
4902 	poll_threads();
4903 	CU_ASSERT(g_bserrno == 0);
4904 	CU_ASSERT(free_clusters - 2 == spdk_bs_free_cluster_count(bs));
4905 
4906 	/* Verify the data read from the cluster is zeroed out */
4907 	g_bserrno = -1;
4908 	memset(payload_write, 0, sizeof(payload_write));
4909 	spdk_blob_io_read(blob, ch, payload_read, 0, 1, blob_op_complete, NULL);
4910 	poll_threads();
4911 	CU_ASSERT(g_bserrno == 0);
4912 	CU_ASSERT(memcmp(payload_write, payload_read, BLOCKLEN) == 0);
4913 
4914 	ut_blob_close_and_delete(bs, blob);
4915 	ut_blob_close_and_delete(bs, snapshot);
4916 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4917 
4918 	spdk_bs_free_io_channel(ch);
4919 	poll_threads();
4920 	g_blob = NULL;
4921 	g_blobid = 0;
4922 
4923 	spdk_bs_unload(bs, bs_op_complete, NULL);
4924 	poll_threads();
4925 	CU_ASSERT(g_bserrno == 0);
4926 	g_bs = NULL;
4927 }
4928 
4929 static void
4930 blob_thin_prov_rle(void)
4931 {
4932 	static const uint8_t zero[10 * BLOCKLEN] = { 0 };
4933 	struct spdk_blob_store *bs = g_bs;
4934 	struct spdk_blob *blob;
4935 	struct spdk_io_channel *channel;
4936 	struct spdk_blob_opts opts;
4937 	spdk_blob_id blobid;
4938 	uint64_t free_clusters;
4939 	uint64_t io_unit_size;
4940 	uint8_t payload_read[10 * BLOCKLEN];
4941 	uint8_t payload_write[10 * BLOCKLEN];
4942 	uint64_t write_bytes;
4943 	uint64_t read_bytes;
4944 	uint64_t expected_bytes;
4945 	uint64_t io_unit;
4946 
4947 	/* assert that the stack variables above are of correct size */
4948 	CU_ASSERT(spdk_bs_get_io_unit_size(bs) == BLOCKLEN);
4949 
4950 	free_clusters = spdk_bs_free_cluster_count(bs);
4951 	io_unit_size = spdk_bs_get_io_unit_size(bs);
4952 
4953 	ut_spdk_blob_opts_init(&opts);
4954 	opts.thin_provision = true;
4955 	opts.num_clusters = 5;
4956 
4957 	blob = ut_blob_create_and_open(bs, &opts);
4958 	blobid = spdk_blob_get_id(blob);
4959 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
4960 
4961 	channel = spdk_bs_alloc_io_channel(bs);
4962 	CU_ASSERT(channel != NULL);
4963 
4964 	/* Target specifically second cluster in a blob as first allocation */
4965 	io_unit = bs_cluster_to_io_unit(bs, 1);
4966 
4967 	/* Payload should be all zeros from unallocated clusters */
4968 	memset(payload_read, 0xFF, sizeof(payload_read));
4969 	spdk_blob_io_read(blob, channel, payload_read, io_unit, 10, blob_op_complete, NULL);
4970 	poll_threads();
4971 	CU_ASSERT(g_bserrno == 0);
4972 	CU_ASSERT(memcmp(zero, payload_read, 10 * BLOCKLEN) == 0);
4973 
4974 	write_bytes = g_dev_write_bytes;
4975 	read_bytes = g_dev_read_bytes;
4976 
4977 	/* Issue write to second cluster in a blob */
4978 	memset(payload_write, 0xE5, sizeof(payload_write));
4979 	spdk_blob_io_write(blob, channel, payload_write, io_unit, 10, blob_op_complete, NULL);
4980 	poll_threads();
4981 	CU_ASSERT(g_bserrno == 0);
4982 	CU_ASSERT(free_clusters - 1 == spdk_bs_free_cluster_count(bs));
4983 	/* For thin-provisioned blob we need to write 10 pages plus one page metadata and
4984 	 * read 0 bytes */
4985 	expected_bytes = 10 * io_unit_size + spdk_bs_get_page_size(bs);
4986 	if (g_use_extent_table) {
4987 		/* Add one more page for EXTENT_PAGE write */
4988 		expected_bytes += spdk_bs_get_page_size(bs);
4989 	}
4990 	CU_ASSERT(g_dev_write_bytes - write_bytes == expected_bytes);
4991 	CU_ASSERT(g_dev_read_bytes - read_bytes == 0);
4992 
4993 	spdk_blob_io_read(blob, channel, payload_read, io_unit, 10, blob_op_complete, NULL);
4994 	poll_threads();
4995 	CU_ASSERT(g_bserrno == 0);
4996 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * BLOCKLEN) == 0);
4997 
4998 	spdk_bs_free_io_channel(channel);
4999 	poll_threads();
5000 
5001 	spdk_blob_close(blob, blob_op_complete, NULL);
5002 	poll_threads();
5003 	CU_ASSERT(g_bserrno == 0);
5004 
5005 	ut_bs_reload(&bs, NULL);
5006 
5007 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
5008 	poll_threads();
5009 	CU_ASSERT(g_bserrno == 0);
5010 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5011 	blob = g_blob;
5012 
5013 	channel = spdk_bs_alloc_io_channel(bs);
5014 	CU_ASSERT(channel != NULL);
5015 
5016 	/* Read second cluster after blob reload to confirm data written */
5017 	spdk_blob_io_read(blob, channel, payload_read, io_unit, 10, blob_op_complete, NULL);
5018 	poll_threads();
5019 	CU_ASSERT(g_bserrno == 0);
5020 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * BLOCKLEN) == 0);
5021 
5022 	spdk_bs_free_io_channel(channel);
5023 	poll_threads();
5024 
5025 	ut_blob_close_and_delete(bs, blob);
5026 }
5027 
5028 static void
5029 blob_thin_prov_rw_iov(void)
5030 {
5031 	static const uint8_t zero[10 * BLOCKLEN] = { 0 };
5032 	struct spdk_blob_store *bs = g_bs;
5033 	struct spdk_blob *blob;
5034 	struct spdk_io_channel *channel;
5035 	struct spdk_blob_opts opts;
5036 	uint64_t free_clusters;
5037 	uint8_t payload_read[10 * BLOCKLEN];
5038 	uint8_t payload_write[10 * BLOCKLEN];
5039 	struct iovec iov_read[3];
5040 	struct iovec iov_write[3];
5041 
5042 	free_clusters = spdk_bs_free_cluster_count(bs);
5043 
5044 	channel = spdk_bs_alloc_io_channel(bs);
5045 	CU_ASSERT(channel != NULL);
5046 
5047 	ut_spdk_blob_opts_init(&opts);
5048 	opts.thin_provision = true;
5049 
5050 	blob = ut_blob_create_and_open(bs, &opts);
5051 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
5052 
5053 	CU_ASSERT(blob->active.num_clusters == 0);
5054 
5055 	/* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */
5056 	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
5057 	poll_threads();
5058 	CU_ASSERT(g_bserrno == 0);
5059 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
5060 	CU_ASSERT(blob->active.num_clusters == 5);
5061 
5062 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
5063 	poll_threads();
5064 	CU_ASSERT(g_bserrno == 0);
5065 	/* Sync must not change anything */
5066 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
5067 	CU_ASSERT(blob->active.num_clusters == 5);
5068 
5069 	/* Payload should be all zeros from unallocated clusters */
5070 	memset(payload_read, 0xAA, sizeof(payload_read));
5071 	iov_read[0].iov_base = payload_read;
5072 	iov_read[0].iov_len = 3 * BLOCKLEN;
5073 	iov_read[1].iov_base = payload_read + 3 * BLOCKLEN;
5074 	iov_read[1].iov_len = 4 * BLOCKLEN;
5075 	iov_read[2].iov_base = payload_read + 7 * BLOCKLEN;
5076 	iov_read[2].iov_len = 3 * BLOCKLEN;
5077 	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
5078 	poll_threads();
5079 	CU_ASSERT(g_bserrno == 0);
5080 	CU_ASSERT(memcmp(zero, payload_read, 10 * BLOCKLEN) == 0);
5081 
5082 	memset(payload_write, 0xE5, sizeof(payload_write));
5083 	iov_write[0].iov_base = payload_write;
5084 	iov_write[0].iov_len = 1 * BLOCKLEN;
5085 	iov_write[1].iov_base = payload_write + 1 * BLOCKLEN;
5086 	iov_write[1].iov_len = 5 * BLOCKLEN;
5087 	iov_write[2].iov_base = payload_write + 6 * BLOCKLEN;
5088 	iov_write[2].iov_len = 4 * BLOCKLEN;
5089 
5090 	spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
5091 	poll_threads();
5092 	CU_ASSERT(g_bserrno == 0);
5093 
5094 	memset(payload_read, 0xAA, sizeof(payload_read));
5095 	iov_read[0].iov_base = payload_read;
5096 	iov_read[0].iov_len = 3 * BLOCKLEN;
5097 	iov_read[1].iov_base = payload_read + 3 * BLOCKLEN;
5098 	iov_read[1].iov_len = 4 * BLOCKLEN;
5099 	iov_read[2].iov_base = payload_read + 7 * BLOCKLEN;
5100 	iov_read[2].iov_len = 3 * BLOCKLEN;
5101 	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
5102 	poll_threads();
5103 	CU_ASSERT(g_bserrno == 0);
5104 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * BLOCKLEN) == 0);
5105 
5106 	spdk_bs_free_io_channel(channel);
5107 	poll_threads();
5108 
5109 	ut_blob_close_and_delete(bs, blob);
5110 }
5111 
/* Shared state for the bs_load iteration callback (test_iter): records the
 * blob IDs in creation order and tracks which one the iterator should see
 * next, so the callback can assert iteration order matches creation order. */
struct iter_ctx {
	int		current_iter;	/* index of the next expected blob ID */
	spdk_blob_id	blobid[4];	/* blob IDs in the order they were created */
};
5116 
5117 static void
5118 test_iter(void *arg, struct spdk_blob *blob, int bserrno)
5119 {
5120 	struct iter_ctx *iter_ctx = arg;
5121 	spdk_blob_id blobid;
5122 
5123 	CU_ASSERT(bserrno == 0);
5124 	blobid = spdk_blob_get_id(blob);
5125 	CU_ASSERT(blobid == iter_ctx->blobid[iter_ctx->current_iter++]);
5126 }
5127 
/*
 * Verify the blob-iteration callback (spdk_bs_opts.iter_cb_fn) fires once per
 * blob, in creation order, both when loading after a clean shutdown
 * (spdk_bs_unload) and after a dirty shutdown (bs_free with no unload, which
 * forces metadata recovery on the next load).
 */
static void
bs_load_iter_test(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct iter_ctx iter_ctx = { 0 };
	struct spdk_blob *blob;
	int i, rc;
	struct spdk_bs_opts opts;

	dev = init_dev();
	spdk_bs_opts_init(&opts, sizeof(opts));
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");

	/* Initialize a new blob store */
	spdk_bs_init(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	/* Create 4 blobs, remembering their IDs in creation order so test_iter
	 * can check the iteration order on load. */
	for (i = 0; i < 4; i++) {
		blob = ut_blob_create_and_open(bs, NULL);
		iter_ctx.blobid[i] = spdk_blob_get_id(blob);

		/* Just save the blobid as an xattr for testing purposes. */
		rc = spdk_blob_set_xattr(blob, "blobid", &iter_ctx.blobid[i], sizeof(spdk_blob_id));
		CU_ASSERT(rc == 0);

		/* Resize the blob */
		spdk_blob_resize(blob, i, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);

		spdk_blob_close(blob, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
	}

	g_bserrno = -1;
	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	dev = init_dev();
	spdk_bs_opts_init(&opts, sizeof(opts));
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
	opts.iter_cb_fn = test_iter;
	opts.iter_cb_arg = &iter_ctx;

	/* Test blob iteration during load after a clean shutdown. */
	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	/* Dirty shutdown */
	bs_free(bs);

	dev = init_dev();
	spdk_bs_opts_init(&opts, sizeof(opts));
	snprintf(opts.bstype.bstype, sizeof(opts.bstype.bstype), "TESTTYPE");
	opts.iter_cb_fn = test_iter;
	/* Restart the expected-order cursor before the second iteration pass. */
	iter_ctx.current_iter = 0;
	opts.iter_cb_arg = &iter_ctx;

	/* Test blob iteration during load after a dirty shutdown. */
	spdk_bs_load(dev, &opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
}
5207 
/*
 * Copy-on-write behavior of a clone after snapshotting: a write to a region
 * the snapshot owns must trigger a cluster copy (via device copy when
 * g_dev_copy_enabled, otherwise read+write), the clone must see the new data,
 * and the snapshot's data must remain unchanged.  Device byte counters are
 * checked against the exact expected copy/write/read amounts.
 */
static void
blob_snapshot_rw(void)
{
	static const uint8_t zero[10 * BLOCKLEN] = { 0 };
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob, *snapshot;
	struct spdk_io_channel *channel;
	struct spdk_blob_opts opts;
	spdk_blob_id blobid, snapshotid;
	uint64_t free_clusters;
	uint64_t cluster_size;
	uint64_t io_unit_size;
	uint8_t payload_read[10 * BLOCKLEN];
	uint8_t payload_write[10 * BLOCKLEN];
	uint64_t write_bytes_start;
	uint64_t read_bytes_start;
	uint64_t copy_bytes_start;
	uint64_t write_bytes;
	uint64_t read_bytes;
	uint64_t copy_bytes;
	uint64_t expected_bytes;

	free_clusters = spdk_bs_free_cluster_count(bs);
	cluster_size = spdk_bs_get_cluster_size(bs);
	io_unit_size = spdk_bs_get_io_unit_size(bs);

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = true;
	opts.num_clusters = 5;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));

	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);

	/* Unallocated clusters read back as zeroes. */
	memset(payload_read, 0xFF, sizeof(payload_read));
	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(zero, payload_read, 10 * BLOCKLEN) == 0);

	/* Populate the blob before snapshotting so the snapshot owns the cluster. */
	memset(payload_write, 0xE5, sizeof(payload_write));
	spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs));

	/* Create snapshot from blob */
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snapshotid = g_blobid;

	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot = g_blob;
	CU_ASSERT(snapshot->data_ro == true);
	CU_ASSERT(snapshot->md_ro == true);

	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5);

	/* Snapshot device counters so the COW cost can be measured exactly. */
	write_bytes_start = g_dev_write_bytes;
	read_bytes_start = g_dev_read_bytes;
	copy_bytes_start = g_dev_copy_bytes;

	memset(payload_write, 0xAA, sizeof(payload_write));
	spdk_blob_io_write(blob, channel, payload_write, 4, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs));

	/* For a clone we need to allocate and copy one cluster, update one page of metadata
	 * and then write 10 io units of payload.
	 */
	write_bytes = g_dev_write_bytes - write_bytes_start;
	read_bytes = g_dev_read_bytes - read_bytes_start;
	copy_bytes = g_dev_copy_bytes - copy_bytes_start;
	if (g_dev_copy_enabled) {
		CU_ASSERT(copy_bytes == cluster_size);
	} else {
		CU_ASSERT(copy_bytes == 0);
	}
	expected_bytes = 10 * io_unit_size + cluster_size + spdk_bs_get_page_size(bs);
	if (g_use_extent_table) {
		/* Add one more page for EXTENT_PAGE write */
		expected_bytes += spdk_bs_get_page_size(bs);
	}
	/* When copy offload is on, copy_bytes replaces the equivalent amount of
	 * separate reads and writes, so both identities below hold either way. */
	CU_ASSERT(write_bytes + copy_bytes == expected_bytes);
	CU_ASSERT(read_bytes + copy_bytes == cluster_size);

	spdk_blob_io_read(blob, channel, payload_read, 4, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_write, payload_read, 10 * BLOCKLEN) == 0);

	/* Data on snapshot should not change after write to clone */
	memset(payload_write, 0xE5, sizeof(payload_write));
	spdk_blob_io_read(snapshot, channel, payload_read, 4, 10, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_write, payload_read, 10 * BLOCKLEN) == 0);

	ut_blob_close_and_delete(bs, blob);
	ut_blob_close_and_delete(bs, snapshot);

	spdk_bs_free_io_channel(channel);
	poll_threads();
	g_blob = NULL;
	g_blobid = 0;
}
5325 
5326 static void
5327 blob_snapshot_rw_iov(void)
5328 {
5329 	static const uint8_t zero[10 * BLOCKLEN] = { 0 };
5330 	struct spdk_blob_store *bs = g_bs;
5331 	struct spdk_blob *blob, *snapshot;
5332 	struct spdk_io_channel *channel;
5333 	struct spdk_blob_opts opts;
5334 	spdk_blob_id blobid, snapshotid;
5335 	uint64_t free_clusters;
5336 	uint8_t payload_read[10 * BLOCKLEN];
5337 	uint8_t payload_write[10 * BLOCKLEN];
5338 	struct iovec iov_read[3];
5339 	struct iovec iov_write[3];
5340 
5341 	free_clusters = spdk_bs_free_cluster_count(bs);
5342 
5343 	channel = spdk_bs_alloc_io_channel(bs);
5344 	CU_ASSERT(channel != NULL);
5345 
5346 	ut_spdk_blob_opts_init(&opts);
5347 	opts.thin_provision = true;
5348 	opts.num_clusters = 5;
5349 
5350 	blob = ut_blob_create_and_open(bs, &opts);
5351 	blobid = spdk_blob_get_id(blob);
5352 	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
5353 
5354 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);
5355 
5356 	/* Create snapshot from blob */
5357 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5358 	poll_threads();
5359 	CU_ASSERT(g_bserrno == 0);
5360 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5361 	snapshotid = g_blobid;
5362 
5363 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
5364 	poll_threads();
5365 	CU_ASSERT(g_bserrno == 0);
5366 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5367 	snapshot = g_blob;
5368 	CU_ASSERT(snapshot->data_ro == true);
5369 	CU_ASSERT(snapshot->md_ro == true);
5370 	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5);
5371 
5372 	/* Payload should be all zeros from unallocated clusters */
5373 	memset(payload_read, 0xAA, sizeof(payload_read));
5374 	iov_read[0].iov_base = payload_read;
5375 	iov_read[0].iov_len = 3 * BLOCKLEN;
5376 	iov_read[1].iov_base = payload_read + 3 * BLOCKLEN;
5377 	iov_read[1].iov_len = 4 * BLOCKLEN;
5378 	iov_read[2].iov_base = payload_read + 7 * BLOCKLEN;
5379 	iov_read[2].iov_len = 3 * BLOCKLEN;
5380 	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
5381 	poll_threads();
5382 	CU_ASSERT(g_bserrno == 0);
5383 	CU_ASSERT(memcmp(zero, payload_read, 10 * BLOCKLEN) == 0);
5384 
5385 	memset(payload_write, 0xE5, sizeof(payload_write));
5386 	iov_write[0].iov_base = payload_write;
5387 	iov_write[0].iov_len = 1 * BLOCKLEN;
5388 	iov_write[1].iov_base = payload_write + 1 * BLOCKLEN;
5389 	iov_write[1].iov_len = 5 * BLOCKLEN;
5390 	iov_write[2].iov_base = payload_write + 6 * BLOCKLEN;
5391 	iov_write[2].iov_len = 4 * BLOCKLEN;
5392 
5393 	spdk_blob_io_writev(blob, channel, iov_write, 3, 250, 10, blob_op_complete, NULL);
5394 	poll_threads();
5395 	CU_ASSERT(g_bserrno == 0);
5396 
5397 	memset(payload_read, 0xAA, sizeof(payload_read));
5398 	iov_read[0].iov_base = payload_read;
5399 	iov_read[0].iov_len = 3 * BLOCKLEN;
5400 	iov_read[1].iov_base = payload_read + 3 * BLOCKLEN;
5401 	iov_read[1].iov_len = 4 * BLOCKLEN;
5402 	iov_read[2].iov_base = payload_read + 7 * BLOCKLEN;
5403 	iov_read[2].iov_len = 3 * BLOCKLEN;
5404 	spdk_blob_io_readv(blob, channel, iov_read, 3, 250, 10, blob_op_complete, NULL);
5405 	poll_threads();
5406 	CU_ASSERT(g_bserrno == 0);
5407 	CU_ASSERT(memcmp(payload_write, payload_read, 10 * BLOCKLEN) == 0);
5408 
5409 	spdk_bs_free_io_channel(channel);
5410 	poll_threads();
5411 
5412 	ut_blob_close_and_delete(bs, blob);
5413 	ut_blob_close_and_delete(bs, snapshot);
5414 }
5415 
5416 /**
5417  * Inflate / decouple parent rw unit tests.
5418  *
5419  * --------------
5420  * original blob:         0         1         2         3         4
5421  *                   ,---------+---------+---------+---------+---------.
5422  *         snapshot  |xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|    -    |
5423  *                   +---------+---------+---------+---------+---------+
5424  *         snapshot2 |    -    |yyyyyyyyy|    -    |yyyyyyyyy|    -    |
5425  *                   +---------+---------+---------+---------+---------+
5426  *         blob      |    -    |zzzzzzzzz|    -    |    -    |    -    |
5427  *                   '---------+---------+---------+---------+---------'
5428  *                   .         .         .         .         .         .
5429  * --------          .         .         .         .         .         .
5430  * inflate:          .         .         .         .         .         .
5431  *                   ,---------+---------+---------+---------+---------.
5432  *         blob      |xxxxxxxxx|zzzzzzzzz|xxxxxxxxx|yyyyyyyyy|000000000|
5433  *                   '---------+---------+---------+---------+---------'
5434  *
5435  *         NOTE: needs to allocate 4 clusters, thin provisioning removed, dependency
5436  *               on snapshot2 and snapshot removed .         .         .
5437  *                   .         .         .         .         .         .
5438  * ----------------  .         .         .         .         .         .
5439  * decouple parent:  .         .         .         .         .         .
5440  *                   ,---------+---------+---------+---------+---------.
5441  *         snapshot  |xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|xxxxxxxxx|    -    |
5442  *                   +---------+---------+---------+---------+---------+
5443  *         blob      |    -    |zzzzzzzzz|    -    |yyyyyyyyy|    -    |
5444  *                   '---------+---------+---------+---------+---------'
5445  *
5446  *         NOTE: needs to allocate 1 cluster, 3 clusters unallocated, dependency
5447  *               on snapshot2 removed and on snapshot still exists. Snapshot2
5448  *               should remain a clone of snapshot.
5449  */
/*
 * Shared body for blob_inflate_rw(): builds the three-level chain shown in
 * the diagram above (snapshot -> snapshot2 -> blob), then either fully
 * inflates the blob (decouple_parent == false, spdk_bs_inflate_blob) or
 * only decouples it from its immediate parent (decouple_parent == true,
 * spdk_bs_blob_decouple_parent).  Verifies cluster accounting, the
 * snapshot/clone relation tree and data consistency throughout.
 */
static void
_blob_inflate_rw(bool decouple_parent)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob, *snapshot, *snapshot2;
	struct spdk_io_channel *channel;
	struct spdk_blob_opts opts;
	spdk_blob_id blobid, snapshotid, snapshot2id;
	uint64_t free_clusters;
	uint64_t cluster_size;

	uint64_t payload_size;
	uint8_t *payload_read;
	uint8_t *payload_write;
	uint8_t *payload_clone;

	uint64_t io_units_per_cluster;
	uint64_t io_units_per_payload;

	int i;
	spdk_blob_id ids[2];
	size_t count;

	free_clusters = spdk_bs_free_cluster_count(bs);
	cluster_size = spdk_bs_get_cluster_size(bs);
	io_units_per_cluster = cluster_size / spdk_bs_get_io_unit_size(bs);
	/* The whole test operates on a 5-cluster blob. */
	io_units_per_payload = io_units_per_cluster * 5;

	payload_size = cluster_size * 5;

	payload_read = malloc(payload_size);
	SPDK_CU_ASSERT_FATAL(payload_read != NULL);

	payload_write = malloc(payload_size);
	SPDK_CU_ASSERT_FATAL(payload_write != NULL);

	/* payload_clone tracks the expected contents of "blob" at all times */
	payload_clone = malloc(payload_size);
	SPDK_CU_ASSERT_FATAL(payload_clone != NULL);

	channel = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(channel != NULL);

	/* Create blob */
	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = true;
	opts.num_clusters = 5;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);
	/* Thin-provisioned, so creation must not consume any clusters */
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));

	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);

	/* 1) Initial read should return zeroed payload */
	memset(payload_read, 0xFF, payload_size);
	spdk_blob_io_read(blob, channel, payload_read, 0, io_units_per_payload,
			  blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(spdk_mem_all_zero(payload_read, payload_size));

	/* Fill whole blob with a pattern, except last cluster (to be sure it
	 * isn't allocated) */
	memset(payload_write, 0xE5, payload_size - cluster_size);
	spdk_blob_io_write(blob, channel, payload_write, 0, io_units_per_payload -
			   io_units_per_cluster, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs));

	/* 2) Create snapshot from blob (first level) */
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snapshotid = g_blobid;

	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot = g_blob;
	CU_ASSERT(snapshot->data_ro == true);
	CU_ASSERT(snapshot->md_ro == true);

	CU_ASSERT(spdk_blob_get_num_clusters(snapshot) == 5);

	/* Write every second cluster with a pattern.
	 *
	 * Last cluster shouldn't be written, to be sure that snapshot nor clone
	 * doesn't allocate it.
	 *
	 * payload_clone stores expected result on "blob" read at the time and
	 * is used only to check data consistency on clone before and after
	 * inflation. Initially we fill it with a backing snapshots pattern
	 * used before.
	 */
	memset(payload_clone, 0xE5, payload_size - cluster_size);
	memset(payload_clone + payload_size - cluster_size, 0x00, cluster_size);
	memset(payload_write, 0xAA, payload_size);
	for (i = 1; i < 5; i += 2) {
		spdk_blob_io_write(blob, channel, payload_write, i * io_units_per_cluster,
				   io_units_per_cluster, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);

		/* Update expected result */
		memcpy(payload_clone + (cluster_size * i), payload_write,
		       cluster_size);
	}
	CU_ASSERT(free_clusters != spdk_bs_free_cluster_count(bs));

	/* Check data consistency on clone */
	memset(payload_read, 0xFF, payload_size);
	spdk_blob_io_read(blob, channel, payload_read, 0, io_units_per_payload,
			  blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0);

	/* 3) Create second levels snapshot from blob */
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snapshot2id = g_blobid;

	spdk_bs_open_blob(bs, snapshot2id, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot2 = g_blob;
	CU_ASSERT(snapshot2->data_ro == true);
	CU_ASSERT(snapshot2->md_ro == true);

	CU_ASSERT(spdk_blob_get_num_clusters(snapshot2) == 5);

	CU_ASSERT(snapshot2->parent_id == snapshotid);

	/* Write one cluster on the top level blob. This cluster (1) covers
	 * already allocated cluster in the snapshot2, so shouldn't be inflated
	 * at all */
	spdk_blob_io_write(blob, channel, payload_write, io_units_per_cluster,
			   io_units_per_cluster, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Update expected result */
	memcpy(payload_clone + cluster_size, payload_write, cluster_size);

	/* Check data consistency on clone */
	memset(payload_read, 0xFF, payload_size);
	spdk_blob_io_read(blob, channel, payload_read, 0, io_units_per_payload,
			  blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0);


	/* Close all blobs */
	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_blob_close(snapshot2, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_blob_close(snapshot, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Check snapshot-clone relations */
	count = 2;
	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);
	CU_ASSERT(count == 1);
	CU_ASSERT(ids[0] == snapshot2id);

	count = 2;
	CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0);
	CU_ASSERT(count == 1);
	CU_ASSERT(ids[0] == blobid);

	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshot2id);

	/* Snapshot the free-cluster count right before inflation so the
	 * expected allocation deltas below can be checked. */
	free_clusters = spdk_bs_free_cluster_count(bs);
	if (!decouple_parent) {
		/* Do full blob inflation */
		spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);

		/* All clusters should be inflated (except one already allocated
		 * in a top level blob) */
		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 4);

		/* Check if relation tree updated correctly */
		count = 2;
		CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);

		/* snapshotid have one clone */
		CU_ASSERT(count == 1);
		CU_ASSERT(ids[0] == snapshot2id);

		/* snapshot2id have no clones */
		count = 2;
		CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0);
		CU_ASSERT(count == 0);

		CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID);
	} else {
		/* Decouple parent of blob */
		spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);

		/* Only one cluster from a parent should be inflated (second one
		 * is covered by a cluster written on a top level blob, and
		 * already allocated) */
		CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 1);

		/* Check if relation tree updated correctly */
		count = 2;
		CU_ASSERT(spdk_blob_get_clones(bs, snapshotid, ids, &count) == 0);

		/* snapshotid have two clones now */
		CU_ASSERT(count == 2);
		CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
		CU_ASSERT(ids[0] == snapshot2id || ids[1] == snapshot2id);

		/* snapshot2id have no clones */
		count = 2;
		CU_ASSERT(spdk_blob_get_clones(bs, snapshot2id, ids, &count) == 0);
		CU_ASSERT(count == 0);

		CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
	}

	/* Try to delete snapshot2 (should pass) */
	spdk_bs_delete_blob(bs, snapshot2id, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Try to delete base snapshot */
	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Reopen blob after snapshot deletion */
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;

	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 5);

	/* Check data consistency on inflated blob */
	memset(payload_read, 0xFF, payload_size);
	spdk_blob_io_read(blob, channel, payload_read, 0, io_units_per_payload,
			  blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_clone, payload_read, payload_size) == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();

	free(payload_read);
	free(payload_write);
	free(payload_clone);

	ut_blob_close_and_delete(bs, blob);
}
5724 
5725 static void
5726 blob_inflate_rw(void)
5727 {
5728 	_blob_inflate_rw(false);
5729 	_blob_inflate_rw(true);
5730 }
5731 
5732 /**
5733  * Snapshot-clones relation test
5734  *
5735  *         snapshot
5736  *            |
5737  *      +-----+-----+
5738  *      |           |
5739  *   blob(ro)   snapshot2
5740  *      |           |
5741  *   clone2      clone
5742  */
5743 static void
5744 blob_relations(void)
5745 {
5746 	struct spdk_blob_store *bs;
5747 	struct spdk_bs_dev *dev;
5748 	struct spdk_bs_opts bs_opts;
5749 	struct spdk_blob_opts opts;
5750 	struct spdk_blob *blob, *snapshot, *snapshot2, *clone, *clone2;
5751 	spdk_blob_id blobid, cloneid, snapshotid, cloneid2, snapshotid2;
5752 	int rc;
5753 	size_t count;
5754 	spdk_blob_id ids[10] = {};
5755 
5756 	dev = init_dev();
5757 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
5758 	snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");
5759 
5760 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
5761 	poll_threads();
5762 	CU_ASSERT(g_bserrno == 0);
5763 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
5764 	bs = g_bs;
5765 
5766 	/* 1. Create blob with 10 clusters */
5767 
5768 	ut_spdk_blob_opts_init(&opts);
5769 	opts.num_clusters = 10;
5770 
5771 	blob = ut_blob_create_and_open(bs, &opts);
5772 	blobid = spdk_blob_get_id(blob);
5773 
5774 	CU_ASSERT(!spdk_blob_is_read_only(blob));
5775 	CU_ASSERT(!spdk_blob_is_snapshot(blob));
5776 	CU_ASSERT(!spdk_blob_is_clone(blob));
5777 	CU_ASSERT(!spdk_blob_is_thin_provisioned(blob));
5778 
5779 	/* blob should not have underlying snapshot nor clones */
5780 	CU_ASSERT(blob->parent_id == SPDK_BLOBID_INVALID);
5781 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID);
5782 	count = SPDK_COUNTOF(ids);
5783 	rc = spdk_blob_get_clones(bs, blobid, ids, &count);
5784 	CU_ASSERT(rc == 0);
5785 	CU_ASSERT(count == 0);
5786 
5787 
5788 	/* 2. Create snapshot */
5789 
5790 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5791 	poll_threads();
5792 	CU_ASSERT(g_bserrno == 0);
5793 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5794 	snapshotid = g_blobid;
5795 
5796 	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
5797 	poll_threads();
5798 	CU_ASSERT(g_bserrno == 0);
5799 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5800 	snapshot = g_blob;
5801 
5802 	CU_ASSERT(spdk_blob_is_read_only(snapshot));
5803 	CU_ASSERT(spdk_blob_is_snapshot(snapshot));
5804 	CU_ASSERT(!spdk_blob_is_clone(snapshot));
5805 	CU_ASSERT(snapshot->parent_id == SPDK_BLOBID_INVALID);
5806 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid) == SPDK_BLOBID_INVALID);
5807 
5808 	/* Check if original blob is converted to the clone of snapshot */
5809 	CU_ASSERT(!spdk_blob_is_read_only(blob));
5810 	CU_ASSERT(!spdk_blob_is_snapshot(blob));
5811 	CU_ASSERT(spdk_blob_is_clone(blob));
5812 	CU_ASSERT(spdk_blob_is_thin_provisioned(blob));
5813 	CU_ASSERT(blob->parent_id == snapshotid);
5814 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
5815 
5816 	count = SPDK_COUNTOF(ids);
5817 	rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
5818 	CU_ASSERT(rc == 0);
5819 	CU_ASSERT(count == 1);
5820 	CU_ASSERT(ids[0] == blobid);
5821 
5822 
5823 	/* 3. Create clone from snapshot */
5824 
5825 	spdk_bs_create_clone(bs, snapshotid, NULL, blob_op_with_id_complete, NULL);
5826 	poll_threads();
5827 	CU_ASSERT(g_bserrno == 0);
5828 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5829 	cloneid = g_blobid;
5830 
5831 	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
5832 	poll_threads();
5833 	CU_ASSERT(g_bserrno == 0);
5834 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5835 	clone = g_blob;
5836 
5837 	CU_ASSERT(!spdk_blob_is_read_only(clone));
5838 	CU_ASSERT(!spdk_blob_is_snapshot(clone));
5839 	CU_ASSERT(spdk_blob_is_clone(clone));
5840 	CU_ASSERT(spdk_blob_is_thin_provisioned(clone));
5841 	CU_ASSERT(clone->parent_id == snapshotid);
5842 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid);
5843 
5844 	count = SPDK_COUNTOF(ids);
5845 	rc = spdk_blob_get_clones(bs, cloneid, ids, &count);
5846 	CU_ASSERT(rc == 0);
5847 	CU_ASSERT(count == 0);
5848 
5849 	/* Check if clone is on the snapshot's list */
5850 	count = SPDK_COUNTOF(ids);
5851 	rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
5852 	CU_ASSERT(rc == 0);
5853 	CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
5854 	CU_ASSERT(ids[0] == cloneid || ids[1] == cloneid);
5855 
5856 
5857 	/* 4. Create snapshot of the clone */
5858 
5859 	spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL);
5860 	poll_threads();
5861 	CU_ASSERT(g_bserrno == 0);
5862 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5863 	snapshotid2 = g_blobid;
5864 
5865 	spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL);
5866 	poll_threads();
5867 	CU_ASSERT(g_bserrno == 0);
5868 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5869 	snapshot2 = g_blob;
5870 
5871 	CU_ASSERT(spdk_blob_is_read_only(snapshot2));
5872 	CU_ASSERT(spdk_blob_is_snapshot(snapshot2));
5873 	CU_ASSERT(spdk_blob_is_clone(snapshot2));
5874 	CU_ASSERT(snapshot2->parent_id == snapshotid);
5875 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid);
5876 
5877 	/* Check if clone is converted to the clone of snapshot2 and snapshot2
5878 	 * is a child of snapshot */
5879 	CU_ASSERT(!spdk_blob_is_read_only(clone));
5880 	CU_ASSERT(!spdk_blob_is_snapshot(clone));
5881 	CU_ASSERT(spdk_blob_is_clone(clone));
5882 	CU_ASSERT(spdk_blob_is_thin_provisioned(clone));
5883 	CU_ASSERT(clone->parent_id == snapshotid2);
5884 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2);
5885 
5886 	count = SPDK_COUNTOF(ids);
5887 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
5888 	CU_ASSERT(rc == 0);
5889 	CU_ASSERT(count == 1);
5890 	CU_ASSERT(ids[0] == cloneid);
5891 
5892 
5893 	/* 5. Try to create clone from read only blob */
5894 
5895 	/* Mark blob as read only */
5896 	spdk_blob_set_read_only(blob);
5897 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
5898 	poll_threads();
5899 	CU_ASSERT(g_bserrno == 0);
5900 
5901 	/* Check if previously created blob is read only clone */
5902 	CU_ASSERT(spdk_blob_is_read_only(blob));
5903 	CU_ASSERT(!spdk_blob_is_snapshot(blob));
5904 	CU_ASSERT(spdk_blob_is_clone(blob));
5905 	CU_ASSERT(spdk_blob_is_thin_provisioned(blob));
5906 
5907 	/* Create clone from read only blob */
5908 	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
5909 	poll_threads();
5910 	CU_ASSERT(g_bserrno == 0);
5911 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
5912 	cloneid2 = g_blobid;
5913 
5914 	spdk_bs_open_blob(bs, cloneid2, blob_op_with_handle_complete, NULL);
5915 	poll_threads();
5916 	CU_ASSERT(g_bserrno == 0);
5917 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
5918 	clone2 = g_blob;
5919 
5920 	CU_ASSERT(!spdk_blob_is_read_only(clone2));
5921 	CU_ASSERT(!spdk_blob_is_snapshot(clone2));
5922 	CU_ASSERT(spdk_blob_is_clone(clone2));
5923 	CU_ASSERT(spdk_blob_is_thin_provisioned(clone2));
5924 
5925 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid);
5926 
5927 	count = SPDK_COUNTOF(ids);
5928 	rc = spdk_blob_get_clones(bs, blobid, ids, &count);
5929 	CU_ASSERT(rc == 0);
5930 
5931 	CU_ASSERT(count == 1);
5932 	CU_ASSERT(ids[0] == cloneid2);
5933 
5934 	/* Close blobs */
5935 
5936 	spdk_blob_close(clone2, blob_op_complete, NULL);
5937 	poll_threads();
5938 	CU_ASSERT(g_bserrno == 0);
5939 
5940 	spdk_blob_close(blob, blob_op_complete, NULL);
5941 	poll_threads();
5942 	CU_ASSERT(g_bserrno == 0);
5943 
5944 	spdk_blob_close(clone, blob_op_complete, NULL);
5945 	poll_threads();
5946 	CU_ASSERT(g_bserrno == 0);
5947 
5948 	spdk_blob_close(snapshot, blob_op_complete, NULL);
5949 	poll_threads();
5950 	CU_ASSERT(g_bserrno == 0);
5951 
5952 	spdk_blob_close(snapshot2, blob_op_complete, NULL);
5953 	poll_threads();
5954 	CU_ASSERT(g_bserrno == 0);
5955 
5956 	/* Try to delete snapshot with more than 1 clone */
5957 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
5958 	poll_threads();
5959 	CU_ASSERT(g_bserrno != 0);
5960 
5961 	ut_bs_reload(&bs, &bs_opts);
5962 
5963 	/* NULL ids array should return number of clones in count */
5964 	count = SPDK_COUNTOF(ids);
5965 	rc = spdk_blob_get_clones(bs, snapshotid, NULL, &count);
5966 	CU_ASSERT(rc == -ENOMEM);
5967 	CU_ASSERT(count == 2);
5968 
5969 	/* incorrect array size */
5970 	count = 1;
5971 	rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
5972 	CU_ASSERT(rc == -ENOMEM);
5973 	CU_ASSERT(count == 2);
5974 
5975 
5976 	/* Verify structure of loaded blob store */
5977 
5978 	/* snapshot */
5979 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid) == SPDK_BLOBID_INVALID);
5980 
5981 	count = SPDK_COUNTOF(ids);
5982 	rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
5983 	CU_ASSERT(rc == 0);
5984 	CU_ASSERT(count == 2);
5985 	CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
5986 	CU_ASSERT(ids[0] == snapshotid2 || ids[1] == snapshotid2);
5987 
5988 	/* blob */
5989 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
5990 	count = SPDK_COUNTOF(ids);
5991 	rc = spdk_blob_get_clones(bs, blobid, ids, &count);
5992 	CU_ASSERT(rc == 0);
5993 	CU_ASSERT(count == 1);
5994 	CU_ASSERT(ids[0] == cloneid2);
5995 
5996 	/* clone */
5997 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2);
5998 	count = SPDK_COUNTOF(ids);
5999 	rc = spdk_blob_get_clones(bs, cloneid, ids, &count);
6000 	CU_ASSERT(rc == 0);
6001 	CU_ASSERT(count == 0);
6002 
6003 	/* snapshot2 */
6004 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid);
6005 	count = SPDK_COUNTOF(ids);
6006 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
6007 	CU_ASSERT(rc == 0);
6008 	CU_ASSERT(count == 1);
6009 	CU_ASSERT(ids[0] == cloneid);
6010 
6011 	/* clone2 */
6012 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid);
6013 	count = SPDK_COUNTOF(ids);
6014 	rc = spdk_blob_get_clones(bs, cloneid2, ids, &count);
6015 	CU_ASSERT(rc == 0);
6016 	CU_ASSERT(count == 0);
6017 
6018 	/* Try to delete blob that user should not be able to remove */
6019 
6020 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
6021 	poll_threads();
6022 	CU_ASSERT(g_bserrno != 0);
6023 
6024 	/* Remove all blobs */
6025 
6026 	spdk_bs_delete_blob(bs, cloneid, blob_op_complete, NULL);
6027 	poll_threads();
6028 	CU_ASSERT(g_bserrno == 0);
6029 
6030 	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
6031 	poll_threads();
6032 	CU_ASSERT(g_bserrno == 0);
6033 
6034 	spdk_bs_delete_blob(bs, cloneid2, blob_op_complete, NULL);
6035 	poll_threads();
6036 	CU_ASSERT(g_bserrno == 0);
6037 
6038 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
6039 	poll_threads();
6040 	CU_ASSERT(g_bserrno == 0);
6041 
6042 	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
6043 	poll_threads();
6044 	CU_ASSERT(g_bserrno == 0);
6045 
6046 	spdk_bs_unload(bs, bs_op_complete, NULL);
6047 	poll_threads();
6048 	CU_ASSERT(g_bserrno == 0);
6049 
6050 	g_bs = NULL;
6051 }
6052 
6053 /**
6054  * Snapshot-clones relation test 2
6055  *
6056  *         snapshot1
6057  *            |
6058  *         snapshot2
6059  *            |
6060  *      +-----+-----+
6061  *      |           |
6062  *   blob(ro)   snapshot3
6063  *      |           |
6064  *      |       snapshot4
6065  *      |        |     |
6066  *   clone2   clone  clone3
6067  */
6068 static void
6069 blob_relations2(void)
6070 {
6071 	struct spdk_blob_store *bs;
6072 	struct spdk_bs_dev *dev;
6073 	struct spdk_bs_opts bs_opts;
6074 	struct spdk_blob_opts opts;
6075 	struct spdk_blob *blob, *snapshot1, *snapshot2, *snapshot3, *snapshot4, *clone, *clone2;
6076 	spdk_blob_id blobid, snapshotid1, snapshotid2, snapshotid3, snapshotid4, cloneid, cloneid2,
6077 		     cloneid3;
6078 	int rc;
6079 	size_t count;
6080 	spdk_blob_id ids[10] = {};
6081 
6082 	dev = init_dev();
6083 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
6084 	snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");
6085 
6086 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
6087 	poll_threads();
6088 	CU_ASSERT(g_bserrno == 0);
6089 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
6090 	bs = g_bs;
6091 
6092 	/* 1. Create blob with 10 clusters */
6093 
6094 	ut_spdk_blob_opts_init(&opts);
6095 	opts.num_clusters = 10;
6096 
6097 	blob = ut_blob_create_and_open(bs, &opts);
6098 	blobid = spdk_blob_get_id(blob);
6099 
6100 	/* 2. Create snapshot1 */
6101 
6102 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
6103 	poll_threads();
6104 	CU_ASSERT(g_bserrno == 0);
6105 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
6106 	snapshotid1 = g_blobid;
6107 
6108 	spdk_bs_open_blob(bs, snapshotid1, blob_op_with_handle_complete, NULL);
6109 	poll_threads();
6110 	CU_ASSERT(g_bserrno == 0);
6111 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
6112 	snapshot1 = g_blob;
6113 
6114 	CU_ASSERT(snapshot1->parent_id == SPDK_BLOBID_INVALID);
6115 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid1) == SPDK_BLOBID_INVALID);
6116 
6117 	CU_ASSERT(blob->parent_id == snapshotid1);
6118 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid1);
6119 
6120 	/* Check if blob is the clone of snapshot1 */
6121 	CU_ASSERT(blob->parent_id == snapshotid1);
6122 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid1);
6123 
6124 	count = SPDK_COUNTOF(ids);
6125 	rc = spdk_blob_get_clones(bs, snapshotid1, ids, &count);
6126 	CU_ASSERT(rc == 0);
6127 	CU_ASSERT(count == 1);
6128 	CU_ASSERT(ids[0] == blobid);
6129 
6130 	/* 3. Create another snapshot */
6131 
6132 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
6133 	poll_threads();
6134 	CU_ASSERT(g_bserrno == 0);
6135 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
6136 	snapshotid2 = g_blobid;
6137 
6138 	spdk_bs_open_blob(bs, snapshotid2, blob_op_with_handle_complete, NULL);
6139 	poll_threads();
6140 	CU_ASSERT(g_bserrno == 0);
6141 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
6142 	snapshot2 = g_blob;
6143 
6144 	CU_ASSERT(spdk_blob_is_clone(snapshot2));
6145 	CU_ASSERT(snapshot2->parent_id == snapshotid1);
6146 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == snapshotid1);
6147 
6148 	/* Check if snapshot2 is the clone of snapshot1 and blob
6149 	 * is a child of snapshot2 */
6150 	CU_ASSERT(blob->parent_id == snapshotid2);
6151 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid2);
6152 
6153 	count = SPDK_COUNTOF(ids);
6154 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
6155 	CU_ASSERT(rc == 0);
6156 	CU_ASSERT(count == 1);
6157 	CU_ASSERT(ids[0] == blobid);
6158 
6159 	/* 4. Create clone from snapshot */
6160 
6161 	spdk_bs_create_clone(bs, snapshotid2, NULL, blob_op_with_id_complete, NULL);
6162 	poll_threads();
6163 	CU_ASSERT(g_bserrno == 0);
6164 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
6165 	cloneid = g_blobid;
6166 
6167 	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
6168 	poll_threads();
6169 	CU_ASSERT(g_bserrno == 0);
6170 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
6171 	clone = g_blob;
6172 
6173 	CU_ASSERT(clone->parent_id == snapshotid2);
6174 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid2);
6175 
6176 	/* Check if clone is on the snapshot's list */
6177 	count = SPDK_COUNTOF(ids);
6178 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
6179 	CU_ASSERT(rc == 0);
6180 	CU_ASSERT(count == 2);
6181 	CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
6182 	CU_ASSERT(ids[0] == cloneid || ids[1] == cloneid);
6183 
6184 	/* 5. Create snapshot of the clone */
6185 
6186 	spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL);
6187 	poll_threads();
6188 	CU_ASSERT(g_bserrno == 0);
6189 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
6190 	snapshotid3 = g_blobid;
6191 
6192 	spdk_bs_open_blob(bs, snapshotid3, blob_op_with_handle_complete, NULL);
6193 	poll_threads();
6194 	CU_ASSERT(g_bserrno == 0);
6195 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
6196 	snapshot3 = g_blob;
6197 
6198 	CU_ASSERT(snapshot3->parent_id == snapshotid2);
6199 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid3) == snapshotid2);
6200 
6201 	/* Check if clone is converted to the clone of snapshot3 and snapshot3
6202 	 * is a child of snapshot2 */
6203 	CU_ASSERT(clone->parent_id == snapshotid3);
6204 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3);
6205 
6206 	count = SPDK_COUNTOF(ids);
6207 	rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count);
6208 	CU_ASSERT(rc == 0);
6209 	CU_ASSERT(count == 1);
6210 	CU_ASSERT(ids[0] == cloneid);
6211 
6212 	/* 6. Create another snapshot of the clone */
6213 
6214 	spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL);
6215 	poll_threads();
6216 	CU_ASSERT(g_bserrno == 0);
6217 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
6218 	snapshotid4 = g_blobid;
6219 
6220 	spdk_bs_open_blob(bs, snapshotid4, blob_op_with_handle_complete, NULL);
6221 	poll_threads();
6222 	CU_ASSERT(g_bserrno == 0);
6223 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
6224 	snapshot4 = g_blob;
6225 
6226 	CU_ASSERT(snapshot4->parent_id == snapshotid3);
6227 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid4) == snapshotid3);
6228 
6229 	/* Check if clone is converted to the clone of snapshot4 and snapshot4
6230 	 * is a child of snapshot3 */
6231 	CU_ASSERT(clone->parent_id == snapshotid4);
6232 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid4);
6233 
6234 	count = SPDK_COUNTOF(ids);
6235 	rc = spdk_blob_get_clones(bs, snapshotid4, ids, &count);
6236 	CU_ASSERT(rc == 0);
6237 	CU_ASSERT(count == 1);
6238 	CU_ASSERT(ids[0] == cloneid);
6239 
6240 	/* 7. Remove snapshot 4 */
6241 
6242 	ut_blob_close_and_delete(bs, snapshot4);
6243 
6244 	/* Check if relations are back to state from before creating snapshot 4 */
6245 	CU_ASSERT(clone->parent_id == snapshotid3);
6246 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3);
6247 
6248 	count = SPDK_COUNTOF(ids);
6249 	rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count);
6250 	CU_ASSERT(rc == 0);
6251 	CU_ASSERT(count == 1);
6252 	CU_ASSERT(ids[0] == cloneid);
6253 
6254 	/* 8. Create second clone of snapshot 3 and try to remove snapshot 3 */
6255 
6256 	spdk_bs_create_clone(bs, snapshotid3, NULL, blob_op_with_id_complete, NULL);
6257 	poll_threads();
6258 	CU_ASSERT(g_bserrno == 0);
6259 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
6260 	cloneid3 = g_blobid;
6261 
6262 	spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL);
6263 	poll_threads();
6264 	CU_ASSERT(g_bserrno != 0);
6265 
6266 	/* 9. Open snapshot 3 again and try to remove it while clone 3 is closed */
6267 
6268 	spdk_bs_open_blob(bs, snapshotid3, blob_op_with_handle_complete, NULL);
6269 	poll_threads();
6270 	CU_ASSERT(g_bserrno == 0);
6271 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
6272 	snapshot3 = g_blob;
6273 
6274 	spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL);
6275 	poll_threads();
6276 	CU_ASSERT(g_bserrno != 0);
6277 
6278 	spdk_blob_close(snapshot3, blob_op_complete, NULL);
6279 	poll_threads();
6280 	CU_ASSERT(g_bserrno == 0);
6281 
6282 	spdk_bs_delete_blob(bs, cloneid3, blob_op_complete, NULL);
6283 	poll_threads();
6284 	CU_ASSERT(g_bserrno == 0);
6285 
6286 	/* 10. Remove snapshot 1 */
6287 
6288 	/* Check snapshot 1 and snapshot 2 allocated clusters */
6289 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(snapshot1) == 10);
6290 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(snapshot2) == 0);
6291 
6292 	ut_blob_close_and_delete(bs, snapshot1);
6293 
6294 	/* Check if relations are back to state from before creating snapshot 4 (before step 6) */
6295 	CU_ASSERT(snapshot2->parent_id == SPDK_BLOBID_INVALID);
6296 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == SPDK_BLOBID_INVALID);
6297 
6298 	/* Check that snapshot 2 has the clusters that were allocated to snapshot 1 */
6299 	CU_ASSERT(spdk_blob_get_num_allocated_clusters(snapshot2) == 10);
6300 
6301 	count = SPDK_COUNTOF(ids);
6302 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
6303 	CU_ASSERT(rc == 0);
6304 	CU_ASSERT(count == 2);
6305 	CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
6306 	CU_ASSERT(ids[0] == snapshotid3 || ids[1] == snapshotid3);
6307 
6308 	/* 11. Try to create clone from read only blob */
6309 
6310 	/* Mark blob as read only */
6311 	spdk_blob_set_read_only(blob);
6312 	spdk_blob_sync_md(blob, blob_op_complete, NULL);
6313 	poll_threads();
6314 	CU_ASSERT(g_bserrno == 0);
6315 
6316 	/* Create clone from read only blob */
6317 	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
6318 	poll_threads();
6319 	CU_ASSERT(g_bserrno == 0);
6320 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
6321 	cloneid2 = g_blobid;
6322 
6323 	spdk_bs_open_blob(bs, cloneid2, blob_op_with_handle_complete, NULL);
6324 	poll_threads();
6325 	CU_ASSERT(g_bserrno == 0);
6326 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
6327 	clone2 = g_blob;
6328 
6329 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid);
6330 
6331 	count = SPDK_COUNTOF(ids);
6332 	rc = spdk_blob_get_clones(bs, blobid, ids, &count);
6333 	CU_ASSERT(rc == 0);
6334 	CU_ASSERT(count == 1);
6335 	CU_ASSERT(ids[0] == cloneid2);
6336 
6337 	/* Close blobs */
6338 
6339 	spdk_blob_close(clone2, blob_op_complete, NULL);
6340 	poll_threads();
6341 	CU_ASSERT(g_bserrno == 0);
6342 
6343 	spdk_blob_close(blob, blob_op_complete, NULL);
6344 	poll_threads();
6345 	CU_ASSERT(g_bserrno == 0);
6346 
6347 	spdk_blob_close(clone, blob_op_complete, NULL);
6348 	poll_threads();
6349 	CU_ASSERT(g_bserrno == 0);
6350 
6351 	spdk_blob_close(snapshot2, blob_op_complete, NULL);
6352 	poll_threads();
6353 	CU_ASSERT(g_bserrno == 0);
6354 
6355 	spdk_blob_close(snapshot3, blob_op_complete, NULL);
6356 	poll_threads();
6357 	CU_ASSERT(g_bserrno == 0);
6358 
6359 	ut_bs_reload(&bs, &bs_opts);
6360 
6361 	/* Verify structure of loaded blob store */
6362 
6363 	/* snapshot2 */
6364 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid2) == SPDK_BLOBID_INVALID);
6365 
6366 	count = SPDK_COUNTOF(ids);
6367 	rc = spdk_blob_get_clones(bs, snapshotid2, ids, &count);
6368 	CU_ASSERT(rc == 0);
6369 	CU_ASSERT(count == 2);
6370 	CU_ASSERT(ids[0] == blobid || ids[1] == blobid);
6371 	CU_ASSERT(ids[0] == snapshotid3 || ids[1] == snapshotid3);
6372 
6373 	/* blob */
6374 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid2);
6375 	count = SPDK_COUNTOF(ids);
6376 	rc = spdk_blob_get_clones(bs, blobid, ids, &count);
6377 	CU_ASSERT(rc == 0);
6378 	CU_ASSERT(count == 1);
6379 	CU_ASSERT(ids[0] == cloneid2);
6380 
6381 	/* clone */
6382 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid) == snapshotid3);
6383 	count = SPDK_COUNTOF(ids);
6384 	rc = spdk_blob_get_clones(bs, cloneid, ids, &count);
6385 	CU_ASSERT(rc == 0);
6386 	CU_ASSERT(count == 0);
6387 
6388 	/* snapshot3 */
6389 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, snapshotid3) == snapshotid2);
6390 	count = SPDK_COUNTOF(ids);
6391 	rc = spdk_blob_get_clones(bs, snapshotid3, ids, &count);
6392 	CU_ASSERT(rc == 0);
6393 	CU_ASSERT(count == 1);
6394 	CU_ASSERT(ids[0] == cloneid);
6395 
6396 	/* clone2 */
6397 	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, cloneid2) == blobid);
6398 	count = SPDK_COUNTOF(ids);
6399 	rc = spdk_blob_get_clones(bs, cloneid2, ids, &count);
6400 	CU_ASSERT(rc == 0);
6401 	CU_ASSERT(count == 0);
6402 
6403 	/* Try to delete all blobs in the worse possible order */
6404 
6405 	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
6406 	poll_threads();
6407 	CU_ASSERT(g_bserrno != 0);
6408 
6409 	spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL);
6410 	poll_threads();
6411 	CU_ASSERT(g_bserrno == 0);
6412 
6413 	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
6414 	poll_threads();
6415 	CU_ASSERT(g_bserrno != 0);
6416 
6417 	spdk_bs_delete_blob(bs, cloneid, blob_op_complete, NULL);
6418 	poll_threads();
6419 	CU_ASSERT(g_bserrno == 0);
6420 
6421 	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
6422 	poll_threads();
6423 	CU_ASSERT(g_bserrno == 0);
6424 
6425 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
6426 	poll_threads();
6427 	CU_ASSERT(g_bserrno == 0);
6428 
6429 	spdk_bs_delete_blob(bs, cloneid2, blob_op_complete, NULL);
6430 	poll_threads();
6431 	CU_ASSERT(g_bserrno == 0);
6432 
6433 	spdk_bs_unload(bs, bs_op_complete, NULL);
6434 	poll_threads();
6435 	CU_ASSERT(g_bserrno == 0);
6436 
6437 	g_bs = NULL;
6438 }
6439 
6440 /**
6441  * Snapshot-clones relation test 3
6442  *
6443  *         snapshot0
6444  *            |
6445  *         snapshot1
6446  *            |
6447  *         snapshot2
6448  *            |
6449  *           blob
6450  */
static void
blob_relations3(void)
{
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct spdk_io_channel *channel;
	struct spdk_bs_opts bs_opts;
	struct spdk_blob_opts opts;
	struct spdk_blob *blob;
	spdk_blob_id blobid, snapshotid0, snapshotid1, snapshotid2;

	/* Fresh blobstore with a custom bstype for this test. */
	dev = init_dev();
	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
	snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");

	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	/* Decouple-parent requires an I/O channel. */
	channel = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(channel != NULL);

	/* 1. Create blob with 10 clusters */
	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);

	/* 2. Create snapshot0 */
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snapshotid0 = g_blobid;

	/* 3. Create snapshot1 */
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snapshotid1 = g_blobid;

	/* 4. Create snapshot2 */
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snapshotid2 = g_blobid;

	/* 5. Decouple blob */
	spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* 6. Decouple snapshot2. Make sure updating md of snapshot2 is possible */
	spdk_bs_blob_decouple_parent(bs, channel, snapshotid2, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* 7. Delete blob */
	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* 8. Delete snapshot2.
	 * If md of snapshot 2 was updated, it should be possible to delete it */
	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Remove remaining blobs and unload bs */
	spdk_bs_delete_blob(bs, snapshotid1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_delete_blob(bs, snapshotid0, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();

	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	g_bs = NULL;
}
6546 
/*
 * Verify that a blob md sync correctly transitions the blobstore and the
 * on-disk super block out of the "clean" state, even when the device fails
 * partway through. The loop escalates the power-failure threshold by one
 * device operation per iteration until a sync finally succeeds.
 */
static void
blobstore_clean_power_failure(void)
{
	struct spdk_blob_store *bs;
	struct spdk_blob *blob;
	struct spdk_power_failure_thresholds thresholds = {};
	bool clean = false;
	/* Super block lives at offset 0 of the simulated device buffer. */
	struct spdk_bs_super_block *super = (struct spdk_bs_super_block *)&g_dev_buffer[0];
	struct spdk_bs_super_block super_copy = {};

	thresholds.general_threshold = 1;
	while (!clean) {
		/* Create bs and blob */
		suite_blob_setup();
		SPDK_CU_ASSERT_FATAL(g_bs != NULL);
		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
		bs = g_bs;
		blob = g_blob;

		/* Super block should not change for rest of the UT,
		 * save it and compare later. */
		memcpy(&super_copy, super, sizeof(struct spdk_bs_super_block));
		SPDK_CU_ASSERT_FATAL(super->clean == 0);
		SPDK_CU_ASSERT_FATAL(bs->clean == 0);

		/* Force bs/super block in a clean state.
		 * Along with marking blob dirty, to cause blob persist. */
		blob->state = SPDK_BLOB_STATE_DIRTY;
		bs->clean = 1;
		super->clean = 1;
		/* Recompute crc so the forced super block is self-consistent. */
		super->crc = blob_md_page_calc_crc(super);

		g_bserrno = -1;
		/* Inject a device failure after `general_threshold` operations. */
		dev_set_power_failure_thresholds(thresholds);
		spdk_blob_sync_md(blob, blob_op_complete, NULL);
		poll_threads();
		dev_reset_power_failure_event();

		if (g_bserrno == 0) {
			/* After successful md sync, both bs and super block
			 * should be marked as not clean. */
			SPDK_CU_ASSERT_FATAL(bs->clean == 0);
			SPDK_CU_ASSERT_FATAL(super->clean == 0);
			clean = true;
		}

		/* Depending on the point of failure, super block was either updated or not. */
		super_copy.clean = super->clean;
		super_copy.crc = blob_md_page_calc_crc(&super_copy);
		/* Compare that the values in super block remained unchanged. */
		SPDK_CU_ASSERT_FATAL(!memcmp(&super_copy, super, sizeof(struct spdk_bs_super_block)));

		/* Delete blob and unload bs */
		suite_blob_cleanup();

		/* Fail one operation later on the next iteration. */
		thresholds.general_threshold++;
	}
}
6605 
/*
 * Delete a snapshot while injecting a power failure at every possible device
 * operation, then dirty-load the blobstore and verify it recovered to one of
 * the two valid states: snapshot fully intact (delete never took effect) or
 * snapshot gone with its clusters re-owned by the clone. Each iteration moves
 * the failure point one device operation later until the delete succeeds.
 */
static void
blob_delete_snapshot_power_failure(void)
{
	struct spdk_bs_dev *dev;
	struct spdk_blob_store *bs;
	struct spdk_blob_opts opts;
	struct spdk_blob *blob, *snapshot;
	struct spdk_power_failure_thresholds thresholds = {};
	spdk_blob_id blobid, snapshotid;
	const void *value;
	size_t value_len;
	size_t count;
	spdk_blob_id ids[3] = {};
	int rc;
	bool deleted = false;
	int delete_snapshot_bserrno = -1;
	uint32_t first_data_cluster;

	thresholds.general_threshold = 1;
	while (!deleted) {
		dev = init_dev();

		spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		SPDK_CU_ASSERT_FATAL(g_bs != NULL);
		bs = g_bs;

		first_data_cluster = FIRST_DATA_CLUSTER(bs);

		/* Create blob */
		ut_spdk_blob_opts_init(&opts);
		opts.num_clusters = 10;

		spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
		blobid = g_blobid;

		/* Create snapshot */
		spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
		snapshotid = g_blobid;
		/* The blob's 10 clusters were handed to the snapshot; no extras allocated. */
		SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, first_data_cluster));
		SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, first_data_cluster + 10));

		dev_set_power_failure_thresholds(thresholds);

		spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
		poll_threads();
		delete_snapshot_bserrno = g_bserrno;

		/* Do not shut down cleanly. Assumption is that after snapshot deletion
		 * reports success, changes to both blobs should already persisted. */
		dev_reset_power_failure_event();
		ut_bs_dirty_load(&bs, NULL);

		/* Cluster accounting must be intact regardless of where the failure hit. */
		SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, first_data_cluster));
		SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, first_data_cluster + 10));

		spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
		blob = g_blob;
		SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == true);

		spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
		poll_threads();

		if (g_bserrno == 0) {
			/* Snapshot survived: full parent/clone relation must be intact
			 * and no pending-removal marker may remain. */
			SPDK_CU_ASSERT_FATAL(g_blob != NULL);
			snapshot = g_blob;
			CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
			count = SPDK_COUNTOF(ids);
			rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
			CU_ASSERT(rc == 0);
			CU_ASSERT(count == 1);
			CU_ASSERT(ids[0] == blobid);
			rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_PENDING_REMOVAL, &value, &value_len);
			CU_ASSERT(rc != 0);
			SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(snapshot) == false);
			CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);
			CU_ASSERT(spdk_blob_get_num_allocated_clusters(snapshot) == 10);

			spdk_blob_close(snapshot, blob_op_complete, NULL);
			poll_threads();
			CU_ASSERT(g_bserrno == 0);
		} else {
			/* Snapshot is gone: the clone must own the clusters again. */
			CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID);
			CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 10);
			/* Snapshot might have been left in unrecoverable state, so it does not open.
			 * Yet delete might perform further changes to the clone after that.
			 * This UT should test until snapshot is deleted and delete call succeeds. */
			if (delete_snapshot_bserrno == 0) {
				deleted = true;
			}
		}

		spdk_blob_close(blob, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);

		spdk_bs_unload(bs, bs_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);

		/* Fail one device operation later on the next iteration. */
		thresholds.general_threshold++;
	}
}
6719 
6720 static void
6721 blob_create_snapshot_power_failure(void)
6722 {
6723 	struct spdk_blob_store *bs = g_bs;
6724 	struct spdk_bs_dev *dev;
6725 	struct spdk_blob_opts opts;
6726 	struct spdk_blob *blob, *snapshot;
6727 	struct spdk_power_failure_thresholds thresholds = {};
6728 	spdk_blob_id blobid, snapshotid;
6729 	const void *value;
6730 	size_t value_len;
6731 	size_t count;
6732 	spdk_blob_id ids[3] = {};
6733 	int rc;
6734 	bool created = false;
6735 	int create_snapshot_bserrno = -1;
6736 	uint32_t first_data_cluster;
6737 
6738 	thresholds.general_threshold = 1;
6739 	while (!created) {
6740 		dev = init_dev();
6741 
6742 		spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
6743 		poll_threads();
6744 		CU_ASSERT(g_bserrno == 0);
6745 		SPDK_CU_ASSERT_FATAL(g_bs != NULL);
6746 		bs = g_bs;
6747 
6748 		first_data_cluster = FIRST_DATA_CLUSTER(bs);
6749 
6750 		/* Create blob */
6751 		ut_spdk_blob_opts_init(&opts);
6752 		opts.num_clusters = 10;
6753 
6754 		spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
6755 		poll_threads();
6756 		CU_ASSERT(g_bserrno == 0);
6757 		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
6758 		blobid = g_blobid;
6759 		SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, first_data_cluster));
6760 		SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, first_data_cluster + 10));
6761 
6762 		dev_set_power_failure_thresholds(thresholds);
6763 
6764 		/* Create snapshot */
6765 		spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
6766 		poll_threads();
6767 		create_snapshot_bserrno = g_bserrno;
6768 		snapshotid = g_blobid;
6769 		SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, first_data_cluster));
6770 		SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, first_data_cluster + 10));
6771 
6772 		/* Do not shut down cleanly. Assumption is that after create snapshot
6773 		 * reports success, both blobs should be power-fail safe. */
6774 		dev_reset_power_failure_event();
6775 		ut_bs_dirty_load(&bs, NULL);
6776 
6777 		SPDK_CU_ASSERT_FATAL(spdk_bit_pool_is_allocated(bs->used_clusters, first_data_cluster));
6778 		SPDK_CU_ASSERT_FATAL(!spdk_bit_pool_is_allocated(bs->used_clusters, first_data_cluster + 10));
6779 
6780 		spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
6781 		poll_threads();
6782 		CU_ASSERT(g_bserrno == 0);
6783 		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
6784 		blob = g_blob;
6785 
6786 		if (snapshotid != SPDK_BLOBID_INVALID) {
6787 			spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
6788 			poll_threads();
6789 		}
6790 
6791 		if ((snapshotid != SPDK_BLOBID_INVALID) && (g_bserrno == 0)) {
6792 			SPDK_CU_ASSERT_FATAL(g_blob != NULL);
6793 			snapshot = g_blob;
6794 			SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == true);
6795 			SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(snapshot) == false);
6796 			CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);
6797 			CU_ASSERT(spdk_blob_get_num_allocated_clusters(snapshot) == 10);
6798 			CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == snapshotid);
6799 			count = SPDK_COUNTOF(ids);
6800 			rc = spdk_blob_get_clones(bs, snapshotid, ids, &count);
6801 			CU_ASSERT(rc == 0);
6802 			CU_ASSERT(count == 1);
6803 			CU_ASSERT(ids[0] == blobid);
6804 			rc = spdk_blob_get_xattr_value(snapshot, SNAPSHOT_IN_PROGRESS, &value, &value_len);
6805 			CU_ASSERT(rc != 0);
6806 
6807 			spdk_blob_close(snapshot, blob_op_complete, NULL);
6808 			poll_threads();
6809 			CU_ASSERT(g_bserrno == 0);
6810 			if (create_snapshot_bserrno == 0) {
6811 				created = true;
6812 			}
6813 		} else {
6814 			CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid) == SPDK_BLOBID_INVALID);
6815 			SPDK_CU_ASSERT_FATAL(spdk_blob_is_thin_provisioned(blob) == false);
6816 			CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 10);
6817 		}
6818 
6819 		spdk_blob_close(blob, blob_op_complete, NULL);
6820 		poll_threads();
6821 		CU_ASSERT(g_bserrno == 0);
6822 
6823 		spdk_bs_unload(bs, bs_op_complete, NULL);
6824 		poll_threads();
6825 		CU_ASSERT(g_bserrno == 0);
6826 
6827 		thresholds.general_threshold++;
6828 	}
6829 }
6830 
6831 #define IO_UT_BLOCKS_PER_CLUSTER 32
6832 
6833 static void
6834 test_io_write(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
6835 {
6836 	const uint32_t SZ = IO_UT_BLOCKS_PER_CLUSTER;
6837 	uint8_t payload_ff[SZ * 512];
6838 	uint8_t payload_aa[SZ * 512];
6839 	uint8_t payload_00[SZ * 512];
6840 	uint8_t *cluster0, *cluster1;
6841 
6842 	memset(payload_ff, 0xFF, sizeof(payload_ff));
6843 	memset(payload_aa, 0xAA, sizeof(payload_aa));
6844 	memset(payload_00, 0x00, sizeof(payload_00));
6845 
6846 	/* Try to perform I/O with io unit = 512 */
6847 	spdk_blob_io_write(blob, channel, payload_ff, 0, 1, blob_op_complete, NULL);
6848 	poll_threads();
6849 	CU_ASSERT(g_bserrno == 0);
6850 
6851 	/* If thin provisioned is set cluster should be allocated now */
6852 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[0] != 0);
6853 	cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen];
6854 
6855 	/* Each character 0-F symbolizes single io_unit containing 512 bytes block filled with that character.
6856 	* Each page is separated by |. Whole block [...] symbolizes one cluster (containing 4 pages). */
6857 	/* cluster0: [ F000 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
6858 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6859 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, (SZ - 1) * 512) == 0);
6860 
6861 	/* Verify write with offset on first page */
6862 	spdk_blob_io_write(blob, channel, payload_ff, 2, 1, blob_op_complete, NULL);
6863 	poll_threads();
6864 	CU_ASSERT(g_bserrno == 0);
6865 
6866 	/* cluster0: [ F0F0 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
6867 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6868 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6869 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6870 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6871 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_00, (SZ - 4) * 512) == 0);
6872 
6873 	/* Verify write with offset on first page */
6874 	spdk_blob_io_write(blob, channel, payload_ff, 4, 4, blob_op_complete, NULL);
6875 	poll_threads();
6876 
6877 	/* cluster0: [ F0F0 FFFF | 0000 0000 | 0000 0000 | 0000 0000 ] */
6878 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6879 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6880 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6881 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6882 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 4 * 512) == 0);
6883 	CU_ASSERT(memcmp(cluster0 + 8 * 512, payload_00, (SZ - 8) * 512) == 0);
6884 
6885 	/* Verify write with offset on second page */
6886 	spdk_blob_io_write(blob, channel, payload_ff, 8, 4, blob_op_complete, NULL);
6887 	poll_threads();
6888 
6889 	/* cluster0: [ F0F0 FFFF | FFFF 0000 | 0000 0000 | 0000 0000 ] */
6890 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6891 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6892 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6893 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6894 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 8 * 512) == 0);
6895 	CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, (SZ - 12) * 512) == 0);
6896 
6897 	/* Verify write across multiple pages */
6898 	spdk_blob_io_write(blob, channel, payload_aa, 4, 8, blob_op_complete, NULL);
6899 	poll_threads();
6900 
6901 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 0000 ] */
6902 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6903 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6904 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6905 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6906 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
6907 	CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, (SZ - 12) * 512) == 0);
6908 
6909 	/* Verify write across multiple clusters */
6910 	spdk_blob_io_write(blob, channel, payload_ff, SZ - 4, 8, blob_op_complete, NULL);
6911 	poll_threads();
6912 
6913 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0);
6914 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
6915 
6916 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6917 	 * cluster1: [ FFFF 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
6918 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6919 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6920 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6921 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6922 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
6923 	CU_ASSERT(memcmp(cluster0 + (SZ - 4) * 512, payload_ff, 4 * 512) == 0);
6924 
6925 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0);
6926 	CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, (SZ - 4) * 512) == 0);
6927 
6928 	/* Verify write to second cluster */
6929 	spdk_blob_io_write(blob, channel, payload_ff, SZ + 12, 2, blob_op_complete, NULL);
6930 	poll_threads();
6931 
6932 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0);
6933 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
6934 
6935 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
6936 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] */
6937 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
6938 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
6939 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
6940 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
6941 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
6942 	CU_ASSERT(memcmp(cluster0 + (SZ - 4) * 512, payload_ff, 4 * 512) == 0);
6943 
6944 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0);
6945 	CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 8 * 512) == 0);
6946 	CU_ASSERT(memcmp(cluster1 + 12 * 512, payload_ff, 2 * 512) == 0);
6947 	CU_ASSERT(memcmp(cluster1 + 14 * 512, payload_00, (SZ - 14) * 512) == 0);
6948 }
6949 
/*
 * Exercise spdk_blob_io_read() with io_unit = 512 bytes against the data
 * layout established by test_io_write(): single-unit, offset, multi-page,
 * cluster-crossing, whole-cluster, and whole-blob reads. The expected
 * device contents are shown in the per-case diagrams; (...) marks the
 * region being read.
 */
static void
test_io_read(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
{
	const uint32_t SZ = IO_UT_BLOCKS_PER_CLUSTER;
	/* Large enough for the final two-cluster read. */
	uint8_t payload_read[2 * SZ * 512];
	uint8_t payload_ff[SZ * 512];
	uint8_t payload_aa[SZ * 512];
	uint8_t payload_00[SZ * 512];

	memset(payload_ff, 0xFF, sizeof(payload_ff));
	memset(payload_aa, 0xAA, sizeof(payload_aa));
	memset(payload_00, 0x00, sizeof(payload_00));

	/* Read only first io unit */
	/* cluster0: [ (F)0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
	 * payload_read: F000 0000 | 0000 0000 ... */
	memset(payload_read, 0x00, sizeof(payload_read));
	spdk_blob_io_read(blob, channel, payload_read, 0, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, (SZ - 1) * 512) == 0);

	/* Read four io_units starting from offset = 2
	 * cluster0: [ F0(F0 AA)AA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
	 * payload_read: F0AA 0000 | 0000 0000 ... */

	memset(payload_read, 0x00, sizeof(payload_read));
	spdk_blob_io_read(blob, channel, payload_read, 2, 4, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0);
	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_aa, 512) == 0);
	CU_ASSERT(memcmp(payload_read + 3 * 512, payload_aa, 512) == 0);
	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, (SZ - 4) * 512) == 0);

	/* Read eight io_units across multiple pages
	 * cluster0: [ F0F0 (AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ]
	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
	 * payload_read: AAAA AAAA | 0000 0000 ... */
	memset(payload_read, 0x00, sizeof(payload_read));
	spdk_blob_io_read(blob, channel, payload_read, 4, 8, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_aa, 8 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, (SZ - 8) * 512) == 0);

	/* Read eight io_units across multiple clusters
	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 (FFFF ]
	 * cluster1: [ FFFF) 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
	 * payload_read: FFFF FFFF | 0000 0000 ... */
	memset(payload_read, 0x00, sizeof(payload_read));
	spdk_blob_io_read(blob, channel, payload_read, SZ - 4, 8, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 8 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, (SZ - 8) * 512) == 0);

	/* Read four io_units from second cluster
	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
	 * cluster1: [ FFFF 0000 | 00(00 FF)00 | 0000 0000 | 0000 0000 ]
	 * payload_read: 00FF 0000 | 0000 0000 ... */
	memset(payload_read, 0x00, sizeof(payload_read));
	spdk_blob_io_read(blob, channel, payload_read, SZ + 10, 4, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_00, 2 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 2 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, (SZ - 4) * 512) == 0);

	/* Read second cluster
	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
	 * cluster1: [ (FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ]
	 * payload_read: FFFF 0000 | 0000 FF00 ... */
	memset(payload_read, 0x00, sizeof(payload_read));
	spdk_blob_io_read(blob, channel, payload_read, SZ, SZ, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 4 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 8 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + 12 * 512, payload_ff, 2 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + 14 * 512, payload_00, (SZ - 14) * 512) == 0);

	/* Read whole two clusters
	 * cluster0: [ (F0F0 AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ]
	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ] */
	memset(payload_read, 0x00, sizeof(payload_read));
	spdk_blob_io_read(blob, channel, payload_read, 0, SZ * 2, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0);
	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 512) == 0);
	CU_ASSERT(memcmp(payload_read + 3 * 512, payload_00, 512) == 0);
	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_aa, 8 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + (SZ - 4) * 512, payload_ff, 4 * 512) == 0);

	CU_ASSERT(memcmp(payload_read + (SZ + 0) * 512, payload_ff, 4 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + (SZ + 4) * 512, payload_00, 8 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + (SZ + 12) * 512, payload_ff, 2 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + (SZ + 14) * 512, payload_00, (SZ - 14) * 512) == 0);
}
7060 
7061 
7062 static void
7063 test_io_unmap(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
7064 {
7065 	const uint32_t SZ = IO_UT_BLOCKS_PER_CLUSTER;
7066 	uint8_t payload_ff[SZ * 512];
7067 	uint8_t payload_aa[SZ * 512];
7068 	uint8_t payload_00[SZ * 512];
7069 	uint8_t *cluster0, *cluster1;
7070 
7071 	memset(payload_ff, 0xFF, sizeof(payload_ff));
7072 	memset(payload_aa, 0xAA, sizeof(payload_aa));
7073 	memset(payload_00, 0x00, sizeof(payload_00));
7074 
7075 	cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen];
7076 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
7077 
7078 	/* Unmap */
7079 	spdk_blob_io_unmap(blob, channel, 0, SZ * 2, blob_op_complete, NULL);
7080 	poll_threads();
7081 
7082 	CU_ASSERT(g_bserrno == 0);
7083 
7084 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_00, SZ * 512) == 0);
7085 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_00, SZ * 512) == 0);
7086 }
7087 
7088 static void
7089 test_io_zeroes(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel)
7090 {
7091 	const uint32_t SZ = IO_UT_BLOCKS_PER_CLUSTER;
7092 	uint8_t payload_ff[SZ * 512];
7093 	uint8_t payload_aa[SZ * 512];
7094 	uint8_t payload_00[SZ * 512];
7095 	uint8_t *cluster0, *cluster1;
7096 
7097 	memset(payload_ff, 0xFF, sizeof(payload_ff));
7098 	memset(payload_aa, 0xAA, sizeof(payload_aa));
7099 	memset(payload_00, 0x00, sizeof(payload_00));
7100 
7101 	cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen];
7102 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
7103 
7104 	/* Write zeroes  */
7105 	spdk_blob_io_write_zeroes(blob, channel, 0, SZ * 2, blob_op_complete, NULL);
7106 	poll_threads();
7107 
7108 	CU_ASSERT(g_bserrno == 0);
7109 
7110 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_00, SZ * 512) == 0);
7111 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_00, SZ * 512) == 0);
7112 }
7113 
7114 static inline void
7115 test_blob_io_writev(struct spdk_blob *blob, struct spdk_io_channel *channel,
7116 		    struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
7117 		    spdk_blob_op_complete cb_fn, void *cb_arg, struct spdk_blob_ext_io_opts *io_opts)
7118 {
7119 	if (io_opts) {
7120 		g_dev_writev_ext_called = false;
7121 		memset(&g_blob_ext_io_opts, 0, sizeof(g_blob_ext_io_opts));
7122 		spdk_blob_io_writev_ext(blob, channel, iov, iovcnt, offset, length, blob_op_complete, NULL,
7123 					io_opts);
7124 	} else {
7125 		spdk_blob_io_writev(blob, channel, iov, iovcnt, offset, length, blob_op_complete, NULL);
7126 	}
7127 	poll_threads();
7128 	CU_ASSERT(g_bserrno == 0);
7129 	if (io_opts) {
7130 		CU_ASSERT(g_dev_writev_ext_called);
7131 		CU_ASSERT(memcmp(io_opts, &g_blob_ext_io_opts, sizeof(g_blob_ext_io_opts)) == 0);
7132 	}
7133 }
7134 
7135 static void
7136 test_iov_write(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel,
7137 	       bool ext_api)
7138 {
7139 	const uint32_t SZ = IO_UT_BLOCKS_PER_CLUSTER;
7140 	uint8_t payload_ff[SZ * 512];
7141 	uint8_t payload_aa[SZ * 512];
7142 	uint8_t payload_00[SZ * 512];
7143 	uint8_t *cluster0, *cluster1;
7144 	struct iovec iov[4];
7145 	struct spdk_blob_ext_io_opts ext_opts = {
7146 		.memory_domain = (struct spdk_memory_domain *)0xfeedbeef,
7147 		.memory_domain_ctx = (void *)0xf00df00d,
7148 		.size = sizeof(struct spdk_blob_ext_io_opts),
7149 		.user_ctx = (void *)123,
7150 	};
7151 
7152 	memset(payload_ff, 0xFF, sizeof(payload_ff));
7153 	memset(payload_aa, 0xAA, sizeof(payload_aa));
7154 	memset(payload_00, 0x00, sizeof(payload_00));
7155 
7156 	/* Try to perform I/O with io unit = 512 */
7157 	iov[0].iov_base = payload_ff;
7158 	iov[0].iov_len = 1 * 512;
7159 
7160 	test_blob_io_writev(blob, channel, iov, 1, 0, 1, blob_op_complete, NULL,
7161 			    ext_api ? &ext_opts : NULL);
7162 
7163 	/* If thin provisioned is set cluster should be allocated now */
7164 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[0] != 0);
7165 	cluster0 = &g_dev_buffer[blob->active.clusters[0] * dev->blocklen];
7166 
7167 	/* Each character 0-F symbolizes single io_unit containing 512 bytes block filled with that character.
7168 	* Each page is separated by |. Whole block [...] symbolizes one cluster (containing 4 pages). */
7169 	/* cluster0: [ F000 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
7170 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
7171 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, (SZ - 1) * 512) == 0);
7172 
7173 	/* Verify write with offset on first page */
7174 	iov[0].iov_base = payload_ff;
7175 	iov[0].iov_len = 1 * 512;
7176 
7177 	test_blob_io_writev(blob, channel, iov, 1, 2, 1, blob_op_complete, NULL,
7178 			    ext_api ? &ext_opts : NULL);
7179 
7180 	/* cluster0: [ F0F0 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
7181 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
7182 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
7183 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
7184 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
7185 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_00, (SZ - 4) * 512) == 0);
7186 
7187 	/* Verify write with offset on first page */
7188 	iov[0].iov_base = payload_ff;
7189 	iov[0].iov_len = 4 * 512;
7190 	spdk_blob_io_writev(blob, channel, iov, 1, 4, 4, blob_op_complete, NULL);
7191 	poll_threads();
7192 
7193 	/* cluster0: [ F0F0 FFFF | 0000 0000 | 0000 0000 | 0000 0000 ] */
7194 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
7195 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
7196 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
7197 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
7198 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 4 * 512) == 0);
7199 	CU_ASSERT(memcmp(cluster0 + 8 * 512, payload_00, (SZ - 8) * 512) == 0);
7200 
7201 	/* Verify write with offset on second page */
7202 	iov[0].iov_base = payload_ff;
7203 	iov[0].iov_len = 4 * 512;
7204 	spdk_blob_io_writev(blob, channel, iov, 1, 8, 4, blob_op_complete, NULL);
7205 	poll_threads();
7206 
7207 	/* cluster0: [ F0F0 FFFF | FFFF 0000 | 0000 0000 | 0000 0000 ] */
7208 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
7209 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
7210 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
7211 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
7212 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_ff, 8 * 512) == 0);
7213 	CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, (SZ - 12) * 512) == 0);
7214 
7215 	/* Verify write across multiple pages */
7216 	iov[0].iov_base = payload_aa;
7217 	iov[0].iov_len = 8 * 512;
7218 
7219 	test_blob_io_writev(blob, channel, iov, 1, 4, 8, blob_op_complete, NULL,
7220 			    ext_api ? &ext_opts : NULL);
7221 
7222 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 0000 ] */
7223 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
7224 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
7225 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
7226 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
7227 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
7228 	CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, (SZ - 12) * 512) == 0);
7229 
7230 	/* Verify write across multiple clusters */
7231 
7232 	iov[0].iov_base = payload_ff;
7233 	iov[0].iov_len = 8 * 512;
7234 
7235 	test_blob_io_writev(blob, channel, iov, 1, (SZ - 4), 8, blob_op_complete, NULL,
7236 			    ext_api ? &ext_opts : NULL);
7237 
7238 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0);
7239 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
7240 
7241 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
7242 	 * cluster1: [ FFFF 0000 | 0000 0000 | 0000 0000 | 0000 0000 ] */
7243 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
7244 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
7245 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
7246 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
7247 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
7248 	CU_ASSERT(memcmp(cluster0 + 12 * 512, payload_00, (SZ - 16) * 512) == 0);
7249 	CU_ASSERT(memcmp(cluster0 + (SZ - 4) * 512, payload_ff, 4 * 512) == 0);
7250 
7251 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0);
7252 	CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, (SZ - 4) * 512) == 0);
7253 
7254 	/* Verify write to second cluster */
7255 
7256 	iov[0].iov_base = payload_ff;
7257 	iov[0].iov_len = 2 * 512;
7258 
7259 	test_blob_io_writev(blob, channel, iov, 1, SZ + 12, 2, blob_op_complete, NULL,
7260 			    ext_api ? &ext_opts : NULL);
7261 
7262 	SPDK_CU_ASSERT_FATAL(blob->active.clusters[1] != 0);
7263 	cluster1 = &g_dev_buffer[blob->active.clusters[1] * dev->blocklen];
7264 
7265 	/* cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
7266 	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ] */
7267 	CU_ASSERT(memcmp(cluster0 + 0 * 512, payload_ff, 512) == 0);
7268 	CU_ASSERT(memcmp(cluster0 + 1 * 512, payload_00, 512) == 0);
7269 	CU_ASSERT(memcmp(cluster0 + 2 * 512, payload_ff, 512) == 0);
7270 	CU_ASSERT(memcmp(cluster0 + 3 * 512, payload_00, 512) == 0);
7271 	CU_ASSERT(memcmp(cluster0 + 4 * 512, payload_aa, 8 * 512) == 0);
7272 	CU_ASSERT(memcmp(cluster0 + (SZ - 4) * 512, payload_ff, 4 * 512) == 0);
7273 
7274 	CU_ASSERT(memcmp(cluster1 + 0 * 512, payload_ff, 4 * 512) == 0);
7275 	CU_ASSERT(memcmp(cluster1 + 4 * 512, payload_00, 8 * 512) == 0);
7276 	CU_ASSERT(memcmp(cluster1 + 12 * 512, payload_ff, 2 * 512) == 0);
7277 	CU_ASSERT(memcmp(cluster1 + 14 * 512, payload_00, (SZ - 14) * 512) == 0);
7278 }
7279 
7280 static inline void
7281 test_blob_io_readv(struct spdk_blob *blob, struct spdk_io_channel *channel,
7282 		   struct iovec *iov, int iovcnt, uint64_t offset, uint64_t length,
7283 		   spdk_blob_op_complete cb_fn, void *cb_arg, struct spdk_blob_ext_io_opts *io_opts)
7284 {
7285 	if (io_opts) {
7286 		g_dev_readv_ext_called = false;
7287 		memset(&g_blob_ext_io_opts, 0, sizeof(g_blob_ext_io_opts));
7288 		spdk_blob_io_readv_ext(blob, channel, iov, iovcnt, offset, length, blob_op_complete, NULL, io_opts);
7289 	} else {
7290 		spdk_blob_io_readv(blob, channel, iov, iovcnt, offset, length, blob_op_complete, NULL);
7291 	}
7292 	poll_threads();
7293 	CU_ASSERT(g_bserrno == 0);
7294 	if (io_opts) {
7295 		CU_ASSERT(g_dev_readv_ext_called);
7296 		CU_ASSERT(memcmp(io_opts, &g_blob_ext_io_opts, sizeof(g_blob_ext_io_opts)) == 0);
7297 	}
7298 }
7299 
/*
 * Exercise vectored reads from a blob at io_unit (512-byte) granularity,
 * assuming the on-disk layout previously established by test_iov_write():
 *   cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
 *   cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
 * Reads cover: a single unit, sub-page spans, page-spanning, cluster-spanning
 * and whole-blob spans, each split across 1-4 iovecs.
 * When ext_api is true, test_blob_io_readv() uses the *_ext API and verifies
 * propagation of the extended I/O options.
 */
static void
test_iov_read(struct spdk_bs_dev *dev, struct spdk_blob *blob, struct spdk_io_channel *channel,
	      bool ext_api)
{
	const uint32_t SZ = IO_UT_BLOCKS_PER_CLUSTER;
	uint8_t payload_read[2 * SZ * 512];
	uint8_t payload_ff[SZ * 512];
	uint8_t payload_aa[SZ * 512];
	uint8_t payload_00[SZ * 512];
	struct iovec iov[4];
	/* Sentinel pointers/values only; compared against what reaches the
	 * bs_dev layer, never dereferenced. */
	struct spdk_blob_ext_io_opts ext_opts = {
		.memory_domain = (struct spdk_memory_domain *)0xfeedbeef,
		.memory_domain_ctx = (void *)0xf00df00d,
		.size = sizeof(struct spdk_blob_ext_io_opts),
		.user_ctx = (void *)123,
	};

	memset(payload_ff, 0xFF, sizeof(payload_ff));
	memset(payload_aa, 0xAA, sizeof(payload_aa));
	memset(payload_00, 0x00, sizeof(payload_00));

	/* Read only first io unit */
	/* cluster0: [ (F)0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
	 * payload_read: F000 0000 | 0000 0000 ... */
	memset(payload_read, 0x00, sizeof(payload_read));
	iov[0].iov_base = payload_read;
	iov[0].iov_len = 1 * 512;

	test_blob_io_readv(blob, channel, iov, 1, 0, 1, blob_op_complete, NULL, ext_api ? &ext_opts : NULL);

	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, (SZ - 1) * 512) == 0);

	/* Read four io_units starting from offset = 2
	 * cluster0: [ F0(F0 AA)AA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
	 * payload_read: F0AA 0000 | 0000 0000 ... */

	memset(payload_read, 0x00, sizeof(payload_read));
	iov[0].iov_base = payload_read;
	iov[0].iov_len = 4 * 512;

	test_blob_io_readv(blob, channel, iov, 1, 2, 4, blob_op_complete, NULL, ext_api ? &ext_opts : NULL);

	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0);
	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_aa, 512) == 0);
	CU_ASSERT(memcmp(payload_read + 3 * 512, payload_aa, 512) == 0);
	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, (SZ - 4) * 512) == 0);

	/* Read eight io_units across multiple pages, split over two iovecs
	 * cluster0: [ F0F0 (AAAA | AAAA) 0000 | 0000 0000 | 0000 FFFF ]
	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
	 * payload_read: AAAA AAAA | 0000 0000 ... */
	memset(payload_read, 0x00, sizeof(payload_read));
	iov[0].iov_base = payload_read;
	iov[0].iov_len = 4 * 512;
	iov[1].iov_base = payload_read + 4 * 512;
	iov[1].iov_len = 4 * 512;

	test_blob_io_readv(blob, channel, iov, 2, 4, 8, blob_op_complete, NULL, ext_api ? &ext_opts : NULL);

	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_aa, 8 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, (SZ - 8) * 512) == 0);

	/* Read eight io_units across multiple clusters, split over four iovecs
	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 (FFFF ]
	 * cluster1: [ FFFF) 0000 | 0000 FF00 | 0000 0000 | 0000 0000 ]
	 * payload_read: FFFF FFFF | 0000 0000 ... */
	memset(payload_read, 0x00, sizeof(payload_read));
	iov[0].iov_base = payload_read;
	iov[0].iov_len = 2 * 512;
	iov[1].iov_base = payload_read + 2 * 512;
	iov[1].iov_len = 2 * 512;
	iov[2].iov_base = payload_read + 4 * 512;
	iov[2].iov_len = 2 * 512;
	iov[3].iov_base = payload_read + 6 * 512;
	iov[3].iov_len = 2 * 512;

	test_blob_io_readv(blob, channel, iov, 4, SZ - 4, 8, blob_op_complete, NULL,
			   ext_api ? &ext_opts : NULL);

	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 8 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + 8 * 512, payload_00, (SZ - 8) * 512) == 0);

	/* Read four io_units from second cluster
	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
	 * cluster1: [ FFFF 0000 | 00(00 FF)00 | 0000 0000 | 0000 0000 ]
	 * payload_read: 00FF 0000 | 0000 0000 ... */
	memset(payload_read, 0x00, sizeof(payload_read));
	iov[0].iov_base = payload_read;
	iov[0].iov_len = 1 * 512;
	iov[1].iov_base = payload_read + 1 * 512;
	iov[1].iov_len = 3 * 512;

	test_blob_io_readv(blob, channel, iov, 2, SZ + 10, 4, blob_op_complete, NULL,
			   ext_api ? &ext_opts : NULL);

	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_00, 2 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 2 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, (SZ - 4) * 512) == 0);

	/* Read whole second cluster through four unequal iovecs
	 * cluster0: [ F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
	 * cluster1: [ (FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ]
	 * payload_read: FFFF 0000 | 0000 FF00 ... */
	memset(payload_read, 0x00, sizeof(payload_read));
	iov[0].iov_base = payload_read;
	iov[0].iov_len = 1 * 512;
	iov[1].iov_base = payload_read + 1 * 512;
	iov[1].iov_len = 2 * 512;
	iov[2].iov_base = payload_read + 3 * 512;
	iov[2].iov_len = 4 * 512;
	iov[3].iov_base = payload_read + 7 * 512;
	iov[3].iov_len = (SZ - 7) * 512;

	test_blob_io_readv(blob, channel, iov, 4, SZ, SZ, blob_op_complete, NULL,
			   ext_api ? &ext_opts : NULL);

	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 4 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_00, 8 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + 12 * 512, payload_ff, 2 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + 14 * 512, payload_00, (SZ - 14) * 512) == 0);

	/* Read whole two clusters
	 * cluster0: [ (F0F0 AAAA | AAAA 0000 | 0000 0000 | 0000 FFFF ]
	 * cluster1: [ FFFF 0000 | 0000 FF00 | 0000 0000 | 0000 0000) ] */
	memset(payload_read, 0x00, sizeof(payload_read));
	iov[0].iov_base = payload_read;
	iov[0].iov_len = 1 * 512;
	iov[1].iov_base = payload_read + 1 * 512;
	iov[1].iov_len = 8 * 512;
	iov[2].iov_base = payload_read + 9 * 512;
	iov[2].iov_len = 16 * 512;
	iov[3].iov_base = payload_read + 25 * 512;
	iov[3].iov_len = (2 * SZ - 25) * 512;

	test_blob_io_readv(blob, channel, iov, 4, 0, SZ * 2, blob_op_complete, NULL,
			   ext_api ? &ext_opts : NULL);

	CU_ASSERT(memcmp(payload_read + 0 * 512, payload_ff, 512) == 0);
	CU_ASSERT(memcmp(payload_read + 1 * 512, payload_00, 512) == 0);
	CU_ASSERT(memcmp(payload_read + 2 * 512, payload_ff, 512) == 0);
	CU_ASSERT(memcmp(payload_read + 3 * 512, payload_00, 512) == 0);
	CU_ASSERT(memcmp(payload_read + 4 * 512, payload_aa, 8 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + (SZ - 4) * 512, payload_ff, 4 * 512) == 0);

	CU_ASSERT(memcmp(payload_read + (SZ + 0) * 512, payload_ff, 4 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + (SZ + 4) * 512, payload_00, 8 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + (SZ + 12) * 512, payload_ff, 2 * 512) == 0);
	CU_ASSERT(memcmp(payload_read + (SZ + 14) * 512, payload_00, (SZ - 14) * 512) == 0);
}
7453 
7454 static void
7455 blob_io_unit(void)
7456 {
7457 	struct spdk_bs_opts bsopts;
7458 	struct spdk_blob_opts opts;
7459 	struct spdk_blob_store *bs;
7460 	struct spdk_bs_dev *dev;
7461 	struct spdk_blob *blob, *snapshot, *clone;
7462 	spdk_blob_id blobid;
7463 	struct spdk_io_channel *channel;
7464 
7465 	/* Create dev with 512 bytes io unit size */
7466 
7467 	spdk_bs_opts_init(&bsopts, sizeof(bsopts));
7468 	bsopts.cluster_sz = IO_UT_BLOCKS_PER_CLUSTER * 512;
7469 	snprintf(bsopts.bstype.bstype, sizeof(bsopts.bstype.bstype), "TESTTYPE");
7470 
7471 	/* Try to initialize a new blob store with unsupported io_unit */
7472 	dev = init_dev();
7473 	dev->blocklen = 512;
7474 	dev->blockcnt =  DEV_BUFFER_SIZE / dev->blocklen;
7475 
7476 	/* Initialize a new blob store */
7477 	spdk_bs_init(dev, &bsopts, bs_op_with_handle_complete, NULL);
7478 	poll_threads();
7479 	CU_ASSERT(g_bserrno == 0);
7480 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
7481 	bs = g_bs;
7482 
7483 	CU_ASSERT(spdk_bs_get_io_unit_size(bs) == 512);
7484 	channel = spdk_bs_alloc_io_channel(bs);
7485 
7486 	/* Create thick provisioned blob */
7487 	ut_spdk_blob_opts_init(&opts);
7488 	opts.thin_provision = false;
7489 	opts.num_clusters = 32;
7490 
7491 	blob = ut_blob_create_and_open(bs, &opts);
7492 	blobid = spdk_blob_get_id(blob);
7493 
7494 	test_io_write(dev, blob, channel);
7495 	test_io_read(dev, blob, channel);
7496 	test_io_zeroes(dev, blob, channel);
7497 
7498 	test_iov_write(dev, blob, channel, false);
7499 	test_iov_read(dev, blob, channel, false);
7500 	test_io_zeroes(dev, blob, channel);
7501 
7502 	test_iov_write(dev, blob, channel, true);
7503 	test_iov_read(dev, blob, channel, true);
7504 
7505 	test_io_unmap(dev, blob, channel);
7506 
7507 	spdk_blob_close(blob, blob_op_complete, NULL);
7508 	poll_threads();
7509 	CU_ASSERT(g_bserrno == 0);
7510 	blob = NULL;
7511 	g_blob = NULL;
7512 
7513 	/* Create thin provisioned blob */
7514 
7515 	ut_spdk_blob_opts_init(&opts);
7516 	opts.thin_provision = true;
7517 	opts.num_clusters = 32;
7518 
7519 	blob = ut_blob_create_and_open(bs, &opts);
7520 	blobid = spdk_blob_get_id(blob);
7521 
7522 	test_io_write(dev, blob, channel);
7523 	test_io_read(dev, blob, channel);
7524 	test_io_zeroes(dev, blob, channel);
7525 
7526 	test_iov_write(dev, blob, channel, false);
7527 	test_iov_read(dev, blob, channel, false);
7528 	test_io_zeroes(dev, blob, channel);
7529 
7530 	test_iov_write(dev, blob, channel, true);
7531 	test_iov_read(dev, blob, channel, true);
7532 
7533 	/* Create snapshot */
7534 
7535 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
7536 	poll_threads();
7537 	CU_ASSERT(g_bserrno == 0);
7538 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
7539 	blobid = g_blobid;
7540 
7541 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
7542 	poll_threads();
7543 	CU_ASSERT(g_bserrno == 0);
7544 	CU_ASSERT(g_blob != NULL);
7545 	snapshot = g_blob;
7546 
7547 	spdk_bs_create_clone(bs, blobid, NULL, blob_op_with_id_complete, NULL);
7548 	poll_threads();
7549 	CU_ASSERT(g_bserrno == 0);
7550 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
7551 	blobid = g_blobid;
7552 
7553 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
7554 	poll_threads();
7555 	CU_ASSERT(g_bserrno == 0);
7556 	CU_ASSERT(g_blob != NULL);
7557 	clone = g_blob;
7558 
7559 	test_io_read(dev, blob, channel);
7560 	test_io_read(dev, snapshot, channel);
7561 	test_io_read(dev, clone, channel);
7562 
7563 	test_iov_read(dev, blob, channel, false);
7564 	test_iov_read(dev, snapshot, channel, false);
7565 	test_iov_read(dev, clone, channel, false);
7566 
7567 	test_iov_read(dev, blob, channel, true);
7568 	test_iov_read(dev, snapshot, channel, true);
7569 	test_iov_read(dev, clone, channel, true);
7570 
7571 	/* Inflate clone */
7572 
7573 	spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
7574 	poll_threads();
7575 
7576 	CU_ASSERT(g_bserrno == 0);
7577 
7578 	test_io_read(dev, clone, channel);
7579 
7580 	test_io_unmap(dev, clone, channel);
7581 
7582 	test_iov_write(dev, clone, channel, false);
7583 	test_iov_read(dev, clone, channel, false);
7584 	test_io_unmap(dev, clone, channel);
7585 
7586 	test_iov_write(dev, clone, channel, true);
7587 	test_iov_read(dev, clone, channel, true);
7588 
7589 	spdk_blob_close(blob, blob_op_complete, NULL);
7590 	spdk_blob_close(snapshot, blob_op_complete, NULL);
7591 	spdk_blob_close(clone, blob_op_complete, NULL);
7592 	poll_threads();
7593 	CU_ASSERT(g_bserrno == 0);
7594 	blob = NULL;
7595 	g_blob = NULL;
7596 
7597 	spdk_bs_free_io_channel(channel);
7598 	poll_threads();
7599 
7600 	/* Unload the blob store */
7601 	spdk_bs_unload(bs, bs_op_complete, NULL);
7602 	poll_threads();
7603 	CU_ASSERT(g_bserrno == 0);
7604 	g_bs = NULL;
7605 	g_blob = NULL;
7606 	g_blobid = 0;
7607 }
7608 
/*
 * Backward-compatibility check for the io_unit_size super-block field:
 * a blobstore created on a 512-byte-block device reports a 512-byte io unit,
 * but if the super block is rewritten with io_unit_size == 0 (as written by
 * older versions), a reload must fall back to SPDK_BS_PAGE_SIZE.
 */
static void
blob_io_unit_compatibility(void)
{
	struct spdk_bs_opts bsopts;
	struct spdk_blob_store *bs;
	struct spdk_bs_dev *dev;
	struct spdk_bs_super_block *super;

	/* Create dev with 512 bytes io unit size */

	spdk_bs_opts_init(&bsopts, sizeof(bsopts));
	bsopts.cluster_sz = g_phys_blocklen * 4;
	snprintf(bsopts.bstype.bstype, sizeof(bsopts.bstype.bstype), "TESTTYPE");

	/* Use a backing device with 512-byte blocks */
	dev = init_dev();
	dev->blocklen = 512;
	dev->blockcnt =  DEV_BUFFER_SIZE / dev->blocklen;

	/* Initialize a new blob store */
	spdk_bs_init(dev, &bsopts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	CU_ASSERT(spdk_bs_get_io_unit_size(bs) == 512);

	/* Unload the blob store */
	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Modify super block to behave like older version.
	 * Check if loaded io unit size equals SPDK_BS_PAGE_SIZE */
	super = (struct spdk_bs_super_block *)&g_dev_buffer[0];
	super->io_unit_size = 0;
	super->crc = blob_md_page_calc_crc(super);

	dev = init_dev();
	dev->blocklen = 512;
	dev->blockcnt =  DEV_BUFFER_SIZE / dev->blocklen;

	spdk_bs_load(dev, &bsopts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	bs = g_bs;

	CU_ASSERT(spdk_bs_get_io_unit_size(bs) == SPDK_BS_PAGE_SIZE);

	/* Unload the blob store */
	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	g_bs = NULL;
	g_blob = NULL;
	g_blobid = 0;
}
7669 
7670 static void
7671 first_sync_complete(void *cb_arg, int bserrno)
7672 {
7673 	struct spdk_blob *blob = cb_arg;
7674 	int rc;
7675 
7676 	CU_ASSERT(bserrno == 0);
7677 	rc = spdk_blob_set_xattr(blob, "sync", "second", strlen("second") + 1);
7678 	CU_ASSERT(rc == 0);
7679 	CU_ASSERT(g_bserrno == -1);
7680 
7681 	/* Keep g_bserrno at -1, only the
7682 	 * second sync completion should set it at 0. */
7683 }
7684 
7685 static void
7686 second_sync_complete(void *cb_arg, int bserrno)
7687 {
7688 	struct spdk_blob *blob = cb_arg;
7689 	const void *value;
7690 	size_t value_len;
7691 	int rc;
7692 
7693 	CU_ASSERT(bserrno == 0);
7694 
7695 	/* Verify that the first sync completion had a chance to execute */
7696 	rc = spdk_blob_get_xattr_value(blob, "sync", &value, &value_len);
7697 	CU_ASSERT(rc == 0);
7698 	SPDK_CU_ASSERT_FATAL(value != NULL);
7699 	CU_ASSERT(value_len == strlen("second") + 1);
7700 	CU_ASSERT_NSTRING_EQUAL_FATAL(value, "second", value_len);
7701 
7702 	CU_ASSERT(g_bserrno == -1);
7703 	g_bserrno = bserrno;
7704 }
7705 
/*
 * Verify that starting a second locked operation (delete) while another
 * locked operation (snapshot / inflate / clone / resize) is still in flight
 * fails immediately with -EBUSY without disturbing the first operation,
 * and that two back-to-back md syncs on the same blob both complete in order
 * (see first_sync_complete()/second_sync_complete()).
 */
static void
blob_simultaneous_operations(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts opts;
	struct spdk_blob *blob, *snapshot;
	spdk_blob_id blobid, snapshotid;
	struct spdk_io_channel *channel;
	int rc;

	channel = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(channel != NULL);

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);

	/* Create snapshot and try to remove blob in the same time:
	 * - snapshot should be created successfully
	 * - delete operation should fail w -EBUSY */
	CU_ASSERT(blob->locked_operation_in_progress == false);
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	/* Snapshot creation takes the lock synchronously, before any polling */
	CU_ASSERT(blob->locked_operation_in_progress == true);
	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
	CU_ASSERT(blob->locked_operation_in_progress == true);
	/* Deletion failure */
	CU_ASSERT(g_bserrno == -EBUSY);
	poll_threads();
	CU_ASSERT(blob->locked_operation_in_progress == false);
	/* Snapshot creation success */
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);

	snapshotid = g_blobid;

	spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snapshot = g_blob;

	/* Inflate blob and try to remove blob in the same time:
	 * - blob should be inflated successfully
	 * - delete operation should fail w -EBUSY */
	CU_ASSERT(blob->locked_operation_in_progress == false);
	spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
	CU_ASSERT(blob->locked_operation_in_progress == true);
	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
	CU_ASSERT(blob->locked_operation_in_progress == true);
	/* Deletion failure */
	CU_ASSERT(g_bserrno == -EBUSY);
	poll_threads();
	CU_ASSERT(blob->locked_operation_in_progress == false);
	/* Inflation success */
	CU_ASSERT(g_bserrno == 0);

	/* Clone snapshot and try to remove snapshot in the same time:
	 * - snapshot should be cloned successfully
	 * - delete operation should fail w -EBUSY */
	CU_ASSERT(blob->locked_operation_in_progress == false);
	spdk_bs_create_clone(bs, snapshotid, NULL, blob_op_with_id_complete, NULL);
	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
	/* Deletion failure */
	CU_ASSERT(g_bserrno == -EBUSY);
	poll_threads();
	CU_ASSERT(blob->locked_operation_in_progress == false);
	/* Clone created */
	CU_ASSERT(g_bserrno == 0);

	/* Resize blob and try to remove blob in the same time:
	 * - blob should be resized successfully
	 * - delete operation should fail w -EBUSY */
	CU_ASSERT(blob->locked_operation_in_progress == false);
	spdk_blob_resize(blob, 50, blob_op_complete, NULL);
	CU_ASSERT(blob->locked_operation_in_progress == true);
	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
	CU_ASSERT(blob->locked_operation_in_progress == true);
	/* Deletion failure */
	CU_ASSERT(g_bserrno == -EBUSY);
	poll_threads();
	CU_ASSERT(blob->locked_operation_in_progress == false);
	/* Blob resized successfully */
	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Issue two consecutive blob syncs, neither should fail.
	 * Force sync to actually occur by marking blob dirty each time.
	 * Execution of sync should not be enough to complete the operation,
	 * since disk I/O is required to complete it. */
	g_bserrno = -1;

	rc = spdk_blob_set_xattr(blob, "sync", "first", strlen("first") + 1);
	CU_ASSERT(rc == 0);
	spdk_blob_sync_md(blob, first_sync_complete, blob);
	CU_ASSERT(g_bserrno == -1);

	spdk_blob_sync_md(blob, second_sync_complete, blob);
	CU_ASSERT(g_bserrno == -1);

	/* Both completions fire here; second_sync_complete() sets g_bserrno */
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	spdk_bs_free_io_channel(channel);
	poll_threads();

	ut_blob_close_and_delete(bs, snapshot);
	ut_blob_close_and_delete(bs, blob);
}
7817 
/*
 * Verify that an md sync interrupted mid-flight cannot leak metadata pages
 * or persist stale state: an oversized xattr (spilling past a single md page)
 * is set and synced, the sync is cut short after a growing number of poller
 * iterations, the xattr is removed and synced again, and after a full
 * blobstore reload the xattr must be gone and the md page accounting must
 * match the xattr-free baseline.
 */
static void
blob_persist_test(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts opts;
	struct spdk_blob *blob;
	spdk_blob_id blobid;
	struct spdk_io_channel *channel;
	char *xattr;
	size_t xattr_length;
	int rc;
	uint32_t page_count_clear, page_count_xattr;
	uint64_t poller_iterations;
	bool run_poller;

	channel = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(channel != NULL);

	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = 10;

	blob = ut_blob_create_and_open(bs, &opts);
	blobid = spdk_blob_get_id(blob);

	/* Save the amount of md pages used after creation of a blob.
	 * This should be consistent after removing xattr. */
	page_count_clear = spdk_bit_array_count_set(bs->used_md_pages);
	SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_clear);
	SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_clear);

	/* Add xattr with maximum length of descriptor to exceed single metadata page. */
	xattr_length = SPDK_BS_MAX_DESC_SIZE - sizeof(struct spdk_blob_md_descriptor_xattr) -
		       strlen("large_xattr");
	xattr = calloc(xattr_length, sizeof(char));
	SPDK_CU_ASSERT_FATAL(xattr != NULL);

	rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
	SPDK_CU_ASSERT_FATAL(rc == 0);
	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(g_bserrno == 0);

	/* Save the amount of md pages used after adding the large xattr */
	page_count_xattr = spdk_bit_array_count_set(bs->used_md_pages);
	SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_xattr);
	SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_xattr);

	/* Add xattr to a blob and sync it. While sync is occurring, remove the xattr and sync again.
	 * Interrupt the first sync after increasing number of poller iterations, until it succeeds.
	 * Expectation is that after second sync completes no xattr is saved in metadata. */
	poller_iterations = 1;
	run_poller = true;
	while (run_poller) {
		rc = spdk_blob_set_xattr(blob, "large_xattr", xattr, xattr_length);
		SPDK_CU_ASSERT_FATAL(rc == 0);
		g_bserrno = -1;
		spdk_blob_sync_md(blob, blob_op_complete, NULL);
		/* Advance the sync only part-way; each pass allows one more step */
		poll_thread_times(0, poller_iterations);
		if (g_bserrno == 0) {
			/* Poller iteration count was high enough for first sync to complete.
			 * Verify that blob takes up enough of md_pages to store the xattr. */
			SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_xattr);
			SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_xattr);
			SPDK_CU_ASSERT_FATAL(spdk_bit_array_count_set(bs->used_md_pages) == page_count_xattr);
			/* First sync finally ran to completion - last loop pass */
			run_poller = false;
		}
		rc = spdk_blob_remove_xattr(blob, "large_xattr");
		SPDK_CU_ASSERT_FATAL(rc == 0);
		spdk_blob_sync_md(blob, blob_op_complete, NULL);
		poll_threads();
		SPDK_CU_ASSERT_FATAL(g_bserrno == 0);
		/* Page accounting must be back to the xattr-free baseline */
		SPDK_CU_ASSERT_FATAL(blob->active.num_pages + blob->active.num_extent_pages == page_count_clear);
		SPDK_CU_ASSERT_FATAL(blob->clean.num_pages + blob->clean.num_extent_pages == page_count_clear);
		SPDK_CU_ASSERT_FATAL(spdk_bit_array_count_set(bs->used_md_pages) == page_count_clear);

		/* Reload bs and re-open blob to verify that xattr was not persisted. */
		spdk_blob_close(blob, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);

		ut_bs_reload(&bs, NULL);

		spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
		blob = g_blob;

		rc = spdk_blob_get_xattr_value(blob, "large_xattr", (const void **)&xattr, &xattr_length);
		SPDK_CU_ASSERT_FATAL(rc == -ENOENT);

		poller_iterations++;
		/* Stop at high iteration count to prevent infinite loop.
		 * This value should be enough for first md sync to complete in any case. */
		SPDK_CU_ASSERT_FATAL(poller_iterations < 50);
	}

	free(xattr);

	ut_blob_close_and_delete(bs, blob);

	spdk_bs_free_io_channel(channel);
	poll_threads();
}
7922 
static void
blob_decouple_snapshot(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts opts;
	struct spdk_blob *blob, *snapshot1, *snapshot2;
	struct spdk_io_channel *channel;
	spdk_blob_id blobid, snapshotid;
	uint64_t cluster;

	/* Verify that decoupling a snapshot from its parent copies every cluster
	 * into the decoupled snapshot.  Run the scenario twice to exercise both
	 * teardown orders: snapshots deleted before the blob, and vice versa. */
	for (int delete_snapshot_first = 0; delete_snapshot_first <= 1; delete_snapshot_first++) {
		channel = spdk_bs_alloc_io_channel(bs);
		SPDK_CU_ASSERT_FATAL(channel != NULL);

		/* Thick-provisioned so all 10 clusters are allocated up front. */
		ut_spdk_blob_opts_init(&opts);
		opts.num_clusters = 10;
		opts.thin_provision = false;

		blob = ut_blob_create_and_open(bs, &opts);
		blobid = spdk_blob_get_id(blob);

		/* Create first snapshot */
		CU_ASSERT_EQUAL(_get_snapshots_count(bs), 0);
		spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
		CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
		snapshotid = g_blobid;

		spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
		snapshot1 = g_blob;

		/* Create the second one; it becomes a child of snapshot1. */
		CU_ASSERT_EQUAL(_get_snapshots_count(bs), 1);
		spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
		CU_ASSERT_EQUAL(_get_snapshots_count(bs), 2);
		snapshotid = g_blobid;

		spdk_bs_open_blob(bs, snapshotid, blob_op_with_handle_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		SPDK_CU_ASSERT_FATAL(g_blob != NULL);
		snapshot2 = g_blob;
		CU_ASSERT_EQUAL(spdk_blob_get_parent_snapshot(bs, snapshot2->id), snapshot1->id);

		/* Now decouple the second snapshot forcing it to copy the written clusters */
		spdk_bs_blob_decouple_parent(bs, channel, snapshot2->id, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);

		/* Verify that the snapshot has been decoupled and that the clusters have been copied:
		 * each cluster must be allocated (non-zero) and must not alias snapshot1's cluster. */
		CU_ASSERT_EQUAL(spdk_blob_get_parent_snapshot(bs, snapshot2->id), SPDK_BLOBID_INVALID);
		for (cluster = 0; cluster < snapshot2->active.num_clusters; ++cluster) {
			CU_ASSERT_NOT_EQUAL(snapshot2->active.clusters[cluster], 0);
			CU_ASSERT_NOT_EQUAL(snapshot2->active.clusters[cluster],
					    snapshot1->active.clusters[cluster]);
		}

		spdk_bs_free_io_channel(channel);

		if (delete_snapshot_first) {
			ut_blob_close_and_delete(bs, snapshot2);
			ut_blob_close_and_delete(bs, snapshot1);
			ut_blob_close_and_delete(bs, blob);
		} else {
			ut_blob_close_and_delete(bs, blob);
			ut_blob_close_and_delete(bs, snapshot2);
			ut_blob_close_and_delete(bs, snapshot1);
		}
		poll_threads();
	}
}
8002 
static void
blob_seek_io_unit(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_io_channel *channel;
	struct spdk_blob_opts opts;
	uint64_t free_clusters;
	uint8_t payload[10 * BLOCKLEN];
	uint64_t offset;
	uint64_t io_unit, io_units_per_cluster;

	/* Exercise spdk_blob_get_next_allocated_io_unit() and
	 * spdk_blob_get_next_unallocated_io_unit() on a thin-provisioned blob
	 * with a sparse allocation pattern (clusters 0, 2 and 4 written). */
	free_clusters = spdk_bs_free_cluster_count(bs);

	channel = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(channel != NULL);

	/* Set blob as thin provisioned */
	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = true;

	/* Create a blob */
	blob = ut_blob_create_and_open(bs, &opts);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));

	io_units_per_cluster = bs_io_units_per_cluster(blob);

	/* The blob started at 0 clusters. Resize it to be 5, but still unallocated. */
	spdk_blob_resize(blob, 5, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(free_clusters == spdk_bs_free_cluster_count(bs));
	CU_ASSERT(blob->active.num_clusters == 5);

	/* Write at the beginning of first cluster */
	offset = 0;
	spdk_blob_io_write(blob, channel, payload, offset, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* First allocated io_unit is the one just written. */
	io_unit = spdk_blob_get_next_allocated_io_unit(blob, 0);
	CU_ASSERT(io_unit == offset);

	/* The write allocated the whole first cluster, so the first
	 * unallocated io_unit is the start of cluster 1. */
	io_unit = spdk_blob_get_next_unallocated_io_unit(blob, 0);
	CU_ASSERT(io_unit == io_units_per_cluster);

	/* Write in the middle of third cluster */
	offset = 2 * io_units_per_cluster + io_units_per_cluster / 2;
	spdk_blob_io_write(blob, channel, payload, offset, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Allocation is tracked per cluster, so the hit is at the cluster start. */
	io_unit = spdk_blob_get_next_allocated_io_unit(blob, io_units_per_cluster);
	CU_ASSERT(io_unit == 2 * io_units_per_cluster);

	io_unit = spdk_blob_get_next_unallocated_io_unit(blob, 2 * io_units_per_cluster);
	CU_ASSERT(io_unit == 3 * io_units_per_cluster);

	/* Write at the end of last cluster */
	offset = 5 * io_units_per_cluster - 1;
	spdk_blob_io_write(blob, channel, payload, offset, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	io_unit = spdk_blob_get_next_allocated_io_unit(blob, 3 * io_units_per_cluster);
	CU_ASSERT(io_unit == 4 * io_units_per_cluster);

	/* Everything from the last cluster on is allocated: no unallocated unit left. */
	io_unit = spdk_blob_get_next_unallocated_io_unit(blob, 4 * io_units_per_cluster);
	CU_ASSERT(io_unit == UINT64_MAX);

	spdk_bs_free_io_channel(channel);
	poll_threads();

	ut_blob_close_and_delete(bs, blob);
}
8078 
static void
blob_esnap_create(void)
{
	struct spdk_blob_store	*bs = g_bs;
	struct spdk_bs_opts	bs_opts;
	struct ut_esnap_opts	esnap_opts;
	struct spdk_blob_opts	opts;
	struct spdk_blob_open_opts open_opts;
	struct spdk_blob	*blob;
	uint32_t		cluster_sz, block_sz;
	const uint32_t		esnap_num_clusters = 4;
	uint64_t		esnap_num_blocks;
	uint32_t		sz;
	spdk_blob_id		blobid;
	uint32_t		bs_ctx_count, blob_ctx_count;

	/* Test creation, sizing, resizing and reloading of esnap (external
	 * snapshot) clone blobs, plus propagation of the esnap_ctx argument
	 * to the esnap_bs_dev_create callback. */
	cluster_sz = spdk_bs_get_cluster_size(bs);
	block_sz = spdk_bs_get_io_unit_size(bs);
	esnap_num_blocks = cluster_sz * esnap_num_clusters / block_sz;

	/* Create a normal blob and verify it is not an esnap clone. */
	ut_spdk_blob_opts_init(&opts);
	blob = ut_blob_create_and_open(bs, &opts);
	CU_ASSERT(!spdk_blob_is_esnap_clone(blob));
	ut_blob_close_and_delete(bs, blob);

	/* Create an esnap clone blob then verify it is an esnap clone and has the right size */
	ut_spdk_blob_opts_init(&opts);
	ut_esnap_opts_init(block_sz, esnap_num_blocks, __func__, NULL, &esnap_opts);
	opts.esnap_id = &esnap_opts;
	opts.esnap_id_len = sizeof(esnap_opts);
	opts.num_clusters = esnap_num_clusters;
	blob = ut_blob_create_and_open(bs, &opts);
	SPDK_CU_ASSERT_FATAL(blob != NULL);
	SPDK_CU_ASSERT_FATAL(spdk_blob_is_esnap_clone(blob));
	SPDK_CU_ASSERT_FATAL(blob_is_esnap_clone(blob));
	SPDK_CU_ASSERT_FATAL(!spdk_blob_is_clone(blob));
	sz = spdk_blob_get_num_clusters(blob);
	CU_ASSERT(sz == esnap_num_clusters);
	ut_blob_close_and_delete(bs, blob);

	/* Create an esnap clone without the size and verify it can be grown */
	ut_spdk_blob_opts_init(&opts);
	ut_esnap_opts_init(block_sz, esnap_num_blocks, __func__, NULL, &esnap_opts);
	opts.esnap_id = &esnap_opts;
	opts.esnap_id_len = sizeof(esnap_opts);
	blob = ut_blob_create_and_open(bs, &opts);
	SPDK_CU_ASSERT_FATAL(spdk_blob_is_esnap_clone(blob));
	sz = spdk_blob_get_num_clusters(blob);
	CU_ASSERT(sz == 0);
	spdk_blob_resize(blob, 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	sz = spdk_blob_get_num_clusters(blob);
	CU_ASSERT(sz == 1);
	spdk_blob_resize(blob, esnap_num_clusters, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	sz = spdk_blob_get_num_clusters(blob);
	CU_ASSERT(sz == esnap_num_clusters);
	/* Growing past the esnap size is allowed as well. */
	spdk_blob_resize(blob, esnap_num_clusters + 1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	sz = spdk_blob_get_num_clusters(blob);
	CU_ASSERT(sz == esnap_num_clusters + 1);

	/* Reload the blobstore and be sure that the blob can be opened. */
	blobid = spdk_blob_get_id(blob);
	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_blob = NULL;
	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
	bs_opts.esnap_bs_dev_create = ut_esnap_create;
	ut_bs_reload(&bs, &bs_opts);
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;
	SPDK_CU_ASSERT_FATAL(spdk_blob_is_esnap_clone(blob));
	sz = spdk_blob_get_num_clusters(blob);
	CU_ASSERT(sz == esnap_num_clusters + 1);

	/* Reload the blobstore without esnap_bs_dev_create: should fail to open blob. */
	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_blob = NULL;
	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
	ut_bs_reload(&bs, &bs_opts);
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno != 0);
	CU_ASSERT(g_blob == NULL);

	/* Reload the blobstore with ctx set and verify it is passed to the esnap create callback */
	bs_ctx_count = 0;
	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
	bs_opts.esnap_bs_dev_create = ut_esnap_create_with_count;
	bs_opts.esnap_ctx = &bs_ctx_count;
	ut_bs_reload(&bs, &bs_opts);
	/* Loading the blobstore triggers the esnap to be loaded */
	CU_ASSERT(bs_ctx_count == 1);
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	/* Opening the blob also triggers the esnap to be loaded */
	CU_ASSERT(bs_ctx_count == 2);
	blob = g_blob;
	SPDK_CU_ASSERT_FATAL(spdk_blob_is_esnap_clone(blob));
	sz = spdk_blob_get_num_clusters(blob);
	CU_ASSERT(sz == esnap_num_clusters + 1);
	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_blob = NULL;
	/* If open_opts.esnap_ctx is set it is passed to the esnap create callback */
	blob_ctx_count = 0;
	spdk_blob_open_opts_init(&open_opts, sizeof(open_opts));
	open_opts.esnap_ctx = &blob_ctx_count;
	spdk_bs_open_blob_ext(bs, blobid, &open_opts, blob_op_with_handle_complete, NULL);
	poll_threads();
	blob = g_blob;
	CU_ASSERT(bs_ctx_count == 3);
	CU_ASSERT(blob_ctx_count == 1);
	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_blob = NULL;
}
8211 
static void
blob_esnap_clone_reload(void)
{
	struct spdk_blob_store	*bs = g_bs;
	struct spdk_bs_opts	bs_opts;
	struct ut_esnap_opts	esnap_opts;
	struct spdk_blob_opts	opts;
	struct spdk_blob	*eclone1, *snap1, *clone1;
	uint32_t		cluster_sz = spdk_bs_get_cluster_size(bs);
	uint32_t		block_sz = spdk_bs_get_io_unit_size(bs);
	const uint32_t		esnap_num_clusters = 4;
	uint64_t		esnap_num_blocks = cluster_sz * esnap_num_clusters / block_sz;
	spdk_blob_id		eclone1_id, snap1_id, clone1_id;
	struct spdk_io_channel	*bs_ch;
	char			buf[block_sz];
	int			bserr1, bserr2, bserr3, bserr4;
	struct spdk_bs_dev	*dev;

	/* Build a chain esnap <- eclone1 <- snap1 <- clone1, reload the
	 * blobstore, re-open everything, do reads so per-blob channels get
	 * allocated, then unload the way lvstore does (closes and unload all
	 * queued before polling) to exercise the deferred unload path. */

	/* Create and open an esnap clone blob */
	ut_spdk_blob_opts_init(&opts);
	ut_esnap_opts_init(block_sz, esnap_num_blocks, __func__, NULL, &esnap_opts);
	opts.esnap_id = &esnap_opts;
	opts.esnap_id_len = sizeof(esnap_opts);
	opts.num_clusters = esnap_num_clusters;
	eclone1 = ut_blob_create_and_open(bs, &opts);
	CU_ASSERT(eclone1 != NULL);
	CU_ASSERT(spdk_blob_is_esnap_clone(eclone1));
	eclone1_id = eclone1->id;

	/* Create and open a snapshot of eclone1 */
	spdk_bs_create_snapshot(bs, eclone1_id, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	CU_ASSERT(g_bserrno == 0);
	snap1_id = g_blobid;
	spdk_bs_open_blob(bs, snap1_id, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	snap1 = g_blob;

	/* Create and open regular clone of snap1 */
	spdk_bs_create_clone(bs, snap1_id, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	SPDK_CU_ASSERT_FATAL(g_bserrno == 0);
	clone1_id = g_blobid;
	spdk_bs_open_blob(bs, clone1_id, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	clone1 = g_blob;

	/* Close the blobs in preparation for reloading the blobstore */
	spdk_blob_close(clone1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	spdk_blob_close(snap1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	spdk_blob_close(eclone1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_blob = NULL;

	/* Reload the blobstore */
	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
	bs_opts.esnap_bs_dev_create = ut_esnap_create;
	ut_bs_reload(&bs, &bs_opts);

	/* Be sure each of the blobs can be opened */
	spdk_bs_open_blob(bs, eclone1_id, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	eclone1 = g_blob;
	spdk_bs_open_blob(bs, snap1_id, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	snap1 = g_blob;
	spdk_bs_open_blob(bs, clone1_id, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	clone1 = g_blob;

	/* Perform some reads on each of them to cause channels to be allocated */
	bs_ch = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(bs_ch != NULL);
	spdk_blob_io_read(eclone1, bs_ch, buf, 0, 1, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	spdk_blob_io_read(snap1, bs_ch, buf, 0, 1, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	spdk_blob_io_read(clone1, bs_ch, buf, 0, 1, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/*
	 * Unload the blobstore in a way similar to how lvstore unloads it.  This should exercise
	 * the deferred unload path in spdk_bs_unload().
	 */
	bserr1 = 0xbad;
	bserr2 = 0xbad;
	bserr3 = 0xbad;
	bserr4 = 0xbad;
	spdk_blob_close(eclone1, blob_op_complete, &bserr1);
	spdk_blob_close(snap1, blob_op_complete, &bserr2);
	spdk_blob_close(clone1, blob_op_complete, &bserr3);
	spdk_bs_unload(bs, blob_op_complete, &bserr4);
	spdk_bs_free_io_channel(bs_ch);
	poll_threads();
	CU_ASSERT(bserr1 == 0);
	CU_ASSERT(bserr2 == 0);
	CU_ASSERT(bserr3 == 0);
	CU_ASSERT(bserr4 == 0);
	g_blob = NULL;

	/* Reload the blobstore so the suite-level teardown has a loaded bs to clean up. */
	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
	bs_opts.esnap_bs_dev_create = ut_esnap_create;
	dev = init_dev();
	spdk_bs_load(dev, &bs_opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
}
8341 
8342 static bool
8343 blob_esnap_verify_contents(struct spdk_blob *blob, struct spdk_io_channel *ch,
8344 			   uint64_t offset, uint64_t size, uint32_t readsize, const char *how)
8345 {
8346 	const uint32_t	bs_blksz = blob->bs->io_unit_size;
8347 	const uint32_t	esnap_blksz = blob->back_bs_dev ? blob->back_bs_dev->blocklen : bs_blksz;
8348 	const uint32_t	start_blk = offset / bs_blksz;
8349 	const uint32_t	num_blocks = spdk_max(size, readsize) / bs_blksz;
8350 	const uint32_t	blocks_per_read = spdk_min(size, readsize) / bs_blksz;
8351 	uint32_t	blob_block;
8352 	struct iovec	iov;
8353 	uint8_t		buf[spdk_min(size, readsize)];
8354 	bool		block_ok;
8355 
8356 	SPDK_CU_ASSERT_FATAL(offset % bs_blksz == 0);
8357 	SPDK_CU_ASSERT_FATAL(size % bs_blksz == 0);
8358 	SPDK_CU_ASSERT_FATAL(readsize % bs_blksz == 0);
8359 
8360 	memset(buf, 0, readsize);
8361 	iov.iov_base = buf;
8362 	iov.iov_len = readsize;
8363 	for (blob_block = start_blk; blob_block < num_blocks; blob_block += blocks_per_read) {
8364 		if (strcmp(how, "read") == 0) {
8365 			spdk_blob_io_read(blob, ch, buf, blob_block, blocks_per_read,
8366 					  bs_op_complete, NULL);
8367 		} else if (strcmp(how, "readv") == 0) {
8368 			spdk_blob_io_readv(blob, ch, &iov, 1, blob_block, blocks_per_read,
8369 					   bs_op_complete, NULL);
8370 		} else if (strcmp(how, "readv_ext") == 0) {
8371 			/*
8372 			 * This is currently pointless. NULL ext_opts leads to dev->readv(), not
8373 			 * dev->readv_ext().
8374 			 */
8375 			spdk_blob_io_readv_ext(blob, ch, &iov, 1, blob_block, blocks_per_read,
8376 					       bs_op_complete, NULL, NULL);
8377 		} else {
8378 			abort();
8379 		}
8380 		poll_threads();
8381 		CU_ASSERT(g_bserrno == 0);
8382 		if (g_bserrno != 0) {
8383 			return false;
8384 		}
8385 		block_ok = ut_esnap_content_is_correct(buf, blocks_per_read * bs_blksz, blob->id,
8386 						       blob_block * bs_blksz, esnap_blksz);
8387 		CU_ASSERT(block_ok);
8388 		if (!block_ok) {
8389 			return false;
8390 		}
8391 	}
8392 
8393 	return true;
8394 }
8395 
/*
 * Core of the blob_esnap_io_* tests: build a blobstore with io_unit size
 * bs_blksz backed by an esnap device with block size esnap_blksz, verify that
 * whole-esnap and per-block reads see the esnap content, then overwrite the
 * clone one block at a time, checking after each write that the previous
 * block, the current block and the next (still esnap-backed) block all read
 * back with the expected content.
 */
static void
blob_esnap_io_size(uint32_t bs_blksz, uint32_t esnap_blksz)
{
	struct spdk_bs_dev	*dev;
	struct spdk_blob_store	*bs;
	struct spdk_bs_opts	bsopts;
	struct spdk_blob_opts	opts;
	struct ut_esnap_opts	esnap_opts;
	struct spdk_blob	*blob;
	const uint32_t		cluster_sz = 4 * g_phys_blocklen;
	const uint64_t		esnap_num_clusters = 4;
	const uint32_t		esnap_sz = cluster_sz * esnap_num_clusters;
	const uint64_t		esnap_num_blocks = esnap_sz / esnap_blksz;
	const uint64_t		blob_num_blocks = esnap_sz / bs_blksz;
	uint32_t		block;
	struct spdk_io_channel	*bs_ch;

	spdk_bs_opts_init(&bsopts, sizeof(bsopts));
	bsopts.cluster_sz = cluster_sz;
	bsopts.esnap_bs_dev_create = ut_esnap_create;

	/* Create device with desired block size */
	dev = init_dev();
	dev->blocklen = bs_blksz;
	dev->blockcnt = DEV_BUFFER_SIZE / dev->blocklen;

	/* Initialize a new blob store */
	spdk_bs_init(dev, &bsopts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	SPDK_CU_ASSERT_FATAL(g_bs->io_unit_size == bs_blksz);
	bs = g_bs;

	bs_ch = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(bs_ch != NULL);

	/* Create and open the esnap clone  */
	ut_spdk_blob_opts_init(&opts);
	ut_esnap_opts_init(esnap_blksz, esnap_num_blocks, __func__, NULL, &esnap_opts);
	opts.esnap_id = &esnap_opts;
	opts.esnap_id_len = sizeof(esnap_opts);
	opts.num_clusters = esnap_num_clusters;
	blob = ut_blob_create_and_open(bs, &opts);
	SPDK_CU_ASSERT_FATAL(blob != NULL);

	/* Verify that large reads return the content of the esnap device */
	CU_ASSERT(blob_esnap_verify_contents(blob, bs_ch, 0, esnap_sz, esnap_sz, "read"));
	CU_ASSERT(blob_esnap_verify_contents(blob, bs_ch, 0, esnap_sz, esnap_sz, "readv"));
	CU_ASSERT(blob_esnap_verify_contents(blob, bs_ch, 0, esnap_sz, esnap_sz, "readv_ext"));
	/* Verify that small reads return the content of the esnap device */
	CU_ASSERT(blob_esnap_verify_contents(blob, bs_ch, 0, esnap_sz, bs_blksz, "read"));
	CU_ASSERT(blob_esnap_verify_contents(blob, bs_ch, 0, esnap_sz, bs_blksz, "readv"));
	CU_ASSERT(blob_esnap_verify_contents(blob, bs_ch, 0, esnap_sz, bs_blksz, "readv_ext"));

	/* Write one blob block at a time; verify that the surrounding blocks are OK */
	for (block = 0; block < blob_num_blocks; block++) {
		char		buf[bs_blksz];
		union ut_word	word;

		/* Fill the block with a pattern distinguishable from esnap content. */
		word.f.blob_id = 0xfedcba90;
		word.f.lba = block;
		ut_memset8(buf, word.num, bs_blksz);

		spdk_blob_io_write(blob, bs_ch, buf, block, 1, bs_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		if (g_bserrno != 0) {
			break;
		}

		/* Read and verify the block before the current block */
		if (block != 0) {
			spdk_blob_io_read(blob, bs_ch, buf, block - 1, 1, bs_op_complete, NULL);
			poll_threads();
			CU_ASSERT(g_bserrno == 0);
			if (g_bserrno != 0) {
				break;
			}
			CU_ASSERT(ut_esnap_content_is_correct(buf, bs_blksz, word.f.blob_id,
							      (block - 1) * bs_blksz, bs_blksz));
		}

		/* Read and verify the current block */
		spdk_blob_io_read(blob, bs_ch, buf, block, 1, bs_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		if (g_bserrno != 0) {
			break;
		}
		CU_ASSERT(ut_esnap_content_is_correct(buf, bs_blksz, word.f.blob_id,
						      block * bs_blksz, bs_blksz));

		/* Check the block that follows: not yet written, so it should
		 * still show content generated from the esnap device. */
		if (block + 1 < blob_num_blocks) {
			g_bserrno = 0xbad;
			spdk_blob_io_read(blob, bs_ch, buf, block + 1, 1, bs_op_complete, NULL);
			poll_threads();
			CU_ASSERT(g_bserrno == 0);
			if (g_bserrno != 0) {
				break;
			}
			CU_ASSERT(ut_esnap_content_is_correct(buf, bs_blksz, blob->id,
							      (block + 1) * bs_blksz,
							      esnap_blksz));
		}
	}

	/* Clean up */
	spdk_bs_free_io_channel(bs_ch);
	g_bserrno = 0xbad;
	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	spdk_bs_unload(g_bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
	memset(g_dev_buffer, 0, DEV_BUFFER_SIZE);
}
8516 
/* Matching 4 KiB blobstore io_unit and esnap block size. */
static void
blob_esnap_io_4096_4096(void)
{
	blob_esnap_io_size(4096, 4096);
}
8522 
/* Matching 512-byte blobstore io_unit and esnap block size. */
static void
blob_esnap_io_512_512(void)
{
	blob_esnap_io_size(512, 512);
}
8528 
/* Blobstore io_unit (4096) larger than the esnap block size (512). */
static void
blob_esnap_io_4096_512(void)
{
	blob_esnap_io_size(4096, 512);
}
8534 
/*
 * Esnap block size (4096) larger than the blobstore io_unit (512) is not
 * supported: blob creation succeeds, but opening the esnap clone must fail
 * with -EINVAL.  This case does not reuse blob_esnap_io_size() because the
 * open failure prevents any I/O.
 */
static void
blob_esnap_io_512_4096(void)
{
	struct spdk_bs_dev	*dev;
	struct spdk_blob_store	*bs;
	struct spdk_bs_opts	bs_opts;
	struct spdk_blob_opts	blob_opts;
	struct ut_esnap_opts	esnap_opts;
	uint64_t		cluster_sz = 4 * g_phys_blocklen;
	uint32_t		bs_blksz = 512;
	uint32_t		esnap_blksz = BLOCKLEN;
	uint64_t		esnap_num_blocks = 64;
	spdk_blob_id		blobid;

	/* Create device with desired block size */
	dev = init_dev();
	dev->blocklen = bs_blksz;
	dev->blockcnt = DEV_BUFFER_SIZE / dev->blocklen;

	/* Initialize a new blob store */
	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
	bs_opts.cluster_sz = cluster_sz;
	bs_opts.esnap_bs_dev_create = ut_esnap_create;
	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	SPDK_CU_ASSERT_FATAL(g_bs->io_unit_size == bs_blksz);
	bs = g_bs;

	/* Try to create and open the esnap clone. Create should succeed, open should fail. */
	ut_spdk_blob_opts_init(&blob_opts);
	ut_esnap_opts_init(esnap_blksz, esnap_num_blocks, __func__, NULL, &esnap_opts);
	blob_opts.esnap_id = &esnap_opts;
	blob_opts.esnap_id_len = sizeof(esnap_opts);
	blob_opts.num_clusters = esnap_num_blocks * esnap_blksz / bs_blksz;
	spdk_bs_create_blob_ext(bs, &blob_opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);
	CU_ASSERT(g_blob == NULL);

	/* Clean up */
	spdk_bs_unload(bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
	memset(g_dev_buffer, 0, DEV_BUFFER_SIZE);
}
8588 
/*
 * Verify that esnap bs_dev channels are created lazily on first I/O on each
 * thread, torn down when the blobstore channel on that thread is freed, and
 * that closing the blob destroys the esnap device and its remaining channels.
 */
static void
blob_esnap_thread_add_remove(void)
{
	struct spdk_blob_store	*bs = g_bs;
	struct spdk_blob_opts	opts;
	struct ut_esnap_opts	ut_esnap_opts;
	struct spdk_blob	*blob;
	struct ut_esnap_dev	*ut_dev;
	spdk_blob_id		blobid;
	uint64_t		start_thread = g_ut_thread_id;
	bool			destroyed = false;
	struct spdk_io_channel	*ch0, *ch1;
	struct ut_esnap_channel	*ut_ch0, *ut_ch1;
	const uint32_t		blocklen = bs->io_unit_size;
	char			buf[blocklen * 4];

	SPDK_CU_ASSERT_FATAL(g_ut_num_threads > 1);
	set_thread(0);

	/* Create the esnap clone; "destroyed" is flipped by the ut esnap dev's destructor. */
	ut_esnap_opts_init(blocklen, 2048, "add_remove_1", &destroyed, &ut_esnap_opts);
	ut_spdk_blob_opts_init(&opts);
	opts.esnap_id = &ut_esnap_opts;
	opts.esnap_id_len = sizeof(ut_esnap_opts);
	opts.num_clusters = 10;
	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	/* Open the blob. No channels should be allocated yet. */
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;
	ut_dev = (struct ut_esnap_dev *)blob->back_bs_dev;
	CU_ASSERT(ut_dev != NULL);
	CU_ASSERT(ut_dev->num_channels == 0);

	/* Create a channel on thread 0. It is lazily created on the first read. */
	ch0 = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(ch0 != NULL);
	ut_ch0 = ut_esnap_get_io_channel(ch0, blobid);
	CU_ASSERT(ut_ch0 == NULL);
	CU_ASSERT(ut_dev->num_channels == 0);
	spdk_blob_io_read(blob, ch0, buf, 0, 1, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(ut_dev->num_channels == 1);
	ut_ch0 = ut_esnap_get_io_channel(ch0, blobid);
	CU_ASSERT(ut_ch0 != NULL);
	CU_ASSERT(ut_ch0->blocks_read == 1);

	/* Create a channel on thread 1 and verify its lazy creation too. */
	set_thread(1);
	ch1 = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(ch1 != NULL);
	ut_ch1 = ut_esnap_get_io_channel(ch1, blobid);
	CU_ASSERT(ut_ch1 == NULL);
	CU_ASSERT(ut_dev->num_channels == 1);
	spdk_blob_io_read(blob, ch1, buf, 0, 4, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(ut_dev->num_channels == 2);
	ut_ch1 = ut_esnap_get_io_channel(ch1, blobid);
	CU_ASSERT(ut_ch1 != NULL);
	CU_ASSERT(ut_ch1->blocks_read == 4);

	/* Close the channel on thread 0 and verify the bs_dev channel is also gone. */
	set_thread(0);
	spdk_bs_free_io_channel(ch0);
	poll_threads();
	CU_ASSERT(ut_dev->num_channels == 1);

	/* Close the blob. There is no outstanding IO so it should close right away. */
	g_bserrno = 0xbad;
	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(destroyed);

	/* The esnap channel for the blob should be gone now too. */
	ut_ch1 = ut_esnap_get_io_channel(ch1, blobid);
	CU_ASSERT(ut_ch1 == NULL);

	/* Clean up */
	set_thread(1);
	spdk_bs_free_io_channel(ch1);
	set_thread(start_thread);
}
8681 
/* Completion callback for blob_freeze_io(); cb_arg points at a uint32_t
 * counter that is bumped once per successful freeze completion. */
static void
freeze_done(void *cb_arg, int bserrno)
{
	uint32_t *counter = cb_arg;

	CU_ASSERT(bserrno == 0);
	*counter += 1;
}
8690 
/* Completion callback for blob_unfreeze_io(); cb_arg points at a uint32_t
 * counter that is bumped once per successful unfreeze completion. */
static void
unfreeze_done(void *cb_arg, int bserrno)
{
	uint32_t *counter = cb_arg;

	CU_ASSERT(bserrno == 0);
	*counter += 1;
}
8699 
/*
 * Verify blob freeze/unfreeze reference counting: a single freeze/unfreeze
 * pair, then nested pairs.  Completions only fire after poll_threads() runs
 * the underlying for_each_channel(), while frozen_refcnt updates synchronously.
 */
static void
blob_nested_freezes(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob *blob;
	struct spdk_io_channel *channel[2];
	struct spdk_blob_opts opts;
	uint32_t freeze_cnt, unfreeze_cnt;
	int i;

	/* Allocate a channel on each thread so the freeze must traverse both. */
	for (i = 0; i < 2; i++) {
		set_thread(i);
		channel[i] = spdk_bs_alloc_io_channel(bs);
		SPDK_CU_ASSERT_FATAL(channel[i] != NULL);
	}

	set_thread(0);

	ut_spdk_blob_opts_init(&opts);
	blob = ut_blob_create_and_open(bs, &opts);

	/* First just test a single freeze/unfreeze. */
	freeze_cnt = 0;
	unfreeze_cnt = 0;
	CU_ASSERT(blob->frozen_refcnt == 0);
	blob_freeze_io(blob, freeze_done, &freeze_cnt);
	CU_ASSERT(blob->frozen_refcnt == 1);
	CU_ASSERT(freeze_cnt == 0);
	poll_threads();
	CU_ASSERT(freeze_cnt == 1);
	blob_unfreeze_io(blob, unfreeze_done, &unfreeze_cnt);
	CU_ASSERT(blob->frozen_refcnt == 0);
	CU_ASSERT(unfreeze_cnt == 0);
	poll_threads();
	CU_ASSERT(unfreeze_cnt == 1);

	/* Now nest multiple freeze/unfreeze operations.  We should
	 * expect a callback for each operation, but only after
	 * the threads have been polled to ensure a for_each_channel()
	 * was executed.
	 */
	freeze_cnt = 0;
	unfreeze_cnt = 0;
	CU_ASSERT(blob->frozen_refcnt == 0);
	blob_freeze_io(blob, freeze_done, &freeze_cnt);
	CU_ASSERT(blob->frozen_refcnt == 1);
	CU_ASSERT(freeze_cnt == 0);
	blob_freeze_io(blob, freeze_done, &freeze_cnt);
	CU_ASSERT(blob->frozen_refcnt == 2);
	CU_ASSERT(freeze_cnt == 0);
	poll_threads();
	CU_ASSERT(freeze_cnt == 2);
	blob_unfreeze_io(blob, unfreeze_done, &unfreeze_cnt);
	CU_ASSERT(blob->frozen_refcnt == 1);
	CU_ASSERT(unfreeze_cnt == 0);
	blob_unfreeze_io(blob, unfreeze_done, &unfreeze_cnt);
	CU_ASSERT(blob->frozen_refcnt == 0);
	CU_ASSERT(unfreeze_cnt == 0);
	poll_threads();
	CU_ASSERT(unfreeze_cnt == 2);

	for (i = 0; i < 2; i++) {
		set_thread(i);
		spdk_bs_free_io_channel(channel[i]);
	}
	set_thread(0);
	ut_blob_close_and_delete(bs, blob);

	poll_threads();
	g_blob = NULL;
	g_blobid = 0;
}
8772 
8773 static void
8774 blob_ext_md_pages(void)
8775 {
8776 	struct spdk_blob_store *bs;
8777 	struct spdk_bs_dev *dev;
8778 	struct spdk_blob *blob;
8779 	struct spdk_blob_opts opts;
8780 	struct spdk_bs_opts bs_opts;
8781 	uint64_t free_clusters;
8782 
8783 	dev = init_dev();
8784 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
8785 	snprintf(bs_opts.bstype.bstype, sizeof(bs_opts.bstype.bstype), "TESTTYPE");
8786 	/* Issue #2932 was a bug in how we use bs_allocate_cluster() during resize.
8787 	 * It requires num_md_pages that is much smaller than the number of clusters.
8788 	 * Make sure we can create a blob that uses all of the free clusters.
8789 	 */
8790 	bs_opts.cluster_sz = 65536;
8791 	bs_opts.num_md_pages = 16;
8792 
8793 	/* Initialize a new blob store */
8794 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
8795 	poll_threads();
8796 	CU_ASSERT(g_bserrno == 0);
8797 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
8798 	bs = g_bs;
8799 
8800 	free_clusters = spdk_bs_free_cluster_count(bs);
8801 
8802 	ut_spdk_blob_opts_init(&opts);
8803 	opts.num_clusters = free_clusters;
8804 
8805 	blob = ut_blob_create_and_open(bs, &opts);
8806 	spdk_blob_close(blob, blob_op_complete, NULL);
8807 	CU_ASSERT(g_bserrno == 0);
8808 
8809 	spdk_bs_unload(bs, bs_op_complete, NULL);
8810 	poll_threads();
8811 	CU_ASSERT(g_bserrno == 0);
8812 	g_bs = NULL;
8813 }
8814 
static void
blob_esnap_clone_snapshot(void)
{
	/*
	 * When a snapshot is created, the blob that is being snapped becomes
	 * the leaf node (a clone of the snapshot) and the newly created
	 * snapshot sits between the snapped blob and the external snapshot.
	 *
	 * Before creating snap1
	 *
	 *   ,--------.     ,----------.
	 *   |  blob  |     |  vbdev   |
	 *   | blob1  |<----| nvme1n42 |
	 *   |  (rw)  |     |   (ro)   |
	 *   `--------'     `----------'
	 *       Figure 1
	 *
	 * After creating snap1
	 *
	 *   ,--------.     ,--------.     ,----------.
	 *   |  blob  |     |  blob  |     |  vbdev   |
	 *   | blob1  |<----| snap1  |<----| nvme1n42 |
	 *   |  (rw)  |     |  (ro)  |     |   (ro)   |
	 *   `--------'     `--------'     `----------'
	 *       Figure 2
	 *
	 * Starting from Figure 2, if snap1 is removed, the chain reverts to
	 * what it looks like in Figure 1.
	 *
	 * Starting from Figure 2, if blob1 is removed, the chain becomes:
	 *
	 *   ,--------.     ,----------.
	 *   |  blob  |     |  vbdev   |
	 *   | snap1  |<----| nvme1n42 |
	 *   |  (ro)  |     |   (ro)   |
	 *   `--------'     `----------'
	 *       Figure 3
	 *
	 * In each case, the blob pointed to by the nvme vbdev is considered
	 * the "esnap clone".  The esnap clone must have:
	 *
	 *   - XATTR_INTERNAL for BLOB_EXTERNAL_SNAPSHOT_ID (e.g. name or UUID)
	 *   - blob->invalid_flags must contain SPDK_BLOB_EXTERNAL_SNAPSHOT
	 *   - blob->parent_id must be SPDK_BLOBID_EXTERNAL_SNAPSHOT.
	 *
	 * No other blob that descends from the esnap clone may have any of
	 * those set.
	 */
	struct spdk_blob_store	*bs = g_bs;
	const uint32_t		blocklen = bs->io_unit_size;
	struct spdk_blob_opts	opts;
	struct ut_esnap_opts	esnap_opts;
	struct spdk_blob	*blob, *snap_blob;
	spdk_blob_id		blobid, snap_blobid;
	bool			destroyed = false;	/* flipped by the ut_esnap device on destroy */

	/* Create the esnap clone (Figure 1) */
	ut_esnap_opts_init(blocklen, 2048, __func__, &destroyed, &esnap_opts);
	ut_spdk_blob_opts_init(&opts);
	opts.esnap_id = &esnap_opts;
	opts.esnap_id_len = sizeof(esnap_opts);
	opts.num_clusters = 10;
	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	/* Open the blob. */
	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blob != NULL);
	blob = g_blob;
	UT_ASSERT_IS_ESNAP_CLONE(blob, &esnap_opts, sizeof(esnap_opts));

	/*
	 * Create a snapshot of the blob. The snapshot becomes the esnap clone.
	 * (Figure 2: the esnap clone role moves from blob to snap_blob.)
	 */
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snap_blobid = g_blobid;

	spdk_bs_open_blob(bs, snap_blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snap_blob = g_blob;

	UT_ASSERT_IS_NOT_ESNAP_CLONE(blob);
	UT_ASSERT_IS_ESNAP_CLONE(snap_blob, &esnap_opts, sizeof(esnap_opts));

	/*
	 * Delete the snapshot.  The original blob becomes the esnap clone.
	 * (Back to Figure 1.)
	 */
	ut_blob_close_and_delete(bs, snap_blob);
	snap_blob = NULL;
	snap_blobid = SPDK_BLOBID_INVALID;
	UT_ASSERT_IS_ESNAP_CLONE(blob, &esnap_opts, sizeof(esnap_opts));

	/*
	 * Create the snapshot again, then delete the original blob.  The
	 * snapshot should survive as the esnap clone.  (Figure 3.)
	 */
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	snap_blobid = g_blobid;

	spdk_bs_open_blob(bs, snap_blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	snap_blob = g_blob;

	UT_ASSERT_IS_NOT_ESNAP_CLONE(blob);
	UT_ASSERT_IS_ESNAP_CLONE(snap_blob, &esnap_opts, sizeof(esnap_opts));

	ut_blob_close_and_delete(bs, blob);
	blob = NULL;
	blobid = SPDK_BLOBID_INVALID;
	UT_ASSERT_IS_ESNAP_CLONE(snap_blob, &esnap_opts, sizeof(esnap_opts));

	/*
	 * Clone the snapshot.  The snapshot continues to be the esnap clone.
	 */
	spdk_bs_create_clone(bs, snap_blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
	blobid = g_blobid;

	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
	blob = g_blob;

	UT_ASSERT_IS_NOT_ESNAP_CLONE(blob);
	UT_ASSERT_IS_ESNAP_CLONE(snap_blob, &esnap_opts, sizeof(esnap_opts));

	/*
	 * Delete the snapshot. The clone becomes the esnap clone.
	 */
	ut_blob_close_and_delete(bs, snap_blob);
	snap_blob = NULL;
	snap_blobid = SPDK_BLOBID_INVALID;
	UT_ASSERT_IS_ESNAP_CLONE(blob, &esnap_opts, sizeof(esnap_opts));

	/*
	 * Clean up
	 */
	ut_blob_close_and_delete(bs, blob);
}
8972 
8973 static uint64_t
8974 _blob_esnap_clone_hydrate(bool inflate)
8975 {
8976 	struct spdk_blob_store	*bs = g_bs;
8977 	struct spdk_blob_opts	opts;
8978 	struct ut_esnap_opts	esnap_opts;
8979 	struct spdk_blob	*blob;
8980 	spdk_blob_id		blobid;
8981 	struct spdk_io_channel *channel;
8982 	bool			destroyed = false;
8983 	const uint32_t		blocklen = spdk_bs_get_io_unit_size(bs);
8984 	const uint32_t		cluster_sz = spdk_bs_get_cluster_size(bs);
8985 	const uint64_t		esnap_num_clusters = 4;
8986 	const uint32_t		esnap_sz = cluster_sz * esnap_num_clusters;
8987 	const uint64_t		esnap_num_blocks = esnap_sz / blocklen;
8988 	uint64_t		num_failures = CU_get_number_of_failures();
8989 
8990 	channel = spdk_bs_alloc_io_channel(bs);
8991 	SPDK_CU_ASSERT_FATAL(channel != NULL);
8992 
8993 	/* Create the esnap clone */
8994 	ut_spdk_blob_opts_init(&opts);
8995 	ut_esnap_opts_init(blocklen, esnap_num_blocks, __func__, &destroyed, &esnap_opts);
8996 	opts.esnap_id = &esnap_opts;
8997 	opts.esnap_id_len = sizeof(esnap_opts);
8998 	opts.num_clusters = esnap_num_clusters;
8999 	spdk_bs_create_blob_ext(bs, &opts, blob_op_with_id_complete, NULL);
9000 	poll_threads();
9001 	CU_ASSERT(g_bserrno == 0);
9002 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
9003 	blobid = g_blobid;
9004 
9005 	/* Open the esnap clone */
9006 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
9007 	poll_threads();
9008 	CU_ASSERT(g_bserrno == 0);
9009 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
9010 	blob = g_blob;
9011 	UT_ASSERT_IS_ESNAP_CLONE(blob, &esnap_opts, sizeof(esnap_opts));
9012 
9013 	/*
9014 	 * Inflate or decouple  the blob then verify that it is no longer an esnap clone and has
9015 	 * right content
9016 	 */
9017 	if (inflate) {
9018 		spdk_bs_inflate_blob(bs, channel, blobid, blob_op_complete, NULL);
9019 	} else {
9020 		spdk_bs_blob_decouple_parent(bs, channel, blobid, blob_op_complete, NULL);
9021 	}
9022 	poll_threads();
9023 	CU_ASSERT(g_bserrno == 0);
9024 	UT_ASSERT_IS_NOT_ESNAP_CLONE(blob);
9025 	CU_ASSERT(blob_esnap_verify_contents(blob, channel, 0, esnap_sz, esnap_sz, "read"));
9026 	ut_blob_close_and_delete(bs, blob);
9027 
9028 	/*
9029 	 * Clean up
9030 	 */
9031 	spdk_bs_free_io_channel(channel);
9032 	poll_threads();
9033 
9034 	/* Return number of new failures */
9035 	return CU_get_number_of_failures() - num_failures;
9036 }
9037 
9038 static void
9039 blob_esnap_clone_inflate(void)
9040 {
9041 	_blob_esnap_clone_hydrate(true);
9042 }
9043 
9044 static void
9045 blob_esnap_clone_decouple(void)
9046 {
9047 	_blob_esnap_clone_hydrate(false);
9048 }
9049 
/*
 * Verify that the external snapshot device backing an esnap clone can be
 * replaced at runtime with spdk_blob_set_esnap_bs_dev(), both before and
 * after I/O channels exist on multiple threads, and that the replaced
 * device is destroyed while the new one is not.
 */
static void
blob_esnap_hotplug(void)
{
	struct spdk_blob_store	*bs = g_bs;
	struct ut_esnap_opts	esnap1_opts, esnap2_opts;
	struct spdk_blob_opts	opts;
	struct spdk_blob	*blob;
	struct spdk_bs_dev	*bs_dev;
	struct ut_esnap_dev	*esnap_dev;
	uint32_t		cluster_sz = spdk_bs_get_cluster_size(bs);
	uint32_t		block_sz = spdk_bs_get_io_unit_size(bs);
	const uint32_t		esnap_num_clusters = 4;
	uint64_t		esnap_num_blocks = cluster_sz * esnap_num_clusters / block_sz;
	bool			destroyed1 = false, destroyed2 = false;	/* set when each ut_esnap dev is destroyed */
	uint64_t		start_thread = g_ut_thread_id;
	struct spdk_io_channel	*ch0, *ch1;
	char			buf[block_sz];

	/* Create and open an esnap clone blob */
	ut_spdk_blob_opts_init(&opts);
	ut_esnap_opts_init(block_sz, esnap_num_blocks, "esnap1", &destroyed1, &esnap1_opts);
	opts.esnap_id = &esnap1_opts;
	opts.esnap_id_len = sizeof(esnap1_opts);
	opts.num_clusters = esnap_num_clusters;
	blob = ut_blob_create_and_open(bs, &opts);
	CU_ASSERT(blob != NULL);
	CU_ASSERT(spdk_blob_is_esnap_clone(blob));
	SPDK_CU_ASSERT_FATAL(blob->back_bs_dev != NULL);
	esnap_dev = (struct ut_esnap_dev *)blob->back_bs_dev;
	CU_ASSERT(strcmp(esnap_dev->ut_opts.name, "esnap1") == 0);

	/* Replace the external snapshot: esnap1 must be destroyed, esnap2 kept */
	ut_esnap_opts_init(block_sz, esnap_num_blocks, "esnap2", &destroyed2, &esnap2_opts);
	bs_dev = ut_esnap_dev_alloc(&esnap2_opts);
	CU_ASSERT(!destroyed1);
	CU_ASSERT(!destroyed2);
	g_bserrno = 0xbad;
	spdk_blob_set_esnap_bs_dev(blob, bs_dev, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(destroyed1);
	CU_ASSERT(!destroyed2);
	SPDK_CU_ASSERT_FATAL(bs_dev == blob->back_bs_dev);
	SPDK_CU_ASSERT_FATAL(bs_dev == spdk_blob_get_esnap_bs_dev(blob));
	esnap_dev = (struct ut_esnap_dev *)blob->back_bs_dev;
	CU_ASSERT(strcmp(esnap_dev->ut_opts.name, "esnap2") == 0);

	/* Create a couple channels on two threads; a read on each forces the
	 * back_bs_dev to create a per-thread channel. */
	set_thread(0);
	ch0 = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(ch0 != NULL);
	spdk_blob_io_read(blob, ch0, buf, 0, 1, bs_op_complete, NULL);
	set_thread(1);
	ch1 = spdk_bs_alloc_io_channel(bs);
	CU_ASSERT(ch1 != NULL);
	spdk_blob_io_read(blob, ch1, buf, 0, 1, bs_op_complete, NULL);
	set_thread(start_thread);
	poll_threads();
	CU_ASSERT(esnap_dev->num_channels == 2);

	/* Replace the external snapshot again, now with live channels on two
	 * threads; esnap2 must be destroyed and esnap1a installed. */
	ut_esnap_opts_init(block_sz, esnap_num_blocks, "esnap1a", &destroyed1, &esnap1_opts);
	bs_dev = ut_esnap_dev_alloc(&esnap1_opts);
	destroyed1 = destroyed2 = false;
	g_bserrno = 0xbad;
	spdk_blob_set_esnap_bs_dev(blob, bs_dev, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(!destroyed1);
	CU_ASSERT(destroyed2);
	SPDK_CU_ASSERT_FATAL(blob->back_bs_dev != NULL);
	esnap_dev = (struct ut_esnap_dev *)blob->back_bs_dev;
	CU_ASSERT(strcmp(esnap_dev->ut_opts.name, "esnap1a") == 0);

	/* Clean up */
	set_thread(0);
	spdk_bs_free_io_channel(ch0);
	set_thread(1);
	spdk_bs_free_io_channel(ch1);
	set_thread(start_thread);
	g_bserrno = 0xbad;
	spdk_blob_close(blob, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
}
9135 
9136 static bool g_blob_is_degraded;
9137 static int g_blob_is_degraded_called;
9138 
9139 static bool
9140 _blob_is_degraded(struct spdk_bs_dev *dev)
9141 {
9142 	g_blob_is_degraded_called++;
9143 	return g_blob_is_degraded;
9144 }
9145 
/*
 * Exercise spdk_blob_is_degraded() for every combination of:
 *   - back_bs_dev absent / present without is_degraded / present with it
 *   - blobstore device with and without an is_degraded callback
 * g_blob_is_degraded_called counts how many is_degraded callbacks fired,
 * which reveals whether the right devices were consulted.
 */
static void
blob_is_degraded(void)
{
	struct spdk_bs_dev bs_is_degraded_null = { 0 };
	struct spdk_bs_dev bs_is_degraded = { .is_degraded = _blob_is_degraded };

	/* No back_bs_dev, no bs->dev->is_degraded: nothing to ask, not degraded */
	g_blob_is_degraded_called = 0;
	CU_ASSERT(!spdk_blob_is_degraded(g_blob));
	CU_ASSERT(g_blob_is_degraded_called == 0);

	/* No back_bs_dev, blobstore device degraded */
	g_bs->dev->is_degraded = _blob_is_degraded;
	g_blob_is_degraded_called = 0;
	g_blob_is_degraded = true;
	CU_ASSERT(spdk_blob_is_degraded(g_blob));
	CU_ASSERT(g_blob_is_degraded_called == 1);

	/* No back_bs_dev, blobstore device not degraded */
	g_bs->dev->is_degraded = _blob_is_degraded;
	g_blob_is_degraded_called = 0;
	g_blob_is_degraded = false;
	CU_ASSERT(!spdk_blob_is_degraded(g_blob));
	CU_ASSERT(g_blob_is_degraded_called == 1);

	/* back_bs_dev does not define is_degraded, no bs->dev->is_degraded:
	 * no callback exists anywhere, so none is invoked */
	g_bs->dev->is_degraded = NULL;
	g_blob->back_bs_dev = &bs_is_degraded_null;
	g_blob_is_degraded_called = 0;
	g_blob_is_degraded = false;
	CU_ASSERT(!spdk_blob_is_degraded(g_blob));
	CU_ASSERT(g_blob_is_degraded_called == 0);

	/* back_bs_dev is not degraded, no bs->dev->is_degraded */
	g_bs->dev->is_degraded = NULL;
	g_blob->back_bs_dev = &bs_is_degraded;
	g_blob_is_degraded_called = 0;
	g_blob_is_degraded = false;
	CU_ASSERT(!spdk_blob_is_degraded(g_blob));
	CU_ASSERT(g_blob_is_degraded_called == 1);

	/* back_bs_dev is degraded, no bs->dev->is_degraded */
	g_bs->dev->is_degraded = NULL;
	g_blob->back_bs_dev = &bs_is_degraded;
	g_blob_is_degraded_called = 0;
	g_blob_is_degraded = true;
	CU_ASSERT(spdk_blob_is_degraded(g_blob));
	CU_ASSERT(g_blob_is_degraded_called == 1);

	/* back_bs_dev is not degraded, blobstore device is not degraded:
	 * both callbacks are consulted, hence the count of 2 */
	g_bs->dev->is_degraded = _blob_is_degraded;
	g_blob->back_bs_dev = &bs_is_degraded;
	g_blob_is_degraded_called = 0;
	g_blob_is_degraded = false;
	CU_ASSERT(!spdk_blob_is_degraded(g_blob));
	CU_ASSERT(g_blob_is_degraded_called == 2);

	/* Detach the stack-allocated back_bs_dev before leaving the test */
	g_blob->back_bs_dev = NULL;
}
9205 
9206 /* Resize a blob which is a clone created from snapshot. Verify read/writes to
9207  * expanded clone blob. Then inflate the clone blob. */
9208 static void
9209 blob_clone_resize(void)
9210 {
9211 	struct spdk_blob_store *bs = g_bs;
9212 	struct spdk_blob_opts opts;
9213 	struct spdk_blob *blob, *clone, *snap_blob, *snap_blob_rsz;
9214 	spdk_blob_id blobid, cloneid, snapid1, snapid2;
9215 	uint64_t pages_per_cluster;
9216 	uint8_t payload_read[bs->dev->blocklen];
9217 	uint8_t payload_write[bs->dev->blocklen];
9218 	struct spdk_io_channel *channel;
9219 	uint64_t free_clusters;
9220 
9221 	channel = spdk_bs_alloc_io_channel(bs);
9222 	SPDK_CU_ASSERT_FATAL(channel != NULL);
9223 
9224 	pages_per_cluster = spdk_bs_get_cluster_size(bs) / spdk_bs_get_page_size(bs);
9225 
9226 	/* Create blob with 10 clusters */
9227 	ut_spdk_blob_opts_init(&opts);
9228 	opts.num_clusters = 10;
9229 
9230 	blob = ut_blob_create_and_open(bs, &opts);
9231 	blobid = spdk_blob_get_id(blob);
9232 	CU_ASSERT(spdk_blob_get_num_clusters(blob) == 10);
9233 
9234 	/* Create snapshot */
9235 	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
9236 	poll_threads();
9237 	CU_ASSERT(g_bserrno == 0);
9238 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
9239 	snapid1 = g_blobid;
9240 
9241 	spdk_bs_create_clone(bs, snapid1, NULL, blob_op_with_id_complete, NULL);
9242 	poll_threads();
9243 	CU_ASSERT(g_bserrno == 0);
9244 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
9245 	cloneid = g_blobid;
9246 
9247 	spdk_bs_open_blob(bs, cloneid, blob_op_with_handle_complete, NULL);
9248 	poll_threads();
9249 	CU_ASSERT(g_bserrno == 0);
9250 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
9251 	clone = g_blob;
9252 	CU_ASSERT(spdk_blob_get_num_clusters(clone) == 10);
9253 
9254 	g_bserrno = -1;
9255 	spdk_blob_resize(clone, 20, blob_op_complete, NULL);
9256 	poll_threads();
9257 	CU_ASSERT(g_bserrno == 0);
9258 	CU_ASSERT(spdk_blob_get_num_clusters(clone) == 20);
9259 
9260 	/* Create another snapshot after resizing the clone */
9261 	spdk_bs_create_snapshot(bs, cloneid, NULL, blob_op_with_id_complete, NULL);
9262 	poll_threads();
9263 	CU_ASSERT(g_bserrno == 0);
9264 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
9265 	snapid2 = g_blobid;
9266 
9267 	/* Open the snapshot blobs */
9268 	spdk_bs_open_blob(bs, snapid1, blob_op_with_handle_complete, NULL);
9269 	CU_ASSERT(g_bserrno == 0);
9270 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
9271 	snap_blob = g_blob;
9272 	CU_ASSERT(snap_blob->data_ro == true);
9273 	CU_ASSERT(snap_blob->md_ro == true);
9274 	CU_ASSERT(spdk_blob_get_num_clusters(snap_blob) == 10);
9275 
9276 	spdk_bs_open_blob(bs, snapid2, blob_op_with_handle_complete, NULL);
9277 	CU_ASSERT(g_bserrno == 0);
9278 	SPDK_CU_ASSERT_FATAL(g_blob != NULL);
9279 	snap_blob_rsz = g_blob;
9280 	CU_ASSERT(snap_blob_rsz->data_ro == true);
9281 	CU_ASSERT(snap_blob_rsz->md_ro == true);
9282 	CU_ASSERT(spdk_blob_get_num_clusters(snap_blob_rsz) == 20);
9283 
9284 	/* Confirm that clone is backed by snap_blob_rsz, and snap_blob_rsz is backed by snap_blob */
9285 	SPDK_CU_ASSERT_FATAL(snap_blob->back_bs_dev == NULL);
9286 	SPDK_CU_ASSERT_FATAL(blob->back_bs_dev != NULL);
9287 	SPDK_CU_ASSERT_FATAL(snap_blob_rsz->back_bs_dev != NULL);
9288 
9289 	/* Write and read from pre-resize ranges */
9290 	g_bserrno = -1;
9291 	memset(payload_write, 0xE5, sizeof(payload_write));
9292 	spdk_blob_io_write(clone, channel, payload_write, 5 * pages_per_cluster, 1, blob_op_complete, NULL);
9293 	poll_threads();
9294 	CU_ASSERT(g_bserrno == 0);
9295 
9296 	g_bserrno = -1;
9297 	memset(payload_read, 0x00, sizeof(payload_read));
9298 	spdk_blob_io_read(clone, channel, payload_read, 5 * pages_per_cluster, 1, blob_op_complete, NULL);
9299 	poll_threads();
9300 	CU_ASSERT(g_bserrno == 0);
9301 	CU_ASSERT(memcmp(payload_write, payload_read, BLOCKLEN) == 0);
9302 
9303 	/* Write and read from post-resize ranges */
9304 	g_bserrno = -1;
9305 	memset(payload_write, 0xE5, sizeof(payload_write));
9306 	spdk_blob_io_write(clone, channel, payload_write, 15 * pages_per_cluster, 1, blob_op_complete,
9307 			   NULL);
9308 	poll_threads();
9309 	CU_ASSERT(g_bserrno == 0);
9310 
9311 	g_bserrno = -1;
9312 	memset(payload_read, 0x00, sizeof(payload_read));
9313 	spdk_blob_io_read(clone, channel, payload_read, 15 * pages_per_cluster, 1, blob_op_complete, NULL);
9314 	poll_threads();
9315 	CU_ASSERT(g_bserrno == 0);
9316 	CU_ASSERT(memcmp(payload_write, payload_read, bs->dev->blocklen) == 0);
9317 
9318 	/* Now do full blob inflation of the resized blob/clone. */
9319 	free_clusters = spdk_bs_free_cluster_count(bs);
9320 	spdk_bs_inflate_blob(bs, channel, cloneid, blob_op_complete, NULL);
9321 	poll_threads();
9322 	CU_ASSERT(g_bserrno == 0);
9323 	/* We wrote to 2 clusters earlier, all remaining 18 clusters in
9324 	 * blob should get allocated after inflation */
9325 	CU_ASSERT(spdk_bs_free_cluster_count(bs) == free_clusters - 18);
9326 
9327 	spdk_blob_close(clone, blob_op_complete, NULL);
9328 	poll_threads();
9329 	CU_ASSERT(g_bserrno == 0);
9330 
9331 	spdk_blob_close(snap_blob, blob_op_complete, NULL);
9332 	poll_threads();
9333 	CU_ASSERT(g_bserrno == 0);
9334 
9335 	spdk_blob_close(snap_blob_rsz, blob_op_complete, NULL);
9336 	poll_threads();
9337 	CU_ASSERT(g_bserrno == 0);
9338 
9339 	ut_blob_close_and_delete(bs, blob);
9340 
9341 	spdk_bs_free_io_channel(channel);
9342 }
9343 
9344 
/*
 * Resize an esnap clone beyond the size of its external snapshot, then
 * write every block one at a time, verifying after each write that the
 * previous, current, and next blocks hold the expected content.
 */
static void
blob_esnap_clone_resize(void)
{
	struct spdk_bs_dev *dev;
	struct spdk_blob_store *bs;
	struct spdk_bs_opts bsopts;
	struct spdk_blob_opts opts;
	struct ut_esnap_opts esnap_opts;
	struct spdk_blob *blob;
	uint32_t block, esnap_blksz = 512, bs_blksz = 512;
	const uint32_t cluster_sz = 4 * g_phys_blocklen;
	const uint64_t esnap_num_clusters = 4;
	const uint32_t esnap_sz = cluster_sz * esnap_num_clusters;
	const uint64_t esnap_num_blocks = esnap_sz / esnap_blksz;
	uint64_t blob_num_blocks = esnap_sz / bs_blksz;
	struct spdk_io_channel *bs_ch;

	spdk_bs_opts_init(&bsopts, sizeof(bsopts));
	bsopts.cluster_sz = cluster_sz;
	bsopts.esnap_bs_dev_create = ut_esnap_create;
	/* Create device with desired block size */
	dev = init_dev();
	dev->blocklen = bs_blksz;
	dev->blockcnt = DEV_BUFFER_SIZE / dev->blocklen;
	/* Initialize a new blob store */
	spdk_bs_init(dev, &bsopts, bs_op_with_handle_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
	SPDK_CU_ASSERT_FATAL(g_bs->io_unit_size == bs_blksz);
	bs = g_bs;

	bs_ch = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(bs_ch != NULL);

	/* Create and open the esnap clone  */
	ut_spdk_blob_opts_init(&opts);
	ut_esnap_opts_init(esnap_blksz, esnap_num_blocks, __func__, NULL, &esnap_opts);
	opts.esnap_id = &esnap_opts;
	opts.esnap_id_len = sizeof(esnap_opts);
	opts.num_clusters = esnap_num_clusters;
	blob = ut_blob_create_and_open(bs, &opts);
	SPDK_CU_ASSERT_FATAL(blob != NULL);

	/* Double the clone's cluster count; the second half has no backing
	 * esnap data behind it. */
	g_bserrno = -1;
	spdk_blob_resize(blob, esnap_num_clusters * 2, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(spdk_blob_get_num_clusters(blob) == esnap_num_clusters * 2);

	/* Write one blob block at a time; verify that the surrounding blocks are OK */
	blob_num_blocks = (spdk_blob_get_num_clusters(blob) * cluster_sz) / bs_blksz;
	for (block = 0; block < blob_num_blocks; block++) {
		char buf[bs_blksz];
		union ut_word word;
		word.f.blob_id = 0xfedcba90;
		word.f.lba = block;
		ut_memset8(buf, word.num, bs_blksz);
		spdk_blob_io_write(blob, bs_ch, buf, block, 1, bs_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		if (g_bserrno != 0) {
			break;
		}
		/* Read and verify the block before the current block */
		if (block != 0) {
			spdk_blob_io_read(blob, bs_ch, buf, block - 1, 1, bs_op_complete, NULL);
			poll_threads();
			CU_ASSERT(g_bserrno == 0);
			if (g_bserrno != 0) {
				break;
			}
			CU_ASSERT(ut_esnap_content_is_correct(buf, bs_blksz, word.f.blob_id,
							      (block - 1) * bs_blksz, bs_blksz));
		}
		/* Read and verify the current block */
		spdk_blob_io_read(blob, bs_ch, buf, block, 1, bs_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		if (g_bserrno != 0) {
			break;
		}
		CU_ASSERT(ut_esnap_content_is_correct(buf, bs_blksz, word.f.blob_id,
						      block * bs_blksz, bs_blksz));
		/* Check the block that follows: not yet written, so it is
		 * checked against blob->id rather than the written pattern.
		 * NOTE(review): this presumably matches the content the
		 * ut_esnap device generates — confirm against esnap_dev.c. */
		if (block + 1 < blob_num_blocks) {
			g_bserrno = 0xbad;
			spdk_blob_io_read(blob, bs_ch, buf, block + 1, 1, bs_op_complete, NULL);
			poll_threads();
			CU_ASSERT(g_bserrno == 0);
			if (g_bserrno != 0) {
				break;
			}
			CU_ASSERT(ut_esnap_content_is_correct(buf, bs_blksz, blob->id,
							      (block + 1) * bs_blksz,
							      esnap_blksz));
		}
	}
	/* Clean up */
	spdk_bs_free_io_channel(bs_ch);
	g_bserrno = 0xbad;
	spdk_blob_close(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	spdk_bs_unload(g_bs, bs_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	g_bs = NULL;
	memset(g_dev_buffer, 0, DEV_BUFFER_SIZE);
}
9455 
9456 static void
9457 bs_dev_io_complete_cb(struct spdk_io_channel *channel, void *cb_arg, int bserrno)
9458 {
9459 	g_bserrno = bserrno;
9460 }
9461 
/*
 * Exercise spdk_bs_blob_shallow_copy(): error paths (writable blob, target
 * device too small, wrong block length) and the successful copy of a
 * thin-provisioned blob onto an external device, verifying that only the
 * blob's own allocated clusters are copied while the untouched clusters of
 * the target keep their prior content.
 */
static void
blob_shallow_copy(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts blob_opts;
	struct spdk_blob *blob;
	spdk_blob_id blobid;
	uint64_t num_clusters = 4;
	struct spdk_bs_dev *ext_dev;
	struct spdk_bs_dev_cb_args ext_args;
	struct spdk_io_channel *bdev_ch, *blob_ch;
	uint8_t buf1[DEV_BUFFER_BLOCKLEN];
	uint8_t buf2[DEV_BUFFER_BLOCKLEN];
	uint64_t io_units_per_cluster;
	uint64_t offset;
	int rc;

	blob_ch = spdk_bs_alloc_io_channel(bs);
	SPDK_CU_ASSERT_FATAL(blob_ch != NULL);

	/* Set blob dimension and as thin provisioned */
	ut_spdk_blob_opts_init(&blob_opts);
	blob_opts.thin_provision = true;
	blob_opts.num_clusters = num_clusters;

	/* Create a blob */
	blob = ut_blob_create_and_open(bs, &blob_opts);
	SPDK_CU_ASSERT_FATAL(blob != NULL);
	blobid = spdk_blob_get_id(blob);
	io_units_per_cluster = bs_io_units_per_cluster(blob);

	/* Write on cluster 2 and 4 of blob (each io unit filled with its own
	 * offset value, so content is position-dependent) */
	for (offset = io_units_per_cluster; offset < 2 * io_units_per_cluster; offset++) {
		memset(buf1, offset, DEV_BUFFER_BLOCKLEN);
		spdk_blob_io_write(blob, blob_ch, buf1, offset, 1, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
	}
	for (offset = 3 * io_units_per_cluster; offset < 4 * io_units_per_cluster; offset++) {
		memset(buf1, offset, DEV_BUFFER_BLOCKLEN);
		spdk_blob_io_write(blob, blob_ch, buf1, offset, 1, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
	}
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 2);

	/* Make a snapshot over blob; the written clusters move to the snapshot */
	spdk_bs_create_snapshot(bs, blobid, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 0);

	/* Write on cluster 1 and 3 of blob; these are the only clusters owned
	 * by the blob itself, hence the only ones a shallow copy transfers */
	for (offset = 0; offset < io_units_per_cluster; offset++) {
		memset(buf1, offset, DEV_BUFFER_BLOCKLEN);
		spdk_blob_io_write(blob, blob_ch, buf1, offset, 1, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
	}
	for (offset = 2 * io_units_per_cluster; offset < 3 * io_units_per_cluster; offset++) {
		memset(buf1, offset, DEV_BUFFER_BLOCKLEN);
		spdk_blob_io_write(blob, blob_ch, buf1, offset, 1, blob_op_complete, NULL);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
	}
	CU_ASSERT(spdk_blob_get_num_allocated_clusters(blob) == 2);

	/* Shallow copy with a not read only blob: must fail with -EPERM */
	ext_dev = init_ext_dev(num_clusters * 1024 * 1024, DEV_BUFFER_BLOCKLEN);
	rc = spdk_bs_blob_shallow_copy(bs, blob_ch, blobid, ext_dev,
				       blob_shallow_copy_status_cb, NULL,
				       blob_op_complete, NULL);
	CU_ASSERT(rc == 0);
	poll_threads();
	CU_ASSERT(g_bserrno == -EPERM);
	ext_dev->destroy(ext_dev);

	/* Set blob read only */
	spdk_blob_set_read_only(blob);
	spdk_blob_sync_md(blob, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Shallow copy over a spdk_bs_dev with incorrect size */
	ext_dev = init_ext_dev(1, DEV_BUFFER_BLOCKLEN);
	rc = spdk_bs_blob_shallow_copy(bs, blob_ch, blobid, ext_dev,
				       blob_shallow_copy_status_cb, NULL,
				       blob_op_complete, NULL);
	CU_ASSERT(rc == 0);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);
	ext_dev->destroy(ext_dev);

	/* Shallow copy over a spdk_bs_dev with incorrect block len */
	ext_dev = init_ext_dev(num_clusters * 1024 * 1024, DEV_BUFFER_BLOCKLEN * 2);
	rc = spdk_bs_blob_shallow_copy(bs, blob_ch, blobid, ext_dev,
				       blob_shallow_copy_status_cb, NULL,
				       blob_op_complete, NULL);
	CU_ASSERT(rc == 0);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);
	ext_dev->destroy(ext_dev);

	/* Initialize ext_dev for the successful shallow copy, pre-filling it
	 * with 0xff so untouched regions are detectable afterwards */
	ext_dev = init_ext_dev(num_clusters * 1024 * 1024, DEV_BUFFER_BLOCKLEN);
	bdev_ch = ext_dev->create_channel(ext_dev);
	SPDK_CU_ASSERT_FATAL(bdev_ch != NULL);
	ext_args.cb_fn = bs_dev_io_complete_cb;
	for (offset = 0; offset < 4 * io_units_per_cluster; offset++) {
		memset(buf2, 0xff, DEV_BUFFER_BLOCKLEN);
		ext_dev->write(ext_dev, bdev_ch, buf2, offset, 1, &ext_args);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
	}

	/* Correct shallow copy of blob over bdev; step the poller so the
	 * per-cluster status callback progression can be observed */
	rc = spdk_bs_blob_shallow_copy(bs, blob_ch, blobid, ext_dev,
				       blob_shallow_copy_status_cb, NULL,
				       blob_op_complete, NULL);
	CU_ASSERT(rc == 0);
	poll_thread_times(0, 1);
	CU_ASSERT(g_copied_clusters_count == 1);
	poll_thread_times(0, 2);
	CU_ASSERT(g_bserrno == 0);
	CU_ASSERT(g_copied_clusters_count == 2);

	/* Read from bdev */
	/* Only cluster 1 and 3 must be filled */
	/* Clusters 2 and 4 should not have been touched */
	for (offset = 0; offset < io_units_per_cluster; offset++) {
		memset(buf1, offset, DEV_BUFFER_BLOCKLEN);
		ext_dev->read(ext_dev, bdev_ch, buf2, offset, 1, &ext_args);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(memcmp(buf1, buf2, DEV_BUFFER_BLOCKLEN) == 0);
	}
	for (offset = io_units_per_cluster; offset < 2 * io_units_per_cluster; offset++) {
		memset(buf1, 0xff, DEV_BUFFER_BLOCKLEN);
		ext_dev->read(ext_dev, bdev_ch, buf2, offset, 1, &ext_args);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(memcmp(buf1, buf2, DEV_BUFFER_BLOCKLEN) == 0);
	}
	for (offset = 2 * io_units_per_cluster; offset < 3 * io_units_per_cluster; offset++) {
		memset(buf1, offset, DEV_BUFFER_BLOCKLEN);
		ext_dev->read(ext_dev, bdev_ch, buf2, offset, 1, &ext_args);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(memcmp(buf1, buf2, DEV_BUFFER_BLOCKLEN) == 0);
	}
	for (offset = 3 * io_units_per_cluster; offset < 4 * io_units_per_cluster; offset++) {
		memset(buf1, 0xff, DEV_BUFFER_BLOCKLEN);
		ext_dev->read(ext_dev, bdev_ch, buf2, offset, 1, &ext_args);
		poll_threads();
		CU_ASSERT(g_bserrno == 0);
		CU_ASSERT(memcmp(buf1, buf2, DEV_BUFFER_BLOCKLEN) == 0);
	}

	/* Clean up */
	ext_dev->destroy_channel(ext_dev, bdev_ch);
	ext_dev->destroy(ext_dev);
	spdk_bs_free_io_channel(blob_ch);
	ut_blob_close_and_delete(bs, blob);
	poll_threads();
}
9627 
/*
 * Exercise spdk_bs_blob_set_parent(): rejection of invalid arguments
 * (invalid snapshot id, self-parenting, re-parenting to the current parent,
 * a non-snapshot parent, a size mismatch, a thick-provisioned non-clone)
 * and successful re-parenting of a snapshot clone, an esnap clone, and a
 * thin-provisioned blob that has no parent at all.
 */
static void
blob_set_parent(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts opts;
	struct ut_esnap_opts esnap_opts;
	struct spdk_blob *blob1, *blob2, *blob3, *blob4, *blob5;
	spdk_blob_id blobid1, blobid2, blobid3, blobid4, blobid5,
		     snapshotid1, snapshotid2, snapshotid3;
	uint32_t cluster_sz, block_sz;
	const uint32_t esnap_num_clusters = 4;
	uint64_t esnap_num_blocks;
	spdk_blob_id ids[2];
	size_t clone_count = 2;

	cluster_sz = spdk_bs_get_cluster_size(bs);
	block_sz = spdk_bs_get_io_unit_size(bs);
	esnap_num_blocks = cluster_sz * esnap_num_clusters / block_sz;

	/* Create a normal blob and make a couple of snapshots.  After both
	 * snapshots the chain is: snapshotid1 <- snapshotid2 <- blob1. */
	ut_spdk_blob_opts_init(&opts);
	blob1 = ut_blob_create_and_open(bs, &opts);
	SPDK_CU_ASSERT_FATAL(blob1 != NULL);
	blobid1 = spdk_blob_get_id(blob1);
	spdk_bs_create_snapshot(bs, blobid1, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blobid != SPDK_BLOBID_INVALID);
	snapshotid1 = g_blobid;
	spdk_bs_create_snapshot(bs, blobid1, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blobid != SPDK_BLOBID_INVALID);
	snapshotid2 = g_blobid;

	/* Call set_parent with an invalid snapshotid */
	spdk_bs_blob_set_parent(bs, blobid1, SPDK_BLOBID_INVALID, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Call set_parent with blobid and snapshotid the same */
	spdk_bs_blob_set_parent(bs, blobid1, blobid1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Call set_parent with a blob and its parent snapshot: snapshotid2 is
	 * already blob1's parent, so the request fails with -EEXIST. */
	spdk_bs_blob_set_parent(bs, blobid1, snapshotid2, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EEXIST);

	/* Create an esnap clone blob */
	ut_spdk_blob_opts_init(&opts);
	ut_esnap_opts_init(block_sz, esnap_num_blocks, __func__, NULL, &esnap_opts);
	opts.esnap_id = &esnap_opts;
	opts.esnap_id_len = sizeof(esnap_opts);
	opts.num_clusters = esnap_num_clusters;
	blob2 = ut_blob_create_and_open(bs, &opts);
	SPDK_CU_ASSERT_FATAL(blob2 != NULL);
	blobid2 = spdk_blob_get_id(blob2);
	CU_ASSERT(spdk_blob_is_esnap_clone(blob2));

	/* Call set_parent with a non snapshot parent (blob1 is read-write) */
	spdk_bs_blob_set_parent(bs, blobid2, blobid1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Call set_parent with blob and snapshot of different size */
	spdk_bs_blob_set_parent(bs, blobid2, snapshotid1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Call set_parent correctly with a snapshot's clone blob: re-parent
	 * blob1 from snapshotid2 directly onto snapshotid1. */
	spdk_bs_blob_set_parent(bs, blobid1, snapshotid1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Check relations: snapshotid1 now has two clones; snapshotid2 was
	 * registered first, so blob1 is expected at index 1. */
	CU_ASSERT(spdk_blob_is_clone(blob1));
	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid1) == snapshotid1);
	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid1, ids, &clone_count) == 0);
	CU_ASSERT(clone_count == 2);
	CU_ASSERT(ids[1] == blobid1);

	/* Create another normal blob with size equal to esnap size and make a snapshot */
	ut_spdk_blob_opts_init(&opts);
	opts.num_clusters = esnap_num_clusters;
	opts.thin_provision = true;
	blob3 = ut_blob_create_and_open(bs, &opts);
	SPDK_CU_ASSERT_FATAL(blob3 != NULL);
	blobid3 = spdk_blob_get_id(blob3);
	spdk_bs_create_snapshot(bs, blobid3, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blobid != SPDK_BLOBID_INVALID);
	snapshotid3 = g_blobid;

	/* Call set_parent correctly with an esnap's clone blob: blob2 stops
	 * being an esnap clone and becomes a regular clone of snapshotid3. */
	spdk_bs_blob_set_parent(bs, blobid2, snapshotid3, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Check relations: blob3 was snapshotid3's first clone, so blob2 is
	 * expected at index 1. */
	CU_ASSERT(!spdk_blob_is_esnap_clone(blob2));
	CU_ASSERT(spdk_blob_is_clone(blob2));
	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid2) == snapshotid3);
	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid3, ids, &clone_count) == 0);
	CU_ASSERT(clone_count == 2);
	CU_ASSERT(ids[1] == blobid2);

	/* Create a not thin-provisioned blob that is not a clone */
	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = false;
	blob4 = ut_blob_create_and_open(bs, &opts);
	SPDK_CU_ASSERT_FATAL(blob4 != NULL);
	blobid4 = spdk_blob_get_id(blob4);

	/* Call set_parent with a blob that isn't a clone and that isn't thin-provisioned */
	spdk_bs_blob_set_parent(bs, blobid4, snapshotid2, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Create a thin-provisioned blob that is not a clone */
	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = true;
	blob5 = ut_blob_create_and_open(bs, &opts);
	SPDK_CU_ASSERT_FATAL(blob5 != NULL);
	blobid5 = spdk_blob_get_id(blob5);

	/* Call set_parent correctly with a blob that isn't a clone */
	spdk_bs_blob_set_parent(bs, blobid5, snapshotid2, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Check relations */
	CU_ASSERT(spdk_blob_is_clone(blob5));
	CU_ASSERT(spdk_blob_get_parent_snapshot(bs, blobid5) == snapshotid2);
	CU_ASSERT(spdk_blob_get_clones(bs, snapshotid2, ids, &clone_count) == 0);
	CU_ASSERT(clone_count == 1);
	CU_ASSERT(ids[0] == blobid5);

	/* Clean up: delete clones before the snapshots they depend on. */
	ut_blob_close_and_delete(bs, blob5);
	ut_blob_close_and_delete(bs, blob4);
	ut_blob_close_and_delete(bs, blob3);
	ut_blob_close_and_delete(bs, blob2);
	ut_blob_close_and_delete(bs, blob1);
	spdk_bs_delete_blob(bs, snapshotid3, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	spdk_bs_delete_blob(bs, snapshotid2, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
	spdk_bs_delete_blob(bs, snapshotid1, blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
}
9784 
/*
 * Exercise spdk_bs_blob_set_external_parent(): error paths (esnap id equal
 * to the blob id, device-size mismatch, re-parenting to the current external
 * snapshot, thick-provisioned non-clone) and successful re-parenting of both
 * a snapshot's clone and a plain thin-provisioned blob onto esnap devices.
 */
static void
blob_set_external_parent(void)
{
	struct spdk_blob_store *bs = g_bs;
	struct spdk_blob_opts opts;
	struct ut_esnap_opts esnap_opts, esnap_opts2;
	struct spdk_blob *blob1, *blob2, *blob3, *blob4;
	spdk_blob_id blobid1, blobid2, blobid3, blobid4, snapshotid;
	uint32_t cluster_sz, block_sz;
	const uint32_t esnap_num_clusters = 4;
	uint64_t esnap_num_blocks;
	struct spdk_bs_dev *esnap_dev1, *esnap_dev2, *esnap_dev3;
	const void *esnap_id;
	size_t esnap_id_len;
	int rc;

	cluster_sz = spdk_bs_get_cluster_size(bs);
	block_sz = spdk_bs_get_io_unit_size(bs);
	esnap_num_blocks = cluster_sz * esnap_num_clusters / block_sz;
	esnap_dev1 = init_dev();
	esnap_dev2 = init_dev();
	esnap_dev3 = init_dev();

	/* Create an esnap clone blob */
	ut_spdk_blob_opts_init(&opts);
	ut_esnap_opts_init(block_sz, esnap_num_blocks, __func__, NULL, &esnap_opts);
	opts.esnap_id = &esnap_opts;
	opts.esnap_id_len = sizeof(esnap_opts);
	opts.num_clusters = esnap_num_clusters;
	blob1 = ut_blob_create_and_open(bs, &opts);
	SPDK_CU_ASSERT_FATAL(blob1 != NULL);
	blobid1 = spdk_blob_get_id(blob1);
	CU_ASSERT(spdk_blob_is_esnap_clone(blob1));

	/* Call set_external_parent with blobid and esnap id the same.
	 * NOTE(review): no poll_threads() here or in the next case —
	 * presumably argument validation completes the callback
	 * synchronously; verify against spdk_bs_blob_set_external_parent(). */
	spdk_bs_blob_set_external_parent(bs, blobid1, esnap_dev1, &blobid1, sizeof(blobid1),
					 blob_op_complete, NULL);
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Call set_external_parent with esnap of incompatible size */
	esnap_dev1->blockcnt = esnap_num_blocks - 1;
	spdk_bs_blob_set_external_parent(bs, blobid1, esnap_dev1, opts.esnap_id, opts.esnap_id_len,
					 blob_op_complete, NULL);
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Call set_external_parent with a blob and its current parent esnap:
	 * rejected with -EEXIST. */
	esnap_dev1->blocklen = block_sz;
	esnap_dev1->blockcnt = esnap_num_blocks;
	spdk_bs_blob_set_external_parent(bs, blobid1, esnap_dev1, opts.esnap_id, opts.esnap_id_len,
					 blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EEXIST);

	/* Create a blob that is a clone of a snapshot */
	ut_spdk_blob_opts_init(&opts);
	blob2 = ut_blob_create_and_open(bs, &opts);
	SPDK_CU_ASSERT_FATAL(blob2 != NULL);
	blobid2 = spdk_blob_get_id(blob2);
	spdk_bs_create_snapshot(bs, blobid2, NULL, blob_op_with_id_complete, NULL);
	poll_threads();
	SPDK_CU_ASSERT_FATAL(g_bserrno == 0);
	SPDK_CU_ASSERT_FATAL(g_blobid != SPDK_BLOBID_INVALID);
	snapshotid = g_blobid;

	/* Call set_external_parent correctly with a snapshot's clone blob */
	esnap_dev2->blocklen = block_sz;
	esnap_dev2->blockcnt = esnap_num_blocks;
	ut_esnap_opts_init(block_sz, esnap_num_blocks, __func__, NULL, &esnap_opts2);
	spdk_bs_blob_set_external_parent(bs, blobid2, esnap_dev2, &esnap_opts2, sizeof(esnap_opts2),
					 blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Check relations: blob2 is now an esnap clone (no longer a regular
	 * clone) and reports esnap_opts2 as its external snapshot id. */
	rc = spdk_blob_get_esnap_id(blob2, &esnap_id, &esnap_id_len);
	CU_ASSERT(spdk_blob_is_esnap_clone(blob2));
	CU_ASSERT(!spdk_blob_is_clone(blob2));
	CU_ASSERT(rc == 0 && esnap_id_len == sizeof(esnap_opts2) &&
		  memcmp(esnap_id, &esnap_opts2, esnap_id_len) == 0);
	CU_ASSERT(blob2->parent_id == SPDK_BLOBID_EXTERNAL_SNAPSHOT);

	/* Create a not thin-provisioned blob that is not a clone */
	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = false;
	blob3 = ut_blob_create_and_open(bs, &opts);
	SPDK_CU_ASSERT_FATAL(blob3 != NULL);
	blobid3 = spdk_blob_get_id(blob3);

	/* Call set_external_parent with a blob that isn't a clone and that isn't thin-provisioned */
	spdk_bs_blob_set_external_parent(bs, blobid3, esnap_dev1, &esnap_opts, sizeof(esnap_opts),
					 blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == -EINVAL);

	/* Create a thin-provisioned blob that is not a clone */
	ut_spdk_blob_opts_init(&opts);
	opts.thin_provision = true;
	blob4 = ut_blob_create_and_open(bs, &opts);
	SPDK_CU_ASSERT_FATAL(blob4 != NULL);
	blobid4 = spdk_blob_get_id(blob4);

	/* Call set_external_parent correctly with a blob that isn't a clone */
	esnap_dev3->blocklen = block_sz;
	esnap_dev3->blockcnt = esnap_num_blocks;
	ut_esnap_opts_init(block_sz, esnap_num_blocks, __func__, NULL, &esnap_opts);
	spdk_bs_blob_set_external_parent(bs, blobid4, esnap_dev3, &esnap_opts, sizeof(esnap_opts),
					 blob_op_complete, NULL);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);

	/* Check relations */
	rc = spdk_blob_get_esnap_id(blob4, &esnap_id, &esnap_id_len);
	CU_ASSERT(spdk_blob_is_esnap_clone(blob4));
	CU_ASSERT(!spdk_blob_is_clone(blob4));
	CU_ASSERT(rc == 0 && esnap_id_len == sizeof(esnap_opts) &&
		  memcmp(esnap_id, &esnap_opts, esnap_id_len) == 0);
	CU_ASSERT(blob4->parent_id == SPDK_BLOBID_EXTERNAL_SNAPSHOT);

	/* Clean up.  Only esnap_dev1 is destroyed by hand: every set call
	 * using it failed, so it was never adopted by a blob.  NOTE(review):
	 * esnap_dev2/esnap_dev3 are presumably owned (and destroyed) by
	 * blob2/blob4 after the successful calls — verify in blobstore.c. */
	ut_blob_close_and_delete(bs, blob4);
	ut_blob_close_and_delete(bs, blob3);
	ut_blob_close_and_delete(bs, blob2);
	ut_blob_close_and_delete(bs, blob1);
	spdk_bs_delete_blob(bs, snapshotid, blob_op_complete, NULL);
	dev_destroy(esnap_dev1);
	poll_threads();
	CU_ASSERT(g_bserrno == 0);
}
9912 
9913 static void
9914 suite_bs_setup(void)
9915 {
9916 	struct spdk_bs_dev *dev;
9917 
9918 	dev = init_dev();
9919 	memset(g_dev_buffer, 0, DEV_BUFFER_SIZE);
9920 	spdk_bs_init(dev, NULL, bs_op_with_handle_complete, NULL);
9921 	poll_threads();
9922 	CU_ASSERT(g_bserrno == 0);
9923 	CU_ASSERT(g_bs != NULL);
9924 }
9925 
9926 static void
9927 suite_esnap_bs_setup(void)
9928 {
9929 	struct spdk_bs_dev	*dev;
9930 	struct spdk_bs_opts	bs_opts;
9931 
9932 	dev = init_dev();
9933 	memset(g_dev_buffer, 0, DEV_BUFFER_SIZE);
9934 	spdk_bs_opts_init(&bs_opts, sizeof(bs_opts));
9935 	bs_opts.cluster_sz = 4 * g_phys_blocklen;
9936 	bs_opts.esnap_bs_dev_create = ut_esnap_create;
9937 	spdk_bs_init(dev, &bs_opts, bs_op_with_handle_complete, NULL);
9938 	poll_threads();
9939 	CU_ASSERT(g_bserrno == 0);
9940 	SPDK_CU_ASSERT_FATAL(g_bs != NULL);
9941 }
9942 
9943 static void
9944 suite_bs_cleanup(void)
9945 {
9946 	if (g_bs != NULL) {
9947 		spdk_bs_unload(g_bs, bs_op_complete, NULL);
9948 		poll_threads();
9949 		CU_ASSERT(g_bserrno == 0);
9950 		g_bs = NULL;
9951 	}
9952 	memset(g_dev_buffer, 0, DEV_BUFFER_SIZE);
9953 }
9954 
9955 static struct spdk_blob *
9956 ut_blob_create_and_open(struct spdk_blob_store *bs, struct spdk_blob_opts *blob_opts)
9957 {
9958 	struct spdk_blob *blob;
9959 	struct spdk_blob_opts create_blob_opts;
9960 	spdk_blob_id blobid;
9961 
9962 	if (blob_opts == NULL) {
9963 		ut_spdk_blob_opts_init(&create_blob_opts);
9964 		blob_opts = &create_blob_opts;
9965 	}
9966 
9967 	spdk_bs_create_blob_ext(bs, blob_opts, blob_op_with_id_complete, NULL);
9968 	poll_threads();
9969 	CU_ASSERT(g_bserrno == 0);
9970 	CU_ASSERT(g_blobid != SPDK_BLOBID_INVALID);
9971 	blobid = g_blobid;
9972 	g_blobid = -1;
9973 
9974 	spdk_bs_open_blob(bs, blobid, blob_op_with_handle_complete, NULL);
9975 	poll_threads();
9976 	CU_ASSERT(g_bserrno == 0);
9977 	CU_ASSERT(g_blob != NULL);
9978 	blob = g_blob;
9979 
9980 	g_blob = NULL;
9981 	g_bserrno = -1;
9982 
9983 	return blob;
9984 }
9985 
9986 static void
9987 ut_blob_close_and_delete(struct spdk_blob_store *bs, struct spdk_blob *blob)
9988 {
9989 	spdk_blob_id blobid = spdk_blob_get_id(blob);
9990 
9991 	spdk_blob_close(blob, blob_op_complete, NULL);
9992 	poll_threads();
9993 	CU_ASSERT(g_bserrno == 0);
9994 	g_blob = NULL;
9995 
9996 	spdk_bs_delete_blob(bs, blobid, blob_op_complete, NULL);
9997 	poll_threads();
9998 	CU_ASSERT(g_bserrno == 0);
9999 	g_bserrno = -1;
10000 }
10001 
10002 static void
10003 suite_blob_setup(void)
10004 {
10005 	suite_bs_setup();
10006 	CU_ASSERT(g_bs != NULL);
10007 
10008 	g_blob = ut_blob_create_and_open(g_bs, NULL);
10009 	CU_ASSERT(g_blob != NULL);
10010 }
10011 
10012 static void
10013 suite_blob_cleanup(void)
10014 {
10015 	ut_blob_close_and_delete(g_bs, g_blob);
10016 	CU_ASSERT(g_blob == NULL);
10017 
10018 	suite_bs_cleanup();
10019 	CU_ASSERT(g_bs == NULL);
10020 }
10021 
10022 static int
10023 ut_setup_config_nocopy_noextent(void)
10024 {
10025 	g_dev_copy_enabled = false;
10026 	g_use_extent_table = false;
10027 	g_phys_blocklen = 4096;
10028 
10029 	return 0;
10030 }
10031 
10032 static int
10033 ut_setup_config_nocopy_extent(void)
10034 {
10035 	g_dev_copy_enabled = false;
10036 	g_use_extent_table = true;
10037 	g_phys_blocklen = 4096;
10038 
10039 	return 0;
10040 }
10041 
10042 static int
10043 ut_setup_config_copy_noextent(void)
10044 {
10045 	g_dev_copy_enabled = true;
10046 	g_use_extent_table = false;
10047 	g_phys_blocklen = 4096;
10048 
10049 	return 0;
10050 }
10051 
10052 static int
10053 ut_setup_config_copy_extent(void)
10054 {
10055 	g_dev_copy_enabled = true;
10056 	g_use_extent_table = true;
10057 	g_phys_blocklen = 4096;
10058 
10059 	return 0;
10060 }
10061 
/*
 * One entry per global test configuration: the suffix is appended to every
 * suite name and setup_cb is the CUnit suite-init callback that applies the
 * matching globals (g_dev_copy_enabled, g_use_extent_table, g_phys_blocklen).
 */
struct ut_config {
	const char *suffix;		/* suite-name suffix, e.g. "copy_extent" */
	CU_InitializeFunc setup_cb;	/* applies the matching global config */
};
10066 
10067 int
10068 main(int argc, char **argv)
10069 {
10070 	CU_pSuite		suite, suite_bs, suite_blob, suite_esnap_bs;
10071 	unsigned int		i, num_failures;
10072 	char			suite_name[4096];
10073 	struct ut_config	*config;
10074 	struct ut_config	configs[] = {
10075 		{"nocopy_noextent", ut_setup_config_nocopy_noextent},
10076 		{"nocopy_extent", ut_setup_config_nocopy_extent},
10077 		{"copy_noextent", ut_setup_config_copy_noextent},
10078 		{"copy_extent", ut_setup_config_copy_extent},
10079 	};
10080 
10081 	CU_initialize_registry();
10082 
10083 	for (i = 0; i < SPDK_COUNTOF(configs); ++i) {
10084 		config = &configs[i];
10085 
10086 		snprintf(suite_name, sizeof(suite_name), "blob_%s", config->suffix);
10087 		suite = CU_add_suite(suite_name, config->setup_cb, NULL);
10088 
10089 		snprintf(suite_name, sizeof(suite_name), "blob_bs_%s", config->suffix);
10090 		suite_bs = CU_add_suite_with_setup_and_teardown(suite_name, config->setup_cb, NULL,
10091 				suite_bs_setup, suite_bs_cleanup);
10092 
10093 		snprintf(suite_name, sizeof(suite_name), "blob_blob_%s", config->suffix);
10094 		suite_blob = CU_add_suite_with_setup_and_teardown(suite_name, config->setup_cb, NULL,
10095 				suite_blob_setup, suite_blob_cleanup);
10096 
10097 		snprintf(suite_name, sizeof(suite_name), "blob_esnap_bs_%s", config->suffix);
10098 		suite_esnap_bs = CU_add_suite_with_setup_and_teardown(suite_name, config->setup_cb, NULL,
10099 				 suite_esnap_bs_setup,
10100 				 suite_bs_cleanup);
10101 
10102 		CU_ADD_TEST(suite, blob_init);
10103 		CU_ADD_TEST(suite_bs, blob_open);
10104 		CU_ADD_TEST(suite_bs, blob_create);
10105 		CU_ADD_TEST(suite_bs, blob_create_loop);
10106 		CU_ADD_TEST(suite_bs, blob_create_fail);
10107 		CU_ADD_TEST(suite_bs, blob_create_internal);
10108 		CU_ADD_TEST(suite_bs, blob_create_zero_extent);
10109 		CU_ADD_TEST(suite, blob_thin_provision);
10110 		CU_ADD_TEST(suite_bs, blob_snapshot);
10111 		CU_ADD_TEST(suite_bs, blob_clone);
10112 		CU_ADD_TEST(suite_bs, blob_inflate);
10113 		CU_ADD_TEST(suite_bs, blob_delete);
10114 		CU_ADD_TEST(suite_bs, blob_resize_test);
10115 		CU_ADD_TEST(suite_bs, blob_resize_thin_test);
10116 		CU_ADD_TEST(suite, blob_read_only);
10117 		CU_ADD_TEST(suite_bs, channel_ops);
10118 		CU_ADD_TEST(suite_bs, blob_super);
10119 		CU_ADD_TEST(suite_blob, blob_write);
10120 		CU_ADD_TEST(suite_blob, blob_read);
10121 		CU_ADD_TEST(suite_blob, blob_rw_verify);
10122 		CU_ADD_TEST(suite_bs, blob_rw_verify_iov);
10123 		CU_ADD_TEST(suite_blob, blob_rw_verify_iov_nomem);
10124 		CU_ADD_TEST(suite_blob, blob_rw_iov_read_only);
10125 		CU_ADD_TEST(suite_bs, blob_unmap);
10126 		CU_ADD_TEST(suite_bs, blob_iter);
10127 		CU_ADD_TEST(suite_blob, blob_xattr);
10128 		CU_ADD_TEST(suite_bs, blob_parse_md);
10129 		CU_ADD_TEST(suite, bs_load);
10130 		CU_ADD_TEST(suite_bs, bs_load_pending_removal);
10131 		CU_ADD_TEST(suite, bs_load_custom_cluster_size);
10132 		CU_ADD_TEST(suite, bs_load_after_failed_grow);
10133 		CU_ADD_TEST(suite_bs, bs_unload);
10134 		CU_ADD_TEST(suite, bs_cluster_sz);
10135 		CU_ADD_TEST(suite_bs, bs_usable_clusters);
10136 		CU_ADD_TEST(suite, bs_resize_md);
10137 		CU_ADD_TEST(suite, bs_destroy);
10138 		CU_ADD_TEST(suite, bs_type);
10139 		CU_ADD_TEST(suite, bs_super_block);
10140 		CU_ADD_TEST(suite, bs_test_recover_cluster_count);
10141 		CU_ADD_TEST(suite, bs_grow_live);
10142 		CU_ADD_TEST(suite, bs_grow_live_no_space);
10143 		CU_ADD_TEST(suite, bs_test_grow);
10144 		CU_ADD_TEST(suite, blob_serialize_test);
10145 		CU_ADD_TEST(suite_bs, blob_crc);
10146 		CU_ADD_TEST(suite, super_block_crc);
10147 		CU_ADD_TEST(suite_blob, blob_dirty_shutdown);
10148 		CU_ADD_TEST(suite_bs, blob_flags);
10149 		CU_ADD_TEST(suite_bs, bs_version);
10150 		CU_ADD_TEST(suite_bs, blob_set_xattrs_test);
10151 		CU_ADD_TEST(suite_bs, blob_thin_prov_alloc);
10152 		CU_ADD_TEST(suite_bs, blob_insert_cluster_msg_test);
10153 		CU_ADD_TEST(suite_bs, blob_thin_prov_rw);
10154 		CU_ADD_TEST(suite, blob_thin_prov_write_count_io);
10155 		CU_ADD_TEST(suite, blob_thin_prov_unmap_cluster);
10156 		CU_ADD_TEST(suite_bs, blob_thin_prov_rle);
10157 		CU_ADD_TEST(suite_bs, blob_thin_prov_rw_iov);
10158 		CU_ADD_TEST(suite, bs_load_iter_test);
10159 		CU_ADD_TEST(suite_bs, blob_snapshot_rw);
10160 		CU_ADD_TEST(suite_bs, blob_snapshot_rw_iov);
10161 		CU_ADD_TEST(suite, blob_relations);
10162 		CU_ADD_TEST(suite, blob_relations2);
10163 		CU_ADD_TEST(suite, blob_relations3);
10164 		CU_ADD_TEST(suite, blobstore_clean_power_failure);
10165 		CU_ADD_TEST(suite, blob_delete_snapshot_power_failure);
10166 		CU_ADD_TEST(suite, blob_create_snapshot_power_failure);
10167 		CU_ADD_TEST(suite_bs, blob_inflate_rw);
10168 		CU_ADD_TEST(suite_bs, blob_snapshot_freeze_io);
10169 		CU_ADD_TEST(suite_bs, blob_operation_split_rw);
10170 		CU_ADD_TEST(suite_bs, blob_operation_split_rw_iov);
10171 		CU_ADD_TEST(suite, blob_io_unit);
10172 		CU_ADD_TEST(suite, blob_io_unit_compatibility);
10173 		CU_ADD_TEST(suite_bs, blob_simultaneous_operations);
10174 		CU_ADD_TEST(suite_bs, blob_persist_test);
10175 		CU_ADD_TEST(suite_bs, blob_decouple_snapshot);
10176 		CU_ADD_TEST(suite_bs, blob_seek_io_unit);
10177 		CU_ADD_TEST(suite_esnap_bs, blob_esnap_create);
10178 		CU_ADD_TEST(suite_bs, blob_nested_freezes);
10179 		CU_ADD_TEST(suite, blob_ext_md_pages);
10180 		CU_ADD_TEST(suite, blob_esnap_io_4096_4096);
10181 		CU_ADD_TEST(suite, blob_esnap_io_512_512);
10182 		CU_ADD_TEST(suite, blob_esnap_io_4096_512);
10183 		CU_ADD_TEST(suite, blob_esnap_io_512_4096);
10184 		CU_ADD_TEST(suite_esnap_bs, blob_esnap_thread_add_remove);
10185 		CU_ADD_TEST(suite_esnap_bs, blob_esnap_clone_snapshot);
10186 		CU_ADD_TEST(suite_esnap_bs, blob_esnap_clone_inflate);
10187 		CU_ADD_TEST(suite_esnap_bs, blob_esnap_clone_decouple);
10188 		CU_ADD_TEST(suite_esnap_bs, blob_esnap_clone_reload);
10189 		CU_ADD_TEST(suite_esnap_bs, blob_esnap_hotplug);
10190 		CU_ADD_TEST(suite_blob, blob_is_degraded);
10191 		CU_ADD_TEST(suite_bs, blob_clone_resize);
10192 		CU_ADD_TEST(suite, blob_esnap_clone_resize);
10193 		CU_ADD_TEST(suite_bs, blob_shallow_copy);
10194 		CU_ADD_TEST(suite_esnap_bs, blob_set_parent);
10195 		CU_ADD_TEST(suite_esnap_bs, blob_set_external_parent);
10196 	}
10197 
10198 	allocate_threads(2);
10199 	set_thread(0);
10200 
10201 	g_dev_buffer = calloc(1, DEV_BUFFER_SIZE);
10202 
10203 	num_failures = spdk_ut_run_tests(argc, argv, NULL);
10204 
10205 	free(g_dev_buffer);
10206 
10207 	free_threads();
10208 
10209 	return num_failures;
10210 }
10211